author     Rafael Espindola <rafael.espindola@gmail.com>  2010-06-12 20:13:29 +0000
committer  Rafael Espindola <rafael.espindola@gmail.com>  2010-06-12 20:13:29 +0000
commit     a704f94c4decc58ffa7fdb002be37cd3ed72bd80 (patch)
tree       b8b3cfd8e0694cbc4f4c2f72d39c781d925d4054 /lib
parent     7e73fe9ec5f7bd0b99430e8413e3fc93c80e1918 (diff)
Merge getStoreRegOpcode and getLoadRegOpcode.
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@105900 91177308-0d34-0410-b5e6-96231b3b80d8
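The change is a textbook de-duplication: two near-identical if-else chains, differing only in whether they pick the load ("rm") or store ("mr") form of each opcode, collapse into one helper taking a bool, with the old entry points kept as one-line wrappers so no caller has to change. Below is a minimal self-contained sketch of the pattern, using hypothetical enum values rather than LLVM's actual opcode tables:

#include <cstdio>

// Stand-ins for paired X86 opcodes such as X86::MOV32rm / X86::MOV32mr.
enum Opcode : unsigned { MOV32rm, MOV32mr, MOV64rm, MOV64mr };
enum RegClass { GR32, GR64 };

// One chain of paired opcodes replaces two duplicated chains: the bool
// selects the load (rm) or store (mr) member of each pair.
static unsigned getLoadStoreOpcode(RegClass RC, bool load) {
  switch (RC) {
  case GR32: return load ? MOV32rm : MOV32mr;
  case GR64: return load ? MOV64rm : MOV64mr;
  }
  return 0; // unreachable for valid register classes
}

// The old entry points survive as thin wrappers, so call sites are untouched.
static unsigned getStoreOpcode(RegClass RC) { return getLoadStoreOpcode(RC, false); }
static unsigned getLoadOpcode(RegClass RC)  { return getLoadStoreOpcode(RC, true); }

int main() {
  std::printf("GR32: store=%u load=%u\n", getStoreOpcode(GR32), getLoadOpcode(GR32));
}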
Diffstat (limited to 'lib')
-rw-r--r--  lib/Target/X86/X86InstrInfo.cpp  144
1 file changed, 47 insertions(+), 97 deletions(-)
diff --git a/lib/Target/X86/X86InstrInfo.cpp b/lib/Target/X86/X86InstrInfo.cpp
index d91e48d..210e942 100644
--- a/lib/Target/X86/X86InstrInfo.cpp
+++ b/lib/Target/X86/X86InstrInfo.cpp
@@ -2057,71 +2057,87 @@ bool X86InstrInfo::copyRegToReg(MachineBasicBlock &MBB,
return false;
}
-static unsigned getStoreRegOpcode(unsigned SrcReg,
- const TargetRegisterClass *RC,
- bool isStackAligned,
- TargetMachine &TM) {
- unsigned Opc = 0;
+static unsigned getLoadStoreRegOpcode(unsigned Reg,
+ const TargetRegisterClass *RC,
+ bool isStackAligned,
+ const TargetMachine &TM,
+ bool load) {
if (RC == &X86::GR64RegClass || RC == &X86::GR64_NOSPRegClass) {
- Opc = X86::MOV64mr;
+ return load ? X86::MOV64rm : X86::MOV64mr;
} else if (RC == &X86::GR32RegClass || RC == &X86::GR32_NOSPRegClass) {
- Opc = X86::MOV32mr;
+ return load ? X86::MOV32rm : X86::MOV32mr;
} else if (RC == &X86::GR16RegClass) {
- Opc = X86::MOV16mr;
+ return load ? X86::MOV16rm : X86::MOV16mr;
} else if (RC == &X86::GR8RegClass) {
// Copying to or from a physical H register on x86-64 requires a NOREX
// move. Otherwise use a normal move.
- if (isHReg(SrcReg) &&
+ if (isHReg(Reg) &&
TM.getSubtarget<X86Subtarget>().is64Bit())
- Opc = X86::MOV8mr_NOREX;
+ return load ? X86::MOV8rm_NOREX : X86::MOV8mr_NOREX;
else
- Opc = X86::MOV8mr;
+ return load ? X86::MOV8rm : X86::MOV8mr;
} else if (RC == &X86::GR64_ABCDRegClass) {
- Opc = X86::MOV64mr;
+ return load ? X86::MOV64rm : X86::MOV64mr;
} else if (RC == &X86::GR32_ABCDRegClass) {
- Opc = X86::MOV32mr;
+ return load ? X86::MOV32rm : X86::MOV32mr;
} else if (RC == &X86::GR16_ABCDRegClass) {
- Opc = X86::MOV16mr;
+ return load ? X86::MOV16rm : X86::MOV16mr;
} else if (RC == &X86::GR8_ABCD_LRegClass) {
- Opc = X86::MOV8mr;
+ return load ? X86::MOV8rm : X86::MOV8mr;
} else if (RC == &X86::GR8_ABCD_HRegClass) {
if (TM.getSubtarget<X86Subtarget>().is64Bit())
- Opc = X86::MOV8mr_NOREX;
+ return load ? X86::MOV8rm_NOREX : X86::MOV8mr_NOREX;
else
- Opc = X86::MOV8mr;
+ return load ? X86::MOV8rm : X86::MOV8mr;
} else if (RC == &X86::GR64_NOREXRegClass ||
RC == &X86::GR64_NOREX_NOSPRegClass) {
- Opc = X86::MOV64mr;
+ return load ? X86::MOV64rm : X86::MOV64mr;
} else if (RC == &X86::GR32_NOREXRegClass) {
- Opc = X86::MOV32mr;
+ return load ? X86::MOV32rm : X86::MOV32mr;
} else if (RC == &X86::GR16_NOREXRegClass) {
- Opc = X86::MOV16mr;
+ return load ? X86::MOV16rm : X86::MOV16mr;
} else if (RC == &X86::GR8_NOREXRegClass) {
- Opc = X86::MOV8mr;
+ return load ? X86::MOV8rm : X86::MOV8mr;
} else if (RC == &X86::GR64_TCRegClass) {
- Opc = X86::MOV64mr_TC;
+ return load ? X86::MOV64rm_TC : X86::MOV64mr_TC;
} else if (RC == &X86::GR32_TCRegClass) {
- Opc = X86::MOV32mr_TC;
+ return load ? X86::MOV32rm_TC : X86::MOV32mr_TC;
} else if (RC == &X86::RFP80RegClass) {
- Opc = X86::ST_FpP80m; // pops
+ return load ? X86::LD_Fp80m : X86::ST_FpP80m;
} else if (RC == &X86::RFP64RegClass) {
- Opc = X86::ST_Fp64m;
+ return load ? X86::LD_Fp64m : X86::ST_Fp64m;
} else if (RC == &X86::RFP32RegClass) {
- Opc = X86::ST_Fp32m;
+ return load ? X86::LD_Fp32m : X86::ST_Fp32m;
} else if (RC == &X86::FR32RegClass) {
- Opc = X86::MOVSSmr;
+ return load ? X86::MOVSSrm : X86::MOVSSmr;
} else if (RC == &X86::FR64RegClass) {
- Opc = X86::MOVSDmr;
+ return load ? X86::MOVSDrm : X86::MOVSDmr;
} else if (RC == &X86::VR128RegClass) {
// If the stack is realigned we can use aligned loads and stores.
- Opc = isStackAligned ? X86::MOVAPSmr : X86::MOVUPSmr;
+ if (isStackAligned)
+ return load ? X86::MOVAPSrm : X86::MOVAPSmr;
+ else
+ return load ? X86::MOVUPSrm : X86::MOVUPSmr;
} else if (RC == &X86::VR64RegClass) {
- Opc = X86::MMX_MOVQ64mr;
+ return load ? X86::MMX_MOVQ64rm : X86::MMX_MOVQ64mr;
} else {
llvm_unreachable("Unknown regclass");
}
+}
+
+static unsigned getStoreRegOpcode(unsigned SrcReg,
+ const TargetRegisterClass *RC,
+ bool isStackAligned,
+ TargetMachine &TM) {
+ return getLoadStoreRegOpcode(SrcReg, RC, isStackAligned, TM, false);
+}
+
- return Opc;
+static unsigned getLoadRegOpcode(unsigned DestReg,
+ const TargetRegisterClass *RC,
+ bool isStackAligned,
+ const TargetMachine &TM) {
+ return getLoadStoreRegOpcode(DestReg, RC, isStackAligned, TM, true);
}
void X86InstrInfo::storeRegToStackSlot(MachineBasicBlock &MBB,
@@ -2155,72 +2171,6 @@ void X86InstrInfo::storeRegToAddr(MachineFunction &MF, unsigned SrcReg,
NewMIs.push_back(MIB);
}
-static unsigned getLoadRegOpcode(unsigned DestReg,
- const TargetRegisterClass *RC,
- bool isStackAligned,
- const TargetMachine &TM) {
- unsigned Opc = 0;
- if (RC == &X86::GR64RegClass || RC == &X86::GR64_NOSPRegClass) {
- Opc = X86::MOV64rm;
- } else if (RC == &X86::GR32RegClass || RC == &X86::GR32_NOSPRegClass) {
- Opc = X86::MOV32rm;
- } else if (RC == &X86::GR16RegClass) {
- Opc = X86::MOV16rm;
- } else if (RC == &X86::GR8RegClass) {
- // Copying to or from a physical H register on x86-64 requires a NOREX
- // move. Otherwise use a normal move.
- if (isHReg(DestReg) &&
- TM.getSubtarget<X86Subtarget>().is64Bit())
- Opc = X86::MOV8rm_NOREX;
- else
- Opc = X86::MOV8rm;
- } else if (RC == &X86::GR64_ABCDRegClass) {
- Opc = X86::MOV64rm;
- } else if (RC == &X86::GR32_ABCDRegClass) {
- Opc = X86::MOV32rm;
- } else if (RC == &X86::GR16_ABCDRegClass) {
- Opc = X86::MOV16rm;
- } else if (RC == &X86::GR8_ABCD_LRegClass) {
- Opc = X86::MOV8rm;
- } else if (RC == &X86::GR8_ABCD_HRegClass) {
- if (TM.getSubtarget<X86Subtarget>().is64Bit())
- Opc = X86::MOV8rm_NOREX;
- else
- Opc = X86::MOV8rm;
- } else if (RC == &X86::GR64_NOREXRegClass ||
- RC == &X86::GR64_NOREX_NOSPRegClass) {
- Opc = X86::MOV64rm;
- } else if (RC == &X86::GR32_NOREXRegClass) {
- Opc = X86::MOV32rm;
- } else if (RC == &X86::GR16_NOREXRegClass) {
- Opc = X86::MOV16rm;
- } else if (RC == &X86::GR8_NOREXRegClass) {
- Opc = X86::MOV8rm;
- } else if (RC == &X86::GR64_TCRegClass) {
- Opc = X86::MOV64rm_TC;
- } else if (RC == &X86::GR32_TCRegClass) {
- Opc = X86::MOV32rm_TC;
- } else if (RC == &X86::RFP80RegClass) {
- Opc = X86::LD_Fp80m;
- } else if (RC == &X86::RFP64RegClass) {
- Opc = X86::LD_Fp64m;
- } else if (RC == &X86::RFP32RegClass) {
- Opc = X86::LD_Fp32m;
- } else if (RC == &X86::FR32RegClass) {
- Opc = X86::MOVSSrm;
- } else if (RC == &X86::FR64RegClass) {
- Opc = X86::MOVSDrm;
- } else if (RC == &X86::VR128RegClass) {
- // If stack is realigned we can use aligned loads.
- Opc = isStackAligned ? X86::MOVAPSrm : X86::MOVUPSrm;
- } else if (RC == &X86::VR64RegClass) {
- Opc = X86::MMX_MOVQ64rm;
- } else {
- llvm_unreachable("Unknown regclass");
- }
-
- return Opc;
-}
void X86InstrInfo::loadRegFromStackSlot(MachineBasicBlock &MBB,
MachineBasicBlock::iterator MI,