| author | Richard Sandiford <rsandifo@linux.vnet.ibm.com> | 2013-09-25 11:11:53 +0000 |
|---|---|---|
| committer | Richard Sandiford <rsandifo@linux.vnet.ibm.com> | 2013-09-25 11:11:53 +0000 |
| commit | 259a6006e89576704e52e7392ef2bfd83f277ce3 (patch) | |
| tree | e5b9fd1bb8aea7946a99c2fa863e196fdf20c660 /lib/Target/SystemZ | |
| parent | 3f22cc1df64a6dd6a3ecc5e7e261f15af083f806 (diff) | |
[SystemZ] Define the GR64 low-word logic instructions as pseudo aliases.
Another patch to avoid duplicating encoding information. Instructions such as
NILF, NILL and NILH are used as both 32-bit and 64-bit operations; here the
64-bit versions are defined as pseudo aliases of the 32-bit ones, so only the
32-bit definitions carry an encoding.
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@191369 91177308-0d34-0410-b5e6-96231b3b80d8
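In outline (a condensed sketch drawn from the patch below, using the TableGen class and instruction names it introduces): the GR32 instruction keeps the real encoding, the GR64 variant becomes an encoding-free pseudo alias with the same operand pattern, and SystemZAsmPrinter later rewrites the pseudo back to the 32-bit opcode on the low GR32 halves of its registers. For the NILL case:

    // GR32 form carries the real encoding (from SystemZInstrInfo.td below).
    def NILL : BinaryRI<"nill", 0xA57, and, GR32, imm32ll16c>;

    // GR64 form is a pseudo alias with no encoding of its own; the asm
    // printer lowers NILL64 to NILL on the low GR32 registers.
    def NILL64 : BinaryAliasRI<and, GR64, imm64ll16c>;

    // The alias class used above (from SystemZInstrFormats.td below).
    class BinaryAliasRI<SDPatternOperator operator, RegisterOperand cls,
                        Immediate imm>
      : Alias<4, (outs cls:$R1), (ins cls:$R1src, imm:$I2),
              [(set cls:$R1, (operator cls:$R1src, imm:$I2))]> {
      let Constraints = "$R1 = $R1src";
    }

The same BinaryAliasRI/BinaryAliasRIL pattern is applied to the IILL/IILH, OILL/OILH, IILF, NILF, OILF and XILF families in the diff.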
Diffstat (limited to 'lib/Target/SystemZ')
| -rw-r--r-- | lib/Target/SystemZ/MCTargetDesc/SystemZMCTargetDesc.h | 5 |
| -rw-r--r-- | lib/Target/SystemZ/SystemZAsmPrinter.cpp | 30 |
| -rw-r--r-- | lib/Target/SystemZ/SystemZISelLowering.cpp | 92 |
| -rw-r--r-- | lib/Target/SystemZ/SystemZInstrFormats.td | 16 |
| -rw-r--r-- | lib/Target/SystemZ/SystemZInstrInfo.cpp | 12 |
| -rw-r--r-- | lib/Target/SystemZ/SystemZInstrInfo.td | 106 |
| -rw-r--r-- | lib/Target/SystemZ/SystemZPatterns.td | 8 |
| -rw-r--r-- | lib/Target/SystemZ/SystemZShortenInst.cpp | 2 |
8 files changed, 160 insertions, 111 deletions
diff --git a/lib/Target/SystemZ/MCTargetDesc/SystemZMCTargetDesc.h b/lib/Target/SystemZ/MCTargetDesc/SystemZMCTargetDesc.h
index 84184af..f2e5a5a 100644
--- a/lib/Target/SystemZ/MCTargetDesc/SystemZMCTargetDesc.h
+++ b/lib/Target/SystemZ/MCTargetDesc/SystemZMCTargetDesc.h
@@ -56,6 +56,11 @@ namespace SystemZMC {
   inline unsigned getRegAsGR64(unsigned Reg) {
     return GR64Regs[getFirstReg(Reg)];
   }
+
+  // Return the given register as a low GR32.
+  inline unsigned getRegAsGR32(unsigned Reg) {
+    return GR32Regs[getFirstReg(Reg)];
+  }
 }
 
 MCCodeEmitter *createSystemZMCCodeEmitter(const MCInstrInfo &MCII,
diff --git a/lib/Target/SystemZ/SystemZAsmPrinter.cpp b/lib/Target/SystemZ/SystemZAsmPrinter.cpp
index 3f3ce6b..380fa87 100644
--- a/lib/Target/SystemZ/SystemZAsmPrinter.cpp
+++ b/lib/Target/SystemZ/SystemZAsmPrinter.cpp
@@ -26,6 +26,15 @@
 using namespace llvm;
 
+// Return an RI instruction like MI with opcode Opcode, but with the
+// GR64 register operands turned into GR32s.
+static MCInst lowerRILow(const MachineInstr *MI, unsigned Opcode) {
+  return MCInstBuilder(Opcode)
+    .addReg(SystemZMC::getRegAsGR32(MI->getOperand(0).getReg()))
+    .addReg(SystemZMC::getRegAsGR32(MI->getOperand(1).getReg()))
+    .addImm(MI->getOperand(2).getImm());
+}
+
 void SystemZAsmPrinter::EmitInstruction(const MachineInstr *MI) {
   SystemZMCInstLower Lower(Mang, MF->getContext(), *this);
   MCInst LoweredMI;
@@ -55,6 +64,27 @@ void SystemZAsmPrinter::EmitInstruction(const MachineInstr *MI) {
     LoweredMI = MCInstBuilder(SystemZ::BR).addReg(SystemZ::R1D);
     break;
 
+  case SystemZ::IILF64:
+    LoweredMI = MCInstBuilder(SystemZ::IILF)
+      .addReg(SystemZMC::getRegAsGR32(MI->getOperand(0).getReg()))
+      .addImm(MI->getOperand(2).getImm());
+    break;
+
+#define LOWER_LOW(NAME) \
+  case SystemZ::NAME##64: LoweredMI = lowerRILow(MI, SystemZ::NAME); break
+
+  LOWER_LOW(IILL);
+  LOWER_LOW(IILH);
+  LOWER_LOW(NILL);
+  LOWER_LOW(NILH);
+  LOWER_LOW(NILF);
+  LOWER_LOW(OILL);
+  LOWER_LOW(OILH);
+  LOWER_LOW(OILF);
+  LOWER_LOW(XILF);
+
+#undef LOWER_LOW
+
   default:
     Lower.lower(MI, LoweredMI);
     break;
diff --git a/lib/Target/SystemZ/SystemZISelLowering.cpp b/lib/Target/SystemZ/SystemZISelLowering.cpp
index eb3dc49..704c4e5 100644
--- a/lib/Target/SystemZ/SystemZISelLowering.cpp
+++ b/lib/Target/SystemZ/SystemZISelLowering.cpp
@@ -2330,11 +2330,11 @@ SystemZTargetLowering::emitAtomicLoadBinary(MachineInstr *MI,
       .addReg(RotatedOldVal).addOperand(Src2);
   if (BitSize < 32)
     // XILF with the upper BitSize bits set.
-    BuildMI(MBB, DL, TII->get(SystemZ::XILF32), RotatedNewVal)
+    BuildMI(MBB, DL, TII->get(SystemZ::XILF), RotatedNewVal)
       .addReg(Tmp).addImm(uint32_t(~0 << (32 - BitSize)));
   else if (BitSize == 32)
     // XILF with every bit set.
-    BuildMI(MBB, DL, TII->get(SystemZ::XILF32), RotatedNewVal)
+    BuildMI(MBB, DL, TII->get(SystemZ::XILF), RotatedNewVal)
      .addReg(Tmp).addImm(~uint32_t(0));
   else {
     // Use LCGR and add -1 to the result, which is more compact than
@@ -2938,96 +2938,96 @@ EmitInstrWithCustomInserter(MachineInstr *MI, MachineBasicBlock *MBB) const {
   case SystemZ::ATOMIC_LOADW_NR:
     return emitAtomicLoadBinary(MI, MBB, SystemZ::NR, 0);
   case SystemZ::ATOMIC_LOADW_NILH:
-    return emitAtomicLoadBinary(MI, MBB, SystemZ::NILH32, 0);
+    return emitAtomicLoadBinary(MI, MBB, SystemZ::NILH, 0);
   case SystemZ::ATOMIC_LOAD_NR:
     return emitAtomicLoadBinary(MI, MBB, SystemZ::NR, 32);
-  case SystemZ::ATOMIC_LOAD_NILL32:
-    return emitAtomicLoadBinary(MI, MBB, SystemZ::NILL32, 32);
-  case SystemZ::ATOMIC_LOAD_NILH32:
-    return emitAtomicLoadBinary(MI, MBB, SystemZ::NILH32, 32);
-  case SystemZ::ATOMIC_LOAD_NILF32:
-    return emitAtomicLoadBinary(MI, MBB, SystemZ::NILF32, 32);
-  case SystemZ::ATOMIC_LOAD_NGR:
-    return emitAtomicLoadBinary(MI, MBB, SystemZ::NGR, 64);
   case SystemZ::ATOMIC_LOAD_NILL:
-    return emitAtomicLoadBinary(MI, MBB, SystemZ::NILL, 64);
+    return emitAtomicLoadBinary(MI, MBB, SystemZ::NILL, 32);
   case SystemZ::ATOMIC_LOAD_NILH:
-    return emitAtomicLoadBinary(MI, MBB, SystemZ::NILH, 64);
+    return emitAtomicLoadBinary(MI, MBB, SystemZ::NILH, 32);
+  case SystemZ::ATOMIC_LOAD_NILF:
+    return emitAtomicLoadBinary(MI, MBB, SystemZ::NILF, 32);
+  case SystemZ::ATOMIC_LOAD_NGR:
+    return emitAtomicLoadBinary(MI, MBB, SystemZ::NGR, 64);
+  case SystemZ::ATOMIC_LOAD_NILL64:
+    return emitAtomicLoadBinary(MI, MBB, SystemZ::NILL64, 64);
+  case SystemZ::ATOMIC_LOAD_NILH64:
+    return emitAtomicLoadBinary(MI, MBB, SystemZ::NILH64, 64);
   case SystemZ::ATOMIC_LOAD_NIHL:
     return emitAtomicLoadBinary(MI, MBB, SystemZ::NIHL, 64);
   case SystemZ::ATOMIC_LOAD_NIHH:
     return emitAtomicLoadBinary(MI, MBB, SystemZ::NIHH, 64);
-  case SystemZ::ATOMIC_LOAD_NILF:
-    return emitAtomicLoadBinary(MI, MBB, SystemZ::NILF, 64);
+  case SystemZ::ATOMIC_LOAD_NILF64:
+    return emitAtomicLoadBinary(MI, MBB, SystemZ::NILF64, 64);
   case SystemZ::ATOMIC_LOAD_NIHF:
     return emitAtomicLoadBinary(MI, MBB, SystemZ::NIHF, 64);
 
   case SystemZ::ATOMIC_LOADW_OR:
     return emitAtomicLoadBinary(MI, MBB, SystemZ::OR, 0);
   case SystemZ::ATOMIC_LOADW_OILH:
-    return emitAtomicLoadBinary(MI, MBB, SystemZ::OILH32, 0);
+    return emitAtomicLoadBinary(MI, MBB, SystemZ::OILH, 0);
   case SystemZ::ATOMIC_LOAD_OR:
     return emitAtomicLoadBinary(MI, MBB, SystemZ::OR, 32);
-  case SystemZ::ATOMIC_LOAD_OILL32:
-    return emitAtomicLoadBinary(MI, MBB, SystemZ::OILL32, 32);
-  case SystemZ::ATOMIC_LOAD_OILH32:
-    return emitAtomicLoadBinary(MI, MBB, SystemZ::OILH32, 32);
-  case SystemZ::ATOMIC_LOAD_OILF32:
-    return emitAtomicLoadBinary(MI, MBB, SystemZ::OILF32, 32);
-  case SystemZ::ATOMIC_LOAD_OGR:
-    return emitAtomicLoadBinary(MI, MBB, SystemZ::OGR, 64);
   case SystemZ::ATOMIC_LOAD_OILL:
-    return emitAtomicLoadBinary(MI, MBB, SystemZ::OILL, 64);
+    return emitAtomicLoadBinary(MI, MBB, SystemZ::OILL, 32);
   case SystemZ::ATOMIC_LOAD_OILH:
-    return emitAtomicLoadBinary(MI, MBB, SystemZ::OILH, 64);
+    return emitAtomicLoadBinary(MI, MBB, SystemZ::OILH, 32);
+  case SystemZ::ATOMIC_LOAD_OILF:
+    return emitAtomicLoadBinary(MI, MBB, SystemZ::OILF, 32);
+  case SystemZ::ATOMIC_LOAD_OGR:
+    return emitAtomicLoadBinary(MI, MBB, SystemZ::OGR, 64);
+  case SystemZ::ATOMIC_LOAD_OILL64:
+    return emitAtomicLoadBinary(MI, MBB, SystemZ::OILL64, 64);
+  case SystemZ::ATOMIC_LOAD_OILH64:
+    return emitAtomicLoadBinary(MI, MBB, SystemZ::OILH64, 64);
   case SystemZ::ATOMIC_LOAD_OIHL:
     return emitAtomicLoadBinary(MI, MBB, SystemZ::OIHL, 64);
   case SystemZ::ATOMIC_LOAD_OIHH:
     return emitAtomicLoadBinary(MI, MBB, SystemZ::OIHH, 64);
-  case SystemZ::ATOMIC_LOAD_OILF:
-    return emitAtomicLoadBinary(MI, MBB, SystemZ::OILF, 64);
+  case SystemZ::ATOMIC_LOAD_OILF64:
+    return emitAtomicLoadBinary(MI, MBB, SystemZ::OILF64, 64);
   case SystemZ::ATOMIC_LOAD_OIHF:
     return emitAtomicLoadBinary(MI, MBB, SystemZ::OIHF, 64);
 
   case SystemZ::ATOMIC_LOADW_XR:
     return emitAtomicLoadBinary(MI, MBB, SystemZ::XR, 0);
   case SystemZ::ATOMIC_LOADW_XILF:
-    return emitAtomicLoadBinary(MI, MBB, SystemZ::XILF32, 0);
+    return emitAtomicLoadBinary(MI, MBB, SystemZ::XILF, 0);
   case SystemZ::ATOMIC_LOAD_XR:
     return emitAtomicLoadBinary(MI, MBB, SystemZ::XR, 32);
-  case SystemZ::ATOMIC_LOAD_XILF32:
-    return emitAtomicLoadBinary(MI, MBB, SystemZ::XILF32, 32);
+  case SystemZ::ATOMIC_LOAD_XILF:
+    return emitAtomicLoadBinary(MI, MBB, SystemZ::XILF, 32);
   case SystemZ::ATOMIC_LOAD_XGR:
     return emitAtomicLoadBinary(MI, MBB, SystemZ::XGR, 64);
-  case SystemZ::ATOMIC_LOAD_XILF:
-    return emitAtomicLoadBinary(MI, MBB, SystemZ::XILF, 64);
+  case SystemZ::ATOMIC_LOAD_XILF64:
+    return emitAtomicLoadBinary(MI, MBB, SystemZ::XILF64, 64);
   case SystemZ::ATOMIC_LOAD_XIHF:
     return emitAtomicLoadBinary(MI, MBB, SystemZ::XIHF, 64);
 
   case SystemZ::ATOMIC_LOADW_NRi:
     return emitAtomicLoadBinary(MI, MBB, SystemZ::NR, 0, true);
   case SystemZ::ATOMIC_LOADW_NILHi:
-    return emitAtomicLoadBinary(MI, MBB, SystemZ::NILH32, 0, true);
+    return emitAtomicLoadBinary(MI, MBB, SystemZ::NILH, 0, true);
   case SystemZ::ATOMIC_LOAD_NRi:
     return emitAtomicLoadBinary(MI, MBB, SystemZ::NR, 32, true);
-  case SystemZ::ATOMIC_LOAD_NILL32i:
-    return emitAtomicLoadBinary(MI, MBB, SystemZ::NILL32, 32, true);
-  case SystemZ::ATOMIC_LOAD_NILH32i:
-    return emitAtomicLoadBinary(MI, MBB, SystemZ::NILH32, 32, true);
-  case SystemZ::ATOMIC_LOAD_NILF32i:
-    return emitAtomicLoadBinary(MI, MBB, SystemZ::NILF32, 32, true);
-  case SystemZ::ATOMIC_LOAD_NGRi:
-    return emitAtomicLoadBinary(MI, MBB, SystemZ::NGR, 64, true);
   case SystemZ::ATOMIC_LOAD_NILLi:
-    return emitAtomicLoadBinary(MI, MBB, SystemZ::NILL, 64, true);
+    return emitAtomicLoadBinary(MI, MBB, SystemZ::NILL, 32, true);
   case SystemZ::ATOMIC_LOAD_NILHi:
-    return emitAtomicLoadBinary(MI, MBB, SystemZ::NILH, 64, true);
+    return emitAtomicLoadBinary(MI, MBB, SystemZ::NILH, 32, true);
+  case SystemZ::ATOMIC_LOAD_NILFi:
+    return emitAtomicLoadBinary(MI, MBB, SystemZ::NILF, 32, true);
+  case SystemZ::ATOMIC_LOAD_NGRi:
+    return emitAtomicLoadBinary(MI, MBB, SystemZ::NGR, 64, true);
+  case SystemZ::ATOMIC_LOAD_NILL64i:
+    return emitAtomicLoadBinary(MI, MBB, SystemZ::NILL64, 64, true);
+  case SystemZ::ATOMIC_LOAD_NILH64i:
+    return emitAtomicLoadBinary(MI, MBB, SystemZ::NILH64, 64, true);
   case SystemZ::ATOMIC_LOAD_NIHLi:
     return emitAtomicLoadBinary(MI, MBB, SystemZ::NIHL, 64, true);
   case SystemZ::ATOMIC_LOAD_NIHHi:
     return emitAtomicLoadBinary(MI, MBB, SystemZ::NIHH, 64, true);
-  case SystemZ::ATOMIC_LOAD_NILFi:
-    return emitAtomicLoadBinary(MI, MBB, SystemZ::NILF, 64, true);
+  case SystemZ::ATOMIC_LOAD_NILF64i:
+    return emitAtomicLoadBinary(MI, MBB, SystemZ::NILF64, 64, true);
   case SystemZ::ATOMIC_LOAD_NIHFi:
     return emitAtomicLoadBinary(MI, MBB, SystemZ::NIHF, 64, true);
diff --git a/lib/Target/SystemZ/SystemZInstrFormats.td b/lib/Target/SystemZ/SystemZInstrFormats.td
index b594f0e..a104329 100644
--- a/lib/Target/SystemZ/SystemZInstrFormats.td
+++ b/lib/Target/SystemZ/SystemZInstrFormats.td
@@ -1477,3 +1477,19 @@ class Alias<int size, dag outs, dag ins, list<dag> pattern>
   let isPseudo = 1;
   let isCodeGenOnly = 1;
 }
+
+// An alias of a BinaryRI, but with different register sizes.
+class BinaryAliasRI<SDPatternOperator operator, RegisterOperand cls,
+                    Immediate imm>
+  : Alias<4, (outs cls:$R1), (ins cls:$R1src, imm:$I2),
+          [(set cls:$R1, (operator cls:$R1src, imm:$I2))]> {
+  let Constraints = "$R1 = $R1src";
+}
+
+// An alias of a BinaryRIL, but with different register sizes.
+class BinaryAliasRIL<SDPatternOperator operator, RegisterOperand cls,
+                     Immediate imm>
+  : Alias<6, (outs cls:$R1), (ins cls:$R1src, imm:$I2),
+          [(set cls:$R1, (operator cls:$R1src, imm:$I2))]> {
+  let Constraints = "$R1 = $R1src";
+}
diff --git a/lib/Target/SystemZ/SystemZInstrInfo.cpp b/lib/Target/SystemZ/SystemZInstrInfo.cpp
index d20487a..6d19bdd 100644
--- a/lib/Target/SystemZ/SystemZInstrInfo.cpp
+++ b/lib/Target/SystemZ/SystemZInstrInfo.cpp
@@ -535,14 +535,14 @@ namespace {
 static LogicOp interpretAndImmediate(unsigned Opcode) {
   switch (Opcode) {
-  case SystemZ::NILL32: return LogicOp(32, 0, 16);
-  case SystemZ::NILH32: return LogicOp(32, 16, 16);
-  case SystemZ::NILL: return LogicOp(64, 0, 16);
-  case SystemZ::NILH: return LogicOp(64, 16, 16);
+  case SystemZ::NILL: return LogicOp(32, 0, 16);
+  case SystemZ::NILH: return LogicOp(32, 16, 16);
+  case SystemZ::NILL64: return LogicOp(64, 0, 16);
+  case SystemZ::NILH64: return LogicOp(64, 16, 16);
   case SystemZ::NIHL: return LogicOp(64, 32, 16);
   case SystemZ::NIHH: return LogicOp(64, 48, 16);
-  case SystemZ::NILF32: return LogicOp(32, 0, 32);
-  case SystemZ::NILF: return LogicOp(64, 0, 32);
+  case SystemZ::NILF: return LogicOp(32, 0, 32);
+  case SystemZ::NILF64: return LogicOp(64, 0, 32);
   case SystemZ::NIHF: return LogicOp(64, 32, 32);
   default: return LogicOp();
   }
 }
diff --git a/lib/Target/SystemZ/SystemZInstrInfo.td b/lib/Target/SystemZ/SystemZInstrInfo.td
index ec59e2d..d2aa276 100644
--- a/lib/Target/SystemZ/SystemZInstrInfo.td
+++ b/lib/Target/SystemZ/SystemZInstrInfo.td
@@ -570,12 +570,10 @@ defm : InsertMem<"inserti8", ICY, GR64, azextloadi8, bdxaddr20pair>;
 
 // Insertions of a 16-bit immediate, leaving other bits unaffected.
 // We don't have or_as_insert equivalents of these operations because
 // OI is available instead.
-let isCodeGenOnly = 1 in {
-  def IILL32 : BinaryRI<"iill", 0xA53, insertll, GR32, imm32ll16>;
-  def IILH32 : BinaryRI<"iilh", 0xA52, insertlh, GR32, imm32lh16>;
-}
-def IILL : BinaryRI<"iill", 0xA53, insertll, GR64, imm64ll16>;
-def IILH : BinaryRI<"iilh", 0xA52, insertlh, GR64, imm64lh16>;
+def IILL : BinaryRI<"iill", 0xA53, insertll, GR32, imm32ll16>;
+def IILH : BinaryRI<"iilh", 0xA52, insertlh, GR32, imm32lh16>;
+def IILL64 : BinaryAliasRI<insertll, GR64, imm64ll16>;
+def IILH64 : BinaryAliasRI<insertlh, GR64, imm64lh16>;
 def IIHL : BinaryRI<"iihl", 0xA51, inserthl, GR64, imm64hl16>;
 def IIHH : BinaryRI<"iihh", 0xA50, inserthh, GR64, imm64hh16>;
 
@@ -583,11 +581,9 @@ def IIHH : BinaryRI<"iihh", 0xA50, inserthh, GR64, imm64hh16>;
 // full-width move. (We use IILF rather than something like LLILF
 // for 32-bit moves because IILF leaves the upper 32 bits of the
 // GR64 unchanged.)
-let isCodeGenOnly = 1, isAsCheapAsAMove = 1, isMoveImm = 1,
-    isReMaterializable = 1 in {
-  def IILF32 : UnaryRIL<"iilf", 0xC09, bitconvert, GR32, uimm32>;
-}
-def IILF : BinaryRIL<"iilf", 0xC09, insertlf, GR64, imm64lf32>;
+let isAsCheapAsAMove = 1, isMoveImm = 1, isReMaterializable = 1 in
+  def IILF : UnaryRIL<"iilf", 0xC09, bitconvert, GR32, uimm32>;
+def IILF64 : BinaryAliasRIL<insertlf, GR64, imm64lf32>;
 def IIHF : BinaryRIL<"iihf", 0xC08, inserthf, GR64, imm64hf32>;
 
 // An alternative model of inserthf, with the first operand being
@@ -730,21 +726,19 @@ let Defs = [CC] in {
   let isConvertibleToThreeAddress = 1 in {
     // ANDs of a 16-bit immediate, leaving other bits unaffected.
     // The CC result only reflects the 16-bit field, not the full register.
-    let isCodeGenOnly = 1 in {
-      def NILL32 : BinaryRI<"nill", 0xA57, and, GR32, imm32ll16c>;
-      def NILH32 : BinaryRI<"nilh", 0xA56, and, GR32, imm32lh16c>;
-    }
-    def NILL : BinaryRI<"nill", 0xA57, and, GR64, imm64ll16c>;
-    def NILH : BinaryRI<"nilh", 0xA56, and, GR64, imm64lh16c>;
+    def NILL : BinaryRI<"nill", 0xA57, and, GR32, imm32ll16c>;
+    def NILH : BinaryRI<"nilh", 0xA56, and, GR32, imm32lh16c>;
+    def NILL64 : BinaryAliasRI<and, GR64, imm64ll16c>;
+    def NILH64 : BinaryAliasRI<and, GR64, imm64lh16c>;
     def NIHL : BinaryRI<"nihl", 0xA55, and, GR64, imm64hl16c>;
     def NIHH : BinaryRI<"nihh", 0xA54, and, GR64, imm64hh16c>;
 
     // ANDs of a 32-bit immediate, leaving other bits unaffected.
     // The CC result only reflects the 32-bit field, which means we can
     // use it as a zero indicator for i32 operations but not otherwise.
-    let isCodeGenOnly = 1, CCValues = 0xC, CompareZeroCCMask = 0x8 in
-      def NILF32 : BinaryRIL<"nilf", 0xC0B, and, GR32, uimm32>;
-    def NILF : BinaryRIL<"nilf", 0xC0B, and, GR64, imm64lf32c>;
+    let CCValues = 0xC, CompareZeroCCMask = 0x8 in
+      def NILF : BinaryRIL<"nilf", 0xC0B, and, GR32, uimm32>;
+    def NILF64 : BinaryAliasRIL<and, GR64, imm64lf32c>;
     def NIHF : BinaryRIL<"nihf", 0xC0A, and, GR64, imm64hf32c>;
   }
 
@@ -777,21 +771,19 @@ let Defs = [CC] in {
 
   // ORs of a 16-bit immediate, leaving other bits unaffected.
   // The CC result only reflects the 16-bit field, not the full register.
-  let isCodeGenOnly = 1 in {
-    def OILL32 : BinaryRI<"oill", 0xA5B, or, GR32, imm32ll16>;
-    def OILH32 : BinaryRI<"oilh", 0xA5A, or, GR32, imm32lh16>;
-  }
-  def OILL : BinaryRI<"oill", 0xA5B, or, GR64, imm64ll16>;
-  def OILH : BinaryRI<"oilh", 0xA5A, or, GR64, imm64lh16>;
+  def OILL : BinaryRI<"oill", 0xA5B, or, GR32, imm32ll16>;
+  def OILH : BinaryRI<"oilh", 0xA5A, or, GR32, imm32lh16>;
+  def OILL64 : BinaryAliasRI<or, GR64, imm64ll16>;
+  def OILH64 : BinaryAliasRI<or, GR64, imm64lh16>;
   def OIHL : BinaryRI<"oihl", 0xA59, or, GR64, imm64hl16>;
   def OIHH : BinaryRI<"oihh", 0xA58, or, GR64, imm64hh16>;
 
   // ORs of a 32-bit immediate, leaving other bits unaffected.
   // The CC result only reflects the 32-bit field, which means we can
   // use it as a zero indicator for i32 operations but not otherwise.
-  let isCodeGenOnly = 1, CCValues = 0xC, CompareZeroCCMask = 0x8 in
-    def OILF32 : BinaryRIL<"oilf", 0xC0D, or, GR32, uimm32>;
-  def OILF : BinaryRIL<"oilf", 0xC0D, or, GR64, imm64lf32>;
+  let CCValues = 0xC, CompareZeroCCMask = 0x8 in
+    def OILF : BinaryRIL<"oilf", 0xC0D, or, GR32, uimm32>;
+  def OILF64 : BinaryAliasRIL<or, GR64, imm64lf32>;
   def OIHF : BinaryRIL<"oihf", 0xC0C, or, GR64, imm64hf32>;
 
   // ORs of memory.
@@ -824,9 +816,9 @@ let Defs = [CC] in {
   // XORs of a 32-bit immediate, leaving other bits unaffected.
   // The CC result only reflects the 32-bit field, which means we can
   // use it as a zero indicator for i32 operations but not otherwise.
-  let isCodeGenOnly = 1, CCValues = 0xC, CompareZeroCCMask = 0x8 in
-    def XILF32 : BinaryRIL<"xilf", 0xC07, xor, GR32, uimm32>;
-  def XILF : BinaryRIL<"xilf", 0xC07, xor, GR64, imm64lf32>;
+  let CCValues = 0xC, CompareZeroCCMask = 0x8 in
+    def XILF : BinaryRIL<"xilf", 0xC07, xor, GR32, uimm32>;
+  def XILF64 : BinaryAliasRIL<xor, GR64, imm64lf32>;
   def XIHF : BinaryRIL<"xihf", 0xC06, xor, GR64, imm64hf32>;
 
   // XORs of memory.
@@ -1036,18 +1028,16 @@ let mayLoad = 1, Defs = [CC], Uses = [R0W] in
 
 // Test under mask.
 let Defs = [CC] in {
-  let isCodeGenOnly = 1 in {
-    def TMLL32 : CompareRI<"tmll", 0xA71, z_tm_reg, GR32, imm32ll16>;
-    def TMLH32 : CompareRI<"tmlh", 0xA70, z_tm_reg, GR32, imm32lh16>;
-  }
+  def TMLL : CompareRI<"tmll", 0xA71, z_tm_reg, GR32, imm32ll16>;
+  def TMLH : CompareRI<"tmlh", 0xA70, z_tm_reg, GR32, imm32lh16>;
 
-  def TMLL : CompareRI<"tmll", 0xA71, z_tm_reg, GR64, imm64ll16>;
-  def TMLH : CompareRI<"tmlh", 0xA70, z_tm_reg, GR64, imm64lh16>;
   def TMHL : CompareRI<"tmhl", 0xA73, z_tm_reg, GR64, imm64hl16>;
   def TMHH : CompareRI<"tmhh", 0xA72, z_tm_reg, GR64, imm64hh16>;
 
   defm TM : CompareSIPair<"tm", 0x91, 0xEB51, z_tm_mem, anyextloadi8, imm32zx8>;
 }
+def : CompareGR64RI<TMLL, z_tm_reg, imm64ll16>;
+def : CompareGR64RI<TMLH, z_tm_reg, imm64lh16>;
 
 //===----------------------------------------------------------------------===//
 // Prefetch
@@ -1080,58 +1070,58 @@ def ATOMIC_LOAD_SGR : AtomicLoadBinaryReg64<atomic_load_sub_64>;
 def ATOMIC_LOADW_NR : AtomicLoadWBinaryReg<z_atomic_loadw_and>;
 def ATOMIC_LOADW_NILH : AtomicLoadWBinaryImm<z_atomic_loadw_and, imm32lh16c>;
 def ATOMIC_LOAD_NR : AtomicLoadBinaryReg32<atomic_load_and_32>;
-def ATOMIC_LOAD_NILL32 : AtomicLoadBinaryImm32<atomic_load_and_32, imm32ll16c>;
-def ATOMIC_LOAD_NILH32 : AtomicLoadBinaryImm32<atomic_load_and_32, imm32lh16c>;
-def ATOMIC_LOAD_NILF32 : AtomicLoadBinaryImm32<atomic_load_and_32, uimm32>;
+def ATOMIC_LOAD_NILL : AtomicLoadBinaryImm32<atomic_load_and_32, imm32ll16c>;
+def ATOMIC_LOAD_NILH : AtomicLoadBinaryImm32<atomic_load_and_32, imm32lh16c>;
+def ATOMIC_LOAD_NILF : AtomicLoadBinaryImm32<atomic_load_and_32, uimm32>;
 def ATOMIC_LOAD_NGR : AtomicLoadBinaryReg64<atomic_load_and_64>;
-def ATOMIC_LOAD_NILL : AtomicLoadBinaryImm64<atomic_load_and_64, imm64ll16c>;
-def ATOMIC_LOAD_NILH : AtomicLoadBinaryImm64<atomic_load_and_64, imm64lh16c>;
+def ATOMIC_LOAD_NILL64 : AtomicLoadBinaryImm64<atomic_load_and_64, imm64ll16c>;
+def ATOMIC_LOAD_NILH64 : AtomicLoadBinaryImm64<atomic_load_and_64, imm64lh16c>;
 def ATOMIC_LOAD_NIHL : AtomicLoadBinaryImm64<atomic_load_and_64, imm64hl16c>;
 def ATOMIC_LOAD_NIHH : AtomicLoadBinaryImm64<atomic_load_and_64, imm64hh16c>;
-def ATOMIC_LOAD_NILF : AtomicLoadBinaryImm64<atomic_load_and_64, imm64lf32c>;
+def ATOMIC_LOAD_NILF64 : AtomicLoadBinaryImm64<atomic_load_and_64, imm64lf32c>;
 def ATOMIC_LOAD_NIHF : AtomicLoadBinaryImm64<atomic_load_and_64, imm64hf32c>;
 
 def ATOMIC_LOADW_OR : AtomicLoadWBinaryReg<z_atomic_loadw_or>;
 def ATOMIC_LOADW_OILH : AtomicLoadWBinaryImm<z_atomic_loadw_or, imm32lh16>;
 def ATOMIC_LOAD_OR : AtomicLoadBinaryReg32<atomic_load_or_32>;
-def ATOMIC_LOAD_OILL32 : AtomicLoadBinaryImm32<atomic_load_or_32, imm32ll16>;
-def ATOMIC_LOAD_OILH32 : AtomicLoadBinaryImm32<atomic_load_or_32, imm32lh16>;
-def ATOMIC_LOAD_OILF32 : AtomicLoadBinaryImm32<atomic_load_or_32, uimm32>;
+def ATOMIC_LOAD_OILL : AtomicLoadBinaryImm32<atomic_load_or_32, imm32ll16>;
+def ATOMIC_LOAD_OILH : AtomicLoadBinaryImm32<atomic_load_or_32, imm32lh16>;
+def ATOMIC_LOAD_OILF : AtomicLoadBinaryImm32<atomic_load_or_32, uimm32>;
 def ATOMIC_LOAD_OGR : AtomicLoadBinaryReg64<atomic_load_or_64>;
-def ATOMIC_LOAD_OILL : AtomicLoadBinaryImm64<atomic_load_or_64, imm64ll16>;
-def ATOMIC_LOAD_OILH : AtomicLoadBinaryImm64<atomic_load_or_64, imm64lh16>;
+def ATOMIC_LOAD_OILL64 : AtomicLoadBinaryImm64<atomic_load_or_64, imm64ll16>;
+def ATOMIC_LOAD_OILH64 : AtomicLoadBinaryImm64<atomic_load_or_64, imm64lh16>;
 def ATOMIC_LOAD_OIHL : AtomicLoadBinaryImm64<atomic_load_or_64, imm64hl16>;
 def ATOMIC_LOAD_OIHH : AtomicLoadBinaryImm64<atomic_load_or_64, imm64hh16>;
-def ATOMIC_LOAD_OILF : AtomicLoadBinaryImm64<atomic_load_or_64, imm64lf32>;
+def ATOMIC_LOAD_OILF64 : AtomicLoadBinaryImm64<atomic_load_or_64, imm64lf32>;
 def ATOMIC_LOAD_OIHF : AtomicLoadBinaryImm64<atomic_load_or_64, imm64hf32>;
 
 def ATOMIC_LOADW_XR : AtomicLoadWBinaryReg<z_atomic_loadw_xor>;
 def ATOMIC_LOADW_XILF : AtomicLoadWBinaryImm<z_atomic_loadw_xor, uimm32>;
 def ATOMIC_LOAD_XR : AtomicLoadBinaryReg32<atomic_load_xor_32>;
-def ATOMIC_LOAD_XILF32 : AtomicLoadBinaryImm32<atomic_load_xor_32, uimm32>;
+def ATOMIC_LOAD_XILF : AtomicLoadBinaryImm32<atomic_load_xor_32, uimm32>;
 def ATOMIC_LOAD_XGR : AtomicLoadBinaryReg64<atomic_load_xor_64>;
-def ATOMIC_LOAD_XILF : AtomicLoadBinaryImm64<atomic_load_xor_64, imm64lf32>;
+def ATOMIC_LOAD_XILF64 : AtomicLoadBinaryImm64<atomic_load_xor_64, imm64lf32>;
 def ATOMIC_LOAD_XIHF : AtomicLoadBinaryImm64<atomic_load_xor_64, imm64hf32>;
 
 def ATOMIC_LOADW_NRi : AtomicLoadWBinaryReg<z_atomic_loadw_nand>;
 def ATOMIC_LOADW_NILHi : AtomicLoadWBinaryImm<z_atomic_loadw_nand, imm32lh16c>;
 def ATOMIC_LOAD_NRi : AtomicLoadBinaryReg32<atomic_load_nand_32>;
-def ATOMIC_LOAD_NILL32i : AtomicLoadBinaryImm32<atomic_load_nand_32,
+def ATOMIC_LOAD_NILLi : AtomicLoadBinaryImm32<atomic_load_nand_32,
                                                 imm32ll16c>;
-def ATOMIC_LOAD_NILH32i : AtomicLoadBinaryImm32<atomic_load_nand_32,
+def ATOMIC_LOAD_NILHi : AtomicLoadBinaryImm32<atomic_load_nand_32,
                                                 imm32lh16c>;
-def ATOMIC_LOAD_NILF32i : AtomicLoadBinaryImm32<atomic_load_nand_32, uimm32>;
+def ATOMIC_LOAD_NILFi : AtomicLoadBinaryImm32<atomic_load_nand_32, uimm32>;
 def ATOMIC_LOAD_NGRi : AtomicLoadBinaryReg64<atomic_load_nand_64>;
-def ATOMIC_LOAD_NILLi : AtomicLoadBinaryImm64<atomic_load_nand_64,
+def ATOMIC_LOAD_NILL64i : AtomicLoadBinaryImm64<atomic_load_nand_64,
                                               imm64ll16c>;
-def ATOMIC_LOAD_NILHi : AtomicLoadBinaryImm64<atomic_load_nand_64,
+def ATOMIC_LOAD_NILH64i : AtomicLoadBinaryImm64<atomic_load_nand_64,
                                               imm64lh16c>;
 def ATOMIC_LOAD_NIHLi : AtomicLoadBinaryImm64<atomic_load_nand_64,
                                               imm64hl16c>;
 def ATOMIC_LOAD_NIHHi : AtomicLoadBinaryImm64<atomic_load_nand_64,
                                               imm64hh16c>;
-def ATOMIC_LOAD_NILFi : AtomicLoadBinaryImm64<atomic_load_nand_64,
+def ATOMIC_LOAD_NILF64i : AtomicLoadBinaryImm64<atomic_load_nand_64,
                                               imm64lf32c>;
 def ATOMIC_LOAD_NIHFi : AtomicLoadBinaryImm64<atomic_load_nand_64,
                                               imm64hf32c>;
diff --git a/lib/Target/SystemZ/SystemZPatterns.td b/lib/Target/SystemZ/SystemZPatterns.td
index a1344a3..203247c 100644
--- a/lib/Target/SystemZ/SystemZPatterns.td
+++ b/lib/Target/SystemZ/SystemZPatterns.td
@@ -112,6 +112,14 @@ multiclass CondStores64<Instruction insn, Instruction insninv,
                             uimm8zx4:$valid, uimm8zx4:$cc)>;
 }
 
+// INSN performs a comparison between a 32-bit register and a constant.
+// Record that it is equivalent to comparing the low word of a GR64 with IMM.
+class CompareGR64RI<Instruction insn, SDPatternOperator compare,
+                    Immediate imm>
+  : Pat<(compare GR64:$R1, imm:$I2),
+        (insn (EXTRACT_SUBREG GR64:$R1, subreg_32bit),
+              (imm.OperandTransform imm:$I2))>;
+
 // Try to use MVC instruction INSN for a load of type LOAD followed by a store
 // of the same size. VT is the type of the intermediate (legalized) value and
 // LENGTH is the number of bytes loaded by LOAD.
diff --git a/lib/Target/SystemZ/SystemZShortenInst.cpp b/lib/Target/SystemZ/SystemZShortenInst.cpp
index 526ae5c..b480864 100644
--- a/lib/Target/SystemZ/SystemZShortenInst.cpp
+++ b/lib/Target/SystemZ/SystemZShortenInst.cpp
@@ -119,7 +119,7 @@ bool SystemZShortenInst::processBlock(MachineBasicBlock *MBB) {
        MBBE = MBB->rend(); MBBI != MBBE; ++MBBI) {
     MachineInstr &MI = *MBBI;
     unsigned Opcode = MI.getOpcode();
-    if (Opcode == SystemZ::IILF32)
+    if (Opcode == SystemZ::IILF)
       Changed |= shortenIIF(MI, LowGPRs, LiveHigh, SystemZ::LLILL,
                             SystemZ::LLILH);
     unsigned UsedLow = 0;