Diffstat (limited to 'lib')
-rw-r--r--  lib/Target/SystemZ/SystemZISelLowering.cpp  | 90
-rw-r--r--  lib/Target/SystemZ/SystemZISelLowering.h    |  3
-rw-r--r--  lib/Target/SystemZ/SystemZInstrFP.td        |  7
-rw-r--r--  lib/Target/SystemZ/SystemZInstrFormats.td   | 13
-rw-r--r--  lib/Target/SystemZ/SystemZInstrInfo.td      | 20
-rw-r--r--  lib/Target/SystemZ/SystemZOperators.td      | 24
-rw-r--r--  lib/Target/SystemZ/SystemZPatterns.td       |  8
7 files changed, 156 insertions(+), 9 deletions(-)
diff --git a/lib/Target/SystemZ/SystemZISelLowering.cpp b/lib/Target/SystemZ/SystemZISelLowering.cpp
index 0b0dbea..955b88e 100644
--- a/lib/Target/SystemZ/SystemZISelLowering.cpp
+++ b/lib/Target/SystemZ/SystemZISelLowering.cpp
@@ -1696,6 +1696,59 @@ SystemZTargetLowering::emitSelect(MachineInstr *MI,
return JoinMBB;
}
+// Implement EmitInstrWithCustomInserter for pseudo CondStore* instruction MI.
+// StoreOpcode is the store to use and Invert says whether the store should
+// happen when the condition is false rather than true.
+MachineBasicBlock *
+SystemZTargetLowering::emitCondStore(MachineInstr *MI,
+ MachineBasicBlock *MBB,
+ unsigned StoreOpcode, bool Invert) const {
+ const SystemZInstrInfo *TII = TM.getInstrInfo();
+
+ MachineOperand Base = MI->getOperand(0);
+ int64_t Disp = MI->getOperand(1).getImm();
+ unsigned IndexReg = MI->getOperand(2).getReg();
+ unsigned SrcReg = MI->getOperand(3).getReg();
+ unsigned CCMask = MI->getOperand(4).getImm();
+ DebugLoc DL = MI->getDebugLoc();
+
+ StoreOpcode = TII->getOpcodeForOffset(StoreOpcode, Disp);
+
+ // Get the condition needed to branch around the store.
+ if (!Invert)
+ CCMask = CCMask ^ SystemZ::CCMASK_ANY;
+
+ MachineBasicBlock *StartMBB = MBB;
+ MachineBasicBlock *JoinMBB = splitBlockAfter(MI, MBB);
+ MachineBasicBlock *FalseMBB = emitBlockAfter(StartMBB);
+
+ // StartMBB:
+ // BRC CCMask, JoinMBB
+ // # fallthrough to FalseMBB
+ //
+ // The original DAG glues comparisons to their uses, both to ensure
+ // that no CC-clobbering instructions are inserted between them, and
+ // to ensure that comparison results are not reused. This means that
+ // this CondStore is the sole user of any preceding comparison instruction
+ // and that we can try to use a fused compare and branch instead.
+ MBB = StartMBB;
+ if (!convertPrevCompareToBranch(MBB, MI, CCMask, JoinMBB))
+ BuildMI(MBB, DL, TII->get(SystemZ::BRC)).addImm(CCMask).addMBB(JoinMBB);
+ MBB->addSuccessor(JoinMBB);
+ MBB->addSuccessor(FalseMBB);
+
+ // FalseMBB:
+ // store %SrcReg, %Disp(%Index,%Base)
+ // # fallthrough to JoinMBB
+ MBB = FalseMBB;
+ BuildMI(MBB, DL, TII->get(StoreOpcode))
+ .addReg(SrcReg).addOperand(Base).addImm(Disp).addReg(IndexReg);
+ MBB->addSuccessor(JoinMBB);
+
+ MI->eraseFromParent();
+ return JoinMBB;
+}
+
// Implement EmitInstrWithCustomInserter for pseudo ATOMIC_LOAD{,W}_*
// or ATOMIC_SWAP{,W} instruction MI. BinOpcode is the instruction that
// performs the binary operation elided by "*", or 0 for ATOMIC_SWAP{,W}.
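The block structure that emitCondStore builds is worth seeing end to end. A hedged illustration, not taken from the patch: for a source-level guarded store such as the C function below, the pseudo lowers to a compare, a branch around the store, and a plain store. Register assignments and mnemonics here are assumptions for illustration only.

    // Illustrative source: the store happens only when x < lim.
    void f(int *p, int x, int lim) {
      if (x < lim)
        *p = x;
    }

    // Roughly the resulting SystemZ code, assuming the usual ABI
    // (%r2 = p, %r3 = x, %r4 = lim):
    //
    //   cr   %r3, %r4        # StartMBB: compare x with lim, sets CC
    //   jhe  .Ljoin          # branch around the store when x >= lim
    //   st   %r3, 0(%r2)     # FalseMBB: the store itself
    // .Ljoin:                # JoinMBB
    //   br   %r14

Note that the branch tests the complement of the incoming CCMask (the XOR with CCMASK_ANY above), so control falls through into FalseMBB exactly when the original condition holds; when convertPrevCompareToBranch succeeds, the compare and branch are further fused into a single compare-and-branch instruction on targets that have one.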
@@ -2100,6 +2153,43 @@ EmitInstrWithCustomInserter(MachineInstr *MI, MachineBasicBlock *MBB) const {
case SystemZ::SelectF128:
return emitSelect(MI, MBB);
+ case SystemZ::CondStore8_32:
+ return emitCondStore(MI, MBB, SystemZ::STC32, false);
+ case SystemZ::CondStore8_32Inv:
+ return emitCondStore(MI, MBB, SystemZ::STC32, true);
+ case SystemZ::CondStore16_32:
+ return emitCondStore(MI, MBB, SystemZ::STH32, false);
+ case SystemZ::CondStore16_32Inv:
+ return emitCondStore(MI, MBB, SystemZ::STH32, true);
+ case SystemZ::CondStore32_32:
+ return emitCondStore(MI, MBB, SystemZ::ST32, false);
+ case SystemZ::CondStore32_32Inv:
+ return emitCondStore(MI, MBB, SystemZ::ST32, true);
+ case SystemZ::CondStore8:
+ return emitCondStore(MI, MBB, SystemZ::STC, false);
+ case SystemZ::CondStore8Inv:
+ return emitCondStore(MI, MBB, SystemZ::STC, true);
+ case SystemZ::CondStore16:
+ return emitCondStore(MI, MBB, SystemZ::STH, false);
+ case SystemZ::CondStore16Inv:
+ return emitCondStore(MI, MBB, SystemZ::STH, true);
+ case SystemZ::CondStore32:
+ return emitCondStore(MI, MBB, SystemZ::ST, false);
+ case SystemZ::CondStore32Inv:
+ return emitCondStore(MI, MBB, SystemZ::ST, true);
+ case SystemZ::CondStore64:
+ return emitCondStore(MI, MBB, SystemZ::STG, false);
+ case SystemZ::CondStore64Inv:
+ return emitCondStore(MI, MBB, SystemZ::STG, true);
+ case SystemZ::CondStoreF32:
+ return emitCondStore(MI, MBB, SystemZ::STE, false);
+ case SystemZ::CondStoreF32Inv:
+ return emitCondStore(MI, MBB, SystemZ::STE, true);
+ case SystemZ::CondStoreF64:
+ return emitCondStore(MI, MBB, SystemZ::STD, false);
+ case SystemZ::CondStoreF64Inv:
+ return emitCondStore(MI, MBB, SystemZ::STD, true);
+
case SystemZ::AEXT128_64:
return emitExt128(MI, MBB, false, SystemZ::subreg_low);
case SystemZ::ZEXT128_32:
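Each case above passes the short-displacement form of the store, and the getOpcodeForOffset call at the top of emitCondStore swaps in the long-displacement form when the offset requires it. A simplified, hypothetical sketch of that selection, assuming the usual z/Architecture split between 12-bit unsigned and 20-bit signed displacements (the real helper lives in SystemZInstrInfo and handles many more opcodes):

    // Hypothetical helper, for illustration only; selectStoreForOffset
    // is not a function in the backend.
    #include "llvm/Support/MathExtras.h"

    static unsigned selectStoreForOffset(unsigned Opcode, int64_t Disp) {
      // Base forms take a 12-bit unsigned displacement.
      if (llvm::isUInt<12>(Disp))
        return Opcode;
      // Otherwise use the long-displacement (Y) form, which takes a
      // 20-bit signed displacement.
      assert(llvm::isInt<20>(Disp) && "displacement out of range");
      switch (Opcode) {
      case SystemZ::ST:  return SystemZ::STY;
      case SystemZ::STC: return SystemZ::STCY;
      case SystemZ::STH: return SystemZ::STHY;
      case SystemZ::STE: return SystemZ::STEY;
      case SystemZ::STD: return SystemZ::STDY;
      default:           return Opcode; // STG already takes a 20-bit disp
      }
    }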
diff --git a/lib/Target/SystemZ/SystemZISelLowering.h b/lib/Target/SystemZ/SystemZISelLowering.h
index f48cc4f..f6c49f0 100644
--- a/lib/Target/SystemZ/SystemZISelLowering.h
+++ b/lib/Target/SystemZ/SystemZISelLowering.h
@@ -203,6 +203,9 @@ private:
// Implement EmitInstrWithCustomInserter for individual operation types.
MachineBasicBlock *emitSelect(MachineInstr *MI,
MachineBasicBlock *BB) const;
+ MachineBasicBlock *emitCondStore(MachineInstr *MI,
+ MachineBasicBlock *BB,
+ unsigned StoreOpcode, bool Invert) const;
MachineBasicBlock *emitExt128(MachineInstr *MI,
MachineBasicBlock *MBB,
bool ClearEven, unsigned SubReg) const;
diff --git a/lib/Target/SystemZ/SystemZInstrFP.td b/lib/Target/SystemZ/SystemZInstrFP.td
index 86ef14c..7499d2f 100644
--- a/lib/Target/SystemZ/SystemZInstrFP.td
+++ b/lib/Target/SystemZ/SystemZInstrFP.td
@@ -8,7 +8,7 @@
//===----------------------------------------------------------------------===//
//===----------------------------------------------------------------------===//
-// Control-flow instructions
+// Select instructions
//===----------------------------------------------------------------------===//
// C's ?: operator for floating-point operands.
@@ -16,6 +16,11 @@ def SelectF32 : SelectWrapper<FP32>;
def SelectF64 : SelectWrapper<FP64>;
def SelectF128 : SelectWrapper<FP128>;
+defm CondStoreF32 : CondStores<FP32, nonvolatile_store,
+ nonvolatile_load, bdxaddr20only>;
+defm CondStoreF64 : CondStores<FP64, nonvolatile_store,
+ nonvolatile_load, bdxaddr20only>;
+
//===----------------------------------------------------------------------===//
// Move instructions
//===----------------------------------------------------------------------===//
diff --git a/lib/Target/SystemZ/SystemZInstrFormats.td b/lib/Target/SystemZ/SystemZInstrFormats.td
index ad050fd..ac0300c 100644
--- a/lib/Target/SystemZ/SystemZInstrFormats.td
+++ b/lib/Target/SystemZ/SystemZInstrFormats.td
@@ -956,6 +956,19 @@ class SelectWrapper<RegisterOperand cls>
let Uses = [CC];
}
+// Stores $new to $addr if $cc is true ("" case) or false (Inv case).
+multiclass CondStores<RegisterOperand cls, SDPatternOperator store,
+ SDPatternOperator load, AddressingMode mode> {
+ let Defs = [CC], Uses = [CC], usesCustomInserter = 1 in {
+ def "" : Pseudo<(outs), (ins mode:$addr, cls:$new, i8imm:$cc),
+ [(store (z_select_ccmask cls:$new, (load mode:$addr),
+ imm:$cc), mode:$addr)]>;
+ def Inv : Pseudo<(outs), (ins mode:$addr, cls:$new, i8imm:$cc),
+ [(store (z_select_ccmask (load mode:$addr), cls:$new,
+ imm:$cc), mode:$addr)]>;
+ }
+}
+
// OPERATOR is ATOMIC_SWAP or an ATOMIC_LOAD_* operation. PAT and OPERAND
// describe the second (non-memory) operand.
class AtomicLoadBinary<SDPatternOperator operator, RegisterOperand cls,
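The pattern in CondStores deserves a note on where its input comes from. A guarded store such as

    // Illustrative only.
    void f(int *p, int x, int c) {
      if (c)
        *p = x;
    }

is commonly canonicalized by earlier passes, when the location is known to be safely loadable, into an unconditional store of a select, i.e. *p = c ? x : *p. That is exactly the (store (z_select_ccmask $new, (load $addr), $cc), $addr) shape matched by the "" definition; when the select operands arrive in the opposite order, meaning the store should happen when the condition is false, the Inv definition matches instead. Because both the load and the store use the same non-volatile $addr, the expansion is free to drop the load and simply branch around the store.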
diff --git a/lib/Target/SystemZ/SystemZInstrInfo.td b/lib/Target/SystemZ/SystemZInstrInfo.td
index c9ec6bc..44ff1d0 100644
--- a/lib/Target/SystemZ/SystemZInstrInfo.td
+++ b/lib/Target/SystemZ/SystemZInstrInfo.td
@@ -163,9 +163,29 @@ defm AsmJE : IntCondExtendedMnemonic<8, "e", "nlh">;
defm AsmJHE : IntCondExtendedMnemonic<10, "he", "nl">;
defm AsmJLE : IntCondExtendedMnemonic<12, "le", "nh">;
+//===----------------------------------------------------------------------===//
+// Select instructions
+//===----------------------------------------------------------------------===//
+
def Select32 : SelectWrapper<GR32>;
def Select64 : SelectWrapper<GR64>;
+defm CondStore8_32 : CondStores<GR32, nonvolatile_truncstorei8,
+ nonvolatile_anyextloadi8, bdxaddr20only>;
+defm CondStore16_32 : CondStores<GR32, nonvolatile_truncstorei16,
+ nonvolatile_anyextloadi16, bdxaddr20only>;
+defm CondStore32_32 : CondStores<GR32, nonvolatile_store,
+ nonvolatile_load, bdxaddr20only>;
+
+defm CondStore8 : CondStores<GR64, nonvolatile_truncstorei8,
+ nonvolatile_anyextloadi8, bdxaddr20only>;
+defm CondStore16 : CondStores<GR64, nonvolatile_truncstorei16,
+ nonvolatile_anyextloadi16, bdxaddr20only>;
+defm CondStore32 : CondStores<GR64, nonvolatile_truncstorei32,
+ nonvolatile_anyextloadi32, bdxaddr20only>;
+defm CondStore64 : CondStores<GR64, nonvolatile_store,
+ nonvolatile_load, bdxaddr20only>;
+
//===----------------------------------------------------------------------===//
// Call instructions
//===----------------------------------------------------------------------===//
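The sub-word variants pair a truncating store with an any-extending load because only the low bits survive the round trip. For a byte-sized guarded store (illustrative only):

    void g(char *p, char x, int c) {
      if (c)
        *p = x;
    }

the select's "unchanged" operand is an extending i8 load of *p, and the store truncates back to 8 bits, so whether that load sign-extends or zero-extends is irrelevant. That is why these patterns use nonvolatile_anyextloadi8 and friends rather than committing to one extension type.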
diff --git a/lib/Target/SystemZ/SystemZOperators.td b/lib/Target/SystemZ/SystemZOperators.td
index ab01b25..021824e 100644
--- a/lib/Target/SystemZ/SystemZOperators.td
+++ b/lib/Target/SystemZ/SystemZOperators.td
@@ -120,6 +120,20 @@ def zext32 : PatFrag<(ops node:$src), (zext (i32 node:$src))>;
def loadf32 : PatFrag<(ops node:$src), (f32 (load node:$src))>;
def loadf64 : PatFrag<(ops node:$src), (f64 (load node:$src))>;
+// Extending loads in which the extension type doesn't matter.
+def anyextload : PatFrag<(ops node:$ptr), (unindexedload node:$ptr), [{
+ return cast<LoadSDNode>(N)->getExtensionType() != ISD::NON_EXTLOAD;
+}]>;
+def anyextloadi8 : PatFrag<(ops node:$ptr), (anyextload node:$ptr), [{
+ return cast<LoadSDNode>(N)->getMemoryVT() == MVT::i8;
+}]>;
+def anyextloadi16 : PatFrag<(ops node:$ptr), (anyextload node:$ptr), [{
+ return cast<LoadSDNode>(N)->getMemoryVT() == MVT::i16;
+}]>;
+def anyextloadi32 : PatFrag<(ops node:$ptr), (anyextload node:$ptr), [{
+ return cast<LoadSDNode>(N)->getMemoryVT() == MVT::i32;
+}]>;
+
// Aligned loads.
class AlignedLoad<SDPatternOperator load>
: PatFrag<(ops node:$addr), (load node:$addr), [{
@@ -149,7 +163,10 @@ class NonvolatileLoad<SDPatternOperator load>
LoadSDNode *Load = cast<LoadSDNode>(N);
return !Load->isVolatile();
}]>;
-def nonvolatile_load : NonvolatileLoad<load>;
+def nonvolatile_load : NonvolatileLoad<load>;
+def nonvolatile_anyextloadi8 : NonvolatileLoad<anyextloadi8>;
+def nonvolatile_anyextloadi16 : NonvolatileLoad<anyextloadi16>;
+def nonvolatile_anyextloadi32 : NonvolatileLoad<anyextloadi32>;
// Non-volatile stores.
class NonvolatileStore<SDPatternOperator store>
@@ -157,7 +174,10 @@ class NonvolatileStore<SDPatternOperator store>
StoreSDNode *Store = cast<StoreSDNode>(N);
return !Store->isVolatile();
}]>;
-def nonvolatile_store : NonvolatileStore<store>;
+def nonvolatile_store : NonvolatileStore<store>;
+def nonvolatile_truncstorei8 : NonvolatileStore<truncstorei8>;
+def nonvolatile_truncstorei16 : NonvolatileStore<truncstorei16>;
+def nonvolatile_truncstorei32 : NonvolatileStore<truncstorei32>;
// Insertions.
def inserti8 : PatFrag<(ops node:$src1, node:$src2),
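The anyextload family makes "extension type doesn't matter" concrete. Both of the following reach the SelectionDAG as extending i8 loads that differ only in their extension type (illustrative only; the exact nodes depend on earlier combines):

    int s(signed char *p)   { return *p; }  // typically a sextloadi8
    int z(unsigned char *p) { return *p; }  // typically a zextloadi8

When the consumer uses only the low 8 bits, as the conditional-store and read-modify-write patterns do, the distinction is immaterial, and a single anyextloadi8 matches these as well as plain extloadi8. The nonvolatile_* wrappers then stack the volatility check on top, so nonvolatile_anyextloadi8 requires both predicates to hold.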
diff --git a/lib/Target/SystemZ/SystemZPatterns.td b/lib/Target/SystemZ/SystemZPatterns.td
index 3689f74..fb6c221 100644
--- a/lib/Target/SystemZ/SystemZPatterns.td
+++ b/lib/Target/SystemZ/SystemZPatterns.td
@@ -50,12 +50,8 @@ class RMWI<SDPatternOperator load, SDPatternOperator operator,
// memory location. IMM is the type of the second operand.
multiclass RMWIByte<SDPatternOperator operator, AddressingMode mode,
Instruction insn> {
- def : RMWI<zextloadi8, operator, truncstorei8, mode, imm32, insn>;
- def : RMWI<zextloadi8, operator, truncstorei8, mode, imm64, insn>;
- def : RMWI<sextloadi8, operator, truncstorei8, mode, imm32, insn>;
- def : RMWI<sextloadi8, operator, truncstorei8, mode, imm64, insn>;
- def : RMWI<extloadi8, operator, truncstorei8, mode, imm32, insn>;
- def : RMWI<extloadi8, operator, truncstorei8, mode, imm64, insn>;
+ def : RMWI<anyextloadi8, operator, truncstorei8, mode, imm32, insn>;
+ def : RMWI<anyextloadi8, operator, truncstorei8, mode, imm64, insn>;
}
// Record that INSN performs insertion TYPE into a register of class CLS.
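With anyextloadi8 available, RMWIByte needs only one pattern per immediate type instead of the earlier sext/zext/ext triple, cutting six RMWI defs down to two with no loss of coverage. Illustrative only: a read-modify-write byte update such as

    void h(unsigned char *p) {
      *p |= 0x80;
    }

loads *p with some extending i8 load, ORs in the immediate, and truncates the result back into the same byte. The extension type cannot affect the stored value, so one anyextloadi8-based pattern is enough to select the storage-immediate instruction (OI on SystemZ) for all three load flavors.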