aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
-rw-r--r--lib/Target/SystemZ/SystemZISelDAGToDAG.cpp152
-rw-r--r--lib/Target/SystemZ/SystemZInstrInfo.td163
-rw-r--r--lib/Target/SystemZ/SystemZOperands.td15
-rw-r--r--test/CodeGen/SystemZ/05-MemLoadsStores.ll44
-rw-r--r--test/CodeGen/SystemZ/05-MemLoadsStores16.ll85
-rw-r--r--test/CodeGen/SystemZ/05-MemRegStores.ll6
6 files changed, 353 insertions, 112 deletions
diff --git a/lib/Target/SystemZ/SystemZISelDAGToDAG.cpp b/lib/Target/SystemZ/SystemZISelDAGToDAG.cpp
index 603cfec..2bc186b 100644
--- a/lib/Target/SystemZ/SystemZISelDAGToDAG.cpp
+++ b/lib/Target/SystemZ/SystemZISelDAGToDAG.cpp
@@ -77,6 +77,10 @@ namespace {
SystemZTargetLowering &Lowering;
const SystemZSubtarget &Subtarget;
+ void getAddressOperands(const SystemZRRIAddressMode &AM,
+ SDValue &Base, SDValue &Disp,
+ SDValue &Index);
+
public:
SystemZDAGToDAGISel(SystemZTargetMachine &TM, CodeGenOpt::Level OptLevel)
: SelectionDAGISel(TM, OptLevel),
@@ -109,13 +113,16 @@ namespace {
SDValue &Base, SDValue &Disp);
bool SelectAddrRI(const SDValue& Op, SDValue& Addr,
SDValue &Base, SDValue &Disp);
- bool SelectAddrRRI(SDValue Op, SDValue Addr,
- SDValue &Base, SDValue &Disp, SDValue &Index);
+ bool SelectAddrRRI12(SDValue Op, SDValue Addr,
+ SDValue &Base, SDValue &Disp, SDValue &Index);
+ bool SelectAddrRRI20(SDValue Op, SDValue Addr,
+ SDValue &Base, SDValue &Disp, SDValue &Index);
bool SelectLAAddr(SDValue Op, SDValue Addr,
SDValue &Base, SDValue &Disp, SDValue &Index);
SDNode *Select(SDValue Op);
- bool MatchAddress(SDValue N, SystemZRRIAddressMode &AM, unsigned Depth = 0);
+ bool MatchAddress(SDValue N, SystemZRRIAddressMode &AM,
+ bool is12Bit, unsigned Depth = 0);
bool MatchAddressBase(SDValue N, SystemZRRIAddressMode &AM);
#ifndef NDEBUG
@@ -155,24 +162,26 @@ static bool isImmSExt20(SDValue Op, int64_t &Imm) {
return isImmSExt20(Op.getNode(), Imm);
}
-/// isImmSExt12 - This method tests to see if the node is either a 32-bit
+/// isImmZExt12 - This method tests to see if the node is either a 32-bit
/// or 64-bit immediate, and if the value can be accurately represented as a
/// zero extension from a 12-bit value. If so, this returns true and the
/// immediate.
-static bool isImmZExt12(SDNode *N, uint64_t &Imm) {
- if (N->getOpcode() != ISD::Constant)
- return false;
-
- uint64_t Val = cast<ConstantSDNode>(N)->getZExtValue();
- if (Val <= 0xFFF) {
+static bool isImmZExt12(int64_t Val, int64_t &Imm) {
+ if (Val >= 0 && Val <= 0xFFF) {
Imm = Val;
return true;
}
-
return false;
}
-static bool isImmZExt12(SDValue Op, uint64_t &Imm) {
+static bool isImmZExt12(SDNode *N, int64_t &Imm) {
+ if (N->getOpcode() != ISD::Constant)
+ return false;
+
+ return isImmZExt12(cast<ConstantSDNode>(N)->getSExtValue(), Imm);
+}
+
+static bool isImmZExt12(SDValue Op, int64_t &Imm) {
return isImmZExt12(Op.getNode(), Imm);
}
@@ -185,7 +194,7 @@ bool SystemZDAGToDAGISel::SelectAddrRI32(const SDValue& Op, SDValue& Addr,
MVT VT = Addr.getValueType();
if (Addr.getOpcode() == ISD::ADD) {
- uint64_t Imm = 0;
+ int64_t Imm = 0;
if (isImmZExt12(Addr.getOperand(1), Imm)) {
Disp = CurDAG->getTargetConstant(Imm, MVT::i64);
if (FrameIndexSDNode *FI =
@@ -197,7 +206,7 @@ bool SystemZDAGToDAGISel::SelectAddrRI32(const SDValue& Op, SDValue& Addr,
return true; // [r+i]
}
} else if (Addr.getOpcode() == ISD::OR) {
- uint64_t Imm = 0;
+ int64_t Imm = 0;
if (isImmZExt12(Addr.getOperand(1), Imm)) {
// If this is an or of disjoint bitfields, we can codegen this as an add
// (for better address arithmetic) if the LHS and RHS of the OR are
@@ -221,7 +230,7 @@ bool SystemZDAGToDAGISel::SelectAddrRI32(const SDValue& Op, SDValue& Addr,
// If this address fits entirely in a 12-bit zext immediate field, codegen
// this as "d(r0)"
- uint64_t Imm;
+ int64_t Imm;
if (isImmZExt12(CN, Imm)) {
Disp = CurDAG->getTargetConstant(Imm, MVT::i64);
Base = CurDAG->getRegister(0, VT);
@@ -302,7 +311,7 @@ bool SystemZDAGToDAGISel::SelectAddrRI(const SDValue& Op, SDValue& Addr,
/// returning true if it cannot be done. This just pattern matches for the
/// addressing mode.
bool SystemZDAGToDAGISel::MatchAddress(SDValue N, SystemZRRIAddressMode &AM,
- unsigned Depth) {
+ bool is12Bit, unsigned Depth) {
DebugLoc dl = N.getDebugLoc();
DOUT << "MatchAddress: "; DEBUG(AM.dump());
// Limit recursion.
@@ -317,7 +326,10 @@ bool SystemZDAGToDAGISel::MatchAddress(SDValue N, SystemZRRIAddressMode &AM,
case ISD::Constant: {
int64_t Val = cast<ConstantSDNode>(N)->getSExtValue();
int64_t Imm;
- if (isImmSExt20(AM.Disp + Val, Imm)) {
+ bool Match = (is12Bit ?
+ isImmZExt12(AM.Disp + Val, Imm) :
+ isImmSExt20(AM.Disp + Val, Imm));
+ if (Match) {
AM.Disp = Imm;
return false;
}
@@ -325,8 +337,8 @@ bool SystemZDAGToDAGISel::MatchAddress(SDValue N, SystemZRRIAddressMode &AM,
}
case ISD::FrameIndex:
- if (AM.BaseType == SystemZRRIAddressMode::RegBase
- && AM.Base.Reg.getNode() == 0) {
+ if (AM.BaseType == SystemZRRIAddressMode::RegBase &&
+ AM.Base.Reg.getNode() == 0) {
AM.BaseType = SystemZRRIAddressMode::FrameIndexBase;
AM.Base.FrameIndex = cast<FrameIndexSDNode>(N)->getIndex();
return false;
@@ -343,7 +355,7 @@ bool SystemZDAGToDAGISel::MatchAddress(SDValue N, SystemZRRIAddressMode &AM,
// Test if the LHS of the sub can be folded.
SystemZRRIAddressMode Backup = AM;
- if (MatchAddress(N.getNode()->getOperand(0), AM, Depth+1)) {
+ if (MatchAddress(N.getNode()->getOperand(0), AM, is12Bit, Depth+1)) {
AM = Backup;
break;
}
@@ -383,12 +395,12 @@ bool SystemZDAGToDAGISel::MatchAddress(SDValue N, SystemZRRIAddressMode &AM,
case ISD::ADD: {
SystemZRRIAddressMode Backup = AM;
- if (!MatchAddress(N.getNode()->getOperand(0), AM, Depth+1) &&
- !MatchAddress(N.getNode()->getOperand(1), AM, Depth+1))
+ if (!MatchAddress(N.getNode()->getOperand(0), AM, is12Bit, Depth+1) &&
+ !MatchAddress(N.getNode()->getOperand(1), AM, is12Bit, Depth+1))
return false;
AM = Backup;
- if (!MatchAddress(N.getNode()->getOperand(1), AM, Depth+1) &&
- !MatchAddress(N.getNode()->getOperand(0), AM, Depth+1))
+ if (!MatchAddress(N.getNode()->getOperand(1), AM, is12Bit, Depth+1) &&
+ !MatchAddress(N.getNode()->getOperand(0), AM, is12Bit, Depth+1))
return false;
AM = Backup;
@@ -410,10 +422,13 @@ bool SystemZDAGToDAGISel::MatchAddress(SDValue N, SystemZRRIAddressMode &AM,
SystemZRRIAddressMode Backup = AM;
int64_t Offset = CN->getSExtValue();
int64_t Imm;
- // Start with the LHS as an addr mode.
- if (!MatchAddress(N.getOperand(0), AM, Depth+1) &&
- // The resultant disp must fit in 20-bits.
- isImmSExt20(AM.Disp + Offset, Imm) &&
+ bool MatchOffset = (is12Bit ?
+ isImmZExt12(AM.Disp + Offset, Imm) :
+ isImmSExt20(AM.Disp + Offset, Imm));
+        // The resultant disp must fit in 12 or 20 bits.
+ if (MatchOffset &&
+ // LHS should be an addr mode.
+ !MatchAddress(N.getOperand(0), AM, is12Bit, Depth+1) &&
// Check to see if the LHS & C is zero.
CurDAG->MaskedValueIsZero(N.getOperand(0), CN->getAPIntValue())) {
AM.Disp = Imm;
@@ -449,9 +464,69 @@ bool SystemZDAGToDAGISel::MatchAddressBase(SDValue N,
return false;
}
+void SystemZDAGToDAGISel::getAddressOperands(const SystemZRRIAddressMode &AM,
+ SDValue &Base, SDValue &Disp,
+ SDValue &Index) {
+ if (AM.BaseType == SystemZRRIAddressMode::RegBase)
+ Base = AM.Base.Reg;
+ else
+ Base = CurDAG->getTargetFrameIndex(AM.Base.FrameIndex, TLI.getPointerTy());
+ Index = AM.IndexReg;
+ Disp = CurDAG->getTargetConstant(AM.Disp, MVT::i64);
+}
+
+/// Returns true if the address can be represented by a base register plus
+/// index register plus an unsigned 12-bit displacement [base + idx + imm].
+bool SystemZDAGToDAGISel::SelectAddrRRI12(SDValue Op, SDValue Addr,
+ SDValue &Base, SDValue &Disp, SDValue &Index) {
+ SystemZRRIAddressMode AM20, AM12;
+ bool Done = false;
+
+ if (!Addr.hasOneUse()) {
+ unsigned Opcode = Addr.getOpcode();
+ if (Opcode != ISD::Constant && Opcode != ISD::FrameIndex) {
+ // If we are able to fold N into addressing mode, then we'll allow it even
+ // if N has multiple uses. In general, addressing computation is used as
+ // addresses by all of its uses. But watch out for CopyToReg uses, that
+ // means the address computation is liveout. It will be computed by a LA
+ // so we want to avoid computing the address twice.
+ for (SDNode::use_iterator UI = Addr.getNode()->use_begin(),
+ UE = Addr.getNode()->use_end(); UI != UE; ++UI) {
+ if (UI->getOpcode() == ISD::CopyToReg) {
+ MatchAddressBase(Addr, AM12);
+ Done = true;
+ break;
+ }
+ }
+ }
+ }
+ if (!Done && MatchAddress(Addr, AM12, /* is12Bit */ true))
+ return false;
+
+  // Check whether the address can also be matched with 20-bit displacements
+ if (!Done && !MatchAddress(Addr, AM20, /* is12Bit */ false))
+ if (AM12.Disp == 0 && AM20.Disp != 0)
+ return false;
+
+ DOUT << "MatchAddress (final): "; DEBUG(AM12.dump());
+
+ MVT VT = Addr.getValueType();
+ if (AM12.BaseType == SystemZRRIAddressMode::RegBase) {
+ if (!AM12.Base.Reg.getNode())
+ AM12.Base.Reg = CurDAG->getRegister(0, VT);
+ }
+
+ if (!AM12.IndexReg.getNode())
+ AM12.IndexReg = CurDAG->getRegister(0, VT);
+
+ getAddressOperands(AM12, Base, Disp, Index);
+
+ return true;
+}
+
/// Returns true if the address can be represented by a base register plus
/// index register plus a signed 20-bit displacement [base + idx + imm].
-bool SystemZDAGToDAGISel::SelectAddrRRI(SDValue Op, SDValue Addr,
+bool SystemZDAGToDAGISel::SelectAddrRRI20(SDValue Op, SDValue Addr,
SDValue &Base, SDValue &Disp, SDValue &Index) {
SystemZRRIAddressMode AM;
bool Done = false;
@@ -474,7 +549,7 @@ bool SystemZDAGToDAGISel::SelectAddrRRI(SDValue Op, SDValue Addr,
}
}
}
- if (!Done && MatchAddress(Addr, AM))
+ if (!Done && MatchAddress(Addr, AM, /* is12Bit */ false))
return false;
DOUT << "MatchAddress (final): "; DEBUG(AM.dump());
@@ -488,12 +563,7 @@ bool SystemZDAGToDAGISel::SelectAddrRRI(SDValue Op, SDValue Addr,
if (!AM.IndexReg.getNode())
AM.IndexReg = CurDAG->getRegister(0, VT);
- if (AM.BaseType == SystemZRRIAddressMode::RegBase)
- Base = AM.Base.Reg;
- else
- Base = CurDAG->getTargetFrameIndex(AM.Base.FrameIndex, TLI.getPointerTy());
- Index = AM.IndexReg;
- Disp = CurDAG->getTargetConstant(AM.Disp, MVT::i64);
+ getAddressOperands(AM, Base, Disp, Index);
return true;
}
@@ -504,7 +574,7 @@ bool SystemZDAGToDAGISel::SelectLAAddr(SDValue Op, SDValue Addr,
SDValue &Base, SDValue &Disp, SDValue &Index) {
SystemZRRIAddressMode AM;
- if (MatchAddress(Addr, AM))
+ if (MatchAddress(Addr, AM, false))
return false;
MVT VT = Addr.getValueType();
@@ -526,13 +596,7 @@ bool SystemZDAGToDAGISel::SelectLAAddr(SDValue Op, SDValue Addr,
Complexity += 1;
if (Complexity > 2) {
- if (AM.BaseType == SystemZRRIAddressMode::RegBase)
- Base = AM.Base.Reg;
- else
- Base = CurDAG->getTargetFrameIndex(AM.Base.FrameIndex,
- TLI.getPointerTy());
- Index = AM.IndexReg;
- Disp = CurDAG->getTargetConstant(AM.Disp, MVT::i64);
+ getAddressOperands(AM, Base, Disp, Index);
return true;
}
diff --git a/lib/Target/SystemZ/SystemZInstrInfo.td b/lib/Target/SystemZ/SystemZInstrInfo.td
index 688cb0a..845b5f0 100644
--- a/lib/Target/SystemZ/SystemZInstrInfo.td
+++ b/lib/Target/SystemZ/SystemZInstrInfo.td
@@ -222,21 +222,27 @@ def MOV64rihi32 : Pseudo<(outs GR64:$dst), (ins i64imm:$src),
}
let canFoldAsLoad = 1, isReMaterializable = 1, mayHaveSideEffects = 1 in {
-def MOV32rm : Pseudo<(outs GR32:$dst), (ins rriaddr:$src),
- "ly\t{$dst, $src}",
- [(set GR32:$dst, (load rriaddr:$src))]>;
-def MOV64rm : Pseudo<(outs GR64:$dst), (ins rriaddr:$src),
- "lg\t{$dst, $src}",
- [(set GR64:$dst, (load rriaddr:$src))]>;
+def MOV32rm : Pseudo<(outs GR32:$dst), (ins rriaddr12:$src),
+ "l\t{$dst, $src}",
+ [(set GR32:$dst, (load rriaddr12:$src))]>;
+def MOV32rmy : Pseudo<(outs GR32:$dst), (ins rriaddr:$src),
+ "ly\t{$dst, $src}",
+ [(set GR32:$dst, (load rriaddr:$src))]>;
+def MOV64rm : Pseudo<(outs GR64:$dst), (ins rriaddr:$src),
+ "lg\t{$dst, $src}",
+ [(set GR64:$dst, (load rriaddr:$src))]>;
}
-def MOV32mr : Pseudo<(outs), (ins rriaddr:$dst, GR32:$src),
- "sty\t{$src, $dst}",
- [(store GR32:$src, rriaddr:$dst)]>;
-def MOV64mr : Pseudo<(outs), (ins rriaddr:$dst, GR64:$src),
- "stg\t{$src, $dst}",
- [(store GR64:$src, rriaddr:$dst)]>;
+def MOV32mr : Pseudo<(outs), (ins rriaddr12:$dst, GR32:$src),
+ "st\t{$src, $dst}",
+ [(store GR32:$src, rriaddr12:$dst)]>;
+def MOV32mry : Pseudo<(outs), (ins rriaddr:$dst, GR32:$src),
+ "sty\t{$src, $dst}",
+ [(store GR32:$src, rriaddr:$dst)]>;
+def MOV64mr : Pseudo<(outs), (ins rriaddr:$dst, GR64:$src),
+ "stg\t{$src, $dst}",
+ [(store GR64:$src, rriaddr:$dst)]>;
// FIXME: displacements here are really 12 bit, not 20!
def MOV8mi : Pseudo<(outs), (ins riaddr:$dst, i32i8imm:$src),
@@ -271,21 +277,24 @@ def MOVSX64rr16 : Pseudo<(outs GR64:$dst), (ins GR64:$src),
[(set GR64:$dst, (sext_inreg GR64:$src, i16))]>;
// extloads
-def MOVSX32rm8 : Pseudo<(outs GR32:$dst), (ins rriaddr:$src),
- "lb\t{$dst, $src}",
- [(set GR32:$dst, (sextloadi32i8 rriaddr:$src))]>;
-def MOVSX32rm16 : Pseudo<(outs GR32:$dst), (ins rriaddr:$src),
+def MOVSX32rm8 : Pseudo<(outs GR32:$dst), (ins rriaddr:$src),
+ "lb\t{$dst, $src}",
+ [(set GR32:$dst, (sextloadi32i8 rriaddr:$src))]>;
+def MOVSX32rm16 : Pseudo<(outs GR32:$dst), (ins rriaddr12:$src),
+ "lh\t{$dst, $src}",
+ [(set GR32:$dst, (sextloadi32i16 rriaddr12:$src))]>;
+def MOVSX32rm16y : Pseudo<(outs GR32:$dst), (ins rriaddr:$src),
"lhy\t{$dst, $src}",
[(set GR32:$dst, (sextloadi32i16 rriaddr:$src))]>;
-def MOVSX64rm8 : Pseudo<(outs GR64:$dst), (ins rriaddr:$src),
- "lgb\t{$dst, $src}",
- [(set GR64:$dst, (sextloadi64i8 rriaddr:$src))]>;
-def MOVSX64rm16 : Pseudo<(outs GR64:$dst), (ins rriaddr:$src),
- "lgh\t{$dst, $src}",
- [(set GR64:$dst, (sextloadi64i16 rriaddr:$src))]>;
-def MOVSX64rm32 : Pseudo<(outs GR64:$dst), (ins rriaddr:$src),
- "lgf\t{$dst, $src}",
- [(set GR64:$dst, (sextloadi64i32 rriaddr:$src))]>;
+def MOVSX64rm8 : Pseudo<(outs GR64:$dst), (ins rriaddr:$src),
+ "lgb\t{$dst, $src}",
+ [(set GR64:$dst, (sextloadi64i8 rriaddr:$src))]>;
+def MOVSX64rm16 : Pseudo<(outs GR64:$dst), (ins rriaddr:$src),
+ "lgh\t{$dst, $src}",
+ [(set GR64:$dst, (sextloadi64i16 rriaddr:$src))]>;
+def MOVSX64rm32 : Pseudo<(outs GR64:$dst), (ins rriaddr:$src),
+ "lgf\t{$dst, $src}",
+ [(set GR64:$dst, (sextloadi64i32 rriaddr:$src))]>;
def MOVZX32rm8 : Pseudo<(outs GR32:$dst), (ins rriaddr:$src),
"llc\t{$dst, $src}",
@@ -304,26 +313,45 @@ def MOVZX64rm32 : Pseudo<(outs GR64:$dst), (ins rriaddr:$src),
[(set GR64:$dst, (zextloadi64i32 rriaddr:$src))]>;
// truncstores
-// FIXME: Implement 12-bit displacement stuff someday
-def MOV32m8r : Pseudo<(outs), (ins rriaddr:$dst, GR32:$src),
- "stcy\t{$src, $dst}",
- [(truncstorei8 GR32:$src, rriaddr:$dst)]>;
+def MOV32m8r : Pseudo<(outs), (ins rriaddr12:$dst, GR32:$src),
+ "stc\t{$src, $dst}",
+ [(truncstorei8 GR32:$src, rriaddr12:$dst)]>;
-def MOV32m16r : Pseudo<(outs), (ins rriaddr:$dst, GR32:$src),
- "sthy\t{$src, $dst}",
- [(truncstorei16 GR32:$src, rriaddr:$dst)]>;
+def MOV32m8ry : Pseudo<(outs), (ins rriaddr:$dst, GR32:$src),
+ "stcy\t{$src, $dst}",
+ [(truncstorei8 GR32:$src, rriaddr:$dst)]>;
-def MOV64m8r : Pseudo<(outs), (ins rriaddr:$dst, GR64:$src),
- "stcy\t{$src, $dst}",
- [(truncstorei8 GR64:$src, rriaddr:$dst)]>;
+def MOV32m16r : Pseudo<(outs), (ins rriaddr12:$dst, GR32:$src),
+ "sth\t{$src, $dst}",
+ [(truncstorei16 GR32:$src, rriaddr12:$dst)]>;
-def MOV64m16r : Pseudo<(outs), (ins rriaddr:$dst, GR64:$src),
- "sthy\t{$src, $dst}",
- [(truncstorei16 GR64:$src, rriaddr:$dst)]>;
+def MOV32m16ry : Pseudo<(outs), (ins rriaddr:$dst, GR32:$src),
+ "sthy\t{$src, $dst}",
+ [(truncstorei16 GR32:$src, rriaddr:$dst)]>;
-def MOV64m32r : Pseudo<(outs), (ins rriaddr:$dst, GR64:$src),
- "sty\t{$src, $dst}",
- [(truncstorei32 GR64:$src, rriaddr:$dst)]>;
+def MOV64m8r : Pseudo<(outs), (ins rriaddr12:$dst, GR64:$src),
+ "stc\t{$src, $dst}",
+ [(truncstorei8 GR64:$src, rriaddr12:$dst)]>;
+
+def MOV64m8ry : Pseudo<(outs), (ins rriaddr:$dst, GR64:$src),
+ "stcy\t{$src, $dst}",
+ [(truncstorei8 GR64:$src, rriaddr:$dst)]>;
+
+def MOV64m16r : Pseudo<(outs), (ins rriaddr12:$dst, GR64:$src),
+ "sth\t{$src, $dst}",
+ [(truncstorei16 GR64:$src, rriaddr12:$dst)]>;
+
+def MOV64m16ry : Pseudo<(outs), (ins rriaddr:$dst, GR64:$src),
+ "sthy\t{$src, $dst}",
+ [(truncstorei16 GR64:$src, rriaddr:$dst)]>;
+
+def MOV64m32r : Pseudo<(outs), (ins rriaddr12:$dst, GR64:$src),
+ "st\t{$src, $dst}",
+ [(truncstorei32 GR64:$src, rriaddr12:$dst)]>;
+
+def MOV64m32ry : Pseudo<(outs), (ins rriaddr:$dst, GR64:$src),
+ "sty\t{$src, $dst}",
+ [(truncstorei32 GR64:$src, rriaddr:$dst)]>;
// multiple regs moves
// FIXME: should we use multiple arg nodes?
@@ -537,12 +565,15 @@ def MUL64ri32 : Pseudo<(outs GR64:$dst), (ins GR64:$src1, s32imm64:$src2),
[(set GR64:$dst, (mul GR64:$src1, i64immSExt32:$src2))]>,
Requires<[IsZ10]>;
-def MUL32rm : Pseudo<(outs GR32:$dst), (ins GR32:$src1, rriaddr:$src2),
- "msy\t{$dst, $src2}",
- [(set GR32:$dst, (mul GR32:$src1, (load rriaddr:$src2)))]>;
-def MUL64rm : Pseudo<(outs GR64:$dst), (ins GR64:$src1, rriaddr:$src2),
- "msg\t{$dst, $src2}",
- [(set GR64:$dst, (mul GR64:$src1, (load rriaddr:$src2)))]>;
+def MUL32rm : Pseudo<(outs GR32:$dst), (ins GR32:$src1, rriaddr12:$src2),
+ "ms\t{$dst, $src2}",
+ [(set GR32:$dst, (mul GR32:$src1, (load rriaddr12:$src2)))]>;
+def MUL32rmy : Pseudo<(outs GR32:$dst), (ins GR32:$src1, rriaddr:$src2),
+ "msy\t{$dst, $src2}",
+ [(set GR32:$dst, (mul GR32:$src1, (load rriaddr:$src2)))]>;
+def MUL64rm : Pseudo<(outs GR64:$dst), (ins GR64:$src1, rriaddr:$src2),
+ "msg\t{$dst, $src2}",
+ [(set GR64:$dst, (mul GR64:$src1, (load rriaddr:$src2)))]>;
def MULSX64rr32 : Pseudo<(outs GR64:$dst), (ins GR64:$src1, GR32:$src2),
"msgfr\t{$dst, $src2}",
@@ -638,14 +669,18 @@ def CMP64ri32 : Pseudo<(outs), (ins GR64:$src1, s32imm64:$src2),
[(SystemZcmp GR64:$src1, i64immSExt32:$src2),
(implicit PSW)]>;
-def CMP32rm : Pseudo<(outs), (ins GR32:$src1, rriaddr:$src2),
- "cy\t$src1, $src2",
- [(SystemZcmp GR32:$src1, (load rriaddr:$src2)),
- (implicit PSW)]>;
-def CMP64rm : Pseudo<(outs), (ins GR64:$src1, rriaddr:$src2),
- "cg\t$src1, $src2",
- [(SystemZcmp GR64:$src1, (load rriaddr:$src2)),
+def CMP32rm : Pseudo<(outs), (ins GR32:$src1, rriaddr12:$src2),
+ "c\t$src1, $src2",
+ [(SystemZcmp GR32:$src1, (load rriaddr12:$src2)),
(implicit PSW)]>;
+def CMP32rmy : Pseudo<(outs), (ins GR32:$src1, rriaddr:$src2),
+ "cy\t$src1, $src2",
+ [(SystemZcmp GR32:$src1, (load rriaddr:$src2)),
+ (implicit PSW)]>;
+def CMP64rm : Pseudo<(outs), (ins GR64:$src1, rriaddr:$src2),
+ "cg\t$src1, $src2",
+ [(SystemZcmp GR64:$src1, (load rriaddr:$src2)),
+ (implicit PSW)]>;
def UCMP32rr : Pseudo<(outs), (ins GR32:$src1, GR32:$src2),
"clr\t$src1, $src2",
@@ -662,14 +697,18 @@ def UCMP64ri32 : Pseudo<(outs), (ins GR64:$src1, i64i32imm:$src2),
[(SystemZucmp GR64:$src1, i64immZExt32:$src2),
(implicit PSW)]>;
-def UCMP32rm : Pseudo<(outs), (ins GR32:$src1, rriaddr:$src2),
- "cly\t$src1, $src2",
- [(SystemZucmp GR32:$src1, (load rriaddr:$src2)),
- (implicit PSW)]>;
-def UCMP64rm : Pseudo<(outs), (ins GR64:$src1, rriaddr:$src2),
- "clg\t$src1, $src2",
- [(SystemZucmp GR64:$src1, (load rriaddr:$src2)),
- (implicit PSW)]>;
+def UCMP32rm : Pseudo<(outs), (ins GR32:$src1, rriaddr12:$src2),
+ "cl\t$src1, $src2",
+ [(SystemZucmp GR32:$src1, (load rriaddr12:$src2)),
+ (implicit PSW)]>;
+def UCMP32rmy : Pseudo<(outs), (ins GR32:$src1, rriaddr:$src2),
+ "cly\t$src1, $src2",
+ [(SystemZucmp GR32:$src1, (load rriaddr:$src2)),
+ (implicit PSW)]>;
+def UCMP64rm : Pseudo<(outs), (ins GR64:$src1, rriaddr:$src2),
+ "clg\t$src1, $src2",
+ [(SystemZucmp GR64:$src1, (load rriaddr:$src2)),
+ (implicit PSW)]>;
def CMPSX64rr32 : Pseudo<(outs), (ins GR64:$src1, GR32:$src2),
"cgfr\t$src1, $src2",
diff --git a/lib/Target/SystemZ/SystemZOperands.td b/lib/Target/SystemZ/SystemZOperands.td
index 446e426..7ac6cc2 100644
--- a/lib/Target/SystemZ/SystemZOperands.td
+++ b/lib/Target/SystemZ/SystemZOperands.td
@@ -214,10 +214,14 @@ def i64i32imm : Operand<i64>;
// Branch targets have OtherVT type.
def brtarget : Operand<OtherVT>;
-// Unigned i12
+// Unsigned i12
def u12imm : Operand<i32> {
- let PrintMethod = "printU16ImmOperand";
+ let PrintMethod = "printU12ImmOperand";
}
+def u12imm64 : Operand<i64> {
+ let PrintMethod = "printU12ImmOperand";
+}
+
// Signed i16
def s16imm : Operand<i32> {
let PrintMethod = "printS16ImmOperand";
@@ -262,8 +266,13 @@ def riaddr : Operand<i64>,
//===----------------------------------------------------------------------===//
// rriaddr := reg + reg + imm
+def rriaddr12 : Operand<i64>,
+ ComplexPattern<i64, 3, "SelectAddrRRI12", [], []> {
+ let PrintMethod = "printRRIAddrOperand";
+ let MIOperandInfo = (ops ADDR64:$base, u12imm64:$disp, ADDR64:$index);
+}
def rriaddr : Operand<i64>,
- ComplexPattern<i64, 3, "SelectAddrRRI", [], []> {
+ ComplexPattern<i64, 3, "SelectAddrRRI20", [], []> {
let PrintMethod = "printRRIAddrOperand";
let MIOperandInfo = (ops ADDR64:$base, s20imm64:$disp, ADDR64:$index);
}
diff --git a/test/CodeGen/SystemZ/05-MemLoadsStores.ll b/test/CodeGen/SystemZ/05-MemLoadsStores.ll
new file mode 100644
index 0000000..6eb2ddb
--- /dev/null
+++ b/test/CodeGen/SystemZ/05-MemLoadsStores.ll
@@ -0,0 +1,44 @@
+; RUN: llvm-as < %s | llc | grep ly | count 2
+; RUN: llvm-as < %s | llc | grep sty | count 2
+; RUN: llvm-as < %s | llc | grep {l.%} | count 2
+; RUN: llvm-as < %s | llc | grep {st.%} | count 2
+
+target datalayout = "E-p:64:64:64-i8:8:16-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-f128:128:128-a0:16:16"
+target triple = "s390x-linux"
+
+define void @foo1(i32* nocapture %foo, i32* nocapture %bar) nounwind {
+entry:
+ %tmp1 = load i32* %foo ; <i32> [#uses=1]
+ store i32 %tmp1, i32* %bar
+ ret void
+}
+
+define void @foo2(i32* nocapture %foo, i32* nocapture %bar, i64 %idx) nounwind {
+entry:
+ %add.ptr = getelementptr i32* %foo, i64 1 ; <i32*> [#uses=1]
+ %tmp1 = load i32* %add.ptr ; <i32> [#uses=1]
+ %add.ptr3.sum = add i64 %idx, 1 ; <i64> [#uses=1]
+ %add.ptr5 = getelementptr i32* %bar, i64 %add.ptr3.sum ; <i32*> [#uses=1]
+ store i32 %tmp1, i32* %add.ptr5
+ ret void
+}
+
+define void @foo3(i32* nocapture %foo, i32* nocapture %bar, i64 %idx) nounwind {
+entry:
+ %sub.ptr = getelementptr i32* %foo, i64 -1 ; <i32*> [#uses=1]
+ %tmp1 = load i32* %sub.ptr ; <i32> [#uses=1]
+ %sub.ptr3.sum = add i64 %idx, -1 ; <i64> [#uses=1]
+ %add.ptr = getelementptr i32* %bar, i64 %sub.ptr3.sum ; <i32*> [#uses=1]
+ store i32 %tmp1, i32* %add.ptr
+ ret void
+}
+
+define void @foo4(i32* nocapture %foo, i32* nocapture %bar, i64 %idx) nounwind {
+entry:
+ %add.ptr = getelementptr i32* %foo, i64 8192 ; <i32*> [#uses=1]
+ %tmp1 = load i32* %add.ptr ; <i32> [#uses=1]
+ %add.ptr3.sum = add i64 %idx, 8192 ; <i64> [#uses=1]
+ %add.ptr5 = getelementptr i32* %bar, i64 %add.ptr3.sum ; <i32*> [#uses=1]
+ store i32 %tmp1, i32* %add.ptr5
+ ret void
+}
diff --git a/test/CodeGen/SystemZ/05-MemLoadsStores16.ll b/test/CodeGen/SystemZ/05-MemLoadsStores16.ll
new file mode 100644
index 0000000..7f83b1a
--- /dev/null
+++ b/test/CodeGen/SystemZ/05-MemLoadsStores16.ll
@@ -0,0 +1,85 @@
+; RUN: llvm-as < %s | llc | grep {sthy.%} | count 2
+; RUN: llvm-as < %s | llc | grep {lhy.%} | count 2
+; RUN: llvm-as < %s | llc | grep {lh.%} | count 6
+; RUN: llvm-as < %s | llc | grep {sth.%} | count 2
+
+target datalayout = "E-p:64:64:64-i8:8:16-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-f128:128:128-a0:16:16"
+target triple = "s390x-linux"
+
+define void @foo1(i16* nocapture %foo, i16* nocapture %bar) nounwind {
+entry:
+ %tmp1 = load i16* %foo ; <i16> [#uses=1]
+ store i16 %tmp1, i16* %bar
+ ret void
+}
+
+define void @foo2(i16* nocapture %foo, i16* nocapture %bar, i64 %idx) nounwind {
+entry:
+ %add.ptr = getelementptr i16* %foo, i64 1 ; <i16*> [#uses=1]
+ %tmp1 = load i16* %add.ptr ; <i16> [#uses=1]
+ %add.ptr3.sum = add i64 %idx, 1 ; <i64> [#uses=1]
+ %add.ptr5 = getelementptr i16* %bar, i64 %add.ptr3.sum ; <i16*> [#uses=1]
+ store i16 %tmp1, i16* %add.ptr5
+ ret void
+}
+
+define void @foo3(i16* nocapture %foo, i16* nocapture %bar, i64 %idx) nounwind {
+entry:
+ %sub.ptr = getelementptr i16* %foo, i64 -1 ; <i16*> [#uses=1]
+ %tmp1 = load i16* %sub.ptr ; <i16> [#uses=1]
+ %sub.ptr3.sum = add i64 %idx, -1 ; <i64> [#uses=1]
+ %add.ptr = getelementptr i16* %bar, i64 %sub.ptr3.sum ; <i16*> [#uses=1]
+ store i16 %tmp1, i16* %add.ptr
+ ret void
+}
+
+define void @foo4(i16* nocapture %foo, i16* nocapture %bar, i64 %idx) nounwind {
+entry:
+ %add.ptr = getelementptr i16* %foo, i64 8192 ; <i16*> [#uses=1]
+ %tmp1 = load i16* %add.ptr ; <i16> [#uses=1]
+ %add.ptr3.sum = add i64 %idx, 8192 ; <i64> [#uses=1]
+ %add.ptr5 = getelementptr i16* %bar, i64 %add.ptr3.sum ; <i16*> [#uses=1]
+ store i16 %tmp1, i16* %add.ptr5
+ ret void
+}
+
+define void @foo5(i16* nocapture %foo, i32* nocapture %bar) nounwind {
+entry:
+ %tmp1 = load i16* %foo ; <i16> [#uses=1]
+ %conv = sext i16 %tmp1 to i32 ; <i32> [#uses=1]
+ store i32 %conv, i32* %bar
+ ret void
+}
+
+define void @foo6(i16* nocapture %foo, i32* nocapture %bar, i64 %idx) nounwind {
+entry:
+ %add.ptr = getelementptr i16* %foo, i64 1 ; <i16*> [#uses=1]
+ %tmp1 = load i16* %add.ptr ; <i16> [#uses=1]
+ %conv = sext i16 %tmp1 to i32 ; <i32> [#uses=1]
+ %add.ptr3.sum = add i64 %idx, 1 ; <i64> [#uses=1]
+ %add.ptr5 = getelementptr i32* %bar, i64 %add.ptr3.sum ; <i32*> [#uses=1]
+ store i32 %conv, i32* %add.ptr5
+ ret void
+}
+
+define void @foo7(i16* nocapture %foo, i32* nocapture %bar, i64 %idx) nounwind {
+entry:
+ %sub.ptr = getelementptr i16* %foo, i64 -1 ; <i16*> [#uses=1]
+ %tmp1 = load i16* %sub.ptr ; <i16> [#uses=1]
+ %conv = sext i16 %tmp1 to i32 ; <i32> [#uses=1]
+ %sub.ptr3.sum = add i64 %idx, -1 ; <i64> [#uses=1]
+ %add.ptr = getelementptr i32* %bar, i64 %sub.ptr3.sum ; <i32*> [#uses=1]
+ store i32 %conv, i32* %add.ptr
+ ret void
+}
+
+define void @foo8(i16* nocapture %foo, i32* nocapture %bar, i64 %idx) nounwind {
+entry:
+ %add.ptr = getelementptr i16* %foo, i64 8192 ; <i16*> [#uses=1]
+ %tmp1 = load i16* %add.ptr ; <i16> [#uses=1]
+ %conv = sext i16 %tmp1 to i32 ; <i32> [#uses=1]
+ %add.ptr3.sum = add i64 %idx, 8192 ; <i64> [#uses=1]
+ %add.ptr5 = getelementptr i32* %bar, i64 %add.ptr3.sum ; <i32*> [#uses=1]
+ store i32 %conv, i32* %add.ptr5
+ ret void
+}
diff --git a/test/CodeGen/SystemZ/05-MemRegStores.ll b/test/CodeGen/SystemZ/05-MemRegStores.ll
index 1d86df8..4849711 100644
--- a/test/CodeGen/SystemZ/05-MemRegStores.ll
+++ b/test/CodeGen/SystemZ/05-MemRegStores.ll
@@ -1,8 +1,8 @@
; RUN: llvm-as < %s | llc | not grep aghi
; RUN: llvm-as < %s | llc | grep stg | count 1
-; RUN: llvm-as < %s | llc | grep sty | count 2
-; RUN: llvm-as < %s | llc | grep sthy | count 2
-; RUN: llvm-as < %s | llc | grep stcy | count 2
+; RUN: llvm-as < %s | llc | grep st | count 8
+; RUN: llvm-as < %s | llc | grep sth | count 2
+; RUN: llvm-as < %s | llc | grep stc | count 2
target datalayout = "E-p:64:64:64-i1:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-f128:128:128"
target triple = "s390x-unknown-linux-gnu"