Diffstat (limited to 'lib/Target/ARM/ARMISelLowering.cpp')
-rw-r--r--  lib/Target/ARM/ARMISelLowering.cpp | 384
1 file changed, 346 insertions(+), 38 deletions(-)
diff --git a/lib/Target/ARM/ARMISelLowering.cpp b/lib/Target/ARM/ARMISelLowering.cpp
index ec0e9c2..caec11e 100644
--- a/lib/Target/ARM/ARMISelLowering.cpp
+++ b/lib/Target/ARM/ARMISelLowering.cpp
@@ -74,7 +74,7 @@ namespace {
class ARMCCState : public CCState {
public:
ARMCCState(CallingConv::ID CC, bool isVarArg, MachineFunction &MF,
- const TargetMachine &TM, SmallVector<CCValAssign, 16> &locs,
+ const TargetMachine &TM, SmallVectorImpl<CCValAssign> &locs,
LLVMContext &C, ParmContext PC)
: CCState(CC, isVarArg, MF, TM, locs, C) {
assert(((PC == Call) || (PC == Prologue)) &&
@@ -693,10 +693,36 @@ ARMTargetLowering::ARMTargetLowering(TargetMachine &TM)
setOperationAction(ISD::SDIV, MVT::i32, Expand);
setOperationAction(ISD::UDIV, MVT::i32, Expand);
}
+
+ // FIXME: Also set divmod for SREM on EABI
setOperationAction(ISD::SREM, MVT::i32, Expand);
setOperationAction(ISD::UREM, MVT::i32, Expand);
- setOperationAction(ISD::SDIVREM, MVT::i32, Expand);
- setOperationAction(ISD::UDIVREM, MVT::i32, Expand);
+ // Register based DivRem for AEABI (RTABI 4.2)
+ if (Subtarget->isTargetAEABI()) {
+ setLibcallName(RTLIB::SDIVREM_I8, "__aeabi_idivmod");
+ setLibcallName(RTLIB::SDIVREM_I16, "__aeabi_idivmod");
+ setLibcallName(RTLIB::SDIVREM_I32, "__aeabi_idivmod");
+ setLibcallName(RTLIB::SDIVREM_I64, "__aeabi_ldivmod");
+ setLibcallName(RTLIB::UDIVREM_I8, "__aeabi_uidivmod");
+ setLibcallName(RTLIB::UDIVREM_I16, "__aeabi_uidivmod");
+ setLibcallName(RTLIB::UDIVREM_I32, "__aeabi_uidivmod");
+ setLibcallName(RTLIB::UDIVREM_I64, "__aeabi_uldivmod");
+
+ setLibcallCallingConv(RTLIB::SDIVREM_I8, CallingConv::ARM_AAPCS);
+ setLibcallCallingConv(RTLIB::SDIVREM_I16, CallingConv::ARM_AAPCS);
+ setLibcallCallingConv(RTLIB::SDIVREM_I32, CallingConv::ARM_AAPCS);
+ setLibcallCallingConv(RTLIB::SDIVREM_I64, CallingConv::ARM_AAPCS);
+ setLibcallCallingConv(RTLIB::UDIVREM_I8, CallingConv::ARM_AAPCS);
+ setLibcallCallingConv(RTLIB::UDIVREM_I16, CallingConv::ARM_AAPCS);
+ setLibcallCallingConv(RTLIB::UDIVREM_I32, CallingConv::ARM_AAPCS);
+ setLibcallCallingConv(RTLIB::UDIVREM_I64, CallingConv::ARM_AAPCS);
+
+ setOperationAction(ISD::SDIVREM, MVT::i32, Custom);
+ setOperationAction(ISD::UDIVREM, MVT::i32, Custom);
+ } else {
+ setOperationAction(ISD::SDIVREM, MVT::i32, Expand);
+ setOperationAction(ISD::UDIVREM, MVT::i32, Expand);
+ }
setOperationAction(ISD::GlobalAddress, MVT::i32, Custom);
setOperationAction(ISD::ConstantPool, MVT::i32, Custom);
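The AEABI helpers registered above return the quotient and the remainder from a single call (__aeabi_idivmod leaves them in r0 and r1), which is why one SDIVREM/UDIVREM libcall can serve both operations. A minimal sketch of that contract, with hypothetical names and not part of this patch:

    // Reference model (assumed) of __aeabi_idivmod: one call yields both
    // results; the real helper returns them in r0 (quotient) and r1 (remainder).
    struct IDivModResult { int quot; int rem; };

    IDivModResult aeabi_idivmod_reference(int numerator, int denominator) {
      IDivModResult r;
      r.quot = numerator / denominator;  // would come back in r0
      r.rem  = numerator % denominator;  // would come back in r1
      return r;
    }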
@@ -717,8 +743,6 @@ ARMTargetLowering::ARMTargetLowering(TargetMachine &TM)
if (!Subtarget->isTargetDarwin()) {
// Non-Darwin platforms may return values in these registers via the
// personality function.
- setOperationAction(ISD::EHSELECTION, MVT::i32, Expand);
- setOperationAction(ISD::EXCEPTIONADDR, MVT::i32, Expand);
setExceptionPointerRegister(ARM::R0);
setExceptionSelectorRegister(ARM::R1);
}
@@ -1068,6 +1092,19 @@ const char *ARMTargetLowering::getTargetNodeName(unsigned Opcode) const {
case ARMISD::VST2LN_UPD: return "ARMISD::VST2LN_UPD";
case ARMISD::VST3LN_UPD: return "ARMISD::VST3LN_UPD";
case ARMISD::VST4LN_UPD: return "ARMISD::VST4LN_UPD";
+
+ case ARMISD::ATOMADD64_DAG: return "ATOMADD64_DAG";
+ case ARMISD::ATOMSUB64_DAG: return "ATOMSUB64_DAG";
+ case ARMISD::ATOMOR64_DAG: return "ATOMOR64_DAG";
+ case ARMISD::ATOMXOR64_DAG: return "ATOMXOR64_DAG";
+ case ARMISD::ATOMAND64_DAG: return "ATOMAND64_DAG";
+ case ARMISD::ATOMNAND64_DAG: return "ATOMNAND64_DAG";
+ case ARMISD::ATOMSWAP64_DAG: return "ATOMSWAP64_DAG";
+ case ARMISD::ATOMCMPXCHG64_DAG: return "ATOMCMPXCHG64_DAG";
+ case ARMISD::ATOMMIN64_DAG: return "ATOMMIN64_DAG";
+ case ARMISD::ATOMUMIN64_DAG: return "ATOMUMIN64_DAG";
+ case ARMISD::ATOMMAX64_DAG: return "ATOMMAX64_DAG";
+ case ARMISD::ATOMUMAX64_DAG: return "ATOMUMAX64_DAG";
}
}
@@ -1332,7 +1369,7 @@ void ARMTargetLowering::PassF64ArgInRegs(SDLoc dl, SelectionDAG &DAG,
RegsToPassVector &RegsToPass,
CCValAssign &VA, CCValAssign &NextVA,
SDValue &StackPtr,
- SmallVector<SDValue, 8> &MemOpChains,
+ SmallVectorImpl<SDValue> &MemOpChains,
ISD::ArgFlagsTy Flags) const {
SDValue fmrrd = DAG.getNode(ARMISD::VMOVRRD, dl,
@@ -1360,9 +1397,9 @@ ARMTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
SmallVectorImpl<SDValue> &InVals) const {
SelectionDAG &DAG = CLI.DAG;
SDLoc &dl = CLI.DL;
- SmallVector<ISD::OutputArg, 32> &Outs = CLI.Outs;
- SmallVector<SDValue, 32> &OutVals = CLI.OutVals;
- SmallVector<ISD::InputArg, 32> &Ins = CLI.Ins;
+ SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs;
+ SmallVectorImpl<SDValue> &OutVals = CLI.OutVals;
+ SmallVectorImpl<ISD::InputArg> &Ins = CLI.Ins;
SDValue Chain = CLI.Chain;
SDValue Callee = CLI.Callee;
bool &isTailCall = CLI.IsTailCall;
@@ -1711,10 +1748,17 @@ ARMTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
const uint32_t *Mask;
const TargetRegisterInfo *TRI = getTargetMachine().getRegisterInfo();
const ARMBaseRegisterInfo *ARI = static_cast<const ARMBaseRegisterInfo*>(TRI);
- if (isThisReturn)
- // For 'this' returns, use the R0-preserving mask
+ if (isThisReturn) {
+ // For 'this' returns, use the R0-preserving mask if applicable
Mask = ARI->getThisReturnPreservedMask(CallConv);
- else
+ if (!Mask) {
+ // Set isThisReturn to false if the calling convention is not one that
+ // allows 'returned' to be modeled in this way, so LowerCallResult does
+ // not try to pass 'this' straight through
+ isThisReturn = false;
+ Mask = ARI->getCallPreservedMask(CallConv);
+ }
+ } else
Mask = ARI->getCallPreservedMask(CallConv);
assert(Mask && "Missing call preserved mask for calling convention");
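For context, the 'this'-return optimization guarded here corresponds to callees that return their own 'this' pointer, as in the assumed example below (not from the patch); only calling conventions whose preserved mask keeps R0 live across the call may model it this way, hence the fallback.

    // Assumed illustration: init() returns its own 'this', so under an
    // R0-preserving mask the caller may keep using r0 (== this) after the call.
    struct Node {
      int value;
      Node *init() { value = 0; return this; }
    };

    Node *setUp(Node *n) {
      return n->init();  // 'this' goes in and comes back out in r0
    }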
@@ -2550,8 +2594,18 @@ static SDValue LowerATOMIC_FENCE(SDValue Op, SelectionDAG &DAG,
DAG.getConstant(0, MVT::i32));
}
+ ConstantSDNode *OrdN = cast<ConstantSDNode>(Op.getOperand(1));
+ AtomicOrdering Ord = static_cast<AtomicOrdering>(OrdN->getZExtValue());
+ unsigned Domain = ARM_MB::ISH;
+ if (Subtarget->isSwift() && Ord == Release) {
+    // Swift happens to implement ISHST barriers in a way that's compatible
+    // with Release semantics but weaker than ISH, so we'd be fools not to use
+    // it. Beware: other processors probably don't!
+ Domain = ARM_MB::ISHST;
+ }
+
return DAG.getNode(ARMISD::MEMBARRIER, dl, MVT::Other, Op.getOperand(0),
- DAG.getConstant(ARM_MB::ISH, MVT::i32));
+ DAG.getConstant(Domain, MVT::i32));
}
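The ordering-to-barrier mapping introduced above amounts to a single rule; the helper below merely restates it outside the DAG code, with assumed standalone enums in place of the LLVM ones:

    // Sketch: choose the DMB option for an atomic fence. Only Swift may use
    // the weaker store-store barrier for a pure release fence; every other
    // core (and every other ordering) keeps the full inner-shareable barrier.
    enum FenceOrdering { FenceAcquire, FenceRelease, FenceSeqCst };
    enum BarrierDomain { DomainISH, DomainISHST };

    BarrierDomain selectFenceDomain(bool isSwiftCPU, FenceOrdering ordering) {
      if (isSwiftCPU && ordering == FenceRelease)
        return DomainISHST;   // emitted as "dmb ishst"
      return DomainISH;       // emitted as "dmb ish"
    }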
static SDValue LowerPREFETCH(SDValue Op, SelectionDAG &DAG,
@@ -2717,7 +2771,7 @@ ARMTargetLowering::StoreByValRegs(CCState &CCInfo, SelectionDAG &DAG,
lastRegToSaveIndex = REnd - ARM::R0;
} else {
firstRegToSaveIndex = CCInfo.getFirstUnallocated
- (GPRArgRegs, sizeof(GPRArgRegs) / sizeof(GPRArgRegs[0]));
+ (GPRArgRegs, array_lengthof(GPRArgRegs));
lastRegToSaveIndex = 4;
}
@@ -4620,7 +4674,9 @@ SDValue ARMTargetLowering::LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG,
if (ValueCounts.size() == 0)
return DAG.getUNDEF(VT);
- if (isOnlyLowElement)
+  // Loads are better lowered with insert_vector_elt/ARMISD::BUILD_VECTOR,
+  // so keep going if we hit this case.
+ if (isOnlyLowElement && !ISD::isNormalLoad(Value.getNode()))
return DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Value);
unsigned EltSize = VT.getVectorElementType().getSizeInBits();
@@ -4719,6 +4775,24 @@ SDValue ARMTargetLowering::LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG,
return DAG.getNode(ISD::BITCAST, dl, VT, Val);
}
+  // If all else fails, just use a sequence of INSERT_VECTOR_ELT when we
+  // know the default expansion would otherwise fall back on something even
+  // worse. For a vector with one or two non-undef values, that expansion is
+  // scalar_to_vector for the elements followed by a shuffle (provided the
+  // shuffle is valid for the target); for everything else it is
+  // element-by-element materialization on the stack followed by a load.
+ if (!isConstant && !usesOnlyOneValue) {
+ SDValue Vec = DAG.getUNDEF(VT);
+ for (unsigned i = 0 ; i < NumElts; ++i) {
+ SDValue V = Op.getOperand(i);
+ if (V.getOpcode() == ISD::UNDEF)
+ continue;
+ SDValue LaneIdx = DAG.getConstant(i, MVT::i32);
+ Vec = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VT, Vec, V, LaneIdx);
+ }
+ return Vec;
+ }
+
return SDValue();
}
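A rough source-level analogue of what this fallback buys, using NEON intrinsics (an assumed example, not taken from the patch's tests): a vector with a couple of variable lanes can stay in NEON registers as a chain of lane inserts instead of being materialized element by element on the stack and reloaded.

    #include <arm_neon.h>

    // Assumed example: two variable lanes, the rest are don't-care. Each
    // vsetq_lane_s32 corresponds to one INSERT_VECTOR_ELT in the lowering above.
    int32x4_t twoLanes(int32_t a, int32_t b) {
      int32x4_t v = vdupq_n_s32(0);   // arbitrary starting vector
      v = vsetq_lane_s32(a, v, 1);    // insert into lane 1
      v = vsetq_lane_s32(b, v, 3);    // insert into lane 3
      return v;
    }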
@@ -5830,6 +5904,8 @@ SDValue ARMTargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const {
case ISD::SUBE: return LowerADDC_ADDE_SUBC_SUBE(Op, DAG);
case ISD::ATOMIC_LOAD:
case ISD::ATOMIC_STORE: return LowerAtomicLoadStore(Op, DAG);
+ case ISD::SDIVREM:
+ case ISD::UDIVREM: return LowerDivRem(Op, DAG);
}
}
@@ -7948,8 +8024,11 @@ static SDValue AddCombineTo64bitMLAL(SDNode *AddcNode,
assert(AddcNode->getNumValues() == 2 &&
AddcNode->getValueType(0) == MVT::i32 &&
- AddcNode->getValueType(1) == MVT::Glue &&
- "Expect ADDC with two result values: i32, glue");
+ "Expect ADDC with two result values. First: i32");
+
+ // Check that we have a glued ADDC node.
+ if (AddcNode->getValueType(1) != MVT::Glue)
+ return SDValue();
// Check that the ADDC adds the low result of the S/UMUL_LOHI.
if (AddcOp0->getOpcode() != ISD::UMUL_LOHI &&
@@ -8328,22 +8407,29 @@ static SDValue PerformORCombine(SDNode *N,
unsigned SplatBitSize;
bool HasAnyUndefs;
+ APInt SplatBits0, SplatBits1;
BuildVectorSDNode *BVN0 = dyn_cast<BuildVectorSDNode>(N0->getOperand(1));
- APInt SplatBits0;
+ BuildVectorSDNode *BVN1 = dyn_cast<BuildVectorSDNode>(N1->getOperand(1));
+  // Ensure that the second operands of both ANDs are constants.
if (BVN0 && BVN0->isConstantSplat(SplatBits0, SplatUndef, SplatBitSize,
- HasAnyUndefs) && !HasAnyUndefs) {
- BuildVectorSDNode *BVN1 = dyn_cast<BuildVectorSDNode>(N1->getOperand(1));
- APInt SplatBits1;
- if (BVN1 && BVN1->isConstantSplat(SplatBits1, SplatUndef, SplatBitSize,
- HasAnyUndefs) && !HasAnyUndefs &&
- SplatBits0 == ~SplatBits1) {
- // Canonicalize the vector type to make instruction selection simpler.
- EVT CanonicalVT = VT.is128BitVector() ? MVT::v4i32 : MVT::v2i32;
- SDValue Result = DAG.getNode(ARMISD::VBSL, dl, CanonicalVT,
- N0->getOperand(1), N0->getOperand(0),
- N1->getOperand(0));
- return DAG.getNode(ISD::BITCAST, dl, VT, Result);
- }
+ HasAnyUndefs) && !HasAnyUndefs) {
+ if (BVN1 && BVN1->isConstantSplat(SplatBits1, SplatUndef, SplatBitSize,
+ HasAnyUndefs) && !HasAnyUndefs) {
+      // Ensure that the bit widths of the constants are the same and that
+      // the splat arguments are logical inverses, as required by the pattern
+      // we are trying to simplify.
+ if (SplatBits0.getBitWidth() == SplatBits1.getBitWidth() &&
+ SplatBits0 == ~SplatBits1) {
+ // Canonicalize the vector type to make instruction selection
+ // simpler.
+ EVT CanonicalVT = VT.is128BitVector() ? MVT::v4i32 : MVT::v2i32;
+ SDValue Result = DAG.getNode(ARMISD::VBSL, dl, CanonicalVT,
+ N0->getOperand(1),
+ N0->getOperand(0),
+ N1->getOperand(0));
+ return DAG.getNode(ISD::BITCAST, dl, VT, Result);
+ }
+ }
}
}
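The pattern matched here is the classic constant-mask bit select, (A & C) | (B & ~C); when C and ~C are splat constants of the same width it maps onto a single VBSL. In intrinsic form it looks roughly like the sketch below (assumed example; the mask constant is arbitrary):

    #include <arm_neon.h>

    // Assumed example of (A & C) | (B & ~C) with a splat constant C.
    uint32x4_t bitSelect(uint32x4_t a, uint32x4_t b) {
      const uint32x4_t mask = vdupq_n_u32(0x00ff00ffu);   // C
      return vorrq_u32(vandq_u32(a, mask),                // A &  C
                       vandq_u32(b, vmvnq_u32(mask)));    // B & ~C
    }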
@@ -8753,6 +8839,98 @@ static SDValue PerformBUILD_VECTORCombine(SDNode *N,
return DAG.getNode(ISD::BITCAST, dl, VT, BV);
}
+/// \brief Target-specific dag combine xforms for ARMISD::BUILD_VECTOR.
+static SDValue
+PerformARMBUILD_VECTORCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI) {
+ // ARMISD::BUILD_VECTOR is introduced when legalizing ISD::BUILD_VECTOR.
+ // At that time, we may have inserted bitcasts from integer to float.
+  // If these bitcasts have survived DAGCombine, change the lowering of this
+  // BUILD_VECTOR into something more vector friendly, i.e., something that
+  // does not force the use of floating point types.
+
+ // Make sure we can change the type of the vector.
+ // This is possible iff:
+  // 1. The vector is only used in a bitcast to an integer type. I.e.,
+ // 1.1. Vector is used only once.
+ // 1.2. Use is a bit convert to an integer type.
+  // 2. The size of its operands is 32 bits (64 bits are not legal).
+ EVT VT = N->getValueType(0);
+ EVT EltVT = VT.getVectorElementType();
+
+ // Check 1.1. and 2.
+ if (EltVT.getSizeInBits() != 32 || !N->hasOneUse())
+ return SDValue();
+
+ // By construction, the input type must be float.
+ assert(EltVT == MVT::f32 && "Unexpected type!");
+
+ // Check 1.2.
+ SDNode *Use = *N->use_begin();
+ if (Use->getOpcode() != ISD::BITCAST ||
+ Use->getValueType(0).isFloatingPoint())
+ return SDValue();
+
+ // Check profitability.
+  // The model is: if more than half of the relevant operands are bitcast
+  // from i32, turn the build_vector into a sequence of insert_vector_elt.
+  // Relevant operands are all those that are not statically
+  // (i.e., at compile time) bitcast.
+ unsigned NumOfBitCastedElts = 0;
+ unsigned NumElts = VT.getVectorNumElements();
+ unsigned NumOfRelevantElts = NumElts;
+ for (unsigned Idx = 0; Idx < NumElts; ++Idx) {
+ SDValue Elt = N->getOperand(Idx);
+ if (Elt->getOpcode() == ISD::BITCAST) {
+ // Assume only bit cast to i32 will go away.
+ if (Elt->getOperand(0).getValueType() == MVT::i32)
+ ++NumOfBitCastedElts;
+ } else if (Elt.getOpcode() == ISD::UNDEF || isa<ConstantSDNode>(Elt))
+ // Constants are statically casted, thus do not count them as
+ // relevant operands.
+ --NumOfRelevantElts;
+ }
+
+ // Check if more than half of the elements require a non-free bitcast.
+ if (NumOfBitCastedElts <= NumOfRelevantElts / 2)
+ return SDValue();
+
+ SelectionDAG &DAG = DCI.DAG;
+ // Create the new vector type.
+ EVT VecVT = EVT::getVectorVT(*DAG.getContext(), MVT::i32, NumElts);
+ // Check if the type is legal.
+ const TargetLowering &TLI = DAG.getTargetLoweringInfo();
+ if (!TLI.isTypeLegal(VecVT))
+ return SDValue();
+
+ // Combine:
+ // ARMISD::BUILD_VECTOR E1, E2, ..., EN.
+ // => BITCAST INSERT_VECTOR_ELT
+ // (INSERT_VECTOR_ELT (...), (BITCAST EN-1), N-1),
+ // (BITCAST EN), N.
+ SDValue Vec = DAG.getUNDEF(VecVT);
+ SDLoc dl(N);
+ for (unsigned Idx = 0 ; Idx < NumElts; ++Idx) {
+ SDValue V = N->getOperand(Idx);
+ if (V.getOpcode() == ISD::UNDEF)
+ continue;
+ if (V.getOpcode() == ISD::BITCAST &&
+ V->getOperand(0).getValueType() == MVT::i32)
+ // Fold obvious case.
+ V = V.getOperand(0);
+ else {
+ V = DAG.getNode(ISD::BITCAST, SDLoc(V), MVT::i32, V);
+ // Make the DAGCombiner fold the bitcasts.
+ DCI.AddToWorklist(V.getNode());
+ }
+ SDValue LaneIdx = DAG.getConstant(Idx, MVT::i32);
+ Vec = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VecVT, Vec, V, LaneIdx);
+ }
+ Vec = DAG.getNode(ISD::BITCAST, dl, VT, Vec);
+ // Make the DAGCombiner fold the bitcasts.
+ DCI.AddToWorklist(Vec.getNode());
+ return Vec;
+}
+
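The profitability rule described in the comments above ("more than half of the relevant operands are bitcast from i32") can be restated as a small standalone counting function; the sketch below uses an assumed per-element summary type and is not the DAG code itself.

    #include <vector>

    // Assumed summary of one ARMISD::BUILD_VECTOR operand.
    struct EltInfo {
      bool bitcastFromI32;   // element is a bitcast whose source is i32
      bool undefOrConstant;  // statically known, so not a "relevant" operand
    };

    bool worthRewritingAsInserts(const std::vector<EltInfo> &elts) {
      unsigned bitCasted = 0, relevant = unsigned(elts.size());
      for (const EltInfo &e : elts) {
        if (e.bitcastFromI32)
          ++bitCasted;       // these casts become free after the rewrite
        else if (e.undefOrConstant)
          --relevant;        // constants are folded at compile time
      }
      return bitCasted > relevant / 2;
    }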
/// PerformInsertEltCombine - Target-specific dag combine xforms for
/// ISD::INSERT_VECTOR_ELT.
static SDValue PerformInsertEltCombine(SDNode *N,
@@ -9131,12 +9309,27 @@ static SDValue PerformVCVTCombine(SDNode *N,
!isConstVecPow2(ConstVec, isSigned, C))
return SDValue();
+ MVT FloatTy = Op.getSimpleValueType().getVectorElementType();
+ MVT IntTy = N->getSimpleValueType(0).getVectorElementType();
+ if (FloatTy.getSizeInBits() != 32 || IntTy.getSizeInBits() > 32) {
+    // These instructions only exist for converting from f32 to i32. We can handle
+ // smaller integers by generating an extra truncate, but larger ones would
+ // be lossy.
+ return SDValue();
+ }
+
unsigned IntrinsicOpcode = isSigned ? Intrinsic::arm_neon_vcvtfp2fxs :
Intrinsic::arm_neon_vcvtfp2fxu;
- return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, SDLoc(N),
- N->getValueType(0),
- DAG.getConstant(IntrinsicOpcode, MVT::i32), N0,
- DAG.getConstant(Log2_64(C), MVT::i32));
+ unsigned NumLanes = Op.getValueType().getVectorNumElements();
+ SDValue FixConv = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, SDLoc(N),
+ NumLanes == 2 ? MVT::v2i32 : MVT::v4i32,
+ DAG.getConstant(IntrinsicOpcode, MVT::i32), N0,
+ DAG.getConstant(Log2_64(C), MVT::i32));
+
+ if (IntTy.getSizeInBits() < FloatTy.getSizeInBits())
+ FixConv = DAG.getNode(ISD::TRUNCATE, SDLoc(N), N->getValueType(0), FixConv);
+
+ return FixConv;
}
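For reference, this combine rewrites a multiply by a power-of-two floating-point splat followed by a float-to-int conversion into one fixed-point VCVT with that many fraction bits; the new check reflects the fact that only the f32 -> i32 form of the instruction exists. A scalar model of the arithmetic (assumed helper, not patch code):

    #include <cstdint>

    // Assumed scalar model: truncating (int)(x * 2^fracBits) is exactly what
    // vcvt.s32.f32 with #fracBits fraction bits computes.
    int32_t floatToFixed(float x, unsigned fracBits) {
      return (int32_t)(x * (float)(1u << fracBits));
    }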
/// PerformVDIVCombine - VCVT (fixed-point to floating-point, Advanced SIMD)
@@ -9167,12 +9360,28 @@ static SDValue PerformVDIVCombine(SDNode *N,
!isConstVecPow2(ConstVec, isSigned, C))
return SDValue();
+ MVT FloatTy = N->getSimpleValueType(0).getVectorElementType();
+ MVT IntTy = Op.getOperand(0).getSimpleValueType().getVectorElementType();
+ if (FloatTy.getSizeInBits() != 32 || IntTy.getSizeInBits() > 32) {
+    // These instructions only exist for converting from i32 to f32. We can handle
+ // smaller integers by generating an extra extend, but larger ones would
+ // be lossy.
+ return SDValue();
+ }
+
+ SDValue ConvInput = Op.getOperand(0);
+ unsigned NumLanes = Op.getValueType().getVectorNumElements();
+ if (IntTy.getSizeInBits() < FloatTy.getSizeInBits())
+ ConvInput = DAG.getNode(isSigned ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND,
+ SDLoc(N), NumLanes == 2 ? MVT::v2i32 : MVT::v4i32,
+ ConvInput);
+
unsigned IntrinsicOpcode = isSigned ? Intrinsic::arm_neon_vcvtfxs2fp :
Intrinsic::arm_neon_vcvtfxu2fp;
return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, SDLoc(N),
Op.getValueType(),
DAG.getConstant(IntrinsicOpcode, MVT::i32),
- Op.getOperand(0), DAG.getConstant(Log2_64(C), MVT::i32));
+ ConvInput, DAG.getConstant(Log2_64(C), MVT::i32));
}
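Symmetrically, this combine turns an int-to-float conversion followed by a divide by a power-of-two floating-point splat into a fixed-point-to-float VCVT, now with an explicit sign or zero extend when the integer lanes are narrower than 32 bits. A scalar model (assumed helper, not patch code):

    #include <cstdint>

    // Assumed scalar model: vcvt.f32.s32 with #fracBits fraction bits computes
    // x / 2^fracBits; a narrower input is sign-extended to 32 bits first,
    // mirroring the SIGN_EXTEND/ZERO_EXTEND added above.
    float fixedToFloat(int16_t x, unsigned fracBits) {
      int32_t wide = (int32_t)x;            // sign extend to i32
      return (float)wide / (float)(1u << fracBits);
    }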
/// Getvshiftimm - Check if this is a valid build_vector for the immediate
@@ -9658,6 +9867,8 @@ SDValue ARMTargetLowering::PerformDAGCombine(SDNode *N,
case ARMISD::VLD3DUP:
case ARMISD::VLD4DUP:
return CombineBaseUpdate(N, DCI);
+ case ARMISD::BUILD_VECTOR:
+ return PerformARMBUILD_VECTORCombine(N, DCI);
case ISD::INTRINSIC_VOID:
case ISD::INTRINSIC_W_CHAIN:
switch (cast<ConstantSDNode>(N->getOperand(1))->getZExtValue()) {
@@ -9782,6 +9993,21 @@ bool ARMTargetLowering::isZExtFree(SDValue Val, EVT VT2) const {
return false;
}
+bool ARMTargetLowering::allowTruncateForTailCall(Type *Ty1, Type *Ty2) const {
+ if (!Ty1->isIntegerTy() || !Ty2->isIntegerTy())
+ return false;
+
+ if (!isTypeLegal(EVT::getEVT(Ty1)))
+ return false;
+
+ assert(Ty1->getPrimitiveSizeInBits() <= 64 && "i128 is probably not a noop");
+
+ // Assuming the caller doesn't have a zeroext or signext return parameter,
+ // truncation all the way down to i1 is valid.
+ return true;
+}
+
+
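This hook answers whether truncating the callee's integer return value is free enough to keep the call a tail call; on ARM the truncation is a no-op in a register, so any legal source type qualifies. A hypothetical example of a call this enables (not from the patch):

    // Assumed example: the i32 -> i16 truncation of the return value costs
    // nothing in a register, so narrow() can still tail-call wide().
    int wide(int x);

    short narrow(int x) {
      return (short)wide(x);
    }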
static bool isLegalT1AddressImmediate(int64_t V, EVT VT) {
if (V < 0)
return false;
@@ -10181,9 +10407,19 @@ void ARMTargetLowering::computeMaskedBitsForTargetNode(const SDValue Op,
APInt &KnownOne,
const SelectionDAG &DAG,
unsigned Depth) const {
- KnownZero = KnownOne = APInt(KnownOne.getBitWidth(), 0);
+ unsigned BitWidth = KnownOne.getBitWidth();
+ KnownZero = KnownOne = APInt(BitWidth, 0);
switch (Op.getOpcode()) {
default: break;
+ case ARMISD::ADDC:
+ case ARMISD::ADDE:
+ case ARMISD::SUBC:
+ case ARMISD::SUBE:
+ // These nodes' second result is a boolean
+ if (Op.getResNo() == 0)
+ break;
+ KnownZero |= APInt::getHighBitsSet(BitWidth, BitWidth - 1);
+ break;
case ARMISD::CMOV: {
// Bits are known zero/one if known on the LHS and RHS.
DAG.ComputeMaskedBits(Op.getOperand(0), KnownZero, KnownOne, Depth+1);
@@ -10297,7 +10533,7 @@ ARMTargetLowering::getSingleConstraintMatchWeight(
typedef std::pair<unsigned, const TargetRegisterClass*> RCPair;
RCPair
ARMTargetLowering::getRegForInlineAsmConstraint(const std::string &Constraint,
- EVT VT) const {
+ MVT VT) const {
if (Constraint.size() == 1) {
// GCC ARM Constraint Letters
switch (Constraint[0]) {
@@ -10506,6 +10742,54 @@ void ARMTargetLowering::LowerAsmOperandForConstraint(SDValue Op,
return TargetLowering::LowerAsmOperandForConstraint(Op, Constraint, Ops, DAG);
}
+SDValue ARMTargetLowering::LowerDivRem(SDValue Op, SelectionDAG &DAG) const {
+ assert(Subtarget->isTargetAEABI() && "Register-based DivRem lowering only");
+ unsigned Opcode = Op->getOpcode();
+ assert((Opcode == ISD::SDIVREM || Opcode == ISD::UDIVREM) &&
+ "Invalid opcode for Div/Rem lowering");
+ bool isSigned = (Opcode == ISD::SDIVREM);
+ EVT VT = Op->getValueType(0);
+ Type *Ty = VT.getTypeForEVT(*DAG.getContext());
+
+ RTLIB::Libcall LC;
+ switch (VT.getSimpleVT().SimpleTy) {
+ default: llvm_unreachable("Unexpected request for libcall!");
+  case MVT::i8:  LC = isSigned ? RTLIB::SDIVREM_I8  : RTLIB::UDIVREM_I8;  break;
+  case MVT::i16: LC = isSigned ? RTLIB::SDIVREM_I16 : RTLIB::UDIVREM_I16; break;
+  case MVT::i32: LC = isSigned ? RTLIB::SDIVREM_I32 : RTLIB::UDIVREM_I32; break;
+  case MVT::i64: LC = isSigned ? RTLIB::SDIVREM_I64 : RTLIB::UDIVREM_I64; break;
+ }
+
+ SDValue InChain = DAG.getEntryNode();
+
+ TargetLowering::ArgListTy Args;
+ TargetLowering::ArgListEntry Entry;
+ for (unsigned i = 0, e = Op->getNumOperands(); i != e; ++i) {
+ EVT ArgVT = Op->getOperand(i).getValueType();
+ Type *ArgTy = ArgVT.getTypeForEVT(*DAG.getContext());
+ Entry.Node = Op->getOperand(i);
+ Entry.Ty = ArgTy;
+ Entry.isSExt = isSigned;
+ Entry.isZExt = !isSigned;
+ Args.push_back(Entry);
+ }
+
+ SDValue Callee = DAG.getExternalSymbol(getLibcallName(LC),
+ getPointerTy());
+
+ Type *RetTy = (Type*)StructType::get(Ty, Ty, NULL);
+
+ SDLoc dl(Op);
+ TargetLowering::
+ CallLoweringInfo CLI(InChain, RetTy, isSigned, !isSigned, false, true,
+ 0, getLibcallCallingConv(LC), /*isTailCall=*/false,
+ /*doesNotReturn=*/false, /*isReturnValueUsed=*/true,
+ Callee, Args, DAG, dl);
+ std::pair<SDValue, SDValue> CallInfo = LowerCallTo(CLI);
+
+ return CallInfo.first;
+}
+
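At the source level, marking SDIVREM/UDIVREM as Custom and routing them here means a quotient and remainder of the same operands share one runtime call. An assumed example on an AEABI target without hardware divide:

    // Assumed example: both results below come from a single call to
    // __aeabi_idivmod(a, b), with the quotient in r0 and the remainder in r1.
    void quotRem(int a, int b, int *q, int *r) {
      *q = a / b;
      *r = a % b;
    }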
bool
ARMTargetLowering::isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const {
// The ARM target isn't yet aware of offsets.
@@ -10591,6 +10875,30 @@ bool ARMTargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info,
Info.writeMem = true;
return true;
}
+ case Intrinsic::arm_ldrex: {
+ PointerType *PtrTy = cast<PointerType>(I.getArgOperand(0)->getType());
+ Info.opc = ISD::INTRINSIC_W_CHAIN;
+ Info.memVT = MVT::getVT(PtrTy->getElementType());
+ Info.ptrVal = I.getArgOperand(0);
+ Info.offset = 0;
+ Info.align = getDataLayout()->getABITypeAlignment(PtrTy->getElementType());
+ Info.vol = true;
+ Info.readMem = true;
+ Info.writeMem = false;
+ return true;
+ }
+ case Intrinsic::arm_strex: {
+ PointerType *PtrTy = cast<PointerType>(I.getArgOperand(1)->getType());
+ Info.opc = ISD::INTRINSIC_W_CHAIN;
+ Info.memVT = MVT::getVT(PtrTy->getElementType());
+ Info.ptrVal = I.getArgOperand(1);
+ Info.offset = 0;
+ Info.align = getDataLayout()->getABITypeAlignment(PtrTy->getElementType());
+ Info.vol = true;
+ Info.readMem = false;
+ Info.writeMem = true;
+ return true;
+ }
case Intrinsic::arm_strexd: {
Info.opc = ISD::INTRINSIC_W_CHAIN;
Info.memVT = MVT::i64;