Diffstat (limited to 'lib/CodeGen/SelectionDAG')
-rw-r--r--  lib/CodeGen/SelectionDAG/DAGCombiner.cpp           | 351
-rw-r--r--  lib/CodeGen/SelectionDAG/FastISel.cpp              |  80
-rw-r--r--  lib/CodeGen/SelectionDAG/FunctionLoweringInfo.cpp  |  19
-rw-r--r--  lib/CodeGen/SelectionDAG/InstrEmitter.cpp          |  12
-rw-r--r--  lib/CodeGen/SelectionDAG/InstrEmitter.h            |   2
-rw-r--r--  lib/CodeGen/SelectionDAG/LegalizeDAG.cpp           |  95
-rw-r--r--  lib/CodeGen/SelectionDAG/LegalizeFloatTypes.cpp    |  36
-rw-r--r--  lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp  |  89
-rw-r--r--  lib/CodeGen/SelectionDAG/LegalizeTypes.cpp         |   5
-rw-r--r--  lib/CodeGen/SelectionDAG/LegalizeTypes.h           |   8
-rw-r--r--  lib/CodeGen/SelectionDAG/LegalizeTypesGeneric.cpp  |  21
-rw-r--r--  lib/CodeGen/SelectionDAG/LegalizeVectorOps.cpp     |  18
-rw-r--r--  lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp   | 119
-rw-r--r--  lib/CodeGen/SelectionDAG/ScheduleDAGFast.cpp       |  12
-rw-r--r--  lib/CodeGen/SelectionDAG/ScheduleDAGRRList.cpp     |  25
-rw-r--r--  lib/CodeGen/SelectionDAG/ScheduleDAGSDNodes.cpp    |  19
-rw-r--r--  lib/CodeGen/SelectionDAG/SelectionDAG.cpp          | 106
-rw-r--r--  lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp   | 621
-rw-r--r--  lib/CodeGen/SelectionDAG/SelectionDAGBuilder.h     |  10
-rw-r--r--  lib/CodeGen/SelectionDAG/SelectionDAGDumper.cpp    |   7
-rw-r--r--  lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp      |  66
-rw-r--r--  lib/CodeGen/SelectionDAG/TargetLowering.cpp        |   4
22 files changed, 944 insertions(+), 781 deletions(-)
diff --git a/lib/CodeGen/SelectionDAG/DAGCombiner.cpp b/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
index 046dd41..cb88941 100644
--- a/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
+++ b/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
@@ -154,7 +154,7 @@ namespace {
SDValue PromoteExtend(SDValue Op);
bool PromoteLoad(SDValue Op);
- void ExtendSetCCUses(SmallVector<SDNode*, 4> SetCCs,
+ void ExtendSetCCUses(const SmallVectorImpl<SDNode *> &SetCCs,
SDValue Trunc, SDValue ExtLoad, SDLoc DL,
ISD::NodeType ExtType);
@@ -279,7 +279,7 @@ namespace {
/// GatherAllAliases - Walk up chain skipping non-aliasing memory nodes,
/// looking for aliasing nodes and adding them to the Aliases vector.
void GatherAllAliases(SDNode *N, SDValue OriginalChain,
- SmallVector<SDValue, 8> &Aliases);
+ SmallVectorImpl<SDValue> &Aliases);
/// isAlias - Return true if there is any possibility that the two addresses
/// overlap.
@@ -326,7 +326,10 @@ namespace {
/// getShiftAmountTy - Returns a type large enough to hold any valid
/// shift amount - before type legalization these can be huge.
EVT getShiftAmountTy(EVT LHSTy) {
- return LegalTypes ? TLI.getShiftAmountTy(LHSTy) : TLI.getPointerTy();
+ assert(LHSTy.isInteger() && "Shift amount is not an integer type!");
+ if (LHSTy.isVector())
+ return LHSTy;
+ return LegalTypes ? TLI.getScalarShiftAmountTy(LHSTy) : TLI.getPointerTy();
}
/// isTypeLegal - This method returns true if we are running before type
@@ -1251,7 +1254,7 @@ static SDValue getInputChainForNode(SDNode *N) {
if (unsigned NumOps = N->getNumOperands()) {
if (N->getOperand(0).getValueType() == MVT::Other)
return N->getOperand(0);
- else if (N->getOperand(NumOps-1).getValueType() == MVT::Other)
+ if (N->getOperand(NumOps-1).getValueType() == MVT::Other)
return N->getOperand(NumOps-1);
for (unsigned i = 1; i < NumOps-1; ++i)
if (N->getOperand(i).getValueType() == MVT::Other)
@@ -1610,13 +1613,19 @@ SDValue DAGCombiner::visitADDE(SDNode *N) {
// Since it may not be valid to emit a fold to zero for vector initializers
// check if we can before folding.
static SDValue tryFoldToZero(SDLoc DL, const TargetLowering &TLI, EVT VT,
- SelectionDAG &DAG, bool LegalOperations) {
- if (!VT.isVector()) {
+ SelectionDAG &DAG,
+ bool LegalOperations, bool LegalTypes) {
+ if (!VT.isVector())
return DAG.getConstant(0, VT);
- }
if (!LegalOperations || TLI.isOperationLegal(ISD::BUILD_VECTOR, VT)) {
// Produce a vector of zeros.
- SDValue El = DAG.getConstant(0, VT.getVectorElementType());
+ EVT ElemTy = VT.getVectorElementType();
+ if (LegalTypes && TLI.getTypeAction(*DAG.getContext(), ElemTy) ==
+ TargetLowering::TypePromoteInteger)
+ ElemTy = TLI.getTypeToTransformTo(*DAG.getContext(), ElemTy);
+ assert((!LegalTypes || TLI.isTypeLegal(ElemTy)) &&
+ "Type for zero vector elements is not legal");
+ SDValue El = DAG.getConstant(0, ElemTy);
std::vector<SDValue> Ops(VT.getVectorNumElements(), El);
return DAG.getNode(ISD::BUILD_VECTOR, DL, VT,
&Ops[0], Ops.size());
@@ -1646,7 +1655,7 @@ SDValue DAGCombiner::visitSUB(SDNode *N) {
// fold (sub x, x) -> 0
// FIXME: Refactor this and xor and other similar operations together.
if (N0 == N1)
- return tryFoldToZero(SDLoc(N), TLI, VT, DAG, LegalOperations);
+ return tryFoldToZero(SDLoc(N), TLI, VT, DAG, LegalOperations, LegalTypes);
// fold (sub c1, c2) -> c1-c2
if (N0C && N1C)
return DAG.FoldConstantArithmetic(ISD::SUB, VT, N0C, N1C);
@@ -1762,43 +1771,73 @@ SDValue DAGCombiner::visitSUBE(SDNode *N) {
return SDValue();
}
+/// isConstantSplatVector - Returns true if N is a BUILD_VECTOR node whose elements are
+/// all the same constant or undefined.
+static bool isConstantSplatVector(SDNode *N, APInt& SplatValue) {
+ BuildVectorSDNode *C = dyn_cast<BuildVectorSDNode>(N);
+ if (!C)
+ return false;
+
+ APInt SplatUndef;
+ unsigned SplatBitSize;
+ bool HasAnyUndefs;
+ EVT EltVT = N->getValueType(0).getVectorElementType();
+ return (C->isConstantSplat(SplatValue, SplatUndef, SplatBitSize,
+ HasAnyUndefs) &&
+ EltVT.getSizeInBits() >= SplatBitSize);
+}
+
SDValue DAGCombiner::visitMUL(SDNode *N) {
SDValue N0 = N->getOperand(0);
SDValue N1 = N->getOperand(1);
- ConstantSDNode *N0C = dyn_cast<ConstantSDNode>(N0);
- ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1);
EVT VT = N0.getValueType();
+ // fold (mul x, undef) -> 0
+ if (N0.getOpcode() == ISD::UNDEF || N1.getOpcode() == ISD::UNDEF)
+ return DAG.getConstant(0, VT);
+
+ bool N0IsConst = false;
+ bool N1IsConst = false;
+ APInt ConstValue0, ConstValue1;
// fold vector ops
if (VT.isVector()) {
SDValue FoldedVOp = SimplifyVBinOp(N);
if (FoldedVOp.getNode()) return FoldedVOp;
+
+ N0IsConst = isConstantSplatVector(N0.getNode(), ConstValue0);
+ N1IsConst = isConstantSplatVector(N1.getNode(), ConstValue1);
+ } else {
+ N0IsConst = dyn_cast<ConstantSDNode>(N0) != 0;
+ ConstValue0 = N0IsConst? (dyn_cast<ConstantSDNode>(N0))->getAPIntValue() : APInt();
+ N1IsConst = dyn_cast<ConstantSDNode>(N1) != 0;
+ ConstValue1 = N1IsConst? (dyn_cast<ConstantSDNode>(N1))->getAPIntValue() : APInt();
}
- // fold (mul x, undef) -> 0
- if (N0.getOpcode() == ISD::UNDEF || N1.getOpcode() == ISD::UNDEF)
- return DAG.getConstant(0, VT);
// fold (mul c1, c2) -> c1*c2
- if (N0C && N1C)
- return DAG.FoldConstantArithmetic(ISD::MUL, VT, N0C, N1C);
+ if (N0IsConst && N1IsConst)
+ return DAG.FoldConstantArithmetic(ISD::MUL, VT, N0.getNode(), N1.getNode());
+
// canonicalize constant to RHS
- if (N0C && !N1C)
+ if (N0IsConst && !N1IsConst)
return DAG.getNode(ISD::MUL, SDLoc(N), VT, N1, N0);
// fold (mul x, 0) -> 0
- if (N1C && N1C->isNullValue())
+ if (N1IsConst && ConstValue1 == 0)
return N1;
+ // fold (mul x, 1) -> x
+ if (N1IsConst && ConstValue1 == 1)
+ return N0;
// fold (mul x, -1) -> 0-x
- if (N1C && N1C->isAllOnesValue())
+ if (N1IsConst && ConstValue1.isAllOnesValue())
return DAG.getNode(ISD::SUB, SDLoc(N), VT,
DAG.getConstant(0, VT), N0);
// fold (mul x, (1 << c)) -> x << c
- if (N1C && N1C->getAPIntValue().isPowerOf2())
+ if (N1IsConst && ConstValue1.isPowerOf2())
return DAG.getNode(ISD::SHL, SDLoc(N), VT, N0,
- DAG.getConstant(N1C->getAPIntValue().logBase2(),
+ DAG.getConstant(ConstValue1.logBase2(),
getShiftAmountTy(N0.getValueType())));
// fold (mul x, -(1 << c)) -> -(x << c) or (-x) << c
- if (N1C && (-N1C->getAPIntValue()).isPowerOf2()) {
- unsigned Log2Val = (-N1C->getAPIntValue()).logBase2();
+ if (N1IsConst && (-ConstValue1).isPowerOf2()) {
+ unsigned Log2Val = (-ConstValue1).logBase2();
// FIXME: If the input is something that is easily negated (e.g. a
// single-use add), we should put the negate there.
return DAG.getNode(ISD::SUB, SDLoc(N), VT,
@@ -1807,9 +1846,12 @@ SDValue DAGCombiner::visitMUL(SDNode *N) {
DAG.getConstant(Log2Val,
getShiftAmountTy(N0.getValueType()))));
}
+
+ APInt Val;
// (mul (shl X, c1), c2) -> (mul X, c2 << c1)
- if (N1C && N0.getOpcode() == ISD::SHL &&
- isa<ConstantSDNode>(N0.getOperand(1))) {
+ if (N1IsConst && N0.getOpcode() == ISD::SHL &&
+ (isConstantSplatVector(N0.getOperand(1).getNode(), Val) ||
+ isa<ConstantSDNode>(N0.getOperand(1)))) {
SDValue C3 = DAG.getNode(ISD::SHL, SDLoc(N), VT,
N1, N0.getOperand(1));
AddToWorkList(C3.getNode());
@@ -1822,7 +1864,9 @@ SDValue DAGCombiner::visitMUL(SDNode *N) {
{
SDValue Sh(0,0), Y(0,0);
// Check for both (mul (shl X, C), Y) and (mul Y, (shl X, C)).
- if (N0.getOpcode() == ISD::SHL && isa<ConstantSDNode>(N0.getOperand(1)) &&
+ if (N0.getOpcode() == ISD::SHL &&
+ (isConstantSplatVector(N0.getOperand(1).getNode(), Val) ||
+ isa<ConstantSDNode>(N0.getOperand(1))) &&
N0.getNode()->hasOneUse()) {
Sh = N0; Y = N1;
} else if (N1.getOpcode() == ISD::SHL &&
@@ -1840,8 +1884,9 @@ SDValue DAGCombiner::visitMUL(SDNode *N) {
}
// fold (mul (add x, c1), c2) -> (add (mul x, c2), c1*c2)
- if (N1C && N0.getOpcode() == ISD::ADD && N0.getNode()->hasOneUse() &&
- isa<ConstantSDNode>(N0.getOperand(1)))
+ if (N1IsConst && N0.getOpcode() == ISD::ADD && N0.getNode()->hasOneUse() &&
+ (isConstantSplatVector(N0.getOperand(1).getNode(), Val) ||
+ isa<ConstantSDNode>(N0.getOperand(1))))
return DAG.getNode(ISD::ADD, SDLoc(N), VT,
DAG.getNode(ISD::MUL, SDLoc(N0), VT,
N0.getOperand(0), N1),
@@ -2502,7 +2547,7 @@ SDValue DAGCombiner::visitAND(SDNode *N) {
return SDValue(N, 0); // Return N so it doesn't get rechecked!
}
}
- // similarly fold (and (X (load ([non_ext|any_ext|zero_ext] V))), c) ->
+ // similarly fold (and (X (load ([non_ext|any_ext|zero_ext] V))), c) ->
// (X (load ([non_ext|zero_ext] V))) if 'and' only clears top bits which must
// already be zero by virtue of the width of the base type of the load.
//
@@ -2716,7 +2761,7 @@ SDValue DAGCombiner::visitAND(SDNode *N) {
? cast<LoadSDNode>(N0.getOperand(0))
: cast<LoadSDNode>(N0);
if (LN0->getExtensionType() != ISD::SEXTLOAD &&
- LN0->isUnindexed() && N0.hasOneUse() && LN0->hasOneUse()) {
+ LN0->isUnindexed() && N0.hasOneUse() && SDValue(LN0, 0).hasOneUse()) {
uint32_t ActiveBits = N1C->getAPIntValue().getActiveBits();
if (ActiveBits > 0 && APIntOps::isMask(ActiveBits, N1C->getAPIntValue())){
EVT ExtVT = EVT::getIntegerVT(*DAG.getContext(), ActiveBits);
@@ -2905,7 +2950,7 @@ SDValue DAGCombiner::MatchBSwapHWordLow(SDNode *N, SDValue N0, SDValue N1,
/// isBSwapHWordElement - Return true if the specified node is an element
/// that makes up a 32-bit packed halfword byteswap. i.e.
/// ((x&0xff)<<8)|((x&0xff00)>>8)|((x&0x00ff0000)<<8)|((x&0xff000000)>>8)
-static bool isBSwapHWordElement(SDValue N, SmallVector<SDNode*,4> &Parts) {
+static bool isBSwapHWordElement(SDValue N, SmallVectorImpl<SDNode *> &Parts) {
if (!N.getNode()->hasOneUse())
return false;
@@ -3304,25 +3349,21 @@ SDNode *DAGCombiner::MatchRotate(SDValue LHS, SDValue RHS, SDLoc DL) {
LHSShiftAmt == RHSShiftAmt.getOperand(1)) {
if (ConstantSDNode *SUBC =
dyn_cast<ConstantSDNode>(RHSShiftAmt.getOperand(0))) {
- if (SUBC->getAPIntValue() == OpSizeInBits) {
+ if (SUBC->getAPIntValue() == OpSizeInBits)
return DAG.getNode(HasROTL ? ISD::ROTL : ISD::ROTR, DL, VT, LHSShiftArg,
HasROTL ? LHSShiftAmt : RHSShiftAmt).getNode();
- }
}
}
// fold (or (shl x, (sub 32, y)), (srl x, r)) -> (rotr x, y)
// fold (or (shl x, (sub 32, y)), (srl x, r)) -> (rotl x, (sub 32, y))
if (LHSShiftAmt.getOpcode() == ISD::SUB &&
- RHSShiftAmt == LHSShiftAmt.getOperand(1)) {
+ RHSShiftAmt == LHSShiftAmt.getOperand(1))
if (ConstantSDNode *SUBC =
- dyn_cast<ConstantSDNode>(LHSShiftAmt.getOperand(0))) {
- if (SUBC->getAPIntValue() == OpSizeInBits) {
+ dyn_cast<ConstantSDNode>(LHSShiftAmt.getOperand(0)))
+ if (SUBC->getAPIntValue() == OpSizeInBits)
return DAG.getNode(HasROTR ? ISD::ROTR : ISD::ROTL, DL, VT, LHSShiftArg,
HasROTR ? RHSShiftAmt : LHSShiftAmt).getNode();
- }
- }
- }
// Look for sign/zext/any-extended or truncate cases:
if ((LHSShiftAmt.getOpcode() == ISD::SIGN_EXTEND ||
@@ -3342,13 +3383,11 @@ SDNode *DAGCombiner::MatchRotate(SDValue LHS, SDValue RHS, SDLoc DL) {
// fold (or (shl x, (*ext y)), (srl x, (*ext (sub 32, y)))) ->
// (rotr x, (sub 32, y))
if (ConstantSDNode *SUBC =
- dyn_cast<ConstantSDNode>(RExtOp0.getOperand(0))) {
- if (SUBC->getAPIntValue() == OpSizeInBits) {
+ dyn_cast<ConstantSDNode>(RExtOp0.getOperand(0)))
+ if (SUBC->getAPIntValue() == OpSizeInBits)
return DAG.getNode(HasROTL ? ISD::ROTL : ISD::ROTR, DL, VT,
LHSShiftArg,
HasROTL ? LHSShiftAmt : RHSShiftAmt).getNode();
- }
- }
} else if (LExtOp0.getOpcode() == ISD::SUB &&
RExtOp0 == LExtOp0.getOperand(1)) {
// fold (or (shl x, (*ext (sub 32, y))), (srl x, (*ext y))) ->
@@ -3356,13 +3395,11 @@ SDNode *DAGCombiner::MatchRotate(SDValue LHS, SDValue RHS, SDLoc DL) {
// fold (or (shl x, (*ext (sub 32, y))), (srl x, (*ext y))) ->
// (rotl x, (sub 32, y))
if (ConstantSDNode *SUBC =
- dyn_cast<ConstantSDNode>(LExtOp0.getOperand(0))) {
- if (SUBC->getAPIntValue() == OpSizeInBits) {
+ dyn_cast<ConstantSDNode>(LExtOp0.getOperand(0)))
+ if (SUBC->getAPIntValue() == OpSizeInBits)
return DAG.getNode(HasROTR ? ISD::ROTR : ISD::ROTL, DL, VT,
LHSShiftArg,
HasROTR ? RHSShiftAmt : LHSShiftAmt).getNode();
- }
- }
}
}
@@ -3489,7 +3526,7 @@ SDValue DAGCombiner::visitXOR(SDNode *N) {
}
// fold (xor x, x) -> 0
if (N0 == N1)
- return tryFoldToZero(SDLoc(N), TLI, VT, DAG, LegalOperations);
+ return tryFoldToZero(SDLoc(N), TLI, VT, DAG, LegalOperations, LegalTypes);
// Simplify: xor (op x...), (op y...) -> (op (xor x, y))
if (N0.getOpcode() == N1.getOpcode()) {
@@ -3915,8 +3952,7 @@ SDValue DAGCombiner::visitSRL(SDNode *N) {
DAG.getConstant(~0ULL >> ShAmt, VT));
}
-
- // fold (srl (anyextend x), c) -> (anyextend (srl x, c))
+ // fold (srl (anyextend x), c) -> (and (anyextend (srl x, c)), mask)
if (N1C && N0.getOpcode() == ISD::ANY_EXTEND) {
// Shifting in all undef bits?
EVT SmallVT = N0.getOperand(0).getValueType();
@@ -3929,7 +3965,10 @@ SDValue DAGCombiner::visitSRL(SDNode *N) {
N0.getOperand(0),
DAG.getConstant(ShiftAmt, getShiftAmountTy(SmallVT)));
AddToWorkList(SmallShift.getNode());
- return DAG.getNode(ISD::ANY_EXTEND, SDLoc(N), VT, SmallShift);
+ APInt Mask = APInt::getAllOnesValue(VT.getSizeInBits()).lshr(ShiftAmt);
+ return DAG.getNode(ISD::AND, SDLoc(N), VT,
+ DAG.getNode(ISD::ANY_EXTEND, SDLoc(N), VT, SmallShift),
+ DAG.getConstant(Mask, VT));
}
}
@@ -4233,20 +4272,22 @@ SDValue DAGCombiner::visitSELECT_CC(SDNode *N) {
// Determine if the condition we're dealing with is constant
SDValue SCC = SimplifySetCC(getSetCCResultType(N0.getValueType()),
N0, N1, CC, SDLoc(N), false);
- if (SCC.getNode()) AddToWorkList(SCC.getNode());
+ if (SCC.getNode()) {
+ AddToWorkList(SCC.getNode());
- if (ConstantSDNode *SCCC = dyn_cast_or_null<ConstantSDNode>(SCC.getNode())) {
- if (!SCCC->isNullValue())
- return N2; // cond always true -> true val
- else
- return N3; // cond always false -> false val
- }
+ if (ConstantSDNode *SCCC = dyn_cast<ConstantSDNode>(SCC.getNode())) {
+ if (!SCCC->isNullValue())
+ return N2; // cond always true -> true val
+ else
+ return N3; // cond always false -> false val
+ }
- // Fold to a simpler select_cc
- if (SCC.getNode() && SCC.getOpcode() == ISD::SETCC)
- return DAG.getNode(ISD::SELECT_CC, SDLoc(N), N2.getValueType(),
- SCC.getOperand(0), SCC.getOperand(1), N2, N3,
- SCC.getOperand(2));
+ // Fold to a simpler select_cc
+ if (SCC.getOpcode() == ISD::SETCC)
+ return DAG.getNode(ISD::SELECT_CC, SDLoc(N), N2.getValueType(),
+ SCC.getOperand(0), SCC.getOperand(1), N2, N3,
+ SCC.getOperand(2));
+ }
// If we can fold this based on the true/false value, do so.
if (SimplifySelectOps(N, N2, N3))
@@ -4268,7 +4309,7 @@ SDValue DAGCombiner::visitSETCC(SDNode *N) {
// mentioned transformation is profitable.
static bool ExtendUsesToFormExtLoad(SDNode *N, SDValue N0,
unsigned ExtOpc,
- SmallVector<SDNode*, 4> &ExtendNodes,
+ SmallVectorImpl<SDNode *> &ExtendNodes,
const TargetLowering &TLI) {
bool HasCopyToRegUses = false;
bool isTruncFree = TLI.isTruncateFree(N->getValueType(0), N0.getValueType());
@@ -4326,7 +4367,7 @@ static bool ExtendUsesToFormExtLoad(SDNode *N, SDValue N0,
return true;
}
-void DAGCombiner::ExtendSetCCUses(SmallVector<SDNode*, 4> SetCCs,
+void DAGCombiner::ExtendSetCCUses(const SmallVectorImpl<SDNode *> &SetCCs,
SDValue Trunc, SDValue ExtLoad, SDLoc DL,
ISD::NodeType ExtType) {
// Extend SetCC uses if necessary.
@@ -4508,7 +4549,7 @@ SDValue DAGCombiner::visitSIGN_EXTEND(SDNode *N) {
// sext(setcc) -> sext_in_reg(vsetcc) for vectors.
// Only do this before legalize for now.
if (VT.isVector() && !LegalOperations &&
- TLI.getBooleanContents(true) ==
+ TLI.getBooleanContents(true) ==
TargetLowering::ZeroOrNegativeOneBooleanContent) {
EVT N0VT = N0.getOperand(0).getValueType();
// On some architectures (such as SSE/NEON/etc) the SETCC result type is
@@ -4547,14 +4588,16 @@ SDValue DAGCombiner::visitSIGN_EXTEND(SDNode *N) {
NegOne, DAG.getConstant(0, VT),
cast<CondCodeSDNode>(N0.getOperand(2))->get(), true);
if (SCC.getNode()) return SCC;
- if (!VT.isVector() && (!LegalOperations ||
- TLI.isOperationLegal(ISD::SETCC, getSetCCResultType(VT))))
- return DAG.getNode(ISD::SELECT, SDLoc(N), VT,
- DAG.getSetCC(SDLoc(N),
- getSetCCResultType(VT),
- N0.getOperand(0), N0.getOperand(1),
- cast<CondCodeSDNode>(N0.getOperand(2))->get()),
- NegOne, DAG.getConstant(0, VT));
+ if (!VT.isVector() &&
+ (!LegalOperations ||
+ TLI.isOperationLegal(ISD::SETCC, getSetCCResultType(VT)))) {
+ return DAG.getSelect(SDLoc(N), VT,
+ DAG.getSetCC(SDLoc(N),
+ getSetCCResultType(VT),
+ N0.getOperand(0), N0.getOperand(1),
+ cast<CondCodeSDNode>(N0.getOperand(2))->get()),
+ NegOne, DAG.getConstant(0, VT));
+ }
}
// fold (sext x) -> (zext x) if the sign bit is known zero.
@@ -5039,9 +5082,8 @@ SDValue DAGCombiner::GetDemandedBits(SDValue V, const APInt &Mask) {
assert(CV != 0 && "Const value should be ConstSDNode.");
const APInt &CVal = CV->getAPIntValue();
APInt NewVal = CVal & Mask;
- if (NewVal != CVal) {
+ if (NewVal != CVal)
return DAG.getConstant(NewVal, V.getValueType());
- }
break;
}
case ISD::OR:
@@ -5169,12 +5211,19 @@ SDValue DAGCombiner::ReduceLoadWidth(SDNode *N) {
// For the transform to be legal, the load must produce only two values
// (the value loaded and the chain). Don't transform a pre-increment
- // load, for example, which produces an extra value. Otherwise the
+ // load, for example, which produces an extra value. Otherwise the
// transformation is not equivalent, and the downstream logic to replace
// uses gets things wrong.
if (LN0->getNumValues() > 2)
return SDValue();
+ // If the load that we're shrinking is an extload and we're not just
+ // discarding the extension we can't simply shrink the load. Bail.
+ // TODO: It would be possible to merge the extensions in some cases.
+ if (LN0->getExtensionType() != ISD::NON_EXTLOAD &&
+ LN0->getMemoryVT().getSizeInBits() < ExtVT.getSizeInBits() + ShAmt)
+ return SDValue();
+
EVT PtrType = N0.getOperand(1).getValueType();
if (PtrType == MVT::Untyped || PtrType.isExtended())
@@ -5251,10 +5300,9 @@ SDValue DAGCombiner::visitSIGN_EXTEND_INREG(SDNode *N) {
// fold (sext_in_reg (sext_in_reg x, VT2), VT1) -> (sext_in_reg x, minVT) pt2
if (N0.getOpcode() == ISD::SIGN_EXTEND_INREG &&
- EVT.bitsLT(cast<VTSDNode>(N0.getOperand(1))->getVT())) {
+ EVT.bitsLT(cast<VTSDNode>(N0.getOperand(1))->getVT()))
return DAG.getNode(ISD::SIGN_EXTEND_INREG, SDLoc(N), VT,
N0.getOperand(0), N1);
- }
// fold (sext_in_reg (sext x)) -> (sext x)
// fold (sext_in_reg (aext x)) -> (sext x)
@@ -5400,7 +5448,7 @@ SDValue DAGCombiner::visitTRUNCATE(SDNode *N) {
SDValue EltNo = N0->getOperand(1);
if (isa<ConstantSDNode>(EltNo) && isTypeLegal(NVT)) {
int Elt = cast<ConstantSDNode>(EltNo)->getZExtValue();
- EVT IndexTy = N0->getOperand(1).getValueType();
+ EVT IndexTy = TLI.getVectorIdxTy();
int Index = isLE ? (Elt*SizeRatio) : (Elt*SizeRatio + (SizeRatio-1));
SDValue V = DAG.getNode(ISD::BITCAST, SDLoc(N),
@@ -5632,8 +5680,8 @@ SDValue DAGCombiner::visitBITCAST(SDNode *N) {
// fold (bitconvert (fneg x)) -> (xor (bitconvert x), signbit)
// fold (bitconvert (fabs x)) -> (and (bitconvert x), (not signbit))
// This often reduces constant pool loads.
- if (((N0.getOpcode() == ISD::FNEG && !TLI.isFNegFree(VT)) ||
- (N0.getOpcode() == ISD::FABS && !TLI.isFAbsFree(VT))) &&
+ if (((N0.getOpcode() == ISD::FNEG && !TLI.isFNegFree(N0.getValueType())) ||
+ (N0.getOpcode() == ISD::FABS && !TLI.isFAbsFree(N0.getValueType()))) &&
N0.getNode()->hasOneUse() && VT.isInteger() &&
!VT.isVector() && !N0.getValueType().isVector()) {
SDValue NewConv = DAG.getNode(ISD::BITCAST, SDLoc(N0), VT,
@@ -5892,22 +5940,20 @@ SDValue DAGCombiner::visitFADD(SDNode *N) {
// We don't need to test this condition for transformations like the following, as
// the DAG being transformed implies it is legal to take FP constant as
// operand.
- //
+ //
// (fadd (fmul c, x), x) -> (fmul c+1, x)
- //
+ //
bool AllowNewFpConst = (Level < AfterLegalizeDAG);
// If allow, fold (fadd (fneg x), x) -> 0.0
if (AllowNewFpConst && DAG.getTarget().Options.UnsafeFPMath &&
- N0.getOpcode() == ISD::FNEG && N0.getOperand(0) == N1) {
+ N0.getOpcode() == ISD::FNEG && N0.getOperand(0) == N1)
return DAG.getConstantFP(0.0, VT);
- }
// If allow, fold (fadd x, (fneg x)) -> 0.0
if (AllowNewFpConst && DAG.getTarget().Options.UnsafeFPMath &&
- N1.getOpcode() == ISD::FNEG && N1.getOperand(0) == N0) {
+ N1.getOpcode() == ISD::FNEG && N1.getOperand(0) == N0)
return DAG.getConstantFP(0.0, VT);
- }
// In unsafe math mode, we can fold chains of FADD's of the same value
// into multiplications. This transform is not safe in general because
@@ -5919,7 +5965,7 @@ SDValue DAGCombiner::visitFADD(SDNode *N) {
ConstantFPSDNode *CFP00 = dyn_cast<ConstantFPSDNode>(N0.getOperand(0));
ConstantFPSDNode *CFP01 = dyn_cast<ConstantFPSDNode>(N0.getOperand(1));
- // (fadd (fmul c, x), x) -> (fmul c+1, x)
+ // (fadd (fmul c, x), x) -> (fmul x, c+1)
if (CFP00 && !CFP01 && N0.getOperand(1) == N1) {
SDValue NewCFP = DAG.getNode(ISD::FADD, SDLoc(N), VT,
SDValue(CFP00, 0),
@@ -5928,7 +5974,7 @@ SDValue DAGCombiner::visitFADD(SDNode *N) {
N1, NewCFP);
}
- // (fadd (fmul x, c), x) -> (fmul c+1, x)
+ // (fadd (fmul x, c), x) -> (fmul x, c+1)
if (CFP01 && !CFP00 && N0.getOperand(0) == N1) {
SDValue NewCFP = DAG.getNode(ISD::FADD, SDLoc(N), VT,
SDValue(CFP01, 0),
@@ -5937,7 +5983,7 @@ SDValue DAGCombiner::visitFADD(SDNode *N) {
N1, NewCFP);
}
- // (fadd (fmul c, x), (fadd x, x)) -> (fmul c+2, x)
+ // (fadd (fmul c, x), (fadd x, x)) -> (fmul x, c+2)
if (CFP00 && !CFP01 && N1.getOpcode() == ISD::FADD &&
N1.getOperand(0) == N1.getOperand(1) &&
N0.getOperand(1) == N1.getOperand(0)) {
@@ -5948,7 +5994,7 @@ SDValue DAGCombiner::visitFADD(SDNode *N) {
N0.getOperand(1), NewCFP);
}
- // (fadd (fmul x, c), (fadd x, x)) -> (fmul c+2, x)
+ // (fadd (fmul x, c), (fadd x, x)) -> (fmul x, c+2)
if (CFP01 && !CFP00 && N1.getOpcode() == ISD::FADD &&
N1.getOperand(0) == N1.getOperand(1) &&
N0.getOperand(0) == N1.getOperand(0)) {
@@ -5964,7 +6010,7 @@ SDValue DAGCombiner::visitFADD(SDNode *N) {
ConstantFPSDNode *CFP10 = dyn_cast<ConstantFPSDNode>(N1.getOperand(0));
ConstantFPSDNode *CFP11 = dyn_cast<ConstantFPSDNode>(N1.getOperand(1));
- // (fadd x, (fmul c, x)) -> (fmul c+1, x)
+ // (fadd x, (fmul c, x)) -> (fmul x, c+1)
if (CFP10 && !CFP11 && N1.getOperand(1) == N0) {
SDValue NewCFP = DAG.getNode(ISD::FADD, SDLoc(N), VT,
SDValue(CFP10, 0),
@@ -5973,7 +6019,7 @@ SDValue DAGCombiner::visitFADD(SDNode *N) {
N0, NewCFP);
}
- // (fadd x, (fmul x, c)) -> (fmul c+1, x)
+ // (fadd x, (fmul x, c)) -> (fmul x, c+1)
if (CFP11 && !CFP10 && N1.getOperand(0) == N0) {
SDValue NewCFP = DAG.getNode(ISD::FADD, SDLoc(N), VT,
SDValue(CFP11, 0),
@@ -5983,79 +6029,74 @@ SDValue DAGCombiner::visitFADD(SDNode *N) {
}
- // (fadd (fadd x, x), (fmul c, x)) -> (fmul c+2, x)
- if (CFP10 && !CFP11 && N1.getOpcode() == ISD::FADD &&
- N1.getOperand(0) == N1.getOperand(1) &&
- N0.getOperand(1) == N1.getOperand(0)) {
+ // (fadd (fadd x, x), (fmul c, x)) -> (fmul x, c+2)
+ if (CFP10 && !CFP11 && N0.getOpcode() == ISD::FADD &&
+ N0.getOperand(0) == N0.getOperand(1) &&
+ N1.getOperand(1) == N0.getOperand(0)) {
SDValue NewCFP = DAG.getNode(ISD::FADD, SDLoc(N), VT,
SDValue(CFP10, 0),
DAG.getConstantFP(2.0, VT));
return DAG.getNode(ISD::FMUL, SDLoc(N), VT,
- N0.getOperand(1), NewCFP);
+ N1.getOperand(1), NewCFP);
}
- // (fadd (fadd x, x), (fmul x, c)) -> (fmul c+2, x)
- if (CFP11 && !CFP10 && N1.getOpcode() == ISD::FADD &&
- N1.getOperand(0) == N1.getOperand(1) &&
- N0.getOperand(0) == N1.getOperand(0)) {
+ // (fadd (fadd x, x), (fmul x, c)) -> (fmul x, c+2)
+ if (CFP11 && !CFP10 && N0.getOpcode() == ISD::FADD &&
+ N0.getOperand(0) == N0.getOperand(1) &&
+ N1.getOperand(0) == N0.getOperand(0)) {
SDValue NewCFP = DAG.getNode(ISD::FADD, SDLoc(N), VT,
SDValue(CFP11, 0),
DAG.getConstantFP(2.0, VT));
return DAG.getNode(ISD::FMUL, SDLoc(N), VT,
- N0.getOperand(0), NewCFP);
+ N1.getOperand(0), NewCFP);
}
}
if (N0.getOpcode() == ISD::FADD && AllowNewFpConst) {
ConstantFPSDNode *CFP = dyn_cast<ConstantFPSDNode>(N0.getOperand(0));
- // (fadd (fadd x, x), x) -> (fmul 3.0, x)
+ // (fadd (fadd x, x), x) -> (fmul x, 3.0)
if (!CFP && N0.getOperand(0) == N0.getOperand(1) &&
- (N0.getOperand(0) == N1)) {
+ (N0.getOperand(0) == N1))
return DAG.getNode(ISD::FMUL, SDLoc(N), VT,
N1, DAG.getConstantFP(3.0, VT));
- }
}
if (N1.getOpcode() == ISD::FADD && AllowNewFpConst) {
ConstantFPSDNode *CFP10 = dyn_cast<ConstantFPSDNode>(N1.getOperand(0));
- // (fadd x, (fadd x, x)) -> (fmul 3.0, x)
+ // (fadd x, (fadd x, x)) -> (fmul x, 3.0)
if (!CFP10 && N1.getOperand(0) == N1.getOperand(1) &&
- N1.getOperand(0) == N0) {
+ N1.getOperand(0) == N0)
return DAG.getNode(ISD::FMUL, SDLoc(N), VT,
N0, DAG.getConstantFP(3.0, VT));
- }
}
- // (fadd (fadd x, x), (fadd x, x)) -> (fmul 4.0, x)
+ // (fadd (fadd x, x), (fadd x, x)) -> (fmul x, 4.0)
if (AllowNewFpConst &&
N0.getOpcode() == ISD::FADD && N1.getOpcode() == ISD::FADD &&
N0.getOperand(0) == N0.getOperand(1) &&
N1.getOperand(0) == N1.getOperand(1) &&
- N0.getOperand(0) == N1.getOperand(0)) {
+ N0.getOperand(0) == N1.getOperand(0))
return DAG.getNode(ISD::FMUL, SDLoc(N), VT,
N0.getOperand(0),
DAG.getConstantFP(4.0, VT));
- }
}
// FADD -> FMA combines:
if ((DAG.getTarget().Options.AllowFPOpFusion == FPOpFusion::Fast ||
DAG.getTarget().Options.UnsafeFPMath) &&
- DAG.getTarget().getTargetLowering()->isFMAFasterThanMulAndAdd(VT) &&
- TLI.isOperationLegalOrCustom(ISD::FMA, VT)) {
+ DAG.getTarget().getTargetLowering()->isFMAFasterThanFMulAndFAdd(VT) &&
+ (!LegalOperations || TLI.isOperationLegalOrCustom(ISD::FMA, VT))) {
// fold (fadd (fmul x, y), z) -> (fma x, y, z)
- if (N0.getOpcode() == ISD::FMUL && N0->hasOneUse()) {
+ if (N0.getOpcode() == ISD::FMUL && N0->hasOneUse())
return DAG.getNode(ISD::FMA, SDLoc(N), VT,
N0.getOperand(0), N0.getOperand(1), N1);
- }
// fold (fadd x, (fmul y, z)) -> (fma y, z, x)
// Note: Commutes FADD operands.
- if (N1.getOpcode() == ISD::FMUL && N1->hasOneUse()) {
+ if (N1.getOpcode() == ISD::FMUL && N1->hasOneUse())
return DAG.getNode(ISD::FMA, SDLoc(N), VT,
N1.getOperand(0), N1.getOperand(1), N0);
- }
}
return SDValue();
@@ -6110,8 +6151,9 @@ SDValue DAGCombiner::visitFSUB(SDNode *N) {
if (N10 == N0 && isNegatibleForFree(N11, LegalOperations, TLI,
&DAG.getTarget().Options))
return GetNegatedExpression(N11, DAG, LegalOperations);
- else if (N11 == N0 && isNegatibleForFree(N10, LegalOperations, TLI,
- &DAG.getTarget().Options))
+
+ if (N11 == N0 && isNegatibleForFree(N10, LegalOperations, TLI,
+ &DAG.getTarget().Options))
return GetNegatedExpression(N10, DAG, LegalOperations);
}
}
@@ -6119,27 +6161,25 @@ SDValue DAGCombiner::visitFSUB(SDNode *N) {
// FSUB -> FMA combines:
if ((DAG.getTarget().Options.AllowFPOpFusion == FPOpFusion::Fast ||
DAG.getTarget().Options.UnsafeFPMath) &&
- DAG.getTarget().getTargetLowering()->isFMAFasterThanMulAndAdd(VT) &&
- TLI.isOperationLegalOrCustom(ISD::FMA, VT)) {
+ DAG.getTarget().getTargetLowering()->isFMAFasterThanFMulAndFAdd(VT) &&
+ (!LegalOperations || TLI.isOperationLegalOrCustom(ISD::FMA, VT))) {
// fold (fsub (fmul x, y), z) -> (fma x, y, (fneg z))
- if (N0.getOpcode() == ISD::FMUL && N0->hasOneUse()) {
+ if (N0.getOpcode() == ISD::FMUL && N0->hasOneUse())
return DAG.getNode(ISD::FMA, dl, VT,
N0.getOperand(0), N0.getOperand(1),
DAG.getNode(ISD::FNEG, dl, VT, N1));
- }
// fold (fsub x, (fmul y, z)) -> (fma (fneg y), z, x)
// Note: Commutes FSUB operands.
- if (N1.getOpcode() == ISD::FMUL && N1->hasOneUse()) {
+ if (N1.getOpcode() == ISD::FMUL && N1->hasOneUse())
return DAG.getNode(ISD::FMA, dl, VT,
DAG.getNode(ISD::FNEG, dl, VT,
N1.getOperand(0)),
N1.getOperand(1), N0);
- }
- // fold (fsub (-(fmul, x, y)), z) -> (fma (fneg x), y, (fneg z))
- if (N0.getOpcode() == ISD::FNEG &&
+ // fold (fsub (fneg (fmul, x, y)), z) -> (fma (fneg x), y, (fneg z))
+ if (N0.getOpcode() == ISD::FNEG &&
N0.getOperand(0).getOpcode() == ISD::FMUL &&
N0->hasOneUse() && N0.getOperand(0).hasOneUse()) {
SDValue N00 = N0.getOperand(0).getOperand(0);
@@ -6195,7 +6235,7 @@ SDValue DAGCombiner::visitFMUL(SDNode *N) {
// fold (fmul (fneg X), (fneg Y)) -> (fmul X, Y)
if (char LHSNeg = isNegatibleForFree(N0, LegalOperations, TLI,
&DAG.getTarget().Options)) {
- if (char RHSNeg = isNegatibleForFree(N1, LegalOperations, TLI,
+ if (char RHSNeg = isNegatibleForFree(N1, LegalOperations, TLI,
&DAG.getTarget().Options)) {
// Both can be negated for free, check to see if at least one is cheaper
// negated.
@@ -6276,21 +6316,17 @@ SDValue DAGCombiner::visitFMA(SDNode *N) {
}
// (fma x, c, x) -> (fmul x, (c+1))
- if (DAG.getTarget().Options.UnsafeFPMath && N1CFP && N0 == N2) {
- return DAG.getNode(ISD::FMUL, dl, VT,
- N0,
+ if (DAG.getTarget().Options.UnsafeFPMath && N1CFP && N0 == N2)
+ return DAG.getNode(ISD::FMUL, dl, VT, N0,
DAG.getNode(ISD::FADD, dl, VT,
N1, DAG.getConstantFP(1.0, VT)));
- }
// (fma x, c, (fneg x)) -> (fmul x, (c-1))
if (DAG.getTarget().Options.UnsafeFPMath && N1CFP &&
- N2.getOpcode() == ISD::FNEG && N2.getOperand(0) == N0) {
- return DAG.getNode(ISD::FMUL, dl, VT,
- N0,
+ N2.getOpcode() == ISD::FNEG && N2.getOperand(0) == N0)
+ return DAG.getNode(ISD::FMUL, dl, VT, N0,
DAG.getNode(ISD::FADD, dl, VT,
N1, DAG.getConstantFP(-1.0, VT)));
- }
return SDValue();
@@ -6670,12 +6706,11 @@ SDValue DAGCombiner::visitFNEG(SDNode *N) {
// (fneg (fmul c, x)) -> (fmul -c, x)
if (N0.getOpcode() == ISD::FMUL) {
ConstantFPSDNode *CFP1 = dyn_cast<ConstantFPSDNode>(N0.getOperand(1));
- if (CFP1) {
+ if (CFP1)
return DAG.getNode(ISD::FMUL, SDLoc(N), VT,
N0.getOperand(0),
DAG.getNode(ISD::FNEG, SDLoc(N), VT,
N0.getOperand(1)));
- }
}
return SDValue();
@@ -6740,7 +6775,7 @@ SDValue DAGCombiner::visitFABS(SDNode *N) {
// Transform fabs(bitconvert(x)) -> bitconvert(x&~sign) to avoid loading
// constant pool values.
- if (!TLI.isFAbsFree(VT) &&
+ if (!TLI.isFAbsFree(VT) &&
N0.getOpcode() == ISD::BITCAST && N0.getNode()->hasOneUse() &&
N0.getOperand(0).getValueType().isInteger() &&
!N0.getOperand(0).getValueType().isVector()) {
@@ -7165,7 +7200,7 @@ bool DAGCombiner::CombineToPreIndexedLoadStore(SDNode *N) {
// x0 * offset0 + y0 * ptr0 = t0
// knowing that
// x1 * offset1 + y1 * ptr0 = t1 (the indexed load/store)
- //
+ //
// where x0, x1, y0 and y1 in {-1, 1} are given by the types of the
// indexed load/store and the expression that needs to be re-written.
//
@@ -7287,7 +7322,7 @@ bool DAGCombiner::CombineToPostIndexedLoadStore(SDNode *N) {
for (SDNode::use_iterator III = Use->use_begin(),
EEE = Use->use_end(); III != EEE; ++III) {
SDNode *UseUse = *III;
- if (!canFoldInAddressingMode(Use, UseUse, DAG, TLI))
+ if (!canFoldInAddressingMode(Use, UseUse, DAG, TLI))
RealUse = true;
}
@@ -8577,7 +8612,9 @@ SDValue DAGCombiner::visitINSERT_VECTOR_ELT(SDNode *N) {
// be converted to a BUILD_VECTOR). Fill in the Ops vector with the
// vector elements.
SmallVector<SDValue, 8> Ops;
- if (InVec.getOpcode() == ISD::BUILD_VECTOR) {
+ // Do not combine these two vectors if the output vector will not replace
+ // the input vector.
+ if (InVec.getOpcode() == ISD::BUILD_VECTOR && InVec.hasOneUse()) {
Ops.append(InVec.getNode()->op_begin(),
InVec.getNode()->op_end());
} else if (InVec.getOpcode() == ISD::UNDEF) {
@@ -8650,7 +8687,7 @@ SDValue DAGCombiner::visitEXTRACT_VECTOR_ELT(SDNode *N) {
OrigElt -= NumElem;
}
- EVT IndexTy = N->getOperand(1).getValueType();
+ EVT IndexTy = TLI.getVectorIdxTy();
return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SDLoc(N), NVT,
InVec, DAG.getConstant(OrigElt, IndexTy));
}
@@ -8789,7 +8826,7 @@ SDValue DAGCombiner::visitEXTRACT_VECTOR_ELT(SDNode *N) {
} else {
Load = DAG.getLoad(LVT, SDLoc(N), LN0->getChain(), NewPtr,
LN0->getPointerInfo().getWithOffset(PtrOff),
- LN0->isVolatile(), LN0->isNonTemporal(),
+ LN0->isVolatile(), LN0->isNonTemporal(),
LN0->isInvariant(), Align);
Chain = Load.getValue(1);
if (NVT.bitsLT(LVT))
@@ -9167,7 +9204,7 @@ SDValue DAGCombiner::visitCONCAT_VECTORS(SDNode *N) {
// The extract index must be constant.
if (!CS)
return SDValue();
-
+
// Check that we are reading from the identity index.
if (CS->getZExtValue() != IdentityIndex)
return SDValue();
@@ -9175,7 +9212,7 @@ SDValue DAGCombiner::visitCONCAT_VECTORS(SDNode *N) {
if (SingleSource.getNode())
return SingleSource;
-
+
return SDValue();
}
@@ -9605,8 +9642,8 @@ SDValue DAGCombiner::SimplifySelect(SDLoc DL, SDValue N0,
SCC.getOperand(0), SCC.getOperand(1),
SCC.getOperand(4));
AddToWorkList(SETCC.getNode());
- return DAG.getNode(ISD::SELECT, SDLoc(SCC), SCC.getValueType(),
- SCC.getOperand(2), SCC.getOperand(3), SETCC);
+ return DAG.getSelect(SDLoc(SCC), SCC.getValueType(),
+ SCC.getOperand(2), SCC.getOperand(3), SETCC);
}
return SCC;
@@ -9675,10 +9712,10 @@ bool DAGCombiner::SimplifySelectOps(SDNode *TheSelect, SDValue LHS,
if (LLD->isPredecessorOf(RLD) ||
RLD->isPredecessorOf(LLD))
return false;
- Addr = DAG.getNode(ISD::SELECT, SDLoc(TheSelect),
- LLD->getBasePtr().getValueType(),
- TheSelect->getOperand(0), LLD->getBasePtr(),
- RLD->getBasePtr());
+ Addr = DAG.getSelect(SDLoc(TheSelect),
+ LLD->getBasePtr().getValueType(),
+ TheSelect->getOperand(0), LLD->getBasePtr(),
+ RLD->getBasePtr());
} else { // Otherwise SELECT_CC
SDNode *CondLHS = TheSelect->getOperand(0).getNode();
SDNode *CondRHS = TheSelect->getOperand(1).getNode();
@@ -9812,8 +9849,8 @@ SDValue DAGCombiner::SimplifySelectCC(SDLoc DL, SDValue N0, SDValue N1,
getSetCCResultType(N0.getValueType()),
N0, N1, CC);
AddToWorkList(Cond.getNode());
- SDValue CstOffset = DAG.getNode(ISD::SELECT, DL, Zero.getValueType(),
- Cond, One, Zero);
+ SDValue CstOffset = DAG.getSelect(DL, Zero.getValueType(),
+ Cond, One, Zero);
AddToWorkList(CstOffset.getNode());
CPIdx = DAG.getNode(ISD::ADD, DL, TLI.getPointerTy(), CPIdx,
CstOffset);
@@ -10205,7 +10242,7 @@ bool DAGCombiner::FindAliasInfo(SDNode *N,
/// GatherAllAliases - Walk up chain skipping non-aliasing memory nodes,
/// looking for aliasing nodes and adding them to the Aliases vector.
void DAGCombiner::GatherAllAliases(SDNode *N, SDValue OriginalChain,
- SmallVector<SDValue, 8> &Aliases) {
+ SmallVectorImpl<SDValue> &Aliases) {
SmallVector<SDValue, 8> Chains; // List of chains to visit.
SmallPtrSet<SDNode *, 16> Visited; // Visited node set.
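The visitMUL hunks above extend the scalar strength-reduction folds, (mul x, (1 << c)) -> (shl x, c) and (mul x, -(1 << c)) -> (sub 0, (shl x, c)), to vector splat constants via isConstantSplatVector. As a standalone illustration of the scalar identities only (a minimal C++ sketch, not part of the patch; the helper names are hypothetical):

#include <cassert>
#include <cstdint>
#include <initializer_list>

// Scalar view of the strength-reduction folds handled in visitMUL:
//   x * (1 << c)   ==  x << c
//   x * -(1 << c)  ==  0 - (x << c)
// Unsigned arithmetic, so the identities hold modulo 2^32.
static uint32_t mulPow2(uint32_t X, unsigned C)    { return X << C; }
static uint32_t mulNegPow2(uint32_t X, unsigned C) { return 0u - (X << C); }

int main() {
  for (uint32_t X : {0u, 1u, 7u, 0x80000000u, 0xDEADBEEFu})
    for (unsigned C = 0; C < 32; ++C) {
      assert(mulPow2(X, C)    == X * (uint32_t(1) << C));
      assert(mulNegPow2(X, C) == X * (0u - (uint32_t(1) << C)));
    }
  return 0;
}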
diff --git a/lib/CodeGen/SelectionDAG/FastISel.cpp b/lib/CodeGen/SelectionDAG/FastISel.cpp
index eb80c64..b4ac948f 100644
--- a/lib/CodeGen/SelectionDAG/FastISel.cpp
+++ b/lib/CodeGen/SelectionDAG/FastISel.cpp
@@ -41,6 +41,7 @@
#define DEBUG_TYPE "isel"
#include "llvm/CodeGen/FastISel.h"
+#include "llvm/ADT/Optional.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/Loads.h"
#include "llvm/CodeGen/Analysis.h"
@@ -75,15 +76,12 @@ STATISTIC(NumFastIselDead, "Number of dead insts removed on failure");
void FastISel::startNewBlock() {
LocalValueMap.clear();
+ // Instructions are appended to FuncInfo.MBB. If the basic block already
+ // contains labels or copies, use the last instruction as the last local
+ // value.
EmitStartPt = 0;
-
- // Advance the emit start point past any EH_LABEL instructions.
- MachineBasicBlock::iterator
- I = FuncInfo.MBB->begin(), E = FuncInfo.MBB->end();
- while (I != E && I->getOpcode() == TargetOpcode::EH_LABEL) {
- EmitStartPt = I;
- ++I;
- }
+ if (!FuncInfo.MBB->empty())
+ EmitStartPt = &FuncInfo.MBB->back();
LastLocalValue = EmitStartPt;
}
@@ -92,18 +90,16 @@ bool FastISel::LowerArguments() {
// Fallback to SDISel argument lowering code to deal with sret pointer
// parameter.
return false;
-
+
if (!FastLowerArguments())
return false;
- // Enter non-dead arguments into ValueMap for uses in non-entry BBs.
+ // Enter arguments into ValueMap for uses in non-entry BBs.
for (Function::const_arg_iterator I = FuncInfo.Fn->arg_begin(),
E = FuncInfo.Fn->arg_end(); I != E; ++I) {
- if (!I->use_empty()) {
- DenseMap<const Value *, unsigned>::iterator VI = LocalValueMap.find(I);
- assert(VI != LocalValueMap.end() && "Missed an argument?");
- FuncInfo.ValueMap[I] = VI->second;
- }
+ DenseMap<const Value *, unsigned>::iterator VI = LocalValueMap.find(I);
+ assert(VI != LocalValueMap.end() && "Missed an argument?");
+ FuncInfo.ValueMap[I] = VI->second;
}
return true;
}
@@ -601,7 +597,10 @@ bool FastISel::SelectCall(const User *I) {
case Intrinsic::dbg_declare: {
const DbgDeclareInst *DI = cast<DbgDeclareInst>(Call);
- if (!DIVariable(DI->getVariable()).Verify() ||
+ DIVariable DIVar(DI->getVariable());
+ assert((!DIVar || DIVar.isVariable()) &&
+ "Variable in DbgDeclareInst should be either null or a DIVariable.");
+ if (!DIVar ||
!FuncInfo.MF->getMMI().hasDebugInfo()) {
DEBUG(dbgs() << "Dropping debug info for " << *DI << "\n");
return true;
@@ -613,16 +612,16 @@ bool FastISel::SelectCall(const User *I) {
return true;
}
- unsigned Reg = 0;
unsigned Offset = 0;
- if (const Argument *Arg = dyn_cast<Argument>(Address)) {
+ Optional<MachineOperand> Op;
+ if (const Argument *Arg = dyn_cast<Argument>(Address))
// Some arguments' frame index is recorded during argument lowering.
Offset = FuncInfo.getArgumentFrameIndex(Arg);
- if (Offset)
- Reg = TRI.getFrameRegister(*FuncInfo.MF);
- }
- if (!Reg)
- Reg = lookUpRegForValue(Address);
+ if (Offset)
+ Op = MachineOperand::CreateFI(Offset);
+ if (!Op)
+ if (unsigned Reg = lookUpRegForValue(Address))
+ Op = MachineOperand::CreateReg(Reg, false);
// If we have a VLA that has a "use" in a metadata node that's then used
// here but it has no other uses, then we have a problem. E.g.,
@@ -635,16 +634,29 @@ bool FastISel::SelectCall(const User *I) {
// If we assign 'a' a vreg and fast isel later on has to use the selection
// DAG isel, it will want to copy the value to the vreg. However, there are
// no uses, which goes counter to what selection DAG isel expects.
- if (!Reg && !Address->use_empty() && isa<Instruction>(Address) &&
+ if (!Op && !Address->use_empty() && isa<Instruction>(Address) &&
(!isa<AllocaInst>(Address) ||
!FuncInfo.StaticAllocaMap.count(cast<AllocaInst>(Address))))
- Reg = FuncInfo.InitializeRegForValue(Address);
-
- if (Reg)
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
- TII.get(TargetOpcode::DBG_VALUE))
- .addReg(Reg, RegState::Debug).addImm(Offset)
- .addMetadata(DI->getVariable());
+ Op = MachineOperand::CreateReg(FuncInfo.InitializeRegForValue(Address),
+ false);
+
+ if (Op)
+ if (Op->isReg()) {
+ // Set the indirect flag if the type and the DIVariable's
+ // indirect field are in disagreement: Indirectly-addressed
+ // variables that are nonpointer types should be marked as
+ // indirect, and VLAs should be marked as indirect even though
+ // they are a pointer type.
+ bool IsIndirect = DI->getAddress()->getType()->isPointerTy()
+ ^ DIVar.isIndirect();
+ Op->setIsDebug(true);
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
+ TII.get(TargetOpcode::DBG_VALUE),
+ IsIndirect, Op->getReg(), Offset, DI->getVariable());
+ } else
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
+ TII.get(TargetOpcode::DBG_VALUE)).addOperand(*Op).addImm(0)
+ .addMetadata(DI->getVariable());
else
// We can't yet handle anything else here because it would require
// generating code, thus altering codegen because of debug info.
@@ -676,9 +688,9 @@ bool FastISel::SelectCall(const User *I) {
.addFPImm(CF).addImm(DI->getOffset())
.addMetadata(DI->getVariable());
} else if (unsigned Reg = lookUpRegForValue(V)) {
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
- .addReg(Reg, RegState::Debug).addImm(DI->getOffset())
- .addMetadata(DI->getVariable());
+ bool IsIndirect = DI->getOffset() != 0;
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, IsIndirect,
+ Reg, DI->getOffset(), DI->getVariable());
} else {
// We can't yet handle anything else here because it would require
// generating code, thus altering codegen because of debug info.
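The dbg.declare hunk above builds the DBG_VALUE operand in a first-match order: a frame index recorded during argument lowering wins, otherwise a register already holding the address, otherwise a freshly initialized virtual register. A schematic sketch of that selection order follows, using std::optional and plain C++17 stand-ins rather than the LLVM MachineOperand/Optional API:

#include <iostream>
#include <optional>

// Pick the debug operand: prefer a recorded frame index, otherwise fall back
// to a register that already holds the address.  Stand-in types only.
struct DebugOperand {
  bool IsFrameIndex;
  unsigned Value;
};

std::optional<DebugOperand> pickOperand(std::optional<unsigned> ArgFrameIndex,
                                        std::optional<unsigned> KnownReg) {
  if (ArgFrameIndex)
    return DebugOperand{true, *ArgFrameIndex};   // MachineOperand::CreateFI
  if (KnownReg)
    return DebugOperand{false, *KnownReg};       // MachineOperand::CreateReg
  return std::nullopt;                           // no operand: drop the info
}

int main() {
  auto Op = pickOperand(std::nullopt, 42u);
  if (Op)
    std::cout << (Op->IsFrameIndex ? "frame index " : "register ")
              << Op->Value << "\n";              // prints "register 42"
  return 0;
}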
diff --git a/lib/CodeGen/SelectionDAG/FunctionLoweringInfo.cpp b/lib/CodeGen/SelectionDAG/FunctionLoweringInfo.cpp
index 86e188a..4309dc1 100644
--- a/lib/CodeGen/SelectionDAG/FunctionLoweringInfo.cpp
+++ b/lib/CodeGen/SelectionDAG/FunctionLoweringInfo.cpp
@@ -55,15 +55,12 @@ static bool isUsedOutsideOfDefiningBlock(const Instruction *I) {
return false;
}
-FunctionLoweringInfo::FunctionLoweringInfo(const TargetMachine &TM)
- : TM(TM), TLI(0) {
-}
-
void FunctionLoweringInfo::set(const Function &fn, MachineFunction &mf) {
+ const TargetLowering *TLI = TM.getTargetLowering();
+
Fn = &fn;
MF = &mf;
RegInfo = &MF->getRegInfo();
- TLI = TM.getTargetLowering();
// Check whether the function can return without sret-demotion.
SmallVector<ISD::OutputArg, 4> Outs;
@@ -115,8 +112,11 @@ void FunctionLoweringInfo::set(const Function &fn, MachineFunction &mf) {
// in a predictable order.
if (const DbgDeclareInst *DI = dyn_cast<DbgDeclareInst>(I)) {
MachineModuleInfo &MMI = MF->getMMI();
+ DIVariable DIVar(DI->getVariable());
+ assert((!DIVar || DIVar.isVariable()) &&
+ "Variable in DbgDeclareInst should be either null or a DIVariable.");
if (MMI.hasDebugInfo() &&
- DIVariable(DI->getVariable()).Verify() &&
+ DIVar &&
!DI->getDebugLoc().isUnknown()) {
// Don't handle byval struct arguments or VLAs, for example.
// Non-byval arguments are handled here (they refer to the stack
@@ -209,7 +209,8 @@ void FunctionLoweringInfo::clear() {
/// CreateReg - Allocate a single virtual register for the given type.
unsigned FunctionLoweringInfo::CreateReg(MVT VT) {
- return RegInfo->createVirtualRegister(TLI->getRegClassFor(VT));
+ return RegInfo->
+ createVirtualRegister(TM.getTargetLowering()->getRegClassFor(VT));
}
/// CreateRegs - Allocate the appropriate number of virtual registers of
@@ -220,6 +221,8 @@ unsigned FunctionLoweringInfo::CreateReg(MVT VT) {
/// will assign registers for each member or element.
///
unsigned FunctionLoweringInfo::CreateRegs(Type *Ty) {
+ const TargetLowering *TLI = TM.getTargetLowering();
+
SmallVector<EVT, 4> ValueVTs;
ComputeValueVTs(*TLI, Ty, ValueVTs);
@@ -267,6 +270,8 @@ void FunctionLoweringInfo::ComputePHILiveOutRegInfo(const PHINode *PN) {
if (!Ty->isIntegerTy() || Ty->isVectorTy())
return;
+ const TargetLowering *TLI = TM.getTargetLowering();
+
SmallVector<EVT, 1> ValueVTs;
ComputeValueVTs(*TLI, Ty, ValueVTs);
assert(ValueVTs.size() == 1 &&
diff --git a/lib/CodeGen/SelectionDAG/InstrEmitter.cpp b/lib/CodeGen/SelectionDAG/InstrEmitter.cpp
index 3b1abd7..e107276 100644
--- a/lib/CodeGen/SelectionDAG/InstrEmitter.cpp
+++ b/lib/CodeGen/SelectionDAG/InstrEmitter.cpp
@@ -639,8 +639,8 @@ InstrEmitter::EmitDbgValue(SDDbgValue *SD,
if (SD->getKind() == SDDbgValue::FRAMEIX) {
// Stack address; this needs to be lowered in target-dependent fashion.
// EmitTargetCodeForFrameDebugValue is responsible for allocation.
- unsigned FrameIx = SD->getFrameIx();
- return TII->emitFrameIndexDebugValue(*MF, FrameIx, Offset, MDPtr, DL);
+ return BuildMI(*MF, DL, TII->get(TargetOpcode::DBG_VALUE))
+ .addFrameIndex(SD->getFrameIx()).addImm(Offset).addMetadata(MDPtr);
}
// Otherwise, we're going to create an instruction here.
const MCInstrDesc &II = TII->get(TargetOpcode::DBG_VALUE);
@@ -678,7 +678,13 @@ InstrEmitter::EmitDbgValue(SDDbgValue *SD,
MIB.addReg(0U);
}
- MIB.addImm(Offset).addMetadata(MDPtr);
+ if (Offset != 0) // Indirect addressing.
+ MIB.addImm(Offset);
+ else
+ MIB.addReg(0U, RegState::Debug);
+
+ MIB.addMetadata(MDPtr);
+
return &*MIB;
}
diff --git a/lib/CodeGen/SelectionDAG/InstrEmitter.h b/lib/CodeGen/SelectionDAG/InstrEmitter.h
index a9c2203..920dda8 100644
--- a/lib/CodeGen/SelectionDAG/InstrEmitter.h
+++ b/lib/CodeGen/SelectionDAG/InstrEmitter.h
@@ -1,4 +1,4 @@
-//===---- InstrEmitter.h - Emit MachineInstrs for the SelectionDAG class ---==//
+//===- InstrEmitter.h - Emit MachineInstrs for the SelectionDAG -*- C++ -*--==//
//
// The LLVM Compiler Infrastructure
//
diff --git a/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp b/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp
index 224fa5f..bd844e5 100644
--- a/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp
+++ b/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp
@@ -610,7 +610,7 @@ PerformInsertVectorEltInMemory(SDValue Vec, SDValue Val, SDValue Idx,
false, false, 0);
// Load the updated vector.
return DAG.getLoad(VT, dl, Ch, StackPtr,
- MachinePointerInfo::getFixedStack(SPFI), false, false,
+ MachinePointerInfo::getFixedStack(SPFI), false, false,
false, 0);
}
@@ -1493,7 +1493,7 @@ SDValue SelectionDAGLegalize::ExpandVectorBuildThroughStack(SDNode* Node) {
StoreChain = DAG.getEntryNode();
// Result is a load from the stack slot.
- return DAG.getLoad(VT, dl, StoreChain, FIPtr, PtrInfo,
+ return DAG.getLoad(VT, dl, StoreChain, FIPtr, PtrInfo,
false, false, false, 0);
}
@@ -1553,9 +1553,9 @@ SDValue SelectionDAGLegalize::ExpandFCOPYSIGN(SDNode* Node) {
SDValue AbsVal = DAG.getNode(ISD::FABS, dl, Tmp1.getValueType(), Tmp1);
// Select between the nabs and abs value based on the sign bit of
// the input.
- return DAG.getNode(ISD::SELECT, dl, AbsVal.getValueType(), SignBit,
- DAG.getNode(ISD::FNEG, dl, AbsVal.getValueType(), AbsVal),
- AbsVal);
+ return DAG.getSelect(dl, AbsVal.getValueType(), SignBit,
+ DAG.getNode(ISD::FNEG, dl, AbsVal.getValueType(), AbsVal),
+ AbsVal);
}
void SelectionDAGLegalize::ExpandDYNAMIC_STACKALLOC(SDNode* Node,
@@ -1614,12 +1614,12 @@ void SelectionDAGLegalize::LegalizeSetCCCondCode(EVT VT,
unsigned Opc = 0;
switch (CCCode) {
default: llvm_unreachable("Don't know how to expand this condition!");
- case ISD::SETO:
+ case ISD::SETO:
assert(TLI.getCondCodeAction(ISD::SETOEQ, OpVT)
== TargetLowering::Legal
&& "If SETO is expanded, SETOEQ must be legal!");
CC1 = ISD::SETOEQ; CC2 = ISD::SETOEQ; Opc = ISD::AND; break;
- case ISD::SETUO:
+ case ISD::SETUO:
assert(TLI.getCondCodeAction(ISD::SETUNE, OpVT)
== TargetLowering::Legal
&& "If SETUO is expanded, SETUNE must be legal!");
@@ -1629,12 +1629,12 @@ void SelectionDAGLegalize::LegalizeSetCCCondCode(EVT VT,
case ISD::SETOGE:
case ISD::SETOLT:
case ISD::SETOLE:
- case ISD::SETONE:
- case ISD::SETUEQ:
- case ISD::SETUNE:
- case ISD::SETUGT:
- case ISD::SETUGE:
- case ISD::SETULT:
+ case ISD::SETONE:
+ case ISD::SETUEQ:
+ case ISD::SETUNE:
+ case ISD::SETUGT:
+ case ISD::SETUGE:
+ case ISD::SETULT:
case ISD::SETULE:
// If we are floating point, assign and break, otherwise fall through.
if (!OpVT.isInteger()) {
@@ -1663,7 +1663,7 @@ void SelectionDAGLegalize::LegalizeSetCCCondCode(EVT VT,
CC = SDValue();
return;
}
-
+
SDValue SetCC1, SetCC2;
if (CCCode != ISD::SETO && CCCode != ISD::SETUO) {
// If we aren't the ordered or unorder operation,
@@ -2136,7 +2136,7 @@ static bool canCombineSinCosLibcall(SDNode *Node, const TargetLowering &TLI,
static bool useSinCos(SDNode *Node) {
unsigned OtherOpcode = Node->getOpcode() == ISD::FSIN
? ISD::FCOS : ISD::FSIN;
-
+
SDValue Op0 = Node->getOperand(0);
for (SDNode::use_iterator UI = Op0.getNode()->use_begin(),
UE = Op0.getNode()->use_end(); UI != UE; ++UI) {
@@ -2164,25 +2164,25 @@ SelectionDAGLegalize::ExpandSinCosLibCall(SDNode *Node,
case MVT::f128: LC = RTLIB::SINCOS_F128; break;
case MVT::ppcf128: LC = RTLIB::SINCOS_PPCF128; break;
}
-
+
// The input chain to this libcall is the entry node of the function.
// Legalizing the call will automatically add the previous call to the
// dependence.
SDValue InChain = DAG.getEntryNode();
-
+
EVT RetVT = Node->getValueType(0);
Type *RetTy = RetVT.getTypeForEVT(*DAG.getContext());
-
+
TargetLowering::ArgListTy Args;
TargetLowering::ArgListEntry Entry;
-
+
// Pass the argument.
Entry.Node = Node->getOperand(0);
Entry.Ty = RetTy;
Entry.isSExt = false;
Entry.isZExt = false;
Args.push_back(Entry);
-
+
// Pass the return address of sin.
SDValue SinPtr = DAG.CreateStackTemporary(RetVT);
Entry.Node = SinPtr;
@@ -2190,7 +2190,7 @@ SelectionDAGLegalize::ExpandSinCosLibCall(SDNode *Node,
Entry.isSExt = false;
Entry.isZExt = false;
Args.push_back(Entry);
-
+
// Also pass the return address of the cos.
SDValue CosPtr = DAG.CreateStackTemporary(RetVT);
Entry.Node = CosPtr;
@@ -2198,10 +2198,10 @@ SelectionDAGLegalize::ExpandSinCosLibCall(SDNode *Node,
Entry.isSExt = false;
Entry.isZExt = false;
Args.push_back(Entry);
-
+
SDValue Callee = DAG.getExternalSymbol(TLI.getLibcallName(LC),
TLI.getPointerTy());
-
+
SDLoc dl(Node);
TargetLowering::
CallLoweringInfo CLI(InChain, Type::getVoidTy(*DAG.getContext()),
@@ -2335,7 +2335,7 @@ SDValue SelectionDAGLegalize::ExpandLegalINT_TO_FP(bool isSigned,
//pseudo-op, or, even better, for whole-function isel.
SDValue SignBitTest = DAG.getSetCC(dl, getSetCCResultType(MVT::i64),
Op0, DAG.getConstant(0, MVT::i64), ISD::SETLT);
- return DAG.getNode(ISD::SELECT, dl, MVT::f32, SignBitTest, Slow, Fast);
+ return DAG.getSelect(dl, MVT::f32, SignBitTest, Slow, Fast);
}
// Otherwise, implement the fully general conversion.
@@ -2348,11 +2348,11 @@ SDValue SelectionDAGLegalize::ExpandLegalINT_TO_FP(bool isSigned,
DAG.getConstant(UINT64_C(0x7ff), MVT::i64));
SDValue Ne = DAG.getSetCC(dl, getSetCCResultType(MVT::i64),
And2, DAG.getConstant(UINT64_C(0), MVT::i64), ISD::SETNE);
- SDValue Sel = DAG.getNode(ISD::SELECT, dl, MVT::i64, Ne, Or, Op0);
+ SDValue Sel = DAG.getSelect(dl, MVT::i64, Ne, Or, Op0);
SDValue Ge = DAG.getSetCC(dl, getSetCCResultType(MVT::i64),
Op0, DAG.getConstant(UINT64_C(0x0020000000000000), MVT::i64),
ISD::SETUGE);
- SDValue Sel2 = DAG.getNode(ISD::SELECT, dl, MVT::i64, Ge, Sel, Op0);
+ SDValue Sel2 = DAG.getSelect(dl, MVT::i64, Ge, Sel, Op0);
EVT SHVT = TLI.getShiftAmountTy(Sel2.getValueType());
SDValue Sh = DAG.getNode(ISD::SRL, dl, MVT::i64, Sel2,
@@ -2375,7 +2375,7 @@ SDValue SelectionDAGLegalize::ExpandLegalINT_TO_FP(bool isSigned,
Op0, DAG.getConstant(0, Op0.getValueType()),
ISD::SETLT);
SDValue Zero = DAG.getIntPtrConstant(0), Four = DAG.getIntPtrConstant(4);
- SDValue CstOffset = DAG.getNode(ISD::SELECT, dl, Zero.getValueType(),
+ SDValue CstOffset = DAG.getSelect(dl, Zero.getValueType(),
SignSet, Four, Zero);
// If the sign bit of the integer is set, the large number will be treated
@@ -2928,7 +2928,7 @@ void SelectionDAGLegalize::ExpandNode(SDNode *Node) {
Node->getOperand(0), Tmp1));
False = DAG.getNode(ISD::XOR, dl, NVT, False,
DAG.getConstant(x, NVT));
- Tmp1 = DAG.getNode(ISD::SELECT, dl, NVT, Tmp2, True, False);
+ Tmp1 = DAG.getSelect(dl, NVT, Tmp2, True, False);
Results.push_back(Tmp1);
break;
}
@@ -2940,7 +2940,7 @@ void SelectionDAGLegalize::ExpandNode(SDNode *Node) {
unsigned Align = Node->getConstantOperandVal(3);
SDValue VAListLoad = DAG.getLoad(TLI.getPointerTy(), dl, Tmp1, Tmp2,
- MachinePointerInfo(V),
+ MachinePointerInfo(V),
false, false, false, 0);
SDValue VAList = VAListLoad;
@@ -3031,7 +3031,7 @@ void SelectionDAGLegalize::ExpandNode(SDNode *Node) {
// cast operands to v8i32 and re-build the mask.
// Calculate new VT, the size of the new VT should be equal to original.
- EVT NewVT = EVT::getVectorVT(*DAG.getContext(), NewEltVT,
+ EVT NewVT = EVT::getVectorVT(*DAG.getContext(), NewEltVT,
VT.getSizeInBits()/NewEltVT.getSizeInBits());
assert(NewVT.bitsEq(VT));
@@ -3071,11 +3071,12 @@ void SelectionDAGLegalize::ExpandNode(SDNode *Node) {
if (Idx < NumElems)
Ops.push_back(DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, EltVT,
Op0,
- DAG.getIntPtrConstant(Idx)));
+ DAG.getConstant(Idx, TLI.getVectorIdxTy())));
else
Ops.push_back(DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, EltVT,
Op1,
- DAG.getIntPtrConstant(Idx - NumElems)));
+ DAG.getConstant(Idx - NumElems,
+ TLI.getVectorIdxTy())));
}
Tmp1 = DAG.getNode(ISD::BUILD_VECTOR, dl, VT, &Ops[0], Ops.size());
@@ -3140,7 +3141,7 @@ void SelectionDAGLegalize::ExpandNode(SDNode *Node) {
Tmp2 = DAG.getSetCC(dl, getSetCCResultType(Tmp1.getValueType()),
Tmp1, Tmp2, ISD::SETUGT);
Tmp3 = DAG.getNode(ISD::FNEG, dl, VT, Tmp1);
- Tmp1 = DAG.getNode(ISD::SELECT, dl, VT, Tmp2, Tmp1, Tmp3);
+ Tmp1 = DAG.getSelect(dl, VT, Tmp2, Tmp1, Tmp3);
Results.push_back(Tmp1);
break;
}
@@ -3269,22 +3270,6 @@ void SelectionDAGLegalize::ExpandNode(SDNode *Node) {
Results.push_back(ExpandConstantFP(CFP, true));
break;
}
- case ISD::EHSELECTION: {
- unsigned Reg = TLI.getExceptionSelectorRegister();
- assert(Reg && "Can't expand to unknown register!");
- Results.push_back(DAG.getCopyFromReg(Node->getOperand(1), dl, Reg,
- Node->getValueType(0)));
- Results.push_back(Results[0].getValue(1));
- break;
- }
- case ISD::EXCEPTIONADDR: {
- unsigned Reg = TLI.getExceptionPointerRegister();
- assert(Reg && "Can't expand to unknown register!");
- Results.push_back(DAG.getCopyFromReg(Node->getOperand(0), dl, Reg,
- Node->getValueType(0)));
- Results.push_back(Results[0].getValue(1));
- break;
- }
case ISD::FSUB: {
EVT VT = Node->getValueType(0);
assert(TLI.isOperationLegalOrCustom(ISD::FADD, VT) &&
@@ -3704,10 +3689,12 @@ void SelectionDAGLegalize::ExpandNode(SDNode *Node) {
for (unsigned Idx = 0; Idx < NumElem; Idx++) {
SDValue Ex = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl,
VT.getScalarType(),
- Node->getOperand(0), DAG.getIntPtrConstant(Idx));
+ Node->getOperand(0), DAG.getConstant(Idx,
+ TLI.getVectorIdxTy()));
SDValue Sh = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl,
VT.getScalarType(),
- Node->getOperand(1), DAG.getIntPtrConstant(Idx));
+ Node->getOperand(1), DAG.getConstant(Idx,
+ TLI.getVectorIdxTy()));
Scalars.push_back(DAG.getNode(Node->getOpcode(), dl,
VT.getScalarType(), Ex, Sh));
}
@@ -3762,8 +3749,8 @@ void SelectionDAGLegalize::PromoteNode(SDNode *Node) {
Tmp2 = DAG.getSetCC(dl, getSetCCResultType(NVT),
Tmp1, DAG.getConstant(NVT.getSizeInBits(), NVT),
ISD::SETEQ);
- Tmp1 = DAG.getNode(ISD::SELECT, dl, NVT, Tmp2,
- DAG.getConstant(OVT.getSizeInBits(), NVT), Tmp1);
+ Tmp1 = DAG.getSelect(dl, NVT, Tmp2,
+ DAG.getConstant(OVT.getSizeInBits(), NVT), Tmp1);
} else if (Node->getOpcode() == ISD::CTLZ ||
Node->getOpcode() == ISD::CTLZ_ZERO_UNDEF) {
// Tmp1 = Tmp1 - (sizeinbits(NVT) - sizeinbits(Old VT))
@@ -3858,7 +3845,7 @@ void SelectionDAGLegalize::PromoteNode(SDNode *Node) {
Tmp2 = DAG.getNode(ExtOp, dl, NVT, Node->getOperand(1));
Tmp3 = DAG.getNode(ExtOp, dl, NVT, Node->getOperand(2));
// Perform the larger operation, then round down.
- Tmp1 = DAG.getNode(ISD::SELECT, dl, NVT, Tmp1, Tmp2, Tmp3);
+ Tmp1 = DAG.getSelect(dl, NVT, Tmp1, Tmp2, Tmp3);
if (TruncOp != ISD::FP_ROUND)
Tmp1 = DAG.getNode(TruncOp, dl, Node->getValueType(0), Tmp1);
else
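Several LegalizeDAG hunks above replace explicit ISD::SELECT node construction with the DAG.getSelect helper; the ExpandFCOPYSIGN change is representative: take fabs of the magnitude, then select between its negation and itself based on the sign bit of the other operand. A scalar C++ model of that expansion, checked against std::copysign (illustrative only; the legalizer itself operates on SDValues, not doubles):

#include <cassert>
#include <cmath>
#include <cstdint>
#include <cstring>

// Select-based copysign expansion mirroring ExpandFCOPYSIGN:
// AbsVal = fabs(Mag); result = SignBit(Sign) ? -AbsVal : AbsVal
static double copysignViaSelect(double Mag, double Sign) {
  uint64_t Bits;
  std::memcpy(&Bits, &Sign, sizeof(Bits));
  bool SignBit = (Bits >> 63) & 1;       // sign bit of the Sign operand
  double AbsVal = std::fabs(Mag);
  return SignBit ? -AbsVal : AbsVal;     // the SELECT node
}

int main() {
  for (double M : {0.0, 1.5, -2.25, 1e300})
    for (double S : {3.0, -3.0, -0.0})
      assert(copysignViaSelect(M, S) == std::copysign(M, S));
  return 0;
}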
diff --git a/lib/CodeGen/SelectionDAG/LegalizeFloatTypes.cpp b/lib/CodeGen/SelectionDAG/LegalizeFloatTypes.cpp
index f6df211..cea0b02 100644
--- a/lib/CodeGen/SelectionDAG/LegalizeFloatTypes.cpp
+++ b/lib/CodeGen/SelectionDAG/LegalizeFloatTypes.cpp
@@ -503,7 +503,7 @@ SDValue DAGTypeLegalizer::SoftenFloatRes_LOAD(SDNode *N) {
if (L->getExtensionType() == ISD::NON_EXTLOAD) {
NewL = DAG.getLoad(L->getAddressingMode(), L->getExtensionType(),
NVT, dl, L->getChain(), L->getBasePtr(), L->getOffset(),
- L->getPointerInfo(), NVT, L->isVolatile(),
+ L->getPointerInfo(), NVT, L->isVolatile(),
L->isNonTemporal(), false, L->getAlignment());
// Legalized the chain result - switch anything that used the old chain to
// use the new one.
@@ -526,8 +526,8 @@ SDValue DAGTypeLegalizer::SoftenFloatRes_LOAD(SDNode *N) {
SDValue DAGTypeLegalizer::SoftenFloatRes_SELECT(SDNode *N) {
SDValue LHS = GetSoftenedFloat(N->getOperand(1));
SDValue RHS = GetSoftenedFloat(N->getOperand(2));
- return DAG.getNode(ISD::SELECT, SDLoc(N),
- LHS.getValueType(), N->getOperand(0),LHS,RHS);
+ return DAG.getSelect(SDLoc(N),
+ LHS.getValueType(), N->getOperand(0), LHS, RHS);
}
SDValue DAGTypeLegalizer::SoftenFloatRes_SELECT_CC(SDNode *N) {
@@ -855,9 +855,9 @@ void DAGTypeLegalizer::ExpandFloatRes_FABS(SDNode *N, SDValue &Lo,
GetExpandedFloat(N->getOperand(0), Lo, Tmp);
Hi = DAG.getNode(ISD::FABS, dl, Tmp.getValueType(), Tmp);
// Lo = Hi==fabs(Hi) ? Lo : -Lo;
- Lo = DAG.getNode(ISD::SELECT_CC, dl, Lo.getValueType(), Tmp, Hi, Lo,
+ Lo = DAG.getSelectCC(dl, Tmp, Hi, Lo,
DAG.getNode(ISD::FNEG, dl, Lo.getValueType(), Lo),
- DAG.getCondCode(ISD::SETEQ));
+ ISD::SETEQ);
}
void DAGTypeLegalizer::ExpandFloatRes_FADD(SDNode *N, SDValue &Lo,
@@ -1216,8 +1216,8 @@ void DAGTypeLegalizer::ExpandFloatRes_XINT_TO_FP(SDNode *N, SDValue &Lo,
DAG.getConstantFP(APFloat(APFloat::PPCDoubleDouble,
APInt(128, Parts)),
MVT::ppcf128));
- Lo = DAG.getNode(ISD::SELECT_CC, dl, VT, Src, DAG.getConstant(0, SrcVT),
- Lo, Hi, DAG.getCondCode(ISD::SETLT));
+ Lo = DAG.getSelectCC(dl, Src, DAG.getConstant(0, SrcVT),
+ Lo, Hi, ISD::SETLT);
GetPairElements(Lo, Lo, Hi);
}
@@ -1370,17 +1370,17 @@ SDValue DAGTypeLegalizer::ExpandFloatOp_FP_TO_UINT(SDNode *N) {
SDValue Tmp = DAG.getConstantFP(APF, MVT::ppcf128);
// X>=2^31 ? (int)(X-2^31)+0x80000000 : (int)X
// FIXME: generated code sucks.
- return DAG.getNode(ISD::SELECT_CC, dl, MVT::i32, N->getOperand(0), Tmp,
- DAG.getNode(ISD::ADD, dl, MVT::i32,
- DAG.getNode(ISD::FP_TO_SINT, dl, MVT::i32,
- DAG.getNode(ISD::FSUB, dl,
- MVT::ppcf128,
- N->getOperand(0),
- Tmp)),
- DAG.getConstant(0x80000000, MVT::i32)),
- DAG.getNode(ISD::FP_TO_SINT, dl,
- MVT::i32, N->getOperand(0)),
- DAG.getCondCode(ISD::SETGE));
+ return DAG.getSelectCC(dl, N->getOperand(0), Tmp,
+ DAG.getNode(ISD::ADD, dl, MVT::i32,
+ DAG.getNode(ISD::FP_TO_SINT, dl, MVT::i32,
+ DAG.getNode(ISD::FSUB, dl,
+ MVT::ppcf128,
+ N->getOperand(0),
+ Tmp)),
+ DAG.getConstant(0x80000000, MVT::i32)),
+ DAG.getNode(ISD::FP_TO_SINT, dl,
+ MVT::i32, N->getOperand(0)),
+ ISD::SETGE);
}
RTLIB::Libcall LC = RTLIB::getFPTOUINT(N->getOperand(0).getValueType(), RVT);
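The float-softening and float-expansion rewrites above replace hand-built ISD::SELECT / ISD::SELECT_CC nodes with the DAG.getSelect / DAG.getSelectCC convenience helpers. Only the call sites appear in the diff; as a rough sketch (not the verbatim SelectionDAG implementation), the helpers presumably reduce to something like the following member functions:

    // Sketch only: the real bodies live in SelectionDAG's header. The point is
    // that getSelect can pick VSELECT automatically when the condition is a
    // vector, and getSelectCC assembles the full SELECT_CC operand list
    // (including the condition-code node) in one call.
    SDValue getSelect(SDLoc DL, EVT VT, SDValue Cond, SDValue LHS, SDValue RHS) {
      return getNode(Cond.getValueType().isVector() ? ISD::VSELECT : ISD::SELECT,
                     DL, VT, Cond, LHS, RHS);
    }

    SDValue getSelectCC(SDLoc DL, SDValue LHS, SDValue RHS,
                        SDValue True, SDValue False, ISD::CondCode Cond) {
      return getNode(ISD::SELECT_CC, DL, True.getValueType(),
                     LHS, RHS, True, False, getCondCode(Cond));
    }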
diff --git a/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp b/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp
index c281553..ff8f1f9 100644
--- a/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp
+++ b/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp
@@ -483,8 +483,8 @@ SDValue DAGTypeLegalizer::PromoteIntRes_SDIV(SDNode *N) {
SDValue DAGTypeLegalizer::PromoteIntRes_SELECT(SDNode *N) {
SDValue LHS = GetPromotedInteger(N->getOperand(1));
SDValue RHS = GetPromotedInteger(N->getOperand(2));
- return DAG.getNode(ISD::SELECT, SDLoc(N),
- LHS.getValueType(), N->getOperand(0),LHS,RHS);
+ return DAG.getSelect(SDLoc(N),
+ LHS.getValueType(), N->getOperand(0), LHS, RHS);
}
SDValue DAGTypeLegalizer::PromoteIntRes_VSELECT(SDNode *N) {
@@ -966,7 +966,8 @@ SDValue DAGTypeLegalizer::PromoteIntOp_INSERT_VECTOR_ELT(SDNode *N,
assert(OpNo == 2 && "Different operand and result vector types?");
// Promote the index.
- SDValue Idx = ZExtPromotedInteger(N->getOperand(2));
+ SDValue Idx = DAG.getZExtOrTrunc(N->getOperand(2), SDLoc(N),
+ TLI.getVectorIdxTy());
return SDValue(DAG.UpdateNodeOperands(N, N->getOperand(0),
N->getOperand(1), Idx), 0);
}
@@ -1138,7 +1139,8 @@ void DAGTypeLegalizer::ExpandIntegerResult(SDNode *N, unsigned ResNo) {
case ISD::ATOMIC_LOAD_MAX:
case ISD::ATOMIC_LOAD_UMIN:
case ISD::ATOMIC_LOAD_UMAX:
- case ISD::ATOMIC_SWAP: {
+ case ISD::ATOMIC_SWAP:
+ case ISD::ATOMIC_CMP_SWAP: {
std::pair<SDValue, SDValue> Tmp = ExpandAtomic(N);
SplitInteger(Tmp.first, Lo, Hi);
ReplaceValueWith(SDValue(N, 1), Tmp.second);
@@ -1478,8 +1480,8 @@ ExpandShiftWithUnknownAmountBit(SDNode *N, SDValue &Lo, SDValue &Hi) {
LoL = DAG.getConstant(0, NVT); // Lo part is zero.
HiL = DAG.getNode(ISD::SHL, dl, NVT, InL, AmtExcess); // Hi from Lo part.
- Lo = DAG.getNode(ISD::SELECT, dl, NVT, isShort, LoS, LoL);
- Hi = DAG.getNode(ISD::SELECT, dl, NVT, isShort, HiS, HiL);
+ Lo = DAG.getSelect(dl, NVT, isShort, LoS, LoL);
+ Hi = DAG.getSelect(dl, NVT, isShort, HiS, HiL);
return true;
case ISD::SRL:
// Short: ShAmt < NVTBits
@@ -1494,8 +1496,8 @@ ExpandShiftWithUnknownAmountBit(SDNode *N, SDValue &Lo, SDValue &Hi) {
HiL = DAG.getConstant(0, NVT); // Hi part is zero.
LoL = DAG.getNode(ISD::SRL, dl, NVT, InH, AmtExcess); // Lo from Hi part.
- Lo = DAG.getNode(ISD::SELECT, dl, NVT, isShort, LoS, LoL);
- Hi = DAG.getNode(ISD::SELECT, dl, NVT, isShort, HiS, HiL);
+ Lo = DAG.getSelect(dl, NVT, isShort, LoS, LoL);
+ Hi = DAG.getSelect(dl, NVT, isShort, HiS, HiL);
return true;
case ISD::SRA:
// Short: ShAmt < NVTBits
@@ -1511,8 +1513,8 @@ ExpandShiftWithUnknownAmountBit(SDNode *N, SDValue &Lo, SDValue &Hi) {
DAG.getConstant(NVTBits-1, ShTy));
LoL = DAG.getNode(ISD::SRA, dl, NVT, InH, AmtExcess); // Lo from Hi part.
- Lo = DAG.getNode(ISD::SELECT, dl, NVT, isShort, LoS, LoL);
- Hi = DAG.getNode(ISD::SELECT, dl, NVT, isShort, HiS, HiL);
+ Lo = DAG.getSelect(dl, NVT, isShort, LoS, LoL);
+ Hi = DAG.getSelect(dl, NVT, isShort, HiS, HiL);
return true;
}
}
@@ -1558,13 +1560,13 @@ void DAGTypeLegalizer::ExpandIntRes_ADDSUB(SDNode *N,
Hi = DAG.getNode(ISD::ADD, dl, NVT, HiOps, 2);
SDValue Cmp1 = DAG.getSetCC(dl, getSetCCResultType(NVT), Lo, LoOps[0],
ISD::SETULT);
- SDValue Carry1 = DAG.getNode(ISD::SELECT, dl, NVT, Cmp1,
- DAG.getConstant(1, NVT),
- DAG.getConstant(0, NVT));
+ SDValue Carry1 = DAG.getSelect(dl, NVT, Cmp1,
+ DAG.getConstant(1, NVT),
+ DAG.getConstant(0, NVT));
SDValue Cmp2 = DAG.getSetCC(dl, getSetCCResultType(NVT), Lo, LoOps[1],
ISD::SETULT);
- SDValue Carry2 = DAG.getNode(ISD::SELECT, dl, NVT, Cmp2,
- DAG.getConstant(1, NVT), Carry1);
+ SDValue Carry2 = DAG.getSelect(dl, NVT, Cmp2,
+ DAG.getConstant(1, NVT), Carry1);
Hi = DAG.getNode(ISD::ADD, dl, NVT, Hi, Carry2);
} else {
Lo = DAG.getNode(ISD::SUB, dl, NVT, LoOps, 2);
@@ -1572,9 +1574,9 @@ void DAGTypeLegalizer::ExpandIntRes_ADDSUB(SDNode *N,
SDValue Cmp =
DAG.getSetCC(dl, getSetCCResultType(LoOps[0].getValueType()),
LoOps[0], LoOps[1], ISD::SETULT);
- SDValue Borrow = DAG.getNode(ISD::SELECT, dl, NVT, Cmp,
- DAG.getConstant(1, NVT),
- DAG.getConstant(0, NVT));
+ SDValue Borrow = DAG.getSelect(dl, NVT, Cmp,
+ DAG.getConstant(1, NVT),
+ DAG.getConstant(0, NVT));
Hi = DAG.getNode(ISD::SUB, dl, NVT, Hi, Borrow);
}
}
@@ -1725,9 +1727,9 @@ void DAGTypeLegalizer::ExpandIntRes_CTLZ(SDNode *N,
SDValue LoLZ = DAG.getNode(N->getOpcode(), dl, NVT, Lo);
SDValue HiLZ = DAG.getNode(ISD::CTLZ_ZERO_UNDEF, dl, NVT, Hi);
- Lo = DAG.getNode(ISD::SELECT, dl, NVT, HiNotZero, HiLZ,
- DAG.getNode(ISD::ADD, dl, NVT, LoLZ,
- DAG.getConstant(NVT.getSizeInBits(), NVT)));
+ Lo = DAG.getSelect(dl, NVT, HiNotZero, HiLZ,
+ DAG.getNode(ISD::ADD, dl, NVT, LoLZ,
+ DAG.getConstant(NVT.getSizeInBits(), NVT)));
Hi = DAG.getConstant(0, NVT);
}
@@ -1755,9 +1757,9 @@ void DAGTypeLegalizer::ExpandIntRes_CTTZ(SDNode *N,
SDValue LoLZ = DAG.getNode(ISD::CTTZ_ZERO_UNDEF, dl, NVT, Lo);
SDValue HiLZ = DAG.getNode(N->getOpcode(), dl, NVT, Hi);
- Lo = DAG.getNode(ISD::SELECT, dl, NVT, LoNotZero, LoLZ,
- DAG.getNode(ISD::ADD, dl, NVT, HiLZ,
- DAG.getConstant(NVT.getSizeInBits(), NVT)));
+ Lo = DAG.getSelect(dl, NVT, LoNotZero, LoLZ,
+ DAG.getNode(ISD::ADD, dl, NVT, HiLZ,
+ DAG.getConstant(NVT.getSizeInBits(), NVT)));
Hi = DAG.getConstant(0, NVT);
}
@@ -2289,14 +2291,14 @@ void DAGTypeLegalizer::ExpandIntRes_XMULO(SDNode *N,
// make sure we aren't using 0.
SDValue isZero = DAG.getSetCC(dl, getSetCCResultType(VT),
RHS, DAG.getConstant(0, VT), ISD::SETEQ);
- SDValue NotZero = DAG.getNode(ISD::SELECT, dl, VT, isZero,
- DAG.getConstant(1, VT), RHS);
+ SDValue NotZero = DAG.getSelect(dl, VT, isZero,
+ DAG.getConstant(1, VT), RHS);
SDValue DIV = DAG.getNode(ISD::UDIV, dl, VT, MUL, NotZero);
SDValue Overflow = DAG.getSetCC(dl, N->getValueType(1), DIV, LHS,
ISD::SETNE);
- Overflow = DAG.getNode(ISD::SELECT, dl, N->getValueType(1), isZero,
- DAG.getConstant(0, N->getValueType(1)),
- Overflow);
+ Overflow = DAG.getSelect(dl, N->getValueType(1), isZero,
+ DAG.getConstant(0, N->getValueType(1)),
+ Overflow);
ReplaceValueWith(SDValue(N, 1), Overflow);
return;
}
@@ -2304,7 +2306,7 @@ void DAGTypeLegalizer::ExpandIntRes_XMULO(SDNode *N,
Type *RetTy = VT.getTypeForEVT(*DAG.getContext());
EVT PtrVT = TLI.getPointerTy();
Type *PtrTy = PtrVT.getTypeForEVT(*DAG.getContext());
-
+
// Replace this with a libcall that will check overflow.
RTLIB::Libcall LC = RTLIB::UNKNOWN_LIBCALL;
if (VT == MVT::i32)
@@ -2601,8 +2603,8 @@ void DAGTypeLegalizer::IntegerExpandSetCCOperands(SDValue &NewLHS,
if (!NewLHS.getNode())
NewLHS = DAG.getSetCC(dl, getSetCCResultType(LHSHi.getValueType()),
LHSHi, RHSHi, ISD::SETEQ);
- NewLHS = DAG.getNode(ISD::SELECT, dl, Tmp1.getValueType(),
- NewLHS, Tmp1, Tmp2);
+ NewLHS = DAG.getSelect(dl, Tmp1.getValueType(),
+ NewLHS, Tmp1, Tmp2);
NewRHS = SDValue();
}
@@ -2830,8 +2832,8 @@ SDValue DAGTypeLegalizer::ExpandIntOp_UINT_TO_FP(SDNode *N) {
SDValue Zero = DAG.getIntPtrConstant(0);
SDValue Four = DAG.getIntPtrConstant(4);
if (TLI.isBigEndian()) std::swap(Zero, Four);
- SDValue Offset = DAG.getNode(ISD::SELECT, dl, Zero.getValueType(), SignSet,
- Zero, Four);
+ SDValue Offset = DAG.getSelect(dl, Zero.getValueType(), SignSet,
+ Zero, Four);
unsigned Alignment = cast<ConstantPoolSDNode>(FudgePtr)->getAlignment();
FudgePtr = DAG.getNode(ISD::ADD, dl, TLI.getPointerTy(), FudgePtr, Offset);
Alignment = std::min(Alignment, 4u);
@@ -2885,7 +2887,7 @@ SDValue DAGTypeLegalizer::PromoteIntRes_EXTRACT_SUBVECTOR(SDNode *N) {
// Extract the element from the original vector.
SDValue Index = DAG.getNode(ISD::ADD, dl, BaseIdx.getValueType(),
- BaseIdx, DAG.getIntPtrConstant(i));
+ BaseIdx, DAG.getConstant(i, BaseIdx.getValueType()));
SDValue Ext = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl,
InVT.getVectorElementType(), N->getOperand(0), Index);
@@ -2929,7 +2931,15 @@ SDValue DAGTypeLegalizer::PromoteIntRes_BUILD_VECTOR(SDNode *N) {
SmallVector<SDValue, 8> Ops;
Ops.reserve(NumElems);
for (unsigned i = 0; i != NumElems; ++i) {
- SDValue Op = DAG.getNode(ISD::ANY_EXTEND, dl, NOutVTElem, N->getOperand(i));
+ SDValue Op;
+ // BUILD_VECTOR integer operand types are allowed to be larger than the
+ // result's element type. This may still be true after the promotion. For
+ // example, we might be promoting (<v?i1> = BV <i32>, <i32>, ...) to
+ // (v?i16 = BV <i32>, <i32>, ...), and we can't any_extend <i32> to <i16>.
+ if (N->getOperand(i).getValueType().bitsLT(NOutVTElem))
+ Op = DAG.getNode(ISD::ANY_EXTEND, dl, NOutVTElem, N->getOperand(i));
+ else
+ Op = N->getOperand(i);
Ops.push_back(Op);
}
@@ -2975,7 +2985,8 @@ SDValue DAGTypeLegalizer::PromoteIntRes_CONCAT_VECTORS(SDNode *N) {
SDValue Op = N->getOperand(i);
for (unsigned j = 0; j < NumElem; ++j) {
SDValue Ext = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl,
- InElemTy, Op, DAG.getIntPtrConstant(j));
+ InElemTy, Op, DAG.getConstant(j,
+ TLI.getVectorIdxTy()));
Ops[i * NumElem + j] = DAG.getNode(ISD::ANY_EXTEND, dl, OutElemTy, Ext);
}
}
@@ -3002,7 +3013,7 @@ SDValue DAGTypeLegalizer::PromoteIntRes_INSERT_VECTOR_ELT(SDNode *N) {
SDValue DAGTypeLegalizer::PromoteIntOp_EXTRACT_VECTOR_ELT(SDNode *N) {
SDLoc dl(N);
SDValue V0 = GetPromotedInteger(N->getOperand(0));
- SDValue V1 = N->getOperand(1);
+ SDValue V1 = DAG.getZExtOrTrunc(N->getOperand(1), dl, TLI.getVectorIdxTy());
SDValue Ext = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl,
V0->getValueType(0).getScalarType(), V0, V1);
@@ -3030,7 +3041,7 @@ SDValue DAGTypeLegalizer::PromoteIntOp_CONCAT_VECTORS(SDNode *N) {
for (unsigned i=0; i<NumElem; ++i) {
// Extract element from incoming vector
SDValue Ex = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, SclrTy,
- Incoming, DAG.getIntPtrConstant(i));
+ Incoming, DAG.getConstant(i, TLI.getVectorIdxTy()));
SDValue Tr = DAG.getNode(ISD::TRUNCATE, dl, RetSclrTy, Ex);
NewOps.push_back(Tr);
}
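PromoteIntOp_INSERT_VECTOR_ELT and PromoteIntOp_EXTRACT_VECTOR_ELT above now push the index through DAG.getZExtOrTrunc(..., TLI.getVectorIdxTy()) instead of ZExtPromotedInteger, so the index lands in the target's vector-index type whether it needs widening or narrowing. A minimal sketch of that behaviour, written as a hypothetical free helper rather than the literal SelectionDAG code:

    // Equivalent in effect to DAG.getZExtOrTrunc(Op, DL, VT): zero-extend when
    // the destination type is wider, truncate when it is narrower, and pass
    // the value through unchanged when the types already match.
    static SDValue zextOrTruncTo(SelectionDAG &DAG, SDValue Op, SDLoc DL, EVT VT) {
      EVT OpVT = Op.getValueType();
      if (OpVT == VT)
        return Op;
      return DAG.getNode(VT.bitsGT(OpVT) ? ISD::ZERO_EXTEND : ISD::TRUNCATE,
                         DL, VT, Op);
    }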
diff --git a/lib/CodeGen/SelectionDAG/LegalizeTypes.cpp b/lib/CodeGen/SelectionDAG/LegalizeTypes.cpp
index ef79662..fd770d1 100644
--- a/lib/CodeGen/SelectionDAG/LegalizeTypes.cpp
+++ b/lib/CodeGen/SelectionDAG/LegalizeTypes.cpp
@@ -615,7 +615,10 @@ void DAGTypeLegalizer::RemapValue(SDValue &N) {
// replaced with other values.
RemapValue(I->second);
N = I->second;
- assert(N.getNode()->getNodeId() != NewNode && "Mapped to new node!");
+
+ // Note that it is possible to have N.getNode()->getNodeId() == NewNode at
+ // this point because it is possible for a node to be put in the map before
+ // being processed.
}
}
diff --git a/lib/CodeGen/SelectionDAG/LegalizeTypes.h b/lib/CodeGen/SelectionDAG/LegalizeTypes.h
index e2597d6..63e9af3 100644
--- a/lib/CodeGen/SelectionDAG/LegalizeTypes.h
+++ b/lib/CodeGen/SelectionDAG/LegalizeTypes.h
@@ -653,7 +653,7 @@ private:
/// loads to load a vector with a resulting wider type. It takes
/// LdChain: list of chains for the load to be generated.
/// Ld: load to widen
- SDValue GenWidenVectorLoads(SmallVector<SDValue, 16>& LdChain,
+ SDValue GenWidenVectorLoads(SmallVectorImpl<SDValue> &LdChain,
LoadSDNode *LD);
/// GenWidenVectorExtLoads - Helper function to generate a set of extension
@@ -661,20 +661,20 @@ private:
/// LdChain: list of chains for the load to be generated.
/// Ld: load to widen
/// ExtType: extension element type
- SDValue GenWidenVectorExtLoads(SmallVector<SDValue, 16>& LdChain,
+ SDValue GenWidenVectorExtLoads(SmallVectorImpl<SDValue> &LdChain,
LoadSDNode *LD, ISD::LoadExtType ExtType);
/// Helper genWidenVectorStores - Helper function to generate a set of
/// stores to store a widen vector into non widen memory
/// StChain: list of chains for the stores we have generated
/// ST: store of a widen value
- void GenWidenVectorStores(SmallVector<SDValue, 16>& StChain, StoreSDNode *ST);
+ void GenWidenVectorStores(SmallVectorImpl<SDValue> &StChain, StoreSDNode *ST);
/// Helper genWidenVectorTruncStores - Helper function to generate a set of
/// stores to store a truncate widen vector into non widen memory
/// StChain: list of chains for the stores we have generated
/// ST: store of a widen value
- void GenWidenVectorTruncStores(SmallVector<SDValue, 16>& StChain,
+ void GenWidenVectorTruncStores(SmallVectorImpl<SDValue> &StChain,
StoreSDNode *ST);
/// Modifies a vector input (widen or narrows) to a vector of NVT. The
diff --git a/lib/CodeGen/SelectionDAG/LegalizeTypesGeneric.cpp b/lib/CodeGen/SelectionDAG/LegalizeTypesGeneric.cpp
index 1ede3dc..96f6143 100644
--- a/lib/CodeGen/SelectionDAG/LegalizeTypesGeneric.cpp
+++ b/lib/CodeGen/SelectionDAG/LegalizeTypesGeneric.cpp
@@ -80,9 +80,10 @@ void DAGTypeLegalizer::ExpandRes_BITCAST(SDNode *N, SDValue &Lo, SDValue &Hi) {
EVT InNVT = EVT::getVectorVT(*DAG.getContext(), InVT.getVectorElementType(),
InVT.getVectorNumElements()/2);
Lo = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, InNVT, InOp,
- DAG.getIntPtrConstant(0));
+ DAG.getConstant(0, TLI.getVectorIdxTy()));
Hi = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, InNVT, InOp,
- DAG.getIntPtrConstant(InNVT.getVectorNumElements()));
+ DAG.getConstant(InNVT.getVectorNumElements(),
+ TLI.getVectorIdxTy()));
if (TLI.isBigEndian())
std::swap(Lo, Hi);
Lo = DAG.getNode(ISD::BITCAST, dl, NOutVT, Lo);
@@ -115,7 +116,8 @@ void DAGTypeLegalizer::ExpandRes_BITCAST(SDNode *N, SDValue &Lo, SDValue &Hi) {
SmallVector<SDValue, 8> Vals;
for (unsigned i = 0; i < NumElems; ++i)
Vals.push_back(DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, ElemVT,
- CastInOp, DAG.getIntPtrConstant(i)));
+ CastInOp, DAG.getConstant(i,
+ TLI.getVectorIdxTy())));
// Build Lo, Hi pair by pairing extracted elements if needed.
unsigned Slot = 0;
@@ -161,7 +163,7 @@ void DAGTypeLegalizer::ExpandRes_BITCAST(SDNode *N, SDValue &Lo, SDValue &Hi) {
false, false, 0);
// Load the first half from the stack slot.
- Lo = DAG.getLoad(NOutVT, dl, Store, StackPtr, PtrInfo,
+ Lo = DAG.getLoad(NOutVT, dl, Store, StackPtr, PtrInfo,
false, false, false, 0);
// Increment the pointer to the other half.
@@ -227,10 +229,6 @@ void DAGTypeLegalizer::ExpandRes_EXTRACT_VECTOR_ELT(SDNode *N, SDValue &Lo,
// Extract the elements at 2 * Idx and 2 * Idx + 1 from the new vector.
SDValue Idx = N->getOperand(1);
- // Make sure the type of Idx is big enough to hold the new values.
- if (Idx.getValueType().bitsLT(TLI.getPointerTy()))
- Idx = DAG.getNode(ISD::ZERO_EXTEND, dl, TLI.getPointerTy(), Idx);
-
Idx = DAG.getNode(ISD::ADD, dl, Idx.getValueType(), Idx, Idx);
Lo = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, NewVT, NewVec, Idx);
@@ -406,7 +404,8 @@ SDValue DAGTypeLegalizer::ExpandOp_INSERT_VECTOR_ELT(SDNode *N) {
Idx = DAG.getNode(ISD::ADD, dl, Idx.getValueType(), Idx, Idx);
NewVec = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, NewVecVT, NewVec, Lo, Idx);
Idx = DAG.getNode(ISD::ADD, dl,
- Idx.getValueType(), Idx, DAG.getIntPtrConstant(1));
+ Idx.getValueType(), Idx,
+ DAG.getConstant(1, Idx.getValueType()));
NewVec = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, NewVecVT, NewVec, Hi, Idx);
// Convert the new vector to the old vector type.
@@ -495,9 +494,9 @@ void DAGTypeLegalizer::SplitRes_SELECT(SDNode *N, SDValue &Lo,
unsigned NumElements = Cond.getValueType().getVectorNumElements();
EVT VCondTy = EVT::getVectorVT(*DAG.getContext(), MVT::i1, NumElements / 2);
CL = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, VCondTy, Cond,
- DAG.getIntPtrConstant(0));
+ DAG.getConstant(0, TLI.getVectorIdxTy()));
CH = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, VCondTy, Cond,
- DAG.getIntPtrConstant(NumElements / 2));
+ DAG.getConstant(NumElements / 2, TLI.getVectorIdxTy()));
}
Lo = DAG.getNode(N->getOpcode(), dl, LL.getValueType(), CL, LL, RL);
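The repeated getIntPtrConstant -> getConstant(..., TLI.getVectorIdxTy()) substitutions in this file, and throughout the patch, all follow one pattern: index operands for EXTRACT_VECTOR_ELT, EXTRACT_SUBVECTOR and INSERT_VECTOR_ELT are built in whatever integer type the target reports for vector indexing, rather than assuming the pointer-width type. A before/after sketch (EltVT, Vec and i are placeholders, not names from the patch):

    // Old form: the index constant implicitly used the pointer-sized type.
    SDValue OldElt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, EltVT, Vec,
                                 DAG.getIntPtrConstant(i));

    // New form: ask TargetLowering which type vector indices should have.
    SDValue Idx = DAG.getConstant(i, TLI.getVectorIdxTy());
    SDValue NewElt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, EltVT, Vec, Idx);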
diff --git a/lib/CodeGen/SelectionDAG/LegalizeVectorOps.cpp b/lib/CodeGen/SelectionDAG/LegalizeVectorOps.cpp
index 071a0b8..bbe11b8 100644
--- a/lib/CodeGen/SelectionDAG/LegalizeVectorOps.cpp
+++ b/lib/CodeGen/SelectionDAG/LegalizeVectorOps.cpp
@@ -551,7 +551,7 @@ SDValue VectorLegalizer::ExpandStore(SDValue Op) {
SmallVector<SDValue, 8> Stores;
for (unsigned Idx = 0; Idx < NumElem; Idx++) {
SDValue Ex = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl,
- RegSclVT, Value, DAG.getIntPtrConstant(Idx));
+ RegSclVT, Value, DAG.getConstant(Idx, TLI.getVectorIdxTy()));
// This scalar TruncStore may be illegal, but we legalize it later.
SDValue Store = DAG.getTruncStore(Chain, dl, Ex, BasePTR,
@@ -572,7 +572,7 @@ SDValue VectorLegalizer::ExpandStore(SDValue Op) {
SDValue VectorLegalizer::ExpandSELECT(SDValue Op) {
// Lower a select instruction where the condition is a scalar and the
// operands are vectors. Lower this select to VSELECT and implement it
- // using XOR AND OR. The selector bit is broadcasted.
+ // using XOR AND OR. The selector bit is broadcasted.
EVT VT = Op.getValueType();
SDLoc DL(Op);
@@ -605,7 +605,7 @@ SDValue VectorLegalizer::ExpandSELECT(SDValue Op) {
// What is the size of each element in the vector mask.
EVT BitTy = MaskTy.getScalarType();
- Mask = DAG.getNode(ISD::SELECT, DL, BitTy, Mask,
+ Mask = DAG.getSelect(DL, BitTy, Mask,
DAG.getConstant(APInt::getAllOnesValue(BitTy.getSizeInBits()), BitTy),
DAG.getConstant(0, BitTy));
@@ -755,16 +755,16 @@ SDValue VectorLegalizer::UnrollVSETCC(SDValue Op) {
SmallVector<SDValue, 8> Ops(NumElems);
for (unsigned i = 0; i < NumElems; ++i) {
SDValue LHSElem = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, TmpEltVT, LHS,
- DAG.getIntPtrConstant(i));
+ DAG.getConstant(i, TLI.getVectorIdxTy()));
SDValue RHSElem = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, TmpEltVT, RHS,
- DAG.getIntPtrConstant(i));
+ DAG.getConstant(i, TLI.getVectorIdxTy()));
Ops[i] = DAG.getNode(ISD::SETCC, dl,
TLI.getSetCCResultType(*DAG.getContext(), TmpEltVT),
LHSElem, RHSElem, CC);
- Ops[i] = DAG.getNode(ISD::SELECT, dl, EltVT, Ops[i],
- DAG.getConstant(APInt::getAllOnesValue
- (EltVT.getSizeInBits()), EltVT),
- DAG.getConstant(0, EltVT));
+ Ops[i] = DAG.getSelect(dl, EltVT, Ops[i],
+ DAG.getConstant(APInt::getAllOnesValue
+ (EltVT.getSizeInBits()), EltVT),
+ DAG.getConstant(0, EltVT));
}
return DAG.getNode(ISD::BUILD_VECTOR, dl, VT, &Ops[0], NumElems);
}
diff --git a/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp b/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp
index a36dca6..54380ec 100644
--- a/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp
+++ b/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp
@@ -274,16 +274,17 @@ SDValue DAGTypeLegalizer::ScalarizeVecRes_VSELECT(SDNode *N) {
break;
}
}
- return DAG.getNode(ISD::SELECT, SDLoc(N),
- LHS.getValueType(), Cond, LHS,
- GetScalarizedVector(N->getOperand(2)));
+
+ return DAG.getSelect(SDLoc(N),
+ LHS.getValueType(), Cond, LHS,
+ GetScalarizedVector(N->getOperand(2)));
}
SDValue DAGTypeLegalizer::ScalarizeVecRes_SELECT(SDNode *N) {
SDValue LHS = GetScalarizedVector(N->getOperand(1));
- return DAG.getNode(ISD::SELECT, SDLoc(N),
- LHS.getValueType(), N->getOperand(0), LHS,
- GetScalarizedVector(N->getOperand(2)));
+ return DAG.getSelect(SDLoc(N),
+ LHS.getValueType(), N->getOperand(0), LHS,
+ GetScalarizedVector(N->getOperand(2)));
}
SDValue DAGTypeLegalizer::ScalarizeVecRes_SELECT_CC(SDNode *N) {
@@ -711,7 +712,8 @@ void DAGTypeLegalizer::SplitVecRes_EXTRACT_SUBVECTOR(SDNode *N, SDValue &Lo,
Lo = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, LoVT, Vec, Idx);
uint64_t IdxVal = cast<ConstantSDNode>(Idx)->getZExtValue();
Hi = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, HiVT, Vec,
- DAG.getIntPtrConstant(IdxVal + LoVT.getVectorNumElements()));
+ DAG.getConstant(IdxVal + LoVT.getVectorNumElements(),
+ TLI.getVectorIdxTy()));
}
void DAGTypeLegalizer::SplitVecRes_FPOWI(SDNode *N, SDValue &Lo,
@@ -753,7 +755,8 @@ void DAGTypeLegalizer::SplitVecRes_INSERT_VECTOR_ELT(SDNode *N, SDValue &Lo,
Lo.getValueType(), Lo, Elt, Idx);
else
Hi = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, Hi.getValueType(), Hi, Elt,
- DAG.getIntPtrConstant(IdxVal - LoNumElts));
+ DAG.getConstant(IdxVal - LoNumElts,
+ TLI.getVectorIdxTy()));
return;
}
@@ -852,14 +855,16 @@ void DAGTypeLegalizer::SplitVecRes_SETCC(SDNode *N, SDValue &Lo, SDValue &Hi) {
EVT InNVT = EVT::getVectorVT(*DAG.getContext(), InVT.getVectorElementType(),
LoVT.getVectorNumElements());
LL = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, InNVT, N->getOperand(0),
- DAG.getIntPtrConstant(0));
+ DAG.getConstant(0, TLI.getVectorIdxTy()));
LH = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, InNVT, N->getOperand(0),
- DAG.getIntPtrConstant(InNVT.getVectorNumElements()));
+ DAG.getConstant(InNVT.getVectorNumElements(),
+ TLI.getVectorIdxTy()));
RL = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, InNVT, N->getOperand(1),
- DAG.getIntPtrConstant(0));
+ DAG.getConstant(0, TLI.getVectorIdxTy()));
RH = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, InNVT, N->getOperand(1),
- DAG.getIntPtrConstant(InNVT.getVectorNumElements()));
+ DAG.getConstant(InNVT.getVectorNumElements(),
+ TLI.getVectorIdxTy()));
Lo = DAG.getNode(N->getOpcode(), DL, LoVT, LL, RL, N->getOperand(2));
Hi = DAG.getNode(N->getOpcode(), DL, HiVT, LH, RH, N->getOperand(2));
@@ -881,9 +886,10 @@ void DAGTypeLegalizer::SplitVecRes_UnaryOp(SDNode *N, SDValue &Lo,
EVT InNVT = EVT::getVectorVT(*DAG.getContext(), InVT.getVectorElementType(),
LoVT.getVectorNumElements());
Lo = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, InNVT, N->getOperand(0),
- DAG.getIntPtrConstant(0));
+ DAG.getConstant(0, TLI.getVectorIdxTy()));
Hi = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, InNVT, N->getOperand(0),
- DAG.getIntPtrConstant(InNVT.getVectorNumElements()));
+ DAG.getConstant(InNVT.getVectorNumElements(),
+ TLI.getVectorIdxTy()));
}
if (N->getOpcode() == ISD::FP_ROUND) {
@@ -994,7 +1000,8 @@ void DAGTypeLegalizer::SplitVecRes_VECTOR_SHUFFLE(ShuffleVectorSDNode *N,
// Extract the vector element by hand.
SVOps.push_back(DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, EltVT,
- Inputs[Input], DAG.getIntPtrConstant(Idx)));
+ Inputs[Input], DAG.getConstant(Idx,
+ TLI.getVectorIdxTy())));
}
// Construct the Lo/Hi output using a BUILD_VECTOR.
@@ -1030,6 +1037,10 @@ bool DAGTypeLegalizer::SplitVectorOperand(SDNode *N, unsigned OpNo) {
dbgs() << "\n");
SDValue Res = SDValue();
+ // See if the target wants to custom split this node.
+ if (CustomLowerNode(N, N->getOperand(OpNo).getValueType(), false))
+ return false;
+
if (Res.getNode() == 0) {
switch (N->getOpcode()) {
default:
@@ -1108,8 +1119,8 @@ SDValue DAGTypeLegalizer::SplitVecOp_VSELECT(SDNode *N, unsigned OpNo) {
assert(LoNumElts == HiNumElts && "Asymmetric vector split?");
LLVMContext &Ctx = *DAG.getContext();
- SDValue Zero = DAG.getIntPtrConstant(0);
- SDValue LoElts = DAG.getIntPtrConstant(LoNumElts);
+ SDValue Zero = DAG.getConstant(0, TLI.getVectorIdxTy());
+ SDValue LoElts = DAG.getConstant(LoNumElts, TLI.getVectorIdxTy());
EVT Src0VT = Src0.getValueType();
EVT Src0EltTy = Src0VT.getVectorElementType();
EVT MaskEltTy = MaskVT.getVectorElementType();
@@ -1284,7 +1295,7 @@ SDValue DAGTypeLegalizer::SplitVecOp_CONCAT_VECTORS(SDNode *N) {
for (unsigned i = 0, e = Op.getValueType().getVectorNumElements();
i != e; ++i) {
Elts.push_back(DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, EltVT,
- Op, DAG.getIntPtrConstant(i)));
+ Op, DAG.getConstant(i, TLI.getVectorIdxTy())));
}
}
@@ -1333,9 +1344,10 @@ SDValue DAGTypeLegalizer::SplitVecOp_TRUNCATE(SDNode *N) {
EVT SplitVT = EVT::getVectorVT(*DAG.getContext(),
InVT.getVectorElementType(), NumElements/2);
SDValue InLoVec = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, SplitVT, InVec,
- DAG.getIntPtrConstant(0));
+ DAG.getConstant(0, TLI.getVectorIdxTy()));
SDValue InHiVec = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, SplitVT, InVec,
- DAG.getIntPtrConstant(NumElements/2));
+ DAG.getConstant(NumElements/2,
+ TLI.getVectorIdxTy()));
// Truncate them to 1/2 the element size.
EVT HalfElementVT = EVT::getIntegerVT(*DAG.getContext(), InElementSize/2);
EVT HalfVT = EVT::getVectorVT(*DAG.getContext(), HalfElementVT,
@@ -1562,9 +1574,9 @@ SDValue DAGTypeLegalizer::WidenVecRes_Binary(SDNode *N) {
while (CurNumElts != 0) {
while (CurNumElts >= NumElts) {
SDValue EOp1 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, VT, InOp1,
- DAG.getIntPtrConstant(Idx));
+ DAG.getConstant(Idx, TLI.getVectorIdxTy()));
SDValue EOp2 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, VT, InOp2,
- DAG.getIntPtrConstant(Idx));
+ DAG.getConstant(Idx, TLI.getVectorIdxTy()));
ConcatOps[ConcatEnd++] = DAG.getNode(Opcode, dl, VT, EOp1, EOp2);
Idx += NumElts;
CurNumElts -= NumElts;
@@ -1577,9 +1589,11 @@ SDValue DAGTypeLegalizer::WidenVecRes_Binary(SDNode *N) {
if (NumElts == 1) {
for (unsigned i = 0; i != CurNumElts; ++i, ++Idx) {
SDValue EOp1 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, WidenEltVT,
- InOp1, DAG.getIntPtrConstant(Idx));
+ InOp1, DAG.getConstant(Idx,
+ TLI.getVectorIdxTy()));
SDValue EOp2 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, WidenEltVT,
- InOp2, DAG.getIntPtrConstant(Idx));
+ InOp2, DAG.getConstant(Idx,
+ TLI.getVectorIdxTy()));
ConcatOps[ConcatEnd++] = DAG.getNode(Opcode, dl, WidenEltVT,
EOp1, EOp2);
}
@@ -1617,7 +1631,8 @@ SDValue DAGTypeLegalizer::WidenVecRes_Binary(SDNode *N) {
unsigned NumToInsert = ConcatEnd - Idx - 1;
for (unsigned i = 0, OpIdx = Idx+1; i < NumToInsert; i++, OpIdx++) {
VecOp = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, NextVT, VecOp,
- ConcatOps[OpIdx], DAG.getIntPtrConstant(i));
+ ConcatOps[OpIdx], DAG.getConstant(i,
+ TLI.getVectorIdxTy()));
}
ConcatOps[Idx+1] = VecOp;
ConcatEnd = Idx + 2;
@@ -1705,7 +1720,8 @@ SDValue DAGTypeLegalizer::WidenVecRes_Convert(SDNode *N) {
if (InVTNumElts % WidenNumElts == 0) {
SDValue InVal = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, InWidenVT,
- InOp, DAG.getIntPtrConstant(0));
+ InOp, DAG.getConstant(0,
+ TLI.getVectorIdxTy()));
// Extract the input and convert the shorten input vector.
if (N->getNumOperands() == 1)
return DAG.getNode(Opcode, DL, WidenVT, InVal);
@@ -1720,7 +1736,7 @@ SDValue DAGTypeLegalizer::WidenVecRes_Convert(SDNode *N) {
unsigned i;
for (i=0; i < MinElts; ++i) {
SDValue Val = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, InEltVT, InOp,
- DAG.getIntPtrConstant(i));
+ DAG.getConstant(i, TLI.getVectorIdxTy()));
if (N->getNumOperands() == 1)
Ops[i] = DAG.getNode(Opcode, DL, EltVT, Val);
else
@@ -1871,7 +1887,10 @@ SDValue DAGTypeLegalizer::WidenVecRes_BUILD_VECTOR(SDNode *N) {
SDLoc dl(N);
// Build a vector with undefined for the new nodes.
EVT VT = N->getValueType(0);
- EVT EltVT = VT.getVectorElementType();
+
+ // Integer BUILD_VECTOR operands may be larger than the node's vector element
+ // type. The UNDEFs need to have the same type as the existing operands.
+ EVT EltVT = N->getOperand(0).getValueType();
unsigned NumElts = VT.getVectorNumElements();
EVT WidenVT = TLI.getTypeToTransformTo(*DAG.getContext(), VT);
@@ -1945,7 +1964,7 @@ SDValue DAGTypeLegalizer::WidenVecRes_CONCAT_VECTORS(SDNode *N) {
InOp = GetWidenedVector(InOp);
for (unsigned j=0; j < NumInElts; ++j)
Ops[Idx++] = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, EltVT, InOp,
- DAG.getIntPtrConstant(j));
+ DAG.getConstant(j, TLI.getVectorIdxTy()));
}
SDValue UndefVal = DAG.getUNDEF(EltVT);
for (; Idx < WidenNumElts; ++Idx)
@@ -2003,7 +2022,7 @@ SDValue DAGTypeLegalizer::WidenVecRes_CONVERT_RNDSAT(SDNode *N) {
if (InVTNumElts % WidenNumElts == 0) {
// Extract the input and convert the shorten input vector.
InOp = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, InWidenVT, InOp,
- DAG.getIntPtrConstant(0));
+ DAG.getConstant(0, TLI.getVectorIdxTy()));
return DAG.getConvertRndSat(WidenVT, dl, InOp, DTyOp, STyOp, RndOp,
SatOp, CvtCode);
}
@@ -2019,7 +2038,7 @@ SDValue DAGTypeLegalizer::WidenVecRes_CONVERT_RNDSAT(SDNode *N) {
unsigned i;
for (i=0; i < MinElts; ++i) {
SDValue ExtVal = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, InEltVT, InOp,
- DAG.getIntPtrConstant(i));
+ DAG.getConstant(i, TLI.getVectorIdxTy()));
Ops[i] = DAG.getConvertRndSat(WidenVT, dl, ExtVal, DTyOp, STyOp, RndOp,
SatOp, CvtCode);
}
@@ -2062,7 +2081,7 @@ SDValue DAGTypeLegalizer::WidenVecRes_EXTRACT_SUBVECTOR(SDNode *N) {
unsigned i;
for (i=0; i < NumElts; ++i)
Ops[i] = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, EltVT, InOp,
- DAG.getIntPtrConstant(IdxVal+i));
+ DAG.getConstant(IdxVal+i, TLI.getVectorIdxTy()));
SDValue UndefVal = DAG.getUNDEF(EltVT);
for (; i < WidenNumElts; ++i)
@@ -2289,7 +2308,7 @@ SDValue DAGTypeLegalizer::WidenVecOp_Convert(SDNode *N) {
for (unsigned i=0; i < NumElts; ++i)
Ops[i] = DAG.getNode(Opcode, dl, EltVT,
DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, InEltVT, InOp,
- DAG.getIntPtrConstant(i)));
+ DAG.getConstant(i, TLI.getVectorIdxTy())));
return DAG.getNode(ISD::BUILD_VECTOR, dl, VT, &Ops[0], NumElts);
}
@@ -2310,7 +2329,7 @@ SDValue DAGTypeLegalizer::WidenVecOp_BITCAST(SDNode *N) {
if (TLI.isTypeLegal(NewVT)) {
SDValue BitOp = DAG.getNode(ISD::BITCAST, dl, NewVT, InOp);
return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, VT, BitOp,
- DAG.getIntPtrConstant(0));
+ DAG.getConstant(0, TLI.getVectorIdxTy()));
}
}
@@ -2338,7 +2357,7 @@ SDValue DAGTypeLegalizer::WidenVecOp_CONCAT_VECTORS(SDNode *N) {
InOp = GetWidenedVector(InOp);
for (unsigned j=0; j < NumInElts; ++j)
Ops[Idx++] = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, EltVT, InOp,
- DAG.getIntPtrConstant(j));
+ DAG.getConstant(j, TLI.getVectorIdxTy()));
}
return DAG.getNode(ISD::BUILD_VECTOR, dl, VT, &Ops[0], NumElts);
}
@@ -2393,7 +2412,8 @@ SDValue DAGTypeLegalizer::WidenVecOp_SETCC(SDNode *N) {
SVT.getVectorElementType(),
N->getValueType(0).getVectorNumElements());
SDValue CC = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl,
- ResVT, WideSETCC, DAG.getIntPtrConstant(0));
+ ResVT, WideSETCC, DAG.getConstant(0,
+ TLI.getVectorIdxTy()));
return PromoteTargetBoolean(CC, N->getValueType(0));
}
@@ -2464,8 +2484,9 @@ static EVT FindMemType(SelectionDAG& DAG, const TargetLowering &TLI,
// LDOps: Load operators to build a vector type
// [Start,End) the list of loads to use.
static SDValue BuildVectorFromScalar(SelectionDAG& DAG, EVT VecTy,
- SmallVector<SDValue, 16>& LdOps,
+ SmallVectorImpl<SDValue> &LdOps,
unsigned Start, unsigned End) {
+ const TargetLowering &TLI = DAG.getTargetLoweringInfo();
SDLoc dl(LdOps[Start]);
EVT LdTy = LdOps[Start].getValueType();
unsigned Width = VecTy.getSizeInBits();
@@ -2486,12 +2507,12 @@ static SDValue BuildVectorFromScalar(SelectionDAG& DAG, EVT VecTy,
LdTy = NewLdTy;
}
VecOp = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, NewVecVT, VecOp, LdOps[i],
- DAG.getIntPtrConstant(Idx++));
+ DAG.getConstant(Idx++, TLI.getVectorIdxTy()));
}
return DAG.getNode(ISD::BITCAST, dl, VecTy, VecOp);
}
-SDValue DAGTypeLegalizer::GenWidenVectorLoads(SmallVector<SDValue, 16> &LdChain,
+SDValue DAGTypeLegalizer::GenWidenVectorLoads(SmallVectorImpl<SDValue> &LdChain,
LoadSDNode *LD) {
// The strategy assumes that we can efficiently load powers of two widths.
// The routines chops the vector into the largest vector loads with the same
@@ -2645,8 +2666,8 @@ SDValue DAGTypeLegalizer::GenWidenVectorLoads(SmallVector<SDValue, 16> &LdChain,
}
SDValue
-DAGTypeLegalizer::GenWidenVectorExtLoads(SmallVector<SDValue, 16>& LdChain,
- LoadSDNode * LD,
+DAGTypeLegalizer::GenWidenVectorExtLoads(SmallVectorImpl<SDValue> &LdChain,
+ LoadSDNode *LD,
ISD::LoadExtType ExtType) {
// For extension loads, it may not be more efficient to chop up the vector
// and then extended it. Instead, we unroll the load and build a new vector.
@@ -2693,7 +2714,7 @@ DAGTypeLegalizer::GenWidenVectorExtLoads(SmallVector<SDValue, 16>& LdChain,
}
-void DAGTypeLegalizer::GenWidenVectorStores(SmallVector<SDValue, 16>& StChain,
+void DAGTypeLegalizer::GenWidenVectorStores(SmallVectorImpl<SDValue> &StChain,
StoreSDNode *ST) {
// The strategy assumes that we can efficiently store powers of two widths.
// The routines chops the vector into the largest vector stores with the same
@@ -2725,7 +2746,7 @@ void DAGTypeLegalizer::GenWidenVectorStores(SmallVector<SDValue, 16>& StChain,
unsigned NumVTElts = NewVT.getVectorNumElements();
do {
SDValue EOp = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, NewVT, ValOp,
- DAG.getIntPtrConstant(Idx));
+ DAG.getConstant(Idx, TLI.getVectorIdxTy()));
StChain.push_back(DAG.getStore(Chain, dl, EOp, BasePtr,
ST->getPointerInfo().getWithOffset(Offset),
isVolatile, isNonTemporal,
@@ -2745,7 +2766,7 @@ void DAGTypeLegalizer::GenWidenVectorStores(SmallVector<SDValue, 16>& StChain,
Idx = Idx * ValEltWidth / NewVTWidth;
do {
SDValue EOp = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, NewVT, VecOp,
- DAG.getIntPtrConstant(Idx++));
+ DAG.getConstant(Idx++, TLI.getVectorIdxTy()));
StChain.push_back(DAG.getStore(Chain, dl, EOp, BasePtr,
ST->getPointerInfo().getWithOffset(Offset),
isVolatile, isNonTemporal,
@@ -2762,7 +2783,7 @@ void DAGTypeLegalizer::GenWidenVectorStores(SmallVector<SDValue, 16>& StChain,
}
void
-DAGTypeLegalizer::GenWidenVectorTruncStores(SmallVector<SDValue, 16>& StChain,
+DAGTypeLegalizer::GenWidenVectorTruncStores(SmallVectorImpl<SDValue> &StChain,
StoreSDNode *ST) {
// For extension loads, it may not be more efficient to truncate the vector
// and then store it. Instead, we extract each element and then store it.
@@ -2790,7 +2811,7 @@ DAGTypeLegalizer::GenWidenVectorTruncStores(SmallVector<SDValue, 16>& StChain,
unsigned Increment = ValEltVT.getSizeInBits() / 8;
unsigned NumElts = StVT.getVectorNumElements();
SDValue EOp = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, ValEltVT, ValOp,
- DAG.getIntPtrConstant(0));
+ DAG.getConstant(0, TLI.getVectorIdxTy()));
StChain.push_back(DAG.getTruncStore(Chain, dl, EOp, BasePtr,
ST->getPointerInfo(), StEltVT,
isVolatile, isNonTemporal, Align));
@@ -2799,7 +2820,7 @@ DAGTypeLegalizer::GenWidenVectorTruncStores(SmallVector<SDValue, 16>& StChain,
SDValue NewBasePtr = DAG.getNode(ISD::ADD, dl, BasePtr.getValueType(),
BasePtr, DAG.getIntPtrConstant(Offset));
SDValue EOp = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, ValEltVT, ValOp,
- DAG.getIntPtrConstant(0));
+ DAG.getConstant(0, TLI.getVectorIdxTy()));
StChain.push_back(DAG.getTruncStore(Chain, dl, EOp, NewBasePtr,
ST->getPointerInfo().getWithOffset(Offset),
StEltVT, isVolatile, isNonTemporal,
@@ -2836,7 +2857,7 @@ SDValue DAGTypeLegalizer::ModifyToType(SDValue InOp, EVT NVT) {
if (WidenNumElts < InNumElts && InNumElts % WidenNumElts)
return DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, NVT, InOp,
- DAG.getIntPtrConstant(0));
+ DAG.getConstant(0, TLI.getVectorIdxTy()));
// Fall back to extract and build.
SmallVector<SDValue, 16> Ops(WidenNumElts);
@@ -2845,7 +2866,7 @@ SDValue DAGTypeLegalizer::ModifyToType(SDValue InOp, EVT NVT) {
unsigned Idx;
for (Idx = 0; Idx < MinNumElts; ++Idx)
Ops[Idx] = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, EltVT, InOp,
- DAG.getIntPtrConstant(Idx));
+ DAG.getConstant(Idx, TLI.getVectorIdxTy()));
SDValue UndefVal = DAG.getUNDEF(EltVT);
for ( ; Idx < WidenNumElts; ++Idx)
diff --git a/lib/CodeGen/SelectionDAG/ScheduleDAGFast.cpp b/lib/CodeGen/SelectionDAG/ScheduleDAGFast.cpp
index d1f36cb..6c5e0ab 100644
--- a/lib/CodeGen/SelectionDAG/ScheduleDAGFast.cpp
+++ b/lib/CodeGen/SelectionDAG/ScheduleDAGFast.cpp
@@ -102,8 +102,8 @@ private:
void InsertCopiesAndMoveSuccs(SUnit*, unsigned,
const TargetRegisterClass*,
const TargetRegisterClass*,
- SmallVector<SUnit*, 2>&);
- bool DelayForLiveRegsBottomUp(SUnit*, SmallVector<unsigned, 4>&);
+ SmallVectorImpl<SUnit*>&);
+ bool DelayForLiveRegsBottomUp(SUnit*, SmallVectorImpl<unsigned>&);
void ListScheduleBottomUp();
/// forceUnitLatencies - The fast scheduler doesn't care about real latencies.
@@ -387,7 +387,7 @@ SUnit *ScheduleDAGFast::CopyAndMoveSuccessors(SUnit *SU) {
void ScheduleDAGFast::InsertCopiesAndMoveSuccs(SUnit *SU, unsigned Reg,
const TargetRegisterClass *DestRC,
const TargetRegisterClass *SrcRC,
- SmallVector<SUnit*, 2> &Copies) {
+ SmallVectorImpl<SUnit*> &Copies) {
SUnit *CopyFromSU = newSUnit(static_cast<SDNode *>(NULL));
CopyFromSU->CopySrcRC = SrcRC;
CopyFromSU->CopyDstRC = DestRC;
@@ -448,7 +448,7 @@ static EVT getPhysicalRegisterVT(SDNode *N, unsigned Reg,
static bool CheckForLiveRegDef(SUnit *SU, unsigned Reg,
std::vector<SUnit*> &LiveRegDefs,
SmallSet<unsigned, 4> &RegAdded,
- SmallVector<unsigned, 4> &LRegs,
+ SmallVectorImpl<unsigned> &LRegs,
const TargetRegisterInfo *TRI) {
bool Added = false;
for (MCRegAliasIterator AI(Reg, TRI, true); AI.isValid(); ++AI) {
@@ -467,7 +467,7 @@ static bool CheckForLiveRegDef(SUnit *SU, unsigned Reg,
/// If the specific node is the last one that's available to schedule, do
/// whatever is necessary (i.e. backtracking or cloning) to make it possible.
bool ScheduleDAGFast::DelayForLiveRegsBottomUp(SUnit *SU,
- SmallVector<unsigned, 4> &LRegs){
+ SmallVectorImpl<unsigned> &LRegs){
if (NumLiveRegs == 0)
return false;
@@ -567,7 +567,7 @@ void ScheduleDAGFast::ListScheduleBottomUp() {
// "expensive to copy" values to break the dependency. In case even
// that doesn't work, insert cross class copies.
SUnit *TrySU = NotReady[0];
- SmallVector<unsigned, 4> &LRegs = LRegsMap[TrySU];
+ SmallVectorImpl<unsigned> &LRegs = LRegsMap[TrySU];
assert(LRegs.size() == 1 && "Can't handle this yet!");
unsigned Reg = LRegs[0];
SUnit *LRDef = LiveRegDefs[Reg];
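The signature changes in this file are the same SmallVector<T, N>& -> SmallVectorImpl<T>& cleanup applied elsewhere in the patch: a parameter of type SmallVectorImpl accepts a SmallVector of any inline capacity. A small self-contained illustration (collectLiveRegs is a made-up helper, not part of the scheduler):

    #include "llvm/ADT/SmallVector.h"
    using namespace llvm;

    // Accepting SmallVectorImpl<unsigned>& decouples the callee from the
    // caller's inline element count, so callers with different SmallVector
    // sizes bind to the same function without copies or extra instantiations.
    static void collectLiveRegs(SmallVectorImpl<unsigned> &LRegs, unsigned N) {
      for (unsigned Reg = 1; Reg <= N; ++Reg)
        LRegs.push_back(Reg);
    }

    void demo() {
      SmallVector<unsigned, 4> A;
      SmallVector<unsigned, 32> B;
      collectLiveRegs(A, 3);   // both calls use the one definition above
      collectLiveRegs(B, 16);
    }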
diff --git a/lib/CodeGen/SelectionDAG/ScheduleDAGRRList.cpp b/lib/CodeGen/SelectionDAG/ScheduleDAGRRList.cpp
index a7daf87..f5fe168 100644
--- a/lib/CodeGen/SelectionDAG/ScheduleDAGRRList.cpp
+++ b/lib/CodeGen/SelectionDAG/ScheduleDAGRRList.cpp
@@ -229,8 +229,8 @@ private:
void InsertCopiesAndMoveSuccs(SUnit*, unsigned,
const TargetRegisterClass*,
const TargetRegisterClass*,
- SmallVector<SUnit*, 2>&);
- bool DelayForLiveRegsBottomUp(SUnit*, SmallVector<unsigned, 4>&);
+ SmallVectorImpl<SUnit*>&);
+ bool DelayForLiveRegsBottomUp(SUnit*, SmallVectorImpl<unsigned>&);
void releaseInterferences(unsigned Reg = 0);
@@ -1133,9 +1133,9 @@ SUnit *ScheduleDAGRRList::CopyAndMoveSuccessors(SUnit *SU) {
/// InsertCopiesAndMoveSuccs - Insert register copies and move all
/// scheduled successors of the given SUnit to the last copy.
void ScheduleDAGRRList::InsertCopiesAndMoveSuccs(SUnit *SU, unsigned Reg,
- const TargetRegisterClass *DestRC,
- const TargetRegisterClass *SrcRC,
- SmallVector<SUnit*, 2> &Copies) {
+ const TargetRegisterClass *DestRC,
+ const TargetRegisterClass *SrcRC,
+ SmallVectorImpl<SUnit*> &Copies) {
SUnit *CopyFromSU = CreateNewSUnit(NULL);
CopyFromSU->CopySrcRC = SrcRC;
CopyFromSU->CopyDstRC = DestRC;
@@ -1205,7 +1205,7 @@ static EVT getPhysicalRegisterVT(SDNode *N, unsigned Reg,
static void CheckForLiveRegDef(SUnit *SU, unsigned Reg,
std::vector<SUnit*> &LiveRegDefs,
SmallSet<unsigned, 4> &RegAdded,
- SmallVector<unsigned, 4> &LRegs,
+ SmallVectorImpl<unsigned> &LRegs,
const TargetRegisterInfo *TRI) {
for (MCRegAliasIterator AliasI(Reg, TRI, true); AliasI.isValid(); ++AliasI) {
@@ -1227,7 +1227,7 @@ static void CheckForLiveRegDef(SUnit *SU, unsigned Reg,
static void CheckForLiveRegDefMasked(SUnit *SU, const uint32_t *RegMask,
std::vector<SUnit*> &LiveRegDefs,
SmallSet<unsigned, 4> &RegAdded,
- SmallVector<unsigned, 4> &LRegs) {
+ SmallVectorImpl<unsigned> &LRegs) {
// Look at all live registers. Skip Reg0 and the special CallResource.
for (unsigned i = 1, e = LiveRegDefs.size()-1; i != e; ++i) {
if (!LiveRegDefs[i]) continue;
@@ -1252,7 +1252,7 @@ static const uint32_t *getNodeRegMask(const SDNode *N) {
/// If the specific node is the last one that's available to schedule, do
/// whatever is necessary (i.e. backtracking or cloning) to make it possible.
bool ScheduleDAGRRList::
-DelayForLiveRegsBottomUp(SUnit *SU, SmallVector<unsigned, 4> &LRegs) {
+DelayForLiveRegsBottomUp(SUnit *SU, SmallVectorImpl<unsigned> &LRegs) {
if (NumLiveRegs == 0)
return false;
@@ -1331,7 +1331,7 @@ void ScheduleDAGRRList::releaseInterferences(unsigned Reg) {
SUnit *SU = Interferences[i-1];
LRegsMapT::iterator LRegsPos = LRegsMap.find(SU);
if (Reg) {
- SmallVector<unsigned, 4> &LRegs = LRegsPos->second;
+ SmallVectorImpl<unsigned> &LRegs = LRegsPos->second;
if (std::find(LRegs.begin(), LRegs.end(), Reg) == LRegs.end())
continue;
}
@@ -1385,7 +1385,7 @@ SUnit *ScheduleDAGRRList::PickNodeToScheduleBottomUp() {
// to resolve it.
for (unsigned i = 0, e = Interferences.size(); i != e; ++i) {
SUnit *TrySU = Interferences[i];
- SmallVector<unsigned, 4> &LRegs = LRegsMap[TrySU];
+ SmallVectorImpl<unsigned> &LRegs = LRegsMap[TrySU];
// Try unscheduling up to the point where it's safe to schedule
// this node.
@@ -1433,7 +1433,7 @@ SUnit *ScheduleDAGRRList::PickNodeToScheduleBottomUp() {
// insert cross class copies.
// If it's not too expensive, i.e. cost != -1, issue copies.
SUnit *TrySU = Interferences[0];
- SmallVector<unsigned, 4> &LRegs = LRegsMap[TrySU];
+ SmallVectorImpl<unsigned> &LRegs = LRegsMap[TrySU];
assert(LRegs.size() == 1 && "Can't handle this yet!");
unsigned Reg = LRegs[0];
SUnit *LRDef = LiveRegDefs[Reg];
@@ -2401,7 +2401,8 @@ static bool BURRSort(SUnit *left, SUnit *right, RegReductionPQBase *SPQ) {
bool RHasPhysReg = right->hasPhysRegDefs;
if (LHasPhysReg != RHasPhysReg) {
#ifndef NDEBUG
- const char *const PhysRegMsg[] = {" has no physreg"," defines a physreg"};
+ static const char *const PhysRegMsg[] = { " has no physreg",
+ " defines a physreg" };
#endif
DEBUG(dbgs() << " SU (" << left->NodeNum << ") "
<< PhysRegMsg[LHasPhysReg] << " SU(" << right->NodeNum << ") "
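The PhysRegMsg tweak above is a small but common idiom: marking the lookup table static const char *const keeps the two strings in read-only storage and avoids re-initialising a local array of pointers each time this debug path runs. A standalone illustration of the same idiom:

    #include <cstdio>

    static const char *const PhysRegMsg[] = { " has no physreg",
                                              " defines a physreg" };

    int main() {
      bool HasPhysReg = true;
      // A bool index converts to 0 or 1, selecting the matching message.
      std::printf("SU(0)%s\n", PhysRegMsg[HasPhysReg]);
      return 0;
    }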
diff --git a/lib/CodeGen/SelectionDAG/ScheduleDAGSDNodes.cpp b/lib/CodeGen/SelectionDAG/ScheduleDAGSDNodes.cpp
index ad06473..982dcc9 100644
--- a/lib/CodeGen/SelectionDAG/ScheduleDAGSDNodes.cpp
+++ b/lib/CodeGen/SelectionDAG/ScheduleDAGSDNodes.cpp
@@ -700,11 +700,10 @@ namespace {
}
/// ProcessSDDbgValues - Process SDDbgValues associated with this node.
-static void ProcessSDDbgValues(SDNode *N, SelectionDAG *DAG,
- InstrEmitter &Emitter,
- SmallVector<std::pair<unsigned, MachineInstr*>, 32> &Orders,
- DenseMap<SDValue, unsigned> &VRBaseMap,
- unsigned Order) {
+static void
+ProcessSDDbgValues(SDNode *N, SelectionDAG *DAG, InstrEmitter &Emitter,
+ SmallVectorImpl<std::pair<unsigned, MachineInstr*> > &Orders,
+ DenseMap<SDValue, unsigned> &VRBaseMap, unsigned Order) {
if (!N->getHasDebugValue())
return;
@@ -731,11 +730,11 @@ static void ProcessSDDbgValues(SDNode *N, SelectionDAG *DAG,
// ProcessSourceNode - Process nodes with source order numbers. These are added
// to a vector which EmitSchedule uses to determine how to insert dbg_value
// instructions in the right order.
-static void ProcessSourceNode(SDNode *N, SelectionDAG *DAG,
- InstrEmitter &Emitter,
- DenseMap<SDValue, unsigned> &VRBaseMap,
- SmallVector<std::pair<unsigned, MachineInstr*>, 32> &Orders,
- SmallSet<unsigned, 8> &Seen) {
+static void
+ProcessSourceNode(SDNode *N, SelectionDAG *DAG, InstrEmitter &Emitter,
+ DenseMap<SDValue, unsigned> &VRBaseMap,
+ SmallVectorImpl<std::pair<unsigned, MachineInstr*> > &Orders,
+ SmallSet<unsigned, 8> &Seen) {
unsigned Order = N->getIROrder();
if (!Order || !Seen.insert(Order)) {
// Process any valid SDDbgValues even if node does not have any order
diff --git a/lib/CodeGen/SelectionDAG/SelectionDAG.cpp b/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
index affd9e0..bc6063c 100644
--- a/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
+++ b/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
@@ -864,14 +864,13 @@ unsigned SelectionDAG::getEVTAlignment(EVT VT) const {
PointerType::get(Type::getInt8Ty(*getContext()), 0) :
VT.getTypeForEVT(*getContext());
- return TLI.getDataLayout()->getABITypeAlignment(Ty);
+ return TM.getTargetLowering()->getDataLayout()->getABITypeAlignment(Ty);
}
// EntryNode could meaningfully have debug info if we can find it...
SelectionDAG::SelectionDAG(const TargetMachine &tm, CodeGenOpt::Level OL)
- : TM(tm), TLI(*tm.getTargetLowering()), TSI(*tm.getSelectionDAGInfo()),
- TTI(0), OptLevel(OL), EntryNode(ISD::EntryToken, 0, DebugLoc(),
- getVTList(MVT::Other)),
+ : TM(tm), TSI(*tm.getSelectionDAGInfo()), TTI(0), OptLevel(OL),
+ EntryNode(ISD::EntryToken, 0, DebugLoc(), getVTList(MVT::Other)),
Root(getEntryNode()), UpdateListeners(0) {
AllNodes.push_back(&EntryNode);
DbgInfo = new SDDbgInfo();
@@ -972,13 +971,15 @@ SDValue SelectionDAG::getConstant(const ConstantInt &Val, EVT VT, bool isT) {
EVT EltVT = VT.getScalarType();
const ConstantInt *Elt = &Val;
+ const TargetLowering *TLI = TM.getTargetLowering();
+
// In some cases the vector type is legal but the element type is illegal and
// needs to be promoted, for example v8i8 on ARM. In this case, promote the
// inserted value (the type does not need to match the vector element type).
// Any extra bits introduced will be truncated away.
- if (VT.isVector() && TLI.getTypeAction(*getContext(), EltVT) ==
+ if (VT.isVector() && TLI->getTypeAction(*getContext(), EltVT) ==
TargetLowering::TypePromoteInteger) {
- EltVT = TLI.getTypeToTransformTo(*getContext(), EltVT);
+ EltVT = TLI->getTypeToTransformTo(*getContext(), EltVT);
APInt NewVal = Elt->getValue().zext(EltVT.getSizeInBits());
Elt = ConstantInt::get(*getContext(), NewVal);
}
@@ -1011,7 +1012,7 @@ SDValue SelectionDAG::getConstant(const ConstantInt &Val, EVT VT, bool isT) {
}
SDValue SelectionDAG::getIntPtrConstant(uint64_t Val, bool isTarget) {
- return getConstant(Val, TLI.getPointerTy(), isTarget);
+ return getConstant(Val, TM.getTargetLowering()->getPointerTy(), isTarget);
}
@@ -1078,7 +1079,7 @@ SDValue SelectionDAG::getGlobalAddress(const GlobalValue *GV, SDLoc DL,
"Cannot set target flags on target-independent globals");
// Truncate (with sign-extension) the offset value to the pointer size.
- unsigned BitWidth = TLI.getPointerTy().getSizeInBits();
+ unsigned BitWidth = TM.getTargetLowering()->getPointerTy().getSizeInBits();
if (BitWidth < 64)
Offset = SignExtend64(Offset, BitWidth);
@@ -1155,7 +1156,8 @@ SDValue SelectionDAG::getConstantPool(const Constant *C, EVT VT,
assert((TargetFlags == 0 || isTarget) &&
"Cannot set target flags on target-independent globals");
if (Alignment == 0)
- Alignment = TLI.getDataLayout()->getPrefTypeAlignment(C->getType());
+ Alignment =
+ TM.getTargetLowering()->getDataLayout()->getPrefTypeAlignment(C->getType());
unsigned Opc = isTarget ? ISD::TargetConstantPool : ISD::ConstantPool;
FoldingSetNodeID ID;
AddNodeIDNode(ID, Opc, getVTList(VT), 0, 0);
@@ -1182,7 +1184,8 @@ SDValue SelectionDAG::getConstantPool(MachineConstantPoolValue *C, EVT VT,
assert((TargetFlags == 0 || isTarget) &&
"Cannot set target flags on target-independent globals");
if (Alignment == 0)
- Alignment = TLI.getDataLayout()->getPrefTypeAlignment(C->getType());
+ Alignment =
+ TM.getTargetLowering()->getDataLayout()->getPrefTypeAlignment(C->getType());
unsigned Opc = isTarget ? ISD::TargetConstantPool : ISD::ConstantPool;
FoldingSetNodeID ID;
AddNodeIDNode(ID, Opc, getVTList(VT), 0, 0);
@@ -1512,7 +1515,7 @@ SDValue SelectionDAG::getMDNode(const MDNode *MD) {
/// the target's desired shift amount type.
SDValue SelectionDAG::getShiftAmountOperand(EVT LHSTy, SDValue Op) {
EVT OpTy = Op.getValueType();
- EVT ShTy = TLI.getShiftAmountTy(LHSTy);
+ EVT ShTy = TM.getTargetLowering()->getShiftAmountTy(LHSTy);
if (OpTy == ShTy || OpTy.isVector()) return Op;
ISD::NodeType Opcode = OpTy.bitsGT(ShTy) ? ISD::TRUNCATE : ISD::ZERO_EXTEND;
@@ -1525,11 +1528,12 @@ SDValue SelectionDAG::CreateStackTemporary(EVT VT, unsigned minAlign) {
MachineFrameInfo *FrameInfo = getMachineFunction().getFrameInfo();
unsigned ByteSize = VT.getStoreSize();
Type *Ty = VT.getTypeForEVT(*getContext());
+ const TargetLowering *TLI = TM.getTargetLowering();
unsigned StackAlign =
- std::max((unsigned)TLI.getDataLayout()->getPrefTypeAlignment(Ty), minAlign);
+ std::max((unsigned)TLI->getDataLayout()->getPrefTypeAlignment(Ty), minAlign);
int FrameIdx = FrameInfo->CreateStackObject(ByteSize, StackAlign, false);
- return getFrameIndex(FrameIdx, TLI.getPointerTy());
+ return getFrameIndex(FrameIdx, TLI->getPointerTy());
}
/// CreateStackTemporary - Create a stack temporary suitable for holding
@@ -1539,13 +1543,14 @@ SDValue SelectionDAG::CreateStackTemporary(EVT VT1, EVT VT2) {
VT2.getStoreSizeInBits())/8;
Type *Ty1 = VT1.getTypeForEVT(*getContext());
Type *Ty2 = VT2.getTypeForEVT(*getContext());
- const DataLayout *TD = TLI.getDataLayout();
+ const TargetLowering *TLI = TM.getTargetLowering();
+ const DataLayout *TD = TLI->getDataLayout();
unsigned Align = std::max(TD->getPrefTypeAlignment(Ty1),
TD->getPrefTypeAlignment(Ty2));
MachineFrameInfo *FrameInfo = getMachineFunction().getFrameInfo();
int FrameIdx = FrameInfo->CreateStackObject(Bytes, Align, false);
- return getFrameIndex(FrameIdx, TLI.getPointerTy());
+ return getFrameIndex(FrameIdx, TLI->getPointerTy());
}
SDValue SelectionDAG::FoldSetCC(EVT VT, SDValue N1,
@@ -1674,6 +1679,7 @@ bool SelectionDAG::MaskedValueIsZero(SDValue Op, const APInt &Mask,
/// processing.
void SelectionDAG::ComputeMaskedBits(SDValue Op, APInt &KnownZero,
APInt &KnownOne, unsigned Depth) const {
+ const TargetLowering *TLI = TM.getTargetLowering();
unsigned BitWidth = Op.getValueType().getScalarType().getSizeInBits();
KnownZero = KnownOne = APInt(BitWidth, 0); // Don't know anything.
@@ -1796,7 +1802,7 @@ void SelectionDAG::ComputeMaskedBits(SDValue Op, APInt &KnownZero,
// The boolean result conforms to getBooleanContents. Fall through.
case ISD::SETCC:
// If we know the result of a setcc has the top bits zero, use this info.
- if (TLI.getBooleanContents(Op.getValueType().isVector()) ==
+ if (TLI->getBooleanContents(Op.getValueType().isVector()) ==
TargetLowering::ZeroOrOneBooleanContent && BitWidth > 1)
KnownZero |= APInt::getHighBitsSet(BitWidth, BitWidth - 1);
return;
@@ -2108,7 +2114,7 @@ void SelectionDAG::ComputeMaskedBits(SDValue Op, APInt &KnownZero,
case ISD::INTRINSIC_W_CHAIN:
case ISD::INTRINSIC_VOID:
// Allow the target to implement this method for its nodes.
- TLI.computeMaskedBitsForTargetNode(Op, KnownZero, KnownOne, *this, Depth);
+ TLI->computeMaskedBitsForTargetNode(Op, KnownZero, KnownOne, *this, Depth);
return;
}
}
@@ -2119,6 +2125,7 @@ void SelectionDAG::ComputeMaskedBits(SDValue Op, APInt &KnownZero,
/// information. For example, immediately after an "SRA X, 2", we know that
/// the top 3 bits are all equal to each other, so we return 3.
unsigned SelectionDAG::ComputeNumSignBits(SDValue Op, unsigned Depth) const{
+ const TargetLowering *TLI = TM.getTargetLowering();
EVT VT = Op.getValueType();
assert(VT.isInteger() && "Invalid VT!");
unsigned VTBits = VT.getScalarType().getSizeInBits();
@@ -2203,7 +2210,7 @@ unsigned SelectionDAG::ComputeNumSignBits(SDValue Op, unsigned Depth) const{
// The boolean result conforms to getBooleanContents. Fall through.
case ISD::SETCC:
// If setcc returns 0/-1, all bits are sign bits.
- if (TLI.getBooleanContents(Op.getValueType().isVector()) ==
+ if (TLI->getBooleanContents(Op.getValueType().isVector()) ==
TargetLowering::ZeroOrNegativeOneBooleanContent)
return VTBits;
break;
@@ -2304,7 +2311,7 @@ unsigned SelectionDAG::ComputeNumSignBits(SDValue Op, unsigned Depth) const{
Op.getOpcode() == ISD::INTRINSIC_WO_CHAIN ||
Op.getOpcode() == ISD::INTRINSIC_W_CHAIN ||
Op.getOpcode() == ISD::INTRINSIC_VOID) {
- unsigned NumBits = TLI.ComputeNumSignBitsForTargetNode(Op, Depth);
+ unsigned NumBits = TLI->ComputeNumSignBitsForTargetNode(Op, Depth);
if (NumBits > 1) FirstAnswer = std::max(FirstAnswer, NumBits);
}
@@ -3639,7 +3646,7 @@ static SDValue getMemcpyLoadsAndStores(SelectionDAG &DAG, SDLoc dl,
unsigned NewAlign = (unsigned) TLI.getDataLayout()->getABITypeAlignment(Ty);
// Don't promote to an alignment that would require dynamic stack
- // realignment.
+ // realignment.
const TargetRegisterInfo *TRI = MF.getTarget().getRegisterInfo();
if (!TRI->needsStackRealignment(MF))
while (NewAlign > Align &&
@@ -3923,10 +3930,12 @@ SDValue SelectionDAG::getMemcpy(SDValue Chain, SDLoc dl, SDValue Dst,
// beyond the given memory regions. But fixing this isn't easy, and most
// people don't care.
+ const TargetLowering *TLI = TM.getTargetLowering();
+
// Emit a library call.
TargetLowering::ArgListTy Args;
TargetLowering::ArgListEntry Entry;
- Entry.Ty = TLI.getDataLayout()->getIntPtrType(*getContext());
+ Entry.Ty = TLI->getDataLayout()->getIntPtrType(*getContext());
Entry.Node = Dst; Args.push_back(Entry);
Entry.Node = Src; Args.push_back(Entry);
Entry.Node = Size; Args.push_back(Entry);
@@ -3934,13 +3943,13 @@ SDValue SelectionDAG::getMemcpy(SDValue Chain, SDLoc dl, SDValue Dst,
TargetLowering::
CallLoweringInfo CLI(Chain, Type::getVoidTy(*getContext()),
false, false, false, false, 0,
- TLI.getLibcallCallingConv(RTLIB::MEMCPY),
+ TLI->getLibcallCallingConv(RTLIB::MEMCPY),
/*isTailCall=*/false,
/*doesNotReturn=*/false, /*isReturnValueUsed=*/false,
- getExternalSymbol(TLI.getLibcallName(RTLIB::MEMCPY),
- TLI.getPointerTy()),
+ getExternalSymbol(TLI->getLibcallName(RTLIB::MEMCPY),
+ TLI->getPointerTy()),
Args, *this, dl);
- std::pair<SDValue,SDValue> CallResult = TLI.LowerCallTo(CLI);
+ std::pair<SDValue,SDValue> CallResult = TLI->LowerCallTo(CLI);
return CallResult.second;
}
@@ -3979,10 +3988,12 @@ SDValue SelectionDAG::getMemmove(SDValue Chain, SDLoc dl, SDValue Dst,
// FIXME: If the memmove is volatile, lowering it to plain libc memmove may
// not be safe. See memcpy above for more details.
+ const TargetLowering *TLI = TM.getTargetLowering();
+
// Emit a library call.
TargetLowering::ArgListTy Args;
TargetLowering::ArgListEntry Entry;
- Entry.Ty = TLI.getDataLayout()->getIntPtrType(*getContext());
+ Entry.Ty = TLI->getDataLayout()->getIntPtrType(*getContext());
Entry.Node = Dst; Args.push_back(Entry);
Entry.Node = Src; Args.push_back(Entry);
Entry.Node = Size; Args.push_back(Entry);
@@ -3990,13 +4001,13 @@ SDValue SelectionDAG::getMemmove(SDValue Chain, SDLoc dl, SDValue Dst,
TargetLowering::
CallLoweringInfo CLI(Chain, Type::getVoidTy(*getContext()),
false, false, false, false, 0,
- TLI.getLibcallCallingConv(RTLIB::MEMMOVE),
+ TLI->getLibcallCallingConv(RTLIB::MEMMOVE),
/*isTailCall=*/false,
/*doesNotReturn=*/false, /*isReturnValueUsed=*/false,
- getExternalSymbol(TLI.getLibcallName(RTLIB::MEMMOVE),
- TLI.getPointerTy()),
+ getExternalSymbol(TLI->getLibcallName(RTLIB::MEMMOVE),
+ TLI->getPointerTy()),
Args, *this, dl);
- std::pair<SDValue,SDValue> CallResult = TLI.LowerCallTo(CLI);
+ std::pair<SDValue,SDValue> CallResult = TLI->LowerCallTo(CLI);
return CallResult.second;
}
@@ -4032,7 +4043,8 @@ SDValue SelectionDAG::getMemset(SDValue Chain, SDLoc dl, SDValue Dst,
return Result;
// Emit a library call.
- Type *IntPtrTy = TLI.getDataLayout()->getIntPtrType(*getContext());
+ const TargetLowering *TLI = TM.getTargetLowering();
+ Type *IntPtrTy = TLI->getDataLayout()->getIntPtrType(*getContext());
TargetLowering::ArgListTy Args;
TargetLowering::ArgListEntry Entry;
Entry.Node = Dst; Entry.Ty = IntPtrTy;
@@ -4054,13 +4066,13 @@ SDValue SelectionDAG::getMemset(SDValue Chain, SDLoc dl, SDValue Dst,
TargetLowering::
CallLoweringInfo CLI(Chain, Type::getVoidTy(*getContext()),
false, false, false, false, 0,
- TLI.getLibcallCallingConv(RTLIB::MEMSET),
+ TLI->getLibcallCallingConv(RTLIB::MEMSET),
/*isTailCall=*/false,
/*doesNotReturn*/false, /*isReturnValueUsed=*/false,
- getExternalSymbol(TLI.getLibcallName(RTLIB::MEMSET),
- TLI.getPointerTy()),
+ getExternalSymbol(TLI->getLibcallName(RTLIB::MEMSET),
+ TLI->getPointerTy()),
Args, *this, dl);
- std::pair<SDValue,SDValue> CallResult = TLI.LowerCallTo(CLI);
+ std::pair<SDValue,SDValue> CallResult = TLI->LowerCallTo(CLI);
return CallResult.second;
}
@@ -5884,7 +5896,7 @@ void SelectionDAG::TransferDbgValues(SDValue From, SDValue To) {
ClonedDVs.push_back(Clone);
}
}
- for (SmallVector<SDDbgValue *, 2>::iterator I = ClonedDVs.begin(),
+ for (SmallVectorImpl<SDDbgValue *>::iterator I = ClonedDVs.begin(),
E = ClonedDVs.end(); I != E; ++I)
AddDbgValue(*I, ToNode, false);
}
@@ -6065,9 +6077,10 @@ bool SDNode::hasPredecessor(const SDNode *N) const {
return hasPredecessorHelper(N, Visited, Worklist);
}
-bool SDNode::hasPredecessorHelper(const SDNode *N,
- SmallPtrSet<const SDNode *, 32> &Visited,
- SmallVector<const SDNode *, 16> &Worklist) const {
+bool
+SDNode::hasPredecessorHelper(const SDNode *N,
+ SmallPtrSet<const SDNode *, 32> &Visited,
+ SmallVectorImpl<const SDNode *> &Worklist) const {
if (Visited.empty()) {
Worklist.push_back(this);
} else {
@@ -6122,11 +6135,12 @@ SDValue SelectionDAG::UnrollVectorOp(SDNode *N, unsigned ResNE) {
EVT OperandVT = Operand.getValueType();
if (OperandVT.isVector()) {
// A vector operand; extract a single element.
+ const TargetLowering *TLI = TM.getTargetLowering();
EVT OperandEltVT = OperandVT.getVectorElementType();
Operands[j] = getNode(ISD::EXTRACT_VECTOR_ELT, dl,
OperandEltVT,
Operand,
- getConstant(i, TLI.getPointerTy()));
+ getConstant(i, TLI->getVectorIdxTy()));
} else {
// A scalar operand; just use it as is.
Operands[j] = Operand;
@@ -6204,8 +6218,9 @@ bool SelectionDAG::isConsecutiveLoad(LoadSDNode *LD, LoadSDNode *Base,
const GlobalValue *GV2 = NULL;
int64_t Offset1 = 0;
int64_t Offset2 = 0;
- bool isGA1 = TLI.isGAPlusOffset(Loc.getNode(), GV1, Offset1);
- bool isGA2 = TLI.isGAPlusOffset(BaseLoc.getNode(), GV2, Offset2);
+ const TargetLowering *TLI = TM.getTargetLowering();
+ bool isGA1 = TLI->isGAPlusOffset(Loc.getNode(), GV1, Offset1);
+ bool isGA2 = TLI->isGAPlusOffset(BaseLoc.getNode(), GV2, Offset2);
if (isGA1 && isGA2 && GV1 == GV2)
return Offset1 == (Offset2 + Dist*Bytes);
return false;
@@ -6218,11 +6233,12 @@ unsigned SelectionDAG::InferPtrAlignment(SDValue Ptr) const {
// If this is a GlobalAddress + cst, return the alignment.
const GlobalValue *GV;
int64_t GVOffset = 0;
- if (TLI.isGAPlusOffset(Ptr.getNode(), GV, GVOffset)) {
- unsigned PtrWidth = TLI.getPointerTy().getSizeInBits();
+ const TargetLowering *TLI = TM.getTargetLowering();
+ if (TLI->isGAPlusOffset(Ptr.getNode(), GV, GVOffset)) {
+ unsigned PtrWidth = TLI->getPointerTy().getSizeInBits();
APInt KnownZero(PtrWidth, 0), KnownOne(PtrWidth, 0);
llvm::ComputeMaskedBits(const_cast<GlobalValue*>(GV), KnownZero, KnownOne,
- TLI.getDataLayout());
+ TLI->getDataLayout());
unsigned AlignBits = KnownZero.countTrailingOnes();
unsigned Align = AlignBits ? 1 << std::min(31U, AlignBits) : 0;
if (Align)
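
Besides the TLI pointer change, the other pattern that recurs in the hunks above and throughout SelectionDAGBuilder.cpp below is that constants used as vector-element or subvector indices are now created with the target's dedicated vector-index type rather than the pointer type. A minimal before/after sketch, assuming a SelectionDAG &DAG, an SDLoc dl, and locals Vec, EltVT and i like those in the affected hunks:

    const TargetLowering *TLI = TM.getTargetLowering();
    // Before: the index constant was typed as a pointer
    // (getIntPtrConstant / getPointerTy()).
    SDValue OldElt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, EltVT, Vec,
                                 DAG.getConstant(i, TLI->getPointerTy()));
    // After: the index constant uses the target's vector-index type.
    SDValue NewElt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, EltVT, Vec,
                                 DAG.getConstant(i, TLI->getVectorIdxTy()));
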
diff --git a/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp b/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
index 21148ae..b9f4381 100644
--- a/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
+++ b/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
@@ -15,6 +15,7 @@
#include "SelectionDAGBuilder.h"
#include "SDNodeDbgValue.h"
#include "llvm/ADT/BitVector.h"
+#include "llvm/ADT/Optional.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/BranchProbabilityInfo.h"
@@ -280,7 +281,7 @@ static SDValue getCopyFromPartsVector(SelectionDAG &DAG, SDLoc DL,
assert(PartEVT.getVectorNumElements() > ValueVT.getVectorNumElements() &&
"Cannot narrow, it would be a lossy transformation");
return DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, ValueVT, Val,
- DAG.getIntPtrConstant(0));
+ DAG.getConstant(0, TLI.getVectorIdxTy()));
}
// Vector/Vector bitcast.
@@ -489,7 +490,8 @@ static void getCopyToPartsVector(SelectionDAG &DAG, SDLoc DL,
SmallVector<SDValue, 16> Ops;
for (unsigned i = 0, e = ValueVT.getVectorNumElements(); i != e; ++i)
Ops.push_back(DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL,
- ElementVT, Val, DAG.getIntPtrConstant(i)));
+ ElementVT, Val, DAG.getConstant(i,
+ TLI.getVectorIdxTy())));
for (unsigned i = ValueVT.getVectorNumElements(),
e = PartVT.getVectorNumElements(); i != e; ++i)
@@ -515,7 +517,7 @@ static void getCopyToPartsVector(SelectionDAG &DAG, SDLoc DL,
assert(ValueVT.getVectorNumElements() == 1 &&
"Only trivial vector-to-scalar conversions should get here!");
Val = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL,
- PartVT, Val, DAG.getIntPtrConstant(0));
+ PartVT, Val, DAG.getConstant(0, TLI.getVectorIdxTy()));
bool Smaller = ValueVT.bitsLE(PartVT);
Val = DAG.getNode((Smaller ? ISD::TRUNCATE : ISD::ANY_EXTEND),
@@ -545,10 +547,12 @@ static void getCopyToPartsVector(SelectionDAG &DAG, SDLoc DL,
if (IntermediateVT.isVector())
Ops[i] = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL,
IntermediateVT, Val,
- DAG.getIntPtrConstant(i * (NumElements / NumIntermediates)));
+ DAG.getConstant(i * (NumElements / NumIntermediates),
+ TLI.getVectorIdxTy()));
else
Ops[i] = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL,
- IntermediateVT, Val, DAG.getIntPtrConstant(i));
+ IntermediateVT, Val,
+ DAG.getConstant(i, TLI.getVectorIdxTy()));
}
// Split the intermediate operands into legal parts.
@@ -717,6 +721,14 @@ SDValue RegsForValue::getCopyFromRegs(SelectionDAG &DAG,
unsigned NumSignBits = LOI->NumSignBits;
unsigned NumZeroBits = LOI->KnownZero.countLeadingOnes();
+ if (NumZeroBits == RegSize) {
+ // The current value is a zero.
+ // Explicitly express that as it would be easier for
+ // optimizations to kick in.
+ Parts[i] = DAG.getConstant(0, RegisterVT);
+ continue;
+ }
+
// FIXME: We capture more information than the dag can represent. For
// now, just use the tightest assertzext/assertsext possible.
bool isSExt = true;
@@ -1008,7 +1020,8 @@ SDValue SelectionDAGBuilder::getValue(const Value *V) {
DenseMap<const Value *, unsigned>::iterator It = FuncInfo.ValueMap.find(V);
if (It != FuncInfo.ValueMap.end()) {
unsigned InReg = It->second;
- RegsForValue RFV(*DAG.getContext(), TLI, InReg, V->getType());
+ RegsForValue RFV(*DAG.getContext(), *TM.getTargetLowering(),
+ InReg, V->getType());
SDValue Chain = DAG.getEntryNode();
N = RFV.getCopyFromRegs(DAG, FuncInfo, getCurSDLoc(), Chain, NULL, V);
resolveDanglingDebugInfo(V, N);
@@ -1039,8 +1052,10 @@ SDValue SelectionDAGBuilder::getNonRegisterValue(const Value *V) {
/// getValueImpl - Helper function for getValue and getNonRegisterValue.
/// Create an SDValue for the given value.
SDValue SelectionDAGBuilder::getValueImpl(const Value *V) {
+ const TargetLowering *TLI = TM.getTargetLowering();
+
if (const Constant *C = dyn_cast<Constant>(V)) {
- EVT VT = TLI.getValueType(V->getType(), true);
+ EVT VT = TLI->getValueType(V->getType(), true);
if (const ConstantInt *CI = dyn_cast<ConstantInt>(C))
return DAG.getConstant(*CI, VT);
@@ -1049,7 +1064,7 @@ SDValue SelectionDAGBuilder::getValueImpl(const Value *V) {
return DAG.getGlobalAddress(GV, getCurSDLoc(), VT);
if (isa<ConstantPointerNull>(C))
- return DAG.getConstant(0, TLI.getPointerTy());
+ return DAG.getConstant(0, TLI->getPointerTy());
if (const ConstantFP *CFP = dyn_cast<ConstantFP>(C))
return DAG.getConstantFP(*CFP, VT);
@@ -1080,7 +1095,7 @@ SDValue SelectionDAGBuilder::getValueImpl(const Value *V) {
return DAG.getMergeValues(&Constants[0], Constants.size(),
getCurSDLoc());
}
-
+
if (const ConstantDataSequential *CDS =
dyn_cast<ConstantDataSequential>(C)) {
SmallVector<SDValue, 4> Ops;
@@ -1103,7 +1118,7 @@ SDValue SelectionDAGBuilder::getValueImpl(const Value *V) {
"Unknown struct or array constant!");
SmallVector<EVT, 4> ValueVTs;
- ComputeValueVTs(TLI, C->getType(), ValueVTs);
+ ComputeValueVTs(*TLI, C->getType(), ValueVTs);
unsigned NumElts = ValueVTs.size();
if (NumElts == 0)
return SDValue(); // empty struct
@@ -1136,7 +1151,7 @@ SDValue SelectionDAGBuilder::getValueImpl(const Value *V) {
Ops.push_back(getValue(CV->getOperand(i)));
} else {
assert(isa<ConstantAggregateZero>(C) && "Unknown vector constant!");
- EVT EltVT = TLI.getValueType(VecTy->getElementType());
+ EVT EltVT = TLI->getValueType(VecTy->getElementType());
SDValue Op;
if (EltVT.isFloatingPoint())
@@ -1157,13 +1172,13 @@ SDValue SelectionDAGBuilder::getValueImpl(const Value *V) {
DenseMap<const AllocaInst*, int>::iterator SI =
FuncInfo.StaticAllocaMap.find(AI);
if (SI != FuncInfo.StaticAllocaMap.end())
- return DAG.getFrameIndex(SI->second, TLI.getPointerTy());
+ return DAG.getFrameIndex(SI->second, TLI->getPointerTy());
}
// If this is an instruction which fast-isel has deferred, select it now.
if (const Instruction *Inst = dyn_cast<Instruction>(V)) {
unsigned InReg = FuncInfo.InitializeRegForValue(Inst);
- RegsForValue RFV(*DAG.getContext(), TLI, InReg, Inst->getType());
+ RegsForValue RFV(*DAG.getContext(), *TLI, InReg, Inst->getType());
SDValue Chain = DAG.getEntryNode();
return RFV.getCopyFromRegs(DAG, FuncInfo, getCurSDLoc(), Chain, NULL, V);
}
@@ -1172,6 +1187,7 @@ SDValue SelectionDAGBuilder::getValueImpl(const Value *V) {
}
void SelectionDAGBuilder::visitRet(const ReturnInst &I) {
+ const TargetLowering *TLI = TM.getTargetLowering();
SDValue Chain = getControlRoot();
SmallVector<ISD::OutputArg, 8> Outs;
SmallVector<SDValue, 8> OutVals;
@@ -1184,7 +1200,7 @@ void SelectionDAGBuilder::visitRet(const ReturnInst &I) {
// Leave Outs empty so that LowerReturn won't try to load return
// registers the usual way.
SmallVector<EVT, 1> PtrValueVTs;
- ComputeValueVTs(TLI, PointerType::getUnqual(F->getReturnType()),
+ ComputeValueVTs(*TLI, PointerType::getUnqual(F->getReturnType()),
PtrValueVTs);
SDValue RetPtr = DAG.getRegister(DemoteReg, PtrValueVTs[0]);
@@ -1192,7 +1208,7 @@ void SelectionDAGBuilder::visitRet(const ReturnInst &I) {
SmallVector<EVT, 4> ValueVTs;
SmallVector<uint64_t, 4> Offsets;
- ComputeValueVTs(TLI, I.getOperand(0)->getType(), ValueVTs, &Offsets);
+ ComputeValueVTs(*TLI, I.getOperand(0)->getType(), ValueVTs, &Offsets);
unsigned NumValues = ValueVTs.size();
SmallVector<SDValue, 4> Chains(NumValues);
@@ -1211,7 +1227,7 @@ void SelectionDAGBuilder::visitRet(const ReturnInst &I) {
MVT::Other, &Chains[0], NumValues);
} else if (I.getNumOperands() != 0) {
SmallVector<EVT, 4> ValueVTs;
- ComputeValueVTs(TLI, I.getOperand(0)->getType(), ValueVTs);
+ ComputeValueVTs(*TLI, I.getOperand(0)->getType(), ValueVTs);
unsigned NumValues = ValueVTs.size();
if (NumValues) {
SDValue RetOp = getValue(I.getOperand(0));
@@ -1229,10 +1245,10 @@ void SelectionDAGBuilder::visitRet(const ReturnInst &I) {
ExtendKind = ISD::ZERO_EXTEND;
if (ExtendKind != ISD::ANY_EXTEND && VT.isInteger())
- VT = TLI.getTypeForExtArgOrReturn(VT.getSimpleVT(), ExtendKind);
+ VT = TLI->getTypeForExtArgOrReturn(VT.getSimpleVT(), ExtendKind);
- unsigned NumParts = TLI.getNumRegisters(*DAG.getContext(), VT);
- MVT PartVT = TLI.getRegisterType(*DAG.getContext(), VT);
+ unsigned NumParts = TLI->getNumRegisters(*DAG.getContext(), VT);
+ MVT PartVT = TLI->getRegisterType(*DAG.getContext(), VT);
SmallVector<SDValue, 4> Parts(NumParts);
getCopyToParts(DAG, getCurSDLoc(),
SDValue(RetOp.getNode(), RetOp.getResNo() + j),
@@ -1262,8 +1278,9 @@ void SelectionDAGBuilder::visitRet(const ReturnInst &I) {
bool isVarArg = DAG.getMachineFunction().getFunction()->isVarArg();
CallingConv::ID CallConv =
DAG.getMachineFunction().getFunction()->getCallingConv();
- Chain = TLI.LowerReturn(Chain, CallConv, isVarArg,
- Outs, OutVals, getCurSDLoc(), DAG);
+ Chain = TM.getTargetLowering()->LowerReturn(Chain, CallConv, isVarArg,
+ Outs, OutVals, getCurSDLoc(),
+ DAG);
// Verify that the target's LowerReturn behaved as expected.
assert(Chain.getNode() && Chain.getValueType() == MVT::Other &&
@@ -1462,7 +1479,7 @@ void SelectionDAGBuilder::FindMergedConditions(const Value *Cond,
/// If we should emit this as a bunch of and/or'd together conditions, return
/// false.
bool
-SelectionDAGBuilder::ShouldEmitAsBranches(const std::vector<CaseBlock> &Cases){
+SelectionDAGBuilder::ShouldEmitAsBranches(const std::vector<CaseBlock> &Cases) {
if (Cases.size() != 2) return true;
// If this is two comparisons of the same values or'd or and'd together, they
@@ -1536,7 +1553,7 @@ void SelectionDAGBuilder::visitBr(const BranchInst &I) {
// jle foo
//
if (const BinaryOperator *BOp = dyn_cast<BinaryOperator>(CondVal)) {
- if (!TLI.isJumpExpensive() &&
+ if (!TM.getTargetLowering()->isJumpExpensive() &&
BOp->hasOneUse() &&
(BOp->getOpcode() == Instruction::And ||
BOp->getOpcode() == Instruction::Or)) {
@@ -1608,7 +1625,7 @@ void SelectionDAGBuilder::visitSwitchCase(CaseBlock &CB,
SDValue CmpOp = getValue(CB.CmpMHS);
EVT VT = CmpOp.getValueType();
-
+
if (cast<ConstantInt>(CB.CmpLHS)->isMinValue(false)) {
Cond = DAG.getSetCC(dl, MVT::i1, CmpOp, DAG.getConstant(High, VT),
ISD::SETULE);
@@ -1659,7 +1676,7 @@ void SelectionDAGBuilder::visitSwitchCase(CaseBlock &CB,
void SelectionDAGBuilder::visitJumpTable(JumpTable &JT) {
// Emit the code for the jump table
assert(JT.Reg != -1U && "Should lower JT Header first!");
- EVT PTy = TLI.getPointerTy();
+ EVT PTy = TM.getTargetLowering()->getPointerTy();
SDValue Index = DAG.getCopyFromReg(getControlRoot(), getCurSDLoc(),
JT.Reg, PTy);
SDValue Table = DAG.getJumpTable(JT.JTI, PTy);
@@ -1687,9 +1704,10 @@ void SelectionDAGBuilder::visitJumpTableHeader(JumpTable &JT,
// can be used as an index into the jump table in a subsequent basic block.
// This value may be smaller or larger than the target's pointer type, and
// therefore require extension or truncating.
- SwitchOp = DAG.getZExtOrTrunc(Sub, getCurSDLoc(), TLI.getPointerTy());
+ const TargetLowering *TLI = TM.getTargetLowering();
+ SwitchOp = DAG.getZExtOrTrunc(Sub, getCurSDLoc(), TLI->getPointerTy());
- unsigned JumpTableReg = FuncInfo.CreateReg(TLI.getPointerTy());
+ unsigned JumpTableReg = FuncInfo.CreateReg(TLI->getPointerTy());
SDValue CopyTo = DAG.getCopyToReg(getControlRoot(), getCurSDLoc(),
JumpTableReg, SwitchOp);
JT.Reg = JumpTableReg;
@@ -1698,8 +1716,8 @@ void SelectionDAGBuilder::visitJumpTableHeader(JumpTable &JT,
// for the switch statement if the value being switched on exceeds the largest
// case in the switch.
SDValue CMP = DAG.getSetCC(getCurSDLoc(),
- TLI.getSetCCResultType(*DAG.getContext(),
- Sub.getValueType()),
+ TLI->getSetCCResultType(*DAG.getContext(),
+ Sub.getValueType()),
Sub,
DAG.getConstant(JTH.Last - JTH.First,VT),
ISD::SETUGT);
@@ -1734,15 +1752,16 @@ void SelectionDAGBuilder::visitBitTestHeader(BitTestBlock &B,
DAG.getConstant(B.First, VT));
// Check range
+ const TargetLowering *TLI = TM.getTargetLowering();
SDValue RangeCmp = DAG.getSetCC(getCurSDLoc(),
- TLI.getSetCCResultType(*DAG.getContext(),
+ TLI->getSetCCResultType(*DAG.getContext(),
Sub.getValueType()),
Sub, DAG.getConstant(B.Range, VT),
ISD::SETUGT);
// Determine the type of the test operands.
bool UsePtrType = false;
- if (!TLI.isTypeLegal(VT))
+ if (!TLI->isTypeLegal(VT))
UsePtrType = true;
else {
for (unsigned i = 0, e = B.Cases.size(); i != e; ++i)
@@ -1754,7 +1773,7 @@ void SelectionDAGBuilder::visitBitTestHeader(BitTestBlock &B,
}
}
if (UsePtrType) {
- VT = TLI.getPointerTy();
+ VT = TLI->getPointerTy();
Sub = DAG.getZExtOrTrunc(Sub, getCurSDLoc(), VT);
}
@@ -1798,18 +1817,19 @@ void SelectionDAGBuilder::visitBitTestCase(BitTestBlock &BB,
Reg, VT);
SDValue Cmp;
unsigned PopCount = CountPopulation_64(B.Mask);
+ const TargetLowering *TLI = TM.getTargetLowering();
if (PopCount == 1) {
// Testing for a single bit; just compare the shift count with what it
// would need to be to shift a 1 bit in that position.
Cmp = DAG.getSetCC(getCurSDLoc(),
- TLI.getSetCCResultType(*DAG.getContext(), VT),
+ TLI->getSetCCResultType(*DAG.getContext(), VT),
ShiftOp,
DAG.getConstant(countTrailingZeros(B.Mask), VT),
ISD::SETEQ);
} else if (PopCount == BB.Range) {
// There is only one zero bit in the range, test for it directly.
Cmp = DAG.getSetCC(getCurSDLoc(),
- TLI.getSetCCResultType(*DAG.getContext(), VT),
+ TLI->getSetCCResultType(*DAG.getContext(), VT),
ShiftOp,
DAG.getConstant(CountTrailingOnes_64(B.Mask), VT),
ISD::SETNE);
@@ -1822,7 +1842,7 @@ void SelectionDAGBuilder::visitBitTestCase(BitTestBlock &BB,
SDValue AndOp = DAG.getNode(ISD::AND, getCurSDLoc(),
VT, SwitchVal, DAG.getConstant(B.Mask, VT));
Cmp = DAG.getSetCC(getCurSDLoc(),
- TLI.getSetCCResultType(*DAG.getContext(), VT),
+ TLI->getSetCCResultType(*DAG.getContext(), VT),
AndOp, DAG.getConstant(0, VT),
ISD::SETNE);
}
@@ -1895,39 +1915,32 @@ void SelectionDAGBuilder::visitLandingPad(const LandingPadInst &LP) {
// If there aren't registers to copy the values into (e.g., during SjLj
// exceptions), then don't bother to create these DAG nodes.
- if (TLI.getExceptionPointerRegister() == 0 &&
- TLI.getExceptionSelectorRegister() == 0)
+ const TargetLowering *TLI = TM.getTargetLowering();
+ if (TLI->getExceptionPointerRegister() == 0 &&
+ TLI->getExceptionSelectorRegister() == 0)
return;
SmallVector<EVT, 2> ValueVTs;
- ComputeValueVTs(TLI, LP.getType(), ValueVTs);
+ ComputeValueVTs(*TLI, LP.getType(), ValueVTs);
+ assert(ValueVTs.size() == 2 && "Only two-valued landingpads are supported");
- // Insert the EXCEPTIONADDR instruction.
- assert(FuncInfo.MBB->isLandingPad() &&
- "Call to eh.exception not in landing pad!");
- SDVTList VTs = DAG.getVTList(TLI.getPointerTy(), MVT::Other);
+ // Get the two live-in registers as SDValues. The physregs have already been
+ // copied into virtual registers.
SDValue Ops[2];
- Ops[0] = DAG.getRoot();
- SDValue Op1 = DAG.getNode(ISD::EXCEPTIONADDR, getCurSDLoc(), VTs, Ops, 1);
- SDValue Chain = Op1.getValue(1);
-
- // Insert the EHSELECTION instruction.
- VTs = DAG.getVTList(TLI.getPointerTy(), MVT::Other);
- Ops[0] = Op1;
- Ops[1] = Chain;
- SDValue Op2 = DAG.getNode(ISD::EHSELECTION, getCurSDLoc(), VTs, Ops, 2);
- Chain = Op2.getValue(1);
- Op2 = DAG.getSExtOrTrunc(Op2, getCurSDLoc(), MVT::i32);
-
- Ops[0] = Op1;
- Ops[1] = Op2;
+ Ops[0] = DAG.getZExtOrTrunc(
+ DAG.getCopyFromReg(DAG.getEntryNode(), getCurSDLoc(),
+ FuncInfo.ExceptionPointerVirtReg, TLI->getPointerTy()),
+ getCurSDLoc(), ValueVTs[0]);
+ Ops[1] = DAG.getZExtOrTrunc(
+ DAG.getCopyFromReg(DAG.getEntryNode(), getCurSDLoc(),
+ FuncInfo.ExceptionSelectorVirtReg, TLI->getPointerTy()),
+ getCurSDLoc(), ValueVTs[1]);
+
+ // Merge into one.
SDValue Res = DAG.getNode(ISD::MERGE_VALUES, getCurSDLoc(),
DAG.getVTList(&ValueVTs[0], ValueVTs.size()),
&Ops[0], 2);
-
- std::pair<SDValue, SDValue> RetPair = std::make_pair(Res, Chain);
- setValue(&LP, RetPair.first);
- DAG.setRoot(RetPair.second);
+ setValue(&LP, Res);
}
/// handleSmallSwitchCaseRange - Emit a series of specific tests (suitable for
@@ -2029,12 +2042,11 @@ bool SelectionDAGBuilder::handleSmallSwitchRange(CaseRec& CR,
// The last case block won't fall through into 'NextBlock' if we emit the
// branches in this order. See if rearranging a case value would help.
// We start at the bottom as it's the case with the least weight.
- for (Case *I = &*(CR.Range.second-2), *E = &*CR.Range.first-1; I != E; --I){
+ for (Case *I = &*(CR.Range.second-2), *E = &*CR.Range.first-1; I != E; --I)
if (I->BB == NextBlock) {
std::swap(*I, BackCase);
break;
}
- }
}
// Create a CaseBlock record representing a conditional branch to
@@ -2061,7 +2073,7 @@ bool SelectionDAGBuilder::handleSmallSwitchRange(CaseRec& CR,
CC = ISD::SETEQ;
LHS = SV; RHS = I->High; MHS = NULL;
} else {
- CC = ISD::SETCC_INVALID;
+ CC = ISD::SETCC_INVALID;
LHS = I->Low; MHS = SV; RHS = I->High;
}
@@ -2115,7 +2127,8 @@ bool SelectionDAGBuilder::handleJTSwitchCase(CaseRec &CR,
for (CaseItr I = CR.Range.first, E = CR.Range.second; I != E; ++I)
TSize += I->size();
- if (!areJTsAllowed(TLI) || TSize.ult(TLI.getMinimumJumpTableEntries()))
+ const TargetLowering *TLI = TM.getTargetLowering();
+ if (!areJTsAllowed(*TLI) || TSize.ult(TLI->getMinimumJumpTableEntries()))
return false;
APInt Range = ComputeRange(First, Last);
@@ -2176,7 +2189,7 @@ bool SelectionDAGBuilder::handleJTSwitchCase(CaseRec &CR,
for (CaseItr I = CR.Range.first, E = CR.Range.second; I != E; ++I) {
DenseMap<MachineBasicBlock*, uint32_t>::iterator Itr =
DestWeights.find(I->BB);
- if (Itr != DestWeights.end())
+ if (Itr != DestWeights.end())
Itr->second += I->ExtraWeight;
else
DestWeights[I->BB] = I->ExtraWeight;
@@ -2196,7 +2209,7 @@ bool SelectionDAGBuilder::handleJTSwitchCase(CaseRec &CR,
}
// Create a jump table index for this jump table.
- unsigned JTEncoding = TLI.getJumpTableEncoding();
+ unsigned JTEncoding = TLI->getJumpTableEncoding();
unsigned JTI = CurMF->getOrCreateJumpTableInfo(JTEncoding)
->createJumpTableIndex(DestBBs);
@@ -2216,8 +2229,8 @@ bool SelectionDAGBuilder::handleJTSwitchCase(CaseRec &CR,
bool SelectionDAGBuilder::handleBTSplitSwitchCase(CaseRec& CR,
CaseRecVector& WorkList,
const Value* SV,
- MachineBasicBlock *Default,
- MachineBasicBlock *SwitchBB) {
+ MachineBasicBlock* Default,
+ MachineBasicBlock* SwitchBB) {
// Get the MachineFunction which holds the current MBB. This is used when
// inserting any additional MBBs necessary to represent the switch.
MachineFunction *CurMF = FuncInfo.MF;
@@ -2281,7 +2294,9 @@ bool SelectionDAGBuilder::handleBTSplitSwitchCase(CaseRec& CR,
LSize += J->size();
RSize -= J->size();
}
- if (areJTsAllowed(TLI)) {
+
+ const TargetLowering *TLI = TM.getTargetLowering();
+ if (areJTsAllowed(*TLI)) {
// If our case is dense we *really* should handle it earlier!
assert((FMetric > 0) && "Should handle dense range earlier!");
} else {
@@ -2350,8 +2365,9 @@ bool SelectionDAGBuilder::handleBitTestsSwitchCase(CaseRec& CR,
CaseRecVector& WorkList,
const Value* SV,
MachineBasicBlock* Default,
- MachineBasicBlock *SwitchBB){
- EVT PTy = TLI.getPointerTy();
+ MachineBasicBlock* SwitchBB) {
+ const TargetLowering *TLI = TM.getTargetLowering();
+ EVT PTy = TLI->getPointerTy();
unsigned IntPtrBits = PTy.getSizeInBits();
Case& FrontCase = *CR.Range.first;
@@ -2362,7 +2378,7 @@ bool SelectionDAGBuilder::handleBitTestsSwitchCase(CaseRec& CR,
MachineFunction *CurMF = FuncInfo.MF;
// If target does not have legal shift left, do not emit bit tests at all.
- if (!TLI.isOperationLegal(ISD::SHL, TLI.getPointerTy()))
+ if (!TLI->isOperationLegal(ISD::SHL, TLI->getPointerTy()))
return false;
size_t numCmps = 0;
@@ -2480,11 +2496,11 @@ bool SelectionDAGBuilder::handleBitTestsSwitchCase(CaseRec& CR,
/// Clusterify - Transform simple list of Cases into list of CaseRange's
size_t SelectionDAGBuilder::Clusterify(CaseVector& Cases,
const SwitchInst& SI) {
-
+
/// Use a shorter form of declaration, and also
  /// show that we want to use CRSBuilder as Clusterifier.
typedef IntegersSubsetMapping<MachineBasicBlock> Clusterifier;
-
+
Clusterifier TheClusterifier;
BranchProbabilityInfo *BPI = FuncInfo.BPI;
@@ -2494,12 +2510,12 @@ size_t SelectionDAGBuilder::Clusterify(CaseVector& Cases,
const BasicBlock *SuccBB = i.getCaseSuccessor();
MachineBasicBlock *SMBB = FuncInfo.MBBMap[SuccBB];
- TheClusterifier.add(i.getCaseValueEx(), SMBB,
+ TheClusterifier.add(i.getCaseValueEx(), SMBB,
BPI ? BPI->getEdgeWeight(SI.getParent(), i.getSuccessorIndex()) : 0);
}
-
+
TheClusterifier.optimize();
-
+
size_t numCmps = 0;
for (Clusterifier::RangeIterator i = TheClusterifier.begin(),
e = TheClusterifier.end(); i != e; ++i, ++numCmps) {
@@ -2511,7 +2527,7 @@ size_t SelectionDAGBuilder::Clusterify(CaseVector& Cases,
// Changing it to APInt based is a pretty heavy for this commit.
Cases.push_back(Case(C.first.getLow().toConstantInt(),
C.first.getHigh().toConstantInt(), C.second, W));
-
+
if (C.first.getLow() != C.first.getHigh())
// A range counts double, since it requires two compares.
++numCmps;
@@ -2645,7 +2661,7 @@ void SelectionDAGBuilder::visitShift(const User &I, unsigned Opcode) {
SDValue Op1 = getValue(I.getOperand(0));
SDValue Op2 = getValue(I.getOperand(1));
- EVT ShiftTy = TLI.getShiftAmountTy(Op2.getValueType());
+ EVT ShiftTy = TM.getTargetLowering()->getShiftAmountTy(Op2.getValueType());
// Coerce the shift amount to the right type if we can.
if (!I.getType()->isVectorTy() && Op2.getValueType() != ShiftTy) {
@@ -2683,7 +2699,8 @@ void SelectionDAGBuilder::visitSDiv(const User &I) {
if (isa<BinaryOperator>(&I) && cast<BinaryOperator>(&I)->isExact() &&
!isa<ConstantSDNode>(Op1) &&
isa<ConstantSDNode>(Op2) && !cast<ConstantSDNode>(Op2)->isNullValue())
- setValue(&I, TLI.BuildExactSDIV(Op1, Op2, getCurSDLoc(), DAG));
+ setValue(&I, TM.getTargetLowering()->BuildExactSDIV(Op1, Op2,
+ getCurSDLoc(), DAG));
else
setValue(&I, DAG.getNode(ISD::SDIV, getCurSDLoc(), Op1.getValueType(),
Op1, Op2));
@@ -2699,7 +2716,7 @@ void SelectionDAGBuilder::visitICmp(const User &I) {
SDValue Op2 = getValue(I.getOperand(1));
ISD::CondCode Opcode = getICmpCondCode(predicate);
- EVT DestVT = TLI.getValueType(I.getType());
+ EVT DestVT = TM.getTargetLowering()->getValueType(I.getType());
setValue(&I, DAG.getSetCC(getCurSDLoc(), DestVT, Op1, Op2, Opcode));
}
@@ -2714,13 +2731,13 @@ void SelectionDAGBuilder::visitFCmp(const User &I) {
ISD::CondCode Condition = getFCmpCondCode(predicate);
if (TM.Options.NoNaNsFPMath)
Condition = getFCmpCodeWithoutNaN(Condition);
- EVT DestVT = TLI.getValueType(I.getType());
+ EVT DestVT = TM.getTargetLowering()->getValueType(I.getType());
setValue(&I, DAG.getSetCC(getCurSDLoc(), DestVT, Op1, Op2, Condition));
}
void SelectionDAGBuilder::visitSelect(const User &I) {
SmallVector<EVT, 4> ValueVTs;
- ComputeValueVTs(TLI, I.getType(), ValueVTs);
+ ComputeValueVTs(*TM.getTargetLowering(), I.getType(), ValueVTs);
unsigned NumValues = ValueVTs.size();
if (NumValues == 0) return;
@@ -2748,7 +2765,7 @@ void SelectionDAGBuilder::visitSelect(const User &I) {
void SelectionDAGBuilder::visitTrunc(const User &I) {
// TruncInst cannot be a no-op cast because sizeof(src) > sizeof(dest).
SDValue N = getValue(I.getOperand(0));
- EVT DestVT = TLI.getValueType(I.getType());
+ EVT DestVT = TM.getTargetLowering()->getValueType(I.getType());
setValue(&I, DAG.getNode(ISD::TRUNCATE, getCurSDLoc(), DestVT, N));
}
@@ -2756,7 +2773,7 @@ void SelectionDAGBuilder::visitZExt(const User &I) {
// ZExt cannot be a no-op cast because sizeof(src) < sizeof(dest).
// ZExt also can't be a cast to bool for same reason. So, nothing much to do
SDValue N = getValue(I.getOperand(0));
- EVT DestVT = TLI.getValueType(I.getType());
+ EVT DestVT = TM.getTargetLowering()->getValueType(I.getType());
setValue(&I, DAG.getNode(ISD::ZERO_EXTEND, getCurSDLoc(), DestVT, N));
}
@@ -2764,51 +2781,52 @@ void SelectionDAGBuilder::visitSExt(const User &I) {
// SExt cannot be a no-op cast because sizeof(src) < sizeof(dest).
// SExt also can't be a cast to bool for same reason. So, nothing much to do
SDValue N = getValue(I.getOperand(0));
- EVT DestVT = TLI.getValueType(I.getType());
+ EVT DestVT = TM.getTargetLowering()->getValueType(I.getType());
setValue(&I, DAG.getNode(ISD::SIGN_EXTEND, getCurSDLoc(), DestVT, N));
}
void SelectionDAGBuilder::visitFPTrunc(const User &I) {
// FPTrunc is never a no-op cast, no need to check
SDValue N = getValue(I.getOperand(0));
- EVT DestVT = TLI.getValueType(I.getType());
+ const TargetLowering *TLI = TM.getTargetLowering();
+ EVT DestVT = TLI->getValueType(I.getType());
setValue(&I, DAG.getNode(ISD::FP_ROUND, getCurSDLoc(),
DestVT, N,
- DAG.getTargetConstant(0, TLI.getPointerTy())));
+ DAG.getTargetConstant(0, TLI->getPointerTy())));
}
-void SelectionDAGBuilder::visitFPExt(const User &I){
+void SelectionDAGBuilder::visitFPExt(const User &I) {
// FPExt is never a no-op cast, no need to check
SDValue N = getValue(I.getOperand(0));
- EVT DestVT = TLI.getValueType(I.getType());
+ EVT DestVT = TM.getTargetLowering()->getValueType(I.getType());
setValue(&I, DAG.getNode(ISD::FP_EXTEND, getCurSDLoc(), DestVT, N));
}
void SelectionDAGBuilder::visitFPToUI(const User &I) {
// FPToUI is never a no-op cast, no need to check
SDValue N = getValue(I.getOperand(0));
- EVT DestVT = TLI.getValueType(I.getType());
+ EVT DestVT = TM.getTargetLowering()->getValueType(I.getType());
setValue(&I, DAG.getNode(ISD::FP_TO_UINT, getCurSDLoc(), DestVT, N));
}
void SelectionDAGBuilder::visitFPToSI(const User &I) {
// FPToSI is never a no-op cast, no need to check
SDValue N = getValue(I.getOperand(0));
- EVT DestVT = TLI.getValueType(I.getType());
+ EVT DestVT = TM.getTargetLowering()->getValueType(I.getType());
setValue(&I, DAG.getNode(ISD::FP_TO_SINT, getCurSDLoc(), DestVT, N));
}
void SelectionDAGBuilder::visitUIToFP(const User &I) {
// UIToFP is never a no-op cast, no need to check
SDValue N = getValue(I.getOperand(0));
- EVT DestVT = TLI.getValueType(I.getType());
+ EVT DestVT = TM.getTargetLowering()->getValueType(I.getType());
setValue(&I, DAG.getNode(ISD::UINT_TO_FP, getCurSDLoc(), DestVT, N));
}
-void SelectionDAGBuilder::visitSIToFP(const User &I){
+void SelectionDAGBuilder::visitSIToFP(const User &I) {
// SIToFP is never a no-op cast, no need to check
SDValue N = getValue(I.getOperand(0));
- EVT DestVT = TLI.getValueType(I.getType());
+ EVT DestVT = TM.getTargetLowering()->getValueType(I.getType());
setValue(&I, DAG.getNode(ISD::SINT_TO_FP, getCurSDLoc(), DestVT, N));
}
@@ -2816,7 +2834,7 @@ void SelectionDAGBuilder::visitPtrToInt(const User &I) {
// What to do depends on the size of the integer and the size of the pointer.
// We can either truncate, zero extend, or no-op, accordingly.
SDValue N = getValue(I.getOperand(0));
- EVT DestVT = TLI.getValueType(I.getType());
+ EVT DestVT = TM.getTargetLowering()->getValueType(I.getType());
setValue(&I, DAG.getZExtOrTrunc(N, getCurSDLoc(), DestVT));
}
@@ -2824,13 +2842,13 @@ void SelectionDAGBuilder::visitIntToPtr(const User &I) {
// What to do depends on the size of the integer and the size of the pointer.
// We can either truncate, zero extend, or no-op, accordingly.
SDValue N = getValue(I.getOperand(0));
- EVT DestVT = TLI.getValueType(I.getType());
+ EVT DestVT = TM.getTargetLowering()->getValueType(I.getType());
setValue(&I, DAG.getZExtOrTrunc(N, getCurSDLoc(), DestVT));
}
void SelectionDAGBuilder::visitBitCast(const User &I) {
SDValue N = getValue(I.getOperand(0));
- EVT DestVT = TLI.getValueType(I.getType());
+ EVT DestVT = TM.getTargetLowering()->getValueType(I.getType());
// BitCast assures us that source and destination are the same size so this is
// either a BITCAST or a no-op.
@@ -2842,23 +2860,24 @@ void SelectionDAGBuilder::visitBitCast(const User &I) {
}
void SelectionDAGBuilder::visitInsertElement(const User &I) {
+ const TargetLowering &TLI = DAG.getTargetLoweringInfo();
SDValue InVec = getValue(I.getOperand(0));
SDValue InVal = getValue(I.getOperand(1));
- SDValue InIdx = DAG.getNode(ISD::ZERO_EXTEND, getCurSDLoc(),
- TLI.getPointerTy(),
- getValue(I.getOperand(2)));
+ SDValue InIdx = DAG.getSExtOrTrunc(getValue(I.getOperand(2)),
+ getCurSDLoc(), TLI.getVectorIdxTy());
setValue(&I, DAG.getNode(ISD::INSERT_VECTOR_ELT, getCurSDLoc(),
- TLI.getValueType(I.getType()),
+ TM.getTargetLowering()->getValueType(I.getType()),
InVec, InVal, InIdx));
}
void SelectionDAGBuilder::visitExtractElement(const User &I) {
+ const TargetLowering &TLI = DAG.getTargetLoweringInfo();
SDValue InVec = getValue(I.getOperand(0));
- SDValue InIdx = DAG.getNode(ISD::ZERO_EXTEND, getCurSDLoc(),
- TLI.getPointerTy(),
- getValue(I.getOperand(1)));
+ SDValue InIdx = DAG.getSExtOrTrunc(getValue(I.getOperand(1)),
+ getCurSDLoc(), TLI.getVectorIdxTy());
setValue(&I, DAG.getNode(ISD::EXTRACT_VECTOR_ELT, getCurSDLoc(),
- TLI.getValueType(I.getType()), InVec, InIdx));
+ TM.getTargetLowering()->getValueType(I.getType()),
+ InVec, InIdx));
}
// Utility for visitShuffleVector - Return true if every element in Mask,
@@ -2879,8 +2898,9 @@ void SelectionDAGBuilder::visitShuffleVector(const User &I) {
SmallVector<int, 8> Mask;
ShuffleVectorInst::getShuffleMask(cast<Constant>(I.getOperand(2)), Mask);
unsigned MaskNumElts = Mask.size();
-
- EVT VT = TLI.getValueType(I.getType());
+
+ const TargetLowering *TLI = TM.getTargetLowering();
+ EVT VT = TLI->getValueType(I.getType());
EVT SrcVT = Src1.getValueType();
unsigned SrcNumElts = SrcVT.getVectorNumElements();
@@ -3002,7 +3022,8 @@ void SelectionDAGBuilder::visitShuffleVector(const User &I) {
Src = DAG.getUNDEF(VT);
else
Src = DAG.getNode(ISD::EXTRACT_SUBVECTOR, getCurSDLoc(), VT,
- Src, DAG.getIntPtrConstant(StartIdx[Input]));
+ Src, DAG.getConstant(StartIdx[Input],
+ TLI->getVectorIdxTy()));
}
// Calculate new mask.
@@ -3028,7 +3049,7 @@ void SelectionDAGBuilder::visitShuffleVector(const User &I) {
// replacing the shuffle with extract and build vector.
// to insert and build vector.
EVT EltVT = VT.getVectorElementType();
- EVT PtrVT = TLI.getPointerTy();
+ EVT IdxVT = TLI->getVectorIdxTy();
SmallVector<SDValue,8> Ops;
for (unsigned i = 0; i != MaskNumElts; ++i) {
int Idx = Mask[i];
@@ -3041,7 +3062,7 @@ void SelectionDAGBuilder::visitShuffleVector(const User &I) {
if (Idx >= (int)SrcNumElts) Idx -= SrcNumElts;
Res = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, getCurSDLoc(),
- EltVT, Src, DAG.getConstant(Idx, PtrVT));
+ EltVT, Src, DAG.getConstant(Idx, IdxVT));
}
Ops.push_back(Res);
@@ -3061,10 +3082,11 @@ void SelectionDAGBuilder::visitInsertValue(const InsertValueInst &I) {
unsigned LinearIndex = ComputeLinearIndex(AggTy, I.getIndices());
+ const TargetLowering *TLI = TM.getTargetLowering();
SmallVector<EVT, 4> AggValueVTs;
- ComputeValueVTs(TLI, AggTy, AggValueVTs);
+ ComputeValueVTs(*TLI, AggTy, AggValueVTs);
SmallVector<EVT, 4> ValValueVTs;
- ComputeValueVTs(TLI, ValTy, ValValueVTs);
+ ComputeValueVTs(*TLI, ValTy, ValValueVTs);
unsigned NumAggValues = AggValueVTs.size();
unsigned NumValValues = ValValueVTs.size();
@@ -3101,8 +3123,9 @@ void SelectionDAGBuilder::visitExtractValue(const ExtractValueInst &I) {
unsigned LinearIndex = ComputeLinearIndex(AggTy, I.getIndices());
+ const TargetLowering *TLI = TM.getTargetLowering();
SmallVector<EVT, 4> ValValueVTs;
- ComputeValueVTs(TLI, ValTy, ValValueVTs);
+ ComputeValueVTs(*TLI, ValTy, ValValueVTs);
unsigned NumValValues = ValValueVTs.size();
@@ -3150,16 +3173,17 @@ void SelectionDAGBuilder::visitGetElementPtr(const User &I) {
Ty = cast<SequentialType>(Ty)->getElementType();
// If this is a constant subscript, handle it quickly.
+ const TargetLowering *TLI = TM.getTargetLowering();
if (const ConstantInt *CI = dyn_cast<ConstantInt>(Idx)) {
if (CI->isZero()) continue;
uint64_t Offs =
TD->getTypeAllocSize(Ty)*cast<ConstantInt>(CI)->getSExtValue();
SDValue OffsVal;
- EVT PTy = TLI.getPointerTy();
+ EVT PTy = TLI->getPointerTy();
unsigned PtrBits = PTy.getSizeInBits();
if (PtrBits < 64)
OffsVal = DAG.getNode(ISD::TRUNCATE, getCurSDLoc(),
- TLI.getPointerTy(),
+ TLI->getPointerTy(),
DAG.getConstant(Offs, MVT::i64));
else
OffsVal = DAG.getIntPtrConstant(Offs);
@@ -3170,7 +3194,7 @@ void SelectionDAGBuilder::visitGetElementPtr(const User &I) {
}
// N = N + Idx * ElementSize;
- APInt ElementSize = APInt(TLI.getPointerTy().getSizeInBits(),
+ APInt ElementSize = APInt(TLI->getPointerTy().getSizeInBits(),
TD->getTypeAllocSize(Ty));
SDValue IdxN = getValue(Idx);
@@ -3208,14 +3232,15 @@ void SelectionDAGBuilder::visitAlloca(const AllocaInst &I) {
return; // getValue will auto-populate this.
Type *Ty = I.getAllocatedType();
- uint64_t TySize = TLI.getDataLayout()->getTypeAllocSize(Ty);
+ const TargetLowering *TLI = TM.getTargetLowering();
+ uint64_t TySize = TLI->getDataLayout()->getTypeAllocSize(Ty);
unsigned Align =
- std::max((unsigned)TLI.getDataLayout()->getPrefTypeAlignment(Ty),
+ std::max((unsigned)TLI->getDataLayout()->getPrefTypeAlignment(Ty),
I.getAlignment());
SDValue AllocSize = getValue(I.getArraySize());
- EVT IntPtr = TLI.getPointerTy();
+ EVT IntPtr = TLI->getPointerTy();
if (AllocSize.getValueType() != IntPtr)
AllocSize = DAG.getZExtOrTrunc(AllocSize, getCurSDLoc(), IntPtr);
@@ -3271,7 +3296,7 @@ void SelectionDAGBuilder::visitLoad(const LoadInst &I) {
SmallVector<EVT, 4> ValueVTs;
SmallVector<uint64_t, 4> Offsets;
- ComputeValueVTs(TLI, Ty, ValueVTs, &Offsets);
+ ComputeValueVTs(*TM.getTargetLowering(), Ty, ValueVTs, &Offsets);
unsigned NumValues = ValueVTs.size();
if (NumValues == 0)
return;
@@ -3345,7 +3370,7 @@ void SelectionDAGBuilder::visitStore(const StoreInst &I) {
SmallVector<EVT, 4> ValueVTs;
SmallVector<uint64_t, 4> Offsets;
- ComputeValueVTs(TLI, SrcV->getType(), ValueVTs, &Offsets);
+ ComputeValueVTs(*TM.getTargetLowering(), SrcV->getType(), ValueVTs, &Offsets);
unsigned NumValues = ValueVTs.size();
if (NumValues == 0)
return;
@@ -3419,9 +3444,10 @@ void SelectionDAGBuilder::visitAtomicCmpXchg(const AtomicCmpXchgInst &I) {
SDValue InChain = getRoot();
- if (TLI.getInsertFencesForAtomic())
+ const TargetLowering *TLI = TM.getTargetLowering();
+ if (TLI->getInsertFencesForAtomic())
InChain = InsertFenceForAtomic(InChain, Order, Scope, true, dl,
- DAG, TLI);
+ DAG, *TLI);
SDValue L =
DAG.getAtomic(ISD::ATOMIC_CMP_SWAP, dl,
@@ -3431,14 +3457,14 @@ void SelectionDAGBuilder::visitAtomicCmpXchg(const AtomicCmpXchgInst &I) {
getValue(I.getCompareOperand()),
getValue(I.getNewValOperand()),
MachinePointerInfo(I.getPointerOperand()), 0 /* Alignment */,
- TLI.getInsertFencesForAtomic() ? Monotonic : Order,
+ TLI->getInsertFencesForAtomic() ? Monotonic : Order,
Scope);
SDValue OutChain = L.getValue(1);
- if (TLI.getInsertFencesForAtomic())
+ if (TLI->getInsertFencesForAtomic())
OutChain = InsertFenceForAtomic(OutChain, Order, Scope, false, dl,
- DAG, TLI);
+ DAG, *TLI);
setValue(&I, L);
DAG.setRoot(OutChain);
@@ -3466,9 +3492,10 @@ void SelectionDAGBuilder::visitAtomicRMW(const AtomicRMWInst &I) {
SDValue InChain = getRoot();
- if (TLI.getInsertFencesForAtomic())
+ const TargetLowering *TLI = TM.getTargetLowering();
+ if (TLI->getInsertFencesForAtomic())
InChain = InsertFenceForAtomic(InChain, Order, Scope, true, dl,
- DAG, TLI);
+ DAG, *TLI);
SDValue L =
DAG.getAtomic(NT, dl,
@@ -3477,14 +3504,14 @@ void SelectionDAGBuilder::visitAtomicRMW(const AtomicRMWInst &I) {
getValue(I.getPointerOperand()),
getValue(I.getValOperand()),
I.getPointerOperand(), 0 /* Alignment */,
- TLI.getInsertFencesForAtomic() ? Monotonic : Order,
+ TLI->getInsertFencesForAtomic() ? Monotonic : Order,
Scope);
SDValue OutChain = L.getValue(1);
- if (TLI.getInsertFencesForAtomic())
+ if (TLI->getInsertFencesForAtomic())
OutChain = InsertFenceForAtomic(OutChain, Order, Scope, false, dl,
- DAG, TLI);
+ DAG, *TLI);
setValue(&I, L);
DAG.setRoot(OutChain);
@@ -3492,10 +3519,11 @@ void SelectionDAGBuilder::visitAtomicRMW(const AtomicRMWInst &I) {
void SelectionDAGBuilder::visitFence(const FenceInst &I) {
SDLoc dl = getCurSDLoc();
+ const TargetLowering *TLI = TM.getTargetLowering();
SDValue Ops[3];
Ops[0] = getRoot();
- Ops[1] = DAG.getConstant(I.getOrdering(), TLI.getPointerTy());
- Ops[2] = DAG.getConstant(I.getSynchScope(), TLI.getPointerTy());
+ Ops[1] = DAG.getConstant(I.getOrdering(), TLI->getPointerTy());
+ Ops[2] = DAG.getConstant(I.getSynchScope(), TLI->getPointerTy());
DAG.setRoot(DAG.getNode(ISD::ATOMIC_FENCE, dl, MVT::Other, Ops, 3));
}
@@ -3506,7 +3534,8 @@ void SelectionDAGBuilder::visitAtomicLoad(const LoadInst &I) {
SDValue InChain = getRoot();
- EVT VT = TLI.getValueType(I.getType());
+ const TargetLowering *TLI = TM.getTargetLowering();
+ EVT VT = TLI->getValueType(I.getType());
if (I.getAlignment() < VT.getSizeInBits() / 8)
report_fatal_error("Cannot generate unaligned atomic load");
@@ -3515,14 +3544,14 @@ void SelectionDAGBuilder::visitAtomicLoad(const LoadInst &I) {
DAG.getAtomic(ISD::ATOMIC_LOAD, dl, VT, VT, InChain,
getValue(I.getPointerOperand()),
I.getPointerOperand(), I.getAlignment(),
- TLI.getInsertFencesForAtomic() ? Monotonic : Order,
+ TLI->getInsertFencesForAtomic() ? Monotonic : Order,
Scope);
SDValue OutChain = L.getValue(1);
- if (TLI.getInsertFencesForAtomic())
+ if (TLI->getInsertFencesForAtomic())
OutChain = InsertFenceForAtomic(OutChain, Order, Scope, false, dl,
- DAG, TLI);
+ DAG, *TLI);
setValue(&I, L);
DAG.setRoot(OutChain);
@@ -3536,14 +3565,15 @@ void SelectionDAGBuilder::visitAtomicStore(const StoreInst &I) {
SDValue InChain = getRoot();
- EVT VT = TLI.getValueType(I.getValueOperand()->getType());
+ const TargetLowering *TLI = TM.getTargetLowering();
+ EVT VT = TLI->getValueType(I.getValueOperand()->getType());
if (I.getAlignment() < VT.getSizeInBits() / 8)
report_fatal_error("Cannot generate unaligned atomic store");
- if (TLI.getInsertFencesForAtomic())
+ if (TLI->getInsertFencesForAtomic())
InChain = InsertFenceForAtomic(InChain, Order, Scope, true, dl,
- DAG, TLI);
+ DAG, *TLI);
SDValue OutChain =
DAG.getAtomic(ISD::ATOMIC_STORE, dl, VT,
@@ -3551,12 +3581,12 @@ void SelectionDAGBuilder::visitAtomicStore(const StoreInst &I) {
getValue(I.getPointerOperand()),
getValue(I.getValueOperand()),
I.getPointerOperand(), I.getAlignment(),
- TLI.getInsertFencesForAtomic() ? Monotonic : Order,
+ TLI->getInsertFencesForAtomic() ? Monotonic : Order,
Scope);
- if (TLI.getInsertFencesForAtomic())
+ if (TLI->getInsertFencesForAtomic())
OutChain = InsertFenceForAtomic(OutChain, Order, Scope, false, dl,
- DAG, TLI);
+ DAG, *TLI);
DAG.setRoot(OutChain);
}
@@ -3581,12 +3611,13 @@ void SelectionDAGBuilder::visitTargetIntrinsic(const CallInst &I,
// Info is set by getTgtMemInstrinsic
TargetLowering::IntrinsicInfo Info;
- bool IsTgtIntrinsic = TLI.getTgtMemIntrinsic(Info, I, Intrinsic);
+ const TargetLowering *TLI = TM.getTargetLowering();
+ bool IsTgtIntrinsic = TLI->getTgtMemIntrinsic(Info, I, Intrinsic);
// Add the intrinsic ID as an integer operand if it's not a target intrinsic.
if (!IsTgtIntrinsic || Info.opc == ISD::INTRINSIC_VOID ||
Info.opc == ISD::INTRINSIC_W_CHAIN)
- Ops.push_back(DAG.getTargetConstant(Intrinsic, TLI.getPointerTy()));
+ Ops.push_back(DAG.getTargetConstant(Intrinsic, TLI->getPointerTy()));
// Add all operands of the call to the operand list.
for (unsigned i = 0, e = I.getNumArgOperands(); i != e; ++i) {
@@ -3595,7 +3626,7 @@ void SelectionDAGBuilder::visitTargetIntrinsic(const CallInst &I,
}
SmallVector<EVT, 4> ValueVTs;
- ComputeValueVTs(TLI, I.getType(), ValueVTs);
+ ComputeValueVTs(*TLI, I.getType(), ValueVTs);
if (HasChain)
ValueVTs.push_back(MVT::Other);
@@ -3633,7 +3664,7 @@ void SelectionDAGBuilder::visitTargetIntrinsic(const CallInst &I,
if (!I.getType()->isVoidTy()) {
if (VectorType *PTy = dyn_cast<VectorType>(I.getType())) {
- EVT VT = TLI.getValueType(PTy);
+ EVT VT = TLI->getValueType(PTy);
Result = DAG.getNode(ISD::BITCAST, getCurSDLoc(), VT, Result);
}
@@ -4326,7 +4357,8 @@ static unsigned getTruncatedArgReg(const SDValue &N) {
return 0;
const SDValue &Ext = N.getOperand(0);
- if (Ext.getOpcode() == ISD::AssertZext || Ext.getOpcode() == ISD::AssertSext){
+ if (Ext.getOpcode() == ISD::AssertZext ||
+ Ext.getOpcode() == ISD::AssertSext) {
const SDValue &CFR = Ext.getOperand(0);
if (CFR.getOpcode() == ISD::CopyFromReg)
return cast<RegisterSDNode>(CFR.getOperand(1))->getReg();
@@ -4349,20 +4381,19 @@ SelectionDAGBuilder::EmitFuncArgumentDbgValue(const Value *V, MDNode *Variable,
MachineFunction &MF = DAG.getMachineFunction();
const TargetInstrInfo *TII = DAG.getTarget().getInstrInfo();
- const TargetRegisterInfo *TRI = DAG.getTarget().getRegisterInfo();
// Ignore inlined function arguments here.
DIVariable DV(Variable);
if (DV.isInlinedFnArgument(MF.getFunction()))
return false;
- unsigned Reg = 0;
+ Optional<MachineOperand> Op;
// Some arguments' frame index is recorded during argument lowering.
- Offset = FuncInfo.getArgumentFrameIndex(Arg);
- if (Offset)
- Reg = TRI->getFrameRegister(MF);
+ if (int FI = FuncInfo.getArgumentFrameIndex(Arg))
+ Op = MachineOperand::CreateFI(FI);
- if (!Reg && N.getNode()) {
+ if (!Op && N.getNode()) {
+ unsigned Reg;
if (N.getOpcode() == ISD::CopyFromReg)
Reg = cast<RegisterSDNode>(N.getOperand(1))->getReg();
else
@@ -4373,32 +4404,39 @@ SelectionDAGBuilder::EmitFuncArgumentDbgValue(const Value *V, MDNode *Variable,
if (PR)
Reg = PR;
}
+ if (Reg)
+ Op = MachineOperand::CreateReg(Reg, false);
}
- if (!Reg) {
+ if (!Op) {
// Check if ValueMap has reg number.
DenseMap<const Value *, unsigned>::iterator VMI = FuncInfo.ValueMap.find(V);
if (VMI != FuncInfo.ValueMap.end())
- Reg = VMI->second;
+ Op = MachineOperand::CreateReg(VMI->second, false);
}
- if (!Reg && N.getNode()) {
+ if (!Op && N.getNode())
// Check if frame index is available.
if (LoadSDNode *LNode = dyn_cast<LoadSDNode>(N.getNode()))
if (FrameIndexSDNode *FINode =
- dyn_cast<FrameIndexSDNode>(LNode->getBasePtr().getNode())) {
- Reg = TRI->getFrameRegister(MF);
- Offset = FINode->getIndex();
- }
- }
+ dyn_cast<FrameIndexSDNode>(LNode->getBasePtr().getNode()))
+ Op = MachineOperand::CreateFI(FINode->getIndex());
- if (!Reg)
+ if (!Op)
return false;
- MachineInstrBuilder MIB = BuildMI(MF, getCurDebugLoc(),
- TII->get(TargetOpcode::DBG_VALUE))
- .addReg(Reg, RegState::Debug).addImm(Offset).addMetadata(Variable);
- FuncInfo.ArgDbgValues.push_back(&*MIB);
+ // FIXME: This does not handle register-indirect values at offset 0.
+ bool IsIndirect = Offset != 0;
+ if (Op->isReg())
+ FuncInfo.ArgDbgValues.push_back(BuildMI(MF, getCurDebugLoc(),
+ TII->get(TargetOpcode::DBG_VALUE),
+ IsIndirect,
+ Op->getReg(), Offset, Variable));
+ else
+ FuncInfo.ArgDbgValues.push_back(
+ BuildMI(MF, getCurDebugLoc(), TII->get(TargetOpcode::DBG_VALUE))
+ .addOperand(*Op).addImm(Offset).addMetadata(Variable));
+
return true;
}
@@ -4415,6 +4453,7 @@ SelectionDAGBuilder::EmitFuncArgumentDbgValue(const Value *V, MDNode *Variable,
/// otherwise lower it and return null.
const char *
SelectionDAGBuilder::visitIntrinsicCall(const CallInst &I, unsigned Intrinsic) {
+ const TargetLowering *TLI = TM.getTargetLowering();
SDLoc sdl = getCurSDLoc();
DebugLoc dl = getCurDebugLoc();
SDValue Res;
@@ -4428,17 +4467,17 @@ SelectionDAGBuilder::visitIntrinsicCall(const CallInst &I, unsigned Intrinsic) {
case Intrinsic::vaend: visitVAEnd(I); return 0;
case Intrinsic::vacopy: visitVACopy(I); return 0;
case Intrinsic::returnaddress:
- setValue(&I, DAG.getNode(ISD::RETURNADDR, sdl, TLI.getPointerTy(),
+ setValue(&I, DAG.getNode(ISD::RETURNADDR, sdl, TLI->getPointerTy(),
getValue(I.getArgOperand(0))));
return 0;
case Intrinsic::frameaddress:
- setValue(&I, DAG.getNode(ISD::FRAMEADDR, sdl, TLI.getPointerTy(),
+ setValue(&I, DAG.getNode(ISD::FRAMEADDR, sdl, TLI->getPointerTy(),
getValue(I.getArgOperand(0))));
return 0;
case Intrinsic::setjmp:
- return &"_setjmp"[!TLI.usesUnderscoreSetJmp()];
+ return &"_setjmp"[!TLI->usesUnderscoreSetJmp()];
case Intrinsic::longjmp:
- return &"_longjmp"[!TLI.usesUnderscoreLongJmp()];
+ return &"_longjmp"[!TLI->usesUnderscoreLongJmp()];
case Intrinsic::memcpy: {
// Assert for address < 256 since we support only user defined address
// spaces.
@@ -4500,7 +4539,10 @@ SelectionDAGBuilder::visitIntrinsicCall(const CallInst &I, unsigned Intrinsic) {
const DbgDeclareInst &DI = cast<DbgDeclareInst>(I);
MDNode *Variable = DI.getVariable();
const Value *Address = DI.getAddress();
- if (!Address || !DIVariable(Variable).Verify()) {
+ DIVariable DIVar(Variable);
+ assert((!DIVar || DIVar.isVariable()) &&
+ "Variable in DbgDeclareInst should be either null or a DIVariable.");
+ if (!Address || !DIVar) {
DEBUG(dbgs() << "Dropping debug info for " << DI << "\n");
return 0;
}
@@ -4575,7 +4617,10 @@ SelectionDAGBuilder::visitIntrinsicCall(const CallInst &I, unsigned Intrinsic) {
}
case Intrinsic::dbg_value: {
const DbgValueInst &DI = cast<DbgValueInst>(I);
- if (!DIVariable(DI.getVariable()).Verify())
+ DIVariable DIVar(DI.getVariable());
+ assert((!DIVar || DIVar.isVariable()) &&
+ "Variable in DbgValueInst should be either null or a DIVariable.");
+ if (!DIVar)
return 0;
MDNode *Variable = DI.getVariable();
@@ -4658,16 +4703,16 @@ SelectionDAGBuilder::visitIntrinsicCall(const CallInst &I, unsigned Intrinsic) {
return 0;
case Intrinsic::eh_dwarf_cfa: {
SDValue CfaArg = DAG.getSExtOrTrunc(getValue(I.getArgOperand(0)), sdl,
- TLI.getPointerTy());
+ TLI->getPointerTy());
SDValue Offset = DAG.getNode(ISD::ADD, sdl,
- TLI.getPointerTy(),
+ TLI->getPointerTy(),
DAG.getNode(ISD::FRAME_TO_ARGS_OFFSET, sdl,
- TLI.getPointerTy()),
+ TLI->getPointerTy()),
CfaArg);
SDValue FA = DAG.getNode(ISD::FRAMEADDR, sdl,
- TLI.getPointerTy(),
- DAG.getConstant(0, TLI.getPointerTy()));
- setValue(&I, DAG.getNode(ISD::ADD, sdl, TLI.getPointerTy(),
+ TLI->getPointerTy(),
+ DAG.getConstant(0, TLI->getPointerTy()));
+ setValue(&I, DAG.getNode(ISD::ADD, sdl, TLI->getPointerTy(),
FA, Offset));
return 0;
}
@@ -4757,7 +4802,7 @@ SelectionDAGBuilder::visitIntrinsicCall(const CallInst &I, unsigned Intrinsic) {
ShOps[0] = ShAmt;
ShOps[1] = DAG.getConstant(0, MVT::i32);
ShAmt = DAG.getNode(ISD::BUILD_VECTOR, sdl, ShAmtVT, &ShOps[0], 2);
- EVT DestVT = TLI.getValueType(I.getType());
+ EVT DestVT = TLI->getValueType(I.getType());
ShAmt = DAG.getNode(ISD::BITCAST, sdl, DestVT, ShAmt);
Res = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, sdl, DestVT,
DAG.getConstant(NewIntrinsic, MVT::i32),
@@ -4769,14 +4814,14 @@ SelectionDAGBuilder::visitIntrinsicCall(const CallInst &I, unsigned Intrinsic) {
case Intrinsic::x86_avx_vinsertf128_ps_256:
case Intrinsic::x86_avx_vinsertf128_si_256:
case Intrinsic::x86_avx2_vinserti128: {
- EVT DestVT = TLI.getValueType(I.getType());
- EVT ElVT = TLI.getValueType(I.getArgOperand(1)->getType());
+ EVT DestVT = TLI->getValueType(I.getType());
+ EVT ElVT = TLI->getValueType(I.getArgOperand(1)->getType());
uint64_t Idx = (cast<ConstantInt>(I.getArgOperand(2))->getZExtValue() & 1) *
ElVT.getVectorNumElements();
Res = DAG.getNode(ISD::INSERT_SUBVECTOR, sdl, DestVT,
getValue(I.getArgOperand(0)),
getValue(I.getArgOperand(1)),
- DAG.getIntPtrConstant(Idx));
+ DAG.getConstant(Idx, TLI->getVectorIdxTy()));
setValue(&I, Res);
return 0;
}
@@ -4784,12 +4829,12 @@ SelectionDAGBuilder::visitIntrinsicCall(const CallInst &I, unsigned Intrinsic) {
case Intrinsic::x86_avx_vextractf128_ps_256:
case Intrinsic::x86_avx_vextractf128_si_256:
case Intrinsic::x86_avx2_vextracti128: {
- EVT DestVT = TLI.getValueType(I.getType());
+ EVT DestVT = TLI->getValueType(I.getType());
uint64_t Idx = (cast<ConstantInt>(I.getArgOperand(1))->getZExtValue() & 1) *
DestVT.getVectorNumElements();
Res = DAG.getNode(ISD::EXTRACT_SUBVECTOR, sdl, DestVT,
getValue(I.getArgOperand(0)),
- DAG.getIntPtrConstant(Idx));
+ DAG.getConstant(Idx, TLI->getVectorIdxTy()));
setValue(&I, Res);
return 0;
}
@@ -4815,7 +4860,7 @@ SelectionDAGBuilder::visitIntrinsicCall(const CallInst &I, unsigned Intrinsic) {
case Intrinsic::convertus: Code = ISD::CVT_US; break;
case Intrinsic::convertuu: Code = ISD::CVT_UU; break;
}
- EVT DestVT = TLI.getValueType(I.getType());
+ EVT DestVT = TLI->getValueType(I.getType());
const Value *Op1 = I.getArgOperand(0);
Res = DAG.getConvertRndSat(DestVT, sdl, getValue(Op1),
DAG.getValueType(DestVT),
@@ -4831,23 +4876,23 @@ SelectionDAGBuilder::visitIntrinsicCall(const CallInst &I, unsigned Intrinsic) {
getValue(I.getArgOperand(1)), DAG));
return 0;
case Intrinsic::log:
- setValue(&I, expandLog(sdl, getValue(I.getArgOperand(0)), DAG, TLI));
+ setValue(&I, expandLog(sdl, getValue(I.getArgOperand(0)), DAG, *TLI));
return 0;
case Intrinsic::log2:
- setValue(&I, expandLog2(sdl, getValue(I.getArgOperand(0)), DAG, TLI));
+ setValue(&I, expandLog2(sdl, getValue(I.getArgOperand(0)), DAG, *TLI));
return 0;
case Intrinsic::log10:
- setValue(&I, expandLog10(sdl, getValue(I.getArgOperand(0)), DAG, TLI));
+ setValue(&I, expandLog10(sdl, getValue(I.getArgOperand(0)), DAG, *TLI));
return 0;
case Intrinsic::exp:
- setValue(&I, expandExp(sdl, getValue(I.getArgOperand(0)), DAG, TLI));
+ setValue(&I, expandExp(sdl, getValue(I.getArgOperand(0)), DAG, *TLI));
return 0;
case Intrinsic::exp2:
- setValue(&I, expandExp2(sdl, getValue(I.getArgOperand(0)), DAG, TLI));
+ setValue(&I, expandExp2(sdl, getValue(I.getArgOperand(0)), DAG, *TLI));
return 0;
case Intrinsic::pow:
setValue(&I, expandPow(sdl, getValue(I.getArgOperand(0)),
- getValue(I.getArgOperand(1)), DAG, TLI));
+ getValue(I.getArgOperand(1)), DAG, *TLI));
return 0;
case Intrinsic::sqrt:
case Intrinsic::fabs:
@@ -4885,9 +4930,9 @@ SelectionDAGBuilder::visitIntrinsicCall(const CallInst &I, unsigned Intrinsic) {
getValue(I.getArgOperand(2))));
return 0;
case Intrinsic::fmuladd: {
- EVT VT = TLI.getValueType(I.getType());
+ EVT VT = TLI->getValueType(I.getType());
if (TM.Options.AllowFPOpFusion != FPOpFusion::Strict &&
- TLI.isFMAFasterThanMulAndAdd(VT)){
+ TLI->isFMAFasterThanFMulAndFAdd(VT)) {
setValue(&I, DAG.getNode(ISD::FMA, sdl,
getValue(I.getArgOperand(0)).getValueType(),
getValue(I.getArgOperand(0)),
@@ -4958,7 +5003,7 @@ SelectionDAGBuilder::visitIntrinsicCall(const CallInst &I, unsigned Intrinsic) {
case Intrinsic::stacksave: {
SDValue Op = getRoot();
Res = DAG.getNode(ISD::STACKSAVE, sdl,
- DAG.getVTList(TLI.getPointerTy(), MVT::Other), &Op, 1);
+ DAG.getVTList(TLI->getPointerTy(), MVT::Other), &Op, 1);
setValue(&I, Res);
DAG.setRoot(Res.getValue(1));
return 0;
@@ -4972,7 +5017,7 @@ SelectionDAGBuilder::visitIntrinsicCall(const CallInst &I, unsigned Intrinsic) {
// Emit code into the DAG to store the stack guard onto the stack.
MachineFunction &MF = DAG.getMachineFunction();
MachineFrameInfo *MFI = MF.getFrameInfo();
- EVT PtrTy = TLI.getPointerTy();
+ EVT PtrTy = TLI->getPointerTy();
SDValue Src = getValue(I.getArgOperand(0)); // The guard's value.
AllocaInst *Slot = cast<AllocaInst>(I.getArgOperand(1));
@@ -5034,7 +5079,7 @@ SelectionDAGBuilder::visitIntrinsicCall(const CallInst &I, unsigned Intrinsic) {
}
case Intrinsic::adjust_trampoline: {
setValue(&I, DAG.getNode(ISD::ADJUST_TRAMPOLINE, sdl,
- TLI.getPointerTy(),
+ TLI->getPointerTy(),
getValue(I.getArgOperand(0))));
return 0;
}
@@ -5064,7 +5109,7 @@ SelectionDAGBuilder::visitIntrinsicCall(const CallInst &I, unsigned Intrinsic) {
case Intrinsic::trap: {
StringRef TrapFuncName = TM.Options.getTrapFunctionName();
if (TrapFuncName.empty()) {
- ISD::NodeType Op = (Intrinsic == Intrinsic::trap) ?
+ ISD::NodeType Op = (Intrinsic == Intrinsic::trap) ?
ISD::TRAP : ISD::DEBUGTRAP;
DAG.setRoot(DAG.getNode(Op, sdl,MVT::Other, getRoot()));
return 0;
@@ -5075,9 +5120,10 @@ SelectionDAGBuilder::visitIntrinsicCall(const CallInst &I, unsigned Intrinsic) {
false, false, false, false, 0, CallingConv::C,
/*isTailCall=*/false,
/*doesNotRet=*/false, /*isReturnValueUsed=*/true,
- DAG.getExternalSymbol(TrapFuncName.data(), TLI.getPointerTy()),
+ DAG.getExternalSymbol(TrapFuncName.data(),
+ TLI->getPointerTy()),
Args, DAG, sdl);
- std::pair<SDValue, SDValue> Result = TLI.LowerCallTo(CLI);
+ std::pair<SDValue, SDValue> Result = TLI->LowerCallTo(CLI);
DAG.setRoot(Result.second);
return 0;
}
@@ -5134,8 +5180,8 @@ SelectionDAGBuilder::visitIntrinsicCall(const CallInst &I, unsigned Intrinsic) {
SmallVector<Value *, 4> Allocas;
GetUnderlyingObjects(I.getArgOperand(1), Allocas, TD);
- for (SmallVector<Value*, 4>::iterator Object = Allocas.begin(),
- E = Allocas.end(); Object != E; ++Object) {
+ for (SmallVectorImpl<Value*>::iterator Object = Allocas.begin(),
+ E = Allocas.end(); Object != E; ++Object) {
AllocaInst *LifetimeObject = dyn_cast_or_null<AllocaInst>(*Object);
// Could not find an Alloca.
@@ -5146,7 +5192,7 @@ SelectionDAGBuilder::visitIntrinsicCall(const CallInst &I, unsigned Intrinsic) {
SDValue Ops[2];
Ops[0] = getRoot();
- Ops[1] = DAG.getFrameIndex(FI, TLI.getPointerTy(), true);
+ Ops[1] = DAG.getFrameIndex(FI, TLI->getPointerTy(), true);
unsigned Opcode = (IsStart ? ISD::LIFETIME_START : ISD::LIFETIME_END);
Res = DAG.getNode(Opcode, sdl, MVT::Other, Ops, 2);
@@ -5156,7 +5202,7 @@ SelectionDAGBuilder::visitIntrinsicCall(const CallInst &I, unsigned Intrinsic) {
}
case Intrinsic::invariant_start:
// Discard region information.
- setValue(&I, DAG.getUNDEF(TLI.getPointerTy()));
+ setValue(&I, DAG.getUNDEF(TLI->getPointerTy()));
return 0;
case Intrinsic::invariant_end:
// Discard region information.
@@ -5182,26 +5228,27 @@ void SelectionDAGBuilder::LowerCallTo(ImmutableCallSite CS, SDValue Callee,
// Check whether the function can return without sret-demotion.
SmallVector<ISD::OutputArg, 4> Outs;
- GetReturnInfo(RetTy, CS.getAttributes(), Outs, TLI);
+ const TargetLowering *TLI = TM.getTargetLowering();
+ GetReturnInfo(RetTy, CS.getAttributes(), Outs, *TLI);
- bool CanLowerReturn = TLI.CanLowerReturn(CS.getCallingConv(),
- DAG.getMachineFunction(),
- FTy->isVarArg(), Outs,
- FTy->getContext());
+ bool CanLowerReturn = TLI->CanLowerReturn(CS.getCallingConv(),
+ DAG.getMachineFunction(),
+ FTy->isVarArg(), Outs,
+ FTy->getContext());
SDValue DemoteStackSlot;
int DemoteStackIdx = -100;
if (!CanLowerReturn) {
- uint64_t TySize = TLI.getDataLayout()->getTypeAllocSize(
+ uint64_t TySize = TLI->getDataLayout()->getTypeAllocSize(
FTy->getReturnType());
- unsigned Align = TLI.getDataLayout()->getPrefTypeAlignment(
+ unsigned Align = TLI->getDataLayout()->getPrefTypeAlignment(
FTy->getReturnType());
MachineFunction &MF = DAG.getMachineFunction();
DemoteStackIdx = MF.getFrameInfo()->CreateStackObject(TySize, Align, false);
Type *StackSlotPtrType = PointerType::getUnqual(FTy->getReturnType());
- DemoteStackSlot = DAG.getFrameIndex(DemoteStackIdx, TLI.getPointerTy());
+ DemoteStackSlot = DAG.getFrameIndex(DemoteStackIdx, TLI->getPointerTy());
Entry.Node = DemoteStackSlot;
Entry.Ty = StackSlotPtrType;
Entry.isSExt = false;
@@ -5262,14 +5309,14 @@ void SelectionDAGBuilder::LowerCallTo(ImmutableCallSite CS, SDValue Callee,
}
// Check if target-independent constraints permit a tail call here.
- // Target-dependent constraints are checked within TLI.LowerCallTo.
- if (isTailCall && !isInTailCallPosition(CS, TLI))
+ // Target-dependent constraints are checked within TLI->LowerCallTo.
+ if (isTailCall && !isInTailCallPosition(CS, *TLI))
isTailCall = false;
TargetLowering::
CallLoweringInfo CLI(getRoot(), RetTy, FTy, isTailCall, Callee, Args, DAG,
getCurSDLoc(), CS);
- std::pair<SDValue,SDValue> Result = TLI.LowerCallTo(CLI);
+ std::pair<SDValue,SDValue> Result = TLI->LowerCallTo(CLI);
assert((isTailCall || Result.second.getNode()) &&
"Non-null chain expected with non-tail call!");
assert((Result.second.getNode() || !Result.first.getNode()) &&
@@ -5282,14 +5329,14 @@ void SelectionDAGBuilder::LowerCallTo(ImmutableCallSite CS, SDValue Callee,
SmallVector<EVT, 1> PVTs;
Type *PtrRetTy = PointerType::getUnqual(FTy->getReturnType());
- ComputeValueVTs(TLI, PtrRetTy, PVTs);
+ ComputeValueVTs(*TLI, PtrRetTy, PVTs);
assert(PVTs.size() == 1 && "Pointers should fit in one register");
EVT PtrVT = PVTs[0];
SmallVector<EVT, 4> RetTys;
SmallVector<uint64_t, 4> Offsets;
RetTy = FTy->getReturnType();
- ComputeValueVTs(TLI, RetTy, RetTys, &Offsets);
+ ComputeValueVTs(*TLI, RetTy, RetTys, &Offsets);
unsigned NumValues = RetTys.size();
SmallVector<SDValue, 4> Values(NumValues);
@@ -5320,6 +5367,10 @@ void SelectionDAGBuilder::LowerCallTo(ImmutableCallSite CS, SDValue Callee,
// As a special case, a null chain means that a tail call has been emitted and
// the DAG root is already updated.
HasTailCall = true;
+
+    // Since there's no actual continuation from this block, nothing can rely
+    // on us setting vregs for them.
+ PendingExports.clear();
} else {
DAG.setRoot(Result.second);
}
@@ -5386,7 +5437,7 @@ static SDValue getMemCmpLoad(const Value *PtrVal, MVT LoadVT,
SDValue LoadVal = Builder.DAG.getLoad(LoadVT, Builder.getCurSDLoc(), Root,
Ptr, MachinePointerInfo(PtrVal),
false /*volatile*/,
- false /*nontemporal*/,
+ false /*nontemporal*/,
false /*isinvariant*/, 1 /* align=1 */);
if (!ConstantMemory)
@@ -5451,10 +5502,11 @@ bool SelectionDAGBuilder::visitMemCmpCall(const CallInst &I) {
// Require that we can find a legal MVT, and only do this if the target
// supports unaligned loads of that type. Expanding into byte loads would
// bloat the code.
+ const TargetLowering *TLI = TM.getTargetLowering();
if (ActuallyDoIt && Size->getZExtValue() > 4) {
// TODO: Handle 5 byte compare as 4-byte + 1 byte.
// TODO: Handle 8 byte compare on x86-32 as two 32-bit loads.
- if (!TLI.isTypeLegal(LoadVT) ||!TLI.allowsUnalignedMemoryAccesses(LoadVT))
+ if (!TLI->isTypeLegal(LoadVT) ||!TLI->allowsUnalignedMemoryAccesses(LoadVT))
ActuallyDoIt = false;
}
@@ -5464,7 +5516,7 @@ bool SelectionDAGBuilder::visitMemCmpCall(const CallInst &I) {
SDValue Res = DAG.getSetCC(getCurSDLoc(), MVT::i1, LHSVal, RHSVal,
ISD::SETNE);
- EVT CallVT = TLI.getValueType(I.getType(), true);
+ EVT CallVT = TLI->getValueType(I.getType(), true);
setValue(&I, DAG.getZExtOrTrunc(Res, getCurSDLoc(), CallVT));
return true;
}
@@ -5622,7 +5674,8 @@ void SelectionDAGBuilder::visitCall(const CallInst &I) {
if (!RenameFn)
Callee = getValue(I.getCalledValue());
else
- Callee = DAG.getExternalSymbol(RenameFn, TLI.getPointerTy());
+ Callee = DAG.getExternalSymbol(RenameFn,
+ TM.getTargetLowering()->getPointerTy());
// Check if we can potentially perform a tail call. More detailed checking is
  // done within LowerCallTo, after more information about the call is known.
@@ -5817,8 +5870,9 @@ void SelectionDAGBuilder::visitInlineAsm(ImmutableCallSite CS) {
/// ConstraintOperands - Information about all of the constraints.
SDISelAsmOperandInfoVector ConstraintOperands;
+ const TargetLowering *TLI = TM.getTargetLowering();
TargetLowering::AsmOperandInfoVector
- TargetConstraints = TLI.ParseConstraints(CS);
+ TargetConstraints = TLI->ParseConstraints(CS);
bool hasMemory = false;
@@ -5843,10 +5897,10 @@ void SelectionDAGBuilder::visitInlineAsm(ImmutableCallSite CS) {
// corresponding argument.
assert(!CS.getType()->isVoidTy() && "Bad inline asm!");
if (StructType *STy = dyn_cast<StructType>(CS.getType())) {
- OpVT = TLI.getSimpleValueType(STy->getElementType(ResNo));
+ OpVT = TLI->getSimpleValueType(STy->getElementType(ResNo));
} else {
assert(ResNo == 0 && "Asm only has one result!");
- OpVT = TLI.getSimpleValueType(CS.getType());
+ OpVT = TLI->getSimpleValueType(CS.getType());
}
++ResNo;
break;
@@ -5867,7 +5921,7 @@ void SelectionDAGBuilder::visitInlineAsm(ImmutableCallSite CS) {
OpInfo.CallOperand = getValue(OpInfo.CallOperandVal);
}
- OpVT = OpInfo.getCallOperandValEVT(*DAG.getContext(), TLI, TD).
+ OpVT = OpInfo.getCallOperandValEVT(*DAG.getContext(), *TLI, TD).
getSimpleVT();
}
@@ -5879,7 +5933,7 @@ void SelectionDAGBuilder::visitInlineAsm(ImmutableCallSite CS) {
else {
for (unsigned j = 0, ee = OpInfo.Codes.size(); j != ee; ++j) {
TargetLowering::ConstraintType
- CType = TLI.getConstraintType(OpInfo.Codes[j]);
+ CType = TLI->getConstraintType(OpInfo.Codes[j]);
if (CType == TargetLowering::C_Memory) {
hasMemory = true;
break;
@@ -5911,11 +5965,11 @@ void SelectionDAGBuilder::visitInlineAsm(ImmutableCallSite CS) {
if (OpInfo.ConstraintVT != Input.ConstraintVT) {
std::pair<unsigned, const TargetRegisterClass*> MatchRC =
- TLI.getRegForInlineAsmConstraint(OpInfo.ConstraintCode,
- OpInfo.ConstraintVT);
+ TLI->getRegForInlineAsmConstraint(OpInfo.ConstraintCode,
+ OpInfo.ConstraintVT);
std::pair<unsigned, const TargetRegisterClass*> InputRC =
- TLI.getRegForInlineAsmConstraint(Input.ConstraintCode,
- Input.ConstraintVT);
+ TLI->getRegForInlineAsmConstraint(Input.ConstraintCode,
+ Input.ConstraintVT);
if ((OpInfo.ConstraintVT.isInteger() !=
Input.ConstraintVT.isInteger()) ||
(MatchRC.second != InputRC.second)) {
@@ -5928,7 +5982,7 @@ void SelectionDAGBuilder::visitInlineAsm(ImmutableCallSite CS) {
}
// Compute the constraint code and ConstraintType to use.
- TLI.ComputeConstraintToUse(OpInfo, OpInfo.CallOperand, &DAG);
+ TLI->ComputeConstraintToUse(OpInfo, OpInfo.CallOperand, &DAG);
if (OpInfo.ConstraintType == TargetLowering::C_Memory &&
OpInfo.Type == InlineAsm::isClobber)
@@ -5956,16 +6010,16 @@ void SelectionDAGBuilder::visitInlineAsm(ImmutableCallSite CS) {
if (isa<ConstantFP>(OpVal) || isa<ConstantInt>(OpVal) ||
isa<ConstantVector>(OpVal) || isa<ConstantDataVector>(OpVal)) {
OpInfo.CallOperand = DAG.getConstantPool(cast<Constant>(OpVal),
- TLI.getPointerTy());
+ TLI->getPointerTy());
} else {
// Otherwise, create a stack slot and emit a store to it before the
// asm.
Type *Ty = OpVal->getType();
- uint64_t TySize = TLI.getDataLayout()->getTypeAllocSize(Ty);
- unsigned Align = TLI.getDataLayout()->getPrefTypeAlignment(Ty);
+ uint64_t TySize = TLI->getDataLayout()->getTypeAllocSize(Ty);
+ unsigned Align = TLI->getDataLayout()->getPrefTypeAlignment(Ty);
MachineFunction &MF = DAG.getMachineFunction();
int SSFI = MF.getFrameInfo()->CreateStackObject(TySize, Align, false);
- SDValue StackSlot = DAG.getFrameIndex(SSFI, TLI.getPointerTy());
+ SDValue StackSlot = DAG.getFrameIndex(SSFI, TLI->getPointerTy());
Chain = DAG.getStore(Chain, getCurSDLoc(),
OpInfo.CallOperand, StackSlot,
MachinePointerInfo::getFixedStack(SSFI),
@@ -5983,7 +6037,7 @@ void SelectionDAGBuilder::visitInlineAsm(ImmutableCallSite CS) {
// If this constraint is for a specific register, allocate it before
// anything else.
if (OpInfo.ConstraintType == TargetLowering::C_Register)
- GetRegistersForValue(DAG, TLI, getCurSDLoc(), OpInfo);
+ GetRegistersForValue(DAG, *TLI, getCurSDLoc(), OpInfo);
}
// Second pass - Loop over all of the operands, assigning virtual or physregs
@@ -5994,7 +6048,7 @@ void SelectionDAGBuilder::visitInlineAsm(ImmutableCallSite CS) {
// C_Register operands have already been allocated, Other/Memory don't need
// to be.
if (OpInfo.ConstraintType == TargetLowering::C_RegisterClass)
- GetRegistersForValue(DAG, TLI, getCurSDLoc(), OpInfo);
+ GetRegistersForValue(DAG, *TLI, getCurSDLoc(), OpInfo);
}
// AsmNodeOperands - The operands for the ISD::INLINEASM node.
@@ -6002,7 +6056,7 @@ void SelectionDAGBuilder::visitInlineAsm(ImmutableCallSite CS) {
AsmNodeOperands.push_back(SDValue()); // reserve space for input chain
AsmNodeOperands.push_back(
DAG.getTargetExternalSymbol(IA->getAsmString().c_str(),
- TLI.getPointerTy()));
+ TLI->getPointerTy()));
// If we have a !srcloc metadata node associated with it, we want to attach
// this to the ultimately generated inline asm machineinstr. To do this, we
@@ -6025,7 +6079,7 @@ void SelectionDAGBuilder::visitInlineAsm(ImmutableCallSite CS) {
TargetLowering::AsmOperandInfo &OpInfo = TargetConstraints[i];
// Compute the constraint code and ConstraintType to use.
- TLI.ComputeConstraintToUse(OpInfo, SDValue());
+ TLI->ComputeConstraintToUse(OpInfo, SDValue());
// Ideally, we would only check against memory constraints. However, the
    // meaning of an 'other' constraint can be target-specific and we can't easily
@@ -6043,7 +6097,7 @@ void SelectionDAGBuilder::visitInlineAsm(ImmutableCallSite CS) {
}
AsmNodeOperands.push_back(DAG.getTargetConstant(ExtraInfo,
- TLI.getPointerTy()));
+ TLI->getPointerTy()));
// Loop over all of the inputs, copying the operand values into the
// appropriate registers and processing the output regs.
@@ -6065,7 +6119,7 @@ void SelectionDAGBuilder::visitInlineAsm(ImmutableCallSite CS) {
// Add information to the INLINEASM node to know about this output.
unsigned OpFlags = InlineAsm::getFlagWord(InlineAsm::Kind_Mem, 1);
AsmNodeOperands.push_back(DAG.getTargetConstant(OpFlags,
- TLI.getPointerTy()));
+ TLI->getPointerTy()));
AsmNodeOperands.push_back(OpInfo.CallOperand);
break;
}
@@ -6076,10 +6130,10 @@ void SelectionDAGBuilder::visitInlineAsm(ImmutableCallSite CS) {
// we can use.
if (OpInfo.AssignedRegs.Regs.empty()) {
LLVMContext &Ctx = *DAG.getContext();
- Ctx.emitError(CS.getInstruction(),
+ Ctx.emitError(CS.getInstruction(),
"couldn't allocate output register for constraint '" +
- Twine(OpInfo.ConstraintCode) + "'");
- break;
+ Twine(OpInfo.ConstraintCode) + "'");
+ return;
}
// If this is an indirect operand, store through the pointer after the
@@ -6096,13 +6150,11 @@ void SelectionDAGBuilder::visitInlineAsm(ImmutableCallSite CS) {
// Add information to the INLINEASM node to know that this register is
// set.
- OpInfo.AssignedRegs.AddInlineAsmOperands(OpInfo.isEarlyClobber ?
- InlineAsm::Kind_RegDefEarlyClobber :
- InlineAsm::Kind_RegDef,
- false,
- 0,
- DAG,
- AsmNodeOperands);
+ OpInfo.AssignedRegs
+ .AddInlineAsmOperands(OpInfo.isEarlyClobber
+ ? InlineAsm::Kind_RegDefEarlyClobber
+ : InlineAsm::Kind_RegDef,
+ false, 0, DAG, AsmNodeOperands);
break;
}
case InlineAsm::isInput: {
@@ -6134,10 +6186,10 @@ void SelectionDAGBuilder::visitInlineAsm(ImmutableCallSite CS) {
if (OpInfo.isIndirect) {
// This happens on gcc/testsuite/gcc.dg/pr8788-1.c
LLVMContext &Ctx = *DAG.getContext();
- Ctx.emitError(CS.getInstruction(), "inline asm not supported yet:"
- " don't know how to handle tied "
- "indirect register inputs");
- report_fatal_error("Cannot handle indirect register inputs!");
+ Ctx.emitError(CS.getInstruction(), "inline asm not supported yet:"
+ " don't know how to handle tied "
+ "indirect register inputs");
+ return;
}
RegsForValue MatchedRegs;
@@ -6147,14 +6199,14 @@ void SelectionDAGBuilder::visitInlineAsm(ImmutableCallSite CS) {
MachineRegisterInfo &RegInfo = DAG.getMachineFunction().getRegInfo();
for (unsigned i = 0, e = InlineAsm::getNumOperandRegisters(OpFlag);
i != e; ++i) {
- if (const TargetRegisterClass *RC = TLI.getRegClassFor(RegVT))
+ if (const TargetRegisterClass *RC = TLI->getRegClassFor(RegVT))
MatchedRegs.Regs.push_back(RegInfo.createVirtualRegister(RC));
else {
LLVMContext &Ctx = *DAG.getContext();
- Ctx.emitError(CS.getInstruction(), "inline asm error: This value"
+ Ctx.emitError(CS.getInstruction(),
+ "inline asm error: This value"
" type register class is not natively supported!");
- report_fatal_error("inline asm error: This value type register "
- "class is not natively supported!");
+ return;
}
}
// Use the produced MatchedRegs object to
@@ -6174,7 +6226,7 @@ void SelectionDAGBuilder::visitInlineAsm(ImmutableCallSite CS) {
OpFlag = InlineAsm::getFlagWordForMatchingOp(OpFlag,
OpInfo.getMatchedOperand());
AsmNodeOperands.push_back(DAG.getTargetConstant(OpFlag,
- TLI.getPointerTy()));
+ TLI->getPointerTy()));
AsmNodeOperands.push_back(AsmNodeOperands[CurOp+1]);
break;
}
@@ -6186,34 +6238,34 @@ void SelectionDAGBuilder::visitInlineAsm(ImmutableCallSite CS) {
if (OpInfo.ConstraintType == TargetLowering::C_Other) {
std::vector<SDValue> Ops;
- TLI.LowerAsmOperandForConstraint(InOperandVal, OpInfo.ConstraintCode,
- Ops, DAG);
+ TLI->LowerAsmOperandForConstraint(InOperandVal, OpInfo.ConstraintCode,
+ Ops, DAG);
if (Ops.empty()) {
LLVMContext &Ctx = *DAG.getContext();
Ctx.emitError(CS.getInstruction(),
"invalid operand for inline asm constraint '" +
- Twine(OpInfo.ConstraintCode) + "'");
- break;
+ Twine(OpInfo.ConstraintCode) + "'");
+ return;
}
// Add information to the INLINEASM node to know about this input.
unsigned ResOpType =
InlineAsm::getFlagWord(InlineAsm::Kind_Imm, Ops.size());
AsmNodeOperands.push_back(DAG.getTargetConstant(ResOpType,
- TLI.getPointerTy()));
+ TLI->getPointerTy()));
AsmNodeOperands.insert(AsmNodeOperands.end(), Ops.begin(), Ops.end());
break;
}
if (OpInfo.ConstraintType == TargetLowering::C_Memory) {
assert(OpInfo.isIndirect && "Operand must be indirect to be a mem!");
- assert(InOperandVal.getValueType() == TLI.getPointerTy() &&
+ assert(InOperandVal.getValueType() == TLI->getPointerTy() &&
"Memory operands expect pointer values");
// Add information to the INLINEASM node to know about this input.
unsigned ResOpType = InlineAsm::getFlagWord(InlineAsm::Kind_Mem, 1);
AsmNodeOperands.push_back(DAG.getTargetConstant(ResOpType,
- TLI.getPointerTy()));
+ TLI->getPointerTy()));
AsmNodeOperands.push_back(InOperandVal);
break;
}
@@ -6227,17 +6279,18 @@ void SelectionDAGBuilder::visitInlineAsm(ImmutableCallSite CS) {
LLVMContext &Ctx = *DAG.getContext();
Ctx.emitError(CS.getInstruction(),
"Don't know how to handle indirect register inputs yet "
- "for constraint '" + Twine(OpInfo.ConstraintCode) + "'");
- break;
+ "for constraint '" +
+ Twine(OpInfo.ConstraintCode) + "'");
+ return;
}
// Copy the input into the appropriate registers.
if (OpInfo.AssignedRegs.Regs.empty()) {
LLVMContext &Ctx = *DAG.getContext();
- Ctx.emitError(CS.getInstruction(),
+ Ctx.emitError(CS.getInstruction(),
"couldn't allocate input reg for constraint '" +
- Twine(OpInfo.ConstraintCode) + "'");
- break;
+ Twine(OpInfo.ConstraintCode) + "'");
+ return;
}
OpInfo.AssignedRegs.getCopyToRegs(InOperandVal, DAG, getCurSDLoc(),
@@ -6276,7 +6329,7 @@ void SelectionDAGBuilder::visitInlineAsm(ImmutableCallSite CS) {
// FIXME: Why don't we do this for inline asms with MRVs?
if (CS.getType()->isSingleValueType() && CS.getType()->isSized()) {
- EVT ResultType = TLI.getValueType(CS.getType());
+ EVT ResultType = TLI->getValueType(CS.getType());
// If any of the results of the inline asm is a vector, it may have the
// wrong width/num elts. This can happen for register classes that can
@@ -6342,8 +6395,9 @@ void SelectionDAGBuilder::visitVAStart(const CallInst &I) {
}
void SelectionDAGBuilder::visitVAArg(const VAArgInst &I) {
- const DataLayout &TD = *TLI.getDataLayout();
- SDValue V = DAG.getVAArg(TLI.getValueType(I.getType()), getCurSDLoc(),
+ const TargetLowering *TLI = TM.getTargetLowering();
+ const DataLayout &TD = *TLI->getDataLayout();
+ SDValue V = DAG.getVAArg(TLI->getValueType(I.getType()), getCurSDLoc(),
getRoot(), getValue(I.getOperand(0)),
DAG.getSrcValue(I.getOperand(0)),
TD.getABITypeAlignment(I.getType()));
@@ -6566,7 +6620,8 @@ SelectionDAGBuilder::CopyValueToVirtualRegister(const Value *V, unsigned Reg) {
"Copy from a reg to the same reg!");
assert(!TargetRegisterInfo::isPhysicalRegister(Reg) && "Is a physreg");
- RegsForValue RFV(V->getContext(), TLI, Reg, V->getType());
+ const TargetLowering *TLI = TM.getTargetLowering();
+ RegsForValue RFV(V->getContext(), *TLI, Reg, V->getType());
SDValue Chain = DAG.getEntryNode();
RFV.getCopyToRegs(Op, DAG, getCurSDLoc(), Chain, 0, V);
PendingExports.push_back(Chain);
@@ -6596,13 +6651,15 @@ static bool isOnlyUsedInEntryBlock(const Argument *A, bool FastISel) {
void SelectionDAGISel::LowerArguments(const Function &F) {
SelectionDAG &DAG = SDB->DAG;
SDLoc dl = SDB->getCurSDLoc();
+ const TargetLowering *TLI = getTargetLowering();
const DataLayout *TD = TLI->getDataLayout();
SmallVector<ISD::InputArg, 16> Ins;
if (!FuncInfo->CanLowerReturn) {
// Put in an sret pointer parameter before all the other parameters.
SmallVector<EVT, 1> ValueVTs;
- ComputeValueVTs(*TLI, PointerType::getUnqual(F.getReturnType()), ValueVTs);
+ ComputeValueVTs(*getTargetLowering(),
+ PointerType::getUnqual(F.getReturnType()), ValueVTs);
// NOTE: Assuming that a pointer will never break down to more than one VT
// or one register.
@@ -6771,7 +6828,7 @@ void SelectionDAGISel::LowerArguments(const Function &F) {
SDB->setValue(I, Res);
if (!TM.Options.EnableFastISel && Res.getOpcode() == ISD::BUILD_PAIR) {
- if (LoadSDNode *LNode =
+ if (LoadSDNode *LNode =
dyn_cast<LoadSDNode>(Res.getOperand(0).getNode()))
if (FrameIndexSDNode *FI =
dyn_cast<FrameIndexSDNode>(LNode->getBasePtr().getNode()))
@@ -6869,15 +6926,17 @@ SelectionDAGBuilder::HandlePHINodesInSuccessorBlocks(const BasicBlock *LLVMBB) {
      // Remember that this register needs to be added to the machine PHI node as
// the input for this MBB.
SmallVector<EVT, 4> ValueVTs;
- ComputeValueVTs(TLI, PN->getType(), ValueVTs);
+ const TargetLowering *TLI = TM.getTargetLowering();
+ ComputeValueVTs(*TLI, PN->getType(), ValueVTs);
for (unsigned vti = 0, vte = ValueVTs.size(); vti != vte; ++vti) {
EVT VT = ValueVTs[vti];
- unsigned NumRegisters = TLI.getNumRegisters(*DAG.getContext(), VT);
+ unsigned NumRegisters = TLI->getNumRegisters(*DAG.getContext(), VT);
for (unsigned i = 0, e = NumRegisters; i != e; ++i)
FuncInfo.PHINodesToUpdate.push_back(std::make_pair(MBBI++, Reg+i));
Reg += NumRegisters;
}
}
}
+
ConstantsOut.clear();
}
diff --git a/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.h b/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.h
index bff92ca..ef73c00 100644
--- a/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.h
+++ b/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.h
@@ -1,4 +1,4 @@
-//===-- SelectionDAGBuilder.h - Selection-DAG building --------------------===//
+//===-- SelectionDAGBuilder.h - Selection-DAG building --------*- C++ -*---===//
//
// The LLVM Compiler Infrastructure
//
@@ -278,12 +278,9 @@ private:
BitTestInfo Cases;
};
-public:
- // TLI - This is information that describes the available target features we
- // need for lowering. This indicates when operations are unavailable,
- // implemented with a libcall, etc.
+private:
const TargetMachine &TM;
- const TargetLowering &TLI;
+public:
SelectionDAG &DAG;
const DataLayout *TD;
AliasAnalysis *AA;
@@ -328,7 +325,6 @@ public:
SelectionDAGBuilder(SelectionDAG &dag, FunctionLoweringInfo &funcinfo,
CodeGenOpt::Level ol)
: CurInst(NULL), SDNodeOrder(0), TM(dag.getTarget()),
- TLI(dag.getTargetLoweringInfo()),
DAG(dag), FuncInfo(funcinfo), OptLevel(ol),
HasTailCall(false) {
}
diff --git a/lib/CodeGen/SelectionDAG/SelectionDAGDumper.cpp b/lib/CodeGen/SelectionDAG/SelectionDAGDumper.cpp
index b6cc7ea..d8ee221 100644
--- a/lib/CodeGen/SelectionDAG/SelectionDAGDumper.cpp
+++ b/lib/CodeGen/SelectionDAG/SelectionDAGDumper.cpp
@@ -92,9 +92,6 @@ std::string SDNode::getOperationName(const SelectionDAG *G) const {
case ISD::RETURNADDR: return "RETURNADDR";
case ISD::FRAMEADDR: return "FRAMEADDR";
case ISD::FRAME_TO_ARGS_OFFSET: return "FRAME_TO_ARGS_OFFSET";
- case ISD::EXCEPTIONADDR: return "EXCEPTIONADDR";
- case ISD::LSDAADDR: return "LSDAADDR";
- case ISD::EHSELECTION: return "EHSELECTION";
case ISD::EH_RETURN: return "EH_RETURN";
case ISD::EH_SJLJ_SETJMP: return "EH_SJLJ_SETJMP";
case ISD::EH_SJLJ_LONGJMP: return "EH_SJLJ_LONGJMP";
@@ -500,8 +497,10 @@ void SDNode::print_details(raw_ostream &OS, const SelectionDAG *G) const {
DIScope
Scope(dl.getScope(G->getMachineFunction().getFunction()->getContext()));
OS << " dbg:";
+ assert((!Scope || Scope.isScope()) &&
+ "Scope of a DebugLoc should be null or a DIScope.");
// Omit the directory, since it's usually long and uninteresting.
- if (Scope.Verify())
+ if (Scope)
OS << Scope.getFilename();
else
OS << "<unknown>";
diff --git a/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp b/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp
index 475017a..01da51c 100644
--- a/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp
+++ b/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp
@@ -19,6 +19,7 @@
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/BranchProbabilityInfo.h"
+#include "llvm/Analysis/CFG.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/CodeGen/FastISel.h"
#include "llvm/CodeGen/FunctionLoweringInfo.h"
@@ -275,9 +276,9 @@ void TargetLowering::AdjustInstrPostInstrSelection(MachineInstr *MI,
// SelectionDAGISel code
//===----------------------------------------------------------------------===//
-SelectionDAGISel::SelectionDAGISel(const TargetMachine &tm,
+SelectionDAGISel::SelectionDAGISel(TargetMachine &tm,
CodeGenOpt::Level OL) :
- MachineFunctionPass(ID), TM(tm), TLI(tm.getTargetLowering()),
+ MachineFunctionPass(ID), TM(tm),
FuncInfo(new FunctionLoweringInfo(TM)),
CurDAG(new SelectionDAG(tm, OL)),
SDB(new SelectionDAGBuilder(*CurDAG, *FuncInfo, OL)),
@@ -401,7 +402,8 @@ bool SelectionDAGISel::runOnMachineFunction(MachineFunction &mf) {
// Insert DBG_VALUE instructions for function arguments to the entry block.
for (unsigned i = 0, e = FuncInfo->ArgDbgValues.size(); i != e; ++i) {
MachineInstr *MI = FuncInfo->ArgDbgValues[e-i-1];
- unsigned Reg = MI->getOperand(0).getReg();
+ bool hasFI = MI->getOperand(0).isFI();
+ unsigned Reg = hasFI ? TRI.getFrameRegister(*MF) : MI->getOperand(0).getReg();
if (TargetRegisterInfo::isPhysicalRegister(Reg))
EntryMBB->insert(EntryMBB->begin(), MI);
else {
@@ -414,16 +416,19 @@ bool SelectionDAGISel::runOnMachineFunction(MachineFunction &mf) {
// If Reg is live-in then update debug info to track its copy in a vreg.
DenseMap<unsigned, unsigned>::iterator LDI = LiveInMap.find(Reg);
if (LDI != LiveInMap.end()) {
+ assert(!hasFI && "There's no handling of frame pointer updating here yet "
+ "- add if needed");
MachineInstr *Def = RegInfo->getVRegDef(LDI->second);
MachineBasicBlock::iterator InsertPos = Def;
const MDNode *Variable =
MI->getOperand(MI->getNumOperands()-1).getMetadata();
- unsigned Offset = MI->getOperand(1).getImm();
+ bool IsIndirect = MI->getOperand(1).isImm();
+ unsigned Offset = IsIndirect ? MI->getOperand(1).getImm() : 0;
// Def is never a terminator here, so it is ok to increment InsertPos.
BuildMI(*EntryMBB, ++InsertPos, MI->getDebugLoc(),
- TII.get(TargetOpcode::DBG_VALUE))
- .addReg(LDI->second, RegState::Debug)
- .addImm(Offset).addMetadata(Variable);
+ TII.get(TargetOpcode::DBG_VALUE),
+ IsIndirect,
+ LDI->second, Offset, Variable);
// If this vreg is directly copied into an exported register then
      // that COPY instruction also needs DBG_VALUE, if it is the only
@@ -442,9 +447,10 @@ bool SelectionDAGISel::runOnMachineFunction(MachineFunction &mf) {
if (CopyUseMI) {
MachineInstr *NewMI =
BuildMI(*MF, CopyUseMI->getDebugLoc(),
- TII.get(TargetOpcode::DBG_VALUE))
- .addReg(CopyUseMI->getOperand(0).getReg(), RegState::Debug)
- .addImm(Offset).addMetadata(Variable);
+ TII.get(TargetOpcode::DBG_VALUE),
+ IsIndirect,
+ CopyUseMI->getOperand(0).getReg(),
+ Offset, Variable);
MachineBasicBlock::iterator Pos = CopyUseMI;
EntryMBB->insertAfter(Pos, NewMI);
}
@@ -825,12 +831,14 @@ void SelectionDAGISel::PrepareEHLandingPad() {
.addSym(Label);
// Mark exception register as live in.
- unsigned Reg = TLI->getExceptionPointerRegister();
- if (Reg) MBB->addLiveIn(Reg);
+ const TargetLowering *TLI = getTargetLowering();
+ const TargetRegisterClass *PtrRC = TLI->getRegClassFor(TLI->getPointerTy());
+ if (unsigned Reg = TLI->getExceptionPointerRegister())
+ FuncInfo->ExceptionPointerVirtReg = MBB->addLiveIn(Reg, PtrRC);
// Mark exception selector register as live in.
- Reg = TLI->getExceptionSelectorRegister();
- if (Reg) MBB->addLiveIn(Reg);
+ if (unsigned Reg = TLI->getExceptionSelectorRegister())
+ FuncInfo->ExceptionSelectorVirtReg = MBB->addLiveIn(Reg, PtrRC);
}
/// isFoldedOrDeadInstruction - Return true if the specified instruction is
@@ -929,7 +937,7 @@ void SelectionDAGISel::SelectAllBasicBlocks(const Function &Fn) {
// Initialize the Fast-ISel state, if needed.
FastISel *FastIS = 0;
if (TM.Options.EnableFastISel)
- FastIS = TLI->createFastISel(*FuncInfo, LibInfo);
+ FastIS = getTargetLowering()->createFastISel(*FuncInfo, LibInfo);
// Iterate over all basic blocks in the function.
ReversePostOrderTraversal<const Function*> RPOT(&Fn);
@@ -968,6 +976,8 @@ void SelectionDAGISel::SelectAllBasicBlocks(const Function &Fn) {
FuncInfo->InsertPt = FuncInfo->MBB->getFirstNonPHI();
    // Set up an EH landing-pad block.
+ FuncInfo->ExceptionPointerVirtReg = 0;
+ FuncInfo->ExceptionSelectorVirtReg = 0;
if (FuncInfo->MBB->isLandingPad())
PrepareEHLandingPad();
@@ -2076,7 +2086,7 @@ static unsigned IsPredicateKnownToFail(const unsigned char *Table,
Result = !::CheckOpcode(Table, Index, N.getNode());
return Index;
case SelectionDAGISel::OPC_CheckType:
- Result = !::CheckType(Table, Index, N, SDISel.TLI);
+ Result = !::CheckType(Table, Index, N, SDISel.getTargetLowering());
return Index;
case SelectionDAGISel::OPC_CheckChild0Type:
case SelectionDAGISel::OPC_CheckChild1Type:
@@ -2086,14 +2096,14 @@ static unsigned IsPredicateKnownToFail(const unsigned char *Table,
case SelectionDAGISel::OPC_CheckChild5Type:
case SelectionDAGISel::OPC_CheckChild6Type:
case SelectionDAGISel::OPC_CheckChild7Type:
- Result = !::CheckChildType(Table, Index, N, SDISel.TLI,
+ Result = !::CheckChildType(Table, Index, N, SDISel.getTargetLowering(),
Table[Index-1] - SelectionDAGISel::OPC_CheckChild0Type);
return Index;
case SelectionDAGISel::OPC_CheckCondCode:
Result = !::CheckCondCode(Table, Index, N);
return Index;
case SelectionDAGISel::OPC_CheckValueType:
- Result = !::CheckValueType(Table, Index, N, SDISel.TLI);
+ Result = !::CheckValueType(Table, Index, N, SDISel.getTargetLowering());
return Index;
case SelectionDAGISel::OPC_CheckInteger:
Result = !::CheckInteger(Table, Index, N);
@@ -2386,7 +2396,8 @@ SelectCodeCommon(SDNode *NodeToMatch, const unsigned char *MatcherTable,
continue;
case OPC_CheckType:
- if (!::CheckType(MatcherTable, MatcherIndex, N, TLI)) break;
+ if (!::CheckType(MatcherTable, MatcherIndex, N, getTargetLowering()))
+ break;
continue;
case OPC_SwitchOpcode: {
@@ -2433,7 +2444,7 @@ SelectCodeCommon(SDNode *NodeToMatch, const unsigned char *MatcherTable,
MVT CaseVT = (MVT::SimpleValueType)MatcherTable[MatcherIndex++];
if (CaseVT == MVT::iPTR)
- CaseVT = TLI->getPointerTy();
+ CaseVT = getTargetLowering()->getPointerTy();
// If the VT matches, then we will execute this case.
if (CurNodeVT == CaseVT)
@@ -2455,7 +2466,7 @@ SelectCodeCommon(SDNode *NodeToMatch, const unsigned char *MatcherTable,
case OPC_CheckChild2Type: case OPC_CheckChild3Type:
case OPC_CheckChild4Type: case OPC_CheckChild5Type:
case OPC_CheckChild6Type: case OPC_CheckChild7Type:
- if (!::CheckChildType(MatcherTable, MatcherIndex, N, TLI,
+ if (!::CheckChildType(MatcherTable, MatcherIndex, N, getTargetLowering(),
Opcode-OPC_CheckChild0Type))
break;
continue;
@@ -2463,7 +2474,8 @@ SelectCodeCommon(SDNode *NodeToMatch, const unsigned char *MatcherTable,
if (!::CheckCondCode(MatcherTable, MatcherIndex, N)) break;
continue;
case OPC_CheckValueType:
- if (!::CheckValueType(MatcherTable, MatcherIndex, N, TLI)) break;
+ if (!::CheckValueType(MatcherTable, MatcherIndex, N, getTargetLowering()))
+ break;
continue;
case OPC_CheckInteger:
if (!::CheckInteger(MatcherTable, MatcherIndex, N)) break;
@@ -2655,7 +2667,7 @@ SelectCodeCommon(SDNode *NodeToMatch, const unsigned char *MatcherTable,
for (unsigned i = 0; i != NumVTs; ++i) {
MVT::SimpleValueType VT =
(MVT::SimpleValueType)MatcherTable[MatcherIndex++];
- if (VT == MVT::iPTR) VT = TLI->getPointerTy().SimpleTy;
+ if (VT == MVT::iPTR) VT = getTargetLowering()->getPointerTy().SimpleTy;
VTs.push_back(VT);
}
@@ -2757,8 +2769,8 @@ SelectCodeCommon(SDNode *NodeToMatch, const unsigned char *MatcherTable,
bool mayStore = MCID.mayStore();
unsigned NumMemRefs = 0;
- for (SmallVector<MachineMemOperand*, 2>::const_iterator I =
- MatchedMemRefs.begin(), E = MatchedMemRefs.end(); I != E; ++I) {
+ for (SmallVectorImpl<MachineMemOperand *>::const_iterator I =
+ MatchedMemRefs.begin(), E = MatchedMemRefs.end(); I != E; ++I) {
if ((*I)->isLoad()) {
if (mayLoad)
++NumMemRefs;
@@ -2774,8 +2786,8 @@ SelectCodeCommon(SDNode *NodeToMatch, const unsigned char *MatcherTable,
MF->allocateMemRefsArray(NumMemRefs);
MachineSDNode::mmo_iterator MemRefsPos = MemRefs;
- for (SmallVector<MachineMemOperand*, 2>::const_iterator I =
- MatchedMemRefs.begin(), E = MatchedMemRefs.end(); I != E; ++I) {
+ for (SmallVectorImpl<MachineMemOperand *>::const_iterator I =
+ MatchedMemRefs.begin(), E = MatchedMemRefs.end(); I != E; ++I) {
if ((*I)->isLoad()) {
if (mayLoad)
*MemRefsPos++ = *I;
diff --git a/lib/CodeGen/SelectionDAG/TargetLowering.cpp b/lib/CodeGen/SelectionDAG/TargetLowering.cpp
index f02e3d6..e3c6306 100644
--- a/lib/CodeGen/SelectionDAG/TargetLowering.cpp
+++ b/lib/CodeGen/SelectionDAG/TargetLowering.cpp
@@ -510,7 +510,7 @@ bool TargetLowering::SimplifyDemandedBits(SDValue Op,
// into an AND, as we know the bits will be cleared.
// e.g. (X | C1) ^ C2 --> (X | C1) & ~C2 iff (C1&C2) == C2
// NB: it is okay if more bits are known than are requested
- if ((NewMask & (KnownZero|KnownOne)) == NewMask) { // all known on one side
+ if ((NewMask & (KnownZero|KnownOne)) == NewMask) { // all known on one side
if (KnownOne == KnownOne2) { // set bits are the same on both sides
EVT VT = Op.getValueType();
SDValue ANDC = TLO.DAG.getConstant(~KnownOne & NewMask, VT);
@@ -1992,7 +1992,7 @@ void TargetLowering::LowerAsmOperandForConstraint(SDValue Op,
std::pair<unsigned, const TargetRegisterClass*> TargetLowering::
getRegForInlineAsmConstraint(const std::string &Constraint,
- EVT VT) const {
+ MVT VT) const {
if (Constraint[0] != '{')
return std::make_pair(0u, static_cast<TargetRegisterClass*>(0));
assert(*(Constraint.end()-1) == '}' && "Not a brace enclosed constraint?");