author    | Andrew Trick <atrick@apple.com> | 2013-05-25 02:42:55 +0000
committer | Andrew Trick <atrick@apple.com> | 2013-05-25 02:42:55 +0000
commit    | ac6d9bec671252dd1e596fa71180ff6b39d06b5d (patch)
tree      | 414f60df511ae112e9498df4f722cb1a9ee2fbba /lib/Target
parent    | ea5db0c315f1ab8ee3be52e0e765c32d3efff024 (diff)
Track IR ordering of SelectionDAG nodes 2/4.
Change SelectionDAG::getXXXNode() interfaces as well as call sites of
these functions to pass in SDLoc instead of DebugLoc.
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@182703 91177308-0d34-0410-b5e6-96231b3b80d8
Diffstat (limited to 'lib/Target')
55 files changed, 815 insertions, 817 deletions
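Illustrative note (not part of the patch): the change is mechanical at every call site. Where lowering and selection code previously fetched a DebugLoc from a node, it now constructs an SDLoc directly from the SDNode or SDValue, and the SelectionDAG::get*Node() interfaces accept that SDLoc. A minimal sketch of the new idiom follows; the helper name lowerAdd is hypothetical and only stands in for the many Lower*/Select* routines touched in the hunks below.

    // Before this patch (old interface), lowering code looked like:
    //   DebugLoc dl = N->getDebugLoc();
    //   return DAG.getNode(ISD::ADD, dl, VT, LHS, RHS);
    //
    // After this patch: SDLoc is built from the node itself and carries the
    // IR ordering alongside the DebugLoc.
    #include "llvm/CodeGen/SelectionDAG.h"
    #include "llvm/CodeGen/SelectionDAGNodes.h"

    static llvm::SDValue lowerAdd(llvm::SDNode *N, llvm::SelectionDAG &DAG) {
      llvm::SDLoc dl(N);                  // replaces N->getDebugLoc()
      llvm::EVT VT = N->getValueType(0);
      return DAG.getNode(llvm::ISD::ADD, dl, VT,
                         N->getOperand(0), N->getOperand(1));
    }

The same substitution applies to helpers that formerly took a DebugLoc parameter: their signatures now take SDLoc (or SDLoc&), as seen throughout the diff below.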
diff --git a/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp b/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp index 102c71b..3f77979 100644 --- a/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp +++ b/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp @@ -102,7 +102,7 @@ public: /// Put the given constant into a pool and return a DAG which will give its /// address. - SDValue getConstantPoolItemAddress(DebugLoc DL, const Constant *CV); + SDValue getConstantPoolItemAddress(SDLoc DL, const Constant *CV); SDNode *TrySelectToMoveImm(SDNode *N); SDNode *LowerToFPLitPool(SDNode *Node); @@ -191,7 +191,7 @@ bool AArch64DAGToDAGISel::SelectLogicalImm(SDValue N, SDValue &Imm) { SDNode *AArch64DAGToDAGISel::TrySelectToMoveImm(SDNode *Node) { SDNode *ResNode; - DebugLoc dl = Node->getDebugLoc(); + SDLoc dl(Node); EVT DestType = Node->getValueType(0); unsigned DestWidth = DestType.getSizeInBits(); @@ -241,7 +241,7 @@ SDNode *AArch64DAGToDAGISel::TrySelectToMoveImm(SDNode *Node) { } SDValue -AArch64DAGToDAGISel::getConstantPoolItemAddress(DebugLoc DL, +AArch64DAGToDAGISel::getConstantPoolItemAddress(SDLoc DL, const Constant *CV) { EVT PtrVT = TLI.getPointerTy(); @@ -281,7 +281,7 @@ AArch64DAGToDAGISel::getConstantPoolItemAddress(DebugLoc DL, } SDNode *AArch64DAGToDAGISel::SelectToLitPool(SDNode *Node) { - DebugLoc DL = Node->getDebugLoc(); + SDLoc DL(Node); uint64_t UnsignedVal = cast<ConstantSDNode>(Node)->getZExtValue(); int64_t SignedVal = cast<ConstantSDNode>(Node)->getSExtValue(); EVT DestType = Node->getValueType(0); @@ -323,7 +323,7 @@ SDNode *AArch64DAGToDAGISel::SelectToLitPool(SDNode *Node) { } SDNode *AArch64DAGToDAGISel::LowerToFPLitPool(SDNode *Node) { - DebugLoc DL = Node->getDebugLoc(); + SDLoc DL(Node); const ConstantFP *FV = cast<ConstantFPSDNode>(Node)->getConstantFPValue(); EVT DestType = Node->getValueType(0); @@ -497,7 +497,7 @@ SDNode *AArch64DAGToDAGISel::Select(SDNode *Node) { assert((Ty == MVT::i32 || Ty == MVT::i64) && "unexpected type"); uint16_t Register = Ty == MVT::i32 ? AArch64::WZR : AArch64::XZR; ResNode = CurDAG->getCopyFromReg(CurDAG->getEntryNode(), - Node->getDebugLoc(), + SDLoc(Node), Register, Ty).getNode(); } diff --git a/lib/Target/AArch64/AArch64ISelLowering.cpp b/lib/Target/AArch64/AArch64ISelLowering.cpp index 36f6494..db6dfcc 100644 --- a/lib/Target/AArch64/AArch64ISelLowering.cpp +++ b/lib/Target/AArch64/AArch64ISelLowering.cpp @@ -826,7 +826,7 @@ CCAssignFn *AArch64TargetLowering::CCAssignFnForNode(CallingConv::ID CC) const { void AArch64TargetLowering::SaveVarArgRegisters(CCState &CCInfo, SelectionDAG &DAG, - DebugLoc DL, SDValue &Chain) const { + SDLoc DL, SDValue &Chain) const { MachineFunction &MF = DAG.getMachineFunction(); MachineFrameInfo *MFI = MF.getFrameInfo(); AArch64MachineFunctionInfo *FuncInfo @@ -897,7 +897,7 @@ SDValue AArch64TargetLowering::LowerFormalArguments(SDValue Chain, CallingConv::ID CallConv, bool isVarArg, const SmallVectorImpl<ISD::InputArg> &Ins, - DebugLoc dl, SelectionDAG &DAG, + SDLoc dl, SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const { MachineFunction &MF = DAG.getMachineFunction(); AArch64MachineFunctionInfo *FuncInfo @@ -1012,7 +1012,7 @@ AArch64TargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv, bool isVarArg, const SmallVectorImpl<ISD::OutputArg> &Outs, const SmallVectorImpl<SDValue> &OutVals, - DebugLoc dl, SelectionDAG &DAG) const { + SDLoc dl, SelectionDAG &DAG) const { // CCValAssign - represent the assignment of the return value to a location. 
SmallVector<CCValAssign, 16> RVLocs; @@ -1085,7 +1085,7 @@ SDValue AArch64TargetLowering::LowerCall(CallLoweringInfo &CLI, SmallVectorImpl<SDValue> &InVals) const { SelectionDAG &DAG = CLI.DAG; - DebugLoc &dl = CLI.DL; + SDLoc &dl = CLI.DL; SmallVector<ISD::OutputArg, 32> &Outs = CLI.Outs; SmallVector<SDValue, 32> &OutVals = CLI.OutVals; SmallVector<ISD::InputArg, 32> &Ins = CLI.Ins; @@ -1348,7 +1348,7 @@ SDValue AArch64TargetLowering::LowerCallResult(SDValue Chain, SDValue InFlag, CallingConv::ID CallConv, bool IsVarArg, const SmallVectorImpl<ISD::InputArg> &Ins, - DebugLoc dl, SelectionDAG &DAG, + SDLoc dl, SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const { // Assign locations to each value returned by this call. SmallVector<CCValAssign, 16> RVLocs; @@ -1537,7 +1537,7 @@ SDValue AArch64TargetLowering::addTokenForArgument(SDValue Chain, } // Build a tokenfactor for all the chains. - return DAG.getNode(ISD::TokenFactor, Chain.getDebugLoc(), MVT::Other, + return DAG.getNode(ISD::TokenFactor, SDLoc(Chain), MVT::Other, &ArgChains[0], ArgChains.size()); } @@ -1570,7 +1570,7 @@ bool AArch64TargetLowering::isLegalICmpImmediate(int64_t Val) const { SDValue AArch64TargetLowering::getSelectableIntSetCC(SDValue LHS, SDValue RHS, ISD::CondCode CC, SDValue &A64cc, - SelectionDAG &DAG, DebugLoc &dl) const { + SelectionDAG &DAG, SDLoc &dl) const { if (ConstantSDNode *RHSC = dyn_cast<ConstantSDNode>(RHS.getNode())) { int64_t C = 0; EVT VT = RHSC->getValueType(0); @@ -1663,7 +1663,7 @@ static A64CC::CondCodes FPCCToA64CC(ISD::CondCode CC, SDValue AArch64TargetLowering::LowerBlockAddress(SDValue Op, SelectionDAG &DAG) const { - DebugLoc DL = Op.getDebugLoc(); + SDLoc DL(Op); EVT PtrVT = getPointerTy(); const BlockAddress *BA = cast<BlockAddressSDNode>(Op)->getBlockAddress(); @@ -1693,7 +1693,7 @@ AArch64TargetLowering::LowerBlockAddress(SDValue Op, SelectionDAG &DAG) const { // (BRCOND chain, val, dest) SDValue AArch64TargetLowering::LowerBRCOND(SDValue Op, SelectionDAG &DAG) const { - DebugLoc dl = Op.getDebugLoc(); + SDLoc dl(Op); SDValue Chain = Op.getOperand(0); SDValue TheBit = Op.getOperand(1); SDValue DestBB = Op.getOperand(2); @@ -1716,7 +1716,7 @@ AArch64TargetLowering::LowerBRCOND(SDValue Op, SelectionDAG &DAG) const { // (BR_CC chain, condcode, lhs, rhs, dest) SDValue AArch64TargetLowering::LowerBR_CC(SDValue Op, SelectionDAG &DAG) const { - DebugLoc dl = Op.getDebugLoc(); + SDLoc dl(Op); SDValue Chain = Op.getOperand(0); ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(1))->get(); SDValue LHS = Op.getOperand(2); @@ -1802,7 +1802,7 @@ AArch64TargetLowering::LowerF128ToCall(SDValue Op, SelectionDAG &DAG, CallLoweringInfo CLI(InChain, RetTy, false, false, false, false, 0, getLibcallCallingConv(Call), isTailCall, /*doesNotReturn=*/false, /*isReturnValueUsed=*/true, - Callee, Args, DAG, Op->getDebugLoc()); + Callee, Args, DAG, SDLoc(Op)); std::pair<SDValue, SDValue> CallInfo = LowerCallTo(CLI); if (!CallInfo.second.getNode()) @@ -1824,7 +1824,7 @@ AArch64TargetLowering::LowerFP_ROUND(SDValue Op, SelectionDAG &DAG) const { SDValue SrcVal = Op.getOperand(0); return makeLibCall(DAG, LC, Op.getValueType(), &SrcVal, 1, - /*isSigned*/ false, Op.getDebugLoc()); + /*isSigned*/ false, SDLoc(Op)); } SDValue @@ -1861,7 +1861,7 @@ AArch64TargetLowering::LowerGlobalAddressELFLarge(SDValue Op, assert(getTargetMachine().getRelocationModel() == Reloc::Static); EVT PtrVT = getPointerTy(); - DebugLoc dl = Op.getDebugLoc(); + SDLoc dl(Op); const GlobalAddressSDNode *GN = cast<GlobalAddressSDNode>(Op); 
const GlobalValue *GV = GN->getGlobal(); @@ -1885,7 +1885,7 @@ AArch64TargetLowering::LowerGlobalAddressELFSmall(SDValue Op, assert(getTargetMachine().getCodeModel() == CodeModel::Small); EVT PtrVT = getPointerTy(); - DebugLoc dl = Op.getDebugLoc(); + SDLoc dl(Op); const GlobalAddressSDNode *GN = cast<GlobalAddressSDNode>(Op); const GlobalValue *GV = GN->getGlobal(); unsigned Alignment = GV->getAlignment(); @@ -1978,7 +1978,7 @@ AArch64TargetLowering::LowerGlobalAddressELF(SDValue Op, SDValue AArch64TargetLowering::LowerTLSDescCall(SDValue SymAddr, SDValue DescAddr, - DebugLoc DL, + SDLoc DL, SelectionDAG &DAG) const { EVT PtrVT = getPointerTy(); @@ -2033,7 +2033,7 @@ AArch64TargetLowering::LowerGlobalTLSAddress(SDValue Op, SDValue TPOff; EVT PtrVT = getPointerTy(); - DebugLoc DL = Op.getDebugLoc(); + SDLoc DL(Op); const GlobalValue *GV = GA->getGlobal(); SDValue ThreadBase = DAG.getNode(AArch64ISD::THREAD_POINTER, DL, PtrVT); @@ -2134,7 +2134,7 @@ AArch64TargetLowering::LowerINT_TO_FP(SDValue Op, SelectionDAG &DAG, SDValue AArch64TargetLowering::LowerJumpTable(SDValue Op, SelectionDAG &DAG) const { JumpTableSDNode *JT = cast<JumpTableSDNode>(Op); - DebugLoc dl = JT->getDebugLoc(); + SDLoc dl(JT); EVT PtrVT = getPointerTy(); // When compiling PIC, jump tables get put in the code section so a static @@ -2161,7 +2161,7 @@ AArch64TargetLowering::LowerJumpTable(SDValue Op, SelectionDAG &DAG) const { // (SELECT_CC lhs, rhs, iftrue, iffalse, condcode) SDValue AArch64TargetLowering::LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const { - DebugLoc dl = Op.getDebugLoc(); + SDLoc dl(Op); SDValue LHS = Op.getOperand(0); SDValue RHS = Op.getOperand(1); SDValue IfTrue = Op.getOperand(2); @@ -2217,7 +2217,7 @@ AArch64TargetLowering::LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const { // (SELECT testbit, iftrue, iffalse) SDValue AArch64TargetLowering::LowerSELECT(SDValue Op, SelectionDAG &DAG) const { - DebugLoc dl = Op.getDebugLoc(); + SDLoc dl(Op); SDValue TheBit = Op.getOperand(0); SDValue IfTrue = Op.getOperand(1); SDValue IfFalse = Op.getOperand(2); @@ -2239,7 +2239,7 @@ AArch64TargetLowering::LowerSELECT(SDValue Op, SelectionDAG &DAG) const { // (SETCC lhs, rhs, condcode) SDValue AArch64TargetLowering::LowerSETCC(SDValue Op, SelectionDAG &DAG) const { - DebugLoc dl = Op.getDebugLoc(); + SDLoc dl(Op); SDValue LHS = Op.getOperand(0); SDValue RHS = Op.getOperand(1); ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(2))->get(); @@ -2298,7 +2298,7 @@ AArch64TargetLowering::LowerVACOPY(SDValue Op, SelectionDAG &DAG) const { // We have to make sure we copy the entire structure: 8+8+8+4+4 = 32 bytes // rather than just 8. - return DAG.getMemcpy(Op.getOperand(0), Op.getDebugLoc(), + return DAG.getMemcpy(Op.getOperand(0), SDLoc(Op), Op.getOperand(1), Op.getOperand(2), DAG.getConstant(32, MVT::i32), 8, false, false, MachinePointerInfo(DestSV), MachinePointerInfo(SrcSV)); @@ -2311,7 +2311,7 @@ AArch64TargetLowering::LowerVASTART(SDValue Op, SelectionDAG &DAG) const { MachineFunction &MF = DAG.getMachineFunction(); AArch64MachineFunctionInfo *FuncInfo = MF.getInfo<AArch64MachineFunctionInfo>(); - DebugLoc DL = Op.getDebugLoc(); + SDLoc DL(Op); SDValue Chain = Op.getOperand(0); SDValue VAList = Op.getOperand(1); @@ -2410,7 +2410,7 @@ static SDValue PerformANDCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI) { SelectionDAG &DAG = DCI.DAG; - DebugLoc DL = N->getDebugLoc(); + SDLoc DL(N); EVT VT = N->getValueType(0); // We're looking for an SRA/SHL pair which form an SBFX. 
@@ -2448,7 +2448,7 @@ static SDValue PerformANDCombine(SDNode *N, /// a compatible SHL operation (unless they're already low). This function /// checks that condition and returns the least-significant bit that's /// intended. If the operation not a field preparation, -1 is returned. -static int32_t getLSBForBFI(SelectionDAG &DAG, DebugLoc DL, EVT VT, +static int32_t getLSBForBFI(SelectionDAG &DAG, SDLoc DL, EVT VT, SDValue &MaskedVal, uint64_t Mask) { if (!isShiftedMask_64(Mask)) return -1; @@ -2524,7 +2524,7 @@ static SDValue tryCombineToBFI(SDNode *N, TargetLowering::DAGCombinerInfo &DCI, const AArch64Subtarget *Subtarget) { SelectionDAG &DAG = DCI.DAG; - DebugLoc DL = N->getDebugLoc(); + SDLoc DL(N); EVT VT = N->getValueType(0); assert(N->getOpcode() == ISD::OR && "Unexpected root"); @@ -2605,7 +2605,7 @@ static SDValue tryCombineToLargerBFI(SDNode *N, TargetLowering::DAGCombinerInfo &DCI, const AArch64Subtarget *Subtarget) { SelectionDAG &DAG = DCI.DAG; - DebugLoc DL = N->getDebugLoc(); + SDLoc DL(N); EVT VT = N->getValueType(0); // First job is to hunt for a MaskedBFI on either the left or right. Swap @@ -2687,7 +2687,7 @@ static bool findEXTRHalf(SDValue N, SDValue &Src, uint32_t &ShiftAmount, static SDValue tryCombineToEXTR(SDNode *N, TargetLowering::DAGCombinerInfo &DCI) { SelectionDAG &DAG = DCI.DAG; - DebugLoc DL = N->getDebugLoc(); + SDLoc DL(N); EVT VT = N->getValueType(0); assert(N->getOpcode() == ISD::OR && "Unexpected root"); @@ -2759,7 +2759,7 @@ static SDValue PerformSRACombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI) { SelectionDAG &DAG = DCI.DAG; - DebugLoc DL = N->getDebugLoc(); + SDLoc DL(N); EVT VT = N->getValueType(0); // We're looking for an SRA/SHL pair which form an SBFX. @@ -2899,7 +2899,7 @@ AArch64TargetLowering::LowerAsmOperandForConstraint(SDValue Op, case 'S': { // An absolute symbolic address or label reference. 
if (const GlobalAddressSDNode *GA = dyn_cast<GlobalAddressSDNode>(Op)) { - Result = DAG.getTargetGlobalAddress(GA->getGlobal(), Op.getDebugLoc(), + Result = DAG.getTargetGlobalAddress(GA->getGlobal(), SDLoc(Op), GA->getValueType(0)); } else if (const BlockAddressSDNode *BA = dyn_cast<BlockAddressSDNode>(Op)) { diff --git a/lib/Target/AArch64/AArch64ISelLowering.h b/lib/Target/AArch64/AArch64ISelLowering.h index 4436879..ef3f4f4 100644 --- a/lib/Target/AArch64/AArch64ISelLowering.h +++ b/lib/Target/AArch64/AArch64ISelLowering.h @@ -130,14 +130,14 @@ public: SDValue LowerFormalArguments(SDValue Chain, CallingConv::ID CallConv, bool isVarArg, const SmallVectorImpl<ISD::InputArg> &Ins, - DebugLoc dl, SelectionDAG &DAG, + SDLoc dl, SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const; SDValue LowerReturn(SDValue Chain, CallingConv::ID CallConv, bool isVarArg, const SmallVectorImpl<ISD::OutputArg> &Outs, const SmallVectorImpl<SDValue> &OutVals, - DebugLoc dl, SelectionDAG &DAG) const; + SDLoc dl, SelectionDAG &DAG) const; SDValue LowerCall(CallLoweringInfo &CLI, SmallVectorImpl<SDValue> &InVals) const; @@ -145,11 +145,11 @@ public: SDValue LowerCallResult(SDValue Chain, SDValue InFlag, CallingConv::ID CallConv, bool IsVarArg, const SmallVectorImpl<ISD::InputArg> &Ins, - DebugLoc dl, SelectionDAG &DAG, + SDLoc dl, SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const; void SaveVarArgRegisters(CCState &CCInfo, SelectionDAG &DAG, - DebugLoc DL, SDValue &Chain) const; + SDLoc DL, SDValue &Chain) const; /// IsEligibleForTailCallOptimization - Check whether the call is eligible @@ -181,7 +181,7 @@ public: bool isLegalICmpImmediate(int64_t Val) const; SDValue getSelectableIntSetCC(SDValue LHS, SDValue RHS, ISD::CondCode CC, - SDValue &A64cc, SelectionDAG &DAG, DebugLoc &dl) const; + SDValue &A64cc, SelectionDAG &DAG, SDLoc &dl) const; virtual MachineBasicBlock * EmitInstrWithCustomInserter(MachineInstr *MI, MachineBasicBlock *MBB) const; @@ -216,7 +216,7 @@ public: SDValue LowerGlobalAddressELFLarge(SDValue Op, SelectionDAG &DAG) const; SDValue LowerGlobalAddressELF(SDValue Op, SelectionDAG &DAG) const; - SDValue LowerTLSDescCall(SDValue SymAddr, SDValue DescAddr, DebugLoc DL, + SDValue LowerTLSDescCall(SDValue SymAddr, SDValue DescAddr, SDLoc DL, SelectionDAG &DAG) const; SDValue LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const; SDValue LowerINT_TO_FP(SDValue Op, SelectionDAG &DAG, bool IsSigned) const; diff --git a/lib/Target/ARM/ARMISelDAGToDAG.cpp b/lib/Target/ARM/ARMISelDAGToDAG.cpp index 5c846a3..94f2716 100644 --- a/lib/Target/ARM/ARMISelDAGToDAG.cpp +++ b/lib/Target/ARM/ARMISelDAGToDAG.cpp @@ -402,12 +402,12 @@ void ARMDAGToDAGISel::PreprocessISelDAG() { } // Now make the transformation. 
- Srl = CurDAG->getNode(ISD::SRL, Srl.getDebugLoc(), MVT::i32, + Srl = CurDAG->getNode(ISD::SRL, SDLoc(Srl), MVT::i32, Srl.getOperand(0), CurDAG->getConstant(Srl_imm+TZ, MVT::i32)); - N1 = CurDAG->getNode(ISD::AND, N1.getDebugLoc(), MVT::i32, + N1 = CurDAG->getNode(ISD::AND, SDLoc(N1), MVT::i32, Srl, CurDAG->getConstant(And_imm, MVT::i32)); - N1 = CurDAG->getNode(ISD::SHL, N1.getDebugLoc(), MVT::i32, + N1 = CurDAG->getNode(ISD::SHL, SDLoc(N1), MVT::i32, N1, CurDAG->getConstant(TZ, MVT::i32)); CurDAG->UpdateNodeOperands(N, N0, N1); } @@ -1468,14 +1468,14 @@ SDNode *ARMDAGToDAGISel::SelectARMIndexedLoad(SDNode *N) { SDValue Base = LD->getBasePtr(); SDValue Ops[]= { Base, AMOpc, getAL(CurDAG), CurDAG->getRegister(0, MVT::i32), Chain }; - return CurDAG->getMachineNode(Opcode, N->getDebugLoc(), MVT::i32, + return CurDAG->getMachineNode(Opcode, SDLoc(N), MVT::i32, MVT::i32, MVT::Other, Ops); } else { SDValue Chain = LD->getChain(); SDValue Base = LD->getBasePtr(); SDValue Ops[]= { Base, Offset, AMOpc, getAL(CurDAG), CurDAG->getRegister(0, MVT::i32), Chain }; - return CurDAG->getMachineNode(Opcode, N->getDebugLoc(), MVT::i32, + return CurDAG->getMachineNode(Opcode, SDLoc(N), MVT::i32, MVT::i32, MVT::Other, Ops); } } @@ -1524,7 +1524,7 @@ SDNode *ARMDAGToDAGISel::SelectT2IndexedLoad(SDNode *N) { SDValue Base = LD->getBasePtr(); SDValue Ops[]= { Base, Offset, getAL(CurDAG), CurDAG->getRegister(0, MVT::i32), Chain }; - return CurDAG->getMachineNode(Opcode, N->getDebugLoc(), MVT::i32, MVT::i32, + return CurDAG->getMachineNode(Opcode, SDLoc(N), MVT::i32, MVT::i32, MVT::Other, Ops); } @@ -1533,7 +1533,7 @@ SDNode *ARMDAGToDAGISel::SelectT2IndexedLoad(SDNode *N) { /// \brief Form a GPRPair pseudo register from a pair of GPR regs. SDNode *ARMDAGToDAGISel::createGPRPairNode(EVT VT, SDValue V0, SDValue V1) { - DebugLoc dl = V0.getNode()->getDebugLoc(); + SDLoc dl(V0.getNode()); SDValue RegClass = CurDAG->getTargetConstant(ARM::GPRPairRegClassID, MVT::i32); SDValue SubReg0 = CurDAG->getTargetConstant(ARM::gsub_0, MVT::i32); @@ -1544,7 +1544,7 @@ SDNode *ARMDAGToDAGISel::createGPRPairNode(EVT VT, SDValue V0, SDValue V1) { /// \brief Form a D register from a pair of S registers. SDNode *ARMDAGToDAGISel::createSRegPairNode(EVT VT, SDValue V0, SDValue V1) { - DebugLoc dl = V0.getNode()->getDebugLoc(); + SDLoc dl(V0.getNode()); SDValue RegClass = CurDAG->getTargetConstant(ARM::DPR_VFP2RegClassID, MVT::i32); SDValue SubReg0 = CurDAG->getTargetConstant(ARM::ssub_0, MVT::i32); @@ -1555,7 +1555,7 @@ SDNode *ARMDAGToDAGISel::createSRegPairNode(EVT VT, SDValue V0, SDValue V1) { /// \brief Form a quad register from a pair of D registers. SDNode *ARMDAGToDAGISel::createDRegPairNode(EVT VT, SDValue V0, SDValue V1) { - DebugLoc dl = V0.getNode()->getDebugLoc(); + SDLoc dl(V0.getNode()); SDValue RegClass = CurDAG->getTargetConstant(ARM::QPRRegClassID, MVT::i32); SDValue SubReg0 = CurDAG->getTargetConstant(ARM::dsub_0, MVT::i32); SDValue SubReg1 = CurDAG->getTargetConstant(ARM::dsub_1, MVT::i32); @@ -1565,7 +1565,7 @@ SDNode *ARMDAGToDAGISel::createDRegPairNode(EVT VT, SDValue V0, SDValue V1) { /// \brief Form 4 consecutive D registers from a pair of Q registers. 
SDNode *ARMDAGToDAGISel::createQRegPairNode(EVT VT, SDValue V0, SDValue V1) { - DebugLoc dl = V0.getNode()->getDebugLoc(); + SDLoc dl(V0.getNode()); SDValue RegClass = CurDAG->getTargetConstant(ARM::QQPRRegClassID, MVT::i32); SDValue SubReg0 = CurDAG->getTargetConstant(ARM::qsub_0, MVT::i32); SDValue SubReg1 = CurDAG->getTargetConstant(ARM::qsub_1, MVT::i32); @@ -1576,7 +1576,7 @@ SDNode *ARMDAGToDAGISel::createQRegPairNode(EVT VT, SDValue V0, SDValue V1) { /// \brief Form 4 consecutive S registers. SDNode *ARMDAGToDAGISel::createQuadSRegsNode(EVT VT, SDValue V0, SDValue V1, SDValue V2, SDValue V3) { - DebugLoc dl = V0.getNode()->getDebugLoc(); + SDLoc dl(V0.getNode()); SDValue RegClass = CurDAG->getTargetConstant(ARM::QPR_VFP2RegClassID, MVT::i32); SDValue SubReg0 = CurDAG->getTargetConstant(ARM::ssub_0, MVT::i32); @@ -1591,7 +1591,7 @@ SDNode *ARMDAGToDAGISel::createQuadSRegsNode(EVT VT, SDValue V0, SDValue V1, /// \brief Form 4 consecutive D registers. SDNode *ARMDAGToDAGISel::createQuadDRegsNode(EVT VT, SDValue V0, SDValue V1, SDValue V2, SDValue V3) { - DebugLoc dl = V0.getNode()->getDebugLoc(); + SDLoc dl(V0.getNode()); SDValue RegClass = CurDAG->getTargetConstant(ARM::QQPRRegClassID, MVT::i32); SDValue SubReg0 = CurDAG->getTargetConstant(ARM::dsub_0, MVT::i32); SDValue SubReg1 = CurDAG->getTargetConstant(ARM::dsub_1, MVT::i32); @@ -1605,7 +1605,7 @@ SDNode *ARMDAGToDAGISel::createQuadDRegsNode(EVT VT, SDValue V0, SDValue V1, /// \brief Form 4 consecutive Q registers. SDNode *ARMDAGToDAGISel::createQuadQRegsNode(EVT VT, SDValue V0, SDValue V1, SDValue V2, SDValue V3) { - DebugLoc dl = V0.getNode()->getDebugLoc(); + SDLoc dl(V0.getNode()); SDValue RegClass = CurDAG->getTargetConstant(ARM::QQQQPRRegClassID, MVT::i32); SDValue SubReg0 = CurDAG->getTargetConstant(ARM::qsub_0, MVT::i32); SDValue SubReg1 = CurDAG->getTargetConstant(ARM::qsub_1, MVT::i32); @@ -1689,7 +1689,7 @@ SDNode *ARMDAGToDAGISel::SelectVLD(SDNode *N, bool isUpdating, unsigned NumVecs, const uint16_t *QOpcodes0, const uint16_t *QOpcodes1) { assert(NumVecs >= 1 && NumVecs <= 4 && "VLD NumVecs out-of-range"); - DebugLoc dl = N->getDebugLoc(); + SDLoc dl(N); SDValue MemAddr, Align; unsigned AddrOpIdx = isUpdating ? 1 : 2; @@ -1821,7 +1821,7 @@ SDNode *ARMDAGToDAGISel::SelectVST(SDNode *N, bool isUpdating, unsigned NumVecs, const uint16_t *QOpcodes0, const uint16_t *QOpcodes1) { assert(NumVecs >= 1 && NumVecs <= 4 && "VST NumVecs out-of-range"); - DebugLoc dl = N->getDebugLoc(); + SDLoc dl(N); SDValue MemAddr, Align; unsigned AddrOpIdx = isUpdating ? 1 : 2; @@ -1966,7 +1966,7 @@ SDNode *ARMDAGToDAGISel::SelectVLDSTLane(SDNode *N, bool IsLoad, const uint16_t *DOpcodes, const uint16_t *QOpcodes) { assert(NumVecs >=2 && NumVecs <= 4 && "VLDSTLane NumVecs out-of-range"); - DebugLoc dl = N->getDebugLoc(); + SDLoc dl(N); SDValue MemAddr, Align; unsigned AddrOpIdx = isUpdating ? 
1 : 2; @@ -2084,7 +2084,7 @@ SDNode *ARMDAGToDAGISel::SelectVLDDup(SDNode *N, bool isUpdating, unsigned NumVecs, const uint16_t *Opcodes) { assert(NumVecs >=2 && NumVecs <= 4 && "VLDDup NumVecs out-of-range"); - DebugLoc dl = N->getDebugLoc(); + SDLoc dl(N); SDValue MemAddr, Align; if (!SelectAddrMode6(N, N->getOperand(1), MemAddr, Align)) @@ -2166,7 +2166,7 @@ SDNode *ARMDAGToDAGISel::SelectVLDDup(SDNode *N, bool isUpdating, SDNode *ARMDAGToDAGISel::SelectVTBL(SDNode *N, bool IsExt, unsigned NumVecs, unsigned Opc) { assert(NumVecs >= 2 && NumVecs <= 4 && "VTBL NumVecs out-of-range"); - DebugLoc dl = N->getDebugLoc(); + SDLoc dl(N); EVT VT = N->getValueType(0); unsigned FirstTblReg = IsExt ? 2 : 1; @@ -2536,7 +2536,7 @@ SDNode *ARMDAGToDAGISel::SelectAtomic64(SDNode *Node, unsigned Opc) { Ops.push_back(Node->getOperand(0)); // Chain MachineSDNode::mmo_iterator MemOp = MF->allocateMemRefsArray(1); MemOp[0] = cast<MemSDNode>(Node)->getMemOperand(); - SDNode *ResNode = CurDAG->getMachineNode(Opc, Node->getDebugLoc(), + SDNode *ResNode = CurDAG->getMachineNode(Opc, SDLoc(Node), MVT::i32, MVT::i32, MVT::Other, Ops); cast<MachineSDNode>(ResNode)->setMemRefs(MemOp, MemOp + 1); @@ -2544,7 +2544,7 @@ SDNode *ARMDAGToDAGISel::SelectAtomic64(SDNode *Node, unsigned Opc) { } SDNode *ARMDAGToDAGISel::Select(SDNode *N) { - DebugLoc dl = N->getDebugLoc(); + SDLoc dl(N); if (N->isMachineOpcode()) return NULL; // Already selected. @@ -3121,7 +3121,7 @@ SDNode *ARMDAGToDAGISel::Select(SDNode *N) { case Intrinsic::arm_ldrexd: { SDValue MemAddr = N->getOperand(2); - DebugLoc dl = N->getDebugLoc(); + SDLoc dl(N); SDValue Chain = N->getOperand(0); bool isThumb = Subtarget->isThumb() && Subtarget->hasThumb2(); @@ -3179,7 +3179,7 @@ SDNode *ARMDAGToDAGISel::Select(SDNode *N) { } case Intrinsic::arm_strexd: { - DebugLoc dl = N->getDebugLoc(); + SDLoc dl(N); SDValue Chain = N->getOperand(0); SDValue Val0 = N->getOperand(2); SDValue Val1 = N->getOperand(3); @@ -3383,7 +3383,7 @@ SDNode *ARMDAGToDAGISel::Select(SDNode *N) { } case ARMISD::VTBL1: { - DebugLoc dl = N->getDebugLoc(); + SDLoc dl(N); EVT VT = N->getValueType(0); SmallVector<SDValue, 6> Ops; @@ -3394,7 +3394,7 @@ SDNode *ARMDAGToDAGISel::Select(SDNode *N) { return CurDAG->getMachineNode(ARM::VTBL1, dl, VT, Ops); } case ARMISD::VTBL2: { - DebugLoc dl = N->getDebugLoc(); + SDLoc dl(N); EVT VT = N->getValueType(0); // Form a REG_SEQUENCE to force register allocation. @@ -3462,7 +3462,7 @@ SDNode *ARMDAGToDAGISel::SelectInlineAsm(SDNode *N){ if (AsmString.find(":H}") == StringRef::npos) return NULL; - DebugLoc dl = N->getDebugLoc(); + SDLoc dl(N); SDValue Glue = N->getOperand(NumOps-1); // Glue node will be appended late. 
@@ -3567,7 +3567,7 @@ SDNode *ARMDAGToDAGISel::SelectInlineAsm(SDNode *N){ if (!Changed) return NULL; - SDValue New = CurDAG->getNode(ISD::INLINEASM, N->getDebugLoc(), + SDValue New = CurDAG->getNode(ISD::INLINEASM, SDLoc(N), CurDAG->getVTList(MVT::Other, MVT::Glue), &AsmNodeOperands[0], AsmNodeOperands.size()); New->setNodeId(-1); diff --git a/lib/Target/ARM/ARMISelLowering.cpp b/lib/Target/ARM/ARMISelLowering.cpp index b951715..611f234 100644 --- a/lib/Target/ARM/ARMISelLowering.cpp +++ b/lib/Target/ARM/ARMISelLowering.cpp @@ -1235,7 +1235,7 @@ SDValue ARMTargetLowering::LowerCallResult(SDValue Chain, SDValue InFlag, CallingConv::ID CallConv, bool isVarArg, const SmallVectorImpl<ISD::InputArg> &Ins, - DebugLoc dl, SelectionDAG &DAG, + SDLoc dl, SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals, bool isThisReturn, SDValue ThisVal) const { @@ -1316,7 +1316,7 @@ ARMTargetLowering::LowerCallResult(SDValue Chain, SDValue InFlag, SDValue ARMTargetLowering::LowerMemOpCallTo(SDValue Chain, SDValue StackPtr, SDValue Arg, - DebugLoc dl, SelectionDAG &DAG, + SDLoc dl, SelectionDAG &DAG, const CCValAssign &VA, ISD::ArgFlagsTy Flags) const { unsigned LocMemOffset = VA.getLocMemOffset(); @@ -1327,7 +1327,7 @@ ARMTargetLowering::LowerMemOpCallTo(SDValue Chain, false, false, 0); } -void ARMTargetLowering::PassF64ArgInRegs(DebugLoc dl, SelectionDAG &DAG, +void ARMTargetLowering::PassF64ArgInRegs(SDLoc dl, SelectionDAG &DAG, SDValue Chain, SDValue &Arg, RegsToPassVector &RegsToPass, CCValAssign &VA, CCValAssign &NextVA, @@ -1359,7 +1359,7 @@ SDValue ARMTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI, SmallVectorImpl<SDValue> &InVals) const { SelectionDAG &DAG = CLI.DAG; - DebugLoc &dl = CLI.DL; + SDLoc &dl = CLI.DL; SmallVector<ISD::OutputArg, 32> &Outs = CLI.Outs; SmallVector<SDValue, 32> &OutVals = CLI.OutVals; SmallVector<ISD::InputArg, 32> &Ins = CLI.Ins; @@ -2021,7 +2021,7 @@ ARMTargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv, bool isVarArg, const SmallVectorImpl<ISD::OutputArg> &Outs, const SmallVectorImpl<SDValue> &OutVals, - DebugLoc dl, SelectionDAG &DAG) const { + SDLoc dl, SelectionDAG &DAG) const { // CCValAssign - represent the assignment of the return value to a location. SmallVector<CCValAssign, 16> RVLocs; @@ -2188,7 +2188,7 @@ bool ARMTargetLowering::mayBeEmittedAsTailCall(CallInst *CI) const { static SDValue LowerConstantPool(SDValue Op, SelectionDAG &DAG) { EVT PtrVT = Op.getValueType(); // FIXME there is no actual debug info here - DebugLoc dl = Op.getDebugLoc(); + SDLoc dl(Op); ConstantPoolSDNode *CP = cast<ConstantPoolSDNode>(Op); SDValue Res; if (CP->isMachineConstantPoolEntry()) @@ -2209,7 +2209,7 @@ SDValue ARMTargetLowering::LowerBlockAddress(SDValue Op, MachineFunction &MF = DAG.getMachineFunction(); ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>(); unsigned ARMPCLabelIndex = 0; - DebugLoc DL = Op.getDebugLoc(); + SDLoc DL(Op); EVT PtrVT = getPointerTy(); const BlockAddress *BA = cast<BlockAddressSDNode>(Op)->getBlockAddress(); Reloc::Model RelocM = getTargetMachine().getRelocationModel(); @@ -2238,7 +2238,7 @@ SDValue ARMTargetLowering::LowerBlockAddress(SDValue Op, SDValue ARMTargetLowering::LowerToTLSGeneralDynamicModel(GlobalAddressSDNode *GA, SelectionDAG &DAG) const { - DebugLoc dl = GA->getDebugLoc(); + SDLoc dl(GA); EVT PtrVT = getPointerTy(); unsigned char PCAdj = Subtarget->isThumb() ? 
4 : 8; MachineFunction &MF = DAG.getMachineFunction(); @@ -2281,7 +2281,7 @@ ARMTargetLowering::LowerToTLSExecModels(GlobalAddressSDNode *GA, SelectionDAG &DAG, TLSModel::Model model) const { const GlobalValue *GV = GA->getGlobal(); - DebugLoc dl = GA->getDebugLoc(); + SDLoc dl(GA); SDValue Offset; SDValue Chain = DAG.getEntryNode(); EVT PtrVT = getPointerTy(); @@ -2351,7 +2351,7 @@ ARMTargetLowering::LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const { SDValue ARMTargetLowering::LowerGlobalAddressELF(SDValue Op, SelectionDAG &DAG) const { EVT PtrVT = getPointerTy(); - DebugLoc dl = Op.getDebugLoc(); + SDLoc dl(Op); const GlobalValue *GV = cast<GlobalAddressSDNode>(Op)->getGlobal(); if (getTargetMachine().getRelocationModel() == Reloc::PIC_) { bool UseGOTOFF = GV->hasLocalLinkage() || GV->hasHiddenVisibility(); @@ -2394,7 +2394,7 @@ SDValue ARMTargetLowering::LowerGlobalAddressELF(SDValue Op, SDValue ARMTargetLowering::LowerGlobalAddressDarwin(SDValue Op, SelectionDAG &DAG) const { EVT PtrVT = getPointerTy(); - DebugLoc dl = Op.getDebugLoc(); + SDLoc dl(Op); const GlobalValue *GV = cast<GlobalAddressSDNode>(Op)->getGlobal(); Reloc::Model RelocM = getTargetMachine().getRelocationModel(); @@ -2459,7 +2459,7 @@ SDValue ARMTargetLowering::LowerGLOBAL_OFFSET_TABLE(SDValue Op, ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>(); unsigned ARMPCLabelIndex = AFI->createPICLabelUId(); EVT PtrVT = getPointerTy(); - DebugLoc dl = Op.getDebugLoc(); + SDLoc dl(Op); unsigned PCAdj = Subtarget->isThumb() ? 4 : 8; ARMConstantPoolValue *CPV = ARMConstantPoolSymbol::Create(*DAG.getContext(), "_GLOBAL_OFFSET_TABLE_", @@ -2475,7 +2475,7 @@ SDValue ARMTargetLowering::LowerGLOBAL_OFFSET_TABLE(SDValue Op, SDValue ARMTargetLowering::LowerEH_SJLJ_SETJMP(SDValue Op, SelectionDAG &DAG) const { - DebugLoc dl = Op.getDebugLoc(); + SDLoc dl(Op); SDValue Val = DAG.getConstant(0, MVT::i32); return DAG.getNode(ARMISD::EH_SJLJ_SETJMP, dl, DAG.getVTList(MVT::i32, MVT::Other), Op.getOperand(0), @@ -2484,7 +2484,7 @@ ARMTargetLowering::LowerEH_SJLJ_SETJMP(SDValue Op, SelectionDAG &DAG) const { SDValue ARMTargetLowering::LowerEH_SJLJ_LONGJMP(SDValue Op, SelectionDAG &DAG) const { - DebugLoc dl = Op.getDebugLoc(); + SDLoc dl(Op); return DAG.getNode(ARMISD::EH_SJLJ_LONGJMP, dl, MVT::Other, Op.getOperand(0), Op.getOperand(1), DAG.getConstant(0, MVT::i32)); } @@ -2493,7 +2493,7 @@ SDValue ARMTargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG, const ARMSubtarget *Subtarget) const { unsigned IntNo = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue(); - DebugLoc dl = Op.getDebugLoc(); + SDLoc dl(Op); switch (IntNo) { default: return SDValue(); // Don't custom lower most intrinsics. case Intrinsic::arm_thread_pointer: { @@ -2529,7 +2529,7 @@ ARMTargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG, case Intrinsic::arm_neon_vmullu: { unsigned NewOpc = (IntNo == Intrinsic::arm_neon_vmulls) ? ARMISD::VMULLs : ARMISD::VMULLu; - return DAG.getNode(NewOpc, Op.getDebugLoc(), Op.getValueType(), + return DAG.getNode(NewOpc, SDLoc(Op), Op.getValueType(), Op.getOperand(1), Op.getOperand(2)); } } @@ -2538,7 +2538,7 @@ ARMTargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG, static SDValue LowerATOMIC_FENCE(SDValue Op, SelectionDAG &DAG, const ARMSubtarget *Subtarget) { // FIXME: handle "fence singlethread" more efficiently. - DebugLoc dl = Op.getDebugLoc(); + SDLoc dl(Op); if (!Subtarget->hasDataBarrier()) { // Some ARMv6 cpus can support data barriers with an mcr instruction. 
// Thumb1 and pre-v6 ARM mode use a libcall instead and should never get @@ -2561,7 +2561,7 @@ static SDValue LowerPREFETCH(SDValue Op, SelectionDAG &DAG, // Just preserve the chain. return Op.getOperand(0); - DebugLoc dl = Op.getDebugLoc(); + SDLoc dl(Op); unsigned isRead = ~cast<ConstantSDNode>(Op.getOperand(2))->getZExtValue() & 1; if (!isRead && (!Subtarget->hasV7Ops() || !Subtarget->hasMPExtension())) @@ -2586,7 +2586,7 @@ static SDValue LowerVASTART(SDValue Op, SelectionDAG &DAG) { // vastart just stores the address of the VarArgsFrameIndex slot into the // memory location argument. - DebugLoc dl = Op.getDebugLoc(); + SDLoc dl(Op); EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(); SDValue FR = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT); const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue(); @@ -2597,7 +2597,7 @@ static SDValue LowerVASTART(SDValue Op, SelectionDAG &DAG) { SDValue ARMTargetLowering::GetF64FormalArgument(CCValAssign &VA, CCValAssign &NextVA, SDValue &Root, SelectionDAG &DAG, - DebugLoc dl) const { + SDLoc dl) const { MachineFunction &MF = DAG.getMachineFunction(); ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>(); @@ -2686,7 +2686,7 @@ ARMTargetLowering::computeRegArea(CCState &CCInfo, MachineFunction &MF, // Return: The frame index registers were stored into. int ARMTargetLowering::StoreByValRegs(CCState &CCInfo, SelectionDAG &DAG, - DebugLoc dl, SDValue &Chain, + SDLoc dl, SDValue &Chain, const Value *OrigArg, unsigned InRegsParamRecordIdx, unsigned OffsetFromOrigArg, @@ -2779,7 +2779,7 @@ ARMTargetLowering::StoreByValRegs(CCState &CCInfo, SelectionDAG &DAG, // Setup stack frame, the va_list pointer will start from. void ARMTargetLowering::VarArgStyleRegisters(CCState &CCInfo, SelectionDAG &DAG, - DebugLoc dl, SDValue &Chain, + SDLoc dl, SDValue &Chain, unsigned ArgOffset, bool ForceMutable) const { MachineFunction &MF = DAG.getMachineFunction(); @@ -2802,7 +2802,7 @@ ARMTargetLowering::LowerFormalArguments(SDValue Chain, CallingConv::ID CallConv, bool isVarArg, const SmallVectorImpl<ISD::InputArg> &Ins, - DebugLoc dl, SelectionDAG &DAG, + SDLoc dl, SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const { MachineFunction &MF = DAG.getMachineFunction(); @@ -2982,7 +2982,7 @@ static bool isFloatingPointZero(SDValue Op) { SDValue ARMTargetLowering::getARMCmp(SDValue LHS, SDValue RHS, ISD::CondCode CC, SDValue &ARMcc, SelectionDAG &DAG, - DebugLoc dl) const { + SDLoc dl) const { if (ConstantSDNode *RHSC = dyn_cast<ConstantSDNode>(RHS.getNode())) { unsigned C = RHSC->getZExtValue(); if (!isLegalICmpImmediate(C)) { @@ -3040,7 +3040,7 @@ ARMTargetLowering::getARMCmp(SDValue LHS, SDValue RHS, ISD::CondCode CC, /// Returns a appropriate VFP CMP (fcmp{s|d}+fmstat) for the given operands. 
SDValue ARMTargetLowering::getVFPCmp(SDValue LHS, SDValue RHS, SelectionDAG &DAG, - DebugLoc dl) const { + SDLoc dl) const { SDValue Cmp; if (!isFloatingPointZero(RHS)) Cmp = DAG.getNode(ARMISD::CMPFP, dl, MVT::Glue, LHS, RHS); @@ -3054,7 +3054,7 @@ ARMTargetLowering::getVFPCmp(SDValue LHS, SDValue RHS, SelectionDAG &DAG, SDValue ARMTargetLowering::duplicateCmp(SDValue Cmp, SelectionDAG &DAG) const { unsigned Opc = Cmp.getOpcode(); - DebugLoc DL = Cmp.getDebugLoc(); + SDLoc DL(Cmp); if (Opc == ARMISD::CMP || Opc == ARMISD::CMPZ) return DAG.getNode(Opc, DL, MVT::Glue, Cmp.getOperand(0),Cmp.getOperand(1)); @@ -3074,7 +3074,7 @@ SDValue ARMTargetLowering::LowerSELECT(SDValue Op, SelectionDAG &DAG) const { SDValue Cond = Op.getOperand(0); SDValue SelectTrue = Op.getOperand(1); SDValue SelectFalse = Op.getOperand(2); - DebugLoc dl = Op.getDebugLoc(); + SDLoc dl(Op); // Convert: // @@ -3129,7 +3129,7 @@ SDValue ARMTargetLowering::LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const { ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(4))->get(); SDValue TrueVal = Op.getOperand(2); SDValue FalseVal = Op.getOperand(3); - DebugLoc dl = Op.getDebugLoc(); + SDLoc dl(Op); if (LHS.getValueType() == MVT::i32) { SDValue ARMcc; @@ -3184,7 +3184,7 @@ static SDValue bitcastf32Toi32(SDValue Op, SelectionDAG &DAG) { return DAG.getConstant(0, MVT::i32); if (LoadSDNode *Ld = dyn_cast<LoadSDNode>(Op)) - return DAG.getLoad(MVT::i32, Op.getDebugLoc(), + return DAG.getLoad(MVT::i32, SDLoc(Op), Ld->getChain(), Ld->getBasePtr(), Ld->getPointerInfo(), Ld->isVolatile(), Ld->isNonTemporal(), Ld->isInvariant(), Ld->getAlignment()); @@ -3202,7 +3202,7 @@ static void expandf64Toi32(SDValue Op, SelectionDAG &DAG, if (LoadSDNode *Ld = dyn_cast<LoadSDNode>(Op)) { SDValue Ptr = Ld->getBasePtr(); - RetVal1 = DAG.getLoad(MVT::i32, Op.getDebugLoc(), + RetVal1 = DAG.getLoad(MVT::i32, SDLoc(Op), Ld->getChain(), Ptr, Ld->getPointerInfo(), Ld->isVolatile(), Ld->isNonTemporal(), @@ -3210,9 +3210,9 @@ static void expandf64Toi32(SDValue Op, SelectionDAG &DAG, EVT PtrType = Ptr.getValueType(); unsigned NewAlign = MinAlign(Ld->getAlignment(), 4); - SDValue NewPtr = DAG.getNode(ISD::ADD, Op.getDebugLoc(), + SDValue NewPtr = DAG.getNode(ISD::ADD, SDLoc(Op), PtrType, Ptr, DAG.getConstant(4, PtrType)); - RetVal2 = DAG.getLoad(MVT::i32, Op.getDebugLoc(), + RetVal2 = DAG.getLoad(MVT::i32, SDLoc(Op), Ld->getChain(), NewPtr, Ld->getPointerInfo().getWithOffset(4), Ld->isVolatile(), Ld->isNonTemporal(), @@ -3232,7 +3232,7 @@ ARMTargetLowering::OptimizeVFPBrcond(SDValue Op, SelectionDAG &DAG) const { SDValue LHS = Op.getOperand(2); SDValue RHS = Op.getOperand(3); SDValue Dest = Op.getOperand(4); - DebugLoc dl = Op.getDebugLoc(); + SDLoc dl(Op); bool LHSSeenZero = false; bool LHSOk = canChangeToInt(LHS, LHSSeenZero, Subtarget); @@ -3282,7 +3282,7 @@ SDValue ARMTargetLowering::LowerBR_CC(SDValue Op, SelectionDAG &DAG) const { SDValue LHS = Op.getOperand(2); SDValue RHS = Op.getOperand(3); SDValue Dest = Op.getOperand(4); - DebugLoc dl = Op.getDebugLoc(); + SDLoc dl(Op); if (LHS.getValueType() == MVT::i32) { SDValue ARMcc; @@ -3323,7 +3323,7 @@ SDValue ARMTargetLowering::LowerBR_JT(SDValue Op, SelectionDAG &DAG) const { SDValue Chain = Op.getOperand(0); SDValue Table = Op.getOperand(1); SDValue Index = Op.getOperand(2); - DebugLoc dl = Op.getDebugLoc(); + SDLoc dl(Op); EVT PTy = getPointerTy(); JumpTableSDNode *JT = cast<JumpTableSDNode>(Table); @@ -3359,7 +3359,7 @@ SDValue ARMTargetLowering::LowerBR_JT(SDValue Op, SelectionDAG &DAG) const { static 
SDValue LowerVectorFP_TO_INT(SDValue Op, SelectionDAG &DAG) { EVT VT = Op.getValueType(); - DebugLoc dl = Op.getDebugLoc(); + SDLoc dl(Op); if (Op.getValueType().getVectorElementType() == MVT::i32) { if (Op.getOperand(0).getValueType().getVectorElementType() == MVT::f32) @@ -3381,7 +3381,7 @@ static SDValue LowerFP_TO_INT(SDValue Op, SelectionDAG &DAG) { if (VT.isVector()) return LowerVectorFP_TO_INT(Op, DAG); - DebugLoc dl = Op.getDebugLoc(); + SDLoc dl(Op); unsigned Opc; switch (Op.getOpcode()) { @@ -3399,7 +3399,7 @@ static SDValue LowerFP_TO_INT(SDValue Op, SelectionDAG &DAG) { static SDValue LowerVectorINT_TO_FP(SDValue Op, SelectionDAG &DAG) { EVT VT = Op.getValueType(); - DebugLoc dl = Op.getDebugLoc(); + SDLoc dl(Op); if (Op.getOperand(0).getValueType().getVectorElementType() == MVT::i32) { if (VT.getVectorElementType() == MVT::f32) @@ -3435,7 +3435,7 @@ static SDValue LowerINT_TO_FP(SDValue Op, SelectionDAG &DAG) { if (VT.isVector()) return LowerVectorINT_TO_FP(Op, DAG); - DebugLoc dl = Op.getDebugLoc(); + SDLoc dl(Op); unsigned Opc; switch (Op.getOpcode()) { @@ -3456,7 +3456,7 @@ SDValue ARMTargetLowering::LowerFCOPYSIGN(SDValue Op, SelectionDAG &DAG) const { // Implement fcopysign with a fabs and a conditional fneg. SDValue Tmp0 = Op.getOperand(0); SDValue Tmp1 = Op.getOperand(1); - DebugLoc dl = Op.getDebugLoc(); + SDLoc dl(Op); EVT VT = Op.getValueType(); EVT SrcVT = Tmp1.getValueType(); bool InGPR = Tmp0.getOpcode() == ISD::BITCAST || @@ -3540,7 +3540,7 @@ SDValue ARMTargetLowering::LowerRETURNADDR(SDValue Op, SelectionDAG &DAG) const{ MFI->setReturnAddressIsTaken(true); EVT VT = Op.getValueType(); - DebugLoc dl = Op.getDebugLoc(); + SDLoc dl(Op); unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue(); if (Depth) { SDValue FrameAddr = LowerFRAMEADDR(Op, DAG); @@ -3560,7 +3560,7 @@ SDValue ARMTargetLowering::LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const { MFI->setFrameAddressIsTaken(true); EVT VT = Op.getValueType(); - DebugLoc dl = Op.getDebugLoc(); // FIXME probably not meaningful + SDLoc dl(Op); // FIXME probably not meaningful unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue(); unsigned FrameReg = (Subtarget->isThumb() || Subtarget->isTargetDarwin()) ? ARM::R7 : ARM::R11; @@ -3589,7 +3589,7 @@ static SDValue ExpandVectorExtension(SDNode *N, SelectionDAG &DAG) { SrcVT.getSizeInBits()*8 != DestVT.getSizeInBits()) return SDValue(); - DebugLoc dl = N->getDebugLoc(); + SDLoc dl(N); unsigned SrcEltSize = SrcVT.getVectorElementType().getSizeInBits(); unsigned DestEltSize = DestVT.getVectorElementType().getSizeInBits(); unsigned NumElts = SrcVT.getVectorNumElements(); @@ -3620,7 +3620,7 @@ static SDValue ExpandVectorExtension(SDNode *N, SelectionDAG &DAG) { /// vectors), since the legalizer won't know what to do with that. static SDValue ExpandBITCAST(SDNode *N, SelectionDAG &DAG) { const TargetLowering &TLI = DAG.getTargetLoweringInfo(); - DebugLoc dl = N->getDebugLoc(); + SDLoc dl(N); SDValue Op = N->getOperand(0); // This function is only supposed to be called for i64 types, either as the @@ -3657,7 +3657,7 @@ static SDValue ExpandBITCAST(SDNode *N, SelectionDAG &DAG) { /// not support i64 elements, so sometimes the zero vectors will need to be /// explicitly constructed. Regardless, use a canonical VMOV to create the /// zero vector. 
-static SDValue getZeroVector(EVT VT, SelectionDAG &DAG, DebugLoc dl) { +static SDValue getZeroVector(EVT VT, SelectionDAG &DAG, SDLoc dl) { assert(VT.isVector() && "Expected a vector type"); // The canonical modified immediate encoding of a zero vector is....0! SDValue EncodedVal = DAG.getTargetConstant(0, MVT::i32); @@ -3673,7 +3673,7 @@ SDValue ARMTargetLowering::LowerShiftRightParts(SDValue Op, assert(Op.getNumOperands() == 3 && "Not a double-shift!"); EVT VT = Op.getValueType(); unsigned VTBits = VT.getSizeInBits(); - DebugLoc dl = Op.getDebugLoc(); + SDLoc dl(Op); SDValue ShOpLo = Op.getOperand(0); SDValue ShOpHi = Op.getOperand(1); SDValue ShAmt = Op.getOperand(2); @@ -3709,7 +3709,7 @@ SDValue ARMTargetLowering::LowerShiftLeftParts(SDValue Op, assert(Op.getNumOperands() == 3 && "Not a double-shift!"); EVT VT = Op.getValueType(); unsigned VTBits = VT.getSizeInBits(); - DebugLoc dl = Op.getDebugLoc(); + SDLoc dl(Op); SDValue ShOpLo = Op.getOperand(0); SDValue ShOpHi = Op.getOperand(1); SDValue ShAmt = Op.getOperand(2); @@ -3742,7 +3742,7 @@ SDValue ARMTargetLowering::LowerFLT_ROUNDS_(SDValue Op, // The ARM rounding mode value to FLT_ROUNDS mapping is 0->1, 1->2, 2->3, 3->0 // The formula we use to implement this is (((FPSCR + 1 << 22) >> 22) & 3) // so that the shift + and get folded into a bitfield extract. - DebugLoc dl = Op.getDebugLoc(); + SDLoc dl(Op); SDValue FPSCR = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, MVT::i32, DAG.getConstant(Intrinsic::arm_get_fpscr, MVT::i32)); @@ -3757,7 +3757,7 @@ SDValue ARMTargetLowering::LowerFLT_ROUNDS_(SDValue Op, static SDValue LowerCTTZ(SDNode *N, SelectionDAG &DAG, const ARMSubtarget *ST) { EVT VT = N->getValueType(0); - DebugLoc dl = N->getDebugLoc(); + SDLoc dl(N); if (!ST->hasV6T2Ops()) return SDValue(); @@ -3781,7 +3781,7 @@ static SDValue LowerCTTZ(SDNode *N, SelectionDAG &DAG, /// vuzp: = [k0 k1 k2 k3 k0 k1 k2 k3] each ki is 8-bits) static SDValue getCTPOP16BitCounts(SDNode *N, SelectionDAG &DAG) { EVT VT = N->getValueType(0); - DebugLoc DL = N->getDebugLoc(); + SDLoc DL(N); EVT VT8Bit = VT.is64BitVector() ? MVT::v8i8 : MVT::v16i8; SDValue N0 = DAG.getNode(ISD::BITCAST, DL, VT8Bit, N->getOperand(0)); @@ -3803,7 +3803,7 @@ static SDValue getCTPOP16BitCounts(SDNode *N, SelectionDAG &DAG) { /// v4i16:Extracted = [k0 k1 k2 k3 ] static SDValue lowerCTPOP16BitElements(SDNode *N, SelectionDAG &DAG) { EVT VT = N->getValueType(0); - DebugLoc DL = N->getDebugLoc(); + SDLoc DL(N); SDValue BitCounts = getCTPOP16BitCounts(N, DAG); if (VT.is64BitVector()) { @@ -3838,7 +3838,7 @@ static SDValue lowerCTPOP16BitElements(SDNode *N, SelectionDAG &DAG) { /// static SDValue lowerCTPOP32BitElements(SDNode *N, SelectionDAG &DAG) { EVT VT = N->getValueType(0); - DebugLoc DL = N->getDebugLoc(); + SDLoc DL(N); EVT VT16Bit = VT.is64BitVector() ? 
MVT::v4i16 : MVT::v8i16; @@ -3877,7 +3877,7 @@ static SDValue LowerCTPOP(SDNode *N, SelectionDAG &DAG, static SDValue LowerShift(SDNode *N, SelectionDAG &DAG, const ARMSubtarget *ST) { EVT VT = N->getValueType(0); - DebugLoc dl = N->getDebugLoc(); + SDLoc dl(N); if (!VT.isVector()) return SDValue(); @@ -3912,7 +3912,7 @@ static SDValue LowerShift(SDNode *N, SelectionDAG &DAG, static SDValue Expand64BitShift(SDNode *N, SelectionDAG &DAG, const ARMSubtarget *ST) { EVT VT = N->getValueType(0); - DebugLoc dl = N->getDebugLoc(); + SDLoc dl(N); // We can get here for a node like i32 = ISD::SHL i32, i64 if (VT != MVT::i64) @@ -3958,7 +3958,7 @@ static SDValue LowerVSETCC(SDValue Op, SelectionDAG &DAG) { SDValue CC = Op.getOperand(2); EVT VT = Op.getValueType(); ISD::CondCode SetCCOpcode = cast<CondCodeSDNode>(CC)->get(); - DebugLoc dl = Op.getDebugLoc(); + SDLoc dl(Op); if (Op.getOperand(1).getValueType().isFloatingPoint()) { switch (SetCCOpcode) { @@ -4227,7 +4227,7 @@ SDValue ARMTargetLowering::LowerConstantFP(SDValue Op, SelectionDAG &DAG, APFloat FPVal = CFP->getValueAPF(); int ImmVal = ARM_AM::getFP32Imm(FPVal); if (ImmVal != -1) { - DebugLoc DL = Op.getDebugLoc(); + SDLoc DL(Op); SDValue NewVal = DAG.getTargetConstant(ImmVal, MVT::i32); SDValue VecConstant = DAG.getNode(ARMISD::VMOVFPIMM, DL, MVT::v2f32, NewVal); @@ -4241,7 +4241,7 @@ SDValue ARMTargetLowering::LowerConstantFP(SDValue Op, SelectionDAG &DAG, SDValue NewVal = isNEONModifiedImm(iVal, 0, 32, DAG, VMovVT, false, VMOVModImm); if (NewVal != SDValue()) { - DebugLoc DL = Op.getDebugLoc(); + SDLoc DL(Op); SDValue VecConstant = DAG.getNode(ARMISD::VMOVIMM, DL, VMovVT, NewVal); SDValue VecFConstant = DAG.getNode(ISD::BITCAST, DL, MVT::v2f32, @@ -4254,7 +4254,7 @@ SDValue ARMTargetLowering::LowerConstantFP(SDValue Op, SelectionDAG &DAG, NewVal = isNEONModifiedImm(~iVal & 0xffffffff, 0, 32, DAG, VMovVT, false, VMVNModImm); if (NewVal != SDValue()) { - DebugLoc DL = Op.getDebugLoc(); + SDLoc DL(Op); SDValue VecConstant = DAG.getNode(ARMISD::VMVNIMM, DL, VMovVT, NewVal); SDValue VecFConstant = DAG.getNode(ISD::BITCAST, DL, MVT::v2f32, VecConstant); @@ -4514,7 +4514,7 @@ static bool isReverseMask(ArrayRef<int> M, EVT VT) { // instruction, return an SDValue of such a constant (will become a MOV // instruction). Otherwise return null. static SDValue IsSingleInstrConstant(SDValue N, SelectionDAG &DAG, - const ARMSubtarget *ST, DebugLoc dl) { + const ARMSubtarget *ST, SDLoc dl) { uint64_t Val; if (!isa<ConstantSDNode>(N)) return SDValue(); @@ -4535,7 +4535,7 @@ static SDValue IsSingleInstrConstant(SDValue N, SelectionDAG &DAG, SDValue ARMTargetLowering::LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG, const ARMSubtarget *ST) const { BuildVectorSDNode *BVN = cast<BuildVectorSDNode>(Op.getNode()); - DebugLoc dl = Op.getDebugLoc(); + SDLoc dl(Op); EVT VT = Op.getValueType(); APInt SplatBits, SplatUndef; @@ -4725,7 +4725,7 @@ SDValue ARMTargetLowering::LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG, // shuffle in combination with VEXTs. SDValue ARMTargetLowering::ReconstructShuffle(SDValue Op, SelectionDAG &DAG) const { - DebugLoc dl = Op.getDebugLoc(); + SDLoc dl(Op); EVT VT = Op.getValueType(); unsigned NumElts = VT.getVectorNumElements(); @@ -4914,7 +4914,7 @@ ARMTargetLowering::isShuffleMaskLegal(const SmallVectorImpl<int> &M, /// the specified operations to build the shuffle. 
static SDValue GeneratePerfectShuffle(unsigned PFEntry, SDValue LHS, SDValue RHS, SelectionDAG &DAG, - DebugLoc dl) { + SDLoc dl) { unsigned OpNum = (PFEntry >> 26) & 0x0F; unsigned LHSID = (PFEntry >> 13) & ((1 << 13)-1); unsigned RHSID = (PFEntry >> 0) & ((1 << 13)-1); @@ -4994,7 +4994,7 @@ static SDValue LowerVECTOR_SHUFFLEv8i8(SDValue Op, // Check to see if we can use the VTBL instruction. SDValue V1 = Op.getOperand(0); SDValue V2 = Op.getOperand(1); - DebugLoc DL = Op.getDebugLoc(); + SDLoc DL(Op); SmallVector<SDValue, 8> VTBLMask; for (ArrayRef<int>::iterator @@ -5013,7 +5013,7 @@ static SDValue LowerVECTOR_SHUFFLEv8i8(SDValue Op, static SDValue LowerReverse_VECTOR_SHUFFLEv16i8_v8i16(SDValue Op, SelectionDAG &DAG) { - DebugLoc DL = Op.getDebugLoc(); + SDLoc DL(Op); SDValue OpLHS = Op.getOperand(0); EVT VT = OpLHS.getValueType(); @@ -5031,7 +5031,7 @@ static SDValue LowerReverse_VECTOR_SHUFFLEv16i8_v8i16(SDValue Op, static SDValue LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG) { SDValue V1 = Op.getOperand(0); SDValue V2 = Op.getOperand(1); - DebugLoc dl = Op.getDebugLoc(); + SDLoc dl(Op); EVT VT = Op.getValueType(); ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(Op.getNode()); @@ -5195,7 +5195,7 @@ static SDValue LowerEXTRACT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) { SDValue Vec = Op.getOperand(0); if (Op.getValueType() == MVT::i32 && Vec.getValueType().getVectorElementType().getSizeInBits() < 32) { - DebugLoc dl = Op.getDebugLoc(); + SDLoc dl(Op); return DAG.getNode(ARMISD::VGETLANEu, dl, MVT::i32, Vec, Lane); } @@ -5207,7 +5207,7 @@ static SDValue LowerCONCAT_VECTORS(SDValue Op, SelectionDAG &DAG) { // two 64-bit vectors are concatenated to a 128-bit vector. assert(Op.getValueType().is128BitVector() && Op.getNumOperands() == 2 && "unexpected CONCAT_VECTORS"); - DebugLoc dl = Op.getDebugLoc(); + SDLoc dl(Op); SDValue Val = DAG.getUNDEF(MVT::v2f64); SDValue Op0 = Op.getOperand(0); SDValue Op1 = Op.getOperand(1); @@ -5330,7 +5330,7 @@ static SDValue AddRequiredExtensionForVMULL(SDValue N, SelectionDAG &DAG, // Must extend size to at least 64 bits to be used as an operand for VMULL. EVT NewVT = getExtensionTo64Bits(OrigTy); - return DAG.getNode(ExtOpcode, N->getDebugLoc(), NewVT, N); + return DAG.getNode(ExtOpcode, SDLoc(N), NewVT, N); } /// SkipLoadExtensionForVMULL - return a load of the original vector size that @@ -5343,7 +5343,7 @@ static SDValue SkipLoadExtensionForVMULL(LoadSDNode *LD, SelectionDAG& DAG) { // The load already has the right type. if (ExtendedTy == LD->getMemoryVT()) - return DAG.getLoad(LD->getMemoryVT(), LD->getDebugLoc(), LD->getChain(), + return DAG.getLoad(LD->getMemoryVT(), SDLoc(LD), LD->getChain(), LD->getBasePtr(), LD->getPointerInfo(), LD->isVolatile(), LD->isNonTemporal(), LD->isInvariant(), LD->getAlignment()); @@ -5351,7 +5351,7 @@ static SDValue SkipLoadExtensionForVMULL(LoadSDNode *LD, SelectionDAG& DAG) { // We need to create a zextload/sextload. We cannot just create a load // followed by a zext/zext node because LowerMUL is also run during normal // operation legalization where we can't create illegal types. 
- return DAG.getExtLoad(LD->getExtensionType(), LD->getDebugLoc(), ExtendedTy, + return DAG.getExtLoad(LD->getExtensionType(), SDLoc(LD), ExtendedTy, LD->getChain(), LD->getBasePtr(), LD->getPointerInfo(), LD->getMemoryVT(), LD->isVolatile(), LD->isNonTemporal(), LD->getAlignment()); @@ -5380,7 +5380,7 @@ static SDValue SkipExtensionForVMULL(SDNode *N, SelectionDAG &DAG) { assert(BVN->getOpcode() == ISD::BUILD_VECTOR && BVN->getValueType(0) == MVT::v4i32 && "expected v4i32 BUILD_VECTOR"); unsigned LowElt = DAG.getTargetLoweringInfo().isBigEndian() ? 1 : 0; - return DAG.getNode(ISD::BUILD_VECTOR, N->getDebugLoc(), MVT::v2i32, + return DAG.getNode(ISD::BUILD_VECTOR, SDLoc(N), MVT::v2i32, BVN->getOperand(LowElt), BVN->getOperand(LowElt+2)); } // Construct a new BUILD_VECTOR with elements truncated to half the size. @@ -5397,7 +5397,7 @@ static SDValue SkipExtensionForVMULL(SDNode *N, SelectionDAG &DAG) { // The values are implicitly truncated so sext vs. zext doesn't matter. Ops.push_back(DAG.getConstant(CInt.zextOrTrunc(32), MVT::i32)); } - return DAG.getNode(ISD::BUILD_VECTOR, N->getDebugLoc(), + return DAG.getNode(ISD::BUILD_VECTOR, SDLoc(N), MVT::getVectorVT(TruncVT, NumElts), Ops.data(), NumElts); } @@ -5469,7 +5469,7 @@ static SDValue LowerMUL(SDValue Op, SelectionDAG &DAG) { } // Legalize to a VMULL instruction. - DebugLoc DL = Op.getDebugLoc(); + SDLoc DL(Op); SDValue Op0; SDValue Op1 = SkipExtensionForVMULL(N1, DAG); if (!isMLA) { @@ -5499,7 +5499,7 @@ static SDValue LowerMUL(SDValue Op, SelectionDAG &DAG) { } static SDValue -LowerSDIV_v4i8(SDValue X, SDValue Y, DebugLoc dl, SelectionDAG &DAG) { +LowerSDIV_v4i8(SDValue X, SDValue Y, SDLoc dl, SelectionDAG &DAG) { // Convert to float // float4 xf = vcvt_f32_s32(vmovl_s16(a.lo)); // float4 yf = vcvt_f32_s32(vmovl_s16(b.lo)); @@ -5528,7 +5528,7 @@ LowerSDIV_v4i8(SDValue X, SDValue Y, DebugLoc dl, SelectionDAG &DAG) { } static SDValue -LowerSDIV_v4i16(SDValue N0, SDValue N1, DebugLoc dl, SelectionDAG &DAG) { +LowerSDIV_v4i16(SDValue N0, SDValue N1, SDLoc dl, SelectionDAG &DAG) { SDValue N2; // Convert to float. 
// float4 yf = vcvt_f32_s32(vmovl_s16(y)); @@ -5569,7 +5569,7 @@ static SDValue LowerSDIV(SDValue Op, SelectionDAG &DAG) { assert((VT == MVT::v4i16 || VT == MVT::v8i8) && "unexpected type for custom-lowering ISD::SDIV"); - DebugLoc dl = Op.getDebugLoc(); + SDLoc dl(Op); SDValue N0 = Op.getOperand(0); SDValue N1 = Op.getOperand(1); SDValue N2, N3; @@ -5604,7 +5604,7 @@ static SDValue LowerUDIV(SDValue Op, SelectionDAG &DAG) { assert((VT == MVT::v4i16 || VT == MVT::v8i8) && "unexpected type for custom-lowering ISD::UDIV"); - DebugLoc dl = Op.getDebugLoc(); + SDLoc dl(Op); SDValue N0 = Op.getOperand(0); SDValue N1 = Op.getOperand(1); SDValue N2, N3; @@ -5688,9 +5688,9 @@ static SDValue LowerADDC_ADDE_SUBC_SUBE(SDValue Op, SelectionDAG &DAG) { } if (!ExtraOp) - return DAG.getNode(Opc, Op->getDebugLoc(), VTs, Op.getOperand(0), + return DAG.getNode(Opc, SDLoc(Op), VTs, Op.getOperand(0), Op.getOperand(1)); - return DAG.getNode(Opc, Op->getDebugLoc(), VTs, Op.getOperand(0), + return DAG.getNode(Opc, SDLoc(Op), VTs, Op.getOperand(0), Op.getOperand(1), Op.getOperand(2)); } @@ -5707,7 +5707,7 @@ static SDValue LowerAtomicLoadStore(SDValue Op, SelectionDAG &DAG) { static void ReplaceATOMIC_OP_64(SDNode *Node, SmallVectorImpl<SDValue>& Results, SelectionDAG &DAG, unsigned NewOp) { - DebugLoc dl = Node->getDebugLoc(); + SDLoc dl(Node); assert (Node->getValueType(0) == MVT::i64 && "Only know how to expand i64 atomics"); @@ -5741,7 +5741,7 @@ static void ReplaceREADCYCLECOUNTER(SDNode *N, SmallVectorImpl<SDValue> &Results, SelectionDAG &DAG, const ARMSubtarget *Subtarget) { - DebugLoc DL = N->getDebugLoc(); + SDLoc DL(N); SDValue Cycles32, OutChain; if (Subtarget->hasPerfMon()) { @@ -7789,13 +7789,13 @@ SDValue combineSelectAndUse(SDNode *N, SDValue Slct, SDValue OtherOp, // Slct is now know to be the desired identity constant when CC is true. SDValue TrueVal = OtherOp; - SDValue FalseVal = DAG.getNode(N->getOpcode(), N->getDebugLoc(), VT, + SDValue FalseVal = DAG.getNode(N->getOpcode(), SDLoc(N), VT, OtherOp, NonConstantVal); // Unless SwapSelectOps says CC should be false. 
if (SwapSelectOps) std::swap(TrueVal, FalseVal); - return DAG.getNode(ISD::SELECT, N->getDebugLoc(), VT, + return DAG.getNode(ISD::SELECT, SDLoc(N), VT, CCOp, TrueVal, FalseVal); } @@ -7902,9 +7902,9 @@ static SDValue AddCombineToVPADDL(SDNode *N, SDValue N0, SDValue N1, llvm_unreachable("Invalid vector element type for padd optimization."); } - SDValue tmp = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, N->getDebugLoc(), + SDValue tmp = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, SDLoc(N), widenType, &Ops[0], Ops.size()); - return DAG.getNode(ISD::TRUNCATE, N->getDebugLoc(), VT, tmp); + return DAG.getNode(ISD::TRUNCATE, SDLoc(N), VT, tmp); } static SDValue findMUL_LOHI(SDValue V) { @@ -8029,7 +8029,7 @@ static SDValue AddCombineTo64bitMLAL(SDNode *AddcNode, Ops.push_back(*LowAdd); Ops.push_back(*HiAdd); - SDValue MLALNode = DAG.getNode(FinalOpc, AddcNode->getDebugLoc(), + SDValue MLALNode = DAG.getNode(FinalOpc, SDLoc(AddcNode), DAG.getVTList(MVT::i32, MVT::i32), &Ops[0], Ops.size()); @@ -8137,7 +8137,7 @@ static SDValue PerformVMULCombine(SDNode *N, } EVT VT = N->getValueType(0); - DebugLoc DL = N->getDebugLoc(); + SDLoc DL(N); SDValue N00 = N0->getOperand(0); SDValue N01 = N0->getOperand(1); return DAG.getNode(Opcode, DL, VT, @@ -8171,7 +8171,7 @@ static SDValue PerformMULCombine(SDNode *N, ShiftAmt = ShiftAmt & (32 - 1); SDValue V = N->getOperand(0); - DebugLoc DL = N->getDebugLoc(); + SDLoc DL(N); SDValue Res; MulAmt >>= ShiftAmt; @@ -8235,7 +8235,7 @@ static SDValue PerformANDCombine(SDNode *N, // Attempt to use immediate-form VBIC BuildVectorSDNode *BVN = dyn_cast<BuildVectorSDNode>(N->getOperand(1)); - DebugLoc dl = N->getDebugLoc(); + SDLoc dl(N); EVT VT = N->getValueType(0); SelectionDAG &DAG = DCI.DAG; @@ -8278,7 +8278,7 @@ static SDValue PerformORCombine(SDNode *N, const ARMSubtarget *Subtarget) { // Attempt to use immediate-form VORR BuildVectorSDNode *BVN = dyn_cast<BuildVectorSDNode>(N->getOperand(1)); - DebugLoc dl = N->getDebugLoc(); + SDLoc dl(N); EVT VT = N->getValueType(0); SelectionDAG &DAG = DCI.DAG; @@ -8353,7 +8353,7 @@ static SDValue PerformORCombine(SDNode *N, if (Subtarget->isThumb1Only() || !Subtarget->hasV6T2Ops()) return SDValue(); - DebugLoc DL = N->getDebugLoc(); + SDLoc DL(N); // 1) or (and A, mask), val => ARMbfi A, val, mask // iff (val & mask) == val // @@ -8497,7 +8497,7 @@ static SDValue PerformBFICombine(SDNode *N, unsigned Mask = (1 << Width)-1; unsigned Mask2 = N11C->getZExtValue(); if ((Mask & (~Mask2)) == 0) - return DCI.DAG.getNode(ARMISD::BFI, N->getDebugLoc(), N->getValueType(0), + return DCI.DAG.getNode(ARMISD::BFI, SDLoc(N), N->getValueType(0), N->getOperand(0), N1.getOperand(0), N->getOperand(2)); } @@ -8523,7 +8523,7 @@ static SDValue PerformVMOVRRDCombine(SDNode *N, LoadSDNode *LD = cast<LoadSDNode>(InNode); SelectionDAG &DAG = DCI.DAG; - DebugLoc DL = LD->getDebugLoc(); + SDLoc DL(LD); SDValue BasePtr = LD->getBasePtr(); SDValue NewLD1 = DAG.getLoad(MVT::i32, DL, LD->getChain(), BasePtr, LD->getPointerInfo(), LD->isVolatile(), @@ -8560,7 +8560,7 @@ static SDValue PerformVMOVDRRCombine(SDNode *N, SelectionDAG &DAG) { if (Op0.getOpcode() == ARMISD::VMOVRRD && Op0.getNode() == Op1.getNode() && Op0.getResNo() == 0 && Op1.getResNo() == 1) - return DAG.getNode(ISD::BITCAST, N->getDebugLoc(), + return DAG.getNode(ISD::BITCAST, SDLoc(N), N->getValueType(0), Op0.getOperand(0)); return SDValue(); } @@ -8602,7 +8602,7 @@ static SDValue PerformSTORECombine(SDNode *N, NumElems*SizeRatio); assert(WideVecVT.getSizeInBits() == VT.getSizeInBits()); - DebugLoc DL = 
St->getDebugLoc(); + SDLoc DL(St); SDValue WideVec = DAG.getNode(ISD::BITCAST, DL, WideVecVT, StVal); SmallVector<int, 8> ShuffleVec(NumElems * SizeRatio, -1); for (unsigned i = 0; i < NumElems; ++i) ShuffleVec[i] = i * SizeRatio; @@ -8663,7 +8663,7 @@ static SDValue PerformSTORECombine(SDNode *N, if (StVal.getNode()->getOpcode() == ARMISD::VMOVDRR && StVal.getNode()->hasOneUse()) { SelectionDAG &DAG = DCI.DAG; - DebugLoc DL = St->getDebugLoc(); + SDLoc DL(St); SDValue BasePtr = St->getBasePtr(); SDValue NewST1 = DAG.getStore(St->getChain(), DL, StVal.getNode()->getOperand(0), BasePtr, @@ -8685,14 +8685,14 @@ static SDValue PerformSTORECombine(SDNode *N, // Bitcast an i64 store extracted from a vector to f64. // Otherwise, the i64 value will be legalized to a pair of i32 values. SelectionDAG &DAG = DCI.DAG; - DebugLoc dl = StVal.getDebugLoc(); + SDLoc dl(StVal); SDValue IntVec = StVal.getOperand(0); EVT FloatVT = EVT::getVectorVT(*DAG.getContext(), MVT::f64, IntVec.getValueType().getVectorNumElements()); SDValue Vec = DAG.getNode(ISD::BITCAST, dl, FloatVT, IntVec); SDValue ExtElt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64, Vec, StVal.getOperand(1)); - dl = N->getDebugLoc(); + dl = SDLoc(N); SDValue V = DAG.getNode(ISD::BITCAST, dl, MVT::i64, ExtElt); // Make the DAGCombiner fold the bitcasts. DCI.AddToWorklist(Vec.getNode()); @@ -8738,7 +8738,7 @@ static SDValue PerformBUILD_VECTORCombine(SDNode *N, EVT VT = N->getValueType(0); if (VT.getVectorElementType() != MVT::i64 || !hasNormalLoadOperand(N)) return SDValue(); - DebugLoc dl = N->getDebugLoc(); + SDLoc dl(N); SmallVector<SDValue, 8> Ops; unsigned NumElts = VT.getVectorNumElements(); for (unsigned i = 0; i < NumElts; ++i) { @@ -8765,7 +8765,7 @@ static SDValue PerformInsertEltCombine(SDNode *N, return SDValue(); SelectionDAG &DAG = DCI.DAG; - DebugLoc dl = N->getDebugLoc(); + SDLoc dl(N); EVT FloatVT = EVT::getVectorVT(*DAG.getContext(), MVT::f64, VT.getVectorNumElements()); SDValue Vec = DAG.getNode(ISD::BITCAST, dl, FloatVT, N->getOperand(0)); @@ -8811,7 +8811,7 @@ static SDValue PerformVECTOR_SHUFFLECombine(SDNode *N, SelectionDAG &DAG) { !TLI.isTypeLegal(Concat1Op1.getValueType())) return SDValue(); - SDValue NewConcat = DAG.getNode(ISD::CONCAT_VECTORS, N->getDebugLoc(), VT, + SDValue NewConcat = DAG.getNode(ISD::CONCAT_VECTORS, SDLoc(N), VT, Op0.getOperand(0), Op1.getOperand(0)); // Translate the shuffle mask. 
SmallVector<int, 16> NewMask; @@ -8827,7 +8827,7 @@ static SDValue PerformVECTOR_SHUFFLECombine(SDNode *N, SelectionDAG &DAG) { NewElt = HalfElts + MaskElt - NumElts; NewMask.push_back(NewElt); } - return DAG.getVectorShuffle(VT, N->getDebugLoc(), NewConcat, + return DAG.getVectorShuffle(VT, SDLoc(N), NewConcat, DAG.getUNDEF(VT), NewMask.data()); } @@ -8944,7 +8944,7 @@ static SDValue CombineBaseUpdate(SDNode *N, Ops.push_back(N->getOperand(i)); } MemIntrinsicSDNode *MemInt = cast<MemIntrinsicSDNode>(N); - SDValue UpdN = DAG.getMemIntrinsicNode(NewOpc, N->getDebugLoc(), SDTys, + SDValue UpdN = DAG.getMemIntrinsicNode(NewOpc, SDLoc(N), SDTys, Ops.data(), Ops.size(), MemInt->getMemoryVT(), MemInt->getMemOperand()); @@ -9018,7 +9018,7 @@ static bool CombineVLDDUP(SDNode *N, TargetLowering::DAGCombinerInfo &DCI) { SDVTList SDTys = DAG.getVTList(Tys, NumVecs+1); SDValue Ops[] = { VLD->getOperand(0), VLD->getOperand(2) }; MemIntrinsicSDNode *VLDMemInt = cast<MemIntrinsicSDNode>(VLD); - SDValue VLDDup = DAG.getMemIntrinsicNode(NewOpc, VLD->getDebugLoc(), SDTys, + SDValue VLDDup = DAG.getMemIntrinsicNode(NewOpc, SDLoc(VLD), SDTys, Ops, 2, VLDMemInt->getMemoryVT(), VLDMemInt->getMemOperand()); @@ -9073,7 +9073,7 @@ static SDValue PerformVDUPLANECombine(SDNode *N, if (EltSize > VT.getVectorElementType().getSizeInBits()) return SDValue(); - return DCI.DAG.getNode(ISD::BITCAST, N->getDebugLoc(), VT, Op); + return DCI.DAG.getNode(ISD::BITCAST, SDLoc(N), VT, Op); } // isConstVecPow2 - Return true if each vector element is a power of 2, all @@ -9132,7 +9132,7 @@ static SDValue PerformVCVTCombine(SDNode *N, unsigned IntrinsicOpcode = isSigned ? Intrinsic::arm_neon_vcvtfp2fxs : Intrinsic::arm_neon_vcvtfp2fxu; - return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, N->getDebugLoc(), + return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, SDLoc(N), N->getValueType(0), DAG.getConstant(IntrinsicOpcode, MVT::i32), N0, DAG.getConstant(Log2_64(C), MVT::i32)); @@ -9168,7 +9168,7 @@ static SDValue PerformVDIVCombine(SDNode *N, unsigned IntrinsicOpcode = isSigned ? 
Intrinsic::arm_neon_vcvtfxs2fp : Intrinsic::arm_neon_vcvtfxu2fp; - return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, N->getDebugLoc(), + return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, SDLoc(N), Op.getValueType(), DAG.getConstant(IntrinsicOpcode, MVT::i32), Op.getOperand(0), DAG.getConstant(Log2_64(C), MVT::i32)); @@ -9352,7 +9352,7 @@ static SDValue PerformIntrinsicCombine(SDNode *N, SelectionDAG &DAG) { VShiftOpc = ARMISD::VQRSHRNsu; break; } - return DAG.getNode(VShiftOpc, N->getDebugLoc(), N->getValueType(0), + return DAG.getNode(VShiftOpc, SDLoc(N), N->getValueType(0), N->getOperand(1), DAG.getConstant(Cnt, MVT::i32)); } @@ -9369,7 +9369,7 @@ static SDValue PerformIntrinsicCombine(SDNode *N, SelectionDAG &DAG) { llvm_unreachable("invalid shift count for vsli/vsri intrinsic"); } - return DAG.getNode(VShiftOpc, N->getDebugLoc(), N->getValueType(0), + return DAG.getNode(VShiftOpc, SDLoc(N), N->getValueType(0), N->getOperand(1), N->getOperand(2), DAG.getConstant(Cnt, MVT::i32)); } @@ -9400,7 +9400,7 @@ static SDValue PerformShiftCombine(SDNode *N, SelectionDAG &DAG, if (C->getZExtValue() == 16 && N0.getOpcode() == ISD::BSWAP && DAG.MaskedValueIsZero(N0.getOperand(0), APInt::getHighBitsSet(32, 16))) - return DAG.getNode(ISD::ROTR, N->getDebugLoc(), VT, N0, N1); + return DAG.getNode(ISD::ROTR, SDLoc(N), VT, N0, N1); } } @@ -9417,7 +9417,7 @@ static SDValue PerformShiftCombine(SDNode *N, SelectionDAG &DAG, case ISD::SHL: if (isVShiftLImm(N->getOperand(1), VT, false, Cnt)) - return DAG.getNode(ARMISD::VSHL, N->getDebugLoc(), VT, N->getOperand(0), + return DAG.getNode(ARMISD::VSHL, SDLoc(N), VT, N->getOperand(0), DAG.getConstant(Cnt, MVT::i32)); break; @@ -9426,7 +9426,7 @@ static SDValue PerformShiftCombine(SDNode *N, SelectionDAG &DAG, if (isVShiftRImm(N->getOperand(1), VT, false, false, Cnt)) { unsigned VShiftOpc = (N->getOpcode() == ISD::SRA ? ARMISD::VSHRs : ARMISD::VSHRu); - return DAG.getNode(VShiftOpc, N->getDebugLoc(), VT, N->getOperand(0), + return DAG.getNode(VShiftOpc, SDLoc(N), VT, N->getOperand(0), DAG.getConstant(Cnt, MVT::i32)); } } @@ -9466,7 +9466,7 @@ static SDValue PerformExtendCombine(SDNode *N, SelectionDAG &DAG, Opc = ARMISD::VGETLANEu; break; } - return DAG.getNode(Opc, N->getDebugLoc(), VT, Vec, Lane); + return DAG.getNode(Opc, SDLoc(N), VT, Vec, Lane); } } @@ -9555,7 +9555,7 @@ static SDValue PerformSELECT_CCCombine(SDNode *N, SelectionDAG &DAG, if (!Opcode) return SDValue(); - return DAG.getNode(Opcode, N->getDebugLoc(), N->getValueType(0), LHS, RHS); + return DAG.getNode(Opcode, SDLoc(N), N->getValueType(0), LHS, RHS); } /// PerformCMOVCombine - Target-specific DAG combining for ARMISD::CMOV. 
@@ -9567,7 +9567,7 @@ ARMTargetLowering::PerformCMOVCombine(SDNode *N, SelectionDAG &DAG) const { return SDValue(); EVT VT = N->getValueType(0); - DebugLoc dl = N->getDebugLoc(); + SDLoc dl(N); SDValue LHS = Cmp.getOperand(0); SDValue RHS = Cmp.getOperand(1); SDValue FalseVal = N->getOperand(0); diff --git a/lib/Target/ARM/ARMISelLowering.h b/lib/Target/ARM/ARMISelLowering.h index 8aa926f..2b65019 100644 --- a/lib/Target/ARM/ARMISelLowering.h +++ b/lib/Target/ARM/ARMISelLowering.h @@ -412,7 +412,7 @@ namespace llvm { void addQRTypeForNEON(MVT VT); typedef SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPassVector; - void PassF64ArgInRegs(DebugLoc dl, SelectionDAG &DAG, + void PassF64ArgInRegs(SDLoc dl, SelectionDAG &DAG, SDValue Chain, SDValue &Arg, RegsToPassVector &RegsToPass, CCValAssign &VA, CCValAssign &NextVA, @@ -421,12 +421,12 @@ namespace llvm { ISD::ArgFlagsTy Flags) const; SDValue GetF64FormalArgument(CCValAssign &VA, CCValAssign &NextVA, SDValue &Root, SelectionDAG &DAG, - DebugLoc dl) const; + SDLoc dl) const; CCAssignFn *CCAssignFnForNode(CallingConv::ID CC, bool Return, bool isVarArg) const; SDValue LowerMemOpCallTo(SDValue Chain, SDValue StackPtr, SDValue Arg, - DebugLoc dl, SelectionDAG &DAG, + SDLoc dl, SelectionDAG &DAG, const CCValAssign &VA, ISD::ArgFlagsTy Flags) const; SDValue LowerEH_SJLJ_SETJMP(SDValue Op, SelectionDAG &DAG) const; @@ -463,7 +463,7 @@ namespace llvm { SDValue LowerCallResult(SDValue Chain, SDValue InFlag, CallingConv::ID CallConv, bool isVarArg, const SmallVectorImpl<ISD::InputArg> &Ins, - DebugLoc dl, SelectionDAG &DAG, + SDLoc dl, SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals, bool isThisReturn, SDValue ThisVal) const; @@ -471,11 +471,11 @@ namespace llvm { LowerFormalArguments(SDValue Chain, CallingConv::ID CallConv, bool isVarArg, const SmallVectorImpl<ISD::InputArg> &Ins, - DebugLoc dl, SelectionDAG &DAG, + SDLoc dl, SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const; int StoreByValRegs(CCState &CCInfo, SelectionDAG &DAG, - DebugLoc dl, SDValue &Chain, + SDLoc dl, SDValue &Chain, const Value *OrigArg, unsigned InRegsParamRecordIdx, unsigned OffsetFromOrigArg, @@ -484,7 +484,7 @@ namespace llvm { bool ForceMutable) const; void VarArgStyleRegisters(CCState &CCInfo, SelectionDAG &DAG, - DebugLoc dl, SDValue &Chain, + SDLoc dl, SDValue &Chain, unsigned ArgOffset, bool ForceMutable = false) const; @@ -524,16 +524,16 @@ namespace llvm { CallingConv::ID CallConv, bool isVarArg, const SmallVectorImpl<ISD::OutputArg> &Outs, const SmallVectorImpl<SDValue> &OutVals, - DebugLoc dl, SelectionDAG &DAG) const; + SDLoc dl, SelectionDAG &DAG) const; virtual bool isUsedByReturnOnly(SDNode *N, SDValue &Chain) const; virtual bool mayBeEmittedAsTailCall(CallInst *CI) const; SDValue getARMCmp(SDValue LHS, SDValue RHS, ISD::CondCode CC, - SDValue &ARMcc, SelectionDAG &DAG, DebugLoc dl) const; + SDValue &ARMcc, SelectionDAG &DAG, SDLoc dl) const; SDValue getVFPCmp(SDValue LHS, SDValue RHS, - SelectionDAG &DAG, DebugLoc dl) const; + SelectionDAG &DAG, SDLoc dl) const; SDValue duplicateCmp(SDValue Cmp, SelectionDAG &DAG) const; SDValue OptimizeVFPBrcond(SDValue Op, SelectionDAG &DAG) const; diff --git a/lib/Target/ARM/ARMSelectionDAGInfo.cpp b/lib/Target/ARM/ARMSelectionDAGInfo.cpp index 41a7e0c..93add6e 100644 --- a/lib/Target/ARM/ARMSelectionDAGInfo.cpp +++ b/lib/Target/ARM/ARMSelectionDAGInfo.cpp @@ -26,7 +26,7 @@ ARMSelectionDAGInfo::~ARMSelectionDAGInfo() { } SDValue -ARMSelectionDAGInfo::EmitTargetCodeForMemcpy(SelectionDAG &DAG, DebugLoc dl, 
+ARMSelectionDAGInfo::EmitTargetCodeForMemcpy(SelectionDAG &DAG, SDLoc dl, SDValue Chain, SDValue Dst, SDValue Src, SDValue Size, unsigned Align, @@ -140,7 +140,7 @@ ARMSelectionDAGInfo::EmitTargetCodeForMemcpy(SelectionDAG &DAG, DebugLoc dl, // GNU library uses (ptr, value, size) // See RTABI section 4.3.4 SDValue ARMSelectionDAGInfo:: -EmitTargetCodeForMemset(SelectionDAG &DAG, DebugLoc dl, +EmitTargetCodeForMemset(SelectionDAG &DAG, SDLoc dl, SDValue Chain, SDValue Dst, SDValue Src, SDValue Size, unsigned Align, bool isVolatile, diff --git a/lib/Target/ARM/ARMSelectionDAGInfo.h b/lib/Target/ARM/ARMSelectionDAGInfo.h index 6419a73..56c9375 100644 --- a/lib/Target/ARM/ARMSelectionDAGInfo.h +++ b/lib/Target/ARM/ARMSelectionDAGInfo.h @@ -45,7 +45,7 @@ public: ~ARMSelectionDAGInfo(); virtual - SDValue EmitTargetCodeForMemcpy(SelectionDAG &DAG, DebugLoc dl, + SDValue EmitTargetCodeForMemcpy(SelectionDAG &DAG, SDLoc dl, SDValue Chain, SDValue Dst, SDValue Src, SDValue Size, unsigned Align, @@ -55,7 +55,7 @@ public: // Adjust parameters for memset, see RTABI section 4.3.4 virtual - SDValue EmitTargetCodeForMemset(SelectionDAG &DAG, DebugLoc dl, + SDValue EmitTargetCodeForMemset(SelectionDAG &DAG, SDLoc dl, SDValue Chain, SDValue Op1, SDValue Op2, SDValue Op3, unsigned Align, diff --git a/lib/Target/Hexagon/HexagonISelDAGToDAG.cpp b/lib/Target/Hexagon/HexagonISelDAGToDAG.cpp index 54ca2c9..5bb2a3e 100644 --- a/lib/Target/Hexagon/HexagonISelDAGToDAG.cpp +++ b/lib/Target/Hexagon/HexagonISelDAGToDAG.cpp @@ -92,14 +92,14 @@ public: bool SelectAddr(SDNode *Op, SDValue Addr, SDValue &Base, SDValue &Offset); SDNode *SelectLoad(SDNode *N); - SDNode *SelectBaseOffsetLoad(LoadSDNode *LD, DebugLoc dl); - SDNode *SelectIndexedLoad(LoadSDNode *LD, DebugLoc dl); + SDNode *SelectBaseOffsetLoad(LoadSDNode *LD, SDLoc dl); + SDNode *SelectIndexedLoad(LoadSDNode *LD, SDLoc dl); SDNode *SelectIndexedLoadZeroExtend64(LoadSDNode *LD, unsigned Opcode, - DebugLoc dl); + SDLoc dl); SDNode *SelectIndexedLoadSignExtend64(LoadSDNode *LD, unsigned Opcode, - DebugLoc dl); - SDNode *SelectBaseOffsetStore(StoreSDNode *ST, DebugLoc dl); - SDNode *SelectIndexedStore(StoreSDNode *ST, DebugLoc dl); + SDLoc dl); + SDNode *SelectBaseOffsetStore(StoreSDNode *ST, SDLoc dl); + SDNode *SelectIndexedStore(StoreSDNode *ST, SDLoc dl); SDNode *SelectStore(SDNode *N); SDNode *SelectSHL(SDNode *N); SDNode *SelectSelect(SDNode *N); @@ -385,7 +385,7 @@ static bool OffsetFitsS11(EVT MemType, int64_t Offset) { // lowering for GlobalAddress nodes has already turned it into a // CONST32. 
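
On the interface side the substitution is equally mechanical: helpers that thread a location through their body (the ARM memcpy/memset emitters and the Hexagon load/store selectors above) swap a DebugLoc parameter for an SDLoc taken by value, and their callers hand them SDLoc(N) instead of N->getDebugLoc(). A sketch of such a helper after the change, assuming the LLVM headers at this revision; emitHelper is an invented name.

    // Before:  static SDValue emitHelper(SelectionDAG &DAG, SDValue A,
    //                                    SDValue B, DebugLoc dl);
    static SDValue emitHelper(SelectionDAG &DAG, SDValue A, SDValue B,
                              SDLoc dl) {
      // Only the parameter type changed; every use of dl stays as it was.
      return DAG.getNode(ISD::ADD, dl, A.getValueType(), A, B);
    }

    // Typical call site, N being the node under selection or combination:
    //   SDValue Res = emitHelper(DAG, X, Y, SDLoc(N));
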
// -SDNode *HexagonDAGToDAGISel::SelectBaseOffsetLoad(LoadSDNode *LD, DebugLoc dl) { +SDNode *HexagonDAGToDAGISel::SelectBaseOffsetLoad(LoadSDNode *LD, SDLoc dl) { SDValue Chain = LD->getChain(); SDNode* Const32 = LD->getBasePtr().getNode(); unsigned Opcode = 0; @@ -433,7 +433,7 @@ SDNode *HexagonDAGToDAGISel::SelectBaseOffsetLoad(LoadSDNode *LD, DebugLoc dl) { SDNode *HexagonDAGToDAGISel::SelectIndexedLoadSignExtend64(LoadSDNode *LD, unsigned Opcode, - DebugLoc dl) + SDLoc dl) { SDValue Chain = LD->getChain(); EVT LoadedVT = LD->getMemoryVT(); @@ -497,7 +497,7 @@ SDNode *HexagonDAGToDAGISel::SelectIndexedLoadSignExtend64(LoadSDNode *LD, SDNode *HexagonDAGToDAGISel::SelectIndexedLoadZeroExtend64(LoadSDNode *LD, unsigned Opcode, - DebugLoc dl) + SDLoc dl) { SDValue Chain = LD->getChain(); EVT LoadedVT = LD->getMemoryVT(); @@ -572,7 +572,7 @@ SDNode *HexagonDAGToDAGISel::SelectIndexedLoadZeroExtend64(LoadSDNode *LD, } -SDNode *HexagonDAGToDAGISel::SelectIndexedLoad(LoadSDNode *LD, DebugLoc dl) { +SDNode *HexagonDAGToDAGISel::SelectIndexedLoad(LoadSDNode *LD, SDLoc dl) { SDValue Chain = LD->getChain(); SDValue Base = LD->getBasePtr(); SDValue Offset = LD->getOffset(); @@ -667,7 +667,7 @@ SDNode *HexagonDAGToDAGISel::SelectIndexedLoad(LoadSDNode *LD, DebugLoc dl) { SDNode *HexagonDAGToDAGISel::SelectLoad(SDNode *N) { SDNode *result; - DebugLoc dl = N->getDebugLoc(); + SDLoc dl(N); LoadSDNode *LD = cast<LoadSDNode>(N); ISD::MemIndexedMode AM = LD->getAddressingMode(); @@ -682,7 +682,7 @@ SDNode *HexagonDAGToDAGISel::SelectLoad(SDNode *N) { } -SDNode *HexagonDAGToDAGISel::SelectIndexedStore(StoreSDNode *ST, DebugLoc dl) { +SDNode *HexagonDAGToDAGISel::SelectIndexedStore(StoreSDNode *ST, SDLoc dl) { SDValue Chain = ST->getChain(); SDValue Base = ST->getBasePtr(); SDValue Offset = ST->getOffset(); @@ -751,7 +751,7 @@ SDNode *HexagonDAGToDAGISel::SelectIndexedStore(StoreSDNode *ST, DebugLoc dl) { SDNode *HexagonDAGToDAGISel::SelectBaseOffsetStore(StoreSDNode *ST, - DebugLoc dl) { + SDLoc dl) { SDValue Chain = ST->getChain(); SDNode* Const32 = ST->getBasePtr().getNode(); SDValue Value = ST->getValue(); @@ -805,7 +805,7 @@ SDNode *HexagonDAGToDAGISel::SelectBaseOffsetStore(StoreSDNode *ST, SDNode *HexagonDAGToDAGISel::SelectStore(SDNode *N) { - DebugLoc dl = N->getDebugLoc(); + SDLoc dl(N); StoreSDNode *ST = cast<StoreSDNode>(N); ISD::MemIndexedMode AM = ST->getAddressingMode(); @@ -818,7 +818,7 @@ SDNode *HexagonDAGToDAGISel::SelectStore(SDNode *N) { } SDNode *HexagonDAGToDAGISel::SelectMul(SDNode *N) { - DebugLoc dl = N->getDebugLoc(); + SDLoc dl(N); // // %conv.i = sext i32 %tmp1 to i64 @@ -902,7 +902,7 @@ SDNode *HexagonDAGToDAGISel::SelectMul(SDNode *N) { SDNode *HexagonDAGToDAGISel::SelectSelect(SDNode *N) { - DebugLoc dl = N->getDebugLoc(); + SDLoc dl(N); SDValue N0 = N->getOperand(0); if (N0.getOpcode() == ISD::SETCC) { SDValue N00 = N0.getOperand(0); @@ -969,7 +969,7 @@ SDNode *HexagonDAGToDAGISel::SelectSelect(SDNode *N) { SDNode *HexagonDAGToDAGISel::SelectTruncate(SDNode *N) { - DebugLoc dl = N->getDebugLoc(); + SDLoc dl(N); SDValue Shift = N->getOperand(0); // @@ -1082,7 +1082,7 @@ SDNode *HexagonDAGToDAGISel::SelectTruncate(SDNode *N) { SDNode *HexagonDAGToDAGISel::SelectSHL(SDNode *N) { - DebugLoc dl = N->getDebugLoc(); + SDLoc dl(N); if (N->getValueType(0) == MVT::i32) { SDValue Shl_0 = N->getOperand(0); SDValue Shl_1 = N->getOperand(1); @@ -1158,7 +1158,7 @@ SDNode *HexagonDAGToDAGISel::SelectSHL(SDNode *N) { // We want to preserve all the lower 8-bits and, not just 1 LSB bit. 
// SDNode *HexagonDAGToDAGISel::SelectZeroExtend(SDNode *N) { - DebugLoc dl = N->getDebugLoc(); + SDLoc dl(N); SDNode *IsIntrinsic = N->getOperand(0).getNode(); if ((IsIntrinsic->getOpcode() == ISD::INTRINSIC_WO_CHAIN)) { unsigned ID = @@ -1201,7 +1201,7 @@ SDNode *HexagonDAGToDAGISel::SelectZeroExtend(SDNode *N) { // and lowering to the actual intrinsic. // SDNode *HexagonDAGToDAGISel::SelectIntrinsicWOChain(SDNode *N) { - DebugLoc dl = N->getDebugLoc(); + SDLoc dl(N); unsigned ID = cast<ConstantSDNode>(N->getOperand(0))->getZExtValue(); unsigned IntrinsicWithPred = doesIntrinsicContainPredicate(ID); @@ -1251,7 +1251,7 @@ SDNode *HexagonDAGToDAGISel::SelectIntrinsicWOChain(SDNode *N) { // Map floating point constant values. // SDNode *HexagonDAGToDAGISel::SelectConstantFP(SDNode *N) { - DebugLoc dl = N->getDebugLoc(); + SDLoc dl(N); ConstantFPSDNode *CN = dyn_cast<ConstantFPSDNode>(N); APFloat APF = CN->getValueAPF(); if (N->getValueType(0) == MVT::f32) { @@ -1271,7 +1271,7 @@ SDNode *HexagonDAGToDAGISel::SelectConstantFP(SDNode *N) { // Map predicate true (encoded as -1 in LLVM) to a XOR. // SDNode *HexagonDAGToDAGISel::SelectConstant(SDNode *N) { - DebugLoc dl = N->getDebugLoc(); + SDLoc dl(N); if (N->getValueType(0) == MVT::i1) { SDNode* Result; int32_t Val = cast<ConstantSDNode>(N)->getSExtValue(); @@ -1310,7 +1310,7 @@ SDNode *HexagonDAGToDAGISel::SelectConstant(SDNode *N) { // Map add followed by a asr -> asr +=. // SDNode *HexagonDAGToDAGISel::SelectAdd(SDNode *N) { - DebugLoc dl = N->getDebugLoc(); + SDLoc dl(N); if (N->getValueType(0) != MVT::i32) { return SelectCode(N); } @@ -1660,7 +1660,7 @@ bool HexagonDAGToDAGISel::foldGlobalAddressImpl(SDValue &N, SDValue &R, !hasNumUsesBelowThresGA(GA)) return false; R = CurDAG->getTargetGlobalAddress(GA->getGlobal(), - Const->getDebugLoc(), + SDLoc(Const), N.getValueType(), GA->getOffset() + (uint64_t)Const->getSExtValue()); diff --git a/lib/Target/Hexagon/HexagonISelLowering.cpp b/lib/Target/Hexagon/HexagonISelLowering.cpp index 14175b3..e1d5592 100644 --- a/lib/Target/Hexagon/HexagonISelLowering.cpp +++ b/lib/Target/Hexagon/HexagonISelLowering.cpp @@ -285,7 +285,7 @@ const { static SDValue CreateCopyOfByValArgument(SDValue Src, SDValue Dst, SDValue Chain, ISD::ArgFlagsTy Flags, SelectionDAG &DAG, - DebugLoc dl) { + SDLoc dl) { SDValue SizeNode = DAG.getConstant(Flags.getByValSize(), MVT::i32); return DAG.getMemcpy(Chain, dl, Dst, Src, SizeNode, Flags.getByValAlign(), @@ -302,7 +302,7 @@ HexagonTargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv, bool isVarArg, const SmallVectorImpl<ISD::OutputArg> &Outs, const SmallVectorImpl<SDValue> &OutVals, - DebugLoc dl, SelectionDAG &DAG) const { + SDLoc dl, SelectionDAG &DAG) const { // CCValAssign - represent the assignment of the return value to locations. 
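
The same holds for the TargetLowering hooks themselves: once the base-class declarations of LowerFormalArguments, LowerCallResult and LowerReturn carry an SDLoc parameter, every target override has to change its signature in lock step or it would stop overriding anything, which is why each *ISelLowering.h in this patch repeats the identical one-line edit. A sketch of the pattern, with MyTargetLowering standing in for any of the targets above:

    class MyTargetLowering : public TargetLowering {
      virtual SDValue LowerReturn(SDValue Chain, CallingConv::ID CallConv,
                                  bool isVarArg,
                                  const SmallVectorImpl<ISD::OutputArg> &Outs,
                                  const SmallVectorImpl<SDValue> &OutVals,
                                  SDLoc dl, SelectionDAG &DAG) const;
                                  // was: DebugLoc dl, SelectionDAG &DAG
    };
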
SmallVector<CCValAssign, 16> RVLocs; @@ -351,7 +351,7 @@ HexagonTargetLowering::LowerCallResult(SDValue Chain, SDValue InFlag, CallingConv::ID CallConv, bool isVarArg, const SmallVectorImpl<ISD::InputArg> &Ins, - DebugLoc dl, SelectionDAG &DAG, + SDLoc dl, SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals, const SmallVectorImpl<SDValue> &OutVals, SDValue Callee) const { @@ -382,7 +382,7 @@ SDValue HexagonTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI, SmallVectorImpl<SDValue> &InVals) const { SelectionDAG &DAG = CLI.DAG; - DebugLoc &dl = CLI.DL; + SDLoc &dl = CLI.DL; SmallVector<ISD::OutputArg, 32> &Outs = CLI.Outs; SmallVector<SDValue, 32> &OutVals = CLI.OutVals; SmallVector<ISD::InputArg, 32> &Ins = CLI.Ins; @@ -730,7 +730,7 @@ LowerBR_JT(SDValue Op, SelectionDAG &DAG) const SDValue Chain = Op.getOperand(0); SDValue Table = Op.getOperand(1); SDValue Index = Op.getOperand(2); - DebugLoc dl = Op.getDebugLoc(); + SDLoc dl(Op); JumpTableSDNode *JT = cast<JumpTableSDNode>(Table); unsigned JTI = JT->getIndex(); MachineFunction &MF = DAG.getMachineFunction(); @@ -766,7 +766,7 @@ HexagonTargetLowering::LowerDYNAMIC_STACKALLOC(SDValue Op, SelectionDAG &DAG) const { SDValue Chain = Op.getOperand(0); SDValue Size = Op.getOperand(1); - DebugLoc dl = Op.getDebugLoc(); + SDLoc dl(Op); unsigned SPReg = getStackPointerRegisterToSaveRestore(); @@ -812,7 +812,7 @@ HexagonTargetLowering::LowerFormalArguments(SDValue Chain, bool isVarArg, const SmallVectorImpl<ISD::InputArg> &Ins, - DebugLoc dl, SelectionDAG &DAG, + SDLoc dl, SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const { @@ -925,7 +925,7 @@ HexagonTargetLowering::LowerVASTART(SDValue Op, SelectionDAG &DAG) const { HexagonMachineFunctionInfo *QFI = MF.getInfo<HexagonMachineFunctionInfo>(); SDValue Addr = DAG.getFrameIndex(QFI->getVarArgsFrameIndex(), MVT::i32); const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue(); - return DAG.getStore(Op.getOperand(0), Op.getDebugLoc(), Addr, + return DAG.getStore(Op.getOperand(0), SDLoc(Op), Addr, Op.getOperand(1), MachinePointerInfo(SV), false, false, 0); } @@ -937,7 +937,7 @@ HexagonTargetLowering::LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const { SDValue CC = Op.getOperand(4); SDValue TrueVal = Op.getOperand(2); SDValue FalseVal = Op.getOperand(3); - DebugLoc dl = Op.getDebugLoc(); + SDLoc dl(Op); SDNode* OpNode = Op.getNode(); EVT SVT = OpNode->getValueType(0); @@ -948,7 +948,7 @@ HexagonTargetLowering::LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const { SDValue HexagonTargetLowering::LowerConstantPool(SDValue Op, SelectionDAG &DAG) const { EVT ValTy = Op.getValueType(); - DebugLoc dl = Op.getDebugLoc(); + SDLoc dl(Op); ConstantPoolSDNode *CP = cast<ConstantPoolSDNode>(Op); SDValue Res; if (CP->isMachineConstantPoolEntry()) @@ -968,7 +968,7 @@ HexagonTargetLowering::LowerRETURNADDR(SDValue Op, SelectionDAG &DAG) const { MFI->setReturnAddressIsTaken(true); EVT VT = Op.getValueType(); - DebugLoc dl = Op.getDebugLoc(); + SDLoc dl(Op); unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue(); if (Depth) { SDValue FrameAddr = LowerFRAMEADDR(Op, DAG); @@ -990,7 +990,7 @@ HexagonTargetLowering::LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const { MFI->setFrameAddressIsTaken(true); EVT VT = Op.getValueType(); - DebugLoc dl = Op.getDebugLoc(); + SDLoc dl(Op); unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue(); SDValue FrameAddr = DAG.getCopyFromReg(DAG.getEntryNode(), dl, TRI->getFrameRegister(), VT); @@ -1003,7 +1003,7 @@ 
HexagonTargetLowering::LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const { SDValue HexagonTargetLowering::LowerATOMIC_FENCE(SDValue Op, SelectionDAG& DAG) const { - DebugLoc dl = Op.getDebugLoc(); + SDLoc dl(Op); return DAG.getNode(HexagonISD::BARRIER, dl, MVT::Other, Op.getOperand(0)); } @@ -1013,7 +1013,7 @@ SDValue HexagonTargetLowering::LowerGLOBALADDRESS(SDValue Op, SDValue Result; const GlobalValue *GV = cast<GlobalAddressSDNode>(Op)->getGlobal(); int64_t Offset = cast<GlobalAddressSDNode>(Op)->getOffset(); - DebugLoc dl = Op.getDebugLoc(); + SDLoc dl(Op); Result = DAG.getTargetGlobalAddress(GV, dl, getPointerTy(), Offset); const HexagonTargetObjectFile &TLOF = @@ -1029,7 +1029,7 @@ SDValue HexagonTargetLowering::LowerBlockAddress(SDValue Op, SelectionDAG &DAG) const { const BlockAddress *BA = cast<BlockAddressSDNode>(Op)->getBlockAddress(); SDValue BA_SD = DAG.getTargetBlockAddress(BA, MVT::i32); - DebugLoc dl = Op.getDebugLoc(); + SDLoc dl(Op); return DAG.getNode(HexagonISD::CONST32_GP, dl, getPointerTy(), BA_SD); } @@ -1513,7 +1513,7 @@ HexagonTargetLowering::LowerEH_RETURN(SDValue Op, SelectionDAG &DAG) const { SDValue Chain = Op.getOperand(0); SDValue Offset = Op.getOperand(1); SDValue Handler = Op.getOperand(2); - DebugLoc dl = Op.getDebugLoc(); + SDLoc dl(Op); // Mark function as containing a call to EH_RETURN. HexagonMachineFunctionInfo *FuncInfo = diff --git a/lib/Target/Hexagon/HexagonISelLowering.h b/lib/Target/Hexagon/HexagonISelLowering.h index 3045905..70642e6 100644 --- a/lib/Target/Hexagon/HexagonISelLowering.h +++ b/lib/Target/Hexagon/HexagonISelLowering.h @@ -106,7 +106,7 @@ namespace llvm { SDValue LowerFormalArguments(SDValue Chain, CallingConv::ID CallConv, bool isVarArg, const SmallVectorImpl<ISD::InputArg> &Ins, - DebugLoc dl, SelectionDAG &DAG, + SDLoc dl, SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const; SDValue LowerGLOBALADDRESS(SDValue Op, SelectionDAG &DAG) const; SDValue LowerBlockAddress(SDValue Op, SelectionDAG &DAG) const; @@ -117,7 +117,7 @@ namespace llvm { SDValue LowerCallResult(SDValue Chain, SDValue InFlag, CallingConv::ID CallConv, bool isVarArg, const SmallVectorImpl<ISD::InputArg> &Ins, - DebugLoc dl, SelectionDAG &DAG, + SDLoc dl, SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals, const SmallVectorImpl<SDValue> &OutVals, SDValue Callee) const; @@ -131,7 +131,7 @@ namespace llvm { CallingConv::ID CallConv, bool isVarArg, const SmallVectorImpl<ISD::OutputArg> &Outs, const SmallVectorImpl<SDValue> &OutVals, - DebugLoc dl, SelectionDAG &DAG) const; + SDLoc dl, SelectionDAG &DAG) const; virtual MachineBasicBlock *EmitInstrWithCustomInserter(MachineInstr *MI, diff --git a/lib/Target/Hexagon/HexagonSelectionDAGInfo.cpp b/lib/Target/Hexagon/HexagonSelectionDAGInfo.cpp index a52c604..c37bf9f 100644 --- a/lib/Target/Hexagon/HexagonSelectionDAGInfo.cpp +++ b/lib/Target/Hexagon/HexagonSelectionDAGInfo.cpp @@ -27,7 +27,7 @@ HexagonSelectionDAGInfo::~HexagonSelectionDAGInfo() { SDValue HexagonSelectionDAGInfo:: -EmitTargetCodeForMemcpy(SelectionDAG &DAG, DebugLoc dl, SDValue Chain, +EmitTargetCodeForMemcpy(SelectionDAG &DAG, SDLoc dl, SDValue Chain, SDValue Dst, SDValue Src, SDValue Size, unsigned Align, bool isVolatile, bool AlwaysInline, MachinePointerInfo DstPtrInfo, diff --git a/lib/Target/Hexagon/HexagonSelectionDAGInfo.h b/lib/Target/Hexagon/HexagonSelectionDAGInfo.h index 0673e4d..31f278a 100644 --- a/lib/Target/Hexagon/HexagonSelectionDAGInfo.h +++ b/lib/Target/Hexagon/HexagonSelectionDAGInfo.h @@ -26,7 +26,7 @@ public: 
~HexagonSelectionDAGInfo(); virtual - SDValue EmitTargetCodeForMemcpy(SelectionDAG &DAG, DebugLoc dl, + SDValue EmitTargetCodeForMemcpy(SelectionDAG &DAG, SDLoc dl, SDValue Chain, SDValue Dst, SDValue Src, SDValue Size, unsigned Align, diff --git a/lib/Target/MBlaze/MBlazeISelDAGToDAG.cpp b/lib/Target/MBlaze/MBlazeISelDAGToDAG.cpp index 34e33fd..2a13e13 100644 --- a/lib/Target/MBlaze/MBlazeISelDAGToDAG.cpp +++ b/lib/Target/MBlaze/MBlazeISelDAGToDAG.cpp @@ -188,7 +188,7 @@ SDNode *MBlazeDAGToDAGISel::getGlobalBaseReg() { /// expanded, promoted and normal instructions SDNode* MBlazeDAGToDAGISel::Select(SDNode *Node) { unsigned Opcode = Node->getOpcode(); - DebugLoc dl = Node->getDebugLoc(); + SDLoc dl(Node); // If we have a custom node, we already have selected! if (Node->isMachineOpcode()) diff --git a/lib/Target/MBlaze/MBlazeISelLowering.cpp b/lib/Target/MBlaze/MBlazeISelLowering.cpp index 52d722b..fe166d1 100644 --- a/lib/Target/MBlaze/MBlazeISelLowering.cpp +++ b/lib/Target/MBlaze/MBlazeISelLowering.cpp @@ -575,7 +575,7 @@ SDValue MBlazeTargetLowering::LowerSELECT_CC(SDValue Op, SDValue RHS = Op.getOperand(1); SDValue TrueVal = Op.getOperand(2); SDValue FalseVal = Op.getOperand(3); - DebugLoc dl = Op.getDebugLoc(); + SDLoc dl(Op); unsigned Opc; SDValue CompareFlag; @@ -594,7 +594,7 @@ SDValue MBlazeTargetLowering::LowerSELECT_CC(SDValue Op, SDValue MBlazeTargetLowering:: LowerGlobalAddress(SDValue Op, SelectionDAG &DAG) const { // FIXME there isn't actually debug info here - DebugLoc dl = Op.getDebugLoc(); + SDLoc dl(Op); const GlobalValue *GV = cast<GlobalAddressSDNode>(Op)->getGlobal(); SDValue GA = DAG.getTargetGlobalAddress(GV, dl, MVT::i32); @@ -611,7 +611,7 @@ LowerJumpTable(SDValue Op, SelectionDAG &DAG) const { SDValue ResNode; SDValue HiPart; // FIXME there isn't actually debug info here - DebugLoc dl = Op.getDebugLoc(); + SDLoc dl(Op); EVT PtrVT = Op.getValueType(); JumpTableSDNode *JT = cast<JumpTableSDNode>(Op); @@ -625,7 +625,7 @@ LowerConstantPool(SDValue Op, SelectionDAG &DAG) const { SDValue ResNode; ConstantPoolSDNode *N = cast<ConstantPoolSDNode>(Op); const Constant *C = N->getConstVal(); - DebugLoc dl = Op.getDebugLoc(); + SDLoc dl(Op); SDValue CP = DAG.getTargetConstantPool(C, MVT::i32, N->getAlignment(), N->getOffset(), 0); @@ -637,7 +637,7 @@ SDValue MBlazeTargetLowering::LowerVASTART(SDValue Op, MachineFunction &MF = DAG.getMachineFunction(); MBlazeFunctionInfo *FuncInfo = MF.getInfo<MBlazeFunctionInfo>(); - DebugLoc dl = Op.getDebugLoc(); + SDLoc dl(Op); SDValue FI = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), getPointerTy()); @@ -686,7 +686,7 @@ SDValue MBlazeTargetLowering:: LowerCall(TargetLowering::CallLoweringInfo &CLI, SmallVectorImpl<SDValue> &InVals) const { SelectionDAG &DAG = CLI.DAG; - DebugLoc &dl = CLI.DL; + SDLoc dl = CLI.DL; SmallVector<ISD::OutputArg, 32> &Outs = CLI.Outs; SmallVector<SDValue, 32> &OutVals = CLI.OutVals; SmallVector<ISD::InputArg, 32> &Ins = CLI.Ins; @@ -844,7 +844,7 @@ LowerCall(TargetLowering::CallLoweringInfo &CLI, SDValue MBlazeTargetLowering:: LowerCallResult(SDValue Chain, SDValue InFlag, CallingConv::ID CallConv, bool isVarArg, const SmallVectorImpl<ISD::InputArg> &Ins, - DebugLoc dl, SelectionDAG &DAG, + SDLoc dl, SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const { // Assign locations to each value returned by this call. 
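
A small inconsistency worth noting in the LowerCall hunks: the location is pulled out of CallLoweringInfo either by value (SDLoc dl = CLI.DL;, as MBlaze does above) or by reference (SDLoc &dl = CLI.DL;, as Hexagon keeps it). Both forms bind, since CLI.DL is now an SDLoc, and the rest of the function reads the same either way. A trimmed sketch, with MyTargetLowering again a placeholder and the real argument-marshalling body elided:

    SDValue
    MyTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
                                SmallVectorImpl<SDValue> &InVals) const {
      SelectionDAG &DAG = CLI.DAG;
      SDLoc dl = CLI.DL;     // by value; "SDLoc &dl = CLI.DL;" also appears
      SDValue Chain = CLI.Chain;
      // ... analyze arguments and emit the call sequence (elided) ...
      return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Chain);
    }
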
SmallVector<CCValAssign, 16> RVLocs; @@ -874,7 +874,7 @@ LowerCallResult(SDValue Chain, SDValue InFlag, CallingConv::ID CallConv, SDValue MBlazeTargetLowering:: LowerFormalArguments(SDValue Chain, CallingConv::ID CallConv, bool isVarArg, const SmallVectorImpl<ISD::InputArg> &Ins, - DebugLoc dl, SelectionDAG &DAG, + SDLoc dl, SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const { MachineFunction &MF = DAG.getMachineFunction(); MachineFrameInfo *MFI = MF.getFrameInfo(); @@ -1017,7 +1017,7 @@ SDValue MBlazeTargetLowering:: LowerReturn(SDValue Chain, CallingConv::ID CallConv, bool isVarArg, const SmallVectorImpl<ISD::OutputArg> &Outs, const SmallVectorImpl<SDValue> &OutVals, - DebugLoc dl, SelectionDAG &DAG) const { + SDLoc dl, SelectionDAG &DAG) const { // CCValAssign - represent the assignment of // the return value to a location SmallVector<CCValAssign, 16> RVLocs; diff --git a/lib/Target/MBlaze/MBlazeISelLowering.h b/lib/Target/MBlaze/MBlazeISelLowering.h index 1961dee..f874113 100644 --- a/lib/Target/MBlaze/MBlazeISelLowering.h +++ b/lib/Target/MBlaze/MBlazeISelLowering.h @@ -113,7 +113,7 @@ namespace llvm { SDValue LowerCallResult(SDValue Chain, SDValue InFlag, CallingConv::ID CallConv, bool isVarArg, const SmallVectorImpl<ISD::InputArg> &Ins, - DebugLoc dl, SelectionDAG &DAG, + SDLoc dl, SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const; // Lower Operand specifics @@ -128,7 +128,7 @@ namespace llvm { LowerFormalArguments(SDValue Chain, CallingConv::ID CallConv, bool isVarArg, const SmallVectorImpl<ISD::InputArg> &Ins, - DebugLoc dl, SelectionDAG &DAG, + SDLoc dl, SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const; virtual SDValue @@ -140,7 +140,7 @@ namespace llvm { CallingConv::ID CallConv, bool isVarArg, const SmallVectorImpl<ISD::OutputArg> &Outs, const SmallVectorImpl<SDValue> &OutVals, - DebugLoc dl, SelectionDAG &DAG) const; + SDLoc dl, SelectionDAG &DAG) const; virtual MachineBasicBlock* EmitCustomShift(MachineInstr *MI, MachineBasicBlock *MBB) const; diff --git a/lib/Target/MSP430/MSP430ISelDAGToDAG.cpp b/lib/Target/MSP430/MSP430ISelDAGToDAG.cpp index 1566c09..bd3ed81 100644 --- a/lib/Target/MSP430/MSP430ISelDAGToDAG.cpp +++ b/lib/Target/MSP430/MSP430ISelDAGToDAG.cpp @@ -263,7 +263,7 @@ bool MSP430DAGToDAGISel::SelectAddr(SDValue N, AM.Base.Reg; if (AM.GV) - Disp = CurDAG->getTargetGlobalAddress(AM.GV, N->getDebugLoc(), + Disp = CurDAG->getTargetGlobalAddress(AM.GV, SDLoc(N), MVT::i16, AM.Disp, 0/*AM.SymbolFlags*/); else if (AM.CP) @@ -345,7 +345,7 @@ SDNode *MSP430DAGToDAGISel::SelectIndexedLoad(SDNode *N) { return NULL; } - return CurDAG->getMachineNode(Opcode, N->getDebugLoc(), + return CurDAG->getMachineNode(Opcode, SDLoc(N), VT, MVT::i16, MVT::Other, LD->getBasePtr(), LD->getChain()); } @@ -382,7 +382,7 @@ SDNode *MSP430DAGToDAGISel::SelectIndexedBinOp(SDNode *Op, SDNode *MSP430DAGToDAGISel::Select(SDNode *Node) { - DebugLoc dl = Node->getDebugLoc(); + SDLoc dl(Node); // Dump information about the Node being selected DEBUG(errs() << "Selecting: "); diff --git a/lib/Target/MSP430/MSP430ISelLowering.cpp b/lib/Target/MSP430/MSP430ISelLowering.cpp index 09cdf32..c96c397 100644 --- a/lib/Target/MSP430/MSP430ISelLowering.cpp +++ b/lib/Target/MSP430/MSP430ISelLowering.cpp @@ -254,7 +254,7 @@ MSP430TargetLowering::LowerFormalArguments(SDValue Chain, bool isVarArg, const SmallVectorImpl<ISD::InputArg> &Ins, - DebugLoc dl, + SDLoc dl, SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const { @@ -276,7 +276,7 @@ SDValue 
MSP430TargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI, SmallVectorImpl<SDValue> &InVals) const { SelectionDAG &DAG = CLI.DAG; - DebugLoc &dl = CLI.DL; + SDLoc &dl = CLI.DL; SmallVector<ISD::OutputArg, 32> &Outs = CLI.Outs; SmallVector<SDValue, 32> &OutVals = CLI.OutVals; SmallVector<ISD::InputArg, 32> &Ins = CLI.Ins; @@ -310,7 +310,7 @@ MSP430TargetLowering::LowerCCCArguments(SDValue Chain, bool isVarArg, const SmallVectorImpl<ISD::InputArg> &Ins, - DebugLoc dl, + SDLoc dl, SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const { @@ -407,7 +407,7 @@ MSP430TargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv, bool isVarArg, const SmallVectorImpl<ISD::OutputArg> &Outs, const SmallVectorImpl<SDValue> &OutVals, - DebugLoc dl, SelectionDAG &DAG) const { + SDLoc dl, SelectionDAG &DAG) const { // CCValAssign - represent the assignment of the return value to a location SmallVector<CCValAssign, 16> RVLocs; @@ -463,7 +463,7 @@ MSP430TargetLowering::LowerCCCCallTo(SDValue Chain, SDValue Callee, &Outs, const SmallVectorImpl<SDValue> &OutVals, const SmallVectorImpl<ISD::InputArg> &Ins, - DebugLoc dl, SelectionDAG &DAG, + SDLoc dl, SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const { // Analyze operands of the call, assigning locations to each operand. SmallVector<CCValAssign, 16> ArgLocs; @@ -599,7 +599,7 @@ SDValue MSP430TargetLowering::LowerCallResult(SDValue Chain, SDValue InFlag, CallingConv::ID CallConv, bool isVarArg, const SmallVectorImpl<ISD::InputArg> &Ins, - DebugLoc dl, SelectionDAG &DAG, + SDLoc dl, SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const { // Assign locations to each value returned by this call. @@ -625,7 +625,7 @@ SDValue MSP430TargetLowering::LowerShifts(SDValue Op, unsigned Opc = Op.getOpcode(); SDNode* N = Op.getNode(); EVT VT = Op.getValueType(); - DebugLoc dl = N->getDebugLoc(); + SDLoc dl(N); // Expand non-constant shifts to loops: if (!isa<ConstantSDNode>(N->getOperand(1))) @@ -669,15 +669,15 @@ SDValue MSP430TargetLowering::LowerGlobalAddress(SDValue Op, int64_t Offset = cast<GlobalAddressSDNode>(Op)->getOffset(); // Create the TargetGlobalAddress node, folding in the constant offset. 
- SDValue Result = DAG.getTargetGlobalAddress(GV, Op.getDebugLoc(), + SDValue Result = DAG.getTargetGlobalAddress(GV, SDLoc(Op), getPointerTy(), Offset); - return DAG.getNode(MSP430ISD::Wrapper, Op.getDebugLoc(), + return DAG.getNode(MSP430ISD::Wrapper, SDLoc(Op), getPointerTy(), Result); } SDValue MSP430TargetLowering::LowerExternalSymbol(SDValue Op, SelectionDAG &DAG) const { - DebugLoc dl = Op.getDebugLoc(); + SDLoc dl(Op); const char *Sym = cast<ExternalSymbolSDNode>(Op)->getSymbol(); SDValue Result = DAG.getTargetExternalSymbol(Sym, getPointerTy()); @@ -686,7 +686,7 @@ SDValue MSP430TargetLowering::LowerExternalSymbol(SDValue Op, SDValue MSP430TargetLowering::LowerBlockAddress(SDValue Op, SelectionDAG &DAG) const { - DebugLoc dl = Op.getDebugLoc(); + SDLoc dl(Op); const BlockAddress *BA = cast<BlockAddressSDNode>(Op)->getBlockAddress(); SDValue Result = DAG.getTargetBlockAddress(BA, getPointerTy()); @@ -695,7 +695,7 @@ SDValue MSP430TargetLowering::LowerBlockAddress(SDValue Op, static SDValue EmitCMP(SDValue &LHS, SDValue &RHS, SDValue &TargetCC, ISD::CondCode CC, - DebugLoc dl, SelectionDAG &DAG) { + SDLoc dl, SelectionDAG &DAG) { // FIXME: Handle bittests someday assert(!LHS.getValueType().isFloatingPoint() && "We don't handle FP yet"); @@ -782,7 +782,7 @@ SDValue MSP430TargetLowering::LowerBR_CC(SDValue Op, SelectionDAG &DAG) const { SDValue LHS = Op.getOperand(2); SDValue RHS = Op.getOperand(3); SDValue Dest = Op.getOperand(4); - DebugLoc dl = Op.getDebugLoc(); + SDLoc dl (Op); SDValue TargetCC; SDValue Flag = EmitCMP(LHS, RHS, TargetCC, CC, dl, DAG); @@ -794,7 +794,7 @@ SDValue MSP430TargetLowering::LowerBR_CC(SDValue Op, SelectionDAG &DAG) const { SDValue MSP430TargetLowering::LowerSETCC(SDValue Op, SelectionDAG &DAG) const { SDValue LHS = Op.getOperand(0); SDValue RHS = Op.getOperand(1); - DebugLoc dl = Op.getDebugLoc(); + SDLoc dl (Op); // If we are doing an AND and testing against zero, then the CMP // will not be generated. 
The AND (or BIT) will generate the condition codes, @@ -878,7 +878,7 @@ SDValue MSP430TargetLowering::LowerSELECT_CC(SDValue Op, SDValue TrueV = Op.getOperand(2); SDValue FalseV = Op.getOperand(3); ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(4))->get(); - DebugLoc dl = Op.getDebugLoc(); + SDLoc dl (Op); SDValue TargetCC; SDValue Flag = EmitCMP(LHS, RHS, TargetCC, CC, dl, DAG); @@ -897,7 +897,7 @@ SDValue MSP430TargetLowering::LowerSIGN_EXTEND(SDValue Op, SelectionDAG &DAG) const { SDValue Val = Op.getOperand(0); EVT VT = Op.getValueType(); - DebugLoc dl = Op.getDebugLoc(); + SDLoc dl(Op); assert(VT == MVT::i16 && "Only support i16 for now!"); @@ -929,7 +929,7 @@ SDValue MSP430TargetLowering::LowerRETURNADDR(SDValue Op, MFI->setReturnAddressIsTaken(true); unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue(); - DebugLoc dl = Op.getDebugLoc(); + SDLoc dl(Op); if (Depth > 0) { SDValue FrameAddr = LowerFRAMEADDR(Op, DAG); @@ -953,7 +953,7 @@ SDValue MSP430TargetLowering::LowerFRAMEADDR(SDValue Op, MFI->setFrameAddressIsTaken(true); EVT VT = Op.getValueType(); - DebugLoc dl = Op.getDebugLoc(); // FIXME probably not meaningful + SDLoc dl(Op); // FIXME probably not meaningful unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue(); SDValue FrameAddr = DAG.getCopyFromReg(DAG.getEntryNode(), dl, MSP430::FPW, VT); @@ -975,7 +975,7 @@ SDValue MSP430TargetLowering::LowerVASTART(SDValue Op, const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue(); // Create a store of the frame index to the location operand - return DAG.getStore(Op.getOperand(0), Op.getDebugLoc(), FrameIndex, + return DAG.getStore(Op.getOperand(0), SDLoc(Op), FrameIndex, Op.getOperand(1), MachinePointerInfo(SV), false, false, 0); } diff --git a/lib/Target/MSP430/MSP430ISelLowering.h b/lib/Target/MSP430/MSP430ISelLowering.h index e0ed870..9570ef2 100644 --- a/lib/Target/MSP430/MSP430ISelLowering.h +++ b/lib/Target/MSP430/MSP430ISelLowering.h @@ -130,28 +130,28 @@ namespace llvm { const SmallVectorImpl<ISD::OutputArg> &Outs, const SmallVectorImpl<SDValue> &OutVals, const SmallVectorImpl<ISD::InputArg> &Ins, - DebugLoc dl, SelectionDAG &DAG, + SDLoc dl, SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const; SDValue LowerCCCArguments(SDValue Chain, CallingConv::ID CallConv, bool isVarArg, const SmallVectorImpl<ISD::InputArg> &Ins, - DebugLoc dl, + SDLoc dl, SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const; SDValue LowerCallResult(SDValue Chain, SDValue InFlag, CallingConv::ID CallConv, bool isVarArg, const SmallVectorImpl<ISD::InputArg> &Ins, - DebugLoc dl, SelectionDAG &DAG, + SDLoc dl, SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const; virtual SDValue LowerFormalArguments(SDValue Chain, CallingConv::ID CallConv, bool isVarArg, const SmallVectorImpl<ISD::InputArg> &Ins, - DebugLoc dl, SelectionDAG &DAG, + SDLoc dl, SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const; virtual SDValue LowerCall(TargetLowering::CallLoweringInfo &CLI, @@ -162,7 +162,7 @@ namespace llvm { CallingConv::ID CallConv, bool isVarArg, const SmallVectorImpl<ISD::OutputArg> &Outs, const SmallVectorImpl<SDValue> &OutVals, - DebugLoc dl, SelectionDAG &DAG) const; + SDLoc dl, SelectionDAG &DAG) const; virtual bool getPostIndexedAddressParts(SDNode *N, SDNode *Op, SDValue &Base, diff --git a/lib/Target/Mips/Mips16ISelDAGToDAG.cpp b/lib/Target/Mips/Mips16ISelDAGToDAG.cpp index c1c635c..c02b8b6 100644 --- a/lib/Target/Mips/Mips16ISelDAGToDAG.cpp +++ 
b/lib/Target/Mips/Mips16ISelDAGToDAG.cpp @@ -42,7 +42,7 @@ bool Mips16DAGToDAGISel::runOnMachineFunction(MachineFunction &MF) { } /// Select multiply instructions. std::pair<SDNode*, SDNode*> -Mips16DAGToDAGISel::selectMULT(SDNode *N, unsigned Opc, DebugLoc DL, EVT Ty, +Mips16DAGToDAGISel::selectMULT(SDNode *N, unsigned Opc, SDLoc DL, EVT Ty, bool HasLo, bool HasHi) { SDNode *Lo = 0, *Hi = 0; SDNode *Mul = CurDAG->getMachineNode(Opc, DL, MVT::Glue, N->getOperand(0), @@ -235,7 +235,7 @@ bool Mips16DAGToDAGISel::selectAddr16( /// expanded, promoted and normal instructions std::pair<bool, SDNode*> Mips16DAGToDAGISel::selectNode(SDNode *Node) { unsigned Opcode = Node->getOpcode(); - DebugLoc DL = Node->getDebugLoc(); + SDLoc DL(Node); /// // Instruction Selection not handled by the auto-generated diff --git a/lib/Target/Mips/Mips16ISelDAGToDAG.h b/lib/Target/Mips/Mips16ISelDAGToDAG.h index f05f9b7..49dc6e5 100644 --- a/lib/Target/Mips/Mips16ISelDAGToDAG.h +++ b/lib/Target/Mips/Mips16ISelDAGToDAG.h @@ -23,7 +23,7 @@ public: explicit Mips16DAGToDAGISel(MipsTargetMachine &TM) : MipsDAGToDAGISel(TM) {} private: - std::pair<SDNode*, SDNode*> selectMULT(SDNode *N, unsigned Opc, DebugLoc DL, + std::pair<SDNode*, SDNode*> selectMULT(SDNode *N, unsigned Opc, SDLoc DL, EVT Ty, bool HasLo, bool HasHi); SDValue getMips16SPAliasReg(); diff --git a/lib/Target/Mips/MipsISelLowering.cpp b/lib/Target/Mips/MipsISelLowering.cpp index 666fbdd..2bcaa98 100644 --- a/lib/Target/Mips/MipsISelLowering.cpp +++ b/lib/Target/Mips/MipsISelLowering.cpp @@ -83,7 +83,7 @@ static SDValue getTargetNode(SDValue Op, SelectionDAG &DAG, unsigned Flag) { EVT Ty = Op.getValueType(); if (GlobalAddressSDNode *N = dyn_cast<GlobalAddressSDNode>(Op)) - return DAG.getTargetGlobalAddress(N->getGlobal(), Op.getDebugLoc(), Ty, 0, + return DAG.getTargetGlobalAddress(N->getGlobal(), SDLoc(Op), Ty, 0, Flag); if (ExternalSymbolSDNode *N = dyn_cast<ExternalSymbolSDNode>(Op)) return DAG.getTargetExternalSymbol(N->getSymbol(), Ty, Flag); @@ -100,7 +100,7 @@ static SDValue getTargetNode(SDValue Op, SelectionDAG &DAG, unsigned Flag) { } static SDValue getAddrNonPIC(SDValue Op, SelectionDAG &DAG) { - DebugLoc DL = Op.getDebugLoc(); + SDLoc DL(Op); EVT Ty = Op.getValueType(); SDValue Hi = getTargetNode(Op, DAG, MipsII::MO_ABS_HI); SDValue Lo = getTargetNode(Op, DAG, MipsII::MO_ABS_LO); @@ -111,7 +111,7 @@ static SDValue getAddrNonPIC(SDValue Op, SelectionDAG &DAG) { SDValue MipsTargetLowering::getAddrLocal(SDValue Op, SelectionDAG &DAG, bool HasMips64) const { - DebugLoc DL = Op.getDebugLoc(); + SDLoc DL(Op); EVT Ty = Op.getValueType(); unsigned GOTFlag = HasMips64 ? 
MipsII::MO_GOT_PAGE : MipsII::MO_GOT; SDValue GOT = DAG.getNode(MipsISD::Wrapper, DL, Ty, getGlobalReg(DAG, Ty), @@ -126,7 +126,7 @@ SDValue MipsTargetLowering::getAddrLocal(SDValue Op, SelectionDAG &DAG, SDValue MipsTargetLowering::getAddrGlobal(SDValue Op, SelectionDAG &DAG, unsigned Flag) const { - DebugLoc DL = Op.getDebugLoc(); + SDLoc DL(Op); EVT Ty = Op.getValueType(); SDValue Tgt = DAG.getNode(MipsISD::Wrapper, DL, Ty, getGlobalReg(DAG, Ty), getTargetNode(Op, DAG, Flag)); @@ -137,7 +137,7 @@ SDValue MipsTargetLowering::getAddrGlobal(SDValue Op, SelectionDAG &DAG, SDValue MipsTargetLowering::getAddrGlobalLargeGOT(SDValue Op, SelectionDAG &DAG, unsigned HiFlag, unsigned LoFlag) const { - DebugLoc DL = Op.getDebugLoc(); + SDLoc DL(Op); EVT Ty = Op.getValueType(); SDValue Hi = DAG.getNode(MipsISD::Hi, DL, Ty, getTargetNode(Op, DAG, HiFlag)); Hi = DAG.getNode(ISD::ADD, DL, Ty, Hi, getGlobalReg(DAG, Ty)); @@ -431,7 +431,7 @@ static SDValue performDivRemCombine(SDNode *N, SelectionDAG &DAG, unsigned HI = (Ty == MVT::i32) ? Mips::HI : Mips::HI64; unsigned Opc = N->getOpcode() == ISD::SDIVREM ? MipsISD::DivRem16 : MipsISD::DivRemU16; - DebugLoc DL = N->getDebugLoc(); + SDLoc DL(N); SDValue DivRem = DAG.getNode(Opc, DL, MVT::Glue, N->getOperand(0), N->getOperand(1)); @@ -509,7 +509,7 @@ static SDValue createFPCmp(SelectionDAG &DAG, const SDValue &Op) { return Op; SDValue RHS = Op.getOperand(1); - DebugLoc DL = Op.getDebugLoc(); + SDLoc DL(Op); // Assume the 3rd operand is a CondCodeSDNode. Add code to check the type of // node if necessary. @@ -521,7 +521,7 @@ static SDValue createFPCmp(SelectionDAG &DAG, const SDValue &Op) { // Creates and returns a CMovFPT/F node. static SDValue createCMovFP(SelectionDAG &DAG, SDValue Cond, SDValue True, - SDValue False, DebugLoc DL) { + SDValue False, SDLoc DL) { ConstantSDNode *CC = cast<ConstantSDNode>(Cond.getOperand(2)); bool invert = invertFPCondCodeUser((Mips::CondCode)CC->getSExtValue()); @@ -552,7 +552,7 @@ static SDValue performSELECTCombine(SDNode *N, SelectionDAG &DAG, if (!CN || CN->getZExtValue()) return SDValue(); - const DebugLoc DL = N->getDebugLoc(); + const SDLoc DL(N); ISD::CondCode CC = cast<CondCodeSDNode>(SetCC.getOperand(2))->get(); SDValue True = N->getOperand(1); @@ -597,7 +597,7 @@ static SDValue performANDCombine(SDNode *N, SelectionDAG &DAG, if (SMPos != 0 || Pos + SMSize > ValTy.getSizeInBits()) return SDValue(); - return DAG.getNode(MipsISD::Ext, N->getDebugLoc(), ValTy, + return DAG.getNode(MipsISD::Ext, SDLoc(N), ValTy, ShiftRight.getOperand(0), DAG.getConstant(Pos, MVT::i32), DAG.getConstant(SMSize, MVT::i32)); } @@ -651,7 +651,7 @@ static SDValue performORCombine(SDNode *N, SelectionDAG &DAG, if ((Shamt != SMPos0) || (SMPos0 + SMSize0 > ValTy.getSizeInBits())) return SDValue(); - return DAG.getNode(MipsISD::Ins, N->getDebugLoc(), ValTy, Shl.getOperand(0), + return DAG.getNode(MipsISD::Ins, SDLoc(N), ValTy, Shl.getOperand(0), DAG.getConstant(SMPos0, MVT::i32), DAG.getConstant(SMSize0, MVT::i32), And0.getOperand(0)); } @@ -676,7 +676,7 @@ static SDValue performADDCombine(SDNode *N, SelectionDAG &DAG, return SDValue(); EVT ValTy = N->getValueType(0); - DebugLoc DL = N->getDebugLoc(); + SDLoc DL(N); SDValue Add1 = DAG.getNode(ISD::ADD, DL, ValTy, N->getOperand(0), Add.getOperand(0)); @@ -1383,7 +1383,7 @@ SDValue MipsTargetLowering::lowerBR_JT(SDValue Op, SelectionDAG &DAG) const { SDValue Chain = Op.getOperand(0); SDValue Table = Op.getOperand(1); SDValue Index = Op.getOperand(2); - DebugLoc DL = Op.getDebugLoc(); + SDLoc 
DL(Op); EVT PTy = getPointerTy(); unsigned EntrySize = DAG.getMachineFunction().getJumpTableInfo()->getEntrySize(*getDataLayout()); @@ -1416,7 +1416,7 @@ lowerBRCOND(SDValue Op, SelectionDAG &DAG) const // the block to branch to if the condition is true. SDValue Chain = Op.getOperand(0); SDValue Dest = Op.getOperand(2); - DebugLoc DL = Op.getDebugLoc(); + SDLoc DL(Op); SDValue CondRes = createFPCmp(DAG, Op.getOperand(1)); @@ -1443,13 +1443,13 @@ lowerSELECT(SDValue Op, SelectionDAG &DAG) const return Op; return createCMovFP(DAG, Cond, Op.getOperand(1), Op.getOperand(2), - Op.getDebugLoc()); + SDLoc(Op)); } SDValue MipsTargetLowering:: lowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const { - DebugLoc DL = Op.getDebugLoc(); + SDLoc DL(Op); EVT Ty = Op.getOperand(0).getValueType(); SDValue Cond = DAG.getNode(ISD::SETCC, DL, getSetCCResultType(*DAG.getContext(), Ty), @@ -1469,13 +1469,13 @@ SDValue MipsTargetLowering::lowerSETCC(SDValue Op, SelectionDAG &DAG) const { SDValue True = DAG.getConstant(1, MVT::i32); SDValue False = DAG.getConstant(0, MVT::i32); - return createCMovFP(DAG, Cond, True, False, Op.getDebugLoc()); + return createCMovFP(DAG, Cond, True, False, SDLoc(Op)); } SDValue MipsTargetLowering::lowerGlobalAddress(SDValue Op, SelectionDAG &DAG) const { // FIXME there isn't actually debug info here - DebugLoc DL = Op.getDebugLoc(); + SDLoc DL(Op); const GlobalValue *GV = cast<GlobalAddressSDNode>(Op)->getGlobal(); if (getTargetMachine().getRelocationModel() != Reloc::PIC_ && !IsN64) { @@ -1523,7 +1523,7 @@ lowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const // Local Exec TLS Model. GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(Op); - DebugLoc DL = GA->getDebugLoc(); + SDLoc DL(GA); const GlobalValue *GV = GA->getGlobal(); EVT PtrVT = getPointerTy(); @@ -1628,7 +1628,7 @@ SDValue MipsTargetLowering::lowerVASTART(SDValue Op, SelectionDAG &DAG) const { MachineFunction &MF = DAG.getMachineFunction(); MipsFunctionInfo *FuncInfo = MF.getInfo<MipsFunctionInfo>(); - DebugLoc DL = Op.getDebugLoc(); + SDLoc DL(Op); SDValue FI = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), getPointerTy()); @@ -1644,7 +1644,7 @@ static SDValue lowerFCOPYSIGN32(SDValue Op, SelectionDAG &DAG, bool HasR2) { EVT TyY = Op.getOperand(1).getValueType(); SDValue Const1 = DAG.getConstant(1, MVT::i32); SDValue Const31 = DAG.getConstant(31, MVT::i32); - DebugLoc DL = Op.getDebugLoc(); + SDLoc DL(Op); SDValue Res; // If operand is of type f64, extract the upper 32-bit. Otherwise, bitcast it @@ -1689,7 +1689,7 @@ static SDValue lowerFCOPYSIGN64(SDValue Op, SelectionDAG &DAG, bool HasR2) { unsigned WidthY = Op.getOperand(1).getValueSizeInBits(); EVT TyX = MVT::getIntegerVT(WidthX), TyY = MVT::getIntegerVT(WidthY); SDValue Const1 = DAG.getConstant(1, MVT::i32); - DebugLoc DL = Op.getDebugLoc(); + SDLoc DL(Op); // Bitcast to integer nodes. SDValue X = DAG.getNode(ISD::BITCAST, DL, TyX, Op.getOperand(0)); @@ -1742,7 +1742,7 @@ MipsTargetLowering::lowerFCOPYSIGN(SDValue Op, SelectionDAG &DAG) const { static SDValue lowerFABS32(SDValue Op, SelectionDAG &DAG, bool HasR2) { SDValue Res, Const1 = DAG.getConstant(1, MVT::i32); - DebugLoc DL = Op.getDebugLoc(); + SDLoc DL(Op); // If operand is of type f64, extract the upper 32-bit. Otherwise, bitcast it // to i32. 
@@ -1771,7 +1771,7 @@ static SDValue lowerFABS32(SDValue Op, SelectionDAG &DAG, bool HasR2) { static SDValue lowerFABS64(SDValue Op, SelectionDAG &DAG, bool HasR2) { SDValue Res, Const1 = DAG.getConstant(1, MVT::i32); - DebugLoc DL = Op.getDebugLoc(); + SDLoc DL(Op); // Bitcast to integer node. SDValue X = DAG.getNode(ISD::BITCAST, DL, MVT::i64, Op.getOperand(0)); @@ -1806,7 +1806,7 @@ lowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const { MachineFrameInfo *MFI = DAG.getMachineFunction().getFrameInfo(); MFI->setFrameAddressIsTaken(true); EVT VT = Op.getValueType(); - DebugLoc DL = Op.getDebugLoc(); + SDLoc DL(Op); SDValue FrameAddr = DAG.getCopyFromReg(DAG.getEntryNode(), DL, IsN64 ? Mips::FP_64 : Mips::FP, VT); return FrameAddr; @@ -1826,7 +1826,7 @@ SDValue MipsTargetLowering::lowerRETURNADDR(SDValue Op, // Return RA, which contains the return address. Mark it an implicit live-in. unsigned Reg = MF.addLiveIn(RA, getRegClassFor(VT)); - return DAG.getCopyFromReg(DAG.getEntryNode(), Op.getDebugLoc(), Reg, VT); + return DAG.getCopyFromReg(DAG.getEntryNode(), SDLoc(Op), Reg, VT); } // An EH_RETURN is the result of lowering llvm.eh.return which in turn is @@ -1842,7 +1842,7 @@ SDValue MipsTargetLowering::lowerEH_RETURN(SDValue Op, SelectionDAG &DAG) SDValue Chain = Op.getOperand(0); SDValue Offset = Op.getOperand(1); SDValue Handler = Op.getOperand(2); - DebugLoc DL = Op.getDebugLoc(); + SDLoc DL(Op); EVT Ty = IsN64 ? MVT::i64 : MVT::i32; // Store stack offset in V1, store jump target in V0. Glue CopyToReg and @@ -1862,14 +1862,14 @@ SDValue MipsTargetLowering::lowerATOMIC_FENCE(SDValue Op, // FIXME: Need pseudo-fence for 'singlethread' fences // FIXME: Set SType for weaker fences where supported/appropriate. unsigned SType = 0; - DebugLoc DL = Op.getDebugLoc(); + SDLoc DL(Op); return DAG.getNode(MipsISD::Sync, DL, MVT::Other, Op.getOperand(0), DAG.getConstant(SType, MVT::i32)); } SDValue MipsTargetLowering::lowerShiftLeftParts(SDValue Op, SelectionDAG &DAG) const { - DebugLoc DL = Op.getDebugLoc(); + SDLoc DL(Op); SDValue Lo = Op.getOperand(0), Hi = Op.getOperand(1); SDValue Shamt = Op.getOperand(2); @@ -1900,7 +1900,7 @@ SDValue MipsTargetLowering::lowerShiftLeftParts(SDValue Op, SDValue MipsTargetLowering::lowerShiftRightParts(SDValue Op, SelectionDAG &DAG, bool IsSRA) const { - DebugLoc DL = Op.getDebugLoc(); + SDLoc DL(Op); SDValue Lo = Op.getOperand(0), Hi = Op.getOperand(1); SDValue Shamt = Op.getOperand(2); @@ -1944,7 +1944,7 @@ static SDValue createLoadLR(unsigned Opc, SelectionDAG &DAG, LoadSDNode *LD, SDValue Ptr = LD->getBasePtr(); EVT VT = LD->getValueType(0), MemVT = LD->getMemoryVT(); EVT BasePtrVT = Ptr.getValueType(); - DebugLoc DL = LD->getDebugLoc(); + SDLoc DL(LD); SDVTList VTList = DAG.getVTList(VT, MVT::Other); if (Offset) @@ -2010,7 +2010,7 @@ SDValue MipsTargetLowering::lowerLOAD(SDValue Op, SelectionDAG &DAG) const { // (set tmp1, (lwr baseptr, tmp0)) // (set tmp2, (shl tmp1, 32)) // (set dst, (srl tmp2, 32)) - DebugLoc DL = LD->getDebugLoc(); + SDLoc DL(LD); SDValue Const32 = DAG.getConstant(32, MVT::i32); SDValue SLL = DAG.getNode(ISD::SHL, DL, MVT::i64, LWR, Const32); SDValue SRL = DAG.getNode(ISD::SRL, DL, MVT::i64, SLL, Const32); @@ -2022,7 +2022,7 @@ static SDValue createStoreLR(unsigned Opc, SelectionDAG &DAG, StoreSDNode *SD, SDValue Chain, unsigned Offset) { SDValue Ptr = SD->getBasePtr(), Value = SD->getValue(); EVT MemVT = SD->getMemoryVT(), BasePtrVT = Ptr.getValueType(); - DebugLoc DL = SD->getDebugLoc(); + SDLoc DL(SD); SDVTList VTList = 
DAG.getVTList(MVT::Other); if (Offset) @@ -2071,10 +2071,10 @@ static SDValue lowerFP_TO_SINT_STORE(StoreSDNode *SD, SelectionDAG &DAG) { return SDValue(); EVT FPTy = EVT::getFloatingPointVT(Val.getValueSizeInBits()); - SDValue Tr = DAG.getNode(MipsISD::TruncIntFP, Val.getDebugLoc(), FPTy, + SDValue Tr = DAG.getNode(MipsISD::TruncIntFP, SDLoc(Val), FPTy, Val.getOperand(0)); - return DAG.getStore(SD->getChain(), SD->getDebugLoc(), Tr, SD->getBasePtr(), + return DAG.getStore(SD->getChain(), SDLoc(SD), Tr, SD->getBasePtr(), SD->getPointerInfo(), SD->isVolatile(), SD->isNonTemporal(), SD->getAlignment()); } @@ -2108,16 +2108,16 @@ SDValue MipsTargetLowering::lowerADD(SDValue Op, SelectionDAG &DAG) const { EVT ValTy = Op->getValueType(0); int FI = MFI->CreateFixedObject(Op.getValueSizeInBits() / 8, 0, false); SDValue InArgsAddr = DAG.getFrameIndex(FI, ValTy); - return DAG.getNode(ISD::ADD, Op->getDebugLoc(), ValTy, InArgsAddr, + return DAG.getNode(ISD::ADD, SDLoc(Op), ValTy, InArgsAddr, DAG.getConstant(0, ValTy)); } SDValue MipsTargetLowering::lowerFP_TO_SINT(SDValue Op, SelectionDAG &DAG) const { EVT FPTy = EVT::getFloatingPointVT(Op.getValueSizeInBits()); - SDValue Trunc = DAG.getNode(MipsISD::TruncIntFP, Op.getDebugLoc(), FPTy, + SDValue Trunc = DAG.getNode(MipsISD::TruncIntFP, SDLoc(Op), FPTy, Op.getOperand(0)); - return DAG.getNode(ISD::BITCAST, Op.getDebugLoc(), Op.getValueType(), Trunc); + return DAG.getNode(ISD::BITCAST, SDLoc(Op), Op.getValueType(), Trunc); } //===----------------------------------------------------------------------===// @@ -2238,7 +2238,7 @@ static unsigned getNextIntArgReg(unsigned Reg) { SDValue MipsTargetLowering::passArgOnStack(SDValue StackPtr, unsigned Offset, - SDValue Chain, SDValue Arg, DebugLoc DL, + SDValue Chain, SDValue Arg, SDLoc DL, bool IsTailCall, SelectionDAG &DAG) const { if (!IsTailCall) { SDValue PtrOff = DAG.getNode(ISD::ADD, DL, getPointerTy(), StackPtr, @@ -2313,7 +2313,7 @@ SDValue MipsTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI, SmallVectorImpl<SDValue> &InVals) const { SelectionDAG &DAG = CLI.DAG; - DebugLoc &DL = CLI.DL; + SDLoc DL = CLI.DL; SmallVector<ISD::OutputArg, 32> &Outs = CLI.Outs; SmallVector<SDValue, 32> &OutVals = CLI.OutVals; SmallVector<ISD::InputArg, 32> &Ins = CLI.Ins; @@ -2513,7 +2513,7 @@ SDValue MipsTargetLowering::LowerCallResult(SDValue Chain, SDValue InFlag, CallingConv::ID CallConv, bool IsVarArg, const SmallVectorImpl<ISD::InputArg> &Ins, - DebugLoc DL, SelectionDAG &DAG, + SDLoc DL, SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals, const SDNode *CallNode, const Type *RetTy) const { @@ -2552,7 +2552,7 @@ MipsTargetLowering::LowerFormalArguments(SDValue Chain, CallingConv::ID CallConv, bool IsVarArg, const SmallVectorImpl<ISD::InputArg> &Ins, - DebugLoc DL, SelectionDAG &DAG, + SDLoc DL, SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const { MachineFunction &MF = DAG.getMachineFunction(); @@ -2718,7 +2718,7 @@ MipsTargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv, bool IsVarArg, const SmallVectorImpl<ISD::OutputArg> &Outs, const SmallVectorImpl<SDValue> &OutVals, - DebugLoc DL, SelectionDAG &DAG) const { + SDLoc DL, SelectionDAG &DAG) const { // CCValAssign - represent the assignment of // the return value to a location SmallVector<CCValAssign, 16> RVLocs; @@ -3328,7 +3328,7 @@ MVT MipsTargetLowering::MipsCC::getRegVT(MVT VT, const Type *OrigTy, } void MipsTargetLowering:: -copyByValRegs(SDValue Chain, DebugLoc DL, std::vector<SDValue> &OutChains, +copyByValRegs(SDValue 
Chain, SDLoc DL, std::vector<SDValue> &OutChains, SelectionDAG &DAG, const ISD::ArgFlagsTy &Flags, SmallVectorImpl<SDValue> &InVals, const Argument *FuncArg, const MipsCC &CC, const ByValArgInfo &ByVal) const { @@ -3372,7 +3372,7 @@ copyByValRegs(SDValue Chain, DebugLoc DL, std::vector<SDValue> &OutChains, // Copy byVal arg to registers and stack. void MipsTargetLowering:: -passByValArg(SDValue Chain, DebugLoc DL, +passByValArg(SDValue Chain, SDLoc DL, std::deque< std::pair<unsigned, SDValue> > &RegsToPass, SmallVector<SDValue, 8> &MemOpChains, SDValue StackPtr, MachineFrameInfo *MFI, SelectionDAG &DAG, SDValue Arg, @@ -3470,7 +3470,7 @@ passByValArg(SDValue Chain, DebugLoc DL, void MipsTargetLowering::writeVarArgRegs(std::vector<SDValue> &OutChains, const MipsCC &CC, SDValue Chain, - DebugLoc DL, SelectionDAG &DAG) const { + SDLoc DL, SelectionDAG &DAG) const { unsigned NumRegs = CC.numIntArgRegs(); const uint16_t *ArgRegs = CC.intArgRegs(); const CCState &CCInfo = CC.getCCInfo(); diff --git a/lib/Target/Mips/MipsISelLowering.h b/lib/Target/Mips/MipsISelLowering.h index f090dde..fe043ae 100644 --- a/lib/Target/Mips/MipsISelLowering.h +++ b/lib/Target/Mips/MipsISelLowering.h @@ -336,7 +336,7 @@ namespace llvm { SDValue LowerCallResult(SDValue Chain, SDValue InFlag, CallingConv::ID CallConv, bool isVarArg, const SmallVectorImpl<ISD::InputArg> &Ins, - DebugLoc dl, SelectionDAG &DAG, + SDLoc dl, SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals, const SDNode *CallNode, const Type *RetTy) const; @@ -376,7 +376,7 @@ namespace llvm { /// copyByValArg - Copy argument registers which were used to pass a byval /// argument to the stack. Create a stack frame object for the byval /// argument. - void copyByValRegs(SDValue Chain, DebugLoc DL, + void copyByValRegs(SDValue Chain, SDLoc DL, std::vector<SDValue> &OutChains, SelectionDAG &DAG, const ISD::ArgFlagsTy &Flags, SmallVectorImpl<SDValue> &InVals, @@ -384,7 +384,7 @@ namespace llvm { const MipsCC &CC, const ByValArgInfo &ByVal) const; /// passByValArg - Pass a byval argument in registers or on stack. - void passByValArg(SDValue Chain, DebugLoc DL, + void passByValArg(SDValue Chain, SDLoc DL, std::deque< std::pair<unsigned, SDValue> > &RegsToPass, SmallVector<SDValue, 8> &MemOpChains, SDValue StackPtr, MachineFrameInfo *MFI, SelectionDAG &DAG, SDValue Arg, @@ -395,17 +395,17 @@ namespace llvm { /// to the stack. Also create a stack frame object for the first variable /// argument. 
void writeVarArgRegs(std::vector<SDValue> &OutChains, const MipsCC &CC, - SDValue Chain, DebugLoc DL, SelectionDAG &DAG) const; + SDValue Chain, SDLoc DL, SelectionDAG &DAG) const; virtual SDValue LowerFormalArguments(SDValue Chain, CallingConv::ID CallConv, bool isVarArg, const SmallVectorImpl<ISD::InputArg> &Ins, - DebugLoc dl, SelectionDAG &DAG, + SDLoc dl, SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const; SDValue passArgOnStack(SDValue StackPtr, unsigned Offset, SDValue Chain, - SDValue Arg, DebugLoc DL, bool IsTailCall, + SDValue Arg, SDLoc DL, bool IsTailCall, SelectionDAG &DAG) const; virtual SDValue @@ -423,7 +423,7 @@ namespace llvm { CallingConv::ID CallConv, bool isVarArg, const SmallVectorImpl<ISD::OutputArg> &Outs, const SmallVectorImpl<SDValue> &OutVals, - DebugLoc dl, SelectionDAG &DAG) const; + SDLoc dl, SelectionDAG &DAG) const; // Inline asm support ConstraintType getConstraintType(const std::string &Constraint) const; diff --git a/lib/Target/Mips/MipsSEISelDAGToDAG.cpp b/lib/Target/Mips/MipsSEISelDAGToDAG.cpp index 8a6523a..a6f848f 100644 --- a/lib/Target/Mips/MipsSEISelDAGToDAG.cpp +++ b/lib/Target/Mips/MipsSEISelDAGToDAG.cpp @@ -214,7 +214,7 @@ void MipsSEDAGToDAGISel::processFunctionAfterISel(MachineFunction &MF) { } SDNode *MipsSEDAGToDAGISel::selectAddESubE(unsigned MOp, SDValue InFlag, - SDValue CmpLHS, DebugLoc DL, + SDValue CmpLHS, SDLoc DL, SDNode *Node) const { unsigned Opc = InFlag.getOpcode(); (void)Opc; @@ -316,7 +316,7 @@ bool MipsSEDAGToDAGISel::selectIntAddr(SDValue Addr, SDValue &Base, std::pair<bool, SDNode*> MipsSEDAGToDAGISel::selectNode(SDNode *Node) { unsigned Opcode = Node->getOpcode(); - DebugLoc DL = Node->getDebugLoc(); + SDLoc DL(Node); /// // Instruction Selection not handled by the auto-generated @@ -374,7 +374,7 @@ std::pair<bool, SDNode*> MipsSEDAGToDAGISel::selectNode(SDNode *Node) { AnalyzeImm.Analyze(Imm, Size, false); MipsAnalyzeImmediate::InstSeq::const_iterator Inst = Seq.begin(); - DebugLoc DL = CN->getDebugLoc(); + SDLoc DL(CN); SDNode *RegOpnd; SDValue ImmOpnd = CurDAG->getTargetConstant(SignExtend64<16>(Inst->ImmOpnd), MVT::i64); @@ -416,7 +416,7 @@ std::pair<bool, SDNode*> MipsSEDAGToDAGISel::selectNode(SDNode *Node) { } SDNode *Rdhwr = - CurDAG->getMachineNode(RdhwrOpc, Node->getDebugLoc(), + CurDAG->getMachineNode(RdhwrOpc, SDLoc(Node), Node->getValueType(0), CurDAG->getRegister(SrcReg, PtrVT)); SDValue Chain = CurDAG->getCopyToReg(CurDAG->getEntryNode(), DL, DestReg, diff --git a/lib/Target/Mips/MipsSEISelDAGToDAG.h b/lib/Target/Mips/MipsSEISelDAGToDAG.h index a235e96..03ed1f9 100644 --- a/lib/Target/Mips/MipsSEISelDAGToDAG.h +++ b/lib/Target/Mips/MipsSEISelDAGToDAG.h @@ -32,11 +32,11 @@ private: bool replaceUsesWithZeroReg(MachineRegisterInfo *MRI, const MachineInstr&); - std::pair<SDNode*, SDNode*> selectMULT(SDNode *N, unsigned Opc, DebugLoc dl, + std::pair<SDNode*, SDNode*> selectMULT(SDNode *N, unsigned Opc, SDLoc dl, EVT Ty, bool HasLo, bool HasHi); SDNode *selectAddESubE(unsigned MOp, SDValue InFlag, SDValue CmpLHS, - DebugLoc DL, SDNode *Node) const; + SDLoc DL, SDNode *Node) const; virtual bool selectAddrRegImm(SDValue Addr, SDValue &Base, SDValue &Offset) const; diff --git a/lib/Target/Mips/MipsSEISelLowering.cpp b/lib/Target/Mips/MipsSEISelLowering.cpp index 8544bb8..f640ecc 100644 --- a/lib/Target/Mips/MipsSEISelLowering.cpp +++ b/lib/Target/Mips/MipsSEISelLowering.cpp @@ -186,7 +186,7 @@ static bool selectMADD(SDNode *ADDENode, SelectionDAG *CurDAG) { if (!MultHi.hasOneUse() || !MultLo.hasOneUse()) 
return false; - DebugLoc DL = ADDENode->getDebugLoc(); + SDLoc DL(ADDENode); // Initialize accumulator. SDValue ACCIn = CurDAG->getNode(MipsISD::InsertLOHI, DL, MVT::Untyped, @@ -262,7 +262,7 @@ static bool selectMSUB(SDNode *SUBENode, SelectionDAG *CurDAG) { if (!MultHi.hasOneUse() || !MultLo.hasOneUse()) return false; - DebugLoc DL = SUBENode->getDebugLoc(); + SDLoc DL(SUBENode); // Initialize accumulator. SDValue ACCIn = CurDAG->getNode(MipsISD::InsertLOHI, DL, MVT::Untyped, @@ -337,7 +337,7 @@ static SDValue performDSPShiftCombine(unsigned Opc, SDNode *N, EVT Ty, (SplatValue.getZExtValue() >= EltSize)) return SDValue(); - return DAG.getNode(Opc, N->getDebugLoc(), Ty, N->getOperand(0), + return DAG.getNode(Opc, SDLoc(N), Ty, N->getOperand(0), DAG.getConstant(SplatValue.getZExtValue(), MVT::i32)); } @@ -402,7 +402,7 @@ static SDValue performSETCCCombine(SDNode *N, SelectionDAG &DAG) { if (!isLegalDSPCondCode(Ty, cast<CondCodeSDNode>(N->getOperand(2))->get())) return SDValue(); - return DAG.getNode(MipsISD::SETCC_DSP, N->getDebugLoc(), Ty, N->getOperand(0), + return DAG.getNode(MipsISD::SETCC_DSP, SDLoc(N), Ty, N->getOperand(0), N->getOperand(1), N->getOperand(2)); } @@ -417,7 +417,7 @@ static SDValue performVSELECTCombine(SDNode *N, SelectionDAG &DAG) { if (SetCC.getOpcode() != MipsISD::SETCC_DSP) return SDValue(); - return DAG.getNode(MipsISD::SELECT_CC_DSP, N->getDebugLoc(), Ty, + return DAG.getNode(MipsISD::SELECT_CC_DSP, SDLoc(N), Ty, SetCC.getOperand(0), SetCC.getOperand(1), N->getOperand(1), N->getOperand(2), SetCC.getOperand(2)); } @@ -500,7 +500,7 @@ SDValue MipsSETargetLowering::lowerMulDiv(SDValue Op, unsigned NewOpc, bool HasLo, bool HasHi, SelectionDAG &DAG) const { EVT Ty = Op.getOperand(0).getValueType(); - DebugLoc DL = Op.getDebugLoc(); + SDLoc DL(Op); SDValue Mult = DAG.getNode(NewOpc, DL, MVT::Untyped, Op.getOperand(0), Op.getOperand(1)); SDValue Lo, Hi; @@ -520,7 +520,7 @@ SDValue MipsSETargetLowering::lowerMulDiv(SDValue Op, unsigned NewOpc, } -static SDValue initAccumulator(SDValue In, DebugLoc DL, SelectionDAG &DAG) { +static SDValue initAccumulator(SDValue In, SDLoc DL, SelectionDAG &DAG) { SDValue InLo = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, In, DAG.getConstant(0, MVT::i32)); SDValue InHi = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, In, @@ -528,7 +528,7 @@ static SDValue initAccumulator(SDValue In, DebugLoc DL, SelectionDAG &DAG) { return DAG.getNode(MipsISD::InsertLOHI, DL, MVT::Untyped, InLo, InHi); } -static SDValue extractLOHI(SDValue Op, DebugLoc DL, SelectionDAG &DAG) { +static SDValue extractLOHI(SDValue Op, SDLoc DL, SelectionDAG &DAG) { SDValue Lo = DAG.getNode(MipsISD::ExtractLOHI, DL, MVT::i32, Op, DAG.getConstant(Mips::sub_lo, MVT::i32)); SDValue Hi = DAG.getNode(MipsISD::ExtractLOHI, DL, MVT::i32, Op, @@ -549,7 +549,7 @@ static SDValue extractLOHI(SDValue Op, DebugLoc DL, SelectionDAG &DAG) { // out64 = merge-values (v0, v1) // static SDValue lowerDSPIntr(SDValue Op, SelectionDAG &DAG, unsigned Opc) { - DebugLoc DL = Op.getDebugLoc(); + SDLoc DL(Op); bool HasChainIn = Op->getOperand(0).getValueType() == MVT::Other; SmallVector<SDValue, 3> Ops; unsigned OpNo = 0; diff --git a/lib/Target/NVPTX/NVPTXISelDAGToDAG.cpp b/lib/Target/NVPTX/NVPTXISelDAGToDAG.cpp index d4378c2..120d716 100644 --- a/lib/Target/NVPTX/NVPTXISelDAGToDAG.cpp +++ b/lib/Target/NVPTX/NVPTXISelDAGToDAG.cpp @@ -162,7 +162,7 @@ static unsigned int getCodeAddrSpace(MemSDNode *N, } SDNode *NVPTXDAGToDAGISel::SelectLoad(SDNode *N) { - DebugLoc dl = N->getDebugLoc(); + SDLoc 
dl(N); LoadSDNode *LD = cast<LoadSDNode>(N); EVT LoadedVT = LD->getMemoryVT(); SDNode *NVPTXLD = NULL; @@ -401,7 +401,7 @@ SDNode *NVPTXDAGToDAGISel::SelectLoadVector(SDNode *N) { SDValue Op1 = N->getOperand(1); SDValue Addr, Offset, Base; unsigned Opcode; - DebugLoc DL = N->getDebugLoc(); + SDLoc DL(N); SDNode *LD; MemSDNode *MemSD = cast<MemSDNode>(N); EVT LoadedVT = MemSD->getMemoryVT(); @@ -782,7 +782,7 @@ SDNode *NVPTXDAGToDAGISel::SelectLDGLDUVector(SDNode *N) { SDValue Chain = N->getOperand(0); SDValue Op1 = N->getOperand(1); unsigned Opcode; - DebugLoc DL = N->getDebugLoc(); + SDLoc DL(N); SDNode *LD; EVT RetVT = N->getValueType(0); @@ -979,7 +979,7 @@ SDNode *NVPTXDAGToDAGISel::SelectLDGLDUVector(SDNode *N) { } SDNode *NVPTXDAGToDAGISel::SelectStore(SDNode *N) { - DebugLoc dl = N->getDebugLoc(); + SDLoc dl(N); StoreSDNode *ST = cast<StoreSDNode>(N); EVT StoreVT = ST->getMemoryVT(); SDNode *NVPTXST = NULL; @@ -1214,7 +1214,7 @@ SDNode *NVPTXDAGToDAGISel::SelectStoreVector(SDNode *N) { SDValue Op1 = N->getOperand(1); SDValue Addr, Offset, Base; unsigned Opcode; - DebugLoc DL = N->getDebugLoc(); + SDLoc DL(N); SDNode *ST; EVT EltVT = Op1.getValueType(); MemSDNode *MemSD = cast<MemSDNode>(N); diff --git a/lib/Target/NVPTX/NVPTXISelLowering.cpp b/lib/Target/NVPTX/NVPTXISelLowering.cpp index 6e01a5a..5caf08a2 100644 --- a/lib/Target/NVPTX/NVPTXISelLowering.cpp +++ b/lib/Target/NVPTX/NVPTXISelLowering.cpp @@ -275,7 +275,7 @@ bool NVPTXTargetLowering::shouldSplitVectorElementType(EVT VT) const { SDValue NVPTXTargetLowering::LowerGlobalAddress(SDValue Op, SelectionDAG &DAG) const { - DebugLoc dl = Op.getDebugLoc(); + SDLoc dl(Op); const GlobalValue *GV = cast<GlobalAddressSDNode>(Op)->getGlobal(); Op = DAG.getTargetGlobalAddress(GV, dl, getPointerTy()); return DAG.getNode(NVPTXISD::Wrapper, dl, getPointerTy(), Op); @@ -435,7 +435,7 @@ std::string NVPTXTargetLowering::getPrototype( SDValue NVPTXTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI, SmallVectorImpl<SDValue> &InVals) const { SelectionDAG &DAG = CLI.DAG; - DebugLoc &dl = CLI.DL; + SDLoc dl = CLI.DL; SmallVector<ISD::OutputArg, 32> &Outs = CLI.Outs; SmallVector<SDValue, 32> &OutVals = CLI.OutVals; SmallVector<ISD::InputArg, 32> &Ins = CLI.Ins; @@ -810,7 +810,7 @@ SDValue NVPTXTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI, SDValue NVPTXTargetLowering::LowerCONCAT_VECTORS(SDValue Op, SelectionDAG &DAG) const { SDNode *Node = Op.getNode(); - DebugLoc dl = Node->getDebugLoc(); + SDLoc dl(Node); SmallVector<SDValue, 8> Ops; unsigned NumOperands = Node->getNumOperands(); for (unsigned i = 0; i < NumOperands; ++i) { @@ -866,7 +866,7 @@ SDValue NVPTXTargetLowering::LowerLOAD(SDValue Op, SelectionDAG &DAG) const { SDValue NVPTXTargetLowering::LowerLOADi1(SDValue Op, SelectionDAG &DAG) const { SDNode *Node = Op.getNode(); LoadSDNode *LD = cast<LoadSDNode>(Node); - DebugLoc dl = Node->getDebugLoc(); + SDLoc dl(Node); assert(LD->getExtensionType() == ISD::NON_EXTLOAD); assert(Node->getValueType(0) == MVT::i1 && "Custom lowering for i1 load only"); @@ -896,7 +896,7 @@ SDValue NVPTXTargetLowering::LowerSTOREVector(SDValue Op, SelectionDAG &DAG) const { SDNode *N = Op.getNode(); SDValue Val = N->getOperand(1); - DebugLoc DL = N->getDebugLoc(); + SDLoc DL(N); EVT ValVT = Val.getValueType(); if (ValVT.isVector()) { @@ -985,7 +985,7 @@ NVPTXTargetLowering::LowerSTOREVector(SDValue Op, SelectionDAG &DAG) const { // st i8, addr SDValue NVPTXTargetLowering::LowerSTOREi1(SDValue Op, SelectionDAG &DAG) const { SDNode *Node 
= Op.getNode(); - DebugLoc dl = Node->getDebugLoc(); + SDLoc dl(Node); StoreSDNode *ST = cast<StoreSDNode>(Node); SDValue Tmp1 = ST->getChain(); SDValue Tmp2 = ST->getBasePtr(); @@ -1046,7 +1046,7 @@ bool llvm::isImageOrSamplerVal(const Value *arg, const Module *context) { SDValue NVPTXTargetLowering::LowerFormalArguments( SDValue Chain, CallingConv::ID CallConv, bool isVarArg, - const SmallVectorImpl<ISD::InputArg> &Ins, DebugLoc dl, SelectionDAG &DAG, + const SmallVectorImpl<ISD::InputArg> &Ins, SDLoc dl, SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const { MachineFunction &MF = DAG.getMachineFunction(); const DataLayout *TD = getDataLayout(); @@ -1240,7 +1240,7 @@ SDValue NVPTXTargetLowering::LowerFormalArguments( SDValue NVPTXTargetLowering::LowerReturn( SDValue Chain, CallingConv::ID CallConv, bool isVarArg, const SmallVectorImpl<ISD::OutputArg> &Outs, - const SmallVectorImpl<SDValue> &OutVals, DebugLoc dl, + const SmallVectorImpl<SDValue> &OutVals, SDLoc dl, SelectionDAG &DAG) const { bool isABI = (nvptxSubtarget.getSmVersion() >= 20); @@ -1450,7 +1450,7 @@ unsigned NVPTXTargetLowering::getFunctionAlignment(const Function *) const { static void ReplaceLoadVector(SDNode *N, SelectionDAG &DAG, SmallVectorImpl<SDValue> &Results) { EVT ResVT = N->getValueType(0); - DebugLoc DL = N->getDebugLoc(); + SDLoc DL(N); assert(ResVT.isVector() && "Vector load must have vector type"); @@ -1543,7 +1543,7 @@ static void ReplaceINTRINSIC_W_CHAIN(SDNode *N, SelectionDAG &DAG, SmallVectorImpl<SDValue> &Results) { SDValue Chain = N->getOperand(0); SDValue Intrin = N->getOperand(1); - DebugLoc DL = N->getDebugLoc(); + SDLoc DL(N); // Get the intrinsic ID unsigned IntrinNo = cast<ConstantSDNode>(Intrin.getNode())->getZExtValue(); diff --git a/lib/Target/NVPTX/NVPTXISelLowering.h b/lib/Target/NVPTX/NVPTXISelLowering.h index c873c4e..d3ed63a 100644 --- a/lib/Target/NVPTX/NVPTXISelLowering.h +++ b/lib/Target/NVPTX/NVPTXISelLowering.h @@ -112,7 +112,7 @@ public: virtual SDValue LowerFormalArguments( SDValue Chain, CallingConv::ID CallConv, bool isVarArg, - const SmallVectorImpl<ISD::InputArg> &Ins, DebugLoc dl, SelectionDAG &DAG, + const SmallVectorImpl<ISD::InputArg> &Ins, SDLoc dl, SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const; virtual SDValue @@ -125,7 +125,7 @@ public: virtual SDValue LowerReturn(SDValue Chain, CallingConv::ID CallConv, bool isVarArg, const SmallVectorImpl<ISD::OutputArg> &Outs, - const SmallVectorImpl<SDValue> &OutVals, DebugLoc dl, + const SmallVectorImpl<SDValue> &OutVals, SDLoc dl, SelectionDAG &DAG) const; virtual void LowerAsmOperandForConstraint(SDValue Op, std::string &Constraint, diff --git a/lib/Target/PowerPC/PPCISelDAGToDAG.cpp b/lib/Target/PowerPC/PPCISelDAGToDAG.cpp index b707145..e006945 100644 --- a/lib/Target/PowerPC/PPCISelDAGToDAG.cpp +++ b/lib/Target/PowerPC/PPCISelDAGToDAG.cpp @@ -110,7 +110,7 @@ namespace { /// SelectCC - Select a comparison of the specified values with the /// specified condition code, returning the CR# of the expression. - SDValue SelectCC(SDValue LHS, SDValue RHS, ISD::CondCode CC, DebugLoc dl); + SDValue SelectCC(SDValue LHS, SDValue RHS, ISD::CondCode CC, SDLoc dl); /// SelectAddrImm - Returns true if the address N can be represented by /// a base register plus a signed 16-bit displacement [r+imm]. 
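The ISel hunks above (Mips, NVPTX, and the PowerPC selector that follows) all apply the same mechanical substitution: wherever a selector used to read a node's DebugLoc, it now constructs an SDLoc directly from the node, so the IR ordering travels with the source location. Below is a minimal sketch of the post-patch shape; rebuildWithLoc is a hypothetical helper, not a function touched by this commit, and only the SDLoc handling mirrors the hunks in the patch.

// Illustrative sketch only: rebuildWithLoc is hypothetical; the SDLoc usage
// follows the pattern of the hunks above.
#include "llvm/CodeGen/SelectionDAG.h"

using namespace llvm;

static SDValue rebuildWithLoc(SelectionDAG &DAG, unsigned Opc, SDNode *N) {
  // Pre-patch:  DebugLoc dl = N->getDebugLoc();   (debug info only)
  // Post-patch: SDLoc wraps the node and carries its DebugLoc plus IR order.
  SDLoc dl(N);

  // The getNode()/getMachineNode() overloads now take the SDLoc, so the new
  // node is tagged with both pieces of information.
  return DAG.getNode(Opc, dl, N->getValueType(0), N->getOperand(0));
}

Call sites that need the location only once skip the named variable and pass a temporary such as SDLoc(Op), SDLoc(Val) or SDLoc(N) straight into getNode() or getStore(), as the Mips and NVPTX hunks above do.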
@@ -397,7 +397,7 @@ bool PPCDAGToDAGISel::isRotateAndMask(SDNode *N, unsigned Mask, SDNode *PPCDAGToDAGISel::SelectBitfieldInsert(SDNode *N) { SDValue Op0 = N->getOperand(0); SDValue Op1 = N->getOperand(1); - DebugLoc dl = N->getDebugLoc(); + SDLoc dl(N); APInt LKZ, LKO, RKZ, RKO; CurDAG->ComputeMaskedBits(Op0, LKZ, LKO); @@ -466,7 +466,7 @@ SDNode *PPCDAGToDAGISel::SelectBitfieldInsert(SDNode *N) { /// SelectCC - Select a comparison of the specified values with the specified /// condition code, returning the CR# of the expression. SDValue PPCDAGToDAGISel::SelectCC(SDValue LHS, SDValue RHS, - ISD::CondCode CC, DebugLoc dl) { + ISD::CondCode CC, SDLoc dl) { // Always select the LHS. unsigned Opc; @@ -710,7 +710,7 @@ static unsigned int getVCmpEQInst(MVT::SimpleValueType VecVT) { SDNode *PPCDAGToDAGISel::SelectSETCC(SDNode *N) { - DebugLoc dl = N->getDebugLoc(); + SDLoc dl(N); unsigned Imm; ISD::CondCode CC = cast<CondCodeSDNode>(N->getOperand(2))->get(); EVT PtrVT = CurDAG->getTargetLoweringInfo().getPointerTy(); @@ -894,7 +894,7 @@ SDNode *PPCDAGToDAGISel::SelectSETCC(SDNode *N) { // Select - Convert the specified operand from a target-independent to a // target-specific node if it hasn't already been changed. SDNode *PPCDAGToDAGISel::Select(SDNode *N) { - DebugLoc dl = N->getDebugLoc(); + SDLoc dl(N); if (N->isMachineOpcode()) return NULL; // Already selected. @@ -1528,7 +1528,7 @@ void PPCDAGToDAGISel::PostprocessISelDAG() { // immediate operand, add it now. if (ReplaceFlags) { if (GlobalAddressSDNode *GA = dyn_cast<GlobalAddressSDNode>(ImmOpnd)) { - DebugLoc dl = GA->getDebugLoc(); + SDLoc dl(GA); const GlobalValue *GV = GA->getGlobal(); ImmOpnd = CurDAG->getTargetGlobalAddress(GV, dl, MVT::i64, 0, Flags); } else if (ConstantPoolSDNode *CP = diff --git a/lib/Target/PowerPC/PPCISelLowering.cpp b/lib/Target/PowerPC/PPCISelLowering.cpp index b0a684e..6c8af0c 100644 --- a/lib/Target/PowerPC/PPCISelLowering.cpp +++ b/lib/Target/PowerPC/PPCISelLowering.cpp @@ -1056,7 +1056,7 @@ bool PPCTargetLowering::SelectAddressRegImm(SDValue N, SDValue &Disp, SelectionDAG &DAG, bool Aligned) const { // FIXME dl should come from parent load or store, not from address - DebugLoc dl = N.getDebugLoc(); + SDLoc dl(N); // If this can be more profitably realized as r+r, fail. if (SelectAddressRegReg(N, Disp, Base, DAG)) return false; @@ -1166,7 +1166,6 @@ bool PPCTargetLowering::SelectAddressRegRegOnly(SDValue N, SDValue &Base, return true; } - /// getPreIndexedAddressParts - returns true by value, base pointer and /// offset pointer and addressing mode by reference if the node's address /// can be legally represented as pre-indexed load / store address. @@ -1284,7 +1283,7 @@ static SDValue LowerLabelRef(SDValue HiPart, SDValue LoPart, bool isPIC, SelectionDAG &DAG) { EVT PtrVT = HiPart.getValueType(); SDValue Zero = DAG.getConstant(0, PtrVT); - DebugLoc DL = HiPart.getDebugLoc(); + SDLoc DL(HiPart); SDValue Hi = DAG.getNode(PPCISD::Hi, DL, PtrVT, HiPart, Zero); SDValue Lo = DAG.getNode(PPCISD::Lo, DL, PtrVT, LoPart, Zero); @@ -1309,7 +1308,7 @@ SDValue PPCTargetLowering::LowerConstantPool(SDValue Op, // The actual address of the GlobalValue is stored in the TOC. 
if (PPCSubTarget.isSVR4ABI() && PPCSubTarget.isPPC64()) { SDValue GA = DAG.getTargetConstantPool(C, PtrVT, CP->getAlignment(), 0); - return DAG.getNode(PPCISD::TOC_ENTRY, CP->getDebugLoc(), MVT::i64, GA, + return DAG.getNode(PPCISD::TOC_ENTRY, SDLoc(CP), MVT::i64, GA, DAG.getRegister(PPC::X2, MVT::i64)); } @@ -1330,7 +1329,7 @@ SDValue PPCTargetLowering::LowerJumpTable(SDValue Op, SelectionDAG &DAG) const { // The actual address of the GlobalValue is stored in the TOC. if (PPCSubTarget.isSVR4ABI() && PPCSubTarget.isPPC64()) { SDValue GA = DAG.getTargetJumpTable(JT->getIndex(), PtrVT); - return DAG.getNode(PPCISD::TOC_ENTRY, JT->getDebugLoc(), MVT::i64, GA, + return DAG.getNode(PPCISD::TOC_ENTRY, SDLoc(JT), MVT::i64, GA, DAG.getRegister(PPC::X2, MVT::i64)); } @@ -1358,7 +1357,7 @@ SDValue PPCTargetLowering::LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const { GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(Op); - DebugLoc dl = GA->getDebugLoc(); + SDLoc dl(GA); const GlobalValue *GV = GA->getGlobal(); EVT PtrVT = getPointerTy(); bool is64bit = PPCSubTarget.isPPC64(); @@ -1444,7 +1443,7 @@ SDValue PPCTargetLowering::LowerGlobalAddress(SDValue Op, SelectionDAG &DAG) const { EVT PtrVT = Op.getValueType(); GlobalAddressSDNode *GSDN = cast<GlobalAddressSDNode>(Op); - DebugLoc DL = GSDN->getDebugLoc(); + SDLoc DL(GSDN); const GlobalValue *GV = GSDN->getGlobal(); // 64-bit SVR4 ABI code is always position-independent. @@ -1475,7 +1474,7 @@ SDValue PPCTargetLowering::LowerGlobalAddress(SDValue Op, SDValue PPCTargetLowering::LowerSETCC(SDValue Op, SelectionDAG &DAG) const { ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(2))->get(); - DebugLoc dl = Op.getDebugLoc(); + SDLoc dl(Op); // If we're comparing for equality to zero, expose the fact that this is // implented as a ctlz/srl pair on ppc, so that the dag combiner can @@ -1524,7 +1523,7 @@ SDValue PPCTargetLowering::LowerVAARG(SDValue Op, SelectionDAG &DAG, SDValue InChain = Node->getOperand(0); SDValue VAListPtr = Node->getOperand(1); const Value *SV = cast<SrcValueSDNode>(Node->getOperand(2))->getValue(); - DebugLoc dl = Node->getDebugLoc(); + SDLoc dl(Node); assert(!Subtarget.isPPC64() && "LowerVAARG is PPC32 only"); @@ -1635,7 +1634,7 @@ SDValue PPCTargetLowering::LowerINIT_TRAMPOLINE(SDValue Op, SDValue Trmp = Op.getOperand(1); // trampoline SDValue FPtr = Op.getOperand(2); // nested function SDValue Nest = Op.getOperand(3); // 'nest' parameter value - DebugLoc dl = Op.getDebugLoc(); + SDLoc dl(Op); EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(); bool isPPC64 = (PtrVT == MVT::i64); @@ -1677,7 +1676,7 @@ SDValue PPCTargetLowering::LowerVASTART(SDValue Op, SelectionDAG &DAG, MachineFunction &MF = DAG.getMachineFunction(); PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>(); - DebugLoc dl = Op.getDebugLoc(); + SDLoc dl(Op); if (Subtarget.isDarwinABI() || Subtarget.isPPC64()) { // vastart just stores the address of the VarArgsFrameIndex slot into the @@ -1860,7 +1859,7 @@ PPCTargetLowering::LowerFormalArguments(SDValue Chain, CallingConv::ID CallConv, bool isVarArg, const SmallVectorImpl<ISD::InputArg> &Ins, - DebugLoc dl, SelectionDAG &DAG, + SDLoc dl, SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const { if (PPCSubTarget.isSVR4ABI()) { @@ -1882,7 +1881,7 @@ PPCTargetLowering::LowerFormalArguments_32SVR4( CallingConv::ID CallConv, bool isVarArg, const SmallVectorImpl<ISD::InputArg> &Ins, - DebugLoc dl, SelectionDAG &DAG, + SDLoc dl, SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const { // 32-bit 
SVR4 ABI Stack Frame Layout: @@ -2099,7 +2098,7 @@ PPCTargetLowering::LowerFormalArguments_32SVR4( SDValue PPCTargetLowering::extendArgForPPC64(ISD::ArgFlagsTy Flags, EVT ObjectVT, SelectionDAG &DAG, SDValue ArgVal, - DebugLoc dl) const { + SDLoc dl) const { if (Flags.isSExt()) ArgVal = DAG.getNode(ISD::AssertSext, dl, MVT::i64, ArgVal, DAG.getValueType(ObjectVT)); @@ -2142,7 +2141,7 @@ PPCTargetLowering::LowerFormalArguments_64SVR4( CallingConv::ID CallConv, bool isVarArg, const SmallVectorImpl<ISD::InputArg> &Ins, - DebugLoc dl, SelectionDAG &DAG, + SDLoc dl, SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const { // TODO: add description of PPC stack frame format, or at least some docs. // @@ -2431,7 +2430,7 @@ PPCTargetLowering::LowerFormalArguments_Darwin( CallingConv::ID CallConv, bool isVarArg, const SmallVectorImpl<ISD::InputArg> &Ins, - DebugLoc dl, SelectionDAG &DAG, + SDLoc dl, SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const { // TODO: add description of PPC stack frame format, or at least some docs. // @@ -2933,7 +2932,7 @@ StoreTailCallArgumentsToStackSlot(SelectionDAG &DAG, SDValue Chain, const SmallVector<TailCallArgumentInfo, 8> &TailCallArgs, SmallVector<SDValue, 8> &MemOpChains, - DebugLoc dl) { + SDLoc dl) { for (unsigned i = 0, e = TailCallArgs.size(); i != e; ++i) { SDValue Arg = TailCallArgs[i].Arg; SDValue FIN = TailCallArgs[i].FrameIdxOp; @@ -2955,7 +2954,7 @@ static SDValue EmitTailCallStoreFPAndRetAddr(SelectionDAG &DAG, int SPDiff, bool isPPC64, bool isDarwinABI, - DebugLoc dl) { + SDLoc dl) { if (SPDiff) { // Calculate the new stack slot for the return address. int SlotSize = isPPC64 ? 8 : 4; @@ -3012,7 +3011,7 @@ SDValue PPCTargetLowering::EmitTailCallLoadFPAndRetAddr(SelectionDAG & DAG, SDValue &LROpOut, SDValue &FPOpOut, bool isDarwinABI, - DebugLoc dl) const { + SDLoc dl) const { if (SPDiff) { // Load the LR and FP stack slot for later adjusting. EVT VT = PPCSubTarget.isPPC64() ? 
MVT::i64 : MVT::i32; @@ -3042,7 +3041,7 @@ SDValue PPCTargetLowering::EmitTailCallLoadFPAndRetAddr(SelectionDAG & DAG, static SDValue CreateCopyOfByValArgument(SDValue Src, SDValue Dst, SDValue Chain, ISD::ArgFlagsTy Flags, SelectionDAG &DAG, - DebugLoc dl) { + SDLoc dl) { SDValue SizeNode = DAG.getConstant(Flags.getByValSize(), MVT::i32); return DAG.getMemcpy(Chain, dl, Dst, Src, SizeNode, Flags.getByValAlign(), false, false, MachinePointerInfo(0), @@ -3057,7 +3056,7 @@ LowerMemOpCallTo(SelectionDAG &DAG, MachineFunction &MF, SDValue Chain, unsigned ArgOffset, bool isPPC64, bool isTailCall, bool isVector, SmallVector<SDValue, 8> &MemOpChains, SmallVector<TailCallArgumentInfo, 8> &TailCallArguments, - DebugLoc dl) { + SDLoc dl) { EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(); if (!isTailCall) { if (isVector) { @@ -3078,7 +3077,7 @@ LowerMemOpCallTo(SelectionDAG &DAG, MachineFunction &MF, SDValue Chain, static void PrepareTailCall(SelectionDAG &DAG, SDValue &InFlag, SDValue &Chain, - DebugLoc dl, bool isPPC64, int SPDiff, unsigned NumBytes, + SDLoc dl, bool isPPC64, int SPDiff, unsigned NumBytes, SDValue LROp, SDValue FPOp, bool isDarwinABI, SmallVector<TailCallArgumentInfo, 8> &TailCallArguments) { MachineFunction &MF = DAG.getMachineFunction(); @@ -3106,7 +3105,7 @@ void PrepareTailCall(SelectionDAG &DAG, SDValue &InFlag, SDValue &Chain, static unsigned PrepareCall(SelectionDAG &DAG, SDValue &Callee, SDValue &InFlag, - SDValue &Chain, DebugLoc dl, int SPDiff, bool isTailCall, + SDValue &Chain, SDLoc dl, int SPDiff, bool isTailCall, SmallVector<std::pair<unsigned, SDValue>, 8> &RegsToPass, SmallVector<SDValue, 8> &Ops, std::vector<EVT> &NodeTys, const PPCSubtarget &PPCSubTarget) { @@ -3292,7 +3291,7 @@ SDValue PPCTargetLowering::LowerCallResult(SDValue Chain, SDValue InFlag, CallingConv::ID CallConv, bool isVarArg, const SmallVectorImpl<ISD::InputArg> &Ins, - DebugLoc dl, SelectionDAG &DAG, + SDLoc dl, SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const { SmallVector<CCValAssign, 16> RVLocs; @@ -3335,7 +3334,7 @@ PPCTargetLowering::LowerCallResult(SDValue Chain, SDValue InFlag, } SDValue -PPCTargetLowering::FinishCall(CallingConv::ID CallConv, DebugLoc dl, +PPCTargetLowering::FinishCall(CallingConv::ID CallConv, SDLoc dl, bool isTailCall, bool isVarArg, SelectionDAG &DAG, SmallVector<std::pair<unsigned, SDValue>, 8> @@ -3434,7 +3433,7 @@ SDValue PPCTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI, SmallVectorImpl<SDValue> &InVals) const { SelectionDAG &DAG = CLI.DAG; - DebugLoc &dl = CLI.DL; + SDLoc &dl = CLI.DL; SmallVector<ISD::OutputArg, 32> &Outs = CLI.Outs; SmallVector<SDValue, 32> &OutVals = CLI.OutVals; SmallVector<ISD::InputArg, 32> &Ins = CLI.Ins; @@ -3471,7 +3470,7 @@ PPCTargetLowering::LowerCall_32SVR4(SDValue Chain, SDValue Callee, const SmallVectorImpl<ISD::OutputArg> &Outs, const SmallVectorImpl<SDValue> &OutVals, const SmallVectorImpl<ISD::InputArg> &Ins, - DebugLoc dl, SelectionDAG &DAG, + SDLoc dl, SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const { // See PPCTargetLowering::LowerFormalArguments_32SVR4() for a description // of the 32-bit SVR4 ABI stack frame layout. 
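The call-lowering helpers in these PowerPC hunks (and the Mips byval helpers earlier) show the other half of the change: every helper that used to thread a DebugLoc parameter now threads an SDLoc by value, and LowerCall obtains it from CLI.DL. A compile-level sketch of that plumbing follows; storeArgToStack is a hypothetical helper name, and only the SDLoc handling is taken from the patch.

// Sketch only: storeArgToStack is not part of this commit; it shows an SDLoc
// parameter being threaded through to the DAG builder calls.
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/SelectionDAG.h"

using namespace llvm;

static SDValue storeArgToStack(SelectionDAG &DAG, SDValue Chain, SDValue Arg,
                               SDValue PtrOff, SDLoc dl) {
  // The trailing parameter used to be a DebugLoc; after the patch the SDLoc
  // is passed by value, as in passArgOnStack/LowerMemOpCallTo above.
  return DAG.getStore(Chain, dl, Arg, PtrOff, MachinePointerInfo(),
                      /*isVolatile=*/false, /*isNonTemporal=*/false,
                      /*Alignment=*/0);
}

// Inside LowerCall the location now comes out of the lowering info as an
// SDLoc, e.g. (Mips/NVPTX form in this patch):
//   SDLoc DL = CLI.DL;
//   MemOpChains.push_back(storeArgToStack(DAG, Chain, Arg, PtrOff, DL));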
@@ -3684,7 +3683,7 @@ PPCTargetLowering::createMemcpyOutsideCallSeq(SDValue Arg, SDValue PtrOff, SDValue CallSeqStart, ISD::ArgFlagsTy Flags, SelectionDAG &DAG, - DebugLoc dl) const { + SDLoc dl) const { SDValue MemcpyCall = CreateCopyOfByValArgument(Arg, PtrOff, CallSeqStart.getNode()->getOperand(0), Flags, DAG, dl); @@ -3703,7 +3702,7 @@ PPCTargetLowering::LowerCall_64SVR4(SDValue Chain, SDValue Callee, const SmallVectorImpl<ISD::OutputArg> &Outs, const SmallVectorImpl<SDValue> &OutVals, const SmallVectorImpl<ISD::InputArg> &Ins, - DebugLoc dl, SelectionDAG &DAG, + SDLoc dl, SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const { unsigned NumOps = Outs.size(); @@ -4074,7 +4073,7 @@ PPCTargetLowering::LowerCall_Darwin(SDValue Chain, SDValue Callee, const SmallVectorImpl<ISD::OutputArg> &Outs, const SmallVectorImpl<SDValue> &OutVals, const SmallVectorImpl<ISD::InputArg> &Ins, - DebugLoc dl, SelectionDAG &DAG, + SDLoc dl, SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const { unsigned NumOps = Outs.size(); @@ -4431,7 +4430,7 @@ PPCTargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv, bool isVarArg, const SmallVectorImpl<ISD::OutputArg> &Outs, const SmallVectorImpl<SDValue> &OutVals, - DebugLoc dl, SelectionDAG &DAG) const { + SDLoc dl, SelectionDAG &DAG) const { SmallVector<CCValAssign, 16> RVLocs; CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), @@ -4480,7 +4479,7 @@ PPCTargetLowering::LowerReturn(SDValue Chain, SDValue PPCTargetLowering::LowerSTACKRESTORE(SDValue Op, SelectionDAG &DAG, const PPCSubtarget &Subtarget) const { // When we pop the dynamic allocation we need to restore the SP link. - DebugLoc dl = Op.getDebugLoc(); + SDLoc dl(Op); // Get the corect type for pointers. EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(); @@ -4565,7 +4564,7 @@ SDValue PPCTargetLowering::LowerDYNAMIC_STACKALLOC(SDValue Op, // Get the inputs. SDValue Chain = Op.getOperand(0); SDValue Size = Op.getOperand(1); - DebugLoc dl = Op.getDebugLoc(); + SDLoc dl(Op); // Get the corect type for pointers. EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(); @@ -4582,7 +4581,7 @@ SDValue PPCTargetLowering::LowerDYNAMIC_STACKALLOC(SDValue Op, SDValue PPCTargetLowering::lowerEH_SJLJ_SETJMP(SDValue Op, SelectionDAG &DAG) const { - DebugLoc DL = Op.getDebugLoc(); + SDLoc DL(Op); return DAG.getNode(PPCISD::EH_SJLJ_SETJMP, DL, DAG.getVTList(MVT::i32, MVT::Other), Op.getOperand(0), Op.getOperand(1)); @@ -4590,7 +4589,7 @@ SDValue PPCTargetLowering::lowerEH_SJLJ_SETJMP(SDValue Op, SDValue PPCTargetLowering::lowerEH_SJLJ_LONGJMP(SDValue Op, SelectionDAG &DAG) const { - DebugLoc DL = Op.getDebugLoc(); + SDLoc DL(Op); return DAG.getNode(PPCISD::EH_SJLJ_LONGJMP, DL, MVT::Other, Op.getOperand(0), Op.getOperand(1)); } @@ -4616,7 +4615,7 @@ SDValue PPCTargetLowering::LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const { EVT CmpVT = Op.getOperand(0).getValueType(); SDValue LHS = Op.getOperand(0), RHS = Op.getOperand(1); SDValue TV = Op.getOperand(2), FV = Op.getOperand(3); - DebugLoc dl = Op.getDebugLoc(); + SDLoc dl(Op); // If the RHS of the comparison is a 0.0, we don't need to do the // subtraction at all. @@ -4697,7 +4696,7 @@ SDValue PPCTargetLowering::LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const { // FIXME: Split this code up when LegalizeDAGTypes lands. 
SDValue PPCTargetLowering::LowerFP_TO_INT(SDValue Op, SelectionDAG &DAG, - DebugLoc dl) const { + SDLoc dl) const { assert(Op.getOperand(0).getValueType().isFloatingPoint()); SDValue Src = Op.getOperand(0); if (Src.getValueType() == MVT::f32) @@ -4756,7 +4755,7 @@ SDValue PPCTargetLowering::LowerFP_TO_INT(SDValue Op, SelectionDAG &DAG, SDValue PPCTargetLowering::LowerINT_TO_FP(SDValue Op, SelectionDAG &DAG) const { - DebugLoc dl = Op.getDebugLoc(); + SDLoc dl(Op); // Don't handle ppc_fp128 here; let it be lowered to a libcall. if (Op.getValueType() != MVT::f32 && Op.getValueType() != MVT::f64) return SDValue(); @@ -4890,7 +4889,7 @@ SDValue PPCTargetLowering::LowerINT_TO_FP(SDValue Op, SDValue PPCTargetLowering::LowerFLT_ROUNDS_(SDValue Op, SelectionDAG &DAG) const { - DebugLoc dl = Op.getDebugLoc(); + SDLoc dl(Op); /* The rounding mode is in bits 30:31 of FPSR, and has the following settings: @@ -4956,7 +4955,7 @@ SDValue PPCTargetLowering::LowerFLT_ROUNDS_(SDValue Op, SDValue PPCTargetLowering::LowerSHL_PARTS(SDValue Op, SelectionDAG &DAG) const { EVT VT = Op.getValueType(); unsigned BitWidth = VT.getSizeInBits(); - DebugLoc dl = Op.getDebugLoc(); + SDLoc dl(Op); assert(Op.getNumOperands() == 3 && VT == Op.getOperand(1).getValueType() && "Unexpected SHL!"); @@ -4984,7 +4983,7 @@ SDValue PPCTargetLowering::LowerSHL_PARTS(SDValue Op, SelectionDAG &DAG) const { SDValue PPCTargetLowering::LowerSRL_PARTS(SDValue Op, SelectionDAG &DAG) const { EVT VT = Op.getValueType(); - DebugLoc dl = Op.getDebugLoc(); + SDLoc dl(Op); unsigned BitWidth = VT.getSizeInBits(); assert(Op.getNumOperands() == 3 && VT == Op.getOperand(1).getValueType() && @@ -5012,7 +5011,7 @@ SDValue PPCTargetLowering::LowerSRL_PARTS(SDValue Op, SelectionDAG &DAG) const { } SDValue PPCTargetLowering::LowerSRA_PARTS(SDValue Op, SelectionDAG &DAG) const { - DebugLoc dl = Op.getDebugLoc(); + SDLoc dl(Op); EVT VT = Op.getValueType(); unsigned BitWidth = VT.getSizeInBits(); assert(Op.getNumOperands() == 3 && @@ -5047,7 +5046,7 @@ SDValue PPCTargetLowering::LowerSRA_PARTS(SDValue Op, SelectionDAG &DAG) const { /// BuildSplatI - Build a canonical splati of Val with an element size of /// SplatSize. Cast the result to VT. static SDValue BuildSplatI(int Val, unsigned SplatSize, EVT VT, - SelectionDAG &DAG, DebugLoc dl) { + SelectionDAG &DAG, SDLoc dl) { assert(Val >= -16 && Val <= 15 && "vsplti is out of range!"); static const EVT VTys[] = { // canonical VT to use for each size. @@ -5074,7 +5073,7 @@ static SDValue BuildSplatI(int Val, unsigned SplatSize, EVT VT, /// BuildIntrinsicOp - Return a unary operator intrinsic node with the /// specified intrinsic ID. static SDValue BuildIntrinsicOp(unsigned IID, SDValue Op, - SelectionDAG &DAG, DebugLoc dl, + SelectionDAG &DAG, SDLoc dl, EVT DestVT = MVT::Other) { if (DestVT == MVT::Other) DestVT = Op.getValueType(); return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, DestVT, @@ -5084,7 +5083,7 @@ static SDValue BuildIntrinsicOp(unsigned IID, SDValue Op, /// BuildIntrinsicOp - Return a binary operator intrinsic node with the /// specified intrinsic ID. static SDValue BuildIntrinsicOp(unsigned IID, SDValue LHS, SDValue RHS, - SelectionDAG &DAG, DebugLoc dl, + SelectionDAG &DAG, SDLoc dl, EVT DestVT = MVT::Other) { if (DestVT == MVT::Other) DestVT = LHS.getValueType(); return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, DestVT, @@ -5095,7 +5094,7 @@ static SDValue BuildIntrinsicOp(unsigned IID, SDValue LHS, SDValue RHS, /// specified intrinsic ID. 
static SDValue BuildIntrinsicOp(unsigned IID, SDValue Op0, SDValue Op1, SDValue Op2, SelectionDAG &DAG, - DebugLoc dl, EVT DestVT = MVT::Other) { + SDLoc dl, EVT DestVT = MVT::Other) { if (DestVT == MVT::Other) DestVT = Op0.getValueType(); return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, DestVT, DAG.getConstant(IID, MVT::i32), Op0, Op1, Op2); @@ -5105,7 +5104,7 @@ static SDValue BuildIntrinsicOp(unsigned IID, SDValue Op0, SDValue Op1, /// BuildVSLDOI - Return a VECTOR_SHUFFLE that is a vsldoi of the specified /// amount. The result has the specified value type. static SDValue BuildVSLDOI(SDValue LHS, SDValue RHS, unsigned Amt, - EVT VT, SelectionDAG &DAG, DebugLoc dl) { + EVT VT, SelectionDAG &DAG, SDLoc dl) { // Force LHS/RHS to be the right type. LHS = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, LHS); RHS = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, RHS); @@ -5124,7 +5123,7 @@ static SDValue BuildVSLDOI(SDValue LHS, SDValue RHS, unsigned Amt, // sequence of ops that should be used. SDValue PPCTargetLowering::LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) const { - DebugLoc dl = Op.getDebugLoc(); + SDLoc dl(Op); BuildVectorSDNode *BVN = dyn_cast<BuildVectorSDNode>(Op.getNode()); assert(BVN != 0 && "Expected a BuildVectorSDNode in LowerBUILD_VECTOR"); @@ -5280,7 +5279,7 @@ SDValue PPCTargetLowering::LowerBUILD_VECTOR(SDValue Op, /// the specified operations to build the shuffle. static SDValue GeneratePerfectShuffle(unsigned PFEntry, SDValue LHS, SDValue RHS, SelectionDAG &DAG, - DebugLoc dl) { + SDLoc dl) { unsigned OpNum = (PFEntry >> 26) & 0x0F; unsigned LHSID = (PFEntry >> 13) & ((1 << 13)-1); unsigned RHSID = (PFEntry >> 0) & ((1 << 13)-1); @@ -5359,7 +5358,7 @@ static SDValue GeneratePerfectShuffle(unsigned PFEntry, SDValue LHS, /// lowered into a vperm. SDValue PPCTargetLowering::LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG) const { - DebugLoc dl = Op.getDebugLoc(); + SDLoc dl(Op); SDValue V1 = Op.getOperand(0); SDValue V2 = Op.getOperand(1); ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op); @@ -5526,7 +5525,7 @@ SDValue PPCTargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG) const { // If this is a lowered altivec predicate compare, CompareOpc is set to the // opcode number of the comparison. - DebugLoc dl = Op.getDebugLoc(); + SDLoc dl(Op); int CompareOpc; bool isDot; if (!getAltivecCompareInfo(Op, CompareOpc, isDot)) @@ -5590,7 +5589,7 @@ SDValue PPCTargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op, SDValue PPCTargetLowering::LowerSCALAR_TO_VECTOR(SDValue Op, SelectionDAG &DAG) const { - DebugLoc dl = Op.getDebugLoc(); + SDLoc dl(Op); // Create a stack slot that is 16-byte aligned. 
MachineFrameInfo *FrameInfo = DAG.getMachineFunction().getFrameInfo(); int FrameIdx = FrameInfo->CreateStackObject(16, 16, false); @@ -5607,7 +5606,7 @@ SDValue PPCTargetLowering::LowerSCALAR_TO_VECTOR(SDValue Op, } SDValue PPCTargetLowering::LowerMUL(SDValue Op, SelectionDAG &DAG) const { - DebugLoc dl = Op.getDebugLoc(); + SDLoc dl(Op); if (Op.getValueType() == MVT::v4i32) { SDValue LHS = Op.getOperand(0), RHS = Op.getOperand(1); @@ -5694,7 +5693,7 @@ SDValue PPCTargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const { case ISD::SELECT_CC: return LowerSELECT_CC(Op, DAG); case ISD::FP_TO_UINT: case ISD::FP_TO_SINT: return LowerFP_TO_INT(Op, DAG, - Op.getDebugLoc()); + SDLoc(Op)); case ISD::UINT_TO_FP: case ISD::SINT_TO_FP: return LowerINT_TO_FP(Op, DAG); case ISD::FLT_ROUNDS_: return LowerFLT_ROUNDS_(Op, DAG); @@ -5724,7 +5723,7 @@ void PPCTargetLowering::ReplaceNodeResults(SDNode *N, SmallVectorImpl<SDValue>&Results, SelectionDAG &DAG) const { const TargetMachine &TM = getTargetMachine(); - DebugLoc dl = N->getDebugLoc(); + SDLoc dl(N); switch (N->getOpcode()) { default: llvm_unreachable("Do not know how to custom type legalize this operation!"); @@ -6675,7 +6674,7 @@ SDValue PPCTargetLowering::DAGCombineFastRecip(SDValue Op, ++Iterations; SelectionDAG &DAG = DCI.DAG; - DebugLoc dl = Op.getDebugLoc(); + SDLoc dl(Op); SDValue FPOne = DAG.getConstantFP(1.0, VT.getScalarType()); @@ -6737,7 +6736,7 @@ SDValue PPCTargetLowering::DAGCombineFastRecipFSQRT(SDValue Op, ++Iterations; SelectionDAG &DAG = DCI.DAG; - DebugLoc dl = Op.getDebugLoc(); + SDLoc dl(Op); SDValue FPThreeHalves = DAG.getConstantFP(1.5, VT.getScalarType()); @@ -6785,7 +6784,7 @@ SDValue PPCTargetLowering::PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const { const TargetMachine &TM = getTargetMachine(); SelectionDAG &DAG = DCI.DAG; - DebugLoc dl = N->getDebugLoc(); + SDLoc dl(N); switch (N->getOpcode()) { default: break; case PPCISD::SHL: @@ -6826,7 +6825,7 @@ SDValue PPCTargetLowering::PerformDAGCombine(SDNode *N, DCI); if (RV.getNode() != 0) { DCI.AddToWorklist(RV.getNode()); - RV = DAG.getNode(ISD::FP_EXTEND, N->getOperand(1).getDebugLoc(), + RV = DAG.getNode(ISD::FP_EXTEND, SDLoc(N->getOperand(1)), N->getValueType(0), RV); DCI.AddToWorklist(RV.getNode()); return DAG.getNode(ISD::FMUL, dl, N->getValueType(0), @@ -6839,7 +6838,7 @@ SDValue PPCTargetLowering::PerformDAGCombine(SDNode *N, DCI); if (RV.getNode() != 0) { DCI.AddToWorklist(RV.getNode()); - RV = DAG.getNode(ISD::FP_ROUND, N->getOperand(1).getDebugLoc(), + RV = DAG.getNode(ISD::FP_ROUND, SDLoc(N->getOperand(1)), N->getValueType(0), RV, N->getOperand(1).getOperand(1)); DCI.AddToWorklist(RV.getNode()); @@ -7521,7 +7520,7 @@ SDValue PPCTargetLowering::LowerRETURNADDR(SDValue Op, MachineFrameInfo *MFI = MF.getFrameInfo(); MFI->setReturnAddressIsTaken(true); - DebugLoc dl = Op.getDebugLoc(); + SDLoc dl(Op); unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue(); // Make sure the function does not optimize away the store of the RA to @@ -7551,7 +7550,7 @@ SDValue PPCTargetLowering::LowerRETURNADDR(SDValue Op, SDValue PPCTargetLowering::LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const { - DebugLoc dl = Op.getDebugLoc(); + SDLoc dl(Op); unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue(); EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(); diff --git a/lib/Target/PowerPC/PPCISelLowering.h b/lib/Target/PowerPC/PPCISelLowering.h index a870517..5c5612b 100644 --- a/lib/Target/PowerPC/PPCISelLowering.h +++ 
b/lib/Target/PowerPC/PPCISelLowering.h @@ -484,7 +484,7 @@ namespace llvm { SDValue &LROpOut, SDValue &FPOpOut, bool isDarwinABI, - DebugLoc dl) const; + SDLoc dl) const; SDValue LowerRETURNADDR(SDValue Op, SelectionDAG &DAG) const; SDValue LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const; @@ -505,7 +505,7 @@ namespace llvm { SDValue LowerDYNAMIC_STACKALLOC(SDValue Op, SelectionDAG &DAG, const PPCSubtarget &Subtarget) const; SDValue LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const; - SDValue LowerFP_TO_INT(SDValue Op, SelectionDAG &DAG, DebugLoc dl) const; + SDValue LowerFP_TO_INT(SDValue Op, SelectionDAG &DAG, SDLoc dl) const; SDValue LowerINT_TO_FP(SDValue Op, SelectionDAG &DAG) const; SDValue LowerFLT_ROUNDS_(SDValue Op, SelectionDAG &DAG) const; SDValue LowerSHL_PARTS(SDValue Op, SelectionDAG &DAG) const; @@ -520,9 +520,9 @@ namespace llvm { SDValue LowerCallResult(SDValue Chain, SDValue InFlag, CallingConv::ID CallConv, bool isVarArg, const SmallVectorImpl<ISD::InputArg> &Ins, - DebugLoc dl, SelectionDAG &DAG, + SDLoc dl, SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const; - SDValue FinishCall(CallingConv::ID CallConv, DebugLoc dl, bool isTailCall, + SDValue FinishCall(CallingConv::ID CallConv, SDLoc dl, bool isTailCall, bool isVarArg, SelectionDAG &DAG, SmallVector<std::pair<unsigned, SDValue>, 8> @@ -537,7 +537,7 @@ namespace llvm { LowerFormalArguments(SDValue Chain, CallingConv::ID CallConv, bool isVarArg, const SmallVectorImpl<ISD::InputArg> &Ins, - DebugLoc dl, SelectionDAG &DAG, + SDLoc dl, SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const; virtual SDValue @@ -555,11 +555,11 @@ namespace llvm { CallingConv::ID CallConv, bool isVarArg, const SmallVectorImpl<ISD::OutputArg> &Outs, const SmallVectorImpl<SDValue> &OutVals, - DebugLoc dl, SelectionDAG &DAG) const; + SDLoc dl, SelectionDAG &DAG) const; SDValue extendArgForPPC64(ISD::ArgFlagsTy Flags, EVT ObjectVT, SelectionDAG &DAG, - SDValue ArgVal, DebugLoc dl) const; + SDValue ArgVal, SDLoc dl) const; void setMinReservedArea(MachineFunction &MF, SelectionDAG &DAG, @@ -570,25 +570,25 @@ namespace llvm { LowerFormalArguments_Darwin(SDValue Chain, CallingConv::ID CallConv, bool isVarArg, const SmallVectorImpl<ISD::InputArg> &Ins, - DebugLoc dl, SelectionDAG &DAG, + SDLoc dl, SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const; SDValue LowerFormalArguments_64SVR4(SDValue Chain, CallingConv::ID CallConv, bool isVarArg, const SmallVectorImpl<ISD::InputArg> &Ins, - DebugLoc dl, SelectionDAG &DAG, + SDLoc dl, SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const; SDValue LowerFormalArguments_32SVR4(SDValue Chain, CallingConv::ID CallConv, bool isVarArg, const SmallVectorImpl<ISD::InputArg> &Ins, - DebugLoc dl, SelectionDAG &DAG, + SDLoc dl, SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const; SDValue createMemcpyOutsideCallSeq(SDValue Arg, SDValue PtrOff, SDValue CallSeqStart, ISD::ArgFlagsTy Flags, - SelectionDAG &DAG, DebugLoc dl) const; + SelectionDAG &DAG, SDLoc dl) const; SDValue LowerCall_Darwin(SDValue Chain, SDValue Callee, @@ -597,7 +597,7 @@ namespace llvm { const SmallVectorImpl<ISD::OutputArg> &Outs, const SmallVectorImpl<SDValue> &OutVals, const SmallVectorImpl<ISD::InputArg> &Ins, - DebugLoc dl, SelectionDAG &DAG, + SDLoc dl, SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const; SDValue LowerCall_64SVR4(SDValue Chain, SDValue Callee, @@ -606,7 +606,7 @@ namespace llvm { const SmallVectorImpl<ISD::OutputArg> &Outs, const SmallVectorImpl<SDValue> &OutVals, const 
SmallVectorImpl<ISD::InputArg> &Ins, - DebugLoc dl, SelectionDAG &DAG, + SDLoc dl, SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const; SDValue LowerCall_32SVR4(SDValue Chain, SDValue Callee, CallingConv::ID CallConv, @@ -614,7 +614,7 @@ namespace llvm { const SmallVectorImpl<ISD::OutputArg> &Outs, const SmallVectorImpl<SDValue> &OutVals, const SmallVectorImpl<ISD::InputArg> &Ins, - DebugLoc dl, SelectionDAG &DAG, + SDLoc dl, SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const; SDValue lowerEH_SJLJ_SETJMP(SDValue Op, SelectionDAG &DAG) const; diff --git a/lib/Target/R600/AMDGPUISelLowering.cpp b/lib/Target/R600/AMDGPUISelLowering.cpp index 128454c..4a6f4cc 100644 --- a/lib/Target/R600/AMDGPUISelLowering.cpp +++ b/lib/Target/R600/AMDGPUISelLowering.cpp @@ -86,7 +86,7 @@ SDValue AMDGPUTargetLowering::LowerReturn( bool isVarArg, const SmallVectorImpl<ISD::OutputArg> &Outs, const SmallVectorImpl<SDValue> &OutVals, - DebugLoc DL, SelectionDAG &DAG) const { + SDLoc DL, SelectionDAG &DAG) const { return DAG.getNode(AMDGPUISD::RET_FLAG, DL, MVT::Other, Chain); } @@ -117,7 +117,7 @@ SDValue AMDGPUTargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) SDValue AMDGPUTargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG) const { unsigned IntrinsicID = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue(); - DebugLoc DL = Op.getDebugLoc(); + SDLoc DL(Op); EVT VT = Op.getValueType(); switch (IntrinsicID) { @@ -157,7 +157,7 @@ SDValue AMDGPUTargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op, SDValue AMDGPUTargetLowering::LowerIntrinsicIABS(SDValue Op, SelectionDAG &DAG) const { - DebugLoc DL = Op.getDebugLoc(); + SDLoc DL(Op); EVT VT = Op.getValueType(); SDValue Neg = DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(0, VT), Op.getOperand(1)); @@ -169,7 +169,7 @@ SDValue AMDGPUTargetLowering::LowerIntrinsicIABS(SDValue Op, /// LRP(a, b, c) = muladd(a, b, (1 - a) * c) SDValue AMDGPUTargetLowering::LowerIntrinsicLRP(SDValue Op, SelectionDAG &DAG) const { - DebugLoc DL = Op.getDebugLoc(); + SDLoc DL(Op); EVT VT = Op.getValueType(); SDValue OneSubA = DAG.getNode(ISD::FSUB, DL, VT, DAG.getConstantFP(1.0f, MVT::f32), @@ -184,7 +184,7 @@ SDValue AMDGPUTargetLowering::LowerIntrinsicLRP(SDValue Op, /// \brief Generate Min/Max node SDValue AMDGPUTargetLowering::LowerMinMax(SDValue Op, SelectionDAG &DAG) const { - DebugLoc DL = Op.getDebugLoc(); + SDLoc DL(Op); EVT VT = Op.getValueType(); SDValue LHS = Op.getOperand(0); @@ -245,7 +245,7 @@ SDValue AMDGPUTargetLowering::LowerMinMax(SDValue Op, SDValue AMDGPUTargetLowering::LowerUDIVREM(SDValue Op, SelectionDAG &DAG) const { - DebugLoc DL = Op.getDebugLoc(); + SDLoc DL(Op); EVT VT = Op.getValueType(); SDValue Num = Op.getOperand(0); diff --git a/lib/Target/R600/AMDGPUISelLowering.h b/lib/Target/R600/AMDGPUISelLowering.h index 8a4db6f..1d8c706 100644 --- a/lib/Target/R600/AMDGPUISelLowering.h +++ b/lib/Target/R600/AMDGPUISelLowering.h @@ -49,7 +49,7 @@ public: bool isVarArg, const SmallVectorImpl<ISD::OutputArg> &Outs, const SmallVectorImpl<SDValue> &OutVals, - DebugLoc DL, SelectionDAG &DAG) const; + SDLoc DL, SelectionDAG &DAG) const; virtual SDValue LowerCall(CallLoweringInfo &CLI, SmallVectorImpl<SDValue> &InVals) const { CLI.Callee.dump(); diff --git a/lib/Target/R600/AMDILISelDAGToDAG.cpp b/lib/Target/R600/AMDILISelDAGToDAG.cpp index 9b0e518..00d7c8f 100644 --- a/lib/Target/R600/AMDILISelDAGToDAG.cpp +++ b/lib/Target/R600/AMDILISelDAGToDAG.cpp @@ -211,7 +211,7 @@ SDNode *AMDGPUDAGToDAGISel::Select(SDNode *N) { const SDValue 
Ops[] = { RC, N->getOperand(0), SubReg0, N->getOperand(1), SubReg1 }; return CurDAG->getMachineNode(TargetOpcode::REG_SEQUENCE, - N->getDebugLoc(), N->getValueType(0), Ops); + SDLoc(N), N->getValueType(0), Ops); } case ISD::ConstantFP: @@ -616,7 +616,7 @@ bool AMDGPUDAGToDAGISel::SelectADDRVTX_READ(SDValue Addr, SDValue &Base, } else if ((IMMOffset = dyn_cast<ConstantSDNode>(Addr)) && isInt<16>(IMMOffset->getZExtValue())) { Base = CurDAG->getCopyFromReg(CurDAG->getEntryNode(), - CurDAG->getEntryNode().getDebugLoc(), + SDLoc(CurDAG->getEntryNode()), AMDGPU::ZERO, MVT::i32); Offset = CurDAG->getTargetConstant(IMMOffset->getZExtValue(), MVT::i32); return true; diff --git a/lib/Target/R600/AMDILISelLowering.cpp b/lib/Target/R600/AMDILISelLowering.cpp index e20dbe0..0168906 100644 --- a/lib/Target/R600/AMDILISelLowering.cpp +++ b/lib/Target/R600/AMDILISelLowering.cpp @@ -329,7 +329,7 @@ SDValue AMDGPUTargetLowering::LowerSIGN_EXTEND_INREG(SDValue Op, SelectionDAG &DAG) const { SDValue Data = Op.getOperand(0); VTSDNode *BaseType = cast<VTSDNode>(Op.getOperand(1)); - DebugLoc DL = Op.getDebugLoc(); + SDLoc DL(Op); EVT DVT = Data.getValueType(); EVT BVT = BaseType->getVT(); unsigned baseBits = BVT.getScalarType().getSizeInBits(); @@ -385,7 +385,7 @@ AMDGPUTargetLowering::LowerBRCOND(SDValue Op, SelectionDAG &DAG) const { SDValue Result; Result = DAG.getNode( AMDGPUISD::BRANCH_COND, - Op.getDebugLoc(), + SDLoc(Op), Op.getValueType(), Chain, Jump, Cond); return Result; @@ -393,7 +393,7 @@ AMDGPUTargetLowering::LowerBRCOND(SDValue Op, SelectionDAG &DAG) const { SDValue AMDGPUTargetLowering::LowerSDIV24(SDValue Op, SelectionDAG &DAG) const { - DebugLoc DL = Op.getDebugLoc(); + SDLoc DL(Op); EVT OVT = Op.getValueType(); SDValue LHS = Op.getOperand(0); SDValue RHS = Op.getOperand(1); @@ -474,7 +474,7 @@ AMDGPUTargetLowering::LowerSDIV24(SDValue Op, SelectionDAG &DAG) const { SDValue AMDGPUTargetLowering::LowerSDIV32(SDValue Op, SelectionDAG &DAG) const { - DebugLoc DL = Op.getDebugLoc(); + SDLoc DL(Op); EVT OVT = Op.getValueType(); SDValue LHS = Op.getOperand(0); SDValue RHS = Op.getOperand(1); @@ -545,7 +545,7 @@ AMDGPUTargetLowering::LowerSDIV64(SDValue Op, SelectionDAG &DAG) const { SDValue AMDGPUTargetLowering::LowerSREM8(SDValue Op, SelectionDAG &DAG) const { - DebugLoc DL = Op.getDebugLoc(); + SDLoc DL(Op); EVT OVT = Op.getValueType(); MVT INTTY = MVT::i32; if (OVT == MVT::v2i8) { @@ -562,7 +562,7 @@ AMDGPUTargetLowering::LowerSREM8(SDValue Op, SelectionDAG &DAG) const { SDValue AMDGPUTargetLowering::LowerSREM16(SDValue Op, SelectionDAG &DAG) const { - DebugLoc DL = Op.getDebugLoc(); + SDLoc DL(Op); EVT OVT = Op.getValueType(); MVT INTTY = MVT::i32; if (OVT == MVT::v2i16) { @@ -579,7 +579,7 @@ AMDGPUTargetLowering::LowerSREM16(SDValue Op, SelectionDAG &DAG) const { SDValue AMDGPUTargetLowering::LowerSREM32(SDValue Op, SelectionDAG &DAG) const { - DebugLoc DL = Op.getDebugLoc(); + SDLoc DL(Op); EVT OVT = Op.getValueType(); SDValue LHS = Op.getOperand(0); SDValue RHS = Op.getOperand(1); diff --git a/lib/Target/R600/R600ISelLowering.cpp b/lib/Target/R600/R600ISelLowering.cpp index 93231db..de6c504 100644 --- a/lib/Target/R600/R600ISelLowering.cpp +++ b/lib/Target/R600/R600ISelLowering.cpp @@ -491,7 +491,7 @@ SDValue R600TargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const int64_t RegIndex = cast<ConstantSDNode>(Op.getOperand(3))->getZExtValue(); unsigned Reg = AMDGPU::R600_TReg32RegClass.getRegister(RegIndex); MFI->LiveOuts.push_back(Reg); - return DAG.getCopyToReg(Chain, 
Op.getDebugLoc(), Reg, Op.getOperand(2)); + return DAG.getCopyToReg(Chain, SDLoc(Op), Reg, Op.getOperand(2)); } case AMDGPUIntrinsic::R600_store_swizzle: { const SDValue Args[8] = { @@ -504,7 +504,7 @@ SDValue R600TargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const DAG.getConstant(2, MVT::i32), // SWZ_Z DAG.getConstant(3, MVT::i32) // SWZ_W }; - return DAG.getNode(AMDGPUISD::EXPORT, Op.getDebugLoc(), Op.getValueType(), + return DAG.getNode(AMDGPUISD::EXPORT, SDLoc(Op), Op.getValueType(), Args, 8); } @@ -518,7 +518,7 @@ SDValue R600TargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const unsigned IntrinsicID = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue(); EVT VT = Op.getValueType(); - DebugLoc DL = Op.getDebugLoc(); + SDLoc DL(Op); switch(IntrinsicID) { default: return AMDGPUTargetLowering::LowerOperation(Op, DAG); case AMDGPUIntrinsic::R600_load_input: { @@ -528,7 +528,7 @@ SDValue R600TargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const MachineRegisterInfo &MRI = MF.getRegInfo(); MRI.addLiveIn(Reg); return DAG.getCopyFromReg(DAG.getEntryNode(), - DAG.getEntryNode().getDebugLoc(), Reg, VT); + SDLoc(DAG.getEntryNode()), Reg, VT); } case AMDGPUIntrinsic::R600_interp_input: { @@ -722,7 +722,7 @@ void R600TargetLowering::ReplaceNodeResults(SDNode *N, SDValue R600TargetLowering::LowerFPTOUINT(SDValue Op, SelectionDAG &DAG) const { return DAG.getNode( ISD::SETCC, - Op.getDebugLoc(), + SDLoc(Op), MVT::i1, Op, DAG.getConstantFP(0.0f, MVT::f32), DAG.getCondCode(ISD::SETNE) @@ -730,7 +730,7 @@ SDValue R600TargetLowering::LowerFPTOUINT(SDValue Op, SelectionDAG &DAG) const { } SDValue R600TargetLowering::LowerImplicitParameter(SelectionDAG &DAG, EVT VT, - DebugLoc DL, + SDLoc DL, unsigned DwordOffset) const { unsigned ByteOffset = DwordOffset * 4; PointerType * PtrType = PointerType::get(VT.getTypeForEVT(*DAG.getContext()), @@ -770,7 +770,7 @@ bool R600TargetLowering::isZero(SDValue Op) const { } SDValue R600TargetLowering::LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const { - DebugLoc DL = Op.getDebugLoc(); + SDLoc DL(Op); EVT VT = Op.getValueType(); SDValue LHS = Op.getOperand(0); @@ -891,7 +891,7 @@ SDValue R600TargetLowering::LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const SDValue R600TargetLowering::LowerSELECT(SDValue Op, SelectionDAG &DAG) const { return DAG.getNode(ISD::SELECT_CC, - Op.getDebugLoc(), + SDLoc(Op), Op.getValueType(), Op.getOperand(0), DAG.getConstant(0, MVT::i32), @@ -922,7 +922,7 @@ SDValue R600TargetLowering::stackPtrToRegIndex(SDValue Ptr, default: llvm_unreachable("Invalid stack width"); } - return DAG.getNode(ISD::SRL, Ptr.getDebugLoc(), Ptr.getValueType(), Ptr, + return DAG.getNode(ISD::SRL, SDLoc(Ptr), Ptr.getValueType(), Ptr, DAG.getConstant(SRLPad, MVT::i32)); } @@ -956,7 +956,7 @@ void R600TargetLowering::getStackAddress(unsigned StackWidth, } SDValue R600TargetLowering::LowerSTORE(SDValue Op, SelectionDAG &DAG) const { - DebugLoc DL = Op.getDebugLoc(); + SDLoc DL(Op); StoreSDNode *StoreNode = cast<StoreSDNode>(Op); SDValue Chain = Op.getOperand(0); SDValue Value = Op.getOperand(1); @@ -1068,7 +1068,7 @@ ConstantAddressBlock(unsigned AddressSpace) { SDValue R600TargetLowering::LowerLOAD(SDValue Op, SelectionDAG &DAG) const { EVT VT = Op.getValueType(); - DebugLoc DL = Op.getDebugLoc(); + SDLoc DL(Op); LoadSDNode *LoadNode = cast<LoadSDNode>(Op); SDValue Chain = Op.getOperand(0); SDValue Ptr = Op.getOperand(1); @@ -1170,7 +1170,7 @@ SDValue R600TargetLowering::LowerFormalArguments( CallingConv::ID CallConv, bool 
isVarArg, const SmallVectorImpl<ISD::InputArg> &Ins, - DebugLoc DL, SelectionDAG &DAG, + SDLoc DL, SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const { unsigned ParamOffsetBytes = 36; Function::const_arg_iterator FuncArg = @@ -1219,7 +1219,7 @@ SDValue R600TargetLowering::PerformDAGCombine(SDNode *N, case ISD::FP_ROUND: { SDValue Arg = N->getOperand(0); if (Arg.getOpcode() == ISD::UINT_TO_FP && Arg.getValueType() == MVT::f64) { - return DAG.getNode(ISD::UINT_TO_FP, N->getDebugLoc(), N->getValueType(0), + return DAG.getNode(ISD::UINT_TO_FP, SDLoc(N), N->getValueType(0), Arg.getOperand(0)); } break; @@ -1244,7 +1244,7 @@ SDValue R600TargetLowering::PerformDAGCombine(SDNode *N, return SDValue(); } - return DAG.getNode(ISD::SELECT_CC, N->getDebugLoc(), N->getValueType(0), + return DAG.getNode(ISD::SELECT_CC, SDLoc(N), N->getValueType(0), SelectCC.getOperand(0), // LHS SelectCC.getOperand(1), // RHS DAG.getConstant(-1, MVT::i32), // True @@ -1267,7 +1267,7 @@ SDValue R600TargetLowering::PerformDAGCombine(SDNode *N, Arg.getOperand(0).getOpcode() == ISD::BUILD_VECTOR) { if (ConstantSDNode *Const = dyn_cast<ConstantSDNode>(N->getOperand(1))) { unsigned Element = Const->getZExtValue(); - return DAG.getNode(ISD::BITCAST, N->getDebugLoc(), N->getVTList(), + return DAG.getNode(ISD::BITCAST, SDLoc(N), N->getVTList(), Arg->getOperand(0).getOperand(Element)); } } @@ -1302,7 +1302,7 @@ SDValue R600TargetLowering::PerformDAGCombine(SDNode *N, ISD::CondCode LHSCC = cast<CondCodeSDNode>(LHS.getOperand(4))->get(); LHSCC = ISD::getSetCCInverse(LHSCC, LHS.getOperand(0).getValueType().isInteger()); - return DAG.getSelectCC(N->getDebugLoc(), + return DAG.getSelectCC(SDLoc(N), LHS.getOperand(0), LHS.getOperand(1), LHS.getOperand(2), @@ -1344,7 +1344,7 @@ SDValue R600TargetLowering::PerformDAGCombine(SDNode *N, NewBldVec[i] = Arg.getOperand(i); } } - DebugLoc DL = N->getDebugLoc(); + SDLoc DL(N); NewArgs[1] = DAG.getNode(ISD::BUILD_VECTOR, DL, MVT::v4f32, NewBldVec, 4); return DAG.getNode(AMDGPUISD::EXPORT, DL, N->getVTList(), NewArgs, 8); } diff --git a/lib/Target/R600/R600ISelLowering.h b/lib/Target/R600/R600ISelLowering.h index 2b8f2fa..663aab4 100644 --- a/lib/Target/R600/R600ISelLowering.h +++ b/lib/Target/R600/R600ISelLowering.h @@ -36,7 +36,7 @@ public: CallingConv::ID CallConv, bool isVarArg, const SmallVectorImpl<ISD::InputArg> &Ins, - DebugLoc DL, SelectionDAG &DAG, + SDLoc DL, SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const; virtual EVT getSetCCResultType(LLVMContext &, EVT VT) const; private: @@ -47,7 +47,7 @@ private: /// lowered to load instructions which retreive the values from the Vertex /// Buffer. 
SDValue LowerImplicitParameter(SelectionDAG &DAG, EVT VT, - DebugLoc DL, unsigned DwordOffset) const; + SDLoc DL, unsigned DwordOffset) const; void lowerImplicitParameter(MachineInstr *MI, MachineBasicBlock &BB, MachineRegisterInfo & MRI, unsigned dword_offset) const; diff --git a/lib/Target/R600/SIISelLowering.cpp b/lib/Target/R600/SIISelLowering.cpp index 6bcc384..c392c7b 100644 --- a/lib/Target/R600/SIISelLowering.cpp +++ b/lib/Target/R600/SIISelLowering.cpp @@ -87,7 +87,7 @@ SDValue SITargetLowering::LowerFormalArguments( CallingConv::ID CallConv, bool isVarArg, const SmallVectorImpl<ISD::InputArg> &Ins, - DebugLoc DL, SelectionDAG &DAG, + SDLoc DL, SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const { const TargetRegisterInfo *TRI = getTargetMachine().getRegisterInfo(); @@ -265,7 +265,7 @@ static SDNode *findUser(SDValue Value, unsigned Opcode) { SDValue SITargetLowering::LowerBRCOND(SDValue BRCOND, SelectionDAG &DAG) const { - DebugLoc DL = BRCOND.getDebugLoc(); + SDLoc DL(BRCOND); SDNode *Intr = BRCOND.getOperand(1).getNode(); SDValue Target = BRCOND.getOperand(2); @@ -345,7 +345,7 @@ SDValue SITargetLowering::LowerSTORE(SDValue Op, SelectionDAG &DAG) const { SDValue Chain = Op.getOperand(0); SDValue Value = Op.getOperand(1); SDValue VirtualAddress = Op.getOperand(2); - DebugLoc DL = Op.getDebugLoc(); + SDLoc DL(Op); if (StoreNode->getAddressSpace() != AMDGPUAS::GLOBAL_ADDRESS) { return SDValue(); @@ -371,7 +371,7 @@ SDValue SITargetLowering::LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const { SDValue False = Op.getOperand(3); SDValue CC = Op.getOperand(4); EVT VT = Op.getValueType(); - DebugLoc DL = Op.getDebugLoc(); + SDLoc DL(Op); // Possible Min/Max pattern SDValue MinMax = LowerMinMax(Op, DAG); @@ -390,7 +390,7 @@ SDValue SITargetLowering::LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const { SDValue SITargetLowering::PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const { SelectionDAG &DAG = DCI.DAG; - DebugLoc DL = N->getDebugLoc(); + SDLoc DL(N); EVT VT = N->getValueType(0); switch (N->getOpcode()) { @@ -564,7 +564,7 @@ void SITargetLowering::ensureSRegLimit(SelectionDAG &DAG, SDValue &Operand, // This is a conservative aproach, it is possible that we can't determine // the correct register class and copy too often, but better save than sorry. 
SDValue RC = DAG.getTargetConstant(RegClass, MVT::i32); - SDNode *Node = DAG.getMachineNode(TargetOpcode::COPY_TO_REGCLASS, DebugLoc(), + SDNode *Node = DAG.getMachineNode(TargetOpcode::COPY_TO_REGCLASS, SDLoc(), Operand.getValueType(), Operand, RC); Operand = SDValue(Node, 0); } @@ -701,8 +701,7 @@ SDNode *SITargetLowering::foldOperands(MachineSDNode *Node, Ops.push_back(Node->getOperand(i)); // Create a complete new instruction - return DAG.getMachineNode(Desc->Opcode, Node->getDebugLoc(), - Node->getVTList(), Ops); + return DAG.getMachineNode(Desc->Opcode, SDLoc(Node), Node->getVTList(), Ops); } /// \brief Helper function for adjustWritemask @@ -756,7 +755,7 @@ void SITargetLowering::adjustWritemask(MachineSDNode *&Node, if (Writemask == (1U << Lane)) { SDValue RC = DAG.getTargetConstant(AMDGPU::VReg_32RegClassID, MVT::i32); SDNode *Copy = DAG.getMachineNode(TargetOpcode::COPY_TO_REGCLASS, - DebugLoc(), Users[Lane]->getValueType(0), + SDLoc(), Users[Lane]->getValueType(0), SDValue(Node, 0), RC); DAG.ReplaceAllUsesWith(Users[Lane], Copy); return; diff --git a/lib/Target/R600/SIISelLowering.h b/lib/Target/R600/SIISelLowering.h index 62dfeda..ec01052 100644 --- a/lib/Target/R600/SIISelLowering.h +++ b/lib/Target/R600/SIISelLowering.h @@ -44,7 +44,7 @@ public: SDValue LowerFormalArguments(SDValue Chain, CallingConv::ID CallConv, bool isVarArg, const SmallVectorImpl<ISD::InputArg> &Ins, - DebugLoc DL, SelectionDAG &DAG, + SDLoc DL, SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const; virtual MachineBasicBlock * EmitInstrWithCustomInserter(MachineInstr * MI, diff --git a/lib/Target/Sparc/SparcISelDAGToDAG.cpp b/lib/Target/Sparc/SparcISelDAGToDAG.cpp index a709685..52ec45e 100644 --- a/lib/Target/Sparc/SparcISelDAGToDAG.cpp +++ b/lib/Target/Sparc/SparcISelDAGToDAG.cpp @@ -136,7 +136,7 @@ bool SparcDAGToDAGISel::SelectADDRrr(SDValue Addr, SDValue &R1, SDValue &R2) { } SDNode *SparcDAGToDAGISel::Select(SDNode *N) { - DebugLoc dl = N->getDebugLoc(); + SDLoc dl(N); if (N->isMachineOpcode()) return NULL; // Already selected. diff --git a/lib/Target/Sparc/SparcISelLowering.cpp b/lib/Target/Sparc/SparcISelLowering.cpp index 695cb51..8eb8702 100644 --- a/lib/Target/Sparc/SparcISelLowering.cpp +++ b/lib/Target/Sparc/SparcISelLowering.cpp @@ -164,7 +164,7 @@ SparcTargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv, bool IsVarArg, const SmallVectorImpl<ISD::OutputArg> &Outs, const SmallVectorImpl<SDValue> &OutVals, - DebugLoc DL, SelectionDAG &DAG) const { + SDLoc DL, SelectionDAG &DAG) const { if (Subtarget->is64Bit()) return LowerReturn_64(Chain, CallConv, IsVarArg, Outs, OutVals, DL, DAG); return LowerReturn_32(Chain, CallConv, IsVarArg, Outs, OutVals, DL, DAG); @@ -175,7 +175,7 @@ SparcTargetLowering::LowerReturn_32(SDValue Chain, CallingConv::ID CallConv, bool IsVarArg, const SmallVectorImpl<ISD::OutputArg> &Outs, const SmallVectorImpl<SDValue> &OutVals, - DebugLoc DL, SelectionDAG &DAG) const { + SDLoc DL, SelectionDAG &DAG) const { MachineFunction &MF = DAG.getMachineFunction(); // CCValAssign - represent the assignment of the return value to locations. @@ -238,7 +238,7 @@ SparcTargetLowering::LowerReturn_64(SDValue Chain, CallingConv::ID CallConv, bool IsVarArg, const SmallVectorImpl<ISD::OutputArg> &Outs, const SmallVectorImpl<SDValue> &OutVals, - DebugLoc DL, SelectionDAG &DAG) const { + SDLoc DL, SelectionDAG &DAG) const { // CCValAssign - represent the assignment of the return value to locations. 
SmallVector<CCValAssign, 16> RVLocs; @@ -314,7 +314,7 @@ LowerFormalArguments(SDValue Chain, CallingConv::ID CallConv, bool IsVarArg, const SmallVectorImpl<ISD::InputArg> &Ins, - DebugLoc DL, + SDLoc DL, SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const { if (Subtarget->is64Bit()) @@ -332,7 +332,7 @@ LowerFormalArguments_32(SDValue Chain, CallingConv::ID CallConv, bool isVarArg, const SmallVectorImpl<ISD::InputArg> &Ins, - DebugLoc dl, + SDLoc dl, SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const { MachineFunction &MF = DAG.getMachineFunction(); @@ -532,7 +532,7 @@ LowerFormalArguments_64(SDValue Chain, CallingConv::ID CallConv, bool IsVarArg, const SmallVectorImpl<ISD::InputArg> &Ins, - DebugLoc DL, + SDLoc DL, SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const { MachineFunction &MF = DAG.getMachineFunction(); @@ -653,7 +653,7 @@ SDValue SparcTargetLowering::LowerCall_32(TargetLowering::CallLoweringInfo &CLI, SmallVectorImpl<SDValue> &InVals) const { SelectionDAG &DAG = CLI.DAG; - DebugLoc &dl = CLI.DL; + SDLoc &dl = CLI.DL; SmallVector<ISD::OutputArg, 32> &Outs = CLI.Outs; SmallVector<SDValue, 32> &OutVals = CLI.OutVals; SmallVector<ISD::InputArg, 32> &Ins = CLI.Ins; @@ -979,7 +979,7 @@ SDValue SparcTargetLowering::LowerCall_64(TargetLowering::CallLoweringInfo &CLI, SmallVectorImpl<SDValue> &InVals) const { SelectionDAG &DAG = CLI.DAG; - DebugLoc DL = CLI.DL; + SDLoc DL = CLI.DL; SDValue Chain = CLI.Chain; // Analyze operands of the call, assigning locations to each operand. @@ -1448,7 +1448,7 @@ SDValue SparcTargetLowering::withTargetFlags(SDValue Op, unsigned TF, SelectionDAG &DAG) const { if (const GlobalAddressSDNode *GA = dyn_cast<GlobalAddressSDNode>(Op)) return DAG.getTargetGlobalAddress(GA->getGlobal(), - GA->getDebugLoc(), + SDLoc(GA), GA->getValueType(0), GA->getOffset(), TF); @@ -1470,7 +1470,7 @@ SDValue SparcTargetLowering::withTargetFlags(SDValue Op, unsigned TF, SDValue SparcTargetLowering::makeHiLoPair(SDValue Op, unsigned HiTF, unsigned LoTF, SelectionDAG &DAG) const { - DebugLoc DL = Op.getDebugLoc(); + SDLoc DL(Op); EVT VT = Op.getValueType(); SDValue Hi = DAG.getNode(SPISD::Hi, DL, VT, withTargetFlags(Op, HiTF, DAG)); SDValue Lo = DAG.getNode(SPISD::Lo, DL, VT, withTargetFlags(Op, LoTF, DAG)); @@ -1480,7 +1480,7 @@ SDValue SparcTargetLowering::makeHiLoPair(SDValue Op, // Build SDNodes for producing an address from a GlobalAddress, ConstantPool, // or ExternalSymbol SDNode. SDValue SparcTargetLowering::makeAddress(SDValue Op, SelectionDAG &DAG) const { - DebugLoc DL = Op.getDebugLoc(); + SDLoc DL(Op); EVT VT = getPointerTy(); // Handle PIC mode first. @@ -1529,7 +1529,7 @@ SDValue SparcTargetLowering::LowerConstantPool(SDValue Op, } static SDValue LowerFP_TO_SINT(SDValue Op, SelectionDAG &DAG) { - DebugLoc dl = Op.getDebugLoc(); + SDLoc dl(Op); // Convert the fp value to integer in an FP register. assert(Op.getValueType() == MVT::i32); Op = DAG.getNode(SPISD::FTOI, dl, MVT::f32, Op.getOperand(0)); @@ -1537,7 +1537,7 @@ static SDValue LowerFP_TO_SINT(SDValue Op, SelectionDAG &DAG) { } static SDValue LowerSINT_TO_FP(SDValue Op, SelectionDAG &DAG) { - DebugLoc dl = Op.getDebugLoc(); + SDLoc dl(Op); assert(Op.getOperand(0).getValueType() == MVT::i32); SDValue Tmp = DAG.getNode(ISD::BITCAST, dl, MVT::f32, Op.getOperand(0)); // Convert the int value to FP in an FP register. 
@@ -1550,7 +1550,7 @@ static SDValue LowerBR_CC(SDValue Op, SelectionDAG &DAG) { SDValue LHS = Op.getOperand(2); SDValue RHS = Op.getOperand(3); SDValue Dest = Op.getOperand(4); - DebugLoc dl = Op.getDebugLoc(); + SDLoc dl(Op); unsigned Opc, SPCC = ~0U; // If this is a br_cc of a "setcc", and if the setcc got lowered into @@ -1581,7 +1581,7 @@ static SDValue LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) { ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(4))->get(); SDValue TrueVal = Op.getOperand(2); SDValue FalseVal = Op.getOperand(3); - DebugLoc dl = Op.getDebugLoc(); + SDLoc dl(Op); unsigned Opc, SPCC = ~0U; // If this is a select_cc of a "setcc", and if the setcc got lowered into @@ -1613,7 +1613,7 @@ static SDValue LowerVASTART(SDValue Op, SelectionDAG &DAG, // vastart just stores the address of the VarArgsFrameIndex slot into the // memory location argument. - DebugLoc DL = Op.getDebugLoc(); + SDLoc DL(Op); SDValue Offset = DAG.getNode(ISD::ADD, DL, TLI.getPointerTy(), DAG.getRegister(SP::I6, TLI.getPointerTy()), @@ -1630,7 +1630,7 @@ static SDValue LowerVAARG(SDValue Op, SelectionDAG &DAG) { SDValue VAListPtr = Node->getOperand(1); EVT PtrVT = VAListPtr.getValueType(); const Value *SV = cast<SrcValueSDNode>(Node->getOperand(2))->getValue(); - DebugLoc DL = Node->getDebugLoc(); + SDLoc DL(Node); SDValue VAList = DAG.getLoad(PtrVT, DL, InChain, VAListPtr, MachinePointerInfo(SV), false, false, false, 0); // Increment the pointer, VAList, to the next vaarg. @@ -1649,7 +1649,7 @@ static SDValue LowerVAARG(SDValue Op, SelectionDAG &DAG) { static SDValue LowerDYNAMIC_STACKALLOC(SDValue Op, SelectionDAG &DAG) { SDValue Chain = Op.getOperand(0); // Legalize the chain. SDValue Size = Op.getOperand(1); // Legalize the size. - DebugLoc dl = Op.getDebugLoc(); + SDLoc dl(Op); unsigned SPReg = SP::O6; SDValue SP = DAG.getCopyFromReg(Chain, dl, SPReg, MVT::i32); @@ -1666,7 +1666,7 @@ static SDValue LowerDYNAMIC_STACKALLOC(SDValue Op, SelectionDAG &DAG) { static SDValue getFLUSHW(SDValue Op, SelectionDAG &DAG) { - DebugLoc dl = Op.getDebugLoc(); + SDLoc dl(Op); SDValue Chain = DAG.getNode(SPISD::FLUSHW, dl, MVT::Other, DAG.getEntryNode()); return Chain; @@ -1677,7 +1677,7 @@ static SDValue LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) { MFI->setFrameAddressIsTaken(true); EVT VT = Op.getValueType(); - DebugLoc dl = Op.getDebugLoc(); + SDLoc dl(Op); unsigned FrameReg = SP::I6; uint64_t depth = Op.getConstantOperandVal(0); @@ -1708,7 +1708,7 @@ static SDValue LowerRETURNADDR(SDValue Op, SelectionDAG &DAG) { MFI->setReturnAddressIsTaken(true); EVT VT = Op.getValueType(); - DebugLoc dl = Op.getDebugLoc(); + SDLoc dl(Op); unsigned RetReg = SP::I7; uint64_t depth = Op.getConstantOperandVal(0); diff --git a/lib/Target/Sparc/SparcISelLowering.h b/lib/Target/Sparc/SparcISelLowering.h index fd706be..0a75069 100644 --- a/lib/Target/Sparc/SparcISelLowering.h +++ b/lib/Target/Sparc/SparcISelLowering.h @@ -78,19 +78,19 @@ namespace llvm { CallingConv::ID CallConv, bool isVarArg, const SmallVectorImpl<ISD::InputArg> &Ins, - DebugLoc dl, SelectionDAG &DAG, + SDLoc dl, SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const; SDValue LowerFormalArguments_32(SDValue Chain, CallingConv::ID CallConv, bool isVarArg, const SmallVectorImpl<ISD::InputArg> &Ins, - DebugLoc dl, SelectionDAG &DAG, + SDLoc dl, SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const; SDValue LowerFormalArguments_64(SDValue Chain, CallingConv::ID CallConv, bool isVarArg, const SmallVectorImpl<ISD::InputArg> &Ins, - DebugLoc dl, 
SelectionDAG &DAG, + SDLoc dl, SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const; virtual SDValue @@ -106,17 +106,17 @@ namespace llvm { CallingConv::ID CallConv, bool isVarArg, const SmallVectorImpl<ISD::OutputArg> &Outs, const SmallVectorImpl<SDValue> &OutVals, - DebugLoc dl, SelectionDAG &DAG) const; + SDLoc dl, SelectionDAG &DAG) const; SDValue LowerReturn_32(SDValue Chain, CallingConv::ID CallConv, bool IsVarArg, const SmallVectorImpl<ISD::OutputArg> &Outs, const SmallVectorImpl<SDValue> &OutVals, - DebugLoc DL, SelectionDAG &DAG) const; + SDLoc DL, SelectionDAG &DAG) const; SDValue LowerReturn_64(SDValue Chain, CallingConv::ID CallConv, bool IsVarArg, const SmallVectorImpl<ISD::OutputArg> &Outs, const SmallVectorImpl<SDValue> &OutVals, - DebugLoc DL, SelectionDAG &DAG) const; + SDLoc DL, SelectionDAG &DAG) const; SDValue LowerGlobalAddress(SDValue Op, SelectionDAG &DAG) const; SDValue LowerConstantPool(SDValue Op, SelectionDAG &DAG) const; diff --git a/lib/Target/SystemZ/SystemZISelDAGToDAG.cpp b/lib/Target/SystemZ/SystemZISelDAGToDAG.cpp index d436ba9..f10ba23 100644 --- a/lib/Target/SystemZ/SystemZISelDAGToDAG.cpp +++ b/lib/Target/SystemZ/SystemZISelDAGToDAG.cpp @@ -474,7 +474,7 @@ void SystemZDAGToDAGISel::getAddressOperands(const SystemZAddressingMode &AM, // Truncate values from i64 to i32, for shifts. assert(VT == MVT::i32 && Base.getValueType() == MVT::i64 && "Unexpected truncation"); - DebugLoc DL = Base.getDebugLoc(); + SDLoc DL(Base); SDValue Trunc = CurDAG->getNode(ISD::TRUNCATE, DL, VT, Base); insertDAGNode(CurDAG, Base.getNode(), Trunc); Base = Trunc; @@ -522,7 +522,7 @@ SDNode *SystemZDAGToDAGISel::splitLargeImmediate(unsigned Opcode, SDNode *Node, SDValue Op0, uint64_t UpperVal, uint64_t LowerVal) { EVT VT = Node->getValueType(0); - DebugLoc DL = Node->getDebugLoc(); + SDLoc DL(Node); SDValue Upper = CurDAG->getConstant(UpperVal, VT); if (Op0.getNode()) Upper = CurDAG->getNode(Opcode, DL, VT, Op0, Upper); diff --git a/lib/Target/SystemZ/SystemZISelLowering.cpp b/lib/Target/SystemZ/SystemZISelLowering.cpp index 4e3c1a6..f575911 100644 --- a/lib/Target/SystemZ/SystemZISelLowering.cpp +++ b/lib/Target/SystemZ/SystemZISelLowering.cpp @@ -436,7 +436,7 @@ LowerAsmOperandForConstraint(SDValue Op, std::string &Constraint, // Value is a value that has been passed to us in the location described by VA // (and so has type VA.getLocVT()). Convert Value to VA.getValVT(), chaining // any loads onto Chain. -static SDValue convertLocVTToValVT(SelectionDAG &DAG, DebugLoc DL, +static SDValue convertLocVTToValVT(SelectionDAG &DAG, SDLoc DL, CCValAssign &VA, SDValue Chain, SDValue Value) { // If the argument has been promoted from a smaller type, insert an @@ -461,7 +461,7 @@ static SDValue convertLocVTToValVT(SelectionDAG &DAG, DebugLoc DL, // Value is a value of type VA.getValVT() that we need to copy into // the location described by VA. Return a copy of Value converted to // VA.getValVT(). The caller is responsible for handling indirect values. 
-static SDValue convertValVTToLocVT(SelectionDAG &DAG, DebugLoc DL, +static SDValue convertValVTToLocVT(SelectionDAG &DAG, SDLoc DL, CCValAssign &VA, SDValue Value) { switch (VA.getLocInfo()) { case CCValAssign::SExt: @@ -480,7 +480,7 @@ static SDValue convertValVTToLocVT(SelectionDAG &DAG, DebugLoc DL, SDValue SystemZTargetLowering:: LowerFormalArguments(SDValue Chain, CallingConv::ID CallConv, bool IsVarArg, const SmallVectorImpl<ISD::InputArg> &Ins, - DebugLoc DL, SelectionDAG &DAG, + SDLoc DL, SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const { MachineFunction &MF = DAG.getMachineFunction(); MachineFrameInfo *MFI = MF.getFrameInfo(); @@ -599,7 +599,7 @@ SDValue SystemZTargetLowering::LowerCall(CallLoweringInfo &CLI, SmallVectorImpl<SDValue> &InVals) const { SelectionDAG &DAG = CLI.DAG; - DebugLoc &DL = CLI.DL; + SDLoc &DL = CLI.DL; SmallVector<ISD::OutputArg, 32> &Outs = CLI.Outs; SmallVector<SDValue, 32> &OutVals = CLI.OutVals; SmallVector<ISD::InputArg, 32> &Ins = CLI.Ins; @@ -745,7 +745,7 @@ SystemZTargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv, bool IsVarArg, const SmallVectorImpl<ISD::OutputArg> &Outs, const SmallVectorImpl<SDValue> &OutVals, - DebugLoc DL, SelectionDAG &DAG) const { + SDLoc DL, SelectionDAG &DAG) const { MachineFunction &MF = DAG.getMachineFunction(); // Assign locations to each returned value. @@ -879,7 +879,7 @@ static void adjustSubwordCmp(SelectionDAG &DAG, bool &IsUnsigned, ISD::LoadExtType ExtType = IsUnsigned ? ISD::ZEXTLOAD : ISD::SEXTLOAD; if (CmpOp0.getValueType() != MVT::i32 || Load->getExtensionType() != ExtType) - CmpOp0 = DAG.getExtLoad(ExtType, Load->getDebugLoc(), MVT::i32, + CmpOp0 = DAG.getExtLoad(ExtType, SDLoc(Load), MVT::i32, Load->getChain(), Load->getBasePtr(), Load->getPointerInfo(), Load->getMemoryVT(), Load->isVolatile(), Load->isNonTemporal(), @@ -949,7 +949,7 @@ static SDValue emitCmp(SelectionDAG &DAG, SDValue CmpOp0, SDValue CmpOp1, IsUnsigned = true; } - DebugLoc DL = CmpOp0.getDebugLoc(); + SDLoc DL(CmpOp0); return DAG.getNode((IsUnsigned ? SystemZISD::UCMP : SystemZISD::CMP), DL, MVT::Glue, CmpOp0, CmpOp1); } @@ -959,7 +959,7 @@ static SDValue emitCmp(SelectionDAG &DAG, SDValue CmpOp0, SDValue CmpOp1, // Extend extends Op0 to a GR128, and Opcode performs the GR128 operation // on the extended Op0 and (unextended) Op1. Store the even register result // in Even and the odd register result in Odd. 
-static void lowerGR128Binary(SelectionDAG &DAG, DebugLoc DL, EVT VT, +static void lowerGR128Binary(SelectionDAG &DAG, SDLoc DL, EVT VT, unsigned Extend, unsigned Opcode, SDValue Op0, SDValue Op1, SDValue &Even, SDValue &Odd) { @@ -983,7 +983,7 @@ SDValue SystemZTargetLowering::lowerBR_CC(SDValue Op, SelectionDAG &DAG) const { SDValue CmpOp0 = Op.getOperand(2); SDValue CmpOp1 = Op.getOperand(3); SDValue Dest = Op.getOperand(4); - DebugLoc DL = Op.getDebugLoc(); + SDLoc DL(Op); unsigned CCMask; SDValue Flags = emitCmp(DAG, CmpOp0, CmpOp1, CC, CCMask); @@ -998,7 +998,7 @@ SDValue SystemZTargetLowering::lowerSELECT_CC(SDValue Op, SDValue TrueOp = Op.getOperand(2); SDValue FalseOp = Op.getOperand(3); ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(4))->get(); - DebugLoc DL = Op.getDebugLoc(); + SDLoc DL(Op); unsigned CCMask; SDValue Flags = emitCmp(DAG, CmpOp0, CmpOp1, CC, CCMask); @@ -1015,7 +1015,7 @@ SDValue SystemZTargetLowering::lowerSELECT_CC(SDValue Op, SDValue SystemZTargetLowering::lowerGlobalAddress(GlobalAddressSDNode *Node, SelectionDAG &DAG) const { - DebugLoc DL = Node->getDebugLoc(); + SDLoc DL(Node); const GlobalValue *GV = Node->getGlobal(); int64_t Offset = Node->getOffset(); EVT PtrVT = getPointerTy(); @@ -1054,7 +1054,7 @@ SDValue SystemZTargetLowering::lowerGlobalAddress(GlobalAddressSDNode *Node, SDValue SystemZTargetLowering::lowerGlobalTLSAddress(GlobalAddressSDNode *Node, SelectionDAG &DAG) const { - DebugLoc DL = Node->getDebugLoc(); + SDLoc DL(Node); const GlobalValue *GV = Node->getGlobal(); EVT PtrVT = getPointerTy(); TLSModel::Model model = TM.getTLSModel(GV); @@ -1093,7 +1093,7 @@ SDValue SystemZTargetLowering::lowerGlobalTLSAddress(GlobalAddressSDNode *Node, SDValue SystemZTargetLowering::lowerBlockAddress(BlockAddressSDNode *Node, SelectionDAG &DAG) const { - DebugLoc DL = Node->getDebugLoc(); + SDLoc DL(Node); const BlockAddress *BA = Node->getBlockAddress(); int64_t Offset = Node->getOffset(); EVT PtrVT = getPointerTy(); @@ -1105,7 +1105,7 @@ SDValue SystemZTargetLowering::lowerBlockAddress(BlockAddressSDNode *Node, SDValue SystemZTargetLowering::lowerJumpTable(JumpTableSDNode *JT, SelectionDAG &DAG) const { - DebugLoc DL = JT->getDebugLoc(); + SDLoc DL(JT); EVT PtrVT = getPointerTy(); SDValue Result = DAG.getTargetJumpTable(JT->getIndex(), PtrVT); @@ -1115,7 +1115,7 @@ SDValue SystemZTargetLowering::lowerJumpTable(JumpTableSDNode *JT, SDValue SystemZTargetLowering::lowerConstantPool(ConstantPoolSDNode *CP, SelectionDAG &DAG) const { - DebugLoc DL = CP->getDebugLoc(); + SDLoc DL(CP); EVT PtrVT = getPointerTy(); SDValue Result; @@ -1132,7 +1132,7 @@ SDValue SystemZTargetLowering::lowerConstantPool(ConstantPoolSDNode *CP, SDValue SystemZTargetLowering::lowerBITCAST(SDValue Op, SelectionDAG &DAG) const { - DebugLoc DL = Op.getDebugLoc(); + SDLoc DL(Op); SDValue In = Op.getOperand(0); EVT InVT = In.getValueType(); EVT ResVT = Op.getValueType(); @@ -1169,7 +1169,7 @@ SDValue SystemZTargetLowering::lowerVASTART(SDValue Op, SDValue Chain = Op.getOperand(0); SDValue Addr = Op.getOperand(1); const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue(); - DebugLoc DL = Op.getDebugLoc(); + SDLoc DL(Op); // The initial values of each field. 
const unsigned NumFields = 4; @@ -1203,7 +1203,7 @@ SDValue SystemZTargetLowering::lowerVACOPY(SDValue Op, SDValue SrcPtr = Op.getOperand(2); const Value *DstSV = cast<SrcValueSDNode>(Op.getOperand(3))->getValue(); const Value *SrcSV = cast<SrcValueSDNode>(Op.getOperand(4))->getValue(); - DebugLoc DL = Op.getDebugLoc(); + SDLoc DL(Op); return DAG.getMemcpy(Chain, DL, DstPtr, SrcPtr, DAG.getIntPtrConstant(32), /*Align*/8, /*isVolatile*/false, /*AlwaysInline*/false, @@ -1214,7 +1214,7 @@ SDValue SystemZTargetLowering:: lowerDYNAMIC_STACKALLOC(SDValue Op, SelectionDAG &DAG) const { SDValue Chain = Op.getOperand(0); SDValue Size = Op.getOperand(1); - DebugLoc DL = Op.getDebugLoc(); + SDLoc DL(Op); unsigned SPReg = getStackPointerRegisterToSaveRestore(); @@ -1240,7 +1240,7 @@ lowerDYNAMIC_STACKALLOC(SDValue Op, SelectionDAG &DAG) const { SDValue SystemZTargetLowering::lowerUMUL_LOHI(SDValue Op, SelectionDAG &DAG) const { EVT VT = Op.getValueType(); - DebugLoc DL = Op.getDebugLoc(); + SDLoc DL(Op); assert(!is32Bit(VT) && "Only support 64-bit UMUL_LOHI"); // UMUL_LOHI64 returns the low result in the odd register and the high @@ -1257,7 +1257,7 @@ SDValue SystemZTargetLowering::lowerSDIVREM(SDValue Op, SDValue Op0 = Op.getOperand(0); SDValue Op1 = Op.getOperand(1); EVT VT = Op.getValueType(); - DebugLoc DL = Op.getDebugLoc(); + SDLoc DL(Op); // We use DSGF for 32-bit division. if (is32Bit(VT)) { @@ -1277,7 +1277,7 @@ SDValue SystemZTargetLowering::lowerSDIVREM(SDValue Op, SDValue SystemZTargetLowering::lowerUDIVREM(SDValue Op, SelectionDAG &DAG) const { EVT VT = Op.getValueType(); - DebugLoc DL = Op.getDebugLoc(); + SDLoc DL(Op); // DL(G) uses a double-width dividend, so we need to clear the even // register in the GR128 input. The instruction returns the remainder @@ -1342,7 +1342,7 @@ SDValue SystemZTargetLowering::lowerOR(SDValue Op, SelectionDAG &DAG) const { // low 32 bits by truncating Low to an i32 and inserting it directly // using a subreg. The interesting cases are those where the truncation // can be folded. - DebugLoc DL = Op.getDebugLoc(); + SDLoc DL(Op); SDValue Low32 = DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, LowOp); SDValue SubReg32 = DAG.getTargetConstant(SystemZ::subreg_32bit, MVT::i64); SDNode *Result = DAG.getMachineNode(TargetOpcode::INSERT_SUBREG, DL, @@ -1368,7 +1368,7 @@ SDValue SystemZTargetLowering::lowerATOMIC_LOAD(SDValue Op, SDValue Addr = Node->getBasePtr(); SDValue Src2 = Node->getVal(); MachineMemOperand *MMO = Node->getMemOperand(); - DebugLoc DL = Node->getDebugLoc(); + SDLoc DL(Node); EVT PtrVT = Addr.getValueType(); // Convert atomic subtracts of constants into additions. @@ -1442,7 +1442,7 @@ SDValue SystemZTargetLowering::lowerATOMIC_CMP_SWAP(SDValue Op, SDValue CmpVal = Node->getOperand(2); SDValue SwapVal = Node->getOperand(3); MachineMemOperand *MMO = Node->getMemOperand(); - DebugLoc DL = Node->getDebugLoc(); + SDLoc DL(Node); EVT PtrVT = Addr.getValueType(); // Get the address of the containing word. 
@@ -1474,7 +1474,7 @@ SDValue SystemZTargetLowering::lowerSTACKSAVE(SDValue Op, SelectionDAG &DAG) const { MachineFunction &MF = DAG.getMachineFunction(); MF.getInfo<SystemZMachineFunctionInfo>()->setManipulatesSP(true); - return DAG.getCopyFromReg(Op.getOperand(0), Op.getDebugLoc(), + return DAG.getCopyFromReg(Op.getOperand(0), SDLoc(Op), SystemZ::R15D, Op.getValueType()); } @@ -1482,7 +1482,7 @@ SDValue SystemZTargetLowering::lowerSTACKRESTORE(SDValue Op, SelectionDAG &DAG) const { MachineFunction &MF = DAG.getMachineFunction(); MF.getInfo<SystemZMachineFunctionInfo>()->setManipulatesSP(true); - return DAG.getCopyToReg(Op.getOperand(0), Op.getDebugLoc(), + return DAG.getCopyToReg(Op.getOperand(0), SDLoc(Op), SystemZ::R15D, Op.getOperand(1)); } diff --git a/lib/Target/SystemZ/SystemZISelLowering.h b/lib/Target/SystemZ/SystemZISelLowering.h index 66e8f7c..e408bd2 100644 --- a/lib/Target/SystemZ/SystemZISelLowering.h +++ b/lib/Target/SystemZ/SystemZISelLowering.h @@ -147,7 +147,7 @@ public: LowerFormalArguments(SDValue Chain, CallingConv::ID CallConv, bool isVarArg, const SmallVectorImpl<ISD::InputArg> &Ins, - DebugLoc DL, SelectionDAG &DAG, + SDLoc DL, SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const LLVM_OVERRIDE; virtual SDValue LowerCall(CallLoweringInfo &CLI, @@ -158,7 +158,7 @@ public: CallingConv::ID CallConv, bool IsVarArg, const SmallVectorImpl<ISD::OutputArg> &Outs, const SmallVectorImpl<SDValue> &OutVals, - DebugLoc DL, SelectionDAG &DAG) const LLVM_OVERRIDE; + SDLoc DL, SelectionDAG &DAG) const LLVM_OVERRIDE; private: const SystemZSubtarget &Subtarget; diff --git a/lib/Target/X86/X86ISelDAGToDAG.cpp b/lib/Target/X86/X86ISelDAGToDAG.cpp index 14bb160..0a29901 100644 --- a/lib/Target/X86/X86ISelDAGToDAG.cpp +++ b/lib/Target/X86/X86ISelDAGToDAG.cpp @@ -236,7 +236,7 @@ namespace { // These are 32-bit even in 64-bit mode since RIP relative offset // is 32-bit. if (AM.GV) - Disp = CurDAG->getTargetGlobalAddress(AM.GV, DebugLoc(), + Disp = CurDAG->getTargetGlobalAddress(AM.GV, SDLoc(), MVT::i32, AM.Disp, AM.SymbolFlags); else if (AM.CP) @@ -373,7 +373,7 @@ static void MoveBelowOrigChain(SelectionDAG *CurDAG, SDValue Load, else Ops.push_back(Chain.getOperand(i)); SDValue NewChain = - CurDAG->getNode(ISD::TokenFactor, Load.getDebugLoc(), + CurDAG->getNode(ISD::TokenFactor, SDLoc(Load), MVT::Other, &Ops[0], Ops.size()); Ops.clear(); Ops.push_back(NewChain); @@ -524,7 +524,7 @@ void X86DAGToDAGISel::PreprocessISelDAG() { MemVT = SrcIsSSE ? SrcVT : DstVT; SDValue MemTmp = CurDAG->CreateStackTemporary(MemVT); - DebugLoc dl = N->getDebugLoc(); + SDLoc dl(N); // FIXME: optimize the case where the src/dest is a load or store? 
SDValue Store = CurDAG->getTruncStore(CurDAG->getEntryNode(), dl, @@ -782,7 +782,7 @@ static bool FoldMaskAndShiftToExtract(SelectionDAG &DAG, SDValue N, return true; EVT VT = N.getValueType(); - DebugLoc DL = N.getDebugLoc(); + SDLoc DL(N); SDValue Eight = DAG.getConstant(8, MVT::i8); SDValue NewMask = DAG.getConstant(0xff, VT); SDValue Srl = DAG.getNode(ISD::SRL, DL, VT, X, Eight); @@ -830,7 +830,7 @@ static bool FoldMaskedShiftToScaledMask(SelectionDAG &DAG, SDValue N, return true; EVT VT = N.getValueType(); - DebugLoc DL = N.getDebugLoc(); + SDLoc DL(N); SDValue NewMask = DAG.getConstant(Mask >> ShiftAmt, VT); SDValue NewAnd = DAG.getNode(ISD::AND, DL, VT, X, NewMask); SDValue NewShift = DAG.getNode(ISD::SHL, DL, VT, NewAnd, Shift.getOperand(1)); @@ -932,11 +932,11 @@ static bool FoldMaskAndShiftToScale(SelectionDAG &DAG, SDValue N, if (ReplacingAnyExtend) { assert(X.getValueType() != VT); // We looked through an ANY_EXTEND node, insert a ZERO_EXTEND. - SDValue NewX = DAG.getNode(ISD::ZERO_EXTEND, X.getDebugLoc(), VT, X); + SDValue NewX = DAG.getNode(ISD::ZERO_EXTEND, SDLoc(X), VT, X); InsertDAGNode(DAG, N, NewX); X = NewX; } - DebugLoc DL = N.getDebugLoc(); + SDLoc DL(N); SDValue NewSRLAmt = DAG.getConstant(ShiftAmt + AMShiftAmt, MVT::i8); SDValue NewSRL = DAG.getNode(ISD::SRL, DL, VT, X, NewSRLAmt); SDValue NewSHLAmt = DAG.getConstant(AMShiftAmt, MVT::i8); @@ -960,7 +960,7 @@ static bool FoldMaskAndShiftToScale(SelectionDAG &DAG, SDValue N, bool X86DAGToDAGISel::MatchAddressRecursively(SDValue N, X86ISelAddressMode &AM, unsigned Depth) { - DebugLoc dl = N.getDebugLoc(); + SDLoc dl(N); DEBUG({ dbgs() << "MatchAddress: "; AM.dump(); @@ -1502,7 +1502,7 @@ SDNode *X86DAGToDAGISel::SelectAtomic64(SDNode *Node, unsigned Opc) { MachineSDNode::mmo_iterator MemOp = MF->allocateMemRefsArray(1); MemOp[0] = cast<MemSDNode>(Node)->getMemOperand(); const SDValue Ops[] = { Tmp0, Tmp1, Tmp2, Tmp3, Tmp4, In2L, In2H, Chain}; - SDNode *ResNode = CurDAG->getMachineNode(Opc, Node->getDebugLoc(), + SDNode *ResNode = CurDAG->getMachineNode(Opc, SDLoc(Node), MVT::i32, MVT::i32, MVT::Other, Ops); cast<MachineSDNode>(ResNode)->setMemRefs(MemOp, MemOp + 1); return ResNode; @@ -1637,7 +1637,7 @@ static const uint16_t AtomicOpcTbl[AtomicOpcEnd][AtomicSzEnd] = { // + empty, the operand is not needed any more with the new op selected. // + non-empty, otherwise. static SDValue getAtomicLoadArithTargetConstant(SelectionDAG *CurDAG, - DebugLoc dl, + SDLoc dl, enum AtomicOpc &Op, EVT NVT, SDValue Val) { if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(Val)) { @@ -1689,7 +1689,7 @@ SDNode *X86DAGToDAGISel::SelectAtomicLoadArith(SDNode *Node, EVT NVT) { if (Node->hasAnyUseOfValue(0)) return 0; - DebugLoc dl = Node->getDebugLoc(); + SDLoc dl(Node); // Optimize common patterns for __sync_or_and_fetch and similar arith // operations where the result is not used. This allows us to use the "lock" @@ -1920,7 +1920,7 @@ static bool isLoadIncOrDecStore(StoreSDNode *StoreNode, unsigned Opc, if (ChainCheck) // Make a new TokenFactor with all the other input chains except // for the load. 
- InputChain = CurDAG->getNode(ISD::TokenFactor, Chain.getDebugLoc(), + InputChain = CurDAG->getNode(ISD::TokenFactor, SDLoc(Chain), MVT::Other, &ChainOps[0], ChainOps.size()); } if (!ChainCheck) @@ -1968,7 +1968,7 @@ SDNode *X86DAGToDAGISel::SelectGather(SDNode *Node, unsigned Opc) { SDValue Segment = CurDAG->getRegister(0, MVT::i32); const SDValue Ops[] = { VSrc, Base, getI8Imm(Scale->getSExtValue()), VIdx, Disp, Segment, VMask, Chain}; - SDNode *ResNode = CurDAG->getMachineNode(Opc, Node->getDebugLoc(), VTs, Ops); + SDNode *ResNode = CurDAG->getMachineNode(Opc, SDLoc(Node), VTs, Ops); // Node has 2 outputs: VDst and MVT::Other. // ResNode has 3 outputs: VDst, VMask_wb, and MVT::Other. // We replace VDst of Node with VDst of ResNode, and Other of Node with Other @@ -1982,7 +1982,7 @@ SDNode *X86DAGToDAGISel::Select(SDNode *Node) { EVT NVT = Node->getValueType(0); unsigned Opc, MOpc; unsigned Opcode = Node->getOpcode(); - DebugLoc dl = Node->getDebugLoc(); + SDLoc dl(Node); DEBUG(dbgs() << "Selecting: "; Node->dump(CurDAG); dbgs() << '\n'); @@ -2666,7 +2666,7 @@ SDNode *X86DAGToDAGISel::Select(SDNode *Node) { EVT LdVT = LoadNode->getMemoryVT(); unsigned newOpc = getFusedLdStOpcode(LdVT, Opc); MachineSDNode *Result = CurDAG->getMachineNode(newOpc, - Node->getDebugLoc(), + SDLoc(Node), MVT::i32, MVT::Other, Ops); Result->setMemRefs(MemOp, MemOp + 2); diff --git a/lib/Target/X86/X86ISelLowering.cpp b/lib/Target/X86/X86ISelLowering.cpp index 6d53a8f..3694aeb 100644 --- a/lib/Target/X86/X86ISelLowering.cpp +++ b/lib/Target/X86/X86ISelLowering.cpp @@ -55,7 +55,7 @@ using namespace llvm; STATISTIC(NumTailCalls, "Number of tail calls"); // Forward declarations. -static SDValue getMOVL(SelectionDAG &DAG, DebugLoc dl, EVT VT, SDValue V1, +static SDValue getMOVL(SelectionDAG &DAG, SDLoc dl, EVT VT, SDValue V1, SDValue V2); /// Generate a DAG to grab 128-bits from a vector > 128 bits. This @@ -64,7 +64,7 @@ static SDValue getMOVL(SelectionDAG &DAG, DebugLoc dl, EVT VT, SDValue V1, /// want. It need not be aligned to a 128-bit bounday. That makes /// lowering EXTRACT_VECTOR_ELT operations easier. static SDValue Extract128BitVector(SDValue Vec, unsigned IdxVal, - SelectionDAG &DAG, DebugLoc dl) { + SelectionDAG &DAG, SDLoc dl) { EVT VT = Vec.getValueType(); assert(VT.is256BitVector() && "Unexpected vector size!"); EVT ElVT = VT.getVectorElementType(); @@ -104,7 +104,7 @@ static SDValue Extract128BitVector(SDValue Vec, unsigned IdxVal, /// lowering INSERT_VECTOR_ELT operations easier. static SDValue Insert128BitVector(SDValue Result, SDValue Vec, unsigned IdxVal, SelectionDAG &DAG, - DebugLoc dl) { + SDLoc dl) { // Inserting UNDEF is Result if (Vec.getOpcode() == ISD::UNDEF) return Result; @@ -134,7 +134,7 @@ static SDValue Insert128BitVector(SDValue Result, SDValue Vec, /// large BUILD_VECTORS. static SDValue Concat128BitVectors(SDValue V1, SDValue V2, EVT VT, unsigned NumElems, SelectionDAG &DAG, - DebugLoc dl) { + SDLoc dl) { SDValue V = Insert128BitVector(DAG.getUNDEF(VT), V1, 0, DAG, dl); return Insert128BitVector(V, V2, NumElems/2, DAG, dl); } @@ -1504,9 +1504,9 @@ X86TargetLowering::LowerCustomJumpTableEntry(const MachineJumpTableInfo *MJTI, SDValue X86TargetLowering::getPICJumpTableRelocBase(SDValue Table, SelectionDAG &DAG) const { if (!Subtarget->is64Bit()) - // This doesn't have DebugLoc associated with it, but is not really the + // This doesn't have SDLoc associated with it, but is not really the // same as a Register. 
- return DAG.getNode(X86ISD::GlobalBaseReg, DebugLoc(), getPointerTy()); + return DAG.getNode(X86ISD::GlobalBaseReg, SDLoc(), getPointerTy()); return Table; } @@ -1593,7 +1593,7 @@ X86TargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv, bool isVarArg, const SmallVectorImpl<ISD::OutputArg> &Outs, const SmallVectorImpl<SDValue> &OutVals, - DebugLoc dl, SelectionDAG &DAG) const { + SDLoc dl, SelectionDAG &DAG) const { MachineFunction &MF = DAG.getMachineFunction(); X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>(); @@ -1761,7 +1761,7 @@ SDValue X86TargetLowering::LowerCallResult(SDValue Chain, SDValue InFlag, CallingConv::ID CallConv, bool isVarArg, const SmallVectorImpl<ISD::InputArg> &Ins, - DebugLoc dl, SelectionDAG &DAG, + SDLoc dl, SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const { // Assign locations to each value returned by this call. @@ -1868,7 +1868,7 @@ argsAreStructReturn(const SmallVectorImpl<ISD::InputArg> &Ins) { static SDValue CreateCopyOfByValArgument(SDValue Src, SDValue Dst, SDValue Chain, ISD::ArgFlagsTy Flags, SelectionDAG &DAG, - DebugLoc dl) { + SDLoc dl) { SDValue SizeNode = DAG.getConstant(Flags.getByValSize(), MVT::i32); return DAG.getMemcpy(Chain, dl, Dst, Src, SizeNode, Flags.getByValAlign(), @@ -1906,7 +1906,7 @@ SDValue X86TargetLowering::LowerMemArgument(SDValue Chain, CallingConv::ID CallConv, const SmallVectorImpl<ISD::InputArg> &Ins, - DebugLoc dl, SelectionDAG &DAG, + SDLoc dl, SelectionDAG &DAG, const CCValAssign &VA, MachineFrameInfo *MFI, unsigned i) const { @@ -1948,7 +1948,7 @@ X86TargetLowering::LowerFormalArguments(SDValue Chain, CallingConv::ID CallConv, bool isVarArg, const SmallVectorImpl<ISD::InputArg> &Ins, - DebugLoc dl, + SDLoc dl, SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const { @@ -2225,7 +2225,7 @@ X86TargetLowering::LowerFormalArguments(SDValue Chain, SDValue X86TargetLowering::LowerMemOpCallTo(SDValue Chain, SDValue StackPtr, SDValue Arg, - DebugLoc dl, SelectionDAG &DAG, + SDLoc dl, SelectionDAG &DAG, const CCValAssign &VA, ISD::ArgFlagsTy Flags) const { unsigned LocMemOffset = VA.getLocMemOffset(); @@ -2245,7 +2245,7 @@ SDValue X86TargetLowering::EmitTailCallLoadRetAddr(SelectionDAG &DAG, SDValue &OutRetAddr, SDValue Chain, bool IsTailCall, bool Is64Bit, - int FPDiff, DebugLoc dl) const { + int FPDiff, SDLoc dl) const { // Adjust the Return address stack slot. EVT VT = getPointerTy(); OutRetAddr = getReturnAddressFrameIndex(DAG); @@ -2261,7 +2261,7 @@ X86TargetLowering::EmitTailCallLoadRetAddr(SelectionDAG &DAG, static SDValue EmitTailCallStoreRetAddr(SelectionDAG & DAG, MachineFunction &MF, SDValue Chain, SDValue RetAddrFrIdx, EVT PtrVT, - unsigned SlotSize, int FPDiff, DebugLoc dl) { + unsigned SlotSize, int FPDiff, SDLoc dl) { // Store the return address to the appropriate stack slot. if (!FPDiff) return Chain; // Calculate the new stack slot for the return address. @@ -2278,7 +2278,7 @@ SDValue X86TargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI, SmallVectorImpl<SDValue> &InVals) const { SelectionDAG &DAG = CLI.DAG; - DebugLoc &dl = CLI.DL; + SDLoc &dl = CLI.DL; SmallVector<ISD::OutputArg, 32> &Outs = CLI.Outs; SmallVector<SDValue, 32> &OutVals = CLI.OutVals; SmallVector<ISD::InputArg, 32> &Ins = CLI.Ins; @@ -2443,7 +2443,7 @@ X86TargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI, // GOT pointer. 
if (!isTailCall) { RegsToPass.push_back(std::make_pair(unsigned(X86::EBX), - DAG.getNode(X86ISD::GlobalBaseReg, DebugLoc(), getPointerTy()))); + DAG.getNode(X86ISD::GlobalBaseReg, SDLoc(), getPointerTy()))); } else { // If we are tail calling and generating PIC/GOT style code load the // address of the callee into ECX. The value in ecx is used as target of @@ -3062,7 +3062,7 @@ static bool isTargetShuffle(unsigned Opcode) { } } -static SDValue getTargetShuffleNode(unsigned Opc, DebugLoc dl, EVT VT, +static SDValue getTargetShuffleNode(unsigned Opc, SDLoc dl, EVT VT, SDValue V1, SelectionDAG &DAG) { switch(Opc) { default: llvm_unreachable("Unknown x86 shuffle node"); @@ -3073,7 +3073,7 @@ static SDValue getTargetShuffleNode(unsigned Opc, DebugLoc dl, EVT VT, } } -static SDValue getTargetShuffleNode(unsigned Opc, DebugLoc dl, EVT VT, +static SDValue getTargetShuffleNode(unsigned Opc, SDLoc dl, EVT VT, SDValue V1, unsigned TargetMask, SelectionDAG &DAG) { switch(Opc) { @@ -3087,7 +3087,7 @@ static SDValue getTargetShuffleNode(unsigned Opc, DebugLoc dl, EVT VT, } } -static SDValue getTargetShuffleNode(unsigned Opc, DebugLoc dl, EVT VT, +static SDValue getTargetShuffleNode(unsigned Opc, SDLoc dl, EVT VT, SDValue V1, SDValue V2, unsigned TargetMask, SelectionDAG &DAG) { switch(Opc) { @@ -3100,7 +3100,7 @@ static SDValue getTargetShuffleNode(unsigned Opc, DebugLoc dl, EVT VT, } } -static SDValue getTargetShuffleNode(unsigned Opc, DebugLoc dl, EVT VT, +static SDValue getTargetShuffleNode(unsigned Opc, SDLoc dl, EVT VT, SDValue V1, SDValue V2, SelectionDAG &DAG) { switch(Opc) { default: llvm_unreachable("Unknown x86 shuffle node"); @@ -3628,7 +3628,7 @@ static SDValue Compact8x32ShuffleNode(ShuffleVectorSDNode *SVOp, SelectionDAG &DAG) { MVT VT = SVOp->getValueType(0).getSimpleVT(); - DebugLoc dl = SVOp->getDebugLoc(); + SDLoc dl(SVOp); if (VT != MVT::v8i32 && VT != MVT::v8f32) return SDValue(); @@ -4263,7 +4263,7 @@ static SDValue CommuteVectorShuffle(ShuffleVectorSDNode *SVOp, } MaskVec.push_back(Idx); } - return DAG.getVectorShuffle(VT, SVOp->getDebugLoc(), SVOp->getOperand(1), + return DAG.getVectorShuffle(VT, SDLoc(SVOp), SVOp->getOperand(1), SVOp->getOperand(0), &MaskVec[0]); } @@ -4396,7 +4396,7 @@ static bool isZeroShuffle(ShuffleVectorSDNode *N) { /// getZeroVector - Returns a vector of specified type with all zero elements. /// static SDValue getZeroVector(EVT VT, const X86Subtarget *Subtarget, - SelectionDAG &DAG, DebugLoc dl) { + SelectionDAG &DAG, SDLoc dl) { assert(VT.isVector() && "Expected a vector type"); // Always build SSE zero vectors as <4 x i32> bitcasted @@ -4435,7 +4435,7 @@ static SDValue getZeroVector(EVT VT, const X86Subtarget *Subtarget, /// no AVX2 supprt, use two <4 x i32> inserted in a <8 x i32> appropriately. /// Then bitcast to their original type, ensuring they get CSE'd. static SDValue getOnesVector(MVT VT, bool HasInt256, SelectionDAG &DAG, - DebugLoc dl) { + SDLoc dl) { assert(VT.isVector() && "Expected a vector type"); SDValue Cst = DAG.getTargetConstant(~0U, MVT::i32); @@ -4469,7 +4469,7 @@ static void NormalizeMask(SmallVectorImpl<int> &Mask, unsigned NumElems) { /// getMOVLMask - Returns a vector_shuffle mask for an movs{s|d}, movd /// operation of specified width. 
-static SDValue getMOVL(SelectionDAG &DAG, DebugLoc dl, EVT VT, SDValue V1, +static SDValue getMOVL(SelectionDAG &DAG, SDLoc dl, EVT VT, SDValue V1, SDValue V2) { unsigned NumElems = VT.getVectorNumElements(); SmallVector<int, 8> Mask; @@ -4480,7 +4480,7 @@ static SDValue getMOVL(SelectionDAG &DAG, DebugLoc dl, EVT VT, SDValue V1, } /// getUnpackl - Returns a vector_shuffle node for an unpackl operation. -static SDValue getUnpackl(SelectionDAG &DAG, DebugLoc dl, EVT VT, SDValue V1, +static SDValue getUnpackl(SelectionDAG &DAG, SDLoc dl, EVT VT, SDValue V1, SDValue V2) { unsigned NumElems = VT.getVectorNumElements(); SmallVector<int, 8> Mask; @@ -4492,7 +4492,7 @@ static SDValue getUnpackl(SelectionDAG &DAG, DebugLoc dl, EVT VT, SDValue V1, } /// getUnpackh - Returns a vector_shuffle node for an unpackh operation. -static SDValue getUnpackh(SelectionDAG &DAG, DebugLoc dl, EVT VT, SDValue V1, +static SDValue getUnpackh(SelectionDAG &DAG, SDLoc dl, EVT VT, SDValue V1, SDValue V2) { unsigned NumElems = VT.getVectorNumElements(); SmallVector<int, 8> Mask; @@ -4510,7 +4510,7 @@ static SDValue getUnpackh(SelectionDAG &DAG, DebugLoc dl, EVT VT, SDValue V1, static SDValue PromoteSplati8i16(SDValue V, SelectionDAG &DAG, int &EltNo) { EVT VT = V.getValueType(); int NumElems = VT.getVectorNumElements(); - DebugLoc dl = V.getDebugLoc(); + SDLoc dl(V); while (NumElems > 4) { if (EltNo < NumElems/2) { @@ -4527,7 +4527,7 @@ static SDValue PromoteSplati8i16(SDValue V, SelectionDAG &DAG, int &EltNo) { /// getLegalSplat - Generate a legal splat with supported x86 shuffles static SDValue getLegalSplat(SelectionDAG &DAG, SDValue V, int EltNo) { EVT VT = V.getValueType(); - DebugLoc dl = V.getDebugLoc(); + SDLoc dl(V); if (VT.is128BitVector()) { V = DAG.getNode(ISD::BITCAST, dl, MVT::v4f32, V); @@ -4554,7 +4554,7 @@ static SDValue getLegalSplat(SelectionDAG &DAG, SDValue V, int EltNo) { static SDValue PromoteSplat(ShuffleVectorSDNode *SV, SelectionDAG &DAG) { EVT SrcVT = SV->getValueType(0); SDValue V1 = SV->getOperand(0); - DebugLoc dl = SV->getDebugLoc(); + SDLoc dl(SV); int EltNo = SV->getSplatIndex(); int NumElems = SrcVT.getVectorNumElements(); @@ -4599,13 +4599,13 @@ static SDValue getShuffleVectorZeroOrUndef(SDValue V2, unsigned Idx, SelectionDAG &DAG) { EVT VT = V2.getValueType(); SDValue V1 = IsZero - ? getZeroVector(VT, Subtarget, DAG, V2.getDebugLoc()) : DAG.getUNDEF(VT); + ? getZeroVector(VT, Subtarget, DAG, SDLoc(V2)) : DAG.getUNDEF(VT); unsigned NumElems = VT.getVectorNumElements(); SmallVector<int, 16> MaskVec; for (unsigned i = 0; i != NumElems; ++i) // If this is the insertion idx, put the low elt of V2 here. MaskVec.push_back(i == Idx ? 
NumElems : i); - return DAG.getVectorShuffle(VT, V2.getDebugLoc(), V1, V2, &MaskVec[0]); + return DAG.getVectorShuffle(VT, SDLoc(V2), V1, V2, &MaskVec[0]); } /// getTargetShuffleMask - Calculates the shuffle mask corresponding to the @@ -4903,7 +4903,7 @@ static SDValue LowerBuildVectorv16i8(SDValue Op, unsigned NonZeros, if (NumNonZero > 8) return SDValue(); - DebugLoc dl = Op.getDebugLoc(); + SDLoc dl(Op); SDValue V(0, 0); bool First = true; for (unsigned i = 0; i < 16; ++i) { @@ -4951,7 +4951,7 @@ static SDValue LowerBuildVectorv8i16(SDValue Op, unsigned NonZeros, if (NumNonZero > 4) return SDValue(); - DebugLoc dl = Op.getDebugLoc(); + SDLoc dl(Op); SDValue V(0, 0); bool First = true; for (unsigned i = 0; i < 8; ++i) { @@ -4977,7 +4977,7 @@ static SDValue LowerBuildVectorv8i16(SDValue Op, unsigned NonZeros, /// static SDValue getVShift(bool isLeft, EVT VT, SDValue SrcOp, unsigned NumBits, SelectionDAG &DAG, - const TargetLowering &TLI, DebugLoc dl) { + const TargetLowering &TLI, SDLoc dl) { assert(VT.is128BitVector() && "Unknown type for VShift"); EVT ShVT = MVT::v2i64; unsigned Opc = isLeft ? X86ISD::VSHLDQ : X86ISD::VSRLDQ; @@ -4989,7 +4989,7 @@ static SDValue getVShift(bool isLeft, EVT VT, SDValue SrcOp, } SDValue -X86TargetLowering::LowerAsSplatVectorLoad(SDValue SrcOp, EVT VT, DebugLoc dl, +X86TargetLowering::LowerAsSplatVectorLoad(SDValue SrcOp, EVT VT, SDLoc dl, SelectionDAG &DAG) const { // Check if the scalar load can be widened into a vector load. And if @@ -5042,7 +5042,7 @@ X86TargetLowering::LowerAsSplatVectorLoad(SDValue SrcOp, EVT VT, DebugLoc dl, return SDValue(); int64_t StartOffset = Offset & ~(RequiredAlign-1); if (StartOffset) - Ptr = DAG.getNode(ISD::ADD, Ptr.getDebugLoc(), Ptr.getValueType(), + Ptr = DAG.getNode(ISD::ADD, SDLoc(Ptr), Ptr.getValueType(), Ptr,DAG.getConstant(StartOffset, Ptr.getValueType())); int EltNo = (Offset - StartOffset) >> 2; @@ -5073,7 +5073,7 @@ X86TargetLowering::LowerAsSplatVectorLoad(SDValue SrcOp, EVT VT, DebugLoc dl, /// rather than undef via VZEXT_LOAD, but we do not detect that case today. /// There's even a handy isZeroNode for that purpose. static SDValue EltsFromConsecutiveLoads(EVT VT, SmallVectorImpl<SDValue> &Elts, - DebugLoc &DL, SelectionDAG &DAG) { + SDLoc &DL, SelectionDAG &DAG) { EVT EltVT = VT.getVectorElementType(); unsigned NumElems = Elts.size(); @@ -5172,7 +5172,7 @@ X86TargetLowering::LowerVectorBroadcast(SDValue Op, SelectionDAG &DAG) const { return SDValue(); MVT VT = Op.getValueType().getSimpleVT(); - DebugLoc dl = Op.getDebugLoc(); + SDLoc dl(Op); assert((VT.is128BitVector() || VT.is256BitVector()) && "Unsupported vector type for broadcast."); @@ -5299,7 +5299,7 @@ X86TargetLowering::buildFromShuffleMostly(SDValue Op, SelectionDAG &DAG) const { if (!isOperationLegalOrCustom(ISD::INSERT_VECTOR_ELT, VT)) return SDValue(); - DebugLoc DL = Op.getDebugLoc(); + SDLoc DL(Op); unsigned NumElems = Op.getNumOperands(); SDValue VecIn1; @@ -5367,7 +5367,7 @@ X86TargetLowering::buildFromShuffleMostly(SDValue Op, SelectionDAG &DAG) const { SDValue X86TargetLowering::LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) const { - DebugLoc dl = Op.getDebugLoc(); + SDLoc dl(Op); MVT VT = Op.getValueType().getSimpleVT(); MVT ExtVT = VT.getVectorElementType(); @@ -5700,7 +5700,7 @@ X86TargetLowering::LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) const { // LowerAVXCONCAT_VECTORS - 256-bit AVX can use the vinsertf128 instruction // to create 256-bit vectors from two other 128-bit ones. 
static SDValue LowerAVXCONCAT_VECTORS(SDValue Op, SelectionDAG &DAG) { - DebugLoc dl = Op.getDebugLoc(); + SDLoc dl(Op); MVT ResVT = Op.getValueType().getSimpleVT(); assert(ResVT.is256BitVector() && "Value type must be 256-bit wide"); @@ -5726,7 +5726,7 @@ LowerVECTOR_SHUFFLEtoBlend(ShuffleVectorSDNode *SVOp, const X86Subtarget *Subtarget, SelectionDAG &DAG) { SDValue V1 = SVOp->getOperand(0); SDValue V2 = SVOp->getOperand(1); - DebugLoc dl = SVOp->getDebugLoc(); + SDLoc dl(SVOp); MVT VT = SVOp->getValueType(0).getSimpleVT(); MVT EltVT = VT.getVectorElementType(); unsigned NumElems = VT.getVectorNumElements(); @@ -5787,7 +5787,7 @@ LowerVECTOR_SHUFFLEv8i16(SDValue Op, const X86Subtarget *Subtarget, ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op); SDValue V1 = SVOp->getOperand(0); SDValue V2 = SVOp->getOperand(1); - DebugLoc dl = SVOp->getDebugLoc(); + SDLoc dl(SVOp); SmallVector<int, 8> MaskVals; // Determine if more than 1 of the words in each of the low and high quadwords @@ -6042,7 +6042,7 @@ SDValue LowerVECTOR_SHUFFLEv16i8(ShuffleVectorSDNode *SVOp, const X86TargetLowering &TLI) { SDValue V1 = SVOp->getOperand(0); SDValue V2 = SVOp->getOperand(1); - DebugLoc dl = SVOp->getDebugLoc(); + SDLoc dl(SVOp); ArrayRef<int> MaskVals = SVOp->getMask(); // Promote splats to a larger type which usually leads to more efficient code. @@ -6171,7 +6171,7 @@ SDValue LowerVECTOR_SHUFFLEv32i8(ShuffleVectorSDNode *SVOp, MVT VT = SVOp->getValueType(0).getSimpleVT(); SDValue V1 = SVOp->getOperand(0); SDValue V2 = SVOp->getOperand(1); - DebugLoc dl = SVOp->getDebugLoc(); + SDLoc dl(SVOp); SmallVector<int, 32> MaskVals(SVOp->getMask().begin(), SVOp->getMask().end()); bool V2IsUndef = V2.getOpcode() == ISD::UNDEF; @@ -6217,7 +6217,7 @@ static SDValue RewriteAsNarrowerShuffle(ShuffleVectorSDNode *SVOp, SelectionDAG &DAG) { MVT VT = SVOp->getValueType(0).getSimpleVT(); - DebugLoc dl = SVOp->getDebugLoc(); + SDLoc dl(SVOp); unsigned NumElems = VT.getVectorNumElements(); MVT NewVT; unsigned Scale; @@ -6255,7 +6255,7 @@ SDValue RewriteAsNarrowerShuffle(ShuffleVectorSDNode *SVOp, /// static SDValue getVZextMovL(MVT VT, EVT OpVT, SDValue SrcOp, SelectionDAG &DAG, - const X86Subtarget *Subtarget, DebugLoc dl) { + const X86Subtarget *Subtarget, SDLoc dl) { if (VT == MVT::v2f64 || VT == MVT::v4f32) { LoadSDNode *LD = NULL; if (!isScalarLoadToVector(SrcOp.getNode(), &LD)) @@ -6300,7 +6300,7 @@ LowerVECTOR_SHUFFLE_256(ShuffleVectorSDNode *SVOp, SelectionDAG &DAG) { unsigned NumElems = VT.getVectorNumElements(); unsigned NumLaneElems = NumElems / 2; - DebugLoc dl = SVOp->getDebugLoc(); + SDLoc dl(SVOp); MVT EltVT = VT.getVectorElementType(); MVT NVT = MVT::getVectorVT(EltVT, NumLaneElems); SDValue Output[2]; @@ -6406,7 +6406,7 @@ static SDValue LowerVECTOR_SHUFFLE_128v4(ShuffleVectorSDNode *SVOp, SelectionDAG &DAG) { SDValue V1 = SVOp->getOperand(0); SDValue V2 = SVOp->getOperand(1); - DebugLoc dl = SVOp->getDebugLoc(); + SDLoc dl(SVOp); MVT VT = SVOp->getValueType(0).getSimpleVT(); assert(VT.is128BitVector() && "Unsupported vector size"); @@ -6557,7 +6557,7 @@ static bool MayFoldVectorLoad(SDValue V) { } static -SDValue getMOVDDup(SDValue &Op, DebugLoc &dl, SDValue V1, SelectionDAG &DAG) { +SDValue getMOVDDup(SDValue &Op, SDLoc &dl, SDValue V1, SelectionDAG &DAG) { EVT VT = Op.getValueType(); // Canonizalize to v2f64. 
@@ -6568,7 +6568,7 @@ SDValue getMOVDDup(SDValue &Op, DebugLoc &dl, SDValue V1, SelectionDAG &DAG) { } static -SDValue getMOVLowToHigh(SDValue &Op, DebugLoc &dl, SelectionDAG &DAG, +SDValue getMOVLowToHigh(SDValue &Op, SDLoc &dl, SelectionDAG &DAG, bool HasSSE2) { SDValue V1 = Op.getOperand(0); SDValue V2 = Op.getOperand(1); @@ -6587,7 +6587,7 @@ SDValue getMOVLowToHigh(SDValue &Op, DebugLoc &dl, SelectionDAG &DAG, } static -SDValue getMOVHighToLow(SDValue &Op, DebugLoc &dl, SelectionDAG &DAG) { +SDValue getMOVHighToLow(SDValue &Op, SDLoc &dl, SelectionDAG &DAG) { SDValue V1 = Op.getOperand(0); SDValue V2 = Op.getOperand(1); EVT VT = Op.getValueType(); @@ -6603,7 +6603,7 @@ SDValue getMOVHighToLow(SDValue &Op, DebugLoc &dl, SelectionDAG &DAG) { } static -SDValue getMOVLP(SDValue &Op, DebugLoc &dl, SelectionDAG &DAG, bool HasSSE2) { +SDValue getMOVLP(SDValue &Op, SDLoc &dl, SelectionDAG &DAG, bool HasSSE2) { SDValue V1 = Op.getOperand(0); SDValue V2 = Op.getOperand(1); EVT VT = Op.getValueType(); @@ -6673,7 +6673,7 @@ X86TargetLowering::LowerVectorIntExtend(SDValue Op, SelectionDAG &DAG) const { return SDValue(); ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op); - DebugLoc DL = Op.getDebugLoc(); + SDLoc DL(Op); SDValue V1 = Op.getOperand(0); SDValue V2 = Op.getOperand(1); unsigned NumElems = VT.getVectorNumElements(); @@ -6752,7 +6752,7 @@ SDValue X86TargetLowering::NormalizeVectorShuffle(SDValue Op, SelectionDAG &DAG) const { ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op); MVT VT = Op.getValueType().getSimpleVT(); - DebugLoc dl = Op.getDebugLoc(); + SDLoc dl(Op); SDValue V1 = Op.getOperand(0); SDValue V2 = Op.getOperand(1); @@ -6811,7 +6811,7 @@ X86TargetLowering::LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG) const { SDValue V1 = Op.getOperand(0); SDValue V2 = Op.getOperand(1); MVT VT = Op.getValueType().getSimpleVT(); - DebugLoc dl = Op.getDebugLoc(); + SDLoc dl(Op); unsigned NumElems = VT.getVectorNumElements(); bool V1IsUndef = V1.getOpcode() == ISD::UNDEF; bool V2IsUndef = V2.getOpcode() == ISD::UNDEF; @@ -7122,7 +7122,7 @@ X86TargetLowering::LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG) const { static SDValue LowerEXTRACT_VECTOR_ELT_SSE4(SDValue Op, SelectionDAG &DAG) { MVT VT = Op.getValueType().getSimpleVT(); - DebugLoc dl = Op.getDebugLoc(); + SDLoc dl(Op); if (!Op.getOperand(0).getValueType().getSimpleVT().is128BitVector()) return SDValue(); @@ -7194,7 +7194,7 @@ X86TargetLowering::LowerEXTRACT_VECTOR_ELT(SDValue Op, // If this is a 256-bit vector result, first extract the 128-bit vector and // then extract the element from the 128-bit vector. if (VecVT.is256BitVector()) { - DebugLoc dl = Op.getNode()->getDebugLoc(); + SDLoc dl(Op.getNode()); unsigned NumElems = VecVT.getVectorNumElements(); SDValue Idx = Op.getOperand(1); unsigned IdxVal = cast<ConstantSDNode>(Idx)->getZExtValue(); @@ -7217,7 +7217,7 @@ X86TargetLowering::LowerEXTRACT_VECTOR_ELT(SDValue Op, } MVT VT = Op.getValueType().getSimpleVT(); - DebugLoc dl = Op.getDebugLoc(); + SDLoc dl(Op); // TODO: handle v16i8. 
if (VT.getSizeInBits() == 16) { SDValue Vec = Op.getOperand(0); @@ -7276,7 +7276,7 @@ X86TargetLowering::LowerEXTRACT_VECTOR_ELT(SDValue Op, static SDValue LowerINSERT_VECTOR_ELT_SSE4(SDValue Op, SelectionDAG &DAG) { MVT VT = Op.getValueType().getSimpleVT(); MVT EltVT = VT.getVectorElementType(); - DebugLoc dl = Op.getDebugLoc(); + SDLoc dl(Op); SDValue N0 = Op.getOperand(0); SDValue N1 = Op.getOperand(1); @@ -7331,7 +7331,7 @@ X86TargetLowering::LowerINSERT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) const { MVT VT = Op.getValueType().getSimpleVT(); MVT EltVT = VT.getVectorElementType(); - DebugLoc dl = Op.getDebugLoc(); + SDLoc dl(Op); SDValue N0 = Op.getOperand(0); SDValue N1 = Op.getOperand(1); SDValue N2 = Op.getOperand(2); @@ -7376,7 +7376,7 @@ X86TargetLowering::LowerINSERT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) const { static SDValue LowerSCALAR_TO_VECTOR(SDValue Op, SelectionDAG &DAG) { LLVMContext *Context = DAG.getContext(); - DebugLoc dl = Op.getDebugLoc(); + SDLoc dl(Op); MVT OpVT = Op.getValueType().getSimpleVT(); // If this is a 256-bit vector result, first insert into a 128-bit @@ -7409,7 +7409,7 @@ static SDValue LowerSCALAR_TO_VECTOR(SDValue Op, SelectionDAG &DAG) { static SDValue LowerEXTRACT_SUBVECTOR(SDValue Op, const X86Subtarget *Subtarget, SelectionDAG &DAG) { if (Subtarget->hasFp256()) { - DebugLoc dl = Op.getNode()->getDebugLoc(); + SDLoc dl(Op.getNode()); SDValue Vec = Op.getNode()->getOperand(0); SDValue Idx = Op.getNode()->getOperand(1); @@ -7429,7 +7429,7 @@ static SDValue LowerEXTRACT_SUBVECTOR(SDValue Op, const X86Subtarget *Subtarget, static SDValue LowerINSERT_SUBVECTOR(SDValue Op, const X86Subtarget *Subtarget, SelectionDAG &DAG) { if (Subtarget->hasFp256()) { - DebugLoc dl = Op.getNode()->getDebugLoc(); + SDLoc dl(Op.getNode()); SDValue Vec = Op.getNode()->getOperand(0); SDValue SubVec = Op.getNode()->getOperand(1); SDValue Idx = Op.getNode()->getOperand(2); @@ -7471,13 +7471,13 @@ X86TargetLowering::LowerConstantPool(SDValue Op, SelectionDAG &DAG) const { SDValue Result = DAG.getTargetConstantPool(CP->getConstVal(), getPointerTy(), CP->getAlignment(), CP->getOffset(), OpFlag); - DebugLoc DL = CP->getDebugLoc(); + SDLoc DL(CP); Result = DAG.getNode(WrapperKind, DL, getPointerTy(), Result); // With PIC, the address is actually $g + Offset. if (OpFlag) { Result = DAG.getNode(ISD::ADD, DL, getPointerTy(), DAG.getNode(X86ISD::GlobalBaseReg, - DebugLoc(), getPointerTy()), + SDLoc(), getPointerTy()), Result); } @@ -7503,14 +7503,14 @@ SDValue X86TargetLowering::LowerJumpTable(SDValue Op, SelectionDAG &DAG) const { SDValue Result = DAG.getTargetJumpTable(JT->getIndex(), getPointerTy(), OpFlag); - DebugLoc DL = JT->getDebugLoc(); + SDLoc DL(JT); Result = DAG.getNode(WrapperKind, DL, getPointerTy(), Result); // With PIC, the address is actually $g + Offset. if (OpFlag) Result = DAG.getNode(ISD::ADD, DL, getPointerTy(), DAG.getNode(X86ISD::GlobalBaseReg, - DebugLoc(), getPointerTy()), + SDLoc(), getPointerTy()), Result); return Result; @@ -7541,7 +7541,7 @@ X86TargetLowering::LowerExternalSymbol(SDValue Op, SelectionDAG &DAG) const { SDValue Result = DAG.getTargetExternalSymbol(Sym, getPointerTy(), OpFlag); - DebugLoc DL = Op.getDebugLoc(); + SDLoc DL(Op); Result = DAG.getNode(WrapperKind, DL, getPointerTy(), Result); // With PIC, the address is actually $g + Offset. 
@@ -7549,7 +7549,7 @@ X86TargetLowering::LowerExternalSymbol(SDValue Op, SelectionDAG &DAG) const { !Subtarget->is64Bit()) { Result = DAG.getNode(ISD::ADD, DL, getPointerTy(), DAG.getNode(X86ISD::GlobalBaseReg, - DebugLoc(), getPointerTy()), + SDLoc(), getPointerTy()), Result); } @@ -7570,7 +7570,7 @@ X86TargetLowering::LowerBlockAddress(SDValue Op, SelectionDAG &DAG) const { CodeModel::Model M = getTargetMachine().getCodeModel(); const BlockAddress *BA = cast<BlockAddressSDNode>(Op)->getBlockAddress(); int64_t Offset = cast<BlockAddressSDNode>(Op)->getOffset(); - DebugLoc dl = Op.getDebugLoc(); + SDLoc dl(Op); SDValue Result = DAG.getTargetBlockAddress(BA, getPointerTy(), Offset, OpFlags); @@ -7591,7 +7591,7 @@ X86TargetLowering::LowerBlockAddress(SDValue Op, SelectionDAG &DAG) const { } SDValue -X86TargetLowering::LowerGlobalAddress(const GlobalValue *GV, DebugLoc dl, +X86TargetLowering::LowerGlobalAddress(const GlobalValue *GV, SDLoc dl, int64_t Offset, SelectionDAG &DAG) const { // Create the TargetGlobalAddress node, folding in the constant // offset if it is legal. @@ -7640,7 +7640,7 @@ SDValue X86TargetLowering::LowerGlobalAddress(SDValue Op, SelectionDAG &DAG) const { const GlobalValue *GV = cast<GlobalAddressSDNode>(Op)->getGlobal(); int64_t Offset = cast<GlobalAddressSDNode>(Op)->getOffset(); - return LowerGlobalAddress(GV, Op.getDebugLoc(), Offset, DAG); + return LowerGlobalAddress(GV, SDLoc(Op), Offset, DAG); } static SDValue @@ -7649,7 +7649,7 @@ GetTLSADDR(SelectionDAG &DAG, SDValue Chain, GlobalAddressSDNode *GA, unsigned char OperandFlags, bool LocalDynamic = false) { MachineFrameInfo *MFI = DAG.getMachineFunction().getFrameInfo(); SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue); - DebugLoc dl = GA->getDebugLoc(); + SDLoc dl(GA); SDValue TGA = DAG.getTargetGlobalAddress(GA->getGlobal(), dl, GA->getValueType(0), GA->getOffset(), @@ -7678,10 +7678,10 @@ static SDValue LowerToTLSGeneralDynamicModel32(GlobalAddressSDNode *GA, SelectionDAG &DAG, const EVT PtrVT) { SDValue InFlag; - DebugLoc dl = GA->getDebugLoc(); // ? function entry point might be better + SDLoc dl(GA); // ? function entry point might be better SDValue Chain = DAG.getCopyToReg(DAG.getEntryNode(), dl, X86::EBX, DAG.getNode(X86ISD::GlobalBaseReg, - DebugLoc(), PtrVT), InFlag); + SDLoc(), PtrVT), InFlag); InFlag = Chain.getValue(1); return GetTLSADDR(DAG, Chain, GA, &InFlag, PtrVT, X86::EAX, X86II::MO_TLSGD); @@ -7699,7 +7699,7 @@ static SDValue LowerToTLSLocalDynamicModel(GlobalAddressSDNode *GA, SelectionDAG &DAG, const EVT PtrVT, bool is64Bit) { - DebugLoc dl = GA->getDebugLoc(); + SDLoc dl(GA); // Get the start address of the TLS block for this module. X86MachineFunctionInfo* MFI = DAG.getMachineFunction() @@ -7713,7 +7713,7 @@ static SDValue LowerToTLSLocalDynamicModel(GlobalAddressSDNode *GA, } else { SDValue InFlag; SDValue Chain = DAG.getCopyToReg(DAG.getEntryNode(), dl, X86::EBX, - DAG.getNode(X86ISD::GlobalBaseReg, DebugLoc(), PtrVT), InFlag); + DAG.getNode(X86ISD::GlobalBaseReg, SDLoc(), PtrVT), InFlag); InFlag = Chain.getValue(1); Base = GetTLSADDR(DAG, Chain, GA, &InFlag, PtrVT, X86::EAX, X86II::MO_TLSLDM, /*LocalDynamic=*/true); @@ -7738,7 +7738,7 @@ static SDValue LowerToTLSLocalDynamicModel(GlobalAddressSDNode *GA, static SDValue LowerToTLSExecModel(GlobalAddressSDNode *GA, SelectionDAG &DAG, const EVT PtrVT, TLSModel::Model model, bool is64Bit, bool isPIC) { - DebugLoc dl = GA->getDebugLoc(); + SDLoc dl(GA); // Get the Thread Pointer, which is %gs:0 (32-bit) or %fs:0 (64-bit). 
Value *Ptr = Constant::getNullValue(Type::getInt8PtrTy(*DAG.getContext(), @@ -7777,7 +7777,7 @@ static SDValue LowerToTLSExecModel(GlobalAddressSDNode *GA, SelectionDAG &DAG, if (model == TLSModel::InitialExec) { if (isPIC && !is64Bit) { Offset = DAG.getNode(ISD::ADD, dl, PtrVT, - DAG.getNode(X86ISD::GlobalBaseReg, DebugLoc(), PtrVT), + DAG.getNode(X86ISD::GlobalBaseReg, SDLoc(), PtrVT), Offset); } @@ -7831,7 +7831,7 @@ X86TargetLowering::LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const { OpFlag = X86II::MO_TLVP_PIC_BASE; else OpFlag = X86II::MO_TLVP; - DebugLoc DL = Op.getDebugLoc(); + SDLoc DL(Op); SDValue Result = DAG.getTargetGlobalAddress(GA->getGlobal(), DL, GA->getValueType(0), GA->getOffset(), OpFlag); @@ -7841,7 +7841,7 @@ X86TargetLowering::LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const { if (PIC32) Offset = DAG.getNode(ISD::ADD, DL, getPointerTy(), DAG.getNode(X86ISD::GlobalBaseReg, - DebugLoc(), getPointerTy()), + SDLoc(), getPointerTy()), Offset); // Lowering the machine isd will make sure everything is in the right @@ -7878,7 +7878,7 @@ X86TargetLowering::LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const { // thread-localness. if (const GlobalAlias *GA = dyn_cast<GlobalAlias>(GV)) GV = GA->resolveAliasedGlobal(false); - DebugLoc dl = GA->getDebugLoc(); + SDLoc dl(GA); SDValue Chain = DAG.getEntryNode(); // Get the Thread Pointer, which is %fs:__tls_array (32-bit) or @@ -7936,7 +7936,7 @@ SDValue X86TargetLowering::LowerShiftParts(SDValue Op, SelectionDAG &DAG) const{ assert(Op.getNumOperands() == 3 && "Not a double-shift!"); EVT VT = Op.getValueType(); unsigned VTBits = VT.getSizeInBits(); - DebugLoc dl = Op.getDebugLoc(); + SDLoc dl(Op); bool isSRA = Op.getOpcode() == ISD::SRA_PARTS; SDValue ShOpLo = Op.getOperand(0); SDValue ShOpHi = Op.getOperand(1); @@ -7995,7 +7995,7 @@ SDValue X86TargetLowering::LowerSINT_TO_FP(SDValue Op, return Op; } - DebugLoc dl = Op.getDebugLoc(); + SDLoc dl(Op); unsigned Size = SrcVT.getSizeInBits()/8; MachineFunction &MF = DAG.getMachineFunction(); int SSFI = MF.getFrameInfo()->CreateStackObject(Size, Size, false); @@ -8011,7 +8011,7 @@ SDValue X86TargetLowering::BuildFILD(SDValue Op, EVT SrcVT, SDValue Chain, SDValue StackSlot, SelectionDAG &DAG) const { // Build the FILD - DebugLoc DL = Op.getDebugLoc(); + SDLoc DL(Op); SDVTList Tys; bool useSSE = isScalarFPTypeInSSEReg(Op.getValueType()); if (useSSE) @@ -8086,7 +8086,7 @@ SDValue X86TargetLowering::LowerUINT_TO_FP_i64(SDValue Op, #endif */ - DebugLoc dl = Op.getDebugLoc(); + SDLoc dl(Op); LLVMContext *Context = DAG.getContext(); // Build some magic constants. @@ -8140,7 +8140,7 @@ SDValue X86TargetLowering::LowerUINT_TO_FP_i64(SDValue Op, // LowerUINT_TO_FP_i32 - 32-bit unsigned integer to float expansion. SDValue X86TargetLowering::LowerUINT_TO_FP_i32(SDValue Op, SelectionDAG &DAG) const { - DebugLoc dl = Op.getDebugLoc(); + SDLoc dl(Op); // FP constant to bias correct the final result. 
SDValue Bias = DAG.getConstantFP(BitsToDouble(0x4330000000000000ULL), MVT::f64); @@ -8188,7 +8188,7 @@ SDValue X86TargetLowering::lowerUINT_TO_FP_vec(SDValue Op, SelectionDAG &DAG) const { SDValue N0 = Op.getOperand(0); EVT SVT = N0.getValueType(); - DebugLoc dl = Op.getDebugLoc(); + SDLoc dl(Op); assert((SVT == MVT::v4i8 || SVT == MVT::v4i16 || SVT == MVT::v8i8 || SVT == MVT::v8i16) && @@ -8203,7 +8203,7 @@ SDValue X86TargetLowering::lowerUINT_TO_FP_vec(SDValue Op, SDValue X86TargetLowering::LowerUINT_TO_FP(SDValue Op, SelectionDAG &DAG) const { SDValue N0 = Op.getOperand(0); - DebugLoc dl = Op.getDebugLoc(); + SDLoc dl(Op); if (Op.getValueType().isVector()) return lowerUINT_TO_FP_vec(Op, DAG); @@ -8292,7 +8292,7 @@ SDValue X86TargetLowering::LowerUINT_TO_FP(SDValue Op, std::pair<SDValue,SDValue> X86TargetLowering:: FP_TO_INTHelper(SDValue Op, SelectionDAG &DAG, bool IsSigned, bool IsReplace) const { - DebugLoc DL = Op.getDebugLoc(); + SDLoc DL(Op); EVT DstTy = Op.getValueType(); @@ -8389,7 +8389,7 @@ static SDValue LowerAVXExtend(SDValue Op, SelectionDAG &DAG, MVT VT = Op->getValueType(0).getSimpleVT(); SDValue In = Op->getOperand(0); MVT InVT = In.getValueType().getSimpleVT(); - DebugLoc dl = Op->getDebugLoc(); + SDLoc dl(Op); // Optimize vectors in AVX mode: // @@ -8438,7 +8438,7 @@ SDValue X86TargetLowering::LowerANY_EXTEND(SDValue Op, } SDValue X86TargetLowering::LowerZERO_EXTEND(SDValue Op, SelectionDAG &DAG) const { - DebugLoc DL = Op.getDebugLoc(); + SDLoc DL(Op); MVT VT = Op.getValueType().getSimpleVT(); SDValue In = Op.getOperand(0); MVT SVT = In.getValueType().getSimpleVT(); @@ -8470,7 +8470,7 @@ SDValue X86TargetLowering::LowerZERO_EXTEND(SDValue Op, } SDValue X86TargetLowering::LowerTRUNCATE(SDValue Op, SelectionDAG &DAG) const { - DebugLoc DL = Op.getDebugLoc(); + SDLoc DL(Op); MVT VT = Op.getValueType().getSimpleVT(); SDValue In = Op.getOperand(0); MVT SVT = In.getValueType().getSimpleVT(); @@ -8591,8 +8591,8 @@ SDValue X86TargetLowering::LowerFP_TO_SINT(SDValue Op, MVT VT = Op.getValueType().getSimpleVT(); if (VT.isVector()) { if (VT == MVT::v8i16) - return DAG.getNode(ISD::TRUNCATE, Op.getDebugLoc(), VT, - DAG.getNode(ISD::FP_TO_SINT, Op.getDebugLoc(), + return DAG.getNode(ISD::TRUNCATE, SDLoc(Op), VT, + DAG.getNode(ISD::FP_TO_SINT, SDLoc(Op), MVT::v8i32, Op.getOperand(0))); return SDValue(); } @@ -8605,7 +8605,7 @@ SDValue X86TargetLowering::LowerFP_TO_SINT(SDValue Op, if (StackSlot.getNode()) // Load the result. - return DAG.getLoad(Op.getValueType(), Op.getDebugLoc(), + return DAG.getLoad(Op.getValueType(), SDLoc(Op), FIST, StackSlot, MachinePointerInfo(), false, false, false, 0); @@ -8622,7 +8622,7 @@ SDValue X86TargetLowering::LowerFP_TO_UINT(SDValue Op, if (StackSlot.getNode()) // Load the result. 
- return DAG.getLoad(Op.getValueType(), Op.getDebugLoc(), + return DAG.getLoad(Op.getValueType(), SDLoc(Op), FIST, StackSlot, MachinePointerInfo(), false, false, false, 0); @@ -8631,7 +8631,7 @@ SDValue X86TargetLowering::LowerFP_TO_UINT(SDValue Op, } static SDValue LowerFP_EXTEND(SDValue Op, SelectionDAG &DAG) { - DebugLoc DL = Op.getDebugLoc(); + SDLoc DL(Op); MVT VT = Op.getValueType().getSimpleVT(); SDValue In = Op.getOperand(0); MVT SVT = In.getValueType().getSimpleVT(); @@ -8645,7 +8645,7 @@ static SDValue LowerFP_EXTEND(SDValue Op, SelectionDAG &DAG) { SDValue X86TargetLowering::LowerFABS(SDValue Op, SelectionDAG &DAG) const { LLVMContext *Context = DAG.getContext(); - DebugLoc dl = Op.getDebugLoc(); + SDLoc dl(Op); MVT VT = Op.getValueType().getSimpleVT(); MVT EltVT = VT; unsigned NumElts = VT == MVT::f64 ? 2 : 4; @@ -8679,7 +8679,7 @@ SDValue X86TargetLowering::LowerFABS(SDValue Op, SelectionDAG &DAG) const { SDValue X86TargetLowering::LowerFNEG(SDValue Op, SelectionDAG &DAG) const { LLVMContext *Context = DAG.getContext(); - DebugLoc dl = Op.getDebugLoc(); + SDLoc dl(Op); MVT VT = Op.getValueType().getSimpleVT(); MVT EltVT = VT; unsigned NumElts = VT == MVT::f64 ? 2 : 4; @@ -8716,7 +8716,7 @@ SDValue X86TargetLowering::LowerFCOPYSIGN(SDValue Op, SelectionDAG &DAG) const { LLVMContext *Context = DAG.getContext(); SDValue Op0 = Op.getOperand(0); SDValue Op1 = Op.getOperand(1); - DebugLoc dl = Op.getDebugLoc(); + SDLoc dl(Op); MVT VT = Op.getValueType().getSimpleVT(); MVT SrcVT = Op1.getValueType().getSimpleVT(); @@ -8793,7 +8793,7 @@ SDValue X86TargetLowering::LowerFCOPYSIGN(SDValue Op, SelectionDAG &DAG) const { static SDValue LowerFGETSIGN(SDValue Op, SelectionDAG &DAG) { SDValue N0 = Op.getOperand(0); - DebugLoc dl = Op.getDebugLoc(); + SDLoc dl(Op); MVT VT = Op.getValueType().getSimpleVT(); // Lower ISD::FGETSIGN to (AND (X86ISD::FGETSIGNx86 ...) 1). @@ -8815,7 +8815,7 @@ SDValue X86TargetLowering::LowerVectorAllZeroTest(SDValue Op, return SDValue(); SDNode *N = Op.getNode(); - DebugLoc DL = N->getDebugLoc(); + SDLoc DL(N); SmallVector<SDValue, 8> Opnds; DenseMap<SDValue, unsigned> VecInMap; @@ -8899,7 +8899,7 @@ SDValue X86TargetLowering::LowerVectorAllZeroTest(SDValue Op, /// equivalent. SDValue X86TargetLowering::EmitTest(SDValue Op, unsigned X86CC, SelectionDAG &DAG) const { - DebugLoc dl = Op.getDebugLoc(); + SDLoc dl(Op); // CF and OF aren't always set the way we want. Determine which // of these we need. @@ -9114,7 +9114,7 @@ SDValue X86TargetLowering::EmitCmp(SDValue Op0, SDValue Op1, unsigned X86CC, if (C->getAPIntValue() == 0) return EmitTest(Op0, X86CC, DAG); - DebugLoc dl = Op0.getDebugLoc(); + SDLoc dl(Op0); if ((Op0.getValueType() == MVT::i8 || Op0.getValueType() == MVT::i16 || Op0.getValueType() == MVT::i32 || Op0.getValueType() == MVT::i64)) { // Use SUB instead of CMP to enable CSE between SUB and CMP. @@ -9141,7 +9141,7 @@ SDValue X86TargetLowering::ConvertCmpIfNecessary(SDValue Cmp, // FUCOMI, which writes the comparison result to FPSW instead of EFLAGS. Hence // build an SDNode sequence that transfers the result from FPSW into EFLAGS: // (X86sahf (trunc (srl (X86fp_stsw (trunc (X86cmp ...)), 8)))) - DebugLoc dl = Cmp.getDebugLoc(); + SDLoc dl(Cmp); SDValue TruncFPSW = DAG.getNode(ISD::TRUNCATE, dl, MVT::i16, Cmp); SDValue FNStSW = DAG.getNode(X86ISD::FNSTSW16r, dl, MVT::i16, TruncFPSW); SDValue Srl = DAG.getNode(ISD::SRL, dl, MVT::i16, FNStSW, @@ -9158,7 +9158,7 @@ static bool isAllOnes(SDValue V) { /// LowerToBT - Result of 'and' is compared against zero. 
Turn it into a BT node /// if it's possible. SDValue X86TargetLowering::LowerToBT(SDValue And, ISD::CondCode CC, - DebugLoc dl, SelectionDAG &DAG) const { + SDLoc dl, SelectionDAG &DAG) const { SDValue Op0 = And.getOperand(0); SDValue Op1 = And.getOperand(1); if (Op0.getOpcode() == ISD::TRUNCATE) @@ -9235,7 +9235,7 @@ static SDValue Lower256IntVSETCC(SDValue Op, SelectionDAG &DAG) { "Unsupported value type for operation"); unsigned NumElems = VT.getVectorNumElements(); - DebugLoc dl = Op.getDebugLoc(); + SDLoc dl(Op); SDValue CC = Op.getOperand(2); // Extract the LHS vectors @@ -9265,7 +9265,7 @@ static SDValue LowerVSETCC(SDValue Op, const X86Subtarget *Subtarget, MVT VT = Op.getValueType().getSimpleVT(); ISD::CondCode SetCCOpcode = cast<CondCodeSDNode>(CC)->get(); bool isFP = Op.getOperand(1).getValueType().getSimpleVT().isFloatingPoint(); - DebugLoc dl = Op.getDebugLoc(); + SDLoc dl(Op); if (isFP) { #ifndef NDEBUG @@ -9455,7 +9455,7 @@ SDValue X86TargetLowering::LowerSETCC(SDValue Op, SelectionDAG &DAG) const { assert(VT == MVT::i8 && "SetCC type must be 8-bit integer"); SDValue Op0 = Op.getOperand(0); SDValue Op1 = Op.getOperand(1); - DebugLoc dl = Op.getDebugLoc(); + SDLoc dl(Op); ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(2))->get(); // Optimize to BT if possible. @@ -9549,7 +9549,7 @@ SDValue X86TargetLowering::LowerSELECT(SDValue Op, SelectionDAG &DAG) const { SDValue Cond = Op.getOperand(0); SDValue Op1 = Op.getOperand(1); SDValue Op2 = Op.getOperand(2); - DebugLoc DL = Op.getDebugLoc(); + SDLoc DL(Op); SDValue CC; if (Cond.getOpcode() == ISD::SETCC) { @@ -9739,7 +9739,7 @@ SDValue X86TargetLowering::LowerSIGN_EXTEND(SDValue Op, MVT VT = Op->getValueType(0).getSimpleVT(); SDValue In = Op->getOperand(0); MVT InVT = In.getValueType().getSimpleVT(); - DebugLoc dl = Op->getDebugLoc(); + SDLoc dl(Op); if ((VT != MVT::v4i64 || InVT != MVT::v4i32) && (VT != MVT::v8i32 || InVT != MVT::v8i16)) @@ -9812,7 +9812,7 @@ SDValue X86TargetLowering::LowerBRCOND(SDValue Op, SelectionDAG &DAG) const { SDValue Chain = Op.getOperand(0); SDValue Cond = Op.getOperand(1); SDValue Dest = Op.getOperand(2); - DebugLoc dl = Op.getDebugLoc(); + SDLoc dl(Op); SDValue CC; bool Inverted = false; @@ -10082,7 +10082,7 @@ X86TargetLowering::LowerDYNAMIC_STACKALLOC(SDValue Op, "This should be used only on Windows targets or when segmented stacks " "are being used"); assert(!Subtarget->isTargetEnvMacho() && "Not implemented"); - DebugLoc dl = Op.getDebugLoc(); + SDLoc dl(Op); // Get the inputs. 
SDValue Chain = Op.getOperand(0); @@ -10140,7 +10140,7 @@ SDValue X86TargetLowering::LowerVASTART(SDValue Op, SelectionDAG &DAG) const { X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>(); const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue(); - DebugLoc DL = Op.getDebugLoc(); + SDLoc DL(Op); if (!Subtarget->is64Bit() || Subtarget->isTargetWin64()) { // vastart just stores the address of the VarArgsFrameIndex slot into the @@ -10207,7 +10207,7 @@ SDValue X86TargetLowering::LowerVAARG(SDValue Op, SelectionDAG &DAG) const { SDValue SrcPtr = Op.getOperand(1); const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue(); unsigned Align = Op.getConstantOperandVal(3); - DebugLoc dl = Op.getDebugLoc(); + SDLoc dl(Op); EVT ArgVT = Op.getNode()->getValueType(0); Type *ArgTy = ArgVT.getTypeForEVT(*DAG.getContext()); @@ -10273,7 +10273,7 @@ static SDValue LowerVACOPY(SDValue Op, const X86Subtarget *Subtarget, SDValue SrcPtr = Op.getOperand(2); const Value *DstSV = cast<SrcValueSDNode>(Op.getOperand(3))->getValue(); const Value *SrcSV = cast<SrcValueSDNode>(Op.getOperand(4))->getValue(); - DebugLoc DL = Op.getDebugLoc(); + SDLoc DL(Op); return DAG.getMemcpy(Chain, DL, DstPtr, SrcPtr, DAG.getIntPtrConstant(24), 8, /*isVolatile*/false, @@ -10283,7 +10283,7 @@ static SDValue LowerVACOPY(SDValue Op, const X86Subtarget *Subtarget, // getTargetVShiftNode - Handle vector element shifts where the shift amount // may or may not be a constant. Takes immediate version of shift as input. -static SDValue getTargetVShiftNode(unsigned Opc, DebugLoc dl, EVT VT, +static SDValue getTargetVShiftNode(unsigned Opc, SDLoc dl, EVT VT, SDValue SrcOp, SDValue ShAmt, SelectionDAG &DAG) { assert(ShAmt.getValueType() == MVT::i32 && "ShAmt is not i32"); @@ -10327,7 +10327,7 @@ static SDValue getTargetVShiftNode(unsigned Opc, DebugLoc dl, EVT VT, } static SDValue LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG) { - DebugLoc dl = Op.getDebugLoc(); + SDLoc dl(Op); unsigned IntNo = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue(); switch (IntNo) { default: return SDValue(); // Don't custom lower most intrinsics. @@ -10972,7 +10972,7 @@ static SDValue LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG) { } static SDValue LowerINTRINSIC_W_CHAIN(SDValue Op, SelectionDAG &DAG) { - DebugLoc dl = Op.getDebugLoc(); + SDLoc dl(Op); unsigned IntNo = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue(); switch (IntNo) { default: return SDValue(); // Don't custom lower most intrinsics. 
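Beyond the locals, helper routines that used to thread a DebugLoc through their parameter lists, such as getTargetVShiftNode above and LowerToBT and MaterializeSETB further down, now take an SDLoc by value. The sketch below shows that shape; emitAddOne and lowerViaAddOne are hypothetical stand-ins, not functions from this patch.

#include "llvm/CodeGen/SelectionDAG.h"
using namespace llvm;

// The location parameter changes type; how it is threaded through does not.
static SDValue emitAddOne(SDLoc dl, EVT VT, SDValue V, SelectionDAG &DAG) {
  return DAG.getNode(ISD::ADD, dl, VT, V,
                     DAG.getConstant(1, VT)); // integer VT assumed
}

static SDValue lowerViaAddOne(SDNode *N, SelectionDAG &DAG) {
  // Callers no longer write N->getDebugLoc(); they construct SDLoc(N).
  return emitAddOne(SDLoc(N), N->getValueType(0), N->getOperand(0), DAG);
}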
@@ -11027,7 +11027,7 @@ SDValue X86TargetLowering::LowerRETURNADDR(SDValue Op, MFI->setReturnAddressIsTaken(true); unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue(); - DebugLoc dl = Op.getDebugLoc(); + SDLoc dl(Op); EVT PtrVT = getPointerTy(); if (Depth > 0) { @@ -11051,7 +11051,7 @@ SDValue X86TargetLowering::LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const { MFI->setFrameAddressIsTaken(true); EVT VT = Op.getValueType(); - DebugLoc dl = Op.getDebugLoc(); // FIXME probably not meaningful + SDLoc dl(Op); // FIXME probably not meaningful unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue(); unsigned FrameReg = RegInfo->getFrameRegister(DAG.getMachineFunction()); assert(((FrameReg == X86::RBP && VT == MVT::i64) || @@ -11074,7 +11074,7 @@ SDValue X86TargetLowering::LowerEH_RETURN(SDValue Op, SelectionDAG &DAG) const { SDValue Chain = Op.getOperand(0); SDValue Offset = Op.getOperand(1); SDValue Handler = Op.getOperand(2); - DebugLoc dl = Op.getDebugLoc(); + SDLoc dl (Op); EVT PtrVT = getPointerTy(); unsigned FrameReg = RegInfo->getFrameRegister(DAG.getMachineFunction()); @@ -11097,7 +11097,7 @@ SDValue X86TargetLowering::LowerEH_RETURN(SDValue Op, SelectionDAG &DAG) const { SDValue X86TargetLowering::lowerEH_SJLJ_SETJMP(SDValue Op, SelectionDAG &DAG) const { - DebugLoc DL = Op.getDebugLoc(); + SDLoc DL(Op); return DAG.getNode(X86ISD::EH_SJLJ_SETJMP, DL, DAG.getVTList(MVT::i32, MVT::Other), Op.getOperand(0), Op.getOperand(1)); @@ -11105,7 +11105,7 @@ SDValue X86TargetLowering::lowerEH_SJLJ_SETJMP(SDValue Op, SDValue X86TargetLowering::lowerEH_SJLJ_LONGJMP(SDValue Op, SelectionDAG &DAG) const { - DebugLoc DL = Op.getDebugLoc(); + SDLoc DL(Op); return DAG.getNode(X86ISD::EH_SJLJ_LONGJMP, DL, MVT::Other, Op.getOperand(0), Op.getOperand(1)); } @@ -11120,7 +11120,7 @@ SDValue X86TargetLowering::LowerINIT_TRAMPOLINE(SDValue Op, SDValue Trmp = Op.getOperand(1); // trampoline SDValue FPtr = Op.getOperand(2); // nested function SDValue Nest = Op.getOperand(3); // 'nest' parameter value - DebugLoc dl = Op.getDebugLoc(); + SDLoc dl (Op); const Value *TrmpAddr = cast<SrcValueSDNode>(Op.getOperand(4))->getValue(); const TargetRegisterInfo* TRI = getTargetMachine().getRegisterInfo(); @@ -11290,7 +11290,7 @@ SDValue X86TargetLowering::LowerFLT_ROUNDS_(SDValue Op, const TargetFrameLowering &TFI = *TM.getFrameLowering(); unsigned StackAlignment = TFI.getStackAlignment(); EVT VT = Op.getValueType(); - DebugLoc DL = Op.getDebugLoc(); + SDLoc DL(Op); // Save FP Control Word to stack slot int SSFI = MF.getFrameInfo()->CreateStackObject(2, StackAlignment, false); @@ -11337,7 +11337,7 @@ static SDValue LowerCTLZ(SDValue Op, SelectionDAG &DAG) { EVT VT = Op.getValueType(); EVT OpVT = VT; unsigned NumBits = VT.getSizeInBits(); - DebugLoc dl = Op.getDebugLoc(); + SDLoc dl(Op); Op = Op.getOperand(0); if (VT == MVT::i8) { @@ -11371,7 +11371,7 @@ static SDValue LowerCTLZ_ZERO_UNDEF(SDValue Op, SelectionDAG &DAG) { EVT VT = Op.getValueType(); EVT OpVT = VT; unsigned NumBits = VT.getSizeInBits(); - DebugLoc dl = Op.getDebugLoc(); + SDLoc dl(Op); Op = Op.getOperand(0); if (VT == MVT::i8) { @@ -11395,7 +11395,7 @@ static SDValue LowerCTLZ_ZERO_UNDEF(SDValue Op, SelectionDAG &DAG) { static SDValue LowerCTTZ(SDValue Op, SelectionDAG &DAG) { EVT VT = Op.getValueType(); unsigned NumBits = VT.getSizeInBits(); - DebugLoc dl = Op.getDebugLoc(); + SDLoc dl(Op); Op = Op.getOperand(0); // Issue a bsf (scan bits forward) which also sets EFLAGS. 
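A few of the sites above keep their old caveats, LowerFRAMEADDR's "probably not meaningful" and the general-dynamic TLS "function entry point might be better", because the switch to SDLoc changes how a location is carried, not which location is chosen. Where a plain DebugLoc is still genuinely required it can be recovered from the SDLoc; the sketch below assumes the getDebugLoc() accessor on the SDLoc class added earlier in this series, and debugLocOf is a hypothetical utility.

#include "llvm/CodeGen/SelectionDAGNodes.h"
using namespace llvm;

// Recover the underlying DebugLoc when some other API still expects one.
static DebugLoc debugLocOf(SDValue Op) {
  SDLoc dl(Op);            // records Op's node: debug location plus IR order
  return dl.getDebugLoc(); // the old value remains reachable when needed
}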
@@ -11421,7 +11421,7 @@ static SDValue Lower256IntArith(SDValue Op, SelectionDAG &DAG) { "Unsupported value type for operation"); unsigned NumElems = VT.getVectorNumElements(); - DebugLoc dl = Op.getDebugLoc(); + SDLoc dl(Op); // Extract the LHS vectors SDValue LHS = Op.getOperand(0); @@ -11457,7 +11457,7 @@ static SDValue LowerSUB(SDValue Op, SelectionDAG &DAG) { static SDValue LowerMUL(SDValue Op, const X86Subtarget *Subtarget, SelectionDAG &DAG) { - DebugLoc dl = Op.getDebugLoc(); + SDLoc dl(Op); EVT VT = Op.getValueType(); // Decompose 256-bit ops into smaller 128-bit ops. @@ -11533,7 +11533,7 @@ SDValue X86TargetLowering::LowerSDIV(SDValue Op, SelectionDAG &DAG) const { EVT EltTy = VT.getVectorElementType(); unsigned NumElts = VT.getVectorNumElements(); SDValue N0 = Op.getOperand(0); - DebugLoc dl = Op.getDebugLoc(); + SDLoc dl(Op); // Lower sdiv X, pow2-const. BuildVectorSDNode *C = dyn_cast<BuildVectorSDNode>(Op.getOperand(1)); @@ -11574,7 +11574,7 @@ SDValue X86TargetLowering::LowerSDIV(SDValue Op, SelectionDAG &DAG) const { static SDValue LowerScalarImmediateShift(SDValue Op, SelectionDAG &DAG, const X86Subtarget *Subtarget) { EVT VT = Op.getValueType(); - DebugLoc dl = Op.getDebugLoc(); + SDLoc dl(Op); SDValue R = Op.getOperand(0); SDValue Amt = Op.getOperand(1); @@ -11740,7 +11740,7 @@ static SDValue LowerScalarImmediateShift(SDValue Op, SelectionDAG &DAG, static SDValue LowerScalarVariableShift(SDValue Op, SelectionDAG &DAG, const X86Subtarget* Subtarget) { EVT VT = Op.getValueType(); - DebugLoc dl = Op.getDebugLoc(); + SDLoc dl(Op); SDValue R = Op.getOperand(0); SDValue Amt = Op.getOperand(1); @@ -11876,7 +11876,7 @@ static SDValue LowerScalarVariableShift(SDValue Op, SelectionDAG &DAG, SDValue X86TargetLowering::LowerShift(SDValue Op, SelectionDAG &DAG) const { EVT VT = Op.getValueType(); - DebugLoc dl = Op.getDebugLoc(); + SDLoc dl(Op); SDValue R = Op.getOperand(0); SDValue Amt = Op.getOperand(1); SDValue V; @@ -12012,7 +12012,7 @@ static SDValue LowerXALUO(SDValue Op, SelectionDAG &DAG) { SDValue RHS = N->getOperand(1); unsigned BaseOp = 0; unsigned Cond = 0; - DebugLoc DL = Op.getDebugLoc(); + SDLoc DL(Op); switch (Op.getOpcode()) { default: llvm_unreachable("Unknown ovf instruction!"); case ISD::SADDO: @@ -12079,7 +12079,7 @@ static SDValue LowerXALUO(SDValue Op, SelectionDAG &DAG) { SDValue X86TargetLowering::LowerSIGN_EXTEND_INREG(SDValue Op, SelectionDAG &DAG) const { - DebugLoc dl = Op.getDebugLoc(); + SDLoc dl(Op); EVT ExtraVT = cast<VTSDNode>(Op.getOperand(1))->getVT(); EVT VT = Op.getValueType(); @@ -12146,7 +12146,7 @@ SDValue X86TargetLowering::LowerSIGN_EXTEND_INREG(SDValue Op, static SDValue LowerATOMIC_FENCE(SDValue Op, const X86Subtarget *Subtarget, SelectionDAG &DAG) { - DebugLoc dl = Op.getDebugLoc(); + SDLoc dl(Op); AtomicOrdering FenceOrdering = static_cast<AtomicOrdering>( cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue()); SynchronizationScope FenceScope = static_cast<SynchronizationScope>( @@ -12183,7 +12183,7 @@ static SDValue LowerATOMIC_FENCE(SDValue Op, const X86Subtarget *Subtarget, static SDValue LowerCMP_SWAP(SDValue Op, const X86Subtarget *Subtarget, SelectionDAG &DAG) { EVT T = Op.getValueType(); - DebugLoc DL = Op.getDebugLoc(); + SDLoc DL(Op); unsigned Reg = 0; unsigned size = 0; switch(T.getSimpleVT().SimpleTy) { @@ -12217,7 +12217,7 @@ static SDValue LowerREADCYCLECOUNTER(SDValue Op, const X86Subtarget *Subtarget, assert(Subtarget->is64Bit() && "Result not type legalized?"); SDVTList Tys = DAG.getVTList(MVT::Other, MVT::Glue); SDValue 
TheChain = Op.getOperand(0); - DebugLoc dl = Op.getDebugLoc(); + SDLoc dl(Op); SDValue rd = DAG.getNode(X86ISD::RDTSC_DAG, dl, Tys, &TheChain, 1); SDValue rax = DAG.getCopyFromReg(rd, dl, X86::RAX, MVT::i64, rd.getValue(1)); SDValue rdx = DAG.getCopyFromReg(rax.getValue(1), dl, X86::RDX, MVT::i64, @@ -12253,7 +12253,7 @@ SDValue X86TargetLowering::LowerBITCAST(SDValue Op, SelectionDAG &DAG) const { static SDValue LowerLOAD_SUB(SDValue Op, SelectionDAG &DAG) { SDNode *Node = Op.getNode(); - DebugLoc dl = Node->getDebugLoc(); + SDLoc dl(Node); EVT T = Node->getValueType(0); SDValue negOp = DAG.getNode(ISD::SUB, dl, T, DAG.getConstant(0, T), Node->getOperand(2)); @@ -12269,7 +12269,7 @@ static SDValue LowerLOAD_SUB(SDValue Op, SelectionDAG &DAG) { static SDValue LowerATOMIC_STORE(SDValue Op, SelectionDAG &DAG) { SDNode *Node = Op.getNode(); - DebugLoc dl = Node->getDebugLoc(); + SDLoc dl(Node); EVT VT = cast<AtomicSDNode>(Node)->getMemoryVT(); // Convert seq_cst store -> xchg @@ -12312,9 +12312,9 @@ static SDValue LowerADDC_ADDE_SUBC_SUBE(SDValue Op, SelectionDAG &DAG) { } if (!ExtraOp) - return DAG.getNode(Opc, Op->getDebugLoc(), VTs, Op.getOperand(0), + return DAG.getNode(Opc, SDLoc(Op), VTs, Op.getOperand(0), Op.getOperand(1)); - return DAG.getNode(Opc, Op->getDebugLoc(), VTs, Op.getOperand(0), + return DAG.getNode(Opc, SDLoc(Op), VTs, Op.getOperand(0), Op.getOperand(1), Op.getOperand(2)); } @@ -12324,7 +12324,7 @@ SDValue X86TargetLowering::LowerFSINCOS(SDValue Op, SelectionDAG &DAG) const { // For MacOSX, we want to call an alternative entry point: __sincos_stret, // which returns the values as { float, float } (in XMM0) or // { double, double } (which is returned in XMM0, XMM1). - DebugLoc dl = Op.getDebugLoc(); + SDLoc dl(Op); SDValue Arg = Op.getOperand(0); EVT ArgVT = Arg.getValueType(); Type *ArgTy = ArgVT.getTypeForEVT(*DAG.getContext()); @@ -12457,7 +12457,7 @@ SDValue X86TargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const { static void ReplaceATOMIC_LOAD(SDNode *Node, SmallVectorImpl<SDValue> &Results, SelectionDAG &DAG) { - DebugLoc dl = Node->getDebugLoc(); + SDLoc dl(Node); EVT VT = cast<AtomicSDNode>(Node)->getMemoryVT(); // Convert wide load -> cmpxchg8b/cmpxchg16b @@ -12478,7 +12478,7 @@ static void ReplaceATOMIC_LOAD(SDNode *Node, static void ReplaceATOMIC_BINARY_64(SDNode *Node, SmallVectorImpl<SDValue>&Results, SelectionDAG &DAG, unsigned NewOp) { - DebugLoc dl = Node->getDebugLoc(); + SDLoc dl(Node); assert (Node->getValueType(0) == MVT::i64 && "Only know how to expand i64 atomics"); @@ -12503,7 +12503,7 @@ ReplaceATOMIC_BINARY_64(SDNode *Node, SmallVectorImpl<SDValue>&Results, void X86TargetLowering::ReplaceNodeResults(SDNode *N, SmallVectorImpl<SDValue>&Results, SelectionDAG &DAG) const { - DebugLoc dl = N->getDebugLoc(); + SDLoc dl(N); const TargetLowering &TLI = DAG.getTargetLoweringInfo(); switch (N->getOpcode()) { default: @@ -15057,7 +15057,7 @@ static bool isShuffleLow128VectorInsertHigh(ShuffleVectorSDNode *SVOp) { static SDValue PerformShuffleCombine256(SDNode *N, SelectionDAG &DAG, TargetLowering::DAGCombinerInfo &DCI, const X86Subtarget* Subtarget) { - DebugLoc dl = N->getDebugLoc(); + SDLoc dl(N); ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(N); SDValue V1 = SVOp->getOperand(0); SDValue V2 = SVOp->getOperand(1); @@ -15153,7 +15153,7 @@ static SDValue PerformShuffleCombine256(SDNode *N, SelectionDAG &DAG, static SDValue PerformShuffleCombine(SDNode *N, SelectionDAG &DAG, TargetLowering::DAGCombinerInfo &DCI, const X86Subtarget 
*Subtarget) { - DebugLoc dl = N->getDebugLoc(); + SDLoc dl(N); EVT VT = N->getValueType(0); // Don't create instructions with illegal types after legalize types has run. @@ -15272,7 +15272,7 @@ static SDValue XFormVExtractWithShuffleIntoLoad(SDNode *N, SelectionDAG &DAG, // All checks match so transform back to vector_shuffle so that DAG combiner // can finish the job - DebugLoc dl = N->getDebugLoc(); + SDLoc dl(N); // Create shuffle node taking into account the case that its a unary shuffle SDValue Shuffle = (UnaryShuffle) ? DAG.getUNDEF(VT) : InVec.getOperand(1); @@ -15299,7 +15299,7 @@ static SDValue PerformEXTRACT_VECTOR_ELTCombine(SDNode *N, SelectionDAG &DAG, if (InputVector.getNode()->getOpcode() == llvm::ISD::BITCAST && InputVector.getNode()->getOperand(0).getValueType() == MVT::x86mmx && InputVector.hasOneUse() && N->getValueType(0) == MVT::i32) - return DAG.getNode(X86ISD::MMX_MOVD2W, InputVector.getDebugLoc(), + return DAG.getNode(X86ISD::MMX_MOVD2W, SDLoc(InputVector), N->getValueType(0), InputVector.getNode()->getOperand(0)); @@ -15344,7 +15344,7 @@ static SDValue PerformEXTRACT_VECTOR_ELTCombine(SDNode *N, SelectionDAG &DAG, return SDValue(); // Ok, we've now decided to do the transformation. - DebugLoc dl = InputVector.getDebugLoc(); + SDLoc dl(InputVector); // Store the value to a temporary stack slot. SDValue StackPtr = DAG.CreateStackTemporary(InputVector.getValueType()); @@ -15455,7 +15455,7 @@ static unsigned matchIntegerMINMAX(SDValue Cond, EVT VT, SDValue LHS, static SDValue PerformSELECTCombine(SDNode *N, SelectionDAG &DAG, TargetLowering::DAGCombinerInfo &DCI, const X86Subtarget *Subtarget) { - DebugLoc DL = N->getDebugLoc(); + SDLoc DL(N); SDValue Cond = N->getOperand(0); // Get the LHS/RHS of the select. SDValue LHS = N->getOperand(1); @@ -15723,7 +15723,7 @@ static SDValue PerformSELECTCombine(SDNode *N, SelectionDAG &DAG, case ISD::SETLT: case ISD::SETGT: { ISD::CondCode NewCC = (CC == ISD::SETLT) ? ISD::SETLE : ISD::SETGE; - Cond = DAG.getSetCC(Cond.getDebugLoc(), Cond.getValueType(), + Cond = DAG.getSetCC(SDLoc(Cond), Cond.getValueType(), Cond.getOperand(0), Cond.getOperand(1), NewCC); return DAG.getNode(ISD::SELECT, DL, VT, Cond, LHS, RHS); } @@ -15999,7 +15999,7 @@ static SDValue checkBoolTestSetCCCombine(SDValue Cmp, X86::CondCode &CC) { static SDValue PerformCMOVCombine(SDNode *N, SelectionDAG &DAG, TargetLowering::DAGCombinerInfo &DCI, const X86Subtarget *Subtarget) { - DebugLoc DL = N->getDebugLoc(); + SDLoc DL(N); // If the flag operand isn't dead, don't touch this CMOV. if (N->getNumValues() == 2 && !SDValue(N, 1).use_empty()) @@ -16202,7 +16202,7 @@ static SDValue PerformMulCombine(SDNode *N, SelectionDAG &DAG, } if (MulAmt2 && (isPowerOf2_64(MulAmt2) || MulAmt2 == 3 || MulAmt2 == 5 || MulAmt2 == 9)){ - DebugLoc DL = N->getDebugLoc(); + SDLoc DL(N); if (isPowerOf2_64(MulAmt2) && !(N->hasOneUse() && N->use_begin()->getOpcode() == ISD::ADD)) @@ -16252,7 +16252,7 @@ static SDValue PerformSHLCombine(SDNode *N, SelectionDAG &DAG) { APInt ShAmt = N1C->getAPIntValue(); Mask = Mask.shl(ShAmt); if (Mask != 0) - return DAG.getNode(ISD::AND, N->getDebugLoc(), VT, + return DAG.getNode(ISD::AND, SDLoc(N), VT, N00, DAG.getConstant(Mask, VT)); } } @@ -16268,7 +16268,7 @@ static SDValue PerformSHLCombine(SDNode *N, SelectionDAG &DAG) { // hardware support for this operation. This is better expressed as an ADD // of two values. 
if (N1C && (1 == N1C->getZExtValue())) { - return DAG.getNode(ISD::ADD, N->getDebugLoc(), VT, N0, N0); + return DAG.getNode(ISD::ADD, SDLoc(N), VT, N0, N0); } } @@ -16302,7 +16302,7 @@ static SDValue CMPEQCombine(SDNode *N, SelectionDAG &DAG, SDValue N1 = N->getOperand(1); SDValue CMP0 = N0->getOperand(1); SDValue CMP1 = N1->getOperand(1); - DebugLoc DL = N->getDebugLoc(); + SDLoc DL(N); // The SETCCs should both refer to the same CMP. if (CMP0.getOpcode() != X86ISD::CMP || CMP0 != CMP1) @@ -16421,7 +16421,7 @@ static SDValue WidenMaskArithmetic(SDNode *N, SelectionDAG &DAG, SDValue N0 = Narrow->getOperand(0); SDValue N1 = Narrow->getOperand(1); - DebugLoc DL = Narrow->getDebugLoc(); + SDLoc DL(Narrow); // The Left side has to be a trunc. if (N0.getOpcode() != ISD::TRUNCATE) @@ -16493,7 +16493,7 @@ static SDValue PerformAndCombine(SDNode *N, SelectionDAG &DAG, if (Subtarget->hasBMI() && (VT == MVT::i32 || VT == MVT::i64)) { SDValue N0 = N->getOperand(0); SDValue N1 = N->getOperand(1); - DebugLoc DL = N->getDebugLoc(); + SDLoc DL(N); // Check LHS for neg if (N0.getOpcode() == ISD::SUB && N0.getOperand(1) == N1 && @@ -16527,7 +16527,7 @@ static SDValue PerformAndCombine(SDNode *N, SelectionDAG &DAG, SDValue N0 = N->getOperand(0); SDValue N1 = N->getOperand(1); - DebugLoc DL = N->getDebugLoc(); + SDLoc DL(N); // Check LHS for vnot if (N0.getOpcode() == ISD::XOR && @@ -16611,7 +16611,7 @@ static SDValue PerformOrCombine(SDNode *N, SelectionDAG &DAG, if ((SraAmt + 1) != EltBits) return SDValue(); - DebugLoc DL = N->getDebugLoc(); + SDLoc DL(N); // Now we know we at least have a plendvb with the mask val. See if // we can form a psignb/w/d. @@ -16660,7 +16660,7 @@ static SDValue PerformOrCombine(SDNode *N, SelectionDAG &DAG, if (ShAmt1.getOpcode() == ISD::TRUNCATE) ShAmt1 = ShAmt1.getOperand(0); - DebugLoc DL = N->getDebugLoc(); + SDLoc DL(N); unsigned Opc = X86ISD::SHLD; SDValue Op0 = N0.getOperand(0); SDValue Op1 = N1.getOperand(0); @@ -16707,7 +16707,7 @@ static SDValue performIntegerAbsCombine(SDNode *N, SelectionDAG &DAG) { SDValue N0 = N->getOperand(0); SDValue N1 = N->getOperand(1); - DebugLoc DL = N->getDebugLoc(); + SDLoc DL(N); // Check pattern of XOR(ADD(X,Y), Y) where Y is SRA(X, size(X)-1) // and change it to SUB and CMOV. 
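Two location choices appear in the DAG-combine hunks: most replacement nodes are built at the combined node's own position via SDLoc DL(N), while a few intermediate values are placed at the operand they derive from, as with SDLoc(EFLAGS) in the SUB operand swap and SDLoc(Op1) in PerformSubCombine further down. The deliberately trivial combine sketched here shows both choices; it is hypothetical and assumes only the SDLoc-taking getNode() used throughout this diff (integer VT assumed).

#include "llvm/CodeGen/SelectionDAG.h"
using namespace llvm;

static SDValue combineViaDoubleNegate(SDNode *N, SelectionDAG &DAG) {
  SDValue Op0 = N->getOperand(0);
  EVT VT = N->getValueType(0);
  SDValue Zero = DAG.getConstant(0, VT);
  // Intermediate value: tagged with the location of the operand it comes from.
  SDValue Neg = DAG.getNode(ISD::SUB, SDLoc(Op0), VT, Zero, Op0);
  // Replacement for N itself: tagged with N's own location.
  return DAG.getNode(ISD::SUB, SDLoc(N), VT, Zero, Neg);
}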
@@ -16757,7 +16757,7 @@ static SDValue PerformXorCombine(SDNode *N, SelectionDAG &DAG, // Create BLSMSK instructions by finding X ^ (X-1) SDValue N0 = N->getOperand(0); SDValue N1 = N->getOperand(1); - DebugLoc DL = N->getDebugLoc(); + SDLoc DL(N); if (N0.getOpcode() == ISD::ADD && N0.getOperand(0) == N1 && isAllOnes(N0.getOperand(1))) @@ -16777,7 +16777,7 @@ static SDValue PerformLOADCombine(SDNode *N, SelectionDAG &DAG, LoadSDNode *Ld = cast<LoadSDNode>(N); EVT RegVT = Ld->getValueType(0); EVT MemVT = Ld->getMemoryVT(); - DebugLoc dl = Ld->getDebugLoc(); + SDLoc dl(Ld); const TargetLowering &TLI = DAG.getTargetLoweringInfo(); unsigned RegSz = RegVT.getSizeInBits(); @@ -16972,7 +16972,7 @@ static SDValue PerformSTORECombine(SDNode *N, SelectionDAG &DAG, StoreSDNode *St = cast<StoreSDNode>(N); EVT VT = St->getValue().getValueType(); EVT StVT = St->getMemoryVT(); - DebugLoc dl = St->getDebugLoc(); + SDLoc dl(St); SDValue StoredVal = St->getOperand(1); const TargetLowering &TLI = DAG.getTargetLoweringInfo(); @@ -17135,8 +17135,8 @@ static SDValue PerformSTORECombine(SDNode *N, SelectionDAG &DAG, if (!VT.isVector() && !Ld->hasNUsesOfValue(1, 0)) return SDValue(); - DebugLoc LdDL = Ld->getDebugLoc(); - DebugLoc StDL = N->getDebugLoc(); + SDLoc LdDL(Ld); + SDLoc StDL(N); // If we are a 64-bit capable x86, lower to a single movq load/store pair. // Otherwise, if it's legal to use f64 SSE instructions, use f64 load/store // pair instead. @@ -17335,7 +17335,7 @@ static SDValue PerformFADDCombine(SDNode *N, SelectionDAG &DAG, if (((Subtarget->hasSSE3() && (VT == MVT::v4f32 || VT == MVT::v2f64)) || (Subtarget->hasFp256() && (VT == MVT::v8f32 || VT == MVT::v4f64))) && isHorizontalBinOp(LHS, RHS, true)) - return DAG.getNode(X86ISD::FHADD, N->getDebugLoc(), VT, LHS, RHS); + return DAG.getNode(X86ISD::FHADD, SDLoc(N), VT, LHS, RHS); return SDValue(); } @@ -17350,7 +17350,7 @@ static SDValue PerformFSUBCombine(SDNode *N, SelectionDAG &DAG, if (((Subtarget->hasSSE3() && (VT == MVT::v4f32 || VT == MVT::v2f64)) || (Subtarget->hasFp256() && (VT == MVT::v8f32 || VT == MVT::v4f64))) && isHorizontalBinOp(LHS, RHS, false)) - return DAG.getNode(X86ISD::FHSUB, N->getDebugLoc(), VT, LHS, RHS); + return DAG.getNode(X86ISD::FHSUB, SDLoc(N), VT, LHS, RHS); return SDValue(); } @@ -17387,7 +17387,7 @@ static SDValue PerformFMinFMaxCombine(SDNode *N, SelectionDAG &DAG) { case X86ISD::FMAX: NewOp = X86ISD::FMAXC; break; } - return DAG.getNode(NewOp, N->getDebugLoc(), N->getValueType(0), + return DAG.getNode(NewOp, SDLoc(N), N->getValueType(0), N->getOperand(0), N->getOperand(1)); } @@ -17431,7 +17431,7 @@ static SDValue PerformVZEXT_MOVLCombine(SDNode *N, SelectionDAG &DAG) { if (Op.getOpcode() == X86ISD::VZEXT_LOAD && VT.getVectorElementType().getSizeInBits() == OpVT.getVectorElementType().getSizeInBits()) { - return DAG.getNode(ISD::BITCAST, N->getDebugLoc(), VT, Op); + return DAG.getNode(ISD::BITCAST, SDLoc(N), VT, Op); } return SDValue(); } @@ -17445,7 +17445,7 @@ static SDValue PerformSIGN_EXTEND_INREGCombine(SDNode *N, SelectionDAG &DAG, SDValue N0 = N->getOperand(0); SDValue N1 = N->getOperand(1); EVT ExtraVT = cast<VTSDNode>(N1)->getVT(); - DebugLoc dl = N->getDebugLoc(); + SDLoc dl(N); // The SIGN_EXTEND_INREG to v4i64 is expensive operation on the // both SSE and AVX2 since there is no sign-extended shift right @@ -17492,7 +17492,7 @@ static SDValue PerformSExtCombine(SDNode *N, SelectionDAG &DAG, static SDValue PerformFMACombine(SDNode *N, SelectionDAG &DAG, const X86Subtarget* Subtarget) { - DebugLoc dl = 
N->getDebugLoc(); + SDLoc dl(N); EVT VT = N->getValueType(0); // Let legalize expand this if it isn't a legal type yet. @@ -17537,7 +17537,7 @@ static SDValue PerformZExtCombine(SDNode *N, SelectionDAG &DAG, // (and (i32 x86isd::setcc_carry), 1) // This eliminates the zext. This transformation is necessary because // ISD::SETCC is always legalized to i8. - DebugLoc dl = N->getDebugLoc(); + SDLoc dl(N); SDValue N0 = N->getOperand(0); EVT VT = N->getValueType(0); @@ -17575,17 +17575,17 @@ static SDValue PerformISDSETCCCombine(SDNode *N, SelectionDAG &DAG) { if ((CC == ISD::SETNE || CC == ISD::SETEQ) && LHS.getOpcode() == ISD::SUB) if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(LHS.getOperand(0))) if (C->getAPIntValue() == 0 && LHS.hasOneUse()) { - SDValue addV = DAG.getNode(ISD::ADD, N->getDebugLoc(), + SDValue addV = DAG.getNode(ISD::ADD, SDLoc(N), LHS.getValueType(), RHS, LHS.getOperand(1)); - return DAG.getSetCC(N->getDebugLoc(), N->getValueType(0), + return DAG.getSetCC(SDLoc(N), N->getValueType(0), addV, DAG.getConstant(0, addV.getValueType()), CC); } if ((CC == ISD::SETNE || CC == ISD::SETEQ) && RHS.getOpcode() == ISD::SUB) if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(RHS.getOperand(0))) if (C->getAPIntValue() == 0 && RHS.hasOneUse()) { - SDValue addV = DAG.getNode(ISD::ADD, N->getDebugLoc(), + SDValue addV = DAG.getNode(ISD::ADD, SDLoc(N), RHS.getValueType(), LHS, RHS.getOperand(1)); - return DAG.getSetCC(N->getDebugLoc(), N->getValueType(0), + return DAG.getSetCC(SDLoc(N), N->getValueType(0), addV, DAG.getConstant(0, addV.getValueType()), CC); } return SDValue(); @@ -17594,7 +17594,7 @@ static SDValue PerformISDSETCCCombine(SDNode *N, SelectionDAG &DAG) { // Helper function of PerformSETCCCombine. It is to materialize "setb reg" // as "sbb reg,reg", since it can be extended without zext and produces // an all-ones bit which is more useful than 0/1 in some cases. 
-static SDValue MaterializeSETB(DebugLoc DL, SDValue EFLAGS, SelectionDAG &DAG) { +static SDValue MaterializeSETB(SDLoc DL, SDValue EFLAGS, SelectionDAG &DAG) { return DAG.getNode(ISD::AND, DL, MVT::i8, DAG.getNode(X86ISD::SETCC_CARRY, DL, MVT::i8, DAG.getConstant(X86::COND_B, MVT::i8), EFLAGS), @@ -17605,7 +17605,7 @@ static SDValue MaterializeSETB(DebugLoc DL, SDValue EFLAGS, SelectionDAG &DAG) { static SDValue PerformSETCCCombine(SDNode *N, SelectionDAG &DAG, TargetLowering::DAGCombinerInfo &DCI, const X86Subtarget *Subtarget) { - DebugLoc DL = N->getDebugLoc(); + SDLoc DL(N); X86::CondCode CC = X86::CondCode(N->getConstantOperandVal(0)); SDValue EFLAGS = N->getOperand(1); @@ -17619,7 +17619,7 @@ static SDValue PerformSETCCCombine(SDNode *N, SelectionDAG &DAG, if (EFLAGS.getOpcode() == X86ISD::SUB && EFLAGS.hasOneUse() && EFLAGS.getValueType().isInteger() && !isa<ConstantSDNode>(EFLAGS.getOperand(1))) { - SDValue NewSub = DAG.getNode(X86ISD::SUB, EFLAGS.getDebugLoc(), + SDValue NewSub = DAG.getNode(X86ISD::SUB, SDLoc(EFLAGS), EFLAGS.getNode()->getVTList(), EFLAGS.getOperand(1), EFLAGS.getOperand(0)); SDValue NewEFLAGS = SDValue(NewSub.getNode(), EFLAGS.getResNo()); @@ -17649,7 +17649,7 @@ static SDValue PerformSETCCCombine(SDNode *N, SelectionDAG &DAG, static SDValue PerformBrCondCombine(SDNode *N, SelectionDAG &DAG, TargetLowering::DAGCombinerInfo &DCI, const X86Subtarget *Subtarget) { - DebugLoc DL = N->getDebugLoc(); + SDLoc DL(N); SDValue Chain = N->getOperand(0); SDValue Dest = N->getOperand(1); SDValue EFLAGS = N->getOperand(3); @@ -17674,7 +17674,7 @@ static SDValue PerformSINT_TO_FPCombine(SDNode *N, SelectionDAG &DAG, // SINT_TO_FP(v4i8) -> SINT_TO_FP(SEXT(v4i8 to v4i32)) if (InVT == MVT::v8i8 || InVT == MVT::v4i8) { - DebugLoc dl = N->getDebugLoc(); + SDLoc dl(N); MVT DstVT = InVT == MVT::v4i8 ? MVT::v4i32 : MVT::v8i32; SDValue P = DAG.getNode(ISD::SIGN_EXTEND, dl, DstVT, Op0); return DAG.getNode(ISD::SINT_TO_FP, dl, N->getValueType(0), P); @@ -17709,7 +17709,7 @@ static SDValue PerformADCCombine(SDNode *N, SelectionDAG &DAG, // We don't have a good way to replace an EFLAGS use, so only do this when // dead right now. SDValue(N, 1).use_empty()) { - DebugLoc DL = N->getDebugLoc(); + SDLoc DL(N); EVT VT = N->getValueType(0); SDValue CarryOut = DAG.getConstant(0, N->getValueType(1)); SDValue Res1 = DAG.getNode(ISD::AND, DL, VT, @@ -17728,7 +17728,7 @@ static SDValue PerformADCCombine(SDNode *N, SelectionDAG &DAG, // (sub (sete X, 0), Y) -> sbb 0, Y // (sub (setne X, 0), Y) -> adc -1, Y static SDValue OptimizeConditionalInDecrement(SDNode *N, SelectionDAG &DAG) { - DebugLoc DL = N->getDebugLoc(); + SDLoc DL(N); // Look through ZExts. SDValue Ext = N->getOperand(N->getOpcode() == ISD::SUB ? 
1 : 0); @@ -17774,7 +17774,7 @@ static SDValue PerformAddCombine(SDNode *N, SelectionDAG &DAG, if (((Subtarget->hasSSSE3() && (VT == MVT::v8i16 || VT == MVT::v4i32)) || (Subtarget->hasInt256() && (VT == MVT::v16i16 || VT == MVT::v8i32))) && isHorizontalBinOp(Op0, Op1, true)) - return DAG.getNode(X86ISD::HADD, N->getDebugLoc(), VT, Op0, Op1); + return DAG.getNode(X86ISD::HADD, SDLoc(N), VT, Op0, Op1); return OptimizeConditionalInDecrement(N, DAG); } @@ -17794,10 +17794,10 @@ static SDValue PerformSubCombine(SDNode *N, SelectionDAG &DAG, isa<ConstantSDNode>(Op1.getOperand(1))) { APInt XorC = cast<ConstantSDNode>(Op1.getOperand(1))->getAPIntValue(); EVT VT = Op0.getValueType(); - SDValue NewXor = DAG.getNode(ISD::XOR, Op1.getDebugLoc(), VT, + SDValue NewXor = DAG.getNode(ISD::XOR, SDLoc(Op1), VT, Op1.getOperand(0), DAG.getConstant(~XorC, VT)); - return DAG.getNode(ISD::ADD, N->getDebugLoc(), VT, NewXor, + return DAG.getNode(ISD::ADD, SDLoc(N), VT, NewXor, DAG.getConstant(C->getAPIntValue()+1, VT)); } } @@ -17807,7 +17807,7 @@ static SDValue PerformSubCombine(SDNode *N, SelectionDAG &DAG, if (((Subtarget->hasSSSE3() && (VT == MVT::v8i16 || VT == MVT::v4i32)) || (Subtarget->hasInt256() && (VT == MVT::v16i16 || VT == MVT::v8i32))) && isHorizontalBinOp(Op0, Op1, true)) - return DAG.getNode(X86ISD::HSUB, N->getDebugLoc(), VT, Op0, Op1); + return DAG.getNode(X86ISD::HSUB, SDLoc(N), VT, Op0, Op1); return OptimizeConditionalInDecrement(N, DAG); } @@ -17824,7 +17824,7 @@ static SDValue performVZEXTCombine(SDNode *N, SelectionDAG &DAG, if (In.getOpcode() != X86ISD::VZEXT) return SDValue(); - return DAG.getNode(X86ISD::VZEXT, N->getDebugLoc(), N->getValueType(0), + return DAG.getNode(X86ISD::VZEXT, SDLoc(N), N->getValueType(0), In.getOperand(0)); } @@ -18384,7 +18384,7 @@ void X86TargetLowering::LowerAsmOperandForConstraint(SDValue Op, getTargetMachine()))) return; - Result = DAG.getTargetGlobalAddress(GV, Op.getDebugLoc(), + Result = DAG.getTargetGlobalAddress(GV, SDLoc(Op), GA->getValueType(0), Offset); break; } diff --git a/lib/Target/X86/X86ISelLowering.h b/lib/Target/X86/X86ISelLowering.h index 0d7b069..7ed054d 100644 --- a/lib/Target/X86/X86ISelLowering.h +++ b/lib/Target/X86/X86ISelLowering.h @@ -760,16 +760,16 @@ namespace llvm { SDValue LowerCallResult(SDValue Chain, SDValue InFlag, CallingConv::ID CallConv, bool isVarArg, const SmallVectorImpl<ISD::InputArg> &Ins, - DebugLoc dl, SelectionDAG &DAG, + SDLoc dl, SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const; SDValue LowerMemArgument(SDValue Chain, CallingConv::ID CallConv, const SmallVectorImpl<ISD::InputArg> &ArgInfo, - DebugLoc dl, SelectionDAG &DAG, + SDLoc dl, SelectionDAG &DAG, const CCValAssign &VA, MachineFrameInfo *MFI, unsigned i) const; SDValue LowerMemOpCallTo(SDValue Chain, SDValue StackPtr, SDValue Arg, - DebugLoc dl, SelectionDAG &DAG, + SDLoc dl, SelectionDAG &DAG, const CCValAssign &VA, ISD::ArgFlagsTy Flags) const; @@ -791,7 +791,7 @@ namespace llvm { bool IsCalleePop(bool isVarArg, CallingConv::ID CallConv) const; SDValue EmitTailCallLoadRetAddr(SelectionDAG &DAG, SDValue &OutRetAddr, SDValue Chain, bool IsTailCall, bool Is64Bit, - int FPDiff, DebugLoc dl) const; + int FPDiff, SDLoc dl) const; unsigned GetAlignedArgumentStackSize(unsigned StackSize, SelectionDAG &DAG) const; @@ -800,7 +800,7 @@ namespace llvm { bool isSigned, bool isReplace) const; - SDValue LowerAsSplatVectorLoad(SDValue SrcOp, EVT VT, DebugLoc dl, + SDValue LowerAsSplatVectorLoad(SDValue SrcOp, EVT VT, SDLoc dl, SelectionDAG &DAG) const; SDValue 
LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) const; SDValue LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG) const; @@ -808,7 +808,7 @@ namespace llvm { SDValue LowerINSERT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) const; SDValue LowerConstantPool(SDValue Op, SelectionDAG &DAG) const; SDValue LowerBlockAddress(SDValue Op, SelectionDAG &DAG) const; - SDValue LowerGlobalAddress(const GlobalValue *GV, DebugLoc dl, + SDValue LowerGlobalAddress(const GlobalValue *GV, SDLoc dl, int64_t Offset, SelectionDAG &DAG) const; SDValue LowerGlobalAddress(SDValue Op, SelectionDAG &DAG) const; SDValue LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const; @@ -830,7 +830,7 @@ namespace llvm { SDValue LowerFNEG(SDValue Op, SelectionDAG &DAG) const; SDValue LowerFCOPYSIGN(SDValue Op, SelectionDAG &DAG) const; SDValue LowerToBT(SDValue And, ISD::CondCode CC, - DebugLoc dl, SelectionDAG &DAG) const; + SDLoc dl, SelectionDAG &DAG) const; SDValue LowerSETCC(SDValue Op, SelectionDAG &DAG) const; SDValue LowerSELECT(SDValue Op, SelectionDAG &DAG) const; SDValue LowerBRCOND(SDValue Op, SelectionDAG &DAG) const; @@ -865,7 +865,7 @@ namespace llvm { LowerFormalArguments(SDValue Chain, CallingConv::ID CallConv, bool isVarArg, const SmallVectorImpl<ISD::InputArg> &Ins, - DebugLoc dl, SelectionDAG &DAG, + SDLoc dl, SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const; virtual SDValue LowerCall(CallLoweringInfo &CLI, @@ -876,7 +876,7 @@ namespace llvm { CallingConv::ID CallConv, bool isVarArg, const SmallVectorImpl<ISD::OutputArg> &Outs, const SmallVectorImpl<SDValue> &OutVals, - DebugLoc dl, SelectionDAG &DAG) const; + SDLoc dl, SelectionDAG &DAG) const; virtual bool isUsedByReturnOnly(SDNode *N, SDValue &Chain) const; diff --git a/lib/Target/X86/X86InstrInfo.cpp b/lib/Target/X86/X86InstrInfo.cpp index 3d41bf5..ed3677e 100644 --- a/lib/Target/X86/X86InstrInfo.cpp +++ b/lib/Target/X86/X86InstrInfo.cpp @@ -4333,7 +4333,7 @@ X86InstrInfo::unfoldMemoryOperand(SelectionDAG &DAG, SDNode *N, std::vector<SDValue> AddrOps; std::vector<SDValue> BeforeOps; std::vector<SDValue> AfterOps; - DebugLoc dl = N->getDebugLoc(); + SDLoc dl(N); unsigned NumOps = N->getNumOperands(); for (unsigned i = 0; i != NumOps-1; ++i) { SDValue Op = N->getOperand(i); diff --git a/lib/Target/X86/X86SelectionDAGInfo.cpp b/lib/Target/X86/X86SelectionDAGInfo.cpp index f934fdd..d1db79f 100644 --- a/lib/Target/X86/X86SelectionDAGInfo.cpp +++ b/lib/Target/X86/X86SelectionDAGInfo.cpp @@ -27,7 +27,7 @@ X86SelectionDAGInfo::~X86SelectionDAGInfo() { } SDValue -X86SelectionDAGInfo::EmitTargetCodeForMemset(SelectionDAG &DAG, DebugLoc dl, +X86SelectionDAGInfo::EmitTargetCodeForMemset(SelectionDAG &DAG, SDLoc dl, SDValue Chain, SDValue Dst, SDValue Src, SDValue Size, unsigned Align, @@ -175,7 +175,7 @@ X86SelectionDAGInfo::EmitTargetCodeForMemset(SelectionDAG &DAG, DebugLoc dl, } SDValue -X86SelectionDAGInfo::EmitTargetCodeForMemcpy(SelectionDAG &DAG, DebugLoc dl, +X86SelectionDAGInfo::EmitTargetCodeForMemcpy(SelectionDAG &DAG, SDLoc dl, SDValue Chain, SDValue Dst, SDValue Src, SDValue Size, unsigned Align, bool isVolatile, bool AlwaysInline, diff --git a/lib/Target/X86/X86SelectionDAGInfo.h b/lib/Target/X86/X86SelectionDAGInfo.h index d1d66fe..d728af5 100644 --- a/lib/Target/X86/X86SelectionDAGInfo.h +++ b/lib/Target/X86/X86SelectionDAGInfo.h @@ -34,7 +34,7 @@ public: ~X86SelectionDAGInfo(); virtual - SDValue EmitTargetCodeForMemset(SelectionDAG &DAG, DebugLoc dl, + SDValue EmitTargetCodeForMemset(SelectionDAG &DAG, SDLoc dl, SDValue Chain, SDValue 
Dst, SDValue Src, SDValue Size, unsigned Align, @@ -42,7 +42,7 @@ public: MachinePointerInfo DstPtrInfo) const; virtual - SDValue EmitTargetCodeForMemcpy(SelectionDAG &DAG, DebugLoc dl, + SDValue EmitTargetCodeForMemcpy(SelectionDAG &DAG, SDLoc dl, SDValue Chain, SDValue Dst, SDValue Src, SDValue Size, unsigned Align, diff --git a/lib/Target/XCore/XCoreISelDAGToDAG.cpp b/lib/Target/XCore/XCoreISelDAGToDAG.cpp index d34442d..152e939 100644 --- a/lib/Target/XCore/XCoreISelDAGToDAG.cpp +++ b/lib/Target/XCore/XCoreISelDAGToDAG.cpp @@ -109,7 +109,7 @@ bool XCoreDAGToDAGISel::SelectADDRspii(SDValue Addr, SDValue &Base, } SDNode *XCoreDAGToDAGISel::Select(SDNode *N) { - DebugLoc dl = N->getDebugLoc(); + SDLoc dl(N); switch (N->getOpcode()) { default: break; case ISD::Constant: { @@ -204,12 +204,12 @@ replaceInChain(SelectionDAG *CurDAG, SDValue Chain, SDValue Old, SDValue New) } if (!found) return SDValue(); - return CurDAG->getNode(ISD::TokenFactor, Chain->getDebugLoc(), MVT::Other, + return CurDAG->getNode(ISD::TokenFactor, SDLoc(Chain), MVT::Other, &Ops[0], Ops.size()); } SDNode *XCoreDAGToDAGISel::SelectBRIND(SDNode *N) { - DebugLoc dl = N->getDebugLoc(); + SDLoc dl(N); // (brind (int_xcore_checkevent (addr))) SDValue Chain = N->getOperand(0); SDValue Addr = N->getOperand(1); diff --git a/lib/Target/XCore/XCoreISelLowering.cpp b/lib/Target/XCore/XCoreISelLowering.cpp index 2d27f1a..1f90f59 100644 --- a/lib/Target/XCore/XCoreISelLowering.cpp +++ b/lib/Target/XCore/XCoreISelLowering.cpp @@ -215,7 +215,7 @@ void XCoreTargetLowering::ReplaceNodeResults(SDNode *N, SDValue XCoreTargetLowering:: LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const { - DebugLoc dl = Op.getDebugLoc(); + SDLoc dl(Op); SDValue Cond = DAG.getNode(ISD::SETCC, dl, MVT::i32, Op.getOperand(2), Op.getOperand(3), Op.getOperand(4)); return DAG.getNode(ISD::SELECT, dl, MVT::i32, Cond, Op.getOperand(0), @@ -227,7 +227,7 @@ getGlobalAddressWrapper(SDValue GA, const GlobalValue *GV, SelectionDAG &DAG) const { // FIXME there is no actual debug info here - DebugLoc dl = GA.getDebugLoc(); + SDLoc dl(GA); const GlobalValue *UnderlyingGV = GV; // If GV is an alias then use the aliasee to determine the wrapper type if (const GlobalAlias *GA = dyn_cast<GlobalAlias>(GV)) @@ -243,7 +243,7 @@ getGlobalAddressWrapper(SDValue GA, const GlobalValue *GV, SDValue XCoreTargetLowering:: LowerGlobalAddress(SDValue Op, SelectionDAG &DAG) const { - DebugLoc DL = Op.getDebugLoc(); + SDLoc DL(Op); const GlobalAddressSDNode *GN = cast<GlobalAddressSDNode>(Op); const GlobalValue *GV = GN->getGlobal(); int64_t Offset = GN->getOffset(); @@ -259,7 +259,7 @@ LowerGlobalAddress(SDValue Op, SelectionDAG &DAG) const return GA; } -static inline SDValue BuildGetId(SelectionDAG &DAG, DebugLoc dl) { +static inline SDValue BuildGetId(SelectionDAG &DAG, SDLoc dl) { return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, MVT::i32, DAG.getConstant(Intrinsic::xcore_getid, MVT::i32)); } @@ -267,7 +267,7 @@ static inline SDValue BuildGetId(SelectionDAG &DAG, DebugLoc dl) { SDValue XCoreTargetLowering:: LowerBlockAddress(SDValue Op, SelectionDAG &DAG) const { - DebugLoc DL = Op.getDebugLoc(); + SDLoc DL(Op); const BlockAddress *BA = cast<BlockAddressSDNode>(Op)->getBlockAddress(); SDValue Result = DAG.getTargetBlockAddress(BA, getPointerTy()); @@ -280,7 +280,7 @@ LowerConstantPool(SDValue Op, SelectionDAG &DAG) const { ConstantPoolSDNode *CP = cast<ConstantPoolSDNode>(Op); // FIXME there isn't really debug info here - DebugLoc dl = CP->getDebugLoc(); + SDLoc dl(CP); EVT PtrVT = 
Op.getValueType(); SDValue Res; if (CP->isMachineConstantPoolEntry()) { @@ -303,7 +303,7 @@ LowerBR_JT(SDValue Op, SelectionDAG &DAG) const SDValue Chain = Op.getOperand(0); SDValue Table = Op.getOperand(1); SDValue Index = Op.getOperand(2); - DebugLoc dl = Op.getDebugLoc(); + SDLoc dl(Op); JumpTableSDNode *JT = cast<JumpTableSDNode>(Table); unsigned JTI = JT->getIndex(); MachineFunction &MF = DAG.getMachineFunction(); @@ -322,7 +322,7 @@ LowerBR_JT(SDValue Op, SelectionDAG &DAG) const } SDValue XCoreTargetLowering:: -lowerLoadWordFromAlignedBasePlusOffset(DebugLoc DL, SDValue Chain, SDValue Base, +lowerLoadWordFromAlignedBasePlusOffset(SDLoc DL, SDValue Chain, SDValue Base, int64_t Offset, SelectionDAG &DAG) const { if ((Offset & 0x3) == 0) { @@ -388,7 +388,7 @@ LowerLOAD(SDValue Op, SelectionDAG &DAG) const { SDValue Chain = LD->getChain(); SDValue BasePtr = LD->getBasePtr(); - DebugLoc DL = Op.getDebugLoc(); + SDLoc DL(Op); if (!LD->isVolatile()) { const GlobalValue *GV; @@ -469,7 +469,7 @@ LowerSTORE(SDValue Op, SelectionDAG &DAG) const SDValue Chain = ST->getChain(); SDValue BasePtr = ST->getBasePtr(); SDValue Value = ST->getValue(); - DebugLoc dl = Op.getDebugLoc(); + SDLoc dl(Op); if (ST->getAlignment() == 2) { SDValue Low = Value; @@ -516,7 +516,7 @@ LowerSMUL_LOHI(SDValue Op, SelectionDAG &DAG) const { assert(Op.getValueType() == MVT::i32 && Op.getOpcode() == ISD::SMUL_LOHI && "Unexpected operand to lower!"); - DebugLoc dl = Op.getDebugLoc(); + SDLoc dl(Op); SDValue LHS = Op.getOperand(0); SDValue RHS = Op.getOperand(1); SDValue Zero = DAG.getConstant(0, MVT::i32); @@ -533,7 +533,7 @@ LowerUMUL_LOHI(SDValue Op, SelectionDAG &DAG) const { assert(Op.getValueType() == MVT::i32 && Op.getOpcode() == ISD::UMUL_LOHI && "Unexpected operand to lower!"); - DebugLoc dl = Op.getDebugLoc(); + SDLoc dl(Op); SDValue LHS = Op.getOperand(0); SDValue RHS = Op.getOperand(1); SDValue Zero = DAG.getConstant(0, MVT::i32); @@ -618,7 +618,7 @@ TryExpandADDWithMul(SDNode *N, SelectionDAG &DAG) const } else { return SDValue(); } - DebugLoc dl = N->getDebugLoc(); + SDLoc dl(N); SDValue LL, RL, AddendL, AddendH; LL = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, Mul.getOperand(0), DAG.getConstant(0, MVT::i32)); @@ -677,7 +677,7 @@ ExpandADDSUB(SDNode *N, SelectionDAG &DAG) const return Result; } - DebugLoc dl = N->getDebugLoc(); + SDLoc dl(N); // Extract components SDValue LHSL = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, @@ -710,7 +710,7 @@ LowerVAARG(SDValue Op, SelectionDAG &DAG) const llvm_unreachable("unimplemented"); // FIXME Arguments passed by reference need a extra dereference. SDNode *Node = Op.getNode(); - DebugLoc dl = Node->getDebugLoc(); + SDLoc dl(Node); const Value *V = cast<SrcValueSDNode>(Node->getOperand(2))->getValue(); EVT VT = Node->getValueType(0); SDValue VAList = DAG.getLoad(getPointerTy(), dl, Node->getOperand(0), @@ -731,7 +731,7 @@ LowerVAARG(SDValue Op, SelectionDAG &DAG) const SDValue XCoreTargetLowering:: LowerVASTART(SDValue Op, SelectionDAG &DAG) const { - DebugLoc dl = Op.getDebugLoc(); + SDLoc dl(Op); // vastart stores the address of the VarArgsFrameIndex slot into the // memory location argument MachineFunction &MF = DAG.getMachineFunction(); @@ -743,7 +743,7 @@ LowerVASTART(SDValue Op, SelectionDAG &DAG) const SDValue XCoreTargetLowering::LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const { - DebugLoc dl = Op.getDebugLoc(); + SDLoc dl(Op); // Depths > 0 not supported yet! 
if (cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue() > 0) return SDValue(); @@ -783,7 +783,7 @@ LowerINIT_TRAMPOLINE(SDValue Op, SelectionDAG &DAG) const { SDValue Addr = Trmp; - DebugLoc dl = Op.getDebugLoc(); + SDLoc dl(Op); OutChains[0] = DAG.getStore(Chain, dl, DAG.getConstant(0x0a3cd805, MVT::i32), Addr, MachinePointerInfo(TrmpAddr), false, false, 0); @@ -817,7 +817,7 @@ LowerINIT_TRAMPOLINE(SDValue Op, SelectionDAG &DAG) const { SDValue XCoreTargetLowering:: LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG) const { - DebugLoc DL = Op.getDebugLoc(); + SDLoc DL(Op); unsigned IntNo = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue(); switch (IntNo) { case Intrinsic::xcore_crc8: @@ -847,7 +847,7 @@ SDValue XCoreTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI, SmallVectorImpl<SDValue> &InVals) const { SelectionDAG &DAG = CLI.DAG; - DebugLoc &dl = CLI.DL; + SDLoc &dl = CLI.DL; SmallVector<ISD::OutputArg, 32> &Outs = CLI.Outs; SmallVector<SDValue, 32> &OutVals = CLI.OutVals; SmallVector<ISD::InputArg, 32> &Ins = CLI.Ins; @@ -883,7 +883,7 @@ XCoreTargetLowering::LowerCCCCallTo(SDValue Chain, SDValue Callee, const SmallVectorImpl<ISD::OutputArg> &Outs, const SmallVectorImpl<SDValue> &OutVals, const SmallVectorImpl<ISD::InputArg> &Ins, - DebugLoc dl, SelectionDAG &DAG, + SDLoc dl, SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const { // Analyze operands of the call, assigning locations to each operand. @@ -1006,7 +1006,7 @@ SDValue XCoreTargetLowering::LowerCallResult(SDValue Chain, SDValue InFlag, CallingConv::ID CallConv, bool isVarArg, const SmallVectorImpl<ISD::InputArg> &Ins, - DebugLoc dl, SelectionDAG &DAG, + SDLoc dl, SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const { // Assign locations to each value returned by this call. 
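The XCore LowerCall hunk above also reflects the knock-on change to TargetLowering::CallLoweringInfo: binding SDLoc &dl = CLI.DL implies the DL member is now an SDLoc, so call-lowering bodies take an SDLoc reference in place of the old DebugLoc reference. The excerpt below sketches that shape for a made-up target; MyTargetLowering and the placeholder return are illustrative only, standing in for a real argument and call sequence.

#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/Target/TargetLowering.h"
using namespace llvm;

SDValue
MyTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
                            SmallVectorImpl<SDValue> &InVals) const {
  SelectionDAG &DAG = CLI.DAG;
  SDLoc &dl = CLI.DL;          // was: DebugLoc &dl = CLI.DL;
  SDValue Chain = CLI.Chain;
  // ... argument and call lowering elided; every node built here uses dl.
  return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Chain); // placeholder
}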
@@ -1037,7 +1037,7 @@ XCoreTargetLowering::LowerFormalArguments(SDValue Chain, CallingConv::ID CallConv, bool isVarArg, const SmallVectorImpl<ISD::InputArg> &Ins, - DebugLoc dl, + SDLoc dl, SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const { @@ -1062,7 +1062,7 @@ XCoreTargetLowering::LowerCCCArguments(SDValue Chain, bool isVarArg, const SmallVectorImpl<ISD::InputArg> &Ins, - DebugLoc dl, + SDLoc dl, SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const { MachineFunction &MF = DAG.getMachineFunction(); @@ -1188,7 +1188,7 @@ XCoreTargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv, bool isVarArg, const SmallVectorImpl<ISD::OutputArg> &Outs, const SmallVectorImpl<SDValue> &OutVals, - DebugLoc dl, SelectionDAG &DAG) const { + SDLoc dl, SelectionDAG &DAG) const { // CCValAssign - represent the assignment of // the return value to a location @@ -1305,7 +1305,7 @@ XCoreTargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI, SDValue XCoreTargetLowering::PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const { SelectionDAG &DAG = DCI.DAG; - DebugLoc dl = N->getDebugLoc(); + SDLoc dl(N); switch (N->getOpcode()) { default: break; case XCoreISD::LADD: { diff --git a/lib/Target/XCore/XCoreISelLowering.h b/lib/Target/XCore/XCoreISelLowering.h index c7dfa26..f765f02 100644 --- a/lib/Target/XCore/XCoreISelLowering.h +++ b/lib/Target/XCore/XCoreISelLowering.h @@ -115,7 +115,7 @@ namespace llvm { CallingConv::ID CallConv, bool isVarArg, const SmallVectorImpl<ISD::InputArg> &Ins, - DebugLoc dl, SelectionDAG &DAG, + SDLoc dl, SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const; SDValue LowerCCCCallTo(SDValue Chain, SDValue Callee, CallingConv::ID CallConv, bool isVarArg, @@ -123,17 +123,17 @@ namespace llvm { const SmallVectorImpl<ISD::OutputArg> &Outs, const SmallVectorImpl<SDValue> &OutVals, const SmallVectorImpl<ISD::InputArg> &Ins, - DebugLoc dl, SelectionDAG &DAG, + SDLoc dl, SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const; SDValue LowerCallResult(SDValue Chain, SDValue InFlag, CallingConv::ID CallConv, bool isVarArg, const SmallVectorImpl<ISD::InputArg> &Ins, - DebugLoc dl, SelectionDAG &DAG, + SDLoc dl, SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const; SDValue getReturnAddressFrameIndex(SelectionDAG &DAG) const; SDValue getGlobalAddressWrapper(SDValue GA, const GlobalValue *GV, SelectionDAG &DAG) const; - SDValue lowerLoadWordFromAlignedBasePlusOffset(DebugLoc DL, SDValue Chain, + SDValue lowerLoadWordFromAlignedBasePlusOffset(SDLoc DL, SDValue Chain, SDValue Base, int64_t Offset, SelectionDAG &DAG) const; @@ -177,7 +177,7 @@ namespace llvm { CallingConv::ID CallConv, bool isVarArg, const SmallVectorImpl<ISD::InputArg> &Ins, - DebugLoc dl, SelectionDAG &DAG, + SDLoc dl, SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const; virtual SDValue @@ -189,7 +189,7 @@ namespace llvm { CallingConv::ID CallConv, bool isVarArg, const SmallVectorImpl<ISD::OutputArg> &Outs, const SmallVectorImpl<SDValue> &OutVals, - DebugLoc dl, SelectionDAG &DAG) const; + SDLoc dl, SelectionDAG &DAG) const; virtual bool CanLowerReturn(CallingConv::ID CallConv, MachineFunction &MF, |
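These header hunks, like the X86ISelLowering.h ones earlier, are the other half of the .cpp edits: LowerFormalArguments and LowerReturn are virtual hooks inherited from TargetLowering (LowerCallResult and the other helpers are private and simply follow their callers), so declaration and definition have to move to SDLoc together; since this codebase predates routine use of the C++11 override keyword, nothing at the declaration site would flag a stale DebugLoc parameter. A schematic pair for a hypothetical target; MyTargetLowering, MyTargetISD::RET_FLAG and the trivial body are illustrative only.

// MyTargetISelLowering.h (schematic excerpt, mirroring the XCore declarations)
#include "llvm/Target/TargetLowering.h"

namespace llvm {
  class MyTargetLowering : public TargetLowering {
  public:
    virtual SDValue
    LowerReturn(SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
                const SmallVectorImpl<ISD::OutputArg> &Outs,
                const SmallVectorImpl<SDValue> &OutVals,
                SDLoc dl, SelectionDAG &DAG) const; // SDLoc in the declaration ...
  };
}

// MyTargetISelLowering.cpp (schematic excerpt)
using namespace llvm;

SDValue MyTargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv,
                                      bool isVarArg,
                                      const SmallVectorImpl<ISD::OutputArg> &Outs,
                                      const SmallVectorImpl<SDValue> &OutVals,
                                      SDLoc dl, SelectionDAG &DAG) const {
  // ... and the identical SDLoc here, otherwise this definition would not
  // match the virtual declaration it is meant to implement. A real target
  // would emit its return node at dl, e.g.
  //   return DAG.getNode(MyTargetISD::RET_FLAG, dl, MVT::Other, Chain);
  // (MyTargetISD::RET_FLAG is hypothetical). Placeholder only:
  return Chain;
}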