author    | Stephen Hines <srhines@google.com> | 2014-12-04 19:51:48 +0000
committer | Android Git Automerger <android-git-automerger@android.com> | 2014-12-04 19:51:48 +0000
commit    | a21bbdfad461e957fa42ac9d6860ddc9de2da3e9 (patch)
tree      | 8d32ff2094b47e15a8def30d62fd7dee6e009de3 /lib/Target/XCore/XCoreISelLowering.cpp
parent    | 6b8c6a5088c221af2b25065b8b6b8b0fec8a116f (diff)
parent    | 876d6995443e99d13696f3941c3a789a4daa7c7a (diff)
am 876d6995: Merge "Update aosp/master LLVM for rebase to r222494."
* commit '876d6995443e99d13696f3941c3a789a4daa7c7a':
Update aosp/master LLVM for rebase to r222494.
Diffstat (limited to 'lib/Target/XCore/XCoreISelLowering.cpp')
-rw-r--r-- | lib/Target/XCore/XCoreISelLowering.cpp | 60
1 file changed, 35 insertions(+), 25 deletions(-)
diff --git a/lib/Target/XCore/XCoreISelLowering.cpp b/lib/Target/XCore/XCoreISelLowering.cpp
index be7ef64..96c43ae 100644
--- a/lib/Target/XCore/XCoreISelLowering.cpp
+++ b/lib/Target/XCore/XCoreISelLowering.cpp
@@ -69,7 +69,7 @@ getTargetNodeName(unsigned Opcode) const
 }
 
 XCoreTargetLowering::XCoreTargetLowering(const TargetMachine &TM)
-  : TargetLowering(TM, new XCoreTargetObjectFile()), TM(TM),
+  : TargetLowering(TM), TM(TM),
     Subtarget(TM.getSubtarget<XCoreSubtarget>()) {
 
   // Set up the register classes.
@@ -426,7 +426,9 @@ LowerLOAD(SDValue Op, SelectionDAG &DAG) const {
   assert(LD->getExtensionType() == ISD::NON_EXTLOAD &&
          "Unexpected extension type");
   assert(LD->getMemoryVT() == MVT::i32 && "Unexpected load EVT");
-  if (allowsUnalignedMemoryAccesses(LD->getMemoryVT()))
+  if (allowsMisalignedMemoryAccesses(LD->getMemoryVT(),
+                                     LD->getAddressSpace(),
+                                     LD->getAlignment()))
     return SDValue();
 
   unsigned ABIAlignment = getDataLayout()->
@@ -461,14 +463,15 @@ LowerLOAD(SDValue Op, SelectionDAG &DAG) const {
   if (LD->getAlignment() == 2) {
     SDValue Low = DAG.getExtLoad(ISD::ZEXTLOAD, DL, MVT::i32, Chain,
                                  BasePtr, LD->getPointerInfo(), MVT::i16,
-                                 LD->isVolatile(), LD->isNonTemporal(), 2);
+                                 LD->isVolatile(), LD->isNonTemporal(),
+                                 LD->isInvariant(), 2);
     SDValue HighAddr = DAG.getNode(ISD::ADD, DL, MVT::i32, BasePtr,
                                    DAG.getConstant(2, MVT::i32));
     SDValue High = DAG.getExtLoad(ISD::EXTLOAD, DL, MVT::i32, Chain,
                                   HighAddr,
                                   LD->getPointerInfo().getWithOffset(2),
                                   MVT::i16, LD->isVolatile(),
-                                  LD->isNonTemporal(), 2);
+                                  LD->isNonTemporal(), LD->isInvariant(), 2);
     SDValue HighShifted = DAG.getNode(ISD::SHL, DL, MVT::i32, High,
                                       DAG.getConstant(16, MVT::i32));
     SDValue Result = DAG.getNode(ISD::OR, DL, MVT::i32, Low, HighShifted);
@@ -504,7 +507,9 @@ LowerSTORE(SDValue Op, SelectionDAG &DAG) const
   StoreSDNode *ST = cast<StoreSDNode>(Op);
   assert(!ST->isTruncatingStore() && "Unexpected store type");
   assert(ST->getMemoryVT() == MVT::i32 && "Unexpected store EVT");
-  if (allowsUnalignedMemoryAccesses(ST->getMemoryVT())) {
+  if (allowsMisalignedMemoryAccesses(ST->getMemoryVT(),
+                                     ST->getAddressSpace(),
+                                     ST->getAlignment())) {
     return SDValue();
   }
   unsigned ABIAlignment = getDataLayout()->
@@ -800,7 +805,8 @@ SDValue XCoreTargetLowering::LowerFRAMEADDR(SDValue Op,
     return SDValue();
 
   MachineFunction &MF = DAG.getMachineFunction();
-  const TargetRegisterInfo *RegInfo = getTargetMachine().getRegisterInfo();
+  const TargetRegisterInfo *RegInfo =
+      getTargetMachine().getSubtargetImpl()->getRegisterInfo();
   return DAG.getCopyFromReg(DAG.getEntryNode(), SDLoc(Op),
                             RegInfo->getFrameRegister(MF), MVT::i32);
 }
@@ -846,7 +852,8 @@ LowerEH_RETURN(SDValue Op, SelectionDAG &DAG) const {
   SDLoc dl(Op);
 
   // Absolute SP = (FP + FrameToArgs) + Offset
-  const TargetRegisterInfo *RegInfo = getTargetMachine().getRegisterInfo();
+  const TargetRegisterInfo *RegInfo =
+      getTargetMachine().getSubtargetImpl()->getRegisterInfo();
   SDValue Stack = DAG.getCopyFromReg(DAG.getEntryNode(), dl,
                                      RegInfo->getFrameRegister(MF), MVT::i32);
   SDValue FrameToArgs = DAG.getNode(XCoreISD::FRAME_TO_ARGS_OFFSET, dl,
@@ -969,7 +976,7 @@ LowerATOMIC_LOAD(SDValue Op, SelectionDAG &DAG) const {
                        N->getBasePtr(), N->getPointerInfo(),
                        N->isVolatile(), N->isNonTemporal(),
                        N->isInvariant(), N->getAlignment(),
-                       N->getTBAAInfo(), N->getRanges());
+                       N->getAAInfo(), N->getRanges());
   }
   if (N->getMemoryVT() == MVT::i16) {
     if (N->getAlignment() < 2)
@@ -977,13 +984,13 @@ LowerATOMIC_LOAD(SDValue Op, SelectionDAG &DAG) const {
     return DAG.getExtLoad(ISD::EXTLOAD, SDLoc(Op), MVT::i32, N->getChain(),
                           N->getBasePtr(), N->getPointerInfo(), MVT::i16,
                           N->isVolatile(), N->isNonTemporal(),
-                          N->getAlignment(), N->getTBAAInfo());
+                          N->isInvariant(), N->getAlignment(), N->getAAInfo());
   }
   if (N->getMemoryVT() == MVT::i8)
     return DAG.getExtLoad(ISD::EXTLOAD, SDLoc(Op), MVT::i32, N->getChain(),
                           N->getBasePtr(), N->getPointerInfo(), MVT::i8,
                           N->isVolatile(), N->isNonTemporal(),
-                          N->getAlignment(), N->getTBAAInfo());
+                          N->isInvariant(), N->getAlignment(), N->getAAInfo());
   return SDValue();
 }
 
@@ -999,7 +1006,7 @@ LowerATOMIC_STORE(SDValue Op, SelectionDAG &DAG) const {
     return DAG.getStore(N->getChain(), SDLoc(Op), N->getVal(),
                         N->getBasePtr(), N->getPointerInfo(),
                         N->isVolatile(), N->isNonTemporal(),
-                        N->getAlignment(), N->getTBAAInfo());
+                        N->getAlignment(), N->getAAInfo());
   }
   if (N->getMemoryVT() == MVT::i16) {
     if (N->getAlignment() < 2)
@@ -1007,13 +1014,13 @@ LowerATOMIC_STORE(SDValue Op, SelectionDAG &DAG) const {
     return DAG.getTruncStore(N->getChain(), SDLoc(Op), N->getVal(),
                              N->getBasePtr(), N->getPointerInfo(), MVT::i16,
                              N->isVolatile(), N->isNonTemporal(),
-                             N->getAlignment(), N->getTBAAInfo());
+                             N->getAlignment(), N->getAAInfo());
   }
   if (N->getMemoryVT() == MVT::i8)
     return DAG.getTruncStore(N->getChain(), SDLoc(Op), N->getVal(),
                              N->getBasePtr(), N->getPointerInfo(), MVT::i8,
                              N->isVolatile(), N->isNonTemporal(),
-                             N->getAlignment(), N->getTBAAInfo());
+                             N->getAlignment(), N->getAAInfo());
   return SDValue();
 }
 
@@ -1118,8 +1125,8 @@ XCoreTargetLowering::LowerCCCCallTo(SDValue Chain, SDValue Callee,
 
   // Analyze operands of the call, assigning locations to each operand.
   SmallVector<CCValAssign, 16> ArgLocs;
-  CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(),
-                 getTargetMachine(), ArgLocs, *DAG.getContext());
+  CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), ArgLocs,
+                 *DAG.getContext());
 
   // The ABI dictates there should be one stack slot available to the callee
   // on function entry (for saving lr).
@@ -1129,8 +1136,8 @@ XCoreTargetLowering::LowerCCCCallTo(SDValue Chain, SDValue Callee,
 
   SmallVector<CCValAssign, 16> RVLocs;
   // Analyze return values to determine the number of bytes of stack required.
-  CCState RetCCInfo(CallConv, isVarArg, DAG.getMachineFunction(),
-                    getTargetMachine(), RVLocs, *DAG.getContext());
+  CCState RetCCInfo(CallConv, isVarArg, DAG.getMachineFunction(), RVLocs,
+                    *DAG.getContext());
   RetCCInfo.AllocateStack(CCInfo.getNextStackOffset(), 4);
   RetCCInfo.AnalyzeCallResult(Ins, RetCC_XCore);
 
@@ -1284,8 +1291,8 @@ XCoreTargetLowering::LowerCCCArguments(SDValue Chain,
 
   // Assign locations to all of the incoming arguments.
   SmallVector<CCValAssign, 16> ArgLocs;
-  CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(),
-                 getTargetMachine(), ArgLocs, *DAG.getContext());
+  CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), ArgLocs,
+                 *DAG.getContext());
 
   CCInfo.AnalyzeFormalArguments(Ins, CC_XCore);
 
@@ -1443,7 +1450,7 @@ CanLowerReturn(CallingConv::ID CallConv, MachineFunction &MF,
                const SmallVectorImpl<ISD::OutputArg> &Outs,
                LLVMContext &Context) const {
   SmallVector<CCValAssign, 16> RVLocs;
-  CCState CCInfo(CallConv, isVarArg, MF, getTargetMachine(), RVLocs, Context);
+  CCState CCInfo(CallConv, isVarArg, MF, RVLocs, Context);
   if (!CCInfo.CheckReturn(Outs, RetCC_XCore))
     return false;
   if (CCInfo.getNextStackOffset() != 0 && isVarArg)
@@ -1467,8 +1474,8 @@ XCoreTargetLowering::LowerReturn(SDValue Chain,
 
   SmallVector<CCValAssign, 16> RVLocs;
   // CCState - Info about the registers and stack slot.
-  CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(),
-                 getTargetMachine(), RVLocs, *DAG.getContext());
+  CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), RVLocs,
+                 *DAG.getContext());
 
   // Analyze return values.
   if (!isVarArg)
@@ -1541,7 +1548,8 @@ XCoreTargetLowering::LowerReturn(SDValue Chain,
 MachineBasicBlock *
 XCoreTargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI,
                                                  MachineBasicBlock *BB) const {
-  const TargetInstrInfo &TII = *getTargetMachine().getInstrInfo();
+  const TargetInstrInfo &TII =
+      *getTargetMachine().getSubtargetImpl()->getInstrInfo();
   DebugLoc dl = MI->getDebugLoc();
   assert((MI->getOpcode() == XCore::SELECT_CC) &&
          "Unexpected instr type to insert");
@@ -1803,7 +1811,9 @@ SDValue XCoreTargetLowering::PerformDAGCombine(SDNode *N,
     // Replace unaligned store of unaligned load with memmove.
     StoreSDNode *ST = cast<StoreSDNode>(N);
     if (!DCI.isBeforeLegalize() ||
-        allowsUnalignedMemoryAccesses(ST->getMemoryVT()) ||
+        allowsMisalignedMemoryAccesses(ST->getMemoryVT(),
+                                       ST->getAddressSpace(),
+                                       ST->getAlignment()) ||
         ST->isVolatile() || ST->isIndexed()) {
       break;
     }
@@ -1912,7 +1922,7 @@ XCoreTargetLowering::isLegalAddressingMode(const AddrMode &AM,
   if (Ty->getTypeID() == Type::VoidTyID)
     return AM.Scale == 0 && isImmUs(AM.BaseOffs) && isImmUs4(AM.BaseOffs);
 
-  const DataLayout *TD = TM.getDataLayout();
+  const DataLayout *TD = TM.getSubtargetImpl()->getDataLayout();
   unsigned Size = TD->getTypeAllocSize(Ty);
   if (AM.BaseGV) {
     return Size >= 4 && !AM.HasBaseReg && AM.Scale == 0 &&