Diffstat (limited to 'lib/CodeGen/SelectionDAG/SelectionDAG.cpp')
-rw-r--r--  lib/CodeGen/SelectionDAG/SelectionDAG.cpp  253
1 file changed, 124 insertions(+), 129 deletions(-)
diff --git a/lib/CodeGen/SelectionDAG/SelectionDAG.cpp b/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
index 65f5edc..5477e7b 100644
--- a/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
+++ b/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
@@ -2281,35 +2281,6 @@ bool SelectionDAG::isVerifiedDebugInfoDesc(SDValue Op) const {
 }
 
-/// getShuffleScalarElt - Returns the scalar element that will make up the ith
-/// element of the result of the vector shuffle.
-SDValue SelectionDAG::getShuffleScalarElt(const ShuffleVectorSDNode *N,
-                                          unsigned i) {
-  EVT VT = N->getValueType(0);
-  if (N->getMaskElt(i) < 0)
-    return getUNDEF(VT.getVectorElementType());
-  unsigned Index = N->getMaskElt(i);
-  unsigned NumElems = VT.getVectorNumElements();
-  SDValue V = (Index < NumElems) ? N->getOperand(0) : N->getOperand(1);
-  Index %= NumElems;
-
-  if (V.getOpcode() == ISD::BIT_CONVERT) {
-    V = V.getOperand(0);
-    EVT VVT = V.getValueType();
-    if (!VVT.isVector() || VVT.getVectorNumElements() != (unsigned)NumElems)
-      return SDValue();
-  }
-  if (V.getOpcode() == ISD::SCALAR_TO_VECTOR)
-    return (Index == 0) ? V.getOperand(0)
-                        : getUNDEF(VT.getVectorElementType());
-  if (V.getOpcode() == ISD::BUILD_VECTOR)
-    return V.getOperand(Index);
-  if (const ShuffleVectorSDNode *SVN = dyn_cast<ShuffleVectorSDNode>(V))
-    return getShuffleScalarElt(SVN, Index);
-  return SDValue();
-}
-
-
 /// getNode - Gets or creates the specified node.
 ///
 SDValue SelectionDAG::getNode(unsigned Opcode, DebugLoc DL, EVT VT) {
@@ -3160,8 +3131,7 @@ static SDValue getMemsetStringVal(EVT VT, DebugLoc dl, SelectionDAG &DAG,
   if (Str.empty()) {
     if (VT.isInteger())
       return DAG.getConstant(0, VT);
-    else if (VT.getSimpleVT().SimpleTy == MVT::f32 ||
-             VT.getSimpleVT().SimpleTy == MVT::f64)
+    else if (VT == MVT::f32 || VT == MVT::f64)
       return DAG.getConstantFP(0.0, VT);
     else if (VT.isVector()) {
       unsigned NumElts = VT.getVectorNumElements();
@@ -3305,14 +3275,16 @@ static SDValue getMemcpyLoadsAndStores(SelectionDAG &DAG, DebugLoc dl,
                                        SDValue Chain, SDValue Dst,
                                        SDValue Src, uint64_t Size,
                                        unsigned Align, bool isVol,
                                        bool AlwaysInline,
-                                       const Value *DstSV, uint64_t DstSVOff,
-                                       const Value *SrcSV, uint64_t SrcSVOff) {
+                                       MachinePointerInfo DstPtrInfo,
+                                       MachinePointerInfo SrcPtrInfo) {
   // Turn a memcpy of undef to nop.
   if (Src.getOpcode() == ISD::UNDEF)
     return Chain;
 
   // Expand memcpy to a series of load and store ops if the size operand falls
   // below a certain threshold.
+  // TODO: In the AlwaysInline case, if the size is big then generate a loop
+  // rather than maybe a humongous number of loads and stores.
   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
   std::vector<EVT> MemOps;
   bool DstAlignCanChange = false;
@@ -3363,7 +3335,8 @@ static SDValue getMemcpyLoadsAndStores(SelectionDAG &DAG, DebugLoc dl,
       Value = getMemsetStringVal(VT, dl, DAG, TLI, Str, SrcOff);
       Store = DAG.getStore(Chain, dl, Value,
                            getMemBasePlusOffset(Dst, DstOff, DAG),
-                           DstSV, DstSVOff + DstOff, isVol, false, Align);
+                           DstPtrInfo.getWithOffset(DstOff), isVol,
+                           false, Align);
     } else {
       // The type might not be legal for the target.  This should only happen
       // if the type is smaller than a legal type, as on PPC, so the right
@@ -3374,12 +3347,12 @@ static SDValue getMemcpyLoadsAndStores(SelectionDAG &DAG, DebugLoc dl,
       assert(NVT.bitsGE(VT));
       Value = DAG.getExtLoad(ISD::EXTLOAD, NVT, dl, Chain,
                              getMemBasePlusOffset(Src, SrcOff, DAG),
-                             SrcSV, SrcSVOff + SrcOff, VT, isVol, false,
+                             SrcPtrInfo.getWithOffset(SrcOff), VT, isVol, false,
                              MinAlign(SrcAlign, SrcOff));
       Store = DAG.getTruncStore(Chain, dl, Value,
                                 getMemBasePlusOffset(Dst, DstOff, DAG),
-                                DstSV, DstSVOff + DstOff, VT, isVol, false,
-                                Align);
+                                DstPtrInfo.getWithOffset(DstOff), VT, isVol,
+                                false, Align);
     }
     OutChains.push_back(Store);
     SrcOff += VTSize;
@@ -3395,8 +3368,8 @@ static SDValue getMemmoveLoadsAndStores(SelectionDAG &DAG, DebugLoc dl,
                                         SDValue Src, uint64_t Size,
                                         unsigned Align, bool isVol,
                                         bool AlwaysInline,
-                                        const Value *DstSV, uint64_t DstSVOff,
-                                        const Value *SrcSV, uint64_t SrcSVOff) {
+                                        MachinePointerInfo DstPtrInfo,
+                                        MachinePointerInfo SrcPtrInfo) {
   // Turn a memmove of undef to nop.
   if (Src.getOpcode() == ISD::UNDEF)
     return Chain;
@@ -3443,7 +3416,8 @@ static SDValue getMemmoveLoadsAndStores(SelectionDAG &DAG, DebugLoc dl,
 
     Value = DAG.getLoad(VT, dl, Chain,
                         getMemBasePlusOffset(Src, SrcOff, DAG),
-                        SrcSV, SrcSVOff + SrcOff, isVol, false, SrcAlign);
+                        SrcPtrInfo.getWithOffset(SrcOff), isVol,
+                        false, SrcAlign);
     LoadValues.push_back(Value);
     LoadChains.push_back(Value.getValue(1));
     SrcOff += VTSize;
@@ -3458,7 +3432,7 @@ static SDValue getMemmoveLoadsAndStores(SelectionDAG &DAG, DebugLoc dl,
 
     Store = DAG.getStore(Chain, dl, LoadValues[i],
                          getMemBasePlusOffset(Dst, DstOff, DAG),
-                         DstSV, DstSVOff + DstOff, isVol, false, Align);
+                         DstPtrInfo.getWithOffset(DstOff), isVol, false, Align);
     OutChains.push_back(Store);
     DstOff += VTSize;
   }
@@ -3471,7 +3445,7 @@ static SDValue getMemsetStores(SelectionDAG &DAG, DebugLoc dl,
                                SDValue Chain, SDValue Dst,
                                SDValue Src, uint64_t Size,
                                unsigned Align, bool isVol,
-                               const Value *DstSV, uint64_t DstSVOff) {
+                               MachinePointerInfo DstPtrInfo) {
   // Turn a memset of undef to nop.
   if (Src.getOpcode() == ISD::UNDEF)
     return Chain;
@@ -3512,7 +3486,8 @@ static SDValue getMemsetStores(SelectionDAG &DAG, DebugLoc dl,
     SDValue Value = getMemsetValue(Src, VT, DAG, dl);
     SDValue Store = DAG.getStore(Chain, dl, Value,
                                  getMemBasePlusOffset(Dst, DstOff, DAG),
-                                 DstSV, DstSVOff + DstOff, isVol, false, 0);
+                                 DstPtrInfo.getWithOffset(DstOff),
+                                 isVol, false, 0);
     OutChains.push_back(Store);
     DstOff += VTSize;
   }
@@ -3524,8 +3499,8 @@ static SDValue getMemsetStores(SelectionDAG &DAG, DebugLoc dl,
 SDValue SelectionDAG::getMemcpy(SDValue Chain, DebugLoc dl, SDValue Dst,
                                 SDValue Src, SDValue Size,
                                 unsigned Align, bool isVol, bool AlwaysInline,
-                                const Value *DstSV, uint64_t DstSVOff,
-                                const Value *SrcSV, uint64_t SrcSVOff) {
+                                MachinePointerInfo DstPtrInfo,
+                                MachinePointerInfo SrcPtrInfo) {
 
   // Check to see if we should lower the memcpy to loads and stores first.
   // For cases within the target-specified limits, this is the best choice.
@@ -3537,7 +3512,7 @@ SDValue SelectionDAG::getMemcpy(SDValue Chain, DebugLoc dl, SDValue Dst,
     SDValue Result = getMemcpyLoadsAndStores(*this, dl, Chain, Dst, Src,
                                              ConstantSize->getZExtValue(),Align,
-                                             isVol, false, DstSV, DstSVOff, SrcSV, SrcSVOff);
+                                             isVol, false, DstPtrInfo, SrcPtrInfo);
     if (Result.getNode())
       return Result;
   }
@@ -3547,7 +3522,7 @@ SDValue SelectionDAG::getMemcpy(SDValue Chain, DebugLoc dl, SDValue Dst,
   SDValue Result =
     TSI.EmitTargetCodeForMemcpy(*this, dl, Chain, Dst, Src, Size, Align,
                                 isVol, AlwaysInline,
-                                DstSV, DstSVOff, SrcSV, SrcSVOff);
+                                DstPtrInfo, SrcPtrInfo);
   if (Result.getNode())
     return Result;
@@ -3557,7 +3532,7 @@ SDValue SelectionDAG::getMemcpy(SDValue Chain, DebugLoc dl, SDValue Dst,
     assert(ConstantSize && "AlwaysInline requires a constant size!");
     return getMemcpyLoadsAndStores(*this, dl, Chain, Dst, Src,
                                    ConstantSize->getZExtValue(), Align, isVol,
-                                   true, DstSV, DstSVOff, SrcSV, SrcSVOff);
+                                   true, DstPtrInfo, SrcPtrInfo);
   }
 
   // FIXME: If the memcpy is volatile (isVol), lowering it to a plain libc
@@ -3588,8 +3563,8 @@ SDValue SelectionDAG::getMemcpy(SDValue Chain, DebugLoc dl, SDValue Dst,
 SDValue SelectionDAG::getMemmove(SDValue Chain, DebugLoc dl, SDValue Dst,
                                  SDValue Src, SDValue Size,
                                  unsigned Align, bool isVol,
-                                 const Value *DstSV, uint64_t DstSVOff,
-                                 const Value *SrcSV, uint64_t SrcSVOff) {
+                                 MachinePointerInfo DstPtrInfo,
+                                 MachinePointerInfo SrcPtrInfo) {
 
   // Check to see if we should lower the memmove to loads and stores first.
   // For cases within the target-specified limits, this is the best choice.
@@ -3602,7 +3577,7 @@ SDValue SelectionDAG::getMemmove(SDValue Chain, DebugLoc dl, SDValue Dst,
     SDValue Result =
       getMemmoveLoadsAndStores(*this, dl, Chain, Dst, Src,
                                ConstantSize->getZExtValue(), Align, isVol,
-                               false, DstSV, DstSVOff, SrcSV, SrcSVOff);
+                               false, DstPtrInfo, SrcPtrInfo);
     if (Result.getNode())
       return Result;
   }
@@ -3611,7 +3586,7 @@ SDValue SelectionDAG::getMemmove(SDValue Chain, DebugLoc dl, SDValue Dst,
   // code. If the target chooses to do this, this is the next best.
   SDValue Result =
     TSI.EmitTargetCodeForMemmove(*this, dl, Chain, Dst, Src, Size, Align, isVol,
-                                 DstSV, DstSVOff, SrcSV, SrcSVOff);
+                                 DstPtrInfo, SrcPtrInfo);
   if (Result.getNode())
     return Result;
@@ -3640,7 +3615,7 @@ SDValue SelectionDAG::getMemmove(SDValue Chain, DebugLoc dl, SDValue Dst,
 SDValue SelectionDAG::getMemset(SDValue Chain, DebugLoc dl, SDValue Dst,
                                 SDValue Src, SDValue Size,
                                 unsigned Align, bool isVol,
-                                const Value *DstSV, uint64_t DstSVOff) {
+                                MachinePointerInfo DstPtrInfo) {
 
   // Check to see if we should lower the memset to stores first.
   // For cases within the target-specified limits, this is the best choice.
@@ -3652,7 +3627,7 @@ SDValue SelectionDAG::getMemset(SDValue Chain, DebugLoc dl, SDValue Dst,
     SDValue Result =
       getMemsetStores(*this, dl, Chain, Dst, Src,
                       ConstantSize->getZExtValue(),
-                      Align, isVol, DstSV, DstSVOff);
+                      Align, isVol, DstPtrInfo);
 
     if (Result.getNode())
       return Result;
@@ -3662,7 +3637,7 @@ SDValue SelectionDAG::getMemset(SDValue Chain, DebugLoc dl, SDValue Dst,
   // code. If the target chooses to do this, this is the next best.
   SDValue Result =
     TSI.EmitTargetCodeForMemset(*this, dl, Chain, Dst, Src, Size, Align, isVol,
-                                DstSV, DstSVOff);
+                                DstPtrInfo);
   if (Result.getNode())
     return Result;
@@ -3698,19 +3673,12 @@ SDValue SelectionDAG::getMemset(SDValue Chain, DebugLoc dl, SDValue Dst,
 }
 
 SDValue SelectionDAG::getAtomic(unsigned Opcode, DebugLoc dl, EVT MemVT,
-                                SDValue Chain,
-                                SDValue Ptr, SDValue Cmp,
-                                SDValue Swp, const Value* PtrVal,
+                                SDValue Chain, SDValue Ptr, SDValue Cmp,
+                                SDValue Swp, MachinePointerInfo PtrInfo,
                                 unsigned Alignment) {
   if (Alignment == 0)  // Ensure that codegen never sees alignment 0
     Alignment = getEVTAlignment(MemVT);
 
-  // Check if the memory reference references a frame index
-  if (!PtrVal)
-    if (const FrameIndexSDNode *FI =
-          dyn_cast<const FrameIndexSDNode>(Ptr.getNode()))
-      PtrVal = PseudoSourceValue::getFixedStack(FI->getIndex());
-
   MachineFunction &MF = getMachineFunction();
   unsigned Flags = MachineMemOperand::MOLoad | MachineMemOperand::MOStore;
@@ -3718,8 +3686,7 @@ SDValue SelectionDAG::getAtomic(unsigned Opcode, DebugLoc dl, EVT MemVT,
     Flags |= MachineMemOperand::MOVolatile;
 
   MachineMemOperand *MMO =
-    MF.getMachineMemOperand(PtrVal, Flags, 0,
-                            MemVT.getStoreSize(), Alignment);
+    MF.getMachineMemOperand(PtrInfo, Flags, MemVT.getStoreSize(), Alignment);
 
   return getAtomic(Opcode, dl, MemVT, Chain, Ptr, Cmp, Swp, MMO);
 }
@@ -3758,12 +3725,6 @@ SDValue SelectionDAG::getAtomic(unsigned Opcode, DebugLoc dl, EVT MemVT,
   if (Alignment == 0)  // Ensure that codegen never sees alignment 0
     Alignment = getEVTAlignment(MemVT);
 
-  // Check if the memory reference references a frame index
-  if (!PtrVal)
-    if (const FrameIndexSDNode *FI =
-          dyn_cast<const FrameIndexSDNode>(Ptr.getNode()))
-      PtrVal = PseudoSourceValue::getFixedStack(FI->getIndex());
-
   MachineFunction &MF = getMachineFunction();
   unsigned Flags = MachineMemOperand::MOLoad | MachineMemOperand::MOStore;
@@ -3771,7 +3732,7 @@ SDValue SelectionDAG::getAtomic(unsigned Opcode, DebugLoc dl, EVT MemVT,
     Flags |= MachineMemOperand::MOVolatile;
 
   MachineMemOperand *MMO =
-    MF.getMachineMemOperand(PtrVal, Flags, 0,
+    MF.getMachineMemOperand(MachinePointerInfo(PtrVal), Flags,
                             MemVT.getStoreSize(), Alignment);
 
   return getAtomic(Opcode, dl, MemVT, Chain, Ptr, Val, MMO);
@@ -3832,18 +3793,18 @@ SDValue
 SelectionDAG::getMemIntrinsicNode(unsigned Opcode, DebugLoc dl,
                                   const EVT *VTs, unsigned NumVTs,
                                   const SDValue *Ops, unsigned NumOps,
-                                  EVT MemVT, const Value *srcValue, int SVOff,
+                                  EVT MemVT, MachinePointerInfo PtrInfo,
                                   unsigned Align, bool Vol,
                                   bool ReadMem, bool WriteMem) {
   return getMemIntrinsicNode(Opcode, dl, makeVTList(VTs, NumVTs), Ops, NumOps,
-                             MemVT, srcValue, SVOff, Align, Vol,
+                             MemVT, PtrInfo, Align, Vol,
                              ReadMem, WriteMem);
 }
 
 SDValue
 SelectionDAG::getMemIntrinsicNode(unsigned Opcode, DebugLoc dl, SDVTList VTList,
                                   const SDValue *Ops, unsigned NumOps,
-                                  EVT MemVT, const Value *srcValue, int SVOff,
+                                  EVT MemVT, MachinePointerInfo PtrInfo,
                                   unsigned Align, bool Vol,
                                   bool ReadMem, bool WriteMem) {
   if (Align == 0)  // Ensure that codegen never sees alignment 0
@@ -3858,8 +3819,7 @@ SelectionDAG::getMemIntrinsicNode(unsigned Opcode, DebugLoc dl, SDVTList VTList,
   if (Vol)
     Flags |= MachineMemOperand::MOVolatile;
   MachineMemOperand *MMO =
-    MF.getMachineMemOperand(srcValue, Flags, SVOff,
-                            MemVT.getStoreSize(), Align);
+    MF.getMachineMemOperand(PtrInfo, Flags, MemVT.getStoreSize(), Align);
 
   return getMemIntrinsicNode(Opcode, dl, VTList, Ops, NumOps, MemVT, MMO);
 }
@@ -3870,6 +3830,7 @@ SelectionDAG::getMemIntrinsicNode(unsigned Opcode, DebugLoc dl,
                                   SDVTList VTList, EVT MemVT,
                                   MachineMemOperand *MMO) {
   assert((Opcode == ISD::INTRINSIC_VOID || Opcode == ISD::INTRINSIC_W_CHAIN ||
+          Opcode == ISD::PREFETCH ||
           (Opcode <= INT_MAX &&
            (int)Opcode >= ISD::FIRST_TARGET_MEMORY_OPCODE)) &&
          "Opcode is not a memory-accessing opcode!");
@@ -3896,31 +3857,65 @@ SelectionDAG::getMemIntrinsicNode(unsigned Opcode, DebugLoc dl, SDVTList VTList,
   return SDValue(N, 0);
 }
 
+/// InferPointerInfo - If the specified ptr/offset is a frame index, infer a
+/// MachinePointerInfo record from it. This is particularly useful because the
+/// code generator has many cases where it doesn't bother passing in a
+/// MachinePointerInfo to getLoad or getStore when it has "FI+Cst".
+static MachinePointerInfo InferPointerInfo(SDValue Ptr, int64_t Offset = 0) {
+  // If this is FI+Offset, we can model it.
+  if (const FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(Ptr))
+    return MachinePointerInfo::getFixedStack(FI->getIndex(), Offset);
+
+  // If this is (FI+Offset1)+Offset2, we can model it.
+  if (Ptr.getOpcode() != ISD::ADD ||
+      !isa<ConstantSDNode>(Ptr.getOperand(1)) ||
+      !isa<FrameIndexSDNode>(Ptr.getOperand(0)))
+    return MachinePointerInfo();
+
+  int FI = cast<FrameIndexSDNode>(Ptr.getOperand(0))->getIndex();
+  return MachinePointerInfo::getFixedStack(FI, Offset+
+                 cast<ConstantSDNode>(Ptr.getOperand(1))->getSExtValue());
+}
+
+/// InferPointerInfo - If the specified ptr/offset is a frame index, infer a
+/// MachinePointerInfo record from it. This is particularly useful because the
+/// code generator has many cases where it doesn't bother passing in a
+/// MachinePointerInfo to getLoad or getStore when it has "FI+Cst".
+static MachinePointerInfo InferPointerInfo(SDValue Ptr, SDValue OffsetOp) {
+  // If the 'Offset' value isn't a constant, we can't handle this.
+  if (ConstantSDNode *OffsetNode = dyn_cast<ConstantSDNode>(OffsetOp))
+    return InferPointerInfo(Ptr, OffsetNode->getSExtValue());
+  if (OffsetOp.getOpcode() == ISD::UNDEF)
+    return InferPointerInfo(Ptr);
+  return MachinePointerInfo();
+}
+
+
 SDValue
 SelectionDAG::getLoad(ISD::MemIndexedMode AM, ISD::LoadExtType ExtType,
                       EVT VT, DebugLoc dl,
                       SDValue Chain, SDValue Ptr, SDValue Offset,
-                      const Value *SV, int SVOffset, EVT MemVT,
+                      MachinePointerInfo PtrInfo, EVT MemVT,
                       bool isVolatile, bool isNonTemporal,
-                      unsigned Alignment) {
+                      unsigned Alignment, const MDNode *TBAAInfo) {
   if (Alignment == 0)  // Ensure that codegen never sees alignment 0
     Alignment = getEVTAlignment(VT);
 
-  // Check if the memory reference references a frame index
-  if (!SV)
-    if (const FrameIndexSDNode *FI =
-          dyn_cast<const FrameIndexSDNode>(Ptr.getNode()))
-      SV = PseudoSourceValue::getFixedStack(FI->getIndex());
-
-  MachineFunction &MF = getMachineFunction();
   unsigned Flags = MachineMemOperand::MOLoad;
   if (isVolatile)
     Flags |= MachineMemOperand::MOVolatile;
   if (isNonTemporal)
     Flags |= MachineMemOperand::MONonTemporal;
+
+  // If we don't have a PtrInfo, infer the trivial frame index case to simplify
+  // clients.
+  if (PtrInfo.V == 0)
+    PtrInfo = InferPointerInfo(Ptr, Offset);
+
+  MachineFunction &MF = getMachineFunction();
   MachineMemOperand *MMO =
-    MF.getMachineMemOperand(SV, Flags, SVOffset,
-                            MemVT.getStoreSize(), Alignment);
+    MF.getMachineMemOperand(PtrInfo, Flags, MemVT.getStoreSize(), Alignment,
+                            TBAAInfo);
   return getLoad(AM, ExtType, VT, dl, Chain, Ptr, Offset, MemVT, MMO);
 }
@@ -3972,25 +3967,26 @@ SelectionDAG::getLoad(ISD::MemIndexedMode AM, ISD::LoadExtType ExtType,
 
 SDValue SelectionDAG::getLoad(EVT VT, DebugLoc dl,
                               SDValue Chain, SDValue Ptr,
-                              const Value *SV, int SVOffset,
+                              MachinePointerInfo PtrInfo,
                               bool isVolatile, bool isNonTemporal,
-                              unsigned Alignment) {
+                              unsigned Alignment, const MDNode *TBAAInfo) {
   SDValue Undef = getUNDEF(Ptr.getValueType());
   return getLoad(ISD::UNINDEXED, ISD::NON_EXTLOAD, VT, dl, Chain, Ptr, Undef,
-                 SV, SVOffset, VT, isVolatile, isNonTemporal, Alignment);
+                 PtrInfo, VT, isVolatile, isNonTemporal, Alignment, TBAAInfo);
 }
 
 SDValue SelectionDAG::getExtLoad(ISD::LoadExtType ExtType, EVT VT, DebugLoc dl,
                                  SDValue Chain, SDValue Ptr,
-                                 const Value *SV,
-                                 int SVOffset, EVT MemVT,
+                                 MachinePointerInfo PtrInfo, EVT MemVT,
                                  bool isVolatile, bool isNonTemporal,
-                                 unsigned Alignment) {
+                                 unsigned Alignment, const MDNode *TBAAInfo) {
   SDValue Undef = getUNDEF(Ptr.getValueType());
   return getLoad(ISD::UNINDEXED, ExtType, VT, dl, Chain, Ptr, Undef,
-                 SV, SVOffset, MemVT, isVolatile, isNonTemporal, Alignment);
+                 PtrInfo, MemVT, isVolatile, isNonTemporal, Alignment,
+                 TBAAInfo);
 }
 
+
 SDValue
 SelectionDAG::getIndexedLoad(SDValue OrigLoad, DebugLoc dl, SDValue Base,
                              SDValue Offset, ISD::MemIndexedMode AM) {
@@ -3998,33 +3994,32 @@ SelectionDAG::getIndexedLoad(SDValue OrigLoad, DebugLoc dl, SDValue Base,
   assert(LD->getOffset().getOpcode() == ISD::UNDEF &&
          "Load is already a indexed load!");
   return getLoad(AM, LD->getExtensionType(), OrigLoad.getValueType(), dl,
-                 LD->getChain(), Base, Offset, LD->getSrcValue(),
-                 LD->getSrcValueOffset(), LD->getMemoryVT(),
+                 LD->getChain(), Base, Offset, LD->getPointerInfo(),
+                 LD->getMemoryVT(),
                  LD->isVolatile(), LD->isNonTemporal(), LD->getAlignment());
 }
 
 SDValue SelectionDAG::getStore(SDValue Chain, DebugLoc dl, SDValue Val,
-                               SDValue Ptr, const Value *SV, int SVOffset,
+                               SDValue Ptr, MachinePointerInfo PtrInfo,
                                bool isVolatile, bool isNonTemporal,
-                               unsigned Alignment) {
+                               unsigned Alignment, const MDNode *TBAAInfo) {
   if (Alignment == 0)  // Ensure that codegen never sees alignment 0
     Alignment = getEVTAlignment(Val.getValueType());
 
-  // Check if the memory reference references a frame index
-  if (!SV)
-    if (const FrameIndexSDNode *FI =
-          dyn_cast<const FrameIndexSDNode>(Ptr.getNode()))
-      SV = PseudoSourceValue::getFixedStack(FI->getIndex());
-
-  MachineFunction &MF = getMachineFunction();
   unsigned Flags = MachineMemOperand::MOStore;
   if (isVolatile)
     Flags |= MachineMemOperand::MOVolatile;
   if (isNonTemporal)
     Flags |= MachineMemOperand::MONonTemporal;
+
+  if (PtrInfo.V == 0)
+    PtrInfo = InferPointerInfo(Ptr);
+
+  MachineFunction &MF = getMachineFunction();
   MachineMemOperand *MMO =
-    MF.getMachineMemOperand(SV, Flags, SVOffset,
-                            Val.getValueType().getStoreSize(), Alignment);
+    MF.getMachineMemOperand(PtrInfo, Flags,
                             Val.getValueType().getStoreSize(), Alignment,
+                            TBAAInfo);
 
   return getStore(Chain, dl, Val, Ptr, MMO);
 }
@@ -4053,27 +4048,26 @@ SDValue SelectionDAG::getStore(SDValue Chain, DebugLoc dl, SDValue Val,
 }
 
 SDValue SelectionDAG::getTruncStore(SDValue Chain, DebugLoc dl, SDValue Val,
-                                    SDValue Ptr, const Value *SV,
-                                    int SVOffset, EVT SVT,
-                                    bool isVolatile, bool isNonTemporal,
-                                    unsigned Alignment) {
+                                    SDValue Ptr, MachinePointerInfo PtrInfo,
+                                    EVT SVT,bool isVolatile, bool isNonTemporal,
+                                    unsigned Alignment,
+                                    const MDNode *TBAAInfo) {
   if (Alignment == 0)  // Ensure that codegen never sees alignment 0
     Alignment = getEVTAlignment(SVT);
 
-  // Check if the memory reference references a frame index
-  if (!SV)
-    if (const FrameIndexSDNode *FI =
-          dyn_cast<const FrameIndexSDNode>(Ptr.getNode()))
-      SV = PseudoSourceValue::getFixedStack(FI->getIndex());
-
-  MachineFunction &MF = getMachineFunction();
   unsigned Flags = MachineMemOperand::MOStore;
   if (isVolatile)
     Flags |= MachineMemOperand::MOVolatile;
   if (isNonTemporal)
     Flags |= MachineMemOperand::MONonTemporal;
+
+  if (PtrInfo.V == 0)
+    PtrInfo = InferPointerInfo(Ptr);
+
+  MachineFunction &MF = getMachineFunction();
   MachineMemOperand *MMO =
-    MF.getMachineMemOperand(SV, Flags, SVOffset, SVT.getStoreSize(), Alignment);
+    MF.getMachineMemOperand(PtrInfo, Flags, SVT.getStoreSize(), Alignment,
+                            TBAAInfo);
 
   return getTruncStore(Chain, dl, Val, Ptr, SVT, MMO);
 }
@@ -5435,7 +5429,7 @@ const EVT *SDNode::getValueTypeList(EVT VT) {
     sys::SmartScopedLock<true> Lock(*VTMutex);
     return &(*EVTs->insert(VT).first);
   } else {
-    assert(VT.getSimpleVT().SimpleTy < MVT::LAST_VALUETYPE &&
+    assert(VT.getSimpleVT() < MVT::LAST_VALUETYPE &&
            "Value type out of range!");
    return &SimpleVTArray->VTs[VT.getSimpleVT().SimpleTy];
   }
@@ -5507,9 +5501,9 @@ bool SDNode::isOperandOf(SDNode *N) const {
 
 /// reachesChainWithoutSideEffects - Return true if this operand (which must
 /// be a chain) reaches the specified operand without crossing any
-/// side-effecting instructions. In practice, this looks through token
-/// factors and non-volatile loads. In order to remain efficient, this only
-/// looks a couple of nodes in, it does not do an exhaustive search.
+/// side-effecting instructions on any chain path. In practice, this looks
+/// through token factors and non-volatile loads. In order to remain efficient,
+/// this only looks a couple of nodes in, it does not do an exhaustive search.
 bool SDValue::reachesChainWithoutSideEffects(SDValue Dest,
                                              unsigned Depth) const {
   if (*this == Dest) return true;
@@ -5519,12 +5513,12 @@ bool SDValue::reachesChainWithoutSideEffects(SDValue Dest,
   if (Depth == 0) return false;
 
   // If this is a token factor, all inputs to the TF happen in parallel. If any
-  // of the operands of the TF reach dest, then we can do the xform.
+  // of the operands of the TF does not reach dest, then we cannot do the xform.
   if (getOpcode() == ISD::TokenFactor) {
     for (unsigned i = 0, e = getNumOperands(); i != e; ++i)
-      if (getOperand(i).reachesChainWithoutSideEffects(Dest, Depth-1))
-        return true;
-    return false;
+      if (!getOperand(i).reachesChainWithoutSideEffects(Dest, Depth-1))
+        return false;
+    return true;
   }
 
   // Loads don't have side effects, look through them.
@@ -5629,6 +5623,7 @@ std::string SDNode::getOperationName(const SelectionDAG *G) const {
   case ISD::EH_RETURN: return "EH_RETURN";
   case ISD::EH_SJLJ_SETJMP: return "EH_SJLJ_SETJMP";
   case ISD::EH_SJLJ_LONGJMP: return "EH_SJLJ_LONGJMP";
+  case ISD::EH_SJLJ_DISPATCHSETUP: return "EH_SJLJ_DISPATCHSETUP";
   case ISD::ConstantPool: return "ConstantPool";
   case ISD::ExternalSymbol: return "ExternalSymbol";
   case ISD::BlockAddress: return "BlockAddress";
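For callers, the visible effect of this change is that every (const Value*, byte offset) pair collapses into a single MachinePointerInfo argument, an optional TBAA node can be threaded through to the MachineMemOperand, and getLoad/getStore can infer the frame-index case themselves via InferPointerInfo when a default-constructed MachinePointerInfo is passed. Below is a minimal before/after sketch of a call site; it is not taken from this patch, and it assumes a SelectionDAG DAG, a DebugLoc dl, a chain Chain, a frame index FI, and its address SDValue FIPtr are already in scope (those names are illustrative only).

// Before this patch: pointer identity was a Value* (often a PseudoSourceValue)
// plus a separate byte offset.
//   SDValue L = DAG.getLoad(MVT::i32, dl, Chain, FIPtr,
//                           PseudoSourceValue::getFixedStack(FI), /*SVOffset=*/0,
//                           /*isVolatile=*/false, /*isNonTemporal=*/false,
//                           /*Alignment=*/4);

// After this patch: a MachinePointerInfo carries the same information, and a
// TBAA tag can be attached. Passing MachinePointerInfo() instead would let
// getLoad fall back to InferPointerInfo() for the simple FI / FI+Cst cases.
SDValue L = DAG.getLoad(MVT::i32, dl, Chain, FIPtr,
                        MachinePointerInfo::getFixedStack(FI, 0),
                        /*isVolatile=*/false, /*isNonTemporal=*/false,
                        /*Alignment=*/4, /*TBAAInfo=*/0);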