author     Owen Anderson <resistor@mac.com>    2006-05-03 01:29:57 +0000
committer  Owen Anderson <resistor@mac.com>    2006-05-03 01:29:57 +0000
commit     a69571c7991813c93cba64e88eced6899ce93d81 (patch)
tree       06bc81338c35527b69a6e8e7434e7c1a824bc4ca /lib/CodeGen
parent     0eb4d6b52e1b5db9a4c86e5a954356ae3507a287 (diff)
Refactor TargetMachine, pushing handling of TargetData into the target-specific subclasses. This has one caller-visible change: getTargetData() now returns a pointer instead of a reference.
This fixes PR 759.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@28074 91177308-0d34-0410-b5e6-96231b3b80d8
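For context, a minimal sketch (not part of the commit) of the caller-visible change described above: the helper name typeSizeFor, the include paths, and the choice of getTypeSize as the example query are illustrative assumptions, while the switch from '.' to '->' is exactly the pattern applied in every hunk below.

// Sketch only: callers switch from '.' to '->' now that getTargetData()
// returns a pointer instead of a reference.
#include "llvm/Target/TargetData.h"     // assumed header paths for this
#include "llvm/Target/TargetMachine.h"  // era of the tree
#include "llvm/Type.h"
using namespace llvm;

// Hypothetical helper: size in bytes of Ty under TM's target data layout.
static unsigned typeSizeFor(TargetMachine &TM, const Type *Ty) {
  // Old API:  const TargetData &TD = TM.getTargetData();
  //           return TD.getTypeSize(Ty);
  const TargetData *TD = TM.getTargetData();   // now a pointer
  return (unsigned)TD->getTypeSize(Ty);
}

The same mechanical substitution accounts for all 51 changed lines in the diffstat that follows.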
Diffstat (limited to 'lib/CodeGen')
-rw-r--r--  lib/CodeGen/AsmPrinter.cpp                     32
-rw-r--r--  lib/CodeGen/DwarfWriter.cpp                     4
-rw-r--r--  lib/CodeGen/ELFWriter.cpp                       8
-rw-r--r--  lib/CodeGen/MachineFunction.cpp                 6
-rw-r--r--  lib/CodeGen/SelectionDAG/LegalizeDAG.cpp        6
-rw-r--r--  lib/CodeGen/SelectionDAG/ScheduleDAG.cpp        4
-rw-r--r--  lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp  38
-rw-r--r--  lib/CodeGen/SelectionDAG/TargetLowering.cpp     4
8 files changed, 51 insertions, 51 deletions
diff --git a/lib/CodeGen/AsmPrinter.cpp b/lib/CodeGen/AsmPrinter.cpp
index 1175ac6..e1a4cf6 100644
--- a/lib/CodeGen/AsmPrinter.cpp
+++ b/lib/CodeGen/AsmPrinter.cpp
@@ -144,7 +144,7 @@ void AsmPrinter::SetupMachineFunction(MachineFunction &MF) {
void AsmPrinter::EmitConstantPool(MachineConstantPool *MCP) {
const std::vector<MachineConstantPoolEntry> &CP = MCP->getConstants();
if (CP.empty()) return;
- const TargetData &TD = TM.getTargetData();
+ const TargetData *TD = TM.getTargetData();
SwitchSection(ConstantPoolSection, 0);
EmitAlignment(MCP->getConstantPoolAlignment());
@@ -154,7 +154,7 @@ void AsmPrinter::EmitConstantPool(MachineConstantPool *MCP) {
WriteTypeSymbolic(O, CP[i].Val->getType(), 0) << '\n';
EmitGlobalConstant(CP[i].Val);
if (i != e-1) {
- unsigned EntSize = TM.getTargetData().getTypeSize(CP[i].Val->getType());
+ unsigned EntSize = TM.getTargetData()->getTypeSize(CP[i].Val->getType());
unsigned ValEnd = CP[i].Offset + EntSize;
// Emit inter-object padding for alignment.
EmitZeros(CP[i+1].Offset-ValEnd);
@@ -168,7 +168,7 @@ void AsmPrinter::EmitConstantPool(MachineConstantPool *MCP) {
void AsmPrinter::EmitJumpTableInfo(MachineJumpTableInfo *MJTI) {
const std::vector<MachineJumpTableEntry> &JT = MJTI->getJumpTables();
if (JT.empty()) return;
- const TargetData &TD = TM.getTargetData();
+ const TargetData *TD = TM.getTargetData();
// FIXME: someday we need to handle PIC jump tables
assert((TM.getRelocationModel() == Reloc::Static ||
@@ -176,7 +176,7 @@ void AsmPrinter::EmitJumpTableInfo(MachineJumpTableInfo *MJTI) {
"Unhandled relocation model emitting jump table information!");
SwitchSection(JumpTableSection, 0);
- EmitAlignment(Log2_32(TD.getPointerAlignment()));
+ EmitAlignment(Log2_32(TD->getPointerAlignment()));
for (unsigned i = 0, e = JT.size(); i != e; ++i) {
O << PrivateGlobalPrefix << "JTI" << getFunctionNumber() << '_' << i
<< ":\n";
@@ -242,7 +242,7 @@ void AsmPrinter::EmitXXStructorList(Constant *List) {
/// specified global, returned in log form. This includes an explicitly
/// requested alignment (if the global has one).
unsigned AsmPrinter::getPreferredAlignmentLog(const GlobalVariable *GV) const {
- unsigned Alignment = TM.getTargetData().getTypeAlignmentShift(GV->getType());
+ unsigned Alignment = TM.getTargetData()->getTypeAlignmentShift(GV->getType());
if (GV->getAlignment() > (1U << Alignment))
Alignment = Log2_32(GV->getAlignment());
@@ -253,7 +253,7 @@ unsigned AsmPrinter::getPreferredAlignmentLog(const GlobalVariable *GV) const {
if (Alignment < 4) {
// If the global is not external, see if it is large. If so, give it a
// larger alignment.
- if (TM.getTargetData().getTypeSize(GV->getType()->getElementType()) > 128)
+ if (TM.getTargetData()->getTypeSize(GV->getType()->getElementType()) > 128)
Alignment = 4; // 16-byte alignment.
}
}
@@ -310,13 +310,13 @@ void AsmPrinter::EmitConstantValueOnly(const Constant *CV) {
else
O << GlobalVarAddrPrefix << Mang->getValueName(GV) << GlobalVarAddrSuffix;
} else if (const ConstantExpr *CE = dyn_cast<ConstantExpr>(CV)) {
- const TargetData &TD = TM.getTargetData();
+ const TargetData *TD = TM.getTargetData();
switch(CE->getOpcode()) {
case Instruction::GetElementPtr: {
// generate a symbolic expression for the byte address
const Constant *ptrVal = CE->getOperand(0);
std::vector<Value*> idxVec(CE->op_begin()+1, CE->op_end());
- if (int64_t Offset = TD.getIndexedOffset(ptrVal->getType(), idxVec)) {
+ if (int64_t Offset = TD->getIndexedOffset(ptrVal->getType(), idxVec)) {
if (Offset)
O << "(";
EmitConstantValueOnly(ptrVal);
@@ -344,7 +344,7 @@ void AsmPrinter::EmitConstantValueOnly(const Constant *CV) {
|| (isa<PointerType>(Ty)
&& (OpTy == Type::LongTy || OpTy == Type::ULongTy
|| OpTy == Type::IntTy || OpTy == Type::UIntTy))
- || (((TD.getTypeSize(Ty) >= TD.getTypeSize(OpTy))
+ || (((TD->getTypeSize(Ty) >= TD->getTypeSize(OpTy))
&& OpTy->isLosslesslyConvertibleTo(Ty))))
&& "FIXME: Don't yet support this kind of constant cast expr");
EmitConstantValueOnly(Op);
@@ -426,10 +426,10 @@ void AsmPrinter::EmitString(const ConstantArray *CVA) const {
/// EmitGlobalConstant - Print a general LLVM constant to the .s file.
///
void AsmPrinter::EmitGlobalConstant(const Constant *CV) {
- const TargetData &TD = TM.getTargetData();
+ const TargetData *TD = TM.getTargetData();
if (CV->isNullValue() || isa<UndefValue>(CV)) {
- EmitZeros(TD.getTypeSize(CV->getType()));
+ EmitZeros(TD->getTypeSize(CV->getType()));
return;
} else if (const ConstantArray *CVA = dyn_cast<ConstantArray>(CV)) {
if (CVA->isString()) {
@@ -441,13 +441,13 @@ void AsmPrinter::EmitGlobalConstant(const Constant *CV) {
return;
} else if (const ConstantStruct *CVS = dyn_cast<ConstantStruct>(CV)) {
// Print the fields in successive locations. Pad to align if needed!
- const StructLayout *cvsLayout = TD.getStructLayout(CVS->getType());
+ const StructLayout *cvsLayout = TD->getStructLayout(CVS->getType());
uint64_t sizeSoFar = 0;
for (unsigned i = 0, e = CVS->getNumOperands(); i != e; ++i) {
const Constant* field = CVS->getOperand(i);
// Check if padding is needed and insert one or more 0s.
- uint64_t fieldSize = TD.getTypeSize(field->getType());
+ uint64_t fieldSize = TD->getTypeSize(field->getType());
uint64_t padSize = ((i == e-1? cvsLayout->StructSize
: cvsLayout->MemberOffsets[i+1])
- cvsLayout->MemberOffsets[i]) - fieldSize;
@@ -470,7 +470,7 @@ void AsmPrinter::EmitGlobalConstant(const Constant *CV) {
if (Data64bitsDirective)
O << Data64bitsDirective << DoubleToBits(Val) << "\t" << CommentString
<< " double value: " << Val << "\n";
- else if (TD.isBigEndian()) {
+ else if (TD->isBigEndian()) {
O << Data32bitsDirective << unsigned(DoubleToBits(Val) >> 32)
<< "\t" << CommentString << " double most significant word "
<< Val << "\n";
@@ -497,7 +497,7 @@ void AsmPrinter::EmitGlobalConstant(const Constant *CV) {
if (Data64bitsDirective)
O << Data64bitsDirective << Val << "\n";
- else if (TD.isBigEndian()) {
+ else if (TD->isBigEndian()) {
O << Data32bitsDirective << unsigned(Val >> 32)
<< "\t" << CommentString << " Double-word most significant word "
<< Val << "\n";
@@ -533,7 +533,7 @@ void AsmPrinter::EmitGlobalConstant(const Constant *CV) {
O << Data16bitsDirective;
break;
case Type::PointerTyID:
- if (TD.getPointerSize() == 8) {
+ if (TD->getPointerSize() == 8) {
O << Data64bitsDirective;
break;
}
diff --git a/lib/CodeGen/DwarfWriter.cpp b/lib/CodeGen/DwarfWriter.cpp
index bb41dc0..b700618 100644
--- a/lib/CodeGen/DwarfWriter.cpp
+++ b/lib/CodeGen/DwarfWriter.cpp
@@ -1075,7 +1075,7 @@ void DwarfWriter::EmitInt64(uint64_t Value) const {
if (Asm->Data64bitsDirective) {
O << Asm->Data64bitsDirective << "0x" << std::hex << Value << std::dec;
} else {
- if (TD.isBigEndian()) {
+ if (TD->isBigEndian()) {
EmitInt32(unsigned(Value >> 32)); O << "\n";
EmitInt32(unsigned(Value));
} else {
@@ -1361,7 +1361,7 @@ DIE *DwarfWriter::NewType(DIE *Context, TypeDesc *TyDesc, CompileUnit *Unit) {
Offset -= FieldOffset;
// Maybe we need to work from the other end.
- if (TD.isLittleEndian()) Offset = FieldSize - (Offset + Size);
+ if (TD->isLittleEndian()) Offset = FieldSize - (Offset + Size);
Member->AddUInt(DW_AT_byte_size, 0, FieldSize >> 3);
Member->AddUInt(DW_AT_bit_size, 0, Size);
diff --git a/lib/CodeGen/ELFWriter.cpp b/lib/CodeGen/ELFWriter.cpp
index 780ba54..029cfe2 100644
--- a/lib/CodeGen/ELFWriter.cpp
+++ b/lib/CodeGen/ELFWriter.cpp
@@ -158,8 +158,8 @@ ELFWriter::ELFWriter(std::ostream &o, TargetMachine &tm) : O(o), TM(tm) {
e_machine = 0; // e_machine defaults to 'No Machine'
e_flags = 0; // e_flags defaults to 0, no flags.
- is64Bit = TM.getTargetData().getPointerSizeInBits() == 64;
- isLittleEndian = TM.getTargetData().isLittleEndian();
+ is64Bit = TM.getTargetData()->getPointerSizeInBits() == 64;
+ isLittleEndian = TM.getTargetData()->isLittleEndian();
// Create the machine code emitter object for this target.
MCE = new ELFCodeEmitter(*this);
@@ -233,8 +233,8 @@ void ELFWriter::EmitGlobal(GlobalVariable *GV) {
}
const Type *GVType = (const Type*)GV->getType();
- unsigned Align = TM.getTargetData().getTypeAlignment(GVType);
- unsigned Size = TM.getTargetData().getTypeSize(GVType);
+ unsigned Align = TM.getTargetData()->getTypeAlignment(GVType);
+ unsigned Size = TM.getTargetData()->getTypeSize(GVType);
// If this global has a zero initializer, it is part of the .bss or common
// section.
diff --git a/lib/CodeGen/MachineFunction.cpp b/lib/CodeGen/MachineFunction.cpp
index 6d66839..02646de 100644
--- a/lib/CodeGen/MachineFunction.cpp
+++ b/lib/CodeGen/MachineFunction.cpp
@@ -367,11 +367,11 @@ void MachineJumpTableInfo::print(std::ostream &OS) const {
}
unsigned MachineJumpTableInfo::getEntrySize() const {
- return TD.getPointerSize();
+ return TD->getPointerSize();
}
unsigned MachineJumpTableInfo::getAlignment() const {
- return TD.getPointerAlignment();
+ return TD->getPointerAlignment();
}
void MachineJumpTableInfo::dump() const { print(std::cerr); }
@@ -400,7 +400,7 @@ unsigned MachineConstantPool::getConstantPoolIndex(Constant *C,
unsigned Offset = 0;
if (!Constants.empty()) {
Offset = Constants.back().Offset;
- Offset += TD.getTypeSize(Constants.back().Val->getType());
+ Offset += TD->getTypeSize(Constants.back().Val->getType());
Offset = (Offset+AlignMask)&~AlignMask;
}
diff --git a/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp b/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp
index 6f9e977..45c686b 100644
--- a/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp
+++ b/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp
@@ -1986,7 +1986,7 @@ SDOperand SelectionDAGLegalize::LegalizeOp(SDOperand Op) {
// Otherwise, the target does not support this operation. Lower the
// operation to an explicit libcall as appropriate.
MVT::ValueType IntPtr = TLI.getPointerTy();
- const Type *IntPtrTy = TLI.getTargetData().getIntPtrType();
+ const Type *IntPtrTy = TLI.getTargetData()->getIntPtrType();
std::vector<std::pair<SDOperand, const Type*> > Args;
const char *FnName = 0;
@@ -2781,8 +2781,8 @@ SDOperand SelectionDAGLegalize::LegalizeOp(SDOperand Op) {
// slots and always reusing the same one. We currently always create
// new ones, as reuse may inhibit scheduling.
const Type *Ty = MVT::getTypeForValueType(ExtraVT);
- unsigned TySize = (unsigned)TLI.getTargetData().getTypeSize(Ty);
- unsigned Align = TLI.getTargetData().getTypeAlignment(Ty);
+ unsigned TySize = (unsigned)TLI.getTargetData()->getTypeSize(Ty);
+ unsigned Align = TLI.getTargetData()->getTypeAlignment(Ty);
MachineFunction &MF = DAG.getMachineFunction();
int SSFI =
MF.getFrameInfo()->CreateStackObject((unsigned)TySize, Align);
diff --git a/lib/CodeGen/SelectionDAG/ScheduleDAG.cpp b/lib/CodeGen/SelectionDAG/ScheduleDAG.cpp
index bf95a92..3509b16 100644
--- a/lib/CodeGen/SelectionDAG/ScheduleDAG.cpp
+++ b/lib/CodeGen/SelectionDAG/ScheduleDAG.cpp
@@ -130,10 +130,10 @@ void ScheduleDAG::AddOperand(MachineInstr *MI, SDOperand Op,
Align = 3; // always 8-byte align doubles.
else {
Align = TM.getTargetData()
- .getTypeAlignmentShift(CP->get()->getType());
+ ->getTypeAlignmentShift(CP->get()->getType());
if (Align == 0) {
// Alignment of packed types. FIXME!
- Align = TM.getTargetData().getTypeSize(CP->get()->getType());
+ Align = TM.getTargetData()->getTypeSize(CP->get()->getType());
Align = Log2_64(Align);
}
}
diff --git a/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp b/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp
index d3991a6..c4ba642 100644
--- a/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp
+++ b/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp
@@ -225,9 +225,9 @@ FunctionLoweringInfo::FunctionLoweringInfo(TargetLowering &tli,
if (AllocaInst *AI = dyn_cast<AllocaInst>(I))
if (ConstantUInt *CUI = dyn_cast<ConstantUInt>(AI->getArraySize())) {
const Type *Ty = AI->getAllocatedType();
- uint64_t TySize = TLI.getTargetData().getTypeSize(Ty);
+ uint64_t TySize = TLI.getTargetData()->getTypeSize(Ty);
unsigned Align =
- std::max((unsigned)TLI.getTargetData().getTypeAlignment(Ty),
+ std::max((unsigned)TLI.getTargetData()->getTypeAlignment(Ty),
AI->getAlignment());
// If the alignment of the value is smaller than the size of the value,
@@ -394,7 +394,7 @@ public:
// implemented with a libcall, etc.
TargetLowering &TLI;
SelectionDAG &DAG;
- const TargetData &TD;
+ const TargetData *TD;
/// SwitchCases - Vector of CaseBlock structures used to communicate
/// SwitchInst code generation information.
@@ -1202,7 +1202,7 @@ void SelectionDAGLowering::visitShuffleVector(User &I) {
void SelectionDAGLowering::visitGetElementPtr(User &I) {
SDOperand N = getValue(I.getOperand(0));
const Type *Ty = I.getOperand(0)->getType();
- const Type *UIntPtrTy = TD.getIntPtrType();
+ const Type *UIntPtrTy = TD->getIntPtrType();
for (GetElementPtrInst::op_iterator OI = I.op_begin()+1, E = I.op_end();
OI != E; ++OI) {
@@ -1211,7 +1211,7 @@ void SelectionDAGLowering::visitGetElementPtr(User &I) {
unsigned Field = cast<ConstantUInt>(Idx)->getValue();
if (Field) {
// N = N + Offset
- uint64_t Offset = TD.getStructLayout(StTy)->MemberOffsets[Field];
+ uint64_t Offset = TD->getStructLayout(StTy)->MemberOffsets[Field];
N = DAG.getNode(ISD::ADD, N.getValueType(), N,
getIntPtrConstant(Offset));
}
@@ -1225,15 +1225,15 @@ void SelectionDAGLowering::visitGetElementPtr(User &I) {
uint64_t Offs;
if (ConstantSInt *CSI = dyn_cast<ConstantSInt>(CI))
- Offs = (int64_t)TD.getTypeSize(Ty)*CSI->getValue();
+ Offs = (int64_t)TD->getTypeSize(Ty)*CSI->getValue();
else
- Offs = TD.getTypeSize(Ty)*cast<ConstantUInt>(CI)->getValue();
+ Offs = TD->getTypeSize(Ty)*cast<ConstantUInt>(CI)->getValue();
N = DAG.getNode(ISD::ADD, N.getValueType(), N, getIntPtrConstant(Offs));
continue;
}
// N = N + Idx * ElementSize;
- uint64_t ElementSize = TD.getTypeSize(Ty);
+ uint64_t ElementSize = TD->getTypeSize(Ty);
SDOperand IdxN = getValue(Idx);
// If the index is smaller or larger than intptr_t, truncate or extend
@@ -1271,8 +1271,8 @@ void SelectionDAGLowering::visitAlloca(AllocaInst &I) {
return; // getValue will auto-populate this.
const Type *Ty = I.getAllocatedType();
- uint64_t TySize = TLI.getTargetData().getTypeSize(Ty);
- unsigned Align = std::max((unsigned)TLI.getTargetData().getTypeAlignment(Ty),
+ uint64_t TySize = TLI.getTargetData()->getTypeSize(Ty);
+ unsigned Align = std::max((unsigned)TLI.getTargetData()->getTypeAlignment(Ty),
I.getAlignment());
SDOperand AllocSize = getValue(I.getArraySize());
@@ -2267,12 +2267,12 @@ void SelectionDAGLowering::visitMalloc(MallocInst &I) {
Src = DAG.getNode(ISD::ZERO_EXTEND, IntPtr, Src);
// Scale the source by the type size.
- uint64_t ElementSize = TD.getTypeSize(I.getType()->getElementType());
+ uint64_t ElementSize = TD->getTypeSize(I.getType()->getElementType());
Src = DAG.getNode(ISD::MUL, Src.getValueType(),
Src, getIntPtrConstant(ElementSize));
std::vector<std::pair<SDOperand, const Type*> > Args;
- Args.push_back(std::make_pair(Src, TLI.getTargetData().getIntPtrType()));
+ Args.push_back(std::make_pair(Src, TLI.getTargetData()->getIntPtrType()));
std::pair<SDOperand,SDOperand> Result =
TLI.LowerCallTo(getRoot(), I.getType(), false, CallingConv::C, true,
@@ -2285,7 +2285,7 @@ void SelectionDAGLowering::visitMalloc(MallocInst &I) {
void SelectionDAGLowering::visitFree(FreeInst &I) {
std::vector<std::pair<SDOperand, const Type*> > Args;
Args.push_back(std::make_pair(getValue(I.getOperand(0)),
- TLI.getTargetData().getIntPtrType()));
+ TLI.getTargetData()->getIntPtrType()));
MVT::ValueType IntPtr = TLI.getPointerTy();
std::pair<SDOperand,SDOperand> Result =
TLI.LowerCallTo(getRoot(), Type::VoidTy, false, CallingConv::C, true,
@@ -2766,7 +2766,7 @@ static Value *InsertGEPComputeCode(Value *&V, BasicBlock *BB, Instruction *GEPI,
/// stores that use it. In this case, decompose the GEP and move constant
/// indices into blocks that use it.
static void OptimizeGEPExpression(GetElementPtrInst *GEPI,
- const TargetData &TD) {
+ const TargetData *TD) {
// If this GEP is only used inside the block it is defined in, there is no
// need to rewrite it.
bool isUsedOutsideDefBB = false;
@@ -2797,7 +2797,7 @@ static void OptimizeGEPExpression(GetElementPtrInst *GEPI,
// Otherwise, decompose the GEP instruction into multiplies and adds. Sum the
// constant offset (which we now know is non-zero) and deal with it later.
uint64_t ConstantOffset = 0;
- const Type *UIntPtrTy = TD.getIntPtrType();
+ const Type *UIntPtrTy = TD->getIntPtrType();
Value *Ptr = new CastInst(GEPI->getOperand(0), UIntPtrTy, "", GEPI);
const Type *Ty = GEPI->getOperand(0)->getType();
@@ -2807,7 +2807,7 @@ static void OptimizeGEPExpression(GetElementPtrInst *GEPI,
if (const StructType *StTy = dyn_cast<StructType>(Ty)) {
unsigned Field = cast<ConstantUInt>(Idx)->getValue();
if (Field)
- ConstantOffset += TD.getStructLayout(StTy)->MemberOffsets[Field];
+ ConstantOffset += TD->getStructLayout(StTy)->MemberOffsets[Field];
Ty = StTy->getElementType(Field);
} else {
Ty = cast<SequentialType>(Ty)->getElementType();
@@ -2817,9 +2817,9 @@ static void OptimizeGEPExpression(GetElementPtrInst *GEPI,
if (CI->getRawValue() == 0) continue;
if (ConstantSInt *CSI = dyn_cast<ConstantSInt>(CI))
- ConstantOffset += (int64_t)TD.getTypeSize(Ty)*CSI->getValue();
+ ConstantOffset += (int64_t)TD->getTypeSize(Ty)*CSI->getValue();
else
- ConstantOffset+=TD.getTypeSize(Ty)*cast<ConstantUInt>(CI)->getValue();
+ ConstantOffset+=TD->getTypeSize(Ty)*cast<ConstantUInt>(CI)->getValue();
continue;
}
@@ -2828,7 +2828,7 @@ static void OptimizeGEPExpression(GetElementPtrInst *GEPI,
// Cast Idx to UIntPtrTy if needed.
Idx = new CastInst(Idx, UIntPtrTy, "", GEPI);
- uint64_t ElementSize = TD.getTypeSize(Ty);
+ uint64_t ElementSize = TD->getTypeSize(Ty);
// Mask off bits that should not be set.
ElementSize &= ~0ULL >> (64-UIntPtrTy->getPrimitiveSizeInBits());
Constant *SizeCst = ConstantUInt::get(UIntPtrTy, ElementSize);
diff --git a/lib/CodeGen/SelectionDAG/TargetLowering.cpp b/lib/CodeGen/SelectionDAG/TargetLowering.cpp
index 392b489..041d91e 100644
--- a/lib/CodeGen/SelectionDAG/TargetLowering.cpp
+++ b/lib/CodeGen/SelectionDAG/TargetLowering.cpp
@@ -27,8 +27,8 @@ TargetLowering::TargetLowering(TargetMachine &tm)
// All operations default to being supported.
memset(OpActions, 0, sizeof(OpActions));
- IsLittleEndian = TD.isLittleEndian();
- ShiftAmountTy = SetCCResultTy = PointerTy = getValueType(TD.getIntPtrType());
+ IsLittleEndian = TD->isLittleEndian();
+ ShiftAmountTy = SetCCResultTy = PointerTy = getValueType(TD->getIntPtrType());
ShiftAmtHandling = Undefined;
memset(RegClassForVT, 0,MVT::LAST_VALUETYPE*sizeof(TargetRegisterClass*));
memset(TargetDAGCombineArray, 0,