Diffstat (limited to 'lib/Target/ARM/ARMFastISel.cpp')
-rw-r--r--  lib/Target/ARM/ARMFastISel.cpp  272
1 file changed, 205 insertions(+), 67 deletions(-)
diff --git a/lib/Target/ARM/ARMFastISel.cpp b/lib/Target/ARM/ARMFastISel.cpp
index 1e94ab6..5cf73c4 100644
--- a/lib/Target/ARM/ARMFastISel.cpp
+++ b/lib/Target/ARM/ARMFastISel.cpp
@@ -14,6 +14,7 @@
//===----------------------------------------------------------------------===//
#include "ARM.h"
+#include "ARMAddressingModes.h"
#include "ARMBaseInstrInfo.h"
#include "ARMCallingConv.h"
#include "ARMRegisterInfo.h"
@@ -26,6 +27,7 @@
#include "llvm/Instructions.h"
#include "llvm/IntrinsicInst.h"
#include "llvm/Module.h"
+#include "llvm/Operator.h"
#include "llvm/CodeGen/Analysis.h"
#include "llvm/CodeGen/FastISel.h"
#include "llvm/CodeGen/FunctionLoweringInfo.h"
@@ -69,12 +71,10 @@ namespace {
} Base;
int Offset;
- unsigned Scale;
- unsigned PlusReg;
// Innocuous defaults for our address.
Address()
- : BaseType(RegBase), Offset(0), Scale(0), PlusReg(0) {
+ : BaseType(RegBase), Offset(0) {
Base.Reg = 0;
}
} Address;
@@ -136,6 +136,9 @@ class ARMFastISel : public FastISel {
virtual unsigned FastEmitInst_i(unsigned MachineInstOpcode,
const TargetRegisterClass *RC,
uint64_t Imm);
+ virtual unsigned FastEmitInst_ii(unsigned MachineInstOpcode,
+ const TargetRegisterClass *RC,
+ uint64_t Imm1, uint64_t Imm2);
virtual unsigned FastEmitInst_extractsubreg(MVT RetVT,
unsigned Op0, bool Op0IsKill,
@@ -164,6 +167,7 @@ class ARMFastISel : public FastISel {
bool SelectCall(const Instruction *I);
bool SelectSelect(const Instruction *I);
bool SelectRet(const Instruction *I);
+ bool SelectIntCast(const Instruction *I);
// Utility routines.
private:
@@ -203,7 +207,8 @@ class ARMFastISel : public FastISel {
bool DefinesOptionalPredicate(MachineInstr *MI, bool *CPSR);
const MachineInstrBuilder &AddOptionalDefs(const MachineInstrBuilder &MIB);
void AddLoadStoreOperands(EVT VT, Address &Addr,
- const MachineInstrBuilder &MIB);
+ const MachineInstrBuilder &MIB,
+ unsigned Flags);
};
} // end anonymous namespace
@@ -230,16 +235,16 @@ bool ARMFastISel::DefinesOptionalPredicate(MachineInstr *MI, bool *CPSR) {
bool ARMFastISel::isARMNEONPred(const MachineInstr *MI) {
const TargetInstrDesc &TID = MI->getDesc();
-
+
// If we're a thumb2 or not NEON function we were handled via isPredicable.
if ((TID.TSFlags & ARMII::DomainMask) != ARMII::DomainNEON ||
AFI->isThumb2Function())
return false;
-
+
for (unsigned i = 0, e = TID.getNumOperands(); i != e; ++i)
if (TID.OpInfo[i].isPredicate())
return true;
-
+
return false;
}
@@ -257,7 +262,7 @@ ARMFastISel::AddOptionalDefs(const MachineInstrBuilder &MIB) {
// we're not predicable but add it anyways.
if (TII.isPredicable(MI) || isARMNEONPred(MI))
AddDefaultPred(MIB);
-
+
// Do we optionally set a predicate? Preds is size > 0 iff the predicate
// defines CPSR. All other OptionalDefines in ARM are the CCR register.
bool CPSR = false;
@@ -433,6 +438,26 @@ unsigned ARMFastISel::FastEmitInst_i(unsigned MachineInstOpcode,
return ResultReg;
}
+unsigned ARMFastISel::FastEmitInst_ii(unsigned MachineInstOpcode,
+ const TargetRegisterClass *RC,
+ uint64_t Imm1, uint64_t Imm2) {
+ unsigned ResultReg = createResultReg(RC);
+ const TargetInstrDesc &II = TII.get(MachineInstOpcode);
+
+ if (II.getNumDefs() >= 1)
+ AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
+ .addImm(Imm1).addImm(Imm2));
+ else {
+ AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
+ .addImm(Imm1).addImm(Imm2));
+ AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
+ TII.get(TargetOpcode::COPY),
+ ResultReg)
+ .addReg(II.ImplicitDefs[0]));
+ }
+ return ResultReg;
+}
+
unsigned ARMFastISel::FastEmitInst_extractsubreg(MVT RetVT,
unsigned Op0, bool Op0IsKill,
uint32_t Idx) {
@@ -552,9 +577,6 @@ unsigned ARMFastISel::ARMMaterializeGV(const GlobalValue *GV, EVT VT) {
Reloc::Model RelocM = TM.getRelocationModel();
- // TODO: No external globals for now.
- if (Subtarget->GVIsIndirectSymbol(GV, RelocM)) return 0;
-
// TODO: Need more magic for ARM PIC.
if (!isThumb && (RelocM == Reloc::PIC_)) return 0;
@@ -589,6 +611,23 @@ unsigned ARMFastISel::ARMMaterializeGV(const GlobalValue *GV, EVT VT) {
.addImm(0);
}
AddOptionalDefs(MIB);
+
+ if (Subtarget->GVIsIndirectSymbol(GV, RelocM)) {
+ unsigned NewDestReg = createResultReg(TLI.getRegClassFor(VT));
+ if (isThumb)
+ MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(ARM::t2LDRi12),
+ NewDestReg)
+ .addReg(DestReg)
+ .addImm(0);
+ else
+ MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(ARM::LDRi12),
+ NewDestReg)
+ .addReg(DestReg)
+ .addImm(0);
+ DestReg = NewDestReg;
+ AddOptionalDefs(MIB);
+ }
+
return DestReg;
}
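Note: instead of bailing out on indirect symbols, ARMMaterializeGV now materializes the address of the indirection slot from the constant pool and then loads through it with LDRi12/t2LDRi12 at offset 0. Conceptually (a self-contained C-level sketch, not the emitted MachineInstrs):

    // For a Darwin-style non-lazy pointer, the constant pool entry holds the
    // address of a pointer slot rather than the global itself, so reaching
    // the global costs one extra load.
    static int TheGlobal = 7;
    static int *NonLazySlot = &TheGlobal;  // filled in by the dynamic linker
    int loadThroughIndirectSymbol() {
      int **Slot = &NonLazySlot;   // address materialized from the constant pool
      int *GVAddr = *Slot;         // the extra LDRi12 / t2LDRi12 added above
      return *GVAddr;              // the original access to the global
    }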
@@ -727,7 +766,7 @@ bool ARMFastISel::ARMComputeAddress(const Value *Obj, Address &Addr) {
FuncInfo.MBBMap[cast<Instruction>(Op)->getParent()]
== FuncInfo.MBB) &&
isa<ConstantInt>(cast<AddOperator>(Op)->getOperand(1))) {
- // An add (in the same block) with a constant operand. Fold the
+ // An add (in the same block) with a constant operand. Fold the
// constant.
ConstantInt *CI =
cast<ConstantInt>(cast<AddOperator>(Op)->getOperand(1));
@@ -735,7 +774,7 @@ bool ARMFastISel::ARMComputeAddress(const Value *Obj, Address &Addr) {
// Iterate on the other operand.
Op = cast<AddOperator>(Op)->getOperand(0);
continue;
- }
+ }
// Unsupported
goto unsupported_gep;
}
@@ -821,37 +860,21 @@ void ARMFastISel::ARMSimplifyAddress(Address &Addr, EVT VT) {
// Since the offset is too large for the load/store instruction
// get the reg+offset into a register.
if (needsLowering) {
- ARMCC::CondCodes Pred = ARMCC::AL;
- unsigned PredReg = 0;
-
- TargetRegisterClass *RC = isThumb ? ARM::tGPRRegisterClass :
- ARM::GPRRegisterClass;
- unsigned BaseReg = createResultReg(RC);
-
- if (!isThumb)
- emitARMRegPlusImmediate(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
- BaseReg, Addr.Base.Reg, Addr.Offset,
- Pred, PredReg,
- static_cast<const ARMBaseInstrInfo&>(TII));
- else {
- assert(AFI->isThumb2Function());
- emitT2RegPlusImmediate(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
- BaseReg, Addr.Base.Reg, Addr.Offset, Pred, PredReg,
- static_cast<const ARMBaseInstrInfo&>(TII));
- }
+ Addr.Base.Reg = FastEmit_ri_(MVT::i32, ISD::ADD, Addr.Base.Reg,
+ /*Op0IsKill*/false, Addr.Offset, MVT::i32);
Addr.Offset = 0;
- Addr.Base.Reg = BaseReg;
}
}
void ARMFastISel::AddLoadStoreOperands(EVT VT, Address &Addr,
- const MachineInstrBuilder &MIB) {
+ const MachineInstrBuilder &MIB,
+ unsigned Flags) {
// addrmode5 output depends on the selection dag addressing dividing the
// offset by 4 that it then later multiplies. Do this here as well.
if (VT.getSimpleVT().SimpleTy == MVT::f32 ||
VT.getSimpleVT().SimpleTy == MVT::f64)
Addr.Offset /= 4;
-
+
// Frame base works a bit differently. Handle it separately.
if (Addr.BaseType == Address::FrameIndexBase) {
int FI = Addr.Base.FI;
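Note: ARMSimplifyAddress now folds an out-of-range offset into the base register with a single ADD emitted through FastEmit_ri_ (which can itself fall back to materializing the immediate in a register when it does not encode), instead of calling the reg-plus-immediate emitters directly. As a point of reference, an ARM-mode word load only encodes offsets up to 4095, and halfword loads considerably less. A minimal, self-contained sketch of the idea (illustrative names, not LLVM APIs):

    struct SimpleAddress { unsigned BaseReg; int Offset; };

    // If the constant offset does not fit the addressing mode, add it into
    // the base register once and address the memory with offset 0.
    SimpleAddress foldLargeOffset(SimpleAddress Addr, int MaxImm,
                                  unsigned (*emitAddRI)(unsigned, int)) {
      if (Addr.Offset > MaxImm || Addr.Offset < -MaxImm) {
        Addr.BaseReg = emitAddRI(Addr.BaseReg, Addr.Offset); // cf. FastEmit_ri_(ADD)
        Addr.Offset = 0;
      }
      return Addr;
    }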
@@ -859,7 +882,7 @@ void ARMFastISel::AddLoadStoreOperands(EVT VT, Address &Addr,
MachineMemOperand *MMO =
FuncInfo.MF->getMachineMemOperand(
MachinePointerInfo::getFixedStack(FI, Offset),
- MachineMemOperand::MOLoad,
+ Flags,
MFI.getObjectSize(FI),
MFI.getObjectAlignment(FI));
// Now add the rest of the operands.
@@ -873,7 +896,7 @@ void ARMFastISel::AddLoadStoreOperands(EVT VT, Address &Addr,
} else {
// Now add the rest of the operands.
MIB.addReg(Addr.Base.Reg);
-
+
// ARM halfword load/stores need an additional operand.
if (!isThumb && VT.getSimpleVT().SimpleTy == MVT::i16) MIB.addReg(0);
@@ -918,7 +941,7 @@ bool ARMFastISel::ARMEmitLoad(EVT VT, unsigned &ResultReg, Address &Addr) {
ResultReg = createResultReg(RC);
MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
TII.get(Opc), ResultReg);
- AddLoadStoreOperands(VT, Addr, MIB);
+ AddLoadStoreOperands(VT, Addr, MIB, MachineMemOperand::MOLoad);
return true;
}
@@ -977,7 +1000,7 @@ bool ARMFastISel::ARMEmitStore(EVT VT, unsigned SrcReg, Address &Addr) {
MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
TII.get(StrOpc))
.addReg(SrcReg, getKillRegState(true));
- AddLoadStoreOperands(VT, Addr, MIB);
+ AddLoadStoreOperands(VT, Addr, MIB, MachineMemOperand::MOStore);
return true;
}
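Note: threading a Flags argument through AddLoadStoreOperands fixes the frame-index case, which previously built every MachineMemOperand with MOLoad even for stores. A minimal sketch of the corrected call on the store path, assuming the same arguments as the load path above:

    // Stack store through a frame index: the memory operand now carries
    // MOStore instead of the previously hard-coded MOLoad.
    MachineMemOperand *MMO = FuncInfo.MF->getMachineMemOperand(
        MachinePointerInfo::getFixedStack(FI, Offset),
        MachineMemOperand::MOStore,
        MFI.getObjectSize(FI),
        MFI.getObjectAlignment(FI));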
@@ -1061,18 +1084,16 @@ bool ARMFastISel::SelectBranch(const Instruction *I) {
// behavior.
// TODO: Factor this out.
if (const CmpInst *CI = dyn_cast<CmpInst>(BI->getCondition())) {
- if (CI->hasOneUse() && (CI->getParent() == I->getParent())) {
- MVT VT;
- const Type *Ty = CI->getOperand(0)->getType();
- if (!isTypeLegal(Ty, VT))
- return false;
-
+ MVT SourceVT;
+ const Type *Ty = CI->getOperand(0)->getType();
+ if (CI->hasOneUse() && (CI->getParent() == I->getParent())
+ && isTypeLegal(Ty, SourceVT)) {
bool isFloat = (Ty->isDoubleTy() || Ty->isFloatTy());
if (isFloat && !Subtarget->hasVFP2())
return false;
unsigned CmpOpc;
- switch (VT.SimpleTy) {
+ switch (SourceVT.SimpleTy) {
default: return false;
// TODO: Verify compares.
case MVT::f32:
@@ -1087,7 +1108,14 @@ bool ARMFastISel::SelectBranch(const Instruction *I) {
}
// Get the compare predicate.
- ARMCC::CondCodes ARMPred = getComparePred(CI->getPredicate());
+ // Try to take advantage of fallthrough opportunities.
+ CmpInst::Predicate Predicate = CI->getPredicate();
+ if (FuncInfo.MBB->isLayoutSuccessor(TBB)) {
+ std::swap(TBB, FBB);
+ Predicate = CmpInst::getInversePredicate(Predicate);
+ }
+
+ ARMCC::CondCodes ARMPred = getComparePred(Predicate);
// We may not handle every CC for now.
if (ARMPred == ARMCC::AL) return false;
@@ -1115,19 +1143,55 @@ bool ARMFastISel::SelectBranch(const Instruction *I) {
FuncInfo.MBB->addSuccessor(TBB);
return true;
}
+ } else if (TruncInst *TI = dyn_cast<TruncInst>(BI->getCondition())) {
+ MVT SourceVT;
+ if (TI->hasOneUse() && TI->getParent() == I->getParent() &&
+ (isLoadTypeLegal(TI->getOperand(0)->getType(), SourceVT))) {
+ unsigned TstOpc = isThumb ? ARM::t2TSTri : ARM::TSTri;
+ unsigned OpReg = getRegForValue(TI->getOperand(0));
+ AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
+ TII.get(TstOpc))
+ .addReg(OpReg).addImm(1));
+
+ unsigned CCMode = ARMCC::NE;
+ if (FuncInfo.MBB->isLayoutSuccessor(TBB)) {
+ std::swap(TBB, FBB);
+ CCMode = ARMCC::EQ;
+ }
+
+ unsigned BrOpc = isThumb ? ARM::t2Bcc : ARM::Bcc;
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(BrOpc))
+ .addMBB(TBB).addImm(CCMode).addReg(ARM::CPSR);
+
+ FastEmitBranch(FBB, DL);
+ FuncInfo.MBB->addSuccessor(TBB);
+ return true;
+ }
}
unsigned CmpReg = getRegForValue(BI->getCondition());
if (CmpReg == 0) return false;
- // Re-set the flags just in case.
- unsigned CmpOpc = isThumb ? ARM::t2CMPri : ARM::CMPri;
- AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(CmpOpc))
- .addReg(CmpReg).addImm(0));
+ // We've been divorced from our compare! Our block was split, and
+ // now our compare lives in a predecessor block. We mustn't
+ // re-compare here, as the children of the compare aren't guaranteed
+ // live across the block boundary (we *could* check for this).
+ // Regardless, the compare has been done in the predecessor block,
+ // and it left a value for us in a virtual register. Ergo, we test
+ // the one-bit value left in the virtual register.
+ unsigned TstOpc = isThumb ? ARM::t2TSTri : ARM::TSTri;
+ AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TstOpc))
+ .addReg(CmpReg).addImm(1));
+
+ unsigned CCMode = ARMCC::NE;
+ if (FuncInfo.MBB->isLayoutSuccessor(TBB)) {
+ std::swap(TBB, FBB);
+ CCMode = ARMCC::EQ;
+ }
unsigned BrOpc = isThumb ? ARM::t2Bcc : ARM::Bcc;
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(BrOpc))
- .addMBB(TBB).addImm(ARMCC::NE).addReg(ARM::CPSR);
+ .addMBB(TBB).addImm(CCMode).addReg(ARM::CPSR);
FastEmitBranch(FBB, DL);
FuncInfo.MBB->addSuccessor(TBB);
return true;
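Note: two things change in SelectBranch. First, when the condition's true block is the layout successor, the branch is emitted on the inverted condition so the true path becomes a fallthrough. Second, on the fallback path where the compare lives in a predecessor block, the one-bit value left in a virtual register is tested with TST #1 and branched on NE/EQ rather than re-running the compare. A small self-contained sketch of the fallthrough decision (illustrative names, not LLVM APIs):

    enum SketchCC { CC_NE, CC_EQ };
    struct SketchBranch { int Target, Fallthrough; SketchCC CC; };

    // If the "true" destination immediately follows the current block in
    // layout, conditionally branch to the "false" destination on the
    // inverted condition and let the "true" path fall through.
    SketchBranch pickBranch(int TrueBB, int FalseBB, int LayoutSucc) {
      if (TrueBB == LayoutSucc)
        return {FalseBB, TrueBB, CC_EQ};   // inverted condition
      return {TrueBB, FalseBB, CC_NE};     // plain condition
    }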
@@ -1249,6 +1313,10 @@ bool ARMFastISel::SelectSIToFP(const Instruction *I) {
if (!isTypeLegal(Ty, DstVT))
return false;
+ // FIXME: Handle sign-extension where necessary.
+ if (!I->getOperand(0)->getType()->isIntegerTy(32))
+ return false;
+
unsigned Op = getRegForValue(I->getOperand(0));
if (Op == 0) return false;
@@ -1474,7 +1542,7 @@ bool ARMFastISel::ProcessCallArgs(SmallVectorImpl<Value*> &Args,
CallingConv::ID CC,
unsigned &NumBytes) {
SmallVector<CCValAssign, 16> ArgLocs;
- CCState CCInfo(CC, false, TM, ArgLocs, *Context);
+ CCState CCInfo(CC, false, *FuncInfo.MF, TM, ArgLocs, *Context);
CCInfo.AnalyzeCallOperands(ArgVTs, ArgFlags, CCAssignFnForCall(CC, false));
// Get a count of how many bytes are to be pushed on the stack.
@@ -1587,7 +1655,7 @@ bool ARMFastISel::FinishCall(MVT RetVT, SmallVectorImpl<unsigned> &UsedRegs,
// Now the return value.
if (RetVT != MVT::isVoid) {
SmallVector<CCValAssign, 16> RVLocs;
- CCState CCInfo(CC, false, TM, RVLocs, *Context);
+ CCState CCInfo(CC, false, *FuncInfo.MF, TM, RVLocs, *Context);
CCInfo.AnalyzeCallResult(RetVT, CCAssignFnForCall(CC, true));
// Copy all of the result registers out of their specified physreg.
@@ -1643,7 +1711,7 @@ bool ARMFastISel::SelectRet(const Instruction *I) {
// Analyze operands of the call, assigning locations to each operand.
SmallVector<CCValAssign, 16> ValLocs;
- CCState CCInfo(CC, F.isVarArg(), TM, ValLocs, I->getContext());
+ CCState CCInfo(CC, F.isVarArg(), *FuncInfo.MF, TM, ValLocs, I->getContext());
CCInfo.AnalyzeReturn(Outs, CCAssignFnForCall(CC, true /* is Ret */));
const Value *RV = Ret->getOperand(0);
@@ -1717,9 +1785,6 @@ bool ARMFastISel::ARMEmitLibcall(const Instruction *I, RTLIB::Libcall Call) {
else if (!isTypeLegal(RetTy, RetVT))
return false;
- // For now we're using BLX etc on the assumption that we have v5t ops.
- if (!Subtarget->hasV5TOps()) return false;
-
// TODO: For now if we have long calls specified we don't handle the call.
if (EnableARMLongCalls) return false;
@@ -1757,7 +1822,7 @@ bool ARMFastISel::ARMEmitLibcall(const Instruction *I, RTLIB::Libcall Call) {
if (!ProcessCallArgs(Args, ArgRegs, ArgVTs, ArgFlags, RegArgs, CC, NumBytes))
return false;
- // Issue the call, BLXr9 for darwin, BLX otherwise. This uses V5 ops.
+ // Issue the call, BLr9 for darwin, BL otherwise.
// TODO: Turn this into the table of arm call ops.
MachineInstrBuilder MIB;
unsigned CallOpc = ARMSelectCallOp(NULL);
@@ -1793,9 +1858,9 @@ bool ARMFastISel::SelectCall(const Instruction *I) {
// Can't handle inline asm or worry about intrinsics yet.
if (isa<InlineAsm>(Callee) || isa<IntrinsicInst>(CI)) return false;
- // Only handle global variable Callees that are direct calls.
+ // Only handle global variable Callees.
const GlobalValue *GV = dyn_cast<GlobalValue>(Callee);
- if (!GV || Subtarget->GVIsIndirectSymbol(GV, TM.getRelocationModel()))
+ if (!GV)
return false;
// Check the calling convention.
@@ -1818,13 +1883,9 @@ bool ARMFastISel::SelectCall(const Instruction *I) {
else if (!isTypeLegal(RetTy, RetVT))
return false;
- // For now we're using BLX etc on the assumption that we have v5t ops.
- // TODO: Maybe?
- if (!Subtarget->hasV5TOps()) return false;
-
// TODO: For now if we have long calls specified we don't handle the call.
if (EnableARMLongCalls) return false;
-
+
// Set up the argument vectors.
SmallVector<Value*, 8> Args;
SmallVector<unsigned, 8> ArgRegs;
@@ -1873,7 +1934,7 @@ bool ARMFastISel::SelectCall(const Instruction *I) {
if (!ProcessCallArgs(Args, ArgRegs, ArgVTs, ArgFlags, RegArgs, CC, NumBytes))
return false;
- // Issue the call, BLXr9 for darwin, BLX otherwise. This uses V5 ops.
+ // Issue the call, BLr9 for darwin, BL otherwise.
// TODO: Turn this into the table of arm call ops.
MachineInstrBuilder MIB;
unsigned CallOpc = ARMSelectCallOp(GV);
@@ -1888,7 +1949,7 @@ bool ARMFastISel::SelectCall(const Instruction *I) {
MIB = AddDefaultPred(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
TII.get(CallOpc))
.addGlobalAddress(GV, 0, 0));
-
+
// Add implicit physical register uses to the call.
for (unsigned i = 0, e = RegArgs.size(); i != e; ++i)
MIB.addReg(RegArgs[i]);
@@ -1904,6 +1965,79 @@ bool ARMFastISel::SelectCall(const Instruction *I) {
}
+bool ARMFastISel::SelectIntCast(const Instruction *I) {
+ // On ARM, in general, integer casts don't involve legal types; this code
+ // handles promotable integers. The high bits for a type smaller than
+ // the register size are assumed to be undefined.
+ const Type *DestTy = I->getType();
+ Value *Op = I->getOperand(0);
+ const Type *SrcTy = Op->getType();
+
+ EVT SrcVT, DestVT;
+ SrcVT = TLI.getValueType(SrcTy, true);
+ DestVT = TLI.getValueType(DestTy, true);
+
+ if (isa<TruncInst>(I)) {
+ if (SrcVT != MVT::i32 && SrcVT != MVT::i16 && SrcVT != MVT::i8)
+ return false;
+ if (DestVT != MVT::i16 && DestVT != MVT::i8 && DestVT != MVT::i1)
+ return false;
+
+ unsigned SrcReg = getRegForValue(Op);
+ if (!SrcReg) return false;
+
+ // Because the high bits are undefined, a truncate doesn't generate
+ // any code.
+ UpdateValueMap(I, SrcReg);
+ return true;
+ }
+ if (DestVT != MVT::i32 && DestVT != MVT::i16 && DestVT != MVT::i8)
+ return false;
+
+ unsigned Opc;
+ bool isZext = isa<ZExtInst>(I);
+ bool isBoolZext = false;
+ if (!SrcVT.isSimple())
+ return false;
+ switch (SrcVT.getSimpleVT().SimpleTy) {
+ default: return false;
+ case MVT::i16:
+ if (isZext)
+ Opc = isThumb ? ARM::t2UXTHr : ARM::UXTHr;
+ else
+ Opc = isThumb ? ARM::t2SXTHr : ARM::SXTHr;
+ break;
+ case MVT::i8:
+ if (isZext)
+ Opc = isThumb ? ARM::t2UXTBr : ARM::UXTBr;
+ else
+ Opc = isThumb ? ARM::t2SXTBr : ARM::SXTBr;
+ break;
+ case MVT::i1:
+ if (isZext) {
+ Opc = isThumb ? ARM::t2ANDri : ARM::ANDri;
+ isBoolZext = true;
+ break;
+ }
+ return false;
+ }
+
+ // FIXME: We could save an instruction in many cases by special-casing
+ // load instructions.
+ unsigned SrcReg = getRegForValue(Op);
+ if (!SrcReg) return false;
+
+ unsigned DestReg = createResultReg(TLI.getRegClassFor(MVT::i32));
+ MachineInstrBuilder MIB;
+ MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(Opc), DestReg)
+ .addReg(SrcReg);
+ if (isBoolZext)
+ MIB.addImm(1);
+ AddOptionalDefs(MIB);
+ UpdateValueMap(I, DestReg);
+ return true;
+}
+
// TODO: SoftFP support.
bool ARMFastISel::TargetSelectInstruction(const Instruction *I) {
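Note: SelectIntCast above handles the promotable integer casts: truncation is free because the high bits are defined to be garbage, zero/sign extension from i8 and i16 use the UXTB/SXTB and UXTH/SXTH register forms (t2* variants in Thumb2), and zext from i1 masks with AND #1; sext from i1 is left unhandled. A condensed, self-contained restatement of the opcode choice (illustrative enum, not LLVM types):

    enum CastOpc { UXTH, SXTH, UXTB, SXTB, AND_IMM1, NONE, UNSUPPORTED };

    CastOpc pickIntCastOpc(unsigned SrcBits, bool IsZExt, bool IsTrunc) {
      if (IsTrunc)        return NONE;                    // high bits stay undefined
      if (SrcBits == 16)  return IsZExt ? UXTH : SXTH;
      if (SrcBits == 8)   return IsZExt ? UXTB : SXTB;
      if (SrcBits == 1)   return IsZExt ? AND_IMM1 : UNSUPPORTED;
      return UNSUPPORTED;
    }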
@@ -1941,6 +2075,10 @@ bool ARMFastISel::TargetSelectInstruction(const Instruction *I) {
return SelectSelect(I);
case Instruction::Ret:
return SelectRet(I);
+ case Instruction::Trunc:
+ case Instruction::ZExt:
+ case Instruction::SExt:
+ return SelectIntCast(I);
default: break;
}
return false;