Diffstat (limited to 'lib/CodeGen/SelectionDAG/FunctionLoweringInfo.cpp')
-rw-r--r--  lib/CodeGen/SelectionDAG/FunctionLoweringInfo.cpp  117
1 file changed, 74 insertions(+), 43 deletions(-)
diff --git a/lib/CodeGen/SelectionDAG/FunctionLoweringInfo.cpp b/lib/CodeGen/SelectionDAG/FunctionLoweringInfo.cpp
index ae124e8..86b9542 100644
--- a/lib/CodeGen/SelectionDAG/FunctionLoweringInfo.cpp
+++ b/lib/CodeGen/SelectionDAG/FunctionLoweringInfo.cpp
@@ -36,6 +36,7 @@
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/Target/TargetRegisterInfo.h"
+#include "llvm/Target/TargetSubtargetInfo.h"
#include <algorithm>
using namespace llvm;
@@ -55,58 +56,71 @@ static bool isUsedOutsideOfDefiningBlock(const Instruction *I) {
return false;
}
+static ISD::NodeType getPreferredExtendForValue(const Value *V) {
+  // Look at the users of the value: if more of the compare instructions that
+  // use it have signed predicates than unsigned ones, prefer SIGN_EXTEND.
+  //
+  // This optimization removes some redundant sign or zero extension
+  // instructions and eventually exposes more machine CSE opportunities.
+ ISD::NodeType ExtendKind = ISD::ANY_EXTEND;
+ unsigned NumOfSigned = 0, NumOfUnsigned = 0;
+ for (const User *U : V->users()) {
+ if (const auto *CI = dyn_cast<CmpInst>(U)) {
+ NumOfSigned += CI->isSigned();
+ NumOfUnsigned += CI->isUnsigned();
+ }
+ }
+ if (NumOfSigned > NumOfUnsigned)
+ ExtendKind = ISD::SIGN_EXTEND;
+
+ return ExtendKind;
+}
+
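A minimal standalone sketch of the heuristic added above (the driver code and names are hypothetical; it builds a value with two signed and one unsigned compare user and applies the same counting rule, assuming the program is linked against the LLVM libraries):

    #include "llvm/IR/IRBuilder.h"
    #include "llvm/IR/Instructions.h"
    #include "llvm/IR/LLVMContext.h"
    #include "llvm/IR/Module.h"
    #include "llvm/Support/raw_ostream.h"

    using namespace llvm;

    int main() {
      LLVMContext Ctx;
      Module M("demo", Ctx);
      FunctionType *FT =
          FunctionType::get(Type::getVoidTy(Ctx), {Type::getInt32Ty(Ctx)}, false);
      Function *F = Function::Create(FT, Function::ExternalLinkage, "f", &M);
      BasicBlock *BB = BasicBlock::Create(Ctx, "entry", F);
      IRBuilder<> B(BB);

      Value *Arg = &*F->arg_begin();
      B.CreateICmpSLT(Arg, B.getInt32(0));   // signed predicate
      B.CreateICmpSGT(Arg, B.getInt32(10));  // signed predicate
      B.CreateICmpULT(Arg, B.getInt32(5));   // unsigned predicate
      B.CreateRetVoid();

      // Same counting rule as getPreferredExtendForValue().
      unsigned NumOfSigned = 0, NumOfUnsigned = 0;
      for (const User *U : Arg->users())
        if (const auto *CI = dyn_cast<CmpInst>(U)) {
          NumOfSigned += CI->isSigned();
          NumOfUnsigned += CI->isUnsigned();
        }
      // 2 signed > 1 unsigned, so SIGN_EXTEND would be preferred for the argument.
      outs() << "prefer sign_extend: "
             << (NumOfSigned > NumOfUnsigned ? "yes" : "no") << "\n";
    }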
void FunctionLoweringInfo::set(const Function &fn, MachineFunction &mf,
SelectionDAG *DAG) {
- const TargetLowering *TLI = TM.getTargetLowering();
-
Fn = &fn;
MF = &mf;
+ TLI = MF->getSubtarget().getTargetLowering();
RegInfo = &MF->getRegInfo();
// Check whether the function can return without sret-demotion.
SmallVector<ISD::OutputArg, 4> Outs;
GetReturnInfo(Fn->getReturnType(), Fn->getAttributes(), Outs, *TLI);
CanLowerReturn = TLI->CanLowerReturn(Fn->getCallingConv(), *MF,
- Fn->isVarArg(),
- Outs, Fn->getContext());
+ Fn->isVarArg(), Outs, Fn->getContext());
// Initialize the mapping of values to registers. This is only set up for
// instruction values that are used outside of the block that defines
// them.
Function::const_iterator BB = Fn->begin(), EB = Fn->end();
- for (BasicBlock::const_iterator I = BB->begin(), E = BB->end(); I != E; ++I)
- if (const AllocaInst *AI = dyn_cast<AllocaInst>(I)) {
- // Don't fold inalloca allocas or other dynamic allocas into the initial
- // stack frame allocation, even if they are in the entry block.
- if (!AI->isStaticAlloca())
- continue;
-
- if (const ConstantInt *CUI = dyn_cast<ConstantInt>(AI->getArraySize())) {
- Type *Ty = AI->getAllocatedType();
- uint64_t TySize = TLI->getDataLayout()->getTypeAllocSize(Ty);
- unsigned Align =
- std::max((unsigned)TLI->getDataLayout()->getPrefTypeAlignment(Ty),
- AI->getAlignment());
-
- TySize *= CUI->getZExtValue(); // Get total allocated size.
- if (TySize == 0) TySize = 1; // Don't create zero-sized stack objects.
-
- StaticAllocaMap[AI] =
- MF->getFrameInfo()->CreateStackObject(TySize, Align, false, AI);
- }
- }
-
for (; BB != EB; ++BB)
for (BasicBlock::const_iterator I = BB->begin(), E = BB->end();
I != E; ++I) {
- // Look for dynamic allocas.
if (const AllocaInst *AI = dyn_cast<AllocaInst>(I)) {
- if (!AI->isStaticAlloca()) {
+ // Static allocas can be folded into the initial stack frame adjustment.
+ if (AI->isStaticAlloca()) {
+ const ConstantInt *CUI = cast<ConstantInt>(AI->getArraySize());
+ Type *Ty = AI->getAllocatedType();
+ uint64_t TySize = TLI->getDataLayout()->getTypeAllocSize(Ty);
+ unsigned Align =
+ std::max((unsigned)TLI->getDataLayout()->getPrefTypeAlignment(Ty),
+ AI->getAlignment());
+
+ TySize *= CUI->getZExtValue(); // Get total allocated size.
+ if (TySize == 0) TySize = 1; // Don't create zero-sized stack objects.
+
+ StaticAllocaMap[AI] =
+ MF->getFrameInfo()->CreateStackObject(TySize, Align, false, AI);
+
+ } else {
unsigned Align = std::max(
(unsigned)TLI->getDataLayout()->getPrefTypeAlignment(
AI->getAllocatedType()),
AI->getAlignment());
- unsigned StackAlign = TM.getFrameLowering()->getStackAlignment();
+ unsigned StackAlign =
+ MF->getSubtarget().getFrameLowering()->getStackAlignment();
if (Align <= StackAlign)
Align = 0;
// Inform the Frame Information that we have variable-sized objects.
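To illustrate the sizing rule used for static allocas above, here is a standalone sketch (all names are made up; the real code queries DataLayout and MachineFrameInfo): for an alloca of a 16-byte type with a constant array size of 1, the stack object is 16 bytes and the alignment is the larger of the preferred type alignment and the alignment written on the alloca.

    #include <algorithm>
    #include <cstdint>
    #include <cstdio>

    // Sketch of the static-alloca sizing rule; hypothetical helper names.
    uint64_t stackObjectSize(uint64_t TypeAllocSize, uint64_t ArraySize) {
      uint64_t Size = TypeAllocSize * ArraySize; // total allocated size
      return Size == 0 ? 1 : Size;               // never create zero-sized objects
    }

    unsigned stackObjectAlign(unsigned PrefTypeAlign, unsigned AllocaAlign) {
      return std::max(PrefTypeAlign, AllocaAlign);
    }

    int main() {
      // e.g. a 16-byte aggregate, array size 1, preferred align 4, alloca align 8:
      std::printf("size=%llu align=%u\n",
                  (unsigned long long)stackObjectSize(16, 1),
                  stackObjectAlign(4, 8)); // prints size=16 align=8
    }

For the dynamic case, an alignment no larger than the target stack alignment is recorded as 0, meaning the default stack alignment is sufficient.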
@@ -126,9 +140,9 @@ void FunctionLoweringInfo::set(const Function &fn, MachineFunction &mf,
if (Op.Type == InlineAsm::isClobber) {
// Clobbers don't have SDValue operands, hence SDValue().
TLI->ComputeConstraintToUse(Op, SDValue(), DAG);
- std::pair<unsigned, const TargetRegisterClass*> PhysReg =
- TLI->getRegForInlineAsmConstraint(Op.ConstraintCode,
- Op.ConstraintVT);
+ std::pair<unsigned, const TargetRegisterClass *> PhysReg =
+ TLI->getRegForInlineAsmConstraint(Op.ConstraintCode,
+ Op.ConstraintVT);
if (PhysReg.first == SP)
MF->getFrameInfo()->setHasInlineAsmWithSPAdjust(true);
}
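For reference, the kind of source that trips the stack-pointer clobber check is inline asm that names the stack pointer in its clobber list. A hedged C++ example, assuming an x86-64 target (other targets would spell the register differently, and newer compilers may warn about clobbering a reserved register):

    // Hypothetical example: listing the stack pointer as a clobber forces the
    // backend to assume the SP was adjusted, which is what
    // setHasInlineAsmWithSPAdjust(true) records.
    void clobbers_stack_pointer() {
      asm volatile("" ::: "rsp", "memory");
    }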
@@ -136,6 +150,21 @@ void FunctionLoweringInfo::set(const Function &fn, MachineFunction &mf,
}
}
+ // Look for calls to the @llvm.va_start intrinsic. We can omit some
+ // prologue boilerplate for variadic functions that don't examine their
+ // arguments.
+ if (const auto *II = dyn_cast<IntrinsicInst>(I)) {
+ if (II->getIntrinsicID() == Intrinsic::vastart)
+ MF->getFrameInfo()->setHasVAStart(true);
+ }
+
+    // If we have a musttail call in a variadic function, we need to ensure we

+ // forward implicit register parameters.
+ if (const auto *CI = dyn_cast<CallInst>(I)) {
+ if (CI->isMustTailCall() && Fn->isVarArg())
+ MF->getFrameInfo()->setHasMustTailInVarArgFunc(true);
+ }
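At the C++ level, the va_start detection distinguishes roughly the following two cases (an illustrative sketch in plain C++):

    #include <cstdarg>

    // A variadic function that examines its arguments: the va_start call lowers
    // to @llvm.va_start, so setHasVAStart(true) fires and the full varargs
    // prologue is emitted.
    int sum(int n, ...) {
      va_list ap;
      va_start(ap, n);
      int total = 0;
      for (int i = 0; i < n; ++i)
        total += va_arg(ap, int);
      va_end(ap);
      return total;
    }

    // A variadic function that never touches its variadic arguments needs none
    // of that prologue boilerplate, which is what the hasVAStart flag lets the
    // target skip.
    int first_only(int n, ...) { return n; }

The musttail case arrives as an IR-level musttail call inside a varargs function; recording it here lets the target forward the implicit register parameters to the tail callee.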
+
// Mark values used outside their block as exported, by allocating
// a virtual register for them.
if (isUsedOutsideOfDefiningBlock(I))
@@ -166,13 +195,16 @@ void FunctionLoweringInfo::set(const Function &fn, MachineFunction &mf,
StaticAllocaMap.find(AI);
if (SI != StaticAllocaMap.end()) { // Check for VLAs.
int FI = SI->second;
- MMI.setVariableDbgInfo(DI->getVariable(),
+ MMI.setVariableDbgInfo(DI->getVariable(), DI->getExpression(),
FI, DI->getDebugLoc());
}
}
}
}
}
+
+ // Decide the preferred extend type for a value.
+ PreferredExtendType[I] = getPreferredExtendForValue(I);
}
// Create an initial MachineBasicBlock for each LLVM BasicBlock in F. This
@@ -208,7 +240,7 @@ void FunctionLoweringInfo::set(const Function &fn, MachineFunction &mf,
for (unsigned vti = 0, vte = ValueVTs.size(); vti != vte; ++vti) {
EVT VT = ValueVTs[vti];
unsigned NumRegisters = TLI->getNumRegisters(Fn->getContext(), VT);
- const TargetInstrInfo *TII = MF->getTarget().getInstrInfo();
+ const TargetInstrInfo *TII = MF->getSubtarget().getInstrInfo();
for (unsigned i = 0; i != NumRegisters; ++i)
BuildMI(MBB, DL, TII->get(TargetOpcode::PHI), PHIReg + i);
PHIReg += NumRegisters;
@@ -241,12 +273,13 @@ void FunctionLoweringInfo::clear() {
ArgDbgValues.clear();
ByValArgFrameIndexMap.clear();
RegFixups.clear();
+ PreferredExtendType.clear();
}
/// CreateReg - Allocate a single virtual register for the given type.
unsigned FunctionLoweringInfo::CreateReg(MVT VT) {
- return RegInfo->
- createVirtualRegister(TM.getTargetLowering()->getRegClassFor(VT));
+ return RegInfo->createVirtualRegister(
+ MF->getSubtarget().getTargetLowering()->getRegClassFor(VT));
}
/// CreateRegs - Allocate the appropriate number of virtual registers of
@@ -257,7 +290,7 @@ unsigned FunctionLoweringInfo::CreateReg(MVT VT) {
/// will assign registers for each member or element.
///
unsigned FunctionLoweringInfo::CreateRegs(Type *Ty) {
- const TargetLowering *TLI = TM.getTargetLowering();
+ const TargetLowering *TLI = MF->getSubtarget().getTargetLowering();
SmallVector<EVT, 4> ValueVTs;
ComputeValueVTs(*TLI, Ty, ValueVTs);
@@ -306,8 +339,6 @@ void FunctionLoweringInfo::ComputePHILiveOutRegInfo(const PHINode *PN) {
if (!Ty->isIntegerTy() || Ty->isVectorTy())
return;
- const TargetLowering *TLI = TM.getTargetLowering();
-
SmallVector<EVT, 1> ValueVTs;
ComputeValueVTs(*TLI, Ty, ValueVTs);
assert(ValueVTs.size() == 1 &&
@@ -452,7 +483,7 @@ void llvm::AddCatchInfo(const CallInst &I, MachineModuleInfo *MMI,
// Gather all the type infos for this landing pad and pass them along to
// MachineModuleInfo.
- std::vector<const GlobalVariable *> TyInfo;
+ std::vector<const GlobalValue *> TyInfo;
unsigned N = I.getNumArgOperands();
for (unsigned i = N - 1; i > 1; --i) {
@@ -510,14 +541,14 @@ void llvm::AddLandingPadInfo(const LandingPadInst &I, MachineModuleInfo &MMI,
Value *Val = I.getClause(i - 1);
if (I.isCatch(i - 1)) {
MMI.addCatchTypeInfo(MBB,
- dyn_cast<GlobalVariable>(Val->stripPointerCasts()));
+ dyn_cast<GlobalValue>(Val->stripPointerCasts()));
} else {
// Add filters in a list.
Constant *CVal = cast<Constant>(Val);
- SmallVector<const GlobalVariable*, 4> FilterList;
+ SmallVector<const GlobalValue*, 4> FilterList;
for (User::op_iterator
II = CVal->op_begin(), IE = CVal->op_end(); II != IE; ++II)
- FilterList.push_back(cast<GlobalVariable>((*II)->stripPointerCasts()));
+ FilterList.push_back(cast<GlobalValue>((*II)->stripPointerCasts()));
MMI.addFilterTypeInfo(MBB, FilterList);
}
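The widening from GlobalVariable to GlobalValue in the catch and filter typeinfo paths presumably exists so that typeinfo symbols which strip to an alias are still accepted: a GlobalAlias is a GlobalValue but not a GlobalVariable. A minimal sketch of the distinction (the helper name is illustrative, not the actual LLVM API):

    #include "llvm/IR/GlobalValue.h"
    #include "llvm/IR/Value.h"
    #include "llvm/Support/Casting.h"

    using namespace llvm;

    const GlobalValue *getEHTypeInfo(const Value *V) {
      const Value *Stripped = V->stripPointerCasts();
      // An aliased typeinfo symbol strips to a GlobalAlias, which passes this
      // check but would have made the old dyn_cast<GlobalVariable> return null.
      return dyn_cast<GlobalValue>(Stripped);
    }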