Diffstat (limited to 'lib')
-rw-r--r--  lib/Target/ARM/ARMISelLowering.cpp        |  10
-rw-r--r--  lib/Target/ARM/ARMISelLowering.h          |   2
-rw-r--r--  lib/Target/X86/X86ISelLowering.cpp        |  13
-rw-r--r--  lib/Target/X86/X86ISelLowering.h          |   2
-rw-r--r--  lib/Transforms/Scalar/CodeGenPrepare.cpp  | 103
5 files changed, 4 insertions, 126 deletions
diff --git a/lib/Target/ARM/ARMISelLowering.cpp b/lib/Target/ARM/ARMISelLowering.cpp
index 891ea62..35a9bf7 100644
--- a/lib/Target/ARM/ARMISelLowering.cpp
+++ b/lib/Target/ARM/ARMISelLowering.cpp
@@ -1805,16 +1805,6 @@ bool ARMTargetLowering::isUsedByReturnOnly(SDNode *N) const {
  return HasRet;
}

-bool ARMTargetLowering::mayBeEmittedAsTailCall(CallInst *CI) const {
-  if (!EnableARMTailCalls)
-    return false;
-
-  if (!CI->isTailCall())
-    return false;
-
-  return !Subtarget->isThumb1Only();
-}
-
// ConstantPool, JumpTable, GlobalAddress, and ExternalSymbol are lowered as
// their target counterpart wrapped in the ARMISD::Wrapper node. Suppose N is
// one of the above mentioned nodes. It has to be wrapped because otherwise
diff --git a/lib/Target/ARM/ARMISelLowering.h b/lib/Target/ARM/ARMISelLowering.h
index e09c1da..402e1c6 100644
--- a/lib/Target/ARM/ARMISelLowering.h
+++ b/lib/Target/ARM/ARMISelLowering.h
@@ -457,8 +457,6 @@ namespace llvm {
    virtual bool isUsedByReturnOnly(SDNode *N) const;

-    virtual bool mayBeEmittedAsTailCall(CallInst *CI) const;
-
    SDValue getARMCmp(SDValue LHS, SDValue RHS, ISD::CondCode CC,
                      SDValue &ARMcc, SelectionDAG &DAG, DebugLoc dl) const;
    SDValue getVFPCmp(SDValue LHS, SDValue RHS,
diff --git a/lib/Target/X86/X86ISelLowering.cpp b/lib/Target/X86/X86ISelLowering.cpp
index 58acf4f..576c879 100644
--- a/lib/Target/X86/X86ISelLowering.cpp
+++ b/lib/Target/X86/X86ISelLowering.cpp
@@ -45,7 +45,6 @@
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/VectorExtras.h"
-#include "llvm/Support/CallSite.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/Dwarf.h"
#include "llvm/Support/ErrorHandling.h"
@@ -1596,18 +1595,6 @@ static bool IsTailCallConvention(CallingConv::ID CC) {
  return (CC == CallingConv::Fast || CC == CallingConv::GHC);
}

-bool X86TargetLowering::mayBeEmittedAsTailCall(CallInst *CI) const {
-  if (!CI->isTailCall())
-    return false;
-
-  CallSite CS(CI);
-  CallingConv::ID CalleeCC = CS.getCallingConv();
-  if (!IsTailCallConvention(CalleeCC) && CalleeCC != CallingConv::C)
-    return false;
-
-  return true;
-}
-
/// FuncIsMadeTailCallSafe - Return true if the function is being made into
/// a tailcall target by changing its ABI.
static bool FuncIsMadeTailCallSafe(CallingConv::ID CC) {
diff --git a/lib/Target/X86/X86ISelLowering.h b/lib/Target/X86/X86ISelLowering.h
index 6301057..7c1b13a 100644
--- a/lib/Target/X86/X86ISelLowering.h
+++ b/lib/Target/X86/X86ISelLowering.h
@@ -843,8 +843,6 @@ namespace llvm {
    virtual bool isUsedByReturnOnly(SDNode *N) const;

-    virtual bool mayBeEmittedAsTailCall(CallInst *CI) const;
-
    virtual EVT
    getTypeForExtArgOrReturn(LLVMContext &Context, EVT VT,
                             ISD::NodeType ExtendKind) const;
diff --git a/lib/Transforms/Scalar/CodeGenPrepare.cpp b/lib/Transforms/Scalar/CodeGenPrepare.cpp
index eb80f5c..f0babcc 100644
--- a/lib/Transforms/Scalar/CodeGenPrepare.cpp
+++ b/lib/Transforms/Scalar/CodeGenPrepare.cpp
@@ -47,17 +47,16 @@ using namespace llvm;
using namespace llvm::PatternMatch;

STATISTIC(NumBlocksElim, "Number of blocks eliminated");
-STATISTIC(NumPHIsElim, "Number of trivial PHIs eliminated");
-STATISTIC(NumGEPsElim, "Number of GEPs converted to casts");
+STATISTIC(NumPHIsElim, "Number of trivial PHIs eliminated");
+STATISTIC(NumGEPsElim, "Number of GEPs converted to casts");
STATISTIC(NumCmpUses, "Number of uses of Cmp expressions replaced with uses of "
                      "sunken Cmps");
STATISTIC(NumCastUses, "Number of uses of Cast expressions replaced with uses "
                       "of sunken Casts");
STATISTIC(NumMemoryInsts, "Number of memory instructions whose address "
                          "computations were sunk");
-STATISTIC(NumExtsMoved, "Number of [s|z]ext instructions combined with loads");
-STATISTIC(NumExtUses, "Number of uses of [s|z]ext instructions optimized");
-STATISTIC(NumRetsDup, "Number of return instructions duplicated");
+STATISTIC(NumExtsMoved, "Number of [s|z]ext instructions combined with loads");
+STATISTIC(NumExtUses, "Number of uses of [s|z]ext instructions optimized");

static cl::opt<bool> DisableBranchOpts(
  "disable-cgp-branch-opts", cl::Hidden, cl::init(false),
@@ -105,7 +104,6 @@ namespace {
    bool OptimizeCallInst(CallInst *CI);
    bool MoveExtToFormExtLoad(Instruction *I);
    bool OptimizeExtUses(Instruction *I);
-    bool DupRetToEnableTailCallOpts(ReturnInst *RI);
  };
}
@@ -549,96 +547,6 @@ bool CodeGenPrepare::OptimizeCallInst(CallInst *CI) {
  return Simplifier.fold(CI, TD);
}

-/// DupRetToEnableTailCallOpts - Look for opportunities to duplicate return
-/// instructions to the predecessor to enable tail call optimizations. The
-/// case it is currently looking for is:
-/// bb0:
-///   %tmp0 = tail call i32 @f0()
-///   br label %return
-/// bb1:
-///   %tmp1 = tail call i32 @f1()
-///   br label %return
-/// bb2:
-///   %tmp2 = tail call i32 @f2()
-///   br label %return
-/// return:
-///   %retval = phi i32 [ %tmp0, %bb0 ], [ %tmp1, %bb1 ], [ %tmp2, %bb2 ]
-///   ret i32 %retval
-///
-/// =>
-///
-/// bb0:
-///   %tmp0 = tail call i32 @f0()
-///   ret i32 %tmp0
-/// bb1:
-///   %tmp1 = tail call i32 @f1()
-///   ret i32 %tmp1
-/// bb2:
-///   %tmp2 = tail call i32 @f2()
-///   ret i32 %tmp2
-///
-bool CodeGenPrepare::DupRetToEnableTailCallOpts(ReturnInst *RI) {
-  Value *V = RI->getReturnValue();
-  if (!V)
-    return false;
-
-  if (PHINode *PN = dyn_cast<PHINode>(V)) {
-    BasicBlock *BB = RI->getParent();
-    if (PN->getParent() != BB)
-      return false;
-
-    // It's not safe to eliminate the sign / zero extension of the return value.
-    // See llvm::isInTailCallPosition().
-    const Function *F = BB->getParent();
-    unsigned CallerRetAttr = F->getAttributes().getRetAttributes();
-    if ((CallerRetAttr & Attribute::ZExt) || (CallerRetAttr & Attribute::SExt))
-      return false;
-
-    // Make sure there are no instructions between PHI and return.
-    BasicBlock::iterator BI = PN;
-    do { ++BI; } while (isa<DbgInfoIntrinsic>(BI));
-    if (&*BI != RI)
-      return false;
-
-    /// Only dup the ReturnInst if the CallInst is likely to be emitted as a
-    /// tail call.
-    SmallVector<CallInst*, 4> TailCalls;
-    for (unsigned I = 0, E = PN->getNumIncomingValues(); I != E; ++I) {
-      CallInst *CI = dyn_cast<CallInst>(PN->getIncomingValue(I));
-      if (CI && TLI->mayBeEmittedAsTailCall(CI))
-        TailCalls.push_back(CI);
-    }
-
-    bool Changed = false;
-    for (unsigned i = 0, e = TailCalls.size(); i != e; ++i) {
-      CallInst *CI = TailCalls[i];
-      CallSite CS(CI);
-
-      // Conservatively require the attributes of the call to match those of
-      // the return. Ignore noalias because it doesn't affect the call sequence.
-      unsigned CalleeRetAttr = CS.getAttributes().getRetAttributes();
-      if ((CalleeRetAttr ^ CallerRetAttr) & ~Attribute::NoAlias)
-        continue;
-
-      // Make sure the call instruction is followed by an unconditional branch
-      // to the return block.
-      BasicBlock *CallBB = CI->getParent();
-      BranchInst *BI = dyn_cast<BranchInst>(CallBB->getTerminator());
-      if (!BI || !BI->isUnconditional() || BI->getSuccessor(0) != BB)
-        continue;
-
-      // Duplicate the return into CallBB.
-      (void)FoldReturnIntoUncondBranch(RI, BB, CallBB);
-      Changed = true;
-      ++NumRetsDup;
-    }
-
-    return Changed;
-  }
-
-  return false;
-}
-
//===----------------------------------------------------------------------===//
// Memory Optimization
//===----------------------------------------------------------------------===//
@@ -1062,9 +970,6 @@ bool CodeGenPrepare::OptimizeInst(Instruction *I) {
  if (CallInst *CI = dyn_cast<CallInst>(I))
    return OptimizeCallInst(CI);

-  if (ReturnInst *RI = dyn_cast<ReturnInst>(I))
-    return DupRetToEnableTailCallOpts(RI);
-
  return false;
}
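
For reference, the shape of function that the removed DupRetToEnableTailCallOpts code rewrote is sketched below as a complete IR module. This is a hypothetical reproducer, not a test carried by this commit: the names @caller, @f0, @f1 and @f2 are made up, and only the pattern (several tail calls whose results feed a single return PHI) comes from the doc comment deleted above. The removed transformation replaced each block's "br label %return" with a "ret" of that block's call result, leaving every tail call immediately before a return, the position about which the targets' now-removed mayBeEmittedAsTailCall hook was consulted.

; Hypothetical reproducer of the pattern the removed code looked for.
declare i32 @f0()
declare i32 @f1()
declare i32 @f2()

define i32 @caller(i32 %which) {
entry:
  switch i32 %which, label %bb0 [ i32 1, label %bb1
                                  i32 2, label %bb2 ]

bb0:
  %tmp0 = tail call i32 @f0()
  br label %return

bb1:
  %tmp1 = tail call i32 @f1()
  br label %return

bb2:
  %tmp2 = tail call i32 @f2()
  br label %return

return:
  %retval = phi i32 [ %tmp0, %bb0 ], [ %tmp1, %bb1 ], [ %tmp2, %bb2 ]
  ret i32 %retval
}

With this patch applied, CodeGenPrepare leaves the PHI-and-single-return form alone instead of duplicating the ret into bb0, bb1 and bb2.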