author     Bob Wilson <bob.wilson@apple.com>  2010-07-09 16:37:18 +0000
committer  Bob Wilson <bob.wilson@apple.com>  2010-07-09 16:37:18 +0000
commit     02266e29f9250d74c5ec720aff23add3410ae920 (patch)
tree       eab38ddb96c513afb1c30f74a30992a9ceaf3e50
parent     9085fcab8276a8aaba33dc78bec2cdb0845351ba (diff)
--- Reverse-merging r107947 into '.':
U    utils/TableGen/FastISelEmitter.cpp
--- Reverse-merging r107943 into '.':
U    test/CodeGen/X86/fast-isel.ll
U    test/CodeGen/X86/fast-isel-loads.ll
U    include/llvm/Target/TargetLowering.h
U    include/llvm/Support/PassNameParser.h
U    include/llvm/CodeGen/FunctionLoweringInfo.h
U    include/llvm/CodeGen/CallingConvLower.h
U    include/llvm/CodeGen/FastISel.h
U    include/llvm/CodeGen/SelectionDAGISel.h
U    lib/CodeGen/LLVMTargetMachine.cpp
U    lib/CodeGen/CallingConvLower.cpp
U    lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
U    lib/CodeGen/SelectionDAG/FunctionLoweringInfo.cpp
U    lib/CodeGen/SelectionDAG/FastISel.cpp
U    lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp
U    lib/CodeGen/SelectionDAG/ScheduleDAGSDNodes.cpp
U    lib/CodeGen/SelectionDAG/InstrEmitter.cpp
U    lib/CodeGen/SelectionDAG/TargetLowering.cpp
U    lib/Target/XCore/XCoreISelLowering.cpp
U    lib/Target/XCore/XCoreISelLowering.h
U    lib/Target/X86/X86ISelLowering.cpp
U    lib/Target/X86/X86FastISel.cpp
U    lib/Target/X86/X86ISelLowering.h

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@107987 91177308-0d34-0410-b5e6-96231b3b80d8
-rw-r--r--  include/llvm/CodeGen/CallingConvLower.h | 3
-rw-r--r--  include/llvm/CodeGen/FastISel.h | 35
-rw-r--r--  include/llvm/CodeGen/FunctionLoweringInfo.h | 10
-rw-r--r--  include/llvm/CodeGen/SelectionDAGISel.h | 13
-rw-r--r--  include/llvm/Support/PassNameParser.h | 1
-rw-r--r--  include/llvm/Target/TargetLowering.h | 13
-rw-r--r--  lib/CodeGen/CallingConvLower.cpp | 9
-rw-r--r--  lib/CodeGen/LLVMTargetMachine.cpp | 20
-rw-r--r--  lib/CodeGen/SelectionDAG/FastISel.cpp | 219
-rw-r--r--  lib/CodeGen/SelectionDAG/FunctionLoweringInfo.cpp | 8
-rw-r--r--  lib/CodeGen/SelectionDAG/InstrEmitter.cpp | 9
-rw-r--r--  lib/CodeGen/SelectionDAG/ScheduleDAGSDNodes.cpp | 13
-rw-r--r--  lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp | 113
-rw-r--r--  lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp | 195
-rw-r--r--  lib/CodeGen/SelectionDAG/TargetLowering.cpp | 60
-rw-r--r--  lib/Target/X86/X86FastISel.cpp | 259
-rw-r--r--  lib/Target/X86/X86ISelLowering.cpp | 5
-rw-r--r--  lib/Target/X86/X86ISelLowering.h | 3
-rw-r--r--  lib/Target/XCore/XCoreISelLowering.cpp | 5
-rw-r--r--  lib/Target/XCore/XCoreISelLowering.h | 3
-rw-r--r--  test/CodeGen/X86/fast-isel-loads.ll | 2
-rw-r--r--  test/CodeGen/X86/fast-isel.ll | 14
-rw-r--r--  utils/TableGen/FastISelEmitter.cpp | 4
23 files changed, 368 insertions, 648 deletions
diff --git a/include/llvm/CodeGen/CallingConvLower.h b/include/llvm/CodeGen/CallingConvLower.h
index 7911907..5ce59b8 100644
--- a/include/llvm/CodeGen/CallingConvLower.h
+++ b/include/llvm/CodeGen/CallingConvLower.h
@@ -188,7 +188,8 @@ public:
/// CheckReturn - Analyze the return values of a function, returning
/// true if the return can be performed without sret-demotion, and
/// false otherwise.
- bool CheckReturn(const SmallVectorImpl<ISD::OutputArg> &ArgsFlags,
+ bool CheckReturn(const SmallVectorImpl<EVT> &OutTys,
+ const SmallVectorImpl<ISD::ArgFlagsTy> &ArgsFlags,
CCAssignFn Fn);
/// AnalyzeCallOperands - Analyze the outgoing arguments to a call,
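For context, a target's CanLowerReturn override typically just forwards to this
CheckReturn. Below is a minimal sketch of that pattern, modeled on the targets
updated by this commit; the target name, RetCC_MyTarget, and the exact CCState
constructor shape are assumptions, not part of this change:

  bool MyTargetLowering::CanLowerReturn(CallingConv::ID CallConv, bool isVarArg,
                 const SmallVectorImpl<EVT> &OutTys,
                 const SmallVectorImpl<ISD::ArgFlagsTy> &ArgsFlags,
                 LLVMContext &Context) const {
    SmallVector<CCValAssign, 16> RVLocs;
    CCState CCInfo(CallConv, isVarArg, getTargetMachine(), RVLocs, Context);
    // True when all return values fit in registers; false forces sret-demotion.
    return CCInfo.CheckReturn(OutTys, ArgsFlags, RetCC_MyTarget);
  }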
diff --git a/include/llvm/CodeGen/FastISel.h b/include/llvm/CodeGen/FastISel.h
index 7f3a7c7..c5c457d 100644
--- a/include/llvm/CodeGen/FastISel.h
+++ b/include/llvm/CodeGen/FastISel.h
@@ -19,7 +19,6 @@
#include "llvm/ADT/SmallSet.h"
#endif
#include "llvm/CodeGen/ValueTypes.h"
-#include "llvm/CodeGen/MachineBasicBlock.h"
namespace llvm {
@@ -45,6 +44,7 @@ class TargetRegisterInfo;
/// lowering, but runs quickly.
class FastISel {
protected:
+ MachineBasicBlock *MBB;
DenseMap<const Value *, unsigned> LocalValueMap;
FunctionLoweringInfo &FuncInfo;
MachineRegisterInfo &MRI;
@@ -56,21 +56,23 @@ protected:
const TargetInstrInfo &TII;
const TargetLowering &TLI;
const TargetRegisterInfo &TRI;
- MachineInstr *LastLocalValue;
+ bool IsBottomUp;
public:
- /// getLastLocalValue - Return the position of the last instruction
- /// emitted for materializing constants for use in the current block.
- MachineInstr *getLastLocalValue() { return LastLocalValue; }
-
- /// setLastLocalValue - Update the position of the last instruction
- /// emitted for materializing constants for use in the current block.
- void setLastLocalValue(MachineInstr *I) { LastLocalValue = I; }
-
/// startNewBlock - Set the current block to which generated machine
/// instructions will be appended, and clear the local CSE map.
///
- void startNewBlock();
+ void startNewBlock(MachineBasicBlock *mbb) {
+ setCurrentBlock(mbb);
+ LocalValueMap.clear();
+ }
+
+ /// setCurrentBlock - Set the current block to which generated machine
+ /// instructions will be appended.
+ ///
+ void setCurrentBlock(MachineBasicBlock *mbb) {
+ MBB = mbb;
+ }
/// getCurDebugLoc() - Return current debug location information.
DebugLoc getCurDebugLoc() const { return DL; }
@@ -102,17 +104,6 @@ public:
/// index value.
std::pair<unsigned, bool> getRegForGEPIndex(const Value *V);
- /// recomputeInsertPt - Reset InsertPt to prepare for inserting instructions
- /// into the current block.
- void recomputeInsertPt();
-
- /// enterLocalValueArea - Prepare InsertPt to begin inserting instructions
- /// into the local value area and return the old insert position.
- MachineBasicBlock::iterator enterLocalValueArea();
-
- /// leaveLocalValueArea - Reset InsertPt to the given old insert position
- void leaveLocalValueArea(MachineBasicBlock::iterator OldInsertPt);
-
virtual ~FastISel();
protected:
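The split between these two entry points matters for callers: startNewBlock also
resets the local constant-CSE map, while setCurrentBlock only moves the insertion
block. A sketch of the intended calling pattern, mirroring the SelectionDAGISel
driver changes later in this diff:

  FastIS->startNewBlock(BB);      // entering a new IR block: clear local values
  // ... select instructions; a lowered call may split the machine block ...
  FastIS->setCurrentBlock(BB);    // resume appending into the trailing block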
diff --git a/include/llvm/CodeGen/FunctionLoweringInfo.h b/include/llvm/CodeGen/FunctionLoweringInfo.h
index c49d1ed..011d4261 100644
--- a/include/llvm/CodeGen/FunctionLoweringInfo.h
+++ b/include/llvm/CodeGen/FunctionLoweringInfo.h
@@ -25,7 +25,6 @@
#endif
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/CodeGen/ISDOpcodes.h"
-#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/Support/CallSite.h"
#include <vector>
@@ -81,15 +80,6 @@ public:
/// function arguments that are inserted after scheduling is completed.
SmallVector<MachineInstr*, 8> ArgDbgValues;
- /// RegFixups - Registers which need to be replaced after isel is done.
- DenseMap<unsigned, unsigned> RegFixups;
-
- /// MBB - The current block.
- MachineBasicBlock *MBB;
-
- /// MBB - The current insert position inside the current block.
- MachineBasicBlock::iterator InsertPt;
-
#ifndef NDEBUG
SmallSet<const Instruction *, 8> CatchInfoLost;
SmallSet<const Instruction *, 8> CatchInfoFound;
diff --git a/include/llvm/CodeGen/SelectionDAGISel.h b/include/llvm/CodeGen/SelectionDAGISel.h
index 01d05dd..1615994 100644
--- a/include/llvm/CodeGen/SelectionDAGISel.h
+++ b/include/llvm/CodeGen/SelectionDAGISel.h
@@ -280,14 +280,15 @@ private:
SDNode *MorphNode(SDNode *Node, unsigned TargetOpc, SDVTList VTs,
const SDValue *Ops, unsigned NumOps, unsigned EmitNodeInfo);
- void PrepareEHLandingPad();
+ void PrepareEHLandingPad(MachineBasicBlock *BB);
void SelectAllBasicBlocks(const Function &Fn);
- void FinishBasicBlock();
+ void FinishBasicBlock(MachineBasicBlock *BB);
- void SelectBasicBlock(BasicBlock::const_iterator Begin,
- BasicBlock::const_iterator End,
- bool &HadTailCall);
- void CodeGenAndEmitDAG();
+ MachineBasicBlock *SelectBasicBlock(MachineBasicBlock *BB,
+ BasicBlock::const_iterator Begin,
+ BasicBlock::const_iterator End,
+ bool &HadTailCall);
+ MachineBasicBlock *CodeGenAndEmitDAG(MachineBasicBlock *BB);
void LowerArguments(const BasicBlock *BB);
void ComputeLiveOutVRegInfo();
diff --git a/include/llvm/Support/PassNameParser.h b/include/llvm/Support/PassNameParser.h
index 42639a6..cdca978 100644
--- a/include/llvm/Support/PassNameParser.h
+++ b/include/llvm/Support/PassNameParser.h
@@ -69,7 +69,6 @@ public:
virtual void passRegistered(const PassInfo *P) {
if (ignorablePass(P) || !Opt) return;
if (findOption(P->getPassArgument()) != getNumOptions()) {
- return;
errs() << "Two passes with the same argument (-"
<< P->getPassArgument() << ") attempted to be registered!\n";
llvm_unreachable(0);
diff --git a/include/llvm/Target/TargetLowering.h b/include/llvm/Target/TargetLowering.h
index 47aa6d1..2d87d4d 100644
--- a/include/llvm/Target/TargetLowering.h
+++ b/include/llvm/Target/TargetLowering.h
@@ -24,7 +24,6 @@
#include "llvm/CallingConv.h"
#include "llvm/InlineAsm.h"
-#include "llvm/Attributes.h"
#include "llvm/CodeGen/SelectionDAGNodes.h"
#include "llvm/CodeGen/RuntimeLibcalls.h"
#include "llvm/ADT/APFloat.h"
@@ -1160,7 +1159,8 @@ public:
/// registers. If false is returned, an sret-demotion is performed.
///
virtual bool CanLowerReturn(CallingConv::ID CallConv, bool isVarArg,
- const SmallVectorImpl<ISD::OutputArg> &Outs,
+ const SmallVectorImpl<EVT> &OutTys,
+ const SmallVectorImpl<ISD::ArgFlagsTy> &ArgsFlags,
LLVMContext &Context) const
{
// Return true by default to get preexisting behavior.
@@ -1656,15 +1656,6 @@ protected:
/// optimization.
bool benefitFromCodePlacementOpt;
};
-
-/// GetReturnInfo - Given an LLVM IR type and return type attributes,
-/// compute the return value EVTs and flags, and optionally also
-/// the offsets, if the return value is being lowered to memory.
-void GetReturnInfo(const Type* ReturnType, Attributes attr,
- SmallVectorImpl<ISD::OutputArg> &Outs,
- const TargetLowering &TLI,
- SmallVectorImpl<uint64_t> *Offsets = 0);
-
} // end llvm namespace
#endif
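Callers that previously used the free function GetReturnInfo now build the two
parallel vectors themselves and pass both to CanLowerReturn. The shape, taken
from the SelectionDAGISel changes later in this diff (getReturnInfo becomes a
static helper in SelectionDAGBuilder.cpp):

  SmallVector<EVT, 4> OutVTs;
  SmallVector<ISD::ArgFlagsTy, 4> OutFlags;
  getReturnInfo(F.getReturnType(), F.getAttributes().getRetAttributes(),
                OutVTs, OutFlags, TLI);
  bool CanLower = TLI.CanLowerReturn(F.getCallingConv(), F.isVarArg(),
                                     OutVTs, OutFlags, F.getContext());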
diff --git a/lib/CodeGen/CallingConvLower.cpp b/lib/CodeGen/CallingConvLower.cpp
index 62ad817..5e47038 100644
--- a/lib/CodeGen/CallingConvLower.cpp
+++ b/lib/CodeGen/CallingConvLower.cpp
@@ -80,12 +80,13 @@ CCState::AnalyzeFormalArguments(const SmallVectorImpl<ISD::InputArg> &Ins,
/// CheckReturn - Analyze the return values of a function, returning true if
/// the return can be performed without sret-demotion, and false otherwise.
-bool CCState::CheckReturn(const SmallVectorImpl<ISD::OutputArg> &Outs,
+bool CCState::CheckReturn(const SmallVectorImpl<EVT> &OutTys,
+ const SmallVectorImpl<ISD::ArgFlagsTy> &ArgsFlags,
CCAssignFn Fn) {
// Determine which register each value should be copied into.
- for (unsigned i = 0, e = Outs.size(); i != e; ++i) {
- EVT VT = Outs[i].VT;
- ISD::ArgFlagsTy ArgFlags = Outs[i].Flags;
+ for (unsigned i = 0, e = OutTys.size(); i != e; ++i) {
+ EVT VT = OutTys[i];
+ ISD::ArgFlagsTy ArgFlags = ArgsFlags[i];
if (Fn(i, VT, VT, CCValAssign::Full, ArgFlags, *this))
return false;
}
diff --git a/lib/CodeGen/LLVMTargetMachine.cpp b/lib/CodeGen/LLVMTargetMachine.cpp
index bf3137e..d437370 100644
--- a/lib/CodeGen/LLVMTargetMachine.cpp
+++ b/lib/CodeGen/LLVMTargetMachine.cpp
@@ -329,15 +329,19 @@ bool LLVMTargetMachine::addCommonCodeGenPasses(PassManagerBase &PM,
if (OptLevel != CodeGenOpt::None)
PM.add(createOptimizePHIsPass());
- if (OptLevel != CodeGenOpt::None) {
- // With optimization, dead code should already be eliminated. However
- // there is one known exception: lowered code for arguments that are only
- // used by tail calls, where the tail calls reuse the incoming stack
- // arguments directly (see t11 in test/CodeGen/X86/sibcall.ll).
- PM.add(createDeadMachineInstructionElimPass());
- printAndVerify(PM, "After codegen DCE pass",
- /* allowDoubleDefs= */ true);
+ // Delete dead machine instructions regardless of optimization level.
+ //
+ // At -O0, fast-isel frequently creates dead instructions.
+ //
+ // With optimization, dead code should already be eliminated. However
+ // there is one known exception: lowered code for arguments that are only
+ // used by tail calls, where the tail calls reuse the incoming stack
+ // arguments directly (see t11 in test/CodeGen/X86/sibcall.ll).
+ PM.add(createDeadMachineInstructionElimPass());
+ printAndVerify(PM, "After codegen DCE pass",
+ /* allowDoubleDefs= */ true);
+ if (OptLevel != CodeGenOpt::None) {
PM.add(createOptimizeExtsPass());
if (!DisableMachineLICM)
PM.add(createMachineLICMPass());
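A concrete instance of the -O0 case the comment describes, as an assumed
illustration rather than output from this commit: fast-isel can materialize a
constant into a virtual register that later turns out to be unused,

  // %reg1024 = MOV32ri 42    <- constant materialized eagerly by fast-isel
  // (no remaining uses of %reg1024 once the block is selected)

and DeadMachineInstructionElim now sweeps such dead defs even at -O0.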
diff --git a/lib/CodeGen/SelectionDAG/FastISel.cpp b/lib/CodeGen/SelectionDAG/FastISel.cpp
index 02d11bb..230368f 100644
--- a/lib/CodeGen/SelectionDAG/FastISel.cpp
+++ b/lib/CodeGen/SelectionDAG/FastISel.cpp
@@ -57,25 +57,6 @@
#include "llvm/Support/ErrorHandling.h"
using namespace llvm;
-/// startNewBlock - Set the current block to which generated machine
-/// instructions will be appended, and clear the local CSE map.
-///
-void FastISel::startNewBlock() {
- LocalValueMap.clear();
-
- // Start out as null, meaning no local-value instructions have
- // been emitted.
- LastLocalValue = 0;
-
- // Advance the last local value past any EH_LABEL instructions.
- MachineBasicBlock::iterator
- I = FuncInfo.MBB->begin(), E = FuncInfo.MBB->end();
- while (I != E && I->getOpcode() == TargetOpcode::EH_LABEL) {
- LastLocalValue = I;
- ++I;
- }
-}
-
bool FastISel::hasTrivialKill(const Value *V) const {
// Don't consider constants or arguments to have trivial kills.
const Instruction *I = dyn_cast<Instruction>(V);
@@ -120,30 +101,24 @@ unsigned FastISel::getRegForValue(const Value *V) {
// only locally. This is because Instructions already have the SSA
// def-dominates-use requirement enforced.
DenseMap<const Value *, unsigned>::iterator I = FuncInfo.ValueMap.find(V);
- if (I != FuncInfo.ValueMap.end()) {
- unsigned Reg = I->second;
- return Reg;
- }
+ if (I != FuncInfo.ValueMap.end())
+ return I->second;
unsigned Reg = LocalValueMap[V];
if (Reg != 0)
return Reg;
// In bottom-up mode, just create the virtual register which will be used
// to hold the value. It will be materialized later.
- if (isa<Instruction>(V) &&
- (!isa<AllocaInst>(V) ||
- !FuncInfo.StaticAllocaMap.count(cast<AllocaInst>(V))))
- return FuncInfo.InitializeRegForValue(V);
-
- MachineBasicBlock::iterator SaveInsertPt = enterLocalValueArea();
-
- // Materialize the value in a register. Emit any instructions in the
- // local value area.
- Reg = materializeRegForValue(V, VT);
-
- leaveLocalValueArea(SaveInsertPt);
+ if (IsBottomUp) {
+ Reg = createResultReg(TLI.getRegClassFor(VT));
+ if (isa<Instruction>(V))
+ FuncInfo.ValueMap[V] = Reg;
+ else
+ LocalValueMap[V] = Reg;
+ return Reg;
+ }
- return Reg;
+ return materializeRegForValue(V, VT);
}
/// materializeRegForValue - Helper for getRegForValue. This function is
@@ -194,8 +169,7 @@ unsigned FastISel::materializeRegForValue(const Value *V, MVT VT) {
Reg = lookUpRegForValue(Op);
} else if (isa<UndefValue>(V)) {
Reg = createResultReg(TLI.getRegClassFor(VT));
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
- TII.get(TargetOpcode::IMPLICIT_DEF), Reg);
+ BuildMI(MBB, DL, TII.get(TargetOpcode::IMPLICIT_DEF), Reg);
}
// If target-independent code couldn't handle the value, give target-specific
@@ -205,10 +179,8 @@ unsigned FastISel::materializeRegForValue(const Value *V, MVT VT) {
// Don't cache constant materializations in the general ValueMap.
// To do so would require tracking what uses they dominate.
- if (Reg != 0) {
+ if (Reg != 0)
LocalValueMap[V] = Reg;
- LastLocalValue = MRI.getVRegDef(Reg);
- }
return Reg;
}
@@ -237,15 +209,12 @@ unsigned FastISel::UpdateValueMap(const Value *I, unsigned Reg) {
unsigned &AssignedReg = FuncInfo.ValueMap[I];
if (AssignedReg == 0)
- // Use the new register.
AssignedReg = Reg;
else if (Reg != AssignedReg) {
- // Arrange for uses of AssignedReg to be replaced by uses of Reg.
- FuncInfo.RegFixups[AssignedReg] = Reg;
-
- AssignedReg = Reg;
+ const TargetRegisterClass *RegClass = MRI.getRegClass(Reg);
+ TII.copyRegToReg(*MBB, MBB->end(), AssignedReg,
+ Reg, RegClass, RegClass, DL);
}
-
return AssignedReg;
}
@@ -273,33 +242,6 @@ std::pair<unsigned, bool> FastISel::getRegForGEPIndex(const Value *Idx) {
return std::pair<unsigned, bool>(IdxN, IdxNIsKill);
}
-void FastISel::recomputeInsertPt() {
- if (getLastLocalValue()) {
- FuncInfo.InsertPt = getLastLocalValue();
- ++FuncInfo.InsertPt;
- } else
- FuncInfo.InsertPt = FuncInfo.MBB->getFirstNonPHI();
-
- // Now skip past any EH_LABELs, which must remain at the beginning.
- while (FuncInfo.InsertPt != FuncInfo.MBB->end() &&
- FuncInfo.InsertPt->getOpcode() == TargetOpcode::EH_LABEL)
- ++FuncInfo.InsertPt;
-}
-
-MachineBasicBlock::iterator FastISel::enterLocalValueArea() {
- MachineBasicBlock::iterator OldInsertPt = FuncInfo.InsertPt;
- recomputeInsertPt();
- return OldInsertPt;
-}
-
-void FastISel::leaveLocalValueArea(MachineBasicBlock::iterator OldInsertPt) {
- if (FuncInfo.InsertPt != FuncInfo.MBB->begin())
- LastLocalValue = llvm::prior(FuncInfo.InsertPt);
-
- // Restore the previous insert position.
- FuncInfo.InsertPt = OldInsertPt;
-}
-
/// SelectBinaryOp - Select and emit code for a binary operator instruction,
/// which has an opcode which directly corresponds to the given ISD opcode.
///
@@ -492,28 +434,23 @@ bool FastISel::SelectCall(const User *I) {
if (!V) {
// Currently the optimizer can produce this; insert an undef to
// help debugging. Probably the optimizer should not do this.
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
- .addReg(0U).addImm(DI->getOffset())
- .addMetadata(DI->getVariable());
+ BuildMI(MBB, DL, II).addReg(0U).addImm(DI->getOffset()).
+ addMetadata(DI->getVariable());
} else if (const ConstantInt *CI = dyn_cast<ConstantInt>(V)) {
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
- .addImm(CI->getZExtValue()).addImm(DI->getOffset())
- .addMetadata(DI->getVariable());
+ BuildMI(MBB, DL, II).addImm(CI->getZExtValue()).addImm(DI->getOffset()).
+ addMetadata(DI->getVariable());
} else if (const ConstantFP *CF = dyn_cast<ConstantFP>(V)) {
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
- .addFPImm(CF).addImm(DI->getOffset())
- .addMetadata(DI->getVariable());
+ BuildMI(MBB, DL, II).addFPImm(CF).addImm(DI->getOffset()).
+ addMetadata(DI->getVariable());
} else if (unsigned Reg = lookUpRegForValue(V)) {
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
- .addReg(Reg, RegState::Debug).addImm(DI->getOffset())
- .addMetadata(DI->getVariable());
+ BuildMI(MBB, DL, II).addReg(Reg, RegState::Debug).addImm(DI->getOffset()).
+ addMetadata(DI->getVariable());
} else {
// We can't yet handle anything else here because it would require
// generating code, thus altering codegen because of debug info.
// Insert an undef so we can see what we dropped.
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
- .addReg(0U).addImm(DI->getOffset())
- .addMetadata(DI->getVariable());
+ BuildMI(MBB, DL, II).addReg(0U).addImm(DI->getOffset()).
+ addMetadata(DI->getVariable());
}
return true;
}
@@ -522,13 +459,12 @@ bool FastISel::SelectCall(const User *I) {
switch (TLI.getOperationAction(ISD::EXCEPTIONADDR, VT)) {
default: break;
case TargetLowering::Expand: {
- assert(FuncInfo.MBB->isLandingPad() &&
- "Call to eh.exception not in landing pad!");
+ assert(MBB->isLandingPad() && "Call to eh.exception not in landing pad!");
unsigned Reg = TLI.getExceptionAddressRegister();
const TargetRegisterClass *RC = TLI.getRegClassFor(VT);
unsigned ResultReg = createResultReg(RC);
- bool InsertedCopy = TII.copyRegToReg(*FuncInfo.MBB, FuncInfo.InsertPt,
- ResultReg, Reg, RC, RC, DL);
+ bool InsertedCopy = TII.copyRegToReg(*MBB, MBB->end(), ResultReg,
+ Reg, RC, RC, DL);
assert(InsertedCopy && "Can't copy address registers!");
InsertedCopy = InsertedCopy;
UpdateValueMap(I, ResultReg);
@@ -542,23 +478,23 @@ bool FastISel::SelectCall(const User *I) {
switch (TLI.getOperationAction(ISD::EHSELECTION, VT)) {
default: break;
case TargetLowering::Expand: {
- if (FuncInfo.MBB->isLandingPad())
- AddCatchInfo(*cast<CallInst>(I), &FuncInfo.MF->getMMI(), FuncInfo.MBB);
+ if (MBB->isLandingPad())
+ AddCatchInfo(*cast<CallInst>(I), &FuncInfo.MF->getMMI(), MBB);
else {
#ifndef NDEBUG
FuncInfo.CatchInfoLost.insert(cast<CallInst>(I));
#endif
// FIXME: Mark exception selector register as live in. Hack for PR1508.
unsigned Reg = TLI.getExceptionSelectorRegister();
- if (Reg) FuncInfo.MBB->addLiveIn(Reg);
+ if (Reg) MBB->addLiveIn(Reg);
}
unsigned Reg = TLI.getExceptionSelectorRegister();
EVT SrcVT = TLI.getPointerTy();
const TargetRegisterClass *RC = TLI.getRegClassFor(SrcVT);
unsigned ResultReg = createResultReg(RC);
- bool InsertedCopy = TII.copyRegToReg(*FuncInfo.MBB, FuncInfo.InsertPt,
- ResultReg, Reg, RC, RC, DL);
+ bool InsertedCopy = TII.copyRegToReg(*MBB, MBB->end(), ResultReg, Reg,
+ RC, RC, DL);
assert(InsertedCopy && "Can't copy address registers!");
InsertedCopy = InsertedCopy;
@@ -677,9 +613,8 @@ bool FastISel::SelectBitCast(const User *I) {
TargetRegisterClass* DstClass = TLI.getRegClassFor(DstVT);
ResultReg = createResultReg(DstClass);
- bool InsertedCopy = TII.copyRegToReg(*FuncInfo.MBB, FuncInfo.InsertPt,
- ResultReg, Op0,
- DstClass, SrcClass, DL);
+ bool InsertedCopy = TII.copyRegToReg(*MBB, MBB->end(), ResultReg,
+ Op0, DstClass, SrcClass, DL);
if (!InsertedCopy)
ResultReg = 0;
}
@@ -727,14 +662,13 @@ FastISel::SelectInstruction(const Instruction *I) {
/// the CFG.
void
FastISel::FastEmitBranch(MachineBasicBlock *MSucc, DebugLoc DL) {
- if (FuncInfo.MBB->isLayoutSuccessor(MSucc)) {
+ if (MBB->isLayoutSuccessor(MSucc)) {
// The unconditional fall-through case, which needs no instructions.
} else {
// The unconditional branch case.
- TII.InsertBranch(*FuncInfo.MBB, MSucc, NULL,
- SmallVector<MachineOperand, 0>(), DL);
+ TII.InsertBranch(*MBB, MSucc, NULL, SmallVector<MachineOperand, 0>(), DL);
}
- FuncInfo.MBB->addSuccessor(MSucc);
+ MBB->addSuccessor(MSucc);
}
/// SelectFNeg - Emit an FNeg operation.
@@ -793,19 +727,11 @@ FastISel::SelectLoad(const User *I) {
BasicBlock::iterator ScanFrom = LI;
if (const Value *V = FindAvailableLoadedValue(LI->getPointerOperand(),
LI->getParent(), ScanFrom)) {
- if (!V->use_empty() &&
- (!isa<Instruction>(V) ||
- cast<Instruction>(V)->getParent() == LI->getParent() ||
- (isa<AllocaInst>(V) &&
- FuncInfo.StaticAllocaMap.count(cast<AllocaInst>(V)))) &&
- (!isa<Argument>(V) ||
- LI->getParent() == &LI->getParent()->getParent()->getEntryBlock())) {
unsigned ResultReg = getRegForValue(V);
if (ResultReg != 0) {
UpdateValueMap(I, ResultReg);
return true;
}
- }
}
}
@@ -928,7 +854,8 @@ FastISel::SelectOperator(const User *I, unsigned Opcode) {
}
FastISel::FastISel(FunctionLoweringInfo &funcInfo)
- : FuncInfo(funcInfo),
+ : MBB(0),
+ FuncInfo(funcInfo),
MRI(FuncInfo.MF->getRegInfo()),
MFI(*FuncInfo.MF->getFrameInfo()),
MCP(*FuncInfo.MF->getConstantPool()),
@@ -936,7 +863,8 @@ FastISel::FastISel(FunctionLoweringInfo &funcInfo)
TD(*TM.getTargetData()),
TII(*TM.getInstrInfo()),
TLI(*TM.getTargetLowering()),
- TRI(*TM.getRegisterInfo()) {
+ TRI(*TM.getRegisterInfo()),
+ IsBottomUp(false) {
}
FastISel::~FastISel() {}
@@ -1065,7 +993,7 @@ unsigned FastISel::FastEmitInst_(unsigned MachineInstOpcode,
unsigned ResultReg = createResultReg(RC);
const TargetInstrDesc &II = TII.get(MachineInstOpcode);
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg);
+ BuildMI(MBB, DL, II, ResultReg);
return ResultReg;
}
@@ -1076,14 +1004,11 @@ unsigned FastISel::FastEmitInst_r(unsigned MachineInstOpcode,
const TargetInstrDesc &II = TII.get(MachineInstOpcode);
if (II.getNumDefs() >= 1)
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
- .addReg(Op0, Op0IsKill * RegState::Kill);
+ BuildMI(MBB, DL, II, ResultReg).addReg(Op0, Op0IsKill * RegState::Kill);
else {
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
- .addReg(Op0, Op0IsKill * RegState::Kill);
- bool InsertedCopy = TII.copyRegToReg(*FuncInfo.MBB, FuncInfo.InsertPt,
- ResultReg, II.ImplicitDefs[0],
- RC, RC, DL);
+ BuildMI(MBB, DL, II).addReg(Op0, Op0IsKill * RegState::Kill);
+ bool InsertedCopy = TII.copyRegToReg(*MBB, MBB->end(), ResultReg,
+ II.ImplicitDefs[0], RC, RC, DL);
if (!InsertedCopy)
ResultReg = 0;
}
@@ -1099,16 +1024,15 @@ unsigned FastISel::FastEmitInst_rr(unsigned MachineInstOpcode,
const TargetInstrDesc &II = TII.get(MachineInstOpcode);
if (II.getNumDefs() >= 1)
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
+ BuildMI(MBB, DL, II, ResultReg)
.addReg(Op0, Op0IsKill * RegState::Kill)
.addReg(Op1, Op1IsKill * RegState::Kill);
else {
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
+ BuildMI(MBB, DL, II)
.addReg(Op0, Op0IsKill * RegState::Kill)
.addReg(Op1, Op1IsKill * RegState::Kill);
- bool InsertedCopy = TII.copyRegToReg(*FuncInfo.MBB, FuncInfo.InsertPt,
- ResultReg, II.ImplicitDefs[0],
- RC, RC, DL);
+ bool InsertedCopy = TII.copyRegToReg(*MBB, MBB->end(), ResultReg,
+ II.ImplicitDefs[0], RC, RC, DL);
if (!InsertedCopy)
ResultReg = 0;
}
@@ -1123,16 +1047,15 @@ unsigned FastISel::FastEmitInst_ri(unsigned MachineInstOpcode,
const TargetInstrDesc &II = TII.get(MachineInstOpcode);
if (II.getNumDefs() >= 1)
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
+ BuildMI(MBB, DL, II, ResultReg)
.addReg(Op0, Op0IsKill * RegState::Kill)
.addImm(Imm);
else {
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
+ BuildMI(MBB, DL, II)
.addReg(Op0, Op0IsKill * RegState::Kill)
.addImm(Imm);
- bool InsertedCopy = TII.copyRegToReg(*FuncInfo.MBB, FuncInfo.InsertPt,
- ResultReg, II.ImplicitDefs[0],
- RC, RC, DL);
+ bool InsertedCopy = TII.copyRegToReg(*MBB, MBB->end(), ResultReg,
+ II.ImplicitDefs[0], RC, RC, DL);
if (!InsertedCopy)
ResultReg = 0;
}
@@ -1147,16 +1070,15 @@ unsigned FastISel::FastEmitInst_rf(unsigned MachineInstOpcode,
const TargetInstrDesc &II = TII.get(MachineInstOpcode);
if (II.getNumDefs() >= 1)
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
+ BuildMI(MBB, DL, II, ResultReg)
.addReg(Op0, Op0IsKill * RegState::Kill)
.addFPImm(FPImm);
else {
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
+ BuildMI(MBB, DL, II)
.addReg(Op0, Op0IsKill * RegState::Kill)
.addFPImm(FPImm);
- bool InsertedCopy = TII.copyRegToReg(*FuncInfo.MBB, FuncInfo.InsertPt,
- ResultReg, II.ImplicitDefs[0],
- RC, RC, DL);
+ bool InsertedCopy = TII.copyRegToReg(*MBB, MBB->end(), ResultReg,
+ II.ImplicitDefs[0], RC, RC, DL);
if (!InsertedCopy)
ResultReg = 0;
}
@@ -1172,18 +1094,17 @@ unsigned FastISel::FastEmitInst_rri(unsigned MachineInstOpcode,
const TargetInstrDesc &II = TII.get(MachineInstOpcode);
if (II.getNumDefs() >= 1)
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
+ BuildMI(MBB, DL, II, ResultReg)
.addReg(Op0, Op0IsKill * RegState::Kill)
.addReg(Op1, Op1IsKill * RegState::Kill)
.addImm(Imm);
else {
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
+ BuildMI(MBB, DL, II)
.addReg(Op0, Op0IsKill * RegState::Kill)
.addReg(Op1, Op1IsKill * RegState::Kill)
.addImm(Imm);
- bool InsertedCopy = TII.copyRegToReg(*FuncInfo.MBB, FuncInfo.InsertPt,
- ResultReg, II.ImplicitDefs[0],
- RC, RC, DL);
+ bool InsertedCopy = TII.copyRegToReg(*MBB, MBB->end(), ResultReg,
+ II.ImplicitDefs[0], RC, RC, DL);
if (!InsertedCopy)
ResultReg = 0;
}
@@ -1197,12 +1118,11 @@ unsigned FastISel::FastEmitInst_i(unsigned MachineInstOpcode,
const TargetInstrDesc &II = TII.get(MachineInstOpcode);
if (II.getNumDefs() >= 1)
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg).addImm(Imm);
+ BuildMI(MBB, DL, II, ResultReg).addImm(Imm);
else {
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II).addImm(Imm);
- bool InsertedCopy = TII.copyRegToReg(*FuncInfo.MBB, FuncInfo.InsertPt,
- ResultReg, II.ImplicitDefs[0],
- RC, RC, DL);
+ BuildMI(MBB, DL, II).addImm(Imm);
+ bool InsertedCopy = TII.copyRegToReg(*MBB, MBB->end(), ResultReg,
+ II.ImplicitDefs[0], RC, RC, DL);
if (!InsertedCopy)
ResultReg = 0;
}
@@ -1215,8 +1135,7 @@ unsigned FastISel::FastEmitInst_extractsubreg(MVT RetVT,
unsigned ResultReg = createResultReg(TLI.getRegClassFor(RetVT));
assert(TargetRegisterInfo::isVirtualRegister(Op0) &&
"Cannot yet extract from physregs");
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt,
- DL, TII.get(TargetOpcode::COPY), ResultReg)
+ BuildMI(MBB, DL, TII.get(TargetOpcode::COPY), ResultReg)
.addReg(Op0, getKillRegState(Op0IsKill), Idx);
return ResultReg;
}
diff --git a/lib/CodeGen/SelectionDAG/FunctionLoweringInfo.cpp b/lib/CodeGen/SelectionDAG/FunctionLoweringInfo.cpp
index 928e1ec..6d9f102 100644
--- a/lib/CodeGen/SelectionDAG/FunctionLoweringInfo.cpp
+++ b/lib/CodeGen/SelectionDAG/FunctionLoweringInfo.cpp
@@ -82,13 +82,6 @@ void FunctionLoweringInfo::set(const Function &fn, MachineFunction &mf) {
MF = &mf;
RegInfo = &MF->getRegInfo();
- // Check whether the function can return without sret-demotion.
- SmallVector<ISD::OutputArg, 4> Outs;
- GetReturnInfo(Fn->getReturnType(),
- Fn->getAttributes().getRetAttributes(), Outs, TLI);
- CanLowerReturn = TLI.CanLowerReturn(Fn->getCallingConv(), Fn->isVarArg(),
- Outs, Fn->getContext());
-
// Create a vreg for each argument register that is not dead and is used
// outside of the entry block for the function.
for (Function::const_arg_iterator AI = Fn->arg_begin(), E = Fn->arg_end();
@@ -181,7 +174,6 @@ void FunctionLoweringInfo::clear() {
#endif
LiveOutRegInfo.clear();
ArgDbgValues.clear();
- RegFixups.clear();
}
/// CreateReg - Allocate a single virtual register for the given type.
diff --git a/lib/CodeGen/SelectionDAG/InstrEmitter.cpp b/lib/CodeGen/SelectionDAG/InstrEmitter.cpp
index 40fb3a4..d7d63d3 100644
--- a/lib/CodeGen/SelectionDAG/InstrEmitter.cpp
+++ b/lib/CodeGen/SelectionDAG/InstrEmitter.cpp
@@ -734,13 +734,8 @@ EmitMachineNode(SDNode *Node, bool IsClone, bool IsCloned,
if (II.usesCustomInsertionHook()) {
// Insert this instruction into the basic block using a target
// specific inserter which may return a new basic block.
- bool AtEnd = InsertPos == MBB->end();
- MachineBasicBlock *NewMBB = TLI->EmitInstrWithCustomInserter(MI, MBB);
- if (NewMBB != MBB) {
- if (AtEnd)
- InsertPos = NewMBB->end();
- MBB = NewMBB;
- }
+ MBB = TLI->EmitInstrWithCustomInserter(MI, MBB);
+ InsertPos = MBB->end();
return;
}
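The simplification leans on the contract of that hook: EmitInstrWithCustomInserter
returns the block where emission should continue, which may differ from the input
block. A hedged sketch of such a target hook; needsControlFlow and splitBlockAfter
are hypothetical helpers, not real API:

  MachineBasicBlock *
  MyTargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI,
                                                MachineBasicBlock *BB) const {
    if (!needsControlFlow(MI))
      return BB;                      // emission continues at the end of BB
    MachineBasicBlock *SinkMBB = splitBlockAfter(MI, BB);
    return SinkMBB;                   // InstrEmitter resumes at SinkMBB->end()
  }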
diff --git a/lib/CodeGen/SelectionDAG/ScheduleDAGSDNodes.cpp b/lib/CodeGen/SelectionDAG/ScheduleDAGSDNodes.cpp
index 2673eba..2d08310 100644
--- a/lib/CodeGen/SelectionDAG/ScheduleDAGSDNodes.cpp
+++ b/lib/CodeGen/SelectionDAG/ScheduleDAGSDNodes.cpp
@@ -519,13 +519,13 @@ static void ProcessSourceNode(SDNode *N, SelectionDAG *DAG,
return;
MachineBasicBlock *BB = Emitter.getBlock();
- if (Emitter.getInsertPos() == BB->begin() || BB->back().isPHI()) {
+ if (BB->empty() || BB->back().isPHI()) {
// Did not insert any instruction.
Orders.push_back(std::make_pair(Order, (MachineInstr*)0));
return;
}
- Orders.push_back(std::make_pair(Order, prior(Emitter.getInsertPos())));
+ Orders.push_back(std::make_pair(Order, &BB->back()));
if (!N->getHasDebugValue())
return;
// Opportunistically insert immediate dbg_value uses, i.e. those with source
@@ -564,7 +564,7 @@ MachineBasicBlock *ScheduleDAGSDNodes::EmitSchedule() {
for (; PDI != PDE; ++PDI) {
MachineInstr *DbgMI= Emitter.EmitDbgValue(*PDI, VRBaseMap);
if (DbgMI)
- BB->insert(InsertPos, DbgMI);
+ BB->push_back(DbgMI);
}
}
@@ -608,7 +608,9 @@ MachineBasicBlock *ScheduleDAGSDNodes::EmitSchedule() {
// Insert all the dbg_values which have not already been inserted in source
// order sequence.
if (HasDbg) {
- MachineBasicBlock::iterator BBBegin = BB->getFirstNonPHI();
+ MachineBasicBlock::iterator BBBegin = BB->empty() ? BB->end() : BB->begin();
+ while (BBBegin != BB->end() && BBBegin->isPHI())
+ ++BBBegin;
// Sort the source order instructions and use the order to insert debug
// values.
@@ -624,6 +626,7 @@ MachineBasicBlock *ScheduleDAGSDNodes::EmitSchedule() {
// Insert all SDDbgValue's whose order(s) are before "Order".
if (!MI)
continue;
+ MachineBasicBlock *MIBB = MI->getParent();
#ifndef NDEBUG
unsigned LastDIOrder = 0;
#endif
@@ -643,7 +646,7 @@ MachineBasicBlock *ScheduleDAGSDNodes::EmitSchedule() {
BB->insert(BBBegin, DbgMI);
else {
MachineBasicBlock::iterator Pos = MI;
- BB->insert(llvm::next(Pos), DbgMI);
+ MIBB->insert(llvm::next(Pos), DbgMI);
}
}
}
diff --git a/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp b/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
index 3b3ee3e..0bb5e4b 100644
--- a/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
+++ b/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
@@ -951,16 +951,79 @@ SDValue SelectionDAGBuilder::getValueImpl(const Value *V) {
// If this is an instruction which fast-isel has deferred, select it now.
if (const Instruction *Inst = dyn_cast<Instruction>(V)) {
- unsigned InReg = FuncInfo.InitializeRegForValue(Inst);
- RegsForValue RFV(*DAG.getContext(), TLI, InReg, Inst->getType());
- SDValue Chain = DAG.getEntryNode();
- return RFV.getCopyFromRegs(DAG, FuncInfo, getCurDebugLoc(), Chain, NULL);
+ assert(Inst->isSafeToSpeculativelyExecute() &&
+ "Instruction with side effects deferred!");
+ visit(*Inst);
+ DenseMap<const Value *, SDValue>::iterator NIt = NodeMap.find(Inst);
+ if (NIt != NodeMap.end() && NIt->second.getNode())
+ return NIt->second;
}
llvm_unreachable("Can't get register for value!");
return SDValue();
}
+/// Get the EVTs and ArgFlags collections that represent the legalized return
+/// type of the given function. This does not require a DAG or a return value,
+/// and is suitable for use before any DAGs for the function are constructed.
+static void getReturnInfo(const Type* ReturnType,
+ Attributes attr, SmallVectorImpl<EVT> &OutVTs,
+ SmallVectorImpl<ISD::ArgFlagsTy> &OutFlags,
+ const TargetLowering &TLI,
+ SmallVectorImpl<uint64_t> *Offsets = 0) {
+ SmallVector<EVT, 4> ValueVTs;
+ ComputeValueVTs(TLI, ReturnType, ValueVTs);
+ unsigned NumValues = ValueVTs.size();
+ if (NumValues == 0) return;
+ unsigned Offset = 0;
+
+ for (unsigned j = 0, f = NumValues; j != f; ++j) {
+ EVT VT = ValueVTs[j];
+ ISD::NodeType ExtendKind = ISD::ANY_EXTEND;
+
+ if (attr & Attribute::SExt)
+ ExtendKind = ISD::SIGN_EXTEND;
+ else if (attr & Attribute::ZExt)
+ ExtendKind = ISD::ZERO_EXTEND;
+
+ // FIXME: C calling convention requires the return type to be promoted to
+ // at least 32-bit. But this is not necessary for non-C calling
+ // conventions. The frontend should mark functions whose return values
+ // require promoting with signext or zeroext attributes.
+ if (ExtendKind != ISD::ANY_EXTEND && VT.isInteger()) {
+ EVT MinVT = TLI.getRegisterType(ReturnType->getContext(), MVT::i32);
+ if (VT.bitsLT(MinVT))
+ VT = MinVT;
+ }
+
+ unsigned NumParts = TLI.getNumRegisters(ReturnType->getContext(), VT);
+ EVT PartVT = TLI.getRegisterType(ReturnType->getContext(), VT);
+ unsigned PartSize = TLI.getTargetData()->getTypeAllocSize(
+ PartVT.getTypeForEVT(ReturnType->getContext()));
+
+ // 'inreg' on function refers to return value
+ ISD::ArgFlagsTy Flags = ISD::ArgFlagsTy();
+ if (attr & Attribute::InReg)
+ Flags.setInReg();
+
+ // Propagate extension type if any
+ if (attr & Attribute::SExt)
+ Flags.setSExt();
+ else if (attr & Attribute::ZExt)
+ Flags.setZExt();
+
+ for (unsigned i = 0; i < NumParts; ++i) {
+ OutVTs.push_back(PartVT);
+ OutFlags.push_back(Flags);
+ if (Offsets)
+ {
+ Offsets->push_back(Offset);
+ Offset += PartSize;
+ }
+ }
+ }
+}
+
void SelectionDAGBuilder::visitRet(const ReturnInst &I) {
SDValue Chain = getControlRoot();
SmallVector<ISD::OutputArg, 8> Outs;
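A worked example of what getReturnInfo computes, assuming a 32-bit x86 target:
for a function declared "i64 @f()", ComputeValueVTs yields { i64 }, and
legalization gives NumParts == 2 with PartVT == i32 and PartSize == 4, so:

  // OutVTs   = { i32, i32 }
  // OutFlags = { default, default }   (plus inreg/sext/zext if attributed)
  // Offsets  = { 0, 4 }               (only filled in when requested)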
@@ -1257,7 +1320,7 @@ SelectionDAGBuilder::ShouldEmitAsBranches(const std::vector<CaseBlock> &Cases){
}
void SelectionDAGBuilder::visitBr(const BranchInst &I) {
- MachineBasicBlock *BrMBB = FuncInfo.MBB;
+ MachineBasicBlock *BrMBB = FuncInfo.MBBMap[I.getParent()];
// Update machine-CFG edges.
MachineBasicBlock *Succ0MBB = FuncInfo.MBBMap[I.getSuccessor(0)];
@@ -1583,7 +1646,7 @@ void SelectionDAGBuilder::visitBitTestCase(MachineBasicBlock* NextMBB,
}
void SelectionDAGBuilder::visitInvoke(const InvokeInst &I) {
- MachineBasicBlock *InvokeMBB = FuncInfo.MBB;
+ MachineBasicBlock *InvokeMBB = FuncInfo.MBBMap[I.getParent()];
// Retrieve successors.
MachineBasicBlock *Return = FuncInfo.MBBMap[I.getSuccessor(0)];
@@ -2111,7 +2174,7 @@ size_t SelectionDAGBuilder::Clusterify(CaseVector& Cases,
}
void SelectionDAGBuilder::visitSwitch(const SwitchInst &SI) {
- MachineBasicBlock *SwitchMBB = FuncInfo.MBB;
+ MachineBasicBlock *SwitchMBB = FuncInfo.MBBMap[SI.getParent()];
// Figure out which block is immediately after the current one.
MachineBasicBlock *NextBlock = 0;
@@ -2177,7 +2240,7 @@ void SelectionDAGBuilder::visitSwitch(const SwitchInst &SI) {
}
void SelectionDAGBuilder::visitIndirectBr(const IndirectBrInst &I) {
- MachineBasicBlock *IndirectBrMBB = FuncInfo.MBB;
+ MachineBasicBlock *IndirectBrMBB = FuncInfo.MBBMap[I.getParent()];
// Update machine-CFG edges with unique successors.
SmallVector<BasicBlock*, 32> succs;
@@ -3837,7 +3900,7 @@ SelectionDAGBuilder::EmitFuncArgumentDbgValue(const DbgValueInst &DI,
if (DV.isInlinedFnArgument(MF.getFunction()))
return false;
- MachineBasicBlock *MBB = FuncInfo.MBB;
+ MachineBasicBlock *MBB = FuncInfo.MBBMap[DI.getParent()];
if (MBB != &MF.front())
return false;
@@ -4100,7 +4163,7 @@ SelectionDAGBuilder::visitIntrinsicCall(const CallInst &I, unsigned Intrinsic) {
}
case Intrinsic::eh_exception: {
// Insert the EXCEPTIONADDR instruction.
- assert(FuncInfo.MBB->isLandingPad() &&
+ assert(FuncInfo.MBBMap[I.getParent()]->isLandingPad() &&
"Call to eh.exception not in landing pad!");
SDVTList VTs = DAG.getVTList(TLI.getPointerTy(), MVT::Other);
SDValue Ops[1];
@@ -4112,7 +4175,7 @@ SelectionDAGBuilder::visitIntrinsicCall(const CallInst &I, unsigned Intrinsic) {
}
case Intrinsic::eh_selector: {
- MachineBasicBlock *CallMBB = FuncInfo.MBB;
+ MachineBasicBlock *CallMBB = FuncInfo.MBBMap[I.getParent()];
MachineModuleInfo &MMI = DAG.getMachineFunction().getMMI();
if (CallMBB->isLandingPad())
AddCatchInfo(I, &MMI, CallMBB);
@@ -4122,7 +4185,7 @@ SelectionDAGBuilder::visitIntrinsicCall(const CallInst &I, unsigned Intrinsic) {
#endif
// FIXME: Mark exception selector register as live in. Hack for PR1508.
unsigned Reg = TLI.getExceptionSelectorRegister();
- if (Reg) FuncInfo.MBB->addLiveIn(Reg);
+ if (Reg) FuncInfo.MBBMap[I.getParent()]->addLiveIn(Reg);
}
// Insert the EHSELECTION instruction.
@@ -4496,13 +4559,14 @@ void SelectionDAGBuilder::LowerCallTo(ImmutableCallSite CS, SDValue Callee,
Args.reserve(CS.arg_size());
// Check whether the function can return without sret-demotion.
- SmallVector<ISD::OutputArg, 4> Outs;
+ SmallVector<EVT, 4> OutVTs;
+ SmallVector<ISD::ArgFlagsTy, 4> OutsFlags;
SmallVector<uint64_t, 4> Offsets;
- GetReturnInfo(RetTy, CS.getAttributes().getRetAttributes(),
- Outs, TLI, &Offsets);
+ getReturnInfo(RetTy, CS.getAttributes().getRetAttributes(),
+ OutVTs, OutsFlags, TLI, &Offsets);
bool CanLowerReturn = TLI.CanLowerReturn(CS.getCallingConv(),
- FTy->isVarArg(), Outs, FTy->getContext());
+ FTy->isVarArg(), OutVTs, OutsFlags, FTy->getContext());
SDValue DemoteStackSlot;
@@ -4595,7 +4659,7 @@ void SelectionDAGBuilder::LowerCallTo(ImmutableCallSite CS, SDValue Callee,
ComputeValueVTs(TLI, PtrRetTy, PVTs);
assert(PVTs.size() == 1 && "Pointers should fit in one register");
EVT PtrVT = PVTs[0];
- unsigned NumValues = Outs.size();
+ unsigned NumValues = OutVTs.size();
SmallVector<SDValue, 4> Values(NumValues);
SmallVector<SDValue, 4> Chains(NumValues);
@@ -4603,7 +4667,7 @@ void SelectionDAGBuilder::LowerCallTo(ImmutableCallSite CS, SDValue Callee,
SDValue Add = DAG.getNode(ISD::ADD, getCurDebugLoc(), PtrVT,
DemoteStackSlot,
DAG.getConstant(Offsets[i], PtrVT));
- SDValue L = DAG.getLoad(Outs[i].VT, getCurDebugLoc(), Result.second,
+ SDValue L = DAG.getLoad(OutVTs[i], getCurDebugLoc(), Result.second,
Add, NULL, Offsets[i], false, false, 1);
Values[i] = L;
Chains[i] = L.getValue(1);
@@ -5895,10 +5959,15 @@ void SelectionDAGISel::LowerArguments(const BasicBlock *LLVMBB) {
SmallVector<ISD::InputArg, 16> Ins;
// Check whether the function can return without sret-demotion.
- SmallVector<ISD::OutputArg, 4> Outs;
- GetReturnInfo(F.getReturnType(), F.getAttributes().getRetAttributes(),
- Outs, TLI);
-
+ SmallVector<EVT, 4> OutVTs;
+ SmallVector<ISD::ArgFlagsTy, 4> OutsFlags;
+ getReturnInfo(F.getReturnType(), F.getAttributes().getRetAttributes(),
+ OutVTs, OutsFlags, TLI);
+
+ FuncInfo->CanLowerReturn = TLI.CanLowerReturn(F.getCallingConv(),
+ F.isVarArg(),
+ OutVTs, OutsFlags,
+ F.getContext());
if (!FuncInfo->CanLowerReturn) {
// Put in an sret pointer parameter before all the other parameters.
SmallVector<EVT, 1> ValueVTs;
diff --git a/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp b/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp
index 924d939..5eb1a00 100644
--- a/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp
+++ b/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp
@@ -313,26 +313,6 @@ bool SelectionDAGISel::runOnMachineFunction(MachineFunction &mf) {
// Determine if there is a call to setjmp in the machine function.
MF->setCallsSetJmp(FunctionCallsSetJmp(&Fn));
- // Replace forward-declared registers with the registers containing
- // the desired value.
- MachineRegisterInfo &MRI = MF->getRegInfo();
- for (DenseMap<unsigned, unsigned>::iterator
- I = FuncInfo->RegFixups.begin(), E = FuncInfo->RegFixups.end();
- I != E; ++I) {
- unsigned From = I->first;
- unsigned To = I->second;
- // If To is also scheduled to be replaced, find what its ultimate
- // replacement is.
- for (;;) {
- DenseMap<unsigned, unsigned>::iterator J =
- FuncInfo->RegFixups.find(To);
- if (J == E) break;
- To = J->second;
- }
- // Replace it.
- MRI.replaceRegWith(From, To);
- }
-
// Release function-specific state. SDB and CurDAG are already cleared
// at this point.
FuncInfo->clear();
@@ -340,8 +320,9 @@ bool SelectionDAGISel::runOnMachineFunction(MachineFunction &mf) {
return true;
}
-void
-SelectionDAGISel::SelectBasicBlock(BasicBlock::const_iterator Begin,
+MachineBasicBlock *
+SelectionDAGISel::SelectBasicBlock(MachineBasicBlock *BB,
+ BasicBlock::const_iterator Begin,
BasicBlock::const_iterator End,
bool &HadTailCall) {
// Lower all of the non-terminator instructions. If a call is emitted
@@ -356,7 +337,7 @@ SelectionDAGISel::SelectBasicBlock(BasicBlock::const_iterator Begin,
SDB->clear();
// Final step, emit the lowered DAG as machine code.
- CodeGenAndEmitDAG();
+ return CodeGenAndEmitDAG(BB);
}
namespace {
@@ -445,7 +426,7 @@ void SelectionDAGISel::ComputeLiveOutVRegInfo() {
} while (!Worklist.empty());
}
-void SelectionDAGISel::CodeGenAndEmitDAG() {
+MachineBasicBlock *SelectionDAGISel::CodeGenAndEmitDAG(MachineBasicBlock *BB) {
std::string GroupName;
if (TimePassesIsEnabled)
GroupName = "Instruction Selection and Scheduling";
@@ -454,7 +435,7 @@ void SelectionDAGISel::CodeGenAndEmitDAG() {
ViewDAGCombine2 || ViewDAGCombineLT || ViewISelDAGs || ViewSchedDAGs ||
ViewSUnitDAGs)
BlockName = MF->getFunction()->getNameStr() + ":" +
- FuncInfo->MBB->getBasicBlock()->getNameStr();
+ BB->getBasicBlock()->getNameStr();
DEBUG(dbgs() << "Initial selection DAG:\n"; CurDAG->dump());
@@ -561,7 +542,7 @@ void SelectionDAGISel::CodeGenAndEmitDAG() {
{
NamedRegionTimer T("Instruction Scheduling", GroupName,
TimePassesIsEnabled);
- Scheduler->Run(CurDAG, FuncInfo->MBB, FuncInfo->InsertPt);
+ Scheduler->Run(CurDAG, BB, BB->end());
}
if (ViewSUnitDAGs) Scheduler->viewGraph();
@@ -570,9 +551,7 @@ void SelectionDAGISel::CodeGenAndEmitDAG() {
// inserted into.
{
NamedRegionTimer T("Instruction Creation", GroupName, TimePassesIsEnabled);
-
- FuncInfo->MBB = Scheduler->EmitSchedule();
- FuncInfo->InsertPt = Scheduler->InsertPos;
+ BB = Scheduler->EmitSchedule();
}
// Free the scheduler state.
@@ -584,6 +563,8 @@ void SelectionDAGISel::CodeGenAndEmitDAG() {
// Free the SelectionDAG state, now that we're finished with it.
CurDAG->clear();
+
+ return BB;
}
void SelectionDAGISel::DoInstructionSelection() {
@@ -645,22 +626,21 @@ void SelectionDAGISel::DoInstructionSelection() {
/// PrepareEHLandingPad - Emit an EH_LABEL, set up live-in registers, and
/// do other setup for EH landing-pad blocks.
-void SelectionDAGISel::PrepareEHLandingPad() {
+void SelectionDAGISel::PrepareEHLandingPad(MachineBasicBlock *BB) {
// Add a label to mark the beginning of the landing pad. Deletion of the
// landing pad can thus be detected via the MachineModuleInfo.
- MCSymbol *Label = MF->getMMI().addLandingPad(FuncInfo->MBB);
+ MCSymbol *Label = MF->getMMI().addLandingPad(BB);
const TargetInstrDesc &II = TM.getInstrInfo()->get(TargetOpcode::EH_LABEL);
- BuildMI(*FuncInfo->MBB, FuncInfo->InsertPt, SDB->getCurDebugLoc(), II)
- .addSym(Label);
+ BuildMI(BB, SDB->getCurDebugLoc(), II).addSym(Label);
// Mark exception register as live in.
unsigned Reg = TLI.getExceptionAddressRegister();
- if (Reg) FuncInfo->MBB->addLiveIn(Reg);
+ if (Reg) BB->addLiveIn(Reg);
// Mark exception selector register as live in.
Reg = TLI.getExceptionSelectorRegister();
- if (Reg) FuncInfo->MBB->addLiveIn(Reg);
+ if (Reg) BB->addLiveIn(Reg);
// FIXME: Hack around an exception handling flaw (PR1508): the personality
// function and list of typeids logically belong to the invoke (or, if you
@@ -673,7 +653,7 @@ void SelectionDAGISel::PrepareEHLandingPad() {
// in exceptions not being caught because no typeids are associated with
// the invoke. This may not be the only way things can go wrong, but it
// is the only way we try to work around for the moment.
- const BasicBlock *LLVMBB = FuncInfo->MBB->getBasicBlock();
+ const BasicBlock *LLVMBB = BB->getBasicBlock();
const BranchInst *Br = dyn_cast<BranchInst>(LLVMBB->getTerminator());
if (Br && Br->isUnconditional()) { // Critical edge?
@@ -697,95 +677,80 @@ void SelectionDAGISel::SelectAllBasicBlocks(const Function &Fn) {
// Iterate over all basic blocks in the function.
for (Function::const_iterator I = Fn.begin(), E = Fn.end(); I != E; ++I) {
const BasicBlock *LLVMBB = &*I;
- FuncInfo->MBB = FuncInfo->MBBMap[LLVMBB];
- FuncInfo->InsertPt = FuncInfo->MBB->getFirstNonPHI();
+ MachineBasicBlock *BB = FuncInfo->MBBMap[LLVMBB];
BasicBlock::const_iterator const Begin = LLVMBB->getFirstNonPHI();
BasicBlock::const_iterator const End = LLVMBB->end();
- BasicBlock::const_iterator BI = End;
+ BasicBlock::const_iterator BI = Begin;
- FuncInfo->InsertPt = FuncInfo->MBB->getFirstNonPHI();
-
- // Setup an EH landing-pad block.
- if (FuncInfo->MBB->isLandingPad())
- PrepareEHLandingPad();
-
// Lower any arguments needed in this block if this is the entry block.
if (LLVMBB == &Fn.getEntryBlock())
LowerArguments(LLVMBB);
+ // Setup an EH landing-pad block.
+ if (BB->isLandingPad())
+ PrepareEHLandingPad(BB);
+
// Before doing SelectionDAG ISel, see if FastISel has been requested.
if (FastIS) {
- FastIS->startNewBlock();
-
// Emit code for any incoming arguments. This must happen before
// beginning FastISel on the entry block.
if (LLVMBB == &Fn.getEntryBlock()) {
CurDAG->setRoot(SDB->getControlRoot());
SDB->clear();
- CodeGenAndEmitDAG();
-
- // If we inserted any instructions at the beginning, make a note of
- // where they are, so we can be sure to emit subsequent instructions
- // after them.
- if (FuncInfo->InsertPt != FuncInfo->MBB->begin())
- FastIS->setLastLocalValue(llvm::prior(FuncInfo->InsertPt));
- else
- FastIS->setLastLocalValue(0);
+ BB = CodeGenAndEmitDAG(BB);
}
-
+ FastIS->startNewBlock(BB);
// Do FastISel on as many instructions as possible.
- for (; BI != Begin; --BI) {
- const Instruction *Inst = llvm::prior(BI);
-
- // If we no longer require this instruction, skip it.
- if (!Inst->mayWriteToMemory() &&
- !isa<TerminatorInst>(Inst) &&
- !isa<DbgInfoIntrinsic>(Inst) &&
- !FuncInfo->isExportedInst(Inst))
+ for (; BI != End; ++BI) {
+#if 0
+ // Defer instructions with no side effects; they'll be emitted
+ // on-demand later.
+ if (BI->isSafeToSpeculativelyExecute() &&
+ !FuncInfo->isExportedInst(BI))
continue;
-
- // Bottom-up: reset the insert pos at the top, after any local-value
- // instructions.
- FastIS->recomputeInsertPt();
+#endif
// Try to select the instruction with FastISel.
- if (FastIS->SelectInstruction(Inst))
+ if (FastIS->SelectInstruction(BI))
continue;
// Then handle certain instructions as single-LLVM-Instruction blocks.
- if (isa<CallInst>(Inst)) {
+ if (isa<CallInst>(BI)) {
++NumFastIselFailures;
if (EnableFastISelVerbose || EnableFastISelAbort) {
dbgs() << "FastISel missed call: ";
- Inst->dump();
+ BI->dump();
}
- if (!Inst->getType()->isVoidTy() && !Inst->use_empty()) {
- unsigned &R = FuncInfo->ValueMap[Inst];
+ if (!BI->getType()->isVoidTy() && !BI->use_empty()) {
+ unsigned &R = FuncInfo->ValueMap[BI];
if (!R)
- R = FuncInfo->CreateRegs(Inst->getType());
+ R = FuncInfo->CreateRegs(BI->getType());
}
bool HadTailCall = false;
- SelectBasicBlock(Inst, BI, HadTailCall);
+ BB = SelectBasicBlock(BB, BI, llvm::next(BI), HadTailCall);
// If the call was emitted as a tail call, we're done with the block.
if (HadTailCall) {
- --BI;
+ BI = End;
break;
}
+ // If the instruction was codegen'd with multiple blocks,
+ // inform the FastISel object where to resume inserting.
+ FastIS->setCurrentBlock(BB);
continue;
}
// Otherwise, give up on FastISel for the rest of the block.
// For now, be a little lenient about non-branch terminators.
- if (!isa<TerminatorInst>(Inst) || isa<BranchInst>(Inst)) {
+ if (!isa<TerminatorInst>(BI) || isa<BranchInst>(BI)) {
++NumFastIselFailures;
if (EnableFastISelVerbose || EnableFastISelAbort) {
dbgs() << "FastISel miss: ";
- Inst->dump();
+ BI->dump();
}
if (EnableFastISelAbort)
// The "fast" selector couldn't handle something and bailed.
@@ -794,17 +759,17 @@ void SelectionDAGISel::SelectAllBasicBlocks(const Function &Fn) {
}
break;
}
-
- FastIS->recomputeInsertPt();
}
// Run SelectionDAG instruction selection on the remainder of the block
// not handled by FastISel. If FastISel is not run, this is the entire
// block.
- bool HadTailCall;
- SelectBasicBlock(Begin, BI, HadTailCall);
+ if (BI != End) {
+ bool HadTailCall;
+ BB = SelectBasicBlock(BB, BI, End, HadTailCall);
+ }
- FinishBasicBlock();
+ FinishBasicBlock(BB);
FuncInfo->PHINodesToUpdate.clear();
}
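Condensing the restored control flow of this loop (a sketch, not verbatim): the
current machine block is now threaded through every helper as an explicit value
instead of living in FuncInfo:

  MachineBasicBlock *BB = FuncInfo->MBBMap[LLVMBB];
  if (FastIS) {
    FastIS->startNewBlock(BB);
    for (; BI != End; ++BI) {
      if (FastIS->SelectInstruction(BI)) continue;  // fast path succeeded
      if (isa<CallInst>(BI)) {                      // lower just this call
        BB = SelectBasicBlock(BB, BI, llvm::next(BI), HadTailCall);
        FastIS->setCurrentBlock(BB);                // the block may have split
        continue;
      }
      break;                 // give up; SelectionDAG handles the rest
    }
  }
  if (BI != End)
    BB = SelectBasicBlock(BB, BI, End, HadTailCall);
  FinishBasicBlock(BB);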
@@ -812,7 +777,7 @@ void SelectionDAGISel::SelectAllBasicBlocks(const Function &Fn) {
}
void
-SelectionDAGISel::FinishBasicBlock() {
+SelectionDAGISel::FinishBasicBlock(MachineBasicBlock *BB) {
DEBUG(dbgs() << "Total amount of phi nodes to update: "
<< FuncInfo->PHINodesToUpdate.size() << "\n";
@@ -830,11 +795,11 @@ SelectionDAGISel::FinishBasicBlock() {
MachineInstr *PHI = FuncInfo->PHINodesToUpdate[i].first;
assert(PHI->isPHI() &&
"This is not a machine PHI node that we are updating!");
- if (!FuncInfo->MBB->isSuccessor(PHI->getParent()))
+ if (!BB->isSuccessor(PHI->getParent()))
continue;
PHI->addOperand(
MachineOperand::CreateReg(FuncInfo->PHINodesToUpdate[i].second, false));
- PHI->addOperand(MachineOperand::CreateMBB(FuncInfo->MBB));
+ PHI->addOperand(MachineOperand::CreateMBB(BB));
}
return;
}
@@ -843,35 +808,33 @@ SelectionDAGISel::FinishBasicBlock() {
// Lower header first, if it wasn't already lowered
if (!SDB->BitTestCases[i].Emitted) {
// Set the current basic block to the mbb we wish to insert the code into
- FuncInfo->MBB = SDB->BitTestCases[i].Parent;
- FuncInfo->InsertPt = FuncInfo->MBB->end();
+ BB = SDB->BitTestCases[i].Parent;
// Emit the code
- SDB->visitBitTestHeader(SDB->BitTestCases[i], FuncInfo->MBB);
+ SDB->visitBitTestHeader(SDB->BitTestCases[i], BB);
CurDAG->setRoot(SDB->getRoot());
SDB->clear();
- CodeGenAndEmitDAG();
+ BB = CodeGenAndEmitDAG(BB);
}
for (unsigned j = 0, ej = SDB->BitTestCases[i].Cases.size(); j != ej; ++j) {
// Set the current basic block to the mbb we wish to insert the code into
- FuncInfo->MBB = SDB->BitTestCases[i].Cases[j].ThisBB;
- FuncInfo->InsertPt = FuncInfo->MBB->end();
+ BB = SDB->BitTestCases[i].Cases[j].ThisBB;
// Emit the code
if (j+1 != ej)
SDB->visitBitTestCase(SDB->BitTestCases[i].Cases[j+1].ThisBB,
SDB->BitTestCases[i].Reg,
SDB->BitTestCases[i].Cases[j],
- FuncInfo->MBB);
+ BB);
else
SDB->visitBitTestCase(SDB->BitTestCases[i].Default,
SDB->BitTestCases[i].Reg,
SDB->BitTestCases[i].Cases[j],
- FuncInfo->MBB);
+ BB);
CurDAG->setRoot(SDB->getRoot());
SDB->clear();
- CodeGenAndEmitDAG();
+ BB = CodeGenAndEmitDAG(BB);
}
// Update PHI Nodes
@@ -916,24 +879,22 @@ SelectionDAGISel::FinishBasicBlock() {
// Lower header first, if it wasn't already lowered
if (!SDB->JTCases[i].first.Emitted) {
// Set the current basic block to the mbb we wish to insert the code into
- FuncInfo->MBB = SDB->JTCases[i].first.HeaderBB;
- FuncInfo->InsertPt = FuncInfo->MBB->end();
+ BB = SDB->JTCases[i].first.HeaderBB;
// Emit the code
SDB->visitJumpTableHeader(SDB->JTCases[i].second, SDB->JTCases[i].first,
- FuncInfo->MBB);
+ BB);
CurDAG->setRoot(SDB->getRoot());
SDB->clear();
- CodeGenAndEmitDAG();
+ BB = CodeGenAndEmitDAG(BB);
}
// Set the current basic block to the mbb we wish to insert the code into
- FuncInfo->MBB = SDB->JTCases[i].second.MBB;
- FuncInfo->InsertPt = FuncInfo->MBB->end();
+ BB = SDB->JTCases[i].second.MBB;
// Emit the code
SDB->visitJumpTable(SDB->JTCases[i].second);
CurDAG->setRoot(SDB->getRoot());
SDB->clear();
- CodeGenAndEmitDAG();
+ BB = CodeGenAndEmitDAG(BB);
// Update PHI Nodes
for (unsigned pi = 0, pe = FuncInfo->PHINodesToUpdate.size();
@@ -951,11 +912,11 @@ SelectionDAGISel::FinishBasicBlock() {
(MachineOperand::CreateMBB(SDB->JTCases[i].first.HeaderBB));
}
// JT BB. Just iterate over successors here
- if (FuncInfo->MBB->isSuccessor(PHIBB)) {
+ if (BB->isSuccessor(PHIBB)) {
PHI->addOperand
(MachineOperand::CreateReg(FuncInfo->PHINodesToUpdate[pi].second,
false));
- PHI->addOperand(MachineOperand::CreateMBB(FuncInfo->MBB));
+ PHI->addOperand(MachineOperand::CreateMBB(BB));
}
}
}
@@ -967,10 +928,10 @@ SelectionDAGISel::FinishBasicBlock() {
MachineInstr *PHI = FuncInfo->PHINodesToUpdate[i].first;
assert(PHI->isPHI() &&
"This is not a machine PHI node that we are updating!");
- if (FuncInfo->MBB->isSuccessor(PHI->getParent())) {
+ if (BB->isSuccessor(PHI->getParent())) {
PHI->addOperand(
MachineOperand::CreateReg(FuncInfo->PHINodesToUpdate[i].second, false));
- PHI->addOperand(MachineOperand::CreateMBB(FuncInfo->MBB));
+ PHI->addOperand(MachineOperand::CreateMBB(BB));
}
}
@@ -978,8 +939,7 @@ SelectionDAGISel::FinishBasicBlock() {
// additional DAGs necessary.
for (unsigned i = 0, e = SDB->SwitchCases.size(); i != e; ++i) {
// Set the current basic block to the mbb we wish to insert the code into
- MachineBasicBlock *ThisBB = FuncInfo->MBB = SDB->SwitchCases[i].ThisBB;
- FuncInfo->InsertPt = FuncInfo->MBB->end();
+ MachineBasicBlock *ThisBB = BB = SDB->SwitchCases[i].ThisBB;
// Determine the unique successors.
SmallVector<MachineBasicBlock *, 2> Succs;
@@ -989,24 +949,21 @@ SelectionDAGISel::FinishBasicBlock() {
// Emit the code. Note that this could result in ThisBB being split, so
// we need to check for updates.
- SDB->visitSwitchCase(SDB->SwitchCases[i], FuncInfo->MBB);
+ SDB->visitSwitchCase(SDB->SwitchCases[i], BB);
CurDAG->setRoot(SDB->getRoot());
SDB->clear();
- CodeGenAndEmitDAG();
- ThisBB = FuncInfo->MBB;
+ ThisBB = CodeGenAndEmitDAG(BB);
// Handle any PHI nodes in successors of this chunk, as if we were coming
// from the original BB before switch expansion. Note that PHI nodes can
// occur multiple times in PHINodesToUpdate. We have to be very careful to
// handle them the right number of times.
for (unsigned i = 0, e = Succs.size(); i != e; ++i) {
- FuncInfo->MBB = Succs[i];
- FuncInfo->InsertPt = FuncInfo->MBB->end();
- // FuncInfo->MBB may have been removed from the CFG if a branch was
- // constant folded.
- if (ThisBB->isSuccessor(FuncInfo->MBB)) {
- for (MachineBasicBlock::iterator Phi = FuncInfo->MBB->begin();
- Phi != FuncInfo->MBB->end() && Phi->isPHI();
+ BB = Succs[i];
+ // BB may have been removed from the CFG if a branch was constant folded.
+ if (ThisBB->isSuccessor(BB)) {
+ for (MachineBasicBlock::iterator Phi = BB->begin();
+ Phi != BB->end() && Phi->isPHI();
++Phi) {
// This value for this PHI node is recorded in PHINodesToUpdate.
for (unsigned pn = 0; ; ++pn) {
diff --git a/lib/CodeGen/SelectionDAG/TargetLowering.cpp b/lib/CodeGen/SelectionDAG/TargetLowering.cpp
index a9a7e50..d56a892 100644
--- a/lib/CodeGen/SelectionDAG/TargetLowering.cpp
+++ b/lib/CodeGen/SelectionDAG/TargetLowering.cpp
@@ -20,7 +20,6 @@
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/GlobalVariable.h"
#include "llvm/DerivedTypes.h"
-#include "llvm/CodeGen/Analysis.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineJumpTableInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
@@ -839,65 +838,6 @@ unsigned TargetLowering::getVectorTypeBreakdown(LLVMContext &Context, EVT VT,
return 1;
}
-/// Get the EVTs and ArgFlags collections that represent the legalized return
-/// type of the given function. This does not require a DAG or a return value,
-/// and is suitable for use before any DAGs for the function are constructed.
-/// TODO: Move this out of TargetLowering.cpp.
-void llvm::GetReturnInfo(const Type* ReturnType, Attributes attr,
- SmallVectorImpl<ISD::OutputArg> &Outs,
- const TargetLowering &TLI,
- SmallVectorImpl<uint64_t> *Offsets) {
- SmallVector<EVT, 4> ValueVTs;
- ComputeValueVTs(TLI, ReturnType, ValueVTs);
- unsigned NumValues = ValueVTs.size();
- if (NumValues == 0) return;
- unsigned Offset = 0;
-
- for (unsigned j = 0, f = NumValues; j != f; ++j) {
- EVT VT = ValueVTs[j];
- ISD::NodeType ExtendKind = ISD::ANY_EXTEND;
-
- if (attr & Attribute::SExt)
- ExtendKind = ISD::SIGN_EXTEND;
- else if (attr & Attribute::ZExt)
- ExtendKind = ISD::ZERO_EXTEND;
-
- // FIXME: C calling convention requires the return type to be promoted to
- // at least 32-bit. But this is not necessary for non-C calling
- // conventions. The frontend should mark functions whose return values
- // require promoting with signext or zeroext attributes.
- if (ExtendKind != ISD::ANY_EXTEND && VT.isInteger()) {
- EVT MinVT = TLI.getRegisterType(ReturnType->getContext(), MVT::i32);
- if (VT.bitsLT(MinVT))
- VT = MinVT;
- }
-
- unsigned NumParts = TLI.getNumRegisters(ReturnType->getContext(), VT);
- EVT PartVT = TLI.getRegisterType(ReturnType->getContext(), VT);
- unsigned PartSize = TLI.getTargetData()->getTypeAllocSize(
- PartVT.getTypeForEVT(ReturnType->getContext()));
-
- // 'inreg' on function refers to return value
- ISD::ArgFlagsTy Flags = ISD::ArgFlagsTy();
- if (attr & Attribute::InReg)
- Flags.setInReg();
-
- // Propagate extension type if any
- if (attr & Attribute::SExt)
- Flags.setSExt();
- else if (attr & Attribute::ZExt)
- Flags.setZExt();
-
- for (unsigned i = 0; i < NumParts; ++i) {
- Outs.push_back(ISD::OutputArg(Flags, PartVT, /*isFixed=*/true));
- if (Offsets) {
- Offsets->push_back(Offset);
- Offset += PartSize;
- }
- }
- }
-}
-
/// getByValTypeAlignment - Return the desired alignment for ByVal aggregate
/// function arguments in the caller parameter area. This is the actual
/// alignment, not its logarithm.
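
The helper deleted above legalized a function's return type without building a DAG. Its two key moves were to honor signext/zeroext attributes when picking an extension kind, and to widen small integer returns to at least i32 for C-convention calls. A minimal sketch of the first decision, assuming the era's integer-valued Attributes type (header locations varied in this period):

    #include "llvm/Attributes.h"
    #include "llvm/CodeGen/SelectionDAGNodes.h" // ISD::NodeType in this era
    using namespace llvm;

    // Pick how a return value is extended: sext/zext if the function says
    // so, otherwise let the target choose (ANY_EXTEND). The caller then
    // widens small integers to >= i32 for the C calling convention.
    static ISD::NodeType chooseExtendKind(Attributes attr) {
      if (attr & Attribute::SExt) return ISD::SIGN_EXTEND;
      if (attr & Attribute::ZExt) return ISD::ZERO_EXTEND;
      return ISD::ANY_EXTEND;
    }
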
diff --git a/lib/Target/X86/X86FastISel.cpp b/lib/Target/X86/X86FastISel.cpp
index e484752..a8fdc76 100644
--- a/lib/Target/X86/X86FastISel.cpp
+++ b/lib/Target/X86/X86FastISel.cpp
@@ -23,7 +23,6 @@
#include "llvm/GlobalVariable.h"
#include "llvm/Instructions.h"
#include "llvm/IntrinsicInst.h"
-#include "llvm/CodeGen/Analysis.h"
#include "llvm/CodeGen/FastISel.h"
#include "llvm/CodeGen/FunctionLoweringInfo.h"
#include "llvm/CodeGen/MachineConstantPool.h"
@@ -85,8 +84,6 @@ private:
bool X86SelectStore(const Instruction *I);
- bool X86SelectRet(const Instruction *I);
-
bool X86SelectCmp(const Instruction *I);
bool X86SelectZExt(const Instruction *I);
@@ -108,7 +105,6 @@ private:
bool X86SelectCall(const Instruction *I);
CCAssignFn *CCAssignFnForCall(CallingConv::ID CC, bool isTailCall = false);
- CCAssignFn *CCAssignFnForRet(CallingConv::ID CC, bool isTailCall = false);
const X86InstrInfo *getInstrInfo() const {
return getTargetMachine()->getInstrInfo();
@@ -182,20 +178,6 @@ CCAssignFn *X86FastISel::CCAssignFnForCall(CallingConv::ID CC,
return CC_X86_32_C;
}
-/// CCAssignFnForRet - Selects the correct CCAssignFn for a given calling
-/// convention.
-CCAssignFn *X86FastISel::CCAssignFnForRet(CallingConv::ID CC,
- bool isTaillCall) {
- if (Subtarget->is64Bit()) {
- if (Subtarget->isTargetWin64())
- return RetCC_X86_Win64_C;
- else
- return RetCC_X86_64_C;
- }
-
- return RetCC_X86_32_C;
-}
-
/// X86FastEmitLoad - Emit a machine instruction to load a value of type VT.
/// The address is either pre-computed, i.e. Ptr, or a GlobalAddress, i.e. GV.
/// Return true and the result register by reference if it is possible.
@@ -248,8 +230,7 @@ bool X86FastISel::X86FastEmitLoad(EVT VT, const X86AddressMode &AM,
}
ResultReg = createResultReg(RC);
- addFullAddress(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt,
- DL, TII.get(Opc), ResultReg), AM);
+ addFullAddress(BuildMI(MBB, DL, TII.get(Opc), ResultReg), AM);
return true;
}
@@ -268,7 +249,7 @@ X86FastISel::X86FastEmitStore(EVT VT, unsigned Val,
case MVT::i1: {
// Mask out all but lowest bit.
unsigned AndResult = createResultReg(X86::GR8RegisterClass);
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
+ BuildMI(MBB, DL,
TII.get(X86::AND8ri), AndResult).addReg(Val).addImm(1);
Val = AndResult;
}
@@ -285,8 +266,7 @@ X86FastISel::X86FastEmitStore(EVT VT, unsigned Val,
break;
}
- addFullAddress(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt,
- DL, TII.get(Opc)), AM).addReg(Val);
+ addFullAddress(BuildMI(MBB, DL, TII.get(Opc)), AM).addReg(Val);
return true;
}
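
Most hunks in this file are the same mechanical swap between two BuildMI overloads. Roughly, and assuming the 2010-era signatures implied by the hunks: the form taking a block reference plus an iterator inserts before that point, while the pointer form restored by this revert appends at the end of the block. A sketch contrasting the two:

    #include "llvm/CodeGen/MachineInstrBuilder.h"
    #include "llvm/Target/TargetInstrInfo.h"
    using namespace llvm;

    static void contrastBuildMI(MachineBasicBlock *MBB,
                                MachineBasicBlock::iterator InsertPt,
                                DebugLoc DL, const TargetInstrInfo &TII,
                                unsigned Opc, unsigned DestReg) {
      // Insert before an explicit point (the form this revert removes):
      BuildMI(*MBB, InsertPt, DL, TII.get(Opc), DestReg);
      // Append at the end of the block (the form it restores):
      BuildMI(MBB, DL, TII.get(Opc), DestReg);
    }
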
@@ -314,8 +294,7 @@ bool X86FastISel::X86FastEmitStore(EVT VT, const Value *Val,
}
if (Opc) {
- addFullAddress(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt,
- DL, TII.get(Opc)), AM)
+ addFullAddress(BuildMI(MBB, DL, TII.get(Opc)), AM)
.addImm(Signed ? (uint64_t) CI->getSExtValue() :
CI->getZExtValue());
return true;
@@ -354,7 +333,7 @@ bool X86FastISel::X86SelectAddress(const Value *V, X86AddressMode &AM) {
// Don't walk into other basic blocks; it's possible we haven't
// visited them yet, so the instructions may not yet be assigned
// virtual registers.
- if (FuncInfo.MBBMap[I->getParent()] != FuncInfo.MBB)
+ if (FuncInfo.MBBMap[I->getParent()] != MBB)
return false;
Opcode = I->getOpcode();
@@ -539,9 +518,6 @@ bool X86FastISel::X86SelectAddress(const Value *V, X86AddressMode &AM) {
StubAM.GV = GV;
StubAM.GVOpFlags = GVFlags;
- // Prepare for inserting code in the local-value area.
- MachineBasicBlock::iterator SaveInsertPt = enterLocalValueArea();
-
if (TLI.getPointerTy() == MVT::i64) {
Opc = X86::MOV64rm;
RC = X86::GR64RegisterClass;
@@ -554,13 +530,8 @@ bool X86FastISel::X86SelectAddress(const Value *V, X86AddressMode &AM) {
}
LoadReg = createResultReg(RC);
- MachineInstrBuilder LoadMI =
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(Opc), LoadReg);
- addFullAddress(LoadMI, StubAM);
-
- // Ok, back to normal mode.
- leaveLocalValueArea(SaveInsertPt);
-
+ addFullAddress(BuildMI(MBB, DL, TII.get(Opc), LoadReg), StubAM);
+
// Prevent loading GV stub multiple times in same MBB.
LocalValueMap[V] = LoadReg;
}
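
The deleted enterLocalValueArea/leaveLocalValueArea pair temporarily redirected the insertion point into the block's local-value area so the GV stub load could be hoisted and reused; the revert goes back to emitting it inline, with LocalValueMap still preventing duplicate loads within the block. The deleted dance, as a hedged fragment (emitStubLoad is a stand-in for the MOV load built above, not a real API):

    // Save the insertion point, emit into the local-value area, restore.
    MachineBasicBlock::iterator SaveInsertPt = enterLocalValueArea();
    unsigned LoadReg = emitStubLoad();   // stand-in for the BuildMI load above
    leaveLocalValueArea(SaveInsertPt);   // back to the normal insertion point
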
@@ -685,76 +656,6 @@ bool X86FastISel::X86SelectStore(const Instruction *I) {
return X86FastEmitStore(VT, I->getOperand(0), AM);
}
-/// X86SelectRet - Select and emit code to implement ret instructions.
-bool X86FastISel::X86SelectRet(const Instruction *I) {
- const ReturnInst *Ret = cast<ReturnInst>(I);
- const Function &F = *I->getParent()->getParent();
-
- if (!FuncInfo.CanLowerReturn)
- return false;
-
- CallingConv::ID CC = F.getCallingConv();
- if (CC != CallingConv::C &&
- CC != CallingConv::Fast &&
- CC != CallingConv::X86_FastCall)
- return false;
-
- if (Subtarget->isTargetWin64())
- return false;
-
- // fastcc with -tailcallopt is intended to provide a guaranteed
- // tail call optimization. Fastisel doesn't know how to do that.
- if (CC == CallingConv::Fast && GuaranteedTailCallOpt)
- return false;
-
- // Let SDISel handle vararg functions.
- if (F.isVarArg())
- return false;
-
- if (Ret->getNumOperands() > 0) {
- SmallVector<ISD::OutputArg, 4> Outs;
- GetReturnInfo(F.getReturnType(), F.getAttributes().getRetAttributes(),
- Outs, TLI);
-
- // Analyze operands of the call, assigning locations to each operand.
- SmallVector<CCValAssign, 16> ValLocs;
- CCState CCInfo(CC, F.isVarArg(), TM, ValLocs, I->getContext());
- CCInfo.AnalyzeReturn(Outs, CCAssignFnForRet(CC));
-
- const Value *RV = Ret->getOperand(0);
- unsigned Reg = getRegForValue(RV);
- if (Reg == 0)
- return false;
-
- // Copy the return value into registers.
- for (unsigned i = 0, e = ValLocs.size(); i != e; ++i) {
- CCValAssign &VA = ValLocs[i];
-
- // Don't bother handling odd stuff for now.
- if (VA.getLocInfo() != CCValAssign::Full)
- return false;
- if (!VA.isRegLoc())
- return false;
- // TODO: For now, don't try to handle cases where getLocInfo()
- // says Full but the types don't match.
- if (VA.getValVT() != TLI.getValueType(RV->getType()))
- return false;
-
- TargetRegisterClass* RC = TLI.getRegClassFor(VA.getValVT());
- bool Emitted = TII.copyRegToReg(*FuncInfo.MBB, FuncInfo.InsertPt,
- VA.getLocReg(), Reg + VA.getValNo(),
- RC, RC, DL);
- assert(Emitted && "Failed to emit a copy instruction!"); Emitted=Emitted;
-
- MRI.addLiveOut(VA.getLocReg());
- }
- }
-
- // Now emit the RET.
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(X86::RET));
- return true;
-}
-
/// X86SelectLoad - Select and emit code to implement load instructions.
///
bool X86FastISel::X86SelectLoad(const Instruction *I) {
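
With X86SelectRet gone, ret is simply no longer a case FastISel handles. The contract is that TargetSelectInstruction returns false for anything it cannot select, and the per-instruction fallback then hands that instruction to SelectionDAG. The dispatch shape after this revert, with most handled opcodes elided (see also the TargetSelectInstruction hunk further down):

    bool X86FastISel::TargetSelectInstruction(const Instruction *I) {
      switch (I->getOpcode()) {
      case Instruction::Load:  return X86SelectLoad(I);
      case Instruction::Store: return X86SelectStore(I);
      // ... other handled opcodes ...
      default: break;
      }
      return false; // e.g. Ret: fall back to the SelectionDAG path
    }
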
@@ -819,9 +720,8 @@ bool X86FastISel::X86FastEmitCompare(const Value *Op0, const Value *Op1,
// CMPri, otherwise use CMPrr.
if (const ConstantInt *Op1C = dyn_cast<ConstantInt>(Op1)) {
if (unsigned CompareImmOpc = X86ChooseCmpImmediateOpcode(VT, Op1C)) {
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(CompareImmOpc))
- .addReg(Op0Reg)
- .addImm(Op1C->getSExtValue());
+ BuildMI(MBB, DL, TII.get(CompareImmOpc)).addReg(Op0Reg)
+ .addImm(Op1C->getSExtValue());
return true;
}
}
@@ -831,9 +731,7 @@ bool X86FastISel::X86FastEmitCompare(const Value *Op0, const Value *Op1,
unsigned Op1Reg = getRegForValue(Op1);
if (Op1Reg == 0) return false;
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(CompareOpc))
- .addReg(Op0Reg)
- .addReg(Op1Reg);
+ BuildMI(MBB, DL, TII.get(CompareOpc)).addReg(Op0Reg).addReg(Op1Reg);
return true;
}
@@ -855,10 +753,9 @@ bool X86FastISel::X86SelectCmp(const Instruction *I) {
unsigned EReg = createResultReg(&X86::GR8RegClass);
unsigned NPReg = createResultReg(&X86::GR8RegClass);
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(X86::SETEr), EReg);
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
- TII.get(X86::SETNPr), NPReg);
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
+ BuildMI(MBB, DL, TII.get(X86::SETEr), EReg);
+ BuildMI(MBB, DL, TII.get(X86::SETNPr), NPReg);
+ BuildMI(MBB, DL,
TII.get(X86::AND8rr), ResultReg).addReg(NPReg).addReg(EReg);
UpdateValueMap(I, ResultReg);
return true;
@@ -869,13 +766,9 @@ bool X86FastISel::X86SelectCmp(const Instruction *I) {
unsigned NEReg = createResultReg(&X86::GR8RegClass);
unsigned PReg = createResultReg(&X86::GR8RegClass);
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
- TII.get(X86::SETNEr), NEReg);
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
- TII.get(X86::SETPr), PReg);
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
- TII.get(X86::OR8rr), ResultReg)
- .addReg(PReg).addReg(NEReg);
+ BuildMI(MBB, DL, TII.get(X86::SETNEr), NEReg);
+ BuildMI(MBB, DL, TII.get(X86::SETPr), PReg);
+ BuildMI(MBB, DL, TII.get(X86::OR8rr), ResultReg).addReg(PReg).addReg(NEReg);
UpdateValueMap(I, ResultReg);
return true;
}
@@ -914,7 +807,7 @@ bool X86FastISel::X86SelectCmp(const Instruction *I) {
if (!X86FastEmitCompare(Op0, Op1, VT))
return false;
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(SetCCOpc), ResultReg);
+ BuildMI(MBB, DL, TII.get(SetCCOpc), ResultReg);
UpdateValueMap(I, ResultReg);
return true;
}
@@ -950,7 +843,7 @@ bool X86FastISel::X86SelectBranch(const Instruction *I) {
// Try to take advantage of fallthrough opportunities.
CmpInst::Predicate Predicate = CI->getPredicate();
- if (FuncInfo.MBB->isLayoutSuccessor(TrueMBB)) {
+ if (MBB->isLayoutSuccessor(TrueMBB)) {
std::swap(TrueMBB, FalseMBB);
Predicate = CmpInst::getInversePredicate(Predicate);
}
@@ -999,18 +892,16 @@ bool X86FastISel::X86SelectBranch(const Instruction *I) {
if (!X86FastEmitCompare(Op0, Op1, VT))
return false;
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(BranchOpc))
- .addMBB(TrueMBB);
+ BuildMI(MBB, DL, TII.get(BranchOpc)).addMBB(TrueMBB);
if (Predicate == CmpInst::FCMP_UNE) {
// X86 requires a second branch to handle UNE (and OEQ,
// which is mapped to UNE above).
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(X86::JP_4))
- .addMBB(TrueMBB);
+ BuildMI(MBB, DL, TII.get(X86::JP_4)).addMBB(TrueMBB);
}
FastEmitBranch(FalseMBB, DL);
- FuncInfo.MBB->addSuccessor(TrueMBB);
+ MBB->addSuccessor(TrueMBB);
return true;
}
} else if (ExtractValueInst *EI =
@@ -1036,8 +927,7 @@ bool X86FastISel::X86SelectBranch(const Instruction *I) {
unsigned Reg = getRegForValue(EI);
for (MachineBasicBlock::const_reverse_iterator
- RI = FuncInfo.MBB->rbegin(), RE = FuncInfo.MBB->rend();
- RI != RE; ++RI) {
+ RI = MBB->rbegin(), RE = MBB->rend(); RI != RE; ++RI) {
const MachineInstr &MI = *RI;
if (MI.definesRegister(Reg)) {
@@ -1062,11 +952,11 @@ bool X86FastISel::X86SelectBranch(const Instruction *I) {
unsigned OpCode = SetMI->getOpcode();
if (OpCode == X86::SETOr || OpCode == X86::SETBr) {
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
- TII.get(OpCode == X86::SETOr ? X86::JO_4 : X86::JB_4))
+ BuildMI(MBB, DL, TII.get(OpCode == X86::SETOr ?
+ X86::JO_4 : X86::JB_4))
.addMBB(TrueMBB);
FastEmitBranch(FalseMBB, DL);
- FuncInfo.MBB->addSuccessor(TrueMBB);
+ MBB->addSuccessor(TrueMBB);
return true;
}
}
@@ -1078,12 +968,10 @@ bool X86FastISel::X86SelectBranch(const Instruction *I) {
unsigned OpReg = getRegForValue(BI->getCondition());
if (OpReg == 0) return false;
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(X86::TEST8rr))
- .addReg(OpReg).addReg(OpReg);
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(X86::JNE_4))
- .addMBB(TrueMBB);
+ BuildMI(MBB, DL, TII.get(X86::TEST8rr)).addReg(OpReg).addReg(OpReg);
+ BuildMI(MBB, DL, TII.get(X86::JNE_4)).addMBB(TrueMBB);
FastEmitBranch(FalseMBB, DL);
- FuncInfo.MBB->addSuccessor(TrueMBB);
+ MBB->addSuccessor(TrueMBB);
return true;
}
@@ -1140,7 +1028,7 @@ bool X86FastISel::X86SelectShift(const Instruction *I) {
// Fold immediate in shl(x,3).
if (const ConstantInt *CI = dyn_cast<ConstantInt>(I->getOperand(1))) {
unsigned ResultReg = createResultReg(RC);
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(OpImm),
+ BuildMI(MBB, DL, TII.get(OpImm),
ResultReg).addReg(Op0Reg).addImm(CI->getZExtValue() & 0xff);
UpdateValueMap(I, ResultReg);
return true;
@@ -1148,19 +1036,16 @@ bool X86FastISel::X86SelectShift(const Instruction *I) {
unsigned Op1Reg = getRegForValue(I->getOperand(1));
if (Op1Reg == 0) return false;
- TII.copyRegToReg(*FuncInfo.MBB, FuncInfo.InsertPt,
- CReg, Op1Reg, RC, RC, DL);
+ TII.copyRegToReg(*MBB, MBB->end(), CReg, Op1Reg, RC, RC, DL);
// The shift instruction uses X86::CL. If we defined a super-register
// of X86::CL, emit a subreg KILL to precisely describe what we're doing here.
if (CReg != X86::CL)
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
- TII.get(TargetOpcode::KILL), X86::CL)
+ BuildMI(MBB, DL, TII.get(TargetOpcode::KILL), X86::CL)
.addReg(CReg, RegState::Kill);
unsigned ResultReg = createResultReg(RC);
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(OpReg), ResultReg)
- .addReg(Op0Reg);
+ BuildMI(MBB, DL, TII.get(OpReg), ResultReg).addReg(Op0Reg);
UpdateValueMap(I, ResultReg);
return true;
}
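
The shift hunk shows the era's target copy hook in its restored form: copyRegToReg asks the target to emit a register-to-register copy at a given point and reports success. Here it seeds the count register (CL, or a super-register of it) before the shift. The call shape, as a fragment using the names from the hunk:

    // Copy the shift count into CL's (possibly wider) physical register at
    // the end of the block; the bool result says whether the target could
    // emit the copy.
    bool Emitted = TII.copyRegToReg(*MBB, MBB->end(),
                                    CReg,    // dest: CL, CX, ECX or RCX
                                    Op1Reg,  // src: vreg holding the count
                                    RC, RC, DL);
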
@@ -1192,11 +1077,9 @@ bool X86FastISel::X86SelectSelect(const Instruction *I) {
unsigned Op2Reg = getRegForValue(I->getOperand(2));
if (Op2Reg == 0) return false;
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(X86::TEST8rr))
- .addReg(Op0Reg).addReg(Op0Reg);
+ BuildMI(MBB, DL, TII.get(X86::TEST8rr)).addReg(Op0Reg).addReg(Op0Reg);
unsigned ResultReg = createResultReg(RC);
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(Opc), ResultReg)
- .addReg(Op1Reg).addReg(Op2Reg);
+ BuildMI(MBB, DL, TII.get(Opc), ResultReg).addReg(Op1Reg).addReg(Op2Reg);
UpdateValueMap(I, ResultReg);
return true;
}
@@ -1210,9 +1093,7 @@ bool X86FastISel::X86SelectFPExt(const Instruction *I) {
unsigned OpReg = getRegForValue(V);
if (OpReg == 0) return false;
unsigned ResultReg = createResultReg(X86::FR64RegisterClass);
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
- TII.get(X86::CVTSS2SDrr), ResultReg)
- .addReg(OpReg);
+ BuildMI(MBB, DL, TII.get(X86::CVTSS2SDrr), ResultReg).addReg(OpReg);
UpdateValueMap(I, ResultReg);
return true;
}
@@ -1229,9 +1110,7 @@ bool X86FastISel::X86SelectFPTrunc(const Instruction *I) {
unsigned OpReg = getRegForValue(V);
if (OpReg == 0) return false;
unsigned ResultReg = createResultReg(X86::FR32RegisterClass);
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
- TII.get(X86::CVTSD2SSrr), ResultReg)
- .addReg(OpReg);
+ BuildMI(MBB, DL, TII.get(X86::CVTSD2SSrr), ResultReg).addReg(OpReg);
UpdateValueMap(I, ResultReg);
return true;
}
@@ -1266,8 +1145,7 @@ bool X86FastISel::X86SelectTrunc(const Instruction *I) {
const TargetRegisterClass *CopyRC = (SrcVT == MVT::i16)
? X86::GR16_ABCDRegisterClass : X86::GR32_ABCDRegisterClass;
unsigned CopyReg = createResultReg(CopyRC);
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(CopyOpc), CopyReg)
- .addReg(InputReg);
+ BuildMI(MBB, DL, TII.get(CopyOpc), CopyReg).addReg(InputReg);
// Then issue an extract_subreg.
unsigned ResultReg = FastEmitInst_extractsubreg(MVT::i8,
@@ -1288,18 +1166,14 @@ bool X86FastISel::X86SelectExtractValue(const Instruction *I) {
switch (CI->getIntrinsicID()) {
default: break;
case Intrinsic::sadd_with_overflow:
- case Intrinsic::uadd_with_overflow: {
+ case Intrinsic::uadd_with_overflow:
// Cheat a little. We know that the registers for "add" and "seto" are
// allocated sequentially. However, we only keep track of the register
// for "add" in the value map. Use extractvalue's index to get the
// correct register for "seto".
- unsigned OpReg = getRegForValue(Agg);
- if (OpReg == 0)
- return false;
- UpdateValueMap(I, OpReg + *EI->idx_begin());
+ UpdateValueMap(I, lookUpRegForValue(Agg) + *EI->idx_begin());
return true;
}
- }
}
return false;
@@ -1343,7 +1217,7 @@ bool X86FastISel::X86VisitIntrinsicCall(const IntrinsicInst &I) {
return false;
unsigned ResultReg = createResultReg(TLI.getRegClassFor(VT));
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(OpC), ResultReg).
+ BuildMI(MBB, DL, TII.get(OpC), ResultReg).
addImm(CI->isZero() ? -1ULL : 0);
UpdateValueMap(&I, ResultReg);
return true;
@@ -1357,12 +1231,12 @@ bool X86FastISel::X86VisitIntrinsicCall(const IntrinsicInst &I) {
const TargetInstrDesc &II = TII.get(TargetOpcode::DBG_VALUE);
// FIXME may need to add RegState::Debug to any registers produced,
// although ESP/EBP should be the only ones at the moment.
- addFullAddress(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II), AM).
- addImm(0).addMetadata(DI->getVariable());
+ addFullAddress(BuildMI(MBB, DL, II), AM).addImm(0).
+ addMetadata(DI->getVariable());
return true;
}
case Intrinsic::trap: {
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(X86::TRAP));
+ BuildMI(MBB, DL, TII.get(X86::TRAP));
return true;
}
case Intrinsic::sadd_with_overflow:
@@ -1398,8 +1272,7 @@ bool X86FastISel::X86VisitIntrinsicCall(const IntrinsicInst &I) {
return false;
unsigned ResultReg = createResultReg(TLI.getRegClassFor(VT));
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(OpC), ResultReg)
- .addReg(Reg1).addReg(Reg2);
+ BuildMI(MBB, DL, TII.get(OpC), ResultReg).addReg(Reg1).addReg(Reg2);
unsigned DestReg1 = UpdateValueMap(&I, ResultReg);
// If the add with overflow is an intra-block value then we just want to
@@ -1417,7 +1290,7 @@ bool X86FastISel::X86VisitIntrinsicCall(const IntrinsicInst &I) {
unsigned Opc = X86::SETBr;
if (I.getIntrinsicID() == Intrinsic::sadd_with_overflow)
Opc = X86::SETOr;
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(Opc), ResultReg);
+ BuildMI(MBB, DL, TII.get(Opc), ResultReg);
return true;
}
}
@@ -1544,8 +1417,7 @@ bool X86FastISel::X86SelectCall(const Instruction *I) {
// Issue CALLSEQ_START
unsigned AdjStackDown = TM.getRegisterInfo()->getCallFrameSetupOpcode();
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(AdjStackDown))
- .addImm(NumBytes);
+ BuildMI(MBB, DL, TII.get(AdjStackDown)).addImm(NumBytes);
// Process argument: walk the register/memloc assignments, inserting
// copies / loads.
@@ -1601,8 +1473,8 @@ bool X86FastISel::X86SelectCall(const Instruction *I) {
if (VA.isRegLoc()) {
TargetRegisterClass* RC = TLI.getRegClassFor(ArgVT);
- bool Emitted = TII.copyRegToReg(*FuncInfo.MBB, FuncInfo.InsertPt,
- VA.getLocReg(), Arg, RC, RC, DL);
+ bool Emitted = TII.copyRegToReg(*MBB, MBB->end(), VA.getLocReg(),
+ Arg, RC, RC, DL);
assert(Emitted && "Failed to emit a copy instruction!"); Emitted=Emitted;
Emitted = true;
RegArgs.push_back(VA.getLocReg());
@@ -1628,8 +1500,8 @@ bool X86FastISel::X86SelectCall(const Instruction *I) {
if (Subtarget->isPICStyleGOT()) {
TargetRegisterClass *RC = X86::GR32RegisterClass;
unsigned Base = getInstrInfo()->getGlobalBaseReg(FuncInfo.MF);
- bool Emitted = TII.copyRegToReg(*FuncInfo.MBB, FuncInfo.InsertPt,
- X86::EBX, Base, RC, RC, DL);
+ bool Emitted = TII.copyRegToReg(*MBB, MBB->end(), X86::EBX, Base, RC, RC,
+ DL);
assert(Emitted && "Failed to emit a copy instruction!"); Emitted=Emitted;
Emitted = true;
}
@@ -1639,8 +1511,7 @@ bool X86FastISel::X86SelectCall(const Instruction *I) {
if (CalleeOp) {
// Register-indirect call.
unsigned CallOpc = Subtarget->is64Bit() ? X86::CALL64r : X86::CALL32r;
- MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(CallOpc))
- .addReg(CalleeOp);
+ MIB = BuildMI(MBB, DL, TII.get(CallOpc)).addReg(CalleeOp);
} else {
// Direct call.
@@ -1669,8 +1540,7 @@ bool X86FastISel::X86SelectCall(const Instruction *I) {
}
- MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(CallOpc))
- .addGlobalAddress(GV, 0, OpFlags);
+ MIB = BuildMI(MBB, DL, TII.get(CallOpc)).addGlobalAddress(GV, 0, OpFlags);
}
// Add an implicit use GOT pointer in EBX.
@@ -1683,8 +1553,7 @@ bool X86FastISel::X86SelectCall(const Instruction *I) {
// Issue CALLSEQ_END
unsigned AdjStackUp = TM.getRegisterInfo()->getCallFrameDestroyOpcode();
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(AdjStackUp))
- .addImm(NumBytes).addImm(0);
+ BuildMI(MBB, DL, TII.get(AdjStackUp)).addImm(NumBytes).addImm(0);
// Now handle call return value (if any).
SmallVector<unsigned, 4> UsedRegs;
@@ -1711,7 +1580,7 @@ bool X86FastISel::X86SelectCall(const Instruction *I) {
}
unsigned ResultReg = createResultReg(DstRC);
- bool Emitted = TII.copyRegToReg(*FuncInfo.MBB, FuncInfo.InsertPt, ResultReg,
+ bool Emitted = TII.copyRegToReg(*MBB, MBB->end(), ResultReg,
RVLocs[0].getLocReg(), DstRC, SrcRC, DL);
assert(Emitted && "Failed to emit a copy instruction!"); Emitted=Emitted;
Emitted = true;
@@ -1725,21 +1594,18 @@ bool X86FastISel::X86SelectCall(const Instruction *I) {
unsigned Opc = ResVT == MVT::f32 ? X86::ST_Fp80m32 : X86::ST_Fp80m64;
unsigned MemSize = ResVT.getSizeInBits()/8;
int FI = MFI.CreateStackObject(MemSize, MemSize, false);
- addFrameReference(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
- TII.get(Opc)), FI)
- .addReg(ResultReg);
+ addFrameReference(BuildMI(MBB, DL, TII.get(Opc)), FI).addReg(ResultReg);
DstRC = ResVT == MVT::f32
? X86::FR32RegisterClass : X86::FR64RegisterClass;
Opc = ResVT == MVT::f32 ? X86::MOVSSrm : X86::MOVSDrm;
ResultReg = createResultReg(DstRC);
- addFrameReference(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
- TII.get(Opc), ResultReg), FI);
+ addFrameReference(BuildMI(MBB, DL, TII.get(Opc), ResultReg), FI);
}
if (AndToI1) {
// Mask out all but lowest bit for some call which produces an i1.
unsigned AndResult = createResultReg(X86::GR8RegisterClass);
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
+ BuildMI(MBB, DL,
TII.get(X86::AND8ri), AndResult).addReg(ResultReg).addImm(1);
ResultReg = AndResult;
}
@@ -1762,8 +1628,6 @@ X86FastISel::TargetSelectInstruction(const Instruction *I) {
return X86SelectLoad(I);
case Instruction::Store:
return X86SelectStore(I);
- case Instruction::Ret:
- return X86SelectRet(I);
case Instruction::ICmp:
case Instruction::FCmp:
return X86SelectCmp(I);
@@ -1864,8 +1728,7 @@ unsigned X86FastISel::TargetMaterializeConstant(const Constant *C) {
else
Opc = X86::LEA64r;
unsigned ResultReg = createResultReg(RC);
- addFullAddress(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
- TII.get(Opc), ResultReg), AM);
+ addFullAddress(BuildMI(MBB, DL, TII.get(Opc), ResultReg), AM);
return ResultReg;
}
return 0;
@@ -1895,8 +1758,7 @@ unsigned X86FastISel::TargetMaterializeConstant(const Constant *C) {
// Create the load from the constant pool.
unsigned MCPOffset = MCP.getConstantPoolIndex(C, Align);
unsigned ResultReg = createResultReg(RC);
- addConstantPoolReference(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
- TII.get(Opc), ResultReg),
+ addConstantPoolReference(BuildMI(MBB, DL, TII.get(Opc), ResultReg),
MCPOffset, PICBase, OpFlag);
return ResultReg;
@@ -1919,8 +1781,7 @@ unsigned X86FastISel::TargetMaterializeAlloca(const AllocaInst *C) {
unsigned Opc = Subtarget->is64Bit() ? X86::LEA64r : X86::LEA32r;
TargetRegisterClass* RC = TLI.getRegClassFor(TLI.getPointerTy());
unsigned ResultReg = createResultReg(RC);
- addFullAddress(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
- TII.get(Opc), ResultReg), AM);
+ addFullAddress(BuildMI(MBB, DL, TII.get(Opc), ResultReg), AM);
return ResultReg;
}
diff --git a/lib/Target/X86/X86ISelLowering.cpp b/lib/Target/X86/X86ISelLowering.cpp
index 0267487..e45f17b 100644
--- a/lib/Target/X86/X86ISelLowering.cpp
+++ b/lib/Target/X86/X86ISelLowering.cpp
@@ -1218,12 +1218,13 @@ bool X86TargetLowering::getStackCookieLocation(unsigned &AddressSpace,
bool
X86TargetLowering::CanLowerReturn(CallingConv::ID CallConv, bool isVarArg,
- const SmallVectorImpl<ISD::OutputArg> &Outs,
+ const SmallVectorImpl<EVT> &OutTys,
+ const SmallVectorImpl<ISD::ArgFlagsTy> &ArgsFlags,
LLVMContext &Context) const {
SmallVector<CCValAssign, 16> RVLocs;
CCState CCInfo(CallConv, isVarArg, getTargetMachine(),
RVLocs, Context);
- return CCInfo.CheckReturn(Outs, RetCC_X86);
+ return CCInfo.CheckReturn(OutTys, ArgsFlags, RetCC_X86);
}
SDValue
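
After the signature change, CanLowerReturn consumes the return's legalized value types and their flags as two parallel vectors rather than a single ISD::OutputArg list. A hedged usage sketch of the caller side; only the CanLowerReturn call shape is taken from the hunk above, the vector construction is illustrative:

    // Ask the target whether F's return values fit in registers; if not,
    // the return is demoted to an sret pointer argument.
    SmallVector<EVT, 4> OutTys;
    SmallVector<ISD::ArgFlagsTy, 4> ArgsFlags;
    ComputeValueVTs(TLI, F.getReturnType(), OutTys); // one EVT per scalar piece
    ArgsFlags.resize(OutTys.size());                 // default-constructed flags
    bool CanLower = TLI.CanLowerReturn(F.getCallingConv(), F.isVarArg(),
                                       OutTys, ArgsFlags, F.getContext());
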
diff --git a/lib/Target/X86/X86ISelLowering.h b/lib/Target/X86/X86ISelLowering.h
index 2d28e5c..39bbdac 100644
--- a/lib/Target/X86/X86ISelLowering.h
+++ b/lib/Target/X86/X86ISelLowering.h
@@ -740,7 +740,8 @@ namespace llvm {
virtual bool
CanLowerReturn(CallingConv::ID CallConv, bool isVarArg,
- const SmallVectorImpl<ISD::OutputArg> &Outs,
+ const SmallVectorImpl<EVT> &OutTys,
+ const SmallVectorImpl<ISD::ArgFlagsTy> &ArgsFlags,
LLVMContext &Context) const;
void ReplaceATOMIC_BINARY_64(SDNode *N, SmallVectorImpl<SDValue> &Results,
diff --git a/lib/Target/XCore/XCoreISelLowering.cpp b/lib/Target/XCore/XCoreISelLowering.cpp
index abe7b2f..6a25e06 100644
--- a/lib/Target/XCore/XCoreISelLowering.cpp
+++ b/lib/Target/XCore/XCoreISelLowering.cpp
@@ -1135,12 +1135,13 @@ XCoreTargetLowering::LowerCCCArguments(SDValue Chain,
bool XCoreTargetLowering::
CanLowerReturn(CallingConv::ID CallConv, bool isVarArg,
- const SmallVectorImpl<ISD::OutputArg> &Outs,
+ const SmallVectorImpl<EVT> &OutTys,
+ const SmallVectorImpl<ISD::ArgFlagsTy> &ArgsFlags,
LLVMContext &Context) const {
SmallVector<CCValAssign, 16> RVLocs;
CCState CCInfo(CallConv, isVarArg, getTargetMachine(),
RVLocs, Context);
- return CCInfo.CheckReturn(Outs, RetCC_XCore);
+ return CCInfo.CheckReturn(OutTys, ArgsFlags, RetCC_XCore);
}
SDValue
diff --git a/lib/Target/XCore/XCoreISelLowering.h b/lib/Target/XCore/XCoreISelLowering.h
index febc198..4664301 100644
--- a/lib/Target/XCore/XCoreISelLowering.h
+++ b/lib/Target/XCore/XCoreISelLowering.h
@@ -193,7 +193,8 @@ namespace llvm {
virtual bool
CanLowerReturn(CallingConv::ID CallConv, bool isVarArg,
- const SmallVectorImpl<ISD::OutputArg> &ArgsFlags,
+ const SmallVectorImpl<EVT> &OutTys,
+ const SmallVectorImpl<ISD::ArgFlagsTy> &ArgsFlags,
LLVMContext &Context) const;
};
}
diff --git a/test/CodeGen/X86/fast-isel-loads.ll b/test/CodeGen/X86/fast-isel-loads.ll
index 2fbb46c..027363e 100644
--- a/test/CodeGen/X86/fast-isel-loads.ll
+++ b/test/CodeGen/X86/fast-isel-loads.ll
@@ -5,7 +5,7 @@
; CHECK: foo:
; CHECK-NEXT: movq %rdi, -8(%rsp)
; CHECK-NEXT: movq %rsi, -16(%rsp)
-; CHECK-NEXT: movsd 128(%rsi,%rdi,8), %xmm0
+; CHECK: movsd 128(%rsi,%rdi,8), %xmm0
; CHECK-NEXT: ret
define double @foo(i64 %x, double* %p) nounwind {
diff --git a/test/CodeGen/X86/fast-isel.ll b/test/CodeGen/X86/fast-isel.ll
index 177c06b..3d26ae7 100644
--- a/test/CodeGen/X86/fast-isel.ll
+++ b/test/CodeGen/X86/fast-isel.ll
@@ -49,10 +49,9 @@ entry:
ret i32 %tmp2
}
-define void @ptrtoint_i1(i8* %p, i1* %q) nounwind {
+define i1 @ptrtoint_i1(i8* %p) nounwind {
%t = ptrtoint i8* %p to i1
- store i1 %t, i1* %q
- ret void
+ ret i1 %t
}
define i8* @inttoptr_i1(i1 %p) nounwind {
%t = inttoptr i1 %p to i8*
@@ -87,8 +86,11 @@ define i8 @mul_i8(i8 %a) nounwind {
ret i8 %tmp
}
-define void @load_store_i1(i1* %p, i1* %q) nounwind {
- %t = load i1* %p
- store i1 %t, i1* %q
+define void @store_i1(i1* %p, i1 %t) nounwind {
+ store i1 %t, i1* %p
ret void
}
+define i1 @load_i1(i1* %p) nounwind {
+ %t = load i1* %p
+ ret i1 %t
+}
diff --git a/utils/TableGen/FastISelEmitter.cpp b/utils/TableGen/FastISelEmitter.cpp
index 843546d..7c54f78 100644
--- a/utils/TableGen/FastISelEmitter.cpp
+++ b/utils/TableGen/FastISelEmitter.cpp
@@ -432,7 +432,7 @@ void FastISelMap::PrintFunctionDefinitions(raw_ostream &OS) {
for (unsigned i = 0; i < Memo.PhysRegs->size(); ++i) {
if ((*Memo.PhysRegs)[i] != "")
- OS << " TII.copyRegToReg(*FuncInfo.MBB, FuncInfo.InsertPt, "
+ OS << " TII.copyRegToReg(*MBB, MBB->end(), "
<< (*Memo.PhysRegs)[i] << ", Op" << i << ", "
<< "TM.getRegisterInfo()->getPhysicalRegisterRegClass("
<< (*Memo.PhysRegs)[i] << "), "
@@ -526,7 +526,7 @@ void FastISelMap::PrintFunctionDefinitions(raw_ostream &OS) {
for (unsigned i = 0; i < Memo.PhysRegs->size(); ++i) {
if ((*Memo.PhysRegs)[i] != "")
- OS << " TII.copyRegToReg(*FuncInfo.MBB, FuncInfo.InsertPt, "
+ OS << " TII.copyRegToReg(*MBB, MBB->end(), "
<< (*Memo.PhysRegs)[i] << ", Op" << i << ", "
<< "TM.getRegisterInfo()->getPhysicalRegisterRegClass("
<< (*Memo.PhysRegs)[i] << "), "
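
For orientation, the OS << statements above print fragments of the generated FastISel .inc source. Reconstructed from just the visible prints, the emitted text after this change begins like the following; the tail of the call is elided in the hunk, so it is elided here as well, and <PhysReg> / Op0 are placeholders:

    TII.copyRegToReg(*MBB, MBB->end(), <PhysReg>, Op0,
        TM.getRegisterInfo()->getPhysicalRegisterRegClass(<PhysReg>),
        ...
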