Diffstat (limited to 'lib/CodeGen')
-rw-r--r--  lib/CodeGen/AggressiveAntiDepBreaker.h | 2
-rw-r--r--  lib/CodeGen/AsmPrinter/AsmPrinter.cpp | 38
-rw-r--r--  lib/CodeGen/AsmPrinter/DbgValueHistoryCalculator.cpp | 22
-rw-r--r--  lib/CodeGen/AsmPrinter/DbgValueHistoryCalculator.h | 14
-rw-r--r--  lib/CodeGen/AsmPrinter/DebugLocEntry.h | 29
-rw-r--r--  lib/CodeGen/AsmPrinter/DwarfCompileUnit.cpp | 130
-rw-r--r--  lib/CodeGen/AsmPrinter/DwarfDebug.cpp | 299
-rw-r--r--  lib/CodeGen/AsmPrinter/DwarfDebug.h | 55
-rw-r--r--  lib/CodeGen/AsmPrinter/DwarfException.h | 4
-rw-r--r--  lib/CodeGen/AsmPrinter/DwarfExpression.cpp | 28
-rw-r--r--  lib/CodeGen/AsmPrinter/DwarfExpression.h | 3
-rw-r--r--  lib/CodeGen/AsmPrinter/DwarfFile.cpp | 4
-rw-r--r--  lib/CodeGen/AsmPrinter/DwarfUnit.cpp | 492
-rw-r--r--  lib/CodeGen/AsmPrinter/DwarfUnit.h | 4
-rw-r--r--  lib/CodeGen/AsmPrinter/EHStreamer.cpp | 28
-rw-r--r--  lib/CodeGen/AsmPrinter/EHStreamer.h | 6
-rw-r--r--  lib/CodeGen/AsmPrinter/Win64Exception.cpp | 274
-rw-r--r--  lib/CodeGen/AsmPrinter/Win64Exception.h | 9
-rw-r--r--  lib/CodeGen/AsmPrinter/WinCodeViewLineTables.cpp | 29
-rw-r--r--  lib/CodeGen/AsmPrinter/WinCodeViewLineTables.h | 2
-rw-r--r--  lib/CodeGen/CodeGenPrepare.cpp | 104
-rw-r--r--  lib/CodeGen/CriticalAntiDepBreaker.h | 2
-rw-r--r--  lib/CodeGen/EarlyIfConversion.cpp | 5
-rw-r--r--  lib/CodeGen/GCMetadata.cpp | 4
-rw-r--r--  lib/CodeGen/GCRootLowering.cpp | 17
-rw-r--r--  lib/CodeGen/GlobalMerge.cpp | 6
-rw-r--r--  lib/CodeGen/InlineSpiller.cpp | 4
-rw-r--r--  lib/CodeGen/LLVMTargetMachine.cpp | 19
-rw-r--r--  lib/CodeGen/LexicalScopes.cpp | 105
-rw-r--r--  lib/CodeGen/LiveDebugVariables.cpp | 69
-rw-r--r--  lib/CodeGen/LiveDebugVariables.h | 2
-rw-r--r--  lib/CodeGen/LiveInterval.cpp | 56
-rw-r--r--  lib/CodeGen/MachineBasicBlock.cpp | 2
-rw-r--r--  lib/CodeGen/MachineBlockPlacement.cpp | 6
-rw-r--r--  lib/CodeGen/MachineFunction.cpp | 4
-rw-r--r--  lib/CodeGen/MachineInstr.cpp | 27
-rw-r--r--  lib/CodeGen/MachineLICM.cpp | 240
-rw-r--r--  lib/CodeGen/MachineModuleInfo.cpp | 29
-rw-r--r--  lib/CodeGen/MachineModuleInfoImpls.cpp | 9
-rw-r--r--  lib/CodeGen/MachineScheduler.cpp | 4
-rw-r--r--  lib/CodeGen/MachineTraceMetrics.cpp | 16
-rw-r--r--  lib/CodeGen/PostRASchedulerList.cpp | 2
-rw-r--r--  lib/CodeGen/PrologEpilogInserter.cpp | 20
-rw-r--r--  lib/CodeGen/RegAllocFast.cpp | 13
-rw-r--r--  lib/CodeGen/RegAllocGreedy.cpp | 9
-rw-r--r--  lib/CodeGen/RegisterCoalescer.cpp | 48
-rw-r--r--  lib/CodeGen/SelectionDAG/DAGCombiner.cpp | 339
-rw-r--r--  lib/CodeGen/SelectionDAG/FastISel.cpp | 17
-rw-r--r--  lib/CodeGen/SelectionDAG/FunctionLoweringInfo.cpp | 209
-rw-r--r--  lib/CodeGen/SelectionDAG/InstrEmitter.cpp | 2
-rw-r--r--  lib/CodeGen/SelectionDAG/LegalizeDAG.cpp | 55
-rw-r--r--  lib/CodeGen/SelectionDAG/LegalizeTypes.h | 2
-rw-r--r--  lib/CodeGen/SelectionDAG/LegalizeVectorOps.cpp | 4
-rw-r--r--  lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp | 35
-rw-r--r--  lib/CodeGen/SelectionDAG/ScheduleDAGRRList.cpp | 5
-rw-r--r--  lib/CodeGen/SelectionDAG/ScheduleDAGSDNodes.h | 2
-rw-r--r--  lib/CodeGen/SelectionDAG/ScheduleDAGVLIW.cpp | 2
-rw-r--r--  lib/CodeGen/SelectionDAG/SelectionDAG.cpp | 57
-rw-r--r--  lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp | 127
-rw-r--r--  lib/CodeGen/SelectionDAG/SelectionDAGBuilder.h | 8
-rw-r--r--  lib/CodeGen/SelectionDAG/SelectionDAGDumper.cpp | 30
-rw-r--r--  lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp | 123
-rw-r--r--  lib/CodeGen/SelectionDAG/StatepointLowering.cpp | 68
-rw-r--r--  lib/CodeGen/ShadowStackGCLowering.cpp | 4
-rw-r--r--  lib/CodeGen/SjLjEHPrepare.cpp | 43
-rw-r--r--  lib/CodeGen/SpillPlacement.h | 2
-rw-r--r--  lib/CodeGen/StackColoring.cpp | 2
-rw-r--r--  lib/CodeGen/TargetLoweringBase.cpp | 19
-rw-r--r--  lib/CodeGen/TargetLoweringObjectFileImpl.cpp | 21
-rw-r--r--  lib/CodeGen/WinEHPrepare.cpp | 981
70 files changed, 2766 insertions(+), 1685 deletions(-)
diff --git a/lib/CodeGen/AggressiveAntiDepBreaker.h b/lib/CodeGen/AggressiveAntiDepBreaker.h
index 12cf95b..f9544dd 100644
--- a/lib/CodeGen/AggressiveAntiDepBreaker.h
+++ b/lib/CodeGen/AggressiveAntiDepBreaker.h
@@ -127,7 +127,7 @@ class RegisterClassInfo;
AggressiveAntiDepBreaker(MachineFunction& MFi,
const RegisterClassInfo &RCI,
TargetSubtargetInfo::RegClassVector& CriticalPathRCs);
- ~AggressiveAntiDepBreaker();
+ ~AggressiveAntiDepBreaker() override;
/// Initialize anti-dep breaking for a new basic block.
void StartBlock(MachineBasicBlock *BB) override;
diff --git a/lib/CodeGen/AsmPrinter/AsmPrinter.cpp b/lib/CodeGen/AsmPrinter/AsmPrinter.cpp
index 07d6731..43d7a38 100644
--- a/lib/CodeGen/AsmPrinter/AsmPrinter.cpp
+++ b/lib/CodeGen/AsmPrinter/AsmPrinter.cpp
@@ -28,7 +28,7 @@
#include "llvm/CodeGen/MachineInstrBundle.h"
#include "llvm/CodeGen/MachineJumpTableInfo.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
-#include "llvm/CodeGen/MachineModuleInfo.h"
+#include "llvm/CodeGen/MachineModuleInfoImpls.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugInfo.h"
#include "llvm/IR/Mangler.h"
@@ -671,17 +671,17 @@ static bool emitDebugValueComment(const MachineInstr *MI, AsmPrinter &AP) {
OS << "DEBUG_VALUE: ";
DIVariable V = MI->getDebugVariable();
- if (V.getContext().isSubprogram()) {
- StringRef Name = DISubprogram(V.getContext()).getDisplayName();
+ if (auto *SP = dyn_cast<MDSubprogram>(V->getScope())) {
+ StringRef Name = SP->getDisplayName();
if (!Name.empty())
OS << Name << ":";
}
- OS << V.getName();
+ OS << V->getName();
DIExpression Expr = MI->getDebugExpression();
- if (Expr.isBitPiece())
- OS << " [bit_piece offset=" << Expr.getBitPieceOffset()
- << " size=" << Expr.getBitPieceSize() << "]";
+ if (Expr->isBitPiece())
+ OS << " [bit_piece offset=" << Expr->getBitPieceOffset()
+ << " size=" << Expr->getBitPieceSize() << "]";
OS << " <- ";
// The second operand is only an offset if it's an immediate.
@@ -1034,11 +1034,31 @@ bool AsmPrinter::doFinalization(Module &M) {
EmitVisibility(Name, V, false);
}
+ const TargetLoweringObjectFile &TLOF = getObjFileLowering();
+
// Emit module flags.
SmallVector<Module::ModuleFlagEntry, 8> ModuleFlags;
M.getModuleFlagsMetadata(ModuleFlags);
if (!ModuleFlags.empty())
- getObjFileLowering().emitModuleFlags(OutStreamer, ModuleFlags, *Mang, TM);
+ TLOF.emitModuleFlags(OutStreamer, ModuleFlags, *Mang, TM);
+
+ Triple TT(TM.getTargetTriple());
+ if (TT.isOSBinFormatELF()) {
+ MachineModuleInfoELF &MMIELF = MMI->getObjFileInfo<MachineModuleInfoELF>();
+
+ // Output stubs for external and common global variables.
+ MachineModuleInfoELF::SymbolListTy Stubs = MMIELF.GetGVStubList();
+ if (!Stubs.empty()) {
+ OutStreamer.SwitchSection(TLOF.getDataRelSection());
+ const DataLayout *DL = TM.getDataLayout();
+
+ for (const auto &Stub : Stubs) {
+ OutStreamer.EmitLabel(Stub.first);
+ OutStreamer.EmitSymbolValue(Stub.second.getPointer(),
+ DL->getPointerSize());
+ }
+ }
+ }
// Make sure we wrote out everything we need.
OutStreamer.Flush();
@@ -2302,7 +2322,7 @@ MCSymbol *AsmPrinter::getSymbolWithGlobalValueBase(const GlobalValue *GV,
MCSymbol *AsmPrinter::GetExternalSymbolSymbol(StringRef Sym) const {
SmallString<60> NameStr;
Mang->getNameWithPrefix(NameStr, Sym);
- return OutContext.GetOrCreateSymbol(NameStr.str());
+ return OutContext.GetOrCreateSymbol(NameStr);
}
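
Note: the emitDebugValueComment hunk above is typical of the metadata changes throughout this patch: the old wrapper query V.getContext().isSubprogram() followed by a re-wrap becomes a direct dyn_cast<MDSubprogram> of the variable's scope, and accessors move from "." to "->". Below is a minimal, self-contained sketch of that classify-then-access pattern; the Scope/Subprogram types are stand-ins (with dynamic_cast playing the role of LLVM's dyn_cast<>), not the real metadata classes.

    #include <cassert>
    #include <string>

    // Stand-in hierarchy; only the shape of the check matters here.
    struct Scope { virtual ~Scope() = default; };
    struct Subprogram : Scope {
      std::string DisplayName;
      const std::string &getDisplayName() const { return DisplayName; }
    };

    // New style in the patch: if (auto *SP = dyn_cast<MDSubprogram>(V->getScope())) ...
    // Here dynamic_cast stands in for dyn_cast<>.
    static std::string scopeName(const Scope *S) {
      if (auto *SP = dynamic_cast<const Subprogram *>(S))
        return SP->getDisplayName();
      return std::string();
    }

    int main() {
      Subprogram SP;
      SP.DisplayName = "foo";
      Scope Plain;
      assert(scopeName(&SP) == "foo");
      assert(scopeName(&Plain).empty());
      return 0;
    }
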
diff --git a/lib/CodeGen/AsmPrinter/DbgValueHistoryCalculator.cpp b/lib/CodeGen/AsmPrinter/DbgValueHistoryCalculator.cpp
index bbdf237..1e3c5d7 100644
--- a/lib/CodeGen/AsmPrinter/DbgValueHistoryCalculator.cpp
+++ b/lib/CodeGen/AsmPrinter/DbgValueHistoryCalculator.cpp
@@ -33,7 +33,7 @@ static unsigned isDescribedByReg(const MachineInstr &MI) {
return MI.getOperand(0).isReg() ? MI.getOperand(0).getReg() : 0;
}
-void DbgValueHistoryMap::startInstrRange(const MDNode *Var,
+void DbgValueHistoryMap::startInstrRange(InlinedVariable Var,
const MachineInstr &MI) {
// Instruction range should start with a DBG_VALUE instruction for the
// variable.
@@ -48,7 +48,7 @@ void DbgValueHistoryMap::startInstrRange(const MDNode *Var,
Ranges.push_back(std::make_pair(&MI, nullptr));
}
-void DbgValueHistoryMap::endInstrRange(const MDNode *Var,
+void DbgValueHistoryMap::endInstrRange(InlinedVariable Var,
const MachineInstr &MI) {
auto &Ranges = VarInstrRanges[Var];
// Verify that the current instruction range is not yet closed.
@@ -59,7 +59,7 @@ void DbgValueHistoryMap::endInstrRange(const MDNode *Var,
Ranges.back().second = &MI;
}
-unsigned DbgValueHistoryMap::getRegisterForVar(const MDNode *Var) const {
+unsigned DbgValueHistoryMap::getRegisterForVar(InlinedVariable Var) const {
const auto &I = VarInstrRanges.find(Var);
if (I == VarInstrRanges.end())
return 0;
@@ -71,12 +71,13 @@ unsigned DbgValueHistoryMap::getRegisterForVar(const MDNode *Var) const {
namespace {
// Maps physreg numbers to the variables they describe.
-typedef std::map<unsigned, SmallVector<const MDNode *, 1>> RegDescribedVarsMap;
+typedef DbgValueHistoryMap::InlinedVariable InlinedVariable;
+typedef std::map<unsigned, SmallVector<InlinedVariable, 1>> RegDescribedVarsMap;
}
// \brief Claim that @Var is not described by @RegNo anymore.
-static void dropRegDescribedVar(RegDescribedVarsMap &RegVars,
- unsigned RegNo, const MDNode *Var) {
+static void dropRegDescribedVar(RegDescribedVarsMap &RegVars, unsigned RegNo,
+ InlinedVariable Var) {
const auto &I = RegVars.find(RegNo);
assert(RegNo != 0U && I != RegVars.end());
auto &VarSet = I->second;
@@ -89,8 +90,8 @@ static void dropRegDescribedVar(RegDescribedVarsMap &RegVars,
}
// \brief Claim that @Var is now described by @RegNo.
-static void addRegDescribedVar(RegDescribedVarsMap &RegVars,
- unsigned RegNo, const MDNode *Var) {
+static void addRegDescribedVar(RegDescribedVarsMap &RegVars, unsigned RegNo,
+ InlinedVariable Var) {
assert(RegNo != 0U);
auto &VarSet = RegVars[RegNo];
assert(std::find(VarSet.begin(), VarSet.end(), Var) == VarSet.end());
@@ -203,7 +204,10 @@ void llvm::calculateDbgValueHistory(const MachineFunction *MF,
// Use the base variable (without any DW_OP_piece expressions)
// as index into History. The full variables including the
// piece expressions are attached to the MI.
- DIVariable Var = MI.getDebugVariable();
+ MDLocalVariable *RawVar = MI.getDebugVariable();
+ assert(RawVar->isValidLocationForIntrinsic(MI.getDebugLoc()) &&
+ "Expected inlined-at fields to agree");
+ InlinedVariable Var(RawVar, MI.getDebugLoc()->getInlinedAt());
if (unsigned PrevReg = Result.getRegisterForVar(Var))
dropRegDescribedVar(RegVars, PrevReg, Var);
diff --git a/lib/CodeGen/AsmPrinter/DbgValueHistoryCalculator.h b/lib/CodeGen/AsmPrinter/DbgValueHistoryCalculator.h
index 4b62007..c25aaff 100644
--- a/lib/CodeGen/AsmPrinter/DbgValueHistoryCalculator.h
+++ b/lib/CodeGen/AsmPrinter/DbgValueHistoryCalculator.h
@@ -17,7 +17,8 @@ namespace llvm {
class MachineFunction;
class MachineInstr;
-class MDNode;
+class MDLocalVariable;
+class MDLocation;
class TargetRegisterInfo;
// For each user variable, keep a list of instruction ranges where this variable
@@ -31,16 +32,19 @@ class DbgValueHistoryMap {
public:
typedef std::pair<const MachineInstr *, const MachineInstr *> InstrRange;
typedef SmallVector<InstrRange, 4> InstrRanges;
- typedef MapVector<const MDNode *, InstrRanges> InstrRangesMap;
+ typedef std::pair<const MDLocalVariable *, const MDLocation *>
+ InlinedVariable;
+ typedef MapVector<InlinedVariable, InstrRanges> InstrRangesMap;
+
private:
InstrRangesMap VarInstrRanges;
public:
- void startInstrRange(const MDNode *Var, const MachineInstr &MI);
- void endInstrRange(const MDNode *Var, const MachineInstr &MI);
+ void startInstrRange(InlinedVariable Var, const MachineInstr &MI);
+ void endInstrRange(InlinedVariable Var, const MachineInstr &MI);
// Returns register currently describing @Var. If @Var is currently
// unaccessible or is not described by a register, returns 0.
- unsigned getRegisterForVar(const MDNode *Var) const;
+ unsigned getRegisterForVar(InlinedVariable Var) const;
bool empty() const { return VarInstrRanges.empty(); }
void clear() { VarInstrRanges.clear(); }
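
Note: the header change above replaces the bare const MDNode * key of DbgValueHistoryMap with an InlinedVariable pair of (variable, inlined-at location), so two inlined copies of the same source variable keep separate instruction ranges. A minimal, self-contained sketch of that keying idea, using stand-in Var/Loc types rather than MDLocalVariable/MDLocation:

    #include <cassert>
    #include <map>
    #include <utility>
    #include <vector>

    // Stand-ins for MDLocalVariable and MDLocation; only identity matters.
    struct Var {};
    struct Loc {};

    // Same shape as DbgValueHistoryMap::InlinedVariable in the patch above.
    using InlinedVariable = std::pair<const Var *, const Loc *>;

    int main() {
      Var X;            // one source-level variable...
      Loc CallA, CallB; // ...inlined at two different call sites.

      std::map<InlinedVariable, std::vector<int>> Ranges;
      Ranges[{&X, &CallA}].push_back(1);
      Ranges[{&X, &CallB}].push_back(2);

      // The pair key keeps the two inlined copies distinct; a bare Var*
      // key would have merged their histories.
      assert(Ranges.size() == 2);
      return 0;
    }
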
diff --git a/lib/CodeGen/AsmPrinter/DebugLocEntry.h b/lib/CodeGen/AsmPrinter/DebugLocEntry.h
index 6914bbe..4f6714e 100644
--- a/lib/CodeGen/AsmPrinter/DebugLocEntry.h
+++ b/lib/CodeGen/AsmPrinter/DebugLocEntry.h
@@ -42,8 +42,8 @@ public:
}
Value(const MDNode *Var, const MDNode *Expr, MachineLocation Loc)
: Variable(Var), Expression(Expr), EntryKind(E_Location), Loc(Loc) {
- assert(DIVariable(Var).Verify());
- assert(DIExpression(Expr)->isValid());
+ assert(isa<MDLocalVariable>(Var));
+ assert(cast<MDExpression>(Expr)->isValid());
}
/// The variable to which this location entry corresponds.
@@ -74,10 +74,11 @@ public:
const ConstantFP *getConstantFP() const { return Constant.CFP; }
const ConstantInt *getConstantInt() const { return Constant.CIP; }
MachineLocation getLoc() const { return Loc; }
- const MDNode *getVariableNode() const { return Variable; }
- DIVariable getVariable() const { return DIVariable(Variable); }
- bool isBitPiece() const { return getExpression().isBitPiece(); }
- DIExpression getExpression() const { return DIExpression(Expression); }
+ DIVariable getVariable() const { return cast<MDLocalVariable>(Variable); }
+ bool isBitPiece() const { return getExpression()->isBitPiece(); }
+ DIExpression getExpression() const {
+ return cast_or_null<MDExpression>(Expression);
+ }
friend bool operator==(const Value &, const Value &);
friend bool operator<(const Value &, const Value &);
};
@@ -101,12 +102,12 @@ public:
/// Return true if the merge was successful.
bool MergeValues(const DebugLocEntry &Next) {
if (Begin == Next.Begin) {
- DIExpression Expr(Values[0].Expression);
- DIVariable Var(Values[0].Variable);
- DIExpression NextExpr(Next.Values[0].Expression);
- DIVariable NextVar(Next.Values[0].Variable);
- if (Var == NextVar && Expr.isBitPiece() &&
- NextExpr.isBitPiece()) {
+ DIExpression Expr = cast_or_null<MDExpression>(Values[0].Expression);
+ DIVariable Var = cast_or_null<MDLocalVariable>(Values[0].Variable);
+ DIExpression NextExpr =
+ cast_or_null<MDExpression>(Next.Values[0].Expression);
+ DIVariable NextVar = cast_or_null<MDLocalVariable>(Next.Values[0].Variable);
+ if (Var == NextVar && Expr->isBitPiece() && NextExpr->isBitPiece()) {
addValues(Next.Values);
End = Next.End;
return true;
@@ -189,8 +190,8 @@ inline bool operator==(const DebugLocEntry::Value &A,
/// \brief Compare two pieces based on their offset.
inline bool operator<(const DebugLocEntry::Value &A,
const DebugLocEntry::Value &B) {
- return A.getExpression().getBitPieceOffset() <
- B.getExpression().getBitPieceOffset();
+ return A.getExpression()->getBitPieceOffset() <
+ B.getExpression()->getBitPieceOffset();
}
}
diff --git a/lib/CodeGen/AsmPrinter/DwarfCompileUnit.cpp b/lib/CodeGen/AsmPrinter/DwarfCompileUnit.cpp
index eee5fc5..75d3b68 100644
--- a/lib/CodeGen/AsmPrinter/DwarfCompileUnit.cpp
+++ b/lib/CodeGen/AsmPrinter/DwarfCompileUnit.cpp
@@ -101,51 +101,52 @@ DIE *DwarfCompileUnit::getOrCreateGlobalVariableDIE(DIGlobalVariable GV) {
if (DIE *Die = getDIE(GV))
return Die;
- assert(GV.isGlobalVariable());
+ assert(GV);
- DIScope GVContext = GV.getContext();
- DIType GTy = DD->resolve(GV.getType());
+ DIScope GVContext = GV->getScope();
+ DIType GTy = DD->resolve(GV->getType());
// Construct the context before querying for the existence of the DIE in
// case such construction creates the DIE.
DIE *ContextDIE = getOrCreateContextDIE(GVContext);
// Add to map.
- DIE *VariableDIE = &createAndAddDIE(GV.getTag(), *ContextDIE, GV);
+ DIE *VariableDIE = &createAndAddDIE(GV->getTag(), *ContextDIE, GV);
DIScope DeclContext;
- if (DIDerivedType SDMDecl = GV.getStaticDataMemberDeclaration()) {
- DeclContext = resolve(SDMDecl.getContext());
- assert(SDMDecl.isStaticMember() && "Expected static member decl");
- assert(GV.isDefinition());
+ if (auto *SDMDecl = GV->getStaticDataMemberDeclaration()) {
+ DeclContext = resolve(SDMDecl->getScope());
+ assert(SDMDecl->isStaticMember() && "Expected static member decl");
+ assert(GV->isDefinition());
// We need the declaration DIE that is in the static member's class.
DIE *VariableSpecDIE = getOrCreateStaticMemberDIE(SDMDecl);
addDIEEntry(*VariableDIE, dwarf::DW_AT_specification, *VariableSpecDIE);
} else {
- DeclContext = GV.getContext();
+ DeclContext = GV->getScope();
// Add name and type.
- addString(*VariableDIE, dwarf::DW_AT_name, GV.getDisplayName());
+ addString(*VariableDIE, dwarf::DW_AT_name, GV->getDisplayName());
addType(*VariableDIE, GTy);
// Add scoping info.
- if (!GV.isLocalToUnit())
+ if (!GV->isLocalToUnit())
addFlag(*VariableDIE, dwarf::DW_AT_external);
// Add line number info.
addSourceLine(*VariableDIE, GV);
}
- if (!GV.isDefinition())
+ if (!GV->isDefinition())
addFlag(*VariableDIE, dwarf::DW_AT_declaration);
+ else
+ addGlobalName(GV->getName(), *VariableDIE, DeclContext);
// Add location.
bool addToAccelTable = false;
- bool isGlobalVariable = GV.getGlobal() != nullptr;
- if (isGlobalVariable) {
+ if (auto *Global = dyn_cast_or_null<GlobalVariable>(GV->getVariable())) {
addToAccelTable = true;
DIELoc *Loc = new (DIEValueAllocator) DIELoc();
- const MCSymbol *Sym = Asm->getSymbol(GV.getGlobal());
- if (GV.getGlobal()->isThreadLocal()) {
+ const MCSymbol *Sym = Asm->getSymbol(Global);
+ if (Global->isThreadLocal()) {
// FIXME: Make this work with -gsplit-dwarf.
unsigned PointerSize = Asm->getDataLayout().getPointerSize();
assert((PointerSize == 4 || PointerSize == 8) &&
@@ -174,11 +175,11 @@ DIE *DwarfCompileUnit::getOrCreateGlobalVariableDIE(DIGlobalVariable GV) {
}
addBlock(*VariableDIE, dwarf::DW_AT_location, Loc);
- addLinkageName(*VariableDIE, GV.getLinkageName());
+ addLinkageName(*VariableDIE, GV->getLinkageName());
} else if (const ConstantInt *CI =
- dyn_cast_or_null<ConstantInt>(GV.getConstant())) {
+ dyn_cast_or_null<ConstantInt>(GV->getVariable())) {
addConstantValue(*VariableDIE, CI, GTy);
- } else if (const ConstantExpr *CE = getMergedGlobalExpr(GV.getConstant())) {
+ } else if (const ConstantExpr *CE = getMergedGlobalExpr(GV->getVariable())) {
addToAccelTable = true;
// GV is a merged global.
DIELoc *Loc = new (DIEValueAllocator) DIELoc();
@@ -195,15 +196,14 @@ DIE *DwarfCompileUnit::getOrCreateGlobalVariableDIE(DIGlobalVariable GV) {
}
if (addToAccelTable) {
- DD->addAccelName(GV.getName(), *VariableDIE);
+ DD->addAccelName(GV->getName(), *VariableDIE);
// If the linkage name is different than the name, go ahead and output
// that as well into the name table.
- if (GV.getLinkageName() != "" && GV.getName() != GV.getLinkageName())
- DD->addAccelName(GV.getLinkageName(), *VariableDIE);
+ if (GV->getLinkageName() != "" && GV->getName() != GV->getLinkageName())
+ DD->addAccelName(GV->getLinkageName(), *VariableDIE);
}
- addGlobalName(GV.getName(), *VariableDIE, DeclContext);
return VariableDIE;
}
@@ -307,7 +307,7 @@ void DwarfCompileUnit::constructScopeDIE(
DIScope DS(Scope->getScopeNode());
- assert((Scope->getInlinedAt() || !DS.isSubprogram()) &&
+ assert((Scope->getInlinedAt() || !isa<MDSubprogram>(DS)) &&
"Only handle inlined subprograms here, use "
"constructSubprogramScopeDIE for non-inlined "
"subprograms");
@@ -318,7 +318,7 @@ void DwarfCompileUnit::constructScopeDIE(
// avoid creating un-used children then removing them later when we find out
// the scope DIE is null.
std::unique_ptr<DIE> ScopeDIE;
- if (Scope->getParent() && DS.isSubprogram()) {
+ if (Scope->getParent() && isa<MDSubprogram>(DS)) {
ScopeDIE = constructInlinedScopeDIE(Scope);
if (!ScopeDIE)
return;
@@ -340,7 +340,7 @@ void DwarfCompileUnit::constructScopeDIE(
// There is no need to emit empty lexical block DIE.
for (const auto &E : DD->findImportedEntitiesForScope(DS))
Children.push_back(
- constructImportedEntityDIE(DIImportedEntity(E.second)));
+ constructImportedEntityDIE(cast<MDImportedEntity>(E.second)));
}
// If there are only other scopes as children, put them directly in the
@@ -431,10 +431,10 @@ DwarfCompileUnit::constructInlinedScopeDIE(LexicalScope *Scope) {
attachRangesOrLowHighPC(*ScopeDIE, Scope->getRanges());
// Add the call site information to the DIE.
- DILocation DL(Scope->getInlinedAt());
+ const MDLocation *IA = Scope->getInlinedAt();
addUInt(*ScopeDIE, dwarf::DW_AT_call_file, None,
- getOrCreateSourceID(DL.getFilename(), DL.getDirectory()));
- addUInt(*ScopeDIE, dwarf::DW_AT_call_line, None, DL.getLineNumber());
+ getOrCreateSourceID(IA->getFilename(), IA->getDirectory()));
+ addUInt(*ScopeDIE, dwarf::DW_AT_call_line, None, IA->getLine());
// Add name to the name table, we do this here because we're guaranteed
// to have concrete versions of our DW_TAG_inlined_subprogram nodes.
@@ -523,7 +523,7 @@ DwarfCompileUnit::constructVariableDIEImpl(const DbgVariable &DV,
assert(Expr != DV.getExpression().end() &&
"Wrong number of expressions");
DwarfExpr.AddMachineRegIndirect(FrameReg, Offset);
- DwarfExpr.AddExpression(Expr->begin(), Expr->end());
+ DwarfExpr.AddExpression((*Expr)->expr_op_begin(), (*Expr)->expr_op_end());
++Expr;
}
addBlock(*VariableDie, dwarf::DW_AT_location, Loc);
@@ -562,16 +562,14 @@ void DwarfCompileUnit::constructSubprogramScopeDIE(LexicalScope *Scope) {
assert(Scope && Scope->getScopeNode());
assert(!Scope->getInlinedAt());
assert(!Scope->isAbstractScope());
- DISubprogram Sub(Scope->getScopeNode());
-
- assert(Sub.isSubprogram());
+ DISubprogram Sub = cast<MDSubprogram>(Scope->getScopeNode());
DD->getProcessedSPNodes().insert(Sub);
DIE &ScopeDIE = updateSubprogramScopeDIE(Sub);
// If this is a variadic function, add an unspecified parameter.
- DITypeArray FnArgs = Sub.getType().getTypeArray();
+ DITypeArray FnArgs = Sub->getType()->getTypeArray();
// Collect lexical scope children first.
// ObjectPointer might be a local (non-argument) local variable if it's a
@@ -582,8 +580,7 @@ void DwarfCompileUnit::constructSubprogramScopeDIE(LexicalScope *Scope) {
// If we have a single element of null, it is a function that returns void.
// If we have more than one elements and the last one is null, it is a
// variadic function.
- if (FnArgs.getNumElements() > 1 &&
- !FnArgs.getElement(FnArgs.getNumElements() - 1) &&
+ if (FnArgs.size() > 1 && !FnArgs[FnArgs.size() - 1] &&
!includeMinimalInlineScopes())
ScopeDIE.addChild(make_unique<DIE>(dwarf::DW_TAG_unspecified_parameters));
}
@@ -607,7 +604,7 @@ DwarfCompileUnit::constructAbstractSubprogramScopeDIE(LexicalScope *Scope) {
if (AbsDef)
return;
- DISubprogram SP(Scope->getScopeNode());
+ DISubprogram SP = cast<MDSubprogram>(Scope->getScopeNode());
DIE *ContextDIE;
@@ -617,11 +614,11 @@ DwarfCompileUnit::constructAbstractSubprogramScopeDIE(LexicalScope *Scope) {
// the important distinction that the DIDescriptor is not associated with the
// DIE (since the DIDescriptor will be associated with the concrete DIE, if
// any). It could be refactored to some common utility function.
- else if (DISubprogram SPDecl = SP.getFunctionDeclaration()) {
+ else if (auto *SPDecl = SP->getDeclaration()) {
ContextDIE = &getUnitDie();
getOrCreateSubprogramDIE(SPDecl);
} else
- ContextDIE = getOrCreateContextDIE(resolve(SP.getContext()));
+ ContextDIE = getOrCreateContextDIE(resolve(SP->getScope()));
// Passing null as the associated DIDescriptor because the abstract definition
// shouldn't be found by lookup.
@@ -637,28 +634,25 @@ DwarfCompileUnit::constructAbstractSubprogramScopeDIE(LexicalScope *Scope) {
std::unique_ptr<DIE>
DwarfCompileUnit::constructImportedEntityDIE(const DIImportedEntity &Module) {
- assert(Module.Verify() &&
- "Use one of the MDNode * overloads to handle invalid metadata");
- std::unique_ptr<DIE> IMDie = make_unique<DIE>((dwarf::Tag)Module.getTag());
+ std::unique_ptr<DIE> IMDie = make_unique<DIE>((dwarf::Tag)Module->getTag());
insertDIE(Module, IMDie.get());
DIE *EntityDie;
- DIDescriptor Entity = resolve(Module.getEntity());
- if (Entity.isNameSpace())
- EntityDie = getOrCreateNameSpace(DINameSpace(Entity));
- else if (Entity.isSubprogram())
- EntityDie = getOrCreateSubprogramDIE(DISubprogram(Entity));
- else if (Entity.isType())
- EntityDie = getOrCreateTypeDIE(DIType(Entity));
- else if (Entity.isGlobalVariable())
- EntityDie = getOrCreateGlobalVariableDIE(DIGlobalVariable(Entity));
+ auto *Entity = resolve(Module->getEntity());
+ if (auto *NS = dyn_cast<MDNamespace>(Entity))
+ EntityDie = getOrCreateNameSpace(NS);
+ else if (auto *SP = dyn_cast<MDSubprogram>(Entity))
+ EntityDie = getOrCreateSubprogramDIE(SP);
+ else if (auto *T = dyn_cast<MDType>(Entity))
+ EntityDie = getOrCreateTypeDIE(T);
+ else if (auto *GV = dyn_cast<MDGlobalVariable>(Entity))
+ EntityDie = getOrCreateGlobalVariableDIE(GV);
else
EntityDie = getDIE(Entity);
assert(EntityDie);
- addSourceLine(*IMDie, Module.getLineNumber(),
- Module.getContext().getFilename(),
- Module.getContext().getDirectory());
+ addSourceLine(*IMDie, Module->getLine(), Module->getScope()->getFilename(),
+ Module->getScope()->getDirectory());
addDIEEntry(*IMDie, dwarf::DW_AT_import, *EntityDie);
- StringRef Name = Module.getName();
+ StringRef Name = Module->getName();
if (!Name.empty())
addString(*IMDie, dwarf::DW_AT_name, Name);
@@ -683,21 +677,19 @@ void DwarfCompileUnit::finishSubprogramDefinition(DISubprogram SP) {
}
}
void DwarfCompileUnit::collectDeadVariables(DISubprogram SP) {
- assert(SP.isSubprogram() && "CU's subprogram list contains a non-subprogram");
- assert(SP.isDefinition() &&
+ assert(SP && "CU's subprogram list contains a non-subprogram");
+ assert(SP->isDefinition() &&
"CU's subprogram list contains a subprogram declaration");
- DIArray Variables = SP.getVariables();
- if (Variables.getNumElements() == 0)
+ auto Variables = SP->getVariables();
+ if (Variables.size() == 0)
return;
DIE *SPDIE = DU->getAbstractSPDies().lookup(SP);
if (!SPDIE)
SPDIE = getDIE(SP);
assert(SPDIE);
- for (unsigned vi = 0, ve = Variables.getNumElements(); vi != ve; ++vi) {
- DIVariable DV(Variables.getElement(vi));
- assert(DV.isVariable());
- DbgVariable NewVar(DV, DIExpression(), DD);
+ for (DIVariable DV : Variables) {
+ DbgVariable NewVar(DV, nullptr, DIExpression(), DD);
auto VariableDie = constructVariableDIE(NewVar);
applyVariableAttributes(NewVar, *VariableDie);
SPDIE->addChild(std::move(VariableDie));
@@ -728,7 +720,7 @@ void DwarfCompileUnit::addGlobalType(DIType Ty, const DIE &Die,
DIScope Context) {
if (includeMinimalInlineScopes())
return;
- std::string FullName = getParentContextString(Context) + Ty.getName().str();
+ std::string FullName = getParentContextString(Context) + Ty->getName().str();
GlobalTypes[FullName] = &Die;
}
@@ -778,7 +770,7 @@ void DwarfCompileUnit::addComplexAddress(const DbgVariable &DV, DIE &Die,
ValidReg = DwarfExpr.AddMachineRegIndirect(Location.getReg(),
Location.getOffset());
if (ValidReg)
- DwarfExpr.AddExpression(Expr.begin(), Expr.end());
+ DwarfExpr.AddExpression(Expr->expr_op_begin(), Expr->expr_op_end());
} else
ValidReg = DwarfExpr.AddMachineRegExpression(Expr, Location.getReg());
@@ -816,10 +808,10 @@ void DwarfCompileUnit::addExpr(DIELoc &Die, dwarf::Form Form,
void DwarfCompileUnit::applySubprogramAttributesToDefinition(DISubprogram SP,
DIE &SPDie) {
- DISubprogram SPDecl = SP.getFunctionDeclaration();
- DIScope Context = resolve(SPDecl ? SPDecl.getContext() : SP.getContext());
+ auto *SPDecl = SP->getDeclaration();
+ DIScope Context = resolve(SPDecl ? SPDecl->getScope() : SP->getScope());
applySubprogramAttributes(SP, SPDie, includeMinimalInlineScopes());
- addGlobalName(SP.getName(), SPDie, Context);
+ addGlobalName(SP->getName(), SPDie, Context);
}
bool DwarfCompileUnit::isDwoUnit() const {
@@ -827,7 +819,7 @@ bool DwarfCompileUnit::isDwoUnit() const {
}
bool DwarfCompileUnit::includeMinimalInlineScopes() const {
- return getCUNode().getEmissionKind() == DIBuilder::LineTablesOnly ||
+ return getCUNode()->getEmissionKind() == DIBuilder::LineTablesOnly ||
(DD->useSplitDwarf() && !Skeleton);
}
} // end llvm namespace
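
Note: one detail worth calling out from the DwarfCompileUnit.cpp hunks above is the variadic-function check FnArgs.size() > 1 && !FnArgs[FnArgs.size() - 1], which relies on the subprogram type array convention that element 0 is the return type and a trailing null marks "...". A self-contained illustration of that convention with a hypothetical Type stand-in:

    #include <cassert>
    #include <vector>

    struct Type {}; // stand-in for a type descriptor

    // Element 0 is the return type; a trailing null entry marks a
    // variadic parameter list, as the comment in the hunk above explains.
    static bool isVariadic(const std::vector<const Type *> &FnArgs) {
      return FnArgs.size() > 1 && FnArgs.back() == nullptr;
    }

    int main() {
      Type Int;
      std::vector<const Type *> VoidFn = {nullptr};             // void f()
      std::vector<const Type *> VarFn = {&Int, &Int, nullptr};  // int f(int, ...)
      assert(!isVariadic(VoidFn));
      assert(isVariadic(VarFn));
      return 0;
    }
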
diff --git a/lib/CodeGen/AsmPrinter/DwarfDebug.cpp b/lib/CodeGen/AsmPrinter/DwarfDebug.cpp
index e9ebd97..fb8fc6e 100644
--- a/lib/CodeGen/AsmPrinter/DwarfDebug.cpp
+++ b/lib/CodeGen/AsmPrinter/DwarfDebug.cpp
@@ -129,20 +129,22 @@ bool DebugLocDwarfExpression::isFrameRegister(unsigned MachineReg) {
/// resolve - Look in the DwarfDebug map for the MDNode that
/// corresponds to the reference.
-template <typename T> T DbgVariable::resolve(DIRef<T> Ref) const {
+template <typename T> T *DbgVariable::resolve(TypedDebugNodeRef<T> Ref) const {
return DD->resolve(Ref);
}
bool DbgVariable::isBlockByrefVariable() const {
- assert(Var.isVariable() && "Invalid complex DbgVariable!");
- return Var.isBlockByrefVariable(DD->getTypeIdentifierMap());
+ assert(Var && "Invalid complex DbgVariable!");
+ return Var->getType()
+ .resolve(DD->getTypeIdentifierMap())
+ ->isBlockByrefStruct();
}
DIType DbgVariable::getType() const {
- DIType Ty = Var.getType().resolve(DD->getTypeIdentifierMap());
+ MDType *Ty = Var->getType().resolve(DD->getTypeIdentifierMap());
// FIXME: isBlockByrefVariable should be reformulated in terms of complex
// addresses instead.
- if (Var.isBlockByrefVariable(DD->getTypeIdentifierMap())) {
+ if (Ty->isBlockByrefStruct()) {
/* Byref variables, in Blocks, are declared by the programmer as
"SomeType VarName;", but the compiler creates a
__Block_byref_x_VarName struct, and gives the variable VarName
@@ -167,17 +169,17 @@ DIType DbgVariable::getType() const {
have a DW_AT_location that tells the debugger how to unwind through
the pointers and __Block_byref_x_VarName struct to find the actual
value of the variable. The function addBlockByrefType does this. */
- DIType subType = Ty;
- uint16_t tag = Ty.getTag();
+ MDType *subType = Ty;
+ uint16_t tag = Ty->getTag();
if (tag == dwarf::DW_TAG_pointer_type)
- subType = resolve(DIDerivedType(Ty).getTypeDerivedFrom());
+ subType = resolve(DITypeRef(cast<MDDerivedType>(Ty)->getBaseType()));
- DIArray Elements = DICompositeType(subType).getElements();
- for (unsigned i = 0, N = Elements.getNumElements(); i < N; ++i) {
- DIDerivedType DT(Elements.getElement(i));
- if (getName() == DT.getName())
- return (resolve(DT.getTypeDerivedFrom()));
+ auto Elements = cast<MDCompositeTypeBase>(subType)->getElements();
+ for (unsigned i = 0, N = Elements.size(); i < N; ++i) {
+ auto *DT = cast<MDDerivedTypeBase>(Elements[i]);
+ if (getName() == DT->getName())
+ return resolve(DITypeRef(DT->getBaseType()));
}
}
return Ty;
@@ -275,25 +277,25 @@ static StringRef getObjCMethodName(StringRef In) {
// that do not have a DW_AT_name or DW_AT_linkage_name field - this
// is only slightly different than the lookup of non-standard ObjC names.
void DwarfDebug::addSubprogramNames(DISubprogram SP, DIE &Die) {
- if (!SP.isDefinition())
+ if (!SP->isDefinition())
return;
- addAccelName(SP.getName(), Die);
+ addAccelName(SP->getName(), Die);
// If the linkage name is different than the name, go ahead and output
// that as well into the name table.
- if (SP.getLinkageName() != "" && SP.getName() != SP.getLinkageName())
- addAccelName(SP.getLinkageName(), Die);
+ if (SP->getLinkageName() != "" && SP->getName() != SP->getLinkageName())
+ addAccelName(SP->getLinkageName(), Die);
// If this is an Objective-C selector name add it to the ObjC accelerator
// too.
- if (isObjCClass(SP.getName())) {
+ if (isObjCClass(SP->getName())) {
StringRef Class, Category;
- getObjCClassCategory(SP.getName(), Class, Category);
+ getObjCClassCategory(SP->getName(), Class, Category);
addAccelObjC(Class, Die);
if (Category != "")
addAccelObjC(Category, Die);
// Also add the base method name to the name table.
- addAccelName(getObjCMethodName(SP.getName()), Die);
+ addAccelName(getObjCMethodName(SP->getName()), Die);
}
}
@@ -302,11 +304,10 @@ void DwarfDebug::addSubprogramNames(DISubprogram SP, DIE &Die) {
bool DwarfDebug::isSubprogramContext(const MDNode *Context) {
if (!Context)
return false;
- DIDescriptor D(Context);
- if (D.isSubprogram())
+ if (isa<MDSubprogram>(Context))
return true;
- if (D.isType())
- return isSubprogramContext(resolve(DIType(Context).getContext()));
+ if (auto *T = dyn_cast<MDType>(Context))
+ return isSubprogramContext(resolve(T->getScope()));
return false;
}
@@ -362,8 +363,8 @@ void DwarfDebug::addGnuPubAttributes(DwarfUnit &U, DIE &D) const {
// Create new DwarfCompileUnit for the given metadata node with tag
// DW_TAG_compile_unit.
DwarfCompileUnit &DwarfDebug::constructDwarfCompileUnit(DICompileUnit DIUnit) {
- StringRef FN = DIUnit.getFilename();
- CompilationDir = DIUnit.getDirectory();
+ StringRef FN = DIUnit->getFilename();
+ CompilationDir = DIUnit->getDirectory();
auto OwnedUnit = make_unique<DwarfCompileUnit>(
InfoHolder.getUnits().size(), DIUnit, Asm, this, &InfoHolder);
@@ -381,9 +382,9 @@ DwarfCompileUnit &DwarfDebug::constructDwarfCompileUnit(DICompileUnit DIUnit) {
Asm->OutStreamer.getContext().setMCLineTableCompilationDir(
NewCU.getUniqueID(), CompilationDir);
- NewCU.addString(Die, dwarf::DW_AT_producer, DIUnit.getProducer());
+ NewCU.addString(Die, dwarf::DW_AT_producer, DIUnit->getProducer());
NewCU.addUInt(Die, dwarf::DW_AT_language, dwarf::DW_FORM_data2,
- DIUnit.getLanguage());
+ DIUnit->getSourceLanguage());
NewCU.addString(Die, dwarf::DW_AT_name, FN);
if (!useSplitDwarf()) {
@@ -397,14 +398,14 @@ DwarfCompileUnit &DwarfDebug::constructDwarfCompileUnit(DICompileUnit DIUnit) {
addGnuPubAttributes(NewCU, Die);
}
- if (DIUnit.isOptimized())
+ if (DIUnit->isOptimized())
NewCU.addFlag(Die, dwarf::DW_AT_APPLE_optimized);
- StringRef Flags = DIUnit.getFlags();
+ StringRef Flags = DIUnit->getFlags();
if (!Flags.empty())
NewCU.addString(Die, dwarf::DW_AT_APPLE_flags, Flags);
- if (unsigned RVer = DIUnit.getRunTimeVersion())
+ if (unsigned RVer = DIUnit->getRuntimeVersion())
NewCU.addUInt(Die, dwarf::DW_AT_APPLE_major_runtime_vers,
dwarf::DW_FORM_data1, RVer);
@@ -420,9 +421,8 @@ DwarfCompileUnit &DwarfDebug::constructDwarfCompileUnit(DICompileUnit DIUnit) {
void DwarfDebug::constructAndAddImportedEntityDIE(DwarfCompileUnit &TheCU,
const MDNode *N) {
- DIImportedEntity Module(N);
- assert(Module.Verify());
- if (DIE *D = TheCU.getOrCreateContextDIE(Module.getContext()))
+ DIImportedEntity Module = cast<MDImportedEntity>(N);
+ if (DIE *D = TheCU.getOrCreateContextDIE(Module->getScope()))
D->addChild(TheCU.constructImportedEntityDIE(Module));
}
@@ -445,44 +445,35 @@ void DwarfDebug::beginModule() {
SingleCU = CU_Nodes->getNumOperands() == 1;
for (MDNode *N : CU_Nodes->operands()) {
- DICompileUnit CUNode(N);
+ DICompileUnit CUNode = cast<MDCompileUnit>(N);
DwarfCompileUnit &CU = constructDwarfCompileUnit(CUNode);
- DIArray ImportedEntities = CUNode.getImportedEntities();
- for (unsigned i = 0, e = ImportedEntities.getNumElements(); i != e; ++i)
- ScopesWithImportedEntities.push_back(std::make_pair(
- DIImportedEntity(ImportedEntities.getElement(i)).getContext(),
- ImportedEntities.getElement(i)));
+ for (auto *IE : CUNode->getImportedEntities())
+ ScopesWithImportedEntities.push_back(std::make_pair(IE->getScope(), IE));
// Stable sort to preserve the order of appearance of imported entities.
// This is to avoid out-of-order processing of interdependent declarations
// within the same scope, e.g. { namespace A = base; namespace B = A; }
std::stable_sort(ScopesWithImportedEntities.begin(),
ScopesWithImportedEntities.end(), less_first());
- DIArray GVs = CUNode.getGlobalVariables();
- for (unsigned i = 0, e = GVs.getNumElements(); i != e; ++i)
- CU.getOrCreateGlobalVariableDIE(DIGlobalVariable(GVs.getElement(i)));
- DIArray SPs = CUNode.getSubprograms();
- for (unsigned i = 0, e = SPs.getNumElements(); i != e; ++i)
- SPMap.insert(std::make_pair(SPs.getElement(i), &CU));
- DIArray EnumTypes = CUNode.getEnumTypes();
- for (unsigned i = 0, e = EnumTypes.getNumElements(); i != e; ++i) {
- DIType Ty(EnumTypes.getElement(i));
+ for (auto *GV : CUNode->getGlobalVariables())
+ CU.getOrCreateGlobalVariableDIE(GV);
+ for (auto *SP : CUNode->getSubprograms())
+ SPMap.insert(std::make_pair(SP, &CU));
+ for (DIType Ty : CUNode->getEnumTypes()) {
// The enum types array by design contains pointers to
// MDNodes rather than DIRefs. Unique them here.
- DIType UniqueTy(resolve(Ty.getRef()));
+ DIType UniqueTy = cast<MDType>(resolve(Ty->getRef()));
CU.getOrCreateTypeDIE(UniqueTy);
}
- DIArray RetainedTypes = CUNode.getRetainedTypes();
- for (unsigned i = 0, e = RetainedTypes.getNumElements(); i != e; ++i) {
- DIType Ty(RetainedTypes.getElement(i));
+ for (DIType Ty : CUNode->getRetainedTypes()) {
// The retained types array by design contains pointers to
// MDNodes rather than DIRefs. Unique them here.
- DIType UniqueTy(resolve(Ty.getRef()));
+ DIType UniqueTy = cast<MDType>(resolve(Ty->getRef()));
CU.getOrCreateTypeDIE(UniqueTy);
}
// Emit imported_modules last so that the relevant context is already
// available.
- for (unsigned i = 0, e = ImportedEntities.getNumElements(); i != e; ++i)
- constructAndAddImportedEntityDIE(CU, ImportedEntities.getElement(i));
+ for (auto *IE : CUNode->getImportedEntities())
+ constructAndAddImportedEntityDIE(CU, IE);
}
// Tell MMI that we have debug info.
@@ -498,7 +489,8 @@ void DwarfDebug::finishVariableDefinitions() {
// DIE::getUnit isn't simple - it walks parent pointers, etc.
DwarfCompileUnit *Unit = lookupUnit(VariableDie->getUnit());
assert(Unit);
- DbgVariable *AbsVar = getExistingAbstractVariable(Var->getVariable());
+ DbgVariable *AbsVar = getExistingAbstractVariable(
+ InlinedVariable(Var->getVariable(), Var->getInlinedAt()));
if (AbsVar && AbsVar->getDIE()) {
Unit->addDIEEntry(*VariableDie, dwarf::DW_AT_abstract_origin,
*AbsVar->getDIE());
@@ -510,7 +502,7 @@ void DwarfDebug::finishVariableDefinitions() {
void DwarfDebug::finishSubprogramDefinitions() {
for (const auto &P : SPMap)
forBothCUs(*P.second, [&](DwarfCompileUnit &CU) {
- CU.finishSubprogramDefinition(DISubprogram(P.first));
+ CU.finishSubprogramDefinition(cast<MDSubprogram>(P.first));
});
}
@@ -521,14 +513,12 @@ void DwarfDebug::collectDeadVariables() {
if (NamedMDNode *CU_Nodes = M->getNamedMetadata("llvm.dbg.cu")) {
for (MDNode *N : CU_Nodes->operands()) {
- DICompileUnit TheCU(N);
+ DICompileUnit TheCU = cast<MDCompileUnit>(N);
// Construct subprogram DIE and add variables DIEs.
DwarfCompileUnit *SPCU =
static_cast<DwarfCompileUnit *>(CUMap.lookup(TheCU));
assert(SPCU && "Unable to find Compile Unit!");
- DIArray Subprograms = TheCU.getSubprograms();
- for (unsigned i = 0, e = Subprograms.getNumElements(); i != e; ++i) {
- DISubprogram SP(Subprograms.getElement(i));
+ for (auto *SP : TheCU->getSubprograms()) {
if (ProcessedSPNodes.count(SP) != 0)
continue;
SPCU->collectDeadVariables(SP);
@@ -671,70 +661,71 @@ void DwarfDebug::endModule() {
}
// Find abstract variable, if any, associated with Var.
-DbgVariable *DwarfDebug::getExistingAbstractVariable(const DIVariable &DV,
+DbgVariable *DwarfDebug::getExistingAbstractVariable(InlinedVariable IV,
DIVariable &Cleansed) {
- LLVMContext &Ctx = DV->getContext();
// More then one inlined variable corresponds to one abstract variable.
- // FIXME: This duplication of variables when inlining should probably be
- // removed. It's done to allow each DIVariable to describe its location
- // because the DebugLoc on the dbg.value/declare isn't accurate. We should
- // make it accurate then remove this duplication/cleansing stuff.
- Cleansed = cleanseInlinedVariable(DV, Ctx);
+ Cleansed = IV.first;
auto I = AbstractVariables.find(Cleansed);
if (I != AbstractVariables.end())
return I->second.get();
return nullptr;
}
-DbgVariable *DwarfDebug::getExistingAbstractVariable(const DIVariable &DV) {
+DbgVariable *DwarfDebug::getExistingAbstractVariable(InlinedVariable IV) {
DIVariable Cleansed;
- return getExistingAbstractVariable(DV, Cleansed);
+ return getExistingAbstractVariable(IV, Cleansed);
}
void DwarfDebug::createAbstractVariable(const DIVariable &Var,
LexicalScope *Scope) {
- auto AbsDbgVariable = make_unique<DbgVariable>(Var, DIExpression(), this);
+ auto AbsDbgVariable =
+ make_unique<DbgVariable>(Var, nullptr, DIExpression(), this);
InfoHolder.addScopeVariable(Scope, AbsDbgVariable.get());
AbstractVariables[Var] = std::move(AbsDbgVariable);
}
-void DwarfDebug::ensureAbstractVariableIsCreated(const DIVariable &DV,
+void DwarfDebug::ensureAbstractVariableIsCreated(InlinedVariable IV,
const MDNode *ScopeNode) {
- DIVariable Cleansed = DV;
- if (getExistingAbstractVariable(DV, Cleansed))
+ DIVariable Cleansed;
+ if (getExistingAbstractVariable(IV, Cleansed))
return;
- createAbstractVariable(Cleansed, LScopes.getOrCreateAbstractScope(ScopeNode));
+ createAbstractVariable(Cleansed, LScopes.getOrCreateAbstractScope(
+ cast<MDLocalScope>(ScopeNode)));
}
-void
-DwarfDebug::ensureAbstractVariableIsCreatedIfScoped(const DIVariable &DV,
- const MDNode *ScopeNode) {
- DIVariable Cleansed = DV;
- if (getExistingAbstractVariable(DV, Cleansed))
+void DwarfDebug::ensureAbstractVariableIsCreatedIfScoped(
+ InlinedVariable IV, const MDNode *ScopeNode) {
+ DIVariable Cleansed;
+ if (getExistingAbstractVariable(IV, Cleansed))
return;
- if (LexicalScope *Scope = LScopes.findAbstractScope(ScopeNode))
+ if (LexicalScope *Scope =
+ LScopes.findAbstractScope(cast_or_null<MDLocalScope>(ScopeNode)))
createAbstractVariable(Cleansed, Scope);
}
// Collect variable information from side table maintained by MMI.
void DwarfDebug::collectVariableInfoFromMMITable(
- SmallPtrSetImpl<const MDNode *> &Processed) {
+ DenseSet<InlinedVariable> &Processed) {
for (const auto &VI : MMI->getVariableDbgInfo()) {
if (!VI.Var)
continue;
- Processed.insert(VI.Var);
+ assert(VI.Var->isValidLocationForIntrinsic(VI.Loc) &&
+ "Expected inlined-at fields to agree");
+
+ InlinedVariable Var(VI.Var, VI.Loc->getInlinedAt());
+ Processed.insert(Var);
LexicalScope *Scope = LScopes.findLexicalScope(VI.Loc);
// If variable scope is not found then skip this variable.
if (!Scope)
continue;
- DIVariable DV(VI.Var);
- DIExpression Expr(VI.Expr);
- ensureAbstractVariableIsCreatedIfScoped(DV, Scope->getScopeNode());
- auto RegVar = make_unique<DbgVariable>(DV, Expr, this, VI.Slot);
+ DIExpression Expr = cast_or_null<MDExpression>(VI.Expr);
+ ensureAbstractVariableIsCreatedIfScoped(Var, Scope->getScopeNode());
+ auto RegVar =
+ make_unique<DbgVariable>(Var.first, Var.second, Expr, this, VI.Slot);
if (InfoHolder.addScopeVariable(Scope, RegVar.get()))
ConcreteVariables.push_back(std::move(RegVar));
}
@@ -768,12 +759,12 @@ static DebugLocEntry::Value getDebugLocValue(const MachineInstr *MI) {
/// Determine whether two variable pieces overlap.
static bool piecesOverlap(DIExpression P1, DIExpression P2) {
- if (!P1.isBitPiece() || !P2.isBitPiece())
+ if (!P1->isBitPiece() || !P2->isBitPiece())
return true;
- unsigned l1 = P1.getBitPieceOffset();
- unsigned l2 = P2.getBitPieceOffset();
- unsigned r1 = l1 + P1.getBitPieceSize();
- unsigned r2 = l2 + P2.getBitPieceSize();
+ unsigned l1 = P1->getBitPieceOffset();
+ unsigned l2 = P2->getBitPieceOffset();
+ unsigned r1 = l1 + P1->getBitPieceSize();
+ unsigned r2 = l2 + P2->getBitPieceSize();
// True where [l1,r1[ and [r1,r2[ overlap.
return (l1 < r2) && (l2 < r1);
}
@@ -845,7 +836,7 @@ DwarfDebug::buildLocationList(SmallVectorImpl<DebugLocEntry> &DebugLoc,
bool couldMerge = false;
// If this is a piece, it may belong to the current DebugLocEntry.
- if (DIExpr.isBitPiece()) {
+ if (DIExpr->isBitPiece()) {
// Add this value to the list of open ranges.
OpenRanges.push_back(Value);
@@ -884,35 +875,34 @@ DwarfDebug::buildLocationList(SmallVectorImpl<DebugLocEntry> &DebugLoc,
// Find variables for each lexical scope.
-void
-DwarfDebug::collectVariableInfo(DwarfCompileUnit &TheCU, DISubprogram SP,
- SmallPtrSetImpl<const MDNode *> &Processed) {
+void DwarfDebug::collectVariableInfo(DwarfCompileUnit &TheCU, DISubprogram SP,
+ DenseSet<InlinedVariable> &Processed) {
// Grab the variable info that was squirreled away in the MMI side-table.
collectVariableInfoFromMMITable(Processed);
for (const auto &I : DbgValues) {
- DIVariable DV(I.first);
- if (Processed.count(DV))
+ InlinedVariable IV = I.first;
+ if (Processed.count(IV))
continue;
- // Instruction ranges, specifying where DV is accessible.
+ // Instruction ranges, specifying where IV is accessible.
const auto &Ranges = I.second;
if (Ranges.empty())
continue;
LexicalScope *Scope = nullptr;
- if (MDNode *IA = DV.getInlinedAt())
- Scope = LScopes.findInlinedScope(DV.getContext(), IA);
+ if (const MDLocation *IA = IV.second)
+ Scope = LScopes.findInlinedScope(IV.first->getScope(), IA);
else
- Scope = LScopes.findLexicalScope(DV.getContext());
+ Scope = LScopes.findLexicalScope(IV.first->getScope());
// If variable scope is not found then skip this variable.
if (!Scope)
continue;
- Processed.insert(DV);
+ Processed.insert(IV);
const MachineInstr *MInsn = Ranges.front().first;
assert(MInsn->isDebugValue() && "History must begin with debug value");
- ensureAbstractVariableIsCreatedIfScoped(DV, Scope->getScopeNode());
+ ensureAbstractVariableIsCreatedIfScoped(IV, Scope->getScopeNode());
ConcreteVariables.push_back(make_unique<DbgVariable>(MInsn, this));
DbgVariable *RegVar = ConcreteVariables.back().get();
InfoHolder.addScopeVariable(Scope, RegVar);
@@ -937,16 +927,15 @@ DwarfDebug::collectVariableInfo(DwarfCompileUnit &TheCU, DISubprogram SP,
}
// Collect info for variables that were optimized out.
- DIArray Variables = SP.getVariables();
- for (unsigned i = 0, e = Variables.getNumElements(); i != e; ++i) {
- DIVariable DV(Variables.getElement(i));
- assert(DV.isVariable());
- if (!Processed.insert(DV).second)
+ for (DIVariable DV : SP->getVariables()) {
+ if (!Processed.insert(InlinedVariable(DV, nullptr)).second)
continue;
- if (LexicalScope *Scope = LScopes.findLexicalScope(DV.getContext())) {
- ensureAbstractVariableIsCreatedIfScoped(DV, Scope->getScopeNode());
+ if (LexicalScope *Scope = LScopes.findLexicalScope(DV->getScope())) {
+ ensureAbstractVariableIsCreatedIfScoped(InlinedVariable(DV, nullptr),
+ Scope->getScopeNode());
DIExpression NoExpr;
- ConcreteVariables.push_back(make_unique<DbgVariable>(DV, NoExpr, this));
+ ConcreteVariables.push_back(
+ make_unique<DbgVariable>(DV, nullptr, NoExpr, this));
InfoHolder.addScopeVariable(Scope, ConcreteVariables.back().get());
}
}
@@ -972,7 +961,7 @@ void DwarfDebug::beginInstruction(const MachineInstr *MI) {
if (!MI->isDebugValue()) {
DebugLoc DL = MI->getDebugLoc();
if (DL != PrevInstLoc) {
- if (!DL.isUnknown()) {
+ if (DL) {
unsigned Flags = 0;
PrevInstLoc = DL;
if (DL == PrologEndLoc) {
@@ -984,7 +973,7 @@ void DwarfDebug::beginInstruction(const MachineInstr *MI) {
Asm->OutStreamer.getContext().getCurrentDwarfLoc().getLine())
Flags |= DWARF2_FLAG_IS_STMT;
- const MDNode *Scope = DL.getScope(Asm->MF->getFunction()->getContext());
+ const MDNode *Scope = DL.getScope();
recordSourceLine(DL.getLine(), DL.getCol(), Scope, Flags);
} else if (UnknownLocations) {
PrevInstLoc = DL;
@@ -1072,7 +1061,7 @@ static DebugLoc findPrologueEndLoc(const MachineFunction *MF) {
for (const auto &MBB : *MF)
for (const auto &MI : MBB)
if (!MI.isDebugValue() && !MI.getFlag(MachineInstr::FrameSetup) &&
- !MI.getDebugLoc().isUnknown()) {
+ MI.getDebugLoc()) {
// Did the target forget to set the FrameSetup flag for CFI insns?
assert(!MI.isCFIInstruction() &&
"First non-frame-setup instruction is a CFI instruction.");
@@ -1137,11 +1126,11 @@ void DwarfDebug::beginFunction(const MachineFunction *MF) {
// The first mention of a function argument gets the CurrentFnBegin
// label, so arguments are visible when breaking at function entry.
- DIVariable DIVar(Ranges.front().first->getDebugVariable());
- if (DIVar.isVariable() && DIVar.getTag() == dwarf::DW_TAG_arg_variable &&
- getDISubprogram(DIVar.getContext()).describes(MF->getFunction())) {
+ DIVariable DIVar = Ranges.front().first->getDebugVariable();
+ if (DIVar->getTag() == dwarf::DW_TAG_arg_variable &&
+ getDISubprogram(DIVar->getScope())->describes(MF->getFunction())) {
LabelsBeforeInsn[Ranges.front().first] = Asm->getFunctionBegin();
- if (Ranges.front().first->getDebugExpression().isBitPiece()) {
+ if (Ranges.front().first->getDebugExpression()->isBitPiece()) {
// Mark all non-overlapping initial pieces.
for (auto I = Ranges.begin(); I != Ranges.end(); ++I) {
DIExpression Piece = I->first->getDebugExpression();
@@ -1168,15 +1157,11 @@ void DwarfDebug::beginFunction(const MachineFunction *MF) {
// Record beginning of function.
PrologEndLoc = findPrologueEndLoc(MF);
- if (!PrologEndLoc.isUnknown()) {
- DebugLoc FnStartDL =
- PrologEndLoc.getFnDebugLoc(MF->getFunction()->getContext());
-
+ if (MDLocation *L = PrologEndLoc) {
// We'd like to list the prologue as "not statements" but GDB behaves
// poorly if we do that. Revisit this with caution/GDB (7.5+) testing.
- recordSourceLine(FnStartDL.getLine(), FnStartDL.getCol(),
- FnStartDL.getScope(MF->getFunction()->getContext()),
- DWARF2_FLAG_IS_STMT);
+ auto *SP = L->getInlinedAtScope()->getSubprogram();
+ recordSourceLine(SP->getScopeLine(), 0, SP, DWARF2_FLAG_IS_STMT);
}
}
@@ -1199,10 +1184,10 @@ void DwarfDebug::endFunction(const MachineFunction *MF) {
Asm->OutStreamer.getContext().setDwarfCompileUnitID(0);
LexicalScope *FnScope = LScopes.getCurrentFunctionScope();
- DISubprogram SP(FnScope->getScopeNode());
+ DISubprogram SP = cast<MDSubprogram>(FnScope->getScopeNode());
DwarfCompileUnit &TheCU = *SPMap.lookup(SP);
- SmallPtrSet<const MDNode *, 16> ProcessedVars;
+ DenseSet<InlinedVariable> ProcessedVars;
collectVariableInfo(TheCU, SP, ProcessedVars);
// Add the range of this function to the list of ranges for the CU.
@@ -1210,7 +1195,7 @@ void DwarfDebug::endFunction(const MachineFunction *MF) {
// Under -gmlt, skip building the subprogram if there are no inlined
// subroutines inside it.
- if (TheCU.getCUNode().getEmissionKind() == DIBuilder::LineTablesOnly &&
+ if (TheCU.getCUNode()->getEmissionKind() == DIBuilder::LineTablesOnly &&
LScopes.getAbstractScopesList().empty() && !IsDarwin) {
assert(InfoHolder.getScopeVariables().empty());
assert(DbgValues.empty());
@@ -1229,16 +1214,13 @@ void DwarfDebug::endFunction(const MachineFunction *MF) {
#endif
// Construct abstract scopes.
for (LexicalScope *AScope : LScopes.getAbstractScopesList()) {
- DISubprogram SP(AScope->getScopeNode());
- assert(SP.isSubprogram());
+ DISubprogram SP = cast<MDSubprogram>(AScope->getScopeNode());
// Collect info for variables that were optimized out.
- DIArray Variables = SP.getVariables();
- for (unsigned i = 0, e = Variables.getNumElements(); i != e; ++i) {
- DIVariable DV(Variables.getElement(i));
- assert(DV && DV.isVariable());
- if (!ProcessedVars.insert(DV).second)
+ for (DIVariable DV : SP->getVariables()) {
+ if (!ProcessedVars.insert(InlinedVariable(DV, nullptr)).second)
continue;
- ensureAbstractVariableIsCreated(DV, DV.getContext());
+ ensureAbstractVariableIsCreated(InlinedVariable(DV, nullptr),
+ DV->getScope());
assert(LScopes.getAbstractScopesList().size() == NumAbstractScopes
&& "ensureAbstractVariableIsCreated inserted abstract scopes");
}
@@ -1270,12 +1252,11 @@ void DwarfDebug::recordSourceLine(unsigned Line, unsigned Col, const MDNode *S,
StringRef Dir;
unsigned Src = 1;
unsigned Discriminator = 0;
- if (DIScope Scope = DIScope(S)) {
- assert(Scope.isScope());
- Fn = Scope.getFilename();
- Dir = Scope.getDirectory();
- if (Scope.isLexicalBlockFile())
- Discriminator = DILexicalBlockFile(S).getDiscriminator();
+ if (auto *Scope = cast_or_null<MDScope>(S)) {
+ Fn = Scope->getFilename();
+ Dir = Scope->getDirectory();
+ if (auto *LBF = dyn_cast<MDLexicalBlockFile>(Scope))
+ Discriminator = LBF->getDiscriminator();
unsigned CUID = Asm->OutStreamer.getContext().getDwarfCompileUnitID();
Src = static_cast<DwarfCompileUnit &>(*InfoHolder.getUnits()[CUID])
@@ -1499,23 +1480,25 @@ static void emitDebugLocValue(const AsmPrinter &AP,
Streamer);
// Regular entry.
if (Value.isInt()) {
- DIBasicType BTy(DV.getType().resolve(TypeIdentifierMap));
- if (BTy.Verify() && (BTy.getEncoding() == dwarf::DW_ATE_signed ||
- BTy.getEncoding() == dwarf::DW_ATE_signed_char))
+ MDType *T = DV->getType().resolve(TypeIdentifierMap);
+ auto *B = dyn_cast<MDBasicType>(T);
+ if (B && (B->getEncoding() == dwarf::DW_ATE_signed ||
+ B->getEncoding() == dwarf::DW_ATE_signed_char))
DwarfExpr.AddSignedConstant(Value.getInt());
else
DwarfExpr.AddUnsignedConstant(Value.getInt());
} else if (Value.isLocation()) {
MachineLocation Loc = Value.getLoc();
DIExpression Expr = Value.getExpression();
- if (!Expr || (Expr.getNumElements() == 0))
+ if (!Expr || !Expr->getNumElements())
// Regular entry.
AP.EmitDwarfRegOp(Streamer, Loc);
else {
// Complex address entry.
if (Loc.getOffset()) {
DwarfExpr.AddMachineRegIndirect(Loc.getReg(), Loc.getOffset());
- DwarfExpr.AddExpression(Expr.begin(), Expr.end(), PieceOffsetInBits);
+ DwarfExpr.AddExpression(Expr->expr_op_begin(), Expr->expr_op_end(),
+ PieceOffsetInBits);
} else
DwarfExpr.AddMachineRegExpression(Expr, Loc.getReg(),
PieceOffsetInBits);
@@ -1542,8 +1525,8 @@ void DebugLocEntry::finalize(const AsmPrinter &AP,
unsigned Offset = 0;
for (auto Piece : Values) {
DIExpression Expr = Piece.getExpression();
- unsigned PieceOffset = Expr.getBitPieceOffset();
- unsigned PieceSize = Expr.getBitPieceSize();
+ unsigned PieceOffset = Expr->getBitPieceOffset();
+ unsigned PieceSize = Expr->getBitPieceSize();
assert(Offset <= PieceOffset && "overlapping or duplicate pieces");
if (Offset < PieceOffset) {
// The DWARF spec seriously mandates pieces with no locations for gaps.
@@ -1554,15 +1537,7 @@ void DebugLocEntry::finalize(const AsmPrinter &AP,
Offset += PieceOffset-Offset;
}
Offset += PieceSize;
-
-#ifndef NDEBUG
- DIVariable Var = Piece.getVariable();
- unsigned VarSize = Var.getSizeInBits(TypeIdentifierMap);
- assert(PieceSize+PieceOffset <= VarSize
- && "piece is larger than or outside of variable");
- assert(PieceSize != VarSize
- && "piece covers entire variable");
-#endif
+
emitDebugLocValue(AP, TypeIdentifierMap, Streamer, Piece, PieceOffset);
}
} else {
@@ -1850,7 +1825,7 @@ void DwarfDebug::emitDebugRanges() {
void DwarfDebug::initSkeletonUnit(const DwarfUnit &U, DIE &Die,
std::unique_ptr<DwarfUnit> NewU) {
NewU->addString(Die, dwarf::DW_AT_GNU_dwo_name,
- U.getCUNode().getSplitDebugFilename());
+ U.getCUNode()->getSplitDebugFilename());
if (!CompilationDir.empty())
NewU->addString(Die, dwarf::DW_AT_comp_dir, CompilationDir);
@@ -1914,7 +1889,7 @@ MCDwarfDwoLineTable *DwarfDebug::getDwoLineTable(const DwarfCompileUnit &CU) {
if (!useSplitDwarf())
return nullptr;
if (SingleCU)
- SplitTypeUnitFileTable.setCompilationDir(CU.getCUNode().getDirectory());
+ SplitTypeUnitFileTable.setCompilationDir(CU.getCUNode()->getDirectory());
return &SplitTypeUnitFileTable;
}
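
Note: the piecesOverlap helper touched above uses the standard half-open interval test: [l1,r1) and [l2,r2) intersect exactly when each range starts before the other one ends. A small self-contained check of that test (the bit offsets are made-up values):

    #include <cassert>

    // Same expression as piecesOverlap: true iff [l1,r1) and [l2,r2) intersect.
    static bool overlap(unsigned l1, unsigned r1, unsigned l2, unsigned r2) {
      return l1 < r2 && l2 < r1;
    }

    int main() {
      assert(overlap(0, 32, 16, 48));  // [0,32) and [16,48) share [16,32)
      assert(!overlap(0, 32, 32, 64)); // adjacent pieces do not overlap
      return 0;
    }
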
diff --git a/lib/CodeGen/AsmPrinter/DwarfDebug.h b/lib/CodeGen/AsmPrinter/DwarfDebug.h
index 74db3ef..e067f41 100644
--- a/lib/CodeGen/AsmPrinter/DwarfDebug.h
+++ b/lib/CodeGen/AsmPrinter/DwarfDebug.h
@@ -21,6 +21,7 @@
#include "DwarfAccelTable.h"
#include "DwarfFile.h"
#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/FoldingSet.h"
#include "llvm/ADT/MapVector.h"
#include "llvm/ADT/SmallPtrSet.h"
@@ -74,7 +75,8 @@ public:
/// - Variables that are described by multiple MMI table entries have multiple
/// expressions and frame indices.
class DbgVariable {
- DIVariable Var; /// Variable Descriptor.
+ DIVariable Var; /// Variable Descriptor.
+ DILocation IA; /// Inlined at location.
SmallVector<DIExpression, 1> Expr; /// Complex address location expression.
DIE *TheDIE; /// Variable DIE.
unsigned DotDebugLocOffset; /// Offset in DotDebugLocEntries.
@@ -84,11 +86,11 @@ class DbgVariable {
public:
/// Construct a DbgVariable from a DIVariable.
- DbgVariable(DIVariable V, DIExpression E, DwarfDebug *DD, int FI = ~0)
- : Var(V), Expr(1, E), TheDIE(nullptr), DotDebugLocOffset(~0U),
- MInsn(nullptr), DD(DD) {
+ DbgVariable(DIVariable V, DILocation IA, DIExpression E, DwarfDebug *DD,
+ int FI = ~0)
+ : Var(V), IA(IA), Expr(1, E), TheDIE(nullptr), DotDebugLocOffset(~0U),
+ MInsn(nullptr), DD(DD) {
FrameIndex.push_back(FI);
- assert(Var.Verify());
assert(!E || E->isValid());
}
@@ -96,6 +98,7 @@ public:
/// AbstractVar may be NULL.
DbgVariable(const MachineInstr *DbgValue, DwarfDebug *DD)
: Var(DbgValue->getDebugVariable()),
+ IA(DbgValue->getDebugLoc()->getInlinedAt()),
Expr(1, DbgValue->getDebugExpression()), TheDIE(nullptr),
DotDebugLocOffset(~0U), MInsn(DbgValue), DD(DD) {
FrameIndex.push_back(~0);
@@ -103,12 +106,13 @@ public:
// Accessors.
DIVariable getVariable() const { return Var; }
+ DILocation getInlinedAt() const { return IA; }
const ArrayRef<DIExpression> getExpression() const { return Expr; }
void setDIE(DIE &D) { TheDIE = &D; }
DIE *getDIE() const { return TheDIE; }
void setDotDebugLocOffset(unsigned O) { DotDebugLocOffset = O; }
unsigned getDotDebugLocOffset() const { return DotDebugLocOffset; }
- StringRef getName() const { return Var.getName(); }
+ StringRef getName() const { return Var->getName(); }
const MachineInstr *getMInsn() const { return MInsn; }
const ArrayRef<int> getFrameIndex() const { return FrameIndex; }
@@ -116,6 +120,7 @@ public:
assert( DotDebugLocOffset == ~0U && !MInsn && "not an MMI entry");
assert(V.DotDebugLocOffset == ~0U && !V.MInsn && "not an MMI entry");
assert(V.Var == Var && "conflicting DIVariable");
+ assert(V.IA == IA && "conflicting inlined-at location");
if (V.getFrameIndex().back() != ~0) {
auto E = V.getExpression();
@@ -124,40 +129,40 @@ public:
FrameIndex.append(FI.begin(), FI.end());
}
assert(Expr.size() > 1
- ? std::all_of(Expr.begin(), Expr.end(),
- [](DIExpression &E) { return E.isBitPiece(); })
- : (true && "conflicting locations for variable"));
+ ? std::all_of(Expr.begin(), Expr.end(),
+ [](DIExpression &E) { return E->isBitPiece(); })
+ : (true && "conflicting locations for variable"));
}
// Translate tag to proper Dwarf tag.
dwarf::Tag getTag() const {
- if (Var.getTag() == dwarf::DW_TAG_arg_variable)
+ if (Var->getTag() == dwarf::DW_TAG_arg_variable)
return dwarf::DW_TAG_formal_parameter;
return dwarf::DW_TAG_variable;
}
/// \brief Return true if DbgVariable is artificial.
bool isArtificial() const {
- if (Var.isArtificial())
+ if (Var->isArtificial())
return true;
- if (getType().isArtificial())
+ if (getType()->isArtificial())
return true;
return false;
}
bool isObjectPointer() const {
- if (Var.isObjectPointer())
+ if (Var->isObjectPointer())
return true;
- if (getType().isObjectPointer())
+ if (getType()->isObjectPointer())
return true;
return false;
}
bool variableHasComplexAddress() const {
- assert(Var.isVariable() && "Invalid complex DbgVariable!");
+ assert(Var && "Invalid complex DbgVariable!");
assert(Expr.size() == 1 &&
"variableHasComplexAddress() invoked on multi-FI variable");
- return Expr.back().getNumElements() > 0;
+ return Expr.back()->getNumElements() > 0;
}
bool isBlockByrefVariable() const;
DIType getType() const;
@@ -165,7 +170,7 @@ public:
private:
/// resolve - Look in the DwarfDebug map for the MDNode that
/// corresponds to the reference.
- template <typename T> T resolve(DIRef<T> Ref) const;
+ template <typename T> T *resolve(TypedDebugNodeRef<T> Ref) const;
};
@@ -324,14 +329,16 @@ class DwarfDebug : public AsmPrinterHandler {
return InfoHolder.getUnits();
}
+ typedef DbgValueHistoryMap::InlinedVariable InlinedVariable;
+
/// \brief Find abstract variable associated with Var.
- DbgVariable *getExistingAbstractVariable(const DIVariable &DV,
+ DbgVariable *getExistingAbstractVariable(InlinedVariable IV,
DIVariable &Cleansed);
- DbgVariable *getExistingAbstractVariable(const DIVariable &DV);
+ DbgVariable *getExistingAbstractVariable(InlinedVariable IV);
void createAbstractVariable(const DIVariable &DV, LexicalScope *Scope);
- void ensureAbstractVariableIsCreated(const DIVariable &Var,
+ void ensureAbstractVariableIsCreated(InlinedVariable Var,
const MDNode *Scope);
- void ensureAbstractVariableIsCreatedIfScoped(const DIVariable &Var,
+ void ensureAbstractVariableIsCreatedIfScoped(InlinedVariable Var,
const MDNode *Scope);
/// \brief Construct a DIE for this abstract scope.
@@ -461,7 +468,7 @@ class DwarfDebug : public AsmPrinterHandler {
/// \brief Populate LexicalScope entries with variables' info.
void collectVariableInfo(DwarfCompileUnit &TheCU, DISubprogram SP,
- SmallPtrSetImpl<const MDNode *> &ProcessedVars);
+ DenseSet<InlinedVariable> &ProcessedVars);
/// \brief Build the location list for all DBG_VALUEs in the
/// function that describe the same variable.
@@ -470,7 +477,7 @@ class DwarfDebug : public AsmPrinterHandler {
/// \brief Collect variable information from the side table maintained
/// by MMI.
- void collectVariableInfoFromMMITable(SmallPtrSetImpl<const MDNode *> &P);
+ void collectVariableInfoFromMMITable(DenseSet<InlinedVariable> &P);
/// \brief Ensure that a label will be emitted before MI.
void requestLabelBeforeInsn(const MachineInstr *MI) {
@@ -567,7 +574,7 @@ public:
void emitDebugLocEntryLocation(const DebugLocEntry &Entry);
/// Find the MDNode for the given reference.
- template <typename T> T resolve(DIRef<T> Ref) const {
+ template <typename T> T *resolve(TypedDebugNodeRef<T> Ref) const {
return Ref.resolve(TypeIdentifierMap);
}
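
The new DbgValueHistoryMap::InlinedVariable key evidently pairs a variable with its inlined-at location (the added DbgVariable::IA member and the "conflicting inlined-at location" assert point the same way), so the same source variable inlined at two different call sites is tracked as two distinct entries. A small sketch of that keying behaviour, with plain pointers and std::set standing in for the metadata nodes and DenseSet:

#include <cassert>
#include <set>
#include <utility>

// Hypothetical stand-ins: an InlinedVariable is a (variable, inlined-at) pair.
using VarNode = const void *;
using LocNode = const void *;
using InlinedVariable = std::pair<VarNode, LocNode>;

int main() {
  int Var = 0, CallSiteA = 0, CallSiteB = 0;
  std::set<InlinedVariable> Processed;
  Processed.insert({&Var, &CallSiteA});
  Processed.insert({&Var, &CallSiteB});
  Processed.insert({&Var, &CallSiteA}); // duplicate key, ignored
  // One variable, two inline sites: two entries to process.
  assert(Processed.size() == 2);
}
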
diff --git a/lib/CodeGen/AsmPrinter/DwarfException.h b/lib/CodeGen/AsmPrinter/DwarfException.h
index 6eaf707..a4fd36f 100644
--- a/lib/CodeGen/AsmPrinter/DwarfException.h
+++ b/lib/CodeGen/AsmPrinter/DwarfException.h
@@ -48,7 +48,7 @@ public:
// Main entry points.
//
DwarfCFIException(AsmPrinter *A);
- virtual ~DwarfCFIException();
+ ~DwarfCFIException() override;
/// Emit all exception information that should come after the content.
void endModule() override;
@@ -70,7 +70,7 @@ public:
// Main entry points.
//
ARMException(AsmPrinter *A);
- virtual ~ARMException();
+ ~ARMException() override;
/// Emit all exception information that should come after the content.
void endModule() override;
diff --git a/lib/CodeGen/AsmPrinter/DwarfExpression.cpp b/lib/CodeGen/AsmPrinter/DwarfExpression.cpp
index 489e455..e576c93 100644
--- a/lib/CodeGen/AsmPrinter/DwarfExpression.cpp
+++ b/lib/CodeGen/AsmPrinter/DwarfExpression.cpp
@@ -195,27 +195,27 @@ static unsigned getOffsetOrZero(unsigned OffsetInBits,
bool DwarfExpression::AddMachineRegExpression(DIExpression Expr,
unsigned MachineReg,
unsigned PieceOffsetInBits) {
- auto I = Expr.begin();
- auto E = Expr.end();
+ auto I = Expr->expr_op_begin();
+ auto E = Expr->expr_op_end();
if (I == E)
return AddMachineRegPiece(MachineReg);
// Pattern-match combinations for which more efficient representations exist
// first.
bool ValidReg = false;
- switch (*I) {
+ switch (I->getOp()) {
case dwarf::DW_OP_bit_piece: {
- unsigned OffsetInBits = I->getArg(1);
- unsigned SizeInBits = I->getArg(2);
+ unsigned OffsetInBits = I->getArg(0);
+ unsigned SizeInBits = I->getArg(1);
// Piece always comes at the end of the expression.
return AddMachineRegPiece(MachineReg, SizeInBits,
getOffsetOrZero(OffsetInBits, PieceOffsetInBits));
}
case dwarf::DW_OP_plus: {
// [DW_OP_reg,Offset,DW_OP_plus,DW_OP_deref] --> [DW_OP_breg,Offset].
- auto N = I->getNext();
- if ((N != E) && (*N == dwarf::DW_OP_deref)) {
- unsigned Offset = I->getArg(1);
+ auto N = I.getNext();
+ if (N != E && N->getOp() == dwarf::DW_OP_deref) {
+ unsigned Offset = I->getArg(0);
ValidReg = AddMachineRegIndirect(MachineReg, Offset);
std::advance(I, 2);
break;
@@ -240,20 +240,20 @@ bool DwarfExpression::AddMachineRegExpression(DIExpression Expr,
return true;
}
-void DwarfExpression::AddExpression(DIExpression::iterator I,
- DIExpression::iterator E,
+void DwarfExpression::AddExpression(MDExpression::expr_op_iterator I,
+ MDExpression::expr_op_iterator E,
unsigned PieceOffsetInBits) {
for (; I != E; ++I) {
- switch (*I) {
+ switch (I->getOp()) {
case dwarf::DW_OP_bit_piece: {
- unsigned OffsetInBits = I->getArg(1);
- unsigned SizeInBits = I->getArg(2);
+ unsigned OffsetInBits = I->getArg(0);
+ unsigned SizeInBits = I->getArg(1);
AddOpPiece(SizeInBits, getOffsetOrZero(OffsetInBits, PieceOffsetInBits));
break;
}
case dwarf::DW_OP_plus:
EmitOp(dwarf::DW_OP_plus_uconst);
- EmitUnsigned(I->getArg(1));
+ EmitUnsigned(I->getArg(0));
break;
case dwarf::DW_OP_deref:
EmitOp(dwarf::DW_OP_deref);
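
The switch from DIExpression::iterator to MDExpression::expr_op_iterator also makes the operand indices zero-based, which is why DW_OP_bit_piece now reads its offset as getArg(0) and its size as getArg(1). A toy, self-contained model of walking such an operand-encoded expression (the encoding below is purely illustrative, not the real metadata layout):

#include <cassert>
#include <cstdint>
#include <vector>

// Each operation is an opcode followed by a fixed number of literal arguments;
// getArg() is zero-based, mirroring the iteration in the hunks above.
enum Op : uint64_t { OpPlus = 1, OpDeref = 2, OpBitPiece = 3 };

static unsigned numArgs(uint64_t Opcode) {
  switch (Opcode) {
  case OpPlus:     return 1;
  case OpBitPiece: return 2;
  default:         return 0;
  }
}

struct ExprOp {
  const uint64_t *P;
  uint64_t getOp() const { return P[0]; }
  uint64_t getArg(unsigned I) const { return P[1 + I]; } // zero-based
  const uint64_t *next() const { return P + 1 + numArgs(getOp()); }
};

int main() {
  // [plus 8, bit_piece offset=0 size=32] in this toy encoding.
  std::vector<uint64_t> Elts = {OpPlus, 8, OpBitPiece, 0, 32};
  ExprOp I{Elts.data()};
  assert(I.getOp() == OpPlus && I.getArg(0) == 8);
  I.P = I.next();
  assert(I.getOp() == OpBitPiece && I.getArg(0) == 0 && I.getArg(1) == 32);
}
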
diff --git a/lib/CodeGen/AsmPrinter/DwarfExpression.h b/lib/CodeGen/AsmPrinter/DwarfExpression.h
index 985d52c..a8b65f5 100644
--- a/lib/CodeGen/AsmPrinter/DwarfExpression.h
+++ b/lib/CodeGen/AsmPrinter/DwarfExpression.h
@@ -97,7 +97,8 @@ public:
/// Emit the operations remaining in the expression iterator I.
/// \param PieceOffsetInBits If this is one piece out of a fragmented
/// location, this is the offset of the piece inside the entire variable.
- void AddExpression(DIExpression::iterator I, DIExpression::iterator E,
+ void AddExpression(MDExpression::expr_op_iterator I,
+ MDExpression::expr_op_iterator E,
unsigned PieceOffsetInBits = 0);
};
diff --git a/lib/CodeGen/AsmPrinter/DwarfFile.cpp b/lib/CodeGen/AsmPrinter/DwarfFile.cpp
index 60acc58e..32adb40 100644
--- a/lib/CodeGen/AsmPrinter/DwarfFile.cpp
+++ b/lib/CodeGen/AsmPrinter/DwarfFile.cpp
@@ -139,7 +139,7 @@ bool DwarfFile::addScopeVariable(LexicalScope *LS, DbgVariable *Var) {
SmallVectorImpl<DbgVariable *> &Vars = ScopeVariables[LS];
DIVariable DV = Var->getVariable();
// Variables with positive arg numbers are parameters.
- if (unsigned ArgNum = DV.getArgNumber()) {
+ if (unsigned ArgNum = DV->getArg()) {
// Keep all parameters in order at the start of the variable list to ensure
// function types are correct (no out-of-order parameters)
//
@@ -149,7 +149,7 @@ bool DwarfFile::addScopeVariable(LexicalScope *LS, DbgVariable *Var) {
// rather than linear search.
auto I = Vars.begin();
while (I != Vars.end()) {
- unsigned CurNum = (*I)->getVariable().getArgNumber();
+ unsigned CurNum = (*I)->getVariable()->getArg();
// A local (non-parameter) variable has been found, insert immediately
// before it.
if (CurNum == 0)
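
A simplified sketch of the ordering rule addScopeVariable enforces, with plain (ArgNum, Name) pairs standing in for DbgVariable: parameters (positive argument numbers) stay sorted by argument number at the front of the scope's list, and locals (argument number 0) come after them. The later-parameter check is not visible in this hunk and is assumed here.

#include <cassert>
#include <string>
#include <utility>
#include <vector>

using ScopeVar = std::pair<unsigned, std::string>; // (ArgNum, Name)

static void addScopeVar(std::vector<ScopeVar> &Vars, ScopeVar V) {
  if (V.first == 0) {        // a local: append after all parameters
    Vars.push_back(V);
    return;
  }
  auto I = Vars.begin();     // a parameter: insert before the first local or
  while (I != Vars.end() && I->first != 0 && I->first <= V.first)
    ++I;                     // before the first later-numbered parameter
  Vars.insert(I, V);
}

int main() {
  std::vector<ScopeVar> Vars;
  addScopeVar(Vars, {0, "local"});
  addScopeVar(Vars, {2, "argB"});
  addScopeVar(Vars, {1, "argA"});
  assert(Vars[0].second == "argA" && Vars[1].second == "argB" &&
         Vars[2].second == "local");
}
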
diff --git a/lib/CodeGen/AsmPrinter/DwarfUnit.cpp b/lib/CodeGen/AsmPrinter/DwarfUnit.cpp
index f6af73f..9154652 100644
--- a/lib/CodeGen/AsmPrinter/DwarfUnit.cpp
+++ b/lib/CodeGen/AsmPrinter/DwarfUnit.cpp
@@ -175,8 +175,8 @@ static bool isShareableAcrossCUs(DIDescriptor D) {
// level already) but may be implementable for some value in projects
// building multiple independent libraries with LTO and then linking those
// together.
- return (D.isType() ||
- (D.isSubprogram() && !DISubprogram(D).isDefinition())) &&
+ return (isa<MDType>(D) ||
+ (isa<MDSubprogram>(D) && !cast<MDSubprogram>(D)->isDefinition())) &&
!GenerateDwarfTypeUnits;
}
@@ -397,52 +397,48 @@ void DwarfUnit::addSourceLine(DIE &Die, unsigned Line, StringRef File,
/// addSourceLine - Add location information to specified debug information
/// entry.
void DwarfUnit::addSourceLine(DIE &Die, DIVariable V) {
- assert(V.isVariable());
+ assert(V);
- addSourceLine(Die, V.getLineNumber(), V.getContext().getFilename(),
- V.getContext().getDirectory());
+ addSourceLine(Die, V->getLine(), V->getScope()->getFilename(),
+ V->getScope()->getDirectory());
}
/// addSourceLine - Add location information to specified debug information
/// entry.
void DwarfUnit::addSourceLine(DIE &Die, DIGlobalVariable G) {
- assert(G.isGlobalVariable());
+ assert(G);
- addSourceLine(Die, G.getLineNumber(), G.getFilename(), G.getDirectory());
+ addSourceLine(Die, G->getLine(), G->getFilename(), G->getDirectory());
}
/// addSourceLine - Add location information to specified debug information
/// entry.
void DwarfUnit::addSourceLine(DIE &Die, DISubprogram SP) {
- assert(SP.isSubprogram());
+ assert(SP);
- addSourceLine(Die, SP.getLineNumber(), SP.getFilename(), SP.getDirectory());
+ addSourceLine(Die, SP->getLine(), SP->getFilename(), SP->getDirectory());
}
/// addSourceLine - Add location information to specified debug information
/// entry.
void DwarfUnit::addSourceLine(DIE &Die, DIType Ty) {
- assert(Ty.isType());
+ assert(Ty);
- addSourceLine(Die, Ty.getLineNumber(), Ty.getFilename(), Ty.getDirectory());
+ addSourceLine(Die, Ty->getLine(), Ty->getFilename(), Ty->getDirectory());
}
/// addSourceLine - Add location information to specified debug information
/// entry.
void DwarfUnit::addSourceLine(DIE &Die, DIObjCProperty Ty) {
- assert(Ty.isObjCProperty());
+ assert(Ty);
- DIFile File = Ty.getFile();
- addSourceLine(Die, Ty.getLineNumber(), File.getFilename(),
- File.getDirectory());
+ addSourceLine(Die, Ty->getLine(), Ty->getFilename(), Ty->getDirectory());
}
/// addSourceLine - Add location information to specified debug information
/// entry.
void DwarfUnit::addSourceLine(DIE &Die, DINameSpace NS) {
- assert(NS.Verify());
-
- addSourceLine(Die, NS.getLineNumber(), NS.getFilename(), NS.getDirectory());
+ addSourceLine(Die, NS->getLine(), NS->getFilename(), NS->getDirectory());
}
/// addRegisterOp - Add register operand.
@@ -525,28 +521,26 @@ void DwarfUnit::addBlockByrefAddress(const DbgVariable &DV, DIE &Die,
const MachineLocation &Location) {
DIType Ty = DV.getType();
DIType TmpTy = Ty;
- uint16_t Tag = Ty.getTag();
+ uint16_t Tag = Ty->getTag();
bool isPointer = false;
StringRef varName = DV.getName();
if (Tag == dwarf::DW_TAG_pointer_type) {
- DIDerivedType DTy(Ty);
- TmpTy = resolve(DTy.getTypeDerivedFrom());
+ DIDerivedType DTy = cast<MDDerivedType>(Ty);
+ TmpTy = resolve(DTy->getBaseType());
isPointer = true;
}
- DICompositeType blockStruct(TmpTy);
-
// Find the __forwarding field and the variable field in the __Block_byref
// struct.
- DIArray Fields = blockStruct.getElements();
+ DIArray Fields = cast<MDCompositeTypeBase>(TmpTy)->getElements();
DIDerivedType varField;
DIDerivedType forwardingField;
- for (unsigned i = 0, N = Fields.getNumElements(); i < N; ++i) {
- DIDerivedType DT(Fields.getElement(i));
- StringRef fieldName = DT.getName();
+ for (unsigned i = 0, N = Fields.size(); i < N; ++i) {
+ DIDerivedType DT = cast<MDDerivedTypeBase>(Fields[i]);
+ StringRef fieldName = DT->getName();
if (fieldName == "__forwarding")
forwardingField = DT;
else if (fieldName == varName)
@@ -554,8 +548,8 @@ void DwarfUnit::addBlockByrefAddress(const DbgVariable &DV, DIE &Die,
}
// Get the offsets for the forwarding field and the variable field.
- unsigned forwardingFieldOffset = forwardingField.getOffsetInBits() >> 3;
- unsigned varFieldOffset = varField.getOffsetInBits() >> 2;
+ unsigned forwardingFieldOffset = forwardingField->getOffsetInBits() >> 3;
+ unsigned varFieldOffset = varField->getOffsetInBits() >> 2;
// Decode the original location, and use that as the start of the byref
// variable's location.
@@ -601,9 +595,8 @@ void DwarfUnit::addBlockByrefAddress(const DbgVariable &DV, DIE &Die,
/// Return true if type encoding is unsigned.
static bool isUnsignedDIType(DwarfDebug *DD, DIType Ty) {
- DIDerivedType DTy(Ty);
- if (DTy.isDerivedType()) {
- dwarf::Tag T = (dwarf::Tag)Ty.getTag();
+ if (DIDerivedType DTy = dyn_cast<MDDerivedTypeBase>(Ty)) {
+ dwarf::Tag T = (dwarf::Tag)Ty->getTag();
// Encode pointer constants as unsigned bytes. This is used at least for
// null pointer constant emission.
// (Pieces of) aggregate types that get hacked apart by SROA may also be
@@ -624,56 +617,55 @@ static bool isUnsignedDIType(DwarfDebug *DD, DIType Ty) {
T == dwarf::DW_TAG_volatile_type ||
T == dwarf::DW_TAG_restrict_type ||
T == dwarf::DW_TAG_enumeration_type);
- if (DITypeRef Deriv = DTy.getTypeDerivedFrom())
+ if (DITypeRef Deriv = DTy->getBaseType())
return isUnsignedDIType(DD, DD->resolve(Deriv));
// FIXME: Enums without a fixed underlying type have unknown signedness
// here, leading to incorrectly emitted constants.
- assert(DTy.getTag() == dwarf::DW_TAG_enumeration_type);
+ assert(DTy->getTag() == dwarf::DW_TAG_enumeration_type);
return false;
}
- DIBasicType BTy(Ty);
- assert(BTy.isBasicType());
- unsigned Encoding = BTy.getEncoding();
+ DIBasicType BTy = cast<MDBasicType>(Ty);
+ unsigned Encoding = BTy->getEncoding();
assert((Encoding == dwarf::DW_ATE_unsigned ||
Encoding == dwarf::DW_ATE_unsigned_char ||
Encoding == dwarf::DW_ATE_signed ||
Encoding == dwarf::DW_ATE_signed_char ||
- Encoding == dwarf::DW_ATE_float ||
- Encoding == dwarf::DW_ATE_UTF || Encoding == dwarf::DW_ATE_boolean ||
- (Ty.getTag() == dwarf::DW_TAG_unspecified_type &&
- Ty.getName() == "decltype(nullptr)")) &&
+ Encoding == dwarf::DW_ATE_float || Encoding == dwarf::DW_ATE_UTF ||
+ Encoding == dwarf::DW_ATE_boolean ||
+ (Ty->getTag() == dwarf::DW_TAG_unspecified_type &&
+ Ty->getName() == "decltype(nullptr)")) &&
"Unsupported encoding");
- return (Encoding == dwarf::DW_ATE_unsigned ||
- Encoding == dwarf::DW_ATE_unsigned_char ||
- Encoding == dwarf::DW_ATE_UTF || Encoding == dwarf::DW_ATE_boolean ||
- Ty.getTag() == dwarf::DW_TAG_unspecified_type);
+ return Encoding == dwarf::DW_ATE_unsigned ||
+ Encoding == dwarf::DW_ATE_unsigned_char ||
+ Encoding == dwarf::DW_ATE_UTF || Encoding == dwarf::DW_ATE_boolean ||
+ Ty->getTag() == dwarf::DW_TAG_unspecified_type;
}
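
The return expression above amounts to a small predicate on the tag and encoding. Restated, purely for illustration, as a standalone function over raw DWARF constants (values copied from the DWARF specification and declared locally so the snippet stands alone):

#include <cassert>

enum : unsigned {
  DW_ATE_boolean = 0x02, DW_ATE_signed = 0x05, DW_ATE_signed_char = 0x06,
  DW_ATE_unsigned = 0x07, DW_ATE_unsigned_char = 0x08, DW_ATE_UTF = 0x10,
  DW_TAG_base_type = 0x24, DW_TAG_unspecified_type = 0x3b
};

static bool isUnsignedEncoding(unsigned Tag, unsigned Encoding) {
  return Encoding == DW_ATE_unsigned || Encoding == DW_ATE_unsigned_char ||
         Encoding == DW_ATE_UTF || Encoding == DW_ATE_boolean ||
         Tag == DW_TAG_unspecified_type;
}

int main() {
  assert(isUnsignedEncoding(DW_TAG_base_type, DW_ATE_unsigned_char)); // 'unsigned char'
  assert(!isUnsignedEncoding(DW_TAG_base_type, DW_ATE_signed));       // 'int'
  assert(isUnsignedEncoding(DW_TAG_unspecified_type, 0));             // decltype(nullptr)
}
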
/// If this type is derived from a base type then return base type size.
static uint64_t getBaseTypeSize(DwarfDebug *DD, DIDerivedType Ty) {
- unsigned Tag = Ty.getTag();
+ unsigned Tag = Ty->getTag();
if (Tag != dwarf::DW_TAG_member && Tag != dwarf::DW_TAG_typedef &&
Tag != dwarf::DW_TAG_const_type && Tag != dwarf::DW_TAG_volatile_type &&
Tag != dwarf::DW_TAG_restrict_type)
- return Ty.getSizeInBits();
+ return Ty->getSizeInBits();
- DIType BaseType = DD->resolve(Ty.getTypeDerivedFrom());
+ auto *BaseType = DD->resolve(Ty->getBaseType());
- assert(BaseType.isValid() && "Unexpected invalid base type");
+ assert(BaseType && "Unexpected invalid base type");
// If this is a derived type, go ahead and get the base type, unless it's a
// reference, in which case it's just the size of the field. Pointer types have no need
// of this since they're a different type of qualification on the type.
- if (BaseType.getTag() == dwarf::DW_TAG_reference_type ||
- BaseType.getTag() == dwarf::DW_TAG_rvalue_reference_type)
- return Ty.getSizeInBits();
+ if (BaseType->getTag() == dwarf::DW_TAG_reference_type ||
+ BaseType->getTag() == dwarf::DW_TAG_rvalue_reference_type)
+ return Ty->getSizeInBits();
- if (BaseType.isDerivedType())
- return getBaseTypeSize(DD, DIDerivedType(BaseType));
+ if (auto *DT = dyn_cast<MDDerivedTypeBase>(BaseType))
+ return getBaseTypeSize(DD, DT);
- return BaseType.getSizeInBits();
+ return BaseType->getSizeInBits();
}
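
getBaseTypeSize effectively peels members, typedefs and cv-qualifiers until it reaches a type that carries a real size, stopping early when the base is a reference. A compact illustrative model with stand-in types (the reference early-out is noted but not modelled):

#include <cassert>
#include <cstdint>

struct Ty {
  enum Kind { Basic, Typedef, Const } K;
  uint64_t SizeInBits;
  const Ty *Base;
};

// Follow the Base chain of size-less wrappers down to the underlying type.
// (The real routine also returns the wrapper's own size when the base is a
// reference type; that case is omitted here for brevity.)
static uint64_t baseTypeSize(const Ty &T) {
  if (T.K == Ty::Basic)
    return T.SizeInBits;
  assert(T.Base && "derived type without a base type");
  return baseTypeSize(*T.Base);
}

int main() {
  Ty Int{Ty::Basic, 32, nullptr};
  Ty ConstInt{Ty::Const, 0, &Int};
  Ty MyInt{Ty::Typedef, 0, &ConstInt}; // typedef const int MyInt;
  assert(baseTypeSize(MyInt) == 32);
}
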
/// addConstantFPValue - Add constant value entry in variable DIE.
@@ -771,39 +763,37 @@ void DwarfUnit::addLinkageName(DIE &Die, StringRef LinkageName) {
/// addTemplateParams - Add template parameters into buffer.
void DwarfUnit::addTemplateParams(DIE &Buffer, DIArray TParams) {
// Add template parameters.
- for (unsigned i = 0, e = TParams.getNumElements(); i != e; ++i) {
- DIDescriptor Element = TParams.getElement(i);
- if (Element.isTemplateTypeParameter())
- constructTemplateTypeParameterDIE(Buffer,
- DITemplateTypeParameter(Element));
- else if (Element.isTemplateValueParameter())
- constructTemplateValueParameterDIE(Buffer,
- DITemplateValueParameter(Element));
+ for (unsigned i = 0, e = TParams.size(); i != e; ++i) {
+ DIDescriptor Element = TParams[i];
+ if (auto *TTP = dyn_cast<MDTemplateTypeParameter>(Element))
+ constructTemplateTypeParameterDIE(Buffer, TTP);
+ else if (auto *TVP = dyn_cast<MDTemplateValueParameter>(Element))
+ constructTemplateValueParameterDIE(Buffer, TVP);
}
}
/// getOrCreateContextDIE - Get context owner's DIE.
DIE *DwarfUnit::getOrCreateContextDIE(DIScope Context) {
- if (!Context || Context.isFile())
+ if (!Context || isa<MDFile>(Context))
return &getUnitDie();
- if (Context.isType())
- return getOrCreateTypeDIE(DIType(Context));
- if (Context.isNameSpace())
- return getOrCreateNameSpace(DINameSpace(Context));
- if (Context.isSubprogram())
- return getOrCreateSubprogramDIE(DISubprogram(Context));
+ if (auto *T = dyn_cast<MDType>(Context))
+ return getOrCreateTypeDIE(T);
+ if (auto *NS = dyn_cast<MDNamespace>(Context))
+ return getOrCreateNameSpace(NS);
+ if (auto *SP = dyn_cast<MDSubprogram>(Context))
+ return getOrCreateSubprogramDIE(SP);
return getDIE(Context);
}
DIE *DwarfUnit::createTypeDIE(DICompositeType Ty) {
- DIScope Context = resolve(Ty.getContext());
+ DIScope Context = resolve(Ty->getScope());
DIE *ContextDIE = getOrCreateContextDIE(Context);
if (DIE *TyDIE = getDIE(Ty))
return TyDIE;
// Create new type.
- DIE &TyDIE = createAndAddDIE(Ty.getTag(), *ContextDIE, Ty);
+ DIE &TyDIE = createAndAddDIE(Ty->getTag(), *ContextDIE, Ty);
constructTypeDIE(TyDIE, Ty);
@@ -817,18 +807,18 @@ DIE *DwarfUnit::getOrCreateTypeDIE(const MDNode *TyNode) {
if (!TyNode)
return nullptr;
- DIType Ty(TyNode);
- assert(Ty.isType());
- assert(Ty == resolve(Ty.getRef()) &&
+ auto *Ty = cast<MDType>(TyNode);
+ assert(Ty == resolve(Ty->getRef()) &&
"type was not uniqued, possible ODR violation.");
// DW_TAG_restrict_type is not supported in DWARF2
- if (Ty.getTag() == dwarf::DW_TAG_restrict_type && DD->getDwarfVersion() <= 2)
- return getOrCreateTypeDIE(resolve(DIDerivedType(Ty).getTypeDerivedFrom()));
+ if (Ty->getTag() == dwarf::DW_TAG_restrict_type && DD->getDwarfVersion() <= 2)
+ return getOrCreateTypeDIE(
+ resolve(DITypeRef(cast<MDDerivedType>(Ty)->getBaseType())));
// Construct the context before querying for the existence of the DIE in case
// such construction creates the DIE.
- DIScope Context = resolve(Ty.getContext());
+ DIScope Context = resolve(Ty->getScope());
DIE *ContextDIE = getOrCreateContextDIE(Context);
assert(ContextDIE);
@@ -836,24 +826,22 @@ DIE *DwarfUnit::getOrCreateTypeDIE(const MDNode *TyNode) {
return TyDIE;
// Create new type.
- DIE &TyDIE = createAndAddDIE(Ty.getTag(), *ContextDIE, Ty);
+ DIE &TyDIE = createAndAddDIE(Ty->getTag(), *ContextDIE, Ty);
updateAcceleratorTables(Context, Ty, TyDIE);
- if (Ty.isBasicType())
- constructTypeDIE(TyDIE, DIBasicType(Ty));
- else if (Ty.isCompositeType()) {
- DICompositeType CTy(Ty);
- if (GenerateDwarfTypeUnits && !Ty.isForwardDecl())
- if (MDString *TypeId = CTy.getIdentifier()) {
+ if (auto *BT = dyn_cast<MDBasicType>(Ty))
+ constructTypeDIE(TyDIE, BT);
+ else if (DICompositeType CTy = dyn_cast<MDCompositeTypeBase>(Ty)) {
+ if (GenerateDwarfTypeUnits && !Ty->isForwardDecl())
+ if (MDString *TypeId = CTy->getRawIdentifier()) {
DD->addDwarfTypeUnitType(getCU(), TypeId->getString(), TyDIE, CTy);
// Skip updating the accelerator tables since this is not the full type.
return &TyDIE;
}
constructTypeDIE(TyDIE, CTy);
} else {
- assert(Ty.isDerivedType() && "Unknown kind of DIType");
- constructTypeDIE(TyDIE, DIDerivedType(Ty));
+ constructTypeDIE(TyDIE, cast<MDDerivedType>(Ty));
}
return &TyDIE;
@@ -861,19 +849,18 @@ DIE *DwarfUnit::getOrCreateTypeDIE(const MDNode *TyNode) {
void DwarfUnit::updateAcceleratorTables(DIScope Context, DIType Ty,
const DIE &TyDIE) {
- if (!Ty.getName().empty() && !Ty.isForwardDecl()) {
+ if (!Ty->getName().empty() && !Ty->isForwardDecl()) {
bool IsImplementation = 0;
- if (Ty.isCompositeType()) {
- DICompositeType CT(Ty);
+ if (auto *CT = dyn_cast<MDCompositeTypeBase>(Ty)) {
// A runtime language of 0 actually means C/C++ and that any
// non-negative value is some version of Objective-C/C++.
- IsImplementation = (CT.getRunTimeLang() == 0) || CT.isObjcClassComplete();
+ IsImplementation = CT->getRuntimeLang() == 0 || CT->isObjcClassComplete();
}
unsigned Flags = IsImplementation ? dwarf::DW_FLAG_type_implementation : 0;
- DD->addAccelType(Ty.getName(), TyDIE, Flags);
+ DD->addAccelType(Ty->getName(), TyDIE, Flags);
- if (!Context || Context.isCompileUnit() || Context.isFile() ||
- Context.isNameSpace())
+ if (!Context || isa<MDCompileUnit>(Context) || isa<MDFile>(Context) ||
+ isa<MDNamespace>(Context))
addGlobalType(Ty, TyDIE, Context);
}
}
@@ -914,10 +901,10 @@ std::string DwarfUnit::getParentContextString(DIScope Context) const {
std::string CS;
SmallVector<DIScope, 1> Parents;
- while (!Context.isCompileUnit()) {
+ while (!isa<MDCompileUnit>(Context)) {
Parents.push_back(Context);
- if (Context.getContext())
- Context = resolve(Context.getContext());
+ if (Context->getScope())
+ Context = resolve(Context->getScope());
else
// Structure, etc types will have a NULL context if they're at the top
// level.
@@ -929,9 +916,9 @@ std::string DwarfUnit::getParentContextString(DIScope Context) const {
for (SmallVectorImpl<DIScope>::reverse_iterator I = Parents.rbegin(),
E = Parents.rend();
I != E; ++I) {
- DIScope Ctx = *I;
- StringRef Name = Ctx.getName();
- if (Name.empty() && Ctx.isNameSpace())
+ const MDScope *Ctx = *I;
+ StringRef Name = Ctx->getName();
+ if (Name.empty() && isa<MDNamespace>(Ctx))
Name = "(anonymous namespace)";
if (!Name.empty()) {
CS += Name;
@@ -944,31 +931,31 @@ std::string DwarfUnit::getParentContextString(DIScope Context) const {
/// constructTypeDIE - Construct basic type die from DIBasicType.
void DwarfUnit::constructTypeDIE(DIE &Buffer, DIBasicType BTy) {
// Get core information.
- StringRef Name = BTy.getName();
+ StringRef Name = BTy->getName();
// Add name if not anonymous or intermediate type.
if (!Name.empty())
addString(Buffer, dwarf::DW_AT_name, Name);
// An unspecified type only has a name attribute.
- if (BTy.getTag() == dwarf::DW_TAG_unspecified_type)
+ if (BTy->getTag() == dwarf::DW_TAG_unspecified_type)
return;
addUInt(Buffer, dwarf::DW_AT_encoding, dwarf::DW_FORM_data1,
- BTy.getEncoding());
+ BTy->getEncoding());
- uint64_t Size = BTy.getSizeInBits() >> 3;
+ uint64_t Size = BTy->getSizeInBits() >> 3;
addUInt(Buffer, dwarf::DW_AT_byte_size, None, Size);
}
/// constructTypeDIE - Construct derived type die from DIDerivedType.
void DwarfUnit::constructTypeDIE(DIE &Buffer, DIDerivedType DTy) {
// Get core information.
- StringRef Name = DTy.getName();
- uint64_t Size = DTy.getSizeInBits() >> 3;
+ StringRef Name = DTy->getName();
+ uint64_t Size = DTy->getSizeInBits() >> 3;
uint16_t Tag = Buffer.getTag();
// Map to main type, void will not have a type.
- DIType FromTy = resolve(DTy.getTypeDerivedFrom());
+ DIType FromTy = resolve(DTy->getBaseType());
if (FromTy)
addType(Buffer, FromTy);
@@ -982,24 +969,25 @@ void DwarfUnit::constructTypeDIE(DIE &Buffer, DIDerivedType DTy) {
addUInt(Buffer, dwarf::DW_AT_byte_size, None, Size);
if (Tag == dwarf::DW_TAG_ptr_to_member_type)
- addDIEEntry(Buffer, dwarf::DW_AT_containing_type,
- *getOrCreateTypeDIE(resolve(DTy.getClassType())));
+ addDIEEntry(
+ Buffer, dwarf::DW_AT_containing_type,
+ *getOrCreateTypeDIE(resolve(cast<MDDerivedType>(DTy)->getClassType())));
// Add source line info if available and TyDesc is not a forward declaration.
- if (!DTy.isForwardDecl())
+ if (!DTy->isForwardDecl())
addSourceLine(Buffer, DTy);
}
/// constructSubprogramArguments - Construct function argument DIEs.
void DwarfUnit::constructSubprogramArguments(DIE &Buffer, DITypeArray Args) {
- for (unsigned i = 1, N = Args.getNumElements(); i < N; ++i) {
- DIType Ty = resolve(Args.getElement(i));
+ for (unsigned i = 1, N = Args.size(); i < N; ++i) {
+ DIType Ty = resolve(Args[i]);
if (!Ty) {
assert(i == N-1 && "Unspecified parameter must be the last argument");
createAndAddDIE(dwarf::DW_TAG_unspecified_parameters, Buffer);
} else {
DIE &Arg = createAndAddDIE(dwarf::DW_TAG_formal_parameter, Buffer);
addType(Arg, Ty);
- if (Ty.isArtificial())
+ if (Ty->isArtificial())
addFlag(Arg, dwarf::DW_AT_artificial);
}
}
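
In the subroutine type array, element 0 is the return type (null for void) and a trailing null element encodes a C-style "...", which is why an unspecified parameter is asserted to be the last argument. A tiny model of that layout, with strings standing in for resolved type references:

#include <cassert>
#include <cstddef>
#include <vector>

using TypeRef = const char *; // nullptr: void return / unspecified parameter

static bool validSubroutineTypes(const std::vector<TypeRef> &Types) {
  for (std::size_t i = 1, N = Types.size(); i < N; ++i)
    if (!Types[i] && i != N - 1)
      return false; // "..." anywhere but the last slot is malformed
  return true;
}

int main() {
  std::vector<TypeRef> Printf = {"int", "const char *", nullptr}; // int printf(const char *, ...)
  std::vector<TypeRef> VoidF = {nullptr};                         // void f()
  assert(validSubroutineTypes(Printf) && validSubroutineTypes(VoidF));
}
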
@@ -1008,9 +996,9 @@ void DwarfUnit::constructSubprogramArguments(DIE &Buffer, DITypeArray Args) {
/// constructTypeDIE - Construct type DIE from DICompositeType.
void DwarfUnit::constructTypeDIE(DIE &Buffer, DICompositeType CTy) {
// Add name if not anonymous or intermediate type.
- StringRef Name = CTy.getName();
+ StringRef Name = CTy->getName();
- uint64_t Size = CTy.getSizeInBits() >> 3;
+ uint64_t Size = CTy->getSizeInBits() >> 3;
uint16_t Tag = Buffer.getTag();
switch (Tag) {
@@ -1022,14 +1010,13 @@ void DwarfUnit::constructTypeDIE(DIE &Buffer, DICompositeType CTy) {
break;
case dwarf::DW_TAG_subroutine_type: {
// Add return type. A void return won't have a type.
- DITypeArray Elements = DISubroutineType(CTy).getTypeArray();
- DIType RTy(resolve(Elements.getElement(0)));
- if (RTy)
- addType(Buffer, RTy);
+ auto Elements = cast<MDSubroutineType>(CTy)->getTypeArray();
+ if (Elements.size())
+ if (auto RTy = resolve(Elements[0]))
+ addType(Buffer, RTy);
bool isPrototyped = true;
- if (Elements.getNumElements() == 2 &&
- !Elements.getElement(1))
+ if (Elements.size() == 2 && !Elements[1])
isPrototyped = false;
constructSubprogramArguments(Buffer, Elements);
@@ -1042,60 +1029,46 @@ void DwarfUnit::constructTypeDIE(DIE &Buffer, DICompositeType CTy) {
Language == dwarf::DW_LANG_ObjC))
addFlag(Buffer, dwarf::DW_AT_prototyped);
- if (CTy.isLValueReference())
+ if (CTy->isLValueReference())
addFlag(Buffer, dwarf::DW_AT_reference);
- if (CTy.isRValueReference())
+ if (CTy->isRValueReference())
addFlag(Buffer, dwarf::DW_AT_rvalue_reference);
} break;
case dwarf::DW_TAG_structure_type:
case dwarf::DW_TAG_union_type:
case dwarf::DW_TAG_class_type: {
// Add elements to structure type.
- DIArray Elements = CTy.getElements();
- for (unsigned i = 0, N = Elements.getNumElements(); i < N; ++i) {
- DIDescriptor Element = Elements.getElement(i);
- if (Element.isSubprogram())
- getOrCreateSubprogramDIE(DISubprogram(Element));
- else if (Element.isDerivedType()) {
- DIDerivedType DDTy(Element);
- if (DDTy.getTag() == dwarf::DW_TAG_friend) {
+ DIArray Elements = CTy->getElements();
+ for (unsigned i = 0, N = Elements.size(); i < N; ++i) {
+ DIDescriptor Element = Elements[i];
+ if (!Element)
+ continue;
+ if (auto *SP = dyn_cast<MDSubprogram>(Element))
+ getOrCreateSubprogramDIE(SP);
+ else if (DIDerivedType DDTy = dyn_cast<MDDerivedTypeBase>(Element)) {
+ if (DDTy->getTag() == dwarf::DW_TAG_friend) {
DIE &ElemDie = createAndAddDIE(dwarf::DW_TAG_friend, Buffer);
- addType(ElemDie, resolve(DDTy.getTypeDerivedFrom()),
- dwarf::DW_AT_friend);
- } else if (DDTy.isStaticMember()) {
+ addType(ElemDie, resolve(DDTy->getBaseType()), dwarf::DW_AT_friend);
+ } else if (DDTy->isStaticMember()) {
getOrCreateStaticMemberDIE(DDTy);
} else {
constructMemberDIE(Buffer, DDTy);
}
- } else if (Element.isObjCProperty()) {
- DIObjCProperty Property(Element);
- DIE &ElemDie = createAndAddDIE(Property.getTag(), Buffer);
- StringRef PropertyName = Property.getObjCPropertyName();
+ } else if (DIObjCProperty Property = dyn_cast<MDObjCProperty>(Element)) {
+ DIE &ElemDie = createAndAddDIE(Property->getTag(), Buffer);
+ StringRef PropertyName = Property->getName();
addString(ElemDie, dwarf::DW_AT_APPLE_property_name, PropertyName);
- if (Property.getType())
- addType(ElemDie, Property.getType());
+ if (Property->getType())
+ addType(ElemDie, Property->getType());
addSourceLine(ElemDie, Property);
- StringRef GetterName = Property.getObjCPropertyGetterName();
+ StringRef GetterName = Property->getGetterName();
if (!GetterName.empty())
addString(ElemDie, dwarf::DW_AT_APPLE_property_getter, GetterName);
- StringRef SetterName = Property.getObjCPropertySetterName();
+ StringRef SetterName = Property->getSetterName();
if (!SetterName.empty())
addString(ElemDie, dwarf::DW_AT_APPLE_property_setter, SetterName);
- unsigned PropertyAttributes = 0;
- if (Property.isReadOnlyObjCProperty())
- PropertyAttributes |= dwarf::DW_APPLE_PROPERTY_readonly;
- if (Property.isReadWriteObjCProperty())
- PropertyAttributes |= dwarf::DW_APPLE_PROPERTY_readwrite;
- if (Property.isAssignObjCProperty())
- PropertyAttributes |= dwarf::DW_APPLE_PROPERTY_assign;
- if (Property.isRetainObjCProperty())
- PropertyAttributes |= dwarf::DW_APPLE_PROPERTY_retain;
- if (Property.isCopyObjCProperty())
- PropertyAttributes |= dwarf::DW_APPLE_PROPERTY_copy;
- if (Property.isNonAtomicObjCProperty())
- PropertyAttributes |= dwarf::DW_APPLE_PROPERTY_nonatomic;
- if (PropertyAttributes)
+ if (unsigned PropertyAttributes = Property->getAttributes())
addUInt(ElemDie, dwarf::DW_AT_APPLE_property_attribute, None,
PropertyAttributes);
@@ -1104,28 +1077,27 @@ void DwarfUnit::constructTypeDIE(DIE &Buffer, DICompositeType CTy) {
Entry = createDIEEntry(ElemDie);
insertDIEEntry(Element, Entry);
}
- } else
- continue;
+ }
}
- if (CTy.isAppleBlockExtension())
+ if (CTy->isAppleBlockExtension())
addFlag(Buffer, dwarf::DW_AT_APPLE_block);
// This is outside the DWARF spec, but GDB expects a DW_AT_containing_type
// inside C++ composite types to point to the base class with the vtable.
- DICompositeType ContainingType(resolve(CTy.getContainingType()));
- if (ContainingType)
+ if (DICompositeType ContainingType =
+ dyn_cast_or_null<MDCompositeType>(resolve(CTy->getVTableHolder())))
addDIEEntry(Buffer, dwarf::DW_AT_containing_type,
*getOrCreateTypeDIE(ContainingType));
- if (CTy.isObjcClassComplete())
+ if (CTy->isObjcClassComplete())
addFlag(Buffer, dwarf::DW_AT_APPLE_objc_complete_type);
// Add template parameters to a class, structure or union types.
// FIXME: The support isn't in the metadata for this yet.
if (Tag == dwarf::DW_TAG_class_type ||
Tag == dwarf::DW_TAG_structure_type || Tag == dwarf::DW_TAG_union_type)
- addTemplateParams(Buffer, CTy.getTemplateParams());
+ addTemplateParams(Buffer, CTy->getTemplateParams());
break;
}
@@ -1144,20 +1116,20 @@ void DwarfUnit::constructTypeDIE(DIE &Buffer, DICompositeType CTy) {
// TODO: Do we care about size for enum forward declarations?
if (Size)
addUInt(Buffer, dwarf::DW_AT_byte_size, None, Size);
- else if (!CTy.isForwardDecl())
+ else if (!CTy->isForwardDecl())
// Add zero size if it is not a forward declaration.
addUInt(Buffer, dwarf::DW_AT_byte_size, None, 0);
// If we're a forward decl, say so.
- if (CTy.isForwardDecl())
+ if (CTy->isForwardDecl())
addFlag(Buffer, dwarf::DW_AT_declaration);
// Add source line info if available.
- if (!CTy.isForwardDecl())
+ if (!CTy->isForwardDecl())
addSourceLine(Buffer, CTy);
// No harm in adding the runtime language to the declaration.
- unsigned RLang = CTy.getRunTimeLang();
+ unsigned RLang = CTy->getRuntimeLang();
if (RLang)
addUInt(Buffer, dwarf::DW_AT_APPLE_runtime_class, dwarf::DW_FORM_data1,
RLang);
@@ -1171,10 +1143,10 @@ void DwarfUnit::constructTemplateTypeParameterDIE(DIE &Buffer,
DIE &ParamDIE =
createAndAddDIE(dwarf::DW_TAG_template_type_parameter, Buffer);
// Add the type if it exists, it could be void and therefore no type.
- if (TP.getType())
- addType(ParamDIE, resolve(TP.getType()));
- if (!TP.getName().empty())
- addString(ParamDIE, dwarf::DW_AT_name, TP.getName());
+ if (TP->getType())
+ addType(ParamDIE, resolve(TP->getType()));
+ if (!TP->getName().empty())
+ addString(ParamDIE, dwarf::DW_AT_name, TP->getName());
}
/// constructTemplateValueParameterDIE - Construct new DIE for the given
@@ -1182,17 +1154,17 @@ void DwarfUnit::constructTemplateTypeParameterDIE(DIE &Buffer,
void
DwarfUnit::constructTemplateValueParameterDIE(DIE &Buffer,
DITemplateValueParameter VP) {
- DIE &ParamDIE = createAndAddDIE(VP.getTag(), Buffer);
+ DIE &ParamDIE = createAndAddDIE(VP->getTag(), Buffer);
// Add the type if there is one, template template and template parameter
// packs will not have a type.
- if (VP.getTag() == dwarf::DW_TAG_template_value_parameter)
- addType(ParamDIE, resolve(VP.getType()));
- if (!VP.getName().empty())
- addString(ParamDIE, dwarf::DW_AT_name, VP.getName());
- if (Metadata *Val = VP.getValue()) {
+ if (VP->getTag() == dwarf::DW_TAG_template_value_parameter)
+ addType(ParamDIE, resolve(VP->getType()));
+ if (!VP->getName().empty())
+ addString(ParamDIE, dwarf::DW_AT_name, VP->getName());
+ if (Metadata *Val = VP->getValue()) {
if (ConstantInt *CI = mdconst::dyn_extract<ConstantInt>(Val))
- addConstantValue(ParamDIE, CI, resolve(VP.getType()));
+ addConstantValue(ParamDIE, CI, resolve(VP->getType()));
else if (GlobalValue *GV = mdconst::dyn_extract<GlobalValue>(Val)) {
// For declaration non-type template parameters (such as global values and
// functions)
@@ -1202,14 +1174,12 @@ DwarfUnit::constructTemplateValueParameterDIE(DIE &Buffer,
// parameter, rather than a pointer to it.
addUInt(*Loc, dwarf::DW_FORM_data1, dwarf::DW_OP_stack_value);
addBlock(ParamDIE, dwarf::DW_AT_location, Loc);
- } else if (VP.getTag() == dwarf::DW_TAG_GNU_template_template_param) {
+ } else if (VP->getTag() == dwarf::DW_TAG_GNU_template_template_param) {
assert(isa<MDString>(Val));
addString(ParamDIE, dwarf::DW_AT_GNU_template_name,
cast<MDString>(Val)->getString());
- } else if (VP.getTag() == dwarf::DW_TAG_GNU_template_parameter_pack) {
- assert(isa<MDNode>(Val));
- DIArray A(cast<MDNode>(Val));
- addTemplateParams(ParamDIE, A);
+ } else if (VP->getTag() == dwarf::DW_TAG_GNU_template_parameter_pack) {
+ addTemplateParams(ParamDIE, cast<MDTuple>(Val));
}
}
}
@@ -1218,19 +1188,19 @@ DwarfUnit::constructTemplateValueParameterDIE(DIE &Buffer,
DIE *DwarfUnit::getOrCreateNameSpace(DINameSpace NS) {
// Construct the context before querying for the existence of the DIE in case
// such construction creates the DIE.
- DIE *ContextDIE = getOrCreateContextDIE(NS.getContext());
+ DIE *ContextDIE = getOrCreateContextDIE(NS->getScope());
if (DIE *NDie = getDIE(NS))
return NDie;
DIE &NDie = createAndAddDIE(dwarf::DW_TAG_namespace, *ContextDIE, NS);
- StringRef Name = NS.getName();
+ StringRef Name = NS->getName();
if (!Name.empty())
- addString(NDie, dwarf::DW_AT_name, NS.getName());
+ addString(NDie, dwarf::DW_AT_name, NS->getName());
else
Name = "(anonymous namespace)";
DD->addAccelNamespace(Name, NDie);
- addGlobalName(Name, NDie, NS.getContext());
+ addGlobalName(Name, NDie, NS->getScope());
addSourceLine(NDie, NS);
return &NDie;
}
@@ -1241,12 +1211,12 @@ DIE *DwarfUnit::getOrCreateSubprogramDIE(DISubprogram SP, bool Minimal) {
// such construction creates the DIE (as is the case for member function
// declarations).
DIE *ContextDIE =
- Minimal ? &getUnitDie() : getOrCreateContextDIE(resolve(SP.getContext()));
+ Minimal ? &getUnitDie() : getOrCreateContextDIE(resolve(SP->getScope()));
if (DIE *SPDie = getDIE(SP))
return SPDie;
- if (DISubprogram SPDecl = SP.getFunctionDeclaration()) {
+ if (auto *SPDecl = SP->getDeclaration()) {
if (!Minimal) {
// Add subprogram definitions to the CU die directly.
ContextDIE = &getUnitDie();
@@ -1260,7 +1230,7 @@ DIE *DwarfUnit::getOrCreateSubprogramDIE(DISubprogram SP, bool Minimal) {
// Stop here and fill this in later, depending on whether or not this
// subprogram turns out to have inlined instances.
- if (SP.isDefinition())
+ if (SP->isDefinition())
return &SPDie;
applySubprogramAttributes(SP, SPDie);
@@ -1271,19 +1241,19 @@ bool DwarfUnit::applySubprogramDefinitionAttributes(DISubprogram SP,
DIE &SPDie) {
DIE *DeclDie = nullptr;
StringRef DeclLinkageName;
- if (DISubprogram SPDecl = SP.getFunctionDeclaration()) {
+ if (auto *SPDecl = SP->getDeclaration()) {
DeclDie = getDIE(SPDecl);
assert(DeclDie && "This DIE should've already been constructed when the "
"definition DIE was created in "
"getOrCreateSubprogramDIE");
- DeclLinkageName = SPDecl.getLinkageName();
+ DeclLinkageName = SPDecl->getLinkageName();
}
// Add function template parameters.
- addTemplateParams(SPDie, SP.getTemplateParams());
+ addTemplateParams(SPDie, SP->getTemplateParams());
// Add the linkage name if we have one and it isn't in the Decl.
- StringRef LinkageName = SP.getLinkageName();
+ StringRef LinkageName = SP->getLinkageName();
assert(((LinkageName.empty() || DeclLinkageName.empty()) ||
LinkageName == DeclLinkageName) &&
"decl has a linkage name and it is different");
@@ -1306,8 +1276,8 @@ void DwarfUnit::applySubprogramAttributes(DISubprogram SP, DIE &SPDie,
return;
// Constructors and operators for anonymous aggregates do not have names.
- if (!SP.getName().empty())
- addString(SPDie, dwarf::DW_AT_name, SP.getName());
+ if (!SP->getName().empty())
+ addString(SPDie, dwarf::DW_AT_name, SP->getName());
// Skip the rest of the attributes under -gmlt to save space.
if (Minimal)
@@ -1318,33 +1288,34 @@ void DwarfUnit::applySubprogramAttributes(DISubprogram SP, DIE &SPDie,
// Add the prototype if we have a prototype and we have a C like
// language.
uint16_t Language = getLanguage();
- if (SP.isPrototyped() &&
+ if (SP->isPrototyped() &&
(Language == dwarf::DW_LANG_C89 || Language == dwarf::DW_LANG_C99 ||
Language == dwarf::DW_LANG_ObjC))
addFlag(SPDie, dwarf::DW_AT_prototyped);
- DISubroutineType SPTy = SP.getType();
- assert(SPTy.getTag() == dwarf::DW_TAG_subroutine_type &&
+ DISubroutineType SPTy = SP->getType();
+ assert(SPTy->getTag() == dwarf::DW_TAG_subroutine_type &&
"the type of a subprogram should be a subroutine");
- DITypeArray Args = SPTy.getTypeArray();
+ auto Args = SPTy->getTypeArray();
// Add a return type. If this is a type like a C/C++ void type we don't add a
// return type.
- if (resolve(Args.getElement(0)))
- addType(SPDie, DIType(resolve(Args.getElement(0))));
+ if (Args.size())
+ if (auto Ty = resolve(Args[0]))
+ addType(SPDie, Ty);
- unsigned VK = SP.getVirtuality();
+ unsigned VK = SP->getVirtuality();
if (VK) {
addUInt(SPDie, dwarf::DW_AT_virtuality, dwarf::DW_FORM_data1, VK);
DIELoc *Block = getDIELoc();
addUInt(*Block, dwarf::DW_FORM_data1, dwarf::DW_OP_constu);
- addUInt(*Block, dwarf::DW_FORM_udata, SP.getVirtualIndex());
+ addUInt(*Block, dwarf::DW_FORM_udata, SP->getVirtualIndex());
addBlock(SPDie, dwarf::DW_AT_vtable_elem_location, Block);
ContainingTypeMap.insert(
- std::make_pair(&SPDie, resolve(SP.getContainingType())));
+ std::make_pair(&SPDie, resolve(SP->getContainingType())));
}
- if (!SP.isDefinition()) {
+ if (!SP->isDefinition()) {
addFlag(SPDie, dwarf::DW_AT_declaration);
// Add arguments. Do not add arguments for subprogram definition. They will
@@ -1352,35 +1323,35 @@ void DwarfUnit::applySubprogramAttributes(DISubprogram SP, DIE &SPDie,
constructSubprogramArguments(SPDie, Args);
}
- if (SP.isArtificial())
+ if (SP->isArtificial())
addFlag(SPDie, dwarf::DW_AT_artificial);
- if (!SP.isLocalToUnit())
+ if (!SP->isLocalToUnit())
addFlag(SPDie, dwarf::DW_AT_external);
- if (SP.isOptimized())
+ if (SP->isOptimized())
addFlag(SPDie, dwarf::DW_AT_APPLE_optimized);
if (unsigned isa = Asm->getISAEncoding())
addUInt(SPDie, dwarf::DW_AT_APPLE_isa, dwarf::DW_FORM_flag, isa);
- if (SP.isLValueReference())
+ if (SP->isLValueReference())
addFlag(SPDie, dwarf::DW_AT_reference);
- if (SP.isRValueReference())
+ if (SP->isRValueReference())
addFlag(SPDie, dwarf::DW_AT_rvalue_reference);
- if (SP.isProtected())
+ if (SP->isProtected())
addUInt(SPDie, dwarf::DW_AT_accessibility, dwarf::DW_FORM_data1,
dwarf::DW_ACCESS_protected);
- else if (SP.isPrivate())
+ else if (SP->isPrivate())
addUInt(SPDie, dwarf::DW_AT_accessibility, dwarf::DW_FORM_data1,
dwarf::DW_ACCESS_private);
- else if (SP.isPublic())
+ else if (SP->isPublic())
addUInt(SPDie, dwarf::DW_AT_accessibility, dwarf::DW_FORM_data1,
dwarf::DW_ACCESS_public);
- if (SP.isExplicit())
+ if (SP->isExplicit())
addFlag(SPDie, dwarf::DW_AT_explicit);
}
@@ -1393,9 +1364,9 @@ void DwarfUnit::constructSubrangeDIE(DIE &Buffer, DISubrange SR, DIE *IndexTy) {
// C/C++. The Count value is the number of elements. Values are 64 bit. If
// Count == -1 then the array is unbounded and we do not emit
// DW_AT_lower_bound and DW_AT_count attributes.
- int64_t LowerBound = SR.getLo();
+ int64_t LowerBound = SR->getLowerBound();
int64_t DefaultLowerBound = getDefaultLowerBound();
- int64_t Count = SR.getCount();
+ int64_t Count = SR->getCount();
if (DefaultLowerBound == -1 || LowerBound != DefaultLowerBound)
addUInt(DW_Subrange, dwarf::DW_AT_lower_bound, None, LowerBound);
@@ -1420,11 +1391,11 @@ DIE *DwarfUnit::getIndexTyDie() {
/// constructArrayTypeDIE - Construct array type DIE from DICompositeType.
void DwarfUnit::constructArrayTypeDIE(DIE &Buffer, DICompositeType CTy) {
- if (CTy.isVector())
+ if (CTy->isVector())
addFlag(Buffer, dwarf::DW_AT_GNU_vector);
// Emit the element type.
- addType(Buffer, resolve(CTy.getTypeDerivedFrom()));
+ addType(Buffer, resolve(CTy->getBaseType()));
// Get an anonymous type for index type.
// FIXME: This type should be passed down from the front end
@@ -1432,31 +1403,32 @@ void DwarfUnit::constructArrayTypeDIE(DIE &Buffer, DICompositeType CTy) {
DIE *IdxTy = getIndexTyDie();
// Add subranges to array type.
- DIArray Elements = CTy.getElements();
- for (unsigned i = 0, N = Elements.getNumElements(); i < N; ++i) {
- DIDescriptor Element = Elements.getElement(i);
- if (Element.getTag() == dwarf::DW_TAG_subrange_type)
- constructSubrangeDIE(Buffer, DISubrange(Element), IdxTy);
+ DIArray Elements = CTy->getElements();
+ for (unsigned i = 0, N = Elements.size(); i < N; ++i) {
+ // FIXME: Should this really be such a loose cast?
+ if (auto *Element = dyn_cast_or_null<DebugNode>(Elements[i]))
+ if (Element->getTag() == dwarf::DW_TAG_subrange_type)
+ constructSubrangeDIE(Buffer, cast<MDSubrange>(Element), IdxTy);
}
}
/// constructEnumTypeDIE - Construct an enum type DIE from DICompositeType.
void DwarfUnit::constructEnumTypeDIE(DIE &Buffer, DICompositeType CTy) {
- DIArray Elements = CTy.getElements();
+ DIArray Elements = CTy->getElements();
// Add enumerators to enumeration type.
- for (unsigned i = 0, N = Elements.getNumElements(); i < N; ++i) {
- DIEnumerator Enum(Elements.getElement(i));
- if (Enum.isEnumerator()) {
+ for (unsigned i = 0, N = Elements.size(); i < N; ++i) {
+ auto *Enum = dyn_cast_or_null<MDEnumerator>(Elements[i]);
+ if (Enum) {
DIE &Enumerator = createAndAddDIE(dwarf::DW_TAG_enumerator, Buffer);
- StringRef Name = Enum.getName();
+ StringRef Name = Enum->getName();
addString(Enumerator, dwarf::DW_AT_name, Name);
- int64_t Value = Enum.getEnumValue();
+ int64_t Value = Enum->getValue();
addSInt(Enumerator, dwarf::DW_AT_const_value, dwarf::DW_FORM_sdata,
Value);
}
}
- DIType DTy = resolve(CTy.getTypeDerivedFrom());
+ DIType DTy = resolve(CTy->getBaseType());
if (DTy) {
addType(Buffer, DTy);
addFlag(Buffer, dwarf::DW_AT_enum_class);
@@ -1481,17 +1453,20 @@ void DwarfUnit::constructContainingTypeDIEs() {
}
/// constructMemberDIE - Construct member DIE from DIDerivedType.
-void DwarfUnit::constructMemberDIE(DIE &Buffer, DIDerivedType DT) {
- DIE &MemberDie = createAndAddDIE(DT.getTag(), Buffer);
- StringRef Name = DT.getName();
+void DwarfUnit::constructMemberDIE(DIE &Buffer, DIDerivedType DT_) {
+ // Downcast to MDDerivedType.
+ const MDDerivedType *DT = cast<MDDerivedType>(DT_);
+
+ DIE &MemberDie = createAndAddDIE(DT->getTag(), Buffer);
+ StringRef Name = DT->getName();
if (!Name.empty())
addString(MemberDie, dwarf::DW_AT_name, Name);
- addType(MemberDie, resolve(DT.getTypeDerivedFrom()));
+ addType(MemberDie, resolve(DT->getBaseType()));
addSourceLine(MemberDie, DT);
- if (DT.getTag() == dwarf::DW_TAG_inheritance && DT.isVirtual()) {
+ if (DT->getTag() == dwarf::DW_TAG_inheritance && DT->isVirtual()) {
// For C++, virtual base classes are not at fixed offset. Use following
// expression to extract appropriate offset from vtable.
@@ -1501,14 +1476,14 @@ void DwarfUnit::constructMemberDIE(DIE &Buffer, DIDerivedType DT) {
addUInt(*VBaseLocationDie, dwarf::DW_FORM_data1, dwarf::DW_OP_dup);
addUInt(*VBaseLocationDie, dwarf::DW_FORM_data1, dwarf::DW_OP_deref);
addUInt(*VBaseLocationDie, dwarf::DW_FORM_data1, dwarf::DW_OP_constu);
- addUInt(*VBaseLocationDie, dwarf::DW_FORM_udata, DT.getOffsetInBits());
+ addUInt(*VBaseLocationDie, dwarf::DW_FORM_udata, DT->getOffsetInBits());
addUInt(*VBaseLocationDie, dwarf::DW_FORM_data1, dwarf::DW_OP_minus);
addUInt(*VBaseLocationDie, dwarf::DW_FORM_data1, dwarf::DW_OP_deref);
addUInt(*VBaseLocationDie, dwarf::DW_FORM_data1, dwarf::DW_OP_plus);
addBlock(MemberDie, dwarf::DW_AT_data_member_location, VBaseLocationDie);
} else {
- uint64_t Size = DT.getSizeInBits();
+ uint64_t Size = DT->getSizeInBits();
uint64_t FieldSize = getBaseTypeSize(DD, DT);
uint64_t OffsetInBytes;
@@ -1517,8 +1492,8 @@ void DwarfUnit::constructMemberDIE(DIE &Buffer, DIDerivedType DT) {
addUInt(MemberDie, dwarf::DW_AT_byte_size, None, FieldSize/8);
addUInt(MemberDie, dwarf::DW_AT_bit_size, None, Size);
- uint64_t Offset = DT.getOffsetInBits();
- uint64_t AlignMask = ~(DT.getAlignInBits() - 1);
+ uint64_t Offset = DT->getOffsetInBits();
+ uint64_t AlignMask = ~(DT->getAlignInBits() - 1);
uint64_t HiMark = (Offset + FieldSize) & AlignMask;
uint64_t FieldOffset = (HiMark - FieldSize);
Offset -= FieldOffset;
@@ -1533,7 +1508,7 @@ void DwarfUnit::constructMemberDIE(DIE &Buffer, DIDerivedType DT) {
OffsetInBytes = FieldOffset >> 3;
} else
// This is not a bitfield.
- OffsetInBytes = DT.getOffsetInBits() >> 3;
+ OffsetInBytes = DT->getOffsetInBits() >> 3;
if (DD->getDwarfVersion() <= 2) {
DIELoc *MemLocationDie = new (DIEValueAllocator) DIELoc();
@@ -1545,49 +1520,50 @@ void DwarfUnit::constructMemberDIE(DIE &Buffer, DIDerivedType DT) {
OffsetInBytes);
}
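
The bitfield placement math a few lines up is easiest to check with concrete numbers. The helper below repeats only the steps visible in this hunk (the endian-dependent DW_AT_bit_offset adjustment that the hunk elides is not modelled): with a 32-bit-aligned 32-bit storage unit, a field starting at bit 35 lands in the second 4-byte unit, 3 bits into it.

#include <cassert>
#include <cstdint>

struct BitfieldLoc {
  uint64_t OffsetInBytes;   // byte offset of the storage unit in the struct
  uint64_t BitOffsetInUnit; // bit offset of the field within that unit
};

static BitfieldLoc place(uint64_t OffsetInBits, uint64_t FieldSizeInBits,
                         uint64_t AlignInBits) {
  uint64_t AlignMask = ~(AlignInBits - 1);
  uint64_t HiMark = (OffsetInBits + FieldSizeInBits) & AlignMask;
  uint64_t FieldOffset = HiMark - FieldSizeInBits;
  return {FieldOffset >> 3, OffsetInBits - FieldOffset};
}

int main() {
  BitfieldLoc A = place(/*OffsetInBits=*/0, /*FieldSize=*/32, /*Align=*/32);
  assert(A.OffsetInBytes == 0 && A.BitOffsetInUnit == 0);
  BitfieldLoc B = place(/*OffsetInBits=*/35, /*FieldSize=*/32, /*Align=*/32);
  assert(B.OffsetInBytes == 4 && B.BitOffsetInUnit == 3);
}
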
- if (DT.isProtected())
+ if (DT->isProtected())
addUInt(MemberDie, dwarf::DW_AT_accessibility, dwarf::DW_FORM_data1,
dwarf::DW_ACCESS_protected);
- else if (DT.isPrivate())
+ else if (DT->isPrivate())
addUInt(MemberDie, dwarf::DW_AT_accessibility, dwarf::DW_FORM_data1,
dwarf::DW_ACCESS_private);
// Otherwise C++ member and base classes are considered public.
- else if (DT.isPublic())
+ else if (DT->isPublic())
addUInt(MemberDie, dwarf::DW_AT_accessibility, dwarf::DW_FORM_data1,
dwarf::DW_ACCESS_public);
- if (DT.isVirtual())
+ if (DT->isVirtual())
addUInt(MemberDie, dwarf::DW_AT_virtuality, dwarf::DW_FORM_data1,
dwarf::DW_VIRTUALITY_virtual);
// Objective-C properties.
- if (MDNode *PNode = DT.getObjCProperty())
+ if (MDNode *PNode = DT->getObjCProperty())
if (DIEEntry *PropertyDie = getDIEEntry(PNode))
MemberDie.addValue(dwarf::DW_AT_APPLE_property, dwarf::DW_FORM_ref4,
PropertyDie);
- if (DT.isArtificial())
+ if (DT->isArtificial())
addFlag(MemberDie, dwarf::DW_AT_artificial);
}
/// getOrCreateStaticMemberDIE - Create new DIE for C++ static member.
-DIE *DwarfUnit::getOrCreateStaticMemberDIE(DIDerivedType DT) {
- if (!DT.Verify())
+DIE *DwarfUnit::getOrCreateStaticMemberDIE(DIDerivedType DT_) {
+ const MDDerivedType *DT = cast_or_null<MDDerivedType>(DT_);
+ if (!DT)
return nullptr;
// Construct the context before querying for the existence of the DIE in case
// such construction creates the DIE.
- DIE *ContextDIE = getOrCreateContextDIE(resolve(DT.getContext()));
+ DIE *ContextDIE = getOrCreateContextDIE(resolve(DT->getScope()));
assert(dwarf::isType(ContextDIE->getTag()) &&
"Static member should belong to a type.");
if (DIE *StaticMemberDIE = getDIE(DT))
return StaticMemberDIE;
- DIE &StaticMemberDIE = createAndAddDIE(DT.getTag(), *ContextDIE, DT);
+ DIE &StaticMemberDIE = createAndAddDIE(DT->getTag(), *ContextDIE, DT);
- DIType Ty = resolve(DT.getTypeDerivedFrom());
+ DIType Ty = resolve(DT->getBaseType());
- addString(StaticMemberDIE, dwarf::DW_AT_name, DT.getName());
+ addString(StaticMemberDIE, dwarf::DW_AT_name, DT->getName());
addType(StaticMemberDIE, Ty);
addSourceLine(StaticMemberDIE, DT);
addFlag(StaticMemberDIE, dwarf::DW_AT_external);
@@ -1595,19 +1571,19 @@ DIE *DwarfUnit::getOrCreateStaticMemberDIE(DIDerivedType DT) {
// FIXME: We could omit private if the parent is a class_type, and
// public if the parent is something else.
- if (DT.isProtected())
+ if (DT->isProtected())
addUInt(StaticMemberDIE, dwarf::DW_AT_accessibility, dwarf::DW_FORM_data1,
dwarf::DW_ACCESS_protected);
- else if (DT.isPrivate())
+ else if (DT->isPrivate())
addUInt(StaticMemberDIE, dwarf::DW_AT_accessibility, dwarf::DW_FORM_data1,
dwarf::DW_ACCESS_private);
- else if (DT.isPublic())
+ else if (DT->isPublic())
addUInt(StaticMemberDIE, dwarf::DW_AT_accessibility, dwarf::DW_FORM_data1,
dwarf::DW_ACCESS_public);
- if (const ConstantInt *CI = dyn_cast_or_null<ConstantInt>(DT.getConstant()))
+ if (const ConstantInt *CI = dyn_cast_or_null<ConstantInt>(DT->getConstant()))
addConstantValue(StaticMemberDIE, CI, Ty);
- if (const ConstantFP *CFP = dyn_cast_or_null<ConstantFP>(DT.getConstant()))
+ if (const ConstantFP *CFP = dyn_cast_or_null<ConstantFP>(DT->getConstant()))
addConstantFPValue(StaticMemberDIE, CFP);
return &StaticMemberDIE;
diff --git a/lib/CodeGen/AsmPrinter/DwarfUnit.h b/lib/CodeGen/AsmPrinter/DwarfUnit.h
index 81c5821..b354255 100644
--- a/lib/CodeGen/AsmPrinter/DwarfUnit.h
+++ b/lib/CodeGen/AsmPrinter/DwarfUnit.h
@@ -141,7 +141,7 @@ public:
// Accessors.
AsmPrinter* getAsmPrinter() const { return Asm; }
unsigned getUniqueID() const { return UniqueID; }
- uint16_t getLanguage() const { return CUNode.getLanguage(); }
+ uint16_t getLanguage() const { return CUNode->getSourceLanguage(); }
DICompileUnit getCUNode() const { return CUNode; }
DIE &getUnitDie() { return UnitDie; }
@@ -342,7 +342,7 @@ protected:
/// resolve - Look in the DwarfDebug map for the MDNode that
/// corresponds to the reference.
- template <typename T> T resolve(DIRef<T> Ref) const {
+ template <typename T> T *resolve(TypedDebugNodeRef<T> Ref) const {
return DD->resolve(Ref);
}
diff --git a/lib/CodeGen/AsmPrinter/EHStreamer.cpp b/lib/CodeGen/AsmPrinter/EHStreamer.cpp
index 14df4c9..6f64d8f 100644
--- a/lib/CodeGen/AsmPrinter/EHStreamer.cpp
+++ b/lib/CodeGen/AsmPrinter/EHStreamer.cpp
@@ -188,20 +188,12 @@ bool EHStreamer::callToNoUnwindFunction(const MachineInstr *MI) {
return MarkedNoUnwind;
}
-/// Compute the call-site table. The entry for an invoke has a try-range
-/// containing the call, a non-zero landing pad, and an appropriate action. The
-/// entry for an ordinary call has a try-range containing the call and zero for
-/// the landing pad and the action. Calls marked 'nounwind' have no entry and
-/// must not be contained in the try-range of any entry - they form gaps in the
-/// table. Entries must be ordered by try-range address.
-void EHStreamer::
-computeCallSiteTable(SmallVectorImpl<CallSiteEntry> &CallSites,
- const SmallVectorImpl<const LandingPadInfo *> &LandingPads,
- const SmallVectorImpl<unsigned> &FirstActions) {
+void EHStreamer::computePadMap(
+ const SmallVectorImpl<const LandingPadInfo *> &LandingPads,
+ RangeMapType &PadMap) {
// Invokes and nounwind calls have entries in PadMap (due to being bracketed
// by try-range labels when lowered). Ordinary calls do not, so appropriate
// try-ranges for them need be deduced so we can put them in the LSDA.
- RangeMapType PadMap;
for (unsigned i = 0, N = LandingPads.size(); i != N; ++i) {
const LandingPadInfo *LandingPad = LandingPads[i];
for (unsigned j = 0, E = LandingPad->BeginLabels.size(); j != E; ++j) {
@@ -211,6 +203,20 @@ computeCallSiteTable(SmallVectorImpl<CallSiteEntry> &CallSites,
PadMap[BeginLabel] = P;
}
}
+}
+
+/// Compute the call-site table. The entry for an invoke has a try-range
+/// containing the call, a non-zero landing pad, and an appropriate action. The
+/// entry for an ordinary call has a try-range containing the call and zero for
+/// the landing pad and the action. Calls marked 'nounwind' have no entry and
+/// must not be contained in the try-range of any entry - they form gaps in the
+/// table. Entries must be ordered by try-range address.
+void EHStreamer::
+computeCallSiteTable(SmallVectorImpl<CallSiteEntry> &CallSites,
+ const SmallVectorImpl<const LandingPadInfo *> &LandingPads,
+ const SmallVectorImpl<unsigned> &FirstActions) {
+ RangeMapType PadMap;
+ computePadMap(LandingPads, PadMap);
// The end label of the previous invoke or nounwind try-range.
MCSymbol *LastLabel = nullptr;
diff --git a/lib/CodeGen/AsmPrinter/EHStreamer.h b/lib/CodeGen/AsmPrinter/EHStreamer.h
index 94d0585..65973fa 100644
--- a/lib/CodeGen/AsmPrinter/EHStreamer.h
+++ b/lib/CodeGen/AsmPrinter/EHStreamer.h
@@ -80,13 +80,15 @@ protected:
/// `false' otherwise.
bool callToNoUnwindFunction(const MachineInstr *MI);
+ void computePadMap(const SmallVectorImpl<const LandingPadInfo *> &LandingPads,
+ RangeMapType &PadMap);
+
/// Compute the call-site table. The entry for an invoke has a try-range
/// containing the call, a non-zero landing pad and an appropriate action.
/// The entry for an ordinary call has a try-range containing the call and
/// zero for the landing pad and the action. Calls marked 'nounwind' have
/// no entry and must not be contained in the try-range of any entry - they
/// form gaps in the table. Entries must be ordered by try-range address.
-
void computeCallSiteTable(SmallVectorImpl<CallSiteEntry> &CallSites,
const SmallVectorImpl<const LandingPadInfo *> &LPs,
const SmallVectorImpl<unsigned> &FirstActions);
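
An illustrative picture of the table this routine produces, with plain integers standing in for the MCSymbol labels and a stand-in struct in place of the real CallSiteEntry: an invoke carries a landing pad and an action, an ordinary (potentially throwing) call carries zeros, a nounwind call has no entry at all, and entries are ordered by try-range address.

#include <cassert>
#include <cstddef>
#include <vector>

struct CallSite {
  unsigned Begin, End;  // try-range [Begin, End)
  unsigned LandingPad;  // 0 for an ordinary call
  unsigned Action;      // 0 for an ordinary call
};

int main() {
  std::vector<CallSite> Table = {
      {0x10, 0x14, 0x80, 1}, // invoke: landing pad at 0x80, action 1
      {0x20, 0x24, 0x00, 0}, // ordinary call: may unwind, no handler here
      // a nounwind call at 0x30 gets no entry, leaving a gap in the ranges
  };
  for (std::size_t i = 1; i < Table.size(); ++i)
    assert(Table[i - 1].Begin < Table[i].Begin && "ordered by try-range address");
}
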
@@ -123,7 +125,7 @@ protected:
public:
EHStreamer(AsmPrinter *A);
- virtual ~EHStreamer();
+ ~EHStreamer() override;
// Unused.
void setSymbolSize(const MCSymbol *Sym, uint64_t Size) override {}
diff --git a/lib/CodeGen/AsmPrinter/Win64Exception.cpp b/lib/CodeGen/AsmPrinter/Win64Exception.cpp
index 7d76ead..f89d364 100644
--- a/lib/CodeGen/AsmPrinter/Win64Exception.cpp
+++ b/lib/CodeGen/AsmPrinter/Win64Exception.cpp
@@ -19,6 +19,7 @@
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
+#include "llvm/CodeGen/WinEHFuncInfo.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Mangler.h"
#include "llvm/IR/Module.h"
@@ -67,6 +68,27 @@ void Win64Exception::beginFunction(const MachineFunction *MF) {
shouldEmitLSDA = shouldEmitPersonality &&
LSDAEncoding != dwarf::DW_EH_PE_omit;
+
+ // If this was an outlined handler, we need to define the label corresponding
+ // to the offset of the parent frame relative to the stack pointer after the
+ // prologue.
+ const Function *F = MF->getFunction();
+ const Function *ParentF = MMI->getWinEHParent(F);
+ if (F != ParentF) {
+ WinEHFuncInfo &FuncInfo = MMI->getWinEHFuncInfo(ParentF);
+ auto I = FuncInfo.CatchHandlerParentFrameObjOffset.find(F);
+ if (I != FuncInfo.CatchHandlerParentFrameObjOffset.end()) {
+ MCSymbol *HandlerTypeParentFrameOffset =
+ Asm->OutContext.getOrCreateParentFrameOffsetSymbol(
+ GlobalValue::getRealLinkageName(F->getName()));
+
+ // Emit a symbol assignment.
+ Asm->OutStreamer.EmitAssignment(
+ HandlerTypeParentFrameOffset,
+ MCConstantExpr::Create(I->second, Asm->OutContext));
+ }
+ }
+
if (!shouldEmitPersonality && !shouldEmitMoves)
return;
@@ -82,12 +104,17 @@ void Win64Exception::beginFunction(const MachineFunction *MF) {
/// endFunction - Gather and emit post-function exception information.
///
-void Win64Exception::endFunction(const MachineFunction *) {
+void Win64Exception::endFunction(const MachineFunction *MF) {
if (!shouldEmitPersonality && !shouldEmitMoves)
return;
- // Map all labels and get rid of any dead landing pads.
- MMI->TidyLandingPads();
+ EHPersonality Per = MMI->getPersonalityType();
+
+ // Get rid of any dead landing pads if we're not using a Windows EH scheme. In
+ // Windows EH schemes, the landing pad is not actually reachable. It only
+ // exists so that we can emit the right table data.
+ if (!isMSVCEHPersonality(Per))
+ MMI->TidyLandingPads();
if (shouldEmitPersonality) {
Asm->OutStreamer.PushSection();
@@ -97,9 +124,10 @@ void Win64Exception::endFunction(const MachineFunction *) {
// Emit the tables appropriate to the personality function in use. If we
// don't recognize the personality, assume it uses an Itanium-style LSDA.
- EHPersonality Per = MMI->getPersonalityType();
if (Per == EHPersonality::MSVC_Win64SEH)
emitCSpecificHandlerTable();
+ else if (Per == EHPersonality::MSVC_CXX)
+ emitCXXFrameHandler3Table(MF);
else
emitExceptionTable();
@@ -108,11 +136,19 @@ void Win64Exception::endFunction(const MachineFunction *) {
Asm->OutStreamer.EmitWinCFIEndProc();
}
-const MCSymbolRefExpr *Win64Exception::createImageRel32(const MCSymbol *Value) {
+const MCExpr *Win64Exception::createImageRel32(const MCSymbol *Value) {
+ if (!Value)
+ return MCConstantExpr::Create(0, Asm->OutContext);
return MCSymbolRefExpr::Create(Value, MCSymbolRefExpr::VK_COFF_IMGREL32,
Asm->OutContext);
}
+const MCExpr *Win64Exception::createImageRel32(const GlobalValue *GV) {
+ if (!GV)
+ return MCConstantExpr::Create(0, Asm->OutContext);
+ return createImageRel32(Asm->getSymbol(GV));
+}
+
/// Emit the language-specific data that __C_specific_handler expects. This
/// handler lives in the x64 Microsoft C runtime and allows catching or cleaning
/// up after faults with __try, __except, and __finally. The typeinfo values
@@ -237,3 +273,231 @@ void Win64Exception::emitCSpecificHandlerTable() {
}
}
}
+
+void Win64Exception::emitCXXFrameHandler3Table(const MachineFunction *MF) {
+ const Function *F = MF->getFunction();
+ const Function *ParentF = MMI->getWinEHParent(F);
+ auto &OS = Asm->OutStreamer;
+ WinEHFuncInfo &FuncInfo = MMI->getWinEHFuncInfo(ParentF);
+
+ StringRef ParentLinkageName =
+ GlobalValue::getRealLinkageName(ParentF->getName());
+
+ MCSymbol *FuncInfoXData =
+ Asm->OutContext.GetOrCreateSymbol(Twine("$cppxdata$", ParentLinkageName));
+ OS.EmitValue(createImageRel32(FuncInfoXData), 4);
+
+ // The Itanium LSDA table sorts similar landing pads together to simplify the
+ // actions table, but we don't need that.
+ SmallVector<const LandingPadInfo *, 64> LandingPads;
+ const std::vector<LandingPadInfo> &PadInfos = MMI->getLandingPads();
+ LandingPads.reserve(PadInfos.size());
+ for (const auto &LP : PadInfos)
+ LandingPads.push_back(&LP);
+
+ RangeMapType PadMap;
+ computePadMap(LandingPads, PadMap);
+
+ // The end label of the previous invoke or nounwind try-range.
+ MCSymbol *LastLabel = Asm->getFunctionBegin();
+
+ // Whether there is a potentially throwing instruction (currently this means
+ // an ordinary call) between the end of the previous try-range and now.
+ bool SawPotentiallyThrowing = false;
+
+ int LastEHState = -2;
+
+ // The parent function and the catch handlers contribute to the 'ip2state'
+ // table.
+ for (const auto &MBB : *MF) {
+ for (const auto &MI : MBB) {
+ if (!MI.isEHLabel()) {
+ if (MI.isCall())
+ SawPotentiallyThrowing |= !callToNoUnwindFunction(&MI);
+ continue;
+ }
+
+ // End of the previous try-range?
+ MCSymbol *BeginLabel = MI.getOperand(0).getMCSymbol();
+ if (BeginLabel == LastLabel)
+ SawPotentiallyThrowing = false;
+
+ // Beginning of a new try-range?
+ RangeMapType::const_iterator L = PadMap.find(BeginLabel);
+ if (L == PadMap.end())
+ // Nope, it was just some random label.
+ continue;
+
+ const PadRange &P = L->second;
+ const LandingPadInfo *LandingPad = LandingPads[P.PadIndex];
+ assert(BeginLabel == LandingPad->BeginLabels[P.RangeIndex] &&
+ "Inconsistent landing pad map!");
+
+ if (SawPotentiallyThrowing) {
+ FuncInfo.IPToStateList.push_back(std::make_pair(LastLabel, -1));
+ SawPotentiallyThrowing = false;
+ LastEHState = -1;
+ }
+
+ if (LandingPad->WinEHState != LastEHState)
+ FuncInfo.IPToStateList.push_back(
+ std::make_pair(BeginLabel, LandingPad->WinEHState));
+ LastEHState = LandingPad->WinEHState;
+ LastLabel = LandingPad->EndLabels[P.RangeIndex];
+ }
+ }
+
+ // Defer emission until we've visited the parent function and all the catch
+ // handlers. Cleanups don't contribute to the ip2state table yet, so don't
+ // count them.
+ if (ParentF != F && !FuncInfo.CatchHandlerMaxState.count(F))
+ return;
+ ++FuncInfo.NumIPToStateFuncsVisited;
+ if (FuncInfo.NumIPToStateFuncsVisited != FuncInfo.CatchHandlerMaxState.size())
+ return;
+
+ MCSymbol *UnwindMapXData = nullptr;
+ MCSymbol *TryBlockMapXData = nullptr;
+ MCSymbol *IPToStateXData = nullptr;
+ if (!FuncInfo.UnwindMap.empty())
+ UnwindMapXData = Asm->OutContext.GetOrCreateSymbol(
+ Twine("$stateUnwindMap$", ParentLinkageName));
+ if (!FuncInfo.TryBlockMap.empty())
+ TryBlockMapXData = Asm->OutContext.GetOrCreateSymbol(
+ Twine("$tryMap$", ParentLinkageName));
+ if (!FuncInfo.IPToStateList.empty())
+ IPToStateXData = Asm->OutContext.GetOrCreateSymbol(
+ Twine("$ip2state$", ParentLinkageName));
+
+ // FuncInfo {
+ // uint32_t MagicNumber;
+ // int32_t MaxState;
+ // UnwindMapEntry *UnwindMap;
+ // uint32_t NumTryBlocks;
+ // TryBlockMapEntry *TryBlockMap;
+ // uint32_t IPMapEntries;
+ // IPToStateMapEntry *IPToStateMap;
+ // uint32_t UnwindHelp; // (x64/ARM only)
+ // ESTypeList *ESTypeList;
+ // int32_t EHFlags;
+ // }
+ // EHFlags & 1 -> Synchronous exceptions only, no async exceptions.
+ // EHFlags & 2 -> ???
+ // EHFlags & 4 -> The function is noexcept(true), unwinding can't continue.
+ OS.EmitLabel(FuncInfoXData);
+ OS.EmitIntValue(0x19930522, 4); // MagicNumber
+ OS.EmitIntValue(FuncInfo.UnwindMap.size(), 4); // MaxState
+ OS.EmitValue(createImageRel32(UnwindMapXData), 4); // UnwindMap
+ OS.EmitIntValue(FuncInfo.TryBlockMap.size(), 4); // NumTryBlocks
+ OS.EmitValue(createImageRel32(TryBlockMapXData), 4); // TryBlockMap
+ OS.EmitIntValue(FuncInfo.IPToStateList.size(), 4); // IPMapEntries
+ OS.EmitValue(createImageRel32(IPToStateXData), 4); // IPToStateMap
+ OS.EmitIntValue(FuncInfo.UnwindHelpFrameOffset, 4); // UnwindHelp
+ OS.EmitIntValue(0, 4); // ESTypeList
+ OS.EmitIntValue(1, 4); // EHFlags
+
+ // UnwindMapEntry {
+ // int32_t ToState;
+ // void (*Action)();
+ // };
+ if (UnwindMapXData) {
+ OS.EmitLabel(UnwindMapXData);
+ for (const WinEHUnwindMapEntry &UME : FuncInfo.UnwindMap) {
+ OS.EmitIntValue(UME.ToState, 4); // ToState
+ OS.EmitValue(createImageRel32(UME.Cleanup), 4); // Action
+ }
+ }
+
+ // TryBlockMap {
+ // int32_t TryLow;
+ // int32_t TryHigh;
+ // int32_t CatchHigh;
+ // int32_t NumCatches;
+ // HandlerType *HandlerArray;
+ // };
+ if (TryBlockMapXData) {
+ OS.EmitLabel(TryBlockMapXData);
+ SmallVector<MCSymbol *, 1> HandlerMaps;
+ for (size_t I = 0, E = FuncInfo.TryBlockMap.size(); I != E; ++I) {
+ WinEHTryBlockMapEntry &TBME = FuncInfo.TryBlockMap[I];
+ MCSymbol *HandlerMapXData = nullptr;
+
+ if (!TBME.HandlerArray.empty())
+ HandlerMapXData =
+ Asm->OutContext.GetOrCreateSymbol(Twine("$handlerMap$")
+ .concat(Twine(I))
+ .concat("$")
+ .concat(ParentLinkageName));
+
+ HandlerMaps.push_back(HandlerMapXData);
+
+ int CatchHigh = -1;
+ for (WinEHHandlerType &HT : TBME.HandlerArray)
+ CatchHigh =
+ std::max(CatchHigh, FuncInfo.CatchHandlerMaxState[HT.Handler]);
+
+ assert(TBME.TryLow <= TBME.TryHigh);
+ assert(CatchHigh > TBME.TryHigh);
+ OS.EmitIntValue(TBME.TryLow, 4); // TryLow
+ OS.EmitIntValue(TBME.TryHigh, 4); // TryHigh
+ OS.EmitIntValue(CatchHigh, 4); // CatchHigh
+ OS.EmitIntValue(TBME.HandlerArray.size(), 4); // NumCatches
+ OS.EmitValue(createImageRel32(HandlerMapXData), 4); // HandlerArray
+ }
+
+ for (size_t I = 0, E = FuncInfo.TryBlockMap.size(); I != E; ++I) {
+ WinEHTryBlockMapEntry &TBME = FuncInfo.TryBlockMap[I];
+ MCSymbol *HandlerMapXData = HandlerMaps[I];
+ if (!HandlerMapXData)
+ continue;
+ // HandlerType {
+ // int32_t Adjectives;
+ // TypeDescriptor *Type;
+ // int32_t CatchObjOffset;
+ // void (*Handler)();
+ // int32_t ParentFrameOffset; // x64 only
+ // };
+ OS.EmitLabel(HandlerMapXData);
+ for (const WinEHHandlerType &HT : TBME.HandlerArray) {
+ MCSymbol *ParentFrameOffset =
+ Asm->OutContext.getOrCreateParentFrameOffsetSymbol(
+ GlobalValue::getRealLinkageName(HT.Handler->getName()));
+ const MCSymbolRefExpr *ParentFrameOffsetRef = MCSymbolRefExpr::Create(
+ ParentFrameOffset, MCSymbolRefExpr::VK_None, Asm->OutContext);
+
+ // Get the frame escape label with the offset of the catch object. If
+ // the index is -1, then there is no catch object, and we should emit an
+ // offset of zero, indicating that no copy will occur.
+ const MCExpr *FrameAllocOffsetRef = nullptr;
+ if (HT.CatchObjRecoverIdx >= 0) {
+ MCSymbol *FrameAllocOffset =
+ Asm->OutContext.getOrCreateFrameAllocSymbol(
+ GlobalValue::getRealLinkageName(ParentF->getName()),
+ HT.CatchObjRecoverIdx);
+ FrameAllocOffsetRef = MCSymbolRefExpr::Create(
+ FrameAllocOffset, MCSymbolRefExpr::VK_None, Asm->OutContext);
+ } else {
+ FrameAllocOffsetRef = MCConstantExpr::Create(0, Asm->OutContext);
+ }
+
+ OS.EmitIntValue(HT.Adjectives, 4); // Adjectives
+ OS.EmitValue(createImageRel32(HT.TypeDescriptor), 4); // Type
+ OS.EmitValue(FrameAllocOffsetRef, 4); // CatchObjOffset
+ OS.EmitValue(createImageRel32(HT.Handler), 4); // Handler
+ OS.EmitValue(ParentFrameOffsetRef, 4); // ParentFrameOffset
+ }
+ }
+ }
+
+ // IPToStateMapEntry {
+ // void *IP;
+ // int32_t State;
+ // };
+ if (IPToStateXData) {
+ OS.EmitLabel(IPToStateXData);
+ for (auto &IPStatePair : FuncInfo.IPToStateList) {
+ OS.EmitValue(createImageRel32(IPStatePair.first), 4); // IP
+ OS.EmitIntValue(IPStatePair.second, 4); // State
+ }
+ }
+}
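Editor's note: the ip2state loop above appends an entry only when the EH state changes, and inserts a -1 ("no handler") entry when a potentially throwing call falls between try-ranges. A simplified standalone sketch of that compaction follows, with hypothetical Sym/TryRange types standing in for MCSymbol and the landing-pad bookkeeping; it shows the shape of the table, not the emitter's real data structures.

// Sketch: walk try-ranges in address order and record (label, state) pairs
// only when the state changes; stretches containing an uncovered throwing
// call get a -1 entry starting at the end of the previous range.
#include <cassert>
#include <utility>
#include <vector>

struct Sym { int Id; };

struct TryRange {
  const Sym *Begin;
  const Sym *End;
  int State;               // WinEH state of the covering landing pad
  bool ThrowingCallBefore; // a call with unknown unwind precedes this range
};

std::vector<std::pair<const Sym *, int>>
buildIPToState(const std::vector<TryRange> &Ranges, const Sym *FuncBegin) {
  std::vector<std::pair<const Sym *, int>> IPToState;
  const Sym *LastLabel = FuncBegin;
  int LastState = -2; // sentinel: nothing emitted yet
  for (const TryRange &R : Ranges) {
    if (R.ThrowingCallBefore) {
      IPToState.emplace_back(LastLabel, -1); // gap with no handler
      LastState = -1;
    }
    if (R.State != LastState)
      IPToState.emplace_back(R.Begin, R.State);
    LastState = R.State;
    LastLabel = R.End;
  }
  return IPToState;
}

int main() {
  Sym B{0}, R0b{1}, R0e{2}, R1b{3}, R1e{4};
  std::vector<TryRange> Ranges = {{&R0b, &R0e, 0, false},
                                  {&R1b, &R1e, 0, true}};
  auto Table = buildIPToState(Ranges, &B);
  assert(Table.size() == 3); // (R0b,0), (R0e,-1), (R1b,0)
}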
diff --git a/lib/CodeGen/AsmPrinter/Win64Exception.h b/lib/CodeGen/AsmPrinter/Win64Exception.h
index b2d5d1b..5f4237f 100644
--- a/lib/CodeGen/AsmPrinter/Win64Exception.h
+++ b/lib/CodeGen/AsmPrinter/Win64Exception.h
@@ -17,7 +17,9 @@
#include "EHStreamer.h"
namespace llvm {
+class GlobalValue;
class MachineFunction;
+class MCExpr;
class Win64Exception : public EHStreamer {
/// Per-function flag to indicate if personality info should be emitted.
@@ -31,14 +33,17 @@ class Win64Exception : public EHStreamer {
void emitCSpecificHandlerTable();
- const MCSymbolRefExpr *createImageRel32(const MCSymbol *Value);
+ void emitCXXFrameHandler3Table(const MachineFunction *MF);
+
+ const MCExpr *createImageRel32(const MCSymbol *Value);
+ const MCExpr *createImageRel32(const GlobalValue *GV);
public:
//===--------------------------------------------------------------------===//
// Main entry points.
//
Win64Exception(AsmPrinter *A);
- virtual ~Win64Exception();
+ ~Win64Exception() override;
/// Emit all exception information that should come after the content.
void endModule() override;
diff --git a/lib/CodeGen/AsmPrinter/WinCodeViewLineTables.cpp b/lib/CodeGen/AsmPrinter/WinCodeViewLineTables.cpp
index d2b4eec..276e7df 100644
--- a/lib/CodeGen/AsmPrinter/WinCodeViewLineTables.cpp
+++ b/lib/CodeGen/AsmPrinter/WinCodeViewLineTables.cpp
@@ -20,14 +20,13 @@ namespace llvm {
StringRef WinCodeViewLineTables::getFullFilepath(const MDNode *S) {
assert(S);
- DIDescriptor D(S);
- assert((D.isCompileUnit() || D.isFile() || D.isSubprogram() ||
- D.isLexicalBlockFile() || D.isLexicalBlock()) &&
+ assert((isa<MDCompileUnit>(S) || isa<MDFile>(S) || isa<MDSubprogram>(S) ||
+ isa<MDLexicalBlockBase>(S)) &&
"Unexpected scope info");
- DIScope Scope(S);
- StringRef Dir = Scope.getDirectory(),
- Filename = Scope.getFilename();
+ auto *Scope = cast<MDScope>(S);
+ StringRef Dir = Scope->getDirectory(),
+ Filename = Scope->getFilename();
char *&Result = DirAndFilenameToFilepathMap[std::make_pair(Dir, Filename)];
if (Result)
return Result;
@@ -40,7 +39,7 @@ StringRef WinCodeViewLineTables::getFullFilepath(const MDNode *S) {
if (Filename.find(':') == 1)
Filepath = Filename;
else
- Filepath = (Dir + Twine("\\") + Filename).str();
+ Filepath = (Dir + "\\" + Filename).str();
// Canonicalize the path. We have to do it textually because we may no longer
// have access to the file in the filesystem.
@@ -81,7 +80,7 @@ StringRef WinCodeViewLineTables::getFullFilepath(const MDNode *S) {
void WinCodeViewLineTables::maybeRecordLocation(DebugLoc DL,
const MachineFunction *MF) {
- const MDNode *Scope = DL.getScope(MF->getFunction()->getContext());
+ const MDNode *Scope = DL.getScope();
if (!Scope)
return;
StringRef Filename = getFullFilepath(Scope);
@@ -193,7 +192,7 @@ void WinCodeViewLineTables::emitDebugInfoForFunction(const Function *GV) {
StringRef GVName = GV->getName();
StringRef FuncName;
if (DISubprogram SP = getDISubprogram(GV))
- FuncName = SP.getDisplayName();
+ FuncName = SP->getDisplayName();
// FIXME Clang currently sets DisplayName to "bar" for a C++
// "namespace_foo::bar" function, see PR21528. Luckily, dbghelp.dll is trying
@@ -330,7 +329,7 @@ void WinCodeViewLineTables::beginFunction(const MachineFunction *MF) {
DebugLoc PrologEndLoc;
bool EmptyPrologue = true;
for (const auto &MBB : *MF) {
- if (!PrologEndLoc.isUnknown())
+ if (PrologEndLoc)
break;
for (const auto &MI : MBB) {
if (MI.isDebugValue())
@@ -339,8 +338,7 @@ void WinCodeViewLineTables::beginFunction(const MachineFunction *MF) {
// First known non-DBG_VALUE and non-frame setup location marks
// the beginning of the function body.
// FIXME: do we need the first subcondition?
- if (!MI.getFlag(MachineInstr::FrameSetup) &&
- (!MI.getDebugLoc().isUnknown())) {
+ if (!MI.getFlag(MachineInstr::FrameSetup) && MI.getDebugLoc()) {
PrologEndLoc = MI.getDebugLoc();
break;
}
@@ -348,9 +346,8 @@ void WinCodeViewLineTables::beginFunction(const MachineFunction *MF) {
}
}
// Record beginning of function if we have a non-empty prologue.
- if (!PrologEndLoc.isUnknown() && !EmptyPrologue) {
- DebugLoc FnStartDL =
- PrologEndLoc.getFnDebugLoc(MF->getFunction()->getContext());
+ if (PrologEndLoc && !EmptyPrologue) {
+ DebugLoc FnStartDL = PrologEndLoc.getFnDebugLoc();
maybeRecordLocation(FnStartDL, MF);
}
}
@@ -377,7 +374,7 @@ void WinCodeViewLineTables::beginInstruction(const MachineInstr *MI) {
if (!Asm || MI->isDebugValue() || MI->getFlag(MachineInstr::FrameSetup))
return;
DebugLoc DL = MI->getDebugLoc();
- if (DL == PrevInstLoc || DL.isUnknown())
+ if (DL == PrevInstLoc || !DL)
return;
maybeRecordLocation(DL, Asm->MF);
}
diff --git a/lib/CodeGen/AsmPrinter/WinCodeViewLineTables.h b/lib/CodeGen/AsmPrinter/WinCodeViewLineTables.h
index 8492eac..c66d141 100644
--- a/lib/CodeGen/AsmPrinter/WinCodeViewLineTables.h
+++ b/lib/CodeGen/AsmPrinter/WinCodeViewLineTables.h
@@ -114,7 +114,7 @@ class WinCodeViewLineTables : public AsmPrinterHandler {
public:
WinCodeViewLineTables(AsmPrinter *Asm);
- ~WinCodeViewLineTables() {
+ ~WinCodeViewLineTables() override {
for (DirAndFilenameToFilepathMapTy::iterator
I = DirAndFilenameToFilepathMap.begin(),
E = DirAndFilenameToFilepathMap.end();
diff --git a/lib/CodeGen/CodeGenPrepare.cpp b/lib/CodeGen/CodeGenPrepare.cpp
index 6c9d048..35376e1 100644
--- a/lib/CodeGen/CodeGenPrepare.cpp
+++ b/lib/CodeGen/CodeGenPrepare.cpp
@@ -693,11 +693,11 @@ static bool SinkCast(CastInst *CI) {
InsertedCast =
CastInst::Create(CI->getOpcode(), CI->getOperand(0), CI->getType(), "",
InsertPt);
- MadeChange = true;
}
// Replace a use of the cast with a use of the new cast.
TheUse = InsertedCast;
+ MadeChange = true;
++NumCastUses;
}
@@ -747,13 +747,60 @@ static bool OptimizeNoopCopyExpression(CastInst *CI, const TargetLowering &TLI){
return SinkCast(CI);
}
-/// OptimizeCmpExpression - sink the given CmpInst into user blocks to reduce
+/// CombineUAddWithOverflow - try to combine CI into a call to the
+/// llvm.uadd.with.overflow intrinsic if possible.
+///
+/// Return true if any changes were made.
+static bool CombineUAddWithOverflow(CmpInst *CI) {
+ Value *A, *B;
+ Instruction *AddI;
+ if (!match(CI,
+ m_UAddWithOverflow(m_Value(A), m_Value(B), m_Instruction(AddI))))
+ return false;
+
+ Type *Ty = AddI->getType();
+ if (!isa<IntegerType>(Ty))
+ return false;
+
+ // We don't want to move around uses of condition values this late, so we
+ // check if it is legal to create the call to the intrinsic in the basic
+ // block containing the icmp:
+
+ if (AddI->getParent() != CI->getParent() && !AddI->hasOneUse())
+ return false;
+
+#ifndef NDEBUG
+ // Someday m_UAddWithOverflow may get smarter, but this is a safe assumption
+ // for now:
+ if (AddI->hasOneUse())
+ assert(*AddI->user_begin() == CI && "expected!");
+#endif
+
+ Module *M = CI->getParent()->getParent()->getParent();
+ Value *F = Intrinsic::getDeclaration(M, Intrinsic::uadd_with_overflow, Ty);
+
+ auto *InsertPt = AddI->hasOneUse() ? CI : AddI;
+
+ auto *UAddWithOverflow =
+ CallInst::Create(F, {A, B}, "uadd.overflow", InsertPt);
+ auto *UAdd = ExtractValueInst::Create(UAddWithOverflow, 0, "uadd", InsertPt);
+ auto *Overflow =
+ ExtractValueInst::Create(UAddWithOverflow, 1, "overflow", InsertPt);
+
+ CI->replaceAllUsesWith(Overflow);
+ AddI->replaceAllUsesWith(UAdd);
+ CI->eraseFromParent();
+ AddI->eraseFromParent();
+ return true;
+}
+
+/// SinkCmpExpression - Sink the given CmpInst into user blocks to reduce
/// the number of virtual registers that must be created and coalesced. This is
/// a clear win except on targets with multiple condition code registers
/// (PowerPC), where it might lose; some adjustment may be wanted there.
///
/// Return true if any changes are made.
-static bool OptimizeCmpExpression(CmpInst *CI) {
+static bool SinkCmpExpression(CmpInst *CI) {
BasicBlock *DefBB = CI->getParent();
/// InsertedCmp - Only insert a cmp in each block once.
@@ -787,21 +834,33 @@ static bool OptimizeCmpExpression(CmpInst *CI) {
CmpInst::Create(CI->getOpcode(),
CI->getPredicate(), CI->getOperand(0),
CI->getOperand(1), "", InsertPt);
- MadeChange = true;
}
// Replace a use of the cmp with a use of the new cmp.
TheUse = InsertedCmp;
+ MadeChange = true;
++NumCmpUses;
}
// If we removed all uses, nuke the cmp.
- if (CI->use_empty())
+ if (CI->use_empty()) {
CI->eraseFromParent();
+ MadeChange = true;
+ }
return MadeChange;
}
+static bool OptimizeCmpExpression(CmpInst *CI) {
+ if (SinkCmpExpression(CI))
+ return true;
+
+ if (CombineUAddWithOverflow(CI))
+ return true;
+
+ return false;
+}
+
/// isExtractBitsCandidateUse - Check if the candidates could
/// be combined with shift instruction, which includes:
/// 1. Truncate instruction
@@ -1081,8 +1140,9 @@ static void ScalarizeMaskedLoad(CallInst *CI) {
//
CondBlock = IfBlock->splitBasicBlock(InsertPt, "cond.load");
Builder.SetInsertPoint(InsertPt);
-
- Value* Gep = Builder.CreateInBoundsGEP(FirstEltPtr, Builder.getInt32(Idx));
+
+ Value *Gep =
+ Builder.CreateInBoundsGEP(EltTy, FirstEltPtr, Builder.getInt32(Idx));
LoadInst* Load = Builder.CreateLoad(Gep, false);
VResult = Builder.CreateInsertElement(VResult, Load, Builder.getInt32(Idx));
@@ -1176,7 +1236,8 @@ static void ScalarizeMaskedStore(CallInst *CI) {
Builder.SetInsertPoint(InsertPt);
Value *OneElt = Builder.CreateExtractElement(Src, Builder.getInt32(Idx));
- Value* Gep = Builder.CreateInBoundsGEP(FirstEltPtr, Builder.getInt32(Idx));
+ Value *Gep =
+ Builder.CreateInBoundsGEP(EltTy, FirstEltPtr, Builder.getInt32(Idx));
Builder.CreateStore(OneElt, Gep);
// Create "else" block, fill it in the next iteration
@@ -1227,13 +1288,25 @@ bool CodeGenPrepare::OptimizeCallInst(CallInst *CI, bool& ModifiedDT) {
cast<PointerType>(Arg->getType())->getAddressSpace()), 0);
Value *Val = Arg->stripAndAccumulateInBoundsConstantOffsets(*TD, Offset);
uint64_t Offset2 = Offset.getLimitedValue();
+ if ((Offset2 & (PrefAlign-1)) != 0)
+ continue;
AllocaInst *AI;
- if ((Offset2 & (PrefAlign-1)) == 0 &&
- (AI = dyn_cast<AllocaInst>(Val)) &&
+ if ((AI = dyn_cast<AllocaInst>(Val)) &&
AI->getAlignment() < PrefAlign &&
TD->getTypeAllocSize(AI->getAllocatedType()) >= MinSize + Offset2)
AI->setAlignment(PrefAlign);
- // TODO: Also align GlobalVariables
+ // Global variables can only be aligned if they are defined in this
+ // object (i.e. they are uniquely initialized in this object), and
+ // over-aligning global variables that have an explicit section is
+ // forbidden.
+ GlobalVariable *GV;
+ if ((GV = dyn_cast<GlobalVariable>(Val)) &&
+ GV->hasUniqueInitializer() &&
+ !GV->hasSection() &&
+ GV->getAlignment() < PrefAlign &&
+ TD->getTypeAllocSize(
+ GV->getType()->getElementType()) >= MinSize + Offset2)
+ GV->setAlignment(PrefAlign);
}
// If this is a memcpy (or similar) then we may be able to improve the
// alignment
@@ -1841,7 +1914,7 @@ class TypePromotionTransaction {
Inst->removeFromParent();
}
- ~InstructionRemover() { delete Replacer; }
+ ~InstructionRemover() override { delete Replacer; }
/// \brief Really remove the instruction.
void commit() override { delete Inst; }
@@ -3233,7 +3306,8 @@ bool CodeGenPrepare::OptimizeMemoryInst(Instruction *MemoryInst, Value *Addr,
return false;
} else {
Type *I8PtrTy =
- Builder.getInt8PtrTy(Addr->getType()->getPointerAddressSpace());
+ Builder.getInt8PtrTy(Addr->getType()->getPointerAddressSpace());
+ Type *I8Ty = Builder.getInt8Ty();
// Start with the base register. Do this first so that subsequent address
// matching finds it last, which will prevent it from trying to match it
@@ -3285,7 +3359,7 @@ bool CodeGenPrepare::OptimizeMemoryInst(Instruction *MemoryInst, Value *Addr,
// SDAG consecutive load/store merging.
if (ResultPtr->getType() != I8PtrTy)
ResultPtr = Builder.CreateBitCast(ResultPtr, I8PtrTy);
- ResultPtr = Builder.CreateGEP(ResultPtr, ResultIndex, "sunkaddr");
+ ResultPtr = Builder.CreateGEP(I8Ty, ResultPtr, ResultIndex, "sunkaddr");
}
ResultIndex = V;
@@ -3296,7 +3370,7 @@ bool CodeGenPrepare::OptimizeMemoryInst(Instruction *MemoryInst, Value *Addr,
} else {
if (ResultPtr->getType() != I8PtrTy)
ResultPtr = Builder.CreateBitCast(ResultPtr, I8PtrTy);
- SunkAddr = Builder.CreateGEP(ResultPtr, ResultIndex, "sunkaddr");
+ SunkAddr = Builder.CreateGEP(I8Ty, ResultPtr, ResultIndex, "sunkaddr");
}
if (SunkAddr->getType() != Addr->getType())
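Editor's note: CombineUAddWithOverflow above rewrites the "add, then compare the sum against an operand" overflow idiom into a single llvm.uadd.with.overflow call. The source-level pattern it targets, and the one-operation form it produces, look roughly like the sketch below; __builtin_add_overflow (a GCC/Clang builtin) is used purely as an analogy for the intrinsic, not as the pass's actual output.

// Sketch: separate add + unsigned-wrap compare vs. a single operation that
// yields both the sum and the overflow bit.
#include <cassert>
#include <cstdint>

// Before the transform: separate add and compare (icmp ult (a+b), a).
bool addOverflowsSeparate(uint32_t A, uint32_t B, uint32_t &Sum) {
  Sum = A + B;
  return Sum < A;
}

// After the transform: one operation yields both results.
bool addOverflowsCombined(uint32_t A, uint32_t B, uint32_t &Sum) {
  return __builtin_add_overflow(A, B, &Sum);
}

int main() {
  uint32_t S1, S2;
  assert(addOverflowsSeparate(0xFFFFFFFFu, 2u, S1) ==
         addOverflowsCombined(0xFFFFFFFFu, 2u, S2));
  assert(S1 == S2 && S1 == 1u);
}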
diff --git a/lib/CodeGen/CriticalAntiDepBreaker.h b/lib/CodeGen/CriticalAntiDepBreaker.h
index ceef74d..af011a0 100644
--- a/lib/CodeGen/CriticalAntiDepBreaker.h
+++ b/lib/CodeGen/CriticalAntiDepBreaker.h
@@ -69,7 +69,7 @@ class TargetRegisterInfo;
public:
CriticalAntiDepBreaker(MachineFunction& MFi, const RegisterClassInfo&);
- ~CriticalAntiDepBreaker();
+ ~CriticalAntiDepBreaker() override;
/// Initialize anti-dep breaking for a new basic block.
void StartBlock(MachineBasicBlock *BB) override;
diff --git a/lib/CodeGen/EarlyIfConversion.cpp b/lib/CodeGen/EarlyIfConversion.cpp
index 8f74271..6cde4c2 100644
--- a/lib/CodeGen/EarlyIfConversion.cpp
+++ b/lib/CodeGen/EarlyIfConversion.cpp
@@ -797,9 +797,8 @@ bool EarlyIfConverter::runOnMachineFunction(MachineFunction &MF) {
// if-conversion in a single pass. The tryConvertIf() function may erase
// blocks, but only blocks dominated by the head block. This makes it safe to
// update the dominator tree while the post-order iterator is still active.
- for (po_iterator<MachineDominatorTree*>
- I = po_begin(DomTree), E = po_end(DomTree); I != E; ++I)
- if (tryConvertIf(I->getBlock()))
+ for (auto DomNode : post_order(DomTree))
+ if (tryConvertIf(DomNode->getBlock()))
Changed = true;
return Changed;
diff --git a/lib/CodeGen/GCMetadata.cpp b/lib/CodeGen/GCMetadata.cpp
index a2c5fce..16cd9e8 100644
--- a/lib/CodeGen/GCMetadata.cpp
+++ b/lib/CodeGen/GCMetadata.cpp
@@ -99,10 +99,6 @@ void Printer::getAnalysisUsage(AnalysisUsage &AU) const {
static const char *DescKind(GC::PointKind Kind) {
switch (Kind) {
- case GC::Loop:
- return "loop";
- case GC::Return:
- return "return";
case GC::PreCall:
return "pre-call";
case GC::PostCall:
diff --git a/lib/CodeGen/GCRootLowering.cpp b/lib/CodeGen/GCRootLowering.cpp
index 9d38e4c..ac35165 100644
--- a/lib/CodeGen/GCRootLowering.cpp
+++ b/lib/CodeGen/GCRootLowering.cpp
@@ -332,19 +332,22 @@ bool GCMachineCodeAnalysis::runOnMachineFunction(MachineFunction &MF) {
return false;
FI = &getAnalysis<GCModuleInfo>().getFunctionInfo(*MF.getFunction());
- if (!FI->getStrategy().needsSafePoints())
- return false;
-
MMI = &getAnalysis<MachineModuleInfo>();
TII = MF.getSubtarget().getInstrInfo();
- // Find the size of the stack frame.
- FI->setFrameSize(MF.getFrameInfo()->getStackSize());
+ // Find the size of the stack frame. There may be no correct static frame
+ // size; if so, we use UINT64_MAX to represent it.
+ const MachineFrameInfo *MFI = MF.getFrameInfo();
+ const TargetRegisterInfo *RegInfo = MF.getSubtarget().getRegisterInfo();
+ const bool DynamicFrameSize = MFI->hasVarSizedObjects() ||
+ RegInfo->needsStackRealignment(MF);
+ FI->setFrameSize(DynamicFrameSize ? UINT64_MAX : MFI->getStackSize());
// Find all safe points.
- FindSafePoints(MF);
+ if (FI->getStrategy().needsSafePoints())
+ FindSafePoints(MF);
- // Find the stack offsets for all roots.
+ // Find the concrete stack offsets for all roots (stack slots).
FindStackOffsets(MF);
return false;
diff --git a/lib/CodeGen/GlobalMerge.cpp b/lib/CodeGen/GlobalMerge.cpp
index 4188e5d..153ba1a 100644
--- a/lib/CodeGen/GlobalMerge.cpp
+++ b/lib/CodeGen/GlobalMerge.cpp
@@ -73,9 +73,10 @@ using namespace llvm;
#define DEBUG_TYPE "global-merge"
+// FIXME: This is only useful as a last-resort way to disable the pass.
cl::opt<bool>
EnableGlobalMerge("enable-global-merge", cl::Hidden,
- cl::desc("Enable global merge pass"),
+ cl::desc("Enable the global merge pass"),
cl::init(true));
static cl::opt<bool>
@@ -222,7 +223,8 @@ bool GlobalMerge::doMerge(SmallVectorImpl<GlobalVariable*> &Globals,
ConstantInt::get(Int32Ty, 0),
ConstantInt::get(Int32Ty, k-i)
};
- Constant *GEP = ConstantExpr::getInBoundsGetElementPtr(MergedGV, Idx);
+ Constant *GEP =
+ ConstantExpr::getInBoundsGetElementPtr(MergedTy, MergedGV, Idx);
Globals[k]->replaceAllUsesWith(GEP);
Globals[k]->eraseFromParent();
diff --git a/lib/CodeGen/InlineSpiller.cpp b/lib/CodeGen/InlineSpiller.cpp
index f0d407f..c7e7e58 100644
--- a/lib/CodeGen/InlineSpiller.cpp
+++ b/lib/CodeGen/InlineSpiller.cpp
@@ -135,7 +135,7 @@ private:
// Dead defs generated during spilling.
SmallVector<MachineInstr*, 8> DeadDefs;
- ~InlineSpiller() {}
+ ~InlineSpiller() override {}
public:
InlineSpiller(MachineFunctionPass &pass, MachineFunction &mf, VirtRegMap &vrm)
@@ -1232,6 +1232,8 @@ void InlineSpiller::spillAroundUses(unsigned Reg) {
DebugLoc DL = MI->getDebugLoc();
DEBUG(dbgs() << "Modifying debug info due to spill:" << "\t" << *MI);
MachineBasicBlock *MBB = MI->getParent();
+ assert(cast<MDLocalVariable>(Var)->isValidLocationForIntrinsic(DL) &&
+ "Expected inlined-at fields to agree");
BuildMI(*MBB, MBB->erase(MI), DL, TII.get(TargetOpcode::DBG_VALUE))
.addFrameIndex(StackSlot)
.addImm(Offset)
diff --git a/lib/CodeGen/LLVMTargetMachine.cpp b/lib/CodeGen/LLVMTargetMachine.cpp
index 0fb0c46..61d68f6 100644
--- a/lib/CodeGen/LLVMTargetMachine.cpp
+++ b/lib/CodeGen/LLVMTargetMachine.cpp
@@ -140,12 +140,9 @@ static MCContext *addPassesToGenerateCode(LLVMTargetMachine *TM,
return &MMI->getContext();
}
-bool LLVMTargetMachine::addPassesToEmitFile(PassManagerBase &PM,
- formatted_raw_ostream &Out,
- CodeGenFileType FileType,
- bool DisableVerify,
- AnalysisID StartAfter,
- AnalysisID StopAfter) {
+bool LLVMTargetMachine::addPassesToEmitFile(
+ PassManagerBase &PM, raw_pwrite_stream &Out, CodeGenFileType FileType,
+ bool DisableVerify, AnalysisID StartAfter, AnalysisID StopAfter) {
// Add common CodeGen passes.
MCContext *Context = addPassesToGenerateCode(this, PM, DisableVerify,
StartAfter, StopAfter);
@@ -175,7 +172,7 @@ bool LLVMTargetMachine::addPassesToEmitFile(PassManagerBase &PM,
switch (FileType) {
case CGFT_AssemblyFile: {
MCInstPrinter *InstPrinter = getTarget().createMCInstPrinter(
- MAI.getAssemblerDialect(), MAI, MII, MRI, STI);
+ Triple(getTargetTriple()), MAI.getAssemblerDialect(), MAI, MII, MRI);
// Create a code emitter if asked to show the encoding.
MCCodeEmitter *MCE = nullptr;
@@ -184,8 +181,9 @@ bool LLVMTargetMachine::addPassesToEmitFile(PassManagerBase &PM,
MCAsmBackend *MAB = getTarget().createMCAsmBackend(MRI, getTargetTriple(),
TargetCPU);
+ auto FOut = llvm::make_unique<formatted_raw_ostream>(Out);
MCStreamer *S = getTarget().createAsmStreamer(
- *Context, Out, Options.MCOptions.AsmVerbose,
+ *Context, std::move(FOut), Options.MCOptions.AsmVerbose,
Options.MCOptions.MCUseDwarfDirectory, InstPrinter, MCE, MAB,
Options.MCOptions.ShowMCInst);
AsmStreamer.reset(S);
@@ -229,9 +227,8 @@ bool LLVMTargetMachine::addPassesToEmitFile(PassManagerBase &PM,
/// code is not supported. It fills the MCContext Ctx pointer which can be
/// used to build custom MCStreamer.
///
-bool LLVMTargetMachine::addPassesToEmitMC(PassManagerBase &PM,
- MCContext *&Ctx,
- raw_ostream &Out,
+bool LLVMTargetMachine::addPassesToEmitMC(PassManagerBase &PM, MCContext *&Ctx,
+ raw_pwrite_stream &Out,
bool DisableVerify) {
// Add common CodeGen passes.
Ctx = addPassesToGenerateCode(this, PM, DisableVerify, nullptr, nullptr);
diff --git a/lib/CodeGen/LexicalScopes.cpp b/lib/CodeGen/LexicalScopes.cpp
index 9eaf7da..d6998d6 100644
--- a/lib/CodeGen/LexicalScopes.cpp
+++ b/lib/CodeGen/LexicalScopes.cpp
@@ -59,11 +59,11 @@ void LexicalScopes::extractLexicalScopes(
for (const auto &MBB : *MF) {
const MachineInstr *RangeBeginMI = nullptr;
const MachineInstr *PrevMI = nullptr;
- DebugLoc PrevDL;
+ const MDLocation *PrevDL = nullptr;
for (const auto &MInsn : MBB) {
// Check if instruction has valid location information.
- const DebugLoc MIDL = MInsn.getDebugLoc();
- if (MIDL.isUnknown()) {
+ const MDLocation *MIDL = MInsn.getDebugLoc();
+ if (!MIDL) {
PrevMI = &MInsn;
continue;
}
@@ -96,7 +96,7 @@ void LexicalScopes::extractLexicalScopes(
}
// Create last instruction range.
- if (RangeBeginMI && PrevMI && !PrevDL.isUnknown()) {
+ if (RangeBeginMI && PrevMI && PrevDL) {
InsnRange R(RangeBeginMI, PrevMI);
MIRanges.push_back(R);
MI2ScopeMap[RangeBeginMI] = getOrCreateLexicalScope(PrevDL);
@@ -106,20 +106,17 @@ void LexicalScopes::extractLexicalScopes(
/// findLexicalScope - Find lexical scope, either regular or inlined, for the
/// given DebugLoc. Return NULL if not found.
-LexicalScope *LexicalScopes::findLexicalScope(DebugLoc DL) {
- MDNode *Scope = nullptr;
- MDNode *IA = nullptr;
- DL.getScopeAndInlinedAt(Scope, IA, MF->getFunction()->getContext());
+LexicalScope *LexicalScopes::findLexicalScope(const MDLocation *DL) {
+ MDLocalScope *Scope = DL->getScope();
if (!Scope)
return nullptr;
// The scope that we were created with could have an extra file - which
// isn't what we care about in this case.
- DIDescriptor D = DIDescriptor(Scope);
- if (D.isLexicalBlockFile())
- Scope = DILexicalBlockFile(Scope).getScope();
+ if (auto *File = dyn_cast<MDLexicalBlockFile>(Scope))
+ Scope = File->getScope();
- if (IA) {
+ if (auto *IA = DL->getInlinedAt()) {
auto I = InlinedLexicalScopeMap.find(std::make_pair(Scope, IA));
return I != InlinedLexicalScopeMap.end() ? &I->second : nullptr;
}
@@ -128,46 +125,39 @@ LexicalScope *LexicalScopes::findLexicalScope(DebugLoc DL) {
/// getOrCreateLexicalScope - Find lexical scope for the given DebugLoc. If
/// not available then create new lexical scope.
-LexicalScope *LexicalScopes::getOrCreateLexicalScope(DebugLoc DL) {
- if (DL.isUnknown())
- return nullptr;
- MDNode *Scope = nullptr;
- MDNode *InlinedAt = nullptr;
- DL.getScopeAndInlinedAt(Scope, InlinedAt, MF->getFunction()->getContext());
-
- if (InlinedAt) {
+LexicalScope *LexicalScopes::getOrCreateLexicalScope(const MDLocalScope *Scope,
+ const MDLocation *IA) {
+ if (IA) {
// Create an abstract scope for inlined function.
getOrCreateAbstractScope(Scope);
// Create an inlined scope for inlined function.
- return getOrCreateInlinedScope(Scope, InlinedAt);
+ return getOrCreateInlinedScope(Scope, IA);
}
return getOrCreateRegularScope(Scope);
}
/// getOrCreateRegularScope - Find or create a regular lexical scope.
-LexicalScope *LexicalScopes::getOrCreateRegularScope(MDNode *Scope) {
- DIDescriptor D = DIDescriptor(Scope);
- if (D.isLexicalBlockFile()) {
- Scope = DILexicalBlockFile(Scope).getScope();
- D = DIDescriptor(Scope);
- }
+LexicalScope *
+LexicalScopes::getOrCreateRegularScope(const MDLocalScope *Scope) {
+ if (auto *File = dyn_cast<MDLexicalBlockFile>(Scope))
+ Scope = File->getScope();
auto I = LexicalScopeMap.find(Scope);
if (I != LexicalScopeMap.end())
return &I->second;
+ // FIXME: Should the following dyn_cast be MDLexicalBlock?
LexicalScope *Parent = nullptr;
- if (D.isLexicalBlock())
- Parent = getOrCreateLexicalScope(DebugLoc::getFromDILexicalBlock(Scope));
+ if (auto *Block = dyn_cast<MDLexicalBlockBase>(Scope))
+ Parent = getOrCreateLexicalScope(Block->getScope());
I = LexicalScopeMap.emplace(std::piecewise_construct,
std::forward_as_tuple(Scope),
- std::forward_as_tuple(Parent, DIDescriptor(Scope),
- nullptr, false)).first;
+ std::forward_as_tuple(Parent, Scope, nullptr,
+ false)).first;
if (!Parent) {
- assert(DIDescriptor(Scope).isSubprogram());
- assert(DISubprogram(Scope).describes(MF->getFunction()));
+ assert(cast<MDSubprogram>(Scope)->describes(MF->getFunction()));
assert(!CurrentFnLexicalScope);
CurrentFnLexicalScope = &I->second;
}
@@ -176,19 +166,19 @@ LexicalScope *LexicalScopes::getOrCreateRegularScope(MDNode *Scope) {
}
/// getOrCreateInlinedScope - Find or create an inlined lexical scope.
-LexicalScope *LexicalScopes::getOrCreateInlinedScope(MDNode *ScopeNode,
- MDNode *InlinedAt) {
- std::pair<const MDNode*, const MDNode*> P(ScopeNode, InlinedAt);
+LexicalScope *
+LexicalScopes::getOrCreateInlinedScope(const MDLocalScope *Scope,
+ const MDLocation *InlinedAt) {
+ std::pair<const MDLocalScope *, const MDLocation *> P(Scope, InlinedAt);
auto I = InlinedLexicalScopeMap.find(P);
if (I != InlinedLexicalScopeMap.end())
return &I->second;
LexicalScope *Parent;
- DILexicalBlock Scope(ScopeNode);
- if (Scope.isSubprogram())
- Parent = getOrCreateLexicalScope(DebugLoc::getFromDILocation(InlinedAt));
+ if (auto *Block = dyn_cast<MDLexicalBlockBase>(Scope))
+ Parent = getOrCreateInlinedScope(Block->getScope(), InlinedAt);
else
- Parent = getOrCreateInlinedScope(Scope.getContext(), InlinedAt);
+ Parent = getOrCreateLexicalScope(InlinedAt);
I = InlinedLexicalScopeMap.emplace(std::piecewise_construct,
std::forward_as_tuple(P),
@@ -199,27 +189,26 @@ LexicalScope *LexicalScopes::getOrCreateInlinedScope(MDNode *ScopeNode,
}
/// getOrCreateAbstractScope - Find or create an abstract lexical scope.
-LexicalScope *LexicalScopes::getOrCreateAbstractScope(const MDNode *N) {
- assert(N && "Invalid Scope encoding!");
+LexicalScope *
+LexicalScopes::getOrCreateAbstractScope(const MDLocalScope *Scope) {
+ assert(Scope && "Invalid Scope encoding!");
- DIDescriptor Scope(N);
- if (Scope.isLexicalBlockFile())
- Scope = DILexicalBlockFile(Scope).getScope();
+ if (auto *File = dyn_cast<MDLexicalBlockFile>(Scope))
+ Scope = File->getScope();
auto I = AbstractScopeMap.find(Scope);
if (I != AbstractScopeMap.end())
return &I->second;
+ // FIXME: Should the following isa be MDLexicalBlock?
LexicalScope *Parent = nullptr;
- if (Scope.isLexicalBlock()) {
- DILexicalBlock DB(Scope);
- DIDescriptor ParentDesc = DB.getContext();
- Parent = getOrCreateAbstractScope(ParentDesc);
- }
+ if (auto *Block = dyn_cast<MDLexicalBlockBase>(Scope))
+ Parent = getOrCreateAbstractScope(Block->getScope());
+
I = AbstractScopeMap.emplace(std::piecewise_construct,
std::forward_as_tuple(Scope),
std::forward_as_tuple(Parent, Scope,
nullptr, true)).first;
- if (Scope.isSubprogram())
+ if (isa<MDSubprogram>(Scope))
AbstractScopesList.push_back(&I->second);
return &I->second;
}
@@ -280,7 +269,7 @@ void LexicalScopes::assignInstructionRanges(
/// have machine instructions that belong to lexical scope identified by
/// DebugLoc.
void LexicalScopes::getMachineBasicBlocks(
- DebugLoc DL, SmallPtrSetImpl<const MachineBasicBlock *> &MBBs) {
+ const MDLocation *DL, SmallPtrSetImpl<const MachineBasicBlock *> &MBBs) {
MBBs.clear();
LexicalScope *Scope = getOrCreateLexicalScope(DL);
if (!Scope)
@@ -303,7 +292,7 @@ void LexicalScopes::getMachineBasicBlocks(
/// dominates - Return true if DebugLoc's lexical scope dominates at least one
/// machine instruction's lexical scope in a given machine basic block.
-bool LexicalScopes::dominates(DebugLoc DL, MachineBasicBlock *MBB) {
+bool LexicalScopes::dominates(const MDLocation *DL, MachineBasicBlock *MBB) {
LexicalScope *Scope = getOrCreateLexicalScope(DL);
if (!Scope)
return false;
@@ -315,12 +304,10 @@ bool LexicalScopes::dominates(DebugLoc DL, MachineBasicBlock *MBB) {
bool Result = false;
for (MachineBasicBlock::iterator I = MBB->begin(), E = MBB->end(); I != E;
++I) {
- DebugLoc IDL = I->getDebugLoc();
- if (IDL.isUnknown())
- continue;
- if (LexicalScope *IScope = getOrCreateLexicalScope(IDL))
- if (Scope->dominates(IScope))
- return true;
+ if (const MDLocation *IDL = I->getDebugLoc())
+ if (LexicalScope *IScope = getOrCreateLexicalScope(IDL))
+ if (Scope->dominates(IScope))
+ return true;
}
return Result;
}
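Editor's note: the rewritten getOrCreateInlinedScope recurses on (scope, inlined-at) pairs directly instead of round-tripping through DebugLoc, caching one scope object per inline site. A simplified standalone sketch of that keyed caching follows; SourceScope/Location/LexScope are hypothetical stand-ins for the real metadata and LexicalScope classes, and the parent-chaining is reduced to its essentials.

// Sketch: each distinct (block, inlined-at) pair gets its own scope; parents
// chain up enclosing blocks at the same inline site, then to the call site.
#include <cassert>
#include <map>
#include <string>

struct SourceScope {
  std::string Name;
  const SourceScope *Parent = nullptr; // enclosing lexical block, if any
};

struct Location {
  const SourceScope *Scope;
  const Location *InlinedAt; // null for a non-inlined location
};

struct LexScope {
  const SourceScope *Scope;
  const LexScope *Parent;
};

class ScopeMap {
  std::map<std::pair<const SourceScope *, const Location *>, LexScope> Inlined;
  std::map<const SourceScope *, LexScope> Regular;

public:
  const LexScope *getOrCreateRegular(const SourceScope *S) {
    auto I = Regular.find(S);
    if (I != Regular.end())
      return &I->second;
    const LexScope *Parent = S->Parent ? getOrCreateRegular(S->Parent) : nullptr;
    return &Regular.emplace(S, LexScope{S, Parent}).first->second;
  }

  const LexScope *getOrCreateInlined(const SourceScope *S, const Location *IA) {
    auto Key = std::make_pair(S, IA);
    auto I = Inlined.find(Key);
    if (I != Inlined.end())
      return &I->second;
    // Enclosing block at the same inline site, or the call-site scope once we
    // run out of enclosing blocks.
    const LexScope *Parent = S->Parent
                                 ? getOrCreateInlined(S->Parent, IA)
                                 : getOrCreate(IA->Scope, IA->InlinedAt);
    return &Inlined.emplace(Key, LexScope{S, Parent}).first->second;
  }

  const LexScope *getOrCreate(const SourceScope *S, const Location *IA) {
    return IA ? getOrCreateInlined(S, IA) : getOrCreateRegular(S);
  }
};

int main() {
  SourceScope Caller{"caller"}, Callee{"callee"};
  Location CallSite{&Caller, nullptr};
  ScopeMap SM;
  const LexScope *L = SM.getOrCreate(&Callee, &CallSite);
  assert(L->Parent == SM.getOrCreateRegular(&Caller));
  assert(L == SM.getOrCreate(&Callee, &CallSite)); // cached
}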
diff --git a/lib/CodeGen/LiveDebugVariables.cpp b/lib/CodeGen/LiveDebugVariables.cpp
index e3791be..c2993db 100644
--- a/lib/CodeGen/LiveDebugVariables.cpp
+++ b/lib/CodeGen/LiveDebugVariables.cpp
@@ -158,10 +158,10 @@ public:
UserValue *getNext() const { return next; }
/// match - Does this UserValue match the parameters?
- bool match(const MDNode *Var, const MDNode *Expr, unsigned Offset,
- bool indirect) const {
- return Var == Variable && Expr == Expression && Offset == offset &&
- indirect == IsIndirect;
+ bool match(const MDNode *Var, const MDNode *Expr, const MDLocation *IA,
+ unsigned Offset, bool indirect) const {
+ return Var == Variable && Expr == Expression && dl->getInlinedAt() == IA &&
+ Offset == offset && indirect == IsIndirect;
}
/// merge - Merge equivalence classes.
@@ -269,12 +269,6 @@ public:
void emitDebugValues(VirtRegMap *VRM,
LiveIntervals &LIS, const TargetInstrInfo &TRI);
- /// findDebugLoc - Return DebugLoc used for this DBG_VALUE instruction. A
- /// variable may have more than one corresponding DBG_VALUE instructions.
- /// Only first one needs DebugLoc to identify variable's lexical scope
- /// in source file.
- DebugLoc findDebugLoc();
-
/// getDebugLoc - Return DebugLoc of this UserValue.
DebugLoc getDebugLoc() { return dl;}
void print(raw_ostream &, const TargetRegisterInfo *);
@@ -363,10 +357,47 @@ public:
};
} // namespace
+static void printDebugLoc(DebugLoc DL, raw_ostream &CommentOS,
+ const LLVMContext &Ctx) {
+ if (!DL)
+ return;
+
+ auto *Scope = cast<MDScope>(DL.getScope());
+ // Omit the directory, because it's likely to be long and uninteresting.
+ CommentOS << Scope->getFilename();
+ CommentOS << ':' << DL.getLine();
+ if (DL.getCol() != 0)
+ CommentOS << ':' << DL.getCol();
+
+ DebugLoc InlinedAtDL = DL.getInlinedAt();
+ if (!InlinedAtDL)
+ return;
+
+ CommentOS << " @[ ";
+ printDebugLoc(InlinedAtDL, CommentOS, Ctx);
+ CommentOS << " ]";
+}
+
+static void printExtendedName(raw_ostream &OS, const MDLocalVariable *V,
+ const MDLocation *DL) {
+ const LLVMContext &Ctx = V->getContext();
+ StringRef Res = V->getName();
+ if (!Res.empty())
+ OS << Res << "," << V->getLine();
+ if (auto *InlinedAt = DL->getInlinedAt()) {
+ if (DebugLoc InlinedAtDL = InlinedAt) {
+ OS << " @[";
+ printDebugLoc(InlinedAtDL, OS, Ctx);
+ OS << "]";
+ }
+ }
+}
+
void UserValue::print(raw_ostream &OS, const TargetRegisterInfo *TRI) {
- DIVariable DV(Variable);
+ DIVariable DV = cast<MDLocalVariable>(Variable);
OS << "!\"";
- DV.printExtendedName(OS);
+ printExtendedName(OS, DV, dl);
+
OS << "\"\t";
if (offset)
OS << '+' << offset;
@@ -433,7 +464,7 @@ UserValue *LDVImpl::getUserValue(const MDNode *Var, const MDNode *Expr,
UserValue *UV = Leader->getLeader();
Leader = UV;
for (; UV; UV = UV->getNext())
- if (UV->match(Var, Expr, Offset, IsIndirect))
+ if (UV->match(Var, Expr, DL->getInlinedAt(), Offset, IsIndirect))
return UV;
}
@@ -942,11 +973,6 @@ findInsertLocation(MachineBasicBlock *MBB, SlotIndex Idx,
std::next(MachineBasicBlock::iterator(MI));
}
-DebugLoc UserValue::findDebugLoc() {
- DebugLoc D = dl;
- dl = DebugLoc();
- return D;
-}
void UserValue::insertDebugValue(MachineBasicBlock *MBB, SlotIndex Idx,
unsigned LocNo,
LiveIntervals &LIS,
@@ -955,11 +981,14 @@ void UserValue::insertDebugValue(MachineBasicBlock *MBB, SlotIndex Idx,
MachineOperand &Loc = locations[LocNo];
++NumInsertedDebugValues;
+ assert(cast<MDLocalVariable>(Variable)
+ ->isValidLocationForIntrinsic(getDebugLoc()) &&
+ "Expected inlined-at fields to agree");
if (Loc.isReg())
- BuildMI(*MBB, I, findDebugLoc(), TII.get(TargetOpcode::DBG_VALUE),
+ BuildMI(*MBB, I, getDebugLoc(), TII.get(TargetOpcode::DBG_VALUE),
IsIndirect, Loc.getReg(), offset, Variable, Expression);
else
- BuildMI(*MBB, I, findDebugLoc(), TII.get(TargetOpcode::DBG_VALUE))
+ BuildMI(*MBB, I, getDebugLoc(), TII.get(TargetOpcode::DBG_VALUE))
.addOperand(Loc)
.addImm(offset)
.addMetadata(Variable)
diff --git a/lib/CodeGen/LiveDebugVariables.h b/lib/CodeGen/LiveDebugVariables.h
index 9748329..fe296bc 100644
--- a/lib/CodeGen/LiveDebugVariables.h
+++ b/lib/CodeGen/LiveDebugVariables.h
@@ -38,7 +38,7 @@ public:
static char ID; // Pass identification, replacement for typeid
LiveDebugVariables();
- ~LiveDebugVariables();
+ ~LiveDebugVariables() override;
/// renameRegister - Move any user variables in OldReg to NewReg:SubIdx.
/// @param OldReg Old virtual register that is going away.
diff --git a/lib/CodeGen/LiveInterval.cpp b/lib/CodeGen/LiveInterval.cpp
index 2afd7fa..d75e441 100644
--- a/lib/CodeGen/LiveInterval.cpp
+++ b/lib/CodeGen/LiveInterval.cpp
@@ -816,23 +816,45 @@ static VNInfo *searchForVNI(const SlotIndexes &Indexes, LiveRange &LR,
static void determineMissingVNIs(const SlotIndexes &Indexes, LiveInterval &LI) {
SmallPtrSet<const MachineBasicBlock*, 5> Visited;
- for (LiveRange::Segment &S : LI.segments) {
- if (S.valno != nullptr)
- continue;
- // This can only happen at the begin of a basic block.
- assert(S.start.isBlock() && "valno should only be missing at block begin");
-
- Visited.clear();
- const MachineBasicBlock *MBB = Indexes.getMBBFromIndex(S.start);
- for (const MachineBasicBlock *Pred : MBB->predecessors()) {
- VNInfo *VNI = searchForVNI(Indexes, LI, Pred, Visited);
- if (VNI != nullptr) {
- S.valno = VNI;
- break;
+
+ LiveRange::iterator OutIt;
+ VNInfo *PrevValNo = nullptr;
+ for (LiveRange::iterator I = LI.begin(), E = LI.end(); I != E; ++I) {
+ LiveRange::Segment &S = *I;
+ // Determine final VNI if necessary.
+ if (S.valno == nullptr) {
+ // This can only happen at the beginning of a basic block.
+ assert(S.start.isBlock() && "valno should only be missing at block begin");
+
+ Visited.clear();
+ const MachineBasicBlock *MBB = Indexes.getMBBFromIndex(S.start);
+ for (const MachineBasicBlock *Pred : MBB->predecessors()) {
+ VNInfo *VNI = searchForVNI(Indexes, LI, Pred, Visited);
+ if (VNI != nullptr) {
+ S.valno = VNI;
+ break;
+ }
}
+ assert(S.valno != nullptr && "could not determine valno");
+ }
+ // Merge with previous segment if it has the same VNI.
+ if (PrevValNo == S.valno && OutIt->end == S.start) {
+ OutIt->end = S.end;
+ } else {
+ // Didn't merge. Move OutIt to next segment.
+ if (PrevValNo == nullptr)
+ OutIt = LI.begin();
+ else
+ ++OutIt;
+
+ if (OutIt != I)
+ *OutIt = *I;
+ PrevValNo = S.valno;
}
- assert(S.valno != nullptr && "could not determine valno");
}
+ // If we merged some segments, chop off the end.
+ ++OutIt;
+ LI.segments.erase(OutIt, LI.end());
}
void LiveInterval::constructMainRangeFromSubranges(
@@ -955,6 +977,12 @@ void LiveInterval::constructMainRangeFromSubranges(
NeedVNIFixup = true;
}
+ // In rare cases we can produce adjacent segments with the same value
+ // number (if they come from different subranges, but happen to have
+ // the same defining instruction). VNIFixup will fix those cases.
+ if (!empty() && segments.back().end == Pos &&
+ segments.back().valno == VNI)
+ NeedVNIFixup = true;
CurrentSegment.start = Pos;
CurrentSegment.valno = VNI;
ConstructingSegment = true;
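Editor's note: the rewritten determineMissingVNIs above compacts the segment list in place, folding a segment into its predecessor when both carry the same value number and abut exactly, then truncating the vector. That is a standard two-index compaction; the standalone sketch below shows the same pattern over a plain vector with a simplified Segment type, not LLVM's LiveRange machinery.

// Sketch: merge adjacent segments that share a value number and touch
// end-to-start, shifting survivors down and erasing the tail afterwards.
#include <cassert>
#include <vector>

struct Segment {
  int Start, End;
  int ValNo; // value-number id
};

void mergeAdjacentSegments(std::vector<Segment> &Segs) {
  if (Segs.empty())
    return;
  std::size_t Out = 0; // index of the last written segment
  for (std::size_t I = 1, E = Segs.size(); I != E; ++I) {
    Segment &S = Segs[I];
    if (Segs[Out].ValNo == S.ValNo && Segs[Out].End == S.Start) {
      Segs[Out].End = S.End; // extend the previous segment
    } else {
      ++Out;
      if (Out != I)
        Segs[Out] = S; // shift down over merged-away slots
    }
  }
  Segs.erase(Segs.begin() + Out + 1, Segs.end());
}

int main() {
  std::vector<Segment> Segs = {{0, 4, 1}, {4, 8, 1}, {8, 12, 2}, {14, 16, 2}};
  mergeAdjacentSegments(Segs);
  assert(Segs.size() == 3);
  assert(Segs[0].Start == 0 && Segs[0].End == 8 && Segs[0].ValNo == 1);
}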
diff --git a/lib/CodeGen/MachineBasicBlock.cpp b/lib/CodeGen/MachineBasicBlock.cpp
index 98359b1..34131bb 100644
--- a/lib/CodeGen/MachineBasicBlock.cpp
+++ b/lib/CodeGen/MachineBasicBlock.cpp
@@ -250,7 +250,7 @@ std::string MachineBasicBlock::getFullName() const {
if (getBasicBlock())
Name += getBasicBlock()->getName();
else
- Name += (Twine("BB") + Twine(getNumber())).str();
+ Name += ("BB" + Twine(getNumber())).str();
return Name;
}
diff --git a/lib/CodeGen/MachineBlockPlacement.cpp b/lib/CodeGen/MachineBlockPlacement.cpp
index ecc50c9..2969bad 100644
--- a/lib/CodeGen/MachineBlockPlacement.cpp
+++ b/lib/CodeGen/MachineBlockPlacement.cpp
@@ -661,7 +661,7 @@ MachineBlockPlacement::findBestLoopExit(MachineFunction &F, MachineLoop &L,
for (MachineBasicBlock *MBB : L.getBlocks()) {
BlockChain &Chain = *BlockToChain[MBB];
// Ensure that this block is at the end of a chain; otherwise it could be
- // mid-way through an inner loop or a successor of an analyzable branch.
+ // mid-way through an inner loop or a successor of an unanalyzable branch.
if (MBB != *std::prev(Chain.end()))
continue;
@@ -715,7 +715,7 @@ MachineBlockPlacement::findBestLoopExit(MachineFunction &F, MachineLoop &L,
// a frequency higher than the current exit before we consider breaking
// the layout.
BranchProbability Bias(100 - ExitBlockBias, 100);
- if (!ExitingBB || BestExitLoopDepth < SuccLoopDepth ||
+ if (!ExitingBB || SuccLoopDepth > BestExitLoopDepth ||
ExitEdgeFreq > BestExitEdgeFreq ||
(MBB->isLayoutSuccessor(Succ) &&
!(ExitEdgeFreq < BestExitEdgeFreq * Bias))) {
@@ -724,8 +724,8 @@ MachineBlockPlacement::findBestLoopExit(MachineFunction &F, MachineLoop &L,
}
}
- // Restore the old exiting state, no viable looping successor was found.
if (!HasLoopingSucc) {
+ // Restore the old exiting state, no viable looping successor was found.
ExitingBB = OldExitingBB;
BestExitEdgeFreq = OldBestExitEdgeFreq;
continue;
diff --git a/lib/CodeGen/MachineFunction.cpp b/lib/CodeGen/MachineFunction.cpp
index 6ceace8..448531f 100644
--- a/lib/CodeGen/MachineFunction.cpp
+++ b/lib/CodeGen/MachineFunction.cpp
@@ -380,7 +380,7 @@ namespace llvm {
DOTGraphTraits (bool isSimple=false) : DefaultDOTGraphTraits(isSimple) {}
static std::string getGraphName(const MachineFunction *F) {
- return "CFG for '" + F->getName().str() + "' function";
+ return ("CFG for '" + F->getName() + "' function").str();
}
std::string getNodeLabel(const MachineBasicBlock *Node,
@@ -468,7 +468,7 @@ MCSymbol *MachineFunction::getJTISymbol(unsigned JTI, MCContext &Ctx,
SmallString<60> Name;
raw_svector_ostream(Name)
<< Prefix << "JTI" << getFunctionNumber() << '_' << JTI;
- return Ctx.GetOrCreateSymbol(Name.str());
+ return Ctx.GetOrCreateSymbol(Name);
}
/// getPICBaseSymbol - Return a function-local symbol to represent the PIC
diff --git a/lib/CodeGen/MachineInstr.cpp b/lib/CodeGen/MachineInstr.cpp
index 1240efb..d154110 100644
--- a/lib/CodeGen/MachineInstr.cpp
+++ b/lib/CodeGen/MachineInstr.cpp
@@ -881,8 +881,8 @@ bool MachineInstr::isIdenticalTo(const MachineInstr *Other,
}
// If DebugLoc does not match then two dbg.values are not identical.
if (isDebugValue())
- if (!getDebugLoc().isUnknown() && !Other->getDebugLoc().isUnknown()
- && getDebugLoc() != Other->getDebugLoc())
+ if (getDebugLoc() && Other->getDebugLoc() &&
+ getDebugLoc() != Other->getDebugLoc())
return false;
return true;
}
@@ -1619,12 +1619,9 @@ void MachineInstr::print(raw_ostream &OS, bool SkipOpers) const {
}
if (isDebugValue() && MO.isMetadata()) {
// Pretty print DBG_VALUE instructions.
- const MDNode *MD = MO.getMetadata();
- DIDescriptor DI(MD);
- DIVariable DIV(MD);
-
- if (DI.isVariable() && !DIV.getName().empty())
- OS << "!\"" << DIV.getName() << '\"';
+ DIVariable DIV = dyn_cast<MDLocalVariable>(MO.getMetadata());
+ if (DIV && !DIV->getName().empty())
+ OS << "!\"" << DIV->getName() << '\"';
else
MO.print(OS, TRI);
} else if (TRI && (isInsertSubreg() || isRegSequence()) && MO.isImm()) {
@@ -1711,13 +1708,13 @@ void MachineInstr::print(raw_ostream &OS, bool SkipOpers) const {
}
// Print debug location information.
- if (isDebugValue() && getOperand(e - 1).isMetadata()) {
+ if (isDebugValue() && getOperand(e - 2).isMetadata()) {
if (!HaveSemi) OS << ";";
- DIVariable DV(getOperand(e - 1).getMetadata());
- OS << " line no:" << DV.getLineNumber();
- if (MDNode *InlinedAt = DV.getInlinedAt()) {
- DebugLoc InlinedAtDL = DebugLoc::getFromDILocation(InlinedAt);
- if (!InlinedAtDL.isUnknown() && MF) {
+ DIVariable DV = cast<MDLocalVariable>(getOperand(e - 2).getMetadata());
+ OS << " line no:" << DV->getLine();
+ if (auto *InlinedAt = debugLoc->getInlinedAt()) {
+ DebugLoc InlinedAtDL(InlinedAt);
+ if (InlinedAtDL && MF) {
OS << " inlined @[ ";
InlinedAtDL.print(OS);
OS << " ]";
@@ -1725,7 +1722,7 @@ void MachineInstr::print(raw_ostream &OS, bool SkipOpers) const {
}
if (isIndirectDebugValue())
OS << " indirect";
- } else if (!debugLoc.isUnknown() && MF) {
+ } else if (debugLoc && MF) {
if (!HaveSemi) OS << ";";
OS << " dbg:";
debugLoc.print(OS);
diff --git a/lib/CodeGen/MachineLICM.cpp b/lib/CodeGen/MachineLICM.cpp
index 2f65a2e..9756f13 100644
--- a/lib/CodeGen/MachineLICM.cpp
+++ b/lib/CodeGen/MachineLICM.cpp
@@ -10,10 +10,6 @@
// This pass performs loop invariant code motion on machine instructions. We
// attempt to remove as much code from the body of a loop as possible.
//
-// This pass does not attempt to throttle itself to limit register pressure.
-// The register allocation phases are expected to perform rematerialization
-// to recover when register pressure is high.
-//
// This pass is not intended to be a replacement or a complete alternative
// for the LLVM-IR-level LICM pass. It is only designed to hoist simple
// constructs that are not exposed before lowering and instruction selection.
@@ -104,7 +100,7 @@ namespace {
SmallSet<unsigned, 32> RegSeen;
SmallVector<unsigned, 8> RegPressure;
- // Register pressure "limit" per register class. If the pressure
+ // Register pressure "limit" per register pressure set. If the pressure
// is higher than the limit, then it's considered high.
SmallVector<unsigned, 8> RegLimit;
@@ -214,7 +210,8 @@ namespace {
/// CanCauseHighRegPressure - Visit BBs from header to current BB,
/// check if hoisting an instruction of the given cost matrix can cause high
/// register pressure.
- bool CanCauseHighRegPressure(DenseMap<unsigned, int> &Cost, bool Cheap);
+ bool CanCauseHighRegPressure(const DenseMap<unsigned, int> &Cost,
+ bool Cheap);
/// UpdateBackTraceRegPressure - Traverse the back trace from header to
/// the current block and update their register pressures to reflect the
@@ -254,21 +251,25 @@ namespace {
/// if there is little to no overhead moving instructions into loops.
void SinkIntoLoop();
- /// getRegisterClassIDAndCost - For a given MI, register, and the operand
- /// index, return the ID and cost of its representative register class by
- /// reference.
- void getRegisterClassIDAndCost(const MachineInstr *MI,
- unsigned Reg, unsigned OpIdx,
- unsigned &RCId, unsigned &RCCost) const;
-
/// InitRegPressure - Find all virtual register references that are liveout
/// of the preheader to initialize the starting "register pressure". Note
/// this does not count live through (livein but not used) registers.
void InitRegPressure(MachineBasicBlock *BB);
+ /// calcRegisterCost - Calculate the additional register pressure that the
+ /// registers used in MI cause.
+ ///
+ /// If 'ConsiderSeen' is true, updates 'RegSeen' and uses the information to
+ /// figure out which usages are live-ins.
+ /// FIXME: Figure out a way to consider 'RegSeen' from all code paths.
+ DenseMap<unsigned, int> calcRegisterCost(const MachineInstr *MI,
+ bool ConsiderSeen,
+ bool ConsiderUnseenAsDef);
+
/// UpdateRegPressure - Update estimate of register pressure after the
/// specified instruction.
- void UpdateRegPressure(const MachineInstr *MI);
+ void UpdateRegPressure(const MachineInstr *MI,
+ bool ConsiderUnseenAsDef = false);
/// ExtractHoistableLoad - Unfold a load from the given machineinstr if
/// the load itself could be hoisted. Return the unfolded and hoistable
@@ -354,13 +355,12 @@ bool MachineLICM::runOnMachineFunction(MachineFunction &MF) {
if (PreRegAlloc) {
// Estimate register pressure during pre-regalloc pass.
- unsigned NumRC = TRI->getNumRegClasses();
- RegPressure.resize(NumRC);
+ unsigned NumRPS = TRI->getNumRegPressureSets();
+ RegPressure.resize(NumRPS);
std::fill(RegPressure.begin(), RegPressure.end(), 0);
- RegLimit.resize(NumRC);
- for (TargetRegisterInfo::regclass_iterator I = TRI->regclass_begin(),
- E = TRI->regclass_end(); I != E; ++I)
- RegLimit[(*I)->getID()] = TRI->getRegPressureLimit(*I, MF);
+ RegLimit.resize(NumRPS);
+ for (unsigned i = 0, e = NumRPS; i != e; ++i)
+ RegLimit[i] = TRI->getRegPressureSetLimit(MF, i);
}
// Get our Loop information...
@@ -836,23 +836,6 @@ static bool isOperandKill(const MachineOperand &MO, MachineRegisterInfo *MRI) {
return MO.isKill() || MRI->hasOneNonDBGUse(MO.getReg());
}
-/// getRegisterClassIDAndCost - For a given MI, register, and the operand
-/// index, return the ID and cost of its representative register class.
-void
-MachineLICM::getRegisterClassIDAndCost(const MachineInstr *MI,
- unsigned Reg, unsigned OpIdx,
- unsigned &RCId, unsigned &RCCost) const {
- const TargetRegisterClass *RC = MRI->getRegClass(Reg);
- MVT VT = *RC->vt_begin();
- if (VT == MVT::Untyped) {
- RCId = RC->getID();
- RCCost = 1;
- } else {
- RCId = TLI->getRepRegClassFor(VT)->getID();
- RCCost = TLI->getRepRegClassCostFor(VT);
- }
-}
-
/// InitRegPressure - Find all virtual register references that are liveout of
/// the preheader to initialize the starting "register pressure". Note this
/// does not count live through (livein but not used) registers.
@@ -870,41 +853,30 @@ void MachineLICM::InitRegPressure(MachineBasicBlock *BB) {
InitRegPressure(*BB->pred_begin());
}
- for (MachineBasicBlock::iterator MII = BB->begin(), E = BB->end();
- MII != E; ++MII) {
- MachineInstr *MI = &*MII;
- for (unsigned i = 0, e = MI->getDesc().getNumOperands(); i != e; ++i) {
- const MachineOperand &MO = MI->getOperand(i);
- if (!MO.isReg() || MO.isImplicit())
- continue;
- unsigned Reg = MO.getReg();
- if (!TargetRegisterInfo::isVirtualRegister(Reg))
- continue;
-
- bool isNew = RegSeen.insert(Reg).second;
- unsigned RCId, RCCost;
- getRegisterClassIDAndCost(MI, Reg, i, RCId, RCCost);
- if (MO.isDef())
- RegPressure[RCId] += RCCost;
- else {
- bool isKill = isOperandKill(MO, MRI);
- if (isNew && !isKill)
- // Haven't seen this, it must be a livein.
- RegPressure[RCId] += RCCost;
- else if (!isNew && isKill)
- RegPressure[RCId] -= RCCost;
- }
- }
- }
+ for (const MachineInstr &MI : *BB)
+ UpdateRegPressure(&MI, /*ConsiderUnseenAsDef=*/true);
}
/// UpdateRegPressure - Update estimate of register pressure after the
/// specified instruction.
-void MachineLICM::UpdateRegPressure(const MachineInstr *MI) {
- if (MI->isImplicitDef())
- return;
+void MachineLICM::UpdateRegPressure(const MachineInstr *MI,
+ bool ConsiderUnseenAsDef) {
+ auto Cost = calcRegisterCost(MI, /*ConsiderSeen=*/true, ConsiderUnseenAsDef);
+ for (const auto &RPIdAndCost : Cost) {
+ unsigned Class = RPIdAndCost.first;
+ if (static_cast<int>(RegPressure[Class]) < -RPIdAndCost.second)
+ RegPressure[Class] = 0;
+ else
+ RegPressure[Class] += RPIdAndCost.second;
+ }
+}
- SmallVector<unsigned, 4> Defs;
+DenseMap<unsigned, int>
+MachineLICM::calcRegisterCost(const MachineInstr *MI, bool ConsiderSeen,
+ bool ConsiderUnseenAsDef) {
+ DenseMap<unsigned, int> Cost;
+ if (MI->isImplicitDef())
+ return Cost;
for (unsigned i = 0, e = MI->getDesc().getNumOperands(); i != e; ++i) {
const MachineOperand &MO = MI->getOperand(i);
if (!MO.isReg() || MO.isImplicit())
@@ -913,27 +885,33 @@ void MachineLICM::UpdateRegPressure(const MachineInstr *MI) {
if (!TargetRegisterInfo::isVirtualRegister(Reg))
continue;
- bool isNew = RegSeen.insert(Reg).second;
+ // FIXME: It seems bad to use RegSeen only for some of these calculations.
+ bool isNew = ConsiderSeen ? RegSeen.insert(Reg).second : false;
+ const TargetRegisterClass *RC = MRI->getRegClass(Reg);
+
+ RegClassWeight W = TRI->getRegClassWeight(RC);
+ int RCCost = 0;
if (MO.isDef())
- Defs.push_back(Reg);
- else if (!isNew && isOperandKill(MO, MRI)) {
- unsigned RCId, RCCost;
- getRegisterClassIDAndCost(MI, Reg, i, RCId, RCCost);
- if (RCCost > RegPressure[RCId])
- RegPressure[RCId] = 0;
+ RCCost = W.RegWeight;
+ else {
+ bool isKill = isOperandKill(MO, MRI);
+ if (isNew && !isKill && ConsiderUnseenAsDef)
+ // Haven't seen this, it must be a livein.
+ RCCost = W.RegWeight;
+ else if (!isNew && isKill)
+ RCCost = -W.RegWeight;
+ }
+ if (RCCost == 0)
+ continue;
+ const int *PS = TRI->getRegClassPressureSets(RC);
+ for (; *PS != -1; ++PS) {
+ if (Cost.find(*PS) == Cost.end())
+ Cost[*PS] = RCCost;
else
- RegPressure[RCId] -= RCCost;
+ Cost[*PS] += RCCost;
}
}
-
- unsigned Idx = 0;
- while (!Defs.empty()) {
- unsigned Reg = Defs.pop_back_val();
- unsigned RCId, RCCost;
- getRegisterClassIDAndCost(MI, Reg, Idx, RCId, RCCost);
- RegPressure[RCId] += RCCost;
- ++Idx;
- }
+ return Cost;
}
/// isLoadFromGOTOrConstantPool - Return true if this machine instruction
@@ -1125,27 +1103,23 @@ bool MachineLICM::IsCheapInstruction(MachineInstr &MI) const {
/// CanCauseHighRegPressure - Visit BBs from header to current BB, check
/// if hoisting an instruction of the given cost matrix can cause high
/// register pressure.
-bool MachineLICM::CanCauseHighRegPressure(DenseMap<unsigned, int> &Cost,
+bool MachineLICM::CanCauseHighRegPressure(const DenseMap<unsigned, int>& Cost,
bool CheapInstr) {
- for (DenseMap<unsigned, int>::iterator CI = Cost.begin(), CE = Cost.end();
- CI != CE; ++CI) {
- if (CI->second <= 0)
+ for (const auto &RPIdAndCost : Cost) {
+ if (RPIdAndCost.second <= 0)
continue;
- unsigned RCId = CI->first;
- unsigned Limit = RegLimit[RCId];
- int Cost = CI->second;
+ unsigned Class = RPIdAndCost.first;
+ int Limit = RegLimit[Class];
// Don't hoist cheap instructions if they would increase register pressure,
// even if we're under the limit.
if (CheapInstr && !HoistCheapInsts)
return true;
- for (unsigned i = BackTrace.size(); i != 0; --i) {
- SmallVectorImpl<unsigned> &RP = BackTrace[i-1];
- if (RP[RCId] + Cost >= Limit)
+ for (const auto &RP : BackTrace)
+ if (static_cast<int>(RP[Class]) + RPIdAndCost.second >= Limit)
return true;
- }
}
return false;
@@ -1155,46 +1129,15 @@ bool MachineLICM::CanCauseHighRegPressure(DenseMap<unsigned, int> &Cost,
/// current block and update their register pressures to reflect the effect
/// of hoisting MI from the current block to the preheader.
void MachineLICM::UpdateBackTraceRegPressure(const MachineInstr *MI) {
- if (MI->isImplicitDef())
- return;
-
// First compute the 'cost' of the instruction, i.e. its contribution
// to register pressure.
- DenseMap<unsigned, int> Cost;
- for (unsigned i = 0, e = MI->getDesc().getNumOperands(); i != e; ++i) {
- const MachineOperand &MO = MI->getOperand(i);
- if (!MO.isReg() || MO.isImplicit())
- continue;
- unsigned Reg = MO.getReg();
- if (!TargetRegisterInfo::isVirtualRegister(Reg))
- continue;
-
- unsigned RCId, RCCost;
- getRegisterClassIDAndCost(MI, Reg, i, RCId, RCCost);
- if (MO.isDef()) {
- DenseMap<unsigned, int>::iterator CI = Cost.find(RCId);
- if (CI != Cost.end())
- CI->second += RCCost;
- else
- Cost.insert(std::make_pair(RCId, RCCost));
- } else if (isOperandKill(MO, MRI)) {
- DenseMap<unsigned, int>::iterator CI = Cost.find(RCId);
- if (CI != Cost.end())
- CI->second -= RCCost;
- else
- Cost.insert(std::make_pair(RCId, -RCCost));
- }
- }
+ auto Cost = calcRegisterCost(MI, /*ConsiderSeen=*/false,
+ /*ConsiderUnseenAsDef=*/false);
// Update register pressure of blocks from loop header to current block.
- for (unsigned i = 0, e = BackTrace.size(); i != e; ++i) {
- SmallVectorImpl<unsigned> &RP = BackTrace[i];
- for (DenseMap<unsigned, int>::iterator CI = Cost.begin(), CE = Cost.end();
- CI != CE; ++CI) {
- unsigned RCId = CI->first;
- RP[RCId] += CI->second;
- }
- }
+ for (auto &RP : BackTrace)
+ for (const auto &RPIdAndCost : Cost)
+ RP[RPIdAndCost.first] += RPIdAndCost.second;
}
/// IsProfitableToHoist - Return true if it is potentially profitable to hoist
@@ -1229,15 +1172,8 @@ bool MachineLICM::IsProfitableToHoist(MachineInstr &MI) {
if (TII->isTriviallyReMaterializable(&MI, AA))
return true;
- // Estimate register pressure to determine whether to LICM the instruction.
- // In low register pressure situation, we can be more aggressive about
- // hoisting. Also, favors hoisting long latency instructions even in
- // moderately high pressure situation.
- // Cheap instructions will only be hoisted if they don't increase register
- // pressure at all.
// FIXME: If there are long latency loop-invariant instructions inside the
// loop at this point, why didn't the optimizer's LICM hoist them?
- DenseMap<unsigned, int> Cost;
for (unsigned i = 0, e = MI.getDesc().getNumOperands(); i != e; ++i) {
const MachineOperand &MO = MI.getOperand(i);
if (!MO.isReg() || MO.isImplicit())
@@ -1245,24 +1181,22 @@ bool MachineLICM::IsProfitableToHoist(MachineInstr &MI) {
unsigned Reg = MO.getReg();
if (!TargetRegisterInfo::isVirtualRegister(Reg))
continue;
-
- unsigned RCId, RCCost;
- getRegisterClassIDAndCost(&MI, Reg, i, RCId, RCCost);
- if (MO.isDef()) {
- if (HasHighOperandLatency(MI, i, Reg)) {
- DEBUG(dbgs() << "Hoist High Latency: " << MI);
- ++NumHighLatency;
- return true;
- }
- Cost[RCId] += RCCost;
- } else if (isOperandKill(MO, MRI)) {
- // If a virtual register use is a kill, hoisting it out of the loop
- // may actually reduce register pressure or be register pressure
- // neutral.
- Cost[RCId] -= RCCost;
+ if (MO.isDef() && HasHighOperandLatency(MI, i, Reg)) {
+ DEBUG(dbgs() << "Hoist High Latency: " << MI);
+ ++NumHighLatency;
+ return true;
}
}
+ // Estimate register pressure to determine whether to LICM the instruction.
+ // In low register pressure situation, we can be more aggressive about
+ // hoisting. Also, favors hoisting long latency instructions even in
+ // moderately high pressure situation.
+ // Cheap instructions will only be hoisted if they don't increase register
+ // pressure at all.
+ auto Cost = calcRegisterCost(&MI, /*ConsiderSeen=*/false,
+ /*ConsiderUnseenAsDef=*/false);
+
// Visit BBs from header to current BB, if hoisting this doesn't cause
// high register pressure, then it's safe to proceed.
if (!CanCauseHighRegPressure(Cost, CheapInstr)) {
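The MachineLICM changes above replace the old per-register-class bookkeeping with a signed cost map keyed by register pressure sets: calcRegisterCost adds the class weight for defs (and for unseen, non-killed uses when ConsiderUnseenAsDef is set), subtracts it for kills of already-seen registers, and spreads each contribution across every pressure set the register class belongs to. A minimal standalone sketch of that accumulation, using plain containers and made-up weights and pressure-set ids rather than the TargetRegisterInfo API:

#include <iostream>
#include <map>
#include <utility>
#include <vector>

// Hypothetical stand-ins for TRI->getRegClassWeight() and
// TRI->getRegClassPressureSets(): a class weight plus the pressure sets the
// class belongs to.
struct RegClassInfo {
  int Weight;
  std::vector<int> PressureSets;
};

// Sketch of calcRegisterCost's accumulation: a def adds the class weight to
// every pressure set of its class, a kill subtracts it.
std::map<int, int>
calcCost(const std::vector<std::pair<RegClassInfo, bool>> &Ops) {
  std::map<int, int> Cost; // pressure-set id -> signed contribution
  for (const auto &Op : Ops) {
    int RCCost = Op.second ? Op.first.Weight : -Op.first.Weight;
    for (int PS : Op.first.PressureSets)
      Cost[PS] += RCCost;
  }
  return Cost;
}

int main() {
  RegClassInfo GPR{1, {0}};    // weight 1, one pressure set
  RegClassInfo Vec{2, {1, 2}}; // weight 2, two pressure sets
  auto Cost = calcCost({{Vec, true}, {GPR, false}}); // one def, one kill
  for (const auto &KV : Cost)
    std::cout << "pressure set " << KV.first << ": " << KV.second << '\n';
  return 0;
}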
diff --git a/lib/CodeGen/MachineModuleInfo.cpp b/lib/CodeGen/MachineModuleInfo.cpp
index fca7df0..e8bd1f8 100644
--- a/lib/CodeGen/MachineModuleInfo.cpp
+++ b/lib/CodeGen/MachineModuleInfo.cpp
@@ -14,6 +14,7 @@
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/Passes.h"
+#include "llvm/CodeGen/WinEHFuncInfo.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/GlobalVariable.h"
@@ -425,6 +426,12 @@ void MachineModuleInfo::addPersonality(MachineBasicBlock *LandingPad,
Personalities.push_back(Personality);
}
+void MachineModuleInfo::addWinEHState(MachineBasicBlock *LandingPad,
+ int State) {
+ LandingPadInfo &LP = getOrCreateLandingPadInfo(LandingPad);
+ LP.WinEHState = State;
+}
+
/// addCatchTypeInfo - Provide the catch typeinfo for a landing pad.
///
void MachineModuleInfo::
@@ -563,10 +570,13 @@ const Function *MachineModuleInfo::getPersonality() const {
}
EHPersonality MachineModuleInfo::getPersonalityType() {
- if (PersonalityTypeCache == EHPersonality::Unknown)
- PersonalityTypeCache = classifyEHPersonality(getPersonality());
+ if (PersonalityTypeCache == EHPersonality::Unknown) {
+ if (const Function *F = getPersonality())
+ PersonalityTypeCache = classifyEHPersonality(F);
+ }
return PersonalityTypeCache;
}
+
/// getPersonalityIndex - Return unique index for current personality
/// function. NULL/first personality function should always get zero index.
unsigned MachineModuleInfo::getPersonalityIndex() const {
@@ -588,3 +598,18 @@ unsigned MachineModuleInfo::getPersonalityIndex() const {
// in the zero index.
return 0;
}
+
+const Function *MachineModuleInfo::getWinEHParent(const Function *F) const {
+ StringRef WinEHParentName =
+ F->getFnAttribute("wineh-parent").getValueAsString();
+ if (WinEHParentName.empty() || WinEHParentName == F->getName())
+ return F;
+ return F->getParent()->getFunction(WinEHParentName);
+}
+
+WinEHFuncInfo &MachineModuleInfo::getWinEHFuncInfo(const Function *F) {
+ auto &Ptr = FuncInfoMap[getWinEHParent(F)];
+ if (!Ptr)
+ Ptr.reset(new WinEHFuncInfo);
+ return *Ptr;
+}
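The new MachineModuleInfo helpers resolve an outlined handler back to its parent via the "wineh-parent" string attribute and lazily create one WinEHFuncInfo per parent, so every handler of a function shares the same info object. A rough sketch of that lazy, parent-keyed map pattern, with a placeholder struct standing in for WinEHFuncInfo:

#include <map>
#include <memory>
#include <string>

// Placeholder for WinEHFuncInfo; the real struct carries the unwind and
// try-block maps that the rest of this patch fills in.
struct FuncEHInfo {
  int UnwindHelpFrameIdx = -1;
};

// Keyed by the parent function; outlined handlers resolve to their parent
// first, so they all share one info object, created on first use.
static std::map<std::string, std::unique_ptr<FuncEHInfo>> FuncInfoMap;

FuncEHInfo &getFuncEHInfo(const std::string &ParentName) {
  auto &Ptr = FuncInfoMap[ParentName];
  if (!Ptr)
    Ptr = std::make_unique<FuncEHInfo>();
  return *Ptr;
}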
diff --git a/lib/CodeGen/MachineModuleInfoImpls.cpp b/lib/CodeGen/MachineModuleInfoImpls.cpp
index a1c7e9f..22d519e 100644
--- a/lib/CodeGen/MachineModuleInfoImpls.cpp
+++ b/lib/CodeGen/MachineModuleInfoImpls.cpp
@@ -31,15 +31,14 @@ static int SortSymbolPair(const void *LHS, const void *RHS) {
return LHSS->getName().compare(RHSS->getName());
}
-/// GetSortedStubs - Return the entries from a DenseMap in a deterministic
-/// sorted order.
-MachineModuleInfoImpl::SymbolListTy
-MachineModuleInfoImpl::GetSortedStubs(const DenseMap<MCSymbol*,
- MachineModuleInfoImpl::StubValueTy>&Map) {
+MachineModuleInfoImpl::SymbolListTy MachineModuleInfoImpl::getSortedStubs(
+ DenseMap<MCSymbol *, MachineModuleInfoImpl::StubValueTy> &Map) {
MachineModuleInfoImpl::SymbolListTy List(Map.begin(), Map.end());
if (!List.empty())
qsort(&List[0], List.size(), sizeof(List[0]), SortSymbolPair);
+
+ Map.clear();
return List;
}
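getSortedStubs (renamed from GetSortedStubs) now also clears the map after copying it, so repeated calls hand out each stub exactly once while still emitting them in a deterministic, name-sorted order. A self-contained sketch of that drain-and-sort pattern, using std::sort over an unordered map instead of the qsort comparator in the patch:

#include <algorithm>
#include <string>
#include <unordered_map>
#include <utility>
#include <vector>

using Stub = std::pair<std::string, int>; // symbol name -> stub value

std::vector<Stub> drainSortedStubs(std::unordered_map<std::string, int> &Map) {
  std::vector<Stub> List(Map.begin(), Map.end());
  std::sort(List.begin(), List.end(),
            [](const Stub &L, const Stub &R) { return L.first < R.first; });
  Map.clear(); // mirrors the patch: each stub is handed out exactly once
  return List;
}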
diff --git a/lib/CodeGen/MachineScheduler.cpp b/lib/CodeGen/MachineScheduler.cpp
index 7a3c80b..a52d05f 100644
--- a/lib/CodeGen/MachineScheduler.cpp
+++ b/lib/CodeGen/MachineScheduler.cpp
@@ -1036,8 +1036,6 @@ void ScheduleDAGMILive::schedule() {
scheduleMI(SU, IsTopNode);
- updateQueues(SU, IsTopNode);
-
if (DFSResult) {
unsigned SubtreeID = DFSResult->getSubtreeID(SU);
if (!ScheduledTrees.test(SubtreeID)) {
@@ -1049,6 +1047,8 @@ void ScheduleDAGMILive::schedule() {
// Notify the scheduling strategy after updating the DAG.
SchedImpl->schedNode(SU, IsTopNode);
+
+ updateQueues(SU, IsTopNode);
}
assert(CurrentTop == CurrentBottom && "Nonempty unscheduled zone.");
diff --git a/lib/CodeGen/MachineTraceMetrics.cpp b/lib/CodeGen/MachineTraceMetrics.cpp
index 8aacd1f..5dc7c21 100644
--- a/lib/CodeGen/MachineTraceMetrics.cpp
+++ b/lib/CodeGen/MachineTraceMetrics.cpp
@@ -463,13 +463,11 @@ void MachineTraceMetrics::Ensemble::computeTrace(const MachineBasicBlock *MBB) {
// Run an upwards post-order search for the trace start.
Bounds.Downward = false;
Bounds.Visited.clear();
- typedef ipo_ext_iterator<const MachineBasicBlock*, LoopBounds> UpwardPO;
- for (UpwardPO I = ipo_ext_begin(MBB, Bounds), E = ipo_ext_end(MBB, Bounds);
- I != E; ++I) {
+ for (auto I : inverse_post_order_ext(MBB, Bounds)) {
DEBUG(dbgs() << " pred for BB#" << I->getNumber() << ": ");
TraceBlockInfo &TBI = BlockInfo[I->getNumber()];
// All the predecessors have been visited, pick the preferred one.
- TBI.Pred = pickTracePred(*I);
+ TBI.Pred = pickTracePred(I);
DEBUG({
if (TBI.Pred)
dbgs() << "BB#" << TBI.Pred->getNumber() << '\n';
@@ -477,19 +475,17 @@ void MachineTraceMetrics::Ensemble::computeTrace(const MachineBasicBlock *MBB) {
dbgs() << "null\n";
});
// The trace leading to I is now known, compute the depth resources.
- computeDepthResources(*I);
+ computeDepthResources(I);
}
// Run a downwards post-order search for the trace end.
Bounds.Downward = true;
Bounds.Visited.clear();
- typedef po_ext_iterator<const MachineBasicBlock*, LoopBounds> DownwardPO;
- for (DownwardPO I = po_ext_begin(MBB, Bounds), E = po_ext_end(MBB, Bounds);
- I != E; ++I) {
+ for (auto I : post_order_ext(MBB, Bounds)) {
DEBUG(dbgs() << " succ for BB#" << I->getNumber() << ": ");
TraceBlockInfo &TBI = BlockInfo[I->getNumber()];
// All the successors have been visited, pick the preferred one.
- TBI.Succ = pickTraceSucc(*I);
+ TBI.Succ = pickTraceSucc(I);
DEBUG({
if (TBI.Succ)
dbgs() << "BB#" << TBI.Succ->getNumber() << '\n';
@@ -497,7 +493,7 @@ void MachineTraceMetrics::Ensemble::computeTrace(const MachineBasicBlock *MBB) {
dbgs() << "null\n";
});
// The trace leaving I is now known, compute the height resources.
- computeHeightResources(*I);
+ computeHeightResources(I);
}
}
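The MachineTraceMetrics hunks swap the explicit ipo_ext_begin/po_ext_begin iterator pairs for the range helpers inverse_post_order_ext and post_order_ext, which take the caller-owned visited set (the LoopBounds object here) and yield block pointers directly. A toy sketch of what an externally-bounded post-order walk does, on a plain adjacency-list graph rather than MachineBasicBlocks:

#include <functional>
#include <set>
#include <vector>

// Toy graph: node index -> successor indices.
using Graph = std::vector<std::vector<int>>;

void postOrderExt(const Graph &G, int Start, std::set<int> &Visited,
                  const std::function<void(int)> &Visit) {
  if (!Visited.insert(Start).second)
    return; // already visited, or pre-blocked by the caller: stop here
  for (int Succ : G[Start])
    postOrderExt(G, Succ, Visited, Visit);
  Visit(Start); // a node is emitted only after all of its successors
}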
diff --git a/lib/CodeGen/PostRASchedulerList.cpp b/lib/CodeGen/PostRASchedulerList.cpp
index ad59fc9..55f08e4 100644
--- a/lib/CodeGen/PostRASchedulerList.cpp
+++ b/lib/CodeGen/PostRASchedulerList.cpp
@@ -141,7 +141,7 @@ namespace {
TargetSubtargetInfo::AntiDepBreakMode AntiDepMode,
SmallVectorImpl<const TargetRegisterClass *> &CriticalPathRCs);
- ~SchedulePostRATDList();
+ ~SchedulePostRATDList() override;
/// startBlock - Initialize register live-range state for scheduling in
/// this block.
diff --git a/lib/CodeGen/PrologEpilogInserter.cpp b/lib/CodeGen/PrologEpilogInserter.cpp
index e073e6a..5334a63 100644
--- a/lib/CodeGen/PrologEpilogInserter.cpp
+++ b/lib/CodeGen/PrologEpilogInserter.cpp
@@ -30,6 +30,7 @@
#include "llvm/CodeGen/Passes.h"
#include "llvm/CodeGen/RegisterScavenging.h"
#include "llvm/CodeGen/StackProtector.h"
+#include "llvm/CodeGen/WinEHFuncInfo.h"
#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/LLVMContext.h"
@@ -752,6 +753,25 @@ void PEI::replaceFrameIndices(MachineFunction &Fn) {
const TargetFrameLowering &TFI = *Fn.getSubtarget().getFrameLowering();
if (!TFI.needsFrameIndexResolution(Fn)) return;
+ MachineModuleInfo &MMI = Fn.getMMI();
+ const Function *F = Fn.getFunction();
+ const Function *ParentF = MMI.getWinEHParent(F);
+ unsigned FrameReg;
+ if (F == ParentF) {
+ WinEHFuncInfo &FuncInfo = MMI.getWinEHFuncInfo(Fn.getFunction());
+ // FIXME: This should be unconditional but we have bugs in the preparation
+ // pass.
+ if (FuncInfo.UnwindHelpFrameIdx != INT_MAX)
+ FuncInfo.UnwindHelpFrameOffset = TFI.getFrameIndexReferenceFromSP(
+ Fn, FuncInfo.UnwindHelpFrameIdx, FrameReg);
+ } else if (MMI.hasWinEHFuncInfo(F)) {
+ WinEHFuncInfo &FuncInfo = MMI.getWinEHFuncInfo(Fn.getFunction());
+ auto I = FuncInfo.CatchHandlerParentFrameObjIdx.find(F);
+ if (I != FuncInfo.CatchHandlerParentFrameObjIdx.end())
+ FuncInfo.CatchHandlerParentFrameObjOffset[F] =
+ TFI.getFrameIndexReferenceFromSP(Fn, I->second, FrameReg);
+ }
+
// Store SPAdj at exit of a basic block.
SmallVector<int, 8> SPState;
SPState.resize(Fn.getNumBlockIDs());
diff --git a/lib/CodeGen/RegAllocFast.cpp b/lib/CodeGen/RegAllocFast.cpp
index c621414..c311c7b 100644
--- a/lib/CodeGen/RegAllocFast.cpp
+++ b/lib/CodeGen/RegAllocFast.cpp
@@ -301,13 +301,9 @@ void RAFast::spillVirtReg(MachineBasicBlock::iterator MI,
const MDNode *Expr = DBG->getDebugExpression();
bool IsIndirect = DBG->isIndirectDebugValue();
uint64_t Offset = IsIndirect ? DBG->getOperand(1).getImm() : 0;
- DebugLoc DL;
- if (MI == MBB->end()) {
- // If MI is at basic block end then use last instruction's location.
- MachineBasicBlock::iterator EI = MI;
- DL = (--EI)->getDebugLoc();
- } else
- DL = MI->getDebugLoc();
+ DebugLoc DL = DBG->getDebugLoc();
+ assert(cast<MDLocalVariable>(Var)->isValidLocationForIntrinsic(DL) &&
+ "Expected inlined-at fields to agree");
MachineInstr *NewDV =
BuildMI(*MBB, MI, DL, TII->get(TargetOpcode::DBG_VALUE))
.addFrameIndex(FI)
@@ -877,6 +873,9 @@ void RAFast::AllocateBasicBlock() {
const MDNode *Expr = MI->getDebugExpression();
DebugLoc DL = MI->getDebugLoc();
MachineBasicBlock *MBB = MI->getParent();
+ assert(
+ cast<MDLocalVariable>(Var)->isValidLocationForIntrinsic(DL) &&
+ "Expected inlined-at fields to agree");
MachineInstr *NewDV = BuildMI(*MBB, MBB->erase(MI), DL,
TII->get(TargetOpcode::DBG_VALUE))
.addFrameIndex(SS)
diff --git a/lib/CodeGen/RegAllocGreedy.cpp b/lib/CodeGen/RegAllocGreedy.cpp
index e94f1bb..26f42c9 100644
--- a/lib/CodeGen/RegAllocGreedy.cpp
+++ b/lib/CodeGen/RegAllocGreedy.cpp
@@ -538,8 +538,9 @@ void RAGreedy::enqueue(PQueue &CurQueue, LiveInterval *LI) {
// Giant live ranges fall back to the global assignment heuristic, which
// prevents excessive spilling in pathological cases.
bool ReverseLocal = TRI->reverseLocalAssignment();
+ const TargetRegisterClass &RC = *MRI->getRegClass(Reg);
bool ForceGlobal = !ReverseLocal &&
- (Size / SlotIndex::InstrDist) > (2 * MRI->getRegClass(Reg)->getNumRegs());
+ (Size / SlotIndex::InstrDist) > (2 * RC.getNumRegs());
if (ExtraRegInfo[Reg].Stage == RS_Assign && !ForceGlobal && !LI->empty() &&
LIS->intervalIsInOneMBB(*LI)) {
@@ -552,10 +553,10 @@ void RAGreedy::enqueue(PQueue &CurQueue, LiveInterval *LI) {
// Allocating bottom up may allow many short LRGs to be assigned first
// to one of the cheap registers. This could be much faster for very
// large blocks on targets with many physical registers.
- Prio = Indexes->getZeroIndex().getInstrDistance(LI->beginIndex());
+ Prio = Indexes->getZeroIndex().getInstrDistance(LI->endIndex());
}
- }
- else {
+ Prio |= RC.AllocationPriority << 24;
+ } else {
// Allocate global and split ranges in long->short order. Long ranges that
// don't fit should be spilled (or split) ASAP so they don't create
// interference. Mark a bit to prioritize global above local ranges.
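The RegAllocGreedy change ORs the register class's AllocationPriority into bits 24 and up of the enqueue priority, so a class marked higher priority is dequeued before others whose size-derived priority is similar. A quick worked example of that bit layout, assuming the size component stays below 1 << 24 (the AllocationPriority values below are made up):

#include <cstdio>

int main() {
  // Size/locality component of the priority, assumed to stay below 1 << 24.
  unsigned SizePrio = 0x012345;
  // Hypothetical AllocationPriority values for two register classes.
  unsigned LowPrioRC = 0, HighPrioRC = 3;

  unsigned PrioLow = SizePrio | (LowPrioRC << 24);   // 0x00012345
  unsigned PrioHigh = SizePrio | (HighPrioRC << 24); // 0x03012345
  std::printf("%#x %#x\n", PrioLow, PrioHigh); // the high-priority class wins
  return 0;
}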
diff --git a/lib/CodeGen/RegisterCoalescer.cpp b/lib/CodeGen/RegisterCoalescer.cpp
index 9e3cf41..5d958a6 100644
--- a/lib/CodeGen/RegisterCoalescer.cpp
+++ b/lib/CodeGen/RegisterCoalescer.cpp
@@ -2731,15 +2731,19 @@ bool RegisterCoalescer::applyTerminalRule(const MachineInstr &Copy) const {
assert(Copy.isCopyLike());
if (!UseTerminalRule)
return false;
+ unsigned DstReg, DstSubReg, SrcReg, SrcSubReg;
+ isMoveInstr(*TRI, &Copy, SrcReg, DstReg, SrcSubReg, DstSubReg);
// Check if the destination of this copy has any other affinity.
- unsigned DstReg = Copy.getOperand(0).getReg();
if (TargetRegisterInfo::isPhysicalRegister(DstReg) ||
+ // If SrcReg is a physical register, the copy won't be coalesced.
+ // Ignoring it may have other side effects (like missing
+ // rematerialization). So keep it.
+ TargetRegisterInfo::isPhysicalRegister(SrcReg) ||
!isTerminalReg(DstReg, Copy, MRI))
return false;
// DstReg is a terminal node. Check if it interferes with any other
// copy involving SrcReg.
- unsigned SrcReg = Copy.getOperand(1).getReg();
const MachineBasicBlock *OrigBB = Copy.getParent();
const LiveInterval &DstLI = LIS->getInterval(DstReg);
for (const MachineInstr &MI : MRI->reg_nodbg_instructions(SrcReg)) {
@@ -2751,9 +2755,11 @@ bool RegisterCoalescer::applyTerminalRule(const MachineInstr &Copy) const {
// For now, just consider the copies that are in the same block.
if (&MI == &Copy || !MI.isCopyLike() || MI.getParent() != OrigBB)
continue;
- unsigned OtherReg = MI.getOperand(0).getReg();
+ unsigned OtherReg, OtherSubReg, OtherSrcReg, OtherSrcSubReg;
+ isMoveInstr(*TRI, &Copy, OtherSrcReg, OtherReg, OtherSrcSubReg,
+ OtherSubReg);
if (OtherReg == SrcReg)
- OtherReg = MI.getOperand(1).getReg();
+ OtherReg = OtherSrcReg;
// Check if OtherReg is a non-terminal.
if (TargetRegisterInfo::isPhysicalRegister(OtherReg) ||
isTerminalReg(OtherReg, MI, MRI))
@@ -2775,25 +2781,45 @@ RegisterCoalescer::copyCoalesceInMBB(MachineBasicBlock *MBB) {
// yet, it might invalidate the iterator.
const unsigned PrevSize = WorkList.size();
if (JoinGlobalCopies) {
+ SmallVector<MachineInstr*, 2> LocalTerminals;
+ SmallVector<MachineInstr*, 2> GlobalTerminals;
// Coalesce copies bottom-up to coalesce local defs before local uses. They
// are not inherently easier to resolve, but slightly preferable until we
// have local live range splitting. In particular this is required by
// cmp+jmp macro fusion.
for (MachineBasicBlock::iterator MII = MBB->begin(), E = MBB->end();
MII != E; ++MII) {
- if (!MII->isCopyLike() || applyTerminalRule(*MII))
+ if (!MII->isCopyLike())
continue;
- if (isLocalCopy(&(*MII), LIS))
- LocalWorkList.push_back(&(*MII));
- else
- WorkList.push_back(&(*MII));
+ bool ApplyTerminalRule = applyTerminalRule(*MII);
+ if (isLocalCopy(&(*MII), LIS)) {
+ if (ApplyTerminalRule)
+ LocalTerminals.push_back(&(*MII));
+ else
+ LocalWorkList.push_back(&(*MII));
+ } else {
+ if (ApplyTerminalRule)
+ GlobalTerminals.push_back(&(*MII));
+ else
+ WorkList.push_back(&(*MII));
+ }
}
+ // Append the copies evicted by the terminal rule at the end of the list.
+ LocalWorkList.append(LocalTerminals.begin(), LocalTerminals.end());
+ WorkList.append(GlobalTerminals.begin(), GlobalTerminals.end());
}
else {
+ SmallVector<MachineInstr*, 2> Terminals;
for (MachineBasicBlock::iterator MII = MBB->begin(), E = MBB->end();
MII != E; ++MII)
- if (MII->isCopyLike() && !applyTerminalRule(*MII))
- WorkList.push_back(MII);
+ if (MII->isCopyLike()) {
+ if (applyTerminalRule(*MII))
+ Terminals.push_back(&(*MII));
+ else
+ WorkList.push_back(MII);
+ }
+ // Append the copies evicted by the terminal rule at the end of the list.
+ WorkList.append(Terminals.begin(), Terminals.end());
}
// Try coalescing the collected copies immediately, and remove the nulls.
// This prevents the WorkList from getting too large since most copies are
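Rather than skipping copies that hit the terminal rule, copyCoalesceInMBB now defers them: copy-like instructions are partitioned into the regular worklist and a terminals list, and the terminals are appended at the end so they are still coalesced, just after everything else. A minimal sketch of that partition-then-append ordering (the strings stand in for MachineInstr pointers):

#include <functional>
#include <string>
#include <vector>

std::vector<std::string>
orderWork(const std::vector<std::string> &Copies,
          const std::function<bool(const std::string &)> &IsTerminal) {
  std::vector<std::string> WorkList, Terminals;
  for (const auto &C : Copies)
    (IsTerminal(C) ? Terminals : WorkList).push_back(C);
  // Terminal-rule copies go last so they cannot shadow the other candidates.
  WorkList.insert(WorkList.end(), Terminals.begin(), Terminals.end());
  return WorkList;
}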
diff --git a/lib/CodeGen/SelectionDAG/DAGCombiner.cpp b/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
index a1c84c5..22fd6d6 100644
--- a/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
+++ b/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
@@ -705,16 +705,8 @@ static bool isConstantSplatVector(SDNode *N, APInt& SplatValue) {
EltVT.getSizeInBits() >= SplatBitSize);
}
-// \brief Returns the SDNode if it is a constant BuildVector or constant.
-static SDNode *isConstantBuildVectorOrConstantInt(SDValue N) {
- if (isa<ConstantSDNode>(N))
- return N.getNode();
- BuildVectorSDNode *BV = dyn_cast<BuildVectorSDNode>(N);
- if (BV && BV->isConstant())
- return BV;
- return nullptr;
-}
-
+// \brief Returns the SDNode if it is a constant integer BuildVector
+// or constant integer.
static SDNode *isConstantIntBuildVectorOrConstantInt(SDValue N) {
if (isa<ConstantSDNode>(N))
return N.getNode();
@@ -723,6 +715,8 @@ static SDNode *isConstantIntBuildVectorOrConstantInt(SDValue N) {
return nullptr;
}
+// \brief Returns the SDNode if it is a constant float BuildVector
+// or constant float.
static SDNode *isConstantFPBuildVectorOrConstantFP(SDValue N) {
if (isa<ConstantFPSDNode>(N))
return N.getNode();
@@ -773,8 +767,8 @@ SDValue DAGCombiner::ReassociateOps(unsigned Opc, SDLoc DL,
SDValue N0, SDValue N1) {
EVT VT = N0.getValueType();
if (N0.getOpcode() == Opc) {
- if (SDNode *L = isConstantBuildVectorOrConstantInt(N0.getOperand(1))) {
- if (SDNode *R = isConstantBuildVectorOrConstantInt(N1)) {
+ if (SDNode *L = isConstantIntBuildVectorOrConstantInt(N0.getOperand(1))) {
+ if (SDNode *R = isConstantIntBuildVectorOrConstantInt(N1)) {
// reassoc. (op (op x, c1), c2) -> (op x, (op c1, c2))
if (SDValue OpNode = DAG.FoldConstantArithmetic(Opc, VT, L, R))
return DAG.getNode(Opc, DL, VT, N0.getOperand(0), OpNode);
@@ -793,8 +787,8 @@ SDValue DAGCombiner::ReassociateOps(unsigned Opc, SDLoc DL,
}
if (N1.getOpcode() == Opc) {
- if (SDNode *R = isConstantBuildVectorOrConstantInt(N1.getOperand(1))) {
- if (SDNode *L = isConstantBuildVectorOrConstantInt(N0)) {
+ if (SDNode *R = isConstantIntBuildVectorOrConstantInt(N1.getOperand(1))) {
+ if (SDNode *L = isConstantIntBuildVectorOrConstantInt(N0)) {
// reassoc. (op c2, (op x, c1)) -> (op x, (op c1, c2))
if (SDValue OpNode = DAG.FoldConstantArithmetic(Opc, VT, R, L))
return DAG.getNode(Opc, DL, VT, N1.getOperand(0), OpNode);
@@ -1583,8 +1577,8 @@ SDValue DAGCombiner::visitADD(SDNode *N) {
// fold vector ops
if (VT.isVector()) {
- SDValue FoldedVOp = SimplifyVBinOp(N);
- if (FoldedVOp.getNode()) return FoldedVOp;
+ if (SDValue FoldedVOp = SimplifyVBinOp(N))
+ return FoldedVOp;
// fold (add x, 0) -> x, vector edition
if (ISD::isBuildVectorAllZeros(N1.getNode()))
@@ -1604,7 +1598,8 @@ SDValue DAGCombiner::visitADD(SDNode *N) {
if (N0C && N1C)
return DAG.FoldConstantArithmetic(ISD::ADD, VT, N0C, N1C);
// canonicalize constant to RHS
- if (N0C && !N1C)
+ if (isConstantIntBuildVectorOrConstantInt(N0) &&
+ !isConstantIntBuildVectorOrConstantInt(N1))
return DAG.getNode(ISD::ADD, SDLoc(N), VT, N1, N0);
// fold (add x, 0) -> x
if (N1C && N1C->isNullValue())
@@ -1624,8 +1619,7 @@ SDValue DAGCombiner::visitADD(SDNode *N) {
N0C->getAPIntValue(), VT),
N0.getOperand(1));
// reassociate add
- SDValue RADD = ReassociateOps(ISD::ADD, SDLoc(N), N0, N1);
- if (RADD.getNode())
+ if (SDValue RADD = ReassociateOps(ISD::ADD, SDLoc(N), N0, N1))
return RADD;
// fold ((0-A) + B) -> B-A
if (N0.getOpcode() == ISD::SUB && isa<ConstantSDNode>(N0.getOperand(0)) &&
@@ -1828,8 +1822,8 @@ SDValue DAGCombiner::visitSUB(SDNode *N) {
// fold vector ops
if (VT.isVector()) {
- SDValue FoldedVOp = SimplifyVBinOp(N);
- if (FoldedVOp.getNode()) return FoldedVOp;
+ if (SDValue FoldedVOp = SimplifyVBinOp(N))
+ return FoldedVOp;
// fold (sub x, 0) -> x, vector edition
if (ISD::isBuildVectorAllZeros(N1.getNode()))
@@ -1984,26 +1978,27 @@ SDValue DAGCombiner::visitMUL(SDNode *N) {
APInt ConstValue0, ConstValue1;
// fold vector ops
if (VT.isVector()) {
- SDValue FoldedVOp = SimplifyVBinOp(N);
- if (FoldedVOp.getNode()) return FoldedVOp;
+ if (SDValue FoldedVOp = SimplifyVBinOp(N))
+ return FoldedVOp;
N0IsConst = isConstantSplatVector(N0.getNode(), ConstValue0);
N1IsConst = isConstantSplatVector(N1.getNode(), ConstValue1);
} else {
- N0IsConst = dyn_cast<ConstantSDNode>(N0) != nullptr;
- ConstValue0 = N0IsConst ? (dyn_cast<ConstantSDNode>(N0))->getAPIntValue()
- : APInt();
- N1IsConst = dyn_cast<ConstantSDNode>(N1) != nullptr;
- ConstValue1 = N1IsConst ? (dyn_cast<ConstantSDNode>(N1))->getAPIntValue()
- : APInt();
+ N0IsConst = isa<ConstantSDNode>(N0);
+ if (N0IsConst)
+ ConstValue0 = cast<ConstantSDNode>(N0)->getAPIntValue();
+ N1IsConst = isa<ConstantSDNode>(N1);
+ if (N1IsConst)
+ ConstValue1 = cast<ConstantSDNode>(N1)->getAPIntValue();
}
// fold (mul c1, c2) -> c1*c2
if (N0IsConst && N1IsConst)
return DAG.FoldConstantArithmetic(ISD::MUL, VT, N0.getNode(), N1.getNode());
- // canonicalize constant to RHS
- if (N0IsConst && !N1IsConst)
+ // canonicalize constant to RHS (the vector constant need not be a splat)
+ if (isConstantIntBuildVectorOrConstantInt(N0) &&
+ !isConstantIntBuildVectorOrConstantInt(N1))
return DAG.getNode(ISD::MUL, SDLoc(N), VT, N1, N0);
// fold (mul x, 0) -> 0
if (N1IsConst && ConstValue1 == 0)
@@ -2083,8 +2078,7 @@ SDValue DAGCombiner::visitMUL(SDNode *N) {
N0.getOperand(1), N1));
// reassociate mul
- SDValue RMUL = ReassociateOps(ISD::MUL, SDLoc(N), N0, N1);
- if (RMUL.getNode())
+ if (SDValue RMUL = ReassociateOps(ISD::MUL, SDLoc(N), N0, N1))
return RMUL;
return SDValue();
@@ -2096,10 +2090,9 @@ SDValue DAGCombiner::visitSDIV(SDNode *N) {
EVT VT = N->getValueType(0);
// fold vector ops
- if (VT.isVector()) {
- SDValue FoldedVOp = SimplifyVBinOp(N);
- if (FoldedVOp.getNode()) return FoldedVOp;
- }
+ if (VT.isVector())
+ if (SDValue FoldedVOp = SimplifyVBinOp(N))
+ return FoldedVOp;
// fold (sdiv c1, c2) -> c1/c2
ConstantSDNode *N0C = isConstOrConstSplat(N0);
@@ -2163,7 +2156,7 @@ SDValue DAGCombiner::visitSDIV(SDNode *N) {
return DAG.getNode(ISD::SUB, SDLoc(N), VT, DAG.getConstant(0, VT), SRA);
}
- // if integer divide is expensive and we satisfy the requirements, emit an
+ // If integer divide is expensive and we satisfy the requirements, emit an
// alternate sequence.
if (N1C && !TLI.isIntDivCheap()) {
SDValue Op = BuildSDIV(N);
@@ -2186,10 +2179,9 @@ SDValue DAGCombiner::visitUDIV(SDNode *N) {
EVT VT = N->getValueType(0);
// fold vector ops
- if (VT.isVector()) {
- SDValue FoldedVOp = SimplifyVBinOp(N);
- if (FoldedVOp.getNode()) return FoldedVOp;
- }
+ if (VT.isVector())
+ if (SDValue FoldedVOp = SimplifyVBinOp(N))
+ return FoldedVOp;
// fold (udiv c1, c2) -> c1/c2
ConstantSDNode *N0C = isConstOrConstSplat(N0);
@@ -2459,8 +2451,8 @@ SDValue DAGCombiner::visitSMUL_LOHI(SDNode *N) {
EVT VT = N->getValueType(0);
SDLoc DL(N);
- // If the type twice as wide is legal, transform the mulhu to a wider multiply
- // plus a shift.
+ // If the type twice as wide is legal, transform the mulhu to a wider
+ // multiply plus a shift.
if (VT.isSimple() && !VT.isVector()) {
MVT Simple = VT.getSimpleVT();
unsigned SimpleSize = Simple.getSizeInBits();
@@ -2489,8 +2481,8 @@ SDValue DAGCombiner::visitUMUL_LOHI(SDNode *N) {
EVT VT = N->getValueType(0);
SDLoc DL(N);
- // If the type twice as wide is legal, transform the mulhu to a wider multiply
- // plus a shift.
+ // If the type twice as wide is legal, transform the mulhu to a wider
+ // multiply plus a shift.
if (VT.isSimple() && !VT.isVector()) {
MVT Simple = VT.getSimpleVT();
unsigned SimpleSize = Simple.getSizeInBits();
@@ -2809,8 +2801,8 @@ SDValue DAGCombiner::visitAND(SDNode *N) {
// fold vector ops
if (VT.isVector()) {
- SDValue FoldedVOp = SimplifyVBinOp(N);
- if (FoldedVOp.getNode()) return FoldedVOp;
+ if (SDValue FoldedVOp = SimplifyVBinOp(N))
+ return FoldedVOp;
// fold (and x, 0) -> 0, vector edition
if (ISD::isBuildVectorAllZeros(N0.getNode()))
@@ -2839,7 +2831,8 @@ SDValue DAGCombiner::visitAND(SDNode *N) {
if (N0C && N1C)
return DAG.FoldConstantArithmetic(ISD::AND, VT, N0C, N1C);
// canonicalize constant to RHS
- if (N0C && !N1C)
+ if (isConstantIntBuildVectorOrConstantInt(N0) &&
+ !isConstantIntBuildVectorOrConstantInt(N1))
return DAG.getNode(ISD::AND, SDLoc(N), VT, N1, N0);
// fold (and x, -1) -> x
if (N1C && N1C->isAllOnesValue())
@@ -2850,8 +2843,7 @@ SDValue DAGCombiner::visitAND(SDNode *N) {
APInt::getAllOnesValue(BitWidth)))
return DAG.getConstant(0, VT);
// reassociate and
- SDValue RAND = ReassociateOps(ISD::AND, SDLoc(N), N0, N1);
- if (RAND.getNode())
+ if (SDValue RAND = ReassociateOps(ISD::AND, SDLoc(N), N0, N1))
return RAND;
// fold (and (or x, C), D) -> D if (C & D) == D
if (N1C && N0.getOpcode() == ISD::OR)
@@ -3470,8 +3462,8 @@ SDValue DAGCombiner::visitOR(SDNode *N) {
// fold vector ops
if (VT.isVector()) {
- SDValue FoldedVOp = SimplifyVBinOp(N);
- if (FoldedVOp.getNode()) return FoldedVOp;
+ if (SDValue FoldedVOp = SimplifyVBinOp(N))
+ return FoldedVOp;
// fold (or x, 0) -> x, vector edition
if (ISD::isBuildVectorAllZeros(N0.getNode()))
@@ -3556,7 +3548,8 @@ SDValue DAGCombiner::visitOR(SDNode *N) {
if (N0C && N1C)
return DAG.FoldConstantArithmetic(ISD::OR, VT, N0C, N1C);
// canonicalize constant to RHS
- if (N0C && !N1C)
+ if (isConstantIntBuildVectorOrConstantInt(N0) &&
+ !isConstantIntBuildVectorOrConstantInt(N1))
return DAG.getNode(ISD::OR, SDLoc(N), VT, N1, N0);
// fold (or x, 0) -> x
if (N1C && N1C->isNullValue())
@@ -3580,8 +3573,7 @@ SDValue DAGCombiner::visitOR(SDNode *N) {
return BSwap;
// reassociate or
- SDValue ROR = ReassociateOps(ISD::OR, SDLoc(N), N0, N1);
- if (ROR.getNode())
+ if (SDValue ROR = ReassociateOps(ISD::OR, SDLoc(N), N0, N1))
return ROR;
// Canonicalize (or (and X, c1), c2) -> (and (or X, c2), c1|c2)
// iff (c1 & c2) == 0.
@@ -3875,8 +3867,8 @@ SDValue DAGCombiner::visitXOR(SDNode *N) {
// fold vector ops
if (VT.isVector()) {
- SDValue FoldedVOp = SimplifyVBinOp(N);
- if (FoldedVOp.getNode()) return FoldedVOp;
+ if (SDValue FoldedVOp = SimplifyVBinOp(N))
+ return FoldedVOp;
// fold (xor x, 0) -> x, vector edition
if (ISD::isBuildVectorAllZeros(N0.getNode()))
@@ -3899,14 +3891,14 @@ SDValue DAGCombiner::visitXOR(SDNode *N) {
if (N0C && N1C)
return DAG.FoldConstantArithmetic(ISD::XOR, VT, N0C, N1C);
// canonicalize constant to RHS
- if (N0C && !N1C)
+ if (isConstantIntBuildVectorOrConstantInt(N0) &&
+ !isConstantIntBuildVectorOrConstantInt(N1))
return DAG.getNode(ISD::XOR, SDLoc(N), VT, N1, N0);
// fold (xor x, 0) -> x
if (N1C && N1C->isNullValue())
return N0;
// reassociate xor
- SDValue RXOR = ReassociateOps(ISD::XOR, SDLoc(N), N0, N1);
- if (RXOR.getNode())
+ if (SDValue RXOR = ReassociateOps(ISD::XOR, SDLoc(N), N0, N1))
return RXOR;
// fold !(x cc y) -> (x !cc y)
@@ -4152,8 +4144,8 @@ SDValue DAGCombiner::visitSHL(SDNode *N) {
// fold vector ops
ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1);
if (VT.isVector()) {
- SDValue FoldedVOp = SimplifyVBinOp(N);
- if (FoldedVOp.getNode()) return FoldedVOp;
+ if (SDValue FoldedVOp = SimplifyVBinOp(N))
+ return FoldedVOp;
BuildVectorSDNode *N1CV = dyn_cast<BuildVectorSDNode>(N1);
// If setcc produces all-one true value then:
@@ -4332,8 +4324,8 @@ SDValue DAGCombiner::visitSRA(SDNode *N) {
// fold vector ops
ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1);
if (VT.isVector()) {
- SDValue FoldedVOp = SimplifyVBinOp(N);
- if (FoldedVOp.getNode()) return FoldedVOp;
+ if (SDValue FoldedVOp = SimplifyVBinOp(N))
+ return FoldedVOp;
N1C = isConstOrConstSplat(N1);
}
@@ -4478,8 +4470,8 @@ SDValue DAGCombiner::visitSRL(SDNode *N) {
// fold vector ops
ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1);
if (VT.isVector()) {
- SDValue FoldedVOp = SimplifyVBinOp(N);
- if (FoldedVOp.getNode()) return FoldedVOp;
+ if (SDValue FoldedVOp = SimplifyVBinOp(N))
+ return FoldedVOp;
N1C = isConstOrConstSplat(N1);
}
@@ -4889,7 +4881,7 @@ SDValue DAGCombiner::visitSELECT(SDNode *N) {
SDValue N1_0 = N1->getOperand(0);
SDValue N1_1 = N1->getOperand(1);
SDValue N1_2 = N1->getOperand(2);
- if (N1_2 == N2) {
+ if (N1_2 == N2 && N0.getValueType() == N1_0.getValueType()) {
// Create the actual and node if we can generate good code for it.
if (!TLI.shouldNormalizeToSelectSequence(*DAG.getContext(), VT)) {
SDValue And = DAG.getNode(ISD::AND, SDLoc(N), N0.getValueType(),
@@ -4908,7 +4900,7 @@ SDValue DAGCombiner::visitSELECT(SDNode *N) {
SDValue N2_0 = N2->getOperand(0);
SDValue N2_1 = N2->getOperand(1);
SDValue N2_2 = N2->getOperand(2);
- if (N2_1 == N1) {
+ if (N2_1 == N1 && N0.getValueType() == N2_0.getValueType()) {
// Create the actual or node if we can generate good code for it.
if (!TLI.shouldNormalizeToSelectSequence(*DAG.getContext(), VT)) {
SDValue Or = DAG.getNode(ISD::OR, SDLoc(N), N0.getValueType(),
@@ -7037,7 +7029,6 @@ ConstantFoldBITCASTofBUILD_VECTOR(SDNode *BV, EVT DstEltVT) {
// Finally, this must be the case where we are shrinking elements: each input
// turns into multiple outputs.
- bool isS2V = ISD::isScalarToVector(BV);
unsigned NumOutputsPerInput = SrcBitSize/DstBitSize;
EVT VT = EVT::getVectorVT(*DAG.getContext(), DstEltVT,
NumOutputsPerInput*BV->getNumOperands());
@@ -7055,10 +7046,6 @@ ConstantFoldBITCASTofBUILD_VECTOR(SDNode *BV, EVT DstEltVT) {
for (unsigned j = 0; j != NumOutputsPerInput; ++j) {
APInt ThisVal = OpVal.trunc(DstBitSize);
Ops.push_back(DAG.getConstant(ThisVal, DstEltVT));
- if (isS2V && i == 0 && j == 0 && ThisVal.zext(SrcBitSize) == OpVal)
- // Simply turn this into a SCALAR_TO_VECTOR of the new type.
- return DAG.getNode(ISD::SCALAR_TO_VECTOR, SDLoc(BV), VT,
- Ops[0]);
OpVal = OpVal.lshr(DstBitSize);
}
@@ -7206,10 +7193,9 @@ SDValue DAGCombiner::visitFADD(SDNode *N) {
const TargetOptions &Options = DAG.getTarget().Options;
// fold vector ops
- if (VT.isVector()) {
- SDValue FoldedVOp = SimplifyVBinOp(N);
- if (FoldedVOp.getNode()) return FoldedVOp;
- }
+ if (VT.isVector())
+ if (SDValue FoldedVOp = SimplifyVBinOp(N))
+ return FoldedVOp;
// fold (fadd c1, c2) -> c1 + c2
if (N0CFP && N1CFP)
@@ -7400,10 +7386,9 @@ SDValue DAGCombiner::visitFSUB(SDNode *N) {
const TargetOptions &Options = DAG.getTarget().Options;
// fold vector ops
- if (VT.isVector()) {
- SDValue FoldedVOp = SimplifyVBinOp(N);
- if (FoldedVOp.getNode()) return FoldedVOp;
- }
+ if (VT.isVector())
+ if (SDValue FoldedVOp = SimplifyVBinOp(N))
+ return FoldedVOp;
// fold (fsub c1, c2) -> c1-c2
if (N0CFP && N1CFP)
@@ -7552,15 +7537,8 @@ SDValue DAGCombiner::visitFMUL(SDNode *N) {
// fold vector ops
if (VT.isVector()) {
// This just handles C1 * C2 for vectors. Other vector folds are below.
- SDValue FoldedVOp = SimplifyVBinOp(N);
- if (FoldedVOp.getNode())
+ if (SDValue FoldedVOp = SimplifyVBinOp(N))
return FoldedVOp;
- // Canonicalize vector constant to RHS.
- if (N0.getOpcode() == ISD::BUILD_VECTOR &&
- N1.getOpcode() != ISD::BUILD_VECTOR)
- if (auto *BV0 = dyn_cast<BuildVectorSDNode>(N0))
- if (BV0->isConstant())
- return DAG.getNode(N->getOpcode(), SDLoc(N), VT, N1, N0);
}
// fold (fmul c1, c2) -> c1*c2
@@ -7568,7 +7546,8 @@ SDValue DAGCombiner::visitFMUL(SDNode *N) {
return DAG.getNode(ISD::FMUL, SDLoc(N), VT, N0, N1);
// canonicalize constant to RHS
- if (N0CFP && !N1CFP)
+ if (isConstantFPBuildVectorOrConstantFP(N0) &&
+ !isConstantFPBuildVectorOrConstantFP(N1))
return DAG.getNode(ISD::FMUL, SDLoc(N), VT, N1, N0);
// fold (fmul A, 1.0) -> A
@@ -7734,10 +7713,9 @@ SDValue DAGCombiner::visitFDIV(SDNode *N) {
const TargetOptions &Options = DAG.getTarget().Options;
// fold vector ops
- if (VT.isVector()) {
- SDValue FoldedVOp = SimplifyVBinOp(N);
- if (FoldedVOp.getNode()) return FoldedVOp;
- }
+ if (VT.isVector())
+ if (SDValue FoldedVOp = SimplifyVBinOp(N))
+ return FoldedVOp;
// fold (fdiv c1, c2) -> c1/c2
if (N0CFP && N1CFP)
@@ -8216,11 +8194,10 @@ SDValue DAGCombiner::visitFP_EXTEND(SDNode *N) {
SDValue DAGCombiner::visitFCEIL(SDNode *N) {
SDValue N0 = N->getOperand(0);
- ConstantFPSDNode *N0CFP = dyn_cast<ConstantFPSDNode>(N0);
EVT VT = N->getValueType(0);
// fold (fceil c1) -> fceil(c1)
- if (N0CFP)
+ if (isConstantFPBuildVectorOrConstantFP(N0))
return DAG.getNode(ISD::FCEIL, SDLoc(N), VT, N0);
return SDValue();
@@ -8228,11 +8205,10 @@ SDValue DAGCombiner::visitFCEIL(SDNode *N) {
SDValue DAGCombiner::visitFTRUNC(SDNode *N) {
SDValue N0 = N->getOperand(0);
- ConstantFPSDNode *N0CFP = dyn_cast<ConstantFPSDNode>(N0);
EVT VT = N->getValueType(0);
// fold (ftrunc c1) -> ftrunc(c1)
- if (N0CFP)
+ if (isConstantFPBuildVectorOrConstantFP(N0))
return DAG.getNode(ISD::FTRUNC, SDLoc(N), VT, N0);
return SDValue();
@@ -8240,11 +8216,10 @@ SDValue DAGCombiner::visitFTRUNC(SDNode *N) {
SDValue DAGCombiner::visitFFLOOR(SDNode *N) {
SDValue N0 = N->getOperand(0);
- ConstantFPSDNode *N0CFP = dyn_cast<ConstantFPSDNode>(N0);
EVT VT = N->getValueType(0);
// fold (ffloor c1) -> ffloor(c1)
- if (N0CFP)
+ if (isConstantFPBuildVectorOrConstantFP(N0))
return DAG.getNode(ISD::FFLOOR, SDLoc(N), VT, N0);
return SDValue();
@@ -10080,19 +10055,19 @@ bool DAGCombiner::MergeStoresOfConstantsOrVecElts(
int64_t ElementSizeBytes = MemVT.getSizeInBits() / 8;
LSBaseSDNode *FirstInChain = StoreNodes[0].MemNode;
- unsigned EarliestNodeUsed = 0;
+ unsigned LatestNodeUsed = 0;
for (unsigned i=0; i < NumElem; ++i) {
// Find a chain for the new wide-store operand. Notice that some
// of the store nodes that we found may not be selected for inclusion
// in the wide store. The chain we use needs to be the chain of the
- // earliest store node which is *used* and replaced by the wide store.
- if (StoreNodes[i].SequenceNum > StoreNodes[EarliestNodeUsed].SequenceNum)
- EarliestNodeUsed = i;
+ // latest store node which is *used* and replaced by the wide store.
+ if (StoreNodes[i].SequenceNum < StoreNodes[LatestNodeUsed].SequenceNum)
+ LatestNodeUsed = i;
}
- // The earliest Node in the DAG.
- LSBaseSDNode *EarliestOp = StoreNodes[EarliestNodeUsed].MemNode;
+ // The latest Node in the DAG.
+ LSBaseSDNode *LatestOp = StoreNodes[LatestNodeUsed].MemNode;
SDLoc DL(StoreNodes[0].MemNode);
SDValue StoredVal;
@@ -10151,17 +10126,17 @@ bool DAGCombiner::MergeStoresOfConstantsOrVecElts(
StoredVal = DAG.getConstant(StoreInt, StoreTy);
}
- SDValue NewStore = DAG.getStore(EarliestOp->getChain(), DL, StoredVal,
+ SDValue NewStore = DAG.getStore(LatestOp->getChain(), DL, StoredVal,
FirstInChain->getBasePtr(),
FirstInChain->getPointerInfo(),
false, false,
FirstInChain->getAlignment());
- // Replace the first store with the new store
- CombineTo(EarliestOp, NewStore);
+ // Replace the last store with the new store
+ CombineTo(LatestOp, NewStore);
// Erase all other stores.
for (unsigned i = 0; i < NumElem ; ++i) {
- if (StoreNodes[i].MemNode == EarliestOp)
+ if (StoreNodes[i].MemNode == LatestOp)
continue;
StoreSDNode *St = cast<StoreSDNode>(StoreNodes[i].MemNode);
// ReplaceAllUsesWith will replace all uses that existed when it was
@@ -10538,18 +10513,19 @@ bool DAGCombiner::MergeConsecutiveStores(StoreSDNode* St) {
if (NumElem < 2)
return false;
- // The earliest Node in the DAG.
- unsigned EarliestNodeUsed = 0;
- LSBaseSDNode *EarliestOp = StoreNodes[EarliestNodeUsed].MemNode;
+ // The latest Node in the DAG.
+ unsigned LatestNodeUsed = 0;
for (unsigned i=1; i<NumElem; ++i) {
// Find a chain for the new wide-store operand. Notice that some
// of the store nodes that we found may not be selected for inclusion
// in the wide store. The chain we use needs to be the chain of the
- // earliest store node which is *used* and replaced by the wide store.
- if (StoreNodes[i].SequenceNum > StoreNodes[EarliestNodeUsed].SequenceNum)
- EarliestNodeUsed = i;
+ // latest store node which is *used* and replaced by the wide store.
+ if (StoreNodes[i].SequenceNum < StoreNodes[LatestNodeUsed].SequenceNum)
+ LatestNodeUsed = i;
}
+ LSBaseSDNode *LatestOp = StoreNodes[LatestNodeUsed].MemNode;
+
// Find if it is better to use vectors or integers to load and store
// to memory.
EVT JointMemOpVT;
@@ -10571,7 +10547,7 @@ bool DAGCombiner::MergeConsecutiveStores(StoreSDNode* St) {
false, false, false,
FirstLoad->getAlignment());
- SDValue NewStore = DAG.getStore(EarliestOp->getChain(), StoreDL, NewLoad,
+ SDValue NewStore = DAG.getStore(LatestOp->getChain(), StoreDL, NewLoad,
FirstInChain->getBasePtr(),
FirstInChain->getPointerInfo(), false, false,
FirstInChain->getAlignment());
@@ -10589,12 +10565,12 @@ bool DAGCombiner::MergeConsecutiveStores(StoreSDNode* St) {
DAG.ReplaceAllUsesOfValueWith(SDValue(Ld, 1), Ld->getChain());
}
- // Replace the first store with the new store.
- CombineTo(EarliestOp, NewStore);
+ // Replace the last store with the new store.
+ CombineTo(LatestOp, NewStore);
// Erase all other stores.
for (unsigned i = 0; i < NumElem ; ++i) {
// Remove all Store nodes.
- if (StoreNodes[i].MemNode == EarliestOp)
+ if (StoreNodes[i].MemNode == LatestOp)
continue;
StoreSDNode *St = cast<StoreSDNode>(StoreNodes[i].MemNode);
DAG.ReplaceAllUsesOfValueWith(SDValue(St, 0), St->getChain());
@@ -11523,6 +11499,68 @@ SDValue DAGCombiner::visitBUILD_VECTOR(SDNode *N) {
return SDValue();
}
+static SDValue combineConcatVectorOfScalars(SDNode *N, SelectionDAG &DAG) {
+ const TargetLowering &TLI = DAG.getTargetLoweringInfo();
+ EVT OpVT = N->getOperand(0).getValueType();
+
+ // If the operands are legal vectors, leave them alone.
+ if (TLI.isTypeLegal(OpVT))
+ return SDValue();
+
+ SDLoc DL(N);
+ EVT VT = N->getValueType(0);
+ SmallVector<SDValue, 8> Ops;
+
+ EVT SVT = EVT::getIntegerVT(*DAG.getContext(), OpVT.getSizeInBits());
+ SDValue ScalarUndef = DAG.getNode(ISD::UNDEF, DL, SVT);
+
+ // Keep track of what we encounter.
+ bool AnyInteger = false;
+ bool AnyFP = false;
+ for (const SDValue &Op : N->ops()) {
+ if (ISD::BITCAST == Op.getOpcode() &&
+ !Op.getOperand(0).getValueType().isVector())
+ Ops.push_back(Op.getOperand(0));
+ else if (ISD::UNDEF == Op.getOpcode())
+ Ops.push_back(ScalarUndef);
+ else
+ return SDValue();
+
+ // Note whether we encounter an integer or floating point scalar.
+ // If it's neither, bail out, it could be something weird like x86mmx.
+ EVT LastOpVT = Ops.back().getValueType();
+ if (LastOpVT.isFloatingPoint())
+ AnyFP = true;
+ else if (LastOpVT.isInteger())
+ AnyInteger = true;
+ else
+ return SDValue();
+ }
+
+ // If any of the operands is a floating point scalar bitcast to a vector,
+ // use floating point types throughout, and bitcast everything.
+ // Replace UNDEFs by another scalar UNDEF node, of the final desired type.
+ if (AnyFP) {
+ SVT = EVT::getFloatingPointVT(OpVT.getSizeInBits());
+ ScalarUndef = DAG.getNode(ISD::UNDEF, DL, SVT);
+ if (AnyInteger) {
+ for (SDValue &Op : Ops) {
+ if (Op.getValueType() == SVT)
+ continue;
+ if (Op.getOpcode() == ISD::UNDEF)
+ Op = ScalarUndef;
+ else
+ Op = DAG.getNode(ISD::BITCAST, DL, SVT, Op);
+ }
+ }
+ }
+
+ EVT VecVT = EVT::getVectorVT(*DAG.getContext(), SVT,
+ VT.getSizeInBits() / SVT.getSizeInBits());
+ return DAG.getNode(ISD::BITCAST, DL, VT,
+ DAG.getNode(ISD::BUILD_VECTOR, DL, VecVT, Ops));
+}
+
SDValue DAGCombiner::visitCONCAT_VECTORS(SDNode *N) {
// TODO: Check to see if this is a CONCAT_VECTORS of a bunch of
// EXTRACT_SUBVECTOR operations. If so, and if the EXTRACT_SUBVECTOR vector
@@ -11538,9 +11576,10 @@ SDValue DAGCombiner::visitCONCAT_VECTORS(SDNode *N) {
if (ISD::allOperandsUndef(N))
return DAG.getUNDEF(VT);
- // Optimize concat_vectors where one of the vectors is undef.
- if (N->getNumOperands() == 2 &&
- N->getOperand(1)->getOpcode() == ISD::UNDEF) {
+ // Optimize concat_vectors where all but the first of the vectors are undef.
+ if (std::all_of(std::next(N->op_begin()), N->op_end(), [](const SDValue &Op) {
+ return Op.getOpcode() == ISD::UNDEF;
+ })) {
SDValue In = N->getOperand(0);
assert(In.getValueType().isVector() && "Must concat vectors");
@@ -11548,6 +11587,15 @@ SDValue DAGCombiner::visitCONCAT_VECTORS(SDNode *N) {
if (In->getOpcode() == ISD::BITCAST &&
!In->getOperand(0)->getValueType(0).isVector()) {
SDValue Scalar = In->getOperand(0);
+
+ // If the bitcast type isn't legal, it might be a trunc of a legal type;
+ // look through the trunc so we can still do the transform:
+ // concat_vectors(trunc(scalar), undef) -> scalar_to_vector(scalar)
+ if (Scalar->getOpcode() == ISD::TRUNCATE &&
+ !TLI.isTypeLegal(Scalar.getValueType()) &&
+ TLI.isTypeLegal(Scalar->getOperand(0).getValueType()))
+ Scalar = Scalar->getOperand(0);
+
EVT SclTy = Scalar->getValueType(0);
if (!SclTy.isFloatingPoint() && !SclTy.isInteger())
@@ -11615,6 +11663,10 @@ SDValue DAGCombiner::visitCONCAT_VECTORS(SDNode *N) {
return DAG.getNode(ISD::BUILD_VECTOR, SDLoc(N), VT, Opnds);
}
+ // Fold CONCAT_VECTORS of only bitcast scalars (or undef) to BUILD_VECTOR.
+ if (SDValue V = combineConcatVectorOfScalars(N, DAG))
+ return V;
+
// Type legalization of vectors and DAG canonicalization of SHUFFLE_VECTOR
// nodes often generate nop CONCAT_VECTOR nodes.
// Scan the CONCAT_VECTOR operands and look for a CONCAT operations that
@@ -11676,7 +11728,7 @@ SDValue DAGCombiner::visitEXTRACT_SUBVECTOR(SDNode* N) {
// type.
if (V->getOperand(0).getValueType() != NVT)
return SDValue();
- unsigned Idx = dyn_cast<ConstantSDNode>(N->getOperand(1))->getZExtValue();
+ unsigned Idx = N->getConstantOperandVal(1);
unsigned NumElems = NVT.getVectorNumElements();
assert((Idx % NumElems) == 0 &&
"IDX in concat is not a multiple of the result vector length.");
@@ -12001,6 +12053,43 @@ SDValue DAGCombiner::visitVECTOR_SHUFFLE(SDNode *N) {
return V;
}
+ // Attempt to combine a shuffle of 2 inputs of 'scalar sources' -
+ // BUILD_VECTOR or SCALAR_TO_VECTOR into a single BUILD_VECTOR.
+ if (Level < AfterLegalizeVectorOps && TLI.isTypeLegal(VT)) {
+ SmallVector<SDValue, 8> Ops;
+ for (int M : SVN->getMask()) {
+ SDValue Op = DAG.getUNDEF(VT.getScalarType());
+ if (M >= 0) {
+ int Idx = M % NumElts;
+ SDValue &S = (M < (int)NumElts ? N0 : N1);
+ if (S.getOpcode() == ISD::BUILD_VECTOR && S.hasOneUse()) {
+ Op = S.getOperand(Idx);
+ } else if (S.getOpcode() == ISD::SCALAR_TO_VECTOR && S.hasOneUse()) {
+ if (Idx == 0)
+ Op = S.getOperand(0);
+ } else {
+ // Operand can't be combined - bail out.
+ break;
+ }
+ }
+ Ops.push_back(Op);
+ }
+ if (Ops.size() == VT.getVectorNumElements()) {
+ // BUILD_VECTOR requires all inputs to be of the same type, find the
+ // maximum type and extend them all.
+ EVT SVT = VT.getScalarType();
+ if (SVT.isInteger())
+ for (SDValue &Op : Ops)
+ SVT = (SVT.bitsLT(Op.getValueType()) ? Op.getValueType() : SVT);
+ if (SVT != VT.getScalarType())
+ for (SDValue &Op : Ops)
+ Op = TLI.isZExtFree(Op.getValueType(), SVT)
+ ? DAG.getZExtOrTrunc(Op, SDLoc(N), SVT)
+ : DAG.getSExtOrTrunc(Op, SDLoc(N), SVT);
+ return DAG.getNode(ISD::BUILD_VECTOR, SDLoc(N), VT, Ops);
+ }
+ }
+
// If this shuffle only has a single input that is a bitcasted shuffle,
// attempt to merge the 2 shuffles and suitably bitcast the inputs/output
// back to their original types.
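Several DAGCombiner hunks above widen the constant-to-RHS canonicalization for ADD, MUL, AND, OR and XOR from scalar ConstantSDNodes to any constant integer BUILD_VECTOR (splat or not), so 'op C, X' becomes 'op X, C' for vectors too and later folds only have to look at the right-hand side. A toy sketch of the rule, with a plain struct standing in for SDValue:

#include <utility>
#include <vector>

struct Expr {
  bool IsConstant;           // constant scalar or all-constant BUILD_VECTOR
  std::vector<int> Elements; // payload, not consulted by the rule itself
};

// Canonicalize a commutative op so any constant operand ends up on the right.
std::pair<Expr, Expr> canonicalizeOperands(Expr LHS, Expr RHS) {
  if (LHS.IsConstant && !RHS.IsConstant)
    std::swap(LHS, RHS);
  return {LHS, RHS};
}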
diff --git a/lib/CodeGen/SelectionDAG/FastISel.cpp b/lib/CodeGen/SelectionDAG/FastISel.cpp
index 223a149..5ffb826 100644
--- a/lib/CodeGen/SelectionDAG/FastISel.cpp
+++ b/lib/CodeGen/SelectionDAG/FastISel.cpp
@@ -425,7 +425,7 @@ bool FastISel::selectBinaryOp(const User *I, unsigned ISDOpcode) {
// Check if the second operand is a constant and handle it appropriately.
if (const auto *CI = dyn_cast<ConstantInt>(I->getOperand(1))) {
- uint64_t Imm = CI->getZExtValue();
+ uint64_t Imm = CI->getSExtValue();
// Transform "sdiv exact X, 8" -> "sra X, 3".
if (ISDOpcode == ISD::SDIV && isa<BinaryOperator>(I) &&
@@ -1079,11 +1079,16 @@ bool FastISel::selectIntrinsicCall(const IntrinsicInst *II) {
// The donothing intrinsic does, well, nothing.
case Intrinsic::donothing:
return true;
+ case Intrinsic::eh_actions: {
+ unsigned ResultReg = getRegForValue(UndefValue::get(II->getType()));
+ if (!ResultReg)
+ return false;
+ updateValueMap(II, ResultReg);
+ return true;
+ }
case Intrinsic::dbg_declare: {
const DbgDeclareInst *DI = cast<DbgDeclareInst>(II);
- DIVariable DIVar(DI->getVariable());
- assert((!DIVar || DIVar.isVariable()) &&
- "Variable in DbgDeclareInst should be either null or a DIVariable.");
+ DIVariable DIVar = DI->getVariable();
if (!DIVar || !FuncInfo.MF->getMMI().hasDebugInfo()) {
DEBUG(dbgs() << "Dropping debug info for " << *DI << "\n");
return true;
@@ -1124,6 +1129,8 @@ bool FastISel::selectIntrinsicCall(const IntrinsicInst *II) {
false);
if (Op) {
+ assert(DI->getVariable()->isValidLocationForIntrinsic(DbgLoc) &&
+ "Expected inlined-at fields to agree");
if (Op->isReg()) {
Op->setIsDebug(true);
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
@@ -1148,6 +1155,8 @@ bool FastISel::selectIntrinsicCall(const IntrinsicInst *II) {
const DbgValueInst *DI = cast<DbgValueInst>(II);
const MCInstrDesc &II = TII.get(TargetOpcode::DBG_VALUE);
const Value *V = DI->getValue();
+ assert(DI->getVariable()->isValidLocationForIntrinsic(DbgLoc) &&
+ "Expected inlined-at fields to agree");
if (!V) {
// Currently the optimizer can produce this; insert an undef to
// help debugging. Probably the optimizer should not do this.
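The FastISel hunk reads the second operand's immediate with getSExtValue instead of getZExtValue; presumably the point is that a narrow negative constant must reach the target hooks as a properly sign-extended 64-bit value rather than a zero-extended one. A short check of the difference for an i32 -1:

#include <cstdint>
#include <cstdio>

int main() {
  int32_t C = -1;
  uint64_t ZExt = static_cast<uint32_t>(C);                       // 0x00000000ffffffff
  uint64_t SExt = static_cast<uint64_t>(static_cast<int64_t>(C)); // 0xffffffffffffffff
  std::printf("%#llx %#llx\n", static_cast<unsigned long long>(ZExt),
              static_cast<unsigned long long>(SExt));
  return 0;
}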
diff --git a/lib/CodeGen/SelectionDAG/FunctionLoweringInfo.cpp b/lib/CodeGen/SelectionDAG/FunctionLoweringInfo.cpp
index 291b583..4b8ae32 100644
--- a/lib/CodeGen/SelectionDAG/FunctionLoweringInfo.cpp
+++ b/lib/CodeGen/SelectionDAG/FunctionLoweringInfo.cpp
@@ -20,6 +20,7 @@
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
+#include "llvm/CodeGen/WinEHFuncInfo.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugInfo.h"
#include "llvm/IR/DerivedTypes.h"
@@ -79,12 +80,35 @@ static ISD::NodeType getPreferredExtendForValue(const Value *V) {
return ExtendKind;
}
+namespace {
+struct WinEHNumbering {
+ WinEHNumbering(WinEHFuncInfo &FuncInfo) : FuncInfo(FuncInfo), NextState(0) {}
+
+ WinEHFuncInfo &FuncInfo;
+ int NextState;
+
+ SmallVector<ActionHandler *, 4> HandlerStack;
+ SmallPtrSet<const Function *, 4> VisitedHandlers;
+
+ int currentEHNumber() const {
+ return HandlerStack.empty() ? -1 : HandlerStack.back()->getEHState();
+ }
+
+ void createUnwindMapEntry(int ToState, ActionHandler *AH);
+ void createTryBlockMapEntry(int TryLow, int TryHigh,
+ ArrayRef<CatchHandler *> Handlers);
+ void processCallSite(ArrayRef<ActionHandler *> Actions, ImmutableCallSite CS);
+ void calculateStateNumbers(const Function &F);
+};
+}
+
void FunctionLoweringInfo::set(const Function &fn, MachineFunction &mf,
SelectionDAG *DAG) {
Fn = &fn;
MF = &mf;
TLI = MF->getSubtarget().getTargetLowering();
RegInfo = &MF->getRegInfo();
+ MachineModuleInfo &MMI = MF->getMMI();
// Check whether the function can return without sret-demotion.
SmallVector<ISD::OutputArg, 4> Outs;
@@ -178,13 +202,8 @@ void FunctionLoweringInfo::set(const Function &fn, MachineFunction &mf,
// during the initial isel pass through the IR so that it is done
// in a predictable order.
if (const DbgDeclareInst *DI = dyn_cast<DbgDeclareInst>(I)) {
- MachineModuleInfo &MMI = MF->getMMI();
- DIVariable DIVar(DI->getVariable());
- assert((!DIVar || DIVar.isVariable()) &&
- "Variable in DbgDeclareInst should be either null or a DIVariable.");
- if (MMI.hasDebugInfo() &&
- DIVar &&
- !DI->getDebugLoc().isUnknown()) {
+ DIVariable DIVar = DI->getVariable();
+ if (MMI.hasDebugInfo() && DIVar && DI->getDebugLoc()) {
// Don't handle byval struct arguments or VLAs, for example.
// Non-byval arguments are handled here (they refer to the stack
// temporary alloca at this point).
@@ -252,8 +271,179 @@ void FunctionLoweringInfo::set(const Function &fn, MachineFunction &mf,
// Mark landing pad blocks.
for (BB = Fn->begin(); BB != EB; ++BB)
- if (const InvokeInst *Invoke = dyn_cast<InvokeInst>(BB->getTerminator()))
+ if (const auto *Invoke = dyn_cast<InvokeInst>(BB->getTerminator()))
MBBMap[Invoke->getSuccessor(1)]->setIsLandingPad();
+
+ // Calculate EH numbers for WinEH.
+ if (fn.hasFnAttribute("wineh-parent")) {
+ const Function *WinEHParentFn = MMI.getWinEHParent(&fn);
+ WinEHFuncInfo &FI = MMI.getWinEHFuncInfo(WinEHParentFn);
+ if (FI.LandingPadStateMap.empty()) {
+ WinEHNumbering Num(FI);
+ Num.calculateStateNumbers(*WinEHParentFn);
+ // Pop everything on the handler stack.
+ Num.processCallSite(None, ImmutableCallSite());
+ }
+ }
+}
+
+void WinEHNumbering::createUnwindMapEntry(int ToState, ActionHandler *AH) {
+ WinEHUnwindMapEntry UME;
+ UME.ToState = ToState;
+ if (auto *CH = dyn_cast_or_null<CleanupHandler>(AH))
+ UME.Cleanup = cast<Function>(CH->getHandlerBlockOrFunc());
+ else
+ UME.Cleanup = nullptr;
+ FuncInfo.UnwindMap.push_back(UME);
+}
+
+void WinEHNumbering::createTryBlockMapEntry(int TryLow, int TryHigh,
+ ArrayRef<CatchHandler *> Handlers) {
+ WinEHTryBlockMapEntry TBME;
+ TBME.TryLow = TryLow;
+ TBME.TryHigh = TryHigh;
+ assert(TBME.TryLow <= TBME.TryHigh);
+ for (CatchHandler *CH : Handlers) {
+ WinEHHandlerType HT;
+ if (CH->getSelector()->isNullValue()) {
+ HT.Adjectives = 0x40;
+ HT.TypeDescriptor = nullptr;
+ } else {
+ auto *GV = cast<GlobalVariable>(CH->getSelector()->stripPointerCasts());
+ // Selectors are always pointers to GlobalVariables with 'struct' type.
+ // The struct has two fields, adjectives and a type descriptor.
+ auto *CS = cast<ConstantStruct>(GV->getInitializer());
+ HT.Adjectives =
+ cast<ConstantInt>(CS->getAggregateElement(0U))->getZExtValue();
+ HT.TypeDescriptor =
+ cast<GlobalVariable>(CS->getAggregateElement(1)->stripPointerCasts());
+ }
+ HT.Handler = cast<Function>(CH->getHandlerBlockOrFunc());
+ HT.CatchObjRecoverIdx = CH->getExceptionVarIndex();
+ TBME.HandlerArray.push_back(HT);
+ }
+ FuncInfo.TryBlockMap.push_back(TBME);
+}
+
+static void print_name(const Value *V) {
+#ifndef NDEBUG
+ if (!V) {
+ DEBUG(dbgs() << "null");
+ return;
+ }
+
+ if (const auto *F = dyn_cast<Function>(V))
+ DEBUG(dbgs() << F->getName());
+ else
+ DEBUG(V->dump());
+#endif
+}
+
+void WinEHNumbering::processCallSite(ArrayRef<ActionHandler *> Actions,
+ ImmutableCallSite CS) {
+ int FirstMismatch = 0;
+ for (int E = std::min(HandlerStack.size(), Actions.size()); FirstMismatch < E;
+ ++FirstMismatch) {
+ if (HandlerStack[FirstMismatch]->getHandlerBlockOrFunc() !=
+ Actions[FirstMismatch]->getHandlerBlockOrFunc())
+ break;
+ delete Actions[FirstMismatch];
+ }
+
+ bool EnteringScope = (int)Actions.size() > FirstMismatch;
+
+ // Don't recurse while we are looping over the handler stack. Instead, defer
+ // the numbering of the catch handlers until we are done popping.
+ SmallVector<CatchHandler *, 4> PoppedCatches;
+ for (int I = HandlerStack.size() - 1; I >= FirstMismatch; --I) {
+ if (auto *CH = dyn_cast<CatchHandler>(HandlerStack.back())) {
+ PoppedCatches.push_back(CH);
+ } else {
+ // Delete cleanup handlers
+ delete HandlerStack.back();
+ }
+ HandlerStack.pop_back();
+ }
+
+ // We need to create a new state number if we are exiting a try scope and we
+ // will not push any more actions.
+ int TryHigh = NextState - 1;
+ if (!EnteringScope && !PoppedCatches.empty()) {
+ createUnwindMapEntry(currentEHNumber(), nullptr);
+ ++NextState;
+ }
+
+ int LastTryLowIdx = 0;
+ for (int I = 0, E = PoppedCatches.size(); I != E; ++I) {
+ CatchHandler *CH = PoppedCatches[I];
+ if (I + 1 == E || CH->getEHState() != PoppedCatches[I + 1]->getEHState()) {
+ int TryLow = CH->getEHState();
+ auto Handlers =
+ makeArrayRef(&PoppedCatches[LastTryLowIdx], I - LastTryLowIdx + 1);
+ createTryBlockMapEntry(TryLow, TryHigh, Handlers);
+ LastTryLowIdx = I + 1;
+ }
+ }
+
+ for (CatchHandler *CH : PoppedCatches) {
+ if (auto *F = dyn_cast<Function>(CH->getHandlerBlockOrFunc()))
+ calculateStateNumbers(*F);
+ delete CH;
+ }
+
+ bool LastActionWasCatch = false;
+ for (size_t I = FirstMismatch; I != Actions.size(); ++I) {
+ // We can reuse eh states when pushing two catches for the same invoke.
+ bool CurrActionIsCatch = isa<CatchHandler>(Actions[I]);
+ // FIXME: Reenable this optimization!
+ if (CurrActionIsCatch && LastActionWasCatch && false) {
+ Actions[I]->setEHState(currentEHNumber());
+ } else {
+ createUnwindMapEntry(currentEHNumber(), Actions[I]);
+ Actions[I]->setEHState(NextState);
+ NextState++;
+ DEBUG(dbgs() << "Creating unwind map entry for: (");
+ print_name(Actions[I]->getHandlerBlockOrFunc());
+ DEBUG(dbgs() << ", " << currentEHNumber() << ")\n");
+ }
+ HandlerStack.push_back(Actions[I]);
+ LastActionWasCatch = CurrActionIsCatch;
+ }
+
+ DEBUG(dbgs() << "In EHState " << currentEHNumber() << " for CallSite: ");
+ print_name(CS ? CS.getCalledValue() : nullptr);
+ DEBUG(dbgs() << '\n');
+}
+
+void WinEHNumbering::calculateStateNumbers(const Function &F) {
+ auto I = VisitedHandlers.insert(&F);
+ if (!I.second)
+ return; // We've already visited this handler, don't renumber it.
+
+ DEBUG(dbgs() << "Calculating state numbers for: " << F.getName() << '\n');
+ SmallVector<ActionHandler *, 4> ActionList;
+ for (const BasicBlock &BB : F) {
+ for (const Instruction &I : BB) {
+ const auto *CI = dyn_cast<CallInst>(&I);
+ if (!CI || CI->doesNotThrow())
+ continue;
+ processCallSite(None, CI);
+ }
+ const auto *II = dyn_cast<InvokeInst>(BB.getTerminator());
+ if (!II)
+ continue;
+ const LandingPadInst *LPI = II->getLandingPadInst();
+ auto *ActionsCall = dyn_cast<IntrinsicInst>(LPI->getNextNode());
+ if (!ActionsCall)
+ continue;
+ assert(ActionsCall->getIntrinsicID() == Intrinsic::eh_actions);
+ parseEHActions(ActionsCall, ActionList);
+ processCallSite(ActionList, II);
+ ActionList.clear();
+ FuncInfo.LandingPadStateMap[LPI] = currentEHNumber();
+ }
+
+ FuncInfo.CatchHandlerMaxState[&F] = NextState - 1;
}
/// clear - Clear out all the function-specific state. This returns this
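The handler-stack update in WinEHNumbering::processCallSite above follows a prefix-diff pattern: keep the handlers shared with the incoming action list, pop the ones whose scope is being exited, and push the newly entered ones. A minimal standalone sketch of just that pattern, with plain ints standing in for ActionHandler pointers and no ownership, state numbering, or map emission:

#include <algorithm>
#include <cstdio>
#include <vector>

int main() {
  std::vector<int> HandlerStack = {1, 2, 3, 4}; // handlers currently live
  std::vector<int> Actions = {1, 2, 5};         // handlers for this call site

  // Length of the common prefix (FirstMismatch in the real code).
  std::size_t FirstMismatch = 0;
  std::size_t E = std::min(HandlerStack.size(), Actions.size());
  while (FirstMismatch < E &&
         HandlerStack[FirstMismatch] == Actions[FirstMismatch])
    ++FirstMismatch;

  // Pop handlers whose scope is being exited (3 and 4 here).
  while (HandlerStack.size() > FirstMismatch)
    HandlerStack.pop_back();

  // Push handlers whose scope is being entered (5 here).
  for (std::size_t I = FirstMismatch; I != Actions.size(); ++I)
    HandlerStack.push_back(Actions[I]);

  for (int H : HandlerStack)
    std::printf("%d ", H); // prints: 1 2 5
  std::printf("\n");
  return 0;
}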
@@ -462,8 +652,7 @@ void llvm::ComputeUsesVAFloatArgument(const CallInst &I,
if (FT->isVarArg() && !MMI->usesVAFloatArgument()) {
for (unsigned i = 0, e = I.getNumArgOperands(); i != e; ++i) {
Type* T = I.getArgOperand(i)->getType();
- for (po_iterator<Type*> i = po_begin(T), e = po_end(T);
- i != e; ++i) {
+ for (auto i : post_order(T)) {
if (i->isFloatingPointTy()) {
MMI->setUsesVAFloatArgument(true);
return;
diff --git a/lib/CodeGen/SelectionDAG/InstrEmitter.cpp b/lib/CodeGen/SelectionDAG/InstrEmitter.cpp
index 93699a7..64d606a 100644
--- a/lib/CodeGen/SelectionDAG/InstrEmitter.cpp
+++ b/lib/CodeGen/SelectionDAG/InstrEmitter.cpp
@@ -650,6 +650,8 @@ InstrEmitter::EmitDbgValue(SDDbgValue *SD,
MDNode *Var = SD->getVariable();
MDNode *Expr = SD->getExpression();
DebugLoc DL = SD->getDebugLoc();
+ assert(cast<MDLocalVariable>(Var)->isValidLocationForIntrinsic(DL) &&
+ "Expected inlined-at fields to agree");
if (SD->getKind() == SDDbgValue::FRAMEIX) {
// Stack address; this needs to be lowered in target-dependent fashion.
diff --git a/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp b/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp
index ece38f3..4a28a4b 100644
--- a/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp
+++ b/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp
@@ -4033,6 +4033,8 @@ void SelectionDAGLegalize::PromoteNode(SDNode *Node) {
Node->getOpcode() == ISD::SETCC) {
OVT = Node->getOperand(0).getSimpleValueType();
}
+ if (Node->getOpcode() == ISD::BR_CC)
+ OVT = Node->getOperand(2).getSimpleValueType();
MVT NVT = TLI.getTypeToPromoteTo(Node->getOpcode(), OVT);
SDLoc dl(Node);
SDValue Tmp1, Tmp2, Tmp3;
@@ -4188,11 +4190,28 @@ void SelectionDAGLegalize::PromoteNode(SDNode *Node) {
Tmp1, Tmp2, Node->getOperand(2)));
break;
}
+ case ISD::BR_CC: {
+ unsigned ExtOp = ISD::FP_EXTEND;
+ if (NVT.isInteger()) {
+ ISD::CondCode CCCode =
+ cast<CondCodeSDNode>(Node->getOperand(1))->get();
+ ExtOp = isSignedIntSetCC(CCCode) ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND;
+ }
+ Tmp1 = DAG.getNode(ExtOp, dl, NVT, Node->getOperand(2));
+ Tmp2 = DAG.getNode(ExtOp, dl, NVT, Node->getOperand(3));
+ Results.push_back(DAG.getNode(ISD::BR_CC, dl, Node->getValueType(0),
+ Node->getOperand(0), Node->getOperand(1),
+ Tmp1, Tmp2, Node->getOperand(4)));
+ break;
+ }
case ISD::FADD:
case ISD::FSUB:
case ISD::FMUL:
case ISD::FDIV:
case ISD::FREM:
+ case ISD::FMINNUM:
+ case ISD::FMAXNUM:
+ case ISD::FCOPYSIGN:
case ISD::FPOW: {
Tmp1 = DAG.getNode(ISD::FP_EXTEND, dl, NVT, Node->getOperand(0));
Tmp2 = DAG.getNode(ISD::FP_EXTEND, dl, NVT, Node->getOperand(1));
@@ -4201,10 +4220,40 @@ void SelectionDAGLegalize::PromoteNode(SDNode *Node) {
Tmp3, DAG.getIntPtrConstant(0)));
break;
}
- case ISD::FLOG2:
- case ISD::FEXP2:
+ case ISD::FMA: {
+ Tmp1 = DAG.getNode(ISD::FP_EXTEND, dl, NVT, Node->getOperand(0));
+ Tmp2 = DAG.getNode(ISD::FP_EXTEND, dl, NVT, Node->getOperand(1));
+ Tmp3 = DAG.getNode(ISD::FP_EXTEND, dl, NVT, Node->getOperand(2));
+ Results.push_back(
+ DAG.getNode(ISD::FP_ROUND, dl, OVT,
+ DAG.getNode(Node->getOpcode(), dl, NVT, Tmp1, Tmp2, Tmp3),
+ DAG.getIntPtrConstant(0)));
+ break;
+ }
+ case ISD::FPOWI: {
+ Tmp1 = DAG.getNode(ISD::FP_EXTEND, dl, NVT, Node->getOperand(0));
+ Tmp2 = Node->getOperand(1);
+ Tmp3 = DAG.getNode(Node->getOpcode(), dl, NVT, Tmp1, Tmp2);
+ Results.push_back(DAG.getNode(ISD::FP_ROUND, dl, OVT,
+ Tmp3, DAG.getIntPtrConstant(0)));
+ break;
+ }
+ case ISD::FFLOOR:
+ case ISD::FCEIL:
+ case ISD::FRINT:
+ case ISD::FNEARBYINT:
+ case ISD::FROUND:
+ case ISD::FTRUNC:
+ case ISD::FNEG:
+ case ISD::FSQRT:
+ case ISD::FSIN:
+ case ISD::FCOS:
case ISD::FLOG:
- case ISD::FEXP: {
+ case ISD::FLOG2:
+ case ISD::FLOG10:
+ case ISD::FABS:
+ case ISD::FEXP:
+ case ISD::FEXP2: {
Tmp1 = DAG.getNode(ISD::FP_EXTEND, dl, NVT, Node->getOperand(0));
Tmp2 = DAG.getNode(Node->getOpcode(), dl, NVT, Tmp1);
Results.push_back(DAG.getNode(ISD::FP_ROUND, dl, OVT,
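The new ISD::BR_CC case above promotes the compared operands, and the extension must match the signedness of the predicate (SIGN_EXTEND for signed condition codes, ZERO_EXTEND for unsigned ones), while FP operands simply go through FP_EXTEND. A scalar sketch of why the extension kind matters, in plain C++ rather than SelectionDAG nodes:

#include <cstdint>
#include <cstdio>

int main() {
  int8_t A = -1, B = 1;

  // Signed predicate: sign-extend both sides, -1 < 1 still holds.
  bool SignedLT = int32_t(A) < int32_t(B);

  // Unsigned predicate: zero-extend both sides, 0xFF < 1 does not hold.
  bool UnsignedLT = uint32_t(uint8_t(A)) < uint32_t(uint8_t(B));

  std::printf("signed lt: %d, unsigned lt: %d\n", SignedLT, UnsignedLT);
  // prints: signed lt: 1, unsigned lt: 0
  return 0;
}

Picking the wrong extension would change which branch BR_CC takes after promotion, which is exactly what the isSignedIntSetCC check guards against.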
diff --git a/lib/CodeGen/SelectionDAG/LegalizeTypes.h b/lib/CodeGen/SelectionDAG/LegalizeTypes.h
index cef3fc9..9de85d7 100644
--- a/lib/CodeGen/SelectionDAG/LegalizeTypes.h
+++ b/lib/CodeGen/SelectionDAG/LegalizeTypes.h
@@ -593,6 +593,7 @@ private:
bool SplitVectorOperand(SDNode *N, unsigned OpNo);
SDValue SplitVecOp_VSELECT(SDNode *N, unsigned OpNo);
SDValue SplitVecOp_UnaryOp(SDNode *N);
+ SDValue SplitVecOp_TruncateHelper(SDNode *N, unsigned TruncateOp);
SDValue SplitVecOp_BITCAST(SDNode *N);
SDValue SplitVecOp_EXTRACT_SUBVECTOR(SDNode *N);
@@ -600,7 +601,6 @@ private:
SDValue SplitVecOp_STORE(StoreSDNode *N, unsigned OpNo);
SDValue SplitVecOp_MSTORE(MaskedStoreSDNode *N, unsigned OpNo);
SDValue SplitVecOp_CONCAT_VECTORS(SDNode *N);
- SDValue SplitVecOp_TRUNCATE(SDNode *N);
SDValue SplitVecOp_VSETCC(SDNode *N);
SDValue SplitVecOp_FP_ROUND(SDNode *N);
diff --git a/lib/CodeGen/SelectionDAG/LegalizeVectorOps.cpp b/lib/CodeGen/SelectionDAG/LegalizeVectorOps.cpp
index 03c2734..408d5ed 100644
--- a/lib/CodeGen/SelectionDAG/LegalizeVectorOps.cpp
+++ b/lib/CodeGen/SelectionDAG/LegalizeVectorOps.cpp
@@ -379,8 +379,8 @@ SDValue VectorLegalizer::Promote(SDValue Op) {
// There are currently two cases of vector promotion:
// 1) Bitcasting a vector of integers to a different type to a vector of the
- // same overall length. For example, x86 promotes ISD::AND on v2i32 to v1i64.
- // 2) Extending a vector of floats to a vector of the same number oflarger
+ // same overall length. For example, x86 promotes ISD::AND v2i32 to v1i64.
+ // 2) Extending a vector of floats to a vector of the same number of larger
// floats. For example, AArch64 promotes ISD::FADD on v4f16 to v4f32.
MVT VT = Op.getSimpleValueType();
assert(Op.getNode()->getNumValues() == 1 &&
diff --git a/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp b/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp
index f7e4557..f000902 100644
--- a/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp
+++ b/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp
@@ -1293,7 +1293,9 @@ bool DAGTypeLegalizer::SplitVectorOperand(SDNode *N, unsigned OpNo) {
case ISD::EXTRACT_SUBVECTOR: Res = SplitVecOp_EXTRACT_SUBVECTOR(N); break;
case ISD::EXTRACT_VECTOR_ELT:Res = SplitVecOp_EXTRACT_VECTOR_ELT(N); break;
case ISD::CONCAT_VECTORS: Res = SplitVecOp_CONCAT_VECTORS(N); break;
- case ISD::TRUNCATE: Res = SplitVecOp_TRUNCATE(N); break;
+ case ISD::TRUNCATE:
+ Res = SplitVecOp_TruncateHelper(N, ISD::TRUNCATE);
+ break;
case ISD::FP_ROUND: Res = SplitVecOp_FP_ROUND(N); break;
case ISD::STORE:
Res = SplitVecOp_STORE(cast<StoreSDNode>(N), OpNo);
@@ -1304,20 +1306,32 @@ bool DAGTypeLegalizer::SplitVectorOperand(SDNode *N, unsigned OpNo) {
case ISD::VSELECT:
Res = SplitVecOp_VSELECT(N, OpNo);
break;
- case ISD::CTTZ:
- case ISD::CTLZ:
- case ISD::CTPOP:
- case ISD::FP_EXTEND:
case ISD::FP_TO_SINT:
case ISD::FP_TO_UINT:
+ if (N->getValueType(0).bitsLT(N->getOperand(0)->getValueType(0)))
+ Res = SplitVecOp_TruncateHelper(N, ISD::TRUNCATE);
+ else
+ Res = SplitVecOp_UnaryOp(N);
+ break;
case ISD::SINT_TO_FP:
case ISD::UINT_TO_FP:
- case ISD::FTRUNC:
+ if (N->getValueType(0).bitsLT(N->getOperand(0)->getValueType(0)))
+ Res = SplitVecOp_TruncateHelper(N, ISD::FTRUNC);
+ else
+ Res = SplitVecOp_UnaryOp(N);
+ break;
+ case ISD::CTTZ:
+ case ISD::CTLZ:
+ case ISD::CTPOP:
+ case ISD::FP_EXTEND:
case ISD::SIGN_EXTEND:
case ISD::ZERO_EXTEND:
case ISD::ANY_EXTEND:
Res = SplitVecOp_UnaryOp(N);
break;
+ case ISD::FTRUNC:
+ Res = SplitVecOp_TruncateHelper(N, ISD::FTRUNC);
+ break;
}
}
@@ -1581,7 +1595,8 @@ SDValue DAGTypeLegalizer::SplitVecOp_CONCAT_VECTORS(SDNode *N) {
return DAG.getNode(ISD::BUILD_VECTOR, DL, N->getValueType(0), Elts);
}
-SDValue DAGTypeLegalizer::SplitVecOp_TRUNCATE(SDNode *N) {
+SDValue DAGTypeLegalizer::SplitVecOp_TruncateHelper(SDNode *N,
+ unsigned TruncateOp) {
// The result type is legal, but the input type is illegal. If splitting
// ends up with the result type of each half still being legal, just
// do that. If, however, that would result in an illegal result type,
@@ -1624,8 +1639,8 @@ SDValue DAGTypeLegalizer::SplitVecOp_TRUNCATE(SDNode *N) {
EVT HalfElementVT = EVT::getIntegerVT(*DAG.getContext(), InElementSize/2);
EVT HalfVT = EVT::getVectorVT(*DAG.getContext(), HalfElementVT,
NumElements/2);
- SDValue HalfLo = DAG.getNode(ISD::TRUNCATE, DL, HalfVT, InLoVec);
- SDValue HalfHi = DAG.getNode(ISD::TRUNCATE, DL, HalfVT, InHiVec);
+ SDValue HalfLo = DAG.getNode(N->getOpcode(), DL, HalfVT, InLoVec);
+ SDValue HalfHi = DAG.getNode(N->getOpcode(), DL, HalfVT, InHiVec);
// Concatenate them to get the full intermediate truncation result.
EVT InterVT = EVT::getVectorVT(*DAG.getContext(), HalfElementVT, NumElements);
SDValue InterVec = DAG.getNode(ISD::CONCAT_VECTORS, DL, InterVT, HalfLo,
@@ -1634,7 +1649,7 @@ SDValue DAGTypeLegalizer::SplitVecOp_TRUNCATE(SDNode *N) {
// type. This should normally be something that ends up being legal directly,
// but in theory if a target has very wide vectors and an annoyingly
// restricted set of legal types, this split can chain to build things up.
- return DAG.getNode(ISD::TRUNCATE, DL, OutVT, InterVec);
+ return DAG.getNode(TruncateOp, DL, OutVT, InterVec);
}
SDValue DAGTypeLegalizer::SplitVecOp_VSETCC(SDNode *N) {
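SplitVecOp_TruncateHelper narrows in two stages: each half of the split input is reduced to half-width elements, the halves are concatenated, and a final TruncateOp node produces the legal result type. For the integer TRUNCATE path this is safe because truncation composes; a scalar sketch (values chosen for illustration only):

#include <cstdint>
#include <cstdio>

int main() {
  uint64_t Wide = 0x1122334455667788ull;

  // Direct truncation to 8 bits.
  uint8_t Direct = static_cast<uint8_t>(Wide);

  // Two-stage truncation through a 32-bit intermediate, mirroring the
  // half-width intermediate vector built by the legalizer.
  uint32_t Intermediate = static_cast<uint32_t>(Wide);
  uint8_t Staged = static_cast<uint8_t>(Intermediate);

  std::printf("%02X %02X\n", Direct, Staged); // both print 88
  return 0;
}

The FP_TO_SINT/FP_TO_UINT and FTRUNC callers reuse the same shape: the inner halves keep the node's own opcode, and only the final step uses the supplied TruncateOp.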
diff --git a/lib/CodeGen/SelectionDAG/ScheduleDAGRRList.cpp b/lib/CodeGen/SelectionDAG/ScheduleDAGRRList.cpp
index 8b54e656..fd0fa31 100644
--- a/lib/CodeGen/SelectionDAG/ScheduleDAGRRList.cpp
+++ b/lib/CodeGen/SelectionDAG/ScheduleDAGRRList.cpp
@@ -173,7 +173,7 @@ public:
HazardRec = STI.getInstrInfo()->CreateTargetHazardRecognizer(&STI, this);
}
- ~ScheduleDAGRRList() {
+ ~ScheduleDAGRRList() override {
delete HazardRec;
delete AvailableQueue;
}
@@ -1423,9 +1423,10 @@ SUnit *ScheduleDAGRRList::PickNodeToScheduleBottomUp() {
// If one or more successors has been unscheduled, then the current
// node is no longer available.
- if (!TrySU->isAvailable)
+ if (!TrySU->isAvailable || !TrySU->NodeQueueId)
CurSU = AvailableQueue->pop();
else {
+ // Available and in AvailableQueue
AvailableQueue->remove(TrySU);
CurSU = TrySU;
}
diff --git a/lib/CodeGen/SelectionDAG/ScheduleDAGSDNodes.h b/lib/CodeGen/SelectionDAG/ScheduleDAGSDNodes.h
index 2cd1f4b..6351fa2 100644
--- a/lib/CodeGen/SelectionDAG/ScheduleDAGSDNodes.h
+++ b/lib/CodeGen/SelectionDAG/ScheduleDAGSDNodes.h
@@ -44,7 +44,7 @@ namespace llvm {
explicit ScheduleDAGSDNodes(MachineFunction &mf);
- virtual ~ScheduleDAGSDNodes() {}
+ ~ScheduleDAGSDNodes() override {}
/// Run - perform scheduling.
///
diff --git a/lib/CodeGen/SelectionDAG/ScheduleDAGVLIW.cpp b/lib/CodeGen/SelectionDAG/ScheduleDAGVLIW.cpp
index 418b58e..eee4a4b 100644
--- a/lib/CodeGen/SelectionDAG/ScheduleDAGVLIW.cpp
+++ b/lib/CodeGen/SelectionDAG/ScheduleDAGVLIW.cpp
@@ -76,7 +76,7 @@ public:
HazardRec = STI.getInstrInfo()->CreateTargetHazardRecognizer(&STI, this);
}
- ~ScheduleDAGVLIW() {
+ ~ScheduleDAGVLIW() override {
delete HazardRec;
delete AvailableQueue;
}
diff --git a/lib/CodeGen/SelectionDAG/SelectionDAG.cpp b/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
index b52f648..770f0b2 100644
--- a/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
+++ b/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
@@ -2851,10 +2851,16 @@ SDValue SelectionDAG::getNode(unsigned Opcode, SDLoc DL,
// FIXME: Entirely reasonable to perform folding of other unary
// operations here as the need arises.
break;
+ case ISD::TRUNCATE:
+ // Constant build vector truncation can be done with the original scalar
+ // operands but with a new build vector with the truncated value type.
+ return getNode(ISD::BUILD_VECTOR, DL, VT, BV->ops());
case ISD::FNEG:
case ISD::FABS:
+ case ISD::FCEIL:
+ case ISD::FTRUNC:
+ case ISD::FFLOOR:
case ISD::FP_EXTEND:
- case ISD::TRUNCATE:
case ISD::UINT_TO_FP:
case ISD::SINT_TO_FP: {
// Let the above scalar folding handle the folding of each element.
@@ -2870,6 +2876,7 @@ SDValue SelectionDAG::getNode(unsigned Opcode, SDLoc DL,
}
if (Ops.size() == VT.getVectorNumElements())
return getNode(ISD::BUILD_VECTOR, DL, VT, Ops);
+ break;
}
}
}
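Folding TRUNCATE of a constant BUILD_VECTOR by rebuilding the vector with the narrower value type works because each constant lane is simply reduced modulo the new element width. An element-wise view, with made-up lane values:

#include <cstdint>
#include <cstdio>

int main() {
  // A constant <2 x i32> <300, 5> truncated to <2 x i8>: each lane is taken
  // modulo 2^8, which is what re-emitting the BUILD_VECTOR with i8 lanes does.
  uint32_t Lanes32[2] = {300u, 5u};
  uint8_t Lanes8[2];
  for (int I = 0; I < 2; ++I)
    Lanes8[I] = static_cast<uint8_t>(Lanes32[I]);
  std::printf("%u %u\n", Lanes8[0], Lanes8[1]); // prints: 44 5
  return 0;
}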
@@ -3628,7 +3635,6 @@ SDValue SelectionDAG::getNode(unsigned Opcode, SDLoc DL, EVT VT, SDValue N1,
CSEMap.InsertNode(N, IP);
} else {
-
N = GetBinarySDNode(Opcode, DL, VTs, N1, N2, nuw, nsw, exact);
}
@@ -3791,12 +3797,27 @@ static SDValue getMemsetValue(SDValue Value, EVT VT, SelectionDAG &DAG,
return DAG.getConstantFP(APFloat(DAG.EVTToAPFloatSemantics(VT), Val), VT);
}
- Value = DAG.getNode(ISD::ZERO_EXTEND, dl, VT, Value);
+ assert(Value.getValueType() == MVT::i8 && "memset with non-byte fill value?");
+ EVT IntVT = VT.getScalarType();
+ if (!IntVT.isInteger())
+ IntVT = EVT::getIntegerVT(*DAG.getContext(), IntVT.getSizeInBits());
+
+ Value = DAG.getNode(ISD::ZERO_EXTEND, dl, IntVT, Value);
if (NumBits > 8) {
// Use a multiplication with 0x010101... to extend the input to the
// required length.
APInt Magic = APInt::getSplat(NumBits, APInt(8, 0x01));
- Value = DAG.getNode(ISD::MUL, dl, VT, Value, DAG.getConstant(Magic, VT));
+ Value = DAG.getNode(ISD::MUL, dl, IntVT, Value,
+ DAG.getConstant(Magic, IntVT));
+ }
+
+ if (VT != Value.getValueType() && !VT.isInteger())
+ Value = DAG.getNode(ISD::BITCAST, dl, VT.getScalarType(), Value);
+ if (VT != Value.getValueType()) {
+ assert(VT.getVectorElementType() == Value.getValueType() &&
+ "value type should be one vector element here");
+ SmallVector<SDValue, 8> BVOps(VT.getVectorNumElements(), Value);
+ Value = DAG.getNode(ISD::BUILD_VECTOR, dl, VT, BVOps);
}
return Value;
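getMemsetValue replicates the fill byte with the 0x01...01 multiplication trick: zero-extend the byte to the target integer width, then multiply by a constant holding 0x01 in every byte; the vector/bitcast handling added above then bitcasts and/or splats that integer into the requested value type. A plain-integer sketch of the replication step:

#include <cstdint>
#include <cstdio>

int main() {
  uint8_t Fill = 0xAB;

  // 32-bit case: APInt::getSplat(32, APInt(8, 0x01)) == 0x01010101.
  uint32_t Splat32 = uint32_t(Fill) * 0x01010101u;            // 0xABABABAB

  // 64-bit case uses the same pattern with eight 0x01 bytes.
  uint64_t Splat64 = uint64_t(Fill) * 0x0101010101010101ull;  // 0xABAB...AB

  std::printf("%08X %016llX\n", (unsigned)Splat32,
              (unsigned long long)Splat64);
  return 0;
}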
@@ -4276,7 +4297,7 @@ static SDValue getMemsetStores(SelectionDAG &DAG, SDLoc dl,
SDValue SelectionDAG::getMemcpy(SDValue Chain, SDLoc dl, SDValue Dst,
SDValue Src, SDValue Size,
unsigned Align, bool isVol, bool AlwaysInline,
- MachinePointerInfo DstPtrInfo,
+ bool isTailCall, MachinePointerInfo DstPtrInfo,
MachinePointerInfo SrcPtrInfo) {
assert(Align && "The SDAG layer expects explicit alignment and reserves 0");
@@ -4334,15 +4355,16 @@ SDValue SelectionDAG::getMemcpy(SDValue Chain, SDLoc dl, SDValue Dst,
Type::getVoidTy(*getContext()),
getExternalSymbol(TLI->getLibcallName(RTLIB::MEMCPY),
TLI->getPointerTy()), std::move(Args), 0)
- .setDiscardResult();
- std::pair<SDValue,SDValue> CallResult = TLI->LowerCallTo(CLI);
+ .setDiscardResult()
+ .setTailCall(isTailCall);
+ std::pair<SDValue,SDValue> CallResult = TLI->LowerCallTo(CLI);
return CallResult.second;
}
SDValue SelectionDAG::getMemmove(SDValue Chain, SDLoc dl, SDValue Dst,
SDValue Src, SDValue Size,
- unsigned Align, bool isVol,
+ unsigned Align, bool isVol, bool isTailCall,
MachinePointerInfo DstPtrInfo,
MachinePointerInfo SrcPtrInfo) {
assert(Align && "The SDAG layer expects explicit alignment and reserves 0");
@@ -4389,15 +4411,16 @@ SDValue SelectionDAG::getMemmove(SDValue Chain, SDLoc dl, SDValue Dst,
Type::getVoidTy(*getContext()),
getExternalSymbol(TLI->getLibcallName(RTLIB::MEMMOVE),
TLI->getPointerTy()), std::move(Args), 0)
- .setDiscardResult();
- std::pair<SDValue,SDValue> CallResult = TLI->LowerCallTo(CLI);
+ .setDiscardResult()
+ .setTailCall(isTailCall);
+ std::pair<SDValue,SDValue> CallResult = TLI->LowerCallTo(CLI);
return CallResult.second;
}
SDValue SelectionDAG::getMemset(SDValue Chain, SDLoc dl, SDValue Dst,
SDValue Src, SDValue Size,
- unsigned Align, bool isVol,
+ unsigned Align, bool isVol, bool isTailCall,
MachinePointerInfo DstPtrInfo) {
assert(Align && "The SDAG layer expects explicit alignment and reserves 0");
@@ -4446,7 +4469,8 @@ SDValue SelectionDAG::getMemset(SDValue Chain, SDLoc dl, SDValue Dst,
Type::getVoidTy(*getContext()),
getExternalSymbol(TLI->getLibcallName(RTLIB::MEMSET),
TLI->getPointerTy()), std::move(Args), 0)
- .setDiscardResult();
+ .setDiscardResult()
+ .setTailCall(isTailCall);
std::pair<SDValue,SDValue> CallResult = TLI->LowerCallTo(CLI);
return CallResult.second;
@@ -5574,8 +5598,7 @@ SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
/// For IROrder, we keep the smaller of the two
SDNode *SelectionDAG::UpdadeSDLocOnMergedSDNode(SDNode *N, SDLoc OLoc) {
DebugLoc NLoc = N->getDebugLoc();
- if (!(NLoc.isUnknown()) && (OptLevel == CodeGenOpt::None) &&
- (OLoc.getDebugLoc() != NLoc)) {
+ if (NLoc && OptLevel == CodeGenOpt::None && OLoc.getDebugLoc() != NLoc) {
N->setDebugLoc(DebugLoc());
}
unsigned Order = std::min(N->getIROrder(), OLoc.getIROrder());
@@ -5885,6 +5908,8 @@ SDNode *SelectionDAG::getNodeIfExists(unsigned Opcode, SDVTList VTList,
SDDbgValue *SelectionDAG::getDbgValue(MDNode *Var, MDNode *Expr, SDNode *N,
unsigned R, bool IsIndirect, uint64_t Off,
DebugLoc DL, unsigned O) {
+ assert(cast<MDLocalVariable>(Var)->isValidLocationForIntrinsic(DL) &&
+ "Expected inlined-at fields to agree");
return new (Allocator) SDDbgValue(Var, Expr, N, R, IsIndirect, Off, DL, O);
}
@@ -5892,6 +5917,8 @@ SDDbgValue *SelectionDAG::getDbgValue(MDNode *Var, MDNode *Expr, SDNode *N,
SDDbgValue *SelectionDAG::getConstantDbgValue(MDNode *Var, MDNode *Expr,
const Value *C, uint64_t Off,
DebugLoc DL, unsigned O) {
+ assert(cast<MDLocalVariable>(Var)->isValidLocationForIntrinsic(DL) &&
+ "Expected inlined-at fields to agree");
return new (Allocator) SDDbgValue(Var, Expr, C, Off, DL, O);
}
@@ -5899,6 +5926,8 @@ SDDbgValue *SelectionDAG::getConstantDbgValue(MDNode *Var, MDNode *Expr,
SDDbgValue *SelectionDAG::getFrameIndexDbgValue(MDNode *Var, MDNode *Expr,
unsigned FI, uint64_t Off,
DebugLoc DL, unsigned O) {
+ assert(cast<MDLocalVariable>(Var)->isValidLocationForIntrinsic(DL) &&
+ "Expected inlined-at fields to agree");
return new (Allocator) SDDbgValue(Var, Expr, FI, Off, DL, O);
}
diff --git a/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp b/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
index 6c14e79..32d2aae 100644
--- a/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
+++ b/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
@@ -35,6 +35,7 @@
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/StackMaps.h"
+#include "llvm/CodeGen/WinEHFuncInfo.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
@@ -997,14 +998,16 @@ void SelectionDAGBuilder::resolveDanglingDebugInfo(const Value *V,
const DbgValueInst *DI = DDI.getDI();
DebugLoc dl = DDI.getdl();
unsigned DbgSDNodeOrder = DDI.getSDNodeOrder();
- MDNode *Variable = DI->getVariable();
- MDNode *Expr = DI->getExpression();
+ MDLocalVariable *Variable = DI->getVariable();
+ MDExpression *Expr = DI->getExpression();
+ assert(Variable->isValidLocationForIntrinsic(dl) &&
+ "Expected inlined-at fields to agree");
uint64_t Offset = DI->getOffset();
// A dbg.value for an alloca is always indirect.
bool IsIndirect = isa<AllocaInst>(V) || Offset != 0;
SDDbgValue *SDV;
if (Val.getNode()) {
- if (!EmitFuncArgumentDbgValue(V, Variable, Expr, Offset, IsIndirect,
+ if (!EmitFuncArgumentDbgValue(V, Variable, Expr, dl, Offset, IsIndirect,
Val)) {
SDV = DAG.getDbgValue(Variable, Expr, Val.getNode(), Val.getResNo(),
IsIndirect, Offset, dl, DbgSDNodeOrder);
@@ -4447,11 +4450,9 @@ static unsigned getTruncatedArgReg(const SDValue &N) {
/// EmitFuncArgumentDbgValue - If the DbgValueInst is a dbg_value of a function
/// argument, create the corresponding DBG_VALUE machine instruction for it now.
/// At the end of instruction selection, they will be inserted to the entry BB.
-bool SelectionDAGBuilder::EmitFuncArgumentDbgValue(const Value *V,
- MDNode *Variable,
- MDNode *Expr, int64_t Offset,
- bool IsIndirect,
- const SDValue &N) {
+bool SelectionDAGBuilder::EmitFuncArgumentDbgValue(
+ const Value *V, MDLocalVariable *Variable, MDExpression *Expr,
+ MDLocation *DL, int64_t Offset, bool IsIndirect, const SDValue &N) {
const Argument *Arg = dyn_cast<Argument>(V);
if (!Arg)
return false;
@@ -4460,8 +4461,10 @@ bool SelectionDAGBuilder::EmitFuncArgumentDbgValue(const Value *V,
const TargetInstrInfo *TII = DAG.getSubtarget().getInstrInfo();
// Ignore inlined function arguments here.
+ //
+ // FIXME: Should we be checking DL->inlinedAt() to determine this?
DIVariable DV(Variable);
- if (DV.isInlinedFnArgument(MF.getFunction()))
+ if (!DV->getScope()->getSubprogram()->describes(MF.getFunction()))
return false;
Optional<MachineOperand> Op;
@@ -4502,13 +4505,15 @@ bool SelectionDAGBuilder::EmitFuncArgumentDbgValue(const Value *V,
if (!Op)
return false;
+ assert(Variable->isValidLocationForIntrinsic(DL) &&
+ "Expected inlined-at fields to agree");
if (Op->isReg())
FuncInfo.ArgDbgValues.push_back(
- BuildMI(MF, getCurDebugLoc(), TII->get(TargetOpcode::DBG_VALUE),
- IsIndirect, Op->getReg(), Offset, Variable, Expr));
+ BuildMI(MF, DL, TII->get(TargetOpcode::DBG_VALUE), IsIndirect,
+ Op->getReg(), Offset, Variable, Expr));
else
FuncInfo.ArgDbgValues.push_back(
- BuildMI(MF, getCurDebugLoc(), TII->get(TargetOpcode::DBG_VALUE))
+ BuildMI(MF, DL, TII->get(TargetOpcode::DBG_VALUE))
.addOperand(*Op)
.addImm(Offset)
.addMetadata(Variable)
@@ -4589,9 +4594,12 @@ SelectionDAGBuilder::visitIntrinsicCall(const CallInst &I, unsigned Intrinsic) {
if (!Align)
Align = 1; // @llvm.memcpy defines 0 and 1 to both mean no alignment.
bool isVol = cast<ConstantInt>(I.getArgOperand(4))->getZExtValue();
- DAG.setRoot(DAG.getMemcpy(getRoot(), sdl, Op1, Op2, Op3, Align, isVol, false,
- MachinePointerInfo(I.getArgOperand(0)),
- MachinePointerInfo(I.getArgOperand(1))));
+ bool isTC = I.isTailCall() && isInTailCallPosition(&I, DAG.getTarget());
+ SDValue MC = DAG.getMemcpy(getRoot(), sdl, Op1, Op2, Op3, Align, isVol,
+ false, isTC,
+ MachinePointerInfo(I.getArgOperand(0)),
+ MachinePointerInfo(I.getArgOperand(1)));
+ updateDAGForMaybeTailCall(MC);
return nullptr;
}
case Intrinsic::memset: {
@@ -4608,8 +4616,10 @@ SelectionDAGBuilder::visitIntrinsicCall(const CallInst &I, unsigned Intrinsic) {
if (!Align)
Align = 1; // @llvm.memset defines 0 and 1 to both mean no alignment.
bool isVol = cast<ConstantInt>(I.getArgOperand(4))->getZExtValue();
- DAG.setRoot(DAG.getMemset(getRoot(), sdl, Op1, Op2, Op3, Align, isVol,
- MachinePointerInfo(I.getArgOperand(0))));
+ bool isTC = I.isTailCall() && isInTailCallPosition(&I, DAG.getTarget());
+ SDValue MS = DAG.getMemset(getRoot(), sdl, Op1, Op2, Op3, Align, isVol,
+ isTC, MachinePointerInfo(I.getArgOperand(0)));
+ updateDAGForMaybeTailCall(MS);
return nullptr;
}
case Intrinsic::memmove: {
@@ -4628,19 +4638,19 @@ SelectionDAGBuilder::visitIntrinsicCall(const CallInst &I, unsigned Intrinsic) {
if (!Align)
Align = 1; // @llvm.memmove defines 0 and 1 to both mean no alignment.
bool isVol = cast<ConstantInt>(I.getArgOperand(4))->getZExtValue();
- DAG.setRoot(DAG.getMemmove(getRoot(), sdl, Op1, Op2, Op3, Align, isVol,
- MachinePointerInfo(I.getArgOperand(0)),
- MachinePointerInfo(I.getArgOperand(1))));
+ bool isTC = I.isTailCall() && isInTailCallPosition(&I, DAG.getTarget());
+ SDValue MM = DAG.getMemmove(getRoot(), sdl, Op1, Op2, Op3, Align, isVol,
+ isTC, MachinePointerInfo(I.getArgOperand(0)),
+ MachinePointerInfo(I.getArgOperand(1)));
+ updateDAGForMaybeTailCall(MM);
return nullptr;
}
case Intrinsic::dbg_declare: {
const DbgDeclareInst &DI = cast<DbgDeclareInst>(I);
- MDNode *Variable = DI.getVariable();
- MDNode *Expression = DI.getExpression();
+ MDLocalVariable *Variable = DI.getVariable();
+ MDExpression *Expression = DI.getExpression();
const Value *Address = DI.getAddress();
- DIVariable DIVar(Variable);
- assert((!DIVar || DIVar.isVariable()) &&
- "Variable in DbgDeclareInst should be either null or a DIVariable.");
+ DIVariable DIVar = Variable;
if (!Address || !DIVar) {
DEBUG(dbgs() << "Dropping debug info for " << DI << "\n");
return nullptr;
@@ -4663,7 +4673,7 @@ SelectionDAGBuilder::visitIntrinsicCall(const CallInst &I, unsigned Intrinsic) {
Address = BCI->getOperand(0);
// Parameters are handled specially.
bool isParameter =
- (DIVariable(Variable).getTag() == dwarf::DW_TAG_arg_variable ||
+ (DIVariable(Variable)->getTag() == dwarf::DW_TAG_arg_variable ||
isa<Argument>(Address));
const AllocaInst *AI = dyn_cast<AllocaInst>(Address);
@@ -4677,7 +4687,8 @@ SelectionDAGBuilder::visitIntrinsicCall(const CallInst &I, unsigned Intrinsic) {
else {
// Address is an argument, so try to emit its dbg value using
// virtual register info from the FuncInfo.ValueMap.
- EmitFuncArgumentDbgValue(Address, Variable, Expression, 0, false, N);
+ EmitFuncArgumentDbgValue(Address, Variable, Expression, dl, 0, false,
+ N);
return nullptr;
}
} else if (AI)
@@ -4694,7 +4705,7 @@ SelectionDAGBuilder::visitIntrinsicCall(const CallInst &I, unsigned Intrinsic) {
} else {
// If Address is an argument then try to emit its dbg value using
// virtual register info from the FuncInfo.ValueMap.
- if (!EmitFuncArgumentDbgValue(Address, Variable, Expression, 0, false,
+ if (!EmitFuncArgumentDbgValue(Address, Variable, Expression, dl, 0, false,
N)) {
// If variable is pinned by a alloca in dominating bb then
// use StaticAllocaMap.
@@ -4717,14 +4728,12 @@ SelectionDAGBuilder::visitIntrinsicCall(const CallInst &I, unsigned Intrinsic) {
}
case Intrinsic::dbg_value: {
const DbgValueInst &DI = cast<DbgValueInst>(I);
- DIVariable DIVar(DI.getVariable());
- assert((!DIVar || DIVar.isVariable()) &&
- "Variable in DbgValueInst should be either null or a DIVariable.");
+ DIVariable DIVar = DI.getVariable();
if (!DIVar)
return nullptr;
- MDNode *Variable = DI.getVariable();
- MDNode *Expression = DI.getExpression();
+ MDLocalVariable *Variable = DI.getVariable();
+ MDExpression *Expression = DI.getExpression();
uint64_t Offset = DI.getOffset();
const Value *V = DI.getValue();
if (!V)
@@ -4745,7 +4754,7 @@ SelectionDAGBuilder::visitIntrinsicCall(const CallInst &I, unsigned Intrinsic) {
if (N.getNode()) {
// A dbg.value for an alloca is always indirect.
bool IsIndirect = isa<AllocaInst>(V) || Offset != 0;
- if (!EmitFuncArgumentDbgValue(V, Variable, Expression, Offset,
+ if (!EmitFuncArgumentDbgValue(V, Variable, Expression, dl, Offset,
IsIndirect, N)) {
SDV = DAG.getDbgValue(Variable, Expression, N.getNode(), N.getResNo(),
IsIndirect, Offset, dl, SDNodeOrder);
@@ -5360,6 +5369,9 @@ SelectionDAGBuilder::visitIntrinsicCall(const CallInst &I, unsigned Intrinsic) {
}
case Intrinsic::clear_cache:
return TLI.getClearCacheBuiltinName();
+ case Intrinsic::eh_actions:
+ setValue(&I, DAG.getUNDEF(TLI.getPointerTy()));
+ return nullptr;
case Intrinsic::donothing:
// ignore
return nullptr;
@@ -5397,14 +5409,16 @@ SelectionDAGBuilder::visitIntrinsicCall(const CallInst &I, unsigned Intrinsic) {
// Directly emit some FRAME_ALLOC machine instrs. Label assignment emission
// is the same on all targets.
for (unsigned Idx = 0, E = I.getNumArgOperands(); Idx < E; ++Idx) {
- AllocaInst *Slot =
- cast<AllocaInst>(I.getArgOperand(Idx)->stripPointerCasts());
+ Value *Arg = I.getArgOperand(Idx)->stripPointerCasts();
+ if (isa<ConstantPointerNull>(Arg))
+ continue; // Skip null pointers. They represent a hole in index space.
+ AllocaInst *Slot = cast<AllocaInst>(Arg);
assert(FuncInfo.StaticAllocaMap.count(Slot) &&
"can only escape static allocas");
int FI = FuncInfo.StaticAllocaMap[Slot];
MCSymbol *FrameAllocSym =
- MF.getMMI().getContext().getOrCreateFrameAllocSymbol(MF.getName(),
- Idx);
+ MF.getMMI().getContext().getOrCreateFrameAllocSymbol(
+ GlobalValue::getRealLinkageName(MF.getName()), Idx);
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, dl,
TII->get(TargetOpcode::FRAME_ALLOC))
.addSym(FrameAllocSym)
@@ -5424,8 +5438,8 @@ SelectionDAGBuilder::visitIntrinsicCall(const CallInst &I, unsigned Intrinsic) {
auto *Idx = cast<ConstantInt>(I.getArgOperand(2));
unsigned IdxVal = unsigned(Idx->getLimitedValue(INT_MAX));
MCSymbol *FrameAllocSym =
- MF.getMMI().getContext().getOrCreateFrameAllocSymbol(Fn->getName(),
- IdxVal);
+ MF.getMMI().getContext().getOrCreateFrameAllocSymbol(
+ GlobalValue::getRealLinkageName(Fn->getName()), IdxVal);
// Create a TargetExternalSymbol for the label to avoid any target lowering
// that would make this PC relative.
@@ -5446,16 +5460,6 @@ SelectionDAGBuilder::visitIntrinsicCall(const CallInst &I, unsigned Intrinsic) {
case Intrinsic::eh_begincatch:
case Intrinsic::eh_endcatch:
llvm_unreachable("begin/end catch intrinsics not lowered in codegen");
- case Intrinsic::eh_unwindhelp: {
- AllocaInst *Slot =
- cast<AllocaInst>(I.getArgOperand(0)->stripPointerCasts());
- assert(FuncInfo.StaticAllocaMap.count(Slot) &&
- "can only use static allocas with llvm.eh.unwindhelp");
- int FI = FuncInfo.StaticAllocaMap[Slot];
- // TODO: Save this in the not-yet-existant WinEHFuncInfo struct.
- (void)FI;
- return nullptr;
- }
}
}
@@ -5546,6 +5550,11 @@ void SelectionDAGBuilder::LowerCallTo(ImmutableCallSite CS, SDValue Callee,
// Skip the first return-type Attribute to get to params.
Entry.setAttributes(&CS, i - CS.arg_begin() + 1);
Args.push_back(Entry);
+
+ // If we have an explicit sret argument that is an Instruction (i.e., it
+ // might point to function-local memory), we can't meaningfully tail-call.
+ if (Entry.isSRet && isa<Instruction>(V))
+ isTailCall = false;
}
// Check if target-independent constraints permit a tail call here.
@@ -7183,6 +7192,10 @@ TargetLowering::LowerCallTo(TargetLowering::CallLoweringInfo &CLI) const {
Entry.Alignment = Align;
CLI.getArgs().insert(CLI.getArgs().begin(), Entry);
CLI.RetTy = Type::getVoidTy(CLI.RetTy->getContext());
+
+ // sret demotion isn't compatible with tail-calls, since the sret argument
+ // points into the caller's stack frame.
+ CLI.IsTailCall = false;
} else {
for (unsigned I = 0, E = RetTys.size(); I != E; ++I) {
EVT VT = RetTys[I];
@@ -7790,3 +7803,17 @@ MachineBasicBlock *SelectionDAGBuilder::NextBlock(MachineBasicBlock *MBB) {
return nullptr;
return I;
}
+
+/// During lowering, new call nodes can be created (such as memset or memcpy).
+/// Those will become new roots of the current DAG, but complications arise
+/// when they are tail calls. In such cases, the call lowering will update
+/// the root, but the builder still needs to know that a tail call has been
+/// lowered in order to avoid generating an additional return.
+void SelectionDAGBuilder::updateDAGForMaybeTailCall(SDValue MaybeTC) {
+ // If the node is null, the call was lowered as a tail call.
+ if (MaybeTC.getNode() != nullptr)
+ DAG.setRoot(MaybeTC);
+ else
+ HasTailCall = true;
+}
+
diff --git a/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.h b/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.h
index 30240d8..a27f470 100644
--- a/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.h
+++ b/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.h
@@ -397,7 +397,6 @@ private:
StackProtectorDescriptor() : ParentMBB(nullptr), SuccessMBB(nullptr),
FailureMBB(nullptr), Guard(nullptr),
GuardReg(0) { }
- ~StackProtectorDescriptor() { }
/// Returns true if all fields of the stack protector descriptor are
/// initialized implying that we should/are ready to emit a stack protector.
@@ -823,12 +822,17 @@ private:
/// EmitFuncArgumentDbgValue - If V is an function argument then create
/// corresponding DBG_VALUE machine instruction for it now. At the end of
/// instruction selection, they will be inserted to the entry BB.
- bool EmitFuncArgumentDbgValue(const Value *V, MDNode *Variable, MDNode *Expr,
+ bool EmitFuncArgumentDbgValue(const Value *V, MDLocalVariable *Variable,
+ MDExpression *Expr, MDLocation *DL,
int64_t Offset, bool IsIndirect,
const SDValue &N);
/// Return the next block after MBB, or nullptr if there is none.
MachineBasicBlock *NextBlock(MachineBasicBlock *MBB);
+
+ /// Update the DAG and DAG builder with the relevant information after
+ /// a new root node has been created which could be a tail call.
+ void updateDAGForMaybeTailCall(SDValue MaybeTC);
};
} // end namespace llvm
diff --git a/lib/CodeGen/SelectionDAG/SelectionDAGDumper.cpp b/lib/CodeGen/SelectionDAG/SelectionDAGDumper.cpp
index 5898da4..636c0a7 100644
--- a/lib/CodeGen/SelectionDAG/SelectionDAGDumper.cpp
+++ b/lib/CodeGen/SelectionDAG/SelectionDAGDumper.cpp
@@ -520,22 +520,20 @@ void SDNode::print_details(raw_ostream &OS, const SelectionDAG *G) const {
if (getNodeId() != -1)
OS << " [ID=" << getNodeId() << ']';
- DebugLoc dl = getDebugLoc();
- if (G && !dl.isUnknown()) {
- DIScope
- Scope(dl.getScope(G->getMachineFunction().getFunction()->getContext()));
- OS << " dbg:";
- assert((!Scope || Scope.isScope()) &&
- "Scope of a DebugLoc should be null or a DIScope.");
- // Omit the directory, since it's usually long and uninteresting.
- if (Scope)
- OS << Scope.getFilename();
- else
- OS << "<unknown>";
- OS << ':' << dl.getLine();
- if (dl.getCol() != 0)
- OS << ':' << dl.getCol();
- }
+ if (!G)
+ return;
+
+ MDLocation *L = getDebugLoc();
+ if (!L)
+ return;
+
+ if (auto *Scope = L->getScope())
+ OS << Scope->getFilename();
+ else
+ OS << "<unknown>";
+ OS << ':' << L->getLine();
+ if (unsigned C = L->getColumn())
+ OS << ':' << C;
}
static void DumpNodes(const SDNode *N, unsigned indent, const SelectionDAG *G) {
diff --git a/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp b/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp
index 4d2af3f..1e116dd 100644
--- a/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp
+++ b/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp
@@ -33,6 +33,7 @@
#include "llvm/CodeGen/SchedulerRegistry.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/SelectionDAGISel.h"
+#include "llvm/CodeGen/WinEHFuncInfo.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DebugInfo.h"
#include "llvm/IR/Function.h"
@@ -500,12 +501,14 @@ bool SelectionDAGISel::runOnMachineFunction(MachineFunction &mf) {
MachineBasicBlock::iterator InsertPos = Def;
const MDNode *Variable = MI->getDebugVariable();
const MDNode *Expr = MI->getDebugExpression();
+ DebugLoc DL = MI->getDebugLoc();
bool IsIndirect = MI->isIndirectDebugValue();
unsigned Offset = IsIndirect ? MI->getOperand(1).getImm() : 0;
+ assert(cast<MDLocalVariable>(Variable)->isValidLocationForIntrinsic(DL) &&
+ "Expected inlined-at fields to agree");
// Def is never a terminator here, so it is ok to increment InsertPos.
- BuildMI(*EntryMBB, ++InsertPos, MI->getDebugLoc(),
- TII->get(TargetOpcode::DBG_VALUE), IsIndirect, LDI->second, Offset,
- Variable, Expr);
+ BuildMI(*EntryMBB, ++InsertPos, DL, TII->get(TargetOpcode::DBG_VALUE),
+ IsIndirect, LDI->second, Offset, Variable, Expr);
// If this vreg is directly copied into an exported register then
// that COPY instructions also need DBG_VALUE, if it is the only
@@ -523,9 +526,10 @@ bool SelectionDAGISel::runOnMachineFunction(MachineFunction &mf) {
CopyUseMI = nullptr; break;
}
if (CopyUseMI) {
+ // Use MI's debug location, which describes where Variable was
+ // declared, rather than whatever is attached to CopyUseMI.
MachineInstr *NewMI =
- BuildMI(*MF, CopyUseMI->getDebugLoc(),
- TII->get(TargetOpcode::DBG_VALUE), IsIndirect,
+ BuildMI(*MF, DL, TII->get(TargetOpcode::DBG_VALUE), IsIndirect,
CopyUseMI->getOperand(0).getReg(), Offset, Variable, Expr);
MachineBasicBlock::iterator Pos = CopyUseMI;
EntryMBB->insertAfter(Pos, NewMI);
@@ -670,8 +674,8 @@ void SelectionDAGISel::CodeGenAndEmitDAG() {
#endif
{
BlockNumber = FuncInfo->MBB->getNumber();
- BlockName = MF->getName().str() + ":" +
- FuncInfo->MBB->getBasicBlock()->getName().str();
+ BlockName =
+ (MF->getName() + ":" + FuncInfo->MBB->getBasicBlock()->getName()).str();
}
DEBUG(dbgs() << "Initial selection DAG: BB#" << BlockNumber
<< " '" << BlockName << "'\n"; CurDAG->dump());
@@ -929,53 +933,74 @@ void SelectionDAGISel::PrepareEHLandingPad() {
const LandingPadInst *LPadInst = LLVMBB->getLandingPadInst();
MF->getMMI().addPersonality(
MBB, cast<Function>(LPadInst->getPersonalityFn()->stripPointerCasts()));
- if (MF->getMMI().getPersonalityType() == EHPersonality::MSVC_Win64SEH) {
- // Make virtual registers and a series of labels that fill in values for the
- // clauses.
- auto &RI = MF->getRegInfo();
- FuncInfo->ExceptionSelectorVirtReg = RI.createVirtualRegister(PtrRC);
+ EHPersonality Personality = MF->getMMI().getPersonalityType();
- // Get all invoke BBs that will unwind into the clause BBs.
+ if (isMSVCEHPersonality(Personality)) {
+ SmallVector<MachineBasicBlock *, 4> ClauseBBs;
+ const IntrinsicInst *Actions =
+ dyn_cast<IntrinsicInst>(LLVMBB->getFirstInsertionPt());
+ // Get all invoke BBs that unwind to this landingpad.
SmallVector<MachineBasicBlock *, 4> InvokeBBs(MBB->pred_begin(),
MBB->pred_end());
+ if (Actions && Actions->getIntrinsicID() == Intrinsic::eh_actions) {
+ // If this is a call to llvm.eh.actions followed by indirectbr, then we've
+ // run WinEHPrepare, and we should remove this block from the machine CFG.
+ // Mark the targets of the indirectbr as landingpads instead.
+ for (const BasicBlock *LLVMSucc : successors(LLVMBB)) {
+ MachineBasicBlock *ClauseBB = FuncInfo->MBBMap[LLVMSucc];
+ // Add the edge from the invoke to the clause.
+ for (MachineBasicBlock *InvokeBB : InvokeBBs)
+ InvokeBB->addSuccessor(ClauseBB);
+ }
+ } else {
+ // Otherwise, we haven't done the preparation, and we need to invent some
+ // clause basic blocks that branch into the landingpad.
+ // FIXME: Remove this code once SEH preparation works.
+
+ // Make virtual registers and a series of labels that fill in values for
+ // the clauses.
+ auto &RI = MF->getRegInfo();
+ FuncInfo->ExceptionSelectorVirtReg = RI.createVirtualRegister(PtrRC);
+
+ // Emit separate machine basic blocks with separate labels for each clause
+ // before the main landing pad block.
+ MachineInstrBuilder SelectorPHI = BuildMI(
+ *MBB, MBB->begin(), SDB->getCurDebugLoc(),
+ TII->get(TargetOpcode::PHI), FuncInfo->ExceptionSelectorVirtReg);
+ for (unsigned I = 0, E = LPadInst->getNumClauses(); I != E; ++I) {
+ // Skip filter clauses, we can't implement them.
+ if (LPadInst->isFilter(I))
+ continue;
- // Emit separate machine basic blocks with separate labels for each clause
- // before the main landing pad block.
- MachineInstrBuilder SelectorPHI = BuildMI(
- *MBB, MBB->begin(), SDB->getCurDebugLoc(), TII->get(TargetOpcode::PHI),
- FuncInfo->ExceptionSelectorVirtReg);
- for (unsigned I = 0, E = LPadInst->getNumClauses(); I != E; ++I) {
- // Skip filter clauses, we can't implement them yet.
- if (LPadInst->isFilter(I))
- continue;
-
- MachineBasicBlock *ClauseBB = MF->CreateMachineBasicBlock(LLVMBB);
- MF->insert(MBB, ClauseBB);
+ MachineBasicBlock *ClauseBB = MF->CreateMachineBasicBlock(LLVMBB);
+ MF->insert(MBB, ClauseBB);
- // Add the edge from the invoke to the clause.
- for (MachineBasicBlock *InvokeBB : InvokeBBs)
- InvokeBB->addSuccessor(ClauseBB);
+ // Add the edge from the invoke to the clause.
+ for (MachineBasicBlock *InvokeBB : InvokeBBs)
+ InvokeBB->addSuccessor(ClauseBB);
- // Mark the clause as a landing pad or MI passes will delete it.
- ClauseBB->setIsLandingPad();
+ // Mark the clause as a landing pad or MI passes will delete it.
+ ClauseBB->setIsLandingPad();
- GlobalValue *ClauseGV = ExtractTypeInfo(LPadInst->getClause(I));
+ GlobalValue *ClauseGV = ExtractTypeInfo(LPadInst->getClause(I));
- // Start the BB with a label.
- MCSymbol *ClauseLabel = MF->getMMI().addClauseForLandingPad(MBB);
- BuildMI(*ClauseBB, ClauseBB->begin(), SDB->getCurDebugLoc(), II)
- .addSym(ClauseLabel);
+ // Start the BB with a label.
+ MCSymbol *ClauseLabel = MF->getMMI().addClauseForLandingPad(MBB);
+ BuildMI(*ClauseBB, ClauseBB->begin(), SDB->getCurDebugLoc(), II)
+ .addSym(ClauseLabel);
- // Construct a simple BB that defines a register with the typeid constant.
- FuncInfo->MBB = ClauseBB;
- FuncInfo->InsertPt = ClauseBB->end();
- unsigned VReg = SDB->visitLandingPadClauseBB(ClauseGV, MBB);
- CurDAG->setRoot(SDB->getRoot());
- SDB->clear();
- CodeGenAndEmitDAG();
+ // Construct a simple BB that defines a register with the typeid
+ // constant.
+ FuncInfo->MBB = ClauseBB;
+ FuncInfo->InsertPt = ClauseBB->end();
+ unsigned VReg = SDB->visitLandingPadClauseBB(ClauseGV, MBB);
+ CurDAG->setRoot(SDB->getRoot());
+ SDB->clear();
+ CodeGenAndEmitDAG();
- // Add the typeid virtual register to the phi in the main landing pad.
- SelectorPHI.addReg(VReg).addMBB(ClauseBB);
+ // Add the typeid virtual register to the phi in the main landing pad.
+ SelectorPHI.addReg(VReg).addMBB(ClauseBB);
+ }
}
// Remove the edge from the invoke to the lpad.
@@ -986,6 +1011,12 @@ void SelectionDAGISel::PrepareEHLandingPad() {
// pad block.
FuncInfo->MBB = MBB;
FuncInfo->InsertPt = MBB->end();
+
+ // Transfer EH state number assigned to the IR block to the MBB.
+ if (Personality == EHPersonality::MSVC_CXX) {
+ WinEHFuncInfo &FI = MF->getMMI().getWinEHFuncInfo(MF->getFunction());
+ MF->getMMI().addWinEHState(MBB, FI.LandingPadStateMap[LPadInst]);
+ }
return;
}
@@ -1165,7 +1196,7 @@ void SelectionDAGISel::SelectAllBasicBlocks(const Function &Fn) {
// Setup an EH landing-pad block.
FuncInfo->ExceptionPointerVirtReg = 0;
FuncInfo->ExceptionSelectorVirtReg = 0;
- if (FuncInfo->MBB->isLandingPad())
+ if (LLVMBB->isLandingPad())
PrepareEHLandingPad();
// Before doing SelectionDAG ISel, see if FastISel has been requested.
@@ -2552,7 +2583,7 @@ public:
SelectionDAG::DAGUpdateListener(DAG),
RecordedNodes(RN), MatchScopes(MS) { }
- void NodeDeleted(SDNode *N, SDNode *E) {
+ void NodeDeleted(SDNode *N, SDNode *E) override {
// Some early-returns here to avoid the search if we deleted the node or
// if the update comes from MorphNodeTo (MorphNodeTo is the last thing we
// do, so it's unnecessary to update matching state at that point).
diff --git a/lib/CodeGen/SelectionDAG/StatepointLowering.cpp b/lib/CodeGen/SelectionDAG/StatepointLowering.cpp
index 3cc7a98..a9ffa72 100644
--- a/lib/CodeGen/SelectionDAG/StatepointLowering.cpp
+++ b/lib/CodeGen/SelectionDAG/StatepointLowering.cpp
@@ -397,10 +397,11 @@ static void lowerIncomingStatepointValue(SDValue Incoming,
Builder.DAG.getTargetConstant(StackMaps::ConstantOp, MVT::i64));
Ops.push_back(Builder.DAG.getTargetConstant(C->getSExtValue(), MVT::i64));
} else if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(Incoming)) {
- // This handles allocas as arguments to the statepoint
- const TargetLowering &TLI = Builder.DAG.getTargetLoweringInfo();
- Ops.push_back(
- Builder.DAG.getTargetFrameIndex(FI->getIndex(), TLI.getPointerTy()));
+ // This handles allocas as arguments to the statepoint (this is only
+ // really meaningful for a deopt value; for GC, we'd be trying to
+ // relocate the address of the alloca itself?)
+ Ops.push_back(Builder.DAG.getTargetFrameIndex(FI->getIndex(),
+ Incoming.getValueType()));
} else {
// Otherwise, locate a spill slot and explicitly spill it so it
// can be found by the runtime later. We currently do not support
@@ -441,27 +442,25 @@ static void lowerStatepointMetaArgs(SmallVectorImpl<SDValue> &Ops,
// heap. This is basically just here to help catch errors during statepoint
// insertion. TODO: This should actually be in the Verifier, but we can't get
// to the GCStrategy from there (yet).
- if (Builder.GFI) {
- GCStrategy &S = Builder.GFI->getStrategy();
- for (const Value *V : Bases) {
- auto Opt = S.isGCManagedPointer(V);
- if (Opt.hasValue()) {
- assert(Opt.getValue() &&
- "non gc managed base pointer found in statepoint");
- }
+ GCStrategy &S = Builder.GFI->getStrategy();
+ for (const Value *V : Bases) {
+ auto Opt = S.isGCManagedPointer(V);
+ if (Opt.hasValue()) {
+ assert(Opt.getValue() &&
+ "non gc managed base pointer found in statepoint");
}
- for (const Value *V : Ptrs) {
- auto Opt = S.isGCManagedPointer(V);
- if (Opt.hasValue()) {
- assert(Opt.getValue() &&
- "non gc managed derived pointer found in statepoint");
- }
+ }
+ for (const Value *V : Ptrs) {
+ auto Opt = S.isGCManagedPointer(V);
+ if (Opt.hasValue()) {
+ assert(Opt.getValue() &&
+ "non gc managed derived pointer found in statepoint");
}
- for (const Value *V : Relocations) {
- auto Opt = S.isGCManagedPointer(V);
- if (Opt.hasValue()) {
- assert(Opt.getValue() && "non gc managed pointer relocated");
- }
+ }
+ for (const Value *V : Relocations) {
+ auto Opt = S.isGCManagedPointer(V);
+ if (Opt.hasValue()) {
+ assert(Opt.getValue() && "non gc managed pointer relocated");
}
}
#endif
@@ -523,6 +522,21 @@ static void lowerStatepointMetaArgs(SmallVectorImpl<SDValue> &Ops,
SDValue Incoming = Builder.getValue(V);
lowerIncomingStatepointValue(Incoming, Ops, Builder);
}
+
+ // If there are any explicit spill slots passed to the statepoint, record
+ // them, but otherwise do not do anything special. These are user-provided
+ // allocas and give control over placement to the consumer. In this case,
+ // it is the contents of the slot which may get updated, not the pointer to
+ // the alloca.
+ for (Value *V : StatepointSite.gc_args()) {
+ SDValue Incoming = Builder.getValue(V);
+ if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(Incoming)) {
+ // This handles allocas as arguments to the statepoint
+ Ops.push_back(Builder.DAG.getTargetFrameIndex(FI->getIndex(),
+ Incoming.getValueType()));
+
+ }
+ }
}
void SelectionDAGBuilder::visitStatepoint(const CallInst &CI) {
@@ -565,10 +579,8 @@ SelectionDAGBuilder::LowerStatepoint(ImmutableStatepoint ISP,
// TODO: This if should become an assert. For now, we allow the GCStrategy
// to be optional for backwards compatibility. This will only last a short
// period (i.e. a couple of weeks).
- if (GFI) {
- assert(GFI->getStrategy().useStatepoints() &&
- "GCStrategy does not expect to encounter statepoints");
- }
+ assert(GFI->getStrategy().useStatepoints() &&
+ "GCStrategy does not expect to encounter statepoints");
#endif
// Lower statepoint vmstate and gcstate arguments
@@ -614,7 +626,7 @@ SelectionDAGBuilder::LowerStatepoint(ImmutableStatepoint ISP,
// Add a leading constant argument with the Flags and the calling convention
// masked together
CallingConv::ID CallConv = CS.getCallingConv();
- int Flags = dyn_cast<ConstantInt>(CS.getArgument(2))->getZExtValue();
+ int Flags = cast<ConstantInt>(CS.getArgument(2))->getZExtValue();
assert(Flags == 0 && "not expected to be used");
Ops.push_back(DAG.getTargetConstant(StackMaps::ConstantOp, MVT::i64));
Ops.push_back(
diff --git a/lib/CodeGen/ShadowStackGCLowering.cpp b/lib/CodeGen/ShadowStackGCLowering.cpp
index 66a6a3c..7c0b2bb 100644
--- a/lib/CodeGen/ShadowStackGCLowering.cpp
+++ b/lib/CodeGen/ShadowStackGCLowering.cpp
@@ -239,7 +239,7 @@ Constant *ShadowStackGCLowering::GetFrameMap(Function &F) {
Constant *GEPIndices[2] = {
ConstantInt::get(Type::getInt32Ty(F.getContext()), 0),
ConstantInt::get(Type::getInt32Ty(F.getContext()), 0)};
- return ConstantExpr::getGetElementPtr(GV, GEPIndices);
+ return ConstantExpr::getGetElementPtr(FrameMap->getType(), GV, GEPIndices);
}
Type *ShadowStackGCLowering::GetConcreteStackEntryType(Function &F) {
@@ -249,7 +249,7 @@ Type *ShadowStackGCLowering::GetConcreteStackEntryType(Function &F) {
for (size_t I = 0; I != Roots.size(); I++)
EltTys.push_back(Roots[I].second->getAllocatedType());
- return StructType::create(EltTys, "gc_stackentry." + F.getName().str());
+ return StructType::create(EltTys, ("gc_stackentry." + F.getName()).str());
}
/// doInitialization - If this module uses the GC intrinsics, find them now. If
diff --git a/lib/CodeGen/SjLjEHPrepare.cpp b/lib/CodeGen/SjLjEHPrepare.cpp
index 2335a88..0635173 100644
--- a/lib/CodeGen/SjLjEHPrepare.cpp
+++ b/lib/CodeGen/SjLjEHPrepare.cpp
@@ -46,6 +46,8 @@ STATISTIC(NumSpilled, "Number of registers live across unwind edges");
namespace {
class SjLjEHPrepare : public FunctionPass {
const TargetMachine *TM;
+ Type *doubleUnderDataTy;
+ Type *doubleUnderJBufTy;
Type *FunctionContextTy;
Constant *RegisterFn;
Constant *UnregisterFn;
@@ -93,12 +95,14 @@ bool SjLjEHPrepare::doInitialization(Module &M) {
// builtin_setjmp uses a five word jbuf
Type *VoidPtrTy = Type::getInt8PtrTy(M.getContext());
Type *Int32Ty = Type::getInt32Ty(M.getContext());
- FunctionContextTy = StructType::get(VoidPtrTy, // __prev
- Int32Ty, // call_site
- ArrayType::get(Int32Ty, 4), // __data
- VoidPtrTy, // __personality
- VoidPtrTy, // __lsda
- ArrayType::get(VoidPtrTy, 5), // __jbuf
+ doubleUnderDataTy = ArrayType::get(Int32Ty, 4);
+ doubleUnderJBufTy = ArrayType::get(VoidPtrTy, 5);
+ FunctionContextTy = StructType::get(VoidPtrTy, // __prev
+ Int32Ty, // call_site
+ doubleUnderDataTy, // __data
+ VoidPtrTy, // __personality
+ VoidPtrTy, // __lsda
+ doubleUnderJBufTy, // __jbuf
nullptr);
RegisterFn = M.getOrInsertFunction(
"_Unwind_SjLj_Register", Type::getVoidTy(M.getContext()),
@@ -204,16 +208,17 @@ Value *SjLjEHPrepare::setupFunctionContext(Function &F,
IRBuilder<> Builder(LPI->getParent()->getFirstInsertionPt());
// Reference the __data field.
- Value *FCData = Builder.CreateConstGEP2_32(FuncCtx, 0, 2, "__data");
+ Value *FCData =
+ Builder.CreateConstGEP2_32(FunctionContextTy, FuncCtx, 0, 2, "__data");
// The exception values come back in context->__data[0].
- Value *ExceptionAddr =
- Builder.CreateConstGEP2_32(FCData, 0, 0, "exception_gep");
+ Value *ExceptionAddr = Builder.CreateConstGEP2_32(doubleUnderDataTy, FCData,
+ 0, 0, "exception_gep");
Value *ExnVal = Builder.CreateLoad(ExceptionAddr, true, "exn_val");
ExnVal = Builder.CreateIntToPtr(ExnVal, Builder.getInt8PtrTy());
- Value *SelectorAddr =
- Builder.CreateConstGEP2_32(FCData, 0, 1, "exn_selector_gep");
+ Value *SelectorAddr = Builder.CreateConstGEP2_32(doubleUnderDataTy, FCData,
+ 0, 1, "exn_selector_gep");
Value *SelVal = Builder.CreateLoad(SelectorAddr, true, "exn_selector_val");
substituteLPadValues(LPI, ExnVal, SelVal);
@@ -223,15 +228,16 @@ Value *SjLjEHPrepare::setupFunctionContext(Function &F,
IRBuilder<> Builder(EntryBB->getTerminator());
if (!PersonalityFn)
PersonalityFn = LPads[0]->getPersonalityFn();
- Value *PersonalityFieldPtr =
- Builder.CreateConstGEP2_32(FuncCtx, 0, 3, "pers_fn_gep");
+ Value *PersonalityFieldPtr = Builder.CreateConstGEP2_32(
+ FunctionContextTy, FuncCtx, 0, 3, "pers_fn_gep");
Builder.CreateStore(
Builder.CreateBitCast(PersonalityFn, Builder.getInt8PtrTy()),
PersonalityFieldPtr, /*isVolatile=*/true);
// LSDA address
Value *LSDA = Builder.CreateCall(LSDAAddrFn, "lsda_addr");
- Value *LSDAFieldPtr = Builder.CreateConstGEP2_32(FuncCtx, 0, 4, "lsda_gep");
+ Value *LSDAFieldPtr =
+ Builder.CreateConstGEP2_32(FunctionContextTy, FuncCtx, 0, 4, "lsda_gep");
Builder.CreateStore(LSDA, LSDAFieldPtr, /*isVolatile=*/true);
return FuncCtx;
@@ -400,16 +406,19 @@ bool SjLjEHPrepare::setupEntryBlockAndCallSites(Function &F) {
IRBuilder<> Builder(EntryBB->getTerminator());
// Get a reference to the jump buffer.
- Value *JBufPtr = Builder.CreateConstGEP2_32(FuncCtx, 0, 5, "jbuf_gep");
+ Value *JBufPtr =
+ Builder.CreateConstGEP2_32(FunctionContextTy, FuncCtx, 0, 5, "jbuf_gep");
// Save the frame pointer.
- Value *FramePtr = Builder.CreateConstGEP2_32(JBufPtr, 0, 0, "jbuf_fp_gep");
+ Value *FramePtr = Builder.CreateConstGEP2_32(doubleUnderJBufTy, JBufPtr, 0, 0,
+ "jbuf_fp_gep");
Value *Val = Builder.CreateCall(FrameAddrFn, Builder.getInt32(0), "fp");
Builder.CreateStore(Val, FramePtr, /*isVolatile=*/true);
// Save the stack pointer.
- Value *StackPtr = Builder.CreateConstGEP2_32(JBufPtr, 0, 2, "jbuf_sp_gep");
+ Value *StackPtr = Builder.CreateConstGEP2_32(doubleUnderJBufTy, JBufPtr, 0, 2,
+ "jbuf_sp_gep");
Val = Builder.CreateCall(StackAddrFn, "sp");
Builder.CreateStore(Val, StackPtr, /*isVolatile=*/true);
diff --git a/lib/CodeGen/SpillPlacement.h b/lib/CodeGen/SpillPlacement.h
index 622361e..03dd58d 100644
--- a/lib/CodeGen/SpillPlacement.h
+++ b/lib/CodeGen/SpillPlacement.h
@@ -70,7 +70,7 @@ public:
static char ID; // Pass identification, replacement for typeid.
SpillPlacement() : MachineFunctionPass(ID), nodes(nullptr) {}
- ~SpillPlacement() { releaseMemory(); }
+ ~SpillPlacement() override { releaseMemory(); }
/// BorderConstraint - A basic block has separate constraints for entry and
/// exit.
diff --git a/lib/CodeGen/StackColoring.cpp b/lib/CodeGen/StackColoring.cpp
index 7572803..2bf2d64 100644
--- a/lib/CodeGen/StackColoring.cpp
+++ b/lib/CodeGen/StackColoring.cpp
@@ -464,7 +464,7 @@ void StackColoring::remapInstructions(DenseMap<int, int> &SlotRemap) {
continue;
if (SlotRemap.count(VI.Slot)) {
DEBUG(dbgs() << "Remapping debug info for ["
- << DIVariable(VI.Var).getName() << "].\n");
+ << cast<MDLocalVariable>(VI.Var)->getName() << "].\n");
VI.Slot = SlotRemap[VI.Slot];
FixedDbg++;
}
diff --git a/lib/CodeGen/TargetLoweringBase.cpp b/lib/CodeGen/TargetLoweringBase.cpp
index 58a6d52..2162a51 100644
--- a/lib/CodeGen/TargetLoweringBase.cpp
+++ b/lib/CodeGen/TargetLoweringBase.cpp
@@ -1246,20 +1246,13 @@ void TargetLoweringBase::computeRegisterProperties(
ValueTypeActions.setTypeAction(MVT::f64, TypeSoftenFloat);
}
- // Decide how to handle f32. If the target does not have native support for
- // f32, promote it to f64 if it is legal. Otherwise, expand it to i32.
+ // Decide how to handle f32. If the target does not have native f32 support,
+ // expand it to i32 and we will be generating soft float library calls.
if (!isTypeLegal(MVT::f32)) {
- if (isTypeLegal(MVT::f64)) {
- NumRegistersForVT[MVT::f32] = NumRegistersForVT[MVT::f64];
- RegisterTypeForVT[MVT::f32] = RegisterTypeForVT[MVT::f64];
- TransformToType[MVT::f32] = MVT::f64;
- ValueTypeActions.setTypeAction(MVT::f32, TypePromoteInteger);
- } else {
- NumRegistersForVT[MVT::f32] = NumRegistersForVT[MVT::i32];
- RegisterTypeForVT[MVT::f32] = RegisterTypeForVT[MVT::i32];
- TransformToType[MVT::f32] = MVT::i32;
- ValueTypeActions.setTypeAction(MVT::f32, TypeSoftenFloat);
- }
+ NumRegistersForVT[MVT::f32] = NumRegistersForVT[MVT::i32];
+ RegisterTypeForVT[MVT::f32] = RegisterTypeForVT[MVT::i32];
+ TransformToType[MVT::f32] = MVT::i32;
+ ValueTypeActions.setTypeAction(MVT::f32, TypeSoftenFloat);
}
if (!isTypeLegal(MVT::f16)) {
diff --git a/lib/CodeGen/TargetLoweringObjectFileImpl.cpp b/lib/CodeGen/TargetLoweringObjectFileImpl.cpp
index bcf2aa7..5b795e4 100644
--- a/lib/CodeGen/TargetLoweringObjectFileImpl.cpp
+++ b/lib/CodeGen/TargetLoweringObjectFileImpl.cpp
@@ -245,9 +245,11 @@ static StringRef getSectionPrefixForGlobal(SectionKind Kind) {
return ".data.rel.ro";
}
-static const MCSectionELF *selectELFSectionForGlobal(
- MCContext &Ctx, const GlobalValue *GV, SectionKind Kind, Mangler &Mang,
- const TargetMachine &TM, bool EmitUniqueSection, unsigned Flags) {
+static const MCSectionELF *
+selectELFSectionForGlobal(MCContext &Ctx, const GlobalValue *GV,
+ SectionKind Kind, Mangler &Mang,
+ const TargetMachine &TM, bool EmitUniqueSection,
+ unsigned Flags, unsigned *NextUniqueID) {
unsigned EntrySize = 0;
if (Kind.isMergeableCString()) {
if (Kind.isMergeable2ByteCString()) {
@@ -297,9 +299,13 @@ static const MCSectionELF *selectELFSectionForGlobal(
Name.push_back('.');
TM.getNameWithPrefix(Name, GV, Mang, true);
}
+ unsigned UniqueID = ~0;
+ if (EmitUniqueSection && !UniqueSectionNames) {
+ UniqueID = *NextUniqueID;
+ (*NextUniqueID)++;
+ }
return Ctx.getELFSection(Name, getELFSectionType(Name, Kind), Flags,
- EntrySize, Group,
- EmitUniqueSection && !UniqueSectionNames);
+ EntrySize, Group, UniqueID);
}
const MCSection *TargetLoweringObjectFileELF::SelectSectionForGlobal(
@@ -319,7 +325,7 @@ const MCSection *TargetLoweringObjectFileELF::SelectSectionForGlobal(
EmitUniqueSection |= GV->hasComdat();
return selectELFSectionForGlobal(getContext(), GV, Kind, Mang, TM,
- EmitUniqueSection, Flags);
+ EmitUniqueSection, Flags, &NextUniqueID);
}
const MCSection *TargetLoweringObjectFileELF::getSectionForJumpTable(
@@ -332,7 +338,8 @@ const MCSection *TargetLoweringObjectFileELF::getSectionForJumpTable(
return ReadOnlySection;
return selectELFSectionForGlobal(getContext(), &F, SectionKind::getReadOnly(),
- Mang, TM, EmitUniqueSection, ELF::SHF_ALLOC);
+ Mang, TM, EmitUniqueSection, ELF::SHF_ALLOC,
+ &NextUniqueID);
}
bool TargetLoweringObjectFileELF::shouldPutJumpTableInFunctionSection(
diff --git a/lib/CodeGen/WinEHPrepare.cpp b/lib/CodeGen/WinEHPrepare.cpp
index ab0f96e..35b944e 100644
--- a/lib/CodeGen/WinEHPrepare.cpp
+++ b/lib/CodeGen/WinEHPrepare.cpp
@@ -20,6 +20,8 @@
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/TinyPtrVector.h"
#include "llvm/Analysis/LibCallSemantics.h"
+#include "llvm/CodeGen/WinEHFuncInfo.h"
+#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Instructions.h"
@@ -33,6 +35,7 @@
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/Cloning.h"
#include "llvm/Transforms/Utils/Local.h"
+#include "llvm/Transforms/Utils/PromoteMemToReg.h"
#include <memory>
using namespace llvm;
@@ -49,14 +52,15 @@ namespace {
// frame allocation structure.
typedef MapVector<Value *, TinyPtrVector<AllocaInst *>> FrameVarInfoMap;
-typedef SmallSet<BasicBlock *, 4> VisitedBlockSet;
+// TinyPtrVector cannot hold nullptr, so we need our own sentinel that isn't
+// quite null.
+AllocaInst *getCatchObjectSentinel() {
+ return static_cast<AllocaInst *>(nullptr) + 1;
+}
-enum ActionType { Catch, Cleanup };
+typedef SmallSet<BasicBlock *, 4> VisitedBlockSet;
class LandingPadActions;
-class ActionHandler;
-class CatchHandler;
-class CleanupHandler;
class LandingPadMap;
typedef DenseMap<const BasicBlock *, CatchHandler *> CatchHandlerMapTy;
@@ -66,7 +70,7 @@ class WinEHPrepare : public FunctionPass {
public:
static char ID; // Pass identification, replacement for typeid.
WinEHPrepare(const TargetMachine *TM = nullptr)
- : FunctionPass(ID) {}
+ : FunctionPass(ID), DT(nullptr) {}
bool runOnFunction(Function &Fn) override;
@@ -81,31 +85,62 @@ public:
private:
bool prepareExceptionHandlers(Function &F,
SmallVectorImpl<LandingPadInst *> &LPads);
+ void promoteLandingPadValues(LandingPadInst *LPad);
+ void completeNestedLandingPad(Function *ParentFn,
+ LandingPadInst *OutlinedLPad,
+ const LandingPadInst *OriginalLPad,
+ FrameVarInfoMap &VarInfo);
bool outlineHandler(ActionHandler *Action, Function *SrcFn,
LandingPadInst *LPad, BasicBlock *StartBB,
FrameVarInfoMap &VarInfo);
+ void addStubInvokeToHandlerIfNeeded(Function *Handler, Value *PersonalityFn);
void mapLandingPadBlocks(LandingPadInst *LPad, LandingPadActions &Actions);
CatchHandler *findCatchHandler(BasicBlock *BB, BasicBlock *&NextBB,
VisitedBlockSet &VisitedBlocks);
- CleanupHandler *findCleanupHandler(BasicBlock *StartBB, BasicBlock *EndBB);
+ void findCleanupHandlers(LandingPadActions &Actions, BasicBlock *StartBB,
+ BasicBlock *EndBB);
void processSEHCatchHandler(CatchHandler *Handler, BasicBlock *StartBB);
// All fields are reset by runOnFunction.
+ DominatorTree *DT;
EHPersonality Personality;
CatchHandlerMapTy CatchHandlerMap;
CleanupHandlerMapTy CleanupHandlerMap;
- DenseMap<const LandingPadInst *, LandingPadMap> LPadMaps;
+ DenseMap<const LandingPadInst *, LandingPadMap> LPadMaps;
+
+ // This maps landing pad instructions found in outlined handlers to
+ // the landing pad instruction in the parent function from which they
+ // were cloned. The cloned/nested landing pad is used as the key
+ // because the landing pad may be cloned into multiple handlers.
+ // This map will be used to add the llvm.eh.actions call to the nested
+ // landing pads after all handlers have been outlined.
+ DenseMap<LandingPadInst *, const LandingPadInst *> NestedLPtoOriginalLP;
+
+ // This maps blocks in the parent function which are destinations of
+ // catch handlers to cloned blocks in (other) outlined handlers. This
+  // handles the case where a nested landing pad has a catch handler that
+ // returns to a handler function rather than the parent function.
+ // The original block is used as the key here because there should only
+ // ever be one handler function from which the cloned block is not pruned.
+ // The original block will be pruned from the parent function after all
+ // handlers have been outlined. This map will be used to adjust the
+ // return instructions of handlers which return to the block that was
+ // outlined into a handler. This is done after all handlers have been
+ // outlined but before the outlined code is pruned from the parent function.
+ DenseMap<const BasicBlock *, BasicBlock *> LPadTargetBlocks;
};
class WinEHFrameVariableMaterializer : public ValueMaterializer {
public:
WinEHFrameVariableMaterializer(Function *OutlinedFn,
FrameVarInfoMap &FrameVarInfo);
- ~WinEHFrameVariableMaterializer() {}
+ ~WinEHFrameVariableMaterializer() override {}
+
+ Value *materializeValueFor(Value *V) override;
- virtual Value *materializeValueFor(Value *V) override;
+ void escapeCatchObject(Value *V);
private:
FrameVarInfoMap &FrameVarInfo;
@@ -119,42 +154,23 @@ public:
bool isInitialized() { return OriginLPad != nullptr; }
- bool mapIfEHPtrLoad(const LoadInst *Load) {
- return mapIfEHLoad(Load, EHPtrStores, EHPtrStoreAddrs);
- }
- bool mapIfSelectorLoad(const LoadInst *Load) {
- return mapIfEHLoad(Load, SelectorStores, SelectorStoreAddrs);
- }
-
+ bool isOriginLandingPadBlock(const BasicBlock *BB) const;
bool isLandingPadSpecificInst(const Instruction *Inst) const;
- void remapSelector(ValueToValueMapTy &VMap, Value *MappedValue) const;
+ void remapEHValues(ValueToValueMapTy &VMap, Value *EHPtrValue,
+ Value *SelectorValue) const;
private:
- bool mapIfEHLoad(const LoadInst *Load,
- SmallVectorImpl<const StoreInst *> &Stores,
- SmallVectorImpl<const Value *> &StoreAddrs);
-
const LandingPadInst *OriginLPad;
// We will normally only see one of each of these instructions, but
// if more than one occurs for some reason we can handle that.
TinyPtrVector<const ExtractValueInst *> ExtractedEHPtrs;
TinyPtrVector<const ExtractValueInst *> ExtractedSelectors;
-
- // In optimized code, there will typically be at most one instance of
- // each of the following, but in unoptimized IR it is not uncommon
- // for the values to be stored, loaded and then stored again. In that
- // case we will create a second entry for each store and store address.
- SmallVector<const StoreInst *, 2> EHPtrStores;
- SmallVector<const StoreInst *, 2> SelectorStores;
- SmallVector<const Value *, 2> EHPtrStoreAddrs;
- SmallVector<const Value *, 2> SelectorStoreAddrs;
};
class WinEHCloningDirectorBase : public CloningDirector {
public:
- WinEHCloningDirectorBase(Function *HandlerFn,
- FrameVarInfoMap &VarInfo,
+ WinEHCloningDirectorBase(Function *HandlerFn, FrameVarInfoMap &VarInfo,
LandingPadMap &LPadMap)
: Materializer(HandlerFn, VarInfo),
SelectorIDType(Type::getInt32Ty(HandlerFn->getContext())),
@@ -180,6 +196,9 @@ public:
virtual CloningAction handleResume(ValueToValueMapTy &VMap,
const ResumeInst *Resume,
BasicBlock *NewBB) = 0;
+ virtual CloningAction handleLandingPad(ValueToValueMapTy &VMap,
+ const LandingPadInst *LPad,
+ BasicBlock *NewBB) = 0;
ValueMaterializer *getValueMaterializer() override { return &Materializer; }
@@ -192,11 +211,13 @@ protected:
class WinEHCatchDirector : public WinEHCloningDirectorBase {
public:
- WinEHCatchDirector(Function *CatchFn, Value *Selector,
- FrameVarInfoMap &VarInfo, LandingPadMap &LPadMap)
+ WinEHCatchDirector(
+ Function *CatchFn, Value *Selector, FrameVarInfoMap &VarInfo,
+ LandingPadMap &LPadMap,
+ DenseMap<LandingPadInst *, const LandingPadInst *> &NestedLPads)
: WinEHCloningDirectorBase(CatchFn, VarInfo, LPadMap),
CurrentSelector(Selector->stripPointerCasts()),
- ExceptionObjectVar(nullptr) {}
+ ExceptionObjectVar(nullptr), NestedLPtoOriginalLP(NestedLPads) {}
CloningAction handleBeginCatch(ValueToValueMapTy &VMap,
const Instruction *Inst,
@@ -210,21 +231,28 @@ public:
BasicBlock *NewBB) override;
CloningAction handleResume(ValueToValueMapTy &VMap, const ResumeInst *Resume,
BasicBlock *NewBB) override;
+ CloningAction handleLandingPad(ValueToValueMapTy &VMap,
+ const LandingPadInst *LPad,
+ BasicBlock *NewBB) override;
- const Value *getExceptionVar() { return ExceptionObjectVar; }
+ Value *getExceptionVar() { return ExceptionObjectVar; }
TinyPtrVector<BasicBlock *> &getReturnTargets() { return ReturnTargets; }
private:
Value *CurrentSelector;
- const Value *ExceptionObjectVar;
+ Value *ExceptionObjectVar;
TinyPtrVector<BasicBlock *> ReturnTargets;
+
+ // This will be a reference to the field of the same name in the WinEHPrepare
+ // object which instantiates this WinEHCatchDirector object.
+ DenseMap<LandingPadInst *, const LandingPadInst *> &NestedLPtoOriginalLP;
};
class WinEHCleanupDirector : public WinEHCloningDirectorBase {
public:
- WinEHCleanupDirector(Function *CleanupFn,
- FrameVarInfoMap &VarInfo, LandingPadMap &LPadMap)
+ WinEHCleanupDirector(Function *CleanupFn, FrameVarInfoMap &VarInfo,
+ LandingPadMap &LPadMap)
: WinEHCloningDirectorBase(CleanupFn, VarInfo, LPadMap) {}
CloningAction handleBeginCatch(ValueToValueMapTy &VMap,
@@ -239,66 +267,9 @@ public:
BasicBlock *NewBB) override;
CloningAction handleResume(ValueToValueMapTy &VMap, const ResumeInst *Resume,
BasicBlock *NewBB) override;
-};
-
-class ActionHandler {
-public:
- ActionHandler(BasicBlock *BB, ActionType Type)
- : StartBB(BB), Type(Type), HandlerBlockOrFunc(nullptr) {}
-
- ActionType getType() const { return Type; }
- BasicBlock *getStartBlock() const { return StartBB; }
-
- bool hasBeenProcessed() { return HandlerBlockOrFunc != nullptr; }
-
- void setHandlerBlockOrFunc(Constant *F) { HandlerBlockOrFunc = F; }
- Constant *getHandlerBlockOrFunc() { return HandlerBlockOrFunc; }
-
-private:
- BasicBlock *StartBB;
- ActionType Type;
-
- // Can be either a BlockAddress or a Function depending on the EH personality.
- Constant *HandlerBlockOrFunc;
-};
-
-class CatchHandler : public ActionHandler {
-public:
- CatchHandler(BasicBlock *BB, Constant *Selector, BasicBlock *NextBB)
- : ActionHandler(BB, ActionType::Catch), Selector(Selector),
- NextBB(NextBB), ExceptionObjectVar(nullptr) {}
-
- // Method for support type inquiry through isa, cast, and dyn_cast:
- static inline bool classof(const ActionHandler *H) {
- return H->getType() == ActionType::Catch;
- }
-
- Constant *getSelector() const { return Selector; }
- BasicBlock *getNextBB() const { return NextBB; }
-
- const Value *getExceptionVar() { return ExceptionObjectVar; }
- TinyPtrVector<BasicBlock *> &getReturnTargets() { return ReturnTargets; }
-
- void setExceptionVar(const Value *Val) { ExceptionObjectVar = Val; }
- void setReturnTargets(TinyPtrVector<BasicBlock *> &Targets) {
- ReturnTargets = Targets;
- }
-
-private:
- Constant *Selector;
- BasicBlock *NextBB;
- const Value *ExceptionObjectVar;
- TinyPtrVector<BasicBlock *> ReturnTargets;
-};
-
-class CleanupHandler : public ActionHandler {
-public:
- CleanupHandler(BasicBlock *BB) : ActionHandler(BB, ActionType::Cleanup) {}
-
- // Method for support type inquiry through isa, cast, and dyn_cast:
- static inline bool classof(const ActionHandler *H) {
- return H->getType() == ActionType::Cleanup;
- }
+ CloningAction handleLandingPad(ValueToValueMapTy &VMap,
+ const LandingPadInst *LPad,
+ BasicBlock *NewBB) override;
};
class LandingPadActions {
@@ -313,6 +284,7 @@ public:
bool includesCleanup() const { return HasCleanupHandlers; }
+ SmallVectorImpl<ActionHandler *> &actions() { return Actions; }
SmallVectorImpl<ActionHandler *>::iterator begin() { return Actions.begin(); }
SmallVectorImpl<ActionHandler *>::iterator end() { return Actions.end(); }
@@ -336,8 +308,8 @@ FunctionPass *llvm::createWinEHPass(const TargetMachine *TM) {
// FIXME: Remove this once the backend can handle the prepared IR.
static cl::opt<bool>
-SEHPrepare("sehprepare", cl::Hidden,
- cl::desc("Prepare functions with SEH personalities"));
+ SEHPrepare("sehprepare", cl::Hidden,
+ cl::desc("Prepare functions with SEH personalities"));
bool WinEHPrepare::runOnFunction(Function &Fn) {
SmallVector<LandingPadInst *, 4> LPads;
@@ -360,6 +332,8 @@ bool WinEHPrepare::runOnFunction(Function &Fn) {
if (!isMSVCEHPersonality(Personality))
return false;
+ DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
+
if (isAsynchronousEHPersonality(Personality) && !SEHPrepare) {
// Replace all resume instructions with unreachable.
// FIXME: Remove this once the backend can handle the prepared IR.
@@ -375,11 +349,11 @@ bool WinEHPrepare::runOnFunction(Function &Fn) {
return true;
}
-bool WinEHPrepare::doFinalization(Module &M) {
- return false;
-}
+bool WinEHPrepare::doFinalization(Module &M) { return false; }
-void WinEHPrepare::getAnalysisUsage(AnalysisUsage &AU) const {}
+void WinEHPrepare::getAnalysisUsage(AnalysisUsage &AU) const {
+ AU.addRequired<DominatorTreeWrapperPass>();
+}
bool WinEHPrepare::prepareExceptionHandlers(
Function &F, SmallVectorImpl<LandingPadInst *> &LPads) {
@@ -422,9 +396,14 @@ bool WinEHPrepare::prepareExceptionHandlers(
if (LPadHasActionList)
continue;
+ // If either of the values in the aggregate returned by the landing pad is
+ // extracted and stored to memory, promote the stored value to a register.
+ promoteLandingPadValues(LPad);
+
LandingPadActions Actions;
mapLandingPadBlocks(LPad, Actions);
+ HandlersOutlined |= !Actions.actions().empty();
for (ActionHandler *Action : Actions) {
if (Action->hasBeenProcessed())
continue;
@@ -436,24 +415,17 @@ bool WinEHPrepare::prepareExceptionHandlers(
if (isAsynchronousEHPersonality(Personality)) {
if (auto *CatchAction = dyn_cast<CatchHandler>(Action)) {
processSEHCatchHandler(CatchAction, StartBB);
- HandlersOutlined = true;
continue;
}
}
- if (outlineHandler(Action, &F, LPad, StartBB, FrameVarInfo)) {
- HandlersOutlined = true;
- }
- } // End for each Action
-
- // FIXME: We need a guard against partially outlined functions.
- if (!HandlersOutlined)
- continue;
+ outlineHandler(Action, &F, LPad, StartBB, FrameVarInfo);
+ }
// Replace the landing pad with a new llvm.eh.action based landing pad.
BasicBlock *NewLPadBB = BasicBlock::Create(Context, "lpad", &F, LPadBB);
assert(!isa<PHINode>(LPadBB->begin()));
- Instruction *NewLPad = LPad->clone();
+ auto *NewLPad = cast<LandingPadInst>(LPad->clone());
NewLPadBB->getInstList().push_back(NewLPad);
while (!pred_empty(LPadBB)) {
auto *pred = *pred_begin(LPadBB);
@@ -461,6 +433,19 @@ bool WinEHPrepare::prepareExceptionHandlers(
Invoke->setUnwindDest(NewLPadBB);
}
+ // If anyone is still using the old landingpad value, just give them undef
+ // instead. The eh pointer and selector values are not real.
+ LPad->replaceAllUsesWith(UndefValue::get(LPad->getType()));
+
+ // Replace the mapping of any nested landing pad that previously mapped
+    // to this landing pad with a reference to the cloned version.
+ for (auto &LPadPair : NestedLPtoOriginalLP) {
+ const LandingPadInst *OriginalLPad = LPadPair.second;
+ if (OriginalLPad == LPad) {
+ LPadPair.second = NewLPad;
+ }
+ }
+
// Replace uses of the old lpad in phis with this block and delete the old
// block.
LPadBB->replaceSuccessorsPhiUsesWith(NewLPadBB);
@@ -474,11 +459,17 @@ bool WinEHPrepare::prepareExceptionHandlers(
if (auto *CatchAction = dyn_cast<CatchHandler>(Action)) {
ActionArgs.push_back(ConstantInt::get(Int32Type, 1));
ActionArgs.push_back(CatchAction->getSelector());
+ // Find the frame escape index of the exception object alloca in the
+ // parent.
+ int FrameEscapeIdx = -1;
Value *EHObj = const_cast<Value *>(CatchAction->getExceptionVar());
- if (EHObj)
- ActionArgs.push_back(EHObj);
- else
- ActionArgs.push_back(ConstantPointerNull::get(Int8PtrType));
+ if (EHObj && !isa<ConstantPointerNull>(EHObj)) {
+ auto I = FrameVarInfo.find(EHObj);
+ assert(I != FrameVarInfo.end() &&
+ "failed to map llvm.eh.begincatch var");
+ FrameEscapeIdx = std::distance(FrameVarInfo.begin(), I);
+ }
+ ActionArgs.push_back(ConstantInt::get(Int32Type, FrameEscapeIdx));
} else {
ActionArgs.push_back(ConstantInt::get(Int32Type, 0));
}
@@ -502,6 +493,15 @@ bool WinEHPrepare::prepareExceptionHandlers(
if (!HandlersOutlined)
return false;
+ // Replace any nested landing pad stubs with the correct action handler.
+ // This must be done before we remove unreachable blocks because it
+ // cleans up references to outlined blocks that will be deleted.
+ for (auto &LPadPair : NestedLPtoOriginalLP)
+ completeNestedLandingPad(&F, LPadPair.first, LPadPair.second, FrameVarInfo);
+ NestedLPtoOriginalLP.clear();
+
+ F.addFnAttr("wineh-parent", F.getName());
+
// Delete any blocks that were only used by handlers that were outlined above.
removeUnreachableBlocks(F);
@@ -554,26 +554,26 @@ bool WinEHPrepare::prepareExceptionHandlers(
++InsertPt;
ParentAlloca =
new AllocaInst(ParentInst->getType(), nullptr,
- ParentInst->getName() + ".reg2mem", InsertPt);
+ ParentInst->getName() + ".reg2mem",
+ AllocaInsertPt);
new StoreInst(ParentInst, ParentAlloca, InsertPt);
} else {
- ParentAlloca = DemoteRegToStack(*ParentInst, true, ParentInst);
+ ParentAlloca = DemoteRegToStack(*ParentInst, true, AllocaInsertPt);
}
}
}
- // If the parent alloca is no longer used and only one of the handlers used
- // it, erase the parent and leave the copy in the outlined handler.
- if (ParentAlloca->getNumUses() == 0 && Allocas.size() == 1) {
- ParentAlloca->eraseFromParent();
- continue;
- }
+ // FIXME: We should try to sink unescaped allocas from the parent frame into
+ // the child frame. If the alloca is escaped, we have to use the lifetime
+ // markers to ensure that the alloca is only live within the child frame.
// Add this alloca to the list of things to escape.
AllocasToEscape.push_back(ParentAlloca);
// Next replace all outlined allocas that are mapped to it.
for (AllocaInst *TempAlloca : Allocas) {
+ if (TempAlloca == getCatchObjectSentinel())
+ continue; // Skip catch parameter sentinels.
Function *HandlerFn = TempAlloca->getParent()->getParent();
// FIXME: Sink this GEP into the blocks where it is used.
Builder.SetInsertPoint(TempAlloca);
@@ -601,19 +601,6 @@ bool WinEHPrepare::prepareExceptionHandlers(
Builder.SetInsertPoint(&F.getEntryBlock().back());
Builder.CreateCall(FrameEscapeFn, AllocasToEscape);
- // Insert an alloca for the EH state in the entry block. On x86, we will also
- // insert stores to update the EH state, but on other ISAs, the runtime does
- // it for us.
- // FIXME: This record is different on x86.
- Type *UnwindHelpTy = Type::getInt64Ty(Context);
- AllocaInst *UnwindHelp =
- new AllocaInst(UnwindHelpTy, "unwindhelp", &F.getEntryBlock().front());
- Builder.CreateStore(llvm::ConstantInt::get(UnwindHelpTy, -2), UnwindHelp);
- Function *UnwindHelpFn =
- Intrinsic::getDeclaration(M, Intrinsic::eh_unwindhelp);
- Builder.CreateCall(UnwindHelpFn,
- Builder.CreateBitCast(UnwindHelp, Int8PtrType));
-
// Clean up the handler action maps we created for this function
DeleteContainerSeconds(CatchHandlerMap);
CatchHandlerMap.clear();
@@ -623,6 +610,125 @@ bool WinEHPrepare::prepareExceptionHandlers(
return HandlersOutlined;
}
+void WinEHPrepare::promoteLandingPadValues(LandingPadInst *LPad) {
+ // If the return values of the landing pad instruction are extracted and
+ // stored to memory, we want to promote the store locations to reg values.
+ SmallVector<AllocaInst *, 2> EHAllocas;
+
+ // The landingpad instruction returns an aggregate value. Typically, its
+ // value will be passed to a pair of extract value instructions and the
+ // results of those extracts are often passed to store instructions.
+ // In unoptimized code the stored value will often be loaded and then stored
+ // again.
+ for (auto *U : LPad->users()) {
+ ExtractValueInst *Extract = dyn_cast<ExtractValueInst>(U);
+ if (!Extract)
+ continue;
+
+ for (auto *EU : Extract->users()) {
+ if (auto *Store = dyn_cast<StoreInst>(EU)) {
+ auto *AV = cast<AllocaInst>(Store->getPointerOperand());
+ EHAllocas.push_back(AV);
+ }
+ }
+ }
+
+ // We can't do this without a dominator tree.
+ assert(DT);
+
+ if (!EHAllocas.empty()) {
+ PromoteMemToReg(EHAllocas, *DT);
+ EHAllocas.clear();
+ }
+
+ // After promotion, some extracts may be trivially dead. Remove them.
+ SmallVector<Value *, 4> Users(LPad->user_begin(), LPad->user_end());
+ for (auto *U : Users)
+ RecursivelyDeleteTriviallyDeadInstructions(U);
+}
+
+void WinEHPrepare::completeNestedLandingPad(Function *ParentFn,
+ LandingPadInst *OutlinedLPad,
+ const LandingPadInst *OriginalLPad,
+ FrameVarInfoMap &FrameVarInfo) {
+ // Get the nested block and erase the unreachable instruction that was
+ // temporarily inserted as its terminator.
+ LLVMContext &Context = ParentFn->getContext();
+ BasicBlock *OutlinedBB = OutlinedLPad->getParent();
+ assert(isa<UnreachableInst>(OutlinedBB->getTerminator()));
+ OutlinedBB->getTerminator()->eraseFromParent();
+ // That should leave OutlinedLPad as the last instruction in its block.
+ assert(&OutlinedBB->back() == OutlinedLPad);
+
+ // The original landing pad will have already had its action intrinsic
+ // built by the outlining loop. We need to clone that into the outlined
+ // location. It may also be necessary to add references to the exception
+ // variables to the outlined handler in which this landing pad is nested
+ // and remap return instructions in the nested handlers that should return
+ // to an address in the outlined handler.
+ Function *OutlinedHandlerFn = OutlinedBB->getParent();
+ BasicBlock::const_iterator II = OriginalLPad;
+ ++II;
+ // The instruction after the landing pad should now be a call to eh.actions.
+ const Instruction *Recover = II;
+ assert(match(Recover, m_Intrinsic<Intrinsic::eh_actions>()));
+ IntrinsicInst *EHActions = cast<IntrinsicInst>(Recover->clone());
+
+ // Remap the exception variables into the outlined function.
+ WinEHFrameVariableMaterializer Materializer(OutlinedHandlerFn, FrameVarInfo);
+ SmallVector<BlockAddress *, 4> ActionTargets;
+ SmallVector<ActionHandler *, 4> ActionList;
+ parseEHActions(EHActions, ActionList);
+ for (auto *Action : ActionList) {
+ auto *Catch = dyn_cast<CatchHandler>(Action);
+ if (!Catch)
+ continue;
+ // The dyn_cast to function here selects C++ catch handlers and skips
+ // SEH catch handlers.
+ auto *Handler = dyn_cast<Function>(Catch->getHandlerBlockOrFunc());
+ if (!Handler)
+ continue;
+ // Visit all the return instructions, looking for places that return
+ // to a location within OutlinedHandlerFn.
+ for (BasicBlock &NestedHandlerBB : *Handler) {
+ auto *Ret = dyn_cast<ReturnInst>(NestedHandlerBB.getTerminator());
+ if (!Ret)
+ continue;
+
+ // Handler functions must always return a block address.
+ BlockAddress *BA = cast<BlockAddress>(Ret->getReturnValue());
+ // The original target will have been in the main parent function,
+ // but if it is the address of a block that has been outlined, it
+ // should be a block that was outlined into OutlinedHandlerFn.
+ assert(BA->getFunction() == ParentFn);
+
+ // Ignore targets that aren't part of OutlinedHandlerFn.
+ if (!LPadTargetBlocks.count(BA->getBasicBlock()))
+ continue;
+
+      // If the return value is the address of a block that we
+ // previously outlined into the parent handler function, replace
+ // the return instruction and add the mapped target to the list
+ // of possible return addresses.
+ BasicBlock *MappedBB = LPadTargetBlocks[BA->getBasicBlock()];
+ assert(MappedBB->getParent() == OutlinedHandlerFn);
+ BlockAddress *NewBA = BlockAddress::get(OutlinedHandlerFn, MappedBB);
+ Ret->eraseFromParent();
+ ReturnInst::Create(Context, NewBA, &NestedHandlerBB);
+ ActionTargets.push_back(NewBA);
+ }
+ }
+ DeleteContainerPointers(ActionList);
+ ActionList.clear();
+ OutlinedBB->getInstList().push_back(EHActions);
+
+ // Insert an indirect branch into the outlined landing pad BB.
+ IndirectBrInst *IBr = IndirectBrInst::Create(EHActions, 0, OutlinedBB);
+ // Add the previously collected action targets.
+ for (auto *Target : ActionTargets)
+ IBr->addDestination(Target->getBasicBlock());
+}
+
// This function examines a block to determine whether the block ends with a
// conditional branch to a catch handler based on a selector comparison.
// This function is used both by the WinEHPrepare::findSelectorComparison() and
@@ -657,6 +763,59 @@ static bool isSelectorDispatch(BasicBlock *BB, BasicBlock *&CatchHandler,
return false;
}
+static BasicBlock *createStubLandingPad(Function *Handler,
+ Value *PersonalityFn) {
+ // FIXME: Finish this!
+ LLVMContext &Context = Handler->getContext();
+ BasicBlock *StubBB = BasicBlock::Create(Context, "stub");
+ Handler->getBasicBlockList().push_back(StubBB);
+ IRBuilder<> Builder(StubBB);
+ LandingPadInst *LPad = Builder.CreateLandingPad(
+ llvm::StructType::get(Type::getInt8PtrTy(Context),
+ Type::getInt32Ty(Context), nullptr),
+ PersonalityFn, 0);
+ LPad->setCleanup(true);
+ Builder.CreateUnreachable();
+ return StubBB;
+}
+
+// Cycles through the blocks in an outlined handler function looking for an
+// invoke instruction and inserts an invoke of llvm.donothing with an empty
+// landing pad if none is found. The code that generates the .xdata tables for
+// the handler needs at least one landing pad to identify the parent function's
+// personality.
+void WinEHPrepare::addStubInvokeToHandlerIfNeeded(Function *Handler,
+ Value *PersonalityFn) {
+ ReturnInst *Ret = nullptr;
+ for (BasicBlock &BB : *Handler) {
+ TerminatorInst *Terminator = BB.getTerminator();
+ // If we find an invoke, there is nothing to be done.
+ auto *II = dyn_cast<InvokeInst>(Terminator);
+ if (II)
+ return;
+ // If we've already recorded a return instruction, keep looking for invokes.
+ if (Ret)
+ continue;
+ // If we haven't recorded a return instruction yet, try this terminator.
+ Ret = dyn_cast<ReturnInst>(Terminator);
+ }
+
+ // If we got this far, the handler contains no invokes. We should have seen
+ // at least one return. We'll insert an invoke of llvm.donothing ahead of
+ // that return.
+ assert(Ret);
+ BasicBlock *OldRetBB = Ret->getParent();
+ BasicBlock *NewRetBB = SplitBlock(OldRetBB, Ret);
+ // SplitBlock adds an unconditional branch instruction at the end of the
+ // parent block. We want to replace that with an invoke call, so we can
+ // erase it now.
+ OldRetBB->getTerminator()->eraseFromParent();
+ BasicBlock *StubLandingPad = createStubLandingPad(Handler, PersonalityFn);
+ Function *F =
+ Intrinsic::getDeclaration(Handler->getParent(), Intrinsic::donothing);
+ InvokeInst::Create(F, NewRetBB, StubLandingPad, None, "", OldRetBB);
+}
+
bool WinEHPrepare::outlineHandler(ActionHandler *Action, Function *SrcFn,
LandingPadInst *LPad, BasicBlock *StartBB,
FrameVarInfoMap &VarInfo) {
@@ -680,6 +839,8 @@ bool WinEHPrepare::outlineHandler(ActionHandler *Action, Function *SrcFn,
SrcFn->getName() + ".cleanup", M);
}
+ Handler->addFnAttr("wineh-parent", SrcFn->getName());
+
// Generate a standard prolog to setup the frame recovery structure.
IRBuilder<> Builder(Context);
BasicBlock *Entry = BasicBlock::Create(Context, "entry");
@@ -696,10 +857,14 @@ bool WinEHPrepare::outlineHandler(ActionHandler *Action, Function *SrcFn,
LPadMap.mapLandingPad(LPad);
if (auto *CatchAction = dyn_cast<CatchHandler>(Action)) {
Constant *Sel = CatchAction->getSelector();
- Director.reset(new WinEHCatchDirector(Handler, Sel, VarInfo, LPadMap));
- LPadMap.remapSelector(VMap, ConstantInt::get(Type::getInt32Ty(Context), 1));
+ Director.reset(new WinEHCatchDirector(Handler, Sel, VarInfo, LPadMap,
+ NestedLPtoOriginalLP));
+ LPadMap.remapEHValues(VMap, UndefValue::get(Int8PtrType),
+ ConstantInt::get(Type::getInt32Ty(Context), 1));
} else {
Director.reset(new WinEHCleanupDirector(Handler, VarInfo, LPadMap));
+ LPadMap.remapEHValues(VMap, UndefValue::get(Int8PtrType),
+ UndefValue::get(Type::getInt32Ty(Context)));
}
SmallVector<ReturnInst *, 8> Returns;
@@ -735,12 +900,45 @@ bool WinEHPrepare::outlineHandler(ActionHandler *Action, Function *SrcFn,
Entry->getInstList().splice(Entry->end(), FirstClonedBB->getInstList());
FirstClonedBB->eraseFromParent();
+ // Make sure we can identify the handler's personality later.
+ addStubInvokeToHandlerIfNeeded(Handler, LPad->getPersonalityFn());
+
if (auto *CatchAction = dyn_cast<CatchHandler>(Action)) {
WinEHCatchDirector *CatchDirector =
reinterpret_cast<WinEHCatchDirector *>(Director.get());
CatchAction->setExceptionVar(CatchDirector->getExceptionVar());
CatchAction->setReturnTargets(CatchDirector->getReturnTargets());
- }
+
+ // Look for blocks that are not part of the landing pad that we just
+ // outlined but terminate with a call to llvm.eh.endcatch and a
+ // branch to a block that is in the handler we just outlined.
+ // These blocks will be part of a nested landing pad that intends to
+ // return to an address in this handler. This case is best handled
+ // after both landing pads have been outlined, so for now we'll just
+ // save the association of the blocks in LPadTargetBlocks. The
+ // return instructions which are created from these branches will be
+ // replaced after all landing pads have been outlined.
+ for (const auto MapEntry : VMap) {
+ // VMap maps all values and blocks that were just cloned, but dead
+ // blocks which were pruned will map to nullptr.
+ if (!isa<BasicBlock>(MapEntry.first) || MapEntry.second == nullptr)
+ continue;
+ const BasicBlock *MappedBB = cast<BasicBlock>(MapEntry.first);
+ for (auto *Pred : predecessors(const_cast<BasicBlock *>(MappedBB))) {
+ auto *Branch = dyn_cast<BranchInst>(Pred->getTerminator());
+ if (!Branch || !Branch->isUnconditional() || Pred->size() <= 1)
+ continue;
+ BasicBlock::iterator II = const_cast<BranchInst *>(Branch);
+ --II;
+ if (match(cast<Value>(II), m_Intrinsic<Intrinsic::eh_endcatch>())) {
+ // This would indicate that a nested landing pad wants to return
+ // to a block that is outlined into two different handlers.
+ assert(!LPadTargetBlocks.count(MappedBB));
+ LPadTargetBlocks[MappedBB] = cast<BasicBlock>(MapEntry.second);
+ }
+ }
+ }
+ } // End if (CatchAction)
Action->setHandlerBlockOrFunc(Handler);
@@ -787,9 +985,8 @@ void LandingPadMap::mapLandingPad(const LandingPadInst *LPad) {
// The landingpad instruction returns an aggregate value. Typically, its
// value will be passed to a pair of extract value instructions and the
- // results of those extracts are often passed to store instructions.
- // In unoptimized code the stored value will often be loaded and then stored
- // again.
+ // results of those extracts will have been promoted to reg values before
+ // this routine is called.
for (auto *U : LPad->users()) {
const ExtractValueInst *Extract = dyn_cast<ExtractValueInst>(U);
if (!Extract)
@@ -800,36 +997,17 @@ void LandingPadMap::mapLandingPad(const LandingPadInst *LPad) {
assert((Idx == 0 || Idx == 1) &&
"Unexpected operation: extracting an unknown landing pad element");
if (Idx == 0) {
- // Element 0 doesn't directly corresponds to anything in the WinEH
- // scheme.
- // It will be stored to a memory location, then later loaded and finally
- // the loaded value will be used as the argument to an
- // llvm.eh.begincatch
- // call. We're tracking it here so that we can skip the store and load.
ExtractedEHPtrs.push_back(Extract);
} else if (Idx == 1) {
- // Element 1 corresponds to the filter selector. We'll map it to 1 for
- // matching purposes, but it will also probably be stored to memory and
- // reloaded, so we need to track the instuction so that we can map the
- // loaded value too.
ExtractedSelectors.push_back(Extract);
}
-
- // Look for stores of the extracted values.
- for (auto *EU : Extract->users()) {
- if (auto *Store = dyn_cast<StoreInst>(EU)) {
- if (Idx == 1) {
- SelectorStores.push_back(Store);
- SelectorStoreAddrs.push_back(Store->getPointerOperand());
- } else {
- EHPtrStores.push_back(Store);
- EHPtrStoreAddrs.push_back(Store->getPointerOperand());
- }
- }
- }
}
}
+bool LandingPadMap::isOriginLandingPadBlock(const BasicBlock *BB) const {
+ return BB->getLandingPadInst() == OriginLPad;
+}
+
bool LandingPadMap::isLandingPadSpecificInst(const Instruction *Inst) const {
if (Inst == OriginLPad)
return true;
@@ -841,47 +1019,16 @@ bool LandingPadMap::isLandingPadSpecificInst(const Instruction *Inst) const {
if (Inst == Extract)
return true;
}
- for (auto *Store : EHPtrStores) {
- if (Inst == Store)
- return true;
- }
- for (auto *Store : SelectorStores) {
- if (Inst == Store)
- return true;
- }
-
return false;
}
-void LandingPadMap::remapSelector(ValueToValueMapTy &VMap,
- Value *MappedValue) const {
- // Remap all selector extract instructions to the specified value.
+void LandingPadMap::remapEHValues(ValueToValueMapTy &VMap, Value *EHPtrValue,
+ Value *SelectorValue) const {
+ // Remap all landing pad extract instructions to the specified values.
+ for (auto *Extract : ExtractedEHPtrs)
+ VMap[Extract] = EHPtrValue;
for (auto *Extract : ExtractedSelectors)
- VMap[Extract] = MappedValue;
-}
-
-bool LandingPadMap::mapIfEHLoad(const LoadInst *Load,
- SmallVectorImpl<const StoreInst *> &Stores,
- SmallVectorImpl<const Value *> &StoreAddrs) {
- // This makes the assumption that a store we've previously seen dominates
- // this load instruction. That might seem like a rather huge assumption,
- // but given the way that landingpads are constructed its fairly safe.
- // FIXME: Add debug/assert code that verifies this.
- const Value *LoadAddr = Load->getPointerOperand();
- for (auto *StoreAddr : StoreAddrs) {
- if (LoadAddr == StoreAddr) {
- // Handle the common debug scenario where this loaded value is stored
- // to a different location.
- for (auto *U : Load->users()) {
- if (auto *Store = dyn_cast<StoreInst>(U)) {
- Stores.push_back(Store);
- StoreAddrs.push_back(Store->getPointerOperand());
- }
- }
- return true;
- }
- }
- return false;
+ VMap[Extract] = SelectorValue;
}
CloningDirector::CloningAction WinEHCloningDirectorBase::handleInstruction(
@@ -891,40 +1038,13 @@ CloningDirector::CloningAction WinEHCloningDirectorBase::handleInstruction(
if (LPadMap.isLandingPadSpecificInst(Inst))
return CloningDirector::SkipInstruction;
- if (auto *Load = dyn_cast<LoadInst>(Inst)) {
- // Look for loads of (previously suppressed) landingpad values.
- // The EHPtr load can be mapped to an undef value as it should only be used
- // as an argument to llvm.eh.begincatch, but the selector value needs to be
- // mapped to a constant value of 1. This value will be used to simplify the
- // branching to always flow to the current handler.
- if (LPadMap.mapIfSelectorLoad(Load)) {
- VMap[Inst] = ConstantInt::get(SelectorIDType, 1);
- return CloningDirector::SkipInstruction;
- }
- if (LPadMap.mapIfEHPtrLoad(Load)) {
- VMap[Inst] = UndefValue::get(Int8PtrType);
- return CloningDirector::SkipInstruction;
- }
-
- // Any other loads just get cloned.
- return CloningDirector::CloneInstruction;
- }
-
// Nested landing pads will be cloned as stubs, with just the
// landingpad instruction and an unreachable instruction. When
// all landingpads have been outlined, we'll replace this with the
// llvm.eh.actions call and indirect branch created when the
// landing pad was outlined.
- if (auto *NestedLPad = dyn_cast<LandingPadInst>(Inst)) {
- Instruction *NewInst = NestedLPad->clone();
- if (NestedLPad->hasName())
- NewInst->setName(NestedLPad->getName());
- // FIXME: Store this mapping somewhere else also.
- VMap[NestedLPad] = NewInst;
- BasicBlock::InstListType &InstList = NewBB->getInstList();
- InstList.push_back(NewInst);
- InstList.push_back(new UnreachableInst(NewBB->getContext()));
- return CloningDirector::StopCloningBB;
+ if (auto *LPad = dyn_cast<LandingPadInst>(Inst)) {
+ return handleLandingPad(VMap, LPad, NewBB);
}
if (auto *Invoke = dyn_cast<InvokeInst>(Inst))
@@ -944,6 +1064,20 @@ CloningDirector::CloningAction WinEHCloningDirectorBase::handleInstruction(
return CloningDirector::CloneInstruction;
}
+CloningDirector::CloningAction WinEHCatchDirector::handleLandingPad(
+ ValueToValueMapTy &VMap, const LandingPadInst *LPad, BasicBlock *NewBB) {
+ Instruction *NewInst = LPad->clone();
+ if (LPad->hasName())
+ NewInst->setName(LPad->getName());
+ // Save this correlation for later processing.
+ NestedLPtoOriginalLP[cast<LandingPadInst>(NewInst)] = LPad;
+ VMap[LPad] = NewInst;
+ BasicBlock::InstListType &InstList = NewBB->getInstList();
+ InstList.push_back(NewInst);
+ InstList.push_back(new UnreachableInst(NewBB->getContext()));
+ return CloningDirector::StopCloningBB;
+}
+
CloningDirector::CloningAction WinEHCatchDirector::handleBeginCatch(
ValueToValueMapTy &VMap, const Instruction *Inst, BasicBlock *NewBB) {
// The argument to the call is some form of the first element of the
@@ -958,6 +1092,11 @@ CloningDirector::CloningAction WinEHCatchDirector::handleBeginCatch(
"llvm.eh.begincatch found while "
"outlining catch handler.");
ExceptionObjectVar = Inst->getOperand(1)->stripPointerCasts();
+ if (isa<ConstantPointerNull>(ExceptionObjectVar))
+ return CloningDirector::SkipInstruction;
+ assert(cast<AllocaInst>(ExceptionObjectVar)->isStaticAlloca() &&
+ "catch parameter is not static alloca");
+ Materializer.escapeCatchObject(ExceptionObjectVar);
return CloningDirector::SkipInstruction;
}
@@ -971,27 +1110,32 @@ WinEHCatchDirector::handleEndCatch(ValueToValueMapTy &VMap,
// The end catch call can occur in one of two places: either in a
// landingpad block that is part of the catch handlers exception mechanism,
- // or at the end of the catch block. If it occurs in a landing pad, we must
- // skip it and continue so that the landing pad gets cloned.
- // FIXME: This case isn't fully supported yet and shouldn't turn up in any
- // of the test cases until it is.
- if (IntrinCall->getParent()->isLandingPad())
+ // or at the end of the catch block. However, a catch-all handler may call
+ // end catch from the original landing pad. If the call occurs in a nested
+ // landing pad block, we must skip it and continue so that the landing pad
+ // gets cloned.
+ auto *ParentBB = IntrinCall->getParent();
+ if (ParentBB->isLandingPad() && !LPadMap.isOriginLandingPadBlock(ParentBB))
return CloningDirector::SkipInstruction;
- // If an end catch occurs anywhere else the next instruction should be an
- // unconditional branch instruction that we want to replace with a return
- // to the the address of the branch target.
- const BasicBlock *EndCatchBB = IntrinCall->getParent();
- const TerminatorInst *Terminator = EndCatchBB->getTerminator();
- const BranchInst *Branch = dyn_cast<BranchInst>(Terminator);
- assert(Branch && Branch->isUnconditional());
- assert(std::next(BasicBlock::const_iterator(IntrinCall)) ==
- BasicBlock::const_iterator(Branch));
-
- BasicBlock *ContinueLabel = Branch->getSuccessor(0);
- ReturnInst::Create(NewBB->getContext(), BlockAddress::get(ContinueLabel),
- NewBB);
- ReturnTargets.push_back(ContinueLabel);
+ // If an end catch occurs anywhere else we want to terminate the handler
+ // with a return to the code that follows the endcatch call. If the
+ // next instruction is not an unconditional branch, we need to split the
+ // block to provide a clear target for the return instruction.
+ BasicBlock *ContinueBB;
+ auto Next = std::next(BasicBlock::const_iterator(IntrinCall));
+ const BranchInst *Branch = dyn_cast<BranchInst>(Next);
+ if (!Branch || !Branch->isUnconditional()) {
+ // We're interrupting the cloning process at this location, so the
+ // const_cast we're doing here will not cause a problem.
+ ContinueBB = SplitBlock(const_cast<BasicBlock *>(ParentBB),
+ const_cast<Instruction *>(cast<Instruction>(Next)));
+ } else {
+ ContinueBB = Branch->getSuccessor(0);
+ }
+
+ ReturnInst::Create(NewBB->getContext(), BlockAddress::get(ContinueBB), NewBB);
+ ReturnTargets.push_back(ContinueBB);
// We just added a terminator to the cloned block.
// Tell the caller to stop processing the current basic block so that
@@ -1029,6 +1173,20 @@ WinEHCatchDirector::handleResume(ValueToValueMapTy &VMap,
return CloningDirector::StopCloningBB;
}
+CloningDirector::CloningAction WinEHCleanupDirector::handleLandingPad(
+ ValueToValueMapTy &VMap, const LandingPadInst *LPad, BasicBlock *NewBB) {
+ // The MS runtime will terminate the process if an exception occurs in a
+ // cleanup handler, so we shouldn't encounter landing pads in the actual
+ // cleanup code, but they may appear in catch blocks. Depending on where
+ // we started cloning we may see one, but it will get dropped during dead
+ // block pruning.
+ Instruction *NewInst = new UnreachableInst(NewBB->getContext());
+ VMap[LPad] = NewInst;
+ BasicBlock::InstListType &InstList = NewBB->getInstList();
+ InstList.push_back(NewInst);
+ return CloningDirector::StopCloningBB;
+}
+
CloningDirector::CloningAction WinEHCleanupDirector::handleBeginCatch(
ValueToValueMapTy &VMap, const Instruction *Inst, BasicBlock *NewBB) {
// Catch blocks within cleanup handlers will always be unreachable.
@@ -1041,12 +1199,9 @@ CloningDirector::CloningAction WinEHCleanupDirector::handleBeginCatch(
CloningDirector::CloningAction WinEHCleanupDirector::handleEndCatch(
ValueToValueMapTy &VMap, const Instruction *Inst, BasicBlock *NewBB) {
- // Catch blocks within cleanup handlers will always be unreachable.
- // We'll insert an unreachable instruction now, but it will be pruned
- // before the cloning process is complete.
- BasicBlock::InstListType &InstList = NewBB->getInstList();
- InstList.push_back(new UnreachableInst(NewBB->getContext()));
- return CloningDirector::StopCloningBB;
+ // Cleanup handlers nested within catch handlers may begin with a call to
+ // eh.endcatch. We can just ignore that instruction.
+ return CloningDirector::SkipInstruction;
}
CloningDirector::CloningAction WinEHCleanupDirector::handleTypeIdFor(
@@ -1080,6 +1235,9 @@ CloningDirector::CloningAction WinEHCleanupDirector::handleInvoke(
NewCall->setDebugLoc(Invoke->getDebugLoc());
VMap[Invoke] = NewCall;
+ // Remap the operands.
+ llvm::RemapInstruction(NewCall, VMap, RF_None, nullptr, &Materializer);
+
// Insert an unconditional branch to the normal destination.
BranchInst::Create(Invoke->getNormalDest(), NewBB);
@@ -1088,7 +1246,7 @@ CloningDirector::CloningAction WinEHCleanupDirector::handleInvoke(
// We just added a terminator to the cloned block.
// Tell the caller to stop processing the current basic block.
- return CloningDirector::StopCloningBB;
+ return CloningDirector::CloneSuccessors;
}
CloningDirector::CloningAction WinEHCleanupDirector::handleResume(
@@ -1104,7 +1262,8 @@ CloningDirector::CloningAction WinEHCleanupDirector::handleResume(
WinEHFrameVariableMaterializer::WinEHFrameVariableMaterializer(
Function *OutlinedFn, FrameVarInfoMap &FrameVarInfo)
: FrameVarInfo(FrameVarInfo), Builder(OutlinedFn->getContext()) {
- Builder.SetInsertPoint(&OutlinedFn->getEntryBlock());
+ BasicBlock *EntryBB = &OutlinedFn->getEntryBlock();
+ Builder.SetInsertPoint(EntryBB, EntryBB->getFirstInsertionPt());
}
Value *WinEHFrameVariableMaterializer::materializeValueFor(Value *V) {
@@ -1139,6 +1298,15 @@ Value *WinEHFrameVariableMaterializer::materializeValueFor(Value *V) {
return nullptr;
}
+void WinEHFrameVariableMaterializer::escapeCatchObject(Value *V) {
+ // Catch parameter objects have to live in the parent frame. When we see a use
+ // of a catch parameter, add a sentinel to the multimap to indicate that it's
+ // used from another handler. This will prevent us from trying to sink the
+ // alloca into the handler and ensure that the catch parameter is present in
+ // the call to llvm.frameescape.
+ FrameVarInfo[V].push_back(getCatchObjectSentinel());
+}
+
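To make the sentinel mechanics concrete, here is a minimal sketch of how the marker travels through FrameVarInfo (the variable names are illustrative; the skip mirrors the alloca loop in prepareExceptionHandlers above):

    // Recording: a catch object referenced from a handler gets the non-null
    // marker rather than a real parent-frame alloca.
    FrameVarInfo[CatchObjVar].push_back(getCatchObjectSentinel());

    // Consuming: real allocas are rewritten, the marker is skipped.
    for (AllocaInst *A : FrameVarInfo[CatchObjVar]) {
      if (A == getCatchObjectSentinel())
        continue; // placeholder, not an alloca we can touch
      // ... replace A with a pointer recovered from the parent frame ...
    }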
// This function maps the catch and cleanup handlers that are reachable from the
// specified landing pad. The landing pad sequence will have this basic shape:
//
@@ -1176,13 +1344,7 @@ void WinEHPrepare::mapLandingPadBlocks(LandingPadInst *LPad,
DEBUG(dbgs() << "Mapping landing pad: " << BB->getName() << "\n");
if (NumClauses == 0) {
- // This landing pad contains only cleanup code.
- CleanupHandler *Action = new CleanupHandler(BB);
- CleanupHandlerMap[BB] = Action;
- Actions.insertCleanupHandler(Action);
- DEBUG(dbgs() << " Assuming cleanup code in block " << BB->getName()
- << "\n");
- assert(LPad->isCleanup());
+ findCleanupHandlers(Actions, BB, nullptr);
return;
}
@@ -1202,14 +1364,8 @@ void WinEHPrepare::mapLandingPadBlocks(LandingPadInst *LPad,
// exceptions but code called from catches can. For SEH, it isn't
// important if some finally code before a catch-all is executed out of
// line or after recovering from the exception.
- if (Personality == EHPersonality::MSVC_CXX) {
- if (auto *CleanupAction = findCleanupHandler(BB, BB)) {
- // Add a cleanup entry to the list
- Actions.insertCleanupHandler(CleanupAction);
- DEBUG(dbgs() << " Found cleanup code in block "
- << CleanupAction->getStartBlock()->getName() << "\n");
- }
- }
+ if (Personality == EHPersonality::MSVC_CXX)
+ findCleanupHandlers(Actions, BB, BB);
// Add the catch handler to the action list.
CatchHandler *Action =
@@ -1226,13 +1382,7 @@ void WinEHPrepare::mapLandingPadBlocks(LandingPadInst *LPad,
CatchHandler *CatchAction = findCatchHandler(BB, NextBB, VisitedBlocks);
// See if there is any interesting code executed before the dispatch.
- if (auto *CleanupAction =
- findCleanupHandler(BB, CatchAction->getStartBlock())) {
- // Add a cleanup entry to the list
- Actions.insertCleanupHandler(CleanupAction);
- DEBUG(dbgs() << " Found cleanup code in block "
- << CleanupAction->getStartBlock()->getName() << "\n");
- }
+ findCleanupHandlers(Actions, BB, CatchAction->getStartBlock());
assert(CatchAction);
++HandlersFound;
@@ -1248,12 +1398,7 @@ void WinEHPrepare::mapLandingPadBlocks(LandingPadInst *LPad,
// If we didn't wind up in a catch-all, see if there is any interesting code
// executed before the resume.
- if (auto *CleanupAction = findCleanupHandler(BB, BB)) {
- // Add a cleanup entry to the list
- Actions.insertCleanupHandler(CleanupAction);
- DEBUG(dbgs() << " Found cleanup code in block "
- << CleanupAction->getStartBlock()->getName() << "\n");
- }
+ findCleanupHandlers(Actions, BB, BB);
// It's possible that some optimization moved code into a landingpad that
// wasn't
@@ -1313,20 +1458,56 @@ CatchHandler *WinEHPrepare::findCatchHandler(BasicBlock *BB,
return nullptr;
}
-// These are helper functions to combine repeated code from findCleanupHandler.
-static CleanupHandler *createCleanupHandler(CleanupHandlerMapTy &CleanupHandlerMap,
- BasicBlock *BB) {
+// These are helper functions to combine repeated code from findCleanupHandlers.
+static void createCleanupHandler(LandingPadActions &Actions,
+ CleanupHandlerMapTy &CleanupHandlerMap,
+ BasicBlock *BB) {
CleanupHandler *Action = new CleanupHandler(BB);
CleanupHandlerMap[BB] = Action;
- return Action;
+ Actions.insertCleanupHandler(Action);
+ DEBUG(dbgs() << " Found cleanup code in block "
+ << Action->getStartBlock()->getName() << "\n");
+}
+
+static bool isFrameAddressCall(Value *V) {
+ return match(V, m_Intrinsic<Intrinsic::frameaddress>(m_SpecificInt(0)));
+}
+
+static CallSite matchOutlinedFinallyCall(BasicBlock *BB,
+ Instruction *MaybeCall) {
+ // Look for finally blocks that Clang has already outlined for us.
+ // %fp = call i8* @llvm.frameaddress(i32 0)
+ // call void @"fin$parent"(iN 1, i8* %fp)
+ if (isFrameAddressCall(MaybeCall) && MaybeCall != BB->getTerminator())
+ MaybeCall = MaybeCall->getNextNode();
+ CallSite FinallyCall(MaybeCall);
+ if (!FinallyCall || FinallyCall.arg_size() != 2)
+ return CallSite();
+ if (!match(FinallyCall.getArgument(0), m_SpecificInt(1)))
+ return CallSite();
+ if (!isFrameAddressCall(FinallyCall.getArgument(1)))
+ return CallSite();
+ return FinallyCall;
+}
+
+static BasicBlock *followSingleUnconditionalBranches(BasicBlock *BB) {
+  // Skip blocks that contain only a single unconditional branch.
+ while (BB->getFirstNonPHIOrDbg() == BB->getTerminator()) {
+ auto *Br = dyn_cast<BranchInst>(BB->getTerminator());
+ if (Br && Br->isUnconditional())
+ BB = Br->getSuccessor(0);
+ else
+ return BB;
+ }
+ return BB;
}
// This function searches starting with the input block for the next block that
// contains code that is not part of a catch handler and would not be eliminated
// during handler outlining.
//
-CleanupHandler *WinEHPrepare::findCleanupHandler(BasicBlock *StartBB,
- BasicBlock *EndBB) {
+void WinEHPrepare::findCleanupHandlers(LandingPadActions &Actions,
+ BasicBlock *StartBB, BasicBlock *EndBB) {
// Here we will skip over the following:
//
// landing pad prolog:
@@ -1343,6 +1524,7 @@ CleanupHandler *WinEHPrepare::findCleanupHandler(BasicBlock *StartBB,
// Anything other than an unconditional branch will kick us out of this loop
// one way or another.
while (BB) {
+ BB = followSingleUnconditionalBranches(BB);
// If we've already scanned this block, don't scan it again. If it is
// a cleanup block, there will be an action in the CleanupHandlerMap.
// If we've scanned it and it is not a cleanup block, there will be a
@@ -1351,7 +1533,12 @@ CleanupHandler *WinEHPrepare::findCleanupHandler(BasicBlock *StartBB,
// avoid creating a null entry for blocks we haven't scanned.
if (CleanupHandlerMap.count(BB)) {
if (auto *Action = CleanupHandlerMap[BB]) {
- return cast<CleanupHandler>(Action);
+ Actions.insertCleanupHandler(Action);
+ DEBUG(dbgs() << " Found cleanup code in block "
+ << Action->getStartBlock()->getName() << "\n");
+ // FIXME: This cleanup might chain into another, and we need to discover
+ // that.
+ return;
} else {
// Here we handle the case where the cleanup handler map contains a
// value for this block but the value is a nullptr. This means that
@@ -1363,11 +1550,9 @@ CleanupHandler *WinEHPrepare::findCleanupHandler(BasicBlock *StartBB,
// would terminate the search for cleanup code, so the unconditional
// branch is the only case for which we might need to continue
// searching.
- if (BB == EndBB)
- return nullptr;
- BasicBlock *SuccBB;
- if (!match(BB->getTerminator(), m_UnconditionalBr(SuccBB)))
- return nullptr;
+ BasicBlock *SuccBB = followSingleUnconditionalBranches(BB);
+ if (SuccBB == BB || SuccBB == EndBB)
+ return;
BB = SuccBB;
continue;
}
@@ -1390,26 +1575,23 @@ CleanupHandler *WinEHPrepare::findCleanupHandler(BasicBlock *StartBB,
}
// Look for the bare resume pattern:
- // %exn2 = load i8** %exn.slot
- // %sel2 = load i32* %ehselector.slot
- // %lpad.val1 = insertvalue { i8*, i32 } undef, i8* %exn2, 0
- // %lpad.val2 = insertvalue { i8*, i32 } %lpad.val1, i32 %sel2, 1
+ // %lpad.val1 = insertvalue { i8*, i32 } undef, i8* %exn, 0
+ // %lpad.val2 = insertvalue { i8*, i32 } %lpad.val1, i32 %sel, 1
// resume { i8*, i32 } %lpad.val2
if (auto *Resume = dyn_cast<ResumeInst>(Terminator)) {
InsertValueInst *Insert1 = nullptr;
InsertValueInst *Insert2 = nullptr;
Value *ResumeVal = Resume->getOperand(0);
- // If there is only one landingpad, we may use the lpad directly with no
- // insertions.
- if (isa<LandingPadInst>(ResumeVal))
- return nullptr;
- if (!isa<PHINode>(ResumeVal)) {
+ // If the resume value isn't a phi or landingpad value, it should be a
+ // series of insertions. Identify them so we can avoid them when scanning
+ // for cleanups.
+ if (!isa<PHINode>(ResumeVal) && !isa<LandingPadInst>(ResumeVal)) {
Insert2 = dyn_cast<InsertValueInst>(ResumeVal);
if (!Insert2)
- return createCleanupHandler(CleanupHandlerMap, BB);
+ return createCleanupHandler(Actions, CleanupHandlerMap, BB);
Insert1 = dyn_cast<InsertValueInst>(Insert2->getAggregateOperand());
if (!Insert1)
- return createCleanupHandler(CleanupHandlerMap, BB);
+ return createCleanupHandler(Actions, CleanupHandlerMap, BB);
}
for (BasicBlock::iterator II = BB->getFirstNonPHIOrDbg(), IE = BB->end();
II != IE; ++II) {
@@ -1420,66 +1602,133 @@ CleanupHandler *WinEHPrepare::findCleanupHandler(BasicBlock *StartBB,
continue;
if (!Inst->hasOneUse() ||
(Inst->user_back() != Insert1 && Inst->user_back() != Insert2)) {
- return createCleanupHandler(CleanupHandlerMap, BB);
+ return createCleanupHandler(Actions, CleanupHandlerMap, BB);
}
}
- return nullptr;
+ return;
}
BranchInst *Branch = dyn_cast<BranchInst>(Terminator);
- if (Branch) {
- if (Branch->isConditional()) {
- // Look for the selector dispatch.
- // %sel = load i32* %ehselector.slot
- // %2 = call i32 @llvm.eh.typeid.for(i8* bitcast (i8** @_ZTIf to i8*))
- // %matches = icmp eq i32 %sel12, %2
- // br i1 %matches, label %catch14, label %eh.resume
- CmpInst *Compare = dyn_cast<CmpInst>(Branch->getCondition());
- if (!Compare || !Compare->isEquality())
- return createCleanupHandler(CleanupHandlerMap, BB);
- for (BasicBlock::iterator II = BB->getFirstNonPHIOrDbg(),
- IE = BB->end();
- II != IE; ++II) {
- Instruction *Inst = II;
- if (LPadMap && LPadMap->isLandingPadSpecificInst(Inst))
- continue;
- if (Inst == Compare || Inst == Branch)
- continue;
- if (!Inst->hasOneUse() || (Inst->user_back() != Compare))
- return createCleanupHandler(CleanupHandlerMap, BB);
- if (match(Inst, m_Intrinsic<Intrinsic::eh_typeid_for>()))
- continue;
- if (!isa<LoadInst>(Inst))
- return createCleanupHandler(CleanupHandlerMap, BB);
- }
- // The selector dispatch block should always terminate our search.
- assert(BB == EndBB);
- return nullptr;
- } else {
- // Look for empty blocks with unconditional branches.
- for (BasicBlock::iterator II = BB->getFirstNonPHIOrDbg(),
- IE = BB->end();
- II != IE; ++II) {
- Instruction *Inst = II;
- if (LPadMap && LPadMap->isLandingPadSpecificInst(Inst))
- continue;
- if (Inst == Branch)
- continue;
- if (match(Inst, m_Intrinsic<Intrinsic::eh_endcatch>()))
- continue;
- // Anything else makes this interesting cleanup code.
- return createCleanupHandler(CleanupHandlerMap, BB);
+ if (Branch && Branch->isConditional()) {
+ // Look for the selector dispatch.
+ // %2 = call i32 @llvm.eh.typeid.for(i8* bitcast (i8** @_ZTIf to i8*))
+ // %matches = icmp eq i32 %sel, %2
+ // br i1 %matches, label %catch14, label %eh.resume
+ CmpInst *Compare = dyn_cast<CmpInst>(Branch->getCondition());
+ if (!Compare || !Compare->isEquality())
+ return createCleanupHandler(Actions, CleanupHandlerMap, BB);
+ for (BasicBlock::iterator II = BB->getFirstNonPHIOrDbg(), IE = BB->end();
+ II != IE; ++II) {
+ Instruction *Inst = II;
+ if (LPadMap && LPadMap->isLandingPadSpecificInst(Inst))
+ continue;
+ if (Inst == Compare || Inst == Branch)
+ continue;
+ if (match(Inst, m_Intrinsic<Intrinsic::eh_typeid_for>()))
+ continue;
+ return createCleanupHandler(Actions, CleanupHandlerMap, BB);
+ }
+ // The selector dispatch block should always terminate our search.
+ assert(BB == EndBB);
+ return;
+ }
+
+ if (isAsynchronousEHPersonality(Personality)) {
+    // If this is a landingpad block, skip past the landing-pad-specific
+    // instructions to find the first interesting instruction.
+ Instruction *MaybeCall = BB->getFirstNonPHIOrDbg();
+ if (LPadMap) {
+ while (MaybeCall != BB->getTerminator() &&
+ LPadMap->isLandingPadSpecificInst(MaybeCall))
+ MaybeCall = MaybeCall->getNextNode();
+ }
+
+ // Look for outlined finally calls.
+ if (CallSite FinallyCall = matchOutlinedFinallyCall(BB, MaybeCall)) {
+ Function *Fin = FinallyCall.getCalledFunction();
+ assert(Fin && "outlined finally call should be direct");
+ auto *Action = new CleanupHandler(BB);
+ Action->setHandlerBlockOrFunc(Fin);
+ Actions.insertCleanupHandler(Action);
+ CleanupHandlerMap[BB] = Action;
+ DEBUG(dbgs() << " Found frontend-outlined finally call to "
+ << Fin->getName() << " in block "
+ << Action->getStartBlock()->getName() << "\n");
+
+ // Split the block if there were more interesting instructions and look
+ // for finally calls in the normal successor block.
+ BasicBlock *SuccBB = BB;
+ if (FinallyCall.getInstruction() != BB->getTerminator() &&
+ FinallyCall.getInstruction()->getNextNode() != BB->getTerminator()) {
+ SuccBB = BB->splitBasicBlock(FinallyCall.getInstruction()->getNextNode());
+ } else {
+ if (FinallyCall.isInvoke()) {
+ SuccBB = cast<InvokeInst>(FinallyCall.getInstruction())->getNormalDest();
+ } else {
+ SuccBB = BB->getUniqueSuccessor();
+ assert(SuccBB && "splitOutlinedFinallyCalls didn't insert a branch");
+ }
}
+ BB = SuccBB;
if (BB == EndBB)
- return nullptr;
- // The branch was unconditional.
- BB = Branch->getSuccessor(0);
+ return;
+ continue;
+ }
+ }
+
+ // Anything else is either a catch block or interesting cleanup code.
+ for (BasicBlock::iterator II = BB->getFirstNonPHIOrDbg(), IE = BB->end();
+ II != IE; ++II) {
+ Instruction *Inst = II;
+ if (LPadMap && LPadMap->isLandingPadSpecificInst(Inst))
+ continue;
+ // Unconditional branches fall through to this loop.
+ if (Inst == Branch)
continue;
- } // End else of if branch was conditional
- } // End if Branch
+ // If this is a catch block, there is no cleanup code to be found.
+ if (match(Inst, m_Intrinsic<Intrinsic::eh_begincatch>()))
+ return;
+      // If this is a nested landing pad, it may contain an endcatch call.
+ if (match(Inst, m_Intrinsic<Intrinsic::eh_endcatch>()))
+ return;
+ // Anything else makes this interesting cleanup code.
+ return createCleanupHandler(Actions, CleanupHandlerMap, BB);
+ }
- // Anything else makes this interesting cleanup code.
- return createCleanupHandler(CleanupHandlerMap, BB);
+ // Only unconditional branches in empty blocks should get this far.
+ assert(Branch && Branch->isUnconditional());
+ if (BB == EndBB)
+ return;
+ BB = Branch->getSuccessor(0);
}
- return nullptr;
+}
+
+// This is a public function, declared in WinEHFuncInfo.h and is also
+// referenced by WinEHNumbering in FunctionLoweringInfo.cpp.
+void llvm::parseEHActions(const IntrinsicInst *II,
+ SmallVectorImpl<ActionHandler *> &Actions) {
+ for (unsigned I = 0, E = II->getNumArgOperands(); I != E;) {
+ uint64_t ActionKind =
+ cast<ConstantInt>(II->getArgOperand(I))->getZExtValue();
+ if (ActionKind == /*catch=*/1) {
+ auto *Selector = cast<Constant>(II->getArgOperand(I + 1));
+ ConstantInt *EHObjIndex = cast<ConstantInt>(II->getArgOperand(I + 2));
+ int64_t EHObjIndexVal = EHObjIndex->getSExtValue();
+ Constant *Handler = cast<Constant>(II->getArgOperand(I + 3));
+ I += 4;
+ auto *CH = new CatchHandler(/*BB=*/nullptr, Selector, /*NextBB=*/nullptr);
+ CH->setHandlerBlockOrFunc(Handler);
+ CH->setExceptionVarIndex(EHObjIndexVal);
+ Actions.push_back(CH);
+ } else if (ActionKind == 0) {
+ Constant *Handler = cast<Constant>(II->getArgOperand(I + 1));
+ I += 2;
+ auto *CH = new CleanupHandler(/*BB=*/nullptr);
+ CH->setHandlerBlockOrFunc(Handler);
+ Actions.push_back(CH);
+ } else {
+ llvm_unreachable("Expected either a catch or cleanup handler!");
+ }
+ }
+ std::reverse(Actions.begin(), Actions.end());
}
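As a usage sketch for the function just added (EHActions stands for an already-located llvm.eh.actions call, as in completeNestedLandingPad above; deleting the returned handlers is the caller's responsibility):

    SmallVector<ActionHandler *, 4> ActionList;
    parseEHActions(EHActions, ActionList);
    for (ActionHandler *AH : ActionList) {
      if (auto *CH = dyn_cast<CatchHandler>(AH)) {
        // Catch entries carry a selector, the frame-escape index of the
        // catch object, and the outlined handler.
        (void)CH->getSelector();
        (void)CH->getHandlerBlockOrFunc();
      } else {
        // Cleanup entries carry only the outlined handler.
        (void)AH->getHandlerBlockOrFunc();
      }
    }
    DeleteContainerPointers(ActionList);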