Diffstat (limited to 'lib/IR/AsmWriter.cpp')
-rw-r--r--  lib/IR/AsmWriter.cpp | 278
1 file changed, 272 insertions(+), 6 deletions(-)
diff --git a/lib/IR/AsmWriter.cpp b/lib/IR/AsmWriter.cpp
index a7499bc..1961a20 100644
--- a/lib/IR/AsmWriter.cpp
+++ b/lib/IR/AsmWriter.cpp
@@ -49,6 +49,213 @@ AssemblyAnnotationWriter::~AssemblyAnnotationWriter() {}
// Helper Functions
//===----------------------------------------------------------------------===//
+namespace {
+struct OrderMap {
+ DenseMap<const Value *, std::pair<unsigned, bool>> IDs;
+
+ unsigned size() const { return IDs.size(); }
+ std::pair<unsigned, bool> &operator[](const Value *V) { return IDs[V]; }
+ std::pair<unsigned, bool> lookup(const Value *V) const {
+ return IDs.lookup(V);
+ }
+ void index(const Value *V) {
+ // Explicitly sequence get-size and insert-value operations to avoid UB.
+ unsigned ID = IDs.size() + 1;
+ IDs[V].first = ID;
+ }
+};
+}
+
+static void orderValue(const Value *V, OrderMap &OM) {
+ if (OM.lookup(V).first)
+ return;
+
+ if (const Constant *C = dyn_cast<Constant>(V))
+ if (C->getNumOperands() && !isa<GlobalValue>(C))
+ for (const Value *Op : C->operands())
+ if (!isa<BasicBlock>(Op) && !isa<GlobalValue>(Op))
+ orderValue(Op, OM);
+
+ // Note: we cannot cache this lookup above, since inserting into the map
+ // changes the map's size, and thus affects the other IDs.
+ OM.index(V);
+}
+
+static OrderMap orderModule(const Module *M) {
+ // This needs to match the order used by ValueEnumerator::ValueEnumerator()
+ // and ValueEnumerator::incorporateFunction().
+ OrderMap OM;
+
+ for (const GlobalVariable &G : M->globals()) {
+ if (G.hasInitializer())
+ if (!isa<GlobalValue>(G.getInitializer()))
+ orderValue(G.getInitializer(), OM);
+ orderValue(&G, OM);
+ }
+ for (const GlobalAlias &A : M->aliases()) {
+ if (!isa<GlobalValue>(A.getAliasee()))
+ orderValue(A.getAliasee(), OM);
+ orderValue(&A, OM);
+ }
+ for (const Function &F : *M) {
+ if (F.hasPrefixData())
+ if (!isa<GlobalValue>(F.getPrefixData()))
+ orderValue(F.getPrefixData(), OM);
+ orderValue(&F, OM);
+
+ if (F.isDeclaration())
+ continue;
+
+ for (const Argument &A : F.args())
+ orderValue(&A, OM);
+ for (const BasicBlock &BB : F) {
+ orderValue(&BB, OM);
+ for (const Instruction &I : BB) {
+ for (const Value *Op : I.operands())
+ if ((isa<Constant>(*Op) && !isa<GlobalValue>(*Op)) ||
+ isa<InlineAsm>(*Op))
+ orderValue(Op, OM);
+ orderValue(&I, OM);
+ }
+ }
+ }
+ return OM;
+}
+
+static void predictValueUseListOrderImpl(const Value *V, const Function *F,
+ unsigned ID, const OrderMap &OM,
+ UseListOrderStack &Stack) {
+ // Predict use-list order for this one.
+ typedef std::pair<const Use *, unsigned> Entry;
+ SmallVector<Entry, 64> List;
+ for (const Use &U : V->uses())
+ // Check if this user will be serialized.
+ if (OM.lookup(U.getUser()).first)
+ List.push_back(std::make_pair(&U, List.size()));
+
+ if (List.size() < 2)
+ // We may have lost some users.
+ return;
+
+ bool GetsReversed =
+ !isa<GlobalVariable>(V) && !isa<Function>(V) && !isa<BasicBlock>(V);
+ if (auto *BA = dyn_cast<BlockAddress>(V))
+ ID = OM.lookup(BA->getBasicBlock()).first;
+ std::sort(List.begin(), List.end(), [&](const Entry &L, const Entry &R) {
+ const Use *LU = L.first;
+ const Use *RU = R.first;
+ if (LU == RU)
+ return false;
+
+ auto LID = OM.lookup(LU->getUser()).first;
+ auto RID = OM.lookup(RU->getUser()).first;
+
+ // If ID is 4, then expect: 7 6 5 1 2 3.
+ if (LID < RID) {
+ if (GetsReversed)
+ if (RID <= ID)
+ return true;
+ return false;
+ }
+ if (RID < LID) {
+ if (GetsReversed)
+ if (LID <= ID)
+ return false;
+ return true;
+ }
+
+ // LID and RID are equal, so we have different operands of the same user.
+ // Assume operands are added in order for all instructions.
+ if (GetsReversed)
+ if (LID <= ID)
+ return LU->getOperandNo() < RU->getOperandNo();
+ return LU->getOperandNo() > RU->getOperandNo();
+ });
+
+ if (std::is_sorted(
+ List.begin(), List.end(),
+ [](const Entry &L, const Entry &R) { return L.second < R.second; }))
+ // Order is already correct.
+ return;
+
+ // Store the shuffle.
+ Stack.emplace_back(V, F, List.size());
+ assert(List.size() == Stack.back().Shuffle.size() && "Wrong size");
+ for (size_t I = 0, E = List.size(); I != E; ++I)
+ Stack.back().Shuffle[I] = List[I].second;
+}
+
+static void predictValueUseListOrder(const Value *V, const Function *F,
+ OrderMap &OM, UseListOrderStack &Stack) {
+ auto &IDPair = OM[V];
+ assert(IDPair.first && "Unmapped value");
+ if (IDPair.second)
+ // Already predicted.
+ return;
+
+ // Do the actual prediction.
+ IDPair.second = true;
+ if (!V->use_empty() && std::next(V->use_begin()) != V->use_end())
+ predictValueUseListOrderImpl(V, F, IDPair.first, OM, Stack);
+
+ // Recursive descent into constants.
+ if (const Constant *C = dyn_cast<Constant>(V))
+ if (C->getNumOperands()) // Visit GlobalValues.
+ for (const Value *Op : C->operands())
+ if (isa<Constant>(Op)) // Visit GlobalValues.
+ predictValueUseListOrder(Op, F, OM, Stack);
+}
+
+static UseListOrderStack predictUseListOrder(const Module *M) {
+ OrderMap OM = orderModule(M);
+
+ // Use-list orders need to be serialized after all the users have been added
+ // to a value, or else the shuffles will be incomplete. Store them per
+ // function in a stack.
+ //
+ // Aside from function order, the order of values doesn't matter much here.
+ UseListOrderStack Stack;
+
+ // We want to visit the functions backward now so we can list function-local
+ // constants in the last Function they're used in. Module-level constants
+ // have already been visited above.
+ for (auto I = M->rbegin(), E = M->rend(); I != E; ++I) {
+ const Function &F = *I;
+ if (F.isDeclaration())
+ continue;
+ for (const BasicBlock &BB : F)
+ predictValueUseListOrder(&BB, &F, OM, Stack);
+ for (const Argument &A : F.args())
+ predictValueUseListOrder(&A, &F, OM, Stack);
+ for (const BasicBlock &BB : F)
+ for (const Instruction &I : BB)
+ for (const Value *Op : I.operands())
+ if (isa<Constant>(*Op) || isa<InlineAsm>(*Op)) // Visit GlobalValues.
+ predictValueUseListOrder(Op, &F, OM, Stack);
+ for (const BasicBlock &BB : F)
+ for (const Instruction &I : BB)
+ predictValueUseListOrder(&I, &F, OM, Stack);
+ }
+
+ // Visit globals last.
+ for (const GlobalVariable &G : M->globals())
+ predictValueUseListOrder(&G, nullptr, OM, Stack);
+ for (const Function &F : *M)
+ predictValueUseListOrder(&F, nullptr, OM, Stack);
+ for (const GlobalAlias &A : M->aliases())
+ predictValueUseListOrder(&A, nullptr, OM, Stack);
+ for (const GlobalVariable &G : M->globals())
+ if (G.hasInitializer())
+ predictValueUseListOrder(G.getInitializer(), nullptr, OM, Stack);
+ for (const GlobalAlias &A : M->aliases())
+ predictValueUseListOrder(A.getAliasee(), nullptr, OM, Stack);
+ for (const Function &F : *M)
+ if (F.hasPrefixData())
+ predictValueUseListOrder(F.getPrefixData(), nullptr, OM, Stack);
+
+ return Stack;
+}
+
static const Module *getModuleFromVal(const Value *V) {
if (const Argument *MA = dyn_cast<Argument>(V))
return MA->getParent() ? MA->getParent()->getParent() : nullptr;
@@ -78,6 +285,7 @@ static void PrintCallingConv(unsigned cc, raw_ostream &Out) {
case CallingConv::X86_StdCall: Out << "x86_stdcallcc"; break;
case CallingConv::X86_FastCall: Out << "x86_fastcallcc"; break;
case CallingConv::X86_ThisCall: Out << "x86_thiscallcc"; break;
+ case CallingConv::X86_VectorCall:Out << "x86_vectorcallcc"; break;
case CallingConv::Intel_OCL_BI: Out << "intel_ocl_bicc"; break;
case CallingConv::ARM_APCS: Out << "arm_apcscc"; break;
case CallingConv::ARM_AAPCS: Out << "arm_aapcscc"; break;
@@ -347,6 +555,8 @@ public:
FunctionProcessed = false;
}
+ const Function *getFunction() const { return TheFunction; }
+
/// After calling incorporateFunction, use this method to remove the
/// most recently incorporated function from the SlotTracker. This
/// will reset the state of the machine back to just the module contents.
@@ -508,7 +718,7 @@ void SlotTracker::processFunction() {
ST_DEBUG("Inserting Instructions:\n");
- SmallVector<std::pair<unsigned, MDNode*>, 4> MDForInst;
+ SmallVector<std::pair<unsigned, MDNode *>, 4> MDForInst;
// Add all of the basic blocks and instructions with no names.
for (Function::const_iterator BB = TheFunction->begin(),
@@ -1279,6 +1489,9 @@ void AssemblyWriter::writeParamOperand(const Value *Operand,
void AssemblyWriter::printModule(const Module *M) {
Machine.initialize();
+ if (shouldPreserveAssemblyUseListOrder())
+ UseListOrders = predictUseListOrder(M);
+
if (!M->getModuleIdentifier().empty() &&
// Don't print the ID if it will start a new line (which would
// require a comment char before it).
@@ -1339,9 +1552,13 @@ void AssemblyWriter::printModule(const Module *M) {
I != E; ++I)
printAlias(I);
+ // Output global use-lists.
+ printUseLists(nullptr);
+
// Output all of the functions.
for (Module::const_iterator I = M->begin(), E = M->end(); I != E; ++I)
printFunction(I);
+ assert(UseListOrders.empty() && "All use-lists should have been consumed");
// Output all attribute groups.
if (!Machine.as_empty()) {
@@ -1509,6 +1726,7 @@ void AssemblyWriter::printAlias(const GlobalAlias *GA) {
PrintLLVMName(Out, GA);
Out << " = ";
}
+ PrintLinkage(GA->getLinkage(), Out);
PrintVisibility(GA->getVisibility(), Out);
PrintDLLStorageClass(GA->getDLLStorageClass(), Out);
PrintThreadLocalModel(GA->getThreadLocalMode(), Out);
@@ -1517,8 +1735,6 @@ void AssemblyWriter::printAlias(const GlobalAlias *GA) {
Out << "alias ";
- PrintLinkage(GA->getLinkage(), Out);
-
const Constant *Aliasee = GA->getAliasee();
if (!Aliasee) {
@@ -1693,6 +1909,9 @@ void AssemblyWriter::printFunction(const Function *F) {
for (Function::const_iterator I = F->begin(), E = F->end(); I != E; ++I)
printBasicBlock(I);
+ // Output the function's use-lists.
+ printUseLists(F);
+
Out << "}\n";
}
@@ -1956,6 +2175,14 @@ void AssemblyWriter::printInstruction(const Instruction &I) {
Out << ", ";
writeParamOperand(CI->getArgOperand(op), PAL, op + 1);
}
+
+ // Emit an ellipsis if this is a musttail call in a vararg function. This
+ // is only to aid readability, musttail calls forward varargs by default.
+ if (CI->isMustTailCall() && CI->getParent() &&
+ CI->getParent()->getParent() &&
+ CI->getParent()->getParent()->isVarArg())
+ Out << ", ...";
+
Out << ')';
if (PAL.hasAttributes(AttributeSet::FunctionIndex))
Out << " #" << Machine.getAttributeGroupSlot(PAL.getFnAttributes());
@@ -2088,7 +2315,7 @@ void AssemblyWriter::printInstruction(const Instruction &I) {
}
// Print Metadata info.
- SmallVector<std::pair<unsigned, MDNode*>, 4> InstMD;
+ SmallVector<std::pair<unsigned, MDNode *>, 4> InstMD;
I.getAllMetadata(InstMD);
if (!InstMD.empty()) {
SmallVector<StringRef, 8> MDNames;
@@ -2114,7 +2341,7 @@ static void WriteMDNodeComment(const MDNode *Node,
return;
Value *Op = Node->getOperand(0);
- if (!Op || !isa<ConstantInt>(Op) || cast<ConstantInt>(Op)->getBitWidth() < 32)
+ if (!Op || !isa<MDString>(Op))
return;
DIDescriptor Desc(Node);
@@ -2170,6 +2397,45 @@ void AssemblyWriter::writeAllAttributeGroups() {
} // namespace llvm
+void AssemblyWriter::printUseListOrder(const UseListOrder &Order) {
+ bool IsInFunction = Machine.getFunction();
+ if (IsInFunction)
+ Out << " ";
+
+ Out << "uselistorder";
+ if (const BasicBlock *BB =
+ IsInFunction ? nullptr : dyn_cast<BasicBlock>(Order.V)) {
+ Out << "_bb ";
+ writeOperand(BB->getParent(), false);
+ Out << ", ";
+ writeOperand(BB, false);
+ } else {
+ Out << " ";
+ writeOperand(Order.V, true);
+ }
+ Out << ", { ";
+
+ assert(Order.Shuffle.size() >= 2 && "Shuffle too small");
+ Out << Order.Shuffle[0];
+ for (unsigned I = 1, E = Order.Shuffle.size(); I != E; ++I)
+ Out << ", " << Order.Shuffle[I];
+ Out << " }\n";
+}
+
+void AssemblyWriter::printUseLists(const Function *F) {
+ auto hasMore =
+ [&]() { return !UseListOrders.empty() && UseListOrders.back().F == F; };
+ if (!hasMore())
+ // Nothing to do.
+ return;
+
+ Out << "\n; uselistorder directives\n";
+ while (hasMore()) {
+ printUseListOrder(UseListOrders.back());
+ UseListOrders.pop_back();
+ }
+}
+
//===----------------------------------------------------------------------===//
// External Interface declarations
//===----------------------------------------------------------------------===//
@@ -2291,7 +2557,7 @@ void Value::printAsOperand(raw_ostream &O, bool PrintType, const Module *M) cons
void Value::dump() const { print(dbgs()); dbgs() << '\n'; }
// Type::dump - allow easy printing of Types from the debugger.
-void Type::dump() const { print(dbgs()); }
+void Type::dump() const { print(dbgs()); dbgs() << '\n'; }
// Module::dump() - Allow printing of Modules from the debugger.
void Module::dump() const { print(dbgs(), nullptr); }