path: root/lib/VMCore
author    Logan Chien <loganchien@google.com>  2011-10-20 00:08:13 +0800
committer Logan Chien <loganchien@google.com>  2011-10-20 00:09:35 +0800
commit    0ebc07a576037e4e36f68bf5cece32740ca120c0 (patch)
tree      c2e40648043d01498ee25af839a071193561e425 /lib/VMCore
parent    62383e889e0b06fd12a6b88311717cd33a1925c4 (diff)
parent    cdd8e46bec4e975d00a5abea808d8eb4138515c5 (diff)
Merge with LLVM upstream 2011/10/20 (r142530)
Conflicts:
    lib/Support/Unix/Host.inc

Change-Id: Idc00db3b63912dca6348bddd9f8a1af2a8d5d147
Diffstat (limited to 'lib/VMCore')
-rw-r--r--  lib/VMCore/AsmWriter.cpp      250
-rw-r--r--  lib/VMCore/Attributes.cpp       4
-rw-r--r--  lib/VMCore/AutoUpgrade.cpp    387
-rw-r--r--  lib/VMCore/BasicBlock.cpp      34
-rw-r--r--  lib/VMCore/CMakeLists.txt       5
-rw-r--r--  lib/VMCore/ConstantFold.cpp    50
-rw-r--r--  lib/VMCore/Constants.cpp      110
-rw-r--r--  lib/VMCore/Core.cpp           155
-rw-r--r--  lib/VMCore/DebugLoc.cpp         2
-rw-r--r--  lib/VMCore/Function.cpp        36
-rw-r--r--  lib/VMCore/GCOV.cpp           281
-rw-r--r--  lib/VMCore/Globals.cpp          2
-rw-r--r--  lib/VMCore/Instruction.cpp     64
-rw-r--r--  lib/VMCore/Instructions.cpp   506
-rw-r--r--  lib/VMCore/LLVMContext.cpp      2
-rw-r--r--  lib/VMCore/Makefile             4
-rw-r--r--  lib/VMCore/Module.cpp          34
-rw-r--r--  lib/VMCore/PassManager.cpp     61
-rw-r--r--  lib/VMCore/PassRegistry.cpp     1
-rw-r--r--  lib/VMCore/Type.cpp            50
-rw-r--r--  lib/VMCore/ValueTypes.cpp       6
-rw-r--r--  lib/VMCore/Verifier.cpp       158
22 files changed, 1788 insertions, 414 deletions
diff --git a/lib/VMCore/AsmWriter.cpp b/lib/VMCore/AsmWriter.cpp
index 1ce0447..18308f2 100644
--- a/lib/VMCore/AsmWriter.cpp
+++ b/lib/VMCore/AsmWriter.cpp
@@ -58,7 +58,7 @@ static const Module *getModuleFromVal(const Value *V) {
const Function *M = I->getParent() ? I->getParent()->getParent() : 0;
return M ? M->getParent() : 0;
}
-
+
if (const GlobalValue *GV = dyn_cast<GlobalValue>(V))
return GV->getParent();
return 0;
@@ -142,18 +142,18 @@ public:
/// NamedTypes - The named types that are used by the current module.
std::vector<StructType*> NamedTypes;
-
+
/// NumberedTypes - The numbered types, along with their value.
DenseMap<StructType*, unsigned> NumberedTypes;
-
+
TypePrinting() {}
~TypePrinting() {}
-
+
void incorporateTypes(const Module &M);
-
+
void print(Type *Ty, raw_ostream &OS);
-
+
void printStructBody(StructType *Ty, raw_ostream &OS);
};
} // end anonymous namespace.
@@ -161,25 +161,25 @@ public:
void TypePrinting::incorporateTypes(const Module &M) {
M.findUsedStructTypes(NamedTypes);
-
+
// The list of struct types we got back includes all the struct types, split
// the unnamed ones out to a numbering and remove the anonymous structs.
unsigned NextNumber = 0;
-
+
std::vector<StructType*>::iterator NextToUse = NamedTypes.begin(), I, E;
for (I = NamedTypes.begin(), E = NamedTypes.end(); I != E; ++I) {
StructType *STy = *I;
-
+
// Ignore anonymous types.
- if (STy->isAnonymous())
+ if (STy->isLiteral())
continue;
-
+
if (STy->getName().empty())
NumberedTypes[STy] = NextNumber++;
else
*NextToUse++ = STy;
}
-
+
NamedTypes.erase(NextToUse, NamedTypes.end());
}
@@ -220,13 +220,13 @@ void TypePrinting::print(Type *Ty, raw_ostream &OS) {
}
case Type::StructTyID: {
StructType *STy = cast<StructType>(Ty);
-
- if (STy->isAnonymous())
+
+ if (STy->isLiteral())
return printStructBody(STy, OS);
if (!STy->getName().empty())
return PrintLLVMName(OS, STy->getName(), LocalPrefix);
-
+
DenseMap<StructType*, unsigned>::iterator I = NumberedTypes.find(STy);
if (I != NumberedTypes.end())
OS << '%' << I->second;
@@ -267,10 +267,10 @@ void TypePrinting::printStructBody(StructType *STy, raw_ostream &OS) {
OS << "opaque";
return;
}
-
+
if (STy->isPacked())
OS << '<';
-
+
if (STy->getNumElements() == 0) {
OS << "{}";
} else {
@@ -281,7 +281,7 @@ void TypePrinting::printStructBody(StructType *STy, raw_ostream &OS) {
OS << ", ";
print(*I, OS);
}
-
+
OS << " }";
}
if (STy->isPacked())
@@ -386,7 +386,8 @@ static SlotTracker *createSlotTracker(const Value *V) {
return new SlotTracker(FA->getParent());
if (const Instruction *I = dyn_cast<Instruction>(V))
- return new SlotTracker(I->getParent()->getParent());
+ if (I->getParent())
+ return new SlotTracker(I->getParent()->getParent());
if (const BasicBlock *BB = dyn_cast<BasicBlock>(V))
return new SlotTracker(BB->getParent());
@@ -419,7 +420,7 @@ static SlotTracker *createSlotTracker(const Value *V) {
// Module level constructor. Causes the contents of the Module (sans functions)
// to be added to the slot table.
SlotTracker::SlotTracker(const Module *M)
- : TheModule(M), TheFunction(0), FunctionProcessed(false),
+ : TheModule(M), TheFunction(0), FunctionProcessed(false),
mNext(0), fNext(0), mdnNext(0) {
}
@@ -490,12 +491,12 @@ void SlotTracker::processFunction() {
E = TheFunction->end(); BB != E; ++BB) {
if (!BB->hasName())
CreateFunctionSlot(BB);
-
+
for (BasicBlock::const_iterator I = BB->begin(), E = BB->end(); I != E;
++I) {
if (!I->getType()->isVoidTy() && !I->hasName())
CreateFunctionSlot(I);
-
+
// Intrinsics can directly use metadata. We allow direct calls to any
// llvm.foo function here, because the target may not be linked into the
// optimizer.
@@ -658,6 +659,23 @@ static const char *getPredicateText(unsigned predicate) {
return pred;
}
+static void writeAtomicRMWOperation(raw_ostream &Out,
+ AtomicRMWInst::BinOp Op) {
+ switch (Op) {
+ default: Out << " <unknown operation " << Op << ">"; break;
+ case AtomicRMWInst::Xchg: Out << " xchg"; break;
+ case AtomicRMWInst::Add: Out << " add"; break;
+ case AtomicRMWInst::Sub: Out << " sub"; break;
+ case AtomicRMWInst::And: Out << " and"; break;
+ case AtomicRMWInst::Nand: Out << " nand"; break;
+ case AtomicRMWInst::Or: Out << " or"; break;
+ case AtomicRMWInst::Xor: Out << " xor"; break;
+ case AtomicRMWInst::Max: Out << " max"; break;
+ case AtomicRMWInst::Min: Out << " min"; break;
+ case AtomicRMWInst::UMax: Out << " umax"; break;
+ case AtomicRMWInst::UMin: Out << " umin"; break;
+ }
+}
static void WriteOptimizationInfo(raw_ostream &Out, const User *U) {
if (const OverflowingBinaryOperator *OBO =
@@ -792,7 +810,7 @@ static void WriteConstantInternal(raw_ostream &Out, const Constant *CV,
Out << "zeroinitializer";
return;
}
-
+
if (const BlockAddress *BA = dyn_cast<BlockAddress>(CV)) {
Out << "blockaddress(";
WriteAsOperandInternal(Out, BA->getFunction(), &TypePrinter, Machine,
@@ -939,13 +957,13 @@ static void WriteMDNodeBodyInternal(raw_ostream &Out, const MDNode *Node,
else {
TypePrinter->print(V->getType(), Out);
Out << ' ';
- WriteAsOperandInternal(Out, Node->getOperand(mi),
+ WriteAsOperandInternal(Out, Node->getOperand(mi),
TypePrinter, Machine, Context);
}
if (mi + 1 != me)
Out << ", ";
}
-
+
Out << "}";
}
@@ -990,7 +1008,7 @@ static void WriteAsOperandInternal(raw_ostream &Out, const Value *V,
WriteMDNodeBodyInternal(Out, N, TypePrinter, Machine, Context);
return;
}
-
+
if (!Machine) {
if (N->isFunctionLocal())
Machine = new SlotTracker(N->getFunction());
@@ -1020,26 +1038,35 @@ static void WriteAsOperandInternal(raw_ostream &Out, const Value *V,
char Prefix = '%';
int Slot;
+ // If we have a SlotTracker, use it.
if (Machine) {
if (const GlobalValue *GV = dyn_cast<GlobalValue>(V)) {
Slot = Machine->getGlobalSlot(GV);
Prefix = '@';
} else {
Slot = Machine->getLocalSlot(V);
+
+ // If the local value didn't succeed, then we may be referring to a value
+ // from a different function. Translate it, as this can happen when using
+ // address of blocks.
+ if (Slot == -1)
+ if ((Machine = createSlotTracker(V))) {
+ Slot = Machine->getLocalSlot(V);
+ delete Machine;
+ }
}
- } else {
- Machine = createSlotTracker(V);
- if (Machine) {
- if (const GlobalValue *GV = dyn_cast<GlobalValue>(V)) {
- Slot = Machine->getGlobalSlot(GV);
- Prefix = '@';
- } else {
- Slot = Machine->getLocalSlot(V);
- }
- delete Machine;
+ } else if ((Machine = createSlotTracker(V))) {
+ // Otherwise, create one to get the # and then destroy it.
+ if (const GlobalValue *GV = dyn_cast<GlobalValue>(V)) {
+ Slot = Machine->getGlobalSlot(GV);
+ Prefix = '@';
} else {
- Slot = -1;
+ Slot = Machine->getLocalSlot(V);
}
+ delete Machine;
+ Machine = 0;
+ } else {
+ Slot = -1;
}
if (Slot != -1)
@@ -1081,7 +1108,7 @@ class AssemblyWriter {
const Module *TheModule;
TypePrinting TypePrinter;
AssemblyAnnotationWriter *AnnotationWriter;
-
+
public:
inline AssemblyWriter(formatted_raw_ostream &o, SlotTracker &Mac,
const Module *M,
@@ -1093,11 +1120,12 @@ public:
void printMDNodeBody(const MDNode *MD);
void printNamedMDNode(const NamedMDNode *NMD);
-
+
void printModule(const Module *M);
void writeOperand(const Value *Op, bool PrintType);
void writeParamOperand(const Value *Operand, Attributes Attrs);
+ void writeAtomic(AtomicOrdering Ordering, SynchronizationScope SynchScope);
void writeAllMDNodes();
@@ -1128,6 +1156,28 @@ void AssemblyWriter::writeOperand(const Value *Operand, bool PrintType) {
WriteAsOperandInternal(Out, Operand, &TypePrinter, &Machine, TheModule);
}
+void AssemblyWriter::writeAtomic(AtomicOrdering Ordering,
+ SynchronizationScope SynchScope) {
+ if (Ordering == NotAtomic)
+ return;
+
+ switch (SynchScope) {
+ default: Out << " <bad scope " << int(SynchScope) << ">"; break;
+ case SingleThread: Out << " singlethread"; break;
+ case CrossThread: break;
+ }
+
+ switch (Ordering) {
+ default: Out << " <bad ordering " << int(Ordering) << ">"; break;
+ case Unordered: Out << " unordered"; break;
+ case Monotonic: Out << " monotonic"; break;
+ case Acquire: Out << " acquire"; break;
+ case Release: Out << " release"; break;
+ case AcquireRelease: Out << " acq_rel"; break;
+ case SequentiallyConsistent: Out << " seq_cst"; break;
+ }
+}
+
void AssemblyWriter::writeParamOperand(const Value *Operand,
Attributes Attrs) {
if (Operand == 0) {
@@ -1216,7 +1266,7 @@ void AssemblyWriter::printModule(const Module *M) {
// Output named metadata.
if (!M->named_metadata_empty()) Out << '\n';
-
+
for (Module::const_named_metadata_iterator I = M->named_metadata_begin(),
E = M->named_metadata_end(); I != E; ++I)
printNamedMDNode(I);
@@ -1357,26 +1407,8 @@ void AssemblyWriter::printAlias(const GlobalAlias *GA) {
if (Aliasee == 0) {
TypePrinter.print(GA->getType(), Out);
Out << " <<NULL ALIASEE>>";
- } else if (const GlobalVariable *GV = dyn_cast<GlobalVariable>(Aliasee)) {
- TypePrinter.print(GV->getType(), Out);
- Out << ' ';
- PrintLLVMName(Out, GV);
- } else if (const Function *F = dyn_cast<Function>(Aliasee)) {
- TypePrinter.print(F->getFunctionType(), Out);
- Out << "* ";
-
- WriteAsOperandInternal(Out, F, &TypePrinter, &Machine, F->getParent());
- } else if (const GlobalAlias *GA = dyn_cast<GlobalAlias>(Aliasee)) {
- TypePrinter.print(GA->getType(), Out);
- Out << ' ';
- PrintLLVMName(Out, GA);
} else {
- const ConstantExpr *CE = cast<ConstantExpr>(Aliasee);
- // The only valid GEP is an all zero GEP.
- assert((CE->getOpcode() == Instruction::BitCast ||
- CE->getOpcode() == Instruction::GetElementPtr) &&
- "Unsupported aliasee");
- writeOperand(CE, false);
+ writeOperand(Aliasee, !isa<ConstantExpr>(Aliasee));
}
printInfoComment(*GA);
@@ -1387,29 +1419,29 @@ void AssemblyWriter::printTypeIdentities() {
if (TypePrinter.NumberedTypes.empty() &&
TypePrinter.NamedTypes.empty())
return;
-
+
Out << '\n';
-
+
// We know all the numbers that each type is used and we know that it is a
// dense assignment. Convert the map to an index table.
std::vector<StructType*> NumberedTypes(TypePrinter.NumberedTypes.size());
- for (DenseMap<StructType*, unsigned>::iterator I =
+ for (DenseMap<StructType*, unsigned>::iterator I =
TypePrinter.NumberedTypes.begin(), E = TypePrinter.NumberedTypes.end();
I != E; ++I) {
assert(I->second < NumberedTypes.size() && "Didn't get a dense numbering?");
NumberedTypes[I->second] = I->first;
}
-
+
// Emit all numbered types.
for (unsigned i = 0, e = NumberedTypes.size(); i != e; ++i) {
Out << '%' << i << " = type ";
-
+
// Make sure we print out at least one level of the type structure, so
// that we do not get %2 = type %2
TypePrinter.printStructBody(NumberedTypes[i], Out);
Out << '\n';
}
-
+
for (unsigned i = 0, e = TypePrinter.NamedTypes.size(); i != e; ++i) {
PrintLLVMName(Out, TypePrinter.NamedTypes[i]->getName(), LocalPrefix);
Out << " = type ";
@@ -1628,18 +1660,24 @@ void AssemblyWriter::printInstruction(const Instruction &I) {
Out << '%' << SlotNum << " = ";
}
- // If this is a volatile load or store, print out the volatile marker.
- if ((isa<LoadInst>(I) && cast<LoadInst>(I).isVolatile()) ||
- (isa<StoreInst>(I) && cast<StoreInst>(I).isVolatile())) {
- Out << "volatile ";
- } else if (isa<CallInst>(I) && cast<CallInst>(I).isTailCall()) {
- // If this is a call, check if it's a tail call.
+ if (isa<CallInst>(I) && cast<CallInst>(I).isTailCall())
Out << "tail ";
- }
// Print out the opcode...
Out << I.getOpcodeName();
+ // If this is an atomic load or store, print out the atomic marker.
+ if ((isa<LoadInst>(I) && cast<LoadInst>(I).isAtomic()) ||
+ (isa<StoreInst>(I) && cast<StoreInst>(I).isAtomic()))
+ Out << " atomic";
+
+ // If this is a volatile operation, print out the volatile marker.
+ if ((isa<LoadInst>(I) && cast<LoadInst>(I).isVolatile()) ||
+ (isa<StoreInst>(I) && cast<StoreInst>(I).isVolatile()) ||
+ (isa<AtomicCmpXchgInst>(I) && cast<AtomicCmpXchgInst>(I).isVolatile()) ||
+ (isa<AtomicRMWInst>(I) && cast<AtomicRMWInst>(I).isVolatile()))
+ Out << " volatile";
+
// Print out optimization information.
WriteOptimizationInfo(Out, &I);
@@ -1647,6 +1685,10 @@ void AssemblyWriter::printInstruction(const Instruction &I) {
if (const CmpInst *CI = dyn_cast<CmpInst>(&I))
Out << ' ' << getPredicateText(CI->getPredicate());
+ // Print out the atomicrmw operation
+ if (const AtomicRMWInst *RMWI = dyn_cast<AtomicRMWInst>(&I))
+ writeAtomicRMWOperation(Out, RMWI->getOperation());
+
// Print out the type of the operands...
const Value *Operand = I.getNumOperands() ? I.getOperand(0) : 0;
@@ -1661,18 +1703,20 @@ void AssemblyWriter::printInstruction(const Instruction &I) {
writeOperand(BI.getSuccessor(1), true);
} else if (isa<SwitchInst>(I)) {
+ SwitchInst& SI(cast<SwitchInst>(I));
// Special case switch instruction to get formatting nice and correct.
Out << ' ';
- writeOperand(Operand , true);
+ writeOperand(SI.getCondition(), true);
Out << ", ";
- writeOperand(I.getOperand(1), true);
+ writeOperand(SI.getDefaultDest(), true);
Out << " [";
-
- for (unsigned op = 2, Eop = I.getNumOperands(); op < Eop; op += 2) {
+ // Skip the first item since that's the default case.
+ unsigned NumCases = SI.getNumCases();
+ for (unsigned i = 1; i < NumCases; ++i) {
Out << "\n ";
- writeOperand(I.getOperand(op ), true);
+ writeOperand(SI.getCaseValue(i), true);
Out << ", ";
- writeOperand(I.getOperand(op+1), true);
+ writeOperand(SI.getSuccessor(i), true);
}
Out << "\n ]";
} else if (isa<IndirectBrInst>(I)) {
@@ -1680,7 +1724,7 @@ void AssemblyWriter::printInstruction(const Instruction &I) {
Out << ' ';
writeOperand(Operand, true);
Out << ", [";
-
+
for (unsigned i = 1, e = I.getNumOperands(); i != e; ++i) {
if (i != 1)
Out << ", ";
@@ -1709,6 +1753,24 @@ void AssemblyWriter::printInstruction(const Instruction &I) {
writeOperand(I.getOperand(1), true);
for (const unsigned *i = IVI->idx_begin(), *e = IVI->idx_end(); i != e; ++i)
Out << ", " << *i;
+ } else if (const LandingPadInst *LPI = dyn_cast<LandingPadInst>(&I)) {
+ Out << ' ';
+ TypePrinter.print(I.getType(), Out);
+ Out << " personality ";
+ writeOperand(I.getOperand(0), true); Out << '\n';
+
+ if (LPI->isCleanup())
+ Out << " cleanup";
+
+ for (unsigned i = 0, e = LPI->getNumClauses(); i != e; ++i) {
+ if (i != 0 || LPI->isCleanup()) Out << "\n";
+ if (LPI->isCatch(i))
+ Out << " catch ";
+ else
+ Out << " filter ";
+
+ writeOperand(LPI->getClause(i), true);
+ }
} else if (isa<ReturnInst>(I) && !Operand) {
Out << " void";
} else if (const CallInst *CI = dyn_cast<CallInst>(&I)) {
@@ -1878,11 +1940,23 @@ void AssemblyWriter::printInstruction(const Instruction &I) {
}
}
- // Print post operand alignment for load/store.
- if (isa<LoadInst>(I) && cast<LoadInst>(I).getAlignment()) {
- Out << ", align " << cast<LoadInst>(I).getAlignment();
- } else if (isa<StoreInst>(I) && cast<StoreInst>(I).getAlignment()) {
- Out << ", align " << cast<StoreInst>(I).getAlignment();
+ // Print atomic ordering/alignment for memory operations
+ if (const LoadInst *LI = dyn_cast<LoadInst>(&I)) {
+ if (LI->isAtomic())
+ writeAtomic(LI->getOrdering(), LI->getSynchScope());
+ if (LI->getAlignment())
+ Out << ", align " << LI->getAlignment();
+ } else if (const StoreInst *SI = dyn_cast<StoreInst>(&I)) {
+ if (SI->isAtomic())
+ writeAtomic(SI->getOrdering(), SI->getSynchScope());
+ if (SI->getAlignment())
+ Out << ", align " << SI->getAlignment();
+ } else if (const AtomicCmpXchgInst *CXI = dyn_cast<AtomicCmpXchgInst>(&I)) {
+ writeAtomic(CXI->getOrdering(), CXI->getSynchScope());
+ } else if (const AtomicRMWInst *RMWI = dyn_cast<AtomicRMWInst>(&I)) {
+ writeAtomic(RMWI->getOrdering(), RMWI->getSynchScope());
+ } else if (const FenceInst *FI = dyn_cast<FenceInst>(&I)) {
+ writeAtomic(FI->getOrdering(), FI->getSynchScope());
}
// Print Metadata info.
@@ -1916,7 +1990,7 @@ static void WriteMDNodeComment(const MDNode *Node,
APInt Tag = Val & ~APInt(Val.getBitWidth(), LLVMDebugVersionMask);
if (Val.ult(LLVMDebugVersion))
return;
-
+
Out.PadToColumn(50);
if (Tag == dwarf::DW_TAG_user_base)
Out << "; [ DW_TAG_user_base ]";
@@ -1932,7 +2006,7 @@ void AssemblyWriter::writeAllMDNodes() {
for (SlotTracker::mdn_iterator I = Machine.mdn_begin(), E = Machine.mdn_end();
I != E; ++I)
Nodes[I->second] = cast<MDNode>(I->first);
-
+
for (unsigned i = 0, e = Nodes.size(); i != e; ++i) {
Out << '!' << i << " = metadata ";
printMDNodeBody(Nodes[i]);
@@ -1970,10 +2044,10 @@ void Type::print(raw_ostream &OS) const {
}
TypePrinting TP;
TP.print(const_cast<Type*>(this), OS);
-
+
// If the type is a named struct type, print the body as well.
if (StructType *STy = dyn_cast<StructType>(const_cast<Type*>(this)))
- if (!STy->isAnonymous()) {
+ if (!STy->isLiteral()) {
OS << " = type ";
TP.printStructBody(STy, OS);
}
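
As a reading aid, not part of the patch: a minimal C++ sketch, assuming the 3.0-era IRBuilder/LoadInst API used in this tree, of the kind of instruction the new atomic printing paths above have to render. The helper name and the value %p are hypothetical.

#include "llvm/Instructions.h"
#include "llvm/Support/IRBuilder.h"
using namespace llvm;

// Hypothetical helper: build an atomic load at the builder's insert point.
static LoadInst *emitAtomicLoad(IRBuilder<> &Builder, Value *Ptr) {
  LoadInst *LI = Builder.CreateLoad(Ptr, "val");
  LI->setAtomic(SequentiallyConsistent);  // writeAtomic() prints " seq_cst"
  LI->setAlignment(4);                    // printed as ", align 4"
  return LI;
}
// printInstruction() above renders this roughly as:
//   %val = load atomic i32* %p seq_cst, align 4
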
diff --git a/lib/VMCore/Attributes.cpp b/lib/VMCore/Attributes.cpp
index b728b92..485be75 100644
--- a/lib/VMCore/Attributes.cpp
+++ b/lib/VMCore/Attributes.cpp
@@ -38,6 +38,8 @@ std::string Attribute::getAsString(Attributes Attrs) {
Result += "nounwind ";
if (Attrs & Attribute::UWTable)
Result += "uwtable ";
+ if (Attrs & Attribute::ReturnsTwice)
+ Result += "returns_twice ";
if (Attrs & Attribute::InReg)
Result += "inreg ";
if (Attrs & Attribute::NoAlias)
@@ -72,8 +74,6 @@ std::string Attribute::getAsString(Attributes Attrs) {
Result += "noimplicitfloat ";
if (Attrs & Attribute::Naked)
Result += "naked ";
- if (Attrs & Attribute::Hotpatch)
- Result += "hotpatch ";
if (Attrs & Attribute::NonLazyBind)
Result += "nonlazybind ";
if (Attrs & Attribute::StackAlignment) {
diff --git a/lib/VMCore/AutoUpgrade.cpp b/lib/VMCore/AutoUpgrade.cpp
index d987280..b849d3e 100644
--- a/lib/VMCore/AutoUpgrade.cpp
+++ b/lib/VMCore/AutoUpgrade.cpp
@@ -14,11 +14,15 @@
#include "llvm/AutoUpgrade.h"
#include "llvm/Constants.h"
#include "llvm/Function.h"
+#include "llvm/Instruction.h"
#include "llvm/LLVMContext.h"
#include "llvm/Module.h"
#include "llvm/IntrinsicInst.h"
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/Support/CallSite.h"
+#include "llvm/Support/CFG.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/IRBuilder.h"
#include <cstring>
@@ -39,6 +43,43 @@ static bool UpgradeIntrinsicFunction1(Function *F, Function *&NewFn) {
switch (Name[0]) {
default: break;
+ case 'a':
+ if (Name.startswith("atomic.cmp.swap") ||
+ Name.startswith("atomic.swap") ||
+ Name.startswith("atomic.load.add") ||
+ Name.startswith("atomic.load.sub") ||
+ Name.startswith("atomic.load.and") ||
+ Name.startswith("atomic.load.nand") ||
+ Name.startswith("atomic.load.or") ||
+ Name.startswith("atomic.load.xor") ||
+ Name.startswith("atomic.load.max") ||
+ Name.startswith("atomic.load.min") ||
+ Name.startswith("atomic.load.umax") ||
+ Name.startswith("atomic.load.umin"))
+ return true;
+ case 'i':
+ // This upgrades the old llvm.init.trampoline to the new
+ // llvm.init.trampoline and llvm.adjust.trampoline pair.
+ if (Name == "init.trampoline") {
+ // The new llvm.init.trampoline returns nothing.
+ if (FTy->getReturnType()->isVoidTy())
+ break;
+
+ assert(FTy->getNumParams() == 3 && "old init.trampoline takes 3 args!");
+
+ // Change the name of the old intrinsic so that we can play with its type.
+ std::string NameTmp = F->getName();
+ F->setName("");
+ NewFn = cast<Function>(M->getOrInsertFunction(
+ NameTmp,
+ Type::getVoidTy(M->getContext()),
+ FTy->getParamType(0), FTy->getParamType(1),
+ FTy->getParamType(2), (Type *)0));
+ return true;
+ }
+ case 'm':
+ if (Name == "memory.barrier")
+ return true;
case 'p':
// This upgrades the llvm.prefetch intrinsic to accept one more parameter,
// which is a instruction / data cache identifier. The old version only
@@ -182,6 +223,80 @@ void llvm::UpgradeIntrinsicCall(CallInst *CI, Function *NewFn) {
// Remove intrinsic.
CI->eraseFromParent();
+ } else if (F->getName().startswith("llvm.atomic.cmp.swap")) {
+ IRBuilder<> Builder(C);
+ Builder.SetInsertPoint(CI->getParent(), CI);
+ Value *Val = Builder.CreateAtomicCmpXchg(CI->getArgOperand(0),
+ CI->getArgOperand(1),
+ CI->getArgOperand(2),
+ Monotonic);
+
+ // Replace intrinsic.
+ Val->takeName(CI);
+ if (!CI->use_empty())
+ CI->replaceAllUsesWith(Val);
+ CI->eraseFromParent();
+ } else if (F->getName().startswith("llvm.atomic")) {
+ IRBuilder<> Builder(C);
+ Builder.SetInsertPoint(CI->getParent(), CI);
+
+ AtomicRMWInst::BinOp Op;
+ if (F->getName().startswith("llvm.atomic.swap"))
+ Op = AtomicRMWInst::Xchg;
+ else if (F->getName().startswith("llvm.atomic.load.add"))
+ Op = AtomicRMWInst::Add;
+ else if (F->getName().startswith("llvm.atomic.load.sub"))
+ Op = AtomicRMWInst::Sub;
+ else if (F->getName().startswith("llvm.atomic.load.and"))
+ Op = AtomicRMWInst::And;
+ else if (F->getName().startswith("llvm.atomic.load.nand"))
+ Op = AtomicRMWInst::Nand;
+ else if (F->getName().startswith("llvm.atomic.load.or"))
+ Op = AtomicRMWInst::Or;
+ else if (F->getName().startswith("llvm.atomic.load.xor"))
+ Op = AtomicRMWInst::Xor;
+ else if (F->getName().startswith("llvm.atomic.load.max"))
+ Op = AtomicRMWInst::Max;
+ else if (F->getName().startswith("llvm.atomic.load.min"))
+ Op = AtomicRMWInst::Min;
+ else if (F->getName().startswith("llvm.atomic.load.umax"))
+ Op = AtomicRMWInst::UMax;
+ else if (F->getName().startswith("llvm.atomic.load.umin"))
+ Op = AtomicRMWInst::UMin;
+ else
+ llvm_unreachable("Unknown atomic");
+
+ Value *Val = Builder.CreateAtomicRMW(Op, CI->getArgOperand(0),
+ CI->getArgOperand(1),
+ Monotonic);
+
+ // Replace intrinsic.
+ Val->takeName(CI);
+ if (!CI->use_empty())
+ CI->replaceAllUsesWith(Val);
+ CI->eraseFromParent();
+ } else if (F->getName() == "llvm.memory.barrier") {
+ IRBuilder<> Builder(C);
+ Builder.SetInsertPoint(CI->getParent(), CI);
+
+ // Note that this conversion ignores the "device" bit; it was not really
+ // well-defined, and got abused because nobody paid enough attention to
+ // get it right. In practice, this probably doesn't matter; application
+ // code generally doesn't need anything stronger than
+ // SequentiallyConsistent (and realistically, SequentiallyConsistent
+ // is lowered to a strong enough barrier for almost anything).
+
+ if (cast<ConstantInt>(CI->getArgOperand(1))->getZExtValue())
+ Builder.CreateFence(SequentiallyConsistent);
+ else if (!cast<ConstantInt>(CI->getArgOperand(0))->getZExtValue())
+ Builder.CreateFence(Release);
+ else if (!cast<ConstantInt>(CI->getArgOperand(3))->getZExtValue())
+ Builder.CreateFence(Acquire);
+ else
+ Builder.CreateFence(AcquireRelease);
+
+ // Remove intrinsic.
+ CI->eraseFromParent();
} else {
llvm_unreachable("Unknown function for CallInst upgrade.");
}
@@ -212,6 +327,32 @@ void llvm::UpgradeIntrinsicCall(CallInst *CI, Function *NewFn) {
CI->eraseFromParent();
break;
}
+ case Intrinsic::init_trampoline: {
+
+ // Transform
+ // %tramp = call i8* llvm.init.trampoline (i8* x, i8* y, i8* z)
+ // to
+ // call void llvm.init.trampoline (i8* %x, i8* %y, i8* %z)
+ // %tramp = call i8* llvm.adjust.trampoline (i8* %x)
+
+ Function *AdjustTrampolineFn =
+ cast<Function>(Intrinsic::getDeclaration(F->getParent(),
+ Intrinsic::adjust_trampoline));
+
+ IRBuilder<> Builder(C);
+ Builder.SetInsertPoint(CI);
+
+ Builder.CreateCall3(NewFn, CI->getArgOperand(0), CI->getArgOperand(1),
+ CI->getArgOperand(2));
+
+ CallInst *AdjustCall = Builder.CreateCall(AdjustTrampolineFn,
+ CI->getArgOperand(0),
+ CI->getName());
+ if (!CI->use_empty())
+ CI->replaceAllUsesWith(AdjustCall);
+ CI->eraseFromParent();
+ break;
+ }
}
}
@@ -279,3 +420,249 @@ void llvm::CheckDebugInfoIntrinsics(Module *M) {
}
}
}
+
+/// FindExnAndSelIntrinsics - Find the eh_exception and eh_selector intrinsic
+/// calls reachable from the unwind basic block.
+static void FindExnAndSelIntrinsics(BasicBlock *BB, CallInst *&Exn,
+ CallInst *&Sel,
+ SmallPtrSet<BasicBlock*, 8> &Visited) {
+ if (!Visited.insert(BB)) return;
+
+ for (BasicBlock::iterator
+ I = BB->begin(), E = BB->end(); I != E; ++I) {
+ if (CallInst *CI = dyn_cast<CallInst>(I)) {
+ switch (CI->getCalledFunction()->getIntrinsicID()) {
+ default: break;
+ case Intrinsic::eh_exception:
+ assert(!Exn && "Found more than one eh.exception call!");
+ Exn = CI;
+ break;
+ case Intrinsic::eh_selector:
+ assert(!Sel && "Found more than one eh.selector call!");
+ Sel = CI;
+ break;
+ }
+
+ if (Exn && Sel) return;
+ }
+ }
+
+ if (Exn && Sel) return;
+
+ for (succ_iterator I = succ_begin(BB), E = succ_end(BB); I != E; ++I) {
+ FindExnAndSelIntrinsics(*I, Exn, Sel, Visited);
+ if (Exn && Sel) return;
+ }
+}
+
+/// TransferClausesToLandingPadInst - Transfer the exception handling clauses
+/// from the eh_selector call to the new landingpad instruction.
+static void TransferClausesToLandingPadInst(LandingPadInst *LPI,
+ CallInst *EHSel) {
+ LLVMContext &Context = LPI->getContext();
+ unsigned N = EHSel->getNumArgOperands();
+
+ for (unsigned i = N - 1; i > 1; --i) {
+ if (const ConstantInt *CI = dyn_cast<ConstantInt>(EHSel->getArgOperand(i))){
+ unsigned FilterLength = CI->getZExtValue();
+ unsigned FirstCatch = i + FilterLength + !FilterLength;
+ assert(FirstCatch <= N && "Invalid filter length");
+
+ if (FirstCatch < N)
+ for (unsigned j = FirstCatch; j < N; ++j) {
+ Value *Val = EHSel->getArgOperand(j);
+ if (!Val->hasName() || Val->getName() != "llvm.eh.catch.all.value") {
+ LPI->addClause(EHSel->getArgOperand(j));
+ } else {
+ GlobalVariable *GV = cast<GlobalVariable>(Val);
+ LPI->addClause(GV->getInitializer());
+ }
+ }
+
+ if (!FilterLength) {
+ // Cleanup.
+ LPI->setCleanup(true);
+ } else {
+ // Filter.
+ SmallVector<Constant *, 4> TyInfo;
+ TyInfo.reserve(FilterLength - 1);
+ for (unsigned j = i + 1; j < FirstCatch; ++j)
+ TyInfo.push_back(cast<Constant>(EHSel->getArgOperand(j)));
+ ArrayType *AType =
+ ArrayType::get(!TyInfo.empty() ? TyInfo[0]->getType() :
+ PointerType::getUnqual(Type::getInt8Ty(Context)),
+ TyInfo.size());
+ LPI->addClause(ConstantArray::get(AType, TyInfo));
+ }
+
+ N = i;
+ }
+ }
+
+ if (N > 2)
+ for (unsigned j = 2; j < N; ++j) {
+ Value *Val = EHSel->getArgOperand(j);
+ if (!Val->hasName() || Val->getName() != "llvm.eh.catch.all.value") {
+ LPI->addClause(EHSel->getArgOperand(j));
+ } else {
+ GlobalVariable *GV = cast<GlobalVariable>(Val);
+ LPI->addClause(GV->getInitializer());
+ }
+ }
+}
+
+/// This function upgrades the old pre-3.0 exception handling system to the new
+/// one. N.B. This will be removed in 3.1.
+void llvm::UpgradeExceptionHandling(Module *M) {
+ Function *EHException = M->getFunction("llvm.eh.exception");
+ Function *EHSelector = M->getFunction("llvm.eh.selector");
+ if (!EHException || !EHSelector)
+ return;
+
+ LLVMContext &Context = M->getContext();
+ Type *ExnTy = PointerType::getUnqual(Type::getInt8Ty(Context));
+ Type *SelTy = Type::getInt32Ty(Context);
+ Type *LPadSlotTy = StructType::get(ExnTy, SelTy, NULL);
+
+ // This map links the invoke instruction with the eh.exception and eh.selector
+ // calls associated with it.
+ DenseMap<InvokeInst*, std::pair<Value*, Value*> > InvokeToIntrinsicsMap;
+ for (Module::iterator
+ I = M->begin(), E = M->end(); I != E; ++I) {
+ Function &F = *I;
+
+ for (Function::iterator
+ II = F.begin(), IE = F.end(); II != IE; ++II) {
+ BasicBlock *BB = &*II;
+ InvokeInst *Inst = dyn_cast<InvokeInst>(BB->getTerminator());
+ if (!Inst) continue;
+ BasicBlock *UnwindDest = Inst->getUnwindDest();
+ if (UnwindDest->isLandingPad()) continue; // Already converted.
+
+ SmallPtrSet<BasicBlock*, 8> Visited;
+ CallInst *Exn = 0;
+ CallInst *Sel = 0;
+ FindExnAndSelIntrinsics(UnwindDest, Exn, Sel, Visited);
+ assert(Exn && Sel && "Cannot find eh.exception and eh.selector calls!");
+ InvokeToIntrinsicsMap[Inst] = std::make_pair(Exn, Sel);
+ }
+ }
+
+ // This map stores the slots where the exception object and selector value are
+ // stored within a function.
+ DenseMap<Function*, std::pair<Value*, Value*> > FnToLPadSlotMap;
+ SmallPtrSet<Instruction*, 32> DeadInsts;
+ for (DenseMap<InvokeInst*, std::pair<Value*, Value*> >::iterator
+ I = InvokeToIntrinsicsMap.begin(), E = InvokeToIntrinsicsMap.end();
+ I != E; ++I) {
+ InvokeInst *Invoke = I->first;
+ BasicBlock *UnwindDest = Invoke->getUnwindDest();
+ Function *F = UnwindDest->getParent();
+ std::pair<Value*, Value*> EHIntrinsics = I->second;
+ CallInst *Exn = cast<CallInst>(EHIntrinsics.first);
+ CallInst *Sel = cast<CallInst>(EHIntrinsics.second);
+
+ // Store the exception object and selector value in the entry block.
+ Value *ExnSlot = 0;
+ Value *SelSlot = 0;
+ if (!FnToLPadSlotMap[F].first) {
+ BasicBlock *Entry = &F->front();
+ ExnSlot = new AllocaInst(ExnTy, "exn", Entry->getTerminator());
+ SelSlot = new AllocaInst(SelTy, "sel", Entry->getTerminator());
+ FnToLPadSlotMap[F] = std::make_pair(ExnSlot, SelSlot);
+ } else {
+ ExnSlot = FnToLPadSlotMap[F].first;
+ SelSlot = FnToLPadSlotMap[F].second;
+ }
+
+ if (!UnwindDest->getSinglePredecessor()) {
+ // The unwind destination doesn't have a single predecessor. Create an
+ // unwind destination which has only one predecessor.
+ BasicBlock *NewBB = BasicBlock::Create(Context, "new.lpad",
+ UnwindDest->getParent());
+ BranchInst::Create(UnwindDest, NewBB);
+ Invoke->setUnwindDest(NewBB);
+
+ // Fix up any PHIs in the original unwind destination block.
+ for (BasicBlock::iterator
+ II = UnwindDest->begin(); isa<PHINode>(II); ++II) {
+ PHINode *PN = cast<PHINode>(II);
+ int Idx = PN->getBasicBlockIndex(Invoke->getParent());
+ if (Idx == -1) continue;
+ PN->setIncomingBlock(Idx, NewBB);
+ }
+
+ UnwindDest = NewBB;
+ }
+
+ IRBuilder<> Builder(Context);
+ Builder.SetInsertPoint(UnwindDest, UnwindDest->getFirstInsertionPt());
+
+ Value *PersFn = Sel->getArgOperand(1);
+ LandingPadInst *LPI = Builder.CreateLandingPad(LPadSlotTy, PersFn, 0);
+ Value *LPExn = Builder.CreateExtractValue(LPI, 0);
+ Value *LPSel = Builder.CreateExtractValue(LPI, 1);
+ Builder.CreateStore(LPExn, ExnSlot);
+ Builder.CreateStore(LPSel, SelSlot);
+
+ TransferClausesToLandingPadInst(LPI, Sel);
+
+ DeadInsts.insert(Exn);
+ DeadInsts.insert(Sel);
+ }
+
+ // Replace the old intrinsic calls with the values from the landingpad
+ // instruction(s). These values were stored in allocas for us to use here.
+ for (DenseMap<InvokeInst*, std::pair<Value*, Value*> >::iterator
+ I = InvokeToIntrinsicsMap.begin(), E = InvokeToIntrinsicsMap.end();
+ I != E; ++I) {
+ std::pair<Value*, Value*> EHIntrinsics = I->second;
+ CallInst *Exn = cast<CallInst>(EHIntrinsics.first);
+ CallInst *Sel = cast<CallInst>(EHIntrinsics.second);
+ BasicBlock *Parent = Exn->getParent();
+
+ std::pair<Value*,Value*> ExnSelSlots = FnToLPadSlotMap[Parent->getParent()];
+
+ IRBuilder<> Builder(Context);
+ Builder.SetInsertPoint(Parent, Exn);
+ LoadInst *LPExn = Builder.CreateLoad(ExnSelSlots.first, "exn.load");
+ LoadInst *LPSel = Builder.CreateLoad(ExnSelSlots.second, "sel.load");
+
+ Exn->replaceAllUsesWith(LPExn);
+ Sel->replaceAllUsesWith(LPSel);
+ }
+
+ // Remove the dead instructions.
+ for (SmallPtrSet<Instruction*, 32>::iterator
+ I = DeadInsts.begin(), E = DeadInsts.end(); I != E; ++I) {
+ Instruction *Inst = *I;
+ Inst->eraseFromParent();
+ }
+
+ // Replace calls to "llvm.eh.resume" with the 'resume' instruction. Load the
+ // exception and selector values from the stored place.
+ Function *EHResume = M->getFunction("llvm.eh.resume");
+ if (!EHResume) return;
+
+ while (!EHResume->use_empty()) {
+ CallInst *Resume = cast<CallInst>(EHResume->use_back());
+ BasicBlock *BB = Resume->getParent();
+
+ IRBuilder<> Builder(Context);
+ Builder.SetInsertPoint(BB, Resume);
+
+ Value *LPadVal =
+ Builder.CreateInsertValue(UndefValue::get(LPadSlotTy),
+ Resume->getArgOperand(0), 0, "lpad.val");
+ LPadVal = Builder.CreateInsertValue(LPadVal, Resume->getArgOperand(1),
+ 1, "lpad.val");
+ Builder.CreateResume(LPadVal);
+
+ // Remove all instructions after the 'resume.'
+ BasicBlock::iterator I = Resume;
+ while (I != BB->end()) {
+ Instruction *Inst = &*I++;
+ Inst->eraseFromParent();
+ }
+ }
+}
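
For orientation, a sketch rather than part of the patch: what the atomic-intrinsic branch above amounts to for a single call. Given an old-style call such as %old = call i32 @llvm.atomic.load.add.i32.p0i32(i32* %p, i32 %v) (the mangled name is illustrative), the upgrade emits an atomicrmw with monotonic ordering and retires the call:

#include "llvm/Instructions.h"
#include "llvm/Support/IRBuilder.h"
using namespace llvm;

// Hypothetical standalone version of the llvm.atomic.load.add case above.
static void upgradeAtomicLoadAdd(CallInst *CI) {
  IRBuilder<> Builder(CI->getContext());
  Builder.SetInsertPoint(CI->getParent(), CI);          // insert before the old call
  Value *New = Builder.CreateAtomicRMW(AtomicRMWInst::Add,
                                       CI->getArgOperand(0),   // the pointer
                                       CI->getArgOperand(1),   // the addend
                                       Monotonic);
  New->takeName(CI);                                    // keep the old result name
  CI->replaceAllUsesWith(New);
  CI->eraseFromParent();
  // Textual result:  %old = atomicrmw add i32* %p, i32 %v monotonic
}
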
diff --git a/lib/VMCore/BasicBlock.cpp b/lib/VMCore/BasicBlock.cpp
index 70265c8..d0aa275 100644
--- a/lib/VMCore/BasicBlock.cpp
+++ b/lib/VMCore/BasicBlock.cpp
@@ -53,7 +53,7 @@ BasicBlock::BasicBlock(LLVMContext &C, const Twine &Name, Function *NewParent,
} else if (NewParent) {
NewParent->getBasicBlockList().push_back(this);
}
-
+
setName(Name);
}
@@ -76,7 +76,7 @@ BasicBlock::~BasicBlock() {
BA->destroyConstant();
}
}
-
+
assert(getParent() == 0 && "BasicBlock still linked into the program!");
dropAllReferences();
InstList.clear();
@@ -167,6 +167,12 @@ Instruction* BasicBlock::getFirstNonPHIOrDbgOrLifetime() {
return &*i;
}
+BasicBlock::iterator BasicBlock::getFirstInsertionPt() {
+ iterator InsertPt = getFirstNonPHI();
+ if (isa<LandingPadInst>(InsertPt)) ++InsertPt;
+ return InsertPt;
+}
+
void BasicBlock::dropAllReferences() {
for(iterator I = begin(), E = end(); I != E; ++I)
I->dropAllReferences();
@@ -184,8 +190,8 @@ BasicBlock *BasicBlock::getSinglePredecessor() {
/// getUniquePredecessor - If this basic block has a unique predecessor block,
/// return the block, otherwise return a null pointer.
-/// Note that unique predecessor doesn't mean single edge, there can be
-/// multiple edges from the unique predecessor to this block (for example
+/// Note that unique predecessor doesn't mean single edge, there can be
+/// multiple edges from the unique predecessor to this block (for example
/// a switch statement with multiple cases having the same destination).
BasicBlock *BasicBlock::getUniquePredecessor() {
pred_iterator PI = pred_begin(this), E = pred_end(this);
@@ -336,11 +342,27 @@ void BasicBlock::replaceSuccessorsPhiUsesWith(BasicBlock *New) {
return;
for (unsigned i = 0, e = TI->getNumSuccessors(); i != e; ++i) {
BasicBlock *Succ = TI->getSuccessor(i);
- for (iterator II = Succ->begin(); PHINode *PN = dyn_cast<PHINode>(II);
- ++II) {
+ // N.B. Succ might not be a complete BasicBlock, so don't assume
+ // that it ends with a non-phi instruction.
+ for (iterator II = Succ->begin(), IE = Succ->end(); II != IE; ++II) {
+ PHINode *PN = dyn_cast<PHINode>(II);
+ if (!PN)
+ break;
int i;
while ((i = PN->getBasicBlockIndex(this)) >= 0)
PN->setIncomingBlock(i, New);
}
}
}
+
+/// isLandingPad - Return true if this basic block is a landing pad. I.e., it's
+/// the destination of the 'unwind' edge of an invoke instruction.
+bool BasicBlock::isLandingPad() const {
+ return isa<LandingPadInst>(getFirstNonPHI());
+}
+
+/// getLandingPadInst() - Return the landingpad instruction associated with
+/// the landing pad.
+LandingPadInst *BasicBlock::getLandingPadInst() {
+ return dyn_cast<LandingPadInst>(getFirstNonPHI());
+}
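
A minimal usage sketch, not from the patch, for the new BasicBlock::getFirstInsertionPt() helper: it yields the first position where ordinary instructions may legally be inserted, skipping phi nodes and a leading landingpad, and it is what the exception-handling upgrade above uses when seeding the rewritten landing pads. The function and variable names here are placeholders.

#include "llvm/BasicBlock.h"
#include "llvm/Support/IRBuilder.h"
using namespace llvm;

// Hypothetical: insert a store at the first legal non-phi position of BB.
static void storeAtBlockEntry(BasicBlock *BB, Value *Val, Value *Slot) {
  IRBuilder<> Builder(BB->getContext());
  Builder.SetInsertPoint(BB, BB->getFirstInsertionPt()); // after phis/landingpad
  Builder.CreateStore(Val, Slot);
}
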
diff --git a/lib/VMCore/CMakeLists.txt b/lib/VMCore/CMakeLists.txt
index f60dd06..0404297 100644
--- a/lib/VMCore/CMakeLists.txt
+++ b/lib/VMCore/CMakeLists.txt
@@ -8,10 +8,11 @@ add_llvm_library(LLVMCore
ConstantFold.cpp
Constants.cpp
Core.cpp
- DebugLoc.cpp
DebugInfoProbe.cpp
+ DebugLoc.cpp
Dominators.cpp
Function.cpp
+ GCOV.cpp
GVMaterializer.cpp
Globals.cpp
IRBuilder.cpp
@@ -36,3 +37,5 @@ add_llvm_library(LLVMCore
ValueTypes.cpp
Verifier.cpp
)
+
+add_llvm_library_dependencies(LLVMCore LLVMSupport)
diff --git a/lib/VMCore/ConstantFold.cpp b/lib/VMCore/ConstantFold.cpp
index 5b9d2ca..30bae71 100644
--- a/lib/VMCore/ConstantFold.cpp
+++ b/lib/VMCore/ConstantFold.cpp
@@ -127,8 +127,7 @@ static Constant *FoldBitCast(Constant *V, Type *DestTy) {
if (ElTy == DPTy->getElementType())
// This GEP is inbounds because all indices are zero.
- return ConstantExpr::getInBoundsGetElementPtr(V, &IdxList[0],
- IdxList.size());
+ return ConstantExpr::getInBoundsGetElementPtr(V, IdxList);
}
// Handle casts from one vector constant to another. We know that the src
@@ -762,10 +761,14 @@ Constant *llvm::ConstantFoldExtractElementInstruction(Constant *Val,
if (ConstantVector *CVal = dyn_cast<ConstantVector>(Val)) {
if (ConstantInt *CIdx = dyn_cast<ConstantInt>(Idx)) {
+ uint64_t Index = CIdx->getZExtValue();
+ if (Index >= CVal->getNumOperands())
+ // ee({w,x,y,z}, wrong_value) -> undef
+ return UndefValue::get(cast<VectorType>(Val->getType())->getElementType());
return CVal->getOperand(CIdx->getZExtValue());
} else if (isa<UndefValue>(Idx)) {
- // ee({w,x,y,z}, undef) -> w (an arbitrary value).
- return CVal->getOperand(0);
+ // ee({w,x,y,z}, undef) -> undef
+ return UndefValue::get(cast<VectorType>(Val->getType())->getElementType());
}
}
return 0;
@@ -2146,9 +2149,9 @@ Constant *llvm::ConstantFoldCompareInstruction(unsigned short pred,
/// isInBoundsIndices - Test whether the given sequence of *normalized* indices
/// is "inbounds".
template<typename IndexTy>
-static bool isInBoundsIndices(IndexTy const *Idxs, size_t NumIdx) {
+static bool isInBoundsIndices(ArrayRef<IndexTy> Idxs) {
// No indices means nothing that could be out of bounds.
- if (NumIdx == 0) return true;
+ if (Idxs.empty()) return true;
// If the first index is zero, it's in bounds.
if (cast<Constant>(Idxs[0])->isNullValue()) return true;
@@ -2157,7 +2160,7 @@ static bool isInBoundsIndices(IndexTy const *Idxs, size_t NumIdx) {
// by the one-past-the-end rule.
if (!cast<ConstantInt>(Idxs[0])->isOne())
return false;
- for (unsigned i = 1, e = NumIdx; i != e; ++i)
+ for (unsigned i = 1, e = Idxs.size(); i != e; ++i)
if (!cast<Constant>(Idxs[i])->isNullValue())
return false;
return true;
@@ -2174,7 +2177,7 @@ static Constant *ConstantFoldGetElementPtrImpl(Constant *C,
if (isa<UndefValue>(C)) {
PointerType *Ptr = cast<PointerType>(C->getType());
- Type *Ty = GetElementPtrInst::getIndexedType(Ptr, Idxs.begin(), Idxs.end());
+ Type *Ty = GetElementPtrInst::getIndexedType(Ptr, Idxs);
assert(Ty != 0 && "Invalid indices for GEP!");
return UndefValue::get(PointerType::get(Ty, Ptr->getAddressSpace()));
}
@@ -2188,8 +2191,7 @@ static Constant *ConstantFoldGetElementPtrImpl(Constant *C,
}
if (isNull) {
PointerType *Ptr = cast<PointerType>(C->getType());
- Type *Ty = GetElementPtrInst::getIndexedType(Ptr, Idxs.begin(),
- Idxs.end());
+ Type *Ty = GetElementPtrInst::getIndexedType(Ptr, Idxs);
assert(Ty != 0 && "Invalid indices for GEP!");
return ConstantPointerNull::get(PointerType::get(Ty,
Ptr->getAddressSpace()));
@@ -2232,13 +2234,10 @@ static Constant *ConstantFoldGetElementPtrImpl(Constant *C,
NewIndices.push_back(Combined);
NewIndices.append(Idxs.begin() + 1, Idxs.end());
- return (inBounds && cast<GEPOperator>(CE)->isInBounds()) ?
- ConstantExpr::getInBoundsGetElementPtr(CE->getOperand(0),
- &NewIndices[0],
- NewIndices.size()) :
- ConstantExpr::getGetElementPtr(CE->getOperand(0),
- &NewIndices[0],
- NewIndices.size());
+ return
+ ConstantExpr::getGetElementPtr(CE->getOperand(0), NewIndices,
+ inBounds &&
+ cast<GEPOperator>(CE)->isInBounds());
}
}
@@ -2254,11 +2253,9 @@ static Constant *ConstantFoldGetElementPtrImpl(Constant *C,
if (ArrayType *CAT =
dyn_cast<ArrayType>(cast<PointerType>(C->getType())->getElementType()))
if (CAT->getElementType() == SAT->getElementType())
- return inBounds ?
- ConstantExpr::getInBoundsGetElementPtr(
- (Constant*)CE->getOperand(0), Idxs.data(), Idxs.size()) :
- ConstantExpr::getGetElementPtr(
- (Constant*)CE->getOperand(0), Idxs.data(), Idxs.size());
+ return
+ ConstantExpr::getGetElementPtr((Constant*)CE->getOperand(0),
+ Idxs, inBounds);
}
}
@@ -2313,17 +2310,14 @@ static Constant *ConstantFoldGetElementPtrImpl(Constant *C,
if (!NewIdxs.empty()) {
for (unsigned i = 0, e = Idxs.size(); i != e; ++i)
if (!NewIdxs[i]) NewIdxs[i] = cast<Constant>(Idxs[i]);
- return inBounds ?
- ConstantExpr::getInBoundsGetElementPtr(C, NewIdxs.data(),
- NewIdxs.size()) :
- ConstantExpr::getGetElementPtr(C, NewIdxs.data(), NewIdxs.size());
+ return ConstantExpr::getGetElementPtr(C, NewIdxs, inBounds);
}
// If all indices are known integers and normalized, we can do a simple
// check for the "inbounds" property.
if (!Unknown && !inBounds &&
- isa<GlobalVariable>(C) && isInBoundsIndices(Idxs.data(), Idxs.size()))
- return ConstantExpr::getInBoundsGetElementPtr(C, Idxs.data(), Idxs.size());
+ isa<GlobalVariable>(C) && isInBoundsIndices(Idxs))
+ return ConstantExpr::getInBoundsGetElementPtr(C, Idxs);
return 0;
}
diff --git a/lib/VMCore/Constants.cpp b/lib/VMCore/Constants.cpp
index d790c33..a84a046 100644
--- a/lib/VMCore/Constants.cpp
+++ b/lib/VMCore/Constants.cpp
@@ -62,6 +62,21 @@ bool Constant::isNullValue() const {
return isa<ConstantAggregateZero>(this) || isa<ConstantPointerNull>(this);
}
+bool Constant::isAllOnesValue() const {
+ // Check for -1 integers
+ if (const ConstantInt *CI = dyn_cast<ConstantInt>(this))
+ return CI->isMinusOne();
+
+ // Check for FP which are bitcasted from -1 integers
+ if (const ConstantFP *CFP = dyn_cast<ConstantFP>(this))
+ return CFP->getValueAPF().bitcastToAPInt().isAllOnesValue();
+
+ // Check for constant vectors
+ if (const ConstantVector *CV = dyn_cast<ConstantVector>(this))
+ return CV->isAllOnesValue();
+
+ return false;
+}
// Constructor to create a '0' constant of arbitrary type...
Constant *Constant::getNullValue(Type *Ty) {
switch (Ty->getTypeID()) {
@@ -90,7 +105,7 @@ Constant *Constant::getNullValue(Type *Ty) {
return ConstantAggregateZero::get(Ty);
default:
// Function, Label, or Opaque type?
- assert(!"Cannot create a null constant of that type!");
+ assert(0 && "Cannot create a null constant of that type!");
return 0;
}
}
@@ -126,7 +141,7 @@ Constant *Constant::getAllOnesValue(Type *Ty) {
SmallVector<Constant*, 16> Elts;
VectorType *VTy = cast<VectorType>(Ty);
Elts.resize(VTy->getNumElements(), getAllOnesValue(VTy->getElementType()));
- assert(Elts[0] && "Not a vector integer type!");
+ assert(Elts[0] && "Invalid AllOnes value!");
return cast<ConstantVector>(ConstantVector::get(Elts));
}
@@ -573,21 +588,16 @@ bool ConstantFP::isExactlyValue(const APFloat &V) const {
//===----------------------------------------------------------------------===//
-ConstantArray::ConstantArray(ArrayType *T,
- const std::vector<Constant*> &V)
+ConstantArray::ConstantArray(ArrayType *T, ArrayRef<Constant *> V)
: Constant(T, ConstantArrayVal,
OperandTraits<ConstantArray>::op_end(this) - V.size(),
V.size()) {
assert(V.size() == T->getNumElements() &&
"Invalid initializer vector for constant array");
- Use *OL = OperandList;
- for (std::vector<Constant*>::const_iterator I = V.begin(), E = V.end();
- I != E; ++I, ++OL) {
- Constant *C = *I;
- assert(C->getType() == T->getElementType() &&
+ for (unsigned i = 0, e = V.size(); i != e; ++i)
+ assert(V[i]->getType() == T->getElementType() &&
"Initializer for array element doesn't match array element type!");
- *OL = C;
- }
+ std::copy(V.begin(), V.end(), op_begin());
}
Constant *ConstantArray::get(ArrayType *Ty, ArrayRef<Constant*> V) {
@@ -653,21 +663,16 @@ StructType *ConstantStruct::getTypeForElements(ArrayRef<Constant*> V,
}
-ConstantStruct::ConstantStruct(StructType *T,
- const std::vector<Constant*> &V)
+ConstantStruct::ConstantStruct(StructType *T, ArrayRef<Constant *> V)
: Constant(T, ConstantStructVal,
OperandTraits<ConstantStruct>::op_end(this) - V.size(),
V.size()) {
- assert((T->isOpaque() || V.size() == T->getNumElements()) &&
+ assert(V.size() == T->getNumElements() &&
"Invalid initializer vector for constant structure");
- Use *OL = OperandList;
- for (std::vector<Constant*>::const_iterator I = V.begin(), E = V.end();
- I != E; ++I, ++OL) {
- Constant *C = *I;
- assert((T->isOpaque() || C->getType() == T->getElementType(I-V.begin())) &&
+ for (unsigned i = 0, e = V.size(); i != e; ++i)
+ assert((T->isOpaque() || V[i]->getType() == T->getElementType(i)) &&
"Initializer for struct element doesn't match struct element type!");
- *OL = C;
- }
+ std::copy(V.begin(), V.end(), op_begin());
}
// ConstantStruct accessors.
@@ -682,7 +687,7 @@ Constant *ConstantStruct::get(StructType *ST, ArrayRef<Constant*> V) {
return ConstantAggregateZero::get(ST);
}
-Constant* ConstantStruct::get(StructType *T, ...) {
+Constant *ConstantStruct::get(StructType *T, ...) {
va_list ap;
SmallVector<Constant*, 8> Values;
va_start(ap, T);
@@ -692,19 +697,14 @@ Constant* ConstantStruct::get(StructType *T, ...) {
return get(T, Values);
}
-ConstantVector::ConstantVector(VectorType *T,
- const std::vector<Constant*> &V)
+ConstantVector::ConstantVector(VectorType *T, ArrayRef<Constant *> V)
: Constant(T, ConstantVectorVal,
OperandTraits<ConstantVector>::op_end(this) - V.size(),
V.size()) {
- Use *OL = OperandList;
- for (std::vector<Constant*>::const_iterator I = V.begin(), E = V.end();
- I != E; ++I, ++OL) {
- Constant *C = *I;
- assert(C->getType() == T->getElementType() &&
+ for (size_t i = 0, e = V.size(); i != e; i++)
+ assert(V[i]->getType() == T->getElementType() &&
"Initializer for vector element doesn't match vector element type!");
- *OL = C;
- }
+ std::copy(V.begin(), V.end(), op_begin());
}
// ConstantVector accessors.
@@ -839,13 +839,13 @@ ConstantExpr::getWithOperandReplaced(unsigned OpNo, Constant *Op) const {
for (unsigned i = 1, e = getNumOperands(); i != e; ++i)
Ops[i-1] = getOperand(i);
if (OpNo == 0)
- return cast<GEPOperator>(this)->isInBounds() ?
- ConstantExpr::getInBoundsGetElementPtr(Op, &Ops[0], Ops.size()) :
- ConstantExpr::getGetElementPtr(Op, &Ops[0], Ops.size());
+ return
+ ConstantExpr::getGetElementPtr(Op, Ops,
+ cast<GEPOperator>(this)->isInBounds());
Ops[OpNo-1] = Op;
- return cast<GEPOperator>(this)->isInBounds() ?
- ConstantExpr::getInBoundsGetElementPtr(getOperand(0), &Ops[0],Ops.size()):
- ConstantExpr::getGetElementPtr(getOperand(0), &Ops[0], Ops.size());
+ return
+ ConstantExpr::getGetElementPtr(getOperand(0), Ops,
+ cast<GEPOperator>(this)->isInBounds());
}
default:
assert(getNumOperands() == 2 && "Must be binary operator?");
@@ -891,9 +891,9 @@ getWithOperands(ArrayRef<Constant*> Ops, Type *Ty) const {
case Instruction::ShuffleVector:
return ConstantExpr::getShuffleVector(Ops[0], Ops[1], Ops[2]);
case Instruction::GetElementPtr:
- return cast<GEPOperator>(this)->isInBounds() ?
- ConstantExpr::getInBoundsGetElementPtr(Ops[0], &Ops[1], Ops.size()-1) :
- ConstantExpr::getGetElementPtr(Ops[0], &Ops[1], Ops.size()-1);
+ return
+ ConstantExpr::getGetElementPtr(Ops[0], Ops.slice(1),
+ cast<GEPOperator>(this)->isInBounds());
case Instruction::ICmp:
case Instruction::FCmp:
return ConstantExpr::getCompare(getPredicate(), Ops[0], Ops[1]);
@@ -1079,13 +1079,16 @@ bool ConstantVector::isAllOnesValue() const {
// Check out first element.
const Constant *Elt = getOperand(0);
const ConstantInt *CI = dyn_cast<ConstantInt>(Elt);
- if (!CI || !CI->isAllOnesValue()) return false;
+ const ConstantFP *CF = dyn_cast<ConstantFP>(Elt);
+
// Then make sure all remaining elements point to the same value.
for (unsigned I = 1, E = getNumOperands(); I < E; ++I)
if (getOperand(I) != Elt)
return false;
- return true;
+ // First value is all-ones.
+ return (CI && CI->isAllOnesValue()) ||
+ (CF && CF->isAllOnesValue());
}
/// getSplatValue - If this is a splat constant, where all of the
@@ -1518,7 +1521,7 @@ Constant *ConstantExpr::getSizeOf(Type* Ty) {
// Note that a non-inbounds gep is used, as null isn't within any object.
Constant *GEPIdx = ConstantInt::get(Type::getInt32Ty(Ty->getContext()), 1);
Constant *GEP = getGetElementPtr(
- Constant::getNullValue(PointerType::getUnqual(Ty)), &GEPIdx, 1);
+ Constant::getNullValue(PointerType::getUnqual(Ty)), GEPIdx);
return getPtrToInt(GEP,
Type::getInt64Ty(Ty->getContext()));
}
@@ -1532,7 +1535,7 @@ Constant *ConstantExpr::getAlignOf(Type* Ty) {
Constant *Zero = ConstantInt::get(Type::getInt64Ty(Ty->getContext()), 0);
Constant *One = ConstantInt::get(Type::getInt32Ty(Ty->getContext()), 1);
Constant *Indices[2] = { Zero, One };
- Constant *GEP = getGetElementPtr(NullPtr, Indices, 2);
+ Constant *GEP = getGetElementPtr(NullPtr, Indices);
return getPtrToInt(GEP,
Type::getInt64Ty(Ty->getContext()));
}
@@ -1550,7 +1553,7 @@ Constant *ConstantExpr::getOffsetOf(Type* Ty, Constant *FieldNo) {
FieldNo
};
Constant *GEP = getGetElementPtr(
- Constant::getNullValue(PointerType::getUnqual(Ty)), GEPIdx, 2);
+ Constant::getNullValue(PointerType::getUnqual(Ty)), GEPIdx);
return getPtrToInt(GEP,
Type::getInt64Ty(Ty->getContext()));
}
@@ -1592,15 +1595,13 @@ Constant *ConstantExpr::getSelect(Constant *C, Constant *V1, Constant *V2) {
return pImpl->ExprConstants.getOrCreate(V1->getType(), Key);
}
-Constant *ConstantExpr::getGetElementPtr(Constant *C, Value* const *Idxs,
- unsigned NumIdx, bool InBounds) {
- if (Constant *FC = ConstantFoldGetElementPtr(C, InBounds,
- makeArrayRef(Idxs, NumIdx)))
+Constant *ConstantExpr::getGetElementPtr(Constant *C, ArrayRef<Value *> Idxs,
+ bool InBounds) {
+ if (Constant *FC = ConstantFoldGetElementPtr(C, InBounds, Idxs))
return FC; // Fold a few common cases.
// Get the result type of the getelementptr!
- Type *Ty =
- GetElementPtrInst::getIndexedType(C->getType(), Idxs, Idxs+NumIdx);
+ Type *Ty = GetElementPtrInst::getIndexedType(C->getType(), Idxs);
assert(Ty && "GEP indices invalid!");
unsigned AS = cast<PointerType>(C->getType())->getAddressSpace();
Type *ReqTy = Ty->getPointerTo(AS);
@@ -1609,9 +1610,9 @@ Constant *ConstantExpr::getGetElementPtr(Constant *C, Value* const *Idxs,
"Non-pointer type for constant GetElementPtr expression");
// Look up the constant in the table first to ensure uniqueness
std::vector<Constant*> ArgVec;
- ArgVec.reserve(NumIdx+1);
+ ArgVec.reserve(1 + Idxs.size());
ArgVec.push_back(C);
- for (unsigned i = 0; i != NumIdx; ++i)
+ for (unsigned i = 0, e = Idxs.size(); i != e; ++i)
ArgVec.push_back(cast<Constant>(Idxs[i]));
const ExprMapKeyType Key(Instruction::GetElementPtr, ArgVec, 0,
InBounds ? GEPOperator::IsInBounds : 0);
@@ -2092,8 +2093,7 @@ void ConstantExpr::replaceUsesOfWithOnConstant(Value *From, Value *ToV,
if (Val == From) Val = To;
Indices.push_back(Val);
}
- Replacement = ConstantExpr::getGetElementPtr(Pointer,
- &Indices[0], Indices.size(),
+ Replacement = ConstantExpr::getGetElementPtr(Pointer, Indices,
cast<GEPOperator>(this)->isInBounds());
} else if (getOpcode() == Instruction::ExtractValue) {
Constant *Agg = getOperand(0);
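
For reference, a sketch rather than part of the patch: the caller-side shape of the reworked constant GEP API this file now uses, with the index list passed as an ArrayRef and inbounds folded into getGetElementPtr() as a flag instead of a separate pointer-plus-count entry point. GV and Int32Ty are placeholders.

#include "llvm/Constants.h"
using namespace llvm;

// Old form:  ConstantExpr::getInBoundsGetElementPtr(GV, &Idxs[0], 2);
// New form (hypothetical caller):
static Constant *fieldAddress(Constant *GV, Type *Int32Ty) {
  Value *Idxs[] = { ConstantInt::get(Int32Ty, 0),
                    ConstantInt::get(Int32Ty, 1) };
  return ConstantExpr::getGetElementPtr(GV, Idxs, /*InBounds=*/true);
}
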
diff --git a/lib/VMCore/Core.cpp b/lib/VMCore/Core.cpp
index 35ba43a..a505e4b 100644
--- a/lib/VMCore/Core.cpp
+++ b/lib/VMCore/Core.cpp
@@ -167,6 +167,11 @@ LLVMTypeKind LLVMGetTypeKind(LLVMTypeRef Ty) {
}
}
+LLVMBool LLVMTypeIsSized(LLVMTypeRef Ty)
+{
+ return unwrap(Ty)->isSized();
+}
+
LLVMContextRef LLVMGetTypeContext(LLVMTypeRef Ty) {
return wrap(&unwrap(Ty)->getContext());
}
@@ -299,7 +304,15 @@ LLVMTypeRef LLVMStructType(LLVMTypeRef *ElementTypes,
LLVMTypeRef LLVMStructCreateNamed(LLVMContextRef C, const char *Name)
{
- return wrap(StructType::createNamed(*unwrap(C), Name));
+ return wrap(StructType::create(*unwrap(C), Name));
+}
+
+const char *LLVMGetStructName(LLVMTypeRef Ty)
+{
+ StructType *Type = unwrap<StructType>(Ty);
+ if (!Type->hasName())
+ return 0;
+ return Type->getName().data();
}
void LLVMStructSetBody(LLVMTypeRef StructTy, LLVMTypeRef *ElementTypes,
@@ -448,7 +461,10 @@ LLVMValueRef LLVMGetUsedValue(LLVMUseRef U) {
/*--.. Operations on Users .................................................--*/
LLVMValueRef LLVMGetOperand(LLVMValueRef Val, unsigned Index) {
- return wrap(unwrap<User>(Val)->getOperand(Index));
+ Value *V = unwrap(Val);
+ if (MDNode *MD = dyn_cast<MDNode>(V))
+ return wrap(MD->getOperand(Index));
+ return wrap(cast<User>(V)->getOperand(Index));
}
void LLVMSetOperand(LLVMValueRef Val, unsigned Index, LLVMValueRef Op) {
@@ -456,7 +472,10 @@ void LLVMSetOperand(LLVMValueRef Val, unsigned Index, LLVMValueRef Op) {
}
int LLVMGetNumOperands(LLVMValueRef Val) {
- return unwrap<User>(Val)->getNumOperands();
+ Value *V = unwrap(Val);
+ if (MDNode *MD = dyn_cast<MDNode>(V))
+ return MD->getNumOperands();
+ return cast<User>(V)->getNumOperands();
}
/*--.. Operations on constants of any type .................................--*/
@@ -513,6 +532,32 @@ LLVMValueRef LLVMMDNode(LLVMValueRef *Vals, unsigned Count) {
return LLVMMDNodeInContext(LLVMGetGlobalContext(), Vals, Count);
}
+const char *LLVMGetMDString(LLVMValueRef V, unsigned* Len) {
+ if (const MDString *S = dyn_cast<MDString>(unwrap(V))) {
+ *Len = S->getString().size();
+ return S->getString().data();
+ }
+ *Len = 0;
+ return 0;
+}
+
+unsigned LLVMGetNamedMetadataNumOperands(LLVMModuleRef M, const char* name)
+{
+ if (NamedMDNode *N = unwrap(M)->getNamedMetadata(name)) {
+ return N->getNumOperands();
+ }
+ return 0;
+}
+
+void LLVMGetNamedMetadataOperands(LLVMModuleRef M, const char* name, LLVMValueRef *Dest)
+{
+ NamedMDNode *N = unwrap(M)->getNamedMetadata(name);
+ if (!N)
+ return;
+ for (unsigned i=0;i<N->getNumOperands();i++)
+ Dest[i] = wrap(N->getOperand(i));
+}
+
/*--.. Operations on scalar constants ......................................--*/
LLVMValueRef LLVMConstInt(LLVMTypeRef IntTy, unsigned long long N,
@@ -609,10 +654,35 @@ LLVMValueRef LLVMConstVector(LLVMValueRef *ScalarConstantVals, unsigned Size) {
return wrap(ConstantVector::get(makeArrayRef(
unwrap<Constant>(ScalarConstantVals, Size), Size)));
}
+
+/*-- Opcode mapping */
+
+static LLVMOpcode map_to_llvmopcode(int opcode)
+{
+ switch (opcode) {
+ default:
+ assert(0 && "Unhandled Opcode.");
+#define HANDLE_INST(num, opc, clas) case num: return LLVM##opc;
+#include "llvm/Instruction.def"
+#undef HANDLE_INST
+ }
+}
+
+static int map_from_llvmopcode(LLVMOpcode code)
+{
+ switch (code) {
+ default:
+ assert(0 && "Unhandled Opcode.");
+#define HANDLE_INST(num, opc, clas) case LLVM##opc: return num;
+#include "llvm/Instruction.def"
+#undef HANDLE_INST
+ }
+}
+
/*--.. Constant expressions ................................................--*/
LLVMOpcode LLVMGetConstOpcode(LLVMValueRef ConstantVal) {
- return (LLVMOpcode)unwrap<ConstantExpr>(ConstantVal)->getOpcode();
+ return map_to_llvmopcode(unwrap<ConstantExpr>(ConstantVal)->getOpcode());
}
LLVMValueRef LLVMAlignOf(LLVMTypeRef Ty) {
@@ -792,18 +862,19 @@ LLVMValueRef LLVMConstAShr(LLVMValueRef LHSConstant, LLVMValueRef RHSConstant) {
LLVMValueRef LLVMConstGEP(LLVMValueRef ConstantVal,
LLVMValueRef *ConstantIndices, unsigned NumIndices) {
+ ArrayRef<Constant *> IdxList(unwrap<Constant>(ConstantIndices, NumIndices),
+ NumIndices);
return wrap(ConstantExpr::getGetElementPtr(unwrap<Constant>(ConstantVal),
- unwrap<Constant>(ConstantIndices,
- NumIndices),
- NumIndices));
+ IdxList));
}
LLVMValueRef LLVMConstInBoundsGEP(LLVMValueRef ConstantVal,
LLVMValueRef *ConstantIndices,
unsigned NumIndices) {
Constant* Val = unwrap<Constant>(ConstantVal);
- Constant** Idxs = unwrap<Constant>(ConstantIndices, NumIndices);
- return wrap(ConstantExpr::getInBoundsGetElementPtr(Val, Idxs, NumIndices));
+ ArrayRef<Constant *> IdxList(unwrap<Constant>(ConstantIndices, NumIndices),
+ NumIndices);
+ return wrap(ConstantExpr::getInBoundsGetElementPtr(Val, IdxList));
}
LLVMValueRef LLVMConstTrunc(LLVMValueRef ConstantVal, LLVMTypeRef ToType) {
@@ -1381,6 +1452,10 @@ LLVMValueRef LLVMGetBasicBlockParent(LLVMBasicBlockRef BB) {
return wrap(unwrap(BB)->getParent());
}
+LLVMValueRef LLVMGetBasicBlockTerminator(LLVMBasicBlockRef BB) {
+ return wrap(unwrap(BB)->getTerminator());
+}
+
unsigned LLVMCountBasicBlocks(LLVMValueRef FnRef) {
return unwrap<Function>(FnRef)->size();
}
@@ -1453,6 +1528,10 @@ void LLVMDeleteBasicBlock(LLVMBasicBlockRef BBRef) {
unwrap(BBRef)->eraseFromParent();
}
+void LLVMRemoveBasicBlockFromParent(LLVMBasicBlockRef BBRef) {
+ unwrap(BBRef)->removeFromParent();
+}
+
void LLVMMoveBasicBlockBefore(LLVMBasicBlockRef BB, LLVMBasicBlockRef MovePos) {
unwrap(BB)->moveBefore(unwrap(MovePos));
}
@@ -1499,6 +1578,25 @@ LLVMValueRef LLVMGetPreviousInstruction(LLVMValueRef Inst) {
return wrap(--I);
}
+void LLVMInstructionEraseFromParent(LLVMValueRef Inst) {
+ unwrap<Instruction>(Inst)->eraseFromParent();
+}
+
+LLVMIntPredicate LLVMGetICmpPredicate(LLVMValueRef Inst) {
+ if (ICmpInst *I = dyn_cast<ICmpInst>(unwrap(Inst)))
+ return (LLVMIntPredicate)I->getPredicate();
+ if (ConstantExpr *CE = dyn_cast<ConstantExpr>(unwrap(Inst)))
+ if (CE->getOpcode() == Instruction::ICmp)
+ return (LLVMIntPredicate)CE->getPredicate();
+ return (LLVMIntPredicate)0;
+}
+
+LLVMOpcode LLVMGetInstructionOpcode(LLVMValueRef Inst) {
+ if (Instruction *C = dyn_cast<Instruction>(unwrap(Inst)))
+ return map_to_llvmopcode(C->getOpcode());
+ return (LLVMOpcode)0;
+}
+
/*--.. Call and invoke instructions ........................................--*/
unsigned LLVMGetInstructionCallConv(LLVMValueRef Instr) {
@@ -1552,6 +1650,12 @@ void LLVMSetTailCall(LLVMValueRef Call, LLVMBool isTailCall) {
unwrap<CallInst>(Call)->setTailCall(isTailCall);
}
+/*--.. Operations on switch instructions (only) ............................--*/
+
+LLVMBasicBlockRef LLVMGetSwitchDefaultDest(LLVMValueRef Switch) {
+ return wrap(unwrap<SwitchInst>(Switch)->getDefaultDest());
+}
+
/*--.. Operations on phi nodes .............................................--*/
void LLVMAddIncoming(LLVMValueRef PhiNode, LLVMValueRef *IncomingValues,
@@ -1682,8 +1786,16 @@ LLVMValueRef LLVMBuildInvoke(LLVMBuilderRef B, LLVMValueRef Fn,
Name));
}
-LLVMValueRef LLVMBuildUnwind(LLVMBuilderRef B) {
- return wrap(unwrap(B)->CreateUnwind());
+LLVMValueRef LLVMBuildLandingPad(LLVMBuilderRef B, LLVMTypeRef Ty,
+ LLVMValueRef PersFn, unsigned NumClauses,
+ const char *Name) {
+ return wrap(unwrap(B)->CreateLandingPad(unwrap(Ty),
+ cast<Function>(unwrap(PersFn)),
+ NumClauses, Name));
+}
+
+LLVMValueRef LLVMBuildResume(LLVMBuilderRef B, LLVMValueRef Exn) {
+ return wrap(unwrap(B)->CreateResume(unwrap(Exn)));
}
LLVMValueRef LLVMBuildUnreachable(LLVMBuilderRef B) {
@@ -1699,6 +1811,15 @@ void LLVMAddDestination(LLVMValueRef IndirectBr, LLVMBasicBlockRef Dest) {
unwrap<IndirectBrInst>(IndirectBr)->addDestination(unwrap(Dest));
}
+void LLVMAddClause(LLVMValueRef LandingPad, LLVMValueRef ClauseVal) {
+ unwrap<LandingPadInst>(LandingPad)->
+ addClause(cast<Constant>(unwrap(ClauseVal)));
+}
+
+void LLVMSetCleanup(LLVMValueRef LandingPad, LLVMBool Val) {
+ unwrap<LandingPadInst>(LandingPad)->setCleanup(Val);
+}
+
/*--.. Arithmetic ..........................................................--*/
LLVMValueRef LLVMBuildAdd(LLVMBuilderRef B, LLVMValueRef LHS, LLVMValueRef RHS,
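Taken together, the new exception-handling entry points let a C API client build the landingpad/resume pattern directly. A hedged sketch; the personality value, type info and the { i8*, i32 } aggregate type are placeholders, not mandated by the patch:

    #include <llvm-c/Core.h>

    static void emitLandingPad(LLVMBuilderRef B, LLVMValueRef PersFn,
                               LLVMValueRef TypeInfo) {
      // { i8*, i32 } is the usual Itanium-ABI landing pad aggregate.
      LLVMTypeRef Fields[] = { LLVMPointerType(LLVMInt8Type(), 0),
                               LLVMInt32Type() };
      LLVMTypeRef LPadTy = LLVMStructType(Fields, 2, /*Packed=*/0);

      LLVMValueRef LPad = LLVMBuildLandingPad(B, LPadTy, PersFn,
                                              /*NumClauses=*/1, "lpad");
      LLVMAddClause(LPad, TypeInfo); // catch clause
      LLVMSetCleanup(LPad, 1);       // also run cleanups
      LLVMBuildResume(B, LPad);      // re-raise if nothing handles it
    }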
@@ -1829,7 +1950,7 @@ LLVMValueRef LLVMBuildXor(LLVMBuilderRef B, LLVMValueRef LHS, LLVMValueRef RHS,
LLVMValueRef LLVMBuildBinOp(LLVMBuilderRef B, LLVMOpcode Op,
LLVMValueRef LHS, LLVMValueRef RHS,
const char *Name) {
- return wrap(unwrap(B)->CreateBinOp(Instruction::BinaryOps(Op), unwrap(LHS),
+ return wrap(unwrap(B)->CreateBinOp(Instruction::BinaryOps(map_from_llvmopcode(Op)), unwrap(LHS),
unwrap(RHS), Name));
}
@@ -1908,15 +2029,15 @@ LLVMValueRef LLVMBuildStore(LLVMBuilderRef B, LLVMValueRef Val,
LLVMValueRef LLVMBuildGEP(LLVMBuilderRef B, LLVMValueRef Pointer,
LLVMValueRef *Indices, unsigned NumIndices,
const char *Name) {
- return wrap(unwrap(B)->CreateGEP(unwrap(Pointer), unwrap(Indices),
- unwrap(Indices) + NumIndices, Name));
+ ArrayRef<Value *> IdxList(unwrap(Indices), NumIndices);
+ return wrap(unwrap(B)->CreateGEP(unwrap(Pointer), IdxList, Name));
}
LLVMValueRef LLVMBuildInBoundsGEP(LLVMBuilderRef B, LLVMValueRef Pointer,
LLVMValueRef *Indices, unsigned NumIndices,
const char *Name) {
- return wrap(unwrap(B)->CreateInBoundsGEP(unwrap(Pointer), unwrap(Indices),
- unwrap(Indices) + NumIndices, Name));
+ ArrayRef<Value *> IdxList(unwrap(Indices), NumIndices);
+ return wrap(unwrap(B)->CreateInBoundsGEP(unwrap(Pointer), IdxList, Name));
}
LLVMValueRef LLVMBuildStructGEP(LLVMBuilderRef B, LLVMValueRef Pointer,
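The C signatures of LLVMBuildGEP and LLVMBuildInBoundsGEP are unchanged; only the implementation now packages the indices into an ArrayRef. For reference, a typical call still looks like this (index values are illustrative):

    #include <llvm-c/Core.h>

    // GEP to the first field of the struct that StructPtr points at.
    static LLVMValueRef gepFirstField(LLVMBuilderRef B, LLVMValueRef StructPtr) {
      LLVMValueRef Idx[] = { LLVMConstInt(LLVMInt32Type(), 0, 0),
                             LLVMConstInt(LLVMInt32Type(), 0, 0) };
      return LLVMBuildGEP(B, StructPtr, Idx, 2, "field0");
    }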
@@ -2016,7 +2137,7 @@ LLVMValueRef LLVMBuildTruncOrBitCast(LLVMBuilderRef B, LLVMValueRef Val,
LLVMValueRef LLVMBuildCast(LLVMBuilderRef B, LLVMOpcode Op, LLVMValueRef Val,
LLVMTypeRef DestTy, const char *Name) {
- return wrap(unwrap(B)->CreateCast(Instruction::CastOps(Op), unwrap(Val),
+ return wrap(unwrap(B)->CreateCast(Instruction::CastOps(map_from_llvmopcode(Op)), unwrap(Val),
unwrap(DestTy), Name));
}
diff --git a/lib/VMCore/DebugLoc.cpp b/lib/VMCore/DebugLoc.cpp
index b9c245d..328244f 100644
--- a/lib/VMCore/DebugLoc.cpp
+++ b/lib/VMCore/DebugLoc.cpp
@@ -240,7 +240,7 @@ int LLVMContextImpl::getOrAddScopeInlinedAtIdxEntry(MDNode *Scope, MDNode *IA,
/// deleted - The MDNode this is pointing to got deleted, so this pointer needs
/// to drop to null and we need remove our entry from the DenseMap.
void DebugRecVH::deleted() {
- // If this is a non-canonical reference, just drop the value to null, we know
+ // If this is a non-canonical reference, just drop the value to null, we know
// it doesn't have a map entry.
if (Idx == 0) {
setValPtr(0);
diff --git a/lib/VMCore/Function.cpp b/lib/VMCore/Function.cpp
index 1f59bf9..be0f056 100644
--- a/lib/VMCore/Function.cpp
+++ b/lib/VMCore/Function.cpp
@@ -17,6 +17,7 @@
#include "llvm/LLVMContext.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/Support/CallSite.h"
+#include "llvm/Support/InstIterator.h"
#include "llvm/Support/LeakDetector.h"
#include "llvm/Support/ManagedStatic.h"
#include "llvm/Support/StringPool.h"
@@ -358,7 +359,7 @@ std::string Intrinsic::getName(ID id, ArrayRef<Type*> Tys) {
FunctionType *Intrinsic::getType(LLVMContext &Context,
ID id, ArrayRef<Type*> Tys) {
Type *ResultTy = NULL;
- std::vector<Type*> ArgTys;
+ SmallVector<Type*, 8> ArgTys;
bool IsVarArg = false;
#define GET_INTRINSIC_GENERATOR
@@ -412,32 +413,15 @@ bool Function::hasAddressTaken(const User* *PutOffender) const {
/// callsFunctionThatReturnsTwice - Return true if the function has a call to
/// setjmp or other function that gcc recognizes as "returning twice".
-///
-/// FIXME: Remove after <rdar://problem/8031714> is fixed.
-/// FIXME: Is the above FIXME valid?
bool Function::callsFunctionThatReturnsTwice() const {
- const Module *M = this->getParent();
- static const char *ReturnsTwiceFns[] = {
- "_setjmp",
- "setjmp",
- "sigsetjmp",
- "setjmp_syscall",
- "savectx",
- "qsetjmp",
- "vfork",
- "getcontext"
- };
-
- for (unsigned I = 0; I < array_lengthof(ReturnsTwiceFns); ++I)
- if (const Function *Callee = M->getFunction(ReturnsTwiceFns[I])) {
- if (!Callee->use_empty())
- for (Value::const_use_iterator
- I = Callee->use_begin(), E = Callee->use_end();
- I != E; ++I)
- if (const CallInst *CI = dyn_cast<CallInst>(*I))
- if (CI->getParent()->getParent() == this)
- return true;
- }
+ for (const_inst_iterator
+ I = inst_begin(this), E = inst_end(this); I != E; ++I) {
+ const CallInst* callInst = dyn_cast<CallInst>(&*I);
+ if (!callInst)
+ continue;
+ if (callInst->canReturnTwice())
+ return true;
+ }
return false;
}
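The rewrite drops the hard-coded name list and instead asks each call site whether it can return twice, which is driven by the returns_twice attribute. A sketch of opting a function into that behaviour; the attribute spelling is taken from this era's Attributes.h and should be treated as an assumption:

    #include "llvm/Function.h"
    #include "llvm/Attributes.h"
    using namespace llvm;

    static void markSetjmpLike(Function *F) {
      // Call sites of F now answer true to canReturnTwice(), so any function
      // containing such a call reports callsFunctionThatReturnsTwice().
      F->addFnAttr(Attribute::ReturnsTwice);
    }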
diff --git a/lib/VMCore/GCOV.cpp b/lib/VMCore/GCOV.cpp
new file mode 100644
index 0000000..fc7f96f
--- /dev/null
+++ b/lib/VMCore/GCOV.cpp
@@ -0,0 +1,281 @@
+//===- GCOV.cpp - LLVM coverage tool --------------------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// GCOV implements the interface to read and write coverage files that use
+// 'gcov' format.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/Support/GCOV.h"
+#include "llvm/ADT/OwningPtr.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/Support/MemoryObject.h"
+#include "llvm/Support/system_error.h"
+using namespace llvm;
+
+//===----------------------------------------------------------------------===//
+// GCOVFile implementation.
+
+/// ~GCOVFile - Delete GCOVFile and its content.
+GCOVFile::~GCOVFile() {
+ DeleteContainerPointers(Functions);
+}
+
+/// isGCDAFile - Return true if Format identifies a .gcda file.
+static bool isGCDAFile(GCOVFormat Format) {
+ return Format == GCDA_402 || Format == GCDA_404;
+}
+
+/// isGCNOFile - Return true if Format identifies a .gcno file.
+static bool isGCNOFile(GCOVFormat Format) {
+ return Format == GCNO_402 || Format == GCNO_404;
+}
+
+/// read - Read GCOV buffer.
+bool GCOVFile::read(GCOVBuffer &Buffer) {
+ GCOVFormat Format = Buffer.readGCOVFormat();
+ if (Format == InvalidGCOV)
+ return false;
+
+ unsigned i = 0;
+ while (1) {
+ GCOVFunction *GFun = NULL;
+ if (isGCDAFile(Format)) {
+ // Use existing function while reading .gcda file.
+ assert (i < Functions.size() && ".gcda data does not match .gcno data");
+ GFun = Functions[i];
+ } else if (isGCNOFile(Format)){
+ GFun = new GCOVFunction();
+ Functions.push_back(GFun);
+ }
+ if (!GFun || !GFun->read(Buffer, Format))
+ break;
+ ++i;
+ }
+ return true;
+}
+
+/// dump - Dump GCOVFile content on standard out for debugging purposes.
+void GCOVFile::dump() {
+ for (SmallVector<GCOVFunction *, 16>::iterator I = Functions.begin(),
+ E = Functions.end(); I != E; ++I)
+ (*I)->dump();
+}
+
+/// collectLineCounts - Collect line counts. This must be used after
+/// reading .gcno and .gcda files.
+void GCOVFile::collectLineCounts(FileInfo &FI) {
+ for (SmallVector<GCOVFunction *, 16>::iterator I = Functions.begin(),
+ E = Functions.end(); I != E; ++I)
+ (*I)->collectLineCounts(FI);
+ FI.print();
+}
+
+//===----------------------------------------------------------------------===//
+// GCOVFunction implementation.
+
+/// ~GCOVFunction - Delete GCOVFunction and its content.
+GCOVFunction::~GCOVFunction() {
+ DeleteContainerPointers(Blocks);
+}
+
+/// read - Read a function from the buffer. Return false if the buffer cursor
+/// does not point to a function tag.
+bool GCOVFunction::read(GCOVBuffer &Buff, GCOVFormat Format) {
+ if (!Buff.readFunctionTag())
+ return false;
+
+ Buff.readInt(); // Function header length
+ Ident = Buff.readInt();
+ Buff.readInt(); // Checksum #1
+ if (Format != GCNO_402)
+ Buff.readInt(); // Checksum #2
+
+ Name = Buff.readString();
+ if (Format == GCNO_402 || Format == GCNO_404)
+ Filename = Buff.readString();
+
+ if (Format == GCDA_402 || Format == GCDA_404) {
+ Buff.readArcTag();
+ uint32_t Count = Buff.readInt() / 2;
+ for (unsigned i = 0, e = Count; i != e; ++i) {
+ Blocks[i]->addCount(Buff.readInt64());
+ }
+ return true;
+ }
+
+ LineNumber = Buff.readInt();
+
+ // read blocks.
+ assert (Buff.readBlockTag() && "Block Tag not found!");
+ uint32_t BlockCount = Buff.readInt();
+ for (int i = 0, e = BlockCount; i != e; ++i) {
+ Buff.readInt(); // Block flags;
+ Blocks.push_back(new GCOVBlock(i));
+ }
+
+ // read edges.
+ while (Buff.readEdgeTag()) {
+ uint32_t EdgeCount = (Buff.readInt() - 1) / 2;
+ uint32_t BlockNo = Buff.readInt();
+ assert (BlockNo < BlockCount && "Unexpected Block number!");
+ for (int i = 0, e = EdgeCount; i != e; ++i) {
+ Blocks[BlockNo]->addEdge(Buff.readInt());
+ Buff.readInt(); // Edge flag
+ }
+ }
+
+ // read line table.
+ while (Buff.readLineTag()) {
+ uint32_t LineTableLength = Buff.readInt();
+ uint32_t Size = Buff.getCursor() + LineTableLength*4;
+ uint32_t BlockNo = Buff.readInt();
+ assert (BlockNo < BlockCount && "Unexpected Block number!");
+ GCOVBlock *Block = Blocks[BlockNo];
+ Buff.readInt(); // flag
+ while (Buff.getCursor() != (Size - 4)) {
+ StringRef Filename = Buff.readString();
+ if (Buff.getCursor() == (Size - 4)) break;
+ while (uint32_t L = Buff.readInt())
+ Block->addLine(Filename, L);
+ }
+ Buff.readInt(); // flag
+ }
+ return true;
+}
+
+/// dump - Dump GCOVFunction content on standard out for debugging purposes.
+void GCOVFunction::dump() {
+ outs() << "===== " << Name << " @ " << Filename << ":" << LineNumber << "\n";
+ for (SmallVector<GCOVBlock *, 16>::iterator I = Blocks.begin(),
+ E = Blocks.end(); I != E; ++I)
+ (*I)->dump();
+}
+
+/// collectLineCounts - Collect line counts. This must be used after
+/// reading .gcno and .gcda files.
+void GCOVFunction::collectLineCounts(FileInfo &FI) {
+ for (SmallVector<GCOVBlock *, 16>::iterator I = Blocks.begin(),
+ E = Blocks.end(); I != E; ++I)
+ (*I)->collectLineCounts(FI);
+}
+
+//===----------------------------------------------------------------------===//
+// GCOVBlock implementation.
+
+/// ~GCOVBlock - Delete GCOVBlock and its content.
+GCOVBlock::~GCOVBlock() {
+ Edges.clear();
+ DeleteContainerSeconds(Lines);
+}
+
+void GCOVBlock::addLine(StringRef Filename, uint32_t LineNo) {
+ GCOVLines *&LinesForFile = Lines[Filename];
+ if (!LinesForFile)
+ LinesForFile = new GCOVLines();
+ LinesForFile->add(LineNo);
+}
+
+/// collectLineCounts - Collect line counts. This must be used after
+/// reading .gcno and .gcda files.
+void GCOVBlock::collectLineCounts(FileInfo &FI) {
+ for (StringMap<GCOVLines *>::iterator I = Lines.begin(),
+ E = Lines.end(); I != E; ++I)
+ I->second->collectLineCounts(FI, I->first(), Counter);
+}
+
+/// dump - Dump GCOVBlock content on standard out for debugging purposes.
+void GCOVBlock::dump() {
+ outs() << "Block : " << Number << " Counter : " << Counter << "\n";
+ if (!Edges.empty()) {
+ outs() << "\tEdges : ";
+ for (SmallVector<uint32_t, 16>::iterator I = Edges.begin(), E = Edges.end();
+ I != E; ++I)
+ outs() << (*I) << ",";
+ outs() << "\n";
+ }
+ if (!Lines.empty()) {
+ outs() << "\tLines : ";
+ for (StringMap<GCOVLines *>::iterator LI = Lines.begin(),
+ LE = Lines.end(); LI != LE; ++LI) {
+ outs() << LI->first() << " -> ";
+ LI->second->dump();
+ outs() << "\n";
+ }
+ }
+}
+
+//===----------------------------------------------------------------------===//
+// GCOVLines implementation.
+
+/// collectLineCounts - Collect line counts. This must be used after
+/// reading .gcno and .gcda files.
+void GCOVLines::collectLineCounts(FileInfo &FI, StringRef Filename,
+ uint32_t Count) {
+ for (SmallVector<uint32_t, 16>::iterator I = Lines.begin(),
+ E = Lines.end(); I != E; ++I)
+ FI.addLineCount(Filename, *I, Count);
+}
+
+/// dump - Dump GCOVLines content on standard out for debugging purposes.
+void GCOVLines::dump() {
+ for (SmallVector<uint32_t, 16>::iterator I = Lines.begin(),
+ E = Lines.end(); I != E; ++I)
+ outs() << (*I) << ",";
+}
+
+//===----------------------------------------------------------------------===//
+// FileInfo implementation.
+
+/// addLineCount - Add line count for the given line number in a file.
+void FileInfo::addLineCount(StringRef Filename, uint32_t Line, uint32_t Count) {
+ if (LineInfo.find(Filename) == LineInfo.end()) {
+ OwningPtr<MemoryBuffer> Buff;
+ if (error_code ec = MemoryBuffer::getFileOrSTDIN(Filename, Buff)) {
+ errs() << Filename << ": " << ec.message() << "\n";
+ return;
+ }
+ StringRef AllLines = Buff.take()->getBuffer();
+ LineCounts L(AllLines.count('\n')+2);
+ L[Line-1] = Count;
+ LineInfo[Filename] = L;
+ return;
+ }
+ LineCounts &L = LineInfo[Filename];
+ L[Line-1] = Count;
+}
+
+/// print - Print source files with collected line count information.
+void FileInfo::print() {
+ for (StringMap<LineCounts>::iterator I = LineInfo.begin(), E = LineInfo.end();
+ I != E; ++I) {
+ StringRef Filename = I->first();
+ outs() << Filename << "\n";
+ LineCounts &L = LineInfo[Filename];
+ OwningPtr<MemoryBuffer> Buff;
+ if (error_code ec = MemoryBuffer::getFileOrSTDIN(Filename, Buff)) {
+ errs() << Filename << ": " << ec.message() << "\n";
+ return;
+ }
+ StringRef AllLines = Buff.take()->getBuffer();
+ for (unsigned i = 0, e = L.size(); i != e; ++i) {
+ if (L[i])
+ outs() << L[i] << ":\t";
+ else
+ outs() << " :\t";
+ std::pair<StringRef, StringRef> P = AllLines.split('\n');
+ if (AllLines != P.first)
+ outs() << P.first;
+ outs() << "\n";
+ AllLines = P.second;
+ }
+ }
+}
+
+
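A rough sketch of a driver for the new reader, in the style of llvm-cov: read the .gcno structure first, overlay the .gcda counters, then collect and print line counts. GCOVBuffer taking a raw MemoryBuffer* is an assumption based on llvm/Support/GCOV.h:

    #include "llvm/Support/GCOV.h"
    #include "llvm/Support/MemoryBuffer.h"
    #include "llvm/ADT/OwningPtr.h"
    #include "llvm/ADT/StringRef.h"
    using namespace llvm;

    static bool reportCoverage(StringRef GCNOPath, StringRef GCDAPath) {
      OwningPtr<MemoryBuffer> GCNO, GCDA;
      if (MemoryBuffer::getFileOrSTDIN(GCNOPath, GCNO) ||
          MemoryBuffer::getFileOrSTDIN(GCDAPath, GCDA))
        return false;

      GCOVFile GF;
      GCOVBuffer GCNOBuf(GCNO.take()); // block/edge/line structure
      if (!GF.read(GCNOBuf))
        return false;
      GCOVBuffer GCDABuf(GCDA.take()); // overlay the runtime counters
      if (!GF.read(GCDABuf))
        return false;

      FileInfo FI;
      GF.collectLineCounts(FI);        // prints the annotated sources
      return true;
    }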
diff --git a/lib/VMCore/Globals.cpp b/lib/VMCore/Globals.cpp
index b8acc45..4254fb2 100644
--- a/lib/VMCore/Globals.cpp
+++ b/lib/VMCore/Globals.cpp
@@ -235,7 +235,7 @@ const GlobalValue *GlobalAlias::getAliasedGlobal() const {
CE->getOpcode() == Instruction::GetElementPtr) &&
"Unsupported aliasee");
- return dyn_cast<GlobalValue>(CE->getOperand(0));
+ return cast<GlobalValue>(CE->getOperand(0));
}
const GlobalValue *GlobalAlias::resolveAliasedGlobal(bool stopOnWeak) const {
diff --git a/lib/VMCore/Instruction.cpp b/lib/VMCore/Instruction.cpp
index 4627e71..73191c1 100644
--- a/lib/VMCore/Instruction.cpp
+++ b/lib/VMCore/Instruction.cpp
@@ -101,6 +101,7 @@ const char *Instruction::getOpcodeName(unsigned OpCode) {
case Switch: return "switch";
case IndirectBr: return "indirectbr";
case Invoke: return "invoke";
+ case Resume: return "resume";
case Unwind: return "unwind";
case Unreachable: return "unreachable";
@@ -127,6 +128,9 @@ const char *Instruction::getOpcodeName(unsigned OpCode) {
case Alloca: return "alloca";
case Load: return "load";
case Store: return "store";
+ case AtomicCmpXchg: return "cmpxchg";
+ case AtomicRMW: return "atomicrmw";
+ case Fence: return "fence";
case GetElementPtr: return "getelementptr";
// Convert instructions...
@@ -158,6 +162,7 @@ const char *Instruction::getOpcodeName(unsigned OpCode) {
case ShuffleVector: return "shufflevector";
case ExtractValue: return "extractvalue";
case InsertValue: return "insertvalue";
+ case LandingPad: return "landingpad";
default: return "<Invalid operator> ";
}
@@ -191,10 +196,14 @@ bool Instruction::isIdenticalToWhenDefined(const Instruction *I) const {
// Check special state that is a part of some instructions.
if (const LoadInst *LI = dyn_cast<LoadInst>(this))
return LI->isVolatile() == cast<LoadInst>(I)->isVolatile() &&
- LI->getAlignment() == cast<LoadInst>(I)->getAlignment();
+ LI->getAlignment() == cast<LoadInst>(I)->getAlignment() &&
+ LI->getOrdering() == cast<LoadInst>(I)->getOrdering() &&
+ LI->getSynchScope() == cast<LoadInst>(I)->getSynchScope();
if (const StoreInst *SI = dyn_cast<StoreInst>(this))
return SI->isVolatile() == cast<StoreInst>(I)->isVolatile() &&
- SI->getAlignment() == cast<StoreInst>(I)->getAlignment();
+ SI->getAlignment() == cast<StoreInst>(I)->getAlignment() &&
+ SI->getOrdering() == cast<StoreInst>(I)->getOrdering() &&
+ SI->getSynchScope() == cast<StoreInst>(I)->getSynchScope();
if (const CmpInst *CI = dyn_cast<CmpInst>(this))
return CI->getPredicate() == cast<CmpInst>(I)->getPredicate();
if (const CallInst *CI = dyn_cast<CallInst>(this))
@@ -208,6 +217,18 @@ bool Instruction::isIdenticalToWhenDefined(const Instruction *I) const {
return IVI->getIndices() == cast<InsertValueInst>(I)->getIndices();
if (const ExtractValueInst *EVI = dyn_cast<ExtractValueInst>(this))
return EVI->getIndices() == cast<ExtractValueInst>(I)->getIndices();
+ if (const FenceInst *FI = dyn_cast<FenceInst>(this))
+ return FI->getOrdering() == cast<FenceInst>(I)->getOrdering() &&
+ FI->getSynchScope() == cast<FenceInst>(I)->getSynchScope();
+ if (const AtomicCmpXchgInst *CXI = dyn_cast<AtomicCmpXchgInst>(this))
+ return CXI->isVolatile() == cast<AtomicCmpXchgInst>(I)->isVolatile() &&
+ CXI->getOrdering() == cast<AtomicCmpXchgInst>(I)->getOrdering() &&
+ CXI->getSynchScope() == cast<AtomicCmpXchgInst>(I)->getSynchScope();
+ if (const AtomicRMWInst *RMWI = dyn_cast<AtomicRMWInst>(this))
+ return RMWI->getOperation() == cast<AtomicRMWInst>(I)->getOperation() &&
+ RMWI->isVolatile() == cast<AtomicRMWInst>(I)->isVolatile() &&
+ RMWI->getOrdering() == cast<AtomicRMWInst>(I)->getOrdering() &&
+ RMWI->getSynchScope() == cast<AtomicRMWInst>(I)->getSynchScope();
return true;
}
@@ -230,10 +251,14 @@ bool Instruction::isSameOperationAs(const Instruction *I) const {
// Check special state that is a part of some instructions.
if (const LoadInst *LI = dyn_cast<LoadInst>(this))
return LI->isVolatile() == cast<LoadInst>(I)->isVolatile() &&
- LI->getAlignment() == cast<LoadInst>(I)->getAlignment();
+ LI->getAlignment() == cast<LoadInst>(I)->getAlignment() &&
+ LI->getOrdering() == cast<LoadInst>(I)->getOrdering() &&
+ LI->getSynchScope() == cast<LoadInst>(I)->getSynchScope();
if (const StoreInst *SI = dyn_cast<StoreInst>(this))
return SI->isVolatile() == cast<StoreInst>(I)->isVolatile() &&
- SI->getAlignment() == cast<StoreInst>(I)->getAlignment();
+ SI->getAlignment() == cast<StoreInst>(I)->getAlignment() &&
+ SI->getOrdering() == cast<StoreInst>(I)->getOrdering() &&
+ SI->getSynchScope() == cast<StoreInst>(I)->getSynchScope();
if (const CmpInst *CI = dyn_cast<CmpInst>(this))
return CI->getPredicate() == cast<CmpInst>(I)->getPredicate();
if (const CallInst *CI = dyn_cast<CallInst>(this))
@@ -248,6 +273,18 @@ bool Instruction::isSameOperationAs(const Instruction *I) const {
return IVI->getIndices() == cast<InsertValueInst>(I)->getIndices();
if (const ExtractValueInst *EVI = dyn_cast<ExtractValueInst>(this))
return EVI->getIndices() == cast<ExtractValueInst>(I)->getIndices();
+ if (const FenceInst *FI = dyn_cast<FenceInst>(this))
+ return FI->getOrdering() == cast<FenceInst>(I)->getOrdering() &&
+ FI->getSynchScope() == cast<FenceInst>(I)->getSynchScope();
+ if (const AtomicCmpXchgInst *CXI = dyn_cast<AtomicCmpXchgInst>(this))
+ return CXI->isVolatile() == cast<AtomicCmpXchgInst>(I)->isVolatile() &&
+ CXI->getOrdering() == cast<AtomicCmpXchgInst>(I)->getOrdering() &&
+ CXI->getSynchScope() == cast<AtomicCmpXchgInst>(I)->getSynchScope();
+ if (const AtomicRMWInst *RMWI = dyn_cast<AtomicRMWInst>(this))
+ return RMWI->getOperation() == cast<AtomicRMWInst>(I)->getOperation() &&
+ RMWI->isVolatile() == cast<AtomicRMWInst>(I)->isVolatile() &&
+ RMWI->getOrdering() == cast<AtomicRMWInst>(I)->getOrdering() &&
+ RMWI->getSynchScope() == cast<AtomicRMWInst>(I)->getSynchScope();
return true;
}
@@ -280,13 +317,16 @@ bool Instruction::mayReadFromMemory() const {
default: return false;
case Instruction::VAArg:
case Instruction::Load:
+ case Instruction::Fence: // FIXME: refine definition of mayReadFromMemory
+ case Instruction::AtomicCmpXchg:
+ case Instruction::AtomicRMW:
return true;
case Instruction::Call:
return !cast<CallInst>(this)->doesNotAccessMemory();
case Instruction::Invoke:
return !cast<InvokeInst>(this)->doesNotAccessMemory();
case Instruction::Store:
- return cast<StoreInst>(this)->isVolatile();
+ return !cast<StoreInst>(this)->isUnordered();
}
}
@@ -295,15 +335,18 @@ bool Instruction::mayReadFromMemory() const {
bool Instruction::mayWriteToMemory() const {
switch (getOpcode()) {
default: return false;
+ case Instruction::Fence: // FIXME: refine definition of mayWriteToMemory
case Instruction::Store:
case Instruction::VAArg:
+ case Instruction::AtomicCmpXchg:
+ case Instruction::AtomicRMW:
return true;
case Instruction::Call:
return !cast<CallInst>(this)->onlyReadsMemory();
case Instruction::Invoke:
return !cast<InvokeInst>(this)->onlyReadsMemory();
case Instruction::Load:
- return cast<LoadInst>(this)->isVolatile();
+ return !cast<LoadInst>(this)->isUnordered();
}
}
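With these changes only unordered (non-atomic, non-volatile) loads and stores are treated as plain memory accesses; anything stronger is conservatively assumed to both read and write. A small sketch of the distinction, with illustrative IRBuilder usage:

    #include "llvm/Instructions.h"
    #include "llvm/Support/IRBuilder.h"
    #include <cassert>
    using namespace llvm;

    static LoadInst *emitAcquireLoad(IRBuilder<> &B, Value *Ptr) {
      LoadInst *LI = B.CreateLoad(Ptr, "val");
      LI->setAlignment(4);    // atomic accesses require an explicit alignment
      LI->setAtomic(Acquire); // stronger than Unordered
      assert(!LI->isUnordered() && LI->mayWriteToMemory());
      return LI;
    }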
@@ -312,7 +355,7 @@ bool Instruction::mayWriteToMemory() const {
bool Instruction::mayThrow() const {
if (const CallInst *CI = dyn_cast<CallInst>(this))
return !CI->doesNotThrow();
- return false;
+ return isa<ResumeInst>(this);
}
/// isAssociative - Return true if the instruction is associative:
@@ -372,7 +415,7 @@ bool Instruction::isSafeToSpeculativelyExecute() const {
}
case Load: {
const LoadInst *LI = cast<LoadInst>(this);
- if (LI->isVolatile())
+ if (!LI->isUnordered())
return false;
return LI->getPointerOperand()->isDereferenceablePointer();
}
@@ -392,6 +435,11 @@ bool Instruction::isSafeToSpeculativelyExecute() const {
case Switch:
case Unwind:
case Unreachable:
+ case Fence:
+ case LandingPad:
+ case AtomicRMW:
+ case AtomicCmpXchg:
+ case Resume:
return false; // Misc instructions which have effects
}
}
diff --git a/lib/VMCore/Instructions.cpp b/lib/VMCore/Instructions.cpp
index df4fc16..b3a7205 100644
--- a/lib/VMCore/Instructions.cpp
+++ b/lib/VMCore/Instructions.cpp
@@ -166,6 +166,88 @@ Value *PHINode::hasConstantValue() const {
return ConstantValue;
}
+//===----------------------------------------------------------------------===//
+// LandingPadInst Implementation
+//===----------------------------------------------------------------------===//
+
+LandingPadInst::LandingPadInst(Type *RetTy, Value *PersonalityFn,
+ unsigned NumReservedValues, const Twine &NameStr,
+ Instruction *InsertBefore)
+ : Instruction(RetTy, Instruction::LandingPad, 0, 0, InsertBefore) {
+ init(PersonalityFn, 1 + NumReservedValues, NameStr);
+}
+
+LandingPadInst::LandingPadInst(Type *RetTy, Value *PersonalityFn,
+ unsigned NumReservedValues, const Twine &NameStr,
+ BasicBlock *InsertAtEnd)
+ : Instruction(RetTy, Instruction::LandingPad, 0, 0, InsertAtEnd) {
+ init(PersonalityFn, 1 + NumReservedValues, NameStr);
+}
+
+LandingPadInst::LandingPadInst(const LandingPadInst &LP)
+ : Instruction(LP.getType(), Instruction::LandingPad,
+ allocHungoffUses(LP.getNumOperands()), LP.getNumOperands()),
+ ReservedSpace(LP.getNumOperands()) {
+ Use *OL = OperandList, *InOL = LP.OperandList;
+ for (unsigned I = 0, E = ReservedSpace; I != E; ++I)
+ OL[I] = InOL[I];
+
+ setCleanup(LP.isCleanup());
+}
+
+LandingPadInst::~LandingPadInst() {
+ dropHungoffUses();
+}
+
+LandingPadInst *LandingPadInst::Create(Type *RetTy, Value *PersonalityFn,
+ unsigned NumReservedClauses,
+ const Twine &NameStr,
+ Instruction *InsertBefore) {
+ return new LandingPadInst(RetTy, PersonalityFn, NumReservedClauses, NameStr,
+ InsertBefore);
+}
+
+LandingPadInst *LandingPadInst::Create(Type *RetTy, Value *PersonalityFn,
+ unsigned NumReservedClauses,
+ const Twine &NameStr,
+ BasicBlock *InsertAtEnd) {
+ return new LandingPadInst(RetTy, PersonalityFn, NumReservedClauses, NameStr,
+ InsertAtEnd);
+}
+
+void LandingPadInst::init(Value *PersFn, unsigned NumReservedValues,
+ const Twine &NameStr) {
+ ReservedSpace = NumReservedValues;
+ NumOperands = 1;
+ OperandList = allocHungoffUses(ReservedSpace);
+ OperandList[0] = PersFn;
+ setName(NameStr);
+ setCleanup(false);
+}
+
+/// growOperands - This grows the operand list in response to a
+/// push_back style of operation. This grows the number of ops by 2 times.
+void LandingPadInst::growOperands(unsigned Size) {
+ unsigned e = getNumOperands();
+ if (ReservedSpace >= e + Size) return;
+ ReservedSpace = (e + Size / 2) * 2;
+
+ Use *NewOps = allocHungoffUses(ReservedSpace);
+ Use *OldOps = OperandList;
+ for (unsigned i = 0; i != e; ++i)
+ NewOps[i] = OldOps[i];
+
+ OperandList = NewOps;
+ Use::zap(OldOps, OldOps + e, true);
+}
+
+void LandingPadInst::addClause(Value *Val) {
+ unsigned OpNo = getNumOperands();
+ growOperands(1);
+ assert(OpNo < ReservedSpace && "Growing didn't work!");
+ ++NumOperands;
+ OperandList[OpNo] = Val;
+}
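A sketch of the C++ construction path for the new instruction; the personality function, type info and insertion block are placeholders:

    #include "llvm/DerivedTypes.h"
    #include "llvm/Instructions.h"
    using namespace llvm;

    static LandingPadInst *makeLandingPad(LLVMContext &Ctx, Value *PersFn,
                                          Constant *TypeInfo,
                                          BasicBlock *Dispatch) {
      Type *Fields[] = { Type::getInt8PtrTy(Ctx), Type::getInt32Ty(Ctx) };
      StructType *LPadTy = StructType::get(Ctx, Fields);
      LandingPadInst *LP = LandingPadInst::Create(LPadTy, PersFn,
                                                  /*NumReservedClauses=*/1,
                                                  "lpad", Dispatch);
      LP->addClause(TypeInfo); // catch clause for TypeInfo
      LP->setCleanup(true);    // also run cleanup code
      return LP;
    }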
//===----------------------------------------------------------------------===//
// CallInst Implementation
@@ -494,6 +576,9 @@ void InvokeInst::removeAttribute(unsigned i, Attributes attr) {
setAttributes(PAL);
}
+LandingPadInst *InvokeInst::getLandingPadInst() const {
+ return cast<LandingPadInst>(getUnwindDest()->getFirstNonPHI());
+}
//===----------------------------------------------------------------------===//
// ReturnInst Implementation
@@ -574,6 +659,41 @@ BasicBlock *UnwindInst::getSuccessorV(unsigned idx) const {
}
//===----------------------------------------------------------------------===//
+// ResumeInst Implementation
+//===----------------------------------------------------------------------===//
+
+ResumeInst::ResumeInst(const ResumeInst &RI)
+ : TerminatorInst(Type::getVoidTy(RI.getContext()), Instruction::Resume,
+ OperandTraits<ResumeInst>::op_begin(this), 1) {
+ Op<0>() = RI.Op<0>();
+}
+
+ResumeInst::ResumeInst(Value *Exn, Instruction *InsertBefore)
+ : TerminatorInst(Type::getVoidTy(Exn->getContext()), Instruction::Resume,
+ OperandTraits<ResumeInst>::op_begin(this), 1, InsertBefore) {
+ Op<0>() = Exn;
+}
+
+ResumeInst::ResumeInst(Value *Exn, BasicBlock *InsertAtEnd)
+ : TerminatorInst(Type::getVoidTy(Exn->getContext()), Instruction::Resume,
+ OperandTraits<ResumeInst>::op_begin(this), 1, InsertAtEnd) {
+ Op<0>() = Exn;
+}
+
+unsigned ResumeInst::getNumSuccessorsV() const {
+ return getNumSuccessors();
+}
+
+void ResumeInst::setSuccessorV(unsigned idx, BasicBlock *NewSucc) {
+ llvm_unreachable("ResumeInst has no successors!");
+}
+
+BasicBlock *ResumeInst::getSuccessorV(unsigned idx) const {
+ llvm_unreachable("ResumeInst has no successors!");
+ return 0;
+}
+
+//===----------------------------------------------------------------------===//
// UnreachableInst Implementation
//===----------------------------------------------------------------------===//
@@ -665,6 +785,27 @@ BranchInst::BranchInst(const BranchInst &BI) :
SubclassOptionalData = BI.SubclassOptionalData;
}
+void BranchInst::swapSuccessors() {
+ assert(isConditional() &&
+ "Cannot swap successors of an unconditional branch");
+ Op<-1>().swap(Op<-2>());
+
+ // Update profile metadata if present and it matches our structural
+ // expectations.
+ MDNode *ProfileData = getMetadata(LLVMContext::MD_prof);
+ if (!ProfileData || ProfileData->getNumOperands() != 3)
+ return;
+
+ // The first operand is the name. Fetch them backwards and build a new one.
+ Value *Ops[] = {
+ ProfileData->getOperand(0),
+ ProfileData->getOperand(2),
+ ProfileData->getOperand(1)
+ };
+ setMetadata(LLVMContext::MD_prof,
+ MDNode::get(ProfileData->getContext(), Ops));
+}
+
BasicBlock *BranchInst::getSuccessorV(unsigned idx) const {
return getSuccessor(idx);
}
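swapSuccessors() keeps the prof metadata consistent: operand 0 is the "branch_weights" name and operands 1 and 2 are the weights for the two successors, so they are exchanged along with the edges. A sketch of attaching such metadata (weight values are illustrative):

    #include "llvm/Constants.h"
    #include "llvm/Instructions.h"
    #include "llvm/LLVMContext.h"
    #include "llvm/Metadata.h"
    using namespace llvm;

    static void attachBranchWeights(BranchInst *BI, uint64_t TrueW,
                                    uint64_t FalseW) {
      LLVMContext &Ctx = BI->getContext();
      Value *Ops[] = { MDString::get(Ctx, "branch_weights"),
                       ConstantInt::get(Type::getInt32Ty(Ctx), TrueW),
                       ConstantInt::get(Type::getInt32Ty(Ctx), FalseW) };
      BI->setMetadata(LLVMContext::MD_prof, MDNode::get(Ctx, Ops));
      // A later BI->swapSuccessors() exchanges the two weight operands as well.
    }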
@@ -787,6 +928,8 @@ bool AllocaInst::isStaticAlloca() const {
void LoadInst::AssertOK() {
assert(getOperand(0)->getType()->isPointerTy() &&
"Ptr must have pointer type.");
+ assert(!(isAtomic() && getAlignment() == 0) &&
+ "Alignment required for atomic load");
}
LoadInst::LoadInst(Value *Ptr, const Twine &Name, Instruction *InsertBef)
@@ -794,6 +937,7 @@ LoadInst::LoadInst(Value *Ptr, const Twine &Name, Instruction *InsertBef)
Load, Ptr, InsertBef) {
setVolatile(false);
setAlignment(0);
+ setAtomic(NotAtomic);
AssertOK();
setName(Name);
}
@@ -803,6 +947,7 @@ LoadInst::LoadInst(Value *Ptr, const Twine &Name, BasicBlock *InsertAE)
Load, Ptr, InsertAE) {
setVolatile(false);
setAlignment(0);
+ setAtomic(NotAtomic);
AssertOK();
setName(Name);
}
@@ -813,6 +958,18 @@ LoadInst::LoadInst(Value *Ptr, const Twine &Name, bool isVolatile,
Load, Ptr, InsertBef) {
setVolatile(isVolatile);
setAlignment(0);
+ setAtomic(NotAtomic);
+ AssertOK();
+ setName(Name);
+}
+
+LoadInst::LoadInst(Value *Ptr, const Twine &Name, bool isVolatile,
+ BasicBlock *InsertAE)
+ : UnaryInstruction(cast<PointerType>(Ptr->getType())->getElementType(),
+ Load, Ptr, InsertAE) {
+ setVolatile(isVolatile);
+ setAlignment(0);
+ setAtomic(NotAtomic);
AssertOK();
setName(Name);
}
@@ -823,6 +980,7 @@ LoadInst::LoadInst(Value *Ptr, const Twine &Name, bool isVolatile,
Load, Ptr, InsertBef) {
setVolatile(isVolatile);
setAlignment(Align);
+ setAtomic(NotAtomic);
AssertOK();
setName(Name);
}
@@ -833,27 +991,43 @@ LoadInst::LoadInst(Value *Ptr, const Twine &Name, bool isVolatile,
Load, Ptr, InsertAE) {
setVolatile(isVolatile);
setAlignment(Align);
+ setAtomic(NotAtomic);
AssertOK();
setName(Name);
}
-LoadInst::LoadInst(Value *Ptr, const Twine &Name, bool isVolatile,
+LoadInst::LoadInst(Value *Ptr, const Twine &Name, bool isVolatile,
+ unsigned Align, AtomicOrdering Order,
+ SynchronizationScope SynchScope,
+ Instruction *InsertBef)
+ : UnaryInstruction(cast<PointerType>(Ptr->getType())->getElementType(),
+ Load, Ptr, InsertBef) {
+ setVolatile(isVolatile);
+ setAlignment(Align);
+ setAtomic(Order, SynchScope);
+ AssertOK();
+ setName(Name);
+}
+
+LoadInst::LoadInst(Value *Ptr, const Twine &Name, bool isVolatile,
+ unsigned Align, AtomicOrdering Order,
+ SynchronizationScope SynchScope,
BasicBlock *InsertAE)
: UnaryInstruction(cast<PointerType>(Ptr->getType())->getElementType(),
Load, Ptr, InsertAE) {
setVolatile(isVolatile);
- setAlignment(0);
+ setAlignment(Align);
+ setAtomic(Order, SynchScope);
AssertOK();
setName(Name);
}
-
-
LoadInst::LoadInst(Value *Ptr, const char *Name, Instruction *InsertBef)
: UnaryInstruction(cast<PointerType>(Ptr->getType())->getElementType(),
Load, Ptr, InsertBef) {
setVolatile(false);
setAlignment(0);
+ setAtomic(NotAtomic);
AssertOK();
if (Name && Name[0]) setName(Name);
}
@@ -863,6 +1037,7 @@ LoadInst::LoadInst(Value *Ptr, const char *Name, BasicBlock *InsertAE)
Load, Ptr, InsertAE) {
setVolatile(false);
setAlignment(0);
+ setAtomic(NotAtomic);
AssertOK();
if (Name && Name[0]) setName(Name);
}
@@ -873,6 +1048,7 @@ LoadInst::LoadInst(Value *Ptr, const char *Name, bool isVolatile,
Load, Ptr, InsertBef) {
setVolatile(isVolatile);
setAlignment(0);
+ setAtomic(NotAtomic);
AssertOK();
if (Name && Name[0]) setName(Name);
}
@@ -883,6 +1059,7 @@ LoadInst::LoadInst(Value *Ptr, const char *Name, bool isVolatile,
Load, Ptr, InsertAE) {
setVolatile(isVolatile);
setAlignment(0);
+ setAtomic(NotAtomic);
AssertOK();
if (Name && Name[0]) setName(Name);
}
@@ -891,7 +1068,7 @@ void LoadInst::setAlignment(unsigned Align) {
assert((Align & (Align-1)) == 0 && "Alignment is not a power of 2!");
assert(Align <= MaximumAlignment &&
"Alignment is greater than MaximumAlignment!");
- setInstructionSubclassData((getSubclassDataFromInstruction() & 1) |
+ setInstructionSubclassData((getSubclassDataFromInstruction() & ~(31 << 1)) |
((Log2_32(Align)+1)<<1));
assert(getAlignment() == Align && "Alignment representation error!");
}
@@ -907,6 +1084,8 @@ void StoreInst::AssertOK() {
assert(getOperand(0)->getType() ==
cast<PointerType>(getOperand(1)->getType())->getElementType()
&& "Ptr must be a pointer to Val type!");
+ assert(!(isAtomic() && getAlignment() == 0) &&
+ "Alignment required for atomic load");
}
@@ -919,6 +1098,7 @@ StoreInst::StoreInst(Value *val, Value *addr, Instruction *InsertBefore)
Op<1>() = addr;
setVolatile(false);
setAlignment(0);
+ setAtomic(NotAtomic);
AssertOK();
}
@@ -931,6 +1111,7 @@ StoreInst::StoreInst(Value *val, Value *addr, BasicBlock *InsertAtEnd)
Op<1>() = addr;
setVolatile(false);
setAlignment(0);
+ setAtomic(NotAtomic);
AssertOK();
}
@@ -944,6 +1125,7 @@ StoreInst::StoreInst(Value *val, Value *addr, bool isVolatile,
Op<1>() = addr;
setVolatile(isVolatile);
setAlignment(0);
+ setAtomic(NotAtomic);
AssertOK();
}
@@ -957,6 +1139,37 @@ StoreInst::StoreInst(Value *val, Value *addr, bool isVolatile,
Op<1>() = addr;
setVolatile(isVolatile);
setAlignment(Align);
+ setAtomic(NotAtomic);
+ AssertOK();
+}
+
+StoreInst::StoreInst(Value *val, Value *addr, bool isVolatile,
+ unsigned Align, AtomicOrdering Order,
+ SynchronizationScope SynchScope,
+ Instruction *InsertBefore)
+ : Instruction(Type::getVoidTy(val->getContext()), Store,
+ OperandTraits<StoreInst>::op_begin(this),
+ OperandTraits<StoreInst>::operands(this),
+ InsertBefore) {
+ Op<0>() = val;
+ Op<1>() = addr;
+ setVolatile(isVolatile);
+ setAlignment(Align);
+ setAtomic(Order, SynchScope);
+ AssertOK();
+}
+
+StoreInst::StoreInst(Value *val, Value *addr, bool isVolatile,
+ BasicBlock *InsertAtEnd)
+ : Instruction(Type::getVoidTy(val->getContext()), Store,
+ OperandTraits<StoreInst>::op_begin(this),
+ OperandTraits<StoreInst>::operands(this),
+ InsertAtEnd) {
+ Op<0>() = val;
+ Op<1>() = addr;
+ setVolatile(isVolatile);
+ setAlignment(0);
+ setAtomic(NotAtomic);
AssertOK();
}
@@ -970,10 +1183,13 @@ StoreInst::StoreInst(Value *val, Value *addr, bool isVolatile,
Op<1>() = addr;
setVolatile(isVolatile);
setAlignment(Align);
+ setAtomic(NotAtomic);
AssertOK();
}
StoreInst::StoreInst(Value *val, Value *addr, bool isVolatile,
+ unsigned Align, AtomicOrdering Order,
+ SynchronizationScope SynchScope,
BasicBlock *InsertAtEnd)
: Instruction(Type::getVoidTy(val->getContext()), Store,
OperandTraits<StoreInst>::op_begin(this),
@@ -982,7 +1198,8 @@ StoreInst::StoreInst(Value *val, Value *addr, bool isVolatile,
Op<0>() = val;
Op<1>() = addr;
setVolatile(isVolatile);
- setAlignment(0);
+ setAlignment(Align);
+ setAtomic(Order, SynchScope);
AssertOK();
}
@@ -990,37 +1207,135 @@ void StoreInst::setAlignment(unsigned Align) {
assert((Align & (Align-1)) == 0 && "Alignment is not a power of 2!");
assert(Align <= MaximumAlignment &&
"Alignment is greater than MaximumAlignment!");
- setInstructionSubclassData((getSubclassDataFromInstruction() & 1) |
+ setInstructionSubclassData((getSubclassDataFromInstruction() & ~(31 << 1)) |
((Log2_32(Align)+1) << 1));
assert(getAlignment() == Align && "Alignment representation error!");
}
//===----------------------------------------------------------------------===//
-// GetElementPtrInst Implementation
+// AtomicCmpXchgInst Implementation
//===----------------------------------------------------------------------===//
-static unsigned retrieveAddrSpace(const Value *Val) {
- return cast<PointerType>(Val->getType())->getAddressSpace();
+void AtomicCmpXchgInst::Init(Value *Ptr, Value *Cmp, Value *NewVal,
+ AtomicOrdering Ordering,
+ SynchronizationScope SynchScope) {
+ Op<0>() = Ptr;
+ Op<1>() = Cmp;
+ Op<2>() = NewVal;
+ setOrdering(Ordering);
+ setSynchScope(SynchScope);
+
+ assert(getOperand(0) && getOperand(1) && getOperand(2) &&
+ "All operands must be non-null!");
+ assert(getOperand(0)->getType()->isPointerTy() &&
+ "Ptr must have pointer type!");
+ assert(getOperand(1)->getType() ==
+ cast<PointerType>(getOperand(0)->getType())->getElementType()
+ && "Ptr must be a pointer to Cmp type!");
+ assert(getOperand(2)->getType() ==
+ cast<PointerType>(getOperand(0)->getType())->getElementType()
+ && "Ptr must be a pointer to NewVal type!");
+ assert(Ordering != NotAtomic &&
+ "AtomicCmpXchg instructions must be atomic!");
+}
+
+AtomicCmpXchgInst::AtomicCmpXchgInst(Value *Ptr, Value *Cmp, Value *NewVal,
+ AtomicOrdering Ordering,
+ SynchronizationScope SynchScope,
+ Instruction *InsertBefore)
+ : Instruction(Cmp->getType(), AtomicCmpXchg,
+ OperandTraits<AtomicCmpXchgInst>::op_begin(this),
+ OperandTraits<AtomicCmpXchgInst>::operands(this),
+ InsertBefore) {
+ Init(Ptr, Cmp, NewVal, Ordering, SynchScope);
}
-void GetElementPtrInst::init(Value *Ptr, Value* const *Idx, unsigned NumIdx,
- const Twine &Name) {
- assert(NumOperands == 1+NumIdx && "NumOperands not initialized?");
- Use *OL = OperandList;
- OL[0] = Ptr;
+AtomicCmpXchgInst::AtomicCmpXchgInst(Value *Ptr, Value *Cmp, Value *NewVal,
+ AtomicOrdering Ordering,
+ SynchronizationScope SynchScope,
+ BasicBlock *InsertAtEnd)
+ : Instruction(Cmp->getType(), AtomicCmpXchg,
+ OperandTraits<AtomicCmpXchgInst>::op_begin(this),
+ OperandTraits<AtomicCmpXchgInst>::operands(this),
+ InsertAtEnd) {
+ Init(Ptr, Cmp, NewVal, Ordering, SynchScope);
+}
+
+//===----------------------------------------------------------------------===//
+// AtomicRMWInst Implementation
+//===----------------------------------------------------------------------===//
- for (unsigned i = 0; i != NumIdx; ++i)
- OL[i+1] = Idx[i];
+void AtomicRMWInst::Init(BinOp Operation, Value *Ptr, Value *Val,
+ AtomicOrdering Ordering,
+ SynchronizationScope SynchScope) {
+ Op<0>() = Ptr;
+ Op<1>() = Val;
+ setOperation(Operation);
+ setOrdering(Ordering);
+ setSynchScope(SynchScope);
- setName(Name);
+ assert(getOperand(0) && getOperand(1) &&
+ "All operands must be non-null!");
+ assert(getOperand(0)->getType()->isPointerTy() &&
+ "Ptr must have pointer type!");
+ assert(getOperand(1)->getType() ==
+ cast<PointerType>(getOperand(0)->getType())->getElementType()
+ && "Ptr must be a pointer to Val type!");
+ assert(Ordering != NotAtomic &&
+ "AtomicRMW instructions must be atomic!");
}
-void GetElementPtrInst::init(Value *Ptr, Value *Idx, const Twine &Name) {
- assert(NumOperands == 2 && "NumOperands not initialized?");
- Use *OL = OperandList;
- OL[0] = Ptr;
- OL[1] = Idx;
+AtomicRMWInst::AtomicRMWInst(BinOp Operation, Value *Ptr, Value *Val,
+ AtomicOrdering Ordering,
+ SynchronizationScope SynchScope,
+ Instruction *InsertBefore)
+ : Instruction(Val->getType(), AtomicRMW,
+ OperandTraits<AtomicRMWInst>::op_begin(this),
+ OperandTraits<AtomicRMWInst>::operands(this),
+ InsertBefore) {
+ Init(Operation, Ptr, Val, Ordering, SynchScope);
+}
+
+AtomicRMWInst::AtomicRMWInst(BinOp Operation, Value *Ptr, Value *Val,
+ AtomicOrdering Ordering,
+ SynchronizationScope SynchScope,
+ BasicBlock *InsertAtEnd)
+ : Instruction(Val->getType(), AtomicRMW,
+ OperandTraits<AtomicRMWInst>::op_begin(this),
+ OperandTraits<AtomicRMWInst>::operands(this),
+ InsertAtEnd) {
+ Init(Operation, Ptr, Val, Ordering, SynchScope);
+}
+
+//===----------------------------------------------------------------------===//
+// FenceInst Implementation
+//===----------------------------------------------------------------------===//
+
+FenceInst::FenceInst(LLVMContext &C, AtomicOrdering Ordering,
+ SynchronizationScope SynchScope,
+ Instruction *InsertBefore)
+ : Instruction(Type::getVoidTy(C), Fence, 0, 0, InsertBefore) {
+ setOrdering(Ordering);
+ setSynchScope(SynchScope);
+}
+FenceInst::FenceInst(LLVMContext &C, AtomicOrdering Ordering,
+ SynchronizationScope SynchScope,
+ BasicBlock *InsertAtEnd)
+ : Instruction(Type::getVoidTy(C), Fence, 0, 0, InsertAtEnd) {
+ setOrdering(Ordering);
+ setSynchScope(SynchScope);
+}
+
+//===----------------------------------------------------------------------===//
+// GetElementPtrInst Implementation
+//===----------------------------------------------------------------------===//
+
+void GetElementPtrInst::init(Value *Ptr, ArrayRef<Value *> IdxList,
+ const Twine &Name) {
+ assert(NumOperands == 1 + IdxList.size() && "NumOperands not initialized?");
+ OperandList[0] = Ptr;
+ std::copy(IdxList.begin(), IdxList.end(), op_begin() + 1);
setName(Name);
}
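The AtomicCmpXchgInst, AtomicRMWInst and FenceInst constructors added in this hunk are normally reached through IRBuilder. A hedged sketch, assuming the CreateFence/CreateAtomicCmpXchg/CreateAtomicRMW helpers from this release:

    #include "llvm/Instructions.h"
    #include "llvm/Support/IRBuilder.h"
    using namespace llvm;

    static void emitAtomics(IRBuilder<> &B, Value *Ptr, Value *Old, Value *New) {
      B.CreateFence(SequentiallyConsistent);                // standalone barrier
      B.CreateAtomicCmpXchg(Ptr, Old, New, SequentiallyConsistent);
      B.CreateAtomicRMW(AtomicRMWInst::Add, Ptr, New, Monotonic);
    }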
@@ -1029,34 +1344,10 @@ GetElementPtrInst::GetElementPtrInst(const GetElementPtrInst &GEPI)
OperandTraits<GetElementPtrInst>::op_end(this)
- GEPI.getNumOperands(),
GEPI.getNumOperands()) {
- Use *OL = OperandList;
- Use *GEPIOL = GEPI.OperandList;
- for (unsigned i = 0, E = NumOperands; i != E; ++i)
- OL[i] = GEPIOL[i];
+ std::copy(GEPI.op_begin(), GEPI.op_end(), op_begin());
SubclassOptionalData = GEPI.SubclassOptionalData;
}
-GetElementPtrInst::GetElementPtrInst(Value *Ptr, Value *Idx,
- const Twine &Name, Instruction *InBe)
- : Instruction(PointerType::get(
- checkGEPType(getIndexedType(Ptr->getType(),Idx)), retrieveAddrSpace(Ptr)),
- GetElementPtr,
- OperandTraits<GetElementPtrInst>::op_end(this) - 2,
- 2, InBe) {
- init(Ptr, Idx, Name);
-}
-
-GetElementPtrInst::GetElementPtrInst(Value *Ptr, Value *Idx,
- const Twine &Name, BasicBlock *IAE)
- : Instruction(PointerType::get(
- checkGEPType(getIndexedType(Ptr->getType(),Idx)),
- retrieveAddrSpace(Ptr)),
- GetElementPtr,
- OperandTraits<GetElementPtrInst>::op_end(this) - 2,
- 2, IAE) {
- init(Ptr, Idx, Name);
-}
-
/// getIndexedType - Returns the type of the element that would be accessed with
/// a gep instruction with the specified parameters.
///
@@ -1067,14 +1358,13 @@ GetElementPtrInst::GetElementPtrInst(Value *Ptr, Value *Idx,
/// pointer type.
///
template <typename IndexTy>
-static Type *getIndexedTypeInternal(Type *Ptr, IndexTy const *Idxs,
- unsigned NumIdx) {
+static Type *getIndexedTypeInternal(Type *Ptr, ArrayRef<IndexTy> IdxList) {
PointerType *PTy = dyn_cast<PointerType>(Ptr);
if (!PTy) return 0; // Type isn't a pointer type!
Type *Agg = PTy->getElementType();
// Handle the special case of the empty set index set, which is always valid.
- if (NumIdx == 0)
+ if (IdxList.empty())
return Agg;
// If there is at least one index, the top level type must be sized, otherwise
@@ -1083,44 +1373,29 @@ static Type *getIndexedTypeInternal(Type *Ptr, IndexTy const *Idxs,
return 0;
unsigned CurIdx = 1;
- for (; CurIdx != NumIdx; ++CurIdx) {
+ for (; CurIdx != IdxList.size(); ++CurIdx) {
CompositeType *CT = dyn_cast<CompositeType>(Agg);
if (!CT || CT->isPointerTy()) return 0;
- IndexTy Index = Idxs[CurIdx];
+ IndexTy Index = IdxList[CurIdx];
if (!CT->indexValid(Index)) return 0;
Agg = CT->getTypeAtIndex(Index);
}
- return CurIdx == NumIdx ? Agg : 0;
+ return CurIdx == IdxList.size() ? Agg : 0;
}
-Type *GetElementPtrInst::getIndexedType(Type *Ptr, Value* const *Idxs,
- unsigned NumIdx) {
- return getIndexedTypeInternal(Ptr, Idxs, NumIdx);
+Type *GetElementPtrInst::getIndexedType(Type *Ptr, ArrayRef<Value *> IdxList) {
+ return getIndexedTypeInternal(Ptr, IdxList);
}
Type *GetElementPtrInst::getIndexedType(Type *Ptr,
- Constant* const *Idxs,
- unsigned NumIdx) {
- return getIndexedTypeInternal(Ptr, Idxs, NumIdx);
+ ArrayRef<Constant *> IdxList) {
+ return getIndexedTypeInternal(Ptr, IdxList);
}
-Type *GetElementPtrInst::getIndexedType(Type *Ptr,
- uint64_t const *Idxs,
- unsigned NumIdx) {
- return getIndexedTypeInternal(Ptr, Idxs, NumIdx);
+Type *GetElementPtrInst::getIndexedType(Type *Ptr, ArrayRef<uint64_t> IdxList) {
+ return getIndexedTypeInternal(Ptr, IdxList);
}
-Type *GetElementPtrInst::getIndexedType(Type *Ptr, Value *Idx) {
- PointerType *PTy = dyn_cast<PointerType>(Ptr);
- if (!PTy) return 0; // Type isn't a pointer type!
-
- // Check the pointer index.
- if (!PTy->indexValid(Idx)) return 0;
-
- return PTy->getElementType();
-}
-
-
/// hasAllZeroIndices - Return true if all of the indices of this GEP are
/// zeros. If so, the result pointer and the first operand have the same
/// value, just potentially different types.
@@ -1768,7 +2043,7 @@ bool CastInst::isNoopCast(Instruction::CastOps Opcode,
Type *IntPtrTy) {
switch (Opcode) {
default:
- assert(!"Invalid CastOp");
+ assert(0 && "Invalid CastOp");
case Instruction::Trunc:
case Instruction::ZExt:
case Instruction::SExt:
@@ -1805,8 +2080,7 @@ bool CastInst::isNoopCast(Type *IntPtrTy) const {
/// If no such cast is permitted, the function returns 0.
unsigned CastInst::isEliminableCastPair(
Instruction::CastOps firstOp, Instruction::CastOps secondOp,
- Type *SrcTy, Type *MidTy, Type *DstTy, Type *IntPtrTy)
-{
+ Type *SrcTy, Type *MidTy, Type *DstTy, Type *IntPtrTy) {
// Define the 144 possibilities for these two cast instructions. The values
// in this matrix determine what to do in a given situation and select the
// case in the switch below. The rows correspond to firstOp, the columns
@@ -1859,12 +2133,16 @@ unsigned CastInst::isEliminableCastPair(
};
// If either of the casts are a bitcast from scalar to vector, disallow the
- // merging.
- if ((firstOp == Instruction::BitCast &&
- isa<VectorType>(SrcTy) != isa<VectorType>(MidTy)) ||
- (secondOp == Instruction::BitCast &&
- isa<VectorType>(MidTy) != isa<VectorType>(DstTy)))
- return 0; // Disallowed
+ // merging. However, bitcasts of A->B->A are allowed.
+ bool isFirstBitcast = (firstOp == Instruction::BitCast);
+ bool isSecondBitcast = (secondOp == Instruction::BitCast);
+ bool chainedBitcast = (SrcTy == DstTy && isFirstBitcast && isSecondBitcast);
+
+ // Check if any of the bitcasts convert scalars<->vectors.
+ if ((isFirstBitcast && isa<VectorType>(SrcTy) != isa<VectorType>(MidTy)) ||
+ (isSecondBitcast && isa<VectorType>(MidTy) != isa<VectorType>(DstTy)))
+ // Unless we are bitcasting to the original type, disallow optimizations.
+ if (!chainedBitcast) return 0;
int ElimCase = CastResults[firstOp-Instruction::CastOpsBegin]
[secondOp-Instruction::CastOpsBegin];
@@ -1958,10 +2236,10 @@ unsigned CastInst::isEliminableCastPair(
case 99:
// cast combination can't happen (error in input). This is for all cases
// where the MidTy is not the same for the two cast instructions.
- assert(!"Invalid Cast Combination");
+ assert(0 && "Invalid Cast Combination");
return 0;
default:
- assert(!"Error in CastResults table!!!");
+ assert(0 && "Error in CastResults table!!!");
return 0;
}
return 0;
@@ -1985,7 +2263,7 @@ CastInst *CastInst::Create(Instruction::CastOps op, Value *S, Type *Ty,
case IntToPtr: return new IntToPtrInst (S, Ty, Name, InsertBefore);
case BitCast: return new BitCastInst (S, Ty, Name, InsertBefore);
default:
- assert(!"Invalid opcode provided");
+ assert(0 && "Invalid opcode provided");
}
return 0;
}
@@ -2008,7 +2286,7 @@ CastInst *CastInst::Create(Instruction::CastOps op, Value *S, Type *Ty,
case IntToPtr: return new IntToPtrInst (S, Ty, Name, InsertAtEnd);
case BitCast: return new BitCastInst (S, Ty, Name, InsertAtEnd);
default:
- assert(!"Invalid opcode provided");
+ assert(0 && "Invalid opcode provided");
}
return 0;
}
@@ -2292,17 +2570,17 @@ CastInst::getCastOpcode(
} else if (SrcTy->isIntegerTy()) {
return IntToPtr; // int -> ptr
} else {
- assert(!"Casting pointer to other than pointer or int");
+ assert(0 && "Casting pointer to other than pointer or int");
}
} else if (DestTy->isX86_MMXTy()) {
if (SrcTy->isVectorTy()) {
assert(DestBits == SrcBits && "Casting vector of wrong width to X86_MMX");
return BitCast; // 64-bit vector to MMX
} else {
- assert(!"Illegal cast to X86_MMX");
+ assert(0 && "Illegal cast to X86_MMX");
}
} else {
- assert(!"Casting to type that is not first-class");
+ assert(0 && "Casting to type that is not first-class");
}
// If we fall through to here we probably hit an assertion cast above
@@ -2612,7 +2890,7 @@ bool CmpInst::isEquality() const {
CmpInst::Predicate CmpInst::getInversePredicate(Predicate pred) {
switch (pred) {
- default: assert(!"Unknown cmp predicate!");
+ default: assert(0 && "Unknown cmp predicate!");
case ICMP_EQ: return ICMP_NE;
case ICMP_NE: return ICMP_EQ;
case ICMP_UGT: return ICMP_ULE;
@@ -2645,7 +2923,7 @@ CmpInst::Predicate CmpInst::getInversePredicate(Predicate pred) {
ICmpInst::Predicate ICmpInst::getSignedPredicate(Predicate pred) {
switch (pred) {
- default: assert(! "Unknown icmp predicate!");
+ default: assert(0 && "Unknown icmp predicate!");
case ICMP_EQ: case ICMP_NE:
case ICMP_SGT: case ICMP_SLT: case ICMP_SGE: case ICMP_SLE:
return pred;
@@ -2658,7 +2936,7 @@ ICmpInst::Predicate ICmpInst::getSignedPredicate(Predicate pred) {
ICmpInst::Predicate ICmpInst::getUnsignedPredicate(Predicate pred) {
switch (pred) {
- default: assert(! "Unknown icmp predicate!");
+ default: assert(0 && "Unknown icmp predicate!");
case ICMP_EQ: case ICMP_NE:
case ICMP_UGT: case ICMP_ULT: case ICMP_UGE: case ICMP_ULE:
return pred;
@@ -2734,7 +3012,7 @@ ICmpInst::makeConstantRange(Predicate pred, const APInt &C) {
CmpInst::Predicate CmpInst::getSwappedPredicate(Predicate pred) {
switch (pred) {
- default: assert(!"Unknown cmp predicate!");
+ default: assert(0 && "Unknown cmp predicate!");
case ICMP_EQ: case ICMP_NE:
return pred;
case ICMP_SGT: return ICMP_SLT;
@@ -3065,14 +3343,34 @@ AllocaInst *AllocaInst::clone_impl() const {
}
LoadInst *LoadInst::clone_impl() const {
- return new LoadInst(getOperand(0),
- Twine(), isVolatile(),
- getAlignment());
+ return new LoadInst(getOperand(0), Twine(), isVolatile(),
+ getAlignment(), getOrdering(), getSynchScope());
}
StoreInst *StoreInst::clone_impl() const {
- return new StoreInst(getOperand(0), getOperand(1),
- isVolatile(), getAlignment());
+ return new StoreInst(getOperand(0), getOperand(1), isVolatile(),
+ getAlignment(), getOrdering(), getSynchScope());
+}
+
+AtomicCmpXchgInst *AtomicCmpXchgInst::clone_impl() const {
+ AtomicCmpXchgInst *Result =
+ new AtomicCmpXchgInst(getOperand(0), getOperand(1), getOperand(2),
+ getOrdering(), getSynchScope());
+ Result->setVolatile(isVolatile());
+ return Result;
+}
+
+AtomicRMWInst *AtomicRMWInst::clone_impl() const {
+ AtomicRMWInst *Result =
+ new AtomicRMWInst(getOperation(), getOperand(0), getOperand(1),
+ getOrdering(), getSynchScope());
+ Result->setVolatile(isVolatile());
+ return Result;
+}
+
+FenceInst *FenceInst::clone_impl() const {
+ return new FenceInst(getContext(), getOrdering(), getSynchScope());
}
TruncInst *TruncInst::clone_impl() const {
@@ -3155,6 +3453,10 @@ PHINode *PHINode::clone_impl() const {
return new PHINode(*this);
}
+LandingPadInst *LandingPadInst::clone_impl() const {
+ return new LandingPadInst(*this);
+}
+
ReturnInst *ReturnInst::clone_impl() const {
return new(getNumOperands()) ReturnInst(*this);
}
@@ -3176,6 +3478,10 @@ InvokeInst *InvokeInst::clone_impl() const {
return new(getNumOperands()) InvokeInst(*this);
}
+ResumeInst *ResumeInst::clone_impl() const {
+ return new(1) ResumeInst(*this);
+}
+
UnwindInst *UnwindInst::clone_impl() const {
LLVMContext &Context = getContext();
return new UnwindInst(Context);
diff --git a/lib/VMCore/LLVMContext.cpp b/lib/VMCore/LLVMContext.cpp
index ebd1e0a..3ed2c2c 100644
--- a/lib/VMCore/LLVMContext.cpp
+++ b/lib/VMCore/LLVMContext.cpp
@@ -100,7 +100,7 @@ void LLVMContext::emitError(unsigned LocCookie, StringRef ErrorStr) {
}
// If we do have an error handler, we can report the error and keep going.
- SMDiagnostic Diag("", "error: " + ErrorStr.str());
+ SMDiagnostic Diag("", SourceMgr::DK_Error, ErrorStr.str());
pImpl->InlineAsmDiagHandler(Diag, pImpl->InlineAsmDiagContext, LocCookie);
}
diff --git a/lib/VMCore/Makefile b/lib/VMCore/Makefile
index 03a4fc7..2b9b0f2 100644
--- a/lib/VMCore/Makefile
+++ b/lib/VMCore/Makefile
@@ -20,9 +20,9 @@ GENFILE:=$(PROJ_OBJ_ROOT)/include/llvm/Intrinsics.gen
INTRINSICTD := $(PROJ_SRC_ROOT)/include/llvm/Intrinsics.td
INTRINSICTDS := $(wildcard $(PROJ_SRC_ROOT)/include/llvm/Intrinsics*.td)
-$(ObjDir)/Intrinsics.gen.tmp: $(ObjDir)/.dir $(INTRINSICTDS) $(TBLGEN)
+$(ObjDir)/Intrinsics.gen.tmp: $(ObjDir)/.dir $(INTRINSICTDS) $(LLVM_TBLGEN)
$(Echo) Building Intrinsics.gen.tmp from Intrinsics.td
- $(Verb) $(TableGen) $(call SYSPATH, $(INTRINSICTD)) -o $(call SYSPATH, $@) -gen-intrinsic
+ $(Verb) $(LLVMTableGen) $(call SYSPATH, $(INTRINSICTD)) -o $(call SYSPATH, $@) -gen-intrinsic
$(GENFILE): $(ObjDir)/Intrinsics.gen.tmp
$(Verb) $(CMP) -s $@ $< || ( $(CP) $< $@ && \
diff --git a/lib/VMCore/Module.cpp b/lib/VMCore/Module.cpp
index 25d5391..c29029b 100644
--- a/lib/VMCore/Module.cpp
+++ b/lib/VMCore/Module.cpp
@@ -32,25 +32,10 @@ using namespace llvm;
// Methods to implement the globals and functions lists.
//
-GlobalVariable *ilist_traits<GlobalVariable>::createSentinel() {
- GlobalVariable *Ret = new GlobalVariable(Type::getInt32Ty(getGlobalContext()),
- false, GlobalValue::ExternalLinkage);
- // This should not be garbage monitored.
- LeakDetector::removeGarbageObject(Ret);
- return Ret;
-}
-GlobalAlias *ilist_traits<GlobalAlias>::createSentinel() {
- GlobalAlias *Ret = new GlobalAlias(Type::getInt32Ty(getGlobalContext()),
- GlobalValue::ExternalLinkage);
- // This should not be garbage monitored.
- LeakDetector::removeGarbageObject(Ret);
- return Ret;
-}
-
// Explicit instantiations of SymbolTableListTraits since some of the methods
// are not in the public header file.
-template class llvm::SymbolTableListTraits<GlobalVariable, Module>;
template class llvm::SymbolTableListTraits<Function, Module>;
+template class llvm::SymbolTableListTraits<GlobalVariable, Module>;
template class llvm::SymbolTableListTraits<GlobalAlias, Module>;
//===----------------------------------------------------------------------===//
@@ -82,8 +67,10 @@ Module::Endianness Module::getEndianness() const {
Module::Endianness ret = AnyEndianness;
while (!temp.empty()) {
- StringRef token = DataLayout;
- tie(token, temp) = getToken(temp, "-");
+ std::pair<StringRef, StringRef> P = getToken(temp, "-");
+
+ StringRef token = P.first;
+ temp = P.second;
if (token[0] == 'e') {
ret = LittleEndian;
@@ -95,15 +82,16 @@ Module::Endianness Module::getEndianness() const {
return ret;
}
-/// Target Pointer Size information...
+/// Target Pointer Size information.
Module::PointerSize Module::getPointerSize() const {
StringRef temp = DataLayout;
Module::PointerSize ret = AnyPointerSize;
while (!temp.empty()) {
- StringRef token, signalToken;
- tie(token, temp) = getToken(temp, "-");
- tie(signalToken, token) = getToken(token, ":");
+ std::pair<StringRef, StringRef> TmpP = getToken(temp, "-");
+ temp = TmpP.second;
+ TmpP = getToken(TmpP.first, ":");
+ StringRef token = TmpP.second, signalToken = TmpP.first;
if (signalToken[0] == 'p') {
int size = 0;
@@ -549,5 +537,3 @@ namespace {
void Module::findUsedStructTypes(std::vector<StructType*> &StructTypes) const {
TypeFinder(StructTypes).run(*this);
}
-
-
diff --git a/lib/VMCore/PassManager.cpp b/lib/VMCore/PassManager.cpp
index 5cf2905..1aaf765 100644
--- a/lib/VMCore/PassManager.cpp
+++ b/lib/VMCore/PassManager.cpp
@@ -28,7 +28,6 @@
#include "llvm/Support/Mutex.h"
#include "llvm/ADT/StringMap.h"
#include <algorithm>
-#include <cstdio>
#include <map>
using namespace llvm;
@@ -167,8 +166,8 @@ class BBPassManager : public PMDataManager, public FunctionPass {
public:
static char ID;
- explicit BBPassManager(int Depth)
- : PMDataManager(Depth), FunctionPass(ID) {}
+ explicit BBPassManager()
+ : PMDataManager(), FunctionPass(ID) {}
/// Execute all of the passes scheduled for execution. Keep track of
/// whether any of the passes modifies the function, and if so, return true.
@@ -193,7 +192,7 @@ public:
// Print passes managed by this manager
void dumpPassStructure(unsigned Offset) {
- llvm::dbgs() << std::string(Offset*2, ' ') << "BasicBlockPass Manager\n";
+ llvm::dbgs().indent(Offset*2) << "BasicBlockPass Manager\n";
for (unsigned Index = 0; Index < getNumContainedPasses(); ++Index) {
BasicBlockPass *BP = getContainedPass(Index);
BP->dumpPassStructure(Offset + 1);
@@ -228,9 +227,9 @@ private:
bool wasRun;
public:
static char ID;
- explicit FunctionPassManagerImpl(int Depth) :
- Pass(PT_PassManager, ID), PMDataManager(Depth),
- PMTopLevelManager(new FPPassManager(1)), wasRun(false) {}
+ explicit FunctionPassManagerImpl() :
+ Pass(PT_PassManager, ID), PMDataManager(),
+ PMTopLevelManager(new FPPassManager()), wasRun(false) {}
/// add - Add a pass to the queue of passes to run. This passes ownership of
/// the Pass to the PassManager. When the PassManager is destroyed, the pass
@@ -303,8 +302,8 @@ char FunctionPassManagerImpl::ID = 0;
class MPPassManager : public Pass, public PMDataManager {
public:
static char ID;
- explicit MPPassManager(int Depth) :
- Pass(PT_PassManager, ID), PMDataManager(Depth) { }
+ explicit MPPassManager() :
+ Pass(PT_PassManager, ID), PMDataManager() { }
// Delete on the fly managers.
virtual ~MPPassManager() {
@@ -349,7 +348,7 @@ public:
// Print passes managed by this manager
void dumpPassStructure(unsigned Offset) {
- llvm::dbgs() << std::string(Offset*2, ' ') << "ModulePass Manager\n";
+ llvm::dbgs().indent(Offset*2) << "ModulePass Manager\n";
for (unsigned Index = 0; Index < getNumContainedPasses(); ++Index) {
ModulePass *MP = getContainedPass(Index);
MP->dumpPassStructure(Offset + 1);
@@ -388,9 +387,9 @@ class PassManagerImpl : public Pass,
public:
static char ID;
- explicit PassManagerImpl(int Depth) :
- Pass(PT_PassManager, ID), PMDataManager(Depth),
- PMTopLevelManager(new MPPassManager(1)) {}
+ explicit PassManagerImpl() :
+ Pass(PT_PassManager, ID), PMDataManager(),
+ PMTopLevelManager(new MPPassManager()) {}
/// add - Add a pass to the queue of passes to run. This passes ownership of
/// the Pass to the PassManager. When the PassManager is destroyed, the pass
@@ -1340,7 +1339,7 @@ bool BBPassManager::doFinalization(Function &F) {
/// Create new Function pass manager
FunctionPassManager::FunctionPassManager(Module *m) : M(m) {
- FPM = new FunctionPassManagerImpl(0);
+ FPM = new FunctionPassManagerImpl();
// FPM is the top level manager.
FPM->setTopLevelManager(FPM);
@@ -1475,7 +1474,7 @@ bool FunctionPassManagerImpl::run(Function &F) {
char FPPassManager::ID = 0;
/// Print passes managed by this manager
void FPPassManager::dumpPassStructure(unsigned Offset) {
- llvm::dbgs() << std::string(Offset*2, ' ') << "FunctionPass Manager\n";
+ dbgs().indent(Offset*2) << "FunctionPass Manager\n";
for (unsigned Index = 0; Index < getNumContainedPasses(); ++Index) {
FunctionPass *FP = getContainedPass(Index);
FP->dumpPassStructure(Offset + 1);
@@ -1532,7 +1531,7 @@ bool FPPassManager::runOnModule(Module &M) {
bool Changed = doInitialization(M);
for (Module::iterator I = M.begin(), E = M.end(); I != E; ++I)
- runOnFunction(*I);
+ Changed |= runOnFunction(*I);
return doFinalization(M) || Changed;
}
@@ -1626,7 +1625,7 @@ void MPPassManager::addLowerLevelRequiredPass(Pass *P, Pass *RequiredPass) {
FunctionPassManagerImpl *FPP = OnTheFlyManagers[P];
if (!FPP) {
- FPP = new FunctionPassManagerImpl(0);
+ FPP = new FunctionPassManagerImpl();
// FPP is the top level manager.
FPP->setTopLevelManager(FPP);
@@ -1635,9 +1634,11 @@ void MPPassManager::addLowerLevelRequiredPass(Pass *P, Pass *RequiredPass) {
FPP->add(RequiredPass);
// Register P as the last user of RequiredPass.
- SmallVector<Pass *, 1> LU;
- LU.push_back(RequiredPass);
- FPP->setLastUser(LU, P);
+ if (RequiredPass) {
+ SmallVector<Pass *, 1> LU;
+ LU.push_back(RequiredPass);
+ FPP->setLastUser(LU, P);
+ }
}
/// Return function pass corresponding to PassInfo PI, that is
@@ -1677,7 +1678,7 @@ bool PassManagerImpl::run(Module &M) {
/// Create new pass manager
PassManager::PassManager() {
- PM = new PassManagerImpl(0);
+ PM = new PassManagerImpl();
// PM is the top level manager
PM->setTopLevelManager(PM);
}
@@ -1761,13 +1762,23 @@ void PMStack::pop() {
// Push PM on the stack and set its top level manager.
void PMStack::push(PMDataManager *PM) {
assert(PM && "Unable to push. Pass Manager expected");
+ assert(PM->getDepth()==0 && "Pass Manager depth set too early");
if (!this->empty()) {
+ assert(PM->getPassManagerType() > this->top()->getPassManagerType()
+ && "pushing bad pass manager to PMStack");
PMTopLevelManager *TPM = this->top()->getTopLevelManager();
assert(TPM && "Unable to find top level manager");
TPM->addIndirectPassManager(PM);
PM->setTopLevelManager(TPM);
+ PM->setDepth(this->top()->getDepth()+1);
+ }
+ else {
+ assert((PM->getPassManagerType() == PMT_ModulePassManager
+ || PM->getPassManagerType() == PMT_FunctionPassManager)
+ && "pushing bad pass manager to PMStack");
+ PM->setDepth(1);
}
S.push_back(PM);
@@ -1777,10 +1788,10 @@ void PMStack::push(PMDataManager *PM) {
void PMStack::dump() const {
for (std::vector<PMDataManager *>::const_iterator I = S.begin(),
E = S.end(); I != E; ++I)
- printf("%s ", (*I)->getAsPass()->getPassName());
+ dbgs() << (*I)->getAsPass()->getPassName() << ' ';
if (!S.empty())
- printf("\n");
+ dbgs() << '\n';
}
/// Find appropriate Module Pass Manager in the PM Stack and
@@ -1823,7 +1834,7 @@ void FunctionPass::assignPassManager(PMStack &PMS,
PMDataManager *PMD = PMS.top();
// [1] Create new Function Pass Manager
- FPP = new FPPassManager(PMD->getDepth() + 1);
+ FPP = new FPPassManager();
FPP->populateInheritedAnalysis(PMS);
// [2] Set up new manager's top level manager
@@ -1860,7 +1871,7 @@ void BasicBlockPass::assignPassManager(PMStack &PMS,
PMDataManager *PMD = PMS.top();
// [1] Create new Basic Block Manager
- BBP = new BBPassManager(PMD->getDepth() + 1);
+ BBP = new BBPassManager();
// [2] Set up new manager's top level manager
// Basic Block Pass Manager does not live by itself
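
The PassManager.cpp hunks remove the explicit Depth constructor argument from the nested pass managers; PMStack::push() now assigns the depth and asserts that managers are pushed in a legal nesting order. A minimal sketch of that push-time depth assignment, using placeholder types rather than the real PMDataManager/PMStack classes:

#include <cassert>
#include <vector>

// Placeholder for a pass manager whose depth is set only when it is pushed.
struct ManagerSketch {
  unsigned Depth;
  ManagerSketch() : Depth(0) {}
};

class StackSketch {
  std::vector<ManagerSketch*> S;
public:
  void push(ManagerSketch *M) {
    assert(M && M->Depth == 0 && "depth set too early");
    // Top-level managers sit at depth 1; nested managers are one deeper than
    // whatever is currently on top, mirroring PMStack::push() above.
    M->Depth = S.empty() ? 1 : S.back()->Depth + 1;
    S.push_back(M);
  }
  ManagerSketch *top() const { return S.back(); }
  bool empty() const { return S.empty(); }
};
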
diff --git a/lib/VMCore/PassRegistry.cpp b/lib/VMCore/PassRegistry.cpp
index fa92620..2df6557 100644
--- a/lib/VMCore/PassRegistry.cpp
+++ b/lib/VMCore/PassRegistry.cpp
@@ -20,6 +20,7 @@
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/StringMap.h"
+#include "llvm/Function.h"
#include <vector>
using namespace llvm;
diff --git a/lib/VMCore/Type.cpp b/lib/VMCore/Type.cpp
index bf8af07..10184bc 100644
--- a/lib/VMCore/Type.cpp
+++ b/lib/VMCore/Type.cpp
@@ -392,7 +392,7 @@ StructType *StructType::get(LLVMContext &Context, ArrayRef<Type*> ETypes,
// Value not found. Create a new type!
ST = new (Context.pImpl->TypeAllocator) StructType(Context);
- ST->setSubclassData(SCDB_IsAnonymous); // Anonymous struct.
+ ST->setSubclassData(SCDB_IsLiteral); // Literal struct.
ST->setBody(ETypes, isPacked);
return ST;
}
@@ -412,13 +412,6 @@ void StructType::setBody(ArrayRef<Type*> Elements, bool isPacked) {
NumContainedTys = Elements.size();
}
-StructType *StructType::createNamed(LLVMContext &Context, StringRef Name) {
- StructType *ST = new (Context.pImpl->TypeAllocator) StructType(Context);
- if (!Name.empty())
- ST->setName(Name);
- return ST;
-}
-
void StructType::setName(StringRef Name) {
if (Name == getName()) return;
@@ -461,6 +454,13 @@ void StructType::setName(StringRef Name) {
//===----------------------------------------------------------------------===//
// StructType Helper functions.
+StructType *StructType::create(LLVMContext &Context, StringRef Name) {
+ StructType *ST = new (Context.pImpl->TypeAllocator) StructType(Context);
+ if (!Name.empty())
+ ST->setName(Name);
+ return ST;
+}
+
StructType *StructType::get(LLVMContext &Context, bool isPacked) {
return get(Context, llvm::ArrayRef<Type*>(), isPacked);
}
@@ -478,21 +478,36 @@ StructType *StructType::get(Type *type, ...) {
return llvm::StructType::get(Ctx, StructFields);
}
-StructType *StructType::createNamed(LLVMContext &Context, StringRef Name,
- ArrayRef<Type*> Elements, bool isPacked) {
- StructType *ST = createNamed(Context, Name);
+StructType *StructType::create(LLVMContext &Context, ArrayRef<Type*> Elements,
+ StringRef Name, bool isPacked) {
+ StructType *ST = create(Context, Name);
ST->setBody(Elements, isPacked);
return ST;
}
-StructType *StructType::createNamed(StringRef Name, ArrayRef<Type*> Elements,
- bool isPacked) {
+StructType *StructType::create(LLVMContext &Context, ArrayRef<Type*> Elements) {
+ return create(Context, Elements, StringRef());
+}
+
+StructType *StructType::create(LLVMContext &Context) {
+ return create(Context, StringRef());
+}
+
+
+StructType *StructType::create(ArrayRef<Type*> Elements, StringRef Name,
+ bool isPacked) {
assert(!Elements.empty() &&
"This method may not be invoked with an empty list");
- return createNamed(Elements[0]->getContext(), Name, Elements, isPacked);
+ return create(Elements[0]->getContext(), Elements, Name, isPacked);
}
-StructType *StructType::createNamed(StringRef Name, Type *type, ...) {
+StructType *StructType::create(ArrayRef<Type*> Elements) {
+ assert(!Elements.empty() &&
+ "This method may not be invoked with an empty list");
+ return create(Elements[0]->getContext(), Elements, StringRef());
+}
+
+StructType *StructType::create(StringRef Name, Type *type, ...) {
assert(type != 0 && "Cannot create a struct type with no elements with this");
LLVMContext &Ctx = type->getContext();
va_list ap;
@@ -502,11 +517,12 @@ StructType *StructType::createNamed(StringRef Name, Type *type, ...) {
StructFields.push_back(type);
type = va_arg(ap, llvm::Type*);
}
- return llvm::StructType::createNamed(Ctx, Name, StructFields);
+ return llvm::StructType::create(Ctx, StructFields, Name);
}
+
StringRef StructType::getName() const {
- assert(!isAnonymous() && "Anonymous structs never have names");
+ assert(!isLiteral() && "Literal structs never have names");
if (SymbolTableEntry == 0) return StringRef();
return ((StringMapEntry<StructType*> *)SymbolTableEntry)->getKey();
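
The Type.cpp hunks rename StructType::createNamed to StructType::create and isAnonymous to isLiteral, separating identified (named) structs from literal ones. A caller-side sketch against the post-patch factory functions; the field types and struct name below are placeholders chosen for illustration, and header paths are those in use at this revision:

#include "llvm/DerivedTypes.h"
#include "llvm/LLVMContext.h"

using namespace llvm;

// Build one literal (unnamed, uniqued-by-structure) struct and one identified
// (named) struct with the renamed factory functions introduced by this patch.
static void buildStructsSketch(LLVMContext &Ctx) {
  Type *I32 = Type::getInt32Ty(Ctx);
  Type *Fields[] = { I32, I32 };

  // Literal struct: never has a name; isLiteral() is true.
  StructType *Literal = StructType::get(Ctx, Fields, /*isPacked=*/false);
  (void)Literal;

  // Identified struct: created via the new create() overload, carries a name.
  StructType *Named = StructType::create(Ctx, Fields, "my.pair", /*isPacked=*/false);
  (void)Named;
}
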
diff --git a/lib/VMCore/ValueTypes.cpp b/lib/VMCore/ValueTypes.cpp
index 525228b..e13bd7d 100644
--- a/lib/VMCore/ValueTypes.cpp
+++ b/lib/VMCore/ValueTypes.cpp
@@ -19,6 +19,12 @@
#include "llvm/Support/ErrorHandling.h"
using namespace llvm;
+EVT EVT::changeExtendedVectorElementTypeToInteger() const {
+ LLVMContext &Context = LLVMTy->getContext();
+ EVT IntTy = getIntegerVT(Context, getVectorElementType().getSizeInBits());
+ return getVectorVT(Context, IntTy, getVectorNumElements());
+}
+
EVT EVT::getExtendedIntegerVT(LLVMContext &Context, unsigned BitWidth) {
EVT VT;
VT.LLVMTy = IntegerType::get(Context, BitWidth);
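
The new EVT::changeExtendedVectorElementTypeToInteger() rebuilds an extended vector type with integer elements of the same bit width and element count, e.g. a <4 x float> EVT becomes <4 x i32>. A standalone sketch of the same transformation over plain IR VectorType (not the EVT wrapper itself), with a placeholder function name:

#include "llvm/DerivedTypes.h"
#include "llvm/LLVMContext.h"

using namespace llvm;

// Rebuild a vector type whose elements are integers of the same bit width,
// e.g. <4 x float> -> <4 x i32>, mirroring the EVT helper added above.
static VectorType *vectorElementsToIntegerSketch(VectorType *VT) {
  LLVMContext &Ctx = VT->getContext();
  unsigned Bits = VT->getElementType()->getPrimitiveSizeInBits();
  return VectorType::get(IntegerType::get(Ctx, Bits), VT->getNumElements());
}
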
diff --git a/lib/VMCore/Verifier.cpp b/lib/VMCore/Verifier.cpp
index 4594916..9564b7d 100644
--- a/lib/VMCore/Verifier.cpp
+++ b/lib/VMCore/Verifier.cpp
@@ -35,6 +35,12 @@
// * It is illegal to have a ret instruction that returns a value that does not
// agree with the function return value type.
// * Function call argument types match the function prototype
+// * A landing pad is defined by a landingpad instruction, and can be jumped to
+// only by the unwind edge of an invoke instruction.
+// * A landingpad instruction must be the first non-PHI instruction in the
+// block.
+// * All landingpad instructions within the same function must use the same
+//   personality function.
// * All other things that are tested by asserts spread about the code...
//
//===----------------------------------------------------------------------===//
@@ -131,18 +137,22 @@ namespace {
/// already.
SmallPtrSet<MDNode *, 32> MDNodes;
+ /// PersonalityFn - The personality function referenced by the
+ /// LandingPadInsts. All LandingPadInsts within the same function must use
+ /// the same personality function.
+ const Value *PersonalityFn;
+
Verifier()
- : FunctionPass(ID),
- Broken(false), RealPass(true), action(AbortProcessAction),
- Mod(0), Context(0), DT(0), MessagesStr(Messages) {
- initializeVerifierPass(*PassRegistry::getPassRegistry());
- }
+ : FunctionPass(ID), Broken(false), RealPass(true),
+ action(AbortProcessAction), Mod(0), Context(0), DT(0),
+ MessagesStr(Messages), PersonalityFn(0) {
+ initializeVerifierPass(*PassRegistry::getPassRegistry());
+ }
explicit Verifier(VerifierFailureAction ctn)
- : FunctionPass(ID),
- Broken(false), RealPass(true), action(ctn), Mod(0), Context(0), DT(0),
- MessagesStr(Messages) {
- initializeVerifierPass(*PassRegistry::getPassRegistry());
- }
+ : FunctionPass(ID), Broken(false), RealPass(true), action(ctn), Mod(0),
+ Context(0), DT(0), MessagesStr(Messages), PersonalityFn(0) {
+ initializeVerifierPass(*PassRegistry::getPassRegistry());
+ }
bool doInitialization(Module &M) {
Mod = &M;
@@ -165,6 +175,7 @@ namespace {
visit(F);
InstsInThisBlock.clear();
+ PersonalityFn = 0;
// If this is a real pass, in a pass manager, we must abort before
// returning back to the pass manager, or else the pass manager may try to
@@ -278,9 +289,13 @@ namespace {
void visitUserOp1(Instruction &I);
void visitUserOp2(Instruction &I) { visitUserOp1(I); }
void visitIntrinsicFunctionCall(Intrinsic::ID ID, CallInst &CI);
+ void visitAtomicCmpXchgInst(AtomicCmpXchgInst &CXI);
+ void visitAtomicRMWInst(AtomicRMWInst &RMWI);
+ void visitFenceInst(FenceInst &FI);
void visitAllocaInst(AllocaInst &AI);
void visitExtractValueInst(ExtractValueInst &EVI);
void visitInsertValueInst(InsertValueInst &IVI);
+ void visitLandingPadInst(LandingPadInst &LPI);
void VerifyCallSite(CallSite CS);
bool PerformTypeCheck(Intrinsic::ID ID, Function *F, Type *Ty,
@@ -1152,6 +1167,12 @@ void Verifier::visitCallInst(CallInst &CI) {
void Verifier::visitInvokeInst(InvokeInst &II) {
VerifyCallSite(&II);
+
+ // Verify that there is a landingpad instruction as the first non-PHI
+ // instruction of the 'unwind' destination.
+ Assert1(II.getUnwindDest()->isLandingPad(),
+          "The unwind destination does not have a landingpad instruction!", &II);
+
visitTerminatorInst(II);
}
@@ -1274,10 +1295,13 @@ void Verifier::visitShuffleVectorInst(ShuffleVectorInst &SV) {
}
void Verifier::visitGetElementPtrInst(GetElementPtrInst &GEP) {
+ Assert1(cast<PointerType>(GEP.getOperand(0)->getType())
+ ->getElementType()->isSized(),
+ "GEP into unsized type!", &GEP);
+
SmallVector<Value*, 16> Idxs(GEP.idx_begin(), GEP.idx_end());
Type *ElTy =
- GetElementPtrInst::getIndexedType(GEP.getOperand(0)->getType(),
- Idxs.begin(), Idxs.end());
+ GetElementPtrInst::getIndexedType(GEP.getOperand(0)->getType(), Idxs);
Assert1(ElTy, "Invalid indices for GEP pointer type!", &GEP);
Assert2(GEP.getType()->isPointerTy() &&
cast<PointerType>(GEP.getType())->getElementType() == ElTy,
@@ -1291,6 +1315,15 @@ void Verifier::visitLoadInst(LoadInst &LI) {
Type *ElTy = PTy->getElementType();
Assert2(ElTy == LI.getType(),
"Load result type does not match pointer operand type!", &LI, ElTy);
+ if (LI.isAtomic()) {
+ Assert1(LI.getOrdering() != Release && LI.getOrdering() != AcquireRelease,
+ "Load cannot have Release ordering", &LI);
+ Assert1(LI.getAlignment() != 0,
+ "Atomic load must specify explicit alignment", &LI);
+ } else {
+ Assert1(LI.getSynchScope() == CrossThread,
+ "Non-atomic load cannot have SynchronizationScope specified", &LI);
+ }
visitInstruction(LI);
}
@@ -1301,6 +1334,15 @@ void Verifier::visitStoreInst(StoreInst &SI) {
Assert2(ElTy == SI.getOperand(0)->getType(),
"Stored value type does not match pointer operand type!",
&SI, ElTy);
+ if (SI.isAtomic()) {
+ Assert1(SI.getOrdering() != Acquire && SI.getOrdering() != AcquireRelease,
+ "Store cannot have Acquire ordering", &SI);
+ Assert1(SI.getAlignment() != 0,
+ "Atomic store must specify explicit alignment", &SI);
+ } else {
+ Assert1(SI.getSynchScope() == CrossThread,
+ "Non-atomic store cannot have SynchronizationScope specified", &SI);
+ }
visitInstruction(SI);
}
@@ -1316,6 +1358,49 @@ void Verifier::visitAllocaInst(AllocaInst &AI) {
visitInstruction(AI);
}
+void Verifier::visitAtomicCmpXchgInst(AtomicCmpXchgInst &CXI) {
+ Assert1(CXI.getOrdering() != NotAtomic,
+ "cmpxchg instructions must be atomic.", &CXI);
+ Assert1(CXI.getOrdering() != Unordered,
+ "cmpxchg instructions cannot be unordered.", &CXI);
+ PointerType *PTy = dyn_cast<PointerType>(CXI.getOperand(0)->getType());
+ Assert1(PTy, "First cmpxchg operand must be a pointer.", &CXI);
+ Type *ElTy = PTy->getElementType();
+ Assert2(ElTy == CXI.getOperand(1)->getType(),
+ "Expected value type does not match pointer operand type!",
+ &CXI, ElTy);
+ Assert2(ElTy == CXI.getOperand(2)->getType(),
+ "Stored value type does not match pointer operand type!",
+ &CXI, ElTy);
+ visitInstruction(CXI);
+}
+
+void Verifier::visitAtomicRMWInst(AtomicRMWInst &RMWI) {
+ Assert1(RMWI.getOrdering() != NotAtomic,
+ "atomicrmw instructions must be atomic.", &RMWI);
+ Assert1(RMWI.getOrdering() != Unordered,
+ "atomicrmw instructions cannot be unordered.", &RMWI);
+ PointerType *PTy = dyn_cast<PointerType>(RMWI.getOperand(0)->getType());
+ Assert1(PTy, "First atomicrmw operand must be a pointer.", &RMWI);
+ Type *ElTy = PTy->getElementType();
+ Assert2(ElTy == RMWI.getOperand(1)->getType(),
+ "Argument value type does not match pointer operand type!",
+ &RMWI, ElTy);
+ Assert1(AtomicRMWInst::FIRST_BINOP <= RMWI.getOperation() &&
+ RMWI.getOperation() <= AtomicRMWInst::LAST_BINOP,
+ "Invalid binary operation!", &RMWI);
+ visitInstruction(RMWI);
+}
+
+void Verifier::visitFenceInst(FenceInst &FI) {
+ const AtomicOrdering Ordering = FI.getOrdering();
+ Assert1(Ordering == Acquire || Ordering == Release ||
+ Ordering == AcquireRelease || Ordering == SequentiallyConsistent,
+ "fence instructions may only have "
+ "acquire, release, acq_rel, or seq_cst ordering.", &FI);
+ visitInstruction(FI);
+}
+
void Verifier::visitExtractValueInst(ExtractValueInst &EVI) {
Assert1(ExtractValueInst::getIndexedType(EVI.getAggregateOperand()->getType(),
EVI.getIndices()) ==
@@ -1334,6 +1419,55 @@ void Verifier::visitInsertValueInst(InsertValueInst &IVI) {
visitInstruction(IVI);
}
+void Verifier::visitLandingPadInst(LandingPadInst &LPI) {
+ BasicBlock *BB = LPI.getParent();
+
+ // The landingpad instruction is ill-formed if it doesn't have any clauses and
+ // isn't a cleanup.
+ Assert1(LPI.getNumClauses() > 0 || LPI.isCleanup(),
+ "LandingPadInst needs at least one clause or to be a cleanup.", &LPI);
+
+ // The landingpad instruction defines its parent as a landing pad block. The
+ // landing pad block may be branched to only by the unwind edge of an invoke.
+ for (pred_iterator I = pred_begin(BB), E = pred_end(BB); I != E; ++I) {
+ const InvokeInst *II = dyn_cast<InvokeInst>((*I)->getTerminator());
+ Assert1(II && II->getUnwindDest() == BB,
+ "Block containing LandingPadInst must be jumped to "
+ "only by the unwind edge of an invoke.", &LPI);
+ }
+
+ // The landingpad instruction must be the first non-PHI instruction in the
+ // block.
+ Assert1(LPI.getParent()->getLandingPadInst() == &LPI,
+ "LandingPadInst not the first non-PHI instruction in the block.",
+ &LPI);
+
+ // The personality functions for all landingpad instructions within the same
+ // function should match.
+ if (PersonalityFn)
+ Assert1(LPI.getPersonalityFn() == PersonalityFn,
+ "Personality function doesn't match others in function", &LPI);
+ PersonalityFn = LPI.getPersonalityFn();
+
+ // All operands must be constants.
+ Assert1(isa<Constant>(PersonalityFn), "Personality function is not constant!",
+ &LPI);
+ for (unsigned i = 0, e = LPI.getNumClauses(); i < e; ++i) {
+ Value *Clause = LPI.getClause(i);
+ Assert1(isa<Constant>(Clause), "Clause is not constant!", &LPI);
+ if (LPI.isCatch(i)) {
+ Assert1(isa<PointerType>(Clause->getType()),
+ "Catch operand does not have pointer type!", &LPI);
+ } else {
+ Assert1(LPI.isFilter(i), "Clause is neither catch nor filter!", &LPI);
+ Assert1(isa<ConstantArray>(Clause) || isa<ConstantAggregateZero>(Clause),
+ "Filter operand is not an array of constants!", &LPI);
+ }
+ }
+
+ visitInstruction(LPI);
+}
+
/// verifyInstruction - Verify that an instruction is well formed.
///
void Verifier::visitInstruction(Instruction &I) {