Diffstat (limited to 'lib/Transforms/Instrumentation')
-rw-r--r--  lib/Transforms/Instrumentation/AddressSanitizer.cpp  | 361
-rw-r--r--  lib/Transforms/Instrumentation/Android.mk             |   1
-rw-r--r--  lib/Transforms/Instrumentation/CMakeLists.txt         |   1
-rw-r--r--  lib/Transforms/Instrumentation/DataFlowSanitizer.cpp  | 282
-rw-r--r--  lib/Transforms/Instrumentation/DebugIR.cpp            |   8
-rw-r--r--  lib/Transforms/Instrumentation/DebugIR.h              |   6
-rw-r--r--  lib/Transforms/Instrumentation/GCOVProfiling.cpp      |  14
-rw-r--r--  lib/Transforms/Instrumentation/Instrumentation.cpp    |   1
-rw-r--r--  lib/Transforms/Instrumentation/MemorySanitizer.cpp    | 140
-rw-r--r--  lib/Transforms/Instrumentation/SanitizerCoverage.cpp  | 294
-rw-r--r--  lib/Transforms/Instrumentation/ThreadSanitizer.cpp    |  42
11 files changed, 791 insertions(+), 359 deletions(-)
diff --git a/lib/Transforms/Instrumentation/AddressSanitizer.cpp b/lib/Transforms/Instrumentation/AddressSanitizer.cpp
index 5e5ddc1..38f587f 100644
--- a/lib/Transforms/Instrumentation/AddressSanitizer.cpp
+++ b/lib/Transforms/Instrumentation/AddressSanitizer.cpp
@@ -40,6 +40,7 @@
#include "llvm/Support/DataTypes.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/Endian.h"
+#include "llvm/Transforms/Scalar.h"
#include "llvm/Transforms/Utils/ASanStackFrameLayout.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/Cloning.h"
@@ -59,7 +60,8 @@ static const uint64_t kIOSShadowOffset32 = 1ULL << 30;
static const uint64_t kDefaultShadowOffset64 = 1ULL << 44;
static const uint64_t kSmallX86_64ShadowOffset = 0x7FFF8000; // < 2G.
static const uint64_t kPPC64_ShadowOffset64 = 1ULL << 41;
-static const uint64_t kMIPS32_ShadowOffset32 = 0x0aaa8000;
+static const uint64_t kMIPS32_ShadowOffset32 = 0x0aaa0000;
+static const uint64_t kMIPS64_ShadowOffset64 = 1ULL << 36;
static const uint64_t kFreeBSD_ShadowOffset32 = 1ULL << 30;
static const uint64_t kFreeBSD_ShadowOffset64 = 1ULL << 46;
@@ -70,7 +72,7 @@ static const uintptr_t kRetiredStackFrameMagic = 0x45E0360E;
static const char *const kAsanModuleCtorName = "asan.module_ctor";
static const char *const kAsanModuleDtorName = "asan.module_dtor";
-static const int kAsanCtorAndDtorPriority = 1;
+static const uint64_t kAsanCtorAndDtorPriority = 1;
static const char *const kAsanReportErrorTemplate = "__asan_report_";
static const char *const kAsanReportLoadN = "__asan_report_load_n";
static const char *const kAsanReportStoreN = "__asan_report_store_n";
@@ -80,8 +82,6 @@ static const char *const kAsanUnregisterGlobalsName =
static const char *const kAsanPoisonGlobalsName = "__asan_before_dynamic_init";
static const char *const kAsanUnpoisonGlobalsName = "__asan_after_dynamic_init";
static const char *const kAsanInitName = "__asan_init_v4";
-static const char *const kAsanCovModuleInitName = "__sanitizer_cov_module_init";
-static const char *const kAsanCovName = "__sanitizer_cov";
static const char *const kAsanPtrCmp = "__sanitizer_ptr_cmp";
static const char *const kAsanPtrSub = "__sanitizer_ptr_sub";
static const char *const kAsanHandleNoReturnName = "__asan_handle_no_return";
@@ -89,6 +89,7 @@ static const int kMaxAsanStackMallocSizeClass = 10;
static const char *const kAsanStackMallocNameTemplate = "__asan_stack_malloc_";
static const char *const kAsanStackFreeNameTemplate = "__asan_stack_free_";
static const char *const kAsanGenPrefix = "__asan_gen_";
+static const char *const kSanCovGenPrefix = "__sancov_gen_";
static const char *const kAsanPoisonStackMemoryName =
"__asan_poison_stack_memory";
static const char *const kAsanUnpoisonStackMemoryName =
@@ -133,13 +134,6 @@ static cl::opt<bool> ClUseAfterReturn("asan-use-after-return",
// This flag may need to be replaced with -f[no]asan-globals.
static cl::opt<bool> ClGlobals("asan-globals",
cl::desc("Handle global objects"), cl::Hidden, cl::init(true));
-static cl::opt<int> ClCoverage("asan-coverage",
- cl::desc("ASan coverage. 0: none, 1: entry block, 2: all blocks"),
- cl::Hidden, cl::init(false));
-static cl::opt<int> ClCoverageBlockThreshold("asan-coverage-block-threshold",
- cl::desc("Add coverage instrumentation only to the entry block if there "
- "are more than this number of blocks."),
- cl::Hidden, cl::init(1500));
static cl::opt<bool> ClInitializers("asan-initialization-order",
cl::desc("Handle C++ initializer order"), cl::Hidden, cl::init(true));
static cl::opt<bool> ClInvalidPointerPairs("asan-detect-invalid-pointer-pair",
@@ -212,10 +206,40 @@ STATISTIC(NumOptimizedAccessesToGlobalVar,
"Number of optimized accesses to global vars");
namespace {
+/// Frontend-provided metadata for source location.
+struct LocationMetadata {
+ StringRef Filename;
+ int LineNo;
+ int ColumnNo;
+
+ LocationMetadata() : Filename(), LineNo(0), ColumnNo(0) {}
+
+ bool empty() const { return Filename.empty(); }
+
+ void parse(MDNode *MDN) {
+ assert(MDN->getNumOperands() == 3);
+ MDString *MDFilename = cast<MDString>(MDN->getOperand(0));
+ Filename = MDFilename->getString();
+ LineNo = cast<ConstantInt>(MDN->getOperand(1))->getLimitedValue();
+ ColumnNo = cast<ConstantInt>(MDN->getOperand(2))->getLimitedValue();
+ }
+};
+
/// Frontend-provided metadata for global variables.
class GlobalsMetadata {
public:
+ struct Entry {
+ Entry()
+ : SourceLoc(), Name(), IsDynInit(false),
+ IsBlacklisted(false) {}
+ LocationMetadata SourceLoc;
+ StringRef Name;
+ bool IsDynInit;
+ bool IsBlacklisted;
+ };
+
GlobalsMetadata() : inited_(false) {}
+
void init(Module& M) {
assert(!inited_);
inited_ = true;
@@ -223,76 +247,38 @@ class GlobalsMetadata {
if (!Globals)
return;
for (auto MDN : Globals->operands()) {
- // Format of the metadata node for the global:
- // {
- // global,
- // source_location,
- // i1 is_dynamically_initialized,
- // i1 is_blacklisted
- // }
- assert(MDN->getNumOperands() == 4);
+ // Metadata node contains the global and the fields of "Entry".
+ assert(MDN->getNumOperands() == 5);
Value *V = MDN->getOperand(0);
// The optimizer may optimize away a global entirely.
if (!V)
continue;
GlobalVariable *GV = cast<GlobalVariable>(V);
- if (Value *Loc = MDN->getOperand(1)) {
- GlobalVariable *GVLoc = cast<GlobalVariable>(Loc);
- // We may already know the source location for GV, if it was merged
- // with another global.
- if (SourceLocation.insert(std::make_pair(GV, GVLoc)).second)
- addSourceLocationGlobal(GVLoc);
+ // We can already have an entry for GV if it was merged with another
+ // global.
+ Entry &E = Entries[GV];
+ if (Value *Loc = MDN->getOperand(1))
+ E.SourceLoc.parse(cast<MDNode>(Loc));
+ if (Value *Name = MDN->getOperand(2)) {
+ MDString *MDName = cast<MDString>(Name);
+ E.Name = MDName->getString();
}
- ConstantInt *IsDynInit = cast<ConstantInt>(MDN->getOperand(2));
- if (IsDynInit->isOne())
- DynInitGlobals.insert(GV);
- ConstantInt *IsBlacklisted = cast<ConstantInt>(MDN->getOperand(3));
- if (IsBlacklisted->isOne())
- BlacklistedGlobals.insert(GV);
+ ConstantInt *IsDynInit = cast<ConstantInt>(MDN->getOperand(3));
+ E.IsDynInit |= IsDynInit->isOne();
+ ConstantInt *IsBlacklisted = cast<ConstantInt>(MDN->getOperand(4));
+ E.IsBlacklisted |= IsBlacklisted->isOne();
}
}
- GlobalVariable *getSourceLocation(GlobalVariable *G) const {
- auto Pos = SourceLocation.find(G);
- return (Pos != SourceLocation.end()) ? Pos->second : nullptr;
- }
-
- /// Check if the global is dynamically initialized.
- bool isDynInit(GlobalVariable *G) const {
- return DynInitGlobals.count(G);
- }
-
- /// Check if the global was blacklisted.
- bool isBlacklisted(GlobalVariable *G) const {
- return BlacklistedGlobals.count(G);
- }
-
- /// Check if the global was generated to describe source location of another
- /// global (we don't want to instrument them).
- bool isSourceLocationGlobal(GlobalVariable *G) const {
- return LocationGlobals.count(G);
+ /// Returns metadata entry for a given global.
+ Entry get(GlobalVariable *G) const {
+ auto Pos = Entries.find(G);
+ return (Pos != Entries.end()) ? Pos->second : Entry();
}
private:
bool inited_;
- DenseMap<GlobalVariable*, GlobalVariable*> SourceLocation;
- DenseSet<GlobalVariable*> DynInitGlobals;
- DenseSet<GlobalVariable*> BlacklistedGlobals;
- DenseSet<GlobalVariable*> LocationGlobals;
-
- void addSourceLocationGlobal(GlobalVariable *SourceLocGV) {
- // Source location global is a struct with layout:
- // {
- // filename,
- // i32 line_number,
- // i32 column_number,
- // }
- LocationGlobals.insert(SourceLocGV);
- ConstantStruct *Contents =
- cast<ConstantStruct>(SourceLocGV->getInitializer());
- GlobalVariable *FilenameGV = cast<GlobalVariable>(Contents->getOperand(0));
- LocationGlobals.insert(FilenameGV);
- }
+ DenseMap<GlobalVariable*, Entry> Entries;
};
/// This struct defines the shadow mapping using the rule:
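The refactor above collapses the separate SourceLocation/DynInitGlobals/BlacklistedGlobals side tables into a single Entry record per global. For reference, a frontend now describes each global with one node in the llvm.asan.globals named metadata whose five operands match the fields parsed above: the global, an optional {filename, line, column} location node, an optional name string, and the two i1 flags. A minimal sketch of emitting such an entry with the Metadata APIs of this era (the function name, file name and literal values are made-up examples, not part of the patch):

#include "llvm/IR/Constants.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Module.h"
using namespace llvm;

static void addAsanGlobalEntry(Module &M, GlobalVariable *GV) {
  LLVMContext &Ctx = M.getContext();
  NamedMDNode *AsanGlobals = M.getOrInsertNamedMetadata("llvm.asan.globals");
  Value *LocOps[] = {MDString::get(Ctx, "example.cpp"),            // filename
                     ConstantInt::get(Type::getInt32Ty(Ctx), 12),  // line
                     ConstantInt::get(Type::getInt32Ty(Ctx), 3)};  // column
  Value *Ops[] = {GV,                                // the global itself
                  MDNode::get(Ctx, LocOps),          // source location
                  MDString::get(Ctx, "my_global"),   // user-visible name
                  ConstantInt::getFalse(Ctx),        // is_dynamically_initialized
                  ConstantInt::getFalse(Ctx)};       // is_blacklisted
  AsanGlobals->addOperand(MDNode::get(Ctx, Ops));
}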
@@ -306,7 +292,7 @@ struct ShadowMapping {
static ShadowMapping getShadowMapping(const Module &M, int LongSize) {
llvm::Triple TargetTriple(M.getTargetTriple());
bool IsAndroid = TargetTriple.getEnvironment() == llvm::Triple::Android;
- bool IsIOS = TargetTriple.getOS() == llvm::Triple::IOS;
+ bool IsIOS = TargetTriple.isiOS();
bool IsFreeBSD = TargetTriple.getOS() == llvm::Triple::FreeBSD;
bool IsLinux = TargetTriple.getOS() == llvm::Triple::Linux;
bool IsPPC64 = TargetTriple.getArch() == llvm::Triple::ppc64 ||
@@ -314,6 +300,8 @@ static ShadowMapping getShadowMapping(const Module &M, int LongSize) {
bool IsX86_64 = TargetTriple.getArch() == llvm::Triple::x86_64;
bool IsMIPS32 = TargetTriple.getArch() == llvm::Triple::mips ||
TargetTriple.getArch() == llvm::Triple::mipsel;
+ bool IsMIPS64 = TargetTriple.getArch() == llvm::Triple::mips64 ||
+ TargetTriple.getArch() == llvm::Triple::mips64el;
ShadowMapping Mapping;
@@ -335,6 +323,8 @@ static ShadowMapping getShadowMapping(const Module &M, int LongSize) {
Mapping.Offset = kFreeBSD_ShadowOffset64;
else if (IsLinux && IsX86_64)
Mapping.Offset = kSmallX86_64ShadowOffset;
+ else if (IsMIPS64)
+ Mapping.Offset = kMIPS64_ShadowOffset64;
else
Mapping.Offset = kDefaultShadowOffset64;
}
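For orientation, all of these offsets plug into ASan's usual direct shadow mapping: every 8 bytes of application memory are described by one shadow byte located at (Addr >> Scale) + Offset, with Scale defaulting to 3, which is the arithmetic memToShadow() below emits as IR. A minimal sketch of that computation (the MIPS64 offset is the constant introduced by this patch; the rest is restated for illustration):

#include <cstdint>

static const uint64_t kMIPS64_ShadowOffset64 = 1ULL << 36;  // added above
static const int kDefaultShadowScale = 3;   // 8 app bytes -> 1 shadow byte

static uint64_t memToShadow(uint64_t Addr,
                            uint64_t Offset = kMIPS64_ShadowOffset64,
                            int Scale = kDefaultShadowScale) {
  return (Addr >> Scale) + Offset;  // mirrors AddressSanitizer::memToShadow
}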
@@ -386,8 +376,6 @@ struct AddressSanitizer : public FunctionPass {
bool LooksLikeCodeInBug11395(Instruction *I);
bool GlobalIsLinkerInitialized(GlobalVariable *G);
- bool InjectCoverage(Function &F, const ArrayRef<BasicBlock*> AllBlocks);
- void InjectCoverageAtBlock(Function &F, BasicBlock &BB);
LLVMContext *C;
const DataLayout *DL;
@@ -397,7 +385,6 @@ struct AddressSanitizer : public FunctionPass {
Function *AsanCtorFunction;
Function *AsanInitFunction;
Function *AsanHandleNoReturnFunc;
- Function *AsanCovFunction;
Function *AsanPtrCmpFunction, *AsanPtrSubFunction;
// This array is indexed by AccessIsWrite and log2(AccessSize).
Function *AsanErrorCallback[2][kNumberOfAccessSizes];
@@ -441,7 +428,6 @@ class AddressSanitizerModule : public ModulePass {
Function *AsanUnpoisonGlobals;
Function *AsanRegisterGlobals;
Function *AsanUnregisterGlobals;
- Function *AsanCovModuleInit;
};
// Stack poisoning does not play well with exception handling.
@@ -570,7 +556,7 @@ struct FunctionStackPoisoner : public InstVisitor<FunctionStackPoisoner> {
}
/// Finds alloca where the value comes from.
AllocaInst *findAllocaForValue(Value *V);
- void poisonRedZones(const ArrayRef<uint8_t> ShadowBytes, IRBuilder<> &IRB,
+ void poisonRedZones(ArrayRef<uint8_t> ShadowBytes, IRBuilder<> &IRB,
Value *ShadowBase, bool DoPoison);
void poisonAlloca(Value *V, uint64_t Size, IRBuilder<> &IRB, bool DoPoison);
@@ -617,8 +603,25 @@ static GlobalVariable *createPrivateGlobalForString(
return GV;
}
+/// \brief Create a global describing a source location.
+static GlobalVariable *createPrivateGlobalForSourceLoc(Module &M,
+ LocationMetadata MD) {
+ Constant *LocData[] = {
+ createPrivateGlobalForString(M, MD.Filename, true),
+ ConstantInt::get(Type::getInt32Ty(M.getContext()), MD.LineNo),
+ ConstantInt::get(Type::getInt32Ty(M.getContext()), MD.ColumnNo),
+ };
+ auto LocStruct = ConstantStruct::getAnon(LocData);
+ auto GV = new GlobalVariable(M, LocStruct->getType(), true,
+ GlobalValue::PrivateLinkage, LocStruct,
+ kAsanGenPrefix);
+ GV->setUnnamedAddr(true);
+ return GV;
+}
+
static bool GlobalWasGeneratedByAsan(GlobalVariable *G) {
- return G->getName().find(kAsanGenPrefix) == 0;
+ return G->getName().find(kAsanGenPrefix) == 0 ||
+ G->getName().find(kSanCovGenPrefix) == 0;
}
Value *AddressSanitizer::memToShadow(Value *Shadow, IRBuilder<> &IRB) {
@@ -653,9 +656,12 @@ void AddressSanitizer::instrumentMemIntrinsic(MemIntrinsic *MI) {
}
// If I is an interesting memory access, return the PointerOperand
-// and set IsWrite/Alignment. Otherwise return NULL.
+// and set IsWrite/Alignment. Otherwise return nullptr.
static Value *isInterestingMemoryAccess(Instruction *I, bool *IsWrite,
unsigned *Alignment) {
+ // Skip memory accesses inserted by another instrumentation.
+ if (I->getMetadata("nosanitize"))
+ return nullptr;
if (LoadInst *LI = dyn_cast<LoadInst>(I)) {
if (!ClInstrumentReads) return nullptr;
*IsWrite = false;
@@ -710,7 +716,7 @@ bool AddressSanitizer::GlobalIsLinkerInitialized(GlobalVariable *G) {
// If a global variable does not have dynamic initialization we don't
// have to instrument it. However, if a global does not have initializer
// at all, we assume it has dynamic initializer (in other TU).
- return G->hasInitializer() && !GlobalsMD.isDynInit(G);
+ return G->hasInitializer() && !GlobalsMD.get(G).IsDynInit;
}
void
@@ -859,8 +865,11 @@ void AddressSanitizer::instrumentAddress(Instruction *OrigIns,
TerminatorInst *CrashTerm = nullptr;
if (ClAlwaysSlowPath || (TypeSize < 8 * Granularity)) {
+ // We use branch weights for the slow path check, to indicate that the slow
+ // path is rarely taken. This seems to be the case for SPEC benchmarks.
TerminatorInst *CheckTerm =
- SplitBlockAndInsertIfThen(Cmp, InsertBefore, false);
+ SplitBlockAndInsertIfThen(Cmp, InsertBefore, false,
+ MDBuilder(*C).createBranchWeights(1, 100000));
assert(dyn_cast<BranchInst>(CheckTerm)->isUnconditional());
BasicBlock *NextBB = CheckTerm->getSuccessor(0);
IRB.SetInsertPoint(CheckTerm);
@@ -905,10 +914,12 @@ void AddressSanitizerModule::createInitializerPoisonCalls(
ConstantStruct *CS = cast<ConstantStruct>(OP);
// Must have a function or null ptr.
- // (CS->getOperand(0) is the init priority.)
if (Function* F = dyn_cast<Function>(CS->getOperand(1))) {
- if (F->getName() != kAsanModuleCtorName)
- poisonOneInitializer(*F, ModuleName);
+ if (F->getName() == kAsanModuleCtorName) continue;
+ ConstantInt *Priority = dyn_cast<ConstantInt>(CS->getOperand(0));
+ // Don't instrument CTORs that will run before asan.module_ctor.
+ if (Priority->getLimitedValue() <= kAsanCtorAndDtorPriority) continue;
+ poisonOneInitializer(*F, ModuleName);
}
}
}
@@ -917,8 +928,7 @@ bool AddressSanitizerModule::ShouldInstrumentGlobal(GlobalVariable *G) {
Type *Ty = cast<PointerType>(G->getType())->getElementType();
DEBUG(dbgs() << "GLOBAL: " << *G << "\n");
- if (GlobalsMD.isBlacklisted(G)) return false;
- if (GlobalsMD.isSourceLocationGlobal(G)) return false;
+ if (GlobalsMD.get(G).IsBlacklisted) return false;
if (!Ty->isSized()) return false;
if (!G->hasInitializer()) return false;
if (GlobalWasGeneratedByAsan(G)) return false; // Our own global.
@@ -939,16 +949,6 @@ bool AddressSanitizerModule::ShouldInstrumentGlobal(GlobalVariable *G) {
// For now, just ignore this Global if the alignment is large.
if (G->getAlignment() > MinRedzoneSizeForGlobal()) return false;
- // Ignore all the globals with the names starting with "\01L_OBJC_".
- // Many of those are put into the .cstring section. The linker compresses
- // that section by removing the spare \0s after the string terminator, so
- // our redzones get broken.
- if ((G->getName().find("\01L_OBJC_") == 0) ||
- (G->getName().find("\01l_OBJC_") == 0)) {
- DEBUG(dbgs() << "Ignoring \\01L_OBJC_* global: " << *G << "\n");
- return false;
- }
-
if (G->hasSection()) {
StringRef Section(G->getSection());
// Ignore the globals from the __OBJC section. The ObjC runtime assumes
@@ -977,6 +977,11 @@ bool AddressSanitizerModule::ShouldInstrumentGlobal(GlobalVariable *G) {
DEBUG(dbgs() << "Ignoring a cstring literal: " << *G << "\n");
return false;
}
+ if (Section.startswith("__TEXT,__objc_methname,cstring_literals")) {
+ DEBUG(dbgs() << "Ignoring objc_methname cstring global: " << *G << "\n");
+ return false;
+ }
+
// Callbacks put into the CRT initializer/terminator sections
// should not be instrumented.
@@ -998,24 +1003,20 @@ void AddressSanitizerModule::initializeCallbacks(Module &M) {
IRBuilder<> IRB(*C);
// Declare our poisoning and unpoisoning functions.
AsanPoisonGlobals = checkInterfaceFunction(M.getOrInsertFunction(
- kAsanPoisonGlobalsName, IRB.getVoidTy(), IntptrTy, NULL));
+ kAsanPoisonGlobalsName, IRB.getVoidTy(), IntptrTy, nullptr));
AsanPoisonGlobals->setLinkage(Function::ExternalLinkage);
AsanUnpoisonGlobals = checkInterfaceFunction(M.getOrInsertFunction(
- kAsanUnpoisonGlobalsName, IRB.getVoidTy(), NULL));
+ kAsanUnpoisonGlobalsName, IRB.getVoidTy(), nullptr));
AsanUnpoisonGlobals->setLinkage(Function::ExternalLinkage);
// Declare functions that register/unregister globals.
AsanRegisterGlobals = checkInterfaceFunction(M.getOrInsertFunction(
kAsanRegisterGlobalsName, IRB.getVoidTy(),
- IntptrTy, IntptrTy, NULL));
+ IntptrTy, IntptrTy, nullptr));
AsanRegisterGlobals->setLinkage(Function::ExternalLinkage);
AsanUnregisterGlobals = checkInterfaceFunction(M.getOrInsertFunction(
kAsanUnregisterGlobalsName,
- IRB.getVoidTy(), IntptrTy, IntptrTy, NULL));
+ IRB.getVoidTy(), IntptrTy, IntptrTy, nullptr));
AsanUnregisterGlobals->setLinkage(Function::ExternalLinkage);
- AsanCovModuleInit = checkInterfaceFunction(M.getOrInsertFunction(
- kAsanCovModuleInitName,
- IRB.getVoidTy(), IntptrTy, NULL));
- AsanCovModuleInit->setLinkage(Function::ExternalLinkage);
}
// This function replaces all global variables with new variables that have
@@ -1045,7 +1046,7 @@ bool AddressSanitizerModule::InstrumentGlobals(IRBuilder<> &IRB, Module &M) {
// We initialize an array of such structures and pass it to a run-time call.
StructType *GlobalStructTy =
StructType::get(IntptrTy, IntptrTy, IntptrTy, IntptrTy, IntptrTy,
- IntptrTy, IntptrTy, NULL);
+ IntptrTy, IntptrTy, nullptr);
SmallVector<Constant *, 16> Initializers(n);
bool HasDynamicallyInitializedGlobals = false;
@@ -1058,6 +1059,14 @@ bool AddressSanitizerModule::InstrumentGlobals(IRBuilder<> &IRB, Module &M) {
for (size_t i = 0; i < n; i++) {
static const uint64_t kMaxGlobalRedzone = 1 << 18;
GlobalVariable *G = GlobalsToChange[i];
+
+ auto MD = GlobalsMD.get(G);
+ // Create string holding the global name (use global name from metadata
+ // if it's available, otherwise just write the name of global variable).
+ GlobalVariable *Name = createPrivateGlobalForString(
+ M, MD.Name.empty() ? G->getName() : MD.Name,
+ /*AllowMerging*/ true);
+
PointerType *PtrTy = cast<PointerType>(G->getType());
Type *Ty = PtrTy->getElementType();
uint64_t SizeInBytes = DL->getTypeAllocSize(Ty);
@@ -1074,13 +1083,10 @@ bool AddressSanitizerModule::InstrumentGlobals(IRBuilder<> &IRB, Module &M) {
assert(((RightRedzoneSize + SizeInBytes) % MinRZ) == 0);
Type *RightRedZoneTy = ArrayType::get(IRB.getInt8Ty(), RightRedzoneSize);
- StructType *NewTy = StructType::get(Ty, RightRedZoneTy, NULL);
+ StructType *NewTy = StructType::get(Ty, RightRedZoneTy, nullptr);
Constant *NewInitializer = ConstantStruct::get(
NewTy, G->getInitializer(),
- Constant::getNullValue(RightRedZoneTy), NULL);
-
- GlobalVariable *Name =
- createPrivateGlobalForString(M, G->getName(), /*AllowMerging*/true);
+ Constant::getNullValue(RightRedZoneTy), nullptr);
// Create a new global variable with enough space for a redzone.
GlobalValue::LinkageTypes Linkage = G->getLinkage();
@@ -1101,8 +1107,13 @@ bool AddressSanitizerModule::InstrumentGlobals(IRBuilder<> &IRB, Module &M) {
NewGlobal->takeName(G);
G->eraseFromParent();
- bool GlobalHasDynamicInitializer = GlobalsMD.isDynInit(G);
- GlobalVariable *SourceLoc = GlobalsMD.getSourceLocation(G);
+ Constant *SourceLoc;
+ if (!MD.SourceLoc.empty()) {
+ auto SourceLocGlobal = createPrivateGlobalForSourceLoc(M, MD.SourceLoc);
+ SourceLoc = ConstantExpr::getPointerCast(SourceLocGlobal, IntptrTy);
+ } else {
+ SourceLoc = ConstantInt::get(IntptrTy, 0);
+ }
Initializers[i] = ConstantStruct::get(
GlobalStructTy, ConstantExpr::getPointerCast(NewGlobal, IntptrTy),
@@ -1110,12 +1121,9 @@ bool AddressSanitizerModule::InstrumentGlobals(IRBuilder<> &IRB, Module &M) {
ConstantInt::get(IntptrTy, SizeInBytes + RightRedzoneSize),
ConstantExpr::getPointerCast(Name, IntptrTy),
ConstantExpr::getPointerCast(ModuleName, IntptrTy),
- ConstantInt::get(IntptrTy, GlobalHasDynamicInitializer),
- SourceLoc ? ConstantExpr::getPointerCast(SourceLoc, IntptrTy)
- : ConstantInt::get(IntptrTy, 0),
- NULL);
+ ConstantInt::get(IntptrTy, MD.IsDynInit), SourceLoc, nullptr);
- if (ClInitializers && GlobalHasDynamicInitializer)
+ if (ClInitializers && MD.IsDynInit)
HasDynamicallyInitializedGlobals = true;
DEBUG(dbgs() << "NEW GLOBAL: " << *NewGlobal << "\n");
@@ -1166,13 +1174,6 @@ bool AddressSanitizerModule::runOnModule(Module &M) {
assert(CtorFunc);
IRBuilder<> IRB(CtorFunc->getEntryBlock().getTerminator());
- if (ClCoverage > 0) {
- Function *CovFunc = M.getFunction(kAsanCovName);
- int nCov = CovFunc ? CovFunc->getNumUses() : 0;
- IRB.CreateCall(AsanCovModuleInit, ConstantInt::get(IntptrTy, nCov));
- Changed = true;
- }
-
if (ClGlobals)
Changed |= InstrumentGlobals(IRB, M);
@@ -1191,43 +1192,42 @@ void AddressSanitizer::initializeCallbacks(Module &M) {
AsanErrorCallback[AccessIsWrite][AccessSizeIndex] =
checkInterfaceFunction(
M.getOrInsertFunction(kAsanReportErrorTemplate + Suffix,
- IRB.getVoidTy(), IntptrTy, NULL));
+ IRB.getVoidTy(), IntptrTy, nullptr));
AsanMemoryAccessCallback[AccessIsWrite][AccessSizeIndex] =
checkInterfaceFunction(
M.getOrInsertFunction(ClMemoryAccessCallbackPrefix + Suffix,
- IRB.getVoidTy(), IntptrTy, NULL));
+ IRB.getVoidTy(), IntptrTy, nullptr));
}
}
AsanErrorCallbackSized[0] = checkInterfaceFunction(M.getOrInsertFunction(
- kAsanReportLoadN, IRB.getVoidTy(), IntptrTy, IntptrTy, NULL));
+ kAsanReportLoadN, IRB.getVoidTy(), IntptrTy, IntptrTy, nullptr));
AsanErrorCallbackSized[1] = checkInterfaceFunction(M.getOrInsertFunction(
- kAsanReportStoreN, IRB.getVoidTy(), IntptrTy, IntptrTy, NULL));
+ kAsanReportStoreN, IRB.getVoidTy(), IntptrTy, IntptrTy, nullptr));
AsanMemoryAccessCallbackSized[0] = checkInterfaceFunction(
M.getOrInsertFunction(ClMemoryAccessCallbackPrefix + "loadN",
- IRB.getVoidTy(), IntptrTy, IntptrTy, NULL));
+ IRB.getVoidTy(), IntptrTy, IntptrTy, nullptr));
AsanMemoryAccessCallbackSized[1] = checkInterfaceFunction(
M.getOrInsertFunction(ClMemoryAccessCallbackPrefix + "storeN",
- IRB.getVoidTy(), IntptrTy, IntptrTy, NULL));
+ IRB.getVoidTy(), IntptrTy, IntptrTy, nullptr));
AsanMemmove = checkInterfaceFunction(M.getOrInsertFunction(
ClMemoryAccessCallbackPrefix + "memmove", IRB.getInt8PtrTy(),
- IRB.getInt8PtrTy(), IRB.getInt8PtrTy(), IntptrTy, NULL));
+ IRB.getInt8PtrTy(), IRB.getInt8PtrTy(), IntptrTy, nullptr));
AsanMemcpy = checkInterfaceFunction(M.getOrInsertFunction(
ClMemoryAccessCallbackPrefix + "memcpy", IRB.getInt8PtrTy(),
- IRB.getInt8PtrTy(), IRB.getInt8PtrTy(), IntptrTy, NULL));
+ IRB.getInt8PtrTy(), IRB.getInt8PtrTy(), IntptrTy, nullptr));
AsanMemset = checkInterfaceFunction(M.getOrInsertFunction(
ClMemoryAccessCallbackPrefix + "memset", IRB.getInt8PtrTy(),
- IRB.getInt8PtrTy(), IRB.getInt32Ty(), IntptrTy, NULL));
+ IRB.getInt8PtrTy(), IRB.getInt32Ty(), IntptrTy, nullptr));
AsanHandleNoReturnFunc = checkInterfaceFunction(
- M.getOrInsertFunction(kAsanHandleNoReturnName, IRB.getVoidTy(), NULL));
- AsanCovFunction = checkInterfaceFunction(M.getOrInsertFunction(
- kAsanCovName, IRB.getVoidTy(), NULL));
+ M.getOrInsertFunction(kAsanHandleNoReturnName, IRB.getVoidTy(), nullptr));
+
AsanPtrCmpFunction = checkInterfaceFunction(M.getOrInsertFunction(
- kAsanPtrCmp, IRB.getVoidTy(), IntptrTy, IntptrTy, NULL));
+ kAsanPtrCmp, IRB.getVoidTy(), IntptrTy, IntptrTy, nullptr));
AsanPtrSubFunction = checkInterfaceFunction(M.getOrInsertFunction(
- kAsanPtrSub, IRB.getVoidTy(), IntptrTy, IntptrTy, NULL));
+ kAsanPtrSub, IRB.getVoidTy(), IntptrTy, IntptrTy, nullptr));
// We insert an empty inline asm after __asan_report* to avoid callback merge.
EmptyAsm = InlineAsm::get(FunctionType::get(IRB.getVoidTy(), false),
StringRef(""), StringRef(""),
@@ -1255,7 +1255,7 @@ bool AddressSanitizer::doInitialization(Module &M) {
// call __asan_init in the module ctor.
IRBuilder<> IRB(ReturnInst::Create(*C, AsanCtorBB));
AsanInitFunction = checkInterfaceFunction(
- M.getOrInsertFunction(kAsanInitName, IRB.getVoidTy(), NULL));
+ M.getOrInsertFunction(kAsanInitName, IRB.getVoidTy(), nullptr));
AsanInitFunction->setLinkage(Function::ExternalLinkage);
IRB.CreateCall(AsanInitFunction);
@@ -1281,74 +1281,6 @@ bool AddressSanitizer::maybeInsertAsanInitAtFunctionEntry(Function &F) {
return false;
}
-void AddressSanitizer::InjectCoverageAtBlock(Function &F, BasicBlock &BB) {
- BasicBlock::iterator IP = BB.getFirstInsertionPt(), BE = BB.end();
- // Skip static allocas at the top of the entry block so they don't become
- // dynamic when we split the block. If we used our optimized stack layout,
- // then there will only be one alloca and it will come first.
- for (; IP != BE; ++IP) {
- AllocaInst *AI = dyn_cast<AllocaInst>(IP);
- if (!AI || !AI->isStaticAlloca())
- break;
- }
-
- DebugLoc EntryLoc = IP->getDebugLoc().getFnDebugLoc(*C);
- IRBuilder<> IRB(IP);
- IRB.SetCurrentDebugLocation(EntryLoc);
- Type *Int8Ty = IRB.getInt8Ty();
- GlobalVariable *Guard = new GlobalVariable(
- *F.getParent(), Int8Ty, false, GlobalValue::PrivateLinkage,
- Constant::getNullValue(Int8Ty), "__asan_gen_cov_" + F.getName());
- LoadInst *Load = IRB.CreateLoad(Guard);
- Load->setAtomic(Monotonic);
- Load->setAlignment(1);
- Value *Cmp = IRB.CreateICmpEQ(Constant::getNullValue(Int8Ty), Load);
- Instruction *Ins = SplitBlockAndInsertIfThen(
- Cmp, IP, false, MDBuilder(*C).createBranchWeights(1, 100000));
- IRB.SetInsertPoint(Ins);
- IRB.SetCurrentDebugLocation(EntryLoc);
- // We pass &F to __sanitizer_cov. We could avoid this and rely on
- // GET_CALLER_PC, but having the PC of the first instruction is just nice.
- IRB.CreateCall(AsanCovFunction);
- StoreInst *Store = IRB.CreateStore(ConstantInt::get(Int8Ty, 1), Guard);
- Store->setAtomic(Monotonic);
- Store->setAlignment(1);
-}
-
-// Poor man's coverage that works with ASan.
-// We create a Guard boolean variable with the same linkage
-// as the function and inject this code into the entry block (-asan-coverage=1)
-// or all blocks (-asan-coverage=2):
-// if (*Guard) {
-// __sanitizer_cov(&F);
-// *Guard = 1;
-// }
-// The accesses to Guard are atomic. The rest of the logic is
-// in __sanitizer_cov (it's fine to call it more than once).
-//
-// This coverage implementation provides very limited data:
-// it only tells if a given function (block) was ever executed.
-// No counters, no per-edge data.
-// But for many use cases this is what we need and the added slowdown
-// is negligible. This simple implementation will probably be obsoleted
-// by the upcoming Clang-based coverage implementation.
-// By having it here and now we hope to
-// a) get the functionality to users earlier and
-// b) collect usage statistics to help improve Clang coverage design.
-bool AddressSanitizer::InjectCoverage(Function &F,
- const ArrayRef<BasicBlock *> AllBlocks) {
- if (!ClCoverage) return false;
-
- if (ClCoverage == 1 ||
- (unsigned)ClCoverageBlockThreshold < AllBlocks.size()) {
- InjectCoverageAtBlock(F, F.getEntryBlock());
- } else {
- for (auto BB : AllBlocks)
- InjectCoverageAtBlock(F, *BB);
- }
- return true;
-}
-
bool AddressSanitizer::runOnFunction(Function &F) {
if (&F == AsanCtorFunction) return false;
if (F.getLinkage() == GlobalValue::AvailableExternallyLinkage) return false;
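The guard-based block coverage deleted here (together with the ClCoverage flags, kAsanCovName and AsanCovModuleInit removed earlier in this file) is not lost: the diffstat shows it reappearing as the new standalone SanitizerCoverage.cpp pass, registered in Instrumentation.cpp and the build files below. For reference, the per-block pattern the removed InjectCoverageAtBlock emitted corresponds roughly to the following; note that the generated IR calls the hook while the guard is still zero, so the removed comment's "if (*Guard)" pseudocode reads inverted relative to the actual ICmpEQ-with-zero check:

extern "C" void __sanitizer_cov();  // zero-argument runtime hook, as declared above

static unsigned char Guard;         // one private guard byte per instrumented block

static void instrumented_block_prologue() {  // hedged C++ rendering of the emitted IR
  if (__atomic_load_n(&Guard, __ATOMIC_RELAXED) == 0) {   // monotonic load, align 1
    __sanitizer_cov();              // records the caller's PC; fine to reach more than once
    __atomic_store_n(&Guard, 1, __ATOMIC_RELAXED);        // monotonic store
  }
}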
@@ -1385,7 +1317,7 @@ bool AddressSanitizer::runOnFunction(Function &F) {
if (Value *Addr =
isInterestingMemoryAccess(&Inst, &IsWrite, &Alignment)) {
if (ClOpt && ClOptSameTemp) {
- if (!TempsToInstrument.insert(Addr))
+ if (!TempsToInstrument.insert(Addr).second)
continue; // We've seen this temp in the current BB.
}
} else if (ClInvalidPointerPairs &&
@@ -1459,9 +1391,6 @@ bool AddressSanitizer::runOnFunction(Function &F) {
bool res = NumInstrumented > 0 || ChangedStack || !NoReturnCalls.empty();
- if (InjectCoverage(F, AllBlocks))
- res = true;
-
DEBUG(dbgs() << "ASAN done instrumenting: " << res << " " << F << "\n");
if (ClKeepUninstrumented) {
@@ -1499,19 +1428,21 @@ void FunctionStackPoisoner::initializeCallbacks(Module &M) {
std::string Suffix = itostr(i);
AsanStackMallocFunc[i] = checkInterfaceFunction(
M.getOrInsertFunction(kAsanStackMallocNameTemplate + Suffix, IntptrTy,
- IntptrTy, IntptrTy, NULL));
+ IntptrTy, IntptrTy, nullptr));
AsanStackFreeFunc[i] = checkInterfaceFunction(M.getOrInsertFunction(
kAsanStackFreeNameTemplate + Suffix, IRB.getVoidTy(), IntptrTy,
- IntptrTy, IntptrTy, NULL));
+ IntptrTy, IntptrTy, nullptr));
}
- AsanPoisonStackMemoryFunc = checkInterfaceFunction(M.getOrInsertFunction(
- kAsanPoisonStackMemoryName, IRB.getVoidTy(), IntptrTy, IntptrTy, NULL));
- AsanUnpoisonStackMemoryFunc = checkInterfaceFunction(M.getOrInsertFunction(
- kAsanUnpoisonStackMemoryName, IRB.getVoidTy(), IntptrTy, IntptrTy, NULL));
+ AsanPoisonStackMemoryFunc = checkInterfaceFunction(
+ M.getOrInsertFunction(kAsanPoisonStackMemoryName, IRB.getVoidTy(),
+ IntptrTy, IntptrTy, nullptr));
+ AsanUnpoisonStackMemoryFunc = checkInterfaceFunction(
+ M.getOrInsertFunction(kAsanUnpoisonStackMemoryName, IRB.getVoidTy(),
+ IntptrTy, IntptrTy, nullptr));
}
void
-FunctionStackPoisoner::poisonRedZones(const ArrayRef<uint8_t> ShadowBytes,
+FunctionStackPoisoner::poisonRedZones(ArrayRef<uint8_t> ShadowBytes,
IRBuilder<> &IRB, Value *ShadowBase,
bool DoPoison) {
size_t n = ShadowBytes.size();
diff --git a/lib/Transforms/Instrumentation/Android.mk b/lib/Transforms/Instrumentation/Android.mk
index f9a55c7..1f21028 100644
--- a/lib/Transforms/Instrumentation/Android.mk
+++ b/lib/Transforms/Instrumentation/Android.mk
@@ -8,6 +8,7 @@ instrumentation_SRC_FILES := \
GCOVProfiling.cpp \
Instrumentation.cpp \
MemorySanitizer.cpp \
+ SanitizerCoverage.cpp \
ThreadSanitizer.cpp
# For the host
diff --git a/lib/Transforms/Instrumentation/CMakeLists.txt b/lib/Transforms/Instrumentation/CMakeLists.txt
index 3563593..139e514 100644
--- a/lib/Transforms/Instrumentation/CMakeLists.txt
+++ b/lib/Transforms/Instrumentation/CMakeLists.txt
@@ -6,6 +6,7 @@ add_llvm_library(LLVMInstrumentation
GCOVProfiling.cpp
MemorySanitizer.cpp
Instrumentation.cpp
+ SanitizerCoverage.cpp
ThreadSanitizer.cpp
)
diff --git a/lib/Transforms/Instrumentation/DataFlowSanitizer.cpp b/lib/Transforms/Instrumentation/DataFlowSanitizer.cpp
index 799e14b..c5a4860 100644
--- a/lib/Transforms/Instrumentation/DataFlowSanitizer.cpp
+++ b/lib/Transforms/Instrumentation/DataFlowSanitizer.cpp
@@ -50,6 +50,8 @@
#include "llvm/ADT/DepthFirstIterator.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/Analysis/ValueTracking.h"
+#include "llvm/IR/Dominators.h"
+#include "llvm/IR/DebugInfo.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/InstVisitor.h"
@@ -62,7 +64,10 @@
#include "llvm/Support/SpecialCaseList.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/Local.h"
+#include <algorithm>
#include <iterator>
+#include <set>
+#include <utility>
using namespace llvm;
@@ -135,11 +140,11 @@ class DFSanABIList {
std::unique_ptr<SpecialCaseList> SCL;
public:
- DFSanABIList(SpecialCaseList *SCL) : SCL(SCL) {}
+ DFSanABIList(std::unique_ptr<SpecialCaseList> SCL) : SCL(std::move(SCL)) {}
/// Returns whether either this function or its source file are listed in the
/// given category.
- bool isIn(const Function &F, const StringRef Category) const {
+ bool isIn(const Function &F, StringRef Category) const {
return isIn(*F.getParent(), Category) ||
SCL->inSection("fun", F.getName(), Category);
}
@@ -148,7 +153,7 @@ class DFSanABIList {
///
/// If GA aliases a function, the alias's name is matched as a function name
/// would be. Similarly, aliases of globals are matched like globals.
- bool isIn(const GlobalAlias &GA, const StringRef Category) const {
+ bool isIn(const GlobalAlias &GA, StringRef Category) const {
if (isIn(*GA.getParent(), Category))
return true;
@@ -160,7 +165,7 @@ class DFSanABIList {
}
/// Returns whether this module is listed in the given category.
- bool isIn(const Module &M, const StringRef Category) const {
+ bool isIn(const Module &M, StringRef Category) const {
return SCL->inSection("src", M.getModuleIdentifier(), Category);
}
};
@@ -229,18 +234,21 @@ class DataFlowSanitizer : public ModulePass {
FunctionType *DFSanUnimplementedFnTy;
FunctionType *DFSanSetLabelFnTy;
FunctionType *DFSanNonzeroLabelFnTy;
+ FunctionType *DFSanVarargWrapperFnTy;
Constant *DFSanUnionFn;
+ Constant *DFSanCheckedUnionFn;
Constant *DFSanUnionLoadFn;
Constant *DFSanUnimplementedFn;
Constant *DFSanSetLabelFn;
Constant *DFSanNonzeroLabelFn;
+ Constant *DFSanVarargWrapperFn;
MDNode *ColdCallWeights;
DFSanABIList ABIList;
DenseMap<Value *, Function *> UnwrappedFnMap;
AttributeSet ReadOnlyNoneAttrs;
+ DenseMap<const Function *, DISubprogram> FunctionDIs;
Value *getShadowAddress(Value *Addr, Instruction *Pos);
- Value *combineShadows(Value *V1, Value *V2, Instruction *Pos);
bool isInstrumented(const Function *F);
bool isInstrumented(const GlobalAlias *GA);
FunctionType *getArgsFunctionType(FunctionType *T);
@@ -266,6 +274,7 @@ class DataFlowSanitizer : public ModulePass {
struct DFSanFunction {
DataFlowSanitizer &DFS;
Function *F;
+ DominatorTree DT;
DataFlowSanitizer::InstrumentedABI IA;
bool IsNativeABI;
Value *ArgTLSPtr;
@@ -275,17 +284,32 @@ struct DFSanFunction {
DenseMap<AllocaInst *, AllocaInst *> AllocaShadowMap;
std::vector<std::pair<PHINode *, PHINode *> > PHIFixups;
DenseSet<Instruction *> SkipInsts;
- DenseSet<Value *> NonZeroChecks;
+ std::vector<Value *> NonZeroChecks;
+ bool AvoidNewBlocks;
+
+ struct CachedCombinedShadow {
+ BasicBlock *Block;
+ Value *Shadow;
+ };
+ DenseMap<std::pair<Value *, Value *>, CachedCombinedShadow>
+ CachedCombinedShadows;
+ DenseMap<Value *, std::set<Value *>> ShadowElements;
DFSanFunction(DataFlowSanitizer &DFS, Function *F, bool IsNativeABI)
: DFS(DFS), F(F), IA(DFS.getInstrumentedABI()),
IsNativeABI(IsNativeABI), ArgTLSPtr(nullptr), RetvalTLSPtr(nullptr),
- LabelReturnAlloca(nullptr) {}
+ LabelReturnAlloca(nullptr) {
+ DT.recalculate(*F);
+ // FIXME: Need to track down the register allocator issue which causes poor
+ // performance in pathological cases with large numbers of basic blocks.
+ AvoidNewBlocks = F->size() > 1000;
+ }
Value *getArgTLSPtr();
Value *getArgTLS(unsigned Index, Instruction *Pos);
Value *getRetvalTLS();
Value *getShadow(Value *V);
void setShadow(Instruction *I, Value *Shadow);
+ Value *combineShadows(Value *V1, Value *V2, Instruction *Pos);
Value *combineOperandShadows(Instruction *Inst);
Value *loadShadow(Value *ShadowAddr, uint64_t Size, uint64_t Align,
Instruction *Pos);
@@ -367,7 +391,6 @@ FunctionType *DataFlowSanitizer::getTrampolineFunctionType(FunctionType *T) {
}
FunctionType *DataFlowSanitizer::getCustomFunctionType(FunctionType *T) {
- assert(!T->isVarArg());
llvm::SmallVector<Type *, 4> ArgTypes;
for (FunctionType::param_iterator i = T->param_begin(), e = T->param_end();
i != e; ++i) {
@@ -382,10 +405,12 @@ FunctionType *DataFlowSanitizer::getCustomFunctionType(FunctionType *T) {
}
for (unsigned i = 0, e = T->getNumParams(); i != e; ++i)
ArgTypes.push_back(ShadowTy);
+ if (T->isVarArg())
+ ArgTypes.push_back(ShadowPtrTy);
Type *RetType = T->getReturnType();
if (!RetType->isVoidTy())
ArgTypes.push_back(ShadowPtrTy);
- return FunctionType::get(T->getReturnType(), ArgTypes, false);
+ return FunctionType::get(T->getReturnType(), ArgTypes, T->isVarArg());
}
bool DataFlowSanitizer::doInitialization(Module &M) {
@@ -415,7 +440,9 @@ bool DataFlowSanitizer::doInitialization(Module &M) {
DFSanSetLabelFnTy = FunctionType::get(Type::getVoidTy(*Ctx),
DFSanSetLabelArgs, /*isVarArg=*/false);
DFSanNonzeroLabelFnTy = FunctionType::get(
- Type::getVoidTy(*Ctx), ArrayRef<Type *>(), /*isVarArg=*/false);
+ Type::getVoidTy(*Ctx), None, /*isVarArg=*/false);
+ DFSanVarargWrapperFnTy = FunctionType::get(
+ Type::getVoidTy(*Ctx), Type::getInt8PtrTy(*Ctx), /*isVarArg=*/false);
if (GetArgTLSPtr) {
Type *ArgTLSTy = ArrayType::get(ShadowTy, 64);
@@ -495,15 +522,26 @@ DataFlowSanitizer::buildWrapperFunction(Function *F, StringRef NewFName,
AttributeSet::ReturnIndex));
BasicBlock *BB = BasicBlock::Create(*Ctx, "entry", NewF);
- std::vector<Value *> Args;
- unsigned n = FT->getNumParams();
- for (Function::arg_iterator ai = NewF->arg_begin(); n != 0; ++ai, --n)
- Args.push_back(&*ai);
- CallInst *CI = CallInst::Create(F, Args, "", BB);
- if (FT->getReturnType()->isVoidTy())
- ReturnInst::Create(*Ctx, BB);
- else
- ReturnInst::Create(*Ctx, CI, BB);
+ if (F->isVarArg()) {
+ NewF->removeAttributes(
+ AttributeSet::FunctionIndex,
+ AttributeSet().addAttribute(*Ctx, AttributeSet::FunctionIndex,
+ "split-stack"));
+ CallInst::Create(DFSanVarargWrapperFn,
+ IRBuilder<>(BB).CreateGlobalStringPtr(F->getName()), "",
+ BB);
+ new UnreachableInst(*Ctx, BB);
+ } else {
+ std::vector<Value *> Args;
+ unsigned n = FT->getNumParams();
+ for (Function::arg_iterator ai = NewF->arg_begin(); n != 0; ++ai, --n)
+ Args.push_back(&*ai);
+ CallInst *CI = CallInst::Create(F, Args, "", BB);
+ if (FT->getReturnType()->isVoidTy())
+ ReturnInst::Create(*Ctx, BB);
+ else
+ ReturnInst::Create(*Ctx, CI, BB);
+ }
return NewF;
}
@@ -548,6 +586,8 @@ bool DataFlowSanitizer::runOnModule(Module &M) {
if (ABIList.isIn(M, "skip"))
return false;
+ FunctionDIs = makeSubprogramMap(M);
+
if (!GetArgTLSPtr) {
Type *ArgTLSTy = ArrayType::get(ShadowTy, 64);
ArgTLS = Mod->getOrInsertGlobal("__dfsan_arg_tls", ArgTLSTy);
@@ -562,6 +602,15 @@ bool DataFlowSanitizer::runOnModule(Module &M) {
DFSanUnionFn = Mod->getOrInsertFunction("__dfsan_union", DFSanUnionFnTy);
if (Function *F = dyn_cast<Function>(DFSanUnionFn)) {
+ F->addAttribute(AttributeSet::FunctionIndex, Attribute::NoUnwind);
+ F->addAttribute(AttributeSet::FunctionIndex, Attribute::ReadNone);
+ F->addAttribute(AttributeSet::ReturnIndex, Attribute::ZExt);
+ F->addAttribute(1, Attribute::ZExt);
+ F->addAttribute(2, Attribute::ZExt);
+ }
+ DFSanCheckedUnionFn = Mod->getOrInsertFunction("dfsan_union", DFSanUnionFnTy);
+ if (Function *F = dyn_cast<Function>(DFSanCheckedUnionFn)) {
+ F->addAttribute(AttributeSet::FunctionIndex, Attribute::NoUnwind);
F->addAttribute(AttributeSet::FunctionIndex, Attribute::ReadNone);
F->addAttribute(AttributeSet::ReturnIndex, Attribute::ZExt);
F->addAttribute(1, Attribute::ZExt);
@@ -570,6 +619,7 @@ bool DataFlowSanitizer::runOnModule(Module &M) {
DFSanUnionLoadFn =
Mod->getOrInsertFunction("__dfsan_union_load", DFSanUnionLoadFnTy);
if (Function *F = dyn_cast<Function>(DFSanUnionLoadFn)) {
+ F->addAttribute(AttributeSet::FunctionIndex, Attribute::NoUnwind);
F->addAttribute(AttributeSet::FunctionIndex, Attribute::ReadOnly);
F->addAttribute(AttributeSet::ReturnIndex, Attribute::ZExt);
}
@@ -582,16 +632,20 @@ bool DataFlowSanitizer::runOnModule(Module &M) {
}
DFSanNonzeroLabelFn =
Mod->getOrInsertFunction("__dfsan_nonzero_label", DFSanNonzeroLabelFnTy);
+ DFSanVarargWrapperFn = Mod->getOrInsertFunction("__dfsan_vararg_wrapper",
+ DFSanVarargWrapperFnTy);
std::vector<Function *> FnsToInstrument;
llvm::SmallPtrSet<Function *, 2> FnsWithNativeABI;
for (Module::iterator i = M.begin(), e = M.end(); i != e; ++i) {
if (!i->isIntrinsic() &&
i != DFSanUnionFn &&
+ i != DFSanCheckedUnionFn &&
i != DFSanUnionLoadFn &&
i != DFSanUnimplementedFn &&
i != DFSanSetLabelFn &&
- i != DFSanNonzeroLabelFn)
+ i != DFSanNonzeroLabelFn &&
+ i != DFSanVarargWrapperFn)
FnsToInstrument.push_back(&*i);
}
@@ -673,11 +727,6 @@ bool DataFlowSanitizer::runOnModule(Module &M) {
} else {
addGlobalNamePrefix(&F);
}
- // Hopefully, nobody will try to indirectly call a vararg
- // function... yet.
- } else if (FT->isVarArg()) {
- UnwrappedFnMap[&F] = &F;
- *i = nullptr;
} else if (!IsZeroArgsVoidRet || getWrapperKind(&F) == WK_Custom) {
// Build a wrapper function for F. The wrapper simply calls F, and is
// added to FnsToInstrument so that any instrumentation according to its
@@ -694,6 +743,12 @@ bool DataFlowSanitizer::runOnModule(Module &M) {
Value *WrappedFnCst =
ConstantExpr::getBitCast(NewF, PointerType::getUnqual(FT));
F.replaceAllUsesWith(WrappedFnCst);
+
+ // Patch the pointer to LLVM function in debug info descriptor.
+ auto DI = FunctionDIs.find(&F);
+ if (DI != FunctionDIs.end())
+ DI->second.replaceFunction(&F);
+
UnwrappedFnMap[WrappedFnCst] = &F;
*i = NewF;
@@ -713,6 +768,11 @@ bool DataFlowSanitizer::runOnModule(Module &M) {
i = FnsToInstrument.begin() + N;
e = FnsToInstrument.begin() + Count;
}
+ // Hopefully, nobody will try to indirectly call a vararg
+ // function... yet.
+ } else if (FT->isVarArg()) {
+ UnwrappedFnMap[&F] = &F;
+ *i = nullptr;
}
}
@@ -771,18 +831,16 @@ bool DataFlowSanitizer::runOnModule(Module &M) {
// yet). To make our life easier, do this work in a pass after the main
// instrumentation.
if (ClDebugNonzeroLabels) {
- for (DenseSet<Value *>::iterator i = DFSF.NonZeroChecks.begin(),
- e = DFSF.NonZeroChecks.end();
- i != e; ++i) {
+ for (Value *V : DFSF.NonZeroChecks) {
Instruction *Pos;
- if (Instruction *I = dyn_cast<Instruction>(*i))
+ if (Instruction *I = dyn_cast<Instruction>(V))
Pos = I->getNextNode();
else
Pos = DFSF.F->getEntryBlock().begin();
while (isa<PHINode>(Pos) || isa<AllocaInst>(Pos))
Pos = Pos->getNextNode();
IRBuilder<> IRB(Pos);
- Value *Ne = IRB.CreateICmpNE(*i, DFSF.DFS.ZeroShadow);
+ Value *Ne = IRB.CreateICmpNE(V, DFSF.DFS.ZeroShadow);
BranchInst *BI = cast<BranchInst>(SplitBlockAndInsertIfThen(
Ne, Pos, /*Unreachable=*/false, ColdCallWeights));
IRBuilder<> ThenIRB(BI);
@@ -847,7 +905,7 @@ Value *DFSanFunction::getShadow(Value *V) {
break;
}
}
- NonZeroChecks.insert(Shadow);
+ NonZeroChecks.push_back(Shadow);
} else {
Shadow = DFS.ZeroShadow;
}
@@ -873,30 +931,82 @@ Value *DataFlowSanitizer::getShadowAddress(Value *Addr, Instruction *Pos) {
// Generates IR to compute the union of the two given shadows, inserting it
// before Pos. Returns the computed union Value.
-Value *DataFlowSanitizer::combineShadows(Value *V1, Value *V2,
- Instruction *Pos) {
- if (V1 == ZeroShadow)
+Value *DFSanFunction::combineShadows(Value *V1, Value *V2, Instruction *Pos) {
+ if (V1 == DFS.ZeroShadow)
return V2;
- if (V2 == ZeroShadow)
+ if (V2 == DFS.ZeroShadow)
return V1;
if (V1 == V2)
return V1;
+
+ auto V1Elems = ShadowElements.find(V1);
+ auto V2Elems = ShadowElements.find(V2);
+ if (V1Elems != ShadowElements.end() && V2Elems != ShadowElements.end()) {
+ if (std::includes(V1Elems->second.begin(), V1Elems->second.end(),
+ V2Elems->second.begin(), V2Elems->second.end())) {
+ return V1;
+ } else if (std::includes(V2Elems->second.begin(), V2Elems->second.end(),
+ V1Elems->second.begin(), V1Elems->second.end())) {
+ return V2;
+ }
+ } else if (V1Elems != ShadowElements.end()) {
+ if (V1Elems->second.count(V2))
+ return V1;
+ } else if (V2Elems != ShadowElements.end()) {
+ if (V2Elems->second.count(V1))
+ return V2;
+ }
+
+ auto Key = std::make_pair(V1, V2);
+ if (V1 > V2)
+ std::swap(Key.first, Key.second);
+ CachedCombinedShadow &CCS = CachedCombinedShadows[Key];
+ if (CCS.Block && DT.dominates(CCS.Block, Pos->getParent()))
+ return CCS.Shadow;
+
IRBuilder<> IRB(Pos);
- BasicBlock *Head = Pos->getParent();
- Value *Ne = IRB.CreateICmpNE(V1, V2);
- BranchInst *BI = cast<BranchInst>(SplitBlockAndInsertIfThen(
- Ne, Pos, /*Unreachable=*/false, ColdCallWeights));
- IRBuilder<> ThenIRB(BI);
- CallInst *Call = ThenIRB.CreateCall2(DFSanUnionFn, V1, V2);
- Call->addAttribute(AttributeSet::ReturnIndex, Attribute::ZExt);
- Call->addAttribute(1, Attribute::ZExt);
- Call->addAttribute(2, Attribute::ZExt);
-
- BasicBlock *Tail = BI->getSuccessor(0);
- PHINode *Phi = PHINode::Create(ShadowTy, 2, "", Tail->begin());
- Phi->addIncoming(Call, Call->getParent());
- Phi->addIncoming(V1, Head);
- return Phi;
+ if (AvoidNewBlocks) {
+ CallInst *Call = IRB.CreateCall2(DFS.DFSanCheckedUnionFn, V1, V2);
+ Call->addAttribute(AttributeSet::ReturnIndex, Attribute::ZExt);
+ Call->addAttribute(1, Attribute::ZExt);
+ Call->addAttribute(2, Attribute::ZExt);
+
+ CCS.Block = Pos->getParent();
+ CCS.Shadow = Call;
+ } else {
+ BasicBlock *Head = Pos->getParent();
+ Value *Ne = IRB.CreateICmpNE(V1, V2);
+ BranchInst *BI = cast<BranchInst>(SplitBlockAndInsertIfThen(
+ Ne, Pos, /*Unreachable=*/false, DFS.ColdCallWeights, &DT));
+ IRBuilder<> ThenIRB(BI);
+ CallInst *Call = ThenIRB.CreateCall2(DFS.DFSanUnionFn, V1, V2);
+ Call->addAttribute(AttributeSet::ReturnIndex, Attribute::ZExt);
+ Call->addAttribute(1, Attribute::ZExt);
+ Call->addAttribute(2, Attribute::ZExt);
+
+ BasicBlock *Tail = BI->getSuccessor(0);
+ PHINode *Phi = PHINode::Create(DFS.ShadowTy, 2, "", Tail->begin());
+ Phi->addIncoming(Call, Call->getParent());
+ Phi->addIncoming(V1, Head);
+
+ CCS.Block = Tail;
+ CCS.Shadow = Phi;
+ }
+
+ std::set<Value *> UnionElems;
+ if (V1Elems != ShadowElements.end()) {
+ UnionElems = V1Elems->second;
+ } else {
+ UnionElems.insert(V1);
+ }
+ if (V2Elems != ShadowElements.end()) {
+ UnionElems.insert(V2Elems->second.begin(), V2Elems->second.end());
+ } else {
+ UnionElems.insert(V2);
+ }
+ ShadowElements[CCS.Shadow] = std::move(UnionElems);
+
+ return CCS.Shadow;
}
// A convenience function which folds the shadows of each of the operands
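combineShadows is now a DFSanFunction member so it can consult the per-function dominator tree and the two new caches (CachedCombinedShadows and ShadowElements). When AvoidNewBlocks is set it emits a straight-line call to the checked runtime union "dfsan_union" instead of splitting the block around "__dfsan_union". Observable behaviour at the user level is unchanged: the label of a result is the union of its operands' labels. A minimal end-to-end sketch, assuming the public interface in <sanitizer/dfsan_interface.h> and a build with -fsanitize=dataflow:

#include <sanitizer/dfsan_interface.h>
#include <cassert>

int main() {
  int a = 1, b = 2;
  dfsan_label la = dfsan_create_label("a", nullptr);
  dfsan_label lb = dfsan_create_label("b", nullptr);
  dfsan_set_label(la, &a, sizeof a);
  dfsan_set_label(lb, &b, sizeof b);
  int c = a + b;                        // instrumented add: labels get unioned
  dfsan_label lc = dfsan_get_label(c);
  assert(dfsan_has_label(lc, la) && dfsan_has_label(lc, lb));
  return 0;
}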
@@ -908,7 +1018,7 @@ Value *DFSanFunction::combineOperandShadows(Instruction *Inst) {
Value *Shadow = getShadow(Inst->getOperand(0));
for (unsigned i = 1, n = Inst->getNumOperands(); i != n; ++i) {
- Shadow = DFS.combineShadows(Shadow, getShadow(Inst->getOperand(i)), Inst);
+ Shadow = combineShadows(Shadow, getShadow(Inst->getOperand(i)), Inst);
}
return Shadow;
}
@@ -961,12 +1071,11 @@ Value *DFSanFunction::loadShadow(Value *Addr, uint64_t Size, uint64_t Align,
IRBuilder<> IRB(Pos);
Value *ShadowAddr1 =
IRB.CreateGEP(ShadowAddr, ConstantInt::get(DFS.IntptrTy, 1));
- return DFS.combineShadows(IRB.CreateAlignedLoad(ShadowAddr, ShadowAlign),
- IRB.CreateAlignedLoad(ShadowAddr1, ShadowAlign),
- Pos);
+ return combineShadows(IRB.CreateAlignedLoad(ShadowAddr, ShadowAlign),
+ IRB.CreateAlignedLoad(ShadowAddr1, ShadowAlign), Pos);
}
}
- if (Size % (64 / DFS.ShadowWidth) == 0) {
+ if (!AvoidNewBlocks && Size % (64 / DFS.ShadowWidth) == 0) {
// Fast path for the common case where each byte has identical shadow: load
// shadow 64 bits at a time, fall out to a __dfsan_union_load call if any
// shadow is non-equal.
@@ -990,16 +1099,27 @@ Value *DFSanFunction::loadShadow(Value *Addr, uint64_t Size, uint64_t Align,
BasicBlock *Head = Pos->getParent();
BasicBlock *Tail = Head->splitBasicBlock(Pos);
+
+ if (DomTreeNode *OldNode = DT.getNode(Head)) {
+ std::vector<DomTreeNode *> Children(OldNode->begin(), OldNode->end());
+
+ DomTreeNode *NewNode = DT.addNewBlock(Tail, Head);
+ for (auto Child : Children)
+ DT.changeImmediateDominator(Child, NewNode);
+ }
+
// In the following code LastBr will refer to the previous basic block's
// conditional branch instruction, whose true successor is fixed up to point
// to the next block during the loop below or to the tail after the final
// iteration.
BranchInst *LastBr = BranchInst::Create(FallbackBB, FallbackBB, ShadowsEq);
ReplaceInstWithInst(Head->getTerminator(), LastBr);
+ DT.addNewBlock(FallbackBB, Head);
for (uint64_t Ofs = 64 / DFS.ShadowWidth; Ofs != Size;
Ofs += 64 / DFS.ShadowWidth) {
BasicBlock *NextBB = BasicBlock::Create(*DFS.Ctx, "", F);
+ DT.addNewBlock(NextBB, LastBr->getParent());
IRBuilder<> NextIRB(NextBB);
WideAddr = NextIRB.CreateGEP(WideAddr, ConstantInt::get(DFS.IntptrTy, 1));
Value *NextWideShadow = NextIRB.CreateAlignedLoad(WideAddr, ShadowAlign);
@@ -1025,6 +1145,11 @@ Value *DFSanFunction::loadShadow(Value *Addr, uint64_t Size, uint64_t Align,
void DFSanVisitor::visitLoadInst(LoadInst &LI) {
uint64_t Size = DFSF.DFS.DL->getTypeStoreSize(LI.getType());
+ if (Size == 0) {
+ DFSF.setShadow(&LI, DFSF.DFS.ZeroShadow);
+ return;
+ }
+
uint64_t Align;
if (ClPreserveAlignment) {
Align = LI.getAlignment();
@@ -1037,10 +1162,10 @@ void DFSanVisitor::visitLoadInst(LoadInst &LI) {
Value *Shadow = DFSF.loadShadow(LI.getPointerOperand(), Size, Align, &LI);
if (ClCombinePointerLabelsOnLoad) {
Value *PtrShadow = DFSF.getShadow(LI.getPointerOperand());
- Shadow = DFSF.DFS.combineShadows(Shadow, PtrShadow, &LI);
+ Shadow = DFSF.combineShadows(Shadow, PtrShadow, &LI);
}
if (Shadow != DFSF.DFS.ZeroShadow)
- DFSF.NonZeroChecks.insert(Shadow);
+ DFSF.NonZeroChecks.push_back(Shadow);
DFSF.setShadow(&LI, Shadow);
}
@@ -1099,6 +1224,9 @@ void DFSanFunction::storeShadow(Value *Addr, uint64_t Size, uint64_t Align,
void DFSanVisitor::visitStoreInst(StoreInst &SI) {
uint64_t Size =
DFSF.DFS.DL->getTypeStoreSize(SI.getValueOperand()->getType());
+ if (Size == 0)
+ return;
+
uint64_t Align;
if (ClPreserveAlignment) {
Align = SI.getAlignment();
@@ -1111,7 +1239,7 @@ void DFSanVisitor::visitStoreInst(StoreInst &SI) {
Value* Shadow = DFSF.getShadow(SI.getValueOperand());
if (ClCombinePointerLabelsOnStore) {
Value *PtrShadow = DFSF.getShadow(SI.getPointerOperand());
- Shadow = DFSF.DFS.combineShadows(Shadow, PtrShadow, &SI);
+ Shadow = DFSF.combineShadows(Shadow, PtrShadow, &SI);
}
DFSF.storeShadow(SI.getPointerOperand(), Size, Align, Shadow, &SI);
}
@@ -1176,9 +1304,9 @@ void DFSanVisitor::visitSelectInst(SelectInst &I) {
if (isa<VectorType>(I.getCondition()->getType())) {
DFSF.setShadow(
- &I, DFSF.DFS.combineShadows(
- CondShadow,
- DFSF.DFS.combineShadows(TrueShadow, FalseShadow, &I), &I));
+ &I,
+ DFSF.combineShadows(
+ CondShadow, DFSF.combineShadows(TrueShadow, FalseShadow, &I), &I));
} else {
Value *ShadowSel;
if (TrueShadow == FalseShadow) {
@@ -1187,7 +1315,7 @@ void DFSanVisitor::visitSelectInst(SelectInst &I) {
ShadowSel =
SelectInst::Create(I.getCondition(), TrueShadow, FalseShadow, "", &I);
}
- DFSF.setShadow(&I, DFSF.DFS.combineShadows(CondShadow, ShadowSel, &I));
+ DFSF.setShadow(&I, DFSF.combineShadows(CondShadow, ShadowSel, &I));
}
}
@@ -1253,6 +1381,15 @@ void DFSanVisitor::visitCallSite(CallSite CS) {
return;
}
+ // Calls to this function are synthesized in wrappers, and we shouldn't
+ // instrument them.
+ if (F == DFSF.DFS.DFSanVarargWrapperFn)
+ return;
+
+ assert(!(cast<FunctionType>(
+ CS.getCalledValue()->getType()->getPointerElementType())->isVarArg() &&
+ dyn_cast<InvokeInst>(CS.getInstruction())));
+
IRBuilder<> IRB(CS.getInstruction());
DenseMap<Value *, Function *>::iterator i =
@@ -1324,6 +1461,20 @@ void DFSanVisitor::visitCallSite(CallSite CS) {
for (unsigned n = FT->getNumParams(); n != 0; ++i, --n)
Args.push_back(DFSF.getShadow(*i));
+ if (FT->isVarArg()) {
+ auto LabelVAAlloca =
+ new AllocaInst(ArrayType::get(DFSF.DFS.ShadowTy,
+ CS.arg_size() - FT->getNumParams()),
+ "labelva", DFSF.F->getEntryBlock().begin());
+
+ for (unsigned n = 0; i != CS.arg_end(); ++i, ++n) {
+ auto LabelVAPtr = IRB.CreateStructGEP(LabelVAAlloca, n);
+ IRB.CreateStore(DFSF.getShadow(*i), LabelVAPtr);
+ }
+
+ Args.push_back(IRB.CreateStructGEP(LabelVAAlloca, 0));
+ }
+
if (!FT->getReturnType()->isVoidTy()) {
if (!DFSF.LabelReturnAlloca) {
DFSF.LabelReturnAlloca =
@@ -1333,6 +1484,9 @@ void DFSanVisitor::visitCallSite(CallSite CS) {
Args.push_back(DFSF.LabelReturnAlloca);
}
+ for (i = CS.arg_begin() + FT->getNumParams(); i != CS.arg_end(); ++i)
+ Args.push_back(*i);
+
CallInst *CustomCI = IRB.CreateCall(CustomF, Args);
CustomCI->setCallingConv(CI->getCallingConv());
CustomCI->setAttributes(CI->getAttributes());
@@ -1379,7 +1533,7 @@ void DFSanVisitor::visitCallSite(CallSite CS) {
LoadInst *LI = NextIRB.CreateLoad(DFSF.getRetvalTLS());
DFSF.SkipInsts.insert(LI);
DFSF.setShadow(CS.getInstruction(), LI);
- DFSF.NonZeroChecks.insert(LI);
+ DFSF.NonZeroChecks.push_back(LI);
}
}
@@ -1433,7 +1587,7 @@ void DFSanVisitor::visitCallSite(CallSite CS) {
ExtractValueInst::Create(NewCS.getInstruction(), 1, "", Next);
DFSF.SkipInsts.insert(ExShadow);
DFSF.setShadow(ExVal, ExShadow);
- DFSF.NonZeroChecks.insert(ExShadow);
+ DFSF.NonZeroChecks.push_back(ExShadow);
CS.getInstruction()->replaceAllUsesWith(ExVal);
}
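The vararg support threaded through visitCallSite above extends the custom-wrapper ABI: labels for the fixed arguments are still passed by value, labels for the variadic arguments are spilled into the "labelva" stack array whose address is passed next, followed by the return-label slot and finally the original variadic arguments themselves. As an illustration only (my_logf is a made-up function; the __dfsw_ prefix is DFSan's naming convention for custom wrappers), a wrapper for a vararg function marked "custom" in the ABI list would be declared roughly like this:

#include <sanitizer/dfsan_interface.h>

// Assumed original declaration: int my_logf(const char *fmt, ...);
extern "C" int __dfsw_my_logf(const char *fmt,
                              dfsan_label fmt_label,   // label of the fixed argument
                              dfsan_label *va_labels,  // labels of the variadic arguments
                              dfsan_label *ret_label,  // out-param: label of the result
                              ...);                    // the original variadic arguments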
diff --git a/lib/Transforms/Instrumentation/DebugIR.cpp b/lib/Transforms/Instrumentation/DebugIR.cpp
index f2f1738..5234341 100644
--- a/lib/Transforms/Instrumentation/DebugIR.cpp
+++ b/lib/Transforms/Instrumentation/DebugIR.cpp
@@ -396,7 +396,7 @@ private:
Elements.push_back(getOrCreateType(T->getStructElementType(i)));
// set struct elements
- StructDescriptor.setTypeArray(Builder.getOrCreateArray(Elements));
+ StructDescriptor.setArrays(Builder.getOrCreateArray(Elements));
} else if (T->isPointerTy()) {
Type *PointeeTy = T->getPointerElementType();
if (!(N = getType(PointeeTy)))
@@ -440,7 +440,7 @@ private:
Params.push_back(getOrCreateType(T));
}
- DIArray ParamArray = Builder.getOrCreateArray(Params);
+ DITypeArray ParamArray = Builder.getOrCreateTypeArray(Params);
return Builder.createSubroutineType(DIFile(FileNode), ParamArray);
}
@@ -525,11 +525,11 @@ std::string DebugIR::getPath() {
void DebugIR::writeDebugBitcode(const Module *M, int *fd) {
std::unique_ptr<raw_fd_ostream> Out;
- std::string error;
+ std::error_code EC;
if (!fd) {
std::string Path = getPath();
- Out.reset(new raw_fd_ostream(Path.c_str(), error, sys::fs::F_Text));
+ Out.reset(new raw_fd_ostream(Path, EC, sys::fs::F_Text));
DEBUG(dbgs() << "WRITING debug bitcode from Module " << M << " to file "
<< Path << "\n");
} else {
diff --git a/lib/Transforms/Instrumentation/DebugIR.h b/lib/Transforms/Instrumentation/DebugIR.h
index 02831ed..8d74a4d 100644
--- a/lib/Transforms/Instrumentation/DebugIR.h
+++ b/lib/Transforms/Instrumentation/DebugIR.h
@@ -13,8 +13,8 @@
//
//===----------------------------------------------------------------------===//
-#ifndef LLVM_TRANSFORMS_INSTRUMENTATION_DEBUGIR_H
-#define LLVM_TRANSFORMS_INSTRUMENTATION_DEBUGIR_H
+#ifndef LLVM_LIB_TRANSFORMS_INSTRUMENTATION_DEBUGIR_H
+#define LLVM_LIB_TRANSFORMS_INSTRUMENTATION_DEBUGIR_H
#include "llvm/Pass.h"
@@ -95,4 +95,4 @@ private:
} // llvm namespace
-#endif // LLVM_TRANSFORMS_INSTRUMENTATION_DEBUGIR_H
+#endif
diff --git a/lib/Transforms/Instrumentation/GCOVProfiling.cpp b/lib/Transforms/Instrumentation/GCOVProfiling.cpp
index cfeb62e..220d7f8 100644
--- a/lib/Transforms/Instrumentation/GCOVProfiling.cpp
+++ b/lib/Transforms/Instrumentation/GCOVProfiling.cpp
@@ -480,12 +480,12 @@ void GCOVProfiler::emitProfileNotes() {
// LTO, we'll generate the same .gcno files.
DICompileUnit CU(CU_Nodes->getOperand(i));
- std::string ErrorInfo;
- raw_fd_ostream out(mangleName(CU, "gcno").c_str(), ErrorInfo,
- sys::fs::F_None);
+ std::error_code EC;
+ raw_fd_ostream out(mangleName(CU, "gcno"), EC, sys::fs::F_None);
std::string EdgeDestinations;
DIArray SPs = CU.getSubprograms();
+ unsigned FunctionIdent = 0;
for (unsigned i = 0, e = SPs.getNumElements(); i != e; ++i) {
DISubprogram SP(SPs.getElement(i));
assert((!SP || SP.isSubprogram()) &&
@@ -505,8 +505,8 @@ void GCOVProfiler::emitProfileNotes() {
++It;
EntryBlock.splitBasicBlock(It);
- Funcs.push_back(
- make_unique<GCOVFunction>(SP, &out, i, Options.UseCfgChecksum));
+ Funcs.push_back(make_unique<GCOVFunction>(SP, &out, FunctionIdent++,
+ Options.UseCfgChecksum));
GCOVFunction &Func = *Funcs.back();
for (Function::iterator BB = F->begin(), E = F->end(); BB != E; ++BB) {
@@ -738,11 +738,11 @@ GlobalVariable *GCOVProfiler::buildEdgeLookupTable(
Edge += Successors;
}
- ArrayRef<Constant*> V(&EdgeTable[0], TableSize);
GlobalVariable *EdgeTableGV =
new GlobalVariable(
*M, EdgeTableTy, true, GlobalValue::InternalLinkage,
- ConstantArray::get(EdgeTableTy, V),
+ ConstantArray::get(EdgeTableTy,
+ makeArrayRef(&EdgeTable[0],TableSize)),
"__llvm_gcda_edge_table");
EdgeTableGV->setUnnamedAddr(true);
return EdgeTableGV;
diff --git a/lib/Transforms/Instrumentation/Instrumentation.cpp b/lib/Transforms/Instrumentation/Instrumentation.cpp
index ac1dd43..8e95367 100644
--- a/lib/Transforms/Instrumentation/Instrumentation.cpp
+++ b/lib/Transforms/Instrumentation/Instrumentation.cpp
@@ -27,6 +27,7 @@ void llvm::initializeInstrumentation(PassRegistry &Registry) {
initializeGCOVProfilerPass(Registry);
initializeMemorySanitizerPass(Registry);
initializeThreadSanitizerPass(Registry);
+ initializeSanitizerCoverageModulePass(Registry);
initializeDataFlowSanitizerPass(Registry);
}
diff --git a/lib/Transforms/Instrumentation/MemorySanitizer.cpp b/lib/Transforms/Instrumentation/MemorySanitizer.cpp
index 496ab48..1261259 100644
--- a/lib/Transforms/Instrumentation/MemorySanitizer.cpp
+++ b/lib/Transforms/Instrumentation/MemorySanitizer.cpp
@@ -127,6 +127,10 @@ static const uint64_t kOriginOffset64 = 1ULL << 45;
static const unsigned kMinOriginAlignment = 4;
static const unsigned kShadowTLSAlignment = 8;
+// These constants must be kept in sync with the ones in msan.h.
+static const unsigned kParamTLSSize = 800;
+static const unsigned kRetvalTLSSize = 800;
+
// Accesses sizes are powers of two: 1, 2, 4, 8.
static const size_t kNumberOfAccessSizes = 4;
@@ -195,6 +199,13 @@ static cl::opt<bool> ClWrapIndirectCallsFast("msan-wrap-indirect-calls-fast",
cl::desc("Do not wrap indirect calls with target in the same module"),
cl::Hidden, cl::init(true));
+// This is an experiment to enable handling of cases where shadow is a non-zero
+// compile-time constant. For some unexplainable reason such cases were
+// silently ignored in the instrumentation.
+static cl::opt<bool> ClCheckConstantShadow("msan-check-constant-shadow",
+ cl::desc("Insert checks for constant shadow values"),
+ cl::Hidden, cl::init(false));
+
namespace {
/// \brief An instrumentation pass implementing detection of uninitialized
@@ -321,7 +332,7 @@ void MemorySanitizer::initializeCallbacks(Module &M) {
// which is not yet implemented.
StringRef WarningFnName = ClKeepGoing ? "__msan_warning"
: "__msan_warning_noreturn";
- WarningFn = M.getOrInsertFunction(WarningFnName, IRB.getVoidTy(), NULL);
+ WarningFn = M.getOrInsertFunction(WarningFnName, IRB.getVoidTy(), nullptr);
for (size_t AccessSizeIndex = 0; AccessSizeIndex < kNumberOfAccessSizes;
AccessSizeIndex++) {
@@ -329,34 +340,35 @@ void MemorySanitizer::initializeCallbacks(Module &M) {
std::string FunctionName = "__msan_maybe_warning_" + itostr(AccessSize);
MaybeWarningFn[AccessSizeIndex] = M.getOrInsertFunction(
FunctionName, IRB.getVoidTy(), IRB.getIntNTy(AccessSize * 8),
- IRB.getInt32Ty(), NULL);
+ IRB.getInt32Ty(), nullptr);
FunctionName = "__msan_maybe_store_origin_" + itostr(AccessSize);
MaybeStoreOriginFn[AccessSizeIndex] = M.getOrInsertFunction(
FunctionName, IRB.getVoidTy(), IRB.getIntNTy(AccessSize * 8),
- IRB.getInt8PtrTy(), IRB.getInt32Ty(), NULL);
+ IRB.getInt8PtrTy(), IRB.getInt32Ty(), nullptr);
}
MsanSetAllocaOrigin4Fn = M.getOrInsertFunction(
"__msan_set_alloca_origin4", IRB.getVoidTy(), IRB.getInt8PtrTy(), IntptrTy,
- IRB.getInt8PtrTy(), IntptrTy, NULL);
- MsanPoisonStackFn = M.getOrInsertFunction(
- "__msan_poison_stack", IRB.getVoidTy(), IRB.getInt8PtrTy(), IntptrTy, NULL);
+ IRB.getInt8PtrTy(), IntptrTy, nullptr);
+ MsanPoisonStackFn =
+ M.getOrInsertFunction("__msan_poison_stack", IRB.getVoidTy(),
+ IRB.getInt8PtrTy(), IntptrTy, nullptr);
MsanChainOriginFn = M.getOrInsertFunction(
- "__msan_chain_origin", IRB.getInt32Ty(), IRB.getInt32Ty(), NULL);
+ "__msan_chain_origin", IRB.getInt32Ty(), IRB.getInt32Ty(), nullptr);
MemmoveFn = M.getOrInsertFunction(
"__msan_memmove", IRB.getInt8PtrTy(), IRB.getInt8PtrTy(),
- IRB.getInt8PtrTy(), IntptrTy, NULL);
+ IRB.getInt8PtrTy(), IntptrTy, nullptr);
MemcpyFn = M.getOrInsertFunction(
"__msan_memcpy", IRB.getInt8PtrTy(), IRB.getInt8PtrTy(), IRB.getInt8PtrTy(),
- IntptrTy, NULL);
+ IntptrTy, nullptr);
MemsetFn = M.getOrInsertFunction(
"__msan_memset", IRB.getInt8PtrTy(), IRB.getInt8PtrTy(), IRB.getInt32Ty(),
- IntptrTy, NULL);
+ IntptrTy, nullptr);
// Create globals.
RetvalTLS = new GlobalVariable(
- M, ArrayType::get(IRB.getInt64Ty(), 8), false,
+ M, ArrayType::get(IRB.getInt64Ty(), kRetvalTLSSize / 8), false,
GlobalVariable::ExternalLinkage, nullptr, "__msan_retval_tls", nullptr,
GlobalVariable::InitialExecTLSModel);
RetvalOriginTLS = new GlobalVariable(
@@ -364,16 +376,16 @@ void MemorySanitizer::initializeCallbacks(Module &M) {
"__msan_retval_origin_tls", nullptr, GlobalVariable::InitialExecTLSModel);
ParamTLS = new GlobalVariable(
- M, ArrayType::get(IRB.getInt64Ty(), 1000), false,
+ M, ArrayType::get(IRB.getInt64Ty(), kParamTLSSize / 8), false,
GlobalVariable::ExternalLinkage, nullptr, "__msan_param_tls", nullptr,
GlobalVariable::InitialExecTLSModel);
ParamOriginTLS = new GlobalVariable(
- M, ArrayType::get(OriginTy, 1000), false, GlobalVariable::ExternalLinkage,
- nullptr, "__msan_param_origin_tls", nullptr,
- GlobalVariable::InitialExecTLSModel);
+ M, ArrayType::get(OriginTy, kParamTLSSize / 4), false,
+ GlobalVariable::ExternalLinkage, nullptr, "__msan_param_origin_tls",
+ nullptr, GlobalVariable::InitialExecTLSModel);
VAArgTLS = new GlobalVariable(
- M, ArrayType::get(IRB.getInt64Ty(), 1000), false,
+ M, ArrayType::get(IRB.getInt64Ty(), kParamTLSSize / 8), false,
GlobalVariable::ExternalLinkage, nullptr, "__msan_va_arg_tls", nullptr,
GlobalVariable::InitialExecTLSModel);
VAArgOverflowSizeTLS = new GlobalVariable(
@@ -393,7 +405,7 @@ void MemorySanitizer::initializeCallbacks(Module &M) {
AnyFunctionPtrTy =
PointerType::getUnqual(FunctionType::get(IRB.getVoidTy(), false));
IndirectCallWrapperFn = M.getOrInsertFunction(
- ClWrapIndirectCalls, AnyFunctionPtrTy, AnyFunctionPtrTy, NULL);
+ ClWrapIndirectCalls, AnyFunctionPtrTy, AnyFunctionPtrTy, nullptr);
}
if (WrapIndirectCalls && ClWrapIndirectCallsFast) {
@@ -442,7 +454,7 @@ bool MemorySanitizer::doInitialization(Module &M) {
// Insert a call to __msan_init/__msan_track_origins into the module's CTORs.
appendToGlobalCtors(M, cast<Function>(M.getOrInsertFunction(
- "__msan_init", IRB.getVoidTy(), NULL)), 0);
+ "__msan_init", IRB.getVoidTy(), nullptr)), 0);
if (TrackOrigins)
new GlobalVariable(M, IRB.getInt32Ty(), true, GlobalValue::WeakODRLinkage,
@@ -559,7 +571,8 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
// TODO(eugenis): handle non-zero constant shadow by inserting an
// unconditional check (can not simply fail compilation as this could
// be in the dead code).
- if (isa<Constant>(ConvertedShadow)) return;
+ if (!ClCheckConstantShadow)
+ if (isa<Constant>(ConvertedShadow)) return;
unsigned TypeSizeInBits =
MS.DL->getTypeSizeInBits(ConvertedShadow->getType());
unsigned SizeIndex = TypeSizeToSizeIndex(TypeSizeInBits);
@@ -615,8 +628,9 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
DEBUG(dbgs() << " SHAD0 : " << *Shadow << "\n");
Value *ConvertedShadow = convertToShadowTyNoVec(Shadow, IRB);
DEBUG(dbgs() << " SHAD1 : " << *ConvertedShadow << "\n");
- // See the comment in materializeStores().
- if (isa<Constant>(ConvertedShadow)) return;
+ // See the comment in storeOrigin().
+ if (!ClCheckConstantShadow)
+ if (isa<Constant>(ConvertedShadow)) return;
unsigned TypeSizeInBits =
MS.DL->getTypeSizeInBits(ConvertedShadow->getType());
unsigned SizeIndex = TypeSizeToSizeIndex(TypeSizeInBits);
@@ -763,6 +777,10 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
return VectorType::get(IntegerType::get(*MS.C, EltSize),
VT->getNumElements());
}
+ if (ArrayType *AT = dyn_cast<ArrayType>(OrigTy)) {
+ return ArrayType::get(getShadowTy(AT->getElementType()),
+ AT->getNumElements());
+ }
if (StructType *ST = dyn_cast<StructType>(OrigTy)) {
SmallVector<Type*, 4> Elements;
for (unsigned i = 0, n = ST->getNumElements(); i < n; i++)
@@ -882,11 +900,18 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
assert(ShadowTy);
if (isa<IntegerType>(ShadowTy) || isa<VectorType>(ShadowTy))
return Constant::getAllOnesValue(ShadowTy);
- StructType *ST = cast<StructType>(ShadowTy);
- SmallVector<Constant *, 4> Vals;
- for (unsigned i = 0, n = ST->getNumElements(); i < n; i++)
- Vals.push_back(getPoisonedShadow(ST->getElementType(i)));
- return ConstantStruct::get(ST, Vals);
+ if (ArrayType *AT = dyn_cast<ArrayType>(ShadowTy)) {
+ SmallVector<Constant *, 4> Vals(AT->getNumElements(),
+ getPoisonedShadow(AT->getElementType()));
+ return ConstantArray::get(AT, Vals);
+ }
+ if (StructType *ST = dyn_cast<StructType>(ShadowTy)) {
+ SmallVector<Constant *, 4> Vals;
+ for (unsigned i = 0, n = ST->getNumElements(); i < n; i++)
+ Vals.push_back(getPoisonedShadow(ST->getElementType(i)));
+ return ConstantStruct::get(ST, Vals);
+ }
+ llvm_unreachable("Unexpected shadow type");
}
/// \brief Create a dirty shadow for a given value.
@@ -941,6 +966,7 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
? MS.DL->getTypeAllocSize(FArg.getType()->getPointerElementType())
: MS.DL->getTypeAllocSize(FArg.getType());
if (A == &FArg) {
+ bool Overflow = ArgOffset + Size > kParamTLSSize;
Value *Base = getShadowPtrForArgument(&FArg, EntryIRB, ArgOffset);
if (FArg.hasByValAttr()) {
// ByVal pointer itself has clean shadow. We copy the actual
@@ -951,25 +977,38 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
Type *EltType = A->getType()->getPointerElementType();
ArgAlign = MS.DL->getABITypeAlignment(EltType);
}
- unsigned CopyAlign = std::min(ArgAlign, kShadowTLSAlignment);
- Value *Cpy = EntryIRB.CreateMemCpy(
- getShadowPtr(V, EntryIRB.getInt8Ty(), EntryIRB), Base, Size,
- CopyAlign);
- DEBUG(dbgs() << " ByValCpy: " << *Cpy << "\n");
- (void)Cpy;
+ if (Overflow) {
+ // ParamTLS overflow.
+ EntryIRB.CreateMemSet(
+ getShadowPtr(V, EntryIRB.getInt8Ty(), EntryIRB),
+ Constant::getNullValue(EntryIRB.getInt8Ty()), Size, ArgAlign);
+ } else {
+ unsigned CopyAlign = std::min(ArgAlign, kShadowTLSAlignment);
+ Value *Cpy = EntryIRB.CreateMemCpy(
+ getShadowPtr(V, EntryIRB.getInt8Ty(), EntryIRB), Base, Size,
+ CopyAlign);
+ DEBUG(dbgs() << " ByValCpy: " << *Cpy << "\n");
+ (void)Cpy;
+ }
*ShadowPtr = getCleanShadow(V);
} else {
- *ShadowPtr = EntryIRB.CreateAlignedLoad(Base, kShadowTLSAlignment);
+ if (Overflow) {
+ // ParamTLS overflow.
+ *ShadowPtr = getCleanShadow(V);
+ } else {
+ *ShadowPtr =
+ EntryIRB.CreateAlignedLoad(Base, kShadowTLSAlignment);
+ }
}
DEBUG(dbgs() << " ARG: " << FArg << " ==> " <<
**ShadowPtr << "\n");
- if (MS.TrackOrigins) {
+ if (MS.TrackOrigins && !Overflow) {
Value *OriginPtr =
getOriginPtrForArgument(&FArg, EntryIRB, ArgOffset);
setOrigin(A, EntryIRB.CreateLoad(OriginPtr));
}
}
- ArgOffset += DataLayout::RoundUpAlignment(Size, kShadowTLSAlignment);
+ ArgOffset += RoundUpToAlignment(Size, kShadowTLSAlignment);
}
assert(*ShadowPtr && "Could not find shadow for an argument");
return *ShadowPtr;
@@ -1024,9 +1063,16 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
/// UMR warning in runtime if the value is not fully defined.
void insertShadowCheck(Value *Val, Instruction *OrigIns) {
assert(Val);
- Instruction *Shadow = dyn_cast_or_null<Instruction>(getShadow(Val));
- if (!Shadow) return;
- Instruction *Origin = dyn_cast_or_null<Instruction>(getOrigin(Val));
+ Value *Shadow, *Origin;
+ if (ClCheckConstantShadow) {
+ Shadow = getShadow(Val);
+ if (!Shadow) return;
+ Origin = getOrigin(Val);
+ } else {
+ Shadow = dyn_cast_or_null<Instruction>(getShadow(Val));
+ if (!Shadow) return;
+ Origin = dyn_cast_or_null<Instruction>(getOrigin(Val));
+ }
insertShadowCheck(Shadow, Origin, OrigIns);
}
@@ -1859,7 +1905,7 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
Value *Op = I.getArgOperand(0);
Type *OpType = Op->getType();
Function *BswapFunc = Intrinsic::getDeclaration(
- F.getParent(), Intrinsic::bswap, ArrayRef<Type*>(&OpType, 1));
+ F.getParent(), Intrinsic::bswap, makeArrayRef(&OpType, 1));
setShadow(&I, IRB.CreateCall(BswapFunc, getShadow(Op)));
setOrigin(&I, getOrigin(Op));
}
@@ -2313,26 +2359,32 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
Value *ArgShadowBase = getShadowPtrForArgument(A, IRB, ArgOffset);
DEBUG(dbgs() << " Arg#" << i << ": " << *A <<
" Shadow: " << *ArgShadow << "\n");
+ bool ArgIsInitialized = false;
if (CS.paramHasAttr(i + 1, Attribute::ByVal)) {
assert(A->getType()->isPointerTy() &&
"ByVal argument is not a pointer!");
Size = MS.DL->getTypeAllocSize(A->getType()->getPointerElementType());
- unsigned Alignment = CS.getParamAlignment(i + 1);
+ if (ArgOffset + Size > kParamTLSSize) break;
+ unsigned ParamAlignment = CS.getParamAlignment(i + 1);
+ unsigned Alignment = std::min(ParamAlignment, kShadowTLSAlignment);
Store = IRB.CreateMemCpy(ArgShadowBase,
getShadowPtr(A, Type::getInt8Ty(*MS.C), IRB),
Size, Alignment);
} else {
Size = MS.DL->getTypeAllocSize(A->getType());
+ if (ArgOffset + Size > kParamTLSSize) break;
Store = IRB.CreateAlignedStore(ArgShadow, ArgShadowBase,
kShadowTLSAlignment);
+ Constant *Cst = dyn_cast<Constant>(ArgShadow);
+ if (Cst && Cst->isNullValue()) ArgIsInitialized = true;
}
- if (MS.TrackOrigins)
+ if (MS.TrackOrigins && !ArgIsInitialized)
IRB.CreateStore(getOrigin(A),
getOriginPtrForArgument(A, IRB, ArgOffset));
(void)Store;
assert(Size != 0 && Store != nullptr);
DEBUG(dbgs() << " Param:" << *Store << "\n");
- ArgOffset += DataLayout::RoundUpAlignment(Size, 8);
+ ArgOffset += RoundUpToAlignment(Size, 8);
}
DEBUG(dbgs() << " done with call args\n");
@@ -2613,7 +2665,7 @@ struct VarArgAMD64Helper : public VarArgHelper {
Type *RealTy = A->getType()->getPointerElementType();
uint64_t ArgSize = MS.DL->getTypeAllocSize(RealTy);
Value *Base = getShadowPtrForVAArgument(RealTy, IRB, OverflowOffset);
- OverflowOffset += DataLayout::RoundUpAlignment(ArgSize, 8);
+ OverflowOffset += RoundUpToAlignment(ArgSize, 8);
IRB.CreateMemCpy(Base, MSV.getShadowPtr(A, IRB.getInt8Ty(), IRB),
ArgSize, kShadowTLSAlignment);
} else {
@@ -2635,7 +2687,7 @@ struct VarArgAMD64Helper : public VarArgHelper {
case AK_Memory:
uint64_t ArgSize = MS.DL->getTypeAllocSize(A->getType());
Base = getShadowPtrForVAArgument(A->getType(), IRB, OverflowOffset);
- OverflowOffset += DataLayout::RoundUpAlignment(ArgSize, 8);
+ OverflowOffset += RoundUpToAlignment(ArgSize, 8);
}
IRB.CreateAlignedStore(MSV.getShadow(A), Base, kShadowTLSAlignment);
}
diff --git a/lib/Transforms/Instrumentation/SanitizerCoverage.cpp b/lib/Transforms/Instrumentation/SanitizerCoverage.cpp
new file mode 100644
index 0000000..f882072
--- /dev/null
+++ b/lib/Transforms/Instrumentation/SanitizerCoverage.cpp
@@ -0,0 +1,294 @@
+//===-- SanitizerCoverage.cpp - coverage instrumentation for sanitizers ---===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Coverage instrumentation that works with AddressSanitizer
+// and potentially with other Sanitizers.
+//
+// We create a Guard boolean variable with the same linkage
+// as the function and inject this code into the entry block (CoverageLevel=1)
+// or all blocks (CoverageLevel>=2):
+// if (*Guard) {
+// __sanitizer_cov();
+// *Guard = 1;
+// }
+// The accesses to Guard are atomic. The rest of the logic is
+// in __sanitizer_cov (it's fine to call it more than once).
+//
+// With CoverageLevel>=3 we also split critical edges, effectively
+// instrumenting all edges.
+//
+// CoverageLevel>=4 adds indirect call profiling, implemented as a function call.
+//
+// This coverage implementation provides very limited data:
+// it only tells if a given function (block) was ever executed. No counters.
+// But for many use cases this is what we need, and the added slowdown is small.
+//
+//===----------------------------------------------------------------------===//
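For readers skimming the diff, the per-block check injected by this pass is conceptually equivalent to the hand-written C++ sketch below. The guard variable and the std::atomic wrapper are illustrative assumptions; the pass emits the corresponding IR directly (see InjectCoverageAtBlock further down in this file).

    #include <atomic>

    extern "C" void __sanitizer_cov();   // runtime callback, records the caller PC

    // One zero-initialized guard byte per instrumented block, analogous to the
    // __sancov_gen_cov_* globals the pass creates.
    static std::atomic<unsigned char> Guard{0};

    void instrumented_block() {
      // Monotonic (relaxed) atomic load, matching Load->setAtomic(Monotonic) below.
      if (Guard.load(std::memory_order_relaxed) == 0) {
        __sanitizer_cov();                          // calling it more than once is fine
        Guard.store(1, std::memory_order_relaxed);  // relaxed atomic store of 1
      }
      // ... rest of the block ...
    }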
+
+#include "llvm/Transforms/Instrumentation.h"
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/IR/CallSite.h"
+#include "llvm/IR/DataLayout.h"
+#include "llvm/IR/Function.h"
+#include "llvm/IR/IRBuilder.h"
+#include "llvm/IR/LLVMContext.h"
+#include "llvm/IR/MDBuilder.h"
+#include "llvm/IR/Module.h"
+#include "llvm/IR/Type.h"
+#include "llvm/Support/CommandLine.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/raw_ostream.h"
+#include "llvm/Transforms/Scalar.h"
+#include "llvm/Transforms/Utils/BasicBlockUtils.h"
+#include "llvm/Transforms/Utils/ModuleUtils.h"
+
+using namespace llvm;
+
+#define DEBUG_TYPE "sancov"
+
+static const char *const kSanCovModuleInitName = "__sanitizer_cov_module_init";
+static const char *const kSanCovName = "__sanitizer_cov";
+static const char *const kSanCovIndirCallName = "__sanitizer_cov_indir_call16";
+static const char *const kSanCovTraceEnter = "__sanitizer_cov_trace_func_enter";
+static const char *const kSanCovTraceBB = "__sanitizer_cov_trace_basic_block";
+static const char *const kSanCovModuleCtorName = "sancov.module_ctor";
+static const uint64_t kSanCtorAndDtorPriority = 1;
+
+static cl::opt<int> ClCoverageLevel("sanitizer-coverage-level",
+ cl::desc("Sanitizer Coverage. 0: none, 1: entry block, 2: all blocks, "
+ "3: all blocks and critical edges, "
+ "4: above plus indirect calls"),
+ cl::Hidden, cl::init(0));
+
+static cl::opt<int> ClCoverageBlockThreshold(
+ "sanitizer-coverage-block-threshold",
+ cl::desc("Add coverage instrumentation only to the entry block if there "
+ "are more than this number of blocks."),
+ cl::Hidden, cl::init(1500));
+
+static cl::opt<bool>
+ ClExperimentalTracing("sanitizer-coverage-experimental-tracing",
+ cl::desc("Experimental basic-block tracing: insert "
+ "callbacks at every basic block"),
+ cl::Hidden, cl::init(false));
+
+namespace {
+
+class SanitizerCoverageModule : public ModulePass {
+ public:
+ SanitizerCoverageModule(int CoverageLevel = 0)
+ : ModulePass(ID),
+ CoverageLevel(std::max(CoverageLevel, (int)ClCoverageLevel)) {}
+ bool runOnModule(Module &M) override;
+ bool runOnFunction(Function &F);
+ static char ID; // Pass identification, replacement for typeid
+ const char *getPassName() const override {
+ return "SanitizerCoverageModule";
+ }
+
+ void getAnalysisUsage(AnalysisUsage &AU) const override {
+ AU.addRequired<DataLayoutPass>();
+ }
+
+ private:
+ void InjectCoverageForIndirectCalls(Function &F,
+ ArrayRef<Instruction *> IndirCalls);
+ bool InjectCoverage(Function &F, ArrayRef<BasicBlock *> AllBlocks,
+ ArrayRef<Instruction *> IndirCalls);
+ bool InjectTracing(Function &F, ArrayRef<BasicBlock *> AllBlocks);
+ void InjectCoverageAtBlock(Function &F, BasicBlock &BB);
+ Function *SanCovFunction;
+ Function *SanCovIndirCallFunction;
+ Function *SanCovModuleInit;
+ Function *SanCovTraceEnter, *SanCovTraceBB;
+ Type *IntptrTy;
+ LLVMContext *C;
+
+ int CoverageLevel;
+};
+
+} // namespace
+
+static Function *checkInterfaceFunction(Constant *FuncOrBitcast) {
+ if (Function *F = dyn_cast<Function>(FuncOrBitcast))
+ return F;
+ std::string Err;
+ raw_string_ostream Stream(Err);
+ Stream << "SanitizerCoverage interface function redefined: "
+ << *FuncOrBitcast;
+ report_fatal_error(Err);
+}
+
+bool SanitizerCoverageModule::runOnModule(Module &M) {
+ if (!CoverageLevel) return false;
+ C = &(M.getContext());
+ DataLayoutPass *DLP = &getAnalysis<DataLayoutPass>();
+ IntptrTy = Type::getIntNTy(*C, DLP->getDataLayout().getPointerSizeInBits());
+ Type *VoidTy = Type::getVoidTy(*C);
+
+ Function *CtorFunc =
+ Function::Create(FunctionType::get(VoidTy, false),
+ GlobalValue::InternalLinkage, kSanCovModuleCtorName, &M);
+ ReturnInst::Create(*C, BasicBlock::Create(*C, "", CtorFunc));
+ appendToGlobalCtors(M, CtorFunc, kSanCtorAndDtorPriority);
+
+ SanCovFunction =
+ checkInterfaceFunction(M.getOrInsertFunction(kSanCovName, VoidTy, nullptr));
+ SanCovIndirCallFunction = checkInterfaceFunction(M.getOrInsertFunction(
+ kSanCovIndirCallName, VoidTy, IntptrTy, IntptrTy, nullptr));
+ SanCovModuleInit = checkInterfaceFunction(M.getOrInsertFunction(
+ kSanCovModuleInitName, Type::getVoidTy(*C), IntptrTy, nullptr));
+ SanCovModuleInit->setLinkage(Function::ExternalLinkage);
+
+ if (ClExperimentalTracing) {
+ SanCovTraceEnter = checkInterfaceFunction(
+ M.getOrInsertFunction(kSanCovTraceEnter, VoidTy, IntptrTy, nullptr));
+ SanCovTraceBB = checkInterfaceFunction(
+ M.getOrInsertFunction(kSanCovTraceBB, VoidTy, IntptrTy, nullptr));
+ }
+
+ for (auto &F : M)
+ runOnFunction(F);
+
+ IRBuilder<> IRB(CtorFunc->getEntryBlock().getTerminator());
+ IRB.CreateCall(SanCovModuleInit,
+ ConstantInt::get(IntptrTy, SanCovFunction->getNumUses()));
+ return true;
+}
+
+bool SanitizerCoverageModule::runOnFunction(Function &F) {
+ if (F.empty()) return false;
+ // For now instrument only functions that will also be asan-instrumented.
+ if (!F.hasFnAttribute(Attribute::SanitizeAddress))
+ return false;
+ if (CoverageLevel >= 3)
+ SplitAllCriticalEdges(F, this);
+ SmallVector<Instruction*, 8> IndirCalls;
+ SmallVector<BasicBlock*, 16> AllBlocks;
+ for (auto &BB : F) {
+ AllBlocks.push_back(&BB);
+ if (CoverageLevel >= 4)
+ for (auto &Inst : BB) {
+ CallSite CS(&Inst);
+ if (CS && !CS.getCalledFunction())
+ IndirCalls.push_back(&Inst);
+ }
+ }
+ InjectCoverage(F, AllBlocks, IndirCalls);
+ InjectTracing(F, AllBlocks);
+ return true;
+}
+
+// Experimental support for tracing.
+// Basically, insert a callback at the beginning of every basic block.
+// Every callback gets a pointer to a unique global for internal storage.
+bool SanitizerCoverageModule::InjectTracing(Function &F,
+ ArrayRef<BasicBlock *> AllBlocks) {
+ if (!ClExperimentalTracing) return false;
+ Type *Ty = ArrayType::get(IntptrTy, 1); // May need to use more words later.
+ for (auto BB : AllBlocks) {
+ IRBuilder<> IRB(BB->getFirstInsertionPt());
+ GlobalVariable *TraceCache = new GlobalVariable(
+ *F.getParent(), Ty, false, GlobalValue::PrivateLinkage,
+ Constant::getNullValue(Ty), "__sancov_gen_trace_cache");
+ IRB.CreateCall(&F.getEntryBlock() == BB ? SanCovTraceEnter : SanCovTraceBB,
+ IRB.CreatePointerCast(TraceCache, IntptrTy));
+ }
+ return true;
+}
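Viewed from the instrumented code's side, the experimental tracing mode behaves roughly like the sketch below. The callback declarations follow the kSanCovTrace* names above; uptr and the per-block cache word are illustrative assumptions about how the runtime uses its storage.

    typedef unsigned long uptr;  // stand-in for the target's pointer-sized integer

    extern "C" void __sanitizer_cov_trace_func_enter(uptr Cache);
    extern "C" void __sanitizer_cov_trace_basic_block(uptr Cache);

    void traced_function() {
      static uptr EntryCache[1];            // __sancov_gen_trace_cache for the entry block
      __sanitizer_cov_trace_func_enter((uptr)EntryCache);
      // ...
      {
        static uptr BlockCache[1];          // one cache per non-entry basic block
        __sanitizer_cov_trace_basic_block((uptr)BlockCache);
        // ... body of the block ...
      }
    }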
+
+bool
+SanitizerCoverageModule::InjectCoverage(Function &F,
+ ArrayRef<BasicBlock *> AllBlocks,
+ ArrayRef<Instruction *> IndirCalls) {
+ if (!CoverageLevel) return false;
+
+ if (CoverageLevel == 1 ||
+ (unsigned)ClCoverageBlockThreshold < AllBlocks.size()) {
+ InjectCoverageAtBlock(F, F.getEntryBlock());
+ } else {
+ for (auto BB : AllBlocks)
+ InjectCoverageAtBlock(F, *BB);
+ }
+ InjectCoverageForIndirectCalls(F, IndirCalls);
+ return true;
+}
+
+// On every indirect call we call a run-time function
+// __sanitizer_cov_indir_call* with two parameters:
+// - callee address,
+// - global cache array that contains kCacheSize pointers (zero-initialized).
+// The cache is used to speed up recording the caller-callee pairs.
+// The address of the caller is passed implicitly via caller PC.
+// kCacheSize is encoded in the name of the run-time function.
+void SanitizerCoverageModule::InjectCoverageForIndirectCalls(
+ Function &F, ArrayRef<Instruction *> IndirCalls) {
+ if (IndirCalls.empty()) return;
+ const int kCacheSize = 16;
+ const int kCacheAlignment = 64; // Align for better performance.
+ Type *Ty = ArrayType::get(IntptrTy, kCacheSize);
+ for (auto I : IndirCalls) {
+ IRBuilder<> IRB(I);
+ CallSite CS(I);
+ Value *Callee = CS.getCalledValue();
+ if (dyn_cast<InlineAsm>(Callee)) continue;
+ GlobalVariable *CalleeCache = new GlobalVariable(
+ *F.getParent(), Ty, false, GlobalValue::PrivateLinkage,
+ Constant::getNullValue(Ty), "__sancov_gen_callee_cache");
+ CalleeCache->setAlignment(kCacheAlignment);
+ IRB.CreateCall2(SanCovIndirCallFunction,
+ IRB.CreatePointerCast(Callee, IntptrTy),
+ IRB.CreatePointerCast(CalleeCache, IntptrTy));
+ }
+}
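In C++ terms, the rewrite of an indirect call site described above roughly amounts to the following sketch. uptr and the local names are illustrative; the real cache is the zero-initialized __sancov_gen_callee_cache global created in the loop.

    typedef unsigned long uptr;  // stand-in for the target's pointer-sized integer

    // Runtime hook; the 16 in the name encodes kCacheSize.
    extern "C" void __sanitizer_cov_indir_call16(uptr Callee, uptr Cache);

    void call_through(void (*Fp)()) {
      // Per-call-site cache: kCacheSize pointers, zero-initialized, 64-byte aligned.
      alignas(64) static uptr CalleeCache[16];
      __sanitizer_cov_indir_call16((uptr)Fp, (uptr)CalleeCache);  // record caller/callee pair
      Fp();  // original indirect call
    }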
+
+void SanitizerCoverageModule::InjectCoverageAtBlock(Function &F,
+ BasicBlock &BB) {
+ BasicBlock::iterator IP = BB.getFirstInsertionPt(), BE = BB.end();
+ // Skip static allocas at the top of the entry block so they don't become
+ // dynamic when we split the block. If we used our optimized stack layout,
+ // then there will only be one alloca and it will come first.
+ for (; IP != BE; ++IP) {
+ AllocaInst *AI = dyn_cast<AllocaInst>(IP);
+ if (!AI || !AI->isStaticAlloca())
+ break;
+ }
+
+ DebugLoc EntryLoc = &BB == &F.getEntryBlock()
+ ? IP->getDebugLoc().getFnDebugLoc(*C)
+ : IP->getDebugLoc();
+ IRBuilder<> IRB(IP);
+ IRB.SetCurrentDebugLocation(EntryLoc);
+ Type *Int8Ty = IRB.getInt8Ty();
+ GlobalVariable *Guard = new GlobalVariable(
+ *F.getParent(), Int8Ty, false, GlobalValue::PrivateLinkage,
+ Constant::getNullValue(Int8Ty), "__sancov_gen_cov_" + F.getName());
+ LoadInst *Load = IRB.CreateLoad(Guard);
+ Load->setAtomic(Monotonic);
+ Load->setAlignment(1);
+ Value *Cmp = IRB.CreateICmpEQ(Constant::getNullValue(Int8Ty), Load);
+ Instruction *Ins = SplitBlockAndInsertIfThen(
+ Cmp, IP, false, MDBuilder(*C).createBranchWeights(1, 100000));
+ IRB.SetInsertPoint(Ins);
+ IRB.SetCurrentDebugLocation(EntryLoc);
+ // __sanitizer_cov gets the PC of the instruction using GET_CALLER_PC.
+ IRB.CreateCall(SanCovFunction);
+ StoreInst *Store = IRB.CreateStore(ConstantInt::get(Int8Ty, 1), Guard);
+ Store->setAtomic(Monotonic);
+ Store->setAlignment(1);
+}
+
+char SanitizerCoverageModule::ID = 0;
+INITIALIZE_PASS(SanitizerCoverageModule, "sancov",
+ "SanitizerCoverage: TODO."
+ "ModulePass", false, false)
+ModulePass *llvm::createSanitizerCoverageModulePass(int CoverageLevel) {
+ return new SanitizerCoverageModule(CoverageLevel);
+}
diff --git a/lib/Transforms/Instrumentation/ThreadSanitizer.cpp b/lib/Transforms/Instrumentation/ThreadSanitizer.cpp
index 89386a6..8a56a1f 100644
--- a/lib/Transforms/Instrumentation/ThreadSanitizer.cpp
+++ b/lib/Transforms/Instrumentation/ThreadSanitizer.cpp
@@ -135,33 +135,33 @@ void ThreadSanitizer::initializeCallbacks(Module &M) {
IRBuilder<> IRB(M.getContext());
// Initialize the callbacks.
TsanFuncEntry = checkInterfaceFunction(M.getOrInsertFunction(
- "__tsan_func_entry", IRB.getVoidTy(), IRB.getInt8PtrTy(), NULL));
+ "__tsan_func_entry", IRB.getVoidTy(), IRB.getInt8PtrTy(), nullptr));
TsanFuncExit = checkInterfaceFunction(M.getOrInsertFunction(
- "__tsan_func_exit", IRB.getVoidTy(), NULL));
+ "__tsan_func_exit", IRB.getVoidTy(), nullptr));
OrdTy = IRB.getInt32Ty();
for (size_t i = 0; i < kNumberOfAccessSizes; ++i) {
const size_t ByteSize = 1 << i;
const size_t BitSize = ByteSize * 8;
SmallString<32> ReadName("__tsan_read" + itostr(ByteSize));
TsanRead[i] = checkInterfaceFunction(M.getOrInsertFunction(
- ReadName, IRB.getVoidTy(), IRB.getInt8PtrTy(), NULL));
+ ReadName, IRB.getVoidTy(), IRB.getInt8PtrTy(), nullptr));
SmallString<32> WriteName("__tsan_write" + itostr(ByteSize));
TsanWrite[i] = checkInterfaceFunction(M.getOrInsertFunction(
- WriteName, IRB.getVoidTy(), IRB.getInt8PtrTy(), NULL));
+ WriteName, IRB.getVoidTy(), IRB.getInt8PtrTy(), nullptr));
Type *Ty = Type::getIntNTy(M.getContext(), BitSize);
Type *PtrTy = Ty->getPointerTo();
SmallString<32> AtomicLoadName("__tsan_atomic" + itostr(BitSize) +
"_load");
TsanAtomicLoad[i] = checkInterfaceFunction(M.getOrInsertFunction(
- AtomicLoadName, Ty, PtrTy, OrdTy, NULL));
+ AtomicLoadName, Ty, PtrTy, OrdTy, nullptr));
SmallString<32> AtomicStoreName("__tsan_atomic" + itostr(BitSize) +
"_store");
TsanAtomicStore[i] = checkInterfaceFunction(M.getOrInsertFunction(
AtomicStoreName, IRB.getVoidTy(), PtrTy, Ty, OrdTy,
- NULL));
+ nullptr));
for (int op = AtomicRMWInst::FIRST_BINOP;
op <= AtomicRMWInst::LAST_BINOP; ++op) {
@@ -185,33 +185,33 @@ void ThreadSanitizer::initializeCallbacks(Module &M) {
continue;
SmallString<32> RMWName("__tsan_atomic" + itostr(BitSize) + NamePart);
TsanAtomicRMW[op][i] = checkInterfaceFunction(M.getOrInsertFunction(
- RMWName, Ty, PtrTy, Ty, OrdTy, NULL));
+ RMWName, Ty, PtrTy, Ty, OrdTy, nullptr));
}
SmallString<32> AtomicCASName("__tsan_atomic" + itostr(BitSize) +
"_compare_exchange_val");
TsanAtomicCAS[i] = checkInterfaceFunction(M.getOrInsertFunction(
- AtomicCASName, Ty, PtrTy, Ty, Ty, OrdTy, OrdTy, NULL));
+ AtomicCASName, Ty, PtrTy, Ty, Ty, OrdTy, OrdTy, nullptr));
}
TsanVptrUpdate = checkInterfaceFunction(M.getOrInsertFunction(
"__tsan_vptr_update", IRB.getVoidTy(), IRB.getInt8PtrTy(),
- IRB.getInt8PtrTy(), NULL));
+ IRB.getInt8PtrTy(), nullptr));
TsanVptrLoad = checkInterfaceFunction(M.getOrInsertFunction(
- "__tsan_vptr_read", IRB.getVoidTy(), IRB.getInt8PtrTy(), NULL));
+ "__tsan_vptr_read", IRB.getVoidTy(), IRB.getInt8PtrTy(), nullptr));
TsanAtomicThreadFence = checkInterfaceFunction(M.getOrInsertFunction(
- "__tsan_atomic_thread_fence", IRB.getVoidTy(), OrdTy, NULL));
+ "__tsan_atomic_thread_fence", IRB.getVoidTy(), OrdTy, nullptr));
TsanAtomicSignalFence = checkInterfaceFunction(M.getOrInsertFunction(
- "__tsan_atomic_signal_fence", IRB.getVoidTy(), OrdTy, NULL));
+ "__tsan_atomic_signal_fence", IRB.getVoidTy(), OrdTy, nullptr));
MemmoveFn = checkInterfaceFunction(M.getOrInsertFunction(
"memmove", IRB.getInt8PtrTy(), IRB.getInt8PtrTy(),
- IRB.getInt8PtrTy(), IntptrTy, NULL));
+ IRB.getInt8PtrTy(), IntptrTy, nullptr));
MemcpyFn = checkInterfaceFunction(M.getOrInsertFunction(
"memcpy", IRB.getInt8PtrTy(), IRB.getInt8PtrTy(), IRB.getInt8PtrTy(),
- IntptrTy, NULL));
+ IntptrTy, nullptr));
MemsetFn = checkInterfaceFunction(M.getOrInsertFunction(
"memset", IRB.getInt8PtrTy(), IRB.getInt8PtrTy(), IRB.getInt32Ty(),
- IntptrTy, NULL));
+ IntptrTy, nullptr));
}
bool ThreadSanitizer::doInitialization(Module &M) {
@@ -224,7 +224,7 @@ bool ThreadSanitizer::doInitialization(Module &M) {
IRBuilder<> IRB(M.getContext());
IntptrTy = IRB.getIntPtrTy(DL);
Value *TsanInit = M.getOrInsertFunction("__tsan_init",
- IRB.getVoidTy(), NULL);
+ IRB.getVoidTy(), nullptr);
appendToGlobalCtors(M, cast<Function>(TsanInit), 0);
return true;
@@ -481,8 +481,7 @@ bool ThreadSanitizer::instrumentAtomic(Instruction *I) {
Type *PtrTy = Ty->getPointerTo();
Value *Args[] = {IRB.CreatePointerCast(Addr, PtrTy),
createOrdering(&IRB, LI->getOrdering())};
- CallInst *C = CallInst::Create(TsanAtomicLoad[Idx],
- ArrayRef<Value*>(Args));
+ CallInst *C = CallInst::Create(TsanAtomicLoad[Idx], Args);
ReplaceInstWithInst(I, C);
} else if (StoreInst *SI = dyn_cast<StoreInst>(I)) {
@@ -497,8 +496,7 @@ bool ThreadSanitizer::instrumentAtomic(Instruction *I) {
Value *Args[] = {IRB.CreatePointerCast(Addr, PtrTy),
IRB.CreateIntCast(SI->getValueOperand(), Ty, false),
createOrdering(&IRB, SI->getOrdering())};
- CallInst *C = CallInst::Create(TsanAtomicStore[Idx],
- ArrayRef<Value*>(Args));
+ CallInst *C = CallInst::Create(TsanAtomicStore[Idx], Args);
ReplaceInstWithInst(I, C);
} else if (AtomicRMWInst *RMWI = dyn_cast<AtomicRMWInst>(I)) {
Value *Addr = RMWI->getPointerOperand();
@@ -515,7 +513,7 @@ bool ThreadSanitizer::instrumentAtomic(Instruction *I) {
Value *Args[] = {IRB.CreatePointerCast(Addr, PtrTy),
IRB.CreateIntCast(RMWI->getValOperand(), Ty, false),
createOrdering(&IRB, RMWI->getOrdering())};
- CallInst *C = CallInst::Create(F, ArrayRef<Value*>(Args));
+ CallInst *C = CallInst::Create(F, Args);
ReplaceInstWithInst(I, C);
} else if (AtomicCmpXchgInst *CASI = dyn_cast<AtomicCmpXchgInst>(I)) {
Value *Addr = CASI->getPointerOperand();
@@ -543,7 +541,7 @@ bool ThreadSanitizer::instrumentAtomic(Instruction *I) {
Value *Args[] = {createOrdering(&IRB, FI->getOrdering())};
Function *F = FI->getSynchScope() == SingleThread ?
TsanAtomicSignalFence : TsanAtomicThreadFence;
- CallInst *C = CallInst::Create(F, ArrayRef<Value*>(Args));
+ CallInst *C = CallInst::Create(F, Args);
ReplaceInstWithInst(I, C);
}
return true;