Diffstat (limited to 'lib/Transforms/Instrumentation/AddressSanitizer.cpp')
-rw-r--r--  lib/Transforms/Instrumentation/AddressSanitizer.cpp  310
1 file changed, 201 insertions, 109 deletions
diff --git a/lib/Transforms/Instrumentation/AddressSanitizer.cpp b/lib/Transforms/Instrumentation/AddressSanitizer.cpp
index bbfa4c5..95fca75 100644
--- a/lib/Transforms/Instrumentation/AddressSanitizer.cpp
+++ b/lib/Transforms/Instrumentation/AddressSanitizer.cpp
@@ -13,8 +13,6 @@
//
//===----------------------------------------------------------------------===//
-#define DEBUG_TYPE "asan"
-
#include "llvm/Transforms/Instrumentation.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
@@ -53,8 +51,11 @@
using namespace llvm;
+#define DEBUG_TYPE "asan"
+
static const uint64_t kDefaultShadowScale = 3;
static const uint64_t kDefaultShadowOffset32 = 1ULL << 29;
+static const uint64_t kIOSShadowOffset32 = 1ULL << 30;
static const uint64_t kDefaultShadowOffset64 = 1ULL << 44;
static const uint64_t kSmallX86_64ShadowOffset = 0x7FFF8000; // < 2G.
static const uint64_t kPPC64_ShadowOffset64 = 1ULL << 41;
@@ -79,6 +80,7 @@ static const char *const kAsanUnregisterGlobalsName =
static const char *const kAsanPoisonGlobalsName = "__asan_before_dynamic_init";
static const char *const kAsanUnpoisonGlobalsName = "__asan_after_dynamic_init";
static const char *const kAsanInitName = "__asan_init_v3";
+static const char *const kAsanCovModuleInitName = "__sanitizer_cov_module_init";
static const char *const kAsanCovName = "__sanitizer_cov";
static const char *const kAsanPtrCmp = "__sanitizer_ptr_cmp";
static const char *const kAsanPtrSub = "__sanitizer_ptr_sub";
@@ -135,10 +137,12 @@ static cl::opt<bool> ClGlobals("asan-globals",
static cl::opt<int> ClCoverage("asan-coverage",
cl::desc("ASan coverage. 0: none, 1: entry block, 2: all blocks"),
cl::Hidden, cl::init(false));
+static cl::opt<int> ClCoverageBlockThreshold("asan-coverage-block-threshold",
+ cl::desc("Add coverage instrumentation only to the entry block if there "
+ "are more than this number of blocks."),
+ cl::Hidden, cl::init(1500));
static cl::opt<bool> ClInitializers("asan-initialization-order",
cl::desc("Handle C++ initializer order"), cl::Hidden, cl::init(false));
-static cl::opt<bool> ClMemIntrin("asan-memintrin",
- cl::desc("Handle memset/memcpy/memmove"), cl::Hidden, cl::init(true));
static cl::opt<bool> ClInvalidPointerPairs("asan-detect-invalid-pointer-pair",
cl::desc("Instrument <, <=, >, >=, - with pointer operands"),
cl::Hidden, cl::init(false));
@@ -148,6 +152,16 @@ static cl::opt<unsigned> ClRealignStack("asan-realign-stack",
static cl::opt<std::string> ClBlacklistFile("asan-blacklist",
cl::desc("File containing the list of objects to ignore "
"during instrumentation"), cl::Hidden);
+static cl::opt<int> ClInstrumentationWithCallsThreshold(
+ "asan-instrumentation-with-call-threshold",
+ cl::desc("If the function being instrumented contains more than "
+ "this number of memory accesses, use callbacks instead of "
+ "inline checks (-1 means never use callbacks)."),
+ cl::Hidden, cl::init(7000));
+static cl::opt<std::string> ClMemoryAccessCallbackPrefix(
+ "asan-memory-access-callback-prefix",
+ cl::desc("Prefix for memory access callbacks"), cl::Hidden,
+ cl::init("__asan_"));
// This is an experimental feature that will allow choosing between
// instrumented and non-instrumented code at link-time.
@@ -238,7 +252,7 @@ struct ShadowMapping {
static ShadowMapping getShadowMapping(const Module &M, int LongSize) {
llvm::Triple TargetTriple(M.getTargetTriple());
bool IsAndroid = TargetTriple.getEnvironment() == llvm::Triple::Android;
- // bool IsMacOSX = TargetTriple.getOS() == llvm::Triple::MacOSX;
+ bool IsIOS = TargetTriple.getOS() == llvm::Triple::IOS;
bool IsFreeBSD = TargetTriple.getOS() == llvm::Triple::FreeBSD;
bool IsLinux = TargetTriple.getOS() == llvm::Triple::Linux;
bool IsPPC64 = TargetTriple.getArch() == llvm::Triple::ppc64 ||
@@ -256,6 +270,8 @@ static ShadowMapping getShadowMapping(const Module &M, int LongSize) {
Mapping.Offset = kMIPS32_ShadowOffset32;
else if (IsFreeBSD)
Mapping.Offset = kFreeBSD_ShadowOffset32;
+ else if (IsIOS)
+ Mapping.Offset = kIOSShadowOffset32;
else
Mapping.Offset = kDefaultShadowOffset32;
} else { // LongSize == 64
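
The iOS case added here only picks a different constant; the actual address-to-shadow translation stays the shift-then-add that memToShadow emits further down. A minimal standalone sketch of that arithmetic, assuming the default scale of 3 and the new kIOSShadowOffset32 (the helper name and the example address are illustrative, not part of the pass):

    #include <cstdint>
    #include <cstdio>

    // Constants mirrored from the hunk above: scale 3, 32-bit iOS offset 1 << 30.
    static const uint64_t kShadowScale = 3;
    static const uint64_t kIOSShadowOffset32 = 1ULL << 30;

    // shadow = (addr >> Scale) + Offset -- the same shift-then-add that
    // AddressSanitizer::memToShadow builds as IR.
    static uint64_t memToShadowSketch(uint64_t Addr) {
      return (Addr >> kShadowScale) + kIOSShadowOffset32;
    }

    int main() {
      uint64_t Addr = 0x20000000; // arbitrary example application address
      std::printf("0x%llx -> shadow 0x%llx\n", (unsigned long long)Addr,
                  (unsigned long long)memToShadowSketch(Addr));
      return 0;
    }
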
@@ -303,20 +319,17 @@ struct AddressSanitizer : public FunctionPass {
const char *getPassName() const override {
return "AddressSanitizerFunctionPass";
}
- void instrumentMop(Instruction *I);
+ void instrumentMop(Instruction *I, bool UseCalls);
void instrumentPointerComparisonOrSubtraction(Instruction *I);
void instrumentAddress(Instruction *OrigIns, Instruction *InsertBefore,
Value *Addr, uint32_t TypeSize, bool IsWrite,
- Value *SizeArgument);
+ Value *SizeArgument, bool UseCalls);
Value *createSlowPathCmp(IRBuilder<> &IRB, Value *AddrLong,
Value *ShadowValue, uint32_t TypeSize);
Instruction *generateCrashCode(Instruction *InsertBefore, Value *Addr,
bool IsWrite, size_t AccessSizeIndex,
Value *SizeArgument);
- bool instrumentMemIntrinsic(MemIntrinsic *MI);
- void instrumentMemIntrinsicParam(Instruction *OrigIns, Value *Addr,
- Value *Size,
- Instruction *InsertBefore, bool IsWrite);
+ void instrumentMemIntrinsic(MemIntrinsic *MI);
Value *memToShadow(Value *Shadow, IRBuilder<> &IRB);
bool runOnFunction(Function &F) override;
bool maybeInsertAsanInitAtFunctionEntry(Function &F);
@@ -349,8 +362,11 @@ struct AddressSanitizer : public FunctionPass {
std::unique_ptr<SpecialCaseList> BL;
// This array is indexed by AccessIsWrite and log2(AccessSize).
Function *AsanErrorCallback[2][kNumberOfAccessSizes];
+ Function *AsanMemoryAccessCallback[2][kNumberOfAccessSizes];
// This array is indexed by AccessIsWrite.
- Function *AsanErrorCallbackSized[2];
+ Function *AsanErrorCallbackSized[2],
+ *AsanMemoryAccessCallbackSized[2];
+ Function *AsanMemmove, *AsanMemcpy, *AsanMemset;
InlineAsm *EmptyAsm;
SetOfDynamicallyInitializedGlobals DynamicallyInitializedGlobals;
@@ -393,6 +409,7 @@ class AddressSanitizerModule : public ModulePass {
Function *AsanUnpoisonGlobals;
Function *AsanRegisterGlobals;
Function *AsanUnregisterGlobals;
+ Function *AsanCovModuleInit;
};
// Stack poisoning does not play well with exception handling.
@@ -443,11 +460,9 @@ struct FunctionStackPoisoner : public InstVisitor<FunctionStackPoisoner> {
bool runOnFunction() {
if (!ClStack) return false;
// Collect alloca, ret, lifetime instructions etc.
- for (df_iterator<BasicBlock*> DI = df_begin(&F.getEntryBlock()),
- DE = df_end(&F.getEntryBlock()); DI != DE; ++DI) {
- BasicBlock *BB = *DI;
+ for (BasicBlock *BB : depth_first(&F.getEntryBlock()))
visit(*BB);
- }
+
if (AllocaVec.empty()) return false;
initializeCallbacks(*F.getParent());
@@ -590,72 +605,54 @@ Value *AddressSanitizer::memToShadow(Value *Shadow, IRBuilder<> &IRB) {
return IRB.CreateAdd(Shadow, ConstantInt::get(IntptrTy, Mapping.Offset));
}
-void AddressSanitizer::instrumentMemIntrinsicParam(
- Instruction *OrigIns,
- Value *Addr, Value *Size, Instruction *InsertBefore, bool IsWrite) {
- IRBuilder<> IRB(InsertBefore);
- if (Size->getType() != IntptrTy)
- Size = IRB.CreateIntCast(Size, IntptrTy, false);
- // Check the first byte.
- instrumentAddress(OrigIns, InsertBefore, Addr, 8, IsWrite, Size);
- // Check the last byte.
- IRB.SetInsertPoint(InsertBefore);
- Value *SizeMinusOne = IRB.CreateSub(Size, ConstantInt::get(IntptrTy, 1));
- Value *AddrLong = IRB.CreatePointerCast(Addr, IntptrTy);
- Value *AddrLast = IRB.CreateAdd(AddrLong, SizeMinusOne);
- instrumentAddress(OrigIns, InsertBefore, AddrLast, 8, IsWrite, Size);
-}
-
// Instrument memset/memmove/memcpy
-bool AddressSanitizer::instrumentMemIntrinsic(MemIntrinsic *MI) {
- Value *Dst = MI->getDest();
- MemTransferInst *MemTran = dyn_cast<MemTransferInst>(MI);
- Value *Src = MemTran ? MemTran->getSource() : 0;
- Value *Length = MI->getLength();
-
- Constant *ConstLength = dyn_cast<Constant>(Length);
- Instruction *InsertBefore = MI;
- if (ConstLength) {
- if (ConstLength->isNullValue()) return false;
- } else {
- // The size is not a constant so it could be zero -- check at run-time.
- IRBuilder<> IRB(InsertBefore);
-
- Value *Cmp = IRB.CreateICmpNE(Length,
- Constant::getNullValue(Length->getType()));
- InsertBefore = SplitBlockAndInsertIfThen(Cmp, InsertBefore, false);
+void AddressSanitizer::instrumentMemIntrinsic(MemIntrinsic *MI) {
+ IRBuilder<> IRB(MI);
+ if (isa<MemTransferInst>(MI)) {
+ IRB.CreateCall3(
+ isa<MemMoveInst>(MI) ? AsanMemmove : AsanMemcpy,
+ IRB.CreatePointerCast(MI->getOperand(0), IRB.getInt8PtrTy()),
+ IRB.CreatePointerCast(MI->getOperand(1), IRB.getInt8PtrTy()),
+ IRB.CreateIntCast(MI->getOperand(2), IntptrTy, false));
+ } else if (isa<MemSetInst>(MI)) {
+ IRB.CreateCall3(
+ AsanMemset,
+ IRB.CreatePointerCast(MI->getOperand(0), IRB.getInt8PtrTy()),
+ IRB.CreateIntCast(MI->getOperand(1), IRB.getInt32Ty(), false),
+ IRB.CreateIntCast(MI->getOperand(2), IntptrTy, false));
}
-
- instrumentMemIntrinsicParam(MI, Dst, Length, InsertBefore, true);
- if (Src)
- instrumentMemIntrinsicParam(MI, Src, Length, InsertBefore, false);
- return true;
+ MI->eraseFromParent();
}
// If I is an interesting memory access, return the PointerOperand
-// and set IsWrite. Otherwise return NULL.
-static Value *isInterestingMemoryAccess(Instruction *I, bool *IsWrite) {
+// and set IsWrite/Alignment. Otherwise return NULL.
+static Value *isInterestingMemoryAccess(Instruction *I, bool *IsWrite,
+ unsigned *Alignment) {
if (LoadInst *LI = dyn_cast<LoadInst>(I)) {
- if (!ClInstrumentReads) return NULL;
+ if (!ClInstrumentReads) return nullptr;
*IsWrite = false;
+ *Alignment = LI->getAlignment();
return LI->getPointerOperand();
}
if (StoreInst *SI = dyn_cast<StoreInst>(I)) {
- if (!ClInstrumentWrites) return NULL;
+ if (!ClInstrumentWrites) return nullptr;
*IsWrite = true;
+ *Alignment = SI->getAlignment();
return SI->getPointerOperand();
}
if (AtomicRMWInst *RMW = dyn_cast<AtomicRMWInst>(I)) {
- if (!ClInstrumentAtomics) return NULL;
+ if (!ClInstrumentAtomics) return nullptr;
*IsWrite = true;
+ *Alignment = 0;
return RMW->getPointerOperand();
}
if (AtomicCmpXchgInst *XCHG = dyn_cast<AtomicCmpXchgInst>(I)) {
- if (!ClInstrumentAtomics) return NULL;
+ if (!ClInstrumentAtomics) return nullptr;
*IsWrite = true;
+ *Alignment = 0;
return XCHG->getPointerOperand();
}
- return NULL;
+ return nullptr;
}
static bool isPointerOperand(Value *V) {
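
The rewrite above stops emitting first/last-byte checks around mem intrinsics and instead hands the whole operation to the runtime. At the source level the effect amounts to the sketch below; the declarations only mirror the IR signatures registered in initializeCallbacks further down (pointer, pointer/int, word-sized length), the definitions live in compiler-rt, and copyChecked/clearChecked are illustrative names:

    #include <cstddef>

    extern "C" void *__asan_memcpy(void *Dst, const void *Src, size_t N);
    extern "C" void *__asan_memmove(void *Dst, const void *Src, size_t N);
    extern "C" void *__asan_memset(void *Dst, int C, size_t N);

    // The llvm.mem* intrinsic disappears (MI->eraseFromParent()) and the
    // runtime both performs the operation and checks the accessed ranges.
    void copyChecked(void *Dst, const void *Src, size_t N) {
      __asan_memcpy(Dst, Src, N); // was: memcpy(Dst, Src, N)
    }
    void clearChecked(void *Dst, size_t N) {
      __asan_memset(Dst, 0, N);   // was: memset(Dst, 0, N)
    }
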
@@ -700,9 +697,10 @@ AddressSanitizer::instrumentPointerComparisonOrSubtraction(Instruction *I) {
IRB.CreateCall2(F, Param[0], Param[1]);
}
-void AddressSanitizer::instrumentMop(Instruction *I) {
+void AddressSanitizer::instrumentMop(Instruction *I, bool UseCalls) {
bool IsWrite = false;
- Value *Addr = isInterestingMemoryAccess(I, &IsWrite);
+ unsigned Alignment = 0;
+ Value *Addr = isInterestingMemoryAccess(I, &IsWrite, &Alignment);
assert(Addr);
if (ClOpt && ClOptGlobals) {
if (GlobalVariable *G = dyn_cast<GlobalVariable>(Addr)) {
@@ -737,22 +735,29 @@ void AddressSanitizer::instrumentMop(Instruction *I) {
else
NumInstrumentedReads++;
- // Instrument a 1-, 2-, 4-, 8-, or 16- byte access with one check.
- if (TypeSize == 8 || TypeSize == 16 ||
- TypeSize == 32 || TypeSize == 64 || TypeSize == 128)
- return instrumentAddress(I, I, Addr, TypeSize, IsWrite, 0);
- // Instrument unusual size (but still multiple of 8).
+ unsigned Granularity = 1 << Mapping.Scale;
+ // Instrument a 1-, 2-, 4-, 8-, or 16- byte access with one check
+ // if the data is properly aligned.
+ if ((TypeSize == 8 || TypeSize == 16 || TypeSize == 32 || TypeSize == 64 ||
+ TypeSize == 128) &&
+ (Alignment >= Granularity || Alignment == 0 || Alignment >= TypeSize / 8))
+ return instrumentAddress(I, I, Addr, TypeSize, IsWrite, nullptr, UseCalls);
+ // Instrument unusual size or unusual alignment.
// We cannot do it with a single check, so we do a 1-byte check for the first
// and the last bytes. We call __asan_report_*_n(addr, real_size) to be able
// to report the actual access size.
IRBuilder<> IRB(I);
- Value *LastByte = IRB.CreateIntToPtr(
- IRB.CreateAdd(IRB.CreatePointerCast(Addr, IntptrTy),
- ConstantInt::get(IntptrTy, TypeSize / 8 - 1)),
- OrigPtrTy);
Value *Size = ConstantInt::get(IntptrTy, TypeSize / 8);
- instrumentAddress(I, I, Addr, 8, IsWrite, Size);
- instrumentAddress(I, I, LastByte, 8, IsWrite, Size);
+ Value *AddrLong = IRB.CreatePointerCast(Addr, IntptrTy);
+ if (UseCalls) {
+ IRB.CreateCall2(AsanMemoryAccessCallbackSized[IsWrite], AddrLong, Size);
+ } else {
+ Value *LastByte = IRB.CreateIntToPtr(
+ IRB.CreateAdd(AddrLong, ConstantInt::get(IntptrTy, TypeSize / 8 - 1)),
+ OrigPtrTy);
+ instrumentAddress(I, I, Addr, 8, IsWrite, Size, false);
+ instrumentAddress(I, I, LastByte, 8, IsWrite, Size, false);
+ }
}
// Validate the result of Module::getOrInsertFunction called for an interface
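
The new condition above gates the single-shadow-check fast path on both the access size and the statically known alignment. Restated as plain C++ for clarity, assuming the default 8-byte shadow granularity (Alignment == 0 stands for "unknown/ABI alignment", which the pass accepts); the helper is a sketch, not code from the pass:

    #include <cstdint>

    // True when one shadow check covers the whole access; false means the
    // pass falls back to first/last-byte checks or __asan_{load,store}N.
    static bool singleCheckSuffices(uint32_t TypeSizeInBits, unsigned Alignment,
                                    unsigned Granularity = 8) {
      bool PowerOfTwoSize =
          TypeSizeInBits == 8 || TypeSizeInBits == 16 || TypeSizeInBits == 32 ||
          TypeSizeInBits == 64 || TypeSizeInBits == 128;
      bool AlignedEnough = Alignment == 0 || Alignment >= Granularity ||
                           Alignment >= TypeSizeInBits / 8;
      return PowerOfTwoSize && AlignedEnough;
    }
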
@@ -800,11 +805,18 @@ Value *AddressSanitizer::createSlowPathCmp(IRBuilder<> &IRB, Value *AddrLong,
}
void AddressSanitizer::instrumentAddress(Instruction *OrigIns,
- Instruction *InsertBefore,
- Value *Addr, uint32_t TypeSize,
- bool IsWrite, Value *SizeArgument) {
+ Instruction *InsertBefore, Value *Addr,
+ uint32_t TypeSize, bool IsWrite,
+ Value *SizeArgument, bool UseCalls) {
IRBuilder<> IRB(InsertBefore);
Value *AddrLong = IRB.CreatePointerCast(Addr, IntptrTy);
+ size_t AccessSizeIndex = TypeSizeToSizeIndex(TypeSize);
+
+ if (UseCalls) {
+ IRB.CreateCall(AsanMemoryAccessCallback[IsWrite][AccessSizeIndex],
+ AddrLong);
+ return;
+ }
Type *ShadowTy = IntegerType::get(
*C, std::max(8U, TypeSize >> Mapping.Scale));
@@ -815,9 +827,8 @@ void AddressSanitizer::instrumentAddress(Instruction *OrigIns,
IRB.CreateIntToPtr(ShadowPtr, ShadowPtrTy));
Value *Cmp = IRB.CreateICmpNE(ShadowValue, CmpVal);
- size_t AccessSizeIndex = TypeSizeToSizeIndex(TypeSize);
size_t Granularity = 1 << Mapping.Scale;
- TerminatorInst *CrashTerm = 0;
+ TerminatorInst *CrashTerm = nullptr;
if (ClAlwaysSlowPath || (TypeSize < 8 * Granularity)) {
TerminatorInst *CheckTerm =
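
For context, the branch structure being built in instrumentAddress corresponds to the usual ASan shadow check. A plain-C++ rendering of the predicate it implements, assuming 8-byte granularity (a sketch of the semantics, not the emitted IR; the function name is illustrative):

    #include <cstdint>

    // ShadowByte semantics: 0 means the whole 8-byte granule is addressable,
    // a small positive k means only its first k bytes are, and negative
    // values mark redzones and other poisoned memory.
    static bool accessIsPoisoned(uintptr_t Addr, uint32_t AccessSizeInBytes,
                                 int8_t ShadowByte) {
      if (ShadowByte == 0)
        return false; // fast path: nothing to report
      // Slow path (createSlowPathCmp): signed compare against the last byte
      // the access touches inside its granule.
      int8_t LastAccessedByte =
          static_cast<int8_t>((Addr & 7) + AccessSizeInBytes - 1);
      return LastAccessedByte >= ShadowByte;
    }

For accesses of at least one granule the emitted code skips the slow path and reports on any non-zero shadow byte, which gives the same answer as the sketch; the extra compare is only needed for sub-granule accesses, hence the TypeSize < 8 * Granularity condition above.
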
@@ -842,8 +853,29 @@ void AddressSanitizer::instrumentAddress(Instruction *OrigIns,
void AddressSanitizerModule::createInitializerPoisonCalls(
Module &M, GlobalValue *ModuleName) {
- // We do all of our poisoning and unpoisoning within _GLOBAL__I_a.
- Function *GlobalInit = M.getFunction("_GLOBAL__I_a");
+ // We do all of our poisoning and unpoisoning within a global constructor.
+ // These are called _GLOBAL__(sub_)?I_.*.
+ // TODO: Consider looking through the functions in
+ // M.getGlobalVariable("llvm.global_ctors") instead of using this stringly
+ // typed approach.
+ Function *GlobalInit = nullptr;
+ for (auto &F : M.getFunctionList()) {
+ StringRef FName = F.getName();
+
+ const char kGlobalPrefix[] = "_GLOBAL__";
+ if (!FName.startswith(kGlobalPrefix))
+ continue;
+ FName = FName.substr(strlen(kGlobalPrefix));
+
+ const char kOptionalSub[] = "sub_";
+ if (FName.startswith(kOptionalSub))
+ FName = FName.substr(strlen(kOptionalSub));
+
+ if (FName.startswith("I_")) {
+ GlobalInit = &F;
+ break;
+ }
+ }
// If that function is not present, this TU contains no globals, or they have
// all been optimized away
if (!GlobalInit)
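
The loop above is a stringly-typed match, as the TODO admits. Its acceptance rule, pulled out into a small standalone predicate (a sketch using std::string instead of StringRef; the example names in the comments are illustrative):

    #include <string>

    // Accepts "_GLOBAL__I_<suffix>" and "_GLOBAL__sub_I_<suffix>", the names
    // Clang gives the per-TU global initializer functions.
    static bool looksLikeGlobalInitializer(std::string Name) {
      const std::string Prefix = "_GLOBAL__";
      if (Name.compare(0, Prefix.size(), Prefix) != 0)
        return false;
      Name.erase(0, Prefix.size());
      const std::string Sub = "sub_";
      if (Name.compare(0, Sub.size(), Sub) == 0)
        Name.erase(0, Sub.size());
      return Name.compare(0, 2, "I_") == 0;
    }
    // looksLikeGlobalInitializer("_GLOBAL__sub_I_foo.cpp") -> true
    // looksLikeGlobalInitializer("_GLOBAL__D_a")           -> false
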
@@ -858,7 +890,7 @@ void AddressSanitizerModule::createInitializerPoisonCalls(
// Add calls to unpoison all globals before each return instruction.
for (Function::iterator I = GlobalInit->begin(), E = GlobalInit->end();
- I != E; ++I) {
+ I != E; ++I) {
if (ReturnInst *RI = dyn_cast<ReturnInst>(I->getTerminator())) {
CallInst::Create(AsanUnpoisonGlobals, "", RI);
}
@@ -902,8 +934,8 @@ bool AddressSanitizerModule::ShouldInstrumentGlobal(GlobalVariable *G) {
// Ignore the globals from the __OBJC section. The ObjC runtime assumes
// those conform to /usr/lib/objc/runtime.h, so we can't add redzones to
// them.
- if ((Section.find("__OBJC,") == 0) ||
- (Section.find("__DATA, __objc_") == 0)) {
+ if (Section.startswith("__OBJC,") ||
+ Section.startswith("__DATA, __objc_")) {
DEBUG(dbgs() << "Ignoring ObjC runtime global: " << *G << "\n");
return false;
}
@@ -915,16 +947,26 @@ bool AddressSanitizerModule::ShouldInstrumentGlobal(GlobalVariable *G) {
// is placed into __DATA,__cfstring
// Therefore there's no point in placing redzones into __DATA,__cfstring.
// Moreover, it causes the linker to crash on OS X 10.7
- if (Section.find("__DATA,__cfstring") == 0) {
+ if (Section.startswith("__DATA,__cfstring")) {
DEBUG(dbgs() << "Ignoring CFString: " << *G << "\n");
return false;
}
// The linker merges the contents of cstring_literals and removes the
// trailing zeroes.
- if (Section.find("__TEXT,__cstring,cstring_literals") == 0) {
+ if (Section.startswith("__TEXT,__cstring,cstring_literals")) {
DEBUG(dbgs() << "Ignoring a cstring literal: " << *G << "\n");
return false;
}
+
+ // Callbacks put into the CRT initializer/terminator sections
+ // should not be instrumented.
+ // See https://code.google.com/p/address-sanitizer/issues/detail?id=305
+ // and http://msdn.microsoft.com/en-US/en-en/library/bb918180(v=vs.120).aspx
+ if (Section.startswith(".CRT")) {
+ DEBUG(dbgs() << "Ignoring a global initializer callback: " << *G << "\n");
+ return false;
+ }
+
// Globals from llvm.metadata aren't emitted, do not instrument them.
if (Section == "llvm.metadata") return false;
}
@@ -950,6 +992,10 @@ void AddressSanitizerModule::initializeCallbacks(Module &M) {
kAsanUnregisterGlobalsName,
IRB.getVoidTy(), IntptrTy, IntptrTy, NULL));
AsanUnregisterGlobals->setLinkage(Function::ExternalLinkage);
+ AsanCovModuleInit = checkInterfaceFunction(M.getOrInsertFunction(
+ kAsanCovModuleInitName,
+ IRB.getVoidTy(), IntptrTy, NULL));
+ AsanCovModuleInit->setLinkage(Function::ExternalLinkage);
}
// This function replaces all global variables with new variables that have
@@ -980,6 +1026,14 @@ bool AddressSanitizerModule::runOnModule(Module &M) {
GlobalsToChange.push_back(G);
}
+ Function *CtorFunc = M.getFunction(kAsanModuleCtorName);
+ assert(CtorFunc);
+ IRBuilder<> IRB(CtorFunc->getEntryBlock().getTerminator());
+
+ Function *CovFunc = M.getFunction(kAsanCovName);
+ int nCov = CovFunc ? CovFunc->getNumUses() : 0;
+ IRB.CreateCall(AsanCovModuleInit, ConstantInt::get(IntptrTy, nCov));
+
size_t n = GlobalsToChange.size();
if (n == 0) return false;
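
The constructor call added above announces the coverage instrumentation to the runtime before any globals are registered. A rough source-level picture of asan.module_ctor after this change, assuming it still begins with the usual __asan_init_v3 call (declarations only; the definitions are in compiler-rt, and the sketch function name is illustrative):

    #include <cstddef>

    extern "C" void __asan_init_v3();
    extern "C" void __sanitizer_cov_module_init(std::size_t NumCovCallSites);

    // NumCovCallSites is CovFunc->getNumUses() from the hunk above: the
    // number of __sanitizer_cov call sites the pass placed in this module.
    void asanModuleCtorSketch(std::size_t NumCovCallSites) {
      __asan_init_v3();
      __sanitizer_cov_module_init(NumCovCallSites);
    }
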
@@ -996,10 +1050,6 @@ bool AddressSanitizerModule::runOnModule(Module &M) {
IntptrTy, IntptrTy, NULL);
SmallVector<Constant *, 16> Initializers(n);
- Function *CtorFunc = M.getFunction(kAsanModuleCtorName);
- assert(CtorFunc);
- IRBuilder<> IRB(CtorFunc->getEntryBlock().getTerminator());
-
bool HasDynamicallyInitializedGlobals = false;
// We shouldn't merge same module names, as this string serves as unique
@@ -1110,12 +1160,16 @@ void AddressSanitizer::initializeCallbacks(Module &M) {
for (size_t AccessSizeIndex = 0; AccessSizeIndex < kNumberOfAccessSizes;
AccessSizeIndex++) {
// IsWrite and TypeSize are encoded in the function name.
- std::string FunctionName = std::string(kAsanReportErrorTemplate) +
+ std::string Suffix =
(AccessIsWrite ? "store" : "load") + itostr(1 << AccessSizeIndex);
- // If we are merging crash callbacks, they have two parameters.
AsanErrorCallback[AccessIsWrite][AccessSizeIndex] =
- checkInterfaceFunction(M.getOrInsertFunction(
- FunctionName, IRB.getVoidTy(), IntptrTy, NULL));
+ checkInterfaceFunction(
+ M.getOrInsertFunction(kAsanReportErrorTemplate + Suffix,
+ IRB.getVoidTy(), IntptrTy, NULL));
+ AsanMemoryAccessCallback[AccessIsWrite][AccessSizeIndex] =
+ checkInterfaceFunction(
+ M.getOrInsertFunction(ClMemoryAccessCallbackPrefix + Suffix,
+ IRB.getVoidTy(), IntptrTy, NULL));
}
}
AsanErrorCallbackSized[0] = checkInterfaceFunction(M.getOrInsertFunction(
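
The suffix built here is shared by the report callbacks (__asan_report_*) and the new memory-access callbacks. Spelled out as a helper, assuming the default -asan-memory-access-callback-prefix of "__asan_" (the helper itself is a sketch, not pass code):

    #include <string>

    // AccessSizeIndex 0..4 corresponds to 1-, 2-, 4-, 8- and 16-byte accesses.
    std::string accessCallbackName(bool IsWrite, unsigned AccessSizeIndex,
                                   const std::string &Prefix = "__asan_") {
      return Prefix + (IsWrite ? "store" : "load") +
             std::to_string(1u << AccessSizeIndex);
    }
    // accessCallbackName(false, 2) == "__asan_load4"
    // accessCallbackName(true, 3)  == "__asan_store8"
    // The matching report callback is "__asan_report_" + the same suffix.
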
@@ -1123,8 +1177,25 @@ void AddressSanitizer::initializeCallbacks(Module &M) {
AsanErrorCallbackSized[1] = checkInterfaceFunction(M.getOrInsertFunction(
kAsanReportStoreN, IRB.getVoidTy(), IntptrTy, IntptrTy, NULL));
- AsanHandleNoReturnFunc = checkInterfaceFunction(M.getOrInsertFunction(
- kAsanHandleNoReturnName, IRB.getVoidTy(), NULL));
+ AsanMemoryAccessCallbackSized[0] = checkInterfaceFunction(
+ M.getOrInsertFunction(ClMemoryAccessCallbackPrefix + "loadN",
+ IRB.getVoidTy(), IntptrTy, IntptrTy, NULL));
+ AsanMemoryAccessCallbackSized[1] = checkInterfaceFunction(
+ M.getOrInsertFunction(ClMemoryAccessCallbackPrefix + "storeN",
+ IRB.getVoidTy(), IntptrTy, IntptrTy, NULL));
+
+ AsanMemmove = checkInterfaceFunction(M.getOrInsertFunction(
+ ClMemoryAccessCallbackPrefix + "memmove", IRB.getInt8PtrTy(),
+ IRB.getInt8PtrTy(), IRB.getInt8PtrTy(), IntptrTy, NULL));
+ AsanMemcpy = checkInterfaceFunction(M.getOrInsertFunction(
+ ClMemoryAccessCallbackPrefix + "memcpy", IRB.getInt8PtrTy(),
+ IRB.getInt8PtrTy(), IRB.getInt8PtrTy(), IntptrTy, NULL));
+ AsanMemset = checkInterfaceFunction(M.getOrInsertFunction(
+ ClMemoryAccessCallbackPrefix + "memset", IRB.getInt8PtrTy(),
+ IRB.getInt8PtrTy(), IRB.getInt32Ty(), IntptrTy, NULL));
+
+ AsanHandleNoReturnFunc = checkInterfaceFunction(
+ M.getOrInsertFunction(kAsanHandleNoReturnName, IRB.getVoidTy(), NULL));
AsanCovFunction = checkInterfaceFunction(M.getOrInsertFunction(
kAsanCovName, IRB.getVoidTy(), NULL));
AsanPtrCmpFunction = checkInterfaceFunction(M.getOrInsertFunction(
@@ -1142,7 +1213,7 @@ bool AddressSanitizer::doInitialization(Module &M) {
// Initialize the private fields. No one has accessed them before.
DataLayoutPass *DLP = getAnalysisIfAvailable<DataLayoutPass>();
if (!DLP)
- return false;
+ report_fatal_error("data layout missing");
DL = &DLP->getDataLayout();
BL.reset(SpecialCaseList::createOrDie(BlacklistFile));
@@ -1241,7 +1312,8 @@ bool AddressSanitizer::InjectCoverage(Function &F,
const ArrayRef<BasicBlock *> AllBlocks) {
if (!ClCoverage) return false;
- if (ClCoverage == 1) {
+ if (ClCoverage == 1 ||
+ (unsigned)ClCoverageBlockThreshold < AllBlocks.size()) {
InjectCoverageAtBlock(F, F.getEntryBlock());
} else {
for (size_t i = 0, n = AllBlocks.size(); i < n; i++)
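
With the new block threshold, very large functions fall back to entry-block-only coverage even at -asan-coverage=2. The placement rule now reads as follows (a sketch with the default threshold of 1500 baked in; the helper name is illustrative):

    #include <cstddef>

    // True when only the entry block gets a __sanitizer_cov call: either the
    // user asked for level 1, or the function has too many blocks.
    static bool coverEntryBlockOnly(int CoverageLevel, std::size_t NumBlocks,
                                    std::size_t BlockThreshold = 1500) {
      return CoverageLevel == 1 || NumBlocks > BlockThreshold;
    }
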
@@ -1275,6 +1347,7 @@ bool AddressSanitizer::runOnFunction(Function &F) {
SmallVector<Instruction*, 16> PointerComparisonsOrSubtracts;
int NumAllocas = 0;
bool IsWrite;
+ unsigned Alignment;
// Fill the set of memory operations to instrument.
for (Function::iterator FI = F.begin(), FE = F.end();
@@ -1285,7 +1358,7 @@ bool AddressSanitizer::runOnFunction(Function &F) {
for (BasicBlock::iterator BI = FI->begin(), BE = FI->end();
BI != BE; ++BI) {
if (LooksLikeCodeInBug11395(BI)) return false;
- if (Value *Addr = isInterestingMemoryAccess(BI, &IsWrite)) {
+ if (Value *Addr = isInterestingMemoryAccess(BI, &IsWrite, &Alignment)) {
if (ClOpt && ClOptSameTemp) {
if (!TempsToInstrument.insert(Addr))
continue; // We've seen this temp in the current BB.
@@ -1294,7 +1367,7 @@ bool AddressSanitizer::runOnFunction(Function &F) {
isInterestingPointerComparisonOrSubtraction(BI)) {
PointerComparisonsOrSubtracts.push_back(BI);
continue;
- } else if (isa<MemIntrinsic>(BI) && ClMemIntrin) {
+ } else if (isa<MemIntrinsic>(BI)) {
// ok, take it.
} else {
if (isa<AllocaInst>(BI))
@@ -1315,7 +1388,7 @@ bool AddressSanitizer::runOnFunction(Function &F) {
}
}
- Function *UninstrumentedDuplicate = 0;
+ Function *UninstrumentedDuplicate = nullptr;
bool LikelyToInstrument =
!NoReturnCalls.empty() || !ToInstrument.empty() || (NumAllocas > 0);
if (ClKeepUninstrumented && LikelyToInstrument) {
@@ -1326,14 +1399,19 @@ bool AddressSanitizer::runOnFunction(Function &F) {
F.getParent()->getFunctionList().push_back(UninstrumentedDuplicate);
}
+ bool UseCalls = false;
+ if (ClInstrumentationWithCallsThreshold >= 0 &&
+ ToInstrument.size() > (unsigned)ClInstrumentationWithCallsThreshold)
+ UseCalls = true;
+
// Instrument.
int NumInstrumented = 0;
for (size_t i = 0, n = ToInstrument.size(); i != n; i++) {
Instruction *Inst = ToInstrument[i];
if (ClDebugMin < 0 || ClDebugMax < 0 ||
(NumInstrumented >= ClDebugMin && NumInstrumented <= ClDebugMax)) {
- if (isInterestingMemoryAccess(Inst, &IsWrite))
- instrumentMop(Inst);
+ if (isInterestingMemoryAccess(Inst, &IsWrite, &Alignment))
+ instrumentMop(Inst, UseCalls);
else
instrumentMemIntrinsic(cast<MemIntrinsic>(Inst));
}
@@ -1464,12 +1542,23 @@ void FunctionStackPoisoner::SetShadowToStackAfterReturnInlined(
}
}
+static DebugLoc getFunctionEntryDebugLocation(Function &F) {
+ BasicBlock::iterator I = F.getEntryBlock().begin(),
+ E = F.getEntryBlock().end();
+ for (; I != E; ++I)
+ if (!isa<AllocaInst>(I))
+ break;
+ return I->getDebugLoc();
+}
+
void FunctionStackPoisoner::poisonStack() {
int StackMallocIdx = -1;
+ DebugLoc EntryDebugLocation = getFunctionEntryDebugLocation(F);
assert(AllocaVec.size() > 0);
Instruction *InsBefore = AllocaVec[0];
IRBuilder<> IRB(InsBefore);
+ IRB.SetCurrentDebugLocation(EntryDebugLocation);
SmallVector<ASanStackVariableDescription, 16> SVD;
SVD.reserve(AllocaVec.size());
@@ -1493,6 +1582,7 @@ void FunctionStackPoisoner::poisonStack() {
Type *ByteArrayTy = ArrayType::get(IRB.getInt8Ty(), LocalStackSize);
AllocaInst *MyAlloca =
new AllocaInst(ByteArrayTy, "MyAlloca", InsBefore);
+ MyAlloca->setDebugLoc(EntryDebugLocation);
assert((ClRealignStack & (ClRealignStack - 1)) == 0);
size_t FrameAlignment = std::max(L.FrameAlignment, (size_t)ClRealignStack);
MyAlloca->setAlignment(FrameAlignment);
@@ -1513,11 +1603,13 @@ void FunctionStackPoisoner::poisonStack() {
Instruction *Term = SplitBlockAndInsertIfThen(Cmp, InsBefore, false);
BasicBlock *CmpBlock = cast<Instruction>(Cmp)->getParent();
IRBuilder<> IRBIf(Term);
+ IRBIf.SetCurrentDebugLocation(EntryDebugLocation);
LocalStackBase = IRBIf.CreateCall2(
AsanStackMallocFunc[StackMallocIdx],
ConstantInt::get(IntptrTy, LocalStackSize), OrigStackBase);
BasicBlock *SetBlock = cast<Instruction>(LocalStackBase)->getParent();
IRB.SetInsertPoint(InsBefore);
+ IRB.SetCurrentDebugLocation(EntryDebugLocation);
PHINode *Phi = IRB.CreatePHI(IntptrTy, 2);
Phi->addIncoming(OrigStackBase, CmpBlock);
Phi->addIncoming(LocalStackBase, SetBlock);
@@ -1654,7 +1746,7 @@ void FunctionStackPoisoner::poisonAlloca(Value *V, uint64_t Size,
AllocaInst *FunctionStackPoisoner::findAllocaForValue(Value *V) {
if (AllocaInst *AI = dyn_cast<AllocaInst>(V))
// We're interested only in allocas we can handle.
- return isInterestingAlloca(*AI) ? AI : 0;
+ return isInterestingAlloca(*AI) ? AI : nullptr;
// See if we've already calculated (or started to calculate) alloca for a
// given value.
AllocaForValueMapTy::iterator I = AllocaForValue.find(V);
@@ -1662,8 +1754,8 @@ AllocaInst *FunctionStackPoisoner::findAllocaForValue(Value *V) {
return I->second;
// Store 0 while we're calculating alloca for value V to avoid
// infinite recursion if the value references itself.
- AllocaForValue[V] = 0;
- AllocaInst *Res = 0;
+ AllocaForValue[V] = nullptr;
+ AllocaInst *Res = nullptr;
if (CastInst *CI = dyn_cast<CastInst>(V))
Res = findAllocaForValue(CI->getOperand(0));
else if (PHINode *PN = dyn_cast<PHINode>(V)) {
@@ -1673,12 +1765,12 @@ AllocaInst *FunctionStackPoisoner::findAllocaForValue(Value *V) {
if (IncValue == PN) continue;
AllocaInst *IncValueAI = findAllocaForValue(IncValue);
// AI for incoming values should exist and should all be equal.
- if (IncValueAI == 0 || (Res != 0 && IncValueAI != Res))
- return 0;
+ if (IncValueAI == nullptr || (Res != nullptr && IncValueAI != Res))
+ return nullptr;
Res = IncValueAI;
}
}
- if (Res != 0)
+ if (Res)
AllocaForValue[V] = Res;
return Res;
}