Diffstat (limited to 'lib/Transforms/Instrumentation')
-rw-r--r--  lib/Transforms/Instrumentation/AddressSanitizer.cpp  | 818
-rw-r--r--  lib/Transforms/Instrumentation/BoundsChecking.cpp    |  26
-rw-r--r--  lib/Transforms/Instrumentation/DataFlowSanitizer.cpp |  24
-rw-r--r--  lib/Transforms/Instrumentation/GCOVProfiling.cpp     |  39
-rw-r--r--  lib/Transforms/Instrumentation/MemorySanitizer.cpp   |  57
-rw-r--r--  lib/Transforms/Instrumentation/SanitizerCoverage.cpp | 157
-rw-r--r--  lib/Transforms/Instrumentation/ThreadSanitizer.cpp   |  62
7 files changed, 686 insertions, 497 deletions
diff --git a/lib/Transforms/Instrumentation/AddressSanitizer.cpp b/lib/Transforms/Instrumentation/AddressSanitizer.cpp index 882aab0..978c857 100644 --- a/lib/Transforms/Instrumentation/AddressSanitizer.cpp +++ b/lib/Transforms/Instrumentation/AddressSanitizer.cpp @@ -24,6 +24,9 @@ #include "llvm/ADT/Statistic.h" #include "llvm/ADT/StringExtras.h" #include "llvm/ADT/Triple.h" +#include "llvm/Analysis/MemoryBuiltins.h" +#include "llvm/Analysis/TargetLibraryInfo.h" +#include "llvm/Analysis/ValueTracking.h" #include "llvm/IR/CallSite.h" #include "llvm/IR/DIBuilder.h" #include "llvm/IR/DataLayout.h" @@ -43,12 +46,14 @@ #include "llvm/Support/Debug.h" #include "llvm/Support/Endian.h" #include "llvm/Support/SwapByteOrder.h" +#include "llvm/Support/raw_ostream.h" #include "llvm/Transforms/Scalar.h" #include "llvm/Transforms/Utils/ASanStackFrameLayout.h" #include "llvm/Transforms/Utils/BasicBlockUtils.h" #include "llvm/Transforms/Utils/Cloning.h" #include "llvm/Transforms/Utils/Local.h" #include "llvm/Transforms/Utils/ModuleUtils.h" +#include "llvm/Transforms/Utils/PromoteMemToReg.h" #include <algorithm> #include <string> #include <system_error> @@ -70,17 +75,15 @@ static const uint64_t kFreeBSD_ShadowOffset32 = 1ULL << 30; static const uint64_t kFreeBSD_ShadowOffset64 = 1ULL << 46; static const uint64_t kWindowsShadowOffset32 = 3ULL << 28; -static const size_t kMinStackMallocSize = 1 << 6; // 64B +static const size_t kMinStackMallocSize = 1 << 6; // 64B static const size_t kMaxStackMallocSize = 1 << 16; // 64K static const uintptr_t kCurrentStackFrameMagic = 0x41B58AB3; static const uintptr_t kRetiredStackFrameMagic = 0x45E0360E; static const char *const kAsanModuleCtorName = "asan.module_ctor"; static const char *const kAsanModuleDtorName = "asan.module_dtor"; -static const uint64_t kAsanCtorAndDtorPriority = 1; +static const uint64_t kAsanCtorAndDtorPriority = 1; static const char *const kAsanReportErrorTemplate = "__asan_report_"; -static const char *const kAsanReportLoadN = "__asan_report_load_n"; -static const char *const kAsanReportStoreN = "__asan_report_store_n"; static const char *const kAsanRegisterGlobalsName = "__asan_register_globals"; static const char *const kAsanUnregisterGlobalsName = "__asan_unregister_globals"; @@ -90,7 +93,7 @@ static const char *const kAsanInitName = "__asan_init_v5"; static const char *const kAsanPtrCmp = "__sanitizer_ptr_cmp"; static const char *const kAsanPtrSub = "__sanitizer_ptr_sub"; static const char *const kAsanHandleNoReturnName = "__asan_handle_no_return"; -static const int kMaxAsanStackMallocSizeClass = 10; +static const int kMaxAsanStackMallocSizeClass = 10; static const char *const kAsanStackMallocNameTemplate = "__asan_stack_malloc_"; static const char *const kAsanStackFreeNameTemplate = "__asan_stack_free_"; static const char *const kAsanGenPrefix = "__asan_gen_"; @@ -103,10 +106,6 @@ static const char *const kAsanUnpoisonStackMemoryName = static const char *const kAsanOptionDetectUAR = "__asan_option_detect_stack_use_after_return"; -#ifndef NDEBUG -static const int kAsanStackAfterReturnMagic = 0xf5; -#endif - // Accesses sizes are powers of two: 1, 2, 4, 8, 16. static const size_t kNumberOfAccessSizes = 5; @@ -120,84 +119,110 @@ static const unsigned kAsanAllocaPartialVal2 = 0x000000cbU; // This flag may need to be replaced with -f[no-]asan-reads. 
static cl::opt<bool> ClInstrumentReads("asan-instrument-reads", - cl::desc("instrument read instructions"), cl::Hidden, cl::init(true)); -static cl::opt<bool> ClInstrumentWrites("asan-instrument-writes", - cl::desc("instrument write instructions"), cl::Hidden, cl::init(true)); -static cl::opt<bool> ClInstrumentAtomics("asan-instrument-atomics", - cl::desc("instrument atomic instructions (rmw, cmpxchg)"), - cl::Hidden, cl::init(true)); -static cl::opt<bool> ClAlwaysSlowPath("asan-always-slow-path", - cl::desc("use instrumentation with slow path for all accesses"), - cl::Hidden, cl::init(false)); + cl::desc("instrument read instructions"), + cl::Hidden, cl::init(true)); +static cl::opt<bool> ClInstrumentWrites( + "asan-instrument-writes", cl::desc("instrument write instructions"), + cl::Hidden, cl::init(true)); +static cl::opt<bool> ClInstrumentAtomics( + "asan-instrument-atomics", + cl::desc("instrument atomic instructions (rmw, cmpxchg)"), cl::Hidden, + cl::init(true)); +static cl::opt<bool> ClAlwaysSlowPath( + "asan-always-slow-path", + cl::desc("use instrumentation with slow path for all accesses"), cl::Hidden, + cl::init(false)); // This flag limits the number of instructions to be instrumented // in any given BB. Normally, this should be set to unlimited (INT_MAX), // but due to http://llvm.org/bugs/show_bug.cgi?id=12652 we temporary // set it to 10000. -static cl::opt<int> ClMaxInsnsToInstrumentPerBB("asan-max-ins-per-bb", - cl::init(10000), - cl::desc("maximal number of instructions to instrument in any given BB"), - cl::Hidden); +static cl::opt<int> ClMaxInsnsToInstrumentPerBB( + "asan-max-ins-per-bb", cl::init(10000), + cl::desc("maximal number of instructions to instrument in any given BB"), + cl::Hidden); // This flag may need to be replaced with -f[no]asan-stack. -static cl::opt<bool> ClStack("asan-stack", - cl::desc("Handle stack memory"), cl::Hidden, cl::init(true)); +static cl::opt<bool> ClStack("asan-stack", cl::desc("Handle stack memory"), + cl::Hidden, cl::init(true)); static cl::opt<bool> ClUseAfterReturn("asan-use-after-return", - cl::desc("Check return-after-free"), cl::Hidden, cl::init(true)); + cl::desc("Check return-after-free"), + cl::Hidden, cl::init(true)); // This flag may need to be replaced with -f[no]asan-globals. 
static cl::opt<bool> ClGlobals("asan-globals", - cl::desc("Handle global objects"), cl::Hidden, cl::init(true)); + cl::desc("Handle global objects"), cl::Hidden, + cl::init(true)); static cl::opt<bool> ClInitializers("asan-initialization-order", - cl::desc("Handle C++ initializer order"), cl::Hidden, cl::init(true)); -static cl::opt<bool> ClInvalidPointerPairs("asan-detect-invalid-pointer-pair", - cl::desc("Instrument <, <=, >, >=, - with pointer operands"), - cl::Hidden, cl::init(false)); -static cl::opt<unsigned> ClRealignStack("asan-realign-stack", - cl::desc("Realign stack to the value of this flag (power of two)"), - cl::Hidden, cl::init(32)); + cl::desc("Handle C++ initializer order"), + cl::Hidden, cl::init(true)); +static cl::opt<bool> ClInvalidPointerPairs( + "asan-detect-invalid-pointer-pair", + cl::desc("Instrument <, <=, >, >=, - with pointer operands"), cl::Hidden, + cl::init(false)); +static cl::opt<unsigned> ClRealignStack( + "asan-realign-stack", + cl::desc("Realign stack to the value of this flag (power of two)"), + cl::Hidden, cl::init(32)); static cl::opt<int> ClInstrumentationWithCallsThreshold( "asan-instrumentation-with-call-threshold", - cl::desc("If the function being instrumented contains more than " - "this number of memory accesses, use callbacks instead of " - "inline checks (-1 means never use callbacks)."), - cl::Hidden, cl::init(7000)); + cl::desc( + "If the function being instrumented contains more than " + "this number of memory accesses, use callbacks instead of " + "inline checks (-1 means never use callbacks)."), + cl::Hidden, cl::init(7000)); static cl::opt<std::string> ClMemoryAccessCallbackPrefix( - "asan-memory-access-callback-prefix", - cl::desc("Prefix for memory access callbacks"), cl::Hidden, - cl::init("__asan_")); + "asan-memory-access-callback-prefix", + cl::desc("Prefix for memory access callbacks"), cl::Hidden, + cl::init("__asan_")); static cl::opt<bool> ClInstrumentAllocas("asan-instrument-allocas", - cl::desc("instrument dynamic allocas"), cl::Hidden, cl::init(false)); + cl::desc("instrument dynamic allocas"), + cl::Hidden, cl::init(false)); +static cl::opt<bool> ClSkipPromotableAllocas( + "asan-skip-promotable-allocas", + cl::desc("Do not instrument promotable allocas"), cl::Hidden, + cl::init(true)); // These flags allow to change the shadow mapping. // The shadow mapping looks like // Shadow = (Mem >> scale) + (1 << offset_log) static cl::opt<int> ClMappingScale("asan-mapping-scale", - cl::desc("scale of asan shadow mapping"), cl::Hidden, cl::init(0)); + cl::desc("scale of asan shadow mapping"), + cl::Hidden, cl::init(0)); // Optimization flags. Not user visible, used mostly for testing // and benchmarking the tool. 
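Note: ClMappingScale above controls the standard ASan shadow scheme, Shadow = (Mem >> Scale) + Offset; memToShadow() further down in this patch emits exactly this arithmetic as IR. A minimal plain-C++ sketch (the helper name is hypothetical; Scale = 3 and the kFreeBSD_ShadowOffset64 value from the constants above are assumed):

#include <cstdint>
#include <cstdio>

// One shadow byte covers 2^Scale (= 8) application bytes.
static const uint64_t kShadowScale  = 3;
static const uint64_t kShadowOffset = 1ULL << 46;  // kFreeBSD_ShadowOffset64

static uint64_t memToShadowSketch(uint64_t Addr) {
  return (Addr >> kShadowScale) + kShadowOffset;
}

int main() {
  uint64_t Addr = 0x7fff12345678ULL;
  printf("shadow(0x%llx) = 0x%llx\n", (unsigned long long)Addr,
         (unsigned long long)memToShadowSketch(Addr));
}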
-static cl::opt<bool> ClOpt("asan-opt", - cl::desc("Optimize instrumentation"), cl::Hidden, cl::init(true)); -static cl::opt<bool> ClOptSameTemp("asan-opt-same-temp", - cl::desc("Instrument the same temp just once"), cl::Hidden, - cl::init(true)); +static cl::opt<bool> ClOpt("asan-opt", cl::desc("Optimize instrumentation"), + cl::Hidden, cl::init(true)); +static cl::opt<bool> ClOptSameTemp( + "asan-opt-same-temp", cl::desc("Instrument the same temp just once"), + cl::Hidden, cl::init(true)); static cl::opt<bool> ClOptGlobals("asan-opt-globals", - cl::desc("Don't instrument scalar globals"), cl::Hidden, cl::init(true)); + cl::desc("Don't instrument scalar globals"), + cl::Hidden, cl::init(true)); +static cl::opt<bool> ClOptStack( + "asan-opt-stack", cl::desc("Don't instrument scalar stack variables"), + cl::Hidden, cl::init(false)); -static cl::opt<bool> ClCheckLifetime("asan-check-lifetime", - cl::desc("Use llvm.lifetime intrinsics to insert extra checks"), - cl::Hidden, cl::init(false)); +static cl::opt<bool> ClCheckLifetime( + "asan-check-lifetime", + cl::desc("Use llvm.lifetime intrinsics to insert extra checks"), cl::Hidden, + cl::init(false)); static cl::opt<bool> ClDynamicAllocaStack( "asan-stack-dynamic-alloca", cl::desc("Use dynamic alloca to represent stack variables"), cl::Hidden, cl::init(true)); +static cl::opt<uint32_t> ClForceExperiment( + "asan-force-experiment", + cl::desc("Force optimization experiment (for testing)"), cl::Hidden, + cl::init(0)); + // Debug flags. static cl::opt<int> ClDebug("asan-debug", cl::desc("debug"), cl::Hidden, cl::init(0)); static cl::opt<int> ClDebugStack("asan-debug-stack", cl::desc("debug stack"), cl::Hidden, cl::init(0)); -static cl::opt<std::string> ClDebugFunc("asan-debug-func", - cl::Hidden, cl::desc("Debug func")); +static cl::opt<std::string> ClDebugFunc("asan-debug-func", cl::Hidden, + cl::desc("Debug func")); static cl::opt<int> ClDebugMin("asan-debug-min", cl::desc("Debug min inst"), cl::Hidden, cl::init(-1)); static cl::opt<int> ClDebugMax("asan-debug-max", cl::desc("Debug man inst"), @@ -207,10 +232,10 @@ STATISTIC(NumInstrumentedReads, "Number of instrumented reads"); STATISTIC(NumInstrumentedWrites, "Number of instrumented writes"); STATISTIC(NumInstrumentedDynamicAllocas, "Number of instrumented dynamic allocas"); -STATISTIC(NumOptimizedAccessesToGlobalArray, - "Number of optimized accesses to global arrays"); STATISTIC(NumOptimizedAccessesToGlobalVar, "Number of optimized accesses to global vars"); +STATISTIC(NumOptimizedAccessesToStackVar, + "Number of optimized accesses to stack vars"); namespace { /// Frontend-provided metadata for source location. @@ -238,9 +263,7 @@ struct LocationMetadata { class GlobalsMetadata { public: struct Entry { - Entry() - : SourceLoc(), Name(), IsDynInit(false), - IsBlacklisted(false) {} + Entry() : SourceLoc(), Name(), IsDynInit(false), IsBlacklisted(false) {} LocationMetadata SourceLoc; StringRef Name; bool IsDynInit; @@ -249,19 +272,17 @@ class GlobalsMetadata { GlobalsMetadata() : inited_(false) {} - void init(Module& M) { + void init(Module &M) { assert(!inited_); inited_ = true; NamedMDNode *Globals = M.getNamedMetadata("llvm.asan.globals"); - if (!Globals) - return; + if (!Globals) return; for (auto MDN : Globals->operands()) { // Metadata node contains the global and the fields of "Entry". assert(MDN->getNumOperands() == 5); auto *GV = mdconst::extract_or_null<GlobalVariable>(MDN->getOperand(0)); // The optimizer may optimize away a global entirely. 
- if (!GV) - continue; + if (!GV) continue; // We can already have an entry for GV if it was merged with another // global. Entry &E = Entries[GV]; @@ -286,7 +307,7 @@ class GlobalsMetadata { private: bool inited_; - DenseMap<GlobalVariable*, Entry> Entries; + DenseMap<GlobalVariable *, Entry> Entries; }; /// This struct defines the shadow mapping using the rule: @@ -371,17 +392,36 @@ struct AddressSanitizer : public FunctionPass { } void getAnalysisUsage(AnalysisUsage &AU) const override { AU.addRequired<DominatorTreeWrapperPass>(); + AU.addRequired<TargetLibraryInfoWrapperPass>(); } - void instrumentMop(Instruction *I, bool UseCalls); + uint64_t getAllocaSizeInBytes(AllocaInst *AI) const { + Type *Ty = AI->getAllocatedType(); + uint64_t SizeInBytes = + AI->getModule()->getDataLayout().getTypeAllocSize(Ty); + return SizeInBytes; + } + /// Check if we want (and can) handle this alloca. + bool isInterestingAlloca(AllocaInst &AI) const; + /// If it is an interesting memory access, return the PointerOperand + /// and set IsWrite/Alignment. Otherwise return nullptr. + Value *isInterestingMemoryAccess(Instruction *I, bool *IsWrite, + uint64_t *TypeSize, + unsigned *Alignment) const; + void instrumentMop(ObjectSizeOffsetVisitor &ObjSizeVis, Instruction *I, + bool UseCalls, const DataLayout &DL); void instrumentPointerComparisonOrSubtraction(Instruction *I); void instrumentAddress(Instruction *OrigIns, Instruction *InsertBefore, Value *Addr, uint32_t TypeSize, bool IsWrite, - Value *SizeArgument, bool UseCalls); + Value *SizeArgument, bool UseCalls, uint32_t Exp); + void instrumentUnusualSizeOrAlignment(Instruction *I, Value *Addr, + uint32_t TypeSize, bool IsWrite, + Value *SizeArgument, bool UseCalls, + uint32_t Exp); Value *createSlowPathCmp(IRBuilder<> &IRB, Value *AddrLong, Value *ShadowValue, uint32_t TypeSize); Instruction *generateCrashCode(Instruction *InsertBefore, Value *Addr, bool IsWrite, size_t AccessSizeIndex, - Value *SizeArgument); + Value *SizeArgument, uint32_t Exp); void instrumentMemIntrinsic(MemIntrinsic *MI); Value *memToShadow(Value *Shadow, IRBuilder<> &IRB); bool runOnFunction(Function &F) override; @@ -396,9 +436,10 @@ struct AddressSanitizer : public FunctionPass { bool LooksLikeCodeInBug11395(Instruction *I); bool GlobalIsLinkerInitialized(GlobalVariable *G); + bool isSafeAccess(ObjectSizeOffsetVisitor &ObjSizeVis, Value *Addr, + uint64_t TypeSize) const; LLVMContext *C; - const DataLayout *DL; Triple TargetTriple; int LongSize; Type *IntptrTy; @@ -408,12 +449,12 @@ struct AddressSanitizer : public FunctionPass { Function *AsanInitFunction; Function *AsanHandleNoReturnFunc; Function *AsanPtrCmpFunction, *AsanPtrSubFunction; - // This array is indexed by AccessIsWrite and log2(AccessSize). - Function *AsanErrorCallback[2][kNumberOfAccessSizes]; - Function *AsanMemoryAccessCallback[2][kNumberOfAccessSizes]; - // This array is indexed by AccessIsWrite. - Function *AsanErrorCallbackSized[2], - *AsanMemoryAccessCallbackSized[2]; + // This array is indexed by AccessIsWrite, Experiment and log2(AccessSize). + Function *AsanErrorCallback[2][2][kNumberOfAccessSizes]; + Function *AsanMemoryAccessCallback[2][2][kNumberOfAccessSizes]; + // This array is indexed by AccessIsWrite and Experiment. 
+ Function *AsanErrorCallbackSized[2][2]; + Function *AsanMemoryAccessCallbackSized[2][2]; Function *AsanMemmove, *AsanMemcpy, *AsanMemset; InlineAsm *EmptyAsm; GlobalsMetadata GlobalsMD; @@ -426,9 +467,7 @@ class AddressSanitizerModule : public ModulePass { AddressSanitizerModule() : ModulePass(ID) {} bool runOnModule(Module &M) override; static char ID; // Pass identification, replacement for typeid - const char *getPassName() const override { - return "AddressSanitizerModule"; - } + const char *getPassName() const override { return "AddressSanitizerModule"; } private: void initializeCallbacks(Module &M); @@ -444,7 +483,6 @@ class AddressSanitizerModule : public ModulePass { GlobalsMetadata GlobalsMD; Type *IntptrTy; LLVMContext *C; - const DataLayout *DL; Triple TargetTriple; ShadowMapping Mapping; Function *AsanPoisonGlobals; @@ -471,12 +509,12 @@ struct FunctionStackPoisoner : public InstVisitor<FunctionStackPoisoner> { Type *IntptrPtrTy; ShadowMapping Mapping; - SmallVector<AllocaInst*, 16> AllocaVec; - SmallVector<Instruction*, 8> RetVec; + SmallVector<AllocaInst *, 16> AllocaVec; + SmallVector<Instruction *, 8> RetVec; unsigned StackAlignment; Function *AsanStackMallocFunc[kMaxAsanStackMallocSizeClass + 1], - *AsanStackFreeFunc[kMaxAsanStackMallocSizeClass + 1]; + *AsanStackFreeFunc[kMaxAsanStackMallocSizeClass + 1]; Function *AsanPoisonStackMemoryFunc, *AsanUnpoisonStackMemoryFunc; // Stores a place and arguments of poisoning/unpoisoning call for alloca. @@ -497,33 +535,38 @@ struct FunctionStackPoisoner : public InstVisitor<FunctionStackPoisoner> { Value *LeftRzAddr; Value *RightRzAddr; bool Poison; - explicit DynamicAllocaCall(AllocaInst *AI, - Value *LeftRzAddr = nullptr, - Value *RightRzAddr = nullptr) - : AI(AI), LeftRzAddr(LeftRzAddr), RightRzAddr(RightRzAddr), Poison(true) - {} + explicit DynamicAllocaCall(AllocaInst *AI, Value *LeftRzAddr = nullptr, + Value *RightRzAddr = nullptr) + : AI(AI), + LeftRzAddr(LeftRzAddr), + RightRzAddr(RightRzAddr), + Poison(true) {} }; SmallVector<DynamicAllocaCall, 1> DynamicAllocaVec; // Maps Value to an AllocaInst from which the Value is originated. - typedef DenseMap<Value*, AllocaInst*> AllocaForValueMapTy; + typedef DenseMap<Value *, AllocaInst *> AllocaForValueMapTy; AllocaForValueMapTy AllocaForValue; bool HasNonEmptyInlineAsm; std::unique_ptr<CallInst> EmptyInlineAsm; FunctionStackPoisoner(Function &F, AddressSanitizer &ASan) - : F(F), ASan(ASan), DIB(*F.getParent(), /*AllowUnresolved*/ false), - C(ASan.C), IntptrTy(ASan.IntptrTy), - IntptrPtrTy(PointerType::get(IntptrTy, 0)), Mapping(ASan.Mapping), - StackAlignment(1 << Mapping.Scale), HasNonEmptyInlineAsm(false), + : F(F), + ASan(ASan), + DIB(*F.getParent(), /*AllowUnresolved*/ false), + C(ASan.C), + IntptrTy(ASan.IntptrTy), + IntptrPtrTy(PointerType::get(IntptrTy, 0)), + Mapping(ASan.Mapping), + StackAlignment(1 << Mapping.Scale), + HasNonEmptyInlineAsm(false), EmptyInlineAsm(CallInst::Create(ASan.EmptyAsm)) {} bool runOnFunction() { if (!ClStack) return false; // Collect alloca, ret, lifetime instructions etc. - for (BasicBlock *BB : depth_first(&F.getEntryBlock())) - visit(*BB); + for (BasicBlock *BB : depth_first(&F.getEntryBlock())) visit(*BB); if (AllocaVec.empty() && DynamicAllocaVec.empty()) return false; @@ -544,33 +587,31 @@ struct FunctionStackPoisoner : public InstVisitor<FunctionStackPoisoner> { // ----------------------- Visitors. /// \brief Collect all Ret instructions. 
- void visitReturnInst(ReturnInst &RI) { - RetVec.push_back(&RI); - } + void visitReturnInst(ReturnInst &RI) { RetVec.push_back(&RI); } // Unpoison dynamic allocas redzones. void unpoisonDynamicAlloca(DynamicAllocaCall &AllocaCall) { - if (!AllocaCall.Poison) - return; + if (!AllocaCall.Poison) return; for (auto Ret : RetVec) { IRBuilder<> IRBRet(Ret); PointerType *Int32PtrTy = PointerType::getUnqual(IRBRet.getInt32Ty()); Value *Zero = Constant::getNullValue(IRBRet.getInt32Ty()); Value *PartialRzAddr = IRBRet.CreateSub(AllocaCall.RightRzAddr, ConstantInt::get(IntptrTy, 4)); - IRBRet.CreateStore(Zero, IRBRet.CreateIntToPtr(AllocaCall.LeftRzAddr, - Int32PtrTy)); - IRBRet.CreateStore(Zero, IRBRet.CreateIntToPtr(PartialRzAddr, - Int32PtrTy)); - IRBRet.CreateStore(Zero, IRBRet.CreateIntToPtr(AllocaCall.RightRzAddr, - Int32PtrTy)); + IRBRet.CreateStore( + Zero, IRBRet.CreateIntToPtr(AllocaCall.LeftRzAddr, Int32PtrTy)); + IRBRet.CreateStore(Zero, + IRBRet.CreateIntToPtr(PartialRzAddr, Int32PtrTy)); + IRBRet.CreateStore( + Zero, IRBRet.CreateIntToPtr(AllocaCall.RightRzAddr, Int32PtrTy)); } } // Right shift for BigEndian and left shift for LittleEndian. Value *shiftAllocaMagic(Value *Val, IRBuilder<> &IRB, Value *Shift) { - return ASan.DL->isLittleEndian() ? IRB.CreateShl(Val, Shift) - : IRB.CreateLShr(Val, Shift); + auto &DL = F.getParent()->getDataLayout(); + return DL.isLittleEndian() ? IRB.CreateShl(Val, Shift) + : IRB.CreateLShr(Val, Shift); } // Compute PartialRzMagic for dynamic alloca call. Since we don't know the @@ -599,7 +640,7 @@ struct FunctionStackPoisoner : public InstVisitor<FunctionStackPoisoner> { /// \brief Collect Alloca instructions we want (and can) handle. void visitAllocaInst(AllocaInst &AI) { - if (!isInterestingAlloca(AI)) return; + if (!ASan.isInterestingAlloca(AI)) return; StackAlignment = std::max(StackAlignment, AI.getAlignment()); if (isDynamicAlloca(AI)) @@ -613,8 +654,7 @@ struct FunctionStackPoisoner : public InstVisitor<FunctionStackPoisoner> { void visitIntrinsicInst(IntrinsicInst &II) { if (!ClCheckLifetime) return; Intrinsic::ID ID = II.getIntrinsicID(); - if (ID != Intrinsic::lifetime_start && - ID != Intrinsic::lifetime_end) + if (ID != Intrinsic::lifetime_start && ID != Intrinsic::lifetime_end) return; // Found lifetime intrinsic, add ASan instrumentation if necessary. ConstantInt *Size = dyn_cast<ConstantInt>(II.getArgOperand(0)); @@ -644,8 +684,7 @@ struct FunctionStackPoisoner : public InstVisitor<FunctionStackPoisoner> { bool doesDominateAllExits(const Instruction *I) const { for (auto Ret : RetVec) { - if (!ASan.getDominatorTree().dominates(I, Ret)) - return false; + if (!ASan.getDominatorTree().dominates(I, Ret)) return false; } return true; } @@ -653,19 +692,6 @@ struct FunctionStackPoisoner : public InstVisitor<FunctionStackPoisoner> { bool isDynamicAlloca(AllocaInst &AI) const { return AI.isArrayAllocation() || !AI.isStaticAlloca(); } - - // Check if we want (and can) handle this alloca. - bool isInterestingAlloca(AllocaInst &AI) const { - return (AI.getAllocatedType()->isSized() && - // alloca() may be called with 0 size, ignore it. - getAllocaSizeInBytes(&AI) > 0); - } - - uint64_t getAllocaSizeInBytes(AllocaInst *AI) const { - Type *Ty = AI->getAllocatedType(); - uint64_t SizeInBytes = ASan.DL->getTypeAllocSize(Ty); - return SizeInBytes; - } /// Finds alloca where the value comes from. 
AllocaInst *findAllocaForValue(Value *V); void poisonRedZones(ArrayRef<uint8_t> ShadowBytes, IRBuilder<> &IRB, @@ -683,21 +709,25 @@ struct FunctionStackPoisoner : public InstVisitor<FunctionStackPoisoner> { } // namespace char AddressSanitizer::ID = 0; -INITIALIZE_PASS_BEGIN(AddressSanitizer, "asan", - "AddressSanitizer: detects use-after-free and out-of-bounds bugs.", - false, false) +INITIALIZE_PASS_BEGIN( + AddressSanitizer, "asan", + "AddressSanitizer: detects use-after-free and out-of-bounds bugs.", false, + false) INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass) -INITIALIZE_PASS_END(AddressSanitizer, "asan", - "AddressSanitizer: detects use-after-free and out-of-bounds bugs.", - false, false) +INITIALIZE_PASS_END( + AddressSanitizer, "asan", + "AddressSanitizer: detects use-after-free and out-of-bounds bugs.", false, + false) FunctionPass *llvm::createAddressSanitizerFunctionPass() { return new AddressSanitizer(); } char AddressSanitizerModule::ID = 0; -INITIALIZE_PASS(AddressSanitizerModule, "asan-module", +INITIALIZE_PASS( + AddressSanitizerModule, "asan-module", "AddressSanitizer: detects use-after-free and out-of-bounds bugs." - "ModulePass", false, false) + "ModulePass", + false, false) ModulePass *llvm::createAddressSanitizerModulePass() { return new AddressSanitizerModule(); } @@ -709,16 +739,15 @@ static size_t TypeSizeToSizeIndex(uint32_t TypeSize) { } // \brief Create a constant for Str so that we can pass it to the run-time lib. -static GlobalVariable *createPrivateGlobalForString( - Module &M, StringRef Str, bool AllowMerging) { +static GlobalVariable *createPrivateGlobalForString(Module &M, StringRef Str, + bool AllowMerging) { Constant *StrConst = ConstantDataArray::getString(M.getContext(), Str); // We use private linkage for module-local strings. If they can be merged // with another one, we set the unnamed_addr attribute. GlobalVariable *GV = new GlobalVariable(M, StrConst->getType(), true, GlobalValue::PrivateLinkage, StrConst, kAsanGenPrefix); - if (AllowMerging) - GV->setUnnamedAddr(true); + if (AllowMerging) GV->setUnnamedAddr(true); GV->setAlignment(1); // Strings may not be merged w/o setting align 1. return GV; } @@ -747,8 +776,7 @@ static bool GlobalWasGeneratedByAsan(GlobalVariable *G) { Value *AddressSanitizer::memToShadow(Value *Shadow, IRBuilder<> &IRB) { // Shadow >> scale Shadow = IRB.CreateLShr(Shadow, Mapping.Scale); - if (Mapping.Offset == 0) - return Shadow; + if (Mapping.Offset == 0) return Shadow; // (Shadow >> scale) | offset if (Mapping.OrShadowOffset) return IRB.CreateOr(Shadow, ConstantInt::get(IntptrTy, Mapping.Offset)); @@ -775,38 +803,61 @@ void AddressSanitizer::instrumentMemIntrinsic(MemIntrinsic *MI) { MI->eraseFromParent(); } -// If I is an interesting memory access, return the PointerOperand -// and set IsWrite/Alignment. Otherwise return nullptr. -static Value *isInterestingMemoryAccess(Instruction *I, bool *IsWrite, - unsigned *Alignment) { +/// Check if we want (and can) handle this alloca. +bool AddressSanitizer::isInterestingAlloca(AllocaInst &AI) const { + return (AI.getAllocatedType()->isSized() && + // alloca() may be called with 0 size, ignore it. + getAllocaSizeInBytes(&AI) > 0 && + // We are only interested in allocas not promotable to registers. + // Promotable allocas are common under -O0. + (!ClSkipPromotableAllocas || !isAllocaPromotable(&AI))); +} + +/// If I is an interesting memory access, return the PointerOperand +/// and set IsWrite/Alignment. Otherwise return nullptr. 
+Value *AddressSanitizer::isInterestingMemoryAccess(Instruction *I, + bool *IsWrite, + uint64_t *TypeSize, + unsigned *Alignment) const { // Skip memory accesses inserted by another instrumentation. - if (I->getMetadata("nosanitize")) - return nullptr; + if (I->getMetadata("nosanitize")) return nullptr; + + Value *PtrOperand = nullptr; + const DataLayout &DL = I->getModule()->getDataLayout(); if (LoadInst *LI = dyn_cast<LoadInst>(I)) { if (!ClInstrumentReads) return nullptr; *IsWrite = false; + *TypeSize = DL.getTypeStoreSizeInBits(LI->getType()); *Alignment = LI->getAlignment(); - return LI->getPointerOperand(); - } - if (StoreInst *SI = dyn_cast<StoreInst>(I)) { + PtrOperand = LI->getPointerOperand(); + } else if (StoreInst *SI = dyn_cast<StoreInst>(I)) { if (!ClInstrumentWrites) return nullptr; *IsWrite = true; + *TypeSize = DL.getTypeStoreSizeInBits(SI->getValueOperand()->getType()); *Alignment = SI->getAlignment(); - return SI->getPointerOperand(); - } - if (AtomicRMWInst *RMW = dyn_cast<AtomicRMWInst>(I)) { + PtrOperand = SI->getPointerOperand(); + } else if (AtomicRMWInst *RMW = dyn_cast<AtomicRMWInst>(I)) { if (!ClInstrumentAtomics) return nullptr; *IsWrite = true; + *TypeSize = DL.getTypeStoreSizeInBits(RMW->getValOperand()->getType()); *Alignment = 0; - return RMW->getPointerOperand(); - } - if (AtomicCmpXchgInst *XCHG = dyn_cast<AtomicCmpXchgInst>(I)) { + PtrOperand = RMW->getPointerOperand(); + } else if (AtomicCmpXchgInst *XCHG = dyn_cast<AtomicCmpXchgInst>(I)) { if (!ClInstrumentAtomics) return nullptr; *IsWrite = true; + *TypeSize = DL.getTypeStoreSizeInBits(XCHG->getCompareOperand()->getType()); *Alignment = 0; - return XCHG->getPointerOperand(); + PtrOperand = XCHG->getPointerOperand(); } - return nullptr; + + // Treat memory accesses to promotable allocas as non-interesting since they + // will not cause memory violations. This greatly speeds up the instrumented + // executable at -O0. + if (ClSkipPromotableAllocas) + if (auto AI = dyn_cast_or_null<AllocaInst>(PtrOperand)) + return isInterestingAlloca(*AI) ? AI : nullptr; + + return PtrOperand; } static bool isPointerOperand(Value *V) { @@ -818,17 +869,15 @@ static bool isPointerOperand(Value *V) { // the frontend. static bool isInterestingPointerComparisonOrSubtraction(Instruction *I) { if (ICmpInst *Cmp = dyn_cast<ICmpInst>(I)) { - if (!Cmp->isRelational()) - return false; + if (!Cmp->isRelational()) return false; } else if (BinaryOperator *BO = dyn_cast<BinaryOperator>(I)) { - if (BO->getOpcode() != Instruction::Sub) - return false; + if (BO->getOpcode() != Instruction::Sub) return false; } else { return false; } if (!isPointerOperand(I->getOperand(0)) || !isPointerOperand(I->getOperand(1))) - return false; + return false; return true; } @@ -839,8 +888,8 @@ bool AddressSanitizer::GlobalIsLinkerInitialized(GlobalVariable *G) { return G->hasInitializer() && !GlobalsMD.get(G).IsDynInit; } -void -AddressSanitizer::instrumentPointerComparisonOrSubtraction(Instruction *I) { +void AddressSanitizer::instrumentPointerComparisonOrSubtraction( + Instruction *I) { IRBuilder<> IRB(I); Function *F = isa<ICmpInst>(I) ? 
AsanPtrCmpFunction : AsanPtrSubFunction; Value *Param[2] = {I->getOperand(0), I->getOperand(1)}; @@ -851,38 +900,47 @@ AddressSanitizer::instrumentPointerComparisonOrSubtraction(Instruction *I) { IRB.CreateCall2(F, Param[0], Param[1]); } -void AddressSanitizer::instrumentMop(Instruction *I, bool UseCalls) { +void AddressSanitizer::instrumentMop(ObjectSizeOffsetVisitor &ObjSizeVis, + Instruction *I, bool UseCalls, + const DataLayout &DL) { bool IsWrite = false; unsigned Alignment = 0; - Value *Addr = isInterestingMemoryAccess(I, &IsWrite, &Alignment); + uint64_t TypeSize = 0; + Value *Addr = isInterestingMemoryAccess(I, &IsWrite, &TypeSize, &Alignment); assert(Addr); + + // Optimization experiments. + // The experiments can be used to evaluate potential optimizations that remove + // instrumentation (assess false negatives). Instead of completely removing + // some instrumentation, you set Exp to a non-zero value (mask of optimization + // experiments that want to remove instrumentation of this instruction). + // If Exp is non-zero, this pass will emit special calls into runtime + // (e.g. __asan_report_exp_load1 instead of __asan_report_load1). These calls + // make runtime terminate the program in a special way (with a different + // exit status). Then you run the new compiler on a buggy corpus, collect + // the special terminations (ideally, you don't see them at all -- no false + // negatives) and make the decision on the optimization. + uint32_t Exp = ClForceExperiment; + if (ClOpt && ClOptGlobals) { - if (GlobalVariable *G = dyn_cast<GlobalVariable>(Addr)) { - // If initialization order checking is disabled, a simple access to a - // dynamically initialized global is always valid. - if (!ClInitializers || GlobalIsLinkerInitialized(G)) { - NumOptimizedAccessesToGlobalVar++; - return; - } - } - ConstantExpr *CE = dyn_cast<ConstantExpr>(Addr); - if (CE && CE->isGEPWithNoNotionalOverIndexing()) { - if (GlobalVariable *G = dyn_cast<GlobalVariable>(CE->getOperand(0))) { - if (CE->getOperand(1)->isNullValue() && GlobalIsLinkerInitialized(G)) { - NumOptimizedAccessesToGlobalArray++; - return; - } - } + // If initialization order checking is disabled, a simple access to a + // dynamically initialized global is always valid. + GlobalVariable *G = dyn_cast<GlobalVariable>(GetUnderlyingObject(Addr, DL)); + if (G != NULL && (!ClInitializers || GlobalIsLinkerInitialized(G)) && + isSafeAccess(ObjSizeVis, Addr, TypeSize)) { + NumOptimizedAccessesToGlobalVar++; + return; } } - Type *OrigPtrTy = Addr->getType(); - Type *OrigTy = cast<PointerType>(OrigPtrTy)->getElementType(); - - assert(OrigTy->isSized()); - uint32_t TypeSize = DL->getTypeStoreSizeInBits(OrigTy); - - assert((TypeSize % 8) == 0); + if (ClOpt && ClOptStack) { + // A direct inbounds access to a stack variable is always valid. + if (isa<AllocaInst>(GetUnderlyingObject(Addr, DL)) && + isSafeAccess(ObjSizeVis, Addr, TypeSize)) { + NumOptimizedAccessesToStackVar++; + return; + } + } if (IsWrite) NumInstrumentedWrites++; @@ -895,23 +953,10 @@ void AddressSanitizer::instrumentMop(Instruction *I, bool UseCalls) { if ((TypeSize == 8 || TypeSize == 16 || TypeSize == 32 || TypeSize == 64 || TypeSize == 128) && (Alignment >= Granularity || Alignment == 0 || Alignment >= TypeSize / 8)) - return instrumentAddress(I, I, Addr, TypeSize, IsWrite, nullptr, UseCalls); - // Instrument unusual size or unusual alignment. - // We can not do it with a single check, so we do 1-byte check for the first - // and the last bytes. 
We call __asan_report_*_n(addr, real_size) to be able - // to report the actual access size. - IRBuilder<> IRB(I); - Value *Size = ConstantInt::get(IntptrTy, TypeSize / 8); - Value *AddrLong = IRB.CreatePointerCast(Addr, IntptrTy); - if (UseCalls) { - IRB.CreateCall2(AsanMemoryAccessCallbackSized[IsWrite], AddrLong, Size); - } else { - Value *LastByte = IRB.CreateIntToPtr( - IRB.CreateAdd(AddrLong, ConstantInt::get(IntptrTy, TypeSize / 8 - 1)), - OrigPtrTy); - instrumentAddress(I, I, Addr, 8, IsWrite, Size, false); - instrumentAddress(I, I, LastByte, 8, IsWrite, Size, false); - } + return instrumentAddress(I, I, Addr, TypeSize, IsWrite, nullptr, UseCalls, + Exp); + instrumentUnusualSizeOrAlignment(I, Addr, TypeSize, IsWrite, nullptr, + UseCalls, Exp); } // Validate the result of Module::getOrInsertFunction called for an interface @@ -921,17 +966,34 @@ void AddressSanitizer::instrumentMop(Instruction *I, bool UseCalls) { static Function *checkInterfaceFunction(Constant *FuncOrBitcast) { if (isa<Function>(FuncOrBitcast)) return cast<Function>(FuncOrBitcast); FuncOrBitcast->dump(); - report_fatal_error("trying to redefine an AddressSanitizer " - "interface function"); + report_fatal_error( + "trying to redefine an AddressSanitizer " + "interface function"); } -Instruction *AddressSanitizer::generateCrashCode( - Instruction *InsertBefore, Value *Addr, - bool IsWrite, size_t AccessSizeIndex, Value *SizeArgument) { +Instruction *AddressSanitizer::generateCrashCode(Instruction *InsertBefore, + Value *Addr, bool IsWrite, + size_t AccessSizeIndex, + Value *SizeArgument, + uint32_t Exp) { IRBuilder<> IRB(InsertBefore); - CallInst *Call = SizeArgument - ? IRB.CreateCall2(AsanErrorCallbackSized[IsWrite], Addr, SizeArgument) - : IRB.CreateCall(AsanErrorCallback[IsWrite][AccessSizeIndex], Addr); + Value *ExpVal = Exp == 0 ? nullptr : ConstantInt::get(IRB.getInt32Ty(), Exp); + CallInst *Call = nullptr; + if (SizeArgument) { + if (Exp == 0) + Call = IRB.CreateCall2(AsanErrorCallbackSized[IsWrite][0], Addr, + SizeArgument); + else + Call = IRB.CreateCall3(AsanErrorCallbackSized[IsWrite][1], Addr, + SizeArgument, ExpVal); + } else { + if (Exp == 0) + Call = + IRB.CreateCall(AsanErrorCallback[IsWrite][0][AccessSizeIndex], Addr); + else + Call = IRB.CreateCall2(AsanErrorCallback[IsWrite][1][AccessSizeIndex], + Addr, ExpVal); + } // We don't do Call->setDoesNotReturn() because the BB already has // UnreachableInst at the end. 
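Note: generateCrashCode() above dispatches to callbacks whose names encode IsWrite, the access size, and the experiment bit; initializeCallbacks() later in this patch assembles those names. A sketch of the scheme (reportName is a hypothetical helper, not part of the patch):

#include <cstdio>
#include <string>

// "__asan_report_" + optional "exp_" + "load"/"store" + size, or "_n"
// for the variable-size variant that takes (addr, size[, exp]).
static std::string reportName(bool IsWrite, int AccessSizeIndex, bool Exp,
                              bool Sized) {
  std::string Name = "__asan_report_";
  if (Exp) Name += "exp_";
  Name += IsWrite ? "store" : "load";
  if (Sized)
    Name += "_n";
  else
    Name += std::to_string(1 << AccessSizeIndex);  // 1, 2, 4, 8, 16
  return Name;
}

int main() {
  printf("%s\n", reportName(true, 2, false, false).c_str());  // __asan_report_store4
  printf("%s\n", reportName(false, 0, true, true).c_str());   // __asan_report_exp_load_n
}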
@@ -941,19 +1003,19 @@ Instruction *AddressSanitizer::generateCrashCode( } Value *AddressSanitizer::createSlowPathCmp(IRBuilder<> &IRB, Value *AddrLong, - Value *ShadowValue, - uint32_t TypeSize) { + Value *ShadowValue, + uint32_t TypeSize) { size_t Granularity = 1 << Mapping.Scale; // Addr & (Granularity - 1) - Value *LastAccessedByte = IRB.CreateAnd( - AddrLong, ConstantInt::get(IntptrTy, Granularity - 1)); + Value *LastAccessedByte = + IRB.CreateAnd(AddrLong, ConstantInt::get(IntptrTy, Granularity - 1)); // (Addr & (Granularity - 1)) + size - 1 if (TypeSize / 8 > 1) LastAccessedByte = IRB.CreateAdd( LastAccessedByte, ConstantInt::get(IntptrTy, TypeSize / 8 - 1)); // (uint8_t) ((Addr & (Granularity-1)) + size - 1) - LastAccessedByte = IRB.CreateIntCast( - LastAccessedByte, ShadowValue->getType(), false); + LastAccessedByte = + IRB.CreateIntCast(LastAccessedByte, ShadowValue->getType(), false); // ((uint8_t) ((Addr & (Granularity-1)) + size - 1)) >= ShadowValue return IRB.CreateICmpSGE(LastAccessedByte, ShadowValue); } @@ -961,24 +1023,29 @@ Value *AddressSanitizer::createSlowPathCmp(IRBuilder<> &IRB, Value *AddrLong, void AddressSanitizer::instrumentAddress(Instruction *OrigIns, Instruction *InsertBefore, Value *Addr, uint32_t TypeSize, bool IsWrite, - Value *SizeArgument, bool UseCalls) { + Value *SizeArgument, bool UseCalls, + uint32_t Exp) { IRBuilder<> IRB(InsertBefore); Value *AddrLong = IRB.CreatePointerCast(Addr, IntptrTy); size_t AccessSizeIndex = TypeSizeToSizeIndex(TypeSize); if (UseCalls) { - IRB.CreateCall(AsanMemoryAccessCallback[IsWrite][AccessSizeIndex], - AddrLong); + if (Exp == 0) + IRB.CreateCall(AsanMemoryAccessCallback[IsWrite][0][AccessSizeIndex], + AddrLong); + else + IRB.CreateCall2(AsanMemoryAccessCallback[IsWrite][1][AccessSizeIndex], + AddrLong, ConstantInt::get(IRB.getInt32Ty(), Exp)); return; } - Type *ShadowTy = IntegerType::get( - *C, std::max(8U, TypeSize >> Mapping.Scale)); + Type *ShadowTy = + IntegerType::get(*C, std::max(8U, TypeSize >> Mapping.Scale)); Type *ShadowPtrTy = PointerType::get(ShadowTy, 0); Value *ShadowPtr = memToShadow(AddrLong, IRB); Value *CmpVal = Constant::getNullValue(ShadowTy); - Value *ShadowValue = IRB.CreateLoad( - IRB.CreateIntToPtr(ShadowPtr, ShadowPtrTy)); + Value *ShadowValue = + IRB.CreateLoad(IRB.CreateIntToPtr(ShadowPtr, ShadowPtrTy)); Value *Cmp = IRB.CreateICmpNE(ShadowValue, CmpVal); size_t Granularity = 1 << Mapping.Scale; @@ -987,9 +1054,8 @@ void AddressSanitizer::instrumentAddress(Instruction *OrigIns, if (ClAlwaysSlowPath || (TypeSize < 8 * Granularity)) { // We use branch weights for the slow path check, to indicate that the slow // path is rarely taken. This seems to be the case for SPEC benchmarks. 
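Note: instrumentAddress() above emits a fast test of the shadow byte plus the slow-path comparison built by createSlowPathCmp(). In plain C++ the combined check is roughly the sketch below (accessIsPoisoned is a hypothetical name; the shadow is modeled as a flat array instead of mapped memory):

#include <cstdint>

// Non-zero shadow means the 8-byte granule is only partially addressable,
// so the slow path re-checks the last byte touched by the access.
static bool accessIsPoisoned(uint64_t Addr, uint32_t AccessSize,
                             const int8_t *Shadow, uint64_t Scale,
                             uint64_t Offset) {
  int8_t ShadowValue = Shadow[(Addr >> Scale) + Offset];
  if (ShadowValue == 0) return false;  // fast path: granule fully valid
  uint64_t Granularity = 1ULL << Scale;
  int8_t LastAccessedByte =
      (int8_t)((Addr & (Granularity - 1)) + AccessSize - 1);
  return LastAccessedByte >= ShadowValue;  // signed, as in CreateICmpSGE
}

int main() {
  int8_t Shadow[16] = {0};
  Shadow[1] = 4;  // granule 1: only its first 4 bytes are addressable
  bool Ok  = accessIsPoisoned(8, 4, Shadow, 3, 0);   // bytes 8..11 -> false
  bool Bad = accessIsPoisoned(10, 4, Shadow, 3, 0);  // bytes 10..13 -> true
  (void)Ok;
  (void)Bad;
}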
- TerminatorInst *CheckTerm = - SplitBlockAndInsertIfThen(Cmp, InsertBefore, false, - MDBuilder(*C).createBranchWeights(1, 100000)); + TerminatorInst *CheckTerm = SplitBlockAndInsertIfThen( + Cmp, InsertBefore, false, MDBuilder(*C).createBranchWeights(1, 100000)); assert(dyn_cast<BranchInst>(CheckTerm)->isUnconditional()); BasicBlock *NextBB = CheckTerm->getSuccessor(0); IRB.SetInsertPoint(CheckTerm); @@ -1003,11 +1069,37 @@ void AddressSanitizer::instrumentAddress(Instruction *OrigIns, CrashTerm = SplitBlockAndInsertIfThen(Cmp, InsertBefore, true); } - Instruction *Crash = generateCrashCode( - CrashTerm, AddrLong, IsWrite, AccessSizeIndex, SizeArgument); + Instruction *Crash = generateCrashCode(CrashTerm, AddrLong, IsWrite, + AccessSizeIndex, SizeArgument, Exp); Crash->setDebugLoc(OrigIns->getDebugLoc()); } +// Instrument unusual size or unusual alignment. +// We can not do it with a single check, so we do 1-byte check for the first +// and the last bytes. We call __asan_report_*_n(addr, real_size) to be able +// to report the actual access size. +void AddressSanitizer::instrumentUnusualSizeOrAlignment( + Instruction *I, Value *Addr, uint32_t TypeSize, bool IsWrite, + Value *SizeArgument, bool UseCalls, uint32_t Exp) { + IRBuilder<> IRB(I); + Value *Size = ConstantInt::get(IntptrTy, TypeSize / 8); + Value *AddrLong = IRB.CreatePointerCast(Addr, IntptrTy); + if (UseCalls) { + if (Exp == 0) + IRB.CreateCall2(AsanMemoryAccessCallbackSized[IsWrite][0], AddrLong, + Size); + else + IRB.CreateCall3(AsanMemoryAccessCallbackSized[IsWrite][1], AddrLong, Size, + ConstantInt::get(IRB.getInt32Ty(), Exp)); + } else { + Value *LastByte = IRB.CreateIntToPtr( + IRB.CreateAdd(AddrLong, ConstantInt::get(IntptrTy, TypeSize / 8 - 1)), + Addr->getType()); + instrumentAddress(I, I, Addr, 8, IsWrite, Size, false, Exp); + instrumentAddress(I, I, LastByte, 8, IsWrite, Size, false, Exp); + } +} + void AddressSanitizerModule::poisonOneInitializer(Function &GlobalInit, GlobalValue *ModuleName) { // Set up the arguments to our poison/unpoison functions. @@ -1029,12 +1121,11 @@ void AddressSanitizerModule::createInitializerPoisonCalls( ConstantArray *CA = cast<ConstantArray>(GV->getInitializer()); for (Use &OP : CA->operands()) { - if (isa<ConstantAggregateZero>(OP)) - continue; + if (isa<ConstantAggregateZero>(OP)) continue; ConstantStruct *CS = cast<ConstantStruct>(OP); // Must have a function or null ptr. - if (Function* F = dyn_cast<Function>(CS->getOperand(1))) { + if (Function *F = dyn_cast<Function>(CS->getOperand(1))) { if (F->getName() == kAsanModuleCtorName) continue; ConstantInt *Priority = dyn_cast<ConstantInt>(CS->getOperand(0)); // Don't instrument CTORs that will run before asan.module_ctor. @@ -1059,13 +1150,11 @@ bool AddressSanitizerModule::ShouldInstrumentGlobal(GlobalVariable *G) { G->getLinkage() != GlobalVariable::PrivateLinkage && G->getLinkage() != GlobalVariable::InternalLinkage) return false; - if (G->hasComdat()) - return false; + if (G->hasComdat()) return false; // Two problems with thread-locals: // - The address of the main thread's copy can't be computed at link-time. // - Need to poison all copies, not just the main thread's one. - if (G->isThreadLocal()) - return false; + if (G->isThreadLocal()) return false; // For now, just ignore this Global if the alignment is large. 
if (G->getAlignment() > MinRedzoneSizeForGlobal()) return false; @@ -1076,10 +1165,8 @@ bool AddressSanitizerModule::ShouldInstrumentGlobal(GlobalVariable *G) { StringRef ParsedSegment, ParsedSection; unsigned TAA = 0, StubSize = 0; bool TAAParsed; - std::string ErrorCode = - MCSectionMachO::ParseSectionSpecifier(Section, ParsedSegment, - ParsedSection, TAA, TAAParsed, - StubSize); + std::string ErrorCode = MCSectionMachO::ParseSectionSpecifier( + Section, ParsedSegment, ParsedSection, TAA, TAAParsed, StubSize); if (!ErrorCode.empty()) { report_fatal_error("Invalid section specifier '" + ParsedSection + "': " + ErrorCode + "."); @@ -1140,12 +1227,11 @@ void AddressSanitizerModule::initializeCallbacks(Module &M) { AsanUnpoisonGlobals->setLinkage(Function::ExternalLinkage); // Declare functions that register/unregister globals. AsanRegisterGlobals = checkInterfaceFunction(M.getOrInsertFunction( - kAsanRegisterGlobalsName, IRB.getVoidTy(), - IntptrTy, IntptrTy, nullptr)); + kAsanRegisterGlobalsName, IRB.getVoidTy(), IntptrTy, IntptrTy, nullptr)); AsanRegisterGlobals->setLinkage(Function::ExternalLinkage); - AsanUnregisterGlobals = checkInterfaceFunction(M.getOrInsertFunction( - kAsanUnregisterGlobalsName, - IRB.getVoidTy(), IntptrTy, IntptrTy, nullptr)); + AsanUnregisterGlobals = checkInterfaceFunction( + M.getOrInsertFunction(kAsanUnregisterGlobalsName, IRB.getVoidTy(), + IntptrTy, IntptrTy, nullptr)); AsanUnregisterGlobals->setLinkage(Function::ExternalLinkage); } @@ -1158,8 +1244,7 @@ bool AddressSanitizerModule::InstrumentGlobals(IRBuilder<> &IRB, Module &M) { SmallVector<GlobalVariable *, 16> GlobalsToChange; for (auto &G : M.globals()) { - if (ShouldInstrumentGlobal(&G)) - GlobalsToChange.push_back(&G); + if (ShouldInstrumentGlobal(&G)) GlobalsToChange.push_back(&G); } size_t n = GlobalsToChange.size(); @@ -1184,8 +1269,9 @@ bool AddressSanitizerModule::InstrumentGlobals(IRBuilder<> &IRB, Module &M) { // We shouldn't merge same module names, as this string serves as unique // module ID in runtime. GlobalVariable *ModuleName = createPrivateGlobalForString( - M, M.getModuleIdentifier(), /*AllowMerging*/false); + M, M.getModuleIdentifier(), /*AllowMerging*/ false); + auto &DL = M.getDataLayout(); for (size_t i = 0; i < n; i++) { static const uint64_t kMaxGlobalRedzone = 1 << 18; GlobalVariable *G = GlobalsToChange[i]; @@ -1199,32 +1285,30 @@ bool AddressSanitizerModule::InstrumentGlobals(IRBuilder<> &IRB, Module &M) { PointerType *PtrTy = cast<PointerType>(G->getType()); Type *Ty = PtrTy->getElementType(); - uint64_t SizeInBytes = DL->getTypeAllocSize(Ty); + uint64_t SizeInBytes = DL.getTypeAllocSize(Ty); uint64_t MinRZ = MinRedzoneSizeForGlobal(); // MinRZ <= RZ <= kMaxGlobalRedzone // and trying to make RZ to be ~ 1/4 of SizeInBytes. 
- uint64_t RZ = std::max(MinRZ, - std::min(kMaxGlobalRedzone, - (SizeInBytes / MinRZ / 4) * MinRZ)); + uint64_t RZ = std::max( + MinRZ, std::min(kMaxGlobalRedzone, (SizeInBytes / MinRZ / 4) * MinRZ)); uint64_t RightRedzoneSize = RZ; // Round up to MinRZ - if (SizeInBytes % MinRZ) - RightRedzoneSize += MinRZ - (SizeInBytes % MinRZ); + if (SizeInBytes % MinRZ) RightRedzoneSize += MinRZ - (SizeInBytes % MinRZ); assert(((RightRedzoneSize + SizeInBytes) % MinRZ) == 0); Type *RightRedZoneTy = ArrayType::get(IRB.getInt8Ty(), RightRedzoneSize); StructType *NewTy = StructType::get(Ty, RightRedZoneTy, nullptr); - Constant *NewInitializer = ConstantStruct::get( - NewTy, G->getInitializer(), - Constant::getNullValue(RightRedZoneTy), nullptr); + Constant *NewInitializer = + ConstantStruct::get(NewTy, G->getInitializer(), + Constant::getNullValue(RightRedZoneTy), nullptr); // Create a new global variable with enough space for a redzone. GlobalValue::LinkageTypes Linkage = G->getLinkage(); if (G->isConstant() && Linkage == GlobalValue::PrivateLinkage) Linkage = GlobalValue::InternalLinkage; - GlobalVariable *NewGlobal = new GlobalVariable( - M, NewTy, G->isConstant(), Linkage, - NewInitializer, "", G, G->getThreadLocalMode()); + GlobalVariable *NewGlobal = + new GlobalVariable(M, NewTy, G->isConstant(), Linkage, NewInitializer, + "", G, G->getThreadLocalMode()); NewGlobal->copyAttributesFrom(G); NewGlobal->setAlignment(MinRZ); @@ -1253,8 +1337,7 @@ bool AddressSanitizerModule::InstrumentGlobals(IRBuilder<> &IRB, Module &M) { ConstantExpr::getPointerCast(ModuleName, IntptrTy), ConstantInt::get(IntptrTy, MD.IsDynInit), SourceLoc, nullptr); - if (ClInitializers && MD.IsDynInit) - HasDynamicallyInitializedGlobals = true; + if (ClInitializers && MD.IsDynInit) HasDynamicallyInitializedGlobals = true; DEBUG(dbgs() << "NEW GLOBAL: " << *NewGlobal << "\n"); } @@ -1273,9 +1356,9 @@ bool AddressSanitizerModule::InstrumentGlobals(IRBuilder<> &IRB, Module &M) { // We also need to unregister globals at the end, e.g. when a shared library // gets closed. 
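Note: the redzone sizing in the hunk above clamps ~SizeInBytes/4 to [MinRZ, kMaxGlobalRedzone] in MinRZ granules and then rounds the object end up to a MinRZ boundary. As a standalone sketch (rightRedzoneSize is a hypothetical name):

#include <algorithm>
#include <cstdint>
#include <cstdio>

static uint64_t rightRedzoneSize(uint64_t SizeInBytes, uint64_t MinRZ) {
  const uint64_t kMaxGlobalRedzone = 1 << 18;
  uint64_t RZ = std::max(
      MinRZ, std::min(kMaxGlobalRedzone, (SizeInBytes / MinRZ / 4) * MinRZ));
  // Pad so that object size + redzone is a multiple of MinRZ.
  if (SizeInBytes % MinRZ) RZ += MinRZ - (SizeInBytes % MinRZ);
  return RZ;
}

int main() {
  printf("%llu\n", (unsigned long long)rightRedzoneSize(100, 32));   // 60
  printf("%llu\n", (unsigned long long)rightRedzoneSize(4096, 32));  // 1024
}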
- Function *AsanDtorFunction = Function::Create( - FunctionType::get(Type::getVoidTy(*C), false), - GlobalValue::InternalLinkage, kAsanModuleDtorName, &M); + Function *AsanDtorFunction = + Function::Create(FunctionType::get(Type::getVoidTy(*C), false), + GlobalValue::InternalLinkage, kAsanModuleDtorName, &M); BasicBlock *AsanDtorBB = BasicBlock::Create(*C, "", AsanDtorFunction); IRBuilder<> IRB_Dtor(ReturnInst::Create(*C, AsanDtorBB)); IRB_Dtor.CreateCall2(AsanUnregisterGlobals, @@ -1288,12 +1371,8 @@ bool AddressSanitizerModule::InstrumentGlobals(IRBuilder<> &IRB, Module &M) { } bool AddressSanitizerModule::runOnModule(Module &M) { - DataLayoutPass *DLP = getAnalysisIfAvailable<DataLayoutPass>(); - if (!DLP) - return false; - DL = &DLP->getDataLayout(); C = &(M.getContext()); - int LongSize = DL->getPointerSizeInBits(); + int LongSize = M.getDataLayout().getPointerSizeInBits(); IntptrTy = Type::getIntNTy(*C, LongSize); TargetTriple = Triple(M.getTargetTriple()); Mapping = getShadowMapping(TargetTriple, LongSize); @@ -1305,8 +1384,7 @@ bool AddressSanitizerModule::runOnModule(Module &M) { assert(CtorFunc); IRBuilder<> IRB(CtorFunc->getEntryBlock().getTerminator()); - if (ClGlobals) - Changed |= InstrumentGlobals(IRB, M); + if (ClGlobals) Changed |= InstrumentGlobals(IRB, M); return Changed; } @@ -1314,33 +1392,34 @@ bool AddressSanitizerModule::runOnModule(Module &M) { void AddressSanitizer::initializeCallbacks(Module &M) { IRBuilder<> IRB(*C); // Create __asan_report* callbacks. - for (size_t AccessIsWrite = 0; AccessIsWrite <= 1; AccessIsWrite++) { - for (size_t AccessSizeIndex = 0; AccessSizeIndex < kNumberOfAccessSizes; - AccessSizeIndex++) { - // IsWrite and TypeSize are encoded in the function name. - std::string Suffix = - (AccessIsWrite ? "store" : "load") + itostr(1 << AccessSizeIndex); - AsanErrorCallback[AccessIsWrite][AccessSizeIndex] = - checkInterfaceFunction( - M.getOrInsertFunction(kAsanReportErrorTemplate + Suffix, - IRB.getVoidTy(), IntptrTy, nullptr)); - AsanMemoryAccessCallback[AccessIsWrite][AccessSizeIndex] = - checkInterfaceFunction( - M.getOrInsertFunction(ClMemoryAccessCallbackPrefix + Suffix, - IRB.getVoidTy(), IntptrTy, nullptr)); + // IsWrite, TypeSize and Exp are encoded in the function name. + for (int Exp = 0; Exp < 2; Exp++) { + for (size_t AccessIsWrite = 0; AccessIsWrite <= 1; AccessIsWrite++) { + const std::string TypeStr = AccessIsWrite ? "store" : "load"; + const std::string ExpStr = Exp ? "exp_" : ""; + const Type *ExpType = Exp ? 
Type::getInt32Ty(*C) : nullptr; + AsanErrorCallbackSized[AccessIsWrite][Exp] = + checkInterfaceFunction(M.getOrInsertFunction( + kAsanReportErrorTemplate + ExpStr + TypeStr + "_n", + IRB.getVoidTy(), IntptrTy, IntptrTy, ExpType, nullptr)); + AsanMemoryAccessCallbackSized[AccessIsWrite][Exp] = + checkInterfaceFunction(M.getOrInsertFunction( + ClMemoryAccessCallbackPrefix + ExpStr + TypeStr + "N", + IRB.getVoidTy(), IntptrTy, IntptrTy, ExpType, nullptr)); + for (size_t AccessSizeIndex = 0; AccessSizeIndex < kNumberOfAccessSizes; + AccessSizeIndex++) { + const std::string Suffix = TypeStr + itostr(1 << AccessSizeIndex); + AsanErrorCallback[AccessIsWrite][Exp][AccessSizeIndex] = + checkInterfaceFunction(M.getOrInsertFunction( + kAsanReportErrorTemplate + ExpStr + Suffix, IRB.getVoidTy(), + IntptrTy, ExpType, nullptr)); + AsanMemoryAccessCallback[AccessIsWrite][Exp][AccessSizeIndex] = + checkInterfaceFunction(M.getOrInsertFunction( + ClMemoryAccessCallbackPrefix + ExpStr + Suffix, IRB.getVoidTy(), + IntptrTy, ExpType, nullptr)); + } } } - AsanErrorCallbackSized[0] = checkInterfaceFunction(M.getOrInsertFunction( - kAsanReportLoadN, IRB.getVoidTy(), IntptrTy, IntptrTy, nullptr)); - AsanErrorCallbackSized[1] = checkInterfaceFunction(M.getOrInsertFunction( - kAsanReportStoreN, IRB.getVoidTy(), IntptrTy, IntptrTy, nullptr)); - - AsanMemoryAccessCallbackSized[0] = checkInterfaceFunction( - M.getOrInsertFunction(ClMemoryAccessCallbackPrefix + "loadN", - IRB.getVoidTy(), IntptrTy, IntptrTy, nullptr)); - AsanMemoryAccessCallbackSized[1] = checkInterfaceFunction( - M.getOrInsertFunction(ClMemoryAccessCallbackPrefix + "storeN", - IRB.getVoidTy(), IntptrTy, IntptrTy, nullptr)); AsanMemmove = checkInterfaceFunction(M.getOrInsertFunction( ClMemoryAccessCallbackPrefix + "memmove", IRB.getInt8PtrTy(), @@ -1368,21 +1447,17 @@ void AddressSanitizer::initializeCallbacks(Module &M) { // virtual bool AddressSanitizer::doInitialization(Module &M) { // Initialize the private fields. No one has accessed them before. - DataLayoutPass *DLP = getAnalysisIfAvailable<DataLayoutPass>(); - if (!DLP) - report_fatal_error("data layout missing"); - DL = &DLP->getDataLayout(); GlobalsMD.init(M); C = &(M.getContext()); - LongSize = DL->getPointerSizeInBits(); + LongSize = M.getDataLayout().getPointerSizeInBits(); IntptrTy = Type::getIntNTy(*C, LongSize); TargetTriple = Triple(M.getTargetTriple()); - AsanCtorFunction = Function::Create( - FunctionType::get(Type::getVoidTy(*C), false), - GlobalValue::InternalLinkage, kAsanModuleCtorName, &M); + AsanCtorFunction = + Function::Create(FunctionType::get(Type::getVoidTy(*C), false), + GlobalValue::InternalLinkage, kAsanModuleCtorName, &M); BasicBlock *AsanCtorBB = BasicBlock::Create(*C, "", AsanCtorFunction); // call __asan_init in the module ctor. IRBuilder<> IRB(ReturnInst::Create(*C, AsanCtorBB)); @@ -1424,22 +1499,21 @@ bool AddressSanitizer::runOnFunction(Function &F) { // If needed, insert __asan_init before checking for SanitizeAddress attr. maybeInsertAsanInitAtFunctionEntry(F); - if (!F.hasFnAttribute(Attribute::SanitizeAddress)) - return false; + if (!F.hasFnAttribute(Attribute::SanitizeAddress)) return false; - if (!ClDebugFunc.empty() && ClDebugFunc != F.getName()) - return false; + if (!ClDebugFunc.empty() && ClDebugFunc != F.getName()) return false; // We want to instrument every address only once per basic block (unless there // are calls between uses). 
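Note: runOnFunction() below implements the once-per-BB rule from this comment via TempsToInstrument: an address value already checked in the current block is skipped, and the set is flushed at every call site because a call may free or remap memory. A toy model of the dedup logic (names and the string-based "instruction stream" are illustrative only):

#include <cstdio>
#include <string>
#include <unordered_set>
#include <vector>

int main() {
  std::vector<std::string> BB = {"%p", "%q", "%p", "call", "%p"};
  std::unordered_set<std::string> TempsToInstrument;
  for (const auto &I : BB) {
    if (I == "call") {
      TempsToInstrument.clear();  // memory may have changed under the call
      continue;
    }
    if (!TempsToInstrument.insert(I).second)
      continue;  // already instrumented in this BB
    printf("instrument %s\n", I.c_str());  // %p, %q, then %p again
  }
}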
- SmallSet<Value*, 16> TempsToInstrument; - SmallVector<Instruction*, 16> ToInstrument; - SmallVector<Instruction*, 8> NoReturnCalls; - SmallVector<BasicBlock*, 16> AllBlocks; - SmallVector<Instruction*, 16> PointerComparisonsOrSubtracts; + SmallSet<Value *, 16> TempsToInstrument; + SmallVector<Instruction *, 16> ToInstrument; + SmallVector<Instruction *, 8> NoReturnCalls; + SmallVector<BasicBlock *, 16> AllBlocks; + SmallVector<Instruction *, 16> PointerComparisonsOrSubtracts; int NumAllocas = 0; bool IsWrite; unsigned Alignment; + uint64_t TypeSize; // Fill the set of memory operations to instrument. for (auto &BB : F) { @@ -1448,8 +1522,8 @@ bool AddressSanitizer::runOnFunction(Function &F) { int NumInsnsPerBB = 0; for (auto &Inst : BB) { if (LooksLikeCodeInBug11395(&Inst)) return false; - if (Value *Addr = - isInterestingMemoryAccess(&Inst, &IsWrite, &Alignment)) { + if (Value *Addr = isInterestingMemoryAccess(&Inst, &IsWrite, &TypeSize, + &Alignment)) { if (ClOpt && ClOptSameTemp) { if (!TempsToInstrument.insert(Addr).second) continue; // We've seen this temp in the current BB. @@ -1461,21 +1535,18 @@ bool AddressSanitizer::runOnFunction(Function &F) { } else if (isa<MemIntrinsic>(Inst)) { // ok, take it. } else { - if (isa<AllocaInst>(Inst)) - NumAllocas++; + if (isa<AllocaInst>(Inst)) NumAllocas++; CallSite CS(&Inst); if (CS) { // A call inside BB. TempsToInstrument.clear(); - if (CS.doesNotReturn()) - NoReturnCalls.push_back(CS.getInstruction()); + if (CS.doesNotReturn()) NoReturnCalls.push_back(CS.getInstruction()); } continue; } ToInstrument.push_back(&Inst); NumInsnsPerBB++; - if (NumInsnsPerBB >= ClMaxInsnsToInstrumentPerBB) - break; + if (NumInsnsPerBB >= ClMaxInsnsToInstrumentPerBB) break; } } @@ -1484,13 +1555,20 @@ bool AddressSanitizer::runOnFunction(Function &F) { ToInstrument.size() > (unsigned)ClInstrumentationWithCallsThreshold) UseCalls = true; + const TargetLibraryInfo *TLI = + &getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(); + const DataLayout &DL = F.getParent()->getDataLayout(); + ObjectSizeOffsetVisitor ObjSizeVis(DL, TLI, F.getContext(), + /*RoundToAlign=*/true); + // Instrument. int NumInstrumented = 0; for (auto Inst : ToInstrument) { if (ClDebugMin < 0 || ClDebugMax < 0 || (NumInstrumented >= ClDebugMin && NumInstrumented <= ClDebugMax)) { - if (isInterestingMemoryAccess(Inst, &IsWrite, &Alignment)) - instrumentMop(Inst, UseCalls); + if (isInterestingMemoryAccess(Inst, &IsWrite, &TypeSize, &Alignment)) + instrumentMop(ObjSizeVis, Inst, UseCalls, + F.getParent()->getDataLayout()); else instrumentMemIntrinsic(cast<MemIntrinsic>(Inst)); } @@ -1549,10 +1627,9 @@ void FunctionStackPoisoner::initializeCallbacks(Module &M) { IntptrTy, IntptrTy, nullptr)); } -void -FunctionStackPoisoner::poisonRedZones(ArrayRef<uint8_t> ShadowBytes, - IRBuilder<> &IRB, Value *ShadowBase, - bool DoPoison) { +void FunctionStackPoisoner::poisonRedZones(ArrayRef<uint8_t> ShadowBytes, + IRBuilder<> &IRB, Value *ShadowBase, + bool DoPoison) { size_t n = ShadowBytes.size(); size_t i = 0; // We need to (un)poison n bytes of stack shadow. 
Poison as many as we can @@ -1563,7 +1640,7 @@ FunctionStackPoisoner::poisonRedZones(ArrayRef<uint8_t> ShadowBytes, for (; i + LargeStoreSizeInBytes - 1 < n; i += LargeStoreSizeInBytes) { uint64_t Val = 0; for (size_t j = 0; j < LargeStoreSizeInBytes; j++) { - if (ASan.DL->isLittleEndian()) + if (F.getParent()->getDataLayout().isLittleEndian()) Val |= (uint64_t)ShadowBytes[i + j] << (8 * j); else Val = (Val << 8) | ShadowBytes[i + j]; @@ -1582,9 +1659,8 @@ FunctionStackPoisoner::poisonRedZones(ArrayRef<uint8_t> ShadowBytes, static int StackMallocSizeClass(uint64_t LocalStackSize) { assert(LocalStackSize <= kMaxStackMallocSize); uint64_t MaxSize = kMinStackMallocSize; - for (int i = 0; ; i++, MaxSize *= 2) - if (LocalStackSize <= MaxSize) - return i; + for (int i = 0;; i++, MaxSize *= 2) + if (LocalStackSize <= MaxSize) return i; llvm_unreachable("impossible LocalStackSize"); } @@ -1596,18 +1672,21 @@ static int StackMallocSizeClass(uint64_t LocalStackSize) { void FunctionStackPoisoner::SetShadowToStackAfterReturnInlined( IRBuilder<> &IRB, Value *ShadowBase, int Size) { assert(!(Size % 8)); - assert(kAsanStackAfterReturnMagic == 0xf5); + + // kAsanStackAfterReturnMagic is 0xf5. + const uint64_t kAsanStackAfterReturnMagic64 = 0xf5f5f5f5f5f5f5f5ULL; + for (int i = 0; i < Size; i += 8) { Value *p = IRB.CreateAdd(ShadowBase, ConstantInt::get(IntptrTy, i)); - IRB.CreateStore(ConstantInt::get(IRB.getInt64Ty(), 0xf5f5f5f5f5f5f5f5ULL), - IRB.CreateIntToPtr(p, IRB.getInt64Ty()->getPointerTo())); + IRB.CreateStore( + ConstantInt::get(IRB.getInt64Ty(), kAsanStackAfterReturnMagic64), + IRB.CreateIntToPtr(p, IRB.getInt64Ty()->getPointerTo())); } } static DebugLoc getFunctionEntryDebugLocation(Function &F) { for (const auto &Inst : F.getEntryBlock()) - if (!isa<AllocaInst>(Inst)) - return Inst.getDebugLoc(); + if (!isa<AllocaInst>(Inst)) return Inst.getDebugLoc(); return DebugLoc(); } @@ -1664,9 +1743,9 @@ void FunctionStackPoisoner::poisonStack() { SmallVector<ASanStackVariableDescription, 16> SVD; SVD.reserve(AllocaVec.size()); for (AllocaInst *AI : AllocaVec) { - ASanStackVariableDescription D = { AI->getName().data(), - getAllocaSizeInBytes(AI), - AI->getAlignment(), AI, 0}; + ASanStackVariableDescription D = {AI->getName().data(), + ASan.getAllocaSizeInBytes(AI), + AI->getAlignment(), AI, 0}; SVD.push_back(D); } // Minimal header size (left redzone) is 4 pointers, @@ -1757,19 +1836,19 @@ void FunctionStackPoisoner::poisonStack() { BasePlus0); // Write the frame description constant to redzone[1]. Value *BasePlus1 = IRB.CreateIntToPtr( - IRB.CreateAdd(LocalStackBase, ConstantInt::get(IntptrTy, ASan.LongSize/8)), - IntptrPtrTy); + IRB.CreateAdd(LocalStackBase, + ConstantInt::get(IntptrTy, ASan.LongSize / 8)), + IntptrPtrTy); GlobalVariable *StackDescriptionGlobal = createPrivateGlobalForString(*F.getParent(), L.DescriptionString, - /*AllowMerging*/true); - Value *Description = IRB.CreatePointerCast(StackDescriptionGlobal, - IntptrTy); + /*AllowMerging*/ true); + Value *Description = IRB.CreatePointerCast(StackDescriptionGlobal, IntptrTy); IRB.CreateStore(Description, BasePlus1); // Write the PC to redzone[2]. Value *BasePlus2 = IRB.CreateIntToPtr( - IRB.CreateAdd(LocalStackBase, ConstantInt::get(IntptrTy, - 2 * ASan.LongSize/8)), - IntptrPtrTy); + IRB.CreateAdd(LocalStackBase, + ConstantInt::get(IntptrTy, 2 * ASan.LongSize / 8)), + IntptrPtrTy); IRB.CreateStore(IRB.CreatePointerCast(&F, IntptrTy), BasePlus2); // Poison the stack redzones at the entry. 
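Note: StackMallocSizeClass() above picks the __asan_stack_malloc_/__asan_stack_free_ size class, i.e. the smallest i with LocalStackSize <= kMinStackMallocSize << i, covering 64B through 64K (classes 0 through kMaxAsanStackMallocSizeClass = 10). Equivalently (sketch, with the constants inlined):

#include <cassert>
#include <cstdint>
#include <cstdio>

static int stackMallocSizeClass(uint64_t LocalStackSize) {
  assert(LocalStackSize <= (1ULL << 16));  // kMaxStackMallocSize
  uint64_t MaxSize = 1 << 6;               // kMinStackMallocSize
  for (int i = 0;; i++, MaxSize *= 2)
    if (LocalStackSize <= MaxSize) return i;
}

int main() {
  printf("%d %d %d\n",
         stackMallocSizeClass(64),        // 0
         stackMallocSizeClass(65),        // 1
         stackMallocSizeClass(1 << 16));  // 10
}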
@@ -1830,8 +1909,7 @@ void FunctionStackPoisoner::poisonStack() { } // We are done. Remove the old unused alloca instructions. - for (auto AI : AllocaVec) - AI->eraseFromParent(); + for (auto AI : AllocaVec) AI->eraseFromParent(); } void FunctionStackPoisoner::poisonAlloca(Value *V, uint64_t Size, @@ -1839,9 +1917,9 @@ void FunctionStackPoisoner::poisonAlloca(Value *V, uint64_t Size, // For now just insert the call to ASan runtime. Value *AddrArg = IRB.CreatePointerCast(V, IntptrTy); Value *SizeArg = ConstantInt::get(IntptrTy, Size); - IRB.CreateCall2(DoPoison ? AsanPoisonStackMemoryFunc - : AsanUnpoisonStackMemoryFunc, - AddrArg, SizeArg); + IRB.CreateCall2( + DoPoison ? AsanPoisonStackMemoryFunc : AsanUnpoisonStackMemoryFunc, + AddrArg, SizeArg); } // Handling llvm.lifetime intrinsics for a given %alloca: @@ -1856,12 +1934,11 @@ void FunctionStackPoisoner::poisonAlloca(Value *V, uint64_t Size, AllocaInst *FunctionStackPoisoner::findAllocaForValue(Value *V) { if (AllocaInst *AI = dyn_cast<AllocaInst>(V)) // We're interested only in allocas we can handle. - return isInterestingAlloca(*AI) ? AI : nullptr; + return ASan.isInterestingAlloca(*AI) ? AI : nullptr; // See if we've already calculated (or started to calculate) alloca for a // given value. AllocaForValueMapTy::iterator I = AllocaForValue.find(V); - if (I != AllocaForValue.end()) - return I->second; + if (I != AllocaForValue.end()) return I->second; // Store 0 while we're calculating alloca for value V to avoid // infinite recursion if the value references itself. AllocaForValue[V] = nullptr; @@ -1880,8 +1957,7 @@ AllocaInst *FunctionStackPoisoner::findAllocaForValue(Value *V) { Res = IncValueAI; } } - if (Res) - AllocaForValue[V] = Res; + if (Res) AllocaForValue[V] = Res; return Res; } @@ -1912,14 +1988,14 @@ Value *FunctionStackPoisoner::computePartialRzMagic(Value *PartialSize, Value *Shift = IRB.CreateAnd(PartialSize, IRB.getInt32(~7)); unsigned Val1Int = kAsanAllocaPartialVal1; unsigned Val2Int = kAsanAllocaPartialVal2; - if (!ASan.DL->isLittleEndian()) { + if (!F.getParent()->getDataLayout().isLittleEndian()) { Val1Int = sys::getSwappedBytes(Val1Int); Val2Int = sys::getSwappedBytes(Val2Int); } Value *Val1 = shiftAllocaMagic(IRB.getInt32(Val1Int), IRB, Shift); Value *PartialBits = IRB.CreateAnd(PartialSize, IRB.getInt32(7)); // For BigEndian get 0x000000YZ -> 0xYZ000000. - if (ASan.DL->isBigEndian()) + if (F.getParent()->getDataLayout().isBigEndian()) PartialBits = IRB.CreateShl(PartialBits, IRB.getInt32(24)); Value *Val2 = IRB.getInt32(Val2Int); Value *Cond = @@ -1953,7 +2029,8 @@ void FunctionStackPoisoner::handleDynamicAllocaCall( // redzones, and OldSize is number of allocated blocks with // ElementSize size, get allocated memory size in bytes by // OldSize * ElementSize. - unsigned ElementSize = ASan.DL->getTypeAllocSize(AI->getAllocatedType()); + unsigned ElementSize = + F.getParent()->getDataLayout().getTypeAllocSize(AI->getAllocatedType()); Value *OldSize = IRB.CreateMul(AI->getArraySize(), ConstantInt::get(IntptrTy, ElementSize)); @@ -2021,3 +2098,20 @@ void FunctionStackPoisoner::handleDynamicAllocaCall( AI->eraseFromParent(); NumInstrumentedDynamicAllocas++; } + +// isSafeAccess returns true if Addr is always inbounds with respect to its +// base object. For example, it is a field access or an array access with +// constant inbounds index.
+bool AddressSanitizer::isSafeAccess(ObjectSizeOffsetVisitor &ObjSizeVis, + Value *Addr, uint64_t TypeSize) const { + SizeOffsetType SizeOffset = ObjSizeVis.compute(Addr); + if (!ObjSizeVis.bothKnown(SizeOffset)) return false; + uint64_t Size = SizeOffset.first.getZExtValue(); + int64_t Offset = SizeOffset.second.getSExtValue(); + // Three checks are required to ensure safety: + // . Offset >= 0 (since the offset is given from the base ptr) + // . Size >= Offset (unsigned) + // . Size - Offset >= NeededSize (unsigned) + return Offset >= 0 && Size >= uint64_t(Offset) && + Size - uint64_t(Offset) >= TypeSize / 8; +} diff --git a/lib/Transforms/Instrumentation/BoundsChecking.cpp b/lib/Transforms/Instrumentation/BoundsChecking.cpp index 2b5f39c..8113834 100644 --- a/lib/Transforms/Instrumentation/BoundsChecking.cpp +++ b/lib/Transforms/Instrumentation/BoundsChecking.cpp @@ -16,6 +16,7 @@ #include "llvm/ADT/Statistic.h" #include "llvm/Analysis/MemoryBuiltins.h" #include "llvm/Analysis/TargetFolder.h" +#include "llvm/Analysis/TargetLibraryInfo.h" #include "llvm/IR/DataLayout.h" #include "llvm/IR/IRBuilder.h" #include "llvm/IR/InstIterator.h" @@ -24,7 +25,6 @@ #include "llvm/Support/CommandLine.h" #include "llvm/Support/Debug.h" #include "llvm/Support/raw_ostream.h" -#include "llvm/Analysis/TargetLibraryInfo.h" using namespace llvm; #define DEBUG_TYPE "bounds-checking" @@ -49,12 +49,10 @@ namespace { bool runOnFunction(Function &F) override; void getAnalysisUsage(AnalysisUsage &AU) const override { - AU.addRequired<DataLayoutPass>(); AU.addRequired<TargetLibraryInfoWrapperPass>(); } private: - const DataLayout *DL; const TargetLibraryInfo *TLI; ObjectSizeOffsetEvaluator *ObjSizeEval; BuilderTy *Builder; @@ -63,7 +61,7 @@ namespace { BasicBlock *getTrapBB(); void emitBranchToTrap(Value *Cmp = nullptr); - bool instrument(Value *Ptr, Value *Val); + bool instrument(Value *Ptr, Value *Val, const DataLayout &DL); }; } @@ -125,8 +123,9 @@ void BoundsChecking::emitBranchToTrap(Value *Cmp) { /// result from the load or the value being stored. It is used to determine the /// size of memory block that is touched. /// Returns true if any change was made to the IR, false otherwise. 
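The three comparisons in isSafeAccess above are ordered so that the unsigned subtraction can never wrap: Size - Offset is evaluated only once Size >= uint64_t(Offset) is known to hold. BoundsChecking::instrument below emits the same three checks as IR. A small self-contained sketch of the decision, where Size, Offset and NeededSize stand in for SizeOffset.first, SizeOffset.second and TypeSize / 8:

#include <cassert>
#include <cstdint>

// The access [Offset, Offset + NeededSize) must lie inside [0, Size).
static bool isInBounds(uint64_t Size, int64_t Offset, uint64_t NeededSize) {
  return Offset >= 0 &&                         // not before the base object
         Size >= uint64_t(Offset) &&            // the start is inside
         Size - uint64_t(Offset) >= NeededSize; // room for the whole access
}

int main() {
  assert(isInBounds(8, 4, 4));   // 4-byte load at byte 4 of an 8-byte object
  assert(!isInBounds(8, 6, 4));  // bytes 6..9 run past the end
  assert(!isInBounds(8, -4, 4)); // negative offset from the base pointer
  // A naive Offset + NeededSize <= Size test could wrap for huge NeededSize;
  // the subtraction form cannot, since Size >= Offset was checked first.
  assert(!isInBounds(8, 4, ~0ULL));
  return 0;
}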
-bool BoundsChecking::instrument(Value *Ptr, Value *InstVal) { - uint64_t NeededSize = DL->getTypeStoreSize(InstVal->getType()); +bool BoundsChecking::instrument(Value *Ptr, Value *InstVal, + const DataLayout &DL) { + uint64_t NeededSize = DL.getTypeStoreSize(InstVal->getType()); DEBUG(dbgs() << "Instrument " << *Ptr << " for " << Twine(NeededSize) << " bytes\n"); @@ -141,7 +140,7 @@ bool BoundsChecking::instrument(Value *Ptr, Value *InstVal) { Value *Offset = SizeOffset.second; ConstantInt *SizeCI = dyn_cast<ConstantInt>(Size); - Type *IntTy = DL->getIntPtrType(Ptr->getType()); + Type *IntTy = DL.getIntPtrType(Ptr->getType()); Value *NeededSizeVal = ConstantInt::get(IntTy, NeededSize); // three checks are required to ensure safety: @@ -165,7 +164,7 @@ bool BoundsChecking::instrument(Value *Ptr, Value *InstVal) { } bool BoundsChecking::runOnFunction(Function &F) { - DL = &getAnalysis<DataLayoutPass>().getDataLayout(); + const DataLayout &DL = F.getParent()->getDataLayout(); TLI = &getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(); TrapBB = nullptr; @@ -192,13 +191,16 @@ bool BoundsChecking::runOnFunction(Function &F) { Builder->SetInsertPoint(Inst); if (LoadInst *LI = dyn_cast<LoadInst>(Inst)) { - MadeChange |= instrument(LI->getPointerOperand(), LI); + MadeChange |= instrument(LI->getPointerOperand(), LI, DL); } else if (StoreInst *SI = dyn_cast<StoreInst>(Inst)) { - MadeChange |= instrument(SI->getPointerOperand(), SI->getValueOperand()); + MadeChange |= + instrument(SI->getPointerOperand(), SI->getValueOperand(), DL); } else if (AtomicCmpXchgInst *AI = dyn_cast<AtomicCmpXchgInst>(Inst)) { - MadeChange |= instrument(AI->getPointerOperand(),AI->getCompareOperand()); + MadeChange |= + instrument(AI->getPointerOperand(), AI->getCompareOperand(), DL); } else if (AtomicRMWInst *AI = dyn_cast<AtomicRMWInst>(Inst)) { - MadeChange |= instrument(AI->getPointerOperand(), AI->getValOperand()); + MadeChange |= + instrument(AI->getPointerOperand(), AI->getValOperand(), DL); } else { llvm_unreachable("unknown Instruction type"); } diff --git a/lib/Transforms/Instrumentation/DataFlowSanitizer.cpp b/lib/Transforms/Instrumentation/DataFlowSanitizer.cpp index 6adf0d2..b3925ee 100644 --- a/lib/Transforms/Instrumentation/DataFlowSanitizer.cpp +++ b/lib/Transforms/Instrumentation/DataFlowSanitizer.cpp @@ -217,7 +217,6 @@ class DataFlowSanitizer : public ModulePass { WK_Custom }; - const DataLayout *DL; Module *Mod; LLVMContext *Ctx; IntegerType *ShadowTy; @@ -422,16 +421,13 @@ bool DataFlowSanitizer::doInitialization(Module &M) { bool IsMIPS64 = TargetTriple.getArch() == llvm::Triple::mips64 || TargetTriple.getArch() == llvm::Triple::mips64el; - DataLayoutPass *DLP = getAnalysisIfAvailable<DataLayoutPass>(); - if (!DLP) - report_fatal_error("data layout missing"); - DL = &DLP->getDataLayout(); + const DataLayout &DL = M.getDataLayout(); Mod = &M; Ctx = &M.getContext(); ShadowTy = IntegerType::get(*Ctx, ShadowWidth); ShadowPtrTy = PointerType::getUnqual(ShadowTy); - IntptrTy = DL->getIntPtrType(*Ctx); + IntptrTy = DL.getIntPtrType(*Ctx); ZeroShadow = ConstantInt::getSigned(ShadowTy, 0); ShadowPtrMul = ConstantInt::getSigned(IntptrTy, ShadowWidth / 8); if (IsX86_64) @@ -593,9 +589,6 @@ Constant *DataFlowSanitizer::getOrBuildTrampolineFunction(FunctionType *FT, } bool DataFlowSanitizer::runOnModule(Module &M) { - if (!DL) - return false; - if (ABIList.isIn(M, "skip")) return false; @@ -1056,7 +1049,7 @@ Value *DFSanFunction::loadShadow(Value *Addr, uint64_t Size, uint64_t Align, uint64_t ShadowAlign = Align 
* DFS.ShadowWidth / 8; SmallVector<Value *, 2> Objs; - GetUnderlyingObjects(Addr, Objs, DFS.DL); + GetUnderlyingObjects(Addr, Objs, Pos->getModule()->getDataLayout()); bool AllConstants = true; for (SmallVector<Value *, 2>::iterator i = Objs.begin(), e = Objs.end(); i != e; ++i) { @@ -1157,7 +1150,8 @@ Value *DFSanFunction::loadShadow(Value *Addr, uint64_t Size, uint64_t Align, } void DFSanVisitor::visitLoadInst(LoadInst &LI) { - uint64_t Size = DFSF.DFS.DL->getTypeStoreSize(LI.getType()); + auto &DL = LI.getModule()->getDataLayout(); + uint64_t Size = DL.getTypeStoreSize(LI.getType()); if (Size == 0) { DFSF.setShadow(&LI, DFSF.DFS.ZeroShadow); return; @@ -1167,7 +1161,7 @@ void DFSanVisitor::visitLoadInst(LoadInst &LI) { if (ClPreserveAlignment) { Align = LI.getAlignment(); if (Align == 0) - Align = DFSF.DFS.DL->getABITypeAlignment(LI.getType()); + Align = DL.getABITypeAlignment(LI.getType()); } else { Align = 1; } @@ -1235,8 +1229,8 @@ void DFSanFunction::storeShadow(Value *Addr, uint64_t Size, uint64_t Align, } void DFSanVisitor::visitStoreInst(StoreInst &SI) { - uint64_t Size = - DFSF.DFS.DL->getTypeStoreSize(SI.getValueOperand()->getType()); + auto &DL = SI.getModule()->getDataLayout(); + uint64_t Size = DL.getTypeStoreSize(SI.getValueOperand()->getType()); if (Size == 0) return; @@ -1244,7 +1238,7 @@ void DFSanVisitor::visitStoreInst(StoreInst &SI) { if (ClPreserveAlignment) { Align = SI.getAlignment(); if (Align == 0) - Align = DFSF.DFS.DL->getABITypeAlignment(SI.getValueOperand()->getType()); + Align = DL.getABITypeAlignment(SI.getValueOperand()->getType()); } else { Align = 1; } diff --git a/lib/Transforms/Instrumentation/GCOVProfiling.cpp b/lib/Transforms/Instrumentation/GCOVProfiling.cpp index cb965fb..a793e69 100644 --- a/lib/Transforms/Instrumentation/GCOVProfiling.cpp +++ b/lib/Transforms/Instrumentation/GCOVProfiling.cpp @@ -47,6 +47,8 @@ using namespace llvm; static cl::opt<std::string> DefaultGCOVVersion("default-gcov-version", cl::init("402*"), cl::Hidden, cl::ValueRequired); +static cl::opt<bool> DefaultExitBlockBeforeBody("gcov-exit-block-before-body", + cl::init(false), cl::Hidden); GCOVOptions GCOVOptions::getDefault() { GCOVOptions Options; @@ -55,6 +57,7 @@ GCOVOptions GCOVOptions::getDefault() { Options.UseCfgChecksum = false; Options.NoRedZone = false; Options.FunctionNamesInData = true; + Options.ExitBlockBeforeBody = DefaultExitBlockBeforeBody; if (DefaultGCOVVersion.size() != 4) { llvm::report_fatal_error(std::string("Invalid -default-gcov-version: ") + @@ -70,20 +73,10 @@ namespace { class GCOVProfiler : public ModulePass { public: static char ID; - GCOVProfiler() : ModulePass(ID), Options(GCOVOptions::getDefault()) { - init(); - } - GCOVProfiler(const GCOVOptions &Options) : ModulePass(ID), Options(Options){ + GCOVProfiler() : GCOVProfiler(GCOVOptions::getDefault()) {} + GCOVProfiler(const GCOVOptions &Opts) : ModulePass(ID), Options(Opts) { assert((Options.EmitNotes || Options.EmitData) && "GCOVProfiler asked to do nothing?"); - init(); - } - const char *getPassName() const override { - return "GCOV Profiler"; - } - - private: - void init() { ReversedVersion[0] = Options.Version[3]; ReversedVersion[1] = Options.Version[2]; ReversedVersion[2] = Options.Version[1]; @@ -91,6 +84,11 @@ namespace { ReversedVersion[4] = '\0'; initializeGCOVProfilerPass(*PassRegistry::getPassRegistry()); } + const char *getPassName() const override { + return "GCOV Profiler"; + } + + private: bool runOnModule(Module &M) override; // Create the .gcno files for the Module based on 
DebugInfo. @@ -312,7 +310,7 @@ namespace { class GCOVFunction : public GCOVRecord { public: GCOVFunction(DISubprogram SP, raw_ostream *os, uint32_t Ident, - bool UseCfgChecksum) + bool UseCfgChecksum, bool ExitBlockBeforeBody) : SP(SP), Ident(Ident), UseCfgChecksum(UseCfgChecksum), CfgChecksum(0), ReturnBlock(1, os) { this->os = os; @@ -322,11 +320,13 @@ namespace { uint32_t i = 0; for (auto &BB : *F) { - // Skip index 1 (0, 2, 3, 4, ...) because that's assigned to the - // ReturnBlock. - bool first = i == 0; - Blocks.insert(std::make_pair(&BB, GCOVBlock(i++ + !first, os))); + // Skip index 1 if it's assigned to the ReturnBlock. + if (i == 1 && ExitBlockBeforeBody) + ++i; + Blocks.insert(std::make_pair(&BB, GCOVBlock(i++, os))); } + if (!ExitBlockBeforeBody) + ReturnBlock.Number = i; std::string FunctionNameAndLine; raw_string_ostream FNLOS(FunctionNameAndLine); @@ -469,7 +469,7 @@ static bool functionHasLines(Function *F) { if (Loc.isUnknown()) continue; // Artificial lines such as calls to the global constructors. - if (Loc.getLine() == 0) continue; + if (Loc.getLine() == 0) continue; return true; } @@ -513,7 +513,8 @@ void GCOVProfiler::emitProfileNotes() { EntryBlock.splitBasicBlock(It); Funcs.push_back(make_unique<GCOVFunction>(SP, &out, FunctionIdent++, - Options.UseCfgChecksum)); + Options.UseCfgChecksum, + Options.ExitBlockBeforeBody)); GCOVFunction &Func = *Funcs.back(); for (Function::iterator BB = F->begin(), E = F->end(); BB != E; ++BB) { diff --git a/lib/Transforms/Instrumentation/MemorySanitizer.cpp b/lib/Transforms/Instrumentation/MemorySanitizer.cpp index 4152679..c2aa1e2 100644 --- a/lib/Transforms/Instrumentation/MemorySanitizer.cpp +++ b/lib/Transforms/Instrumentation/MemorySanitizer.cpp @@ -274,7 +274,6 @@ class MemorySanitizer : public FunctionPass { MemorySanitizer(int TrackOrigins = 0) : FunctionPass(ID), TrackOrigins(std::max(TrackOrigins, (int)ClTrackOrigins)), - DL(nullptr), WarningFn(nullptr) {} const char *getPassName() const override { return "MemorySanitizer"; } bool runOnFunction(Function &F) override; @@ -287,7 +286,6 @@ class MemorySanitizer : public FunctionPass { /// \brief Track origins (allocation points) of uninitialized values. int TrackOrigins; - const DataLayout *DL; LLVMContext *C; Type *IntptrTy; Type *OriginTy; @@ -449,10 +447,7 @@ void MemorySanitizer::initializeCallbacks(Module &M) { /// /// inserts a call to __msan_init to the module's constructor list. bool MemorySanitizer::doInitialization(Module &M) { - DataLayoutPass *DLP = getAnalysisIfAvailable<DataLayoutPass>(); - if (!DLP) - report_fatal_error("data layout missing"); - DL = &DLP->getDataLayout(); + auto &DL = M.getDataLayout(); Triple TargetTriple(M.getTargetTriple()); switch (TargetTriple.getOS()) { @@ -604,7 +599,8 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> { } Value *originToIntptr(IRBuilder<> &IRB, Value *Origin) { - unsigned IntptrSize = MS.DL->getTypeStoreSize(MS.IntptrTy); + const DataLayout &DL = F.getParent()->getDataLayout(); + unsigned IntptrSize = DL.getTypeStoreSize(MS.IntptrTy); if (IntptrSize == kOriginSize) return Origin; assert(IntptrSize == kOriginSize * 2); Origin = IRB.CreateIntCast(Origin, MS.IntptrTy, /* isSigned */ false); @@ -614,8 +610,9 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> { /// \brief Fill memory range with the given origin value. 
void paintOrigin(IRBuilder<> &IRB, Value *Origin, Value *OriginPtr, unsigned Size, unsigned Alignment) { - unsigned IntptrAlignment = MS.DL->getABITypeAlignment(MS.IntptrTy); - unsigned IntptrSize = MS.DL->getTypeStoreSize(MS.IntptrTy); + const DataLayout &DL = F.getParent()->getDataLayout(); + unsigned IntptrAlignment = DL.getABITypeAlignment(MS.IntptrTy); + unsigned IntptrSize = DL.getTypeStoreSize(MS.IntptrTy); assert(IntptrAlignment >= kMinOriginAlignment); assert(IntptrSize >= kOriginSize); @@ -643,8 +640,9 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> { void storeOrigin(IRBuilder<> &IRB, Value *Addr, Value *Shadow, Value *Origin, unsigned Alignment, bool AsCall) { + const DataLayout &DL = F.getParent()->getDataLayout(); unsigned OriginAlignment = std::max(kMinOriginAlignment, Alignment); - unsigned StoreSize = MS.DL->getTypeStoreSize(Shadow->getType()); + unsigned StoreSize = DL.getTypeStoreSize(Shadow->getType()); if (isa<StructType>(Shadow->getType())) { paintOrigin(IRB, updateOrigin(Origin, IRB), getOriginPtr(Addr, IRB, Alignment), StoreSize, @@ -661,7 +659,7 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> { } unsigned TypeSizeInBits = - MS.DL->getTypeSizeInBits(ConvertedShadow->getType()); + DL.getTypeSizeInBits(ConvertedShadow->getType()); unsigned SizeIndex = TypeSizeToSizeIndex(TypeSizeInBits); if (AsCall && SizeIndex < kNumberOfAccessSizes) { Value *Fn = MS.MaybeStoreOriginFn[SizeIndex]; @@ -731,8 +729,9 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> { return; } - unsigned TypeSizeInBits = - MS.DL->getTypeSizeInBits(ConvertedShadow->getType()); + const DataLayout &DL = OrigIns->getModule()->getDataLayout(); + + unsigned TypeSizeInBits = DL.getTypeSizeInBits(ConvertedShadow->getType()); unsigned SizeIndex = TypeSizeToSizeIndex(TypeSizeInBits); if (AsCall && SizeIndex < kNumberOfAccessSizes) { Value *Fn = MS.MaybeWarningFn[SizeIndex]; @@ -772,7 +771,6 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> { /// \brief Add MemorySanitizer instrumentation to a function. bool runOnFunction() { MS.initializeCallbacks(*F.getParent()); - if (!MS.DL) return false; // In the presence of unreachable blocks, we may see Phi nodes with // incoming nodes from such blocks. Since InstVisitor skips unreachable @@ -828,8 +826,9 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> { // This may return weird-sized types like i1. 
if (IntegerType *IT = dyn_cast<IntegerType>(OrigTy)) return IT; + const DataLayout &DL = F.getParent()->getDataLayout(); if (VectorType *VT = dyn_cast<VectorType>(OrigTy)) { - uint32_t EltSize = MS.DL->getTypeSizeInBits(VT->getElementType()); + uint32_t EltSize = DL.getTypeSizeInBits(VT->getElementType()); return VectorType::get(IntegerType::get(*MS.C, EltSize), VT->getNumElements()); } @@ -845,7 +844,7 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> { DEBUG(dbgs() << "getShadowTy: " << *ST << " ===> " << *Res << "\n"); return Res; } - uint32_t TypeSize = MS.DL->getTypeSizeInBits(OrigTy); + uint32_t TypeSize = DL.getTypeSizeInBits(OrigTy); return IntegerType::get(*MS.C, TypeSize); } @@ -1038,14 +1037,16 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> { Function *F = A->getParent(); IRBuilder<> EntryIRB(F->getEntryBlock().getFirstNonPHI()); unsigned ArgOffset = 0; + const DataLayout &DL = F->getParent()->getDataLayout(); for (auto &FArg : F->args()) { if (!FArg.getType()->isSized()) { DEBUG(dbgs() << "Arg is not sized\n"); continue; } - unsigned Size = FArg.hasByValAttr() - ? MS.DL->getTypeAllocSize(FArg.getType()->getPointerElementType()) - : MS.DL->getTypeAllocSize(FArg.getType()); + unsigned Size = + FArg.hasByValAttr() + ? DL.getTypeAllocSize(FArg.getType()->getPointerElementType()) + : DL.getTypeAllocSize(FArg.getType()); if (A == &FArg) { bool Overflow = ArgOffset + Size > kParamTLSSize; Value *Base = getShadowPtrForArgument(&FArg, EntryIRB, ArgOffset); @@ -1056,7 +1057,7 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> { unsigned ArgAlign = FArg.getParamAlignment(); if (ArgAlign == 0) { Type *EltType = A->getType()->getPointerElementType(); - ArgAlign = MS.DL->getABITypeAlignment(EltType); + ArgAlign = DL.getABITypeAlignment(EltType); } if (Overflow) { // ParamTLS overflow. 
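The argument walk above lays shadow values out in a fixed-size parameter TLS array: each argument starts at ArgOffset and takes its getTypeAllocSize() bytes, and an argument that would run past kParamTLSSize is the overflow case that receives a clean shadow. The sketch below assumes the offset advances with 8-byte (kShadowTLSAlignment) rounding and an 800-byte TLS array; neither value is quoted from this diff, and the sample argument sizes are hypothetical:

#include <cstdint>
#include <cstdio>

// Assumed constants, modeled on the pass but not quoted from this diff.
static const unsigned kParamTLSSize = 800;
static const unsigned kShadowTLSAlignment = 8;

static unsigned roundUpTo(unsigned X, unsigned A) { return (X + A - 1) / A * A; }

int main() {
  // Hypothetical argument alloc sizes in bytes: i32, double, 20-byte struct.
  const unsigned Sizes[] = {4, 8, 20};
  unsigned ArgOffset = 0;
  for (unsigned Size : Sizes) {
    bool Overflow = ArgOffset + Size > kParamTLSSize; // would get clean shadow
    printf("arg of %2u bytes -> TLS offset %3u%s\n", Size, ArgOffset,
           Overflow ? " (overflow)" : "");
    ArgOffset += roundUpTo(Size, kShadowTLSAlignment);
  }
  return 0; // prints offsets 0, 8, 16: every slot is 8-byte aligned
}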
@@ -2427,10 +2428,11 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> { DEBUG(dbgs() << " Arg#" << i << ": " << *A << " Shadow: " << *ArgShadow << "\n"); bool ArgIsInitialized = false; + const DataLayout &DL = F.getParent()->getDataLayout(); if (CS.paramHasAttr(i + 1, Attribute::ByVal)) { assert(A->getType()->isPointerTy() && "ByVal argument is not a pointer!"); - Size = MS.DL->getTypeAllocSize(A->getType()->getPointerElementType()); + Size = DL.getTypeAllocSize(A->getType()->getPointerElementType()); if (ArgOffset + Size > kParamTLSSize) break; unsigned ParamAlignment = CS.getParamAlignment(i + 1); unsigned Alignment = std::min(ParamAlignment, kShadowTLSAlignment); @@ -2438,7 +2440,7 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> { getShadowPtr(A, Type::getInt8Ty(*MS.C), IRB), Size, Alignment); } else { - Size = MS.DL->getTypeAllocSize(A->getType()); + Size = DL.getTypeAllocSize(A->getType()); if (ArgOffset + Size > kParamTLSSize) break; Store = IRB.CreateAlignedStore(ArgShadow, ArgShadowBase, kShadowTLSAlignment); @@ -2531,7 +2533,8 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> { setShadow(&I, getCleanShadow(&I)); setOrigin(&I, getCleanOrigin()); IRBuilder<> IRB(I.getNextNode()); - uint64_t Size = MS.DL->getTypeAllocSize(I.getAllocatedType()); + const DataLayout &DL = F.getParent()->getDataLayout(); + uint64_t Size = DL.getTypeAllocSize(I.getAllocatedType()); if (PoisonStack && ClPoisonStackWithCall) { IRB.CreateCall2(MS.MsanPoisonStackFn, IRB.CreatePointerCast(&I, IRB.getInt8PtrTy()), @@ -2723,6 +2726,7 @@ struct VarArgAMD64Helper : public VarArgHelper { unsigned GpOffset = 0; unsigned FpOffset = AMD64GpEndOffset; unsigned OverflowOffset = AMD64FpEndOffset; + const DataLayout &DL = F.getParent()->getDataLayout(); for (CallSite::arg_iterator ArgIt = CS.arg_begin(), End = CS.arg_end(); ArgIt != End; ++ArgIt) { Value *A = *ArgIt; @@ -2732,7 +2736,7 @@ struct VarArgAMD64Helper : public VarArgHelper { // ByVal arguments always go to the overflow area. 
assert(A->getType()->isPointerTy()); Type *RealTy = A->getType()->getPointerElementType(); - uint64_t ArgSize = MS.DL->getTypeAllocSize(RealTy); + uint64_t ArgSize = DL.getTypeAllocSize(RealTy); Value *Base = getShadowPtrForVAArgument(RealTy, IRB, OverflowOffset); OverflowOffset += RoundUpToAlignment(ArgSize, 8); IRB.CreateMemCpy(Base, MSV.getShadowPtr(A, IRB.getInt8Ty(), IRB), @@ -2754,7 +2758,7 @@ struct VarArgAMD64Helper : public VarArgHelper { FpOffset += 16; break; case AK_Memory: - uint64_t ArgSize = MS.DL->getTypeAllocSize(A->getType()); + uint64_t ArgSize = DL.getTypeAllocSize(A->getType()); Base = getShadowPtrForVAArgument(A->getType(), IRB, OverflowOffset); OverflowOffset += RoundUpToAlignment(ArgSize, 8); } @@ -2862,11 +2866,12 @@ struct VarArgMIPS64Helper : public VarArgHelper { void visitCallSite(CallSite &CS, IRBuilder<> &IRB) override { unsigned VAArgOffset = 0; + const DataLayout &DL = F.getParent()->getDataLayout(); for (CallSite::arg_iterator ArgIt = CS.arg_begin() + 1, End = CS.arg_end(); ArgIt != End; ++ArgIt) { Value *A = *ArgIt; Value *Base; - uint64_t ArgSize = MS.DL->getTypeAllocSize(A->getType()); + uint64_t ArgSize = DL.getTypeAllocSize(A->getType()); #if defined(__MIPSEB__) || defined(MIPSEB) // Adjusting the shadow for arguments with size < 8 to match the placement // of bits in big endian systems diff --git a/lib/Transforms/Instrumentation/SanitizerCoverage.cpp b/lib/Transforms/Instrumentation/SanitizerCoverage.cpp index 8c56e87..289675e 100644 --- a/lib/Transforms/Instrumentation/SanitizerCoverage.cpp +++ b/lib/Transforms/Instrumentation/SanitizerCoverage.cpp @@ -59,6 +59,7 @@ static const char *const kSanCovWithCheckName = "__sanitizer_cov_with_check"; static const char *const kSanCovIndirCallName = "__sanitizer_cov_indir_call16"; static const char *const kSanCovTraceEnter = "__sanitizer_cov_trace_func_enter"; static const char *const kSanCovTraceBB = "__sanitizer_cov_trace_basic_block"; +static const char *const kSanCovTraceCmp = "__sanitizer_cov_trace_cmp"; static const char *const kSanCovModuleCtorName = "sancov.module_ctor"; static const uint64_t kSanCtorAndDtorPriority = 2; @@ -72,7 +73,7 @@ static cl::opt<unsigned> ClCoverageBlockThreshold( "sanitizer-coverage-block-threshold", cl::desc("Use a callback with a guard check inside it if there are" " more than this number of blocks."), - cl::Hidden, cl::init(1000)); + cl::Hidden, cl::init(500)); static cl::opt<bool> ClExperimentalTracing("sanitizer-coverage-experimental-tracing", @@ -80,6 +81,22 @@ static cl::opt<bool> "callbacks at every basic block"), cl::Hidden, cl::init(false)); +static cl::opt<bool> + ClExperimentalCMPTracing("sanitizer-coverage-experimental-trace-compares", + cl::desc("Experimental tracing of CMP and similar " + "instructions"), + cl::Hidden, cl::init(false)); + +// Experimental 8-bit counters used as an additional search heuristic during +// coverage-guided fuzzing. +// The counters are not thread-friendly: +// - contention on these counters may cause significant slowdown; +// - the counter updates are racy and the results may be inaccurate. +// They are also inaccurate due to 8-bit integer overflow.
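The counter update that InjectCoverageAtBlock emits further down is exactly the pattern this comment warns about: a one-byte load, increment and store with no atomics, both tagged nosanitize. Roughly, in C terms (CounterArray stands in for the __sancov_gen_cov_counter global created in runOnModule):

#include <cstdint>
#include <cstdio>

// Stand-in for the __sancov_gen_cov_counter global; one byte per block.
static uint8_t CounterArray[256];

// What each instrumented block executes: load, add 1, store, no atomics.
static void coverageHit(unsigned BlockIndex) {
  uint8_t C = CounterArray[BlockIndex]; // racy read by design
  CounterArray[BlockIndex] = C + 1;     // racy write; wraps 255 -> 0
}

int main() {
  for (int i = 0; i < 256; i++) coverageHit(7);
  // After 256 hits the counter reads 0 again: overflow makes counts inexact,
  // and concurrent threads can additionally lose increments.
  printf("counter after 256 hits: %u\n", CounterArray[7]);
  return 0;
}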
+static cl::opt<bool> ClUse8bitCounters("sanitizer-coverage-8bit-counters", + cl::desc("Experimental 8-bit counters"), + cl::Hidden, cl::init(false)); + namespace { class SanitizerCoverageModule : public ModulePass { @@ -94,26 +111,29 @@ class SanitizerCoverageModule : public ModulePass { return "SanitizerCoverageModule"; } - void getAnalysisUsage(AnalysisUsage &AU) const override { - AU.addRequired<DataLayoutPass>(); - } - private: void InjectCoverageForIndirectCalls(Function &F, ArrayRef<Instruction *> IndirCalls); - bool InjectCoverage(Function &F, ArrayRef<BasicBlock *> AllBlocks, - ArrayRef<Instruction *> IndirCalls); + void InjectTraceForCmp(Function &F, ArrayRef<Instruction *> CmpTraceTargets); + bool InjectCoverage(Function &F, ArrayRef<BasicBlock *> AllBlocks); + void SetNoSanitizeMetadata(Instruction *I); void InjectCoverageAtBlock(Function &F, BasicBlock &BB, bool UseCalls); + unsigned NumberOfInstrumentedBlocks() { + return SanCovFunction->getNumUses() + SanCovWithCheckFunction->getNumUses(); + } Function *SanCovFunction; Function *SanCovWithCheckFunction; Function *SanCovIndirCallFunction; Function *SanCovModuleInit; Function *SanCovTraceEnter, *SanCovTraceBB; + Function *SanCovTraceCmpFunction; InlineAsm *EmptyAsm; - Type *IntptrTy; + Type *IntptrTy, *Int64Ty; LLVMContext *C; + const DataLayout *DL; GlobalVariable *GuardArray; + GlobalVariable *EightBitCounterArray; int CoverageLevel; }; @@ -133,12 +153,13 @@ static Function *checkInterfaceFunction(Constant *FuncOrBitcast) { bool SanitizerCoverageModule::runOnModule(Module &M) { if (!CoverageLevel) return false; C = &(M.getContext()); - DataLayoutPass *DLP = &getAnalysis<DataLayoutPass>(); - IntptrTy = Type::getIntNTy(*C, DLP->getDataLayout().getPointerSizeInBits()); + DL = &M.getDataLayout(); + IntptrTy = Type::getIntNTy(*C, DL->getPointerSizeInBits()); Type *VoidTy = Type::getVoidTy(*C); IRBuilder<> IRB(*C); Type *Int8PtrTy = PointerType::getUnqual(IRB.getInt8Ty()); Type *Int32PtrTy = PointerType::getUnqual(IRB.getInt32Ty()); + Int64Ty = IRB.getInt64Ty(); Function *CtorFunc = Function::Create(FunctionType::get(VoidTy, false), @@ -152,9 +173,12 @@ bool SanitizerCoverageModule::runOnModule(Module &M) { M.getOrInsertFunction(kSanCovWithCheckName, VoidTy, Int32PtrTy, nullptr)); SanCovIndirCallFunction = checkInterfaceFunction(M.getOrInsertFunction( kSanCovIndirCallName, VoidTy, IntptrTy, IntptrTy, nullptr)); - SanCovModuleInit = checkInterfaceFunction( - M.getOrInsertFunction(kSanCovModuleInitName, Type::getVoidTy(*C), - Int32PtrTy, IntptrTy, Int8PtrTy, nullptr)); + SanCovTraceCmpFunction = checkInterfaceFunction(M.getOrInsertFunction( + kSanCovTraceCmp, VoidTy, Int64Ty, Int64Ty, Int64Ty, nullptr)); + + SanCovModuleInit = checkInterfaceFunction(M.getOrInsertFunction( + kSanCovModuleInitName, Type::getVoidTy(*C), Int32PtrTy, IntptrTy, + Int8PtrTy, Int8PtrTy, nullptr)); SanCovModuleInit->setLinkage(Function::ExternalLinkage); // We insert an empty inline asm after cov callbacks to avoid callback merge. EmptyAsm = InlineAsm::get(FunctionType::get(IRB.getVoidTy(), false), @@ -171,26 +195,49 @@ bool SanitizerCoverageModule::runOnModule(Module &M) { // At this point we create a dummy array of guards because we don't
Type *Int32Ty = IRB.getInt32Ty(); + Type *Int8Ty = IRB.getInt8Ty(); + GuardArray = new GlobalVariable(M, Int32Ty, false, GlobalValue::ExternalLinkage, nullptr, "__sancov_gen_cov_tmp"); + if (ClUse8bitCounters) + EightBitCounterArray = + new GlobalVariable(M, Int8Ty, false, GlobalVariable::ExternalLinkage, + nullptr, "__sancov_gen_cov_tmp"); for (auto &F : M) runOnFunction(F); + auto N = NumberOfInstrumentedBlocks(); + // Now we know how many elements we need. Create an array of guards // with one extra element at the beginning for the size. - Type *Int32ArrayNTy = - ArrayType::get(Int32Ty, SanCovFunction->getNumUses() + 1); + Type *Int32ArrayNTy = ArrayType::get(Int32Ty, N + 1); GlobalVariable *RealGuardArray = new GlobalVariable( M, Int32ArrayNTy, false, GlobalValue::PrivateLinkage, Constant::getNullValue(Int32ArrayNTy), "__sancov_gen_cov"); + // Replace the dummy array with the real one. GuardArray->replaceAllUsesWith( IRB.CreatePointerCast(RealGuardArray, Int32PtrTy)); GuardArray->eraseFromParent(); + GlobalVariable *RealEightBitCounterArray; + if (ClUse8bitCounters) { + // Make sure the array is 16-aligned. + static const int kCounterAlignment = 16; + Type *Int8ArrayNTy = + ArrayType::get(Int8Ty, RoundUpToAlignment(N, kCounterAlignment)); + RealEightBitCounterArray = new GlobalVariable( + M, Int8ArrayNTy, false, GlobalValue::PrivateLinkage, + Constant::getNullValue(Int8ArrayNTy), "__sancov_gen_cov_counter"); + RealEightBitCounterArray->setAlignment(kCounterAlignment); + EightBitCounterArray->replaceAllUsesWith( + IRB.CreatePointerCast(RealEightBitCounterArray, Int8PtrTy)); + EightBitCounterArray->eraseFromParent(); + } + // Create variable for module (compilation unit) name Constant *ModNameStrConst = ConstantDataArray::getString(M.getContext(), M.getName(), true); @@ -200,10 +247,13 @@ bool SanitizerCoverageModule::runOnModule(Module &M) { // Call __sanitizer_cov_module_init IRB.SetInsertPoint(CtorFunc->getEntryBlock().getTerminator()); - IRB.CreateCall3(SanCovModuleInit, - IRB.CreatePointerCast(RealGuardArray, Int32PtrTy), - ConstantInt::get(IntptrTy, SanCovFunction->getNumUses()), - IRB.CreatePointerCast(ModuleName, Int8PtrTy)); + IRB.CreateCall4( + SanCovModuleInit, IRB.CreatePointerCast(RealGuardArray, Int32PtrTy), + ConstantInt::get(IntptrTy, N), + ClUse8bitCounters + ? 
IRB.CreatePointerCast(RealEightBitCounterArray, Int8PtrTy) + : Constant::getNullValue(Int8PtrTy), + IRB.CreatePointerCast(ModuleName, Int8PtrTy)); return true; } @@ -215,23 +265,28 @@ bool SanitizerCoverageModule::runOnFunction(Function &F) { SplitAllCriticalEdges(F); SmallVector<Instruction*, 8> IndirCalls; SmallVector<BasicBlock*, 16> AllBlocks; + SmallVector<Instruction*, 8> CmpTraceTargets; for (auto &BB : F) { AllBlocks.push_back(&BB); - if (CoverageLevel >= 4) - for (auto &Inst : BB) { + for (auto &Inst : BB) { + if (CoverageLevel >= 4) { CallSite CS(&Inst); if (CS && !CS.getCalledFunction()) IndirCalls.push_back(&Inst); } + if (ClExperimentalCMPTracing) + if (isa<ICmpInst>(&Inst)) + CmpTraceTargets.push_back(&Inst); + } } - InjectCoverage(F, AllBlocks, IndirCalls); + InjectCoverage(F, AllBlocks); + InjectCoverageForIndirectCalls(F, IndirCalls); + InjectTraceForCmp(F, CmpTraceTargets); return true; } -bool -SanitizerCoverageModule::InjectCoverage(Function &F, - ArrayRef<BasicBlock *> AllBlocks, - ArrayRef<Instruction *> IndirCalls) { +bool SanitizerCoverageModule::InjectCoverage(Function &F, + ArrayRef<BasicBlock *> AllBlocks) { if (!CoverageLevel) return false; if (CoverageLevel == 1) { @@ -241,7 +296,6 @@ SanitizerCoverageModule::InjectCoverage(Function &F, InjectCoverageAtBlock(F, *BB, ClCoverageBlockThreshold < AllBlocks.size()); } - InjectCoverageForIndirectCalls(F, IndirCalls); return true; } @@ -273,6 +327,32 @@ void SanitizerCoverageModule::InjectCoverageForIndirectCalls( } } +void SanitizerCoverageModule::InjectTraceForCmp( + Function &F, ArrayRef<Instruction *> CmpTraceTargets) { + if (!ClExperimentalCMPTracing) return; + for (auto I : CmpTraceTargets) { + if (ICmpInst *ICMP = dyn_cast<ICmpInst>(I)) { + IRBuilder<> IRB(ICMP); + Value *A0 = ICMP->getOperand(0); + Value *A1 = ICMP->getOperand(1); + if (!A0->getType()->isIntegerTy()) continue; + uint64_t TypeSize = DL->getTypeStoreSizeInBits(A0->getType()); + // __sanitizer_cov_trace_cmp((type_size << 32) | predicate, A0, A1); + IRB.CreateCall3( + SanCovTraceCmpFunction, + ConstantInt::get(Int64Ty, (TypeSize << 32) | ICMP->getPredicate()), + IRB.CreateIntCast(A0, Int64Ty, true), + IRB.CreateIntCast(A1, Int64Ty, true)); + } + } +} + +void SanitizerCoverageModule::SetNoSanitizeMetadata(Instruction *I) { + I->setMetadata( + I->getParent()->getParent()->getParent()->getMDKindID("nosanitize"), + MDNode::get(*C, None)); +} + void SanitizerCoverageModule::InjectCoverageAtBlock(Function &F, BasicBlock &BB, bool UseCalls) { BasicBlock::iterator IP = BB.getFirstInsertionPt(), BE = BB.end(); @@ -286,14 +366,15 @@ void SanitizerCoverageModule::InjectCoverageAtBlock(Function &F, BasicBlock &BB, } bool IsEntryBB = &BB == &F.getEntryBlock(); - DebugLoc EntryLoc = - IsEntryBB ? IP->getDebugLoc().getFnDebugLoc(*C) : IP->getDebugLoc(); + DebugLoc EntryLoc = IsEntryBB && !IP->getDebugLoc().isUnknown() + ?
IP->getDebugLoc().getFnDebugLoc(*C) + : IP->getDebugLoc(); IRBuilder<> IRB(IP); IRB.SetCurrentDebugLocation(EntryLoc); SmallVector<Value *, 1> Indices; Value *GuardP = IRB.CreateAdd( IRB.CreatePointerCast(GuardArray, IntptrTy), - ConstantInt::get(IntptrTy, (1 + SanCovFunction->getNumUses()) * 4)); + ConstantInt::get(IntptrTy, (1 + NumberOfInstrumentedBlocks()) * 4)); Type *Int32PtrTy = PointerType::getUnqual(IRB.getInt32Ty()); GuardP = IRB.CreateIntToPtr(GuardP, Int32PtrTy); if (UseCalls) { @@ -302,8 +383,7 @@ void SanitizerCoverageModule::InjectCoverageAtBlock(Function &F, BasicBlock &BB, LoadInst *Load = IRB.CreateLoad(GuardP); Load->setAtomic(Monotonic); Load->setAlignment(4); - Load->setMetadata(F.getParent()->getMDKindID("nosanitize"), - MDNode::get(*C, None)); + SetNoSanitizeMetadata(Load); Value *Cmp = IRB.CreateICmpSGE(Constant::getNullValue(Load->getType()), Load); Instruction *Ins = SplitBlockAndInsertIfThen( Cmp, IP, false, MDBuilder(*C).createBranchWeights(1, 100000)); @@ -314,6 +394,19 @@ void SanitizerCoverageModule::InjectCoverageAtBlock(Function &F, BasicBlock &BB, IRB.CreateCall(EmptyAsm); // Avoids callback merge. } + if (ClUse8bitCounters) { + IRB.SetInsertPoint(IP); + Value *P = IRB.CreateAdd( + IRB.CreatePointerCast(EightBitCounterArray, IntptrTy), + ConstantInt::get(IntptrTy, NumberOfInstrumentedBlocks() - 1)); + P = IRB.CreateIntToPtr(P, IRB.getInt8PtrTy()); + LoadInst *LI = IRB.CreateLoad(P); + Value *Inc = IRB.CreateAdd(LI, ConstantInt::get(IRB.getInt8Ty(), 1)); + StoreInst *SI = IRB.CreateStore(Inc, P); + SetNoSanitizeMetadata(LI); + SetNoSanitizeMetadata(SI); + } + if (ClExperimentalTracing) { // Experimental support for tracing. // Insert a callback with the same guard variable as used for coverage. diff --git a/lib/Transforms/Instrumentation/ThreadSanitizer.cpp b/lib/Transforms/Instrumentation/ThreadSanitizer.cpp index e4a4911..c3ba722 100644 --- a/lib/Transforms/Instrumentation/ThreadSanitizer.cpp +++ b/lib/Transforms/Instrumentation/ThreadSanitizer.cpp @@ -19,14 +19,14 @@ // The rest is handled by the run-time library. //===----------------------------------------------------------------------===// -#include "llvm/Analysis/CaptureTracking.h" -#include "llvm/Analysis/ValueTracking.h" #include "llvm/Transforms/Instrumentation.h" #include "llvm/ADT/SmallSet.h" #include "llvm/ADT/SmallString.h" #include "llvm/ADT/SmallVector.h" #include "llvm/ADT/Statistic.h" #include "llvm/ADT/StringExtras.h" +#include "llvm/Analysis/CaptureTracking.h" +#include "llvm/Analysis/ValueTracking.h" #include "llvm/IR/DataLayout.h" #include "llvm/IR/Function.h" #include "llvm/IR/IRBuilder.h" @@ -76,7 +76,7 @@ namespace { /// ThreadSanitizer: instrument the code in module to find races.
struct ThreadSanitizer : public FunctionPass { - ThreadSanitizer() : FunctionPass(ID), DL(nullptr) {} + ThreadSanitizer() : FunctionPass(ID) {} const char *getPassName() const override; bool runOnFunction(Function &F) override; bool doInitialization(Module &M) override; @@ -84,15 +84,15 @@ struct ThreadSanitizer : public FunctionPass { private: void initializeCallbacks(Module &M); - bool instrumentLoadOrStore(Instruction *I); - bool instrumentAtomic(Instruction *I); + bool instrumentLoadOrStore(Instruction *I, const DataLayout &DL); + bool instrumentAtomic(Instruction *I, const DataLayout &DL); bool instrumentMemIntrinsic(Instruction *I); - void chooseInstructionsToInstrument(SmallVectorImpl<Instruction*> &Local, - SmallVectorImpl<Instruction*> &All); + void chooseInstructionsToInstrument(SmallVectorImpl<Instruction *> &Local, + SmallVectorImpl<Instruction *> &All, + const DataLayout &DL); bool addrPointsToConstantData(Value *Addr); - int getMemoryAccessFuncIndex(Value *Addr); + int getMemoryAccessFuncIndex(Value *Addr, const DataLayout &DL); - const DataLayout *DL; Type *IntptrTy; IntegerType *OrdTy; // Callbacks to run-time library are computed in doInitialization. @@ -230,10 +230,7 @@ void ThreadSanitizer::initializeCallbacks(Module &M) { } bool ThreadSanitizer::doInitialization(Module &M) { - DataLayoutPass *DLP = getAnalysisIfAvailable<DataLayoutPass>(); - if (!DLP) - report_fatal_error("data layout missing"); - DL = &DLP->getDataLayout(); + const DataLayout &DL = M.getDataLayout(); // Always insert a call to __tsan_init into the module's CTORs. IRBuilder<> IRB(M.getContext()); @@ -285,8 +282,8 @@ bool ThreadSanitizer::addrPointsToConstantData(Value *Addr) { // 'Local' is a vector of insns within the same BB (no calls between). // 'All' is a vector of insns that will be instrumented. void ThreadSanitizer::chooseInstructionsToInstrument( - SmallVectorImpl<Instruction*> &Local, - SmallVectorImpl<Instruction*> &All) { + SmallVectorImpl<Instruction *> &Local, SmallVectorImpl<Instruction *> &All, + const DataLayout &DL) { SmallSet<Value*, 8> WriteTargets; // Iterate from the end. for (SmallVectorImpl<Instruction*>::reverse_iterator It = Local.rbegin(), @@ -310,7 +307,7 @@ void ThreadSanitizer::chooseInstructionsToInstrument( Value *Addr = isa<StoreInst>(*I) ? cast<StoreInst>(I)->getPointerOperand() : cast<LoadInst>(I)->getPointerOperand(); - if (isa<AllocaInst>(GetUnderlyingObject(Addr, nullptr)) && + if (isa<AllocaInst>(GetUnderlyingObject(Addr, DL)) && !PointerMayBeCaptured(Addr, true, true)) { // The variable is addressable but not captured, so it cannot be // referenced from a different thread and participate in a data race @@ -338,7 +335,6 @@ static bool isAtomic(Instruction *I) { } bool ThreadSanitizer::runOnFunction(Function &F) { - if (!DL) return false; initializeCallbacks(*F.getParent()); SmallVector<Instruction*, 8> RetVec; SmallVector<Instruction*, 8> AllLoadsAndStores; @@ -348,6 +344,7 @@ bool ThreadSanitizer::runOnFunction(Function &F) { bool Res = false; bool HasCalls = false; bool SanitizeFunction = F.hasFnAttribute(Attribute::SanitizeThread); + const DataLayout &DL = F.getParent()->getDataLayout(); // Traverse all instructions, collect loads/stores/returns, check for calls. 
for (auto &BB : F) { @@ -362,10 +359,11 @@ bool ThreadSanitizer::runOnFunction(Function &F) { if (isa<MemIntrinsic>(Inst)) MemIntrinCalls.push_back(&Inst); HasCalls = true; - chooseInstructionsToInstrument(LocalLoadsAndStores, AllLoadsAndStores); + chooseInstructionsToInstrument(LocalLoadsAndStores, AllLoadsAndStores, + DL); } } - chooseInstructionsToInstrument(LocalLoadsAndStores, AllLoadsAndStores); + chooseInstructionsToInstrument(LocalLoadsAndStores, AllLoadsAndStores, DL); } // We have collected all loads and stores. @@ -375,14 +373,14 @@ bool ThreadSanitizer::runOnFunction(Function &F) { // Instrument memory accesses only if we want to report bugs in the function. if (ClInstrumentMemoryAccesses && SanitizeFunction) for (auto Inst : AllLoadsAndStores) { - Res |= instrumentLoadOrStore(Inst); + Res |= instrumentLoadOrStore(Inst, DL); } // Instrument atomic memory accesses in any case (they can be used to // implement synchronization). if (ClInstrumentAtomics) for (auto Inst : AtomicAccesses) { - Res |= instrumentAtomic(Inst); + Res |= instrumentAtomic(Inst, DL); } if (ClInstrumentMemIntrinsics && SanitizeFunction) @@ -406,13 +404,14 @@ bool ThreadSanitizer::runOnFunction(Function &F) { return Res; } -bool ThreadSanitizer::instrumentLoadOrStore(Instruction *I) { +bool ThreadSanitizer::instrumentLoadOrStore(Instruction *I, + const DataLayout &DL) { IRBuilder<> IRB(I); bool IsWrite = isa<StoreInst>(*I); Value *Addr = IsWrite ? cast<StoreInst>(I)->getPointerOperand() : cast<LoadInst>(I)->getPointerOperand(); - int Idx = getMemoryAccessFuncIndex(Addr); + int Idx = getMemoryAccessFuncIndex(Addr, DL); if (Idx < 0) return false; if (IsWrite && isVtableAccess(I)) { @@ -443,7 +442,7 @@ bool ThreadSanitizer::instrumentLoadOrStore(Instruction *I) { ? cast<StoreInst>(I)->getAlignment() : cast<LoadInst>(I)->getAlignment(); Type *OrigTy = cast<PointerType>(Addr->getType())->getElementType(); - const uint32_t TypeSize = DL->getTypeStoreSizeInBits(OrigTy); + const uint32_t TypeSize = DL.getTypeStoreSizeInBits(OrigTy); Value *OnAccessFunc = nullptr; if (Alignment == 0 || Alignment >= 8 || (Alignment % (TypeSize / 8)) == 0) OnAccessFunc = IsWrite ? 
TsanWrite[Idx] : TsanRead[Idx]; @@ -504,11 +503,11 @@ bool ThreadSanitizer::instrumentMemIntrinsic(Instruction *I) { // The following page contains more background information: // http://www.hpl.hp.com/personal/Hans_Boehm/c++mm/ -bool ThreadSanitizer::instrumentAtomic(Instruction *I) { +bool ThreadSanitizer::instrumentAtomic(Instruction *I, const DataLayout &DL) { IRBuilder<> IRB(I); if (LoadInst *LI = dyn_cast<LoadInst>(I)) { Value *Addr = LI->getPointerOperand(); - int Idx = getMemoryAccessFuncIndex(Addr); + int Idx = getMemoryAccessFuncIndex(Addr, DL); if (Idx < 0) return false; const size_t ByteSize = 1 << Idx; @@ -522,7 +521,7 @@ bool ThreadSanitizer::instrumentAtomic(Instruction *I) { } else if (StoreInst *SI = dyn_cast<StoreInst>(I)) { Value *Addr = SI->getPointerOperand(); - int Idx = getMemoryAccessFuncIndex(Addr); + int Idx = getMemoryAccessFuncIndex(Addr, DL); if (Idx < 0) return false; const size_t ByteSize = 1 << Idx; @@ -536,7 +535,7 @@ bool ThreadSanitizer::instrumentAtomic(Instruction *I) { ReplaceInstWithInst(I, C); } else if (AtomicRMWInst *RMWI = dyn_cast<AtomicRMWInst>(I)) { Value *Addr = RMWI->getPointerOperand(); - int Idx = getMemoryAccessFuncIndex(Addr); + int Idx = getMemoryAccessFuncIndex(Addr, DL); if (Idx < 0) return false; Function *F = TsanAtomicRMW[RMWI->getOperation()][Idx]; @@ -553,7 +552,7 @@ bool ThreadSanitizer::instrumentAtomic(Instruction *I) { ReplaceInstWithInst(I, C); } else if (AtomicCmpXchgInst *CASI = dyn_cast<AtomicCmpXchgInst>(I)) { Value *Addr = CASI->getPointerOperand(); - int Idx = getMemoryAccessFuncIndex(Addr); + int Idx = getMemoryAccessFuncIndex(Addr, DL); if (Idx < 0) return false; const size_t ByteSize = 1 << Idx; @@ -583,11 +582,12 @@ bool ThreadSanitizer::instrumentAtomic(Instruction *I) { return true; } -int ThreadSanitizer::getMemoryAccessFuncIndex(Value *Addr) { +int ThreadSanitizer::getMemoryAccessFuncIndex(Value *Addr, + const DataLayout &DL) { Type *OrigPtrTy = Addr->getType(); Type *OrigTy = cast<PointerType>(OrigPtrTy)->getElementType(); assert(OrigTy->isSized()); - uint32_t TypeSize = DL->getTypeStoreSizeInBits(OrigTy); + uint32_t TypeSize = DL.getTypeStoreSizeInBits(OrigTy); if (TypeSize != 8 && TypeSize != 16 && TypeSize != 32 && TypeSize != 64 && TypeSize != 128) { NumAccessesWithBadSize++;
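getMemoryAccessFuncIndex accepts only the five power-of-two access sizes, and its (elided) tail converts the byte size to an index so that ByteSize == 1 << Idx, as the callers recompute it; the index then picks among the per-size runtime entry points such as __tsan_read1 through __tsan_read16. A sketch of that mapping; the log2 loop is an assumption about code not shown in this hunk:

#include <cassert>
#include <cstdint>

// Maps an access size in bits to a callback-table index, or -1 for sizes
// that are not instrumented (counted as NumAccessesWithBadSize in the pass).
static int memoryAccessFuncIndex(uint32_t TypeSizeInBits) {
  if (TypeSizeInBits != 8 && TypeSizeInBits != 16 && TypeSizeInBits != 32 &&
      TypeSizeInBits != 64 && TypeSizeInBits != 128)
    return -1;
  int Idx = 0;
  for (uint32_t Bytes = TypeSizeInBits / 8; Bytes > 1; Bytes >>= 1) Idx++;
  return Idx; // ByteSize == 1 << Idx
}

int main() {
  assert(memoryAccessFuncIndex(8) == 0);   // 1-byte accesses: __tsan_read1
  assert(memoryAccessFuncIndex(64) == 3);  // 8-byte accesses: __tsan_read8
  assert(memoryAccessFuncIndex(128) == 4); // 16-byte accesses
  assert(memoryAccessFuncIndex(48) == -1); // odd sizes are skipped
  return 0;
}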