Diffstat (limited to 'lib/Transforms/Instrumentation')
 lib/Transforms/Instrumentation/AddressSanitizer.cpp     |  873
 lib/Transforms/Instrumentation/BlackList.cpp            |   48
 lib/Transforms/Instrumentation/BlackList.h              |    3
 lib/Transforms/Instrumentation/BoundsChecking.cpp       |   27
 lib/Transforms/Instrumentation/CMakeLists.txt           |    1
 lib/Transforms/Instrumentation/EdgeProfiling.cpp        |   11
 lib/Transforms/Instrumentation/GCOVProfiling.cpp        |  106
 lib/Transforms/Instrumentation/Instrumentation.cpp      |    2
 lib/Transforms/Instrumentation/MaximumSpanningTree.h    |    4
 lib/Transforms/Instrumentation/MemorySanitizer.cpp      | 1857
 lib/Transforms/Instrumentation/OptimalEdgeProfiling.cpp |   21
 lib/Transforms/Instrumentation/PathProfiling.cpp        |   23
 lib/Transforms/Instrumentation/ProfilingUtils.cpp       |   10
 lib/Transforms/Instrumentation/ThreadSanitizer.cpp      |  213
 14 files changed, 2724 insertions(+), 475 deletions(-)
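Before the diff itself, a brief orientation: the AddressSanitizer changes below compute shadow addresses as (Addr >> MappingScale()) | MappingOffset (see memToShadow()) and size red zones as max(32, 1 << MappingScale()) (see RedzoneSize()). The following is a minimal standalone C++ sketch of that arithmetic, assuming a scale of 3 and a 32-bit shadow offset of 1 << 29; both constants are assumptions for illustration, not values taken from this diff.

#include <algorithm>
#include <cstdint>
#include <cstdio>

static const int kScale = 3;                // assumed default MappingScale()
static const uint64_t kOffset = 1ULL << 29; // assumed 32-bit shadow offset

// Mirrors the IR that AddressSanitizer::memToShadow() emits:
// (Addr >> scale) | offset.
static uint64_t MemToShadow(uint64_t Addr) {
  return (Addr >> kScale) | kOffset;
}

// Mirrors RedzoneSize(): at least 32 bytes; 64 and 128 bytes
// for scales 6 and 7 respectively.
static uint64_t RedzoneSizeForScale(int Scale) {
  return std::max<uint64_t>(32, uint64_t(1) << Scale);
}

int main() {
  printf("shadow(0x12345678) = 0x%llx, redzone = %llu bytes\n",
         (unsigned long long)MemToShadow(0x12345678ULL),
         (unsigned long long)RedzoneSizeForScale(kScale));
  return 0;
}

Every byte of shadow memory encodes the addressability of one 2^scale-byte chunk of application memory, which is why the red zone can never be smaller than one shadow granule.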
diff --git a/lib/Transforms/Instrumentation/AddressSanitizer.cpp b/lib/Transforms/Instrumentation/AddressSanitizer.cpp index 0775cf4..9bd3239 100644 --- a/lib/Transforms/Instrumentation/AddressSanitizer.cpp +++ b/lib/Transforms/Instrumentation/AddressSanitizer.cpp @@ -15,34 +15,38 @@ #define DEBUG_TYPE "asan" +#include "llvm/Transforms/Instrumentation.h" #include "BlackList.h" -#include "llvm/Function.h" -#include "llvm/IRBuilder.h" -#include "llvm/InlineAsm.h" -#include "llvm/IntrinsicInst.h" -#include "llvm/LLVMContext.h" -#include "llvm/Module.h" -#include "llvm/Type.h" #include "llvm/ADT/ArrayRef.h" +#include "llvm/ADT/DenseMap.h" +#include "llvm/ADT/DepthFirstIterator.h" #include "llvm/ADT/OwningPtr.h" #include "llvm/ADT/SmallSet.h" #include "llvm/ADT/SmallString.h" #include "llvm/ADT/SmallVector.h" #include "llvm/ADT/StringExtras.h" #include "llvm/ADT/Triple.h" +#include "llvm/DIBuilder.h" +#include "llvm/IR/DataLayout.h" +#include "llvm/IR/Function.h" +#include "llvm/IR/IRBuilder.h" +#include "llvm/IR/InlineAsm.h" +#include "llvm/IR/IntrinsicInst.h" +#include "llvm/IR/LLVMContext.h" +#include "llvm/IR/Module.h" +#include "llvm/IR/Type.h" +#include "llvm/InstVisitor.h" #include "llvm/Support/CommandLine.h" #include "llvm/Support/DataTypes.h" #include "llvm/Support/Debug.h" #include "llvm/Support/raw_ostream.h" #include "llvm/Support/system_error.h" -#include "llvm/Target/TargetData.h" #include "llvm/Target/TargetMachine.h" -#include "llvm/Transforms/Instrumentation.h" #include "llvm/Transforms/Utils/BasicBlockUtils.h" +#include "llvm/Transforms/Utils/Local.h" #include "llvm/Transforms/Utils/ModuleUtils.h" - -#include <string> #include <algorithm> +#include <string> using namespace llvm; @@ -69,6 +73,10 @@ static const char *kAsanMappingOffsetName = "__asan_mapping_offset"; static const char *kAsanMappingScaleName = "__asan_mapping_scale"; static const char *kAsanStackMallocName = "__asan_stack_malloc"; static const char *kAsanStackFreeName = "__asan_stack_free"; +static const char *kAsanGenPrefix = "__asan_gen_"; +static const char *kAsanPoisonStackMemoryName = "__asan_poison_stack_memory"; +static const char *kAsanUnpoisonStackMemoryName = + "__asan_unpoison_stack_memory"; static const int kAsanStackLeftRedzoneMagic = 0xf1; static const int kAsanStackMidRedzoneMagic = 0xf2; @@ -112,9 +120,10 @@ static cl::opt<bool> ClInitializers("asan-initialization-order", cl::desc("Handle C++ initializer order"), cl::Hidden, cl::init(false)); static cl::opt<bool> ClMemIntrin("asan-memintrin", cl::desc("Handle memset/memcpy/memmove"), cl::Hidden, cl::init(true)); -// This flag may need to be replaced with -fasan-blacklist. -static cl::opt<std::string> ClBlackListFile("asan-blacklist", - cl::desc("File containing the list of functions to ignore " +static cl::opt<bool> ClRealignStack("asan-realign-stack", + cl::desc("Realign stack to 32"), cl::Hidden, cl::init(true)); +static cl::opt<std::string> ClBlacklistFile("asan-blacklist", + cl::desc("File containing the list of objects to ignore " "during instrumentation"), cl::Hidden); // These flags allow to change the shadow mapping. @@ -135,6 +144,10 @@ static cl::opt<bool> ClOptSameTemp("asan-opt-same-temp", static cl::opt<bool> ClOptGlobals("asan-opt-globals", cl::desc("Don't instrument scalar globals"), cl::Hidden, cl::init(true)); +static cl::opt<bool> ClCheckLifetime("asan-check-lifetime", + cl::desc("Use llvm.lifetime intrinsics to insert extra checks"), + cl::Hidden, cl::init(false)); + // Debug flags. 
static cl::opt<int> ClDebug("asan-debug", cl::desc("debug"), cl::Hidden, cl::init(0)); @@ -148,80 +161,274 @@ static cl::opt<int> ClDebugMax("asan-debug-max", cl::desc("Debug man inst"), cl::Hidden, cl::init(-1)); namespace { +/// A set of dynamically initialized globals extracted from metadata. +class SetOfDynamicallyInitializedGlobals { + public: + void Init(Module& M) { + // Clang generates metadata identifying all dynamically initialized globals. + NamedMDNode *DynamicGlobals = + M.getNamedMetadata("llvm.asan.dynamically_initialized_globals"); + if (!DynamicGlobals) + return; + for (int i = 0, n = DynamicGlobals->getNumOperands(); i < n; ++i) { + MDNode *MDN = DynamicGlobals->getOperand(i); + assert(MDN->getNumOperands() == 1); + Value *VG = MDN->getOperand(0); + // The optimizer may optimize away a global entirely, in which case we + // cannot instrument access to it. + if (!VG) + continue; + DynInitGlobals.insert(cast<GlobalVariable>(VG)); + } + } + bool Contains(GlobalVariable *G) { return DynInitGlobals.count(G) != 0; } + private: + SmallSet<GlobalValue*, 32> DynInitGlobals; +}; -/// An object of this type is created while instrumenting every function. -struct AsanFunctionContext { - AsanFunctionContext(Function &Function) : F(Function) { } +static int MappingScale() { + return ClMappingScale ? ClMappingScale : kDefaultShadowScale; +} - Function &F; -}; +static size_t RedzoneSize() { + // Redzone used for stack and globals is at least 32 bytes. + // For scales 6 and 7, the redzone has to be 64 and 128 bytes respectively. + return std::max(32U, 1U << MappingScale()); +} /// AddressSanitizer: instrument the code in module to find memory bugs. -struct AddressSanitizer : public ModulePass { - AddressSanitizer(); - virtual const char *getPassName() const; - void instrumentMop(AsanFunctionContext &AFC, Instruction *I); - void instrumentAddress(AsanFunctionContext &AFC, - Instruction *OrigIns, IRBuilder<> &IRB, +struct AddressSanitizer : public FunctionPass { + AddressSanitizer(bool CheckInitOrder = false, + bool CheckUseAfterReturn = false, + bool CheckLifetime = false, + StringRef BlacklistFile = StringRef()) + : FunctionPass(ID), + CheckInitOrder(CheckInitOrder || ClInitializers), + CheckUseAfterReturn(CheckUseAfterReturn || ClUseAfterReturn), + CheckLifetime(CheckLifetime || ClCheckLifetime), + BlacklistFile(BlacklistFile.empty() ? 
ClBlacklistFile + : BlacklistFile) {} + virtual const char *getPassName() const { + return "AddressSanitizerFunctionPass"; + } + void instrumentMop(Instruction *I); + void instrumentAddress(Instruction *OrigIns, IRBuilder<> &IRB, Value *Addr, uint32_t TypeSize, bool IsWrite); Value *createSlowPathCmp(IRBuilder<> &IRB, Value *AddrLong, Value *ShadowValue, uint32_t TypeSize); Instruction *generateCrashCode(Instruction *InsertBefore, Value *Addr, bool IsWrite, size_t AccessSizeIndex); - bool instrumentMemIntrinsic(AsanFunctionContext &AFC, MemIntrinsic *MI); - void instrumentMemIntrinsicParam(AsanFunctionContext &AFC, - Instruction *OrigIns, Value *Addr, + bool instrumentMemIntrinsic(MemIntrinsic *MI); + void instrumentMemIntrinsicParam(Instruction *OrigIns, Value *Addr, Value *Size, Instruction *InsertBefore, bool IsWrite); Value *memToShadow(Value *Shadow, IRBuilder<> &IRB); - bool handleFunction(Module &M, Function &F); + bool runOnFunction(Function &F); void createInitializerPoisonCalls(Module &M, Value *FirstAddr, Value *LastAddr); bool maybeInsertAsanInitAtFunctionEntry(Function &F); - bool poisonStackInFunction(Module &M, Function &F); - virtual bool runOnModule(Module &M); - bool insertGlobalRedzones(Module &M); + virtual bool doInitialization(Module &M); static char ID; // Pass identification, replacement for typeid private: - uint64_t getAllocaSizeInBytes(AllocaInst *AI) { - Type *Ty = AI->getAllocatedType(); - uint64_t SizeInBytes = TD->getTypeAllocSize(Ty); - return SizeInBytes; - } - uint64_t getAlignedSize(uint64_t SizeInBytes) { - return ((SizeInBytes + RedzoneSize - 1) - / RedzoneSize) * RedzoneSize; - } - uint64_t getAlignedAllocaSize(AllocaInst *AI) { - uint64_t SizeInBytes = getAllocaSizeInBytes(AI); - return getAlignedSize(SizeInBytes); - } + void initializeCallbacks(Module &M); - Function *checkInterfaceFunction(Constant *FuncOrBitcast); bool ShouldInstrumentGlobal(GlobalVariable *G); - void PoisonStack(const ArrayRef<AllocaInst*> &AllocaVec, IRBuilder<> IRB, - Value *ShadowBase, bool DoPoison); bool LooksLikeCodeInBug11395(Instruction *I); void FindDynamicInitializers(Module &M); - bool HasDynamicInitializer(GlobalVariable *G); + bool CheckInitOrder; + bool CheckUseAfterReturn; + bool CheckLifetime; LLVMContext *C; - TargetData *TD; + DataLayout *TD; uint64_t MappingOffset; - int MappingScale; - size_t RedzoneSize; int LongSize; Type *IntptrTy; - Type *IntptrPtrTy; Function *AsanCtorFunction; Function *AsanInitFunction; - Instruction *CtorInsertBefore; + Function *AsanHandleNoReturnFunc; + SmallString<64> BlacklistFile; OwningPtr<BlackList> BL; // This array is indexed by AccessIsWrite and log2(AccessSize). Function *AsanErrorCallback[2][kNumberOfAccessSizes]; InlineAsm *EmptyAsm; - SmallSet<GlobalValue*, 32> DynamicallyInitializedGlobals; + SetOfDynamicallyInitializedGlobals DynamicallyInitializedGlobals; + + friend struct FunctionStackPoisoner; +}; + +class AddressSanitizerModule : public ModulePass { + public: + AddressSanitizerModule(bool CheckInitOrder = false, + StringRef BlacklistFile = StringRef()) + : ModulePass(ID), + CheckInitOrder(CheckInitOrder || ClInitializers), + BlacklistFile(BlacklistFile.empty() ? 
ClBlacklistFile + : BlacklistFile) {} + bool runOnModule(Module &M); + static char ID; // Pass identification, replacement for typeid + virtual const char *getPassName() const { + return "AddressSanitizerModule"; + } + + private: + void initializeCallbacks(Module &M); + + bool ShouldInstrumentGlobal(GlobalVariable *G); + void createInitializerPoisonCalls(Module &M, Value *FirstAddr, + Value *LastAddr); + + bool CheckInitOrder; + SmallString<64> BlacklistFile; + OwningPtr<BlackList> BL; + SetOfDynamicallyInitializedGlobals DynamicallyInitializedGlobals; + Type *IntptrTy; + LLVMContext *C; + DataLayout *TD; + Function *AsanPoisonGlobals; + Function *AsanUnpoisonGlobals; + Function *AsanRegisterGlobals; + Function *AsanUnregisterGlobals; +}; + +// Stack poisoning does not play well with exception handling. +// When an exception is thrown, we essentially bypass the code +// that unpoisones the stack. This is why the run-time library has +// to intercept __cxa_throw (as well as longjmp, etc) and unpoison the entire +// stack in the interceptor. This however does not work inside the +// actual function which catches the exception. Most likely because the +// compiler hoists the load of the shadow value somewhere too high. +// This causes asan to report a non-existing bug on 453.povray. +// It sounds like an LLVM bug. +struct FunctionStackPoisoner : public InstVisitor<FunctionStackPoisoner> { + Function &F; + AddressSanitizer &ASan; + DIBuilder DIB; + LLVMContext *C; + Type *IntptrTy; + Type *IntptrPtrTy; + + SmallVector<AllocaInst*, 16> AllocaVec; + SmallVector<Instruction*, 8> RetVec; + uint64_t TotalStackSize; + unsigned StackAlignment; + + Function *AsanStackMallocFunc, *AsanStackFreeFunc; + Function *AsanPoisonStackMemoryFunc, *AsanUnpoisonStackMemoryFunc; + + // Stores a place and arguments of poisoning/unpoisoning call for alloca. + struct AllocaPoisonCall { + IntrinsicInst *InsBefore; + uint64_t Size; + bool DoPoison; + }; + SmallVector<AllocaPoisonCall, 8> AllocaPoisonCallVec; + + // Maps Value to an AllocaInst from which the Value is originated. + typedef DenseMap<Value*, AllocaInst*> AllocaForValueMapTy; + AllocaForValueMapTy AllocaForValue; + + FunctionStackPoisoner(Function &F, AddressSanitizer &ASan) + : F(F), ASan(ASan), DIB(*F.getParent()), C(ASan.C), + IntptrTy(ASan.IntptrTy), IntptrPtrTy(PointerType::get(IntptrTy, 0)), + TotalStackSize(0), StackAlignment(1 << MappingScale()) {} + + bool runOnFunction() { + if (!ClStack) return false; + // Collect alloca, ret, lifetime instructions etc. + for (df_iterator<BasicBlock*> DI = df_begin(&F.getEntryBlock()), + DE = df_end(&F.getEntryBlock()); DI != DE; ++DI) { + BasicBlock *BB = *DI; + visit(*BB); + } + if (AllocaVec.empty()) return false; + + initializeCallbacks(*F.getParent()); + + poisonStack(); + + if (ClDebugStack) { + DEBUG(dbgs() << F); + } + return true; + } + + // Finds all static Alloca instructions and puts + // poisoned red zones around all of them. + // Then unpoison everything back before the function returns. + void poisonStack(); + + // ----------------------- Visitors. + /// \brief Collect all Ret instructions. + void visitReturnInst(ReturnInst &RI) { + RetVec.push_back(&RI); + } + + /// \brief Collect Alloca instructions we want (and can) handle. 
+ void visitAllocaInst(AllocaInst &AI) { + if (!isInterestingAlloca(AI)) return; + + StackAlignment = std::max(StackAlignment, AI.getAlignment()); + AllocaVec.push_back(&AI); + uint64_t AlignedSize = getAlignedAllocaSize(&AI); + TotalStackSize += AlignedSize; + } + + /// \brief Collect lifetime intrinsic calls to check for use-after-scope + /// errors. + void visitIntrinsicInst(IntrinsicInst &II) { + if (!ASan.CheckLifetime) return; + Intrinsic::ID ID = II.getIntrinsicID(); + if (ID != Intrinsic::lifetime_start && + ID != Intrinsic::lifetime_end) + return; + // Found lifetime intrinsic, add ASan instrumentation if necessary. + ConstantInt *Size = dyn_cast<ConstantInt>(II.getArgOperand(0)); + // If size argument is undefined, don't do anything. + if (Size->isMinusOne()) return; + // Check that size doesn't saturate uint64_t and can + // be stored in IntptrTy. + const uint64_t SizeValue = Size->getValue().getLimitedValue(); + if (SizeValue == ~0ULL || + !ConstantInt::isValueValidForType(IntptrTy, SizeValue)) + return; + // Find alloca instruction that corresponds to llvm.lifetime argument. + AllocaInst *AI = findAllocaForValue(II.getArgOperand(1)); + if (!AI) return; + bool DoPoison = (ID == Intrinsic::lifetime_end); + AllocaPoisonCall APC = {&II, SizeValue, DoPoison}; + AllocaPoisonCallVec.push_back(APC); + } + + // ---------------------- Helpers. + void initializeCallbacks(Module &M); + + // Check if we want (and can) handle this alloca. + bool isInterestingAlloca(AllocaInst &AI) { + return (!AI.isArrayAllocation() && + AI.isStaticAlloca() && + AI.getAllocatedType()->isSized()); + } + + uint64_t getAllocaSizeInBytes(AllocaInst *AI) { + Type *Ty = AI->getAllocatedType(); + uint64_t SizeInBytes = ASan.TD->getTypeAllocSize(Ty); + return SizeInBytes; + } + uint64_t getAlignedSize(uint64_t SizeInBytes) { + size_t RZ = RedzoneSize(); + return ((SizeInBytes + RZ - 1) / RZ) * RZ; + } + uint64_t getAlignedAllocaSize(AllocaInst *AI) { + uint64_t SizeInBytes = getAllocaSizeInBytes(AI); + return getAlignedSize(SizeInBytes); + } + /// Finds alloca where the value comes from. + AllocaInst *findAllocaForValue(Value *V); + void poisonRedZones(const ArrayRef<AllocaInst*> &AllocaVec, IRBuilder<> IRB, + Value *ShadowBase, bool DoPoison); + void poisonAlloca(Value *V, uint64_t Size, IRBuilder<> IRB, bool DoPoison); }; } // namespace @@ -230,13 +437,20 @@ char AddressSanitizer::ID = 0; INITIALIZE_PASS(AddressSanitizer, "asan", "AddressSanitizer: detects use-after-free and out-of-bounds bugs.", false, false) -AddressSanitizer::AddressSanitizer() : ModulePass(ID) { } -ModulePass *llvm::createAddressSanitizerPass() { - return new AddressSanitizer(); +FunctionPass *llvm::createAddressSanitizerFunctionPass( + bool CheckInitOrder, bool CheckUseAfterReturn, bool CheckLifetime, + StringRef BlacklistFile) { + return new AddressSanitizer(CheckInitOrder, CheckUseAfterReturn, + CheckLifetime, BlacklistFile); } -const char *AddressSanitizer::getPassName() const { - return "AddressSanitizer"; +char AddressSanitizerModule::ID = 0; +INITIALIZE_PASS(AddressSanitizerModule, "asan-module", + "AddressSanitizer: detects use-after-free and out-of-bounds bugs." 
+ "ModulePass", false, false) +ModulePass *llvm::createAddressSanitizerModulePass( + bool CheckInitOrder, StringRef BlacklistFile) { + return new AddressSanitizerModule(CheckInitOrder, BlacklistFile); } static size_t TypeSizeToSizeIndex(uint32_t TypeSize) { @@ -249,44 +463,17 @@ static size_t TypeSizeToSizeIndex(uint32_t TypeSize) { static GlobalVariable *createPrivateGlobalForString(Module &M, StringRef Str) { Constant *StrConst = ConstantDataArray::getString(M.getContext(), Str); return new GlobalVariable(M, StrConst->getType(), true, - GlobalValue::PrivateLinkage, StrConst, ""); + GlobalValue::PrivateLinkage, StrConst, + kAsanGenPrefix); } -// Split the basic block and insert an if-then code. -// Before: -// Head -// Cmp -// Tail -// After: -// Head -// if (Cmp) -// ThenBlock -// Tail -// -// ThenBlock block is created and its terminator is returned. -// If Unreachable, ThenBlock is terminated with UnreachableInst, otherwise -// it is terminated with BranchInst to Tail. -static TerminatorInst *splitBlockAndInsertIfThen(Value *Cmp, bool Unreachable) { - Instruction *SplitBefore = cast<Instruction>(Cmp)->getNextNode(); - BasicBlock *Head = SplitBefore->getParent(); - BasicBlock *Tail = Head->splitBasicBlock(SplitBefore); - TerminatorInst *HeadOldTerm = Head->getTerminator(); - LLVMContext &C = Head->getParent()->getParent()->getContext(); - BasicBlock *ThenBlock = BasicBlock::Create(C, "", Head->getParent(), Tail); - TerminatorInst *CheckTerm; - if (Unreachable) - CheckTerm = new UnreachableInst(C, ThenBlock); - else - CheckTerm = BranchInst::Create(Tail, ThenBlock); - BranchInst *HeadNewTerm = - BranchInst::Create(/*ifTrue*/ThenBlock, /*ifFalse*/Tail, Cmp); - ReplaceInstWithInst(HeadOldTerm, HeadNewTerm); - return CheckTerm; +static bool GlobalWasGeneratedByAsan(GlobalVariable *G) { + return G->getName().find(kAsanGenPrefix) == 0; } Value *AddressSanitizer::memToShadow(Value *Shadow, IRBuilder<> &IRB) { // Shadow >> scale - Shadow = IRB.CreateLShr(Shadow, MappingScale); + Shadow = IRB.CreateLShr(Shadow, MappingScale()); if (MappingOffset == 0) return Shadow; // (Shadow >> scale) | offset @@ -295,12 +482,12 @@ Value *AddressSanitizer::memToShadow(Value *Shadow, IRBuilder<> &IRB) { } void AddressSanitizer::instrumentMemIntrinsicParam( - AsanFunctionContext &AFC, Instruction *OrigIns, + Instruction *OrigIns, Value *Addr, Value *Size, Instruction *InsertBefore, bool IsWrite) { // Check the first byte. { IRBuilder<> IRB(InsertBefore); - instrumentAddress(AFC, OrigIns, IRB, Addr, 8, IsWrite); + instrumentAddress(OrigIns, IRB, Addr, 8, IsWrite); } // Check the last byte. { @@ -310,13 +497,12 @@ void AddressSanitizer::instrumentMemIntrinsicParam( SizeMinusOne = IRB.CreateIntCast(SizeMinusOne, IntptrTy, false); Value *AddrLong = IRB.CreatePointerCast(Addr, IntptrTy); Value *AddrPlusSizeMinisOne = IRB.CreateAdd(AddrLong, SizeMinusOne); - instrumentAddress(AFC, OrigIns, IRB, AddrPlusSizeMinisOne, 8, IsWrite); + instrumentAddress(OrigIns, IRB, AddrPlusSizeMinisOne, 8, IsWrite); } } // Instrument memset/memmove/memcpy -bool AddressSanitizer::instrumentMemIntrinsic(AsanFunctionContext &AFC, - MemIntrinsic *MI) { +bool AddressSanitizer::instrumentMemIntrinsic(MemIntrinsic *MI) { Value *Dst = MI->getDest(); MemTransferInst *MemTran = dyn_cast<MemTransferInst>(MI); Value *Src = MemTran ? 
MemTran->getSource() : 0; @@ -332,12 +518,12 @@ bool AddressSanitizer::instrumentMemIntrinsic(AsanFunctionContext &AFC, Value *Cmp = IRB.CreateICmpNE(Length, Constant::getNullValue(Length->getType())); - InsertBefore = splitBlockAndInsertIfThen(Cmp, false); + InsertBefore = SplitBlockAndInsertIfThen(cast<Instruction>(Cmp), false); } - instrumentMemIntrinsicParam(AFC, MI, Dst, Length, InsertBefore, true); + instrumentMemIntrinsicParam(MI, Dst, Length, InsertBefore, true); if (Src) - instrumentMemIntrinsicParam(AFC, MI, Src, Length, InsertBefore, false); + instrumentMemIntrinsicParam(MI, Src, Length, InsertBefore, false); return true; } @@ -367,46 +553,20 @@ static Value *isInterestingMemoryAccess(Instruction *I, bool *IsWrite) { return NULL; } -void AddressSanitizer::FindDynamicInitializers(Module& M) { - // Clang generates metadata identifying all dynamically initialized globals. - NamedMDNode *DynamicGlobals = - M.getNamedMetadata("llvm.asan.dynamically_initialized_globals"); - if (!DynamicGlobals) - return; - for (int i = 0, n = DynamicGlobals->getNumOperands(); i < n; ++i) { - MDNode *MDN = DynamicGlobals->getOperand(i); - assert(MDN->getNumOperands() == 1); - Value *VG = MDN->getOperand(0); - // The optimizer may optimize away a global entirely, in which case we - // cannot instrument access to it. - if (!VG) - continue; - - GlobalVariable *G = cast<GlobalVariable>(VG); - DynamicallyInitializedGlobals.insert(G); - } -} -// Returns true if a global variable is initialized dynamically in this TU. -bool AddressSanitizer::HasDynamicInitializer(GlobalVariable *G) { - return DynamicallyInitializedGlobals.count(G); -} - -void AddressSanitizer::instrumentMop(AsanFunctionContext &AFC, Instruction *I) { - bool IsWrite; +void AddressSanitizer::instrumentMop(Instruction *I) { + bool IsWrite = false; Value *Addr = isInterestingMemoryAccess(I, &IsWrite); assert(Addr); if (ClOpt && ClOptGlobals) { if (GlobalVariable *G = dyn_cast<GlobalVariable>(Addr)) { // If initialization order checking is disabled, a simple access to a // dynamically initialized global is always valid. - if (!ClInitializers) + if (!CheckInitOrder) return; // If a global variable does not have dynamic initialization we don't - // have to instrument it. However, if a global has external linkage, we - // assume it has dynamic initialization, as it may have an initializer - // in a different TU. - if (G->getLinkage() != GlobalVariable::ExternalLinkage && - !HasDynamicInitializer(G)) + // have to instrument it. However, if a global does not have initailizer + // at all, we assume it has dynamic initializer (in other TU). + if (G->hasInitializer() && !DynamicallyInitializedGlobals.Contains(G)) return; } } @@ -424,14 +584,14 @@ void AddressSanitizer::instrumentMop(AsanFunctionContext &AFC, Instruction *I) { } IRBuilder<> IRB(I); - instrumentAddress(AFC, I, IRB, Addr, TypeSize, IsWrite); + instrumentAddress(I, IRB, Addr, TypeSize, IsWrite); } // Validate the result of Module::getOrInsertFunction called for an interface // function of AddressSanitizer. If the instrumented module defines a function // with the same name, their prototypes must match, otherwise // getOrInsertFunction returns a bitcast. 
-Function *AddressSanitizer::checkInterfaceFunction(Constant *FuncOrBitcast) { +static Function *checkInterfaceFunction(Constant *FuncOrBitcast) { if (isa<Function>(FuncOrBitcast)) return cast<Function>(FuncOrBitcast); FuncOrBitcast->dump(); report_fatal_error("trying to redefine an AddressSanitizer " @@ -454,7 +614,7 @@ Instruction *AddressSanitizer::generateCrashCode( Value *AddressSanitizer::createSlowPathCmp(IRBuilder<> &IRB, Value *AddrLong, Value *ShadowValue, uint32_t TypeSize) { - size_t Granularity = 1 << MappingScale; + size_t Granularity = 1 << MappingScale(); // Addr & (Granularity - 1) Value *LastAccessedByte = IRB.CreateAnd( AddrLong, ConstantInt::get(IntptrTy, Granularity - 1)); @@ -469,14 +629,13 @@ Value *AddressSanitizer::createSlowPathCmp(IRBuilder<> &IRB, Value *AddrLong, return IRB.CreateICmpSGE(LastAccessedByte, ShadowValue); } -void AddressSanitizer::instrumentAddress(AsanFunctionContext &AFC, - Instruction *OrigIns, +void AddressSanitizer::instrumentAddress(Instruction *OrigIns, IRBuilder<> &IRB, Value *Addr, uint32_t TypeSize, bool IsWrite) { Value *AddrLong = IRB.CreatePointerCast(Addr, IntptrTy); Type *ShadowTy = IntegerType::get( - *C, std::max(8U, TypeSize >> MappingScale)); + *C, std::max(8U, TypeSize >> MappingScale())); Type *ShadowPtrTy = PointerType::get(ShadowTy, 0); Value *ShadowPtr = memToShadow(AddrLong, IRB); Value *CmpVal = Constant::getNullValue(ShadowTy); @@ -485,21 +644,23 @@ void AddressSanitizer::instrumentAddress(AsanFunctionContext &AFC, Value *Cmp = IRB.CreateICmpNE(ShadowValue, CmpVal); size_t AccessSizeIndex = TypeSizeToSizeIndex(TypeSize); - size_t Granularity = 1 << MappingScale; + size_t Granularity = 1 << MappingScale(); TerminatorInst *CrashTerm = 0; if (ClAlwaysSlowPath || (TypeSize < 8 * Granularity)) { - TerminatorInst *CheckTerm = splitBlockAndInsertIfThen(Cmp, false); + TerminatorInst *CheckTerm = + SplitBlockAndInsertIfThen(cast<Instruction>(Cmp), false); assert(dyn_cast<BranchInst>(CheckTerm)->isUnconditional()); BasicBlock *NextBB = CheckTerm->getSuccessor(0); IRB.SetInsertPoint(CheckTerm); Value *Cmp2 = createSlowPathCmp(IRB, AddrLong, ShadowValue, TypeSize); - BasicBlock *CrashBlock = BasicBlock::Create(*C, "", &AFC.F, NextBB); + BasicBlock *CrashBlock = + BasicBlock::Create(*C, "", NextBB->getParent(), NextBB); CrashTerm = new UnreachableInst(*C, CrashBlock); BranchInst *NewTerm = BranchInst::Create(CrashBlock, NextBB, Cmp2); ReplaceInstWithInst(CheckTerm, NewTerm); } else { - CrashTerm = splitBlockAndInsertIfThen(Cmp, true); + CrashTerm = SplitBlockAndInsertIfThen(cast<Instruction>(Cmp), true); } Instruction *Crash = @@ -507,9 +668,8 @@ void AddressSanitizer::instrumentAddress(AsanFunctionContext &AFC, Crash->setDebugLoc(OrigIns->getDebugLoc()); } -void AddressSanitizer::createInitializerPoisonCalls(Module &M, - Value *FirstAddr, - Value *LastAddr) { +void AddressSanitizerModule::createInitializerPoisonCalls( + Module &M, Value *FirstAddr, Value *LastAddr) { // We do all of our poisoning and unpoisoning within _GLOBAL__I_a. Function *GlobalInit = M.getFunction("_GLOBAL__I_a"); // If that function is not present, this TU contains no globals, or they have @@ -520,14 +680,6 @@ void AddressSanitizer::createInitializerPoisonCalls(Module &M, // Set up the arguments to our poison/unpoison functions. IRBuilder<> IRB(GlobalInit->begin()->getFirstInsertionPt()); - // Declare our poisoning and unpoisoning functions. 
- Function *AsanPoisonGlobals = checkInterfaceFunction(M.getOrInsertFunction( - kAsanPoisonGlobalsName, IRB.getVoidTy(), IntptrTy, IntptrTy, NULL)); - AsanPoisonGlobals->setLinkage(Function::ExternalLinkage); - Function *AsanUnpoisonGlobals = checkInterfaceFunction(M.getOrInsertFunction( - kAsanUnpoisonGlobalsName, IRB.getVoidTy(), NULL)); - AsanUnpoisonGlobals->setLinkage(Function::ExternalLinkage); - // Add a call to poison all external globals before the given function starts. IRB.CreateCall2(AsanPoisonGlobals, FirstAddr, LastAddr); @@ -540,13 +692,14 @@ void AddressSanitizer::createInitializerPoisonCalls(Module &M, } } -bool AddressSanitizer::ShouldInstrumentGlobal(GlobalVariable *G) { +bool AddressSanitizerModule::ShouldInstrumentGlobal(GlobalVariable *G) { Type *Ty = cast<PointerType>(G->getType())->getElementType(); - DEBUG(dbgs() << "GLOBAL: " << *G); + DEBUG(dbgs() << "GLOBAL: " << *G << "\n"); if (BL->isIn(*G)) return false; if (!Ty->isSized()) return false; if (!G->hasInitializer()) return false; + if (GlobalWasGeneratedByAsan(G)) return false; // Our own global. // Touch only those globals that will not be defined in other modules. // Don't handle ODR type linkages since other modules may be built w/o asan. if (G->getLinkage() != GlobalVariable::ExternalLinkage && @@ -559,7 +712,7 @@ bool AddressSanitizer::ShouldInstrumentGlobal(GlobalVariable *G) { if (G->isThreadLocal()) return false; // For now, just ignore this Alloca if the alignment is large. - if (G->getAlignment() > RedzoneSize) return false; + if (G->getAlignment() > RedzoneSize()) return false; // Ignore all the globals with the names starting with "\01L_OBJC_". // Many of those are put into the .cstring section. The linker compresses @@ -598,10 +751,41 @@ bool AddressSanitizer::ShouldInstrumentGlobal(GlobalVariable *G) { return true; } +void AddressSanitizerModule::initializeCallbacks(Module &M) { + IRBuilder<> IRB(*C); + // Declare our poisoning and unpoisoning functions. + AsanPoisonGlobals = checkInterfaceFunction(M.getOrInsertFunction( + kAsanPoisonGlobalsName, IRB.getVoidTy(), IntptrTy, IntptrTy, NULL)); + AsanPoisonGlobals->setLinkage(Function::ExternalLinkage); + AsanUnpoisonGlobals = checkInterfaceFunction(M.getOrInsertFunction( + kAsanUnpoisonGlobalsName, IRB.getVoidTy(), NULL)); + AsanUnpoisonGlobals->setLinkage(Function::ExternalLinkage); + // Declare functions that register/unregister globals. + AsanRegisterGlobals = checkInterfaceFunction(M.getOrInsertFunction( + kAsanRegisterGlobalsName, IRB.getVoidTy(), + IntptrTy, IntptrTy, NULL)); + AsanRegisterGlobals->setLinkage(Function::ExternalLinkage); + AsanUnregisterGlobals = checkInterfaceFunction(M.getOrInsertFunction( + kAsanUnregisterGlobalsName, + IRB.getVoidTy(), IntptrTy, IntptrTy, NULL)); + AsanUnregisterGlobals->setLinkage(Function::ExternalLinkage); +} + // This function replaces all global variables with new variables that have // trailing redzones. It also creates a function that poisons // redzones and inserts this function into llvm.global_ctors. 
-bool AddressSanitizer::insertGlobalRedzones(Module &M) { +bool AddressSanitizerModule::runOnModule(Module &M) { + if (!ClGlobals) return false; + TD = getAnalysisIfAvailable<DataLayout>(); + if (!TD) + return false; + BL.reset(new BlackList(BlacklistFile)); + if (BL->isIn(M)) return false; + C = &(M.getContext()); + IntptrTy = Type::getIntNTy(*C, TD->getPointerSizeInBits()); + initializeCallbacks(M); + DynamicallyInitializedGlobals.Init(M); + SmallVector<GlobalVariable *, 16> GlobalsToChange; for (Module::GlobalListType::iterator G = M.global_begin(), @@ -625,10 +809,10 @@ bool AddressSanitizer::insertGlobalRedzones(Module &M) { IntptrTy, NULL); SmallVector<Constant *, 16> Initializers(n), DynamicInit; - IRBuilder<> IRB(CtorInsertBefore); - if (ClInitializers) - FindDynamicInitializers(M); + Function *CtorFunc = M.getFunction(kAsanModuleCtorName); + assert(CtorFunc); + IRBuilder<> IRB(CtorFunc->getEntryBlock().getTerminator()); // The addresses of the first and last dynamically initialized globals in // this TU. Used in initialization order checking. @@ -639,11 +823,12 @@ bool AddressSanitizer::insertGlobalRedzones(Module &M) { PointerType *PtrTy = cast<PointerType>(G->getType()); Type *Ty = PtrTy->getElementType(); uint64_t SizeInBytes = TD->getTypeAllocSize(Ty); - uint64_t RightRedzoneSize = RedzoneSize + - (RedzoneSize - (SizeInBytes % RedzoneSize)); + size_t RZ = RedzoneSize(); + uint64_t RightRedzoneSize = RZ + (RZ - (SizeInBytes % RZ)); Type *RightRedZoneTy = ArrayType::get(IRB.getInt8Ty(), RightRedzoneSize); // Determine whether this global should be poisoned in initialization. - bool GlobalHasDynamicInitializer = HasDynamicInitializer(G); + bool GlobalHasDynamicInitializer = + DynamicallyInitializedGlobals.Contains(G); // Don't check initialization order if this global is blacklisted. GlobalHasDynamicInitializer &= !BL->isInInit(*G); @@ -663,7 +848,7 @@ bool AddressSanitizer::insertGlobalRedzones(Module &M) { M, NewTy, G->isConstant(), G->getLinkage(), NewInitializer, "", G, G->getThreadLocalMode()); NewGlobal->copyAttributesFrom(G); - NewGlobal->setAlignment(RedzoneSize); + NewGlobal->setAlignment(RZ); Value *Indices2[2]; Indices2[0] = IRB.getInt32(0); @@ -684,13 +869,13 @@ bool AddressSanitizer::insertGlobalRedzones(Module &M) { NULL); // Populate the first and last globals declared in this TU. - if (ClInitializers && GlobalHasDynamicInitializer) { + if (CheckInitOrder && GlobalHasDynamicInitializer) { LastDynamic = ConstantExpr::getPointerCast(NewGlobal, IntptrTy); if (FirstDynamic == 0) FirstDynamic = LastDynamic; } - DEBUG(dbgs() << "NEW GLOBAL:\n" << *NewGlobal); + DEBUG(dbgs() << "NEW GLOBAL: " << *NewGlobal << "\n"); } ArrayType *ArrayOfGlobalStructTy = ArrayType::get(GlobalStructTy, n); @@ -699,14 +884,8 @@ bool AddressSanitizer::insertGlobalRedzones(Module &M) { ConstantArray::get(ArrayOfGlobalStructTy, Initializers), ""); // Create calls for poisoning before initializers run and unpoisoning after. 
- if (ClInitializers && FirstDynamic && LastDynamic) + if (CheckInitOrder && FirstDynamic && LastDynamic) createInitializerPoisonCalls(M, FirstDynamic, LastDynamic); - - Function *AsanRegisterGlobals = checkInterfaceFunction(M.getOrInsertFunction( - kAsanRegisterGlobalsName, IRB.getVoidTy(), - IntptrTy, IntptrTy, NULL)); - AsanRegisterGlobals->setLinkage(Function::ExternalLinkage); - IRB.CreateCall2(AsanRegisterGlobals, IRB.CreatePointerCast(AllGlobals, IntptrTy), ConstantInt::get(IntptrTy, n)); @@ -718,12 +897,6 @@ bool AddressSanitizer::insertGlobalRedzones(Module &M) { GlobalValue::InternalLinkage, kAsanModuleDtorName, &M); BasicBlock *AsanDtorBB = BasicBlock::Create(*C, "", AsanDtorFunction); IRBuilder<> IRB_Dtor(ReturnInst::Create(*C, AsanDtorBB)); - Function *AsanUnregisterGlobals = - checkInterfaceFunction(M.getOrInsertFunction( - kAsanUnregisterGlobalsName, - IRB.getVoidTy(), IntptrTy, IntptrTy, NULL)); - AsanUnregisterGlobals->setLinkage(Function::ExternalLinkage); - IRB_Dtor.CreateCall2(AsanUnregisterGlobals, IRB.CreatePointerCast(AllGlobals, IntptrTy), ConstantInt::get(IntptrTy, n)); @@ -733,49 +906,55 @@ bool AddressSanitizer::insertGlobalRedzones(Module &M) { return true; } +void AddressSanitizer::initializeCallbacks(Module &M) { + IRBuilder<> IRB(*C); + // Create __asan_report* callbacks. + for (size_t AccessIsWrite = 0; AccessIsWrite <= 1; AccessIsWrite++) { + for (size_t AccessSizeIndex = 0; AccessSizeIndex < kNumberOfAccessSizes; + AccessSizeIndex++) { + // IsWrite and TypeSize are encoded in the function name. + std::string FunctionName = std::string(kAsanReportErrorTemplate) + + (AccessIsWrite ? "store" : "load") + itostr(1 << AccessSizeIndex); + // If we are merging crash callbacks, they have two parameters. + AsanErrorCallback[AccessIsWrite][AccessSizeIndex] = + checkInterfaceFunction(M.getOrInsertFunction( + FunctionName, IRB.getVoidTy(), IntptrTy, NULL)); + } + } + + AsanHandleNoReturnFunc = checkInterfaceFunction(M.getOrInsertFunction( + kAsanHandleNoReturnName, IRB.getVoidTy(), NULL)); + // We insert an empty inline asm after __asan_report* to avoid callback merge. + EmptyAsm = InlineAsm::get(FunctionType::get(IRB.getVoidTy(), false), + StringRef(""), StringRef(""), + /*hasSideEffects=*/true); +} + // virtual -bool AddressSanitizer::runOnModule(Module &M) { +bool AddressSanitizer::doInitialization(Module &M) { // Initialize the private fields. No one has accessed them before. - TD = getAnalysisIfAvailable<TargetData>(); + TD = getAnalysisIfAvailable<DataLayout>(); + if (!TD) return false; - BL.reset(new BlackList(ClBlackListFile)); + BL.reset(new BlackList(BlacklistFile)); + DynamicallyInitializedGlobals.Init(M); C = &(M.getContext()); LongSize = TD->getPointerSizeInBits(); IntptrTy = Type::getIntNTy(*C, LongSize); - IntptrPtrTy = PointerType::get(IntptrTy, 0); AsanCtorFunction = Function::Create( FunctionType::get(Type::getVoidTy(*C), false), GlobalValue::InternalLinkage, kAsanModuleCtorName, &M); BasicBlock *AsanCtorBB = BasicBlock::Create(*C, "", AsanCtorFunction); - CtorInsertBefore = ReturnInst::Create(*C, AsanCtorBB); - // call __asan_init in the module ctor. - IRBuilder<> IRB(CtorInsertBefore); + IRBuilder<> IRB(ReturnInst::Create(*C, AsanCtorBB)); AsanInitFunction = checkInterfaceFunction( M.getOrInsertFunction(kAsanInitName, IRB.getVoidTy(), NULL)); AsanInitFunction->setLinkage(Function::ExternalLinkage); IRB.CreateCall(AsanInitFunction); - // Create __asan_report* callbacks. 
- for (size_t AccessIsWrite = 0; AccessIsWrite <= 1; AccessIsWrite++) { - for (size_t AccessSizeIndex = 0; AccessSizeIndex < kNumberOfAccessSizes; - AccessSizeIndex++) { - // IsWrite and TypeSize are encoded in the function name. - std::string FunctionName = std::string(kAsanReportErrorTemplate) + - (AccessIsWrite ? "store" : "load") + itostr(1 << AccessSizeIndex); - // If we are merging crash callbacks, they have two parameters. - AsanErrorCallback[AccessIsWrite][AccessSizeIndex] = cast<Function>( - M.getOrInsertFunction(FunctionName, IRB.getVoidTy(), IntptrTy, NULL)); - } - } - // We insert an empty inline asm after __asan_report* to avoid callback merge. - EmptyAsm = InlineAsm::get(FunctionType::get(IRB.getVoidTy(), false), - StringRef(""), StringRef(""), - /*hasSideEffects=*/true); - llvm::Triple targetTriple(M.getTargetTriple()); bool isAndroid = targetTriple.getEnvironment() == llvm::Triple::Android; @@ -789,18 +968,7 @@ bool AddressSanitizer::runOnModule(Module &M) { MappingOffset = 1ULL << ClMappingOffsetLog; } } - MappingScale = kDefaultShadowScale; - if (ClMappingScale) { - MappingScale = ClMappingScale; - } - // Redzone used for stack and globals is at least 32 bytes. - // For scales 6 and 7, the redzone has to be 64 and 128 bytes respectively. - RedzoneSize = std::max(32, (int)(1 << MappingScale)); - bool Res = false; - - if (ClGlobals) - Res |= insertGlobalRedzones(M); if (ClMappingOffsetLog >= 0) { // Tell the run-time the current values of mapping offset and scale. @@ -814,21 +982,15 @@ bool AddressSanitizer::runOnModule(Module &M) { if (ClMappingScale) { GlobalValue *asan_mapping_scale = new GlobalVariable(M, IntptrTy, true, GlobalValue::LinkOnceODRLinkage, - ConstantInt::get(IntptrTy, MappingScale), + ConstantInt::get(IntptrTy, MappingScale()), kAsanMappingScaleName); // Read the global, otherwise it may be optimized away. IRB.CreateLoad(asan_mapping_scale, true); } - - for (Module::iterator F = M.begin(), E = M.end(); F != E; ++F) { - if (F->isDeclaration()) continue; - Res |= handleFunction(M, *F); - } - appendToGlobalCtors(M, AsanCtorFunction, kAsanCtorAndCtorPriority); - return Res; + return true; } bool AddressSanitizer::maybeInsertAsanInitAtFunctionEntry(Function &F) { @@ -847,19 +1009,24 @@ bool AddressSanitizer::maybeInsertAsanInitAtFunctionEntry(Function &F) { return false; } -bool AddressSanitizer::handleFunction(Module &M, Function &F) { +bool AddressSanitizer::runOnFunction(Function &F) { if (BL->isIn(F)) return false; if (&F == AsanCtorFunction) return false; + DEBUG(dbgs() << "ASAN instrumenting:\n" << F << "\n"); + initializeCallbacks(*F.getParent()); // If needed, insert __asan_init before checking for AddressSafety attr. maybeInsertAsanInitAtFunctionEntry(F); - if (!F.hasFnAttr(Attribute::AddressSafety)) return false; + if (!F.getAttributes().hasAttribute(AttributeSet::FunctionIndex, + Attribute::AddressSafety)) + return false; if (!ClDebugFunc.empty() && ClDebugFunc != F.getName()) return false; - // We want to instrument every address only once per basic block - // (unless there are calls between uses). + + // We want to instrument every address only once per basic block (unless there + // are calls between uses). SmallSet<Value*, 16> TempsToInstrument; SmallVector<Instruction*, 16> ToInstrument; SmallVector<Instruction*, 8> NoReturnCalls; @@ -897,8 +1064,6 @@ bool AddressSanitizer::handleFunction(Module &M, Function &F) { } } - AsanFunctionContext AFC(F); - // Instrument. 
int NumInstrumented = 0; for (size_t i = 0, n = ToInstrument.size(); i != n; i++) { @@ -906,25 +1071,24 @@ bool AddressSanitizer::handleFunction(Module &M, Function &F) { if (ClDebugMin < 0 || ClDebugMax < 0 || (NumInstrumented >= ClDebugMin && NumInstrumented <= ClDebugMax)) { if (isInterestingMemoryAccess(Inst, &IsWrite)) - instrumentMop(AFC, Inst); + instrumentMop(Inst); else - instrumentMemIntrinsic(AFC, cast<MemIntrinsic>(Inst)); + instrumentMemIntrinsic(cast<MemIntrinsic>(Inst)); } NumInstrumented++; } - DEBUG(dbgs() << F); - - bool ChangedStack = poisonStackInFunction(M, F); + FunctionStackPoisoner FSP(F, *this); + bool ChangedStack = FSP.runOnFunction(); // We must unpoison the stack before every NoReturn call (throw, _exit, etc). // See e.g. http://code.google.com/p/address-sanitizer/issues/detail?id=37 for (size_t i = 0, n = NoReturnCalls.size(); i != n; i++) { Instruction *CI = NoReturnCalls[i]; IRBuilder<> IRB(CI); - IRB.CreateCall(M.getOrInsertFunction(kAsanHandleNoReturnName, - IRB.getVoidTy(), NULL)); + IRB.CreateCall(AsanHandleNoReturnFunc); } + DEBUG(dbgs() << "ASAN done instrumenting:\n" << F << "\n"); return NumInstrumented > 0 || ChangedStack || !NoReturnCalls.empty(); } @@ -940,10 +1104,10 @@ static uint64_t ValueForPoison(uint64_t PoisonByte, size_t ShadowRedzoneSize) { static void PoisonShadowPartialRightRedzone(uint8_t *Shadow, size_t Size, - size_t RedzoneSize, + size_t RZSize, size_t ShadowGranularity, uint8_t Magic) { - for (size_t i = 0; i < RedzoneSize; + for (size_t i = 0; i < RZSize; i+= ShadowGranularity, Shadow++) { if (i + ShadowGranularity <= Size) { *Shadow = 0; // fully addressable @@ -955,10 +1119,35 @@ static void PoisonShadowPartialRightRedzone(uint8_t *Shadow, } } -void AddressSanitizer::PoisonStack(const ArrayRef<AllocaInst*> &AllocaVec, - IRBuilder<> IRB, - Value *ShadowBase, bool DoPoison) { - size_t ShadowRZSize = RedzoneSize >> MappingScale; +// Workaround for bug 11395: we don't want to instrument stack in functions +// with large assembly blobs (32-bit only), otherwise reg alloc may crash. +// FIXME: remove once the bug 11395 is fixed. +bool AddressSanitizer::LooksLikeCodeInBug11395(Instruction *I) { + if (LongSize != 32) return false; + CallInst *CI = dyn_cast<CallInst>(I); + if (!CI || !CI->isInlineAsm()) return false; + if (CI->getNumArgOperands() <= 5) return false; + // We have inline assembly with quite a few arguments. 
+ return true; +} + +void FunctionStackPoisoner::initializeCallbacks(Module &M) { + IRBuilder<> IRB(*C); + AsanStackMallocFunc = checkInterfaceFunction(M.getOrInsertFunction( + kAsanStackMallocName, IntptrTy, IntptrTy, IntptrTy, NULL)); + AsanStackFreeFunc = checkInterfaceFunction(M.getOrInsertFunction( + kAsanStackFreeName, IRB.getVoidTy(), + IntptrTy, IntptrTy, IntptrTy, NULL)); + AsanPoisonStackMemoryFunc = checkInterfaceFunction(M.getOrInsertFunction( + kAsanPoisonStackMemoryName, IRB.getVoidTy(), IntptrTy, IntptrTy, NULL)); + AsanUnpoisonStackMemoryFunc = checkInterfaceFunction(M.getOrInsertFunction( + kAsanUnpoisonStackMemoryName, IRB.getVoidTy(), IntptrTy, IntptrTy, NULL)); +} + +void FunctionStackPoisoner::poisonRedZones( + const ArrayRef<AllocaInst*> &AllocaVec, IRBuilder<> IRB, Value *ShadowBase, + bool DoPoison) { + size_t ShadowRZSize = RedzoneSize() >> MappingScale(); assert(ShadowRZSize >= 1 && ShadowRZSize <= 4); Type *RZTy = Type::getIntNTy(*C, ShadowRZSize * 8); Type *RZPtrTy = PointerType::get(RZTy, 0); @@ -974,12 +1163,12 @@ void AddressSanitizer::PoisonStack(const ArrayRef<AllocaInst*> &AllocaVec, IRB.CreateStore(PoisonLeft, IRB.CreateIntToPtr(ShadowBase, RZPtrTy)); // poison all other red zones. - uint64_t Pos = RedzoneSize; + uint64_t Pos = RedzoneSize(); for (size_t i = 0, n = AllocaVec.size(); i < n; i++) { AllocaInst *AI = AllocaVec[i]; uint64_t SizeInBytes = getAllocaSizeInBytes(AI); uint64_t AlignedSize = getAlignedAllocaSize(AI); - assert(AlignedSize - SizeInBytes < RedzoneSize); + assert(AlignedSize - SizeInBytes < RedzoneSize()); Value *Ptr = NULL; Pos += AlignedSize; @@ -989,13 +1178,13 @@ void AddressSanitizer::PoisonStack(const ArrayRef<AllocaInst*> &AllocaVec, // Poison the partial redzone at right Ptr = IRB.CreateAdd( ShadowBase, ConstantInt::get(IntptrTy, - (Pos >> MappingScale) - ShadowRZSize)); - size_t AddressableBytes = RedzoneSize - (AlignedSize - SizeInBytes); + (Pos >> MappingScale()) - ShadowRZSize)); + size_t AddressableBytes = RedzoneSize() - (AlignedSize - SizeInBytes); uint32_t Poison = 0; if (DoPoison) { PoisonShadowPartialRightRedzone((uint8_t*)&Poison, AddressableBytes, - RedzoneSize, - 1ULL << MappingScale, + RedzoneSize(), + 1ULL << MappingScale(), kAsanStackPartialRedzoneMagic); } Value *PartialPoison = ConstantInt::get(RZTy, Poison); @@ -1004,76 +1193,23 @@ void AddressSanitizer::PoisonStack(const ArrayRef<AllocaInst*> &AllocaVec, // Poison the full redzone at right. Ptr = IRB.CreateAdd(ShadowBase, - ConstantInt::get(IntptrTy, Pos >> MappingScale)); - Value *Poison = i == AllocaVec.size() - 1 ? PoisonRight : PoisonMid; + ConstantInt::get(IntptrTy, Pos >> MappingScale())); + bool LastAlloca = (i == AllocaVec.size() - 1); + Value *Poison = LastAlloca ? PoisonRight : PoisonMid; IRB.CreateStore(Poison, IRB.CreateIntToPtr(Ptr, RZPtrTy)); - Pos += RedzoneSize; + Pos += RedzoneSize(); } } -// Workaround for bug 11395: we don't want to instrument stack in functions -// with large assembly blobs (32-bit only), otherwise reg alloc may crash. -// FIXME: remove once the bug 11395 is fixed. -bool AddressSanitizer::LooksLikeCodeInBug11395(Instruction *I) { - if (LongSize != 32) return false; - CallInst *CI = dyn_cast<CallInst>(I); - if (!CI || !CI->isInlineAsm()) return false; - if (CI->getNumArgOperands() <= 5) return false; - // We have inline assembly with quite a few arguments. 
- return true; -} +void FunctionStackPoisoner::poisonStack() { + uint64_t LocalStackSize = TotalStackSize + + (AllocaVec.size() + 1) * RedzoneSize(); -// Find all static Alloca instructions and put -// poisoned red zones around all of them. -// Then unpoison everything back before the function returns. -// -// Stack poisoning does not play well with exception handling. -// When an exception is thrown, we essentially bypass the code -// that unpoisones the stack. This is why the run-time library has -// to intercept __cxa_throw (as well as longjmp, etc) and unpoison the entire -// stack in the interceptor. This however does not work inside the -// actual function which catches the exception. Most likely because the -// compiler hoists the load of the shadow value somewhere too high. -// This causes asan to report a non-existing bug on 453.povray. -// It sounds like an LLVM bug. -bool AddressSanitizer::poisonStackInFunction(Module &M, Function &F) { - if (!ClStack) return false; - SmallVector<AllocaInst*, 16> AllocaVec; - SmallVector<Instruction*, 8> RetVec; - uint64_t TotalSize = 0; - - // Filter out Alloca instructions we want (and can) handle. - // Collect Ret instructions. - for (Function::iterator FI = F.begin(), FE = F.end(); - FI != FE; ++FI) { - BasicBlock &BB = *FI; - for (BasicBlock::iterator BI = BB.begin(), BE = BB.end(); - BI != BE; ++BI) { - if (isa<ReturnInst>(BI)) { - RetVec.push_back(BI); - continue; - } - - AllocaInst *AI = dyn_cast<AllocaInst>(BI); - if (!AI) continue; - if (AI->isArrayAllocation()) continue; - if (!AI->isStaticAlloca()) continue; - if (!AI->getAllocatedType()->isSized()) continue; - if (AI->getAlignment() > RedzoneSize) continue; - AllocaVec.push_back(AI); - uint64_t AlignedSize = getAlignedAllocaSize(AI); - TotalSize += AlignedSize; - } - } - - if (AllocaVec.empty()) return false; - - uint64_t LocalStackSize = TotalSize + (AllocaVec.size() + 1) * RedzoneSize; - - bool DoStackMalloc = ClUseAfterReturn + bool DoStackMalloc = ASan.CheckUseAfterReturn && LocalStackSize <= kMaxStackMallocSize; + assert(AllocaVec.size() > 0); Instruction *InsBefore = AllocaVec[0]; IRBuilder<> IRB(InsBefore); @@ -1081,14 +1217,14 @@ bool AddressSanitizer::poisonStackInFunction(Module &M, Function &F) { Type *ByteArrayTy = ArrayType::get(IRB.getInt8Ty(), LocalStackSize); AllocaInst *MyAlloca = new AllocaInst(ByteArrayTy, "MyAlloca", InsBefore); - MyAlloca->setAlignment(RedzoneSize); + if (ClRealignStack && StackAlignment < RedzoneSize()) + StackAlignment = RedzoneSize(); + MyAlloca->setAlignment(StackAlignment); assert(MyAlloca->isStaticAlloca()); Value *OrigStackBase = IRB.CreatePointerCast(MyAlloca, IntptrTy); Value *LocalStackBase = OrigStackBase; if (DoStackMalloc) { - Value *AsanStackMallocFunc = M.getOrInsertFunction( - kAsanStackMallocName, IntptrTy, IntptrTy, IntptrTy, NULL); LocalStackBase = IRB.CreateCall2(AsanStackMallocFunc, ConstantInt::get(IntptrTy, LocalStackSize), OrigStackBase); } @@ -1098,7 +1234,19 @@ bool AddressSanitizer::poisonStackInFunction(Module &M, Function &F) { raw_svector_ostream StackDescription(StackDescriptionStorage); StackDescription << F.getName() << " " << AllocaVec.size() << " "; - uint64_t Pos = RedzoneSize; + // Insert poison calls for lifetime intrinsics for alloca. 
+ bool HavePoisonedAllocas = false; + for (size_t i = 0, n = AllocaPoisonCallVec.size(); i < n; i++) { + const AllocaPoisonCall &APC = AllocaPoisonCallVec[i]; + IntrinsicInst *II = APC.InsBefore; + AllocaInst *AI = findAllocaForValue(II->getArgOperand(1)); + assert(AI); + IRBuilder<> IRB(II); + poisonAlloca(AI, APC.Size, IRB, APC.DoPoison); + HavePoisonedAllocas |= APC.DoPoison; + } + + uint64_t Pos = RedzoneSize(); // Replace Alloca instructions with base+offset. for (size_t i = 0, n = AllocaVec.size(); i < n; i++) { AllocaInst *AI = AllocaVec[i]; @@ -1107,12 +1255,13 @@ bool AddressSanitizer::poisonStackInFunction(Module &M, Function &F) { StackDescription << Pos << " " << SizeInBytes << " " << Name.size() << " " << Name << " "; uint64_t AlignedSize = getAlignedAllocaSize(AI); - assert((AlignedSize % RedzoneSize) == 0); - AI->replaceAllUsesWith( - IRB.CreateIntToPtr( + assert((AlignedSize % RedzoneSize()) == 0); + Value *NewAllocaPtr = IRB.CreateIntToPtr( IRB.CreateAdd(LocalStackBase, ConstantInt::get(IntptrTy, Pos)), - AI->getType())); - Pos += AlignedSize + RedzoneSize; + AI->getType()); + replaceDbgDeclareForAlloca(AI, NewAllocaPtr, DIB); + AI->replaceAllUsesWith(NewAllocaPtr); + Pos += AlignedSize + RedzoneSize(); } assert(Pos == LocalStackSize); @@ -1121,45 +1270,93 @@ bool AddressSanitizer::poisonStackInFunction(Module &M, Function &F) { IRB.CreateStore(ConstantInt::get(IntptrTy, kCurrentStackFrameMagic), BasePlus0); Value *BasePlus1 = IRB.CreateAdd(LocalStackBase, - ConstantInt::get(IntptrTy, LongSize/8)); + ConstantInt::get(IntptrTy, + ASan.LongSize/8)); BasePlus1 = IRB.CreateIntToPtr(BasePlus1, IntptrPtrTy); - Value *Description = IRB.CreatePointerCast( - createPrivateGlobalForString(M, StackDescription.str()), - IntptrTy); + GlobalVariable *StackDescriptionGlobal = + createPrivateGlobalForString(*F.getParent(), StackDescription.str()); + Value *Description = IRB.CreatePointerCast(StackDescriptionGlobal, + IntptrTy); IRB.CreateStore(Description, BasePlus1); // Poison the stack redzones at the entry. - Value *ShadowBase = memToShadow(LocalStackBase, IRB); - PoisonStack(ArrayRef<AllocaInst*>(AllocaVec), IRB, ShadowBase, true); - - Value *AsanStackFreeFunc = NULL; - if (DoStackMalloc) { - AsanStackFreeFunc = M.getOrInsertFunction( - kAsanStackFreeName, IRB.getVoidTy(), - IntptrTy, IntptrTy, IntptrTy, NULL); - } + Value *ShadowBase = ASan.memToShadow(LocalStackBase, IRB); + poisonRedZones(AllocaVec, IRB, ShadowBase, true); // Unpoison the stack before all ret instructions. for (size_t i = 0, n = RetVec.size(); i < n; i++) { Instruction *Ret = RetVec[i]; IRBuilder<> IRBRet(Ret); - // Mark the current frame as retired. IRBRet.CreateStore(ConstantInt::get(IntptrTy, kRetiredStackFrameMagic), BasePlus0); // Unpoison the stack. - PoisonStack(ArrayRef<AllocaInst*>(AllocaVec), IRBRet, ShadowBase, false); - + poisonRedZones(AllocaVec, IRBRet, ShadowBase, false); if (DoStackMalloc) { + // In use-after-return mode, mark the whole stack frame unaddressable. IRBRet.CreateCall3(AsanStackFreeFunc, LocalStackBase, ConstantInt::get(IntptrTy, LocalStackSize), OrigStackBase); + } else if (HavePoisonedAllocas) { + // If we poisoned some allocas in llvm.lifetime analysis, + // unpoison whole stack frame now. + assert(LocalStackBase == OrigStackBase); + poisonAlloca(LocalStackBase, LocalStackSize, IRBRet, false); } } - if (ClDebugStack) { - DEBUG(dbgs() << F); - } + // We are done. Remove the old unused alloca instructions. 
+ for (size_t i = 0, n = AllocaVec.size(); i < n; i++) + AllocaVec[i]->eraseFromParent(); +} - return true; +void FunctionStackPoisoner::poisonAlloca(Value *V, uint64_t Size, + IRBuilder<> IRB, bool DoPoison) { + // For now just insert the call to ASan runtime. + Value *AddrArg = IRB.CreatePointerCast(V, IntptrTy); + Value *SizeArg = ConstantInt::get(IntptrTy, Size); + IRB.CreateCall2(DoPoison ? AsanPoisonStackMemoryFunc + : AsanUnpoisonStackMemoryFunc, + AddrArg, SizeArg); +} + +// Handling llvm.lifetime intrinsics for a given %alloca: +// (1) collect all llvm.lifetime.xxx(%size, %value) describing the alloca. +// (2) if %size is constant, poison memory for llvm.lifetime.end (to detect +// invalid accesses) and unpoison it for llvm.lifetime.start (the memory +// could be poisoned by previous llvm.lifetime.end instruction, as the +// variable may go in and out of scope several times, e.g. in loops). +// (3) if we poisoned at least one %alloca in a function, +// unpoison the whole stack frame at function exit. + +AllocaInst *FunctionStackPoisoner::findAllocaForValue(Value *V) { + if (AllocaInst *AI = dyn_cast<AllocaInst>(V)) + // We're intested only in allocas we can handle. + return isInterestingAlloca(*AI) ? AI : 0; + // See if we've already calculated (or started to calculate) alloca for a + // given value. + AllocaForValueMapTy::iterator I = AllocaForValue.find(V); + if (I != AllocaForValue.end()) + return I->second; + // Store 0 while we're calculating alloca for value V to avoid + // infinite recursion if the value references itself. + AllocaForValue[V] = 0; + AllocaInst *Res = 0; + if (CastInst *CI = dyn_cast<CastInst>(V)) + Res = findAllocaForValue(CI->getOperand(0)); + else if (PHINode *PN = dyn_cast<PHINode>(V)) { + for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) { + Value *IncValue = PN->getIncomingValue(i); + // Allow self-referencing phi-nodes. + if (IncValue == PN) continue; + AllocaInst *IncValueAI = findAllocaForValue(IncValue); + // AI for incoming values should exist and should all be equal. + if (IncValueAI == 0 || (Res != 0 && IncValueAI != Res)) + return 0; + Res = IncValueAI; + } + } + if (Res != 0) + AllocaForValue[V] = Res; + return Res; } diff --git a/lib/Transforms/Instrumentation/BlackList.cpp b/lib/Transforms/Instrumentation/BlackList.cpp index 2cb1199..4fcbea4 100644 --- a/lib/Transforms/Instrumentation/BlackList.cpp +++ b/lib/Transforms/Instrumentation/BlackList.cpp @@ -13,26 +13,26 @@ // //===----------------------------------------------------------------------===// -#include <utility> -#include <string> - #include "BlackList.h" #include "llvm/ADT/OwningPtr.h" #include "llvm/ADT/SmallVector.h" #include "llvm/ADT/StringExtras.h" -#include "llvm/Function.h" -#include "llvm/GlobalVariable.h" -#include "llvm/Module.h" +#include "llvm/IR/DerivedTypes.h" +#include "llvm/IR/Function.h" +#include "llvm/IR/GlobalVariable.h" +#include "llvm/IR/Module.h" #include "llvm/Support/MemoryBuffer.h" #include "llvm/Support/Regex.h" #include "llvm/Support/raw_ostream.h" #include "llvm/Support/system_error.h" +#include <string> +#include <utility> namespace llvm { BlackList::BlackList(const StringRef Path) { // Validate and open blacklist file. 
- if (!Path.size()) return; + if (Path.empty()) return; OwningPtr<MemoryBuffer> File; if (error_code EC = MemoryBuffer::getFile(Path, File)) { report_fatal_error("Can't open blacklist file: " + Path + ": " + @@ -45,10 +45,17 @@ BlackList::BlackList(const StringRef Path) { StringMap<std::string> Regexps; for (SmallVector<StringRef, 16>::iterator I = Lines.begin(), E = Lines.end(); I != E; ++I) { + // Ignore empty lines and lines starting with "#" + if (I->empty() || I->startswith("#")) + continue; // Get our prefix and unparsed regexp. std::pair<StringRef, StringRef> SplitLine = I->split(":"); StringRef Prefix = SplitLine.first; std::string Regexp = SplitLine.second; + if (Regexp.empty()) { + // Missing ':' in the line. + report_fatal_error("malformed blacklist line: " + SplitLine.first); + } // Replace * with .* for (size_t pos = 0; (pos = Regexp.find("*", pos)) != std::string::npos; @@ -65,7 +72,7 @@ BlackList::BlackList(const StringRef Path) { } // Add this regexp into the proper group by its prefix. - if (Regexps[Prefix].size()) + if (!Regexps[Prefix].empty()) Regexps[Prefix] += "|"; Regexps[Prefix] += Regexp; } @@ -89,14 +96,29 @@ bool BlackList::isIn(const Module &M) { return inSection("src", M.getModuleIdentifier()); } +static StringRef GetGVTypeString(const GlobalVariable &G) { + // Types of GlobalVariables are always pointer types. + Type *GType = G.getType()->getElementType(); + // For now we support blacklisting struct types only. + if (StructType *SGType = dyn_cast<StructType>(GType)) { + if (!SGType->isLiteral()) + return SGType->getName(); + } + return "<unknown type>"; +} + bool BlackList::isInInit(const GlobalVariable &G) { - return isIn(*G.getParent()) || inSection("global-init", G.getName()); + return (isIn(*G.getParent()) || + inSection("global-init", G.getName()) || + inSection("global-init-type", GetGVTypeString(G))); } -bool BlackList::inSection(const StringRef Section, - const StringRef Query) { - Regex *FunctionRegex = Entries[Section]; - return FunctionRegex ? FunctionRegex->match(Query) : false; +bool BlackList::inSection(const StringRef Section, const StringRef Query) { + StringMap<Regex*>::iterator I = Entries.find(Section); + if (I == Entries.end()) return false; + + Regex *FunctionRegex = I->getValue(); + return FunctionRegex->match(Query); } } // namespace llvm diff --git a/lib/Transforms/Instrumentation/BlackList.h b/lib/Transforms/Instrumentation/BlackList.h index 73977fc..ee18a98 100644 --- a/lib/Transforms/Instrumentation/BlackList.h +++ b/lib/Transforms/Instrumentation/BlackList.h @@ -12,10 +12,13 @@ // // The blacklist disables instrumentation of various functions and global // variables. Each line contains a prefix, followed by a wild card expression. +// Empty lines and lines starting with "#" are ignored. 
// --- +// # Blacklisted items: // fun:*_ZN4base6subtle* // global:*global_with_bad_access_or_initialization* // global-init:*global_with_initialization_issues* +// global-init-type:*Namespace::ClassName* // src:file_with_tricky_code.cc // --- // Note that the wild card is in fact an llvm::Regex, but * is automatically diff --git a/lib/Transforms/Instrumentation/BoundsChecking.cpp b/lib/Transforms/Instrumentation/BoundsChecking.cpp index 6429081..b094d42 100644 --- a/lib/Transforms/Instrumentation/BoundsChecking.cpp +++ b/lib/Transforms/Instrumentation/BoundsChecking.cpp @@ -13,19 +13,19 @@ //===----------------------------------------------------------------------===// #define DEBUG_TYPE "bounds-checking" -#include "llvm/IRBuilder.h" -#include "llvm/Intrinsics.h" -#include "llvm/Pass.h" +#include "llvm/Transforms/Instrumentation.h" #include "llvm/ADT/Statistic.h" #include "llvm/Analysis/MemoryBuiltins.h" +#include "llvm/IR/DataLayout.h" +#include "llvm/IR/IRBuilder.h" +#include "llvm/IR/Intrinsics.h" +#include "llvm/Pass.h" #include "llvm/Support/CommandLine.h" #include "llvm/Support/Debug.h" #include "llvm/Support/InstIterator.h" #include "llvm/Support/TargetFolder.h" #include "llvm/Support/raw_ostream.h" -#include "llvm/Target/TargetData.h" #include "llvm/Target/TargetLibraryInfo.h" -#include "llvm/Transforms/Instrumentation.h" using namespace llvm; static cl::opt<bool> SingleTrapBB("bounds-checking-single-trap", @@ -41,25 +41,24 @@ namespace { struct BoundsChecking : public FunctionPass { static char ID; - BoundsChecking(unsigned _Penalty = 5) : FunctionPass(ID), Penalty(_Penalty){ + BoundsChecking() : FunctionPass(ID) { initializeBoundsCheckingPass(*PassRegistry::getPassRegistry()); } virtual bool runOnFunction(Function &F); virtual void getAnalysisUsage(AnalysisUsage &AU) const { - AU.addRequired<TargetData>(); + AU.addRequired<DataLayout>(); AU.addRequired<TargetLibraryInfo>(); } private: - const TargetData *TD; + const DataLayout *TD; const TargetLibraryInfo *TLI; ObjectSizeOffsetEvaluator *ObjSizeEval; BuilderTy *Builder; Instruction *Inst; BasicBlock *TrapBB; - unsigned Penalty; BasicBlock *getTrapBB(); void emitBranchToTrap(Value *Cmp = 0); @@ -109,6 +108,7 @@ void BoundsChecking::emitBranchToTrap(Value *Cmp) { else Cmp = 0; // unconditional branch } + ++ChecksAdded; Instruction *Inst = Builder->GetInsertPoint(); BasicBlock *OldBB = Inst->getParent(); @@ -143,7 +143,7 @@ bool BoundsChecking::instrument(Value *Ptr, Value *InstVal) { Value *Offset = SizeOffset.second; ConstantInt *SizeCI = dyn_cast<ConstantInt>(Size); - IntegerType *IntTy = TD->getIntPtrType(Inst->getContext()); + Type *IntTy = TD->getIntPtrType(Ptr->getType()); Value *NeededSizeVal = ConstantInt::get(IntTy, NeededSize); // three checks are required to ensure safety: @@ -163,12 +163,11 @@ bool BoundsChecking::instrument(Value *Ptr, Value *InstVal) { } emitBranchToTrap(Or); - ++ChecksAdded; return true; } bool BoundsChecking::runOnFunction(Function &F) { - TD = &getAnalysis<TargetData>(); + TD = &getAnalysis<DataLayout>(); TLI = &getAnalysis<TargetLibraryInfo>(); TrapBB = 0; @@ -208,6 +207,6 @@ bool BoundsChecking::runOnFunction(Function &F) { return MadeChange; } -FunctionPass *llvm::createBoundsCheckingPass(unsigned Penalty) { - return new BoundsChecking(Penalty); +FunctionPass *llvm::createBoundsCheckingPass() { + return new BoundsChecking(); } diff --git a/lib/Transforms/Instrumentation/CMakeLists.txt b/lib/Transforms/Instrumentation/CMakeLists.txt index 058f68c..1c9e053 100644 --- 
a/lib/Transforms/Instrumentation/CMakeLists.txt +++ b/lib/Transforms/Instrumentation/CMakeLists.txt @@ -4,6 +4,7 @@ add_llvm_library(LLVMInstrumentation BoundsChecking.cpp EdgeProfiling.cpp GCOVProfiling.cpp + MemorySanitizer.cpp Instrumentation.cpp OptimalEdgeProfiling.cpp PathProfiling.cpp diff --git a/lib/Transforms/Instrumentation/EdgeProfiling.cpp b/lib/Transforms/Instrumentation/EdgeProfiling.cpp index e8ef265..0b18b4c 100644 --- a/lib/Transforms/Instrumentation/EdgeProfiling.cpp +++ b/lib/Transforms/Instrumentation/EdgeProfiling.cpp @@ -18,13 +18,14 @@ //===----------------------------------------------------------------------===// #define DEBUG_TYPE "insert-edge-profiling" +#include "llvm/Transforms/Instrumentation.h" #include "ProfilingUtils.h" -#include "llvm/Module.h" +#include "llvm/ADT/Statistic.h" +#include "llvm/IR/LLVMContext.h" +#include "llvm/IR/Module.h" #include "llvm/Pass.h" #include "llvm/Support/raw_ostream.h" #include "llvm/Transforms/Utils/BasicBlockUtils.h" -#include "llvm/Transforms/Instrumentation.h" -#include "llvm/ADT/Statistic.h" #include <set> using namespace llvm; @@ -54,8 +55,8 @@ ModulePass *llvm::createEdgeProfilerPass() { return new EdgeProfiler(); } bool EdgeProfiler::runOnModule(Module &M) { Function *Main = M.getFunction("main"); if (Main == 0) { - errs() << "WARNING: cannot insert edge profiling into a module" - << " with no main function!\n"; + M.getContext().emitWarning("cannot insert edge profiling into a module" + " with no main function"); return false; // No main, no instrumentation! } diff --git a/lib/Transforms/Instrumentation/GCOVProfiling.cpp b/lib/Transforms/Instrumentation/GCOVProfiling.cpp index 9fcde31..eb0dc1e 100644 --- a/lib/Transforms/Instrumentation/GCOVProfiling.cpp +++ b/lib/Transforms/Instrumentation/GCOVProfiling.cpp @@ -16,19 +16,19 @@ #define DEBUG_TYPE "insert-gcov-profiling" -#include "ProfilingUtils.h" #include "llvm/Transforms/Instrumentation.h" -#include "llvm/DebugInfo.h" -#include "llvm/IRBuilder.h" -#include "llvm/Instructions.h" -#include "llvm/Module.h" -#include "llvm/Pass.h" +#include "ProfilingUtils.h" #include "llvm/ADT/DenseMap.h" #include "llvm/ADT/STLExtras.h" #include "llvm/ADT/Statistic.h" #include "llvm/ADT/StringExtras.h" #include "llvm/ADT/StringMap.h" #include "llvm/ADT/UniqueVector.h" +#include "llvm/DebugInfo.h" +#include "llvm/IR/IRBuilder.h" +#include "llvm/IR/Instructions.h" +#include "llvm/IR/Module.h" +#include "llvm/Pass.h" #include "llvm/Support/Debug.h" #include "llvm/Support/DebugLoc.h" #include "llvm/Support/InstIterator.h" @@ -45,13 +45,14 @@ namespace { static char ID; GCOVProfiler() : ModulePass(ID), EmitNotes(true), EmitData(true), Use402Format(false), - UseExtraChecksum(false) { + UseExtraChecksum(false), NoRedZone(false) { initializeGCOVProfilerPass(*PassRegistry::getPassRegistry()); } - GCOVProfiler(bool EmitNotes, bool EmitData, bool use402Format = false, - bool useExtraChecksum = false) + GCOVProfiler(bool EmitNotes, bool EmitData, bool use402Format, + bool useExtraChecksum, bool NoRedZone_) : ModulePass(ID), EmitNotes(EmitNotes), EmitData(EmitData), - Use402Format(use402Format), UseExtraChecksum(useExtraChecksum) { + Use402Format(use402Format), UseExtraChecksum(useExtraChecksum), + NoRedZone(NoRedZone_) { assert((EmitNotes || EmitData) && "GCOVProfiler asked to do nothing?"); initializeGCOVProfilerPass(*PassRegistry::getPassRegistry()); } @@ -90,6 +91,7 @@ namespace { // list. 
void insertCounterWriteout(ArrayRef<std::pair<GlobalVariable*, MDNode*> >); void insertIndirectCounterIncrement(); + void insertFlush(ArrayRef<std::pair<GlobalVariable*, MDNode*> >); std::string mangleName(DICompileUnit CU, const char *NewStem); @@ -97,6 +99,7 @@ namespace { bool EmitData; bool Use402Format; bool UseExtraChecksum; + bool NoRedZone; Module *M; LLVMContext *Ctx; @@ -109,8 +112,10 @@ INITIALIZE_PASS(GCOVProfiler, "insert-gcov-profiling", ModulePass *llvm::createGCOVProfilerPass(bool EmitNotes, bool EmitData, bool Use402Format, - bool UseExtraChecksum) { - return new GCOVProfiler(EmitNotes, EmitData, Use402Format, UseExtraChecksum); + bool UseExtraChecksum, + bool NoRedZone) { + return new GCOVProfiler(EmitNotes, EmitData, Use402Format, UseExtraChecksum, + NoRedZone); } namespace { @@ -518,6 +523,7 @@ bool GCOVProfiler::emitProfileArcs() { } insertCounterWriteout(CountersBySP); + insertFlush(CountersBySP); } if (InsertIndCounterIncrCode) @@ -538,13 +544,13 @@ GlobalVariable *GCOVProfiler::buildEdgeLookupTable( // read it. Threads and invoke make this untrue. // emit [(succs * preds) x i64*], logically [succ x [pred x i64*]]. + size_t TableSize = Succs.size() * Preds.size(); Type *Int64PtrTy = Type::getInt64PtrTy(*Ctx); - ArrayType *EdgeTableTy = ArrayType::get( - Int64PtrTy, Succs.size() * Preds.size()); + ArrayType *EdgeTableTy = ArrayType::get(Int64PtrTy, TableSize); - Constant **EdgeTable = new Constant*[Succs.size() * Preds.size()]; + OwningArrayPtr<Constant *> EdgeTable(new Constant*[TableSize]); Constant *NullValue = Constant::getNullValue(Int64PtrTy); - for (int i = 0, ie = Succs.size() * Preds.size(); i != ie; ++i) + for (size_t i = 0; i != TableSize; ++i) EdgeTable[i] = NullValue; unsigned Edge = 0; @@ -564,7 +570,7 @@ GlobalVariable *GCOVProfiler::buildEdgeLookupTable( Edge += Successors; } - ArrayRef<Constant*> V(&EdgeTable[0], Succs.size() * Preds.size()); + ArrayRef<Constant*> V(&EdgeTable[0], TableSize); GlobalVariable *EdgeTableGV = new GlobalVariable( *M, EdgeTableTy, true, GlobalValue::InternalLinkage, @@ -630,13 +636,17 @@ GlobalVariable *GCOVProfiler::getEdgeStateValue() { void GCOVProfiler::insertCounterWriteout( ArrayRef<std::pair<GlobalVariable *, MDNode *> > CountersBySP) { - FunctionType *WriteoutFTy = - FunctionType::get(Type::getVoidTy(*Ctx), false); - Function *WriteoutF = Function::Create(WriteoutFTy, - GlobalValue::InternalLinkage, - "__llvm_gcov_writeout", M); + FunctionType *WriteoutFTy = FunctionType::get(Type::getVoidTy(*Ctx), false); + Function *WriteoutF = M->getFunction("__llvm_gcov_writeout"); + if (!WriteoutF) + WriteoutF = Function::Create(WriteoutFTy, GlobalValue::InternalLinkage, + "__llvm_gcov_writeout", M); WriteoutF->setUnnamedAddr(true); - BasicBlock *BB = BasicBlock::Create(*Ctx, "", WriteoutF); + WriteoutF->addFnAttr(Attribute::NoInline); + if (NoRedZone) + WriteoutF->addFnAttr(Attribute::NoRedZone); + + BasicBlock *BB = BasicBlock::Create(*Ctx, "entry", WriteoutF); IRBuilder<> Builder(BB); Constant *StartFile = getStartFileFunc(); @@ -647,8 +657,8 @@ void GCOVProfiler::insertCounterWriteout( NamedMDNode *CU_Nodes = M->getNamedMetadata("llvm.dbg.cu"); if (CU_Nodes) { for (unsigned i = 0, e = CU_Nodes->getNumOperands(); i != e; ++i) { - DICompileUnit compile_unit(CU_Nodes->getOperand(i)); - std::string FilenameGcda = mangleName(compile_unit, "gcda"); + DICompileUnit CU(CU_Nodes->getOperand(i)); + std::string FilenameGcda = mangleName(CU, "gcda"); Builder.CreateCall(StartFile, Builder.CreateGlobalStringPtr(FilenameGcda)); for 
(ArrayRef<std::pair<GlobalVariable *, MDNode *> >::iterator @@ -680,6 +690,8 @@ void GCOVProfiler::insertCounterWriteout( F->setUnnamedAddr(true); F->setLinkage(GlobalValue::InternalLinkage); F->addFnAttr(Attribute::NoInline); + if (NoRedZone) + F->addFnAttr(Attribute::NoRedZone); BB = BasicBlock::Create(*Ctx, "entry", F); Builder.SetInsertPoint(BB); @@ -699,6 +711,8 @@ void GCOVProfiler::insertIndirectCounterIncrement() { Fn->setUnnamedAddr(true); Fn->setLinkage(GlobalValue::InternalLinkage); Fn->addFnAttr(Attribute::NoInline); + if (NoRedZone) + Fn->addFnAttr(Attribute::NoRedZone); Type *Int32Ty = Type::getInt32Ty(*Ctx); Type *Int64Ty = Type::getInt64Ty(*Ctx); @@ -744,3 +758,45 @@ void GCOVProfiler::insertIndirectCounterIncrement() { Builder.SetInsertPoint(Exit); Builder.CreateRetVoid(); } + +void GCOVProfiler:: +insertFlush(ArrayRef<std::pair<GlobalVariable*, MDNode*> > CountersBySP) { + FunctionType *FTy = FunctionType::get(Type::getVoidTy(*Ctx), false); + Function *FlushF = M->getFunction("__gcov_flush"); + if (!FlushF) + FlushF = Function::Create(FTy, GlobalValue::InternalLinkage, + "__gcov_flush", M); + else + FlushF->setLinkage(GlobalValue::InternalLinkage); + FlushF->setUnnamedAddr(true); + FlushF->addFnAttr(Attribute::NoInline); + if (NoRedZone) + FlushF->addFnAttr(Attribute::NoRedZone); + + BasicBlock *Entry = BasicBlock::Create(*Ctx, "entry", FlushF); + + // Write out the current counters. + Constant *WriteoutF = M->getFunction("__llvm_gcov_writeout"); + assert(WriteoutF && "Need to create the writeout function first!"); + + IRBuilder<> Builder(Entry); + Builder.CreateCall(WriteoutF); + + // Zero out the counters. + for (ArrayRef<std::pair<GlobalVariable *, MDNode *> >::iterator + I = CountersBySP.begin(), E = CountersBySP.end(); + I != E; ++I) { + GlobalVariable *GV = I->first; + Constant *Null = Constant::getNullValue(GV->getType()->getElementType()); + Builder.CreateStore(Null, GV); + } + + Type *RetTy = FlushF->getReturnType(); + if (RetTy == Type::getVoidTy(*Ctx)) + Builder.CreateRetVoid(); + else if (RetTy->isIntegerTy()) + // Used if __gcov_flush was implicitly declared. + Builder.CreateRet(ConstantInt::get(RetTy, 0)); + else + report_fatal_error("invalid return type for __gcov_flush"); +} diff --git a/lib/Transforms/Instrumentation/Instrumentation.cpp b/lib/Transforms/Instrumentation/Instrumentation.cpp index 1e0b4a3..8ba1025 100644 --- a/lib/Transforms/Instrumentation/Instrumentation.cpp +++ b/lib/Transforms/Instrumentation/Instrumentation.cpp @@ -21,11 +21,13 @@ using namespace llvm; /// library. 
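// Editor's note (not part of this patch): a usage sketch for the insertFlush()
// function above. Since the pass keeps a pre-existing declaration of
// __gcov_flush and gives it internal linkage, code in an instrumented
// translation unit can call it to write out and reset the counters before an
// abnormal exit:
//
//   extern "C" void __gcov_flush(void);
//   void bail_out(void) {
//     __gcov_flush();  // writes the .gcda files, then zeroes the counters
//     _exit(1);        // coverage gathered so far is preserved
//   }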
 void llvm::initializeInstrumentation(PassRegistry &Registry) {
   initializeAddressSanitizerPass(Registry);
+  initializeAddressSanitizerModulePass(Registry);
   initializeBoundsCheckingPass(Registry);
   initializeEdgeProfilerPass(Registry);
   initializeGCOVProfilerPass(Registry);
   initializeOptimalEdgeProfilerPass(Registry);
   initializePathProfilerPass(Registry);
+  initializeMemorySanitizerPass(Registry);
   initializeThreadSanitizerPass(Registry);
 }
 
diff --git a/lib/Transforms/Instrumentation/MaximumSpanningTree.h b/lib/Transforms/Instrumentation/MaximumSpanningTree.h
index a4bb5a6..363539b 100644
--- a/lib/Transforms/Instrumentation/MaximumSpanningTree.h
+++ b/lib/Transforms/Instrumentation/MaximumSpanningTree.h
@@ -15,10 +15,10 @@
 #ifndef LLVM_ANALYSIS_MAXIMUMSPANNINGTREE_H
 #define LLVM_ANALYSIS_MAXIMUMSPANNINGTREE_H
 
-#include "llvm/BasicBlock.h"
 #include "llvm/ADT/EquivalenceClasses.h"
-#include <vector>
+#include "llvm/IR/BasicBlock.h"
 #include <algorithm>
+#include <vector>
 
 namespace llvm {
 
diff --git a/lib/Transforms/Instrumentation/MemorySanitizer.cpp b/lib/Transforms/Instrumentation/MemorySanitizer.cpp
new file mode 100644
index 0000000..58d5801
--- /dev/null
+++ b/lib/Transforms/Instrumentation/MemorySanitizer.cpp
@@ -0,0 +1,1857 @@
+//===-- MemorySanitizer.cpp - detector of uninitialized reads -------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+/// \file
+/// This file is a part of MemorySanitizer, a detector of uninitialized
+/// reads.
+///
+/// Status: early prototype.
+///
+/// The algorithm of the tool is similar to Memcheck
+/// (http://goo.gl/QKbem). We associate a few shadow bits with every
+/// byte of the application memory, poison the shadow of the malloc-ed
+/// or alloca-ed memory, load the shadow bits on every memory read,
+/// propagate the shadow bits through some of the arithmetic
+/// instructions (including MOV), store the shadow bits on every memory
+/// write, and report a bug on some other instructions (e.g. JMP) if the
+/// associated shadow is poisoned.
+///
+/// But there are differences too. The first and major one:
+/// compiler instrumentation instead of binary instrumentation. This
+/// gives us much better register allocation, possible compiler
+/// optimizations and a fast start-up. But it also brings a major issue:
+/// msan needs to see all program events, including system
+/// calls and reads/writes in system libraries, so we either need to
+/// compile *everything* with msan or use a binary translation
+/// component (e.g. DynamoRIO) to instrument pre-built libraries.
+/// Another difference from Memcheck is that we use 8 shadow bits per
+/// byte of application memory and use a direct shadow mapping. This
+/// greatly simplifies the instrumentation code and avoids races on
+/// shadow updates (Memcheck is single-threaded so races are not a
+/// concern there. Memcheck uses 2 shadow bits per byte with a slow
+/// path storage that uses 8 bits per byte).
+///
+/// The default value of shadow is 0, which means "clean" (not poisoned).
+///
+/// Every module initializer should call __msan_init to ensure that the
+/// shadow memory is ready. On error, __msan_warning is called.
+/// Since parameters and return values may be passed via registers, we have a
+/// specialized thread-local shadow for return values
+/// (__msan_retval_tls) and parameters (__msan_param_tls).
+///
+/// Origin tracking.
+///
+/// MemorySanitizer can track origins (allocation points) of all uninitialized
+/// values. This behavior is controlled with a flag (msan-track-origins) and is
+/// disabled by default.
+///
+/// Origins are 4-byte values created and interpreted by the runtime library.
+/// They are stored in a second shadow mapping, one 4-byte value for 4 bytes
+/// of application memory. Propagation of origins is basically a bunch of
+/// "select" instructions that pick the origin of a dirty argument, if an
+/// instruction has one.
+///
+/// Every 4 aligned, consecutive bytes of application memory have one origin
+/// value associated with them. If these bytes contain uninitialized data
+/// coming from 2 different allocations, the last store wins. Because of this,
+/// MemorySanitizer reports can show unrelated origins, but this is unlikely in
+/// practice.
+///
+/// Origins are meaningless for fully initialized values, so MemorySanitizer
+/// avoids storing origin to memory when a fully initialized value is stored.
+/// This way it avoids needlessly overwriting the origin of the 4-byte region
+/// on a short (i.e. 1 byte) clean store, and it is also good for performance.
+//===----------------------------------------------------------------------===//
+
+#define DEBUG_TYPE "msan"
+
+#include "llvm/Transforms/Instrumentation.h"
+#include "BlackList.h"
+#include "llvm/ADT/DepthFirstIterator.h"
+#include "llvm/ADT/SmallString.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/ValueMap.h"
+#include "llvm/IR/DataLayout.h"
+#include "llvm/IR/Function.h"
+#include "llvm/IR/IRBuilder.h"
+#include "llvm/IR/InlineAsm.h"
+#include "llvm/IR/IntrinsicInst.h"
+#include "llvm/IR/LLVMContext.h"
+#include "llvm/IR/MDBuilder.h"
+#include "llvm/IR/Module.h"
+#include "llvm/IR/Type.h"
+#include "llvm/InstVisitor.h"
+#include "llvm/Support/CommandLine.h"
+#include "llvm/Support/Compiler.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/raw_ostream.h"
+#include "llvm/Transforms/Utils/BasicBlockUtils.h"
+#include "llvm/Transforms/Utils/Local.h"
+#include "llvm/Transforms/Utils/ModuleUtils.h"
+
+using namespace llvm;
+
+static const uint64_t kShadowMask32 = 1ULL << 31;
+static const uint64_t kShadowMask64 = 1ULL << 46;
+static const uint64_t kOriginOffset32 = 1ULL << 30;
+static const uint64_t kOriginOffset64 = 1ULL << 45;
+static const unsigned kMinOriginAlignment = 4;
+static const unsigned kShadowTLSAlignment = 8;
+
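// Editor's note (not part of this patch): an illustrative calculation with
// the 64-bit parameters above. Shadow is a direct mapping obtained by masking
// out one address bit, and the origin cell for an address is derived from its
// shadow address, rounded down to 4-byte alignment:
//
//   #include <cstdint>
//   constexpr uint64_t kMask64 = 1ULL << 46;
//   constexpr uint64_t kOrigin64 = 1ULL << 45;
//   constexpr uint64_t shadowAddr(uint64_t App) { return App & ~kMask64; }
//   constexpr uint64_t originAddr(uint64_t App) {
//     return (shadowAddr(App) + kOrigin64) & ~3ULL;
//   }
//   static_assert(shadowAddr(0x7f0000001234ULL) == 0x3f0000001234ULL,
//                 "the mapping clears bit 46");
//   static_assert(originAddr(0x7f0000001235ULL) == 0x5f0000001234ULL,
//                 "origin sits kOriginOffset64 above shadow, 4-byte aligned");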
+/// \brief Track origins of uninitialized values.
+///
+/// Adds a section to the MemorySanitizer report that points to the allocation
+/// (stack or heap) the uninitialized bits came from originally.
+static cl::opt<bool> ClTrackOrigins("msan-track-origins",
+       cl::desc("Track origins (allocation sites) of poisoned memory"),
+       cl::Hidden, cl::init(false));
+static cl::opt<bool> ClKeepGoing("msan-keep-going",
+       cl::desc("keep going after reporting a UMR"),
+       cl::Hidden, cl::init(false));
+static cl::opt<bool> ClPoisonStack("msan-poison-stack",
+       cl::desc("poison uninitialized stack variables"),
+       cl::Hidden, cl::init(true));
+static cl::opt<bool> ClPoisonStackWithCall("msan-poison-stack-with-call",
+       cl::desc("poison uninitialized stack variables with a call"),
+       cl::Hidden, cl::init(false));
+static cl::opt<int> ClPoisonStackPattern("msan-poison-stack-pattern",
+       cl::desc("poison uninitialized stack variables with the given pattern"),
+       cl::Hidden, cl::init(0xff));
+
+static cl::opt<bool> ClHandleICmp("msan-handle-icmp",
+       cl::desc("propagate shadow through ICmpEQ and ICmpNE"),
+       cl::Hidden, cl::init(true));
+
+static cl::opt<bool> ClStoreCleanOrigin("msan-store-clean-origin",
+       cl::desc("store origin for clean (fully initialized) values"),
+       cl::Hidden, cl::init(false));
+
+// This flag controls whether we check the shadow of the address
+// operand of a load or store. Such bugs are very rare, since a load from
+// a garbage address typically results in SEGV, but they still happen
+// (e.g. only the lower bits of the address are garbage, or the access happens
+// early at program startup when malloc-ed memory is more likely to
+// be zeroed). As of 2012-08-28 this flag adds a 20% slowdown.
+static cl::opt<bool> ClCheckAccessAddress("msan-check-access-address",
+       cl::desc("report accesses through a pointer which has poisoned shadow"),
+       cl::Hidden, cl::init(true));
+
+static cl::opt<bool> ClDumpStrictInstructions("msan-dump-strict-instructions",
+       cl::desc("print out instructions with default strict semantics"),
+       cl::Hidden, cl::init(false));
+
+static cl::opt<std::string> ClBlacklistFile("msan-blacklist",
+       cl::desc("File containing the list of functions where MemorySanitizer "
+                "should not report bugs"), cl::Hidden);
+
+namespace {
+
+/// \brief An instrumentation pass implementing detection of uninitialized
+/// reads.
+///
+/// MemorySanitizer: instrument the code in a module to find
+/// uninitialized reads.
+class MemorySanitizer : public FunctionPass {
+ public:
+  MemorySanitizer(bool TrackOrigins = false,
+                  StringRef BlacklistFile = StringRef())
+    : FunctionPass(ID),
+      TrackOrigins(TrackOrigins || ClTrackOrigins),
+      TD(0),
+      WarningFn(0),
+      BlacklistFile(BlacklistFile.empty() ? ClBlacklistFile
+                                          : BlacklistFile) { }
+  const char *getPassName() const { return "MemorySanitizer"; }
+  bool runOnFunction(Function &F);
+  bool doInitialization(Module &M);
+  static char ID;  // Pass identification, replacement for typeid.
+
+ private:
+  void initializeCallbacks(Module &M);
+
+  /// \brief Track origins (allocation points) of uninitialized values.
+  bool TrackOrigins;
+
+  DataLayout *TD;
+  LLVMContext *C;
+  Type *IntptrTy;
+  Type *OriginTy;
+  /// \brief Thread-local shadow storage for function parameters.
+  GlobalVariable *ParamTLS;
+  /// \brief Thread-local origin storage for function parameters.
+  GlobalVariable *ParamOriginTLS;
+  /// \brief Thread-local shadow storage for function return value.
+  GlobalVariable *RetvalTLS;
+  /// \brief Thread-local origin storage for function return value.
+  GlobalVariable *RetvalOriginTLS;
+  /// \brief Thread-local shadow storage for in-register va_arg function
+  /// parameters (x86_64-specific).
+  GlobalVariable *VAArgTLS;
+  /// \brief Thread-local shadow storage for va_arg overflow area
+  /// (x86_64-specific).
+  GlobalVariable *VAArgOverflowSizeTLS;
+  /// \brief Thread-local space used to pass origin value to the UMR reporting
+  /// function.
+  GlobalVariable *OriginTLS;
+
+  /// \brief The run-time callback to print a warning.
+  Value *WarningFn;
+  /// \brief Run-time helper that copies origin info for a memory range.
+  Value *MsanCopyOriginFn;
+  /// \brief Run-time helper that generates a new origin value for a stack
+  /// allocation.
+  Value *MsanSetAllocaOriginFn;
+  /// \brief Run-time helper that poisons stack on function entry.
+  Value *MsanPoisonStackFn;
+  /// \brief MSan runtime replacements for memmove, memcpy and memset.
+  Value *MemmoveFn, *MemcpyFn, *MemsetFn;
+
+  /// \brief Address mask used in application-to-shadow address calculation.
+  /// ShadowAddr is computed as ApplicationAddr & ~ShadowMask.
+  uint64_t ShadowMask;
+  /// \brief Offset of the origin shadow from the "normal" shadow.
+  /// OriginAddr is computed as (ShadowAddr + OriginOffset) & ~3ULL.
+  uint64_t OriginOffset;
+  /// \brief Branch weights for error reporting.
+  MDNode *ColdCallWeights;
+  /// \brief Branch weights for origin store.
+  MDNode *OriginStoreWeights;
+  /// \brief Path to blacklist file.
+  SmallString<64> BlacklistFile;
+  /// \brief The blacklist.
+  OwningPtr<BlackList> BL;
+  /// \brief An empty volatile inline asm that prevents callback merge.
+  InlineAsm *EmptyAsm;
+
+  friend struct MemorySanitizerVisitor;
+  friend struct VarArgAMD64Helper;
+};
+}  // namespace
+
+char MemorySanitizer::ID = 0;
+INITIALIZE_PASS(MemorySanitizer, "msan",
+                "MemorySanitizer: detects uninitialized reads.",
+                false, false)
+
+FunctionPass *llvm::createMemorySanitizerPass(bool TrackOrigins,
+                                              StringRef BlacklistFile) {
+  return new MemorySanitizer(TrackOrigins, BlacklistFile);
+}
+
+/// \brief Create a non-const global initialized with the given string.
+///
+/// Creates a writable global for Str so that we can pass it to the
+/// run-time lib. The runtime uses the first 4 bytes of the string to store
+/// the frame ID, so the string needs to be mutable.
+static GlobalVariable *createPrivateNonConstGlobalForString(Module &M,
+                                                            StringRef Str) {
+  Constant *StrConst = ConstantDataArray::getString(M.getContext(), Str);
+  return new GlobalVariable(M, StrConst->getType(), /*isConstant=*/false,
+                            GlobalValue::PrivateLinkage, StrConst, "");
+}
+
+
+/// \brief Insert extern declarations of runtime-provided functions and
+/// globals.
+void MemorySanitizer::initializeCallbacks(Module &M) {
+  // Only do this once.
+  if (WarningFn)
+    return;
+
+  IRBuilder<> IRB(*C);
+  // Create the callback.
+  // FIXME: this function should have "Cold" calling conv,
+  // which is not yet implemented.
+  StringRef WarningFnName = ClKeepGoing ?
"__msan_warning" + : "__msan_warning_noreturn"; + WarningFn = M.getOrInsertFunction(WarningFnName, IRB.getVoidTy(), NULL); + + MsanCopyOriginFn = M.getOrInsertFunction( + "__msan_copy_origin", IRB.getVoidTy(), IRB.getInt8PtrTy(), + IRB.getInt8PtrTy(), IntptrTy, NULL); + MsanSetAllocaOriginFn = M.getOrInsertFunction( + "__msan_set_alloca_origin", IRB.getVoidTy(), IRB.getInt8PtrTy(), IntptrTy, + IRB.getInt8PtrTy(), NULL); + MsanPoisonStackFn = M.getOrInsertFunction( + "__msan_poison_stack", IRB.getVoidTy(), IRB.getInt8PtrTy(), IntptrTy, NULL); + MemmoveFn = M.getOrInsertFunction( + "__msan_memmove", IRB.getInt8PtrTy(), IRB.getInt8PtrTy(), + IRB.getInt8PtrTy(), IntptrTy, NULL); + MemcpyFn = M.getOrInsertFunction( + "__msan_memcpy", IRB.getInt8PtrTy(), IRB.getInt8PtrTy(), IRB.getInt8PtrTy(), + IntptrTy, NULL); + MemsetFn = M.getOrInsertFunction( + "__msan_memset", IRB.getInt8PtrTy(), IRB.getInt8PtrTy(), IRB.getInt32Ty(), + IntptrTy, NULL); + + // Create globals. + RetvalTLS = new GlobalVariable( + M, ArrayType::get(IRB.getInt64Ty(), 8), false, + GlobalVariable::ExternalLinkage, 0, "__msan_retval_tls", 0, + GlobalVariable::GeneralDynamicTLSModel); + RetvalOriginTLS = new GlobalVariable( + M, OriginTy, false, GlobalVariable::ExternalLinkage, 0, + "__msan_retval_origin_tls", 0, GlobalVariable::GeneralDynamicTLSModel); + + ParamTLS = new GlobalVariable( + M, ArrayType::get(IRB.getInt64Ty(), 1000), false, + GlobalVariable::ExternalLinkage, 0, "__msan_param_tls", 0, + GlobalVariable::GeneralDynamicTLSModel); + ParamOriginTLS = new GlobalVariable( + M, ArrayType::get(OriginTy, 1000), false, GlobalVariable::ExternalLinkage, + 0, "__msan_param_origin_tls", 0, GlobalVariable::GeneralDynamicTLSModel); + + VAArgTLS = new GlobalVariable( + M, ArrayType::get(IRB.getInt64Ty(), 1000), false, + GlobalVariable::ExternalLinkage, 0, "__msan_va_arg_tls", 0, + GlobalVariable::GeneralDynamicTLSModel); + VAArgOverflowSizeTLS = new GlobalVariable( + M, IRB.getInt64Ty(), false, GlobalVariable::ExternalLinkage, 0, + "__msan_va_arg_overflow_size_tls", 0, + GlobalVariable::GeneralDynamicTLSModel); + OriginTLS = new GlobalVariable( + M, IRB.getInt32Ty(), false, GlobalVariable::ExternalLinkage, 0, + "__msan_origin_tls", 0, GlobalVariable::GeneralDynamicTLSModel); + + // We insert an empty inline asm after __msan_report* to avoid callback merge. + EmptyAsm = InlineAsm::get(FunctionType::get(IRB.getVoidTy(), false), + StringRef(""), StringRef(""), + /*hasSideEffects=*/true); +} + +/// \brief Module-level initialization. +/// +/// inserts a call to __msan_init to the module's constructor list. +bool MemorySanitizer::doInitialization(Module &M) { + TD = getAnalysisIfAvailable<DataLayout>(); + if (!TD) + return false; + BL.reset(new BlackList(BlacklistFile)); + C = &(M.getContext()); + unsigned PtrSize = TD->getPointerSizeInBits(/* AddressSpace */0); + switch (PtrSize) { + case 64: + ShadowMask = kShadowMask64; + OriginOffset = kOriginOffset64; + break; + case 32: + ShadowMask = kShadowMask32; + OriginOffset = kOriginOffset32; + break; + default: + report_fatal_error("unsupported pointer size"); + break; + } + + IRBuilder<> IRB(*C); + IntptrTy = IRB.getIntPtrTy(TD); + OriginTy = IRB.getInt32Ty(); + + ColdCallWeights = MDBuilder(*C).createBranchWeights(1, 1000); + OriginStoreWeights = MDBuilder(*C).createBranchWeights(1, 1000); + + // Insert a call to __msan_init/__msan_track_origins into the module's CTORs. 
+ appendToGlobalCtors(M, cast<Function>(M.getOrInsertFunction( + "__msan_init", IRB.getVoidTy(), NULL)), 0); + + new GlobalVariable(M, IRB.getInt32Ty(), true, GlobalValue::WeakODRLinkage, + IRB.getInt32(TrackOrigins), "__msan_track_origins"); + + return true; +} + +namespace { + +/// \brief A helper class that handles instrumentation of VarArg +/// functions on a particular platform. +/// +/// Implementations are expected to insert the instrumentation +/// necessary to propagate argument shadow through VarArg function +/// calls. Visit* methods are called during an InstVisitor pass over +/// the function, and should avoid creating new basic blocks. A new +/// instance of this class is created for each instrumented function. +struct VarArgHelper { + /// \brief Visit a CallSite. + virtual void visitCallSite(CallSite &CS, IRBuilder<> &IRB) = 0; + + /// \brief Visit a va_start call. + virtual void visitVAStartInst(VAStartInst &I) = 0; + + /// \brief Visit a va_copy call. + virtual void visitVACopyInst(VACopyInst &I) = 0; + + /// \brief Finalize function instrumentation. + /// + /// This method is called after visiting all interesting (see above) + /// instructions in a function. + virtual void finalizeInstrumentation() = 0; + + virtual ~VarArgHelper() {} +}; + +struct MemorySanitizerVisitor; + +VarArgHelper* +CreateVarArgHelper(Function &Func, MemorySanitizer &Msan, + MemorySanitizerVisitor &Visitor); + +/// This class does all the work for a given function. Store and Load +/// instructions store and load corresponding shadow and origin +/// values. Most instructions propagate shadow from arguments to their +/// return values. Certain instructions (most importantly, BranchInst) +/// test their argument shadow and print reports (with a runtime call) if it's +/// non-zero. +struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> { + Function &F; + MemorySanitizer &MS; + SmallVector<PHINode *, 16> ShadowPHINodes, OriginPHINodes; + ValueMap<Value*, Value*> ShadowMap, OriginMap; + bool InsertChecks; + OwningPtr<VarArgHelper> VAHelper; + + // An unfortunate workaround for asymmetric lowering of va_arg stuff. + // See a comment in visitCallSite for more details. 
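// Editor's note (not part of this patch): an illustrative cross-check of the
// two offsets defined just below. They match the AMD64 ABI register save
// area layout: six 8-byte GP argument registers (rdi, rsi, rdx, rcx, r8, r9)
// come first, followed by eight 16-byte FP/SSE registers (xmm0-xmm7).
static_assert(6 * 8 == 48, "GP save area ends at byte 48");
static_assert(48 + 8 * 16 == 176, "FP/SSE save area ends at byte 176");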
+  static const unsigned AMD64GpEndOffset = 48;  // AMD64 ABI Draft 0.99.6 p3.5.7
+  static const unsigned AMD64FpEndOffset = 176;
+
+  struct ShadowOriginAndInsertPoint {
+    Instruction *Shadow;
+    Instruction *Origin;
+    Instruction *OrigIns;
+    ShadowOriginAndInsertPoint(Instruction *S, Instruction *O, Instruction *I)
+      : Shadow(S), Origin(O), OrigIns(I) { }
+    ShadowOriginAndInsertPoint() : Shadow(0), Origin(0), OrigIns(0) { }
+  };
+  SmallVector<ShadowOriginAndInsertPoint, 16> InstrumentationList;
+  SmallVector<Instruction*, 16> StoreList;
+
+  MemorySanitizerVisitor(Function &F, MemorySanitizer &MS)
+    : F(F), MS(MS), VAHelper(CreateVarArgHelper(F, MS, *this)) {
+    InsertChecks = !MS.BL->isIn(F);
+    DEBUG(if (!InsertChecks)
+          dbgs() << "MemorySanitizer is not inserting checks into '"
+                 << F.getName() << "'\n");
+  }
+
+  void materializeStores() {
+    for (size_t i = 0, n = StoreList.size(); i < n; i++) {
+      StoreInst& I = *dyn_cast<StoreInst>(StoreList[i]);
+
+      IRBuilder<> IRB(&I);
+      Value *Val = I.getValueOperand();
+      Value *Addr = I.getPointerOperand();
+      Value *Shadow = getShadow(Val);
+      Value *ShadowPtr = getShadowPtr(Addr, Shadow->getType(), IRB);
+
+      StoreInst *NewSI =
+        IRB.CreateAlignedStore(Shadow, ShadowPtr, I.getAlignment());
+      DEBUG(dbgs() << "  STORE: " << *NewSI << "\n");
+      (void)NewSI;
+      // If the store is volatile, add a check.
+      if (I.isVolatile())
+        insertCheck(Val, &I);
+      if (ClCheckAccessAddress)
+        insertCheck(Addr, &I);
+
+      if (MS.TrackOrigins) {
+        unsigned Alignment = std::max(kMinOriginAlignment, I.getAlignment());
+        if (ClStoreCleanOrigin || isa<StructType>(Shadow->getType())) {
+          IRB.CreateAlignedStore(getOrigin(Val), getOriginPtr(Addr, IRB),
+                                 Alignment);
+        } else {
+          Value *ConvertedShadow = convertToShadowTyNoVec(Shadow, IRB);
+
+          Constant *Cst = dyn_cast_or_null<Constant>(ConvertedShadow);
+          // TODO(eugenis): handle non-zero constant shadow by inserting an
+          // unconditional check (we cannot simply fail compilation, as this
+          // could be in dead code).
+          if (Cst)
+            continue;
+
+          Value *Cmp = IRB.CreateICmpNE(ConvertedShadow,
+                                        getCleanShadow(ConvertedShadow),
+                                        "_mscmp");
+          Instruction *CheckTerm =
+            SplitBlockAndInsertIfThen(cast<Instruction>(Cmp), false,
+                                      MS.OriginStoreWeights);
+          IRBuilder<> IRBNew(CheckTerm);
+          IRBNew.CreateAlignedStore(getOrigin(Val), getOriginPtr(Addr, IRBNew),
+                                    Alignment);
+        }
+      }
+    }
+  }
+
+  void materializeChecks() {
+    for (size_t i = 0, n = InstrumentationList.size(); i < n; i++) {
+      Instruction *Shadow = InstrumentationList[i].Shadow;
+      Instruction *OrigIns = InstrumentationList[i].OrigIns;
+      IRBuilder<> IRB(OrigIns);
+      DEBUG(dbgs() << "  SHAD0 : " << *Shadow << "\n");
+      Value *ConvertedShadow = convertToShadowTyNoVec(Shadow, IRB);
+      DEBUG(dbgs() << "  SHAD1 : " << *ConvertedShadow << "\n");
+      Value *Cmp = IRB.CreateICmpNE(ConvertedShadow,
+                                    getCleanShadow(ConvertedShadow), "_mscmp");
+      Instruction *CheckTerm =
+        SplitBlockAndInsertIfThen(cast<Instruction>(Cmp),
+                                  /* Unreachable */ !ClKeepGoing,
+                                  MS.ColdCallWeights);
+
+      IRB.SetInsertPoint(CheckTerm);
+      if (MS.TrackOrigins) {
+        Instruction *Origin = InstrumentationList[i].Origin;
+        IRB.CreateStore(Origin ? (Value*)Origin : (Value*)IRB.getInt32(0),
+                        MS.OriginTLS);
+      }
+      CallInst *Call = IRB.CreateCall(MS.WarningFn);
+      Call->setDebugLoc(OrigIns->getDebugLoc());
+      IRB.CreateCall(MS.EmptyAsm);
+      DEBUG(dbgs() << "  CHECK: " << *Cmp << "\n");
+    }
+    DEBUG(dbgs() << "DONE:\n" << F);
+  }
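// Editor's note (not part of this patch): a C-level sketch of the sequence
// materializeChecks() emits for one queued check. The runtime symbols are
// the ones declared in initializeCallbacks() above; the helper name is
// illustrative only.
extern "C" void __msan_warning_noreturn(void);
extern "C" __thread unsigned int __msan_origin_tls;
inline void materialized_check(unsigned long long s, unsigned int o) {
  if (s != 0) {                  // the "_mscmp" compare, branch weighted cold
    __msan_origin_tls = o;       // only emitted with -msan-track-origins
    __msan_warning_noreturn();   // __msan_warning under -msan-keep-going
  }
}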
+
+  /// \brief Add MemorySanitizer instrumentation to a function.
+  bool runOnFunction() {
+    MS.initializeCallbacks(*F.getParent());
+    if (!MS.TD) return false;
+
+    // In the presence of unreachable blocks, we may see Phi nodes with
+    // incoming nodes from such blocks. Since InstVisitor skips unreachable
+    // blocks, such nodes will not have any shadow value associated with them.
+    // It's easier to remove unreachable blocks than deal with missing shadow.
+    removeUnreachableBlocks(F);
+
+    // Iterate all BBs in depth-first order and create shadow instructions
+    // for all instructions (where applicable).
+    // For PHI nodes we create dummy shadow PHIs which will be finalized later.
+    for (df_iterator<BasicBlock*> DI = df_begin(&F.getEntryBlock()),
+         DE = df_end(&F.getEntryBlock()); DI != DE; ++DI) {
+      BasicBlock *BB = *DI;
+      visit(*BB);
+    }
+
+    // Finalize PHI nodes.
+    for (size_t i = 0, n = ShadowPHINodes.size(); i < n; i++) {
+      PHINode *PN = ShadowPHINodes[i];
+      PHINode *PNS = cast<PHINode>(getShadow(PN));
+      PHINode *PNO = MS.TrackOrigins ? cast<PHINode>(getOrigin(PN)) : 0;
+      size_t NumValues = PN->getNumIncomingValues();
+      for (size_t v = 0; v < NumValues; v++) {
+        PNS->addIncoming(getShadow(PN, v), PN->getIncomingBlock(v));
+        if (PNO)
+          PNO->addIncoming(getOrigin(PN, v), PN->getIncomingBlock(v));
+      }
+    }
+
+    VAHelper->finalizeInstrumentation();
+
+    // Delayed instrumentation of StoreInst.
+    // This may add new checks to be inserted later.
+    materializeStores();
+
+    // Insert shadow value checks.
+    materializeChecks();
+
+    return true;
+  }
+
+  /// \brief Compute the shadow type that corresponds to a given Value.
+  Type *getShadowTy(Value *V) {
+    return getShadowTy(V->getType());
+  }
+
+  /// \brief Compute the shadow type that corresponds to a given Type.
+  Type *getShadowTy(Type *OrigTy) {
+    if (!OrigTy->isSized()) {
+      return 0;
+    }
+    // For integer types, the shadow is the same as the original type.
+    // This may return weird-sized types like i1.
+    if (IntegerType *IT = dyn_cast<IntegerType>(OrigTy))
+      return IT;
+    if (VectorType *VT = dyn_cast<VectorType>(OrigTy)) {
+      uint32_t EltSize = MS.TD->getTypeStoreSizeInBits(VT->getElementType());
+      return VectorType::get(IntegerType::get(*MS.C, EltSize),
+                             VT->getNumElements());
+    }
+    if (StructType *ST = dyn_cast<StructType>(OrigTy)) {
+      SmallVector<Type*, 4> Elements;
+      for (unsigned i = 0, n = ST->getNumElements(); i < n; i++)
+        Elements.push_back(getShadowTy(ST->getElementType(i)));
+      StructType *Res = StructType::get(*MS.C, Elements, ST->isPacked());
+      DEBUG(dbgs() << "getShadowTy: " << *ST << " ===> " << *Res << "\n");
+      return Res;
+    }
+    uint32_t TypeSize = MS.TD->getTypeStoreSizeInBits(OrigTy);
+    return IntegerType::get(*MS.C, TypeSize);
+  }
+
+  /// \brief Flatten a vector type.
+  Type *getShadowTyNoVec(Type *ty) {
+    if (VectorType *vt = dyn_cast<VectorType>(ty))
+      return IntegerType::get(*MS.C, vt->getBitWidth());
+    return ty;
+  }
+
+  /// \brief Convert a shadow value to its flattened variant.
+  Value *convertToShadowTyNoVec(Value *V, IRBuilder<> &IRB) {
+    Type *Ty = V->getType();
+    Type *NoVecTy = getShadowTyNoVec(Ty);
+    if (Ty == NoVecTy) return V;
+    return IRB.CreateBitCast(V, NoVecTy);
+  }
+
+  /// \brief Compute the shadow address that corresponds to a given application
+  /// address.
+  ///
+  /// Shadow = Addr & ~ShadowMask.
+ Value *getShadowPtr(Value *Addr, Type *ShadowTy, + IRBuilder<> &IRB) { + Value *ShadowLong = + IRB.CreateAnd(IRB.CreatePointerCast(Addr, MS.IntptrTy), + ConstantInt::get(MS.IntptrTy, ~MS.ShadowMask)); + return IRB.CreateIntToPtr(ShadowLong, PointerType::get(ShadowTy, 0)); + } + + /// \brief Compute the origin address that corresponds to a given application + /// address. + /// + /// OriginAddr = (ShadowAddr + OriginOffset) & ~3ULL + Value *getOriginPtr(Value *Addr, IRBuilder<> &IRB) { + Value *ShadowLong = + IRB.CreateAnd(IRB.CreatePointerCast(Addr, MS.IntptrTy), + ConstantInt::get(MS.IntptrTy, ~MS.ShadowMask)); + Value *Add = + IRB.CreateAdd(ShadowLong, + ConstantInt::get(MS.IntptrTy, MS.OriginOffset)); + Value *SecondAnd = + IRB.CreateAnd(Add, ConstantInt::get(MS.IntptrTy, ~3ULL)); + return IRB.CreateIntToPtr(SecondAnd, PointerType::get(IRB.getInt32Ty(), 0)); + } + + /// \brief Compute the shadow address for a given function argument. + /// + /// Shadow = ParamTLS+ArgOffset. + Value *getShadowPtrForArgument(Value *A, IRBuilder<> &IRB, + int ArgOffset) { + Value *Base = IRB.CreatePointerCast(MS.ParamTLS, MS.IntptrTy); + Base = IRB.CreateAdd(Base, ConstantInt::get(MS.IntptrTy, ArgOffset)); + return IRB.CreateIntToPtr(Base, PointerType::get(getShadowTy(A), 0), + "_msarg"); + } + + /// \brief Compute the origin address for a given function argument. + Value *getOriginPtrForArgument(Value *A, IRBuilder<> &IRB, + int ArgOffset) { + if (!MS.TrackOrigins) return 0; + Value *Base = IRB.CreatePointerCast(MS.ParamOriginTLS, MS.IntptrTy); + Base = IRB.CreateAdd(Base, ConstantInt::get(MS.IntptrTy, ArgOffset)); + return IRB.CreateIntToPtr(Base, PointerType::get(MS.OriginTy, 0), + "_msarg_o"); + } + + /// \brief Compute the shadow address for a retval. + Value *getShadowPtrForRetval(Value *A, IRBuilder<> &IRB) { + Value *Base = IRB.CreatePointerCast(MS.RetvalTLS, MS.IntptrTy); + return IRB.CreateIntToPtr(Base, PointerType::get(getShadowTy(A), 0), + "_msret"); + } + + /// \brief Compute the origin address for a retval. + Value *getOriginPtrForRetval(IRBuilder<> &IRB) { + // We keep a single origin for the entire retval. Might be too optimistic. + return MS.RetvalOriginTLS; + } + + /// \brief Set SV to be the shadow value for V. + void setShadow(Value *V, Value *SV) { + assert(!ShadowMap.count(V) && "Values may only have one shadow"); + ShadowMap[V] = SV; + } + + /// \brief Set Origin to be the origin value for V. + void setOrigin(Value *V, Value *Origin) { + if (!MS.TrackOrigins) return; + assert(!OriginMap.count(V) && "Values may only have one origin"); + DEBUG(dbgs() << "ORIGIN: " << *V << " ==> " << *Origin << "\n"); + OriginMap[V] = Origin; + } + + /// \brief Create a clean shadow value for a given value. + /// + /// Clean shadow (all zeroes) means all bits of the value are defined + /// (initialized). + Value *getCleanShadow(Value *V) { + Type *ShadowTy = getShadowTy(V); + if (!ShadowTy) + return 0; + return Constant::getNullValue(ShadowTy); + } + + /// \brief Create a dirty shadow of a given shadow type. + Constant *getPoisonedShadow(Type *ShadowTy) { + assert(ShadowTy); + if (isa<IntegerType>(ShadowTy) || isa<VectorType>(ShadowTy)) + return Constant::getAllOnesValue(ShadowTy); + StructType *ST = cast<StructType>(ShadowTy); + SmallVector<Constant *, 4> Vals; + for (unsigned i = 0, n = ST->getNumElements(); i < n; i++) + Vals.push_back(getPoisonedShadow(ST->getElementType(i))); + return ConstantStruct::get(ST, Vals); + } + + /// \brief Create a clean (zero) origin. 
+  Value *getCleanOrigin() {
+    return Constant::getNullValue(MS.OriginTy);
+  }
+
+  /// \brief Get the shadow value for a given Value.
+  ///
+  /// This function either returns the value set earlier with setShadow,
+  /// or extracts it from ParamTLS (for function arguments).
+  Value *getShadow(Value *V) {
+    if (Instruction *I = dyn_cast<Instruction>(V)) {
+      // For instructions the shadow is already stored in the map.
+      Value *Shadow = ShadowMap[V];
+      if (!Shadow) {
+        DEBUG(dbgs() << "No shadow: " << *V << "\n" << *(I->getParent()));
+        (void)I;
+        assert(Shadow && "No shadow for a value");
+      }
+      return Shadow;
+    }
+    if (UndefValue *U = dyn_cast<UndefValue>(V)) {
+      Value *AllOnes = getPoisonedShadow(getShadowTy(V));
+      DEBUG(dbgs() << "Undef: " << *U << " ==> " << *AllOnes << "\n");
+      (void)U;
+      return AllOnes;
+    }
+    if (Argument *A = dyn_cast<Argument>(V)) {
+      // For arguments we compute the shadow on demand and store it in the map.
+      Value **ShadowPtr = &ShadowMap[V];
+      if (*ShadowPtr)
+        return *ShadowPtr;
+      Function *F = A->getParent();
+      IRBuilder<> EntryIRB(F->getEntryBlock().getFirstNonPHI());
+      unsigned ArgOffset = 0;
+      for (Function::arg_iterator AI = F->arg_begin(), AE = F->arg_end();
+           AI != AE; ++AI) {
+        if (!AI->getType()->isSized()) {
+          DEBUG(dbgs() << "Arg is not sized\n");
+          continue;
+        }
+        unsigned Size = AI->hasByValAttr()
+          ? MS.TD->getTypeAllocSize(AI->getType()->getPointerElementType())
+          : MS.TD->getTypeAllocSize(AI->getType());
+        if (A == AI) {
+          Value *Base = getShadowPtrForArgument(AI, EntryIRB, ArgOffset);
+          if (AI->hasByValAttr()) {
+            // The ByVal pointer itself has clean shadow. We copy the actual
+            // argument shadow to the underlying memory.
+            Value *Cpy = EntryIRB.CreateMemCpy(
+              getShadowPtr(V, EntryIRB.getInt8Ty(), EntryIRB),
+              Base, Size, AI->getParamAlignment());
+            DEBUG(dbgs() << "  ByValCpy: " << *Cpy << "\n");
+            (void)Cpy;
+            *ShadowPtr = getCleanShadow(V);
+          } else {
+            *ShadowPtr = EntryIRB.CreateLoad(Base);
+          }
+          DEBUG(dbgs() << "  ARG:  " << *AI << " ==> " <<
+                **ShadowPtr << "\n");
+          if (MS.TrackOrigins) {
+            Value* OriginPtr = getOriginPtrForArgument(AI, EntryIRB, ArgOffset);
+            setOrigin(A, EntryIRB.CreateLoad(OriginPtr));
+          }
+        }
+        ArgOffset += DataLayout::RoundUpAlignment(Size, 8);
+      }
+      assert(*ShadowPtr && "Could not find shadow for an argument");
+      return *ShadowPtr;
+    }
+    // For everything else the shadow is zero.
+    return getCleanShadow(V);
+  }
+
+  /// \brief Get the shadow for the i-th argument of the instruction I.
+  Value *getShadow(Instruction *I, int i) {
+    return getShadow(I->getOperand(i));
+  }
+
+  /// \brief Get the origin for a value.
+  Value *getOrigin(Value *V) {
+    if (!MS.TrackOrigins) return 0;
+    if (isa<Instruction>(V) || isa<Argument>(V)) {
+      Value *Origin = OriginMap[V];
+      if (!Origin) {
+        DEBUG(dbgs() << "NO ORIGIN: " << *V << "\n");
+        Origin = getCleanOrigin();
+      }
+      return Origin;
+    }
+    return getCleanOrigin();
+  }
+
+  /// \brief Get the origin for the i-th argument of the instruction I.
+  Value *getOrigin(Instruction *I, int i) {
+    return getOrigin(I->getOperand(i));
+  }
+
+  /// \brief Remember the place where a shadow check should be inserted.
+  ///
+  /// This location will be later instrumented with a check that will print a
+  /// UMR warning at runtime if the value is not fully defined.
+ void insertCheck(Value *Val, Instruction *OrigIns) { + assert(Val); + if (!InsertChecks) return; + Instruction *Shadow = dyn_cast_or_null<Instruction>(getShadow(Val)); + if (!Shadow) return; +#ifndef NDEBUG + Type *ShadowTy = Shadow->getType(); + assert((isa<IntegerType>(ShadowTy) || isa<VectorType>(ShadowTy)) && + "Can only insert checks for integer and vector shadow types"); +#endif + Instruction *Origin = dyn_cast_or_null<Instruction>(getOrigin(Val)); + InstrumentationList.push_back( + ShadowOriginAndInsertPoint(Shadow, Origin, OrigIns)); + } + + // ------------------- Visitors. + + /// \brief Instrument LoadInst + /// + /// Loads the corresponding shadow and (optionally) origin. + /// Optionally, checks that the load address is fully defined. + void visitLoadInst(LoadInst &I) { + assert(I.getType()->isSized() && "Load type must have size"); + IRBuilder<> IRB(&I); + Type *ShadowTy = getShadowTy(&I); + Value *Addr = I.getPointerOperand(); + Value *ShadowPtr = getShadowPtr(Addr, ShadowTy, IRB); + setShadow(&I, IRB.CreateAlignedLoad(ShadowPtr, I.getAlignment(), "_msld")); + + if (ClCheckAccessAddress) + insertCheck(I.getPointerOperand(), &I); + + if (MS.TrackOrigins) { + unsigned Alignment = std::max(kMinOriginAlignment, I.getAlignment()); + setOrigin(&I, IRB.CreateAlignedLoad(getOriginPtr(Addr, IRB), Alignment)); + } + } + + /// \brief Instrument StoreInst + /// + /// Stores the corresponding shadow and (optionally) origin. + /// Optionally, checks that the store address is fully defined. + /// Volatile stores check that the value being stored is fully defined. + void visitStoreInst(StoreInst &I) { + StoreList.push_back(&I); + } + + // Vector manipulation. + void visitExtractElementInst(ExtractElementInst &I) { + insertCheck(I.getOperand(1), &I); + IRBuilder<> IRB(&I); + setShadow(&I, IRB.CreateExtractElement(getShadow(&I, 0), I.getOperand(1), + "_msprop")); + setOrigin(&I, getOrigin(&I, 0)); + } + + void visitInsertElementInst(InsertElementInst &I) { + insertCheck(I.getOperand(2), &I); + IRBuilder<> IRB(&I); + setShadow(&I, IRB.CreateInsertElement(getShadow(&I, 0), getShadow(&I, 1), + I.getOperand(2), "_msprop")); + setOriginForNaryOp(I); + } + + void visitShuffleVectorInst(ShuffleVectorInst &I) { + insertCheck(I.getOperand(2), &I); + IRBuilder<> IRB(&I); + setShadow(&I, IRB.CreateShuffleVector(getShadow(&I, 0), getShadow(&I, 1), + I.getOperand(2), "_msprop")); + setOriginForNaryOp(I); + } + + // Casts. 
+  void visitSExtInst(SExtInst &I) {
+    IRBuilder<> IRB(&I);
+    setShadow(&I, IRB.CreateSExt(getShadow(&I, 0), I.getType(), "_msprop"));
+    setOrigin(&I, getOrigin(&I, 0));
+  }
+
+  void visitZExtInst(ZExtInst &I) {
+    IRBuilder<> IRB(&I);
+    setShadow(&I, IRB.CreateZExt(getShadow(&I, 0), I.getType(), "_msprop"));
+    setOrigin(&I, getOrigin(&I, 0));
+  }
+
+  void visitTruncInst(TruncInst &I) {
+    IRBuilder<> IRB(&I);
+    setShadow(&I, IRB.CreateTrunc(getShadow(&I, 0), I.getType(), "_msprop"));
+    setOrigin(&I, getOrigin(&I, 0));
+  }
+
+  void visitBitCastInst(BitCastInst &I) {
+    IRBuilder<> IRB(&I);
+    setShadow(&I, IRB.CreateBitCast(getShadow(&I, 0), getShadowTy(&I)));
+    setOrigin(&I, getOrigin(&I, 0));
+  }
+
+  void visitPtrToIntInst(PtrToIntInst &I) {
+    IRBuilder<> IRB(&I);
+    setShadow(&I, IRB.CreateIntCast(getShadow(&I, 0), getShadowTy(&I), false,
+                                    "_msprop_ptrtoint"));
+    setOrigin(&I, getOrigin(&I, 0));
+  }
+
+  void visitIntToPtrInst(IntToPtrInst &I) {
+    IRBuilder<> IRB(&I);
+    setShadow(&I, IRB.CreateIntCast(getShadow(&I, 0), getShadowTy(&I), false,
+                                    "_msprop_inttoptr"));
+    setOrigin(&I, getOrigin(&I, 0));
+  }
+
+  void visitFPToSIInst(CastInst& I) { handleShadowOr(I); }
+  void visitFPToUIInst(CastInst& I) { handleShadowOr(I); }
+  void visitSIToFPInst(CastInst& I) { handleShadowOr(I); }
+  void visitUIToFPInst(CastInst& I) { handleShadowOr(I); }
+  void visitFPExtInst(CastInst& I) { handleShadowOr(I); }
+  void visitFPTruncInst(CastInst& I) { handleShadowOr(I); }
+
+  /// \brief Propagate shadow for bitwise AND.
+  ///
+  /// This code is exact, i.e. if, for example, a bit in the left argument
+  /// is defined and 0, then neither the value nor the definedness of the
+  /// corresponding bit in B affects the resulting shadow.
+  void visitAnd(BinaryOperator &I) {
+    IRBuilder<> IRB(&I);
+    //  "And" of 0 and a poisoned value results in unpoisoned value.
+    //  1&1 => 1;  0&1 => 0;  p&1 => p;
+    //  1&0 => 0;  0&0 => 0;  p&0 => 0;
+    //  1&p => p;  0&p => 0;  p&p => p;
+    //  S = (S1 & S2) | (V1 & S2) | (S1 & V2)
+    Value *S1 = getShadow(&I, 0);
+    Value *S2 = getShadow(&I, 1);
+    Value *V1 = I.getOperand(0);
+    Value *V2 = I.getOperand(1);
+    if (V1->getType() != S1->getType()) {
+      V1 = IRB.CreateIntCast(V1, S1->getType(), false);
+      V2 = IRB.CreateIntCast(V2, S2->getType(), false);
+    }
+    Value *S1S2 = IRB.CreateAnd(S1, S2);
+    Value *V1S2 = IRB.CreateAnd(V1, S2);
+    Value *S1V2 = IRB.CreateAnd(S1, V2);
+    setShadow(&I, IRB.CreateOr(S1S2, IRB.CreateOr(V1S2, S1V2)));
+    setOriginForNaryOp(I);
+  }
+
+  void visitOr(BinaryOperator &I) {
+    IRBuilder<> IRB(&I);
+    //  "Or" of 1 and a poisoned value results in unpoisoned value.
+    //  1|1 => 1;  0|1 => 1;  p|1 => 1;
+    //  1|0 => 1;  0|0 => 0;  p|0 => p;
+    //  1|p => 1;  0|p => p;  p|p => p;
+    //  S = (S1 & S2) | (~V1 & S2) | (S1 & ~V2)
+    Value *S1 = getShadow(&I, 0);
+    Value *S2 = getShadow(&I, 1);
+    Value *V1 = IRB.CreateNot(I.getOperand(0));
+    Value *V2 = IRB.CreateNot(I.getOperand(1));
+    if (V1->getType() != S1->getType()) {
+      V1 = IRB.CreateIntCast(V1, S1->getType(), false);
+      V2 = IRB.CreateIntCast(V2, S2->getType(), false);
+    }
+    Value *S1S2 = IRB.CreateAnd(S1, S2);
+    Value *V1S2 = IRB.CreateAnd(V1, S2);
+    Value *S1V2 = IRB.CreateAnd(S1, V2);
+    setShadow(&I, IRB.CreateOr(S1S2, IRB.CreateOr(V1S2, S1V2)));
+    setOriginForNaryOp(I);
+  }
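// Editor's note (not part of this patch): a worked bit-level example of the
// visitAnd() rule above; the helper names are illustrative only.
#include <cassert>
inline unsigned and_shadow(unsigned V1, unsigned S1, unsigned V2, unsigned S2) {
  return (S1 & S2) | (V1 & S2) | (S1 & V2);
}
inline void and_shadow_demo() {
  // Bit 0: the left operand is a defined 0, the right bit is poisoned.
  // The result bit is a known 0, so its shadow is clean.
  assert(and_shadow(/*V1=*/0, /*S1=*/0, /*V2=*/1, /*S2=*/1) == 0);
  // Bit 0: the left operand is a defined 1, so the poisoned right bit
  // decides the result, and the shadow stays poisoned.
  assert(and_shadow(/*V1=*/1, /*S1=*/0, /*V2=*/1, /*S2=*/1) == 1);
}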
+  /// \brief Default propagation of shadow and/or origin.
+  ///
+  /// This class implements the general case of shadow propagation, used in all
+  /// cases where we don't know and/or don't care about what the operation
+  /// actually does. It converts all input shadow values to a common type
+  /// (extending or truncating as necessary), and bitwise ORs them.
+  ///
+  /// This is much cheaper than inserting checks (i.e. requiring inputs to be
+  /// fully initialized), and less prone to false positives.
+  ///
+  /// This class also implements the general case of origin propagation. For an
+  /// Nary operation, the result origin is set to the origin of an argument
+  /// that is not entirely initialized. If there is more than one such
+  /// argument, the rightmost of them is picked. It does not matter which one
+  /// is picked if all arguments are initialized.
+  template <bool CombineShadow>
+  class Combiner {
+    Value *Shadow;
+    Value *Origin;
+    IRBuilder<> &IRB;
+    MemorySanitizerVisitor *MSV;
+
+   public:
+    Combiner(MemorySanitizerVisitor *MSV, IRBuilder<> &IRB) :
+      Shadow(0), Origin(0), IRB(IRB), MSV(MSV) {}
+
+    /// \brief Add a pair of shadow and origin values to the mix.
+    Combiner &Add(Value *OpShadow, Value *OpOrigin) {
+      if (CombineShadow) {
+        assert(OpShadow);
+        if (!Shadow)
+          Shadow = OpShadow;
+        else {
+          OpShadow = MSV->CreateShadowCast(IRB, OpShadow, Shadow->getType());
+          Shadow = IRB.CreateOr(Shadow, OpShadow, "_msprop");
+        }
+      }
+
+      if (MSV->MS.TrackOrigins) {
+        assert(OpOrigin);
+        if (!Origin) {
+          Origin = OpOrigin;
+        } else {
+          Value *FlatShadow = MSV->convertToShadowTyNoVec(OpShadow, IRB);
+          Value *Cond = IRB.CreateICmpNE(FlatShadow,
+                                         MSV->getCleanShadow(FlatShadow));
+          Origin = IRB.CreateSelect(Cond, OpOrigin, Origin);
+        }
+      }
+      return *this;
+    }
+
+    /// \brief Add an application value to the mix.
+    Combiner &Add(Value *V) {
+      Value *OpShadow = MSV->getShadow(V);
+      Value *OpOrigin = MSV->MS.TrackOrigins ? MSV->getOrigin(V) : 0;
+      return Add(OpShadow, OpOrigin);
+    }
+
+    /// \brief Set the current combined values as the given instruction's
+    /// shadow and origin.
+    void Done(Instruction *I) {
+      if (CombineShadow) {
+        assert(Shadow);
+        Shadow = MSV->CreateShadowCast(IRB, Shadow, MSV->getShadowTy(I));
+        MSV->setShadow(I, Shadow);
+      }
+      if (MSV->MS.TrackOrigins) {
+        assert(Origin);
+        MSV->setOrigin(I, Origin);
+      }
+    }
+  };
+
+  typedef Combiner<true> ShadowAndOriginCombiner;
+  typedef Combiner<false> OriginCombiner;
+
+  /// \brief Propagate origin for an arbitrary operation.
+  void setOriginForNaryOp(Instruction &I) {
+    if (!MS.TrackOrigins) return;
+    IRBuilder<> IRB(&I);
+    OriginCombiner OC(this, IRB);
+    for (Instruction::op_iterator OI = I.op_begin(); OI != I.op_end(); ++OI)
+      OC.Add(OI->get());
+    OC.Done(&I);
+  }
+
+  size_t VectorOrPrimitiveTypeSizeInBits(Type *Ty) {
+    assert(!(Ty->isVectorTy() && Ty->getScalarType()->isPointerTy()) &&
+           "Vector of pointers is not a valid shadow type");
+    return Ty->isVectorTy() ?
+      Ty->getVectorNumElements() * Ty->getScalarSizeInBits() :
+      Ty->getPrimitiveSizeInBits();
+  }
+
+  /// \brief Cast between two shadow types, extending or truncating as
+  /// necessary.
+  Value *CreateShadowCast(IRBuilder<> &IRB, Value *V, Type *dstTy) {
+    Type *srcTy = V->getType();
+    if (dstTy->isIntegerTy() && srcTy->isIntegerTy())
+      return IRB.CreateIntCast(V, dstTy, false);
+    if (dstTy->isVectorTy() && srcTy->isVectorTy() &&
+        dstTy->getVectorNumElements() == srcTy->getVectorNumElements())
+      return IRB.CreateIntCast(V, dstTy, false);
+    size_t srcSizeInBits = VectorOrPrimitiveTypeSizeInBits(srcTy);
+    size_t dstSizeInBits = VectorOrPrimitiveTypeSizeInBits(dstTy);
+    Value *V1 = IRB.CreateBitCast(V, Type::getIntNTy(*MS.C, srcSizeInBits));
+    Value *V2 =
+      IRB.CreateIntCast(V1, Type::getIntNTy(*MS.C, dstSizeInBits), false);
+    return IRB.CreateBitCast(V2, dstTy);
+    // TODO: handle struct types.
+  }
+
+  /// \brief Propagate shadow for an arbitrary operation.
+  void handleShadowOr(Instruction &I) {
+    IRBuilder<> IRB(&I);
+    ShadowAndOriginCombiner SC(this, IRB);
+    for (Instruction::op_iterator OI = I.op_begin(); OI != I.op_end(); ++OI)
+      SC.Add(OI->get());
+    SC.Done(&I);
+  }
+
+  void visitFAdd(BinaryOperator &I) { handleShadowOr(I); }
+  void visitFSub(BinaryOperator &I) { handleShadowOr(I); }
+  void visitFMul(BinaryOperator &I) { handleShadowOr(I); }
+  void visitAdd(BinaryOperator &I) { handleShadowOr(I); }
+  void visitSub(BinaryOperator &I) { handleShadowOr(I); }
+  void visitXor(BinaryOperator &I) { handleShadowOr(I); }
+  void visitMul(BinaryOperator &I) { handleShadowOr(I); }
+
+  void handleDiv(Instruction &I) {
+    IRBuilder<> IRB(&I);
+    // Strict on the second argument.
+    insertCheck(I.getOperand(1), &I);
+    setShadow(&I, getShadow(&I, 0));
+    setOrigin(&I, getOrigin(&I, 0));
+  }
+
+  void visitUDiv(BinaryOperator &I) { handleDiv(I); }
+  void visitSDiv(BinaryOperator &I) { handleDiv(I); }
+  void visitFDiv(BinaryOperator &I) { handleDiv(I); }
+  void visitURem(BinaryOperator &I) { handleDiv(I); }
+  void visitSRem(BinaryOperator &I) { handleDiv(I); }
+  void visitFRem(BinaryOperator &I) { handleDiv(I); }
+
+  /// \brief Instrument == and != comparisons.
+  ///
+  /// Sometimes the comparison result is known even if some of the bits of the
+  /// arguments are not.
+  void handleEqualityComparison(ICmpInst &I) {
+    IRBuilder<> IRB(&I);
+    Value *A = I.getOperand(0);
+    Value *B = I.getOperand(1);
+    Value *Sa = getShadow(A);
+    Value *Sb = getShadow(B);
+    if (A->getType()->isPointerTy())
+      A = IRB.CreatePointerCast(A, MS.IntptrTy);
+    if (B->getType()->isPointerTy())
+      B = IRB.CreatePointerCast(B, MS.IntptrTy);
+    // A == B  <==>  (C = A^B) == 0
+    // A != B  <==>  (C = A^B) != 0
+    // Sc = Sa | Sb
+    Value *C = IRB.CreateXor(A, B);
+    Value *Sc = IRB.CreateOr(Sa, Sb);
+    // Now we are dealing with the i = (C == 0) comparison (C != 0 is handled
+    // the same way). The result is defined if one of the following is true:
+    //   * there is a defined 1 bit in C
+    //   * C is fully defined
+    // Si = !(C & ~Sc) && Sc
+    Value *Zero = Constant::getNullValue(Sc->getType());
+    Value *MinusOne = Constant::getAllOnesValue(Sc->getType());
+    Value *Si =
+      IRB.CreateAnd(IRB.CreateICmpNE(Sc, Zero),
+                    IRB.CreateICmpEQ(
+                      IRB.CreateAnd(IRB.CreateXor(Sc, MinusOne), C), Zero));
+    Si->setName("_msprop_icmp");
+    setShadow(&I, Si);
+    setOriginForNaryOp(I);
+  }
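// Editor's note (not part of this patch): a worked example of the
// handleEqualityComparison() rule above; helper names are illustrative. The
// predicate shadow Si is poisoned only when no fully-defined bit pair
// already decides the comparison.
#include <cassert>
inline bool icmp_eq_shadow(unsigned A, unsigned Sa, unsigned B, unsigned Sb) {
  unsigned C = A ^ B;     // difference bits (only meaningful where defined)
  unsigned Sc = Sa | Sb;  // bits whose difference is unknown
  return (Sc != 0) && ((~Sc & C) == 0);  // Si: shadow of the i1 result
}
inline void icmp_eq_shadow_demo() {
  // All defined bits match and a poisoned bit remains: result is unknown.
  assert(icmp_eq_shadow(/*A=*/0x1, /*Sa=*/0x2, /*B=*/0x3, /*Sb=*/0x0) == true);
  // A defined bit already differs: the result is a known "not equal",
  // whatever the poisoned bit holds, so the result shadow is clean.
  assert(icmp_eq_shadow(/*A=*/0x0, /*Sa=*/0x2, /*B=*/0x1, /*Sb=*/0x0) == false);
}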
+  void handleSignedRelationalComparison(ICmpInst &I) {
+    Constant *constOp0 = dyn_cast<Constant>(I.getOperand(0));
+    Constant *constOp1 = dyn_cast<Constant>(I.getOperand(1));
+    Value* op = NULL;
+    CmpInst::Predicate pre = I.getPredicate();
+    if (constOp0 && constOp0->isNullValue() &&
+        (pre == CmpInst::ICMP_SGT || pre == CmpInst::ICMP_SLE)) {
+      op = I.getOperand(1);
+    } else if (constOp1 && constOp1->isNullValue() &&
+               (pre == CmpInst::ICMP_SLT || pre == CmpInst::ICMP_SGE)) {
+      op = I.getOperand(0);
+    }
+    if (op) {
+      IRBuilder<> IRB(&I);
+      Value* Shadow =
+        IRB.CreateICmpSLT(getShadow(op), getCleanShadow(op), "_msprop_icmpslt");
+      setShadow(&I, Shadow);
+      setOrigin(&I, getOrigin(op));
+    } else {
+      handleShadowOr(I);
+    }
+  }
+
+  void visitICmpInst(ICmpInst &I) {
+    if (ClHandleICmp && I.isEquality())
+      handleEqualityComparison(I);
+    else if (ClHandleICmp && I.isSigned() && I.isRelational())
+      handleSignedRelationalComparison(I);
+    else
+      handleShadowOr(I);
+  }
+
+  void visitFCmpInst(FCmpInst &I) {
+    handleShadowOr(I);
+  }
+
+  void handleShift(BinaryOperator &I) {
+    IRBuilder<> IRB(&I);
+    // If any of the S2 bits are poisoned, the whole thing is poisoned.
+    // Otherwise perform the same shift on S1.
+    Value *S1 = getShadow(&I, 0);
+    Value *S2 = getShadow(&I, 1);
+    Value *S2Conv = IRB.CreateSExt(IRB.CreateICmpNE(S2, getCleanShadow(S2)),
+                                   S2->getType());
+    Value *V2 = I.getOperand(1);
+    Value *Shift = IRB.CreateBinOp(I.getOpcode(), S1, V2);
+    setShadow(&I, IRB.CreateOr(Shift, S2Conv));
+    setOriginForNaryOp(I);
+  }
+
+  void visitShl(BinaryOperator &I) { handleShift(I); }
+  void visitAShr(BinaryOperator &I) { handleShift(I); }
+  void visitLShr(BinaryOperator &I) { handleShift(I); }
+
+  /// \brief Instrument llvm.memmove
+  ///
+  /// At this point we don't know if llvm.memmove will be inlined or not.
+  /// If we don't instrument it and it gets inlined,
+  /// our interceptor will not kick in and we will lose the memmove.
+  /// If we instrument the call here, but it does not get inlined,
+  /// we will memmove the shadow twice, which is bad in the case
+  /// of overlapping regions. So, we simply lower the intrinsic to a call.
+  ///
+  /// A similar situation exists for memcpy and memset.
+  void visitMemMoveInst(MemMoveInst &I) {
+    IRBuilder<> IRB(&I);
+    IRB.CreateCall3(
+      MS.MemmoveFn,
+      IRB.CreatePointerCast(I.getArgOperand(0), IRB.getInt8PtrTy()),
+      IRB.CreatePointerCast(I.getArgOperand(1), IRB.getInt8PtrTy()),
+      IRB.CreateIntCast(I.getArgOperand(2), MS.IntptrTy, false));
+    I.eraseFromParent();
+  }
+
+  // Similar to memmove: avoid copying shadow twice.
+  // This is somewhat unfortunate as it may slow down small constant memcpys.
+  // FIXME: consider doing manual inline for small constant sizes and proper
+  // alignment.
+  void visitMemCpyInst(MemCpyInst &I) {
+    IRBuilder<> IRB(&I);
+    IRB.CreateCall3(
+      MS.MemcpyFn,
+      IRB.CreatePointerCast(I.getArgOperand(0), IRB.getInt8PtrTy()),
+      IRB.CreatePointerCast(I.getArgOperand(1), IRB.getInt8PtrTy()),
+      IRB.CreateIntCast(I.getArgOperand(2), MS.IntptrTy, false));
+    I.eraseFromParent();
+  }
+
+  // Same as memcpy.
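As an aside on handleShift above, its rule is easy to model on scalars: if any bit of the shift amount is uninitialized the whole result is poisoned; otherwise the first operand's shadow is shifted by the (fully defined) amount. An illustrative standalone sketch:

#include <cassert>
#include <cstdint>

// S1: shadow of the value being shifted; S2: shadow of the shift amount.
uint8_t shlShadow(uint8_t S1, uint8_t V2, uint8_t S2) {
  // Mirrors SExt(S2 != 0) OR (S1 << V2): sign-extending an i1 yields
  // all-ones or all-zeros, so a poisoned amount poisons every result bit.
  uint8_t S2Conv = (S2 != 0) ? 0xFF : 0x00;
  return static_cast<uint8_t>(S1 << V2) | S2Conv;
}

int main() {
  assert(shlShadow(0x01, 3, 0x00) == 0x08); // shadow shifts with the value
  assert(shlShadow(0x00, 3, 0x01) == 0xFF); // unknown amount: all poisoned
  return 0;
}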
+  void visitMemSetInst(MemSetInst &I) {
+    IRBuilder<> IRB(&I);
+    IRB.CreateCall3(
+      MS.MemsetFn,
+      IRB.CreatePointerCast(I.getArgOperand(0), IRB.getInt8PtrTy()),
+      IRB.CreateIntCast(I.getArgOperand(1), IRB.getInt32Ty(), false),
+      IRB.CreateIntCast(I.getArgOperand(2), MS.IntptrTy, false));
+    I.eraseFromParent();
+  }
+
+  void visitVAStartInst(VAStartInst &I) {
+    VAHelper->visitVAStartInst(I);
+  }
+
+  void visitVACopyInst(VACopyInst &I) {
+    VAHelper->visitVACopyInst(I);
+  }
+
+  enum IntrinsicKind {
+    IK_DoesNotAccessMemory,
+    IK_OnlyReadsMemory,
+    IK_WritesMemory
+  };
+
+  static IntrinsicKind getIntrinsicKind(Intrinsic::ID iid) {
+    const int DoesNotAccessMemory = IK_DoesNotAccessMemory;
+    const int OnlyReadsArgumentPointees = IK_OnlyReadsMemory;
+    const int OnlyReadsMemory = IK_OnlyReadsMemory;
+    const int OnlyAccessesArgumentPointees = IK_WritesMemory;
+    const int UnknownModRefBehavior = IK_WritesMemory;
+#define GET_INTRINSIC_MODREF_BEHAVIOR
+#define ModRefBehavior IntrinsicKind
+#include "llvm/IR/Intrinsics.gen"
+#undef ModRefBehavior
+#undef GET_INTRINSIC_MODREF_BEHAVIOR
+  }
+
+  /// \brief Handle vector store-like intrinsics.
+  ///
+  /// Instrument intrinsics that look like a simple SIMD store: writes memory,
+  /// has 1 pointer argument and 1 vector argument, returns void.
+  bool handleVectorStoreIntrinsic(IntrinsicInst &I) {
+    IRBuilder<> IRB(&I);
+    Value* Addr = I.getArgOperand(0);
+    Value *Shadow = getShadow(&I, 1);
+    Value *ShadowPtr = getShadowPtr(Addr, Shadow->getType(), IRB);
+
+    // We don't know the pointer alignment (could be unaligned SSE store!).
+    // Have to assume the worst case.
+    IRB.CreateAlignedStore(Shadow, ShadowPtr, 1);
+
+    if (ClCheckAccessAddress)
+      insertCheck(Addr, &I);
+
+    // FIXME: use ClStoreCleanOrigin
+    // FIXME: factor out common code from materializeStores
+    if (MS.TrackOrigins)
+      IRB.CreateStore(getOrigin(&I, 1), getOriginPtr(Addr, IRB));
+    return true;
+  }
+
+  /// \brief Handle vector load-like intrinsics.
+  ///
+  /// Instrument intrinsics that look like a simple SIMD load: reads memory,
+  /// has 1 pointer argument, returns a vector.
+  bool handleVectorLoadIntrinsic(IntrinsicInst &I) {
+    IRBuilder<> IRB(&I);
+    Value *Addr = I.getArgOperand(0);
+
+    Type *ShadowTy = getShadowTy(&I);
+    Value *ShadowPtr = getShadowPtr(Addr, ShadowTy, IRB);
+    // We don't know the pointer alignment (could be unaligned SSE load!).
+    // Have to assume the worst case.
+    setShadow(&I, IRB.CreateAlignedLoad(ShadowPtr, 1, "_msld"));
+
+    if (ClCheckAccessAddress)
+      insertCheck(Addr, &I);
+
+    if (MS.TrackOrigins)
+      setOrigin(&I, IRB.CreateLoad(getOriginPtr(Addr, IRB)));
+    return true;
+  }
+
+  /// \brief Handle (SIMD arithmetic)-like intrinsics.
+  ///
+  /// Instrument intrinsics with any number of arguments of the same type,
+  /// equal to the return type. The type should be simple (no aggregates or
+  /// pointers; vectors are fine).
+  /// Caller guarantees that this intrinsic does not access memory.
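The store/load handlers above mirror application memory in shadow memory at a computed shadow address. A toy byte-granular model of that mirroring (an illustrative sketch that uses a map in place of the pass's real address transformation):

#include <cassert>
#include <cstdint>
#include <cstring>
#include <unordered_map>

// One shadow byte per application byte; missing entries mean "clean".
static std::unordered_map<uintptr_t, uint8_t> gShadowMem;

// Like handleVectorStoreIntrinsic: the stored value's shadow is written
// to shadow memory alongside the application store.
void instrumentedStore(void *Addr, const void *Val,
                       const uint8_t *ValShadow, size_t N) {
  std::memcpy(Addr, Val, N);
  for (size_t i = 0; i != N; ++i)
    gShadowMem[reinterpret_cast<uintptr_t>(Addr) + i] = ValShadow[i];
}

// Like handleVectorLoadIntrinsic: the loaded value's shadow is read back
// from shadow memory.
void instrumentedLoad(const void *Addr, void *Val,
                      uint8_t *ValShadow, size_t N) {
  std::memcpy(Val, Addr, N);
  for (size_t i = 0; i != N; ++i)
    ValShadow[i] = gShadowMem[reinterpret_cast<uintptr_t>(Addr) + i];
}

int main() {
  uint8_t buf[4] = {};
  uint8_t v[4] = {1, 2, 3, 4};
  uint8_t vs[4] = {0, 0xFF, 0, 0};  // byte 1 is uninitialized
  instrumentedStore(buf, v, vs, 4);

  uint8_t out[4], outS[4];
  instrumentedLoad(buf, out, outS, 4);
  assert(outS[1] == 0xFF && outS[0] == 0);  // poison survives the round trip
  return 0;
}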
+  bool maybeHandleSimpleNomemIntrinsic(IntrinsicInst &I) {
+    Type *RetTy = I.getType();
+    if (!(RetTy->isIntOrIntVectorTy() ||
+          RetTy->isFPOrFPVectorTy() ||
+          RetTy->isX86_MMXTy()))
+      return false;
+
+    unsigned NumArgOperands = I.getNumArgOperands();
+
+    for (unsigned i = 0; i < NumArgOperands; ++i) {
+      Type *Ty = I.getArgOperand(i)->getType();
+      if (Ty != RetTy)
+        return false;
+    }
+
+    IRBuilder<> IRB(&I);
+    ShadowAndOriginCombiner SC(this, IRB);
+    for (unsigned i = 0; i < NumArgOperands; ++i)
+      SC.Add(I.getArgOperand(i));
+    SC.Done(&I);
+
+    return true;
+  }
+
+  /// \brief Heuristically instrument unknown intrinsics.
+  ///
+  /// The main purpose of this code is to do something reasonable with all
+  /// random intrinsics we might encounter, most importantly SIMD intrinsics.
+  /// We recognize several classes of intrinsics by their argument types and
+  /// ModRefBehavior and apply special instrumentation when we are reasonably
+  /// sure that we know what the intrinsic does.
+  ///
+  /// We special-case intrinsics where this approach fails. See llvm.bswap
+  /// handling as an example of that.
+  bool handleUnknownIntrinsic(IntrinsicInst &I) {
+    unsigned NumArgOperands = I.getNumArgOperands();
+    if (NumArgOperands == 0)
+      return false;
+
+    Intrinsic::ID iid = I.getIntrinsicID();
+    IntrinsicKind IK = getIntrinsicKind(iid);
+    bool OnlyReadsMemory = IK == IK_OnlyReadsMemory;
+    bool WritesMemory = IK == IK_WritesMemory;
+    assert(!(OnlyReadsMemory && WritesMemory));
+
+    if (NumArgOperands == 2 &&
+        I.getArgOperand(0)->getType()->isPointerTy() &&
+        I.getArgOperand(1)->getType()->isVectorTy() &&
+        I.getType()->isVoidTy() &&
+        WritesMemory) {
+      // This looks like a vector store.
+      return handleVectorStoreIntrinsic(I);
+    }
+
+    if (NumArgOperands == 1 &&
+        I.getArgOperand(0)->getType()->isPointerTy() &&
+        I.getType()->isVectorTy() &&
+        OnlyReadsMemory) {
+      // This looks like a vector load.
+      return handleVectorLoadIntrinsic(I);
+    }
+
+    if (!OnlyReadsMemory && !WritesMemory)
+      if (maybeHandleSimpleNomemIntrinsic(I))
+        return true;
+
+    // FIXME: detect and handle SSE maskstore/maskload
+    return false;
+  }
+
+  void handleBswap(IntrinsicInst &I) {
+    IRBuilder<> IRB(&I);
+    Value *Op = I.getArgOperand(0);
+    Type *OpType = Op->getType();
+    Function *BswapFunc = Intrinsic::getDeclaration(
+      F.getParent(), Intrinsic::bswap, ArrayRef<Type*>(&OpType, 1));
+    setShadow(&I, IRB.CreateCall(BswapFunc, getShadow(Op)));
+    setOrigin(&I, getOrigin(Op));
+  }
+
+  void visitIntrinsicInst(IntrinsicInst &I) {
+    switch (I.getIntrinsicID()) {
+    case llvm::Intrinsic::bswap:
+      handleBswap(I);
+      break;
+    default:
+      if (!handleUnknownIntrinsic(I))
+        visitInstruction(I);
+      break;
+    }
+  }
+
+  void visitCallSite(CallSite CS) {
+    Instruction &I = *CS.getInstruction();
+    assert((CS.isCall() || CS.isInvoke()) && "Unknown type of CallSite");
+    if (CS.isCall()) {
+      CallInst *Call = cast<CallInst>(&I);
+
+      // For inline asm, do the usual thing: check argument shadow and mark all
+      // outputs as clean. Note that any side effects of the inline asm that are
+      // not immediately visible in its constraints are not handled.
+      if (Call->isInlineAsm()) {
+        visitInstruction(I);
+        return;
+      }
+
+      // Allow only tail calls with the same types, otherwise
+      // we may have a false positive: shadow for a non-void RetVal
+      // will get propagated to a void RetVal.
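handleBswap above is a good example of a special case: byte-swapping merely permutes bytes, so the correct shadow of the result is the shadow byte-swapped the same way. A quick standalone check of that identity (__builtin_bswap32 is a GCC/Clang builtin):

#include <cassert>
#include <cstdint>

int main() {
  uint32_t shadow = 0x000000FF;  // lowest byte of the operand uninitialized
  // After bswap the uninitialized byte moves to the top, so the shadow of
  // bswap(x) is bswap(shadow(x)), which is exactly what handleBswap emits.
  uint32_t swappedShadow = __builtin_bswap32(shadow);
  assert(swappedShadow == 0xFF000000);
  return 0;
}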
+      if (Call->isTailCall() && Call->getType() != Call->getParent()->getType())
+        Call->setTailCall(false);
+
+      assert(!isa<IntrinsicInst>(&I) && "intrinsics are handled elsewhere");
+
+      // We are going to insert code that relies on the fact that the callee
+      // will become a non-readonly function after it is instrumented by us. To
+      // prevent this code from being optimized out, mark that function
+      // non-readonly in advance.
+      if (Function *Func = Call->getCalledFunction()) {
+        // Clear out readonly/readnone attributes.
+        AttrBuilder B;
+        B.addAttribute(Attribute::ReadOnly)
+          .addAttribute(Attribute::ReadNone);
+        Func->removeAttribute(AttributeSet::FunctionIndex,
+                              Attribute::get(Func->getContext(), B));
+      }
+    }
+    IRBuilder<> IRB(&I);
+    unsigned ArgOffset = 0;
+    DEBUG(dbgs() << "  CallSite: " << I << "\n");
+    for (CallSite::arg_iterator ArgIt = CS.arg_begin(), End = CS.arg_end();
+         ArgIt != End; ++ArgIt) {
+      Value *A = *ArgIt;
+      unsigned i = ArgIt - CS.arg_begin();
+      if (!A->getType()->isSized()) {
+        DEBUG(dbgs() << "Arg " << i << " is not sized: " << I << "\n");
+        continue;
+      }
+      unsigned Size = 0;
+      Value *Store = 0;
+      // Compute the Shadow for arg even if it is ByVal, because
+      // in that case getShadow() will copy the actual arg shadow to
+      // __msan_param_tls.
+      Value *ArgShadow = getShadow(A);
+      Value *ArgShadowBase = getShadowPtrForArgument(A, IRB, ArgOffset);
+      DEBUG(dbgs() << "  Arg#" << i << ": " << *A <<
+            " Shadow: " << *ArgShadow << "\n");
+      if (CS.paramHasAttr(i + 1, Attribute::ByVal)) {
+        assert(A->getType()->isPointerTy() &&
+               "ByVal argument is not a pointer!");
+        Size = MS.TD->getTypeAllocSize(A->getType()->getPointerElementType());
+        unsigned Alignment = CS.getParamAlignment(i + 1);
+        Store = IRB.CreateMemCpy(ArgShadowBase,
+                                 getShadowPtr(A, Type::getInt8Ty(*MS.C), IRB),
+                                 Size, Alignment);
+      } else {
+        Size = MS.TD->getTypeAllocSize(A->getType());
+        Store = IRB.CreateAlignedStore(ArgShadow, ArgShadowBase,
+                                       kShadowTLSAlignment);
+      }
+      if (MS.TrackOrigins)
+        IRB.CreateStore(getOrigin(A),
+                        getOriginPtrForArgument(A, IRB, ArgOffset));
+      assert(Size != 0 && Store != 0);
+      DEBUG(dbgs() << "  Param:" << *Store << "\n");
+      ArgOffset += DataLayout::RoundUpAlignment(Size, 8);
+    }
+    DEBUG(dbgs() << "  done with call args\n");
+
+    FunctionType *FT =
+      cast<FunctionType>(CS.getCalledValue()->getType()->getContainedType(0));
+    if (FT->isVarArg()) {
+      VAHelper->visitCallSite(CS, IRB);
+    }
+
+    // Now, get the shadow for the RetVal.
+    if (!I.getType()->isSized()) return;
+    IRBuilder<> IRBBefore(&I);
+    // Until we have full dynamic coverage, make sure the retval shadow is 0.
+    Value *Base = getShadowPtrForRetval(&I, IRBBefore);
+    IRBBefore.CreateAlignedStore(getCleanShadow(&I), Base, kShadowTLSAlignment);
+    Instruction *NextInsn = 0;
+    if (CS.isCall()) {
+      NextInsn = I.getNextNode();
+    } else {
+      BasicBlock *NormalDest = cast<InvokeInst>(&I)->getNormalDest();
+      if (!NormalDest->getSinglePredecessor()) {
+        // FIXME: this case is tricky, so we are just conservative here.
+        // Perhaps we need to split the edge between this BB and NormalDest,
+        // but a naive attempt to use SplitEdge leads to a crash.
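As an aside on the argument loop above: it lays out each argument's shadow in __msan_param_tls at consecutive 8-byte-aligned offsets. A small sketch of that offset computation (equivalent to DataLayout::RoundUpAlignment(Size, 8) for this case; the exact types are assumptions for illustration):

#include <cassert>
#include <cstdint>

// Round Value up to the next multiple of Align.
uint64_t roundUpAlignment(uint64_t Value, unsigned Align) {
  return (Value + Align - 1) / Align * Align;
}

int main() {
  // The shadows of an i32, an i8, and an i64 argument land at these
  // offsets in the parameter TLS array:
  uint64_t ArgOffset = 0;
  uint64_t Off0 = ArgOffset; ArgOffset += roundUpAlignment(4, 8); // i32
  uint64_t Off1 = ArgOffset; ArgOffset += roundUpAlignment(1, 8); // i8
  uint64_t Off2 = ArgOffset; ArgOffset += roundUpAlignment(8, 8); // i64
  assert(Off0 == 0 && Off1 == 8 && Off2 == 16 && ArgOffset == 24);
  return 0;
}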
+ setShadow(&I, getCleanShadow(&I)); + setOrigin(&I, getCleanOrigin()); + return; + } + NextInsn = NormalDest->getFirstInsertionPt(); + assert(NextInsn && + "Could not find insertion point for retval shadow load"); + } + IRBuilder<> IRBAfter(NextInsn); + Value *RetvalShadow = + IRBAfter.CreateAlignedLoad(getShadowPtrForRetval(&I, IRBAfter), + kShadowTLSAlignment, "_msret"); + setShadow(&I, RetvalShadow); + if (MS.TrackOrigins) + setOrigin(&I, IRBAfter.CreateLoad(getOriginPtrForRetval(IRBAfter))); + } + + void visitReturnInst(ReturnInst &I) { + IRBuilder<> IRB(&I); + if (Value *RetVal = I.getReturnValue()) { + // Set the shadow for the RetVal. + Value *Shadow = getShadow(RetVal); + Value *ShadowPtr = getShadowPtrForRetval(RetVal, IRB); + DEBUG(dbgs() << "Return: " << *Shadow << "\n" << *ShadowPtr << "\n"); + IRB.CreateAlignedStore(Shadow, ShadowPtr, kShadowTLSAlignment); + if (MS.TrackOrigins) + IRB.CreateStore(getOrigin(RetVal), getOriginPtrForRetval(IRB)); + } + } + + void visitPHINode(PHINode &I) { + IRBuilder<> IRB(&I); + ShadowPHINodes.push_back(&I); + setShadow(&I, IRB.CreatePHI(getShadowTy(&I), I.getNumIncomingValues(), + "_msphi_s")); + if (MS.TrackOrigins) + setOrigin(&I, IRB.CreatePHI(MS.OriginTy, I.getNumIncomingValues(), + "_msphi_o")); + } + + void visitAllocaInst(AllocaInst &I) { + setShadow(&I, getCleanShadow(&I)); + if (!ClPoisonStack) return; + IRBuilder<> IRB(I.getNextNode()); + uint64_t Size = MS.TD->getTypeAllocSize(I.getAllocatedType()); + if (ClPoisonStackWithCall) { + IRB.CreateCall2(MS.MsanPoisonStackFn, + IRB.CreatePointerCast(&I, IRB.getInt8PtrTy()), + ConstantInt::get(MS.IntptrTy, Size)); + } else { + Value *ShadowBase = getShadowPtr(&I, Type::getInt8PtrTy(*MS.C), IRB); + IRB.CreateMemSet(ShadowBase, IRB.getInt8(ClPoisonStackPattern), + Size, I.getAlignment()); + } + + if (MS.TrackOrigins) { + setOrigin(&I, getCleanOrigin()); + SmallString<2048> StackDescriptionStorage; + raw_svector_ostream StackDescription(StackDescriptionStorage); + // We create a string with a description of the stack allocation and + // pass it into __msan_set_alloca_origin. + // It will be printed by the run-time if stack-originated UMR is found. + // The first 4 bytes of the string are set to '----' and will be replaced + // by __msan_va_arg_overflow_size_tls at the first call. + StackDescription << "----" << I.getName() << "@" << F.getName(); + Value *Descr = + createPrivateNonConstGlobalForString(*F.getParent(), + StackDescription.str()); + IRB.CreateCall3(MS.MsanSetAllocaOriginFn, + IRB.CreatePointerCast(&I, IRB.getInt8PtrTy()), + ConstantInt::get(MS.IntptrTy, Size), + IRB.CreatePointerCast(Descr, IRB.getInt8PtrTy())); + } + } + + void visitSelectInst(SelectInst& I) { + IRBuilder<> IRB(&I); + setShadow(&I, IRB.CreateSelect(I.getCondition(), + getShadow(I.getTrueValue()), getShadow(I.getFalseValue()), + "_msprop")); + if (MS.TrackOrigins) { + // Origins are always i32, so any vector conditions must be flattened. + // FIXME: consider tracking vector origins for app vectors? + Value *Cond = I.getCondition(); + if (Cond->getType()->isVectorTy()) { + Value *ConvertedShadow = convertToShadowTyNoVec(Cond, IRB); + Cond = IRB.CreateICmpNE(ConvertedShadow, + getCleanShadow(ConvertedShadow), "_mso_select"); + } + setOrigin(&I, IRB.CreateSelect(Cond, + getOrigin(I.getTrueValue()), getOrigin(I.getFalseValue()))); + } + } + + void visitLandingPadInst(LandingPadInst &I) { + // Do nothing. 
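visitSelectInst above selects between the operand shadows using the application condition; note that in this version the condition's own shadow is not folded into the result. A scalar model of the rule, purely illustrative:

#include <cassert>
#include <cstdint>

struct Val { int v; uint32_t shadow; uint32_t origin; };

// Mirrors visitSelectInst: shadow and origin follow the chosen arm.
Val selectVal(bool cond, const Val &t, const Val &f) {
  Val r;
  r.v = cond ? t.v : f.v;
  r.shadow = cond ? t.shadow : f.shadow;
  r.origin = cond ? t.origin : f.origin;
  return r;
}

int main() {
  Val t = {1, 0xFFFFFFFF, 7};  // poisoned, origin id 7
  Val f = {2, 0, 0};           // clean
  assert(selectVal(true, t, f).shadow == 0xFFFFFFFF);
  assert(selectVal(false, t, f).shadow == 0);
  return 0;
}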
+ // See http://code.google.com/p/memory-sanitizer/issues/detail?id=1 + setShadow(&I, getCleanShadow(&I)); + setOrigin(&I, getCleanOrigin()); + } + + void visitGetElementPtrInst(GetElementPtrInst &I) { + handleShadowOr(I); + } + + void visitExtractValueInst(ExtractValueInst &I) { + IRBuilder<> IRB(&I); + Value *Agg = I.getAggregateOperand(); + DEBUG(dbgs() << "ExtractValue: " << I << "\n"); + Value *AggShadow = getShadow(Agg); + DEBUG(dbgs() << " AggShadow: " << *AggShadow << "\n"); + Value *ResShadow = IRB.CreateExtractValue(AggShadow, I.getIndices()); + DEBUG(dbgs() << " ResShadow: " << *ResShadow << "\n"); + setShadow(&I, ResShadow); + setOrigin(&I, getCleanOrigin()); + } + + void visitInsertValueInst(InsertValueInst &I) { + IRBuilder<> IRB(&I); + DEBUG(dbgs() << "InsertValue: " << I << "\n"); + Value *AggShadow = getShadow(I.getAggregateOperand()); + Value *InsShadow = getShadow(I.getInsertedValueOperand()); + DEBUG(dbgs() << " AggShadow: " << *AggShadow << "\n"); + DEBUG(dbgs() << " InsShadow: " << *InsShadow << "\n"); + Value *Res = IRB.CreateInsertValue(AggShadow, InsShadow, I.getIndices()); + DEBUG(dbgs() << " Res: " << *Res << "\n"); + setShadow(&I, Res); + setOrigin(&I, getCleanOrigin()); + } + + void dumpInst(Instruction &I) { + if (CallInst *CI = dyn_cast<CallInst>(&I)) { + errs() << "ZZZ call " << CI->getCalledFunction()->getName() << "\n"; + } else { + errs() << "ZZZ " << I.getOpcodeName() << "\n"; + } + errs() << "QQQ " << I << "\n"; + } + + void visitResumeInst(ResumeInst &I) { + DEBUG(dbgs() << "Resume: " << I << "\n"); + // Nothing to do here. + } + + void visitInstruction(Instruction &I) { + // Everything else: stop propagating and check for poisoned shadow. + if (ClDumpStrictInstructions) + dumpInst(I); + DEBUG(dbgs() << "DEFAULT: " << I << "\n"); + for (size_t i = 0, n = I.getNumOperands(); i < n; i++) + insertCheck(I.getOperand(i), &I); + setShadow(&I, getCleanShadow(&I)); + setOrigin(&I, getCleanOrigin()); + } +}; + +/// \brief AMD64-specific implementation of VarArgHelper. +struct VarArgAMD64Helper : public VarArgHelper { + // An unfortunate workaround for asymmetric lowering of va_arg stuff. + // See a comment in visitCallSite for more details. + static const unsigned AMD64GpEndOffset = 48; // AMD64 ABI Draft 0.99.6 p3.5.7 + static const unsigned AMD64FpEndOffset = 176; + + Function &F; + MemorySanitizer &MS; + MemorySanitizerVisitor &MSV; + Value *VAArgTLSCopy; + Value *VAArgOverflowSize; + + SmallVector<CallInst*, 16> VAStartInstrumentationList; + + VarArgAMD64Helper(Function &F, MemorySanitizer &MS, + MemorySanitizerVisitor &MSV) + : F(F), MS(MS), MSV(MSV), VAArgTLSCopy(0), VAArgOverflowSize(0) { } + + enum ArgKind { AK_GeneralPurpose, AK_FloatingPoint, AK_Memory }; + + ArgKind classifyArgument(Value* arg) { + // A very rough approximation of X86_64 argument classification rules. + Type *T = arg->getType(); + if (T->isFPOrFPVectorTy() || T->isX86_MMXTy()) + return AK_FloatingPoint; + if (T->isIntegerTy() && T->getPrimitiveSizeInBits() <= 64) + return AK_GeneralPurpose; + if (T->isPointerTy()) + return AK_GeneralPurpose; + return AK_Memory; + } + + // For VarArg functions, store the argument shadow in an ABI-specific format + // that corresponds to va_list layout. + // We do this because Clang lowers va_arg in the frontend, and this pass + // only sees the low level code that deals with va_list internals. 
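The two constants above come straight from the x86_64 SysV calling convention: the va_list register save area holds 6 general-purpose registers of 8 bytes (offsets 0..47) followed by 8 vector registers of 16 bytes (offsets 48..175). A few static_asserts documenting that arithmetic (an illustrative restatement of the constants, not part of the patch):

// 6 GP argument registers (rdi, rsi, rdx, rcx, r8, r9) x 8 bytes each.
static const unsigned kGpRegs = 6, kGpRegSize = 8;
// 8 vector argument registers (xmm0..xmm7) x 16 bytes each.
static const unsigned kFpRegs = 8, kFpRegSize = 16;

static_assert(kGpRegs * kGpRegSize == 48,
              "AMD64GpEndOffset: end of the GP register save area");
static_assert(48 + kFpRegs * kFpRegSize == 176,
              "AMD64FpEndOffset: end of the FP register save area");

int main() { return 0; }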
+ // A much easier alternative (provided that Clang emits va_arg instructions) + // would have been to associate each live instance of va_list with a copy of + // MSanParamTLS, and extract shadow on va_arg() call in the argument list + // order. + void visitCallSite(CallSite &CS, IRBuilder<> &IRB) { + unsigned GpOffset = 0; + unsigned FpOffset = AMD64GpEndOffset; + unsigned OverflowOffset = AMD64FpEndOffset; + for (CallSite::arg_iterator ArgIt = CS.arg_begin(), End = CS.arg_end(); + ArgIt != End; ++ArgIt) { + Value *A = *ArgIt; + ArgKind AK = classifyArgument(A); + if (AK == AK_GeneralPurpose && GpOffset >= AMD64GpEndOffset) + AK = AK_Memory; + if (AK == AK_FloatingPoint && FpOffset >= AMD64FpEndOffset) + AK = AK_Memory; + Value *Base; + switch (AK) { + case AK_GeneralPurpose: + Base = getShadowPtrForVAArgument(A, IRB, GpOffset); + GpOffset += 8; + break; + case AK_FloatingPoint: + Base = getShadowPtrForVAArgument(A, IRB, FpOffset); + FpOffset += 16; + break; + case AK_Memory: + uint64_t ArgSize = MS.TD->getTypeAllocSize(A->getType()); + Base = getShadowPtrForVAArgument(A, IRB, OverflowOffset); + OverflowOffset += DataLayout::RoundUpAlignment(ArgSize, 8); + } + IRB.CreateAlignedStore(MSV.getShadow(A), Base, kShadowTLSAlignment); + } + Constant *OverflowSize = + ConstantInt::get(IRB.getInt64Ty(), OverflowOffset - AMD64FpEndOffset); + IRB.CreateStore(OverflowSize, MS.VAArgOverflowSizeTLS); + } + + /// \brief Compute the shadow address for a given va_arg. + Value *getShadowPtrForVAArgument(Value *A, IRBuilder<> &IRB, + int ArgOffset) { + Value *Base = IRB.CreatePointerCast(MS.VAArgTLS, MS.IntptrTy); + Base = IRB.CreateAdd(Base, ConstantInt::get(MS.IntptrTy, ArgOffset)); + return IRB.CreateIntToPtr(Base, PointerType::get(MSV.getShadowTy(A), 0), + "_msarg"); + } + + void visitVAStartInst(VAStartInst &I) { + IRBuilder<> IRB(&I); + VAStartInstrumentationList.push_back(&I); + Value *VAListTag = I.getArgOperand(0); + Value *ShadowPtr = MSV.getShadowPtr(VAListTag, IRB.getInt8Ty(), IRB); + + // Unpoison the whole __va_list_tag. + // FIXME: magic ABI constants. + IRB.CreateMemSet(ShadowPtr, Constant::getNullValue(IRB.getInt8Ty()), + /* size */24, /* alignment */16, false); + } + + void visitVACopyInst(VACopyInst &I) { + IRBuilder<> IRB(&I); + Value *VAListTag = I.getArgOperand(0); + Value *ShadowPtr = MSV.getShadowPtr(VAListTag, IRB.getInt8Ty(), IRB); + + // Unpoison the whole __va_list_tag. + // FIXME: magic ABI constants. + IRB.CreateMemSet(ShadowPtr, Constant::getNullValue(IRB.getInt8Ty()), + /* size */ 24, /* alignment */ 16, false); + } + + void finalizeInstrumentation() { + assert(!VAArgOverflowSize && !VAArgTLSCopy && + "finalizeInstrumentation called twice"); + if (!VAStartInstrumentationList.empty()) { + // If there is a va_start in this function, make a backup copy of + // va_arg_tls somewhere in the function entry block. + IRBuilder<> IRB(F.getEntryBlock().getFirstNonPHI()); + VAArgOverflowSize = IRB.CreateLoad(MS.VAArgOverflowSizeTLS); + Value *CopySize = + IRB.CreateAdd(ConstantInt::get(MS.IntptrTy, AMD64FpEndOffset), + VAArgOverflowSize); + VAArgTLSCopy = IRB.CreateAlloca(Type::getInt8Ty(*MS.C), CopySize); + IRB.CreateMemCpy(VAArgTLSCopy, MS.VAArgTLS, CopySize, 8); + } + + // Instrument va_start. + // Copy va_list shadow from the backup copy of the TLS contents. 
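Before the copy loop below, it helps to pin down the "magic ABI constants" the FIXMEs refer to: the x86_64 __va_list_tag is two 4-byte offsets followed by two pointers, 24 bytes total, with overflow_arg_area at offset 8 and reg_save_area at offset 16 (the +8 and +16 used when the shadow is copied back after va_start). A sketch of that layout, valid on x86_64 only:

#include <cstddef>
#include <cstdint>

// The x86_64 SysV va_list element, as the instrumentation assumes it.
struct VAListTag {
  uint32_t gp_offset;       // next GP register slot to use
  uint32_t fp_offset;       // next FP register slot to use
  void *overflow_arg_area;  // stack-passed arguments
  void *reg_save_area;      // spilled argument registers
};

static_assert(sizeof(VAListTag) == 24, "memset size in visitVAStartInst");
static_assert(offsetof(VAListTag, overflow_arg_area) == 8,
              "offset loaded through OverflowArgAreaPtrPtr");
static_assert(offsetof(VAListTag, reg_save_area) == 16,
              "offset loaded through RegSaveAreaPtrPtr");

int main() { return 0; }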
+ for (size_t i = 0, n = VAStartInstrumentationList.size(); i < n; i++) { + CallInst *OrigInst = VAStartInstrumentationList[i]; + IRBuilder<> IRB(OrigInst->getNextNode()); + Value *VAListTag = OrigInst->getArgOperand(0); + + Value *RegSaveAreaPtrPtr = + IRB.CreateIntToPtr( + IRB.CreateAdd(IRB.CreatePtrToInt(VAListTag, MS.IntptrTy), + ConstantInt::get(MS.IntptrTy, 16)), + Type::getInt64PtrTy(*MS.C)); + Value *RegSaveAreaPtr = IRB.CreateLoad(RegSaveAreaPtrPtr); + Value *RegSaveAreaShadowPtr = + MSV.getShadowPtr(RegSaveAreaPtr, IRB.getInt8Ty(), IRB); + IRB.CreateMemCpy(RegSaveAreaShadowPtr, VAArgTLSCopy, + AMD64FpEndOffset, 16); + + Value *OverflowArgAreaPtrPtr = + IRB.CreateIntToPtr( + IRB.CreateAdd(IRB.CreatePtrToInt(VAListTag, MS.IntptrTy), + ConstantInt::get(MS.IntptrTy, 8)), + Type::getInt64PtrTy(*MS.C)); + Value *OverflowArgAreaPtr = IRB.CreateLoad(OverflowArgAreaPtrPtr); + Value *OverflowArgAreaShadowPtr = + MSV.getShadowPtr(OverflowArgAreaPtr, IRB.getInt8Ty(), IRB); + Value *SrcPtr = + getShadowPtrForVAArgument(VAArgTLSCopy, IRB, AMD64FpEndOffset); + IRB.CreateMemCpy(OverflowArgAreaShadowPtr, SrcPtr, VAArgOverflowSize, 16); + } + } +}; + +VarArgHelper* CreateVarArgHelper(Function &Func, MemorySanitizer &Msan, + MemorySanitizerVisitor &Visitor) { + return new VarArgAMD64Helper(Func, Msan, Visitor); +} + +} // namespace + +bool MemorySanitizer::runOnFunction(Function &F) { + MemorySanitizerVisitor Visitor(F, *this); + + // Clear out readonly/readnone attributes. + AttrBuilder B; + B.addAttribute(Attribute::ReadOnly) + .addAttribute(Attribute::ReadNone); + F.removeAttribute(AttributeSet::FunctionIndex, + Attribute::get(F.getContext(), B)); + + return Visitor.runOnFunction(); +} diff --git a/lib/Transforms/Instrumentation/OptimalEdgeProfiling.cpp b/lib/Transforms/Instrumentation/OptimalEdgeProfiling.cpp index 1fe1254..c5a1fe9 100644 --- a/lib/Transforms/Instrumentation/OptimalEdgeProfiling.cpp +++ b/lib/Transforms/Instrumentation/OptimalEdgeProfiling.cpp @@ -13,20 +13,21 @@ // //===----------------------------------------------------------------------===// #define DEBUG_TYPE "insert-optimal-edge-profiling" +#include "llvm/Transforms/Instrumentation.h" +#include "MaximumSpanningTree.h" #include "ProfilingUtils.h" -#include "llvm/Constants.h" -#include "llvm/Module.h" -#include "llvm/Pass.h" +#include "llvm/ADT/DenseSet.h" +#include "llvm/ADT/Statistic.h" #include "llvm/Analysis/Passes.h" #include "llvm/Analysis/ProfileInfo.h" #include "llvm/Analysis/ProfileInfoLoader.h" -#include "llvm/Support/raw_ostream.h" +#include "llvm/IR/Constants.h" +#include "llvm/IR/LLVMContext.h" +#include "llvm/IR/Module.h" +#include "llvm/Pass.h" #include "llvm/Support/Debug.h" +#include "llvm/Support/raw_ostream.h" #include "llvm/Transforms/Utils/BasicBlockUtils.h" -#include "llvm/Transforms/Instrumentation.h" -#include "llvm/ADT/DenseSet.h" -#include "llvm/ADT/Statistic.h" -#include "MaximumSpanningTree.h" using namespace llvm; STATISTIC(NumEdgesInserted, "The # of edges inserted."); @@ -75,8 +76,8 @@ inline static void printEdgeCounter(ProfileInfo::Edge e, bool OptimalEdgeProfiler::runOnModule(Module &M) { Function *Main = M.getFunction("main"); if (Main == 0) { - errs() << "WARNING: cannot insert edge profiling into a module" - << " with no main function!\n"; + M.getContext().emitWarning("cannot insert edge profiling into a module" + " with no main function"); return false; // No main, no instrumentation! 
 }
diff --git a/lib/Transforms/Instrumentation/PathProfiling.cpp b/lib/Transforms/Instrumentation/PathProfiling.cpp
index cc27146..358bbeb 100644
--- a/lib/Transforms/Instrumentation/PathProfiling.cpp
+++ b/lib/Transforms/Instrumentation/PathProfiling.cpp
@@ -45,24 +45,23 @@
 //===----------------------------------------------------------------------===//
 #define DEBUG_TYPE "insert-path-profiling"
-#include "llvm/DerivedTypes.h"
+#include "llvm/Transforms/Instrumentation.h"
 #include "ProfilingUtils.h"
 #include "llvm/Analysis/PathNumbering.h"
-#include "llvm/Constants.h"
-#include "llvm/DerivedTypes.h"
-#include "llvm/InstrTypes.h"
-#include "llvm/Instructions.h"
-#include "llvm/LLVMContext.h"
-#include "llvm/Module.h"
+#include "llvm/IR/Constants.h"
+#include "llvm/IR/DerivedTypes.h"
+#include "llvm/IR/InstrTypes.h"
+#include "llvm/IR/Instructions.h"
+#include "llvm/IR/LLVMContext.h"
+#include "llvm/IR/Module.h"
+#include "llvm/IR/TypeBuilder.h"
 #include "llvm/Pass.h"
-#include "llvm/TypeBuilder.h"
-#include "llvm/Support/Compiler.h"
 #include "llvm/Support/CFG.h"
 #include "llvm/Support/CommandLine.h"
+#include "llvm/Support/Compiler.h"
 #include "llvm/Support/Debug.h"
 #include "llvm/Support/raw_ostream.h"
 #include "llvm/Transforms/Utils/BasicBlockUtils.h"
-#include "llvm/Transforms/Instrumentation.h"
 #include <vector>

 #define HASH_THRESHHOLD 100000
@@ -1346,8 +1345,8 @@ bool PathProfiler::runOnModule(Module &M) {
     Main = M.getFunction("MAIN__");

   if (!Main) {
-    errs() << "WARNING: cannot insert path profiling into a module"
-           << " with no main function!\n";
+    Context->emitWarning("cannot insert path profiling into a module"
+                         " with no main function");
     return false;
   }
diff --git a/lib/Transforms/Instrumentation/ProfilingUtils.cpp b/lib/Transforms/Instrumentation/ProfilingUtils.cpp
index de57cd1..4b3de6d 100644
--- a/lib/Transforms/Instrumentation/ProfilingUtils.cpp
+++ b/lib/Transforms/Instrumentation/ProfilingUtils.cpp
@@ -15,11 +15,11 @@
 //===----------------------------------------------------------------------===//

 #include "ProfilingUtils.h"
-#include "llvm/Constants.h"
-#include "llvm/DerivedTypes.h"
-#include "llvm/Instructions.h"
-#include "llvm/LLVMContext.h"
-#include "llvm/Module.h"
+#include "llvm/IR/Constants.h"
+#include "llvm/IR/DerivedTypes.h"
+#include "llvm/IR/Instructions.h"
+#include "llvm/IR/LLVMContext.h"
+#include "llvm/IR/Module.h"

 void llvm::InsertProfilingInitCall(Function *MainFn, const char *FnName,
                                    GlobalValue *Array,
diff --git a/lib/Transforms/Instrumentation/ThreadSanitizer.cpp b/lib/Transforms/Instrumentation/ThreadSanitizer.cpp
index 17b7775..29d2ece 100644
--- a/lib/Transforms/Instrumentation/ThreadSanitizer.cpp
+++ b/lib/Transforms/Instrumentation/ThreadSanitizer.cpp
@@ -21,32 +21,41 @@
 #define DEBUG_TYPE "tsan"

+#include "llvm/Transforms/Instrumentation.h"
 #include "BlackList.h"
-#include "llvm/Function.h"
-#include "llvm/IRBuilder.h"
-#include "llvm/Intrinsics.h"
-#include "llvm/LLVMContext.h"
-#include "llvm/Metadata.h"
-#include "llvm/Module.h"
-#include "llvm/Type.h"
 #include "llvm/ADT/SmallSet.h"
 #include "llvm/ADT/SmallString.h"
 #include "llvm/ADT/SmallVector.h"
 #include "llvm/ADT/Statistic.h"
 #include "llvm/ADT/StringExtras.h"
+#include "llvm/IR/DataLayout.h"
+#include "llvm/IR/Function.h"
+#include "llvm/IR/IRBuilder.h"
+#include "llvm/IR/Intrinsics.h"
+#include "llvm/IR/LLVMContext.h"
+#include "llvm/IR/Metadata.h"
+#include "llvm/IR/Module.h"
+#include "llvm/IR/Type.h"
 #include "llvm/Support/CommandLine.h"
 #include "llvm/Support/Debug.h"
 #include
"llvm/Support/MathExtras.h" #include "llvm/Support/raw_ostream.h" -#include "llvm/Target/TargetData.h" -#include "llvm/Transforms/Instrumentation.h" #include "llvm/Transforms/Utils/BasicBlockUtils.h" #include "llvm/Transforms/Utils/ModuleUtils.h" using namespace llvm; -static cl::opt<std::string> ClBlackListFile("tsan-blacklist", +static cl::opt<std::string> ClBlacklistFile("tsan-blacklist", cl::desc("Blacklist file"), cl::Hidden); +static cl::opt<bool> ClInstrumentMemoryAccesses( + "tsan-instrument-memory-accesses", cl::init(true), + cl::desc("Instrument memory accesses"), cl::Hidden); +static cl::opt<bool> ClInstrumentFuncEntryExit( + "tsan-instrument-func-entry-exit", cl::init(true), + cl::desc("Instrument function entry and exit"), cl::Hidden); +static cl::opt<bool> ClInstrumentAtomics( + "tsan-instrument-atomics", cl::init(true), + cl::desc("Instrument atomics"), cl::Hidden); STATISTIC(NumInstrumentedReads, "Number of instrumented reads"); STATISTIC(NumInstrumentedWrites, "Number of instrumented writes"); @@ -62,13 +71,18 @@ namespace { /// ThreadSanitizer: instrument the code in module to find races. struct ThreadSanitizer : public FunctionPass { - ThreadSanitizer(); + ThreadSanitizer(StringRef BlacklistFile = StringRef()) + : FunctionPass(ID), + TD(0), + BlacklistFile(BlacklistFile.empty() ? ClBlacklistFile + : BlacklistFile) { } const char *getPassName() const; bool runOnFunction(Function &F); bool doInitialization(Module &M); static char ID; // Pass identification, replacement for typeid. private: + void initializeCallbacks(Module &M); bool instrumentLoadOrStore(Instruction *I); bool instrumentAtomic(Instruction *I); void chooseInstructionsToInstrument(SmallVectorImpl<Instruction*> &Local, @@ -76,7 +90,8 @@ struct ThreadSanitizer : public FunctionPass { bool addrPointsToConstantData(Value *Addr); int getMemoryAccessFuncIndex(Value *Addr); - TargetData *TD; + DataLayout *TD; + SmallString<64> BlacklistFile; OwningPtr<BlackList> BL; IntegerType *OrdTy; // Callbacks to run-time library are computed in doInitialization. @@ -88,6 +103,10 @@ struct ThreadSanitizer : public FunctionPass { Function *TsanWrite[kNumberOfAccessSizes]; Function *TsanAtomicLoad[kNumberOfAccessSizes]; Function *TsanAtomicStore[kNumberOfAccessSizes]; + Function *TsanAtomicRMW[AtomicRMWInst::LAST_BINOP + 1][kNumberOfAccessSizes]; + Function *TsanAtomicCAS[kNumberOfAccessSizes]; + Function *TsanAtomicThreadFence; + Function *TsanAtomicSignalFence; Function *TsanVptrUpdate; }; } // namespace @@ -101,13 +120,8 @@ const char *ThreadSanitizer::getPassName() const { return "ThreadSanitizer"; } -ThreadSanitizer::ThreadSanitizer() - : FunctionPass(ID), - TD(NULL) { -} - -FunctionPass *llvm::createThreadSanitizerPass() { - return new ThreadSanitizer(); +FunctionPass *llvm::createThreadSanitizerPass(StringRef BlacklistFile) { + return new ThreadSanitizer(BlacklistFile); } static Function *checkInterfaceFunction(Constant *FuncOrBitcast) { @@ -117,18 +131,8 @@ static Function *checkInterfaceFunction(Constant *FuncOrBitcast) { report_fatal_error("ThreadSanitizer interface function redefined"); } -bool ThreadSanitizer::doInitialization(Module &M) { - TD = getAnalysisIfAvailable<TargetData>(); - if (!TD) - return false; - BL.reset(new BlackList(ClBlackListFile)); - - // Always insert a call to __tsan_init into the module's CTORs. 
+void ThreadSanitizer::initializeCallbacks(Module &M) { IRBuilder<> IRB(M.getContext()); - Value *TsanInit = M.getOrInsertFunction("__tsan_init", - IRB.getVoidTy(), NULL); - appendToGlobalCtors(M, cast<Function>(TsanInit), 0); - // Initialize the callbacks. TsanFuncEntry = checkInterfaceFunction(M.getOrInsertFunction( "__tsan_func_entry", IRB.getVoidTy(), IRB.getInt8PtrTy(), NULL)); @@ -158,10 +162,58 @@ bool ThreadSanitizer::doInitialization(Module &M) { TsanAtomicStore[i] = checkInterfaceFunction(M.getOrInsertFunction( AtomicStoreName, IRB.getVoidTy(), PtrTy, Ty, OrdTy, NULL)); + + for (int op = AtomicRMWInst::FIRST_BINOP; + op <= AtomicRMWInst::LAST_BINOP; ++op) { + TsanAtomicRMW[op][i] = NULL; + const char *NamePart = NULL; + if (op == AtomicRMWInst::Xchg) + NamePart = "_exchange"; + else if (op == AtomicRMWInst::Add) + NamePart = "_fetch_add"; + else if (op == AtomicRMWInst::Sub) + NamePart = "_fetch_sub"; + else if (op == AtomicRMWInst::And) + NamePart = "_fetch_and"; + else if (op == AtomicRMWInst::Or) + NamePart = "_fetch_or"; + else if (op == AtomicRMWInst::Xor) + NamePart = "_fetch_xor"; + else if (op == AtomicRMWInst::Nand) + NamePart = "_fetch_nand"; + else + continue; + SmallString<32> RMWName("__tsan_atomic" + itostr(BitSize) + NamePart); + TsanAtomicRMW[op][i] = checkInterfaceFunction(M.getOrInsertFunction( + RMWName, Ty, PtrTy, Ty, OrdTy, NULL)); + } + + SmallString<32> AtomicCASName("__tsan_atomic" + itostr(BitSize) + + "_compare_exchange_val"); + TsanAtomicCAS[i] = checkInterfaceFunction(M.getOrInsertFunction( + AtomicCASName, Ty, PtrTy, Ty, Ty, OrdTy, OrdTy, NULL)); } TsanVptrUpdate = checkInterfaceFunction(M.getOrInsertFunction( "__tsan_vptr_update", IRB.getVoidTy(), IRB.getInt8PtrTy(), IRB.getInt8PtrTy(), NULL)); + TsanAtomicThreadFence = checkInterfaceFunction(M.getOrInsertFunction( + "__tsan_atomic_thread_fence", IRB.getVoidTy(), OrdTy, NULL)); + TsanAtomicSignalFence = checkInterfaceFunction(M.getOrInsertFunction( + "__tsan_atomic_signal_fence", IRB.getVoidTy(), OrdTy, NULL)); +} + +bool ThreadSanitizer::doInitialization(Module &M) { + TD = getAnalysisIfAvailable<DataLayout>(); + if (!TD) + return false; + BL.reset(new BlackList(BlacklistFile)); + + // Always insert a call to __tsan_init into the module's CTORs. + IRBuilder<> IRB(M.getContext()); + Value *TsanInit = M.getOrInsertFunction("__tsan_init", + IRB.getVoidTy(), NULL); + appendToGlobalCtors(M, cast<Function>(TsanInit), 0); + return true; } @@ -244,14 +296,15 @@ static bool isAtomic(Instruction *I) { return true; if (isa<AtomicCmpXchgInst>(I)) return true; - if (FenceInst *FI = dyn_cast<FenceInst>(I)) - return FI->getSynchScope() == CrossThread; + if (isa<FenceInst>(I)) + return true; return false; } bool ThreadSanitizer::runOnFunction(Function &F) { if (!TD) return false; if (BL->isIn(F)) return false; + initializeCallbacks(*F.getParent()); SmallVector<Instruction*, 8> RetVec; SmallVector<Instruction*, 8> AllLoadsAndStores; SmallVector<Instruction*, 8> LocalLoadsAndStores; @@ -284,17 +337,19 @@ bool ThreadSanitizer::runOnFunction(Function &F) { // (e.g. variables that do not escape, etc). // Instrument memory accesses. - for (size_t i = 0, n = AllLoadsAndStores.size(); i < n; ++i) { - Res |= instrumentLoadOrStore(AllLoadsAndStores[i]); - } + if (ClInstrumentMemoryAccesses) + for (size_t i = 0, n = AllLoadsAndStores.size(); i < n; ++i) { + Res |= instrumentLoadOrStore(AllLoadsAndStores[i]); + } // Instrument atomic memory accesses. 
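The nested loops above materialize one runtime callback per (operation, size) pair, e.g. __tsan_atomic32_fetch_add. A tiny standalone sketch of the same name-building scheme (illustrative; the real code uses SmallString and itostr):

#include <cassert>
#include <string>

// Matches the pattern "__tsan_atomic" + itostr(BitSize) + NamePart.
std::string tsanAtomicName(unsigned BitSize, const std::string &NamePart) {
  return "__tsan_atomic" + std::to_string(BitSize) + NamePart;
}

int main() {
  assert(tsanAtomicName(32, "_fetch_add") == "__tsan_atomic32_fetch_add");
  assert(tsanAtomicName(8, "_exchange") == "__tsan_atomic8_exchange");
  assert(tsanAtomicName(64, "_compare_exchange_val") ==
         "__tsan_atomic64_compare_exchange_val");
  return 0;
}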
-  for (size_t i = 0, n = AtomicAccesses.size(); i < n; ++i) {
-    Res |= instrumentAtomic(AtomicAccesses[i]);
-  }
+  if (ClInstrumentAtomics)
+    for (size_t i = 0, n = AtomicAccesses.size(); i < n; ++i) {
+      Res |= instrumentAtomic(AtomicAccesses[i]);
+    }

   // Instrument function entry/exit points if there were instrumented accesses.
-  if (Res || HasCalls) {
+  if ((Res || HasCalls) && ClInstrumentFuncEntryExit) {
     IRBuilder<> IRB(F.getEntryBlock().getFirstNonPHI());
     Value *ReturnAddress = IRB.CreateCall(
         Intrinsic::getDeclaration(F.getParent(), Intrinsic::returnaddress),
@@ -343,16 +398,39 @@ static ConstantInt *createOrdering(IRBuilder<> *IRB, AtomicOrdering ord) {
   switch (ord) {
     case NotAtomic: assert(false);
     case Unordered: // Fall-through.
-    case Monotonic: v = 1 << 0; break;
-    // case Consume: v = 1 << 1; break; // Not specified yet.
-    case Acquire: v = 1 << 2; break;
-    case Release: v = 1 << 3; break;
-    case AcquireRelease: v = 1 << 4; break;
-    case SequentiallyConsistent: v = 1 << 5; break;
+    case Monotonic: v = 0; break;
+    // case Consume: v = 1; break; // Not specified yet.
+    case Acquire: v = 2; break;
+    case Release: v = 3; break;
+    case AcquireRelease: v = 4; break;
+    case SequentiallyConsistent: v = 5; break;
+  }
+  return IRB->getInt32(v);
+}
+
+static ConstantInt *createFailOrdering(IRBuilder<> *IRB, AtomicOrdering ord) {
+  uint32_t v = 0;
+  switch (ord) {
+    case NotAtomic: assert(false);
+    case Unordered: // Fall-through.
+    case Monotonic: v = 0; break;
+    // case Consume: v = 1; break; // Not specified yet.
+    case Acquire: v = 2; break;
+    case Release: v = 0; break;
+    case AcquireRelease: v = 2; break;
+    case SequentiallyConsistent: v = 5; break;
   }
   return IRB->getInt32(v);
 }

+// Both llvm and ThreadSanitizer atomic operations are based on C++11/C1x
+// standards. For background, see the C++11 standard. A slightly older, publicly
+// available draft of the standard (not entirely up-to-date, but close enough
+// for casual browsing) is available here:
+// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2011/n3242.pdf
+// The following page contains more background information:
+// http://www.hpl.hp.com/personal/Hans_Boehm/c++mm/
+
 bool ThreadSanitizer::instrumentAtomic(Instruction *I) {
   IRBuilder<> IRB(I);
   if (LoadInst *LI = dyn_cast<LoadInst>(I)) {
@@ -385,12 +463,45 @@ bool ThreadSanitizer::instrumentAtomic(Instruction *I) {
     CallInst *C = CallInst::Create(TsanAtomicStore[Idx],
                                    ArrayRef<Value*>(Args));
     ReplaceInstWithInst(I, C);
-  } else if (isa<AtomicRMWInst>(I)) {
-    // FIXME: Not yet supported.
-  } else if (isa<AtomicCmpXchgInst>(I)) {
-    // FIXME: Not yet supported.
-  } else if (isa<FenceInst>(I)) {
-    // FIXME: Not yet supported.
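The switch from bit flags (1 << n) to plain 0..5 values aligns the runtime encoding with the C++11 std::memory_order enumerators, whose numeric values are fixed by their declaration order. A static check of that correspondence:

#include <atomic>

// The integers emitted by createOrdering are exactly the C++11
// memory_order enumerator values.
static_assert(static_cast<int>(std::memory_order_relaxed) == 0, "Monotonic");
static_assert(static_cast<int>(std::memory_order_consume) == 1, "Consume");
static_assert(static_cast<int>(std::memory_order_acquire) == 2, "Acquire");
static_assert(static_cast<int>(std::memory_order_release) == 3, "Release");
static_assert(static_cast<int>(std::memory_order_acq_rel) == 4,
              "AcquireRelease");
static_assert(static_cast<int>(std::memory_order_seq_cst) == 5,
              "SequentiallyConsistent");

int main() { return 0; }

createFailOrdering weakens the ordering for the failure path of a compare-exchange: a failed CAS performs no store, so it cannot have release semantics, hence Release maps to relaxed and AcquireRelease to acquire.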
+ } else if (AtomicRMWInst *RMWI = dyn_cast<AtomicRMWInst>(I)) { + Value *Addr = RMWI->getPointerOperand(); + int Idx = getMemoryAccessFuncIndex(Addr); + if (Idx < 0) + return false; + Function *F = TsanAtomicRMW[RMWI->getOperation()][Idx]; + if (F == NULL) + return false; + const size_t ByteSize = 1 << Idx; + const size_t BitSize = ByteSize * 8; + Type *Ty = Type::getIntNTy(IRB.getContext(), BitSize); + Type *PtrTy = Ty->getPointerTo(); + Value *Args[] = {IRB.CreatePointerCast(Addr, PtrTy), + IRB.CreateIntCast(RMWI->getValOperand(), Ty, false), + createOrdering(&IRB, RMWI->getOrdering())}; + CallInst *C = CallInst::Create(F, ArrayRef<Value*>(Args)); + ReplaceInstWithInst(I, C); + } else if (AtomicCmpXchgInst *CASI = dyn_cast<AtomicCmpXchgInst>(I)) { + Value *Addr = CASI->getPointerOperand(); + int Idx = getMemoryAccessFuncIndex(Addr); + if (Idx < 0) + return false; + const size_t ByteSize = 1 << Idx; + const size_t BitSize = ByteSize * 8; + Type *Ty = Type::getIntNTy(IRB.getContext(), BitSize); + Type *PtrTy = Ty->getPointerTo(); + Value *Args[] = {IRB.CreatePointerCast(Addr, PtrTy), + IRB.CreateIntCast(CASI->getCompareOperand(), Ty, false), + IRB.CreateIntCast(CASI->getNewValOperand(), Ty, false), + createOrdering(&IRB, CASI->getOrdering()), + createFailOrdering(&IRB, CASI->getOrdering())}; + CallInst *C = CallInst::Create(TsanAtomicCAS[Idx], ArrayRef<Value*>(Args)); + ReplaceInstWithInst(I, C); + } else if (FenceInst *FI = dyn_cast<FenceInst>(I)) { + Value *Args[] = {createOrdering(&IRB, FI->getOrdering())}; + Function *F = FI->getSynchScope() == SingleThread ? + TsanAtomicSignalFence : TsanAtomicThreadFence; + CallInst *C = CallInst::Create(F, ArrayRef<Value*>(Args)); + ReplaceInstWithInst(I, C); } return true; } |
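The CAS branch above passes both a success and a weakened failure ordering to __tsan_atomicN_compare_exchange_val, mirroring how C++11 compare_exchange takes two orderings. A standalone illustration of the same pairing with std::atomic:

#include <atomic>
#include <cassert>

int main() {
  std::atomic<int> a(5);
  int expected = 5;
  // Success path is acq_rel; the failure path is only acquire, which is
  // the downgrade createFailOrdering performs (AcquireRelease -> Acquire).
  bool ok = a.compare_exchange_strong(expected, 7,
                                      std::memory_order_acq_rel,
                                      std::memory_order_acquire);
  assert(ok && a.load(std::memory_order_relaxed) == 7);
  return 0;
}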
