author     Evgeniy Stepanov <eugeni.stepanov@gmail.com>	2013-02-28 11:25:14 +0000
committer  Evgeniy Stepanov <eugeni.stepanov@gmail.com>	2013-02-28 11:25:14 +0000
commit     5e812139690ce077d568ef6559992b2cf74eb536 (patch)
tree       3ee2e2d0dbe7014af4c134e9f1650bd504c91a81 /test/Instrumentation
parent     4a4fb84d4b1ae48199489707a4398e2a968b8933 (diff)
[msan] Implement sanitize_memory attribute.
Shadow checks are disabled and memory loads always produce fully initialized values in functions that don't have a sanitize_memory attribute. Value and argument shadow is propagated as usual.

This change also updates blacklist behaviour to match the above.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@176247 91177308-0d34-0410-b5e6-96231b3b80d8
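As a rough illustration of the new per-function behaviour (not part of the patch; @checked and @unchecked are made-up names), the pass now keys instrumentation off the attribute rather than instrumenting every function:

; Illustrative sketch only. With the attribute, loads are checked against
; their shadow and can trigger __msan_warning; without it, no checks are
; inserted and the load is treated as producing a fully initialized value,
; while value and argument shadow is still propagated as usual.

define i32 @checked(i32* %p) nounwind uwtable sanitize_memory {
entry:
  %v = load i32* %p, align 4
  ret i32 %v
}

define i32 @unchecked(i32* %p) nounwind uwtable {
entry:
  %v = load i32* %p, align 4
  ret i32 %v
}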
Diffstat (limited to 'test/Instrumentation')
-rw-r--r--  test/Instrumentation/MemorySanitizer/msan_basic.ll | 97
1 file changed, 62 insertions(+), 35 deletions(-)
diff --git a/test/Instrumentation/MemorySanitizer/msan_basic.ll b/test/Instrumentation/MemorySanitizer/msan_basic.ll
index 60f946f..1e7a317 100644
--- a/test/Instrumentation/MemorySanitizer/msan_basic.ll
+++ b/test/Instrumentation/MemorySanitizer/msan_basic.ll
@@ -11,7 +11,7 @@ target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f3
; Check instrumentation of stores
-define void @Store(i32* nocapture %p, i32 %x) nounwind uwtable {
+define void @Store(i32* nocapture %p, i32 %x) nounwind uwtable sanitize_memory {
entry:
store i32 %x, i32* %p, align 4
ret void
@@ -39,7 +39,7 @@ entry:
; Shadow store has the same alignment as the original store; origin store
; does not specify explicit alignment.
-define void @AlignedStore(i32* nocapture %p, i32 %x) nounwind uwtable {
+define void @AlignedStore(i32* nocapture %p, i32 %x) nounwind uwtable sanitize_memory {
entry:
store i32 %x, i32* %p, align 32
ret void
@@ -64,7 +64,7 @@ entry:
; load followed by cmp: check that we load the shadow and call __msan_warning.
-define void @LoadAndCmp(i32* nocapture %a) nounwind uwtable {
+define void @LoadAndCmp(i32* nocapture %a) nounwind uwtable sanitize_memory {
entry:
%0 = load i32* %a, align 4
%tobool = icmp eq i32 %0, 0
@@ -89,7 +89,7 @@ declare void @foo(...)
; CHECK: ret void
; Check that we store the shadow for the retval.
-define i32 @ReturnInt() nounwind uwtable readnone {
+define i32 @ReturnInt() nounwind uwtable readnone sanitize_memory {
entry:
ret i32 123
}
@@ -99,7 +99,7 @@ entry:
; CHECK: ret i32
; Check that we get the shadow for the retval.
-define void @CopyRetVal(i32* nocapture %a) nounwind uwtable {
+define void @CopyRetVal(i32* nocapture %a) nounwind uwtable sanitize_memory {
entry:
%call = tail call i32 @ReturnInt() nounwind
store i32 %call, i32* %a, align 4
@@ -114,7 +114,7 @@ entry:
; Check that we generate PHIs for shadow.
-define void @FuncWithPhi(i32* nocapture %a, i32* %b, i32* nocapture %c) nounwind uwtable {
+define void @FuncWithPhi(i32* nocapture %a, i32* %b, i32* nocapture %c) nounwind uwtable sanitize_memory {
entry:
%tobool = icmp eq i32* %b, null
br i1 %tobool, label %if.else, label %if.then
@@ -141,7 +141,7 @@ entry:
; CHECK: ret void
; Compute shadow for "x << 10"
-define void @ShlConst(i32* nocapture %x) nounwind uwtable {
+define void @ShlConst(i32* nocapture %x) nounwind uwtable sanitize_memory {
entry:
%0 = load i32* %x, align 4
%1 = shl i32 %0, 10
@@ -159,7 +159,7 @@ entry:
; CHECK: ret void
; Compute shadow for "10 << x": it should have 'sext i1'.
-define void @ShlNonConst(i32* nocapture %x) nounwind uwtable {
+define void @ShlNonConst(i32* nocapture %x) nounwind uwtable sanitize_memory {
entry:
%0 = load i32* %x, align 4
%1 = shl i32 10, %0
@@ -176,7 +176,7 @@ entry:
; CHECK: ret void
; SExt
-define void @SExt(i32* nocapture %a, i16* nocapture %b) nounwind uwtable {
+define void @SExt(i32* nocapture %a, i16* nocapture %b) nounwind uwtable sanitize_memory {
entry:
%0 = load i16* %b, align 2
%1 = sext i16 %0 to i32
@@ -195,7 +195,7 @@ entry:
; memset
-define void @MemSet(i8* nocapture %x) nounwind uwtable {
+define void @MemSet(i8* nocapture %x) nounwind uwtable sanitize_memory {
entry:
call void @llvm.memset.p0i8.i64(i8* %x, i8 42, i64 10, i32 1, i1 false)
ret void
@@ -209,7 +209,7 @@ declare void @llvm.memset.p0i8.i64(i8* nocapture, i8, i64, i32, i1) nounwind
; memcpy
-define void @MemCpy(i8* nocapture %x, i8* nocapture %y) nounwind uwtable {
+define void @MemCpy(i8* nocapture %x, i8* nocapture %y) nounwind uwtable sanitize_memory {
entry:
call void @llvm.memcpy.p0i8.p0i8.i64(i8* %x, i8* %y, i64 10, i32 1, i1 false)
ret void
@@ -223,7 +223,7 @@ declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture, i8* nocapture, i64, i32,
; memmove is lowered to a call
-define void @MemMove(i8* nocapture %x, i8* nocapture %y) nounwind uwtable {
+define void @MemMove(i8* nocapture %x, i8* nocapture %y) nounwind uwtable sanitize_memory {
entry:
call void @llvm.memmove.p0i8.p0i8.i64(i8* %x, i8* %y, i64 10, i32 1, i1 false)
ret void
@@ -238,7 +238,7 @@ declare void @llvm.memmove.p0i8.p0i8.i64(i8* nocapture, i8* nocapture, i64, i32,
; Check that we propagate shadow for "select"
-define i32 @Select(i32 %a, i32 %b, i32 %c) nounwind uwtable readnone {
+define i32 @Select(i32 %a, i32 %b, i32 %c) nounwind uwtable readnone sanitize_memory {
entry:
%tobool = icmp ne i32 %c, 0
%cond = select i1 %tobool, i32 %a, i32 %b
@@ -255,7 +255,7 @@ entry:
; Select condition is flattened to i1, which is then used to select one of the
; argument origins.
-define <8 x i16> @SelectVector(<8 x i16> %a, <8 x i16> %b, <8 x i1> %c) nounwind uwtable readnone {
+define <8 x i16> @SelectVector(<8 x i16> %a, <8 x i16> %b, <8 x i1> %c) nounwind uwtable readnone sanitize_memory {
entry:
%cond = select <8 x i1> %c, <8 x i16> %a, <8 x i16> %b
ret <8 x i16> %cond
@@ -268,7 +268,7 @@ entry:
; CHECK-ORIGINS: ret <8 x i16>
-define i8* @IntToPtr(i64 %x) nounwind uwtable readnone {
+define i8* @IntToPtr(i64 %x) nounwind uwtable readnone sanitize_memory {
entry:
%0 = inttoptr i64 %x to i8*
ret i8* %0
@@ -281,7 +281,7 @@ entry:
; CHECK: ret i8
-define i8* @IntToPtr_ZExt(i16 %x) nounwind uwtable readnone {
+define i8* @IntToPtr_ZExt(i16 %x) nounwind uwtable readnone sanitize_memory {
entry:
%0 = inttoptr i16 %x to i8*
ret i8* %0
@@ -296,7 +296,7 @@ entry:
; Check that we insert exactly one check on udiv
; (2nd arg shadow is checked, 1st arg shadow is propagated)
-define i32 @Div(i32 %a, i32 %b) nounwind uwtable readnone {
+define i32 @Div(i32 %a, i32 %b) nounwind uwtable readnone sanitize_memory {
entry:
%div = udiv i32 %a, %b
ret i32 %div
@@ -313,7 +313,7 @@ entry:
; Check that we propagate shadow for x<0, x>=0, etc (i.e. sign bit tests)
-define zeroext i1 @ICmpSLT(i32 %x) nounwind uwtable readnone {
+define zeroext i1 @ICmpSLT(i32 %x) nounwind uwtable readnone sanitize_memory {
%1 = icmp slt i32 %x, 0
ret i1 %1
}
@@ -325,7 +325,7 @@ define zeroext i1 @ICmpSLT(i32 %x) nounwind uwtable readnone {
; CHECK-NOT: call void @__msan_warning
; CHECK: ret i1
-define zeroext i1 @ICmpSGE(i32 %x) nounwind uwtable readnone {
+define zeroext i1 @ICmpSGE(i32 %x) nounwind uwtable readnone sanitize_memory {
%1 = icmp sge i32 %x, 0
ret i1 %1
}
@@ -337,7 +337,7 @@ define zeroext i1 @ICmpSGE(i32 %x) nounwind uwtable readnone {
; CHECK-NOT: call void @__msan_warning
; CHECK: ret i1
-define zeroext i1 @ICmpSGT(i32 %x) nounwind uwtable readnone {
+define zeroext i1 @ICmpSGT(i32 %x) nounwind uwtable readnone sanitize_memory {
%1 = icmp sgt i32 0, %x
ret i1 %1
}
@@ -349,7 +349,7 @@ define zeroext i1 @ICmpSGT(i32 %x) nounwind uwtable readnone {
; CHECK-NOT: call void @__msan_warning
; CHECK: ret i1
-define zeroext i1 @ICmpSLE(i32 %x) nounwind uwtable readnone {
+define zeroext i1 @ICmpSLE(i32 %x) nounwind uwtable readnone sanitize_memory {
%1 = icmp sle i32 0, %x
ret i1 %1
}
@@ -365,7 +365,7 @@ define zeroext i1 @ICmpSLE(i32 %x) nounwind uwtable readnone {
; Check that we propagate shadow for x<0, x>=0, etc (i.e. sign bit tests)
; of the vector arguments.
-define <2 x i1> @ICmpSLT_vector(<2 x i32*> %x) nounwind uwtable readnone {
+define <2 x i1> @ICmpSLT_vector(<2 x i32*> %x) nounwind uwtable readnone sanitize_memory {
%1 = icmp slt <2 x i32*> %x, zeroinitializer
ret <2 x i1> %1
}
@@ -381,7 +381,7 @@ define <2 x i1> @ICmpSLT_vector(<2 x i32*> %x) nounwind uwtable readnone {
; Check that we propagate shadow for unsigned relational comparisons with
; constants
-define zeroext i1 @ICmpUGTConst(i32 %x) nounwind uwtable readnone {
+define zeroext i1 @ICmpUGTConst(i32 %x) nounwind uwtable readnone sanitize_memory {
entry:
%cmp = icmp ugt i32 %x, 7
ret i1 %cmp
@@ -400,7 +400,7 @@ entry:
; Check that loads of shadow have the same alignment as the original loads.
; Check that loads of origin have the alignment of max(4, original alignment).
-define i32 @ShadowLoadAlignmentLarge() nounwind uwtable {
+define i32 @ShadowLoadAlignmentLarge() nounwind uwtable sanitize_memory {
%y = alloca i32, align 64
%1 = load volatile i32* %y, align 64
ret i32 %1
@@ -411,7 +411,7 @@ define i32 @ShadowLoadAlignmentLarge() nounwind uwtable {
; CHECK: load volatile i32* {{.*}} align 64
; CHECK: ret i32
-define i32 @ShadowLoadAlignmentSmall() nounwind uwtable {
+define i32 @ShadowLoadAlignmentSmall() nounwind uwtable sanitize_memory {
%y = alloca i32, align 2
%1 = load volatile i32* %y, align 2
ret i32 %1
@@ -433,7 +433,7 @@ define i32 @ShadowLoadAlignmentSmall() nounwind uwtable {
; Check that the same bit manipulation is applied to the shadow values.
; Check that there is a zero test of the shadow of %idx argument, where present.
-define i32 @ExtractElement(<4 x i32> %vec, i32 %idx) {
+define i32 @ExtractElement(<4 x i32> %vec, i32 %idx) sanitize_memory {
%x = extractelement <4 x i32> %vec, i32 %idx
ret i32 %x
}
@@ -444,7 +444,7 @@ define i32 @ExtractElement(<4 x i32> %vec, i32 %idx) {
; CHECK: extractelement
; CHECK: ret i32
-define <4 x i32> @InsertElement(<4 x i32> %vec, i32 %idx, i32 %x) {
+define <4 x i32> @InsertElement(<4 x i32> %vec, i32 %idx, i32 %x) sanitize_memory {
%vec1 = insertelement <4 x i32> %vec, i32 %x, i32 %idx
ret <4 x i32> %vec1
}
@@ -455,7 +455,7 @@ define <4 x i32> @InsertElement(<4 x i32> %vec, i32 %idx, i32 %x) {
; CHECK: insertelement
; CHECK: ret <4 x i32>
-define <4 x i32> @ShuffleVector(<4 x i32> %vec, <4 x i32> %vec1) {
+define <4 x i32> @ShuffleVector(<4 x i32> %vec, <4 x i32> %vec1) sanitize_memory {
%vec2 = shufflevector <4 x i32> %vec, <4 x i32> %vec1,
<4 x i32> <i32 0, i32 4, i32 1, i32 5>
ret <4 x i32> %vec2
@@ -469,7 +469,7 @@ define <4 x i32> @ShuffleVector(<4 x i32> %vec, <4 x i32> %vec1) {
; Test bswap intrinsic instrumentation
-define i32 @BSwap(i32 %x) nounwind uwtable readnone {
+define i32 @BSwap(i32 %x) nounwind uwtable readnone sanitize_memory {
%y = tail call i32 @llvm.bswap.i32(i32 %x)
ret i32 %y
}
@@ -487,7 +487,7 @@ declare i32 @llvm.bswap.i32(i32) nounwind readnone
; Store intrinsic.
-define void @StoreIntrinsic(i8* %p, <4 x float> %x) nounwind uwtable {
+define void @StoreIntrinsic(i8* %p, <4 x float> %x) nounwind uwtable sanitize_memory {
call void @llvm.x86.sse.storeu.ps(i8* %p, <4 x float> %x)
ret void
}
@@ -504,7 +504,7 @@ declare void @llvm.x86.sse.storeu.ps(i8*, <4 x float>) nounwind
; Load intrinsic.
-define <16 x i8> @LoadIntrinsic(i8* %p) nounwind uwtable {
+define <16 x i8> @LoadIntrinsic(i8* %p) nounwind uwtable sanitize_memory {
%call = call <16 x i8> @llvm.x86.sse3.ldu.dq(i8* %p)
ret <16 x i8> %call
}
@@ -530,7 +530,7 @@ declare <16 x i8> @llvm.x86.sse3.ldu.dq(i8* %p) nounwind
; Check that shadow is OR'ed, and origin is Select'ed
; And no shadow checks!
-define <8 x i16> @Paddsw128(<8 x i16> %a, <8 x i16> %b) nounwind uwtable {
+define <8 x i16> @Paddsw128(<8 x i16> %a, <8 x i16> %b) nounwind uwtable sanitize_memory {
%call = call <8 x i16> @llvm.x86.sse2.padds.w(<8 x i16> %a, <8 x i16> %b)
ret <8 x i16> %call
}
@@ -559,7 +559,7 @@ declare <8 x i16> @llvm.x86.sse2.padds.w(<8 x i16> %a, <8 x i16> %b) nounwind
; Test handling of vectors of pointers.
; Check that shadow of such vector is a vector of integers.
-define <8 x i8*> @VectorOfPointers(<8 x i8*>* %p) nounwind uwtable {
+define <8 x i8*> @VectorOfPointers(<8 x i8*>* %p) nounwind uwtable sanitize_memory {
%x = load <8 x i8*>* %p
ret <8 x i8*> %x
}
@@ -574,7 +574,7 @@ define <8 x i8*> @VectorOfPointers(<8 x i8*>* %p) nounwind uwtable {
declare void @llvm.va_copy(i8*, i8*) nounwind
-define void @VACopy(i8* %p1, i8* %p2) nounwind uwtable {
+define void @VACopy(i8* %p1, i8* %p2) nounwind uwtable sanitize_memory {
call void @llvm.va_copy(i8* %p1, i8* %p2) nounwind
ret void
}
@@ -587,7 +587,7 @@ define void @VACopy(i8* %p1, i8* %p2) nounwind uwtable {
; Test handling of volatile stores.
; Check that MemorySanitizer does not add a check of the value being stored.
-define void @VolatileStore(i32* nocapture %p, i32 %x) nounwind uwtable {
+define void @VolatileStore(i32* nocapture %p, i32 %x) nounwind uwtable sanitize_memory {
entry:
store volatile i32 %x, i32* %p, align 4
ret void
@@ -596,3 +596,30 @@ entry:
; CHECK: @VolatileStore
; CHECK-NOT: @__msan_warning
; CHECK: ret void
+
+
+; Test that checks are omitted but shadow propagation is kept if the
+; sanitize_memory attribute is missing.
+
+define i32 @NoSanitizeMemory(i32 %x) uwtable {
+entry:
+ %tobool = icmp eq i32 %x, 0
+ br i1 %tobool, label %if.end, label %if.then
+
+if.then: ; preds = %entry
+ tail call void @bar()
+ br label %if.end
+
+if.end: ; preds = %entry, %if.then
+ ret i32 %x
+}
+
+declare void @bar()
+
+; CHECK: @NoSanitizeMemory
+; CHECK-NOT: @__msan_warning
+; CHECK: load i32* {{.*}} @__msan_param_tls
+; CHECK-NOT: @__msan_warning
+; CHECK: store {{.*}} @__msan_retval_tls
+; CHECK-NOT: @__msan_warning
+; CHECK: ret i32
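For readers following the CHECK lines above, here is a rough sketch of the shape of instrumentation they match for a sanitize_memory function. The names __msan_param_tls, __msan_retval_tls and __msan_warning are taken from the CHECK patterns; the exact types, TLS layout and instruction ordering below are illustrative, not the pass's actual output.

; Hypothetical sketch, not generated output.
@__msan_param_tls = external thread_local global [100 x i64]
@__msan_retval_tls = external thread_local global [100 x i64]

declare void @__msan_warning() nounwind

define i32 @SketchIdentity(i32 %x) sanitize_memory {
entry:
  ; read the shadow of the first argument from the parameter TLS area
  %arg_shadow_ptr = bitcast [100 x i64]* @__msan_param_tls to i32*
  %arg_shadow = load i32* %arg_shadow_ptr
  ; a shadow check branches on a non-zero shadow and reports via __msan_warning
  %poisoned = icmp ne i32 %arg_shadow, 0
  br i1 %poisoned, label %warn, label %cont

warn:                                             ; preds = %entry
  call void @__msan_warning() nounwind
  br label %cont

cont:                                             ; preds = %warn, %entry
  ; propagate the return value's shadow through the retval TLS slot
  %ret_shadow_ptr = bitcast [100 x i64]* @__msan_retval_tls to i32*
  store i32 %arg_shadow, i32* %ret_shadow_ptr
  ret i32 %x
}

; In a function without sanitize_memory (like @NoSanitizeMemory above), the
; branch to %warn is not emitted, but the TLS shadow load and store remain.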