path: root/test/Transforms/SROA/alignment.ll
author    Chandler Carruth <chandlerc@gmail.com>  2012-10-03 08:14:02 +0000
committer Chandler Carruth <chandlerc@gmail.com>  2012-10-03 08:14:02 +0000
commit    f710fb14ee951fe533b94eb1046d422e94d4f1f3 (patch)
tree      be17660b119386bc889a3a1ecdf0f0fca8f7a16e /test/Transforms/SROA/alignment.ll
parent    76ad43c6e1619ed4c087b8ccb2cd573eb9d7093e (diff)
Try to use a better set of abstractions for computing the alignment
necessary during rewriting.

As part of this, fix a real think-o here where we might have left off an
alignment specification when the address is in fact underaligned. I haven't
come up with any way to trigger this, as there is always some other factor
that reduces the alignment, but it certainly might have been an observable
bug in some way I can't think of.

This also slightly changes the strategy for placing explicit alignments on
loads and stores to only do so when the alignment does not match that
required by the ABI. This causes a few redundant alignments to go away from
test cases.

I've also added a couple of tests that really push on the alignment that we
end up with on loads and stores. More to come here as I try to fix an
underlying bug I have conjectured and produced test cases for, although it's
not clear if this bug is the one currently hitting dragonegg's gcc47
bootstrap.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@165100 91177308-0d34-0410-b5e6-96231b3b80d8
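To make the ABI-alignment rule above concrete, here is a small hedged sketch (not part of the commit) in the same 2012-era IR syntax as the tests below; the before/after shape illustrates what @test6 in the diff checks for, assuming a target where double has 8-byte ABI alignment:

    ; Before SROA: the double lives in a byte array, so the store must carry
    ; an explicit "align 1" to record that the address is underaligned.
    %a = alloca [16 x i8]
    %raw1 = getelementptr inbounds [16 x i8]* %a, i32 0, i32 0
    %ptr1 = bitcast i8* %raw1 to double*
    store volatile double 0.0, double* %ptr1, align 1

    ; After rewriting (expected shape, not verbatim pass output): the slice is
    ; promoted to its own "alloca double", whose natural alignment already
    ; satisfies the ABI requirement, so no explicit "align" is emitted.
    %a.0 = alloca double
    store volatile double 0.0, double* %a.0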
Diffstat (limited to 'test/Transforms/SROA/alignment.ll')
-rw-r--r--  test/Transforms/SROA/alignment.ll  63
1 file changed, 59 insertions, 4 deletions
diff --git a/test/Transforms/SROA/alignment.ll b/test/Transforms/SROA/alignment.ll
index 4f35a8a..f8f3270 100644
--- a/test/Transforms/SROA/alignment.ll
+++ b/test/Transforms/SROA/alignment.ll
@@ -31,8 +31,8 @@ entry:
define void @test2() {
; CHECK: @test2
; CHECK: alloca i16
-; CHECK: load i8* %{{.*}}, align 1
-; CHECK: store i8 42, i8* %{{.*}}, align 1
+; CHECK: load i8* %{{.*}}
+; CHECK: store i8 42, i8* %{{.*}}
; CHECK: ret void
entry:
@@ -41,8 +41,8 @@ entry:
%cast1 = bitcast i8* %gep1 to i16*
store volatile i16 0, i16* %cast1
%gep2 = getelementptr { i8, i8, i8, i8 }* %a, i32 0, i32 2
- %result = load i8* %gep2, align 2
- store i8 42, i8* %gep2, align 2
+ %result = load i8* %gep2
+ store i8 42, i8* %gep2
ret void
}
@@ -114,3 +114,58 @@ entry:
ret void
}
+
+define void @test5() {
+; Test that we preserve underaligned loads and stores when splitting.
+; CHECK: @test5
+; CHECK: alloca [9 x i8]
+; CHECK: alloca [9 x i8]
+; CHECK: store volatile double 0.0{{.*}}, double* %{{.*}}, align 1
+; CHECK: load i16* %{{.*}}, align 1
+; CHECK: load double* %{{.*}}, align 1
+; CHECK: store volatile double %{{.*}}, double* %{{.*}}, align 1
+; CHECK: load i16* %{{.*}}, align 1
+; CHECK: ret void
+
+entry:
+ %a = alloca [18 x i8]
+ %raw1 = getelementptr inbounds [18 x i8]* %a, i32 0, i32 0
+ %ptr1 = bitcast i8* %raw1 to double*
+ store volatile double 0.0, double* %ptr1, align 1
+ %weird_gep1 = getelementptr inbounds [18 x i8]* %a, i32 0, i32 7
+ %weird_cast1 = bitcast i8* %weird_gep1 to i16*
+ %weird_load1 = load i16* %weird_cast1, align 1
+
+ %raw2 = getelementptr inbounds [18 x i8]* %a, i32 0, i32 9
+ %ptr2 = bitcast i8* %raw2 to double*
+ %d1 = load double* %ptr1, align 1
+ store volatile double %d1, double* %ptr2, align 1
+ %weird_gep2 = getelementptr inbounds [18 x i8]* %a, i32 0, i32 16
+ %weird_cast2 = bitcast i8* %weird_gep2 to i16*
+ %weird_load2 = load i16* %weird_cast2, align 1
+
+ ret void
+}
+
+define void @test6() {
+; Test that we promote alignment when the underlying alloca switches to one
+; that innately provides it.
+; CHECK: @test6
+; CHECK: alloca double
+; CHECK: alloca double
+; CHECK-NOT: align
+; CHECK: ret void
+
+entry:
+ %a = alloca [16 x i8]
+ %raw1 = getelementptr inbounds [16 x i8]* %a, i32 0, i32 0
+ %ptr1 = bitcast i8* %raw1 to double*
+ store volatile double 0.0, double* %ptr1, align 1
+
+ %raw2 = getelementptr inbounds [16 x i8]* %a, i32 0, i32 8
+ %ptr2 = bitcast i8* %raw2 to double*
+ %val = load double* %ptr1, align 1
+ store volatile double %val, double* %ptr2, align 1
+
+ ret void
+}