Diffstat (limited to 'test/Transforms/SROA/basictest.ll')
 test/Transforms/SROA/basictest.ll | 155 +++++++++++++++++++++++++++++++++++++
 1 file changed, 155 insertions(+), 0 deletions(-)
diff --git a/test/Transforms/SROA/basictest.ll b/test/Transforms/SROA/basictest.ll
index dc2b165..e3f762a 100644
--- a/test/Transforms/SROA/basictest.ll
+++ b/test/Transforms/SROA/basictest.ll
@@ -1440,3 +1440,158 @@ entry:
ret void
}
+define float @test25() {
+; Check that we split up stores in order to promote the smaller SSA values. These
+; kinds of patterns can arise because LLVM maps small memcpys to integer loads
+; and stores. If we get a memcpy of an aggregate (such as C and C++ frontends
+; would produce, but so might any language frontend), this will in many cases
+; turn into an integer load and store. SROA needs to be extremely powerful to
+; correctly handle these cases and form splittable and promotable SSA values.
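+;
+; Illustrative sketch (editorial addition, not part of the original test): a
+; frontend compiling a C struct copy such as
+;   struct S { float a, b; } x, y;
+;   y = x;
+; will often lower the 8-byte copy to a single wide load/store pair, e.g.
+;   %v = load i64* %x.addr
+;   store i64 %v, i64* %y.addr
+; (%x.addr and %y.addr are hypothetical names). That is the pattern exercised
+; below. The CHECK constant 1065353216 is 0x3F800000, the IEEE-754 bit
+; pattern of float 1.0.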
+;
+; CHECK-LABEL: @test25(
+; CHECK-NOT: alloca
+; CHECK: %[[F1:.*]] = bitcast i32 0 to float
+; CHECK: %[[F2:.*]] = bitcast i32 1065353216 to float
+; CHECK: %[[SUM:.*]] = fadd float %[[F1]], %[[F2]]
+; CHECK: ret float %[[SUM]]
+
+entry:
+ %a = alloca i64
+ %b = alloca i64
+ %a.cast = bitcast i64* %a to [2 x float]*
+ %a.gep1 = getelementptr [2 x float]* %a.cast, i32 0, i32 0
+ %a.gep2 = getelementptr [2 x float]* %a.cast, i32 0, i32 1
+ %b.cast = bitcast i64* %b to [2 x float]*
+ %b.gep1 = getelementptr [2 x float]* %b.cast, i32 0, i32 0
+ %b.gep2 = getelementptr [2 x float]* %b.cast, i32 0, i32 1
+ store float 0.0, float* %a.gep1
+ store float 1.0, float* %a.gep2
+ %v = load i64* %a
+ store i64 %v, i64* %b
+ %f1 = load float* %b.gep1
+ %f2 = load float* %b.gep2
+ %ret = fadd float %f1, %f2
+ ret float %ret
+}
+
+@complex1 = external global [2 x float]
+@complex2 = external global [2 x float]
+
+define void @test26() {
+; Test a case of splitting up loads and stores against globals.
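+;
+; Editorial note on the expected rewrite (matching the CHECK lines below): the
+; wide i64 load of @complex1 is not itself a load of the alloca, so SROA splits
+; it by emitting two narrower i32 loads of the global directly, promotes the
+; float slices, and mirrors the same split for the final i64 store into
+; @complex2.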
+;
+; CHECK-LABEL: @test26(
+; CHECK-NOT: alloca
+; CHECK: %[[L1:.*]] = load i32* bitcast
+; CHECK: %[[L2:.*]] = load i32* bitcast
+; CHECK: %[[F1:.*]] = bitcast i32 %[[L1]] to float
+; CHECK: %[[F2:.*]] = bitcast i32 %[[L2]] to float
+; CHECK: %[[SUM:.*]] = fadd float %[[F1]], %[[F2]]
+; CHECK: %[[C1:.*]] = bitcast float %[[SUM]] to i32
+; CHECK: %[[C2:.*]] = bitcast float %[[SUM]] to i32
+; CHECK: store i32 %[[C1]], i32* bitcast
+; CHECK: store i32 %[[C2]], i32* bitcast
+; CHECK: ret void
+
+entry:
+ %a = alloca i64
+ %a.cast = bitcast i64* %a to [2 x float]*
+ %a.gep1 = getelementptr [2 x float]* %a.cast, i32 0, i32 0
+ %a.gep2 = getelementptr [2 x float]* %a.cast, i32 0, i32 1
+ %v1 = load i64* bitcast ([2 x float]* @complex1 to i64*)
+ store i64 %v1, i64* %a
+ %f1 = load float* %a.gep1
+ %f2 = load float* %a.gep2
+ %sum = fadd float %f1, %f2
+ store float %sum, float* %a.gep1
+ store float %sum, float* %a.gep2
+ %v2 = load i64* %a
+ store i64 %v2, i64* bitcast ([2 x float]* @complex2 to i64*)
+ ret void
+}
+
+define float @test27() {
+; Another, more complex case of splittable i64 loads and stores. This example
+; is particularly challenging because the load and store both point into the
+; alloca that SROA is processing, and they overlap, but at an offset.
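+;
+; Editorial byte map of %a, derived from the GEPs below:
+;   bytes 0-3  : %gep1 / %fptr1
+;   bytes 4-7  : %gep2 / %fptr2
+;   bytes 8-11 : %gep3 / %fptr3
+; The i64 load at %iptr1 covers bytes 0-7 while the i64 store at %iptr2
+; covers bytes 4-11, so the two wide accesses overlap on bytes 4-7, offset
+; by 4 bytes from one another.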
+;
+; CHECK-LABEL: @test27(
+; CHECK-NOT: alloca
+; CHECK: %[[F1:.*]] = bitcast i32 0 to float
+; CHECK: %[[F2:.*]] = bitcast i32 1065353216 to float
+; CHECK: %[[SUM:.*]] = fadd float %[[F1]], %[[F2]]
+; CHECK: ret float %[[SUM]]
+
+entry:
+ %a = alloca [12 x i8]
+ %gep1 = getelementptr [12 x i8]* %a, i32 0, i32 0
+ %gep2 = getelementptr [12 x i8]* %a, i32 0, i32 4
+ %gep3 = getelementptr [12 x i8]* %a, i32 0, i32 8
+ %iptr1 = bitcast i8* %gep1 to i64*
+ %iptr2 = bitcast i8* %gep2 to i64*
+ %fptr1 = bitcast i8* %gep1 to float*
+ %fptr2 = bitcast i8* %gep2 to float*
+ %fptr3 = bitcast i8* %gep3 to float*
+ store float 0.0, float* %fptr1
+ store float 1.0, float* %fptr2
+ %v = load i64* %iptr1
+ store i64 %v, i64* %iptr2
+ %f1 = load float* %fptr2
+ %f2 = load float* %fptr3
+ %ret = fadd float %f1, %f2
+ ret float %ret
+}
+
+define i32 @PR22093() {
+; Test that we don't try to pre-split a splittable store of a splittable but
+; not pre-splittable load over the same alloca. We "handle" this case when the
+; load is unsplittable but unrelated to this alloca by just generating extra
+; loads without touching the original, but when the original load comes out of
+; this alloca we need to handle it specially to ensure the splits line up
+; properly for rewriting.
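+;
+; Editorial note, derived from the CHECK lines: after SROA the only alloca
+; left is an i16 backing the bytes hit by the volatile store; the full-width
+; i32 load and store of %a are rewritten away rather than pre-split.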
+;
+; CHECK-LABEL: @PR22093(
+; CHECK-NOT: alloca
+; CHECK: alloca i16
+; CHECK-NOT: alloca
+; CHECK: store volatile i16
+
+entry:
+ %a = alloca i32
+ %a.cast = bitcast i32* %a to i16*
+ store volatile i16 42, i16* %a.cast
+ %load = load i32* %a
+ store i32 %load, i32* %a
+ ret i32 %load
+}
+
+define void @PR22093.2() {
+; Another way that we end up unable to split a particular set of loads and
+; stores; this one even depends on ordering. Here we have a load which is
+; pre-splittable by itself, and the first store is compatible with it. But the
+; second store of the load makes the load unsplittable because of a mismatch
+; of splits. Because the load is now unsplittable, we also have to go back and
+; remove the first store from the pre-split candidates, as its load won't be
+; pre-split.
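+;
+; Editorial byte map, derived from the code below: the volatile i16 store
+; covers bytes 0-1 of the i64 alloca and the volatile i8 store covers byte 4.
+; The i32 %load reads bytes 0-3 and is stored both to bytes 0-3 (via
+; %a.cast1) and to bytes 4-7 (via %a.gep1); the two destinations would split
+; the load at different offsets, which is the mismatch described above. Per
+; the CHECK lines, only an i16 and an i8 alloca survive.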
+;
+; CHECK-LABEL: @PR22093.2(
+; CHECK-NOT: alloca
+; CHECK: alloca i16
+; CHECK-NEXT: alloca i8
+; CHECK-NOT: alloca
+; CHECK: store volatile i16
+; CHECK: store volatile i8
+
+entry:
+ %a = alloca i64
+ %a.cast1 = bitcast i64* %a to i32*
+ %a.cast2 = bitcast i64* %a to i16*
+ store volatile i16 42, i16* %a.cast2
+ %load = load i32* %a.cast1
+ store i32 %load, i32* %a.cast1
+ %a.gep1 = getelementptr i32* %a.cast1, i32 1
+ %a.cast3 = bitcast i32* %a.gep1 to i8*
+ store volatile i8 13, i8* %a.cast3
+ store i32 %load, i32* %a.gep1
+ ret void
+}