author     Eric Christopher <echristo@apple.com>   2011-03-12 01:09:29 +0000
committer  Eric Christopher <echristo@apple.com>   2011-03-12 01:09:29 +0000
commit     af3dce51494b366024958b6dc9a15b95cb03011f (patch)
tree       216866c8203d89fc3eb3dab436c76f48a6a13be2 /test/CodeGen
parent     7d3a16a6f8a696a8d73fe369f5a0f57c9f3f9597 (diff)
Sometimes isPredicable lies to us and tells us we don't need the operands.
Go ahead and add them when we might want to use them, and let later passes remove them.

Fixes rdar://9118569

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@127518 91177308-0d34-0410-b5e6-96231b3b80d8
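Note: the code change itself lives under lib/Target/ARM (ARM fast-isel) and is outside this test/CodeGen-limited view. The sketch below only illustrates the idea in the message, not the actual patch; the helper names AddPredicateIfUseful and hasPredicateOperandSlots are hypothetical, and it assumes the LLVM APIs of this era (TargetInstrDesc, MachineInstrBuilder's implicit MachineInstr* conversion, and the ARM backend's AddDefaultPred helper).

// Illustrative sketch only, not the actual change from r127518.
#include "ARMBaseInstrInfo.h"                 // AddDefaultPred(); ARM backend-local header
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/Target/TargetInstrDesc.h"
#include "llvm/Target/TargetInstrInfo.h"

using namespace llvm;

// Does the instruction description reserve predicate operand slots, even when
// TargetInstrInfo::isPredicable() claims the instruction is not predicable?
static bool hasPredicateOperandSlots(const MachineInstr *MI) {
  const TargetInstrDesc &Desc = MI->getDesc();
  for (unsigned i = 0, e = Desc.getNumOperands(); i != e; ++i)
    if (Desc.OpInfo[i].isPredicate())
      return true;
  return false;
}

// Append the default (AL, always-execute) predicate whenever the instruction
// can carry one; later passes are free to strip it if it is never used.
static const MachineInstrBuilder &
AddPredicateIfUseful(const MachineInstrBuilder &MIB, const TargetInstrInfo &TII) {
  MachineInstr *MI = MIB;   // MachineInstrBuilder converts to MachineInstr*
  if (TII.isPredicable(MI) || hasPredicateOperandSlots(MI))
    AddDefaultPred(MIB);    // adds ARMCC::AL plus a zero condition-code register
  return MIB;
}

In the test below, the <4 x i32> to <4 x float> sitofp in @__aa is what fast-isel lowers to a NEON VCVT on armv7, which appears to be the kind of ARM-mode instruction the message describes: not reported as predicable, yet still carrying predicate operand slots in its description.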
Diffstat (limited to 'test/CodeGen')
-rw-r--r--  test/CodeGen/ARM/fast-isel-pred.ll  60
1 file changed, 60 insertions, 0 deletions
diff --git a/test/CodeGen/ARM/fast-isel-pred.ll b/test/CodeGen/ARM/fast-isel-pred.ll
new file mode 100644
index 0000000..ef43654
--- /dev/null
+++ b/test/CodeGen/ARM/fast-isel-pred.ll
@@ -0,0 +1,60 @@
+; ModuleID = 'test.c'
+target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:32-f32:32:32-f64:32:32-v64:64:64-v128:128:128-a0:0:64-n32"
+target triple = "armv7-apple-darwin"
+
+define i32 @main() nounwind ssp {
+entry:
+ %retval = alloca i32, align 4
+ %X = alloca <4 x i32>, align 16
+ %Y = alloca <4 x float>, align 16
+ store i32 0, i32* %retval
+ %tmp = load <4 x i32>* %X, align 16
+ call void @__aa(<4 x i32> %tmp, i8* null, i32 3, <4 x float>* %Y)
+ %0 = load i32* %retval
+ ret i32 %0
+}
+
+define internal void @__aa(<4 x i32> %v, i8* %p, i32 %offset, <4 x float>* %constants) nounwind inlinehint ssp {
+entry:
+ %__a.addr.i = alloca <4 x i32>, align 16
+ %v.addr = alloca <4 x i32>, align 16
+ %p.addr = alloca i8*, align 4
+ %offset.addr = alloca i32, align 4
+ %constants.addr = alloca <4 x float>*, align 4
+ store <4 x i32> %v, <4 x i32>* %v.addr, align 16
+ store i8* %p, i8** %p.addr, align 4
+ store i32 %offset, i32* %offset.addr, align 4
+ store <4 x float>* %constants, <4 x float>** %constants.addr, align 4
+ %tmp = load <4 x i32>* %v.addr, align 16
+ store <4 x i32> %tmp, <4 x i32>* %__a.addr.i, align 16
+ %tmp.i = load <4 x i32>* %__a.addr.i, align 16
+ %0 = bitcast <4 x i32> %tmp.i to <16 x i8>
+ %1 = bitcast <16 x i8> %0 to <4 x i32>
+ %vcvt.i = sitofp <4 x i32> %1 to <4 x float>
+ %tmp1 = load i8** %p.addr, align 4
+ %tmp2 = load i32* %offset.addr, align 4
+ %tmp3 = load <4 x float>** %constants.addr, align 4
+ call void @__bb(<4 x float> %vcvt.i, i8* %tmp1, i32 %tmp2, <4 x float>* %tmp3)
+ ret void
+}
+
+define internal void @__bb(<4 x float> %v, i8* %p, i32 %offset, <4 x float>* %constants) nounwind inlinehint ssp {
+entry:
+ %v.addr = alloca <4 x float>, align 16
+ %p.addr = alloca i8*, align 4
+ %offset.addr = alloca i32, align 4
+ %constants.addr = alloca <4 x float>*, align 4
+ %data = alloca i64, align 4
+ store <4 x float> %v, <4 x float>* %v.addr, align 16
+ store i8* %p, i8** %p.addr, align 4
+ store i32 %offset, i32* %offset.addr, align 4
+ store <4 x float>* %constants, <4 x float>** %constants.addr, align 4
+ %tmp = load i64* %data, align 4
+ %tmp1 = load i8** %p.addr, align 4
+ %tmp2 = load i32* %offset.addr, align 4
+ %add.ptr = getelementptr i8* %tmp1, i32 %tmp2
+ %0 = bitcast i8* %add.ptr to i64*
+ %arrayidx = getelementptr inbounds i64* %0, i32 0
+ store i64 %tmp, i64* %arrayidx
+ ret void
+}