author     Dan Gohman <gohman@apple.com>    2008-04-10 18:43:06 +0000
committer  Dan Gohman <gohman@apple.com>    2008-04-10 18:43:06 +0000
commit     eee962e1cebb1b70ccd3f5d35f3a5a8c9ba942b5 (patch)
tree       d90f6ccf109abe8b81158ffe74a3dfae18b24235 /test/Transforms/InstCombine/align-addr.ll
parent     172b70c62a315de654a4d1683dbb5544e67afa90 (diff)
Teach InstCombine's ComputeMaskedBits to handle pointer expressions
in addition to integer expressions.

Rewrite GetOrEnforceKnownAlignment as a ComputeMaskedBits problem, moving
all of its special alignment knowledge to ComputeMaskedBits as low-zero-bits
knowledge. Also, teach ComputeMaskedBits a few basic things about Mul and
PHI instructions.

This improves ComputeMaskedBits-based simplifications in a few cases, but
more noticeably it significantly improves instcombine's alignment detection
for loads, stores, and memory intrinsics.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@49492 91177308-0d34-0410-b5e6-96231b3b80d8
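For readers unfamiliar with the approach, here is a minimal standalone C++ sketch
of the idea the commit message describes: alignment detection phrased as a
known-low-zero-bits problem, with simple rules for And, Mul, and Add/PHI. The
names and helpers below (LowZeroBits, knownLowZerosAnd, and so on) are
illustrative assumptions only, not the ComputeMaskedBits interfaces actually
touched by this commit.

    // Illustrative sketch, not the LLVM code changed by this commit.
    // If the low K bits of an address are known to be zero, the pointer is
    // at least 2^K-byte aligned, so alignment detection reduces to tracking
    // known low zero bits through the address computation.
    #include <algorithm>
    #include <cstdint>

    // Number of low bits known to be zero in a value.
    using LowZeroBits = unsigned;

    // and X, C: the result is zero wherever the constant C is zero, so it has
    // at least as many low zero bits as C (e.g. C = -16 forces the low 4 bits
    // of the result to zero).
    LowZeroBits knownLowZerosAnd(LowZeroBits X, uint64_t C) {
      unsigned FromMask = __builtin_ctzll(C); // GCC/Clang builtin: trailing zeros
      return std::max(X, FromMask);
    }

    // mul X, Y: trailing zero counts add, since X*Y is divisible by
    // 2^(tz(X) + tz(Y)).
    LowZeroBits knownLowZerosMul(LowZeroBits X, LowZeroBits Y) {
      return X + Y;
    }

    // add X, Y (and likewise a phi of X and Y): only bits known zero in both
    // operands are guaranteed zero in the result.
    LowZeroBits knownLowZerosAddOrPhi(LowZeroBits X, LowZeroBits Y) {
      return std::min(X, Y);
    }

    // Alignment implied by the known low zero bits of an address.
    uint64_t knownAlignment(LowZeroBits Addr) { return uint64_t(1) << Addr; }

    int main() {
      // (X & -16) has at least 4 known low zero bits, i.e. 16-byte alignment,
      // regardless of what is known about X.
      LowZeroBits Base = knownLowZerosAnd(/*nothing known about X*/ 0, uint64_t(-16));
      return knownAlignment(Base) >= 16 ? 0 : 1;
    }

Per the commit message, the actual patch adds this kind of low-zero-bits
reasoning (including the Mul and PHI rules) to ComputeMaskedBits and
reimplements GetOrEnforceKnownAlignment on top of it.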
Diffstat (limited to 'test/Transforms/InstCombine/align-addr.ll')
-rw-r--r--  test/Transforms/InstCombine/align-addr.ll | 30
1 file changed, 30 insertions, 0 deletions
diff --git a/test/Transforms/InstCombine/align-addr.ll b/test/Transforms/InstCombine/align-addr.ll
new file mode 100644
index 0000000..a05c513
--- /dev/null
+++ b/test/Transforms/InstCombine/align-addr.ll
@@ -0,0 +1,30 @@
+; RUN: llvm-as < %s | opt -instcombine | llvm-dis | grep {align 16} | count 1
+
+; Instcombine should be able to prove vector alignment in the
+; presence of a few mild address computation tricks.
+
+define void @foo(i8* %b, i64 %n, i64 %u, i64 %y) nounwind {
+entry:
+ %c = ptrtoint i8* %b to i64
+ %d = and i64 %c, -16
+ %e = inttoptr i64 %d to double*
+ %v = mul i64 %u, 2
+ %z = and i64 %y, -2
+ %t1421 = icmp eq i64 %n, 0
+ br i1 %t1421, label %return, label %bb
+
+bb:
+ %i = phi i64 [ %indvar.next, %bb ], [ 20, %entry ]
+ %j = mul i64 %i, %v
+ %h = add i64 %j, %z
+ %t8 = getelementptr double* %e, i64 %h
+ %p = bitcast double* %t8 to <2 x double>*
+ store <2 x double><double 0.0, double 0.0>, <2 x double>* %p, align 8
+ %indvar.next = add i64 %i, 1
+ %exitcond = icmp eq i64 %indvar.next, %n
+ br i1 %exitcond, label %return, label %bb
+
+return:
+ ret void
+}
+
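One way to see why the single "align 16" that the RUN line greps for is
justified (an informal walk-through of the test above, not tool output):

    %d = and i64 %c, -16                 clears the low 4 bits, so the base %e is 16-byte aligned
    %v = mul i64 %u, 2                   is even, so %j = mul i64 %i, %v is even
    %z = and i64 %y, -2                  is even, so %h = add i64 %j, %z is even
    getelementptr double* %e, i64 %h     scales %h by sizeof(double) = 8, giving a byte offset divisible by 16

A 16-byte-aligned base plus a multiple-of-16 byte offset is 16-byte aligned,
so instcombine can raise the store's "align 8" to "align 16", which is exactly
what the RUN line checks with grep {align 16} | count 1.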