author    | Andrew Trick <atrick@apple.com> | 2011-08-06 07:00:37 +0000
committer | Andrew Trick <atrick@apple.com> | 2011-08-06 07:00:37 +0000
commit    | 06988bcf6a5c74e81cf7e76f06a686aa822ec00a (patch)
tree      | ff0b2f17005dae608bc8820f0a33b32e716d77d0 /test
parent    | ccfa446450c9e3e0b3591343c4c5bea1e4cdc043 (diff)
Made SCEV's UDiv expressions more canonical. When dividing a
recurrence, the initial value's low bits can sometimes be ignored.
To take advantage of this, added FoldIVUser to IndVarSimplify to fold
an IV operand into a udiv/lshr when the operator doesn't affect the
result.
-indvars -disable-iv-rewrite now transforms:
i = phi i4
i1 = i0 + 1
idx = i1 >> (2 or more)
i4 = i + 4
into:
i = phi i4
idx = i0 >> ...
i4 = i + 4
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@137013 91177308-0d34-0410-b5e6-96231b3b80d8
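The fold is plain carry arithmetic: in @foldIncShr below, the IV recurrence is {0,+,2}, so the shifted value is always even, adding 1 to an even value only sets bit 0 and never carries into the higher bits, and (x + 1) >> 5 == x >> 5 for every value the IV takes. A minimal C check of both the legal case (step 2, where 2^5 is a multiple of the step) and the illegal one (step 3, as in @noFoldIncShr); the names here are illustrative, not from the patch:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

int main(void) {
  /* Legal fold: recurrence {0,+,2}, shift by 5. 2^5 % 2 == 0 and the
     IV is always even, so x + 1 == x | 1 and the shifted result is
     unchanged. */
  for (uint32_t x = 0; x < 128; x += 2)
    assert((x + 1) >> 5 == x >> 5);

  /* Illegal fold: recurrence {0,+,3}, shift by 5. 2^5 % 3 != 0 and the
     identity breaks, e.g. x = 63: (63 + 1) >> 5 is 2 but 63 >> 5 is 1. */
  unsigned broken = 0;
  for (uint32_t x = 0; x < 96; x += 3)
    if ((x + 1) >> 5 != x >> 5)
      broken++;
  printf("step-3 values where the fold would be wrong: %u\n", broken);
  return 0;
}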
Diffstat (limited to 'test')
-rw-r--r-- | test/Transforms/IndVarSimplify/iv-fold.ll | 56
1 file changed, 56 insertions, 0 deletions
diff --git a/test/Transforms/IndVarSimplify/iv-fold.ll b/test/Transforms/IndVarSimplify/iv-fold.ll
new file mode 100644
index 0000000..7e11cdf
--- /dev/null
+++ b/test/Transforms/IndVarSimplify/iv-fold.ll
@@ -0,0 +1,56 @@
+; RUN: opt < %s -indvars -disable-iv-rewrite -S | FileCheck %s
+
+target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n:32:64"
+
+; Indvars should be able to fold IV increments into shr when low bits are zero.
+;
+; CHECK: @foldIncShr
+; CHECK: shr.1 = lshr i32 %0, 5
+define i32 @foldIncShr(i32* %bitmap, i32 %bit_addr, i32 %nbits) nounwind {
+entry:
+  br label %while.body
+
+while.body:
+  %0 = phi i32 [ 0, %entry ], [ %inc.2, %while.body ]
+  %shr = lshr i32 %0, 5
+  %arrayidx = getelementptr inbounds i32* %bitmap, i32 %shr
+  %tmp6 = load i32* %arrayidx, align 4
+  %inc.1 = add i32 %0, 1
+  %shr.1 = lshr i32 %inc.1, 5
+  %arrayidx.1 = getelementptr inbounds i32* %bitmap, i32 %shr.1
+  %tmp6.1 = load i32* %arrayidx.1, align 4
+  %inc.2 = add i32 %inc.1, 1
+  %exitcond.3 = icmp eq i32 %inc.2, 128
+  br i1 %exitcond.3, label %while.end, label %while.body
+
+while.end:
+  %r = add i32 %tmp6, %tmp6.1
+  ret i32 %r
+}
+
+; Indvars should not fold an increment into shr unless 2^shiftBits is
+; a multiple of the recurrence step.
+;
+; CHECK: @noFoldIncShr
+; CHECK: shr.1 = lshr i32 %inc.1, 5
+define i32 @noFoldIncShr(i32* %bitmap, i32 %bit_addr, i32 %nbits) nounwind {
+entry:
+  br label %while.body
+
+while.body:
+  %0 = phi i32 [ 0, %entry ], [ %inc.3, %while.body ]
+  %shr = lshr i32 %0, 5
+  %arrayidx = getelementptr inbounds i32* %bitmap, i32 %shr
+  %tmp6 = load i32* %arrayidx, align 4
+  %inc.1 = add i32 %0, 1
+  %shr.1 = lshr i32 %inc.1, 5
+  %arrayidx.1 = getelementptr inbounds i32* %bitmap, i32 %shr.1
+  %tmp6.1 = load i32* %arrayidx.1, align 4
+  %inc.3 = add i32 %inc.1, 2
+  %exitcond.3 = icmp eq i32 %inc.3, 96
+  br i1 %exitcond.3, label %while.end, label %while.body
+
+while.end:
+  %r = add i32 %tmp6, %tmp6.1
+  ret i32 %r
+}
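For context, the tested IR is the shape a partially unrolled bitset scan lowers to. A plausible C source form (an assumption for illustration, not code from the patch) where both unrolled loads share one word index after the fold:

#include <stdint.h>

/* Hypothetical source shape: bitmap[i >> 5] selects the 32-bit word
   holding bit i. With an even i, (i + 1) >> 5 equals i >> 5, so after
   the fold both loads use the same shift of the IV. */
uint32_t scanWords(const uint32_t *bitmap) {
  uint32_t a = 0, b = 0;
  for (uint32_t i = 0; i != 128; i += 2) {
    a = bitmap[i >> 5];       /* %shr   = lshr i32 %0, 5        */
    b = bitmap[(i + 1) >> 5]; /* %shr.1 folds to the same shift */
  }
  return a + b;
}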