diff options
author | Evan Cheng <evan.cheng@apple.com> | 2009-11-12 07:35:05 +0000 |
---|---|---|
committer | Evan Cheng <evan.cheng@apple.com> | 2009-11-12 07:35:05 +0000 |
commit | 586f69a11881d828c056ce017b3fb432341d9657 (patch) | |
tree | caf876c0d7890ff0b865674caac6d2118ff94046 /test/CodeGen | |
parent | b9d2c03d200bea99470766b0fb53dd07e11b086a (diff) | |
download | external_llvm-586f69a11881d828c056ce017b3fb432341d9657.zip external_llvm-586f69a11881d828c056ce017b3fb432341d9657.tar.gz external_llvm-586f69a11881d828c056ce017b3fb432341d9657.tar.bz2 |
- Teach LSR to avoid changing cmp iv stride if it will create an immediate that
cannot be folded into target cmp instruction.
- Avoid a phase ordering issue where early cmp optimization would prevent the
later count-to-zero optimization.
- Add missing checks to prevent LSR from reusing a stride that does not have
users.
- Fix a bug in count-to-zero optimization code which failed to find the pre-inc
iv's phi node.
- Remove, tighten, or loosen some incorrect checks that disabled valid transformations.
- Quite a bit of code clean up.
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@86969 91177308-0d34-0410-b5e6-96231b3b80d8
Diffstat (limited to 'test/CodeGen')
-rw-r--r-- | test/CodeGen/Thumb2/lsr-deficiency.ll | 37 |
1 files changed, 37 insertions, 0 deletions
diff --git a/test/CodeGen/Thumb2/lsr-deficiency.ll b/test/CodeGen/Thumb2/lsr-deficiency.ll new file mode 100644 index 0000000..7b1b57a --- /dev/null +++ b/test/CodeGen/Thumb2/lsr-deficiency.ll @@ -0,0 +1,37 @@ +; RUN: llc < %s -mtriple=thumbv7-apple-darwin10 -relocation-model=pic | FileCheck %s +; rdar://7387640 + +; FIXME: We still need to rewrite array reference iv of stride -4 with loop +; count iv of stride -1. + +@G = external global i32 ; <i32*> [#uses=2] +@array = external global i32* ; <i32**> [#uses=1] + +define arm_apcscc void @t() nounwind optsize { +; CHECK: t: +; CHECK: mov.w r2, #4000 +; CHECK: movw r3, #1001 +entry: + %.pre = load i32* @G, align 4 ; <i32> [#uses=1] + br label %bb + +bb: ; preds = %bb, %entry +; CHECK: LBB1_1: +; CHECK: subs r3, #1 +; CHECK: cmp r3, #0 +; CHECK: sub.w r2, r2, #4 + %0 = phi i32 [ %.pre, %entry ], [ %3, %bb ] ; <i32> [#uses=1] + %indvar = phi i32 [ 0, %entry ], [ %indvar.next, %bb ] ; <i32> [#uses=2] + %tmp5 = sub i32 1000, %indvar ; <i32> [#uses=1] + %1 = load i32** @array, align 4 ; <i32*> [#uses=1] + %scevgep = getelementptr i32* %1, i32 %tmp5 ; <i32*> [#uses=1] + %2 = load i32* %scevgep, align 4 ; <i32> [#uses=1] + %3 = add nsw i32 %2, %0 ; <i32> [#uses=2] + store i32 %3, i32* @G, align 4 + %indvar.next = add i32 %indvar, 1 ; <i32> [#uses=2] + %exitcond = icmp eq i32 %indvar.next, 1001 ; <i1> [#uses=1] + br i1 %exitcond, label %return, label %bb + +return: ; preds = %bb + ret void +} |