author     Arnold Schwaighofer <aschwaighofer@apple.com>   2013-06-24 12:09:15 +0000
committer  Arnold Schwaighofer <aschwaighofer@apple.com>   2013-06-24 12:09:15 +0000
commit     bc7c58d2b1a04514ee19f59a58e0027798e59d56 (patch)
tree       65303c2d338b69d52086c781ee790aeb1fa19ac0 /test/Transforms
parent     7e96b4dfce63f967f150c617f0a69ded9f1f7416 (diff)
Reapply 184685 after the SetVector iteration order fix.
This should fix the stage2/stage3 miscompare on the dragonegg testers.
"LoopVectorize: Use the dependence test utility class
We no longer need alias analysis - the cases that alias analysis would have
handled are now treated as accesses with a large dependence distance.
We can now vectorize loops with simple constant dependence distances.
for (i = 8; i < 256; ++i) {
  a[i] = a[i+4] * a[i+8];
}

for (i = 8; i < 256; ++i) {
  a[i] = a[i-4] * a[i-8];
}
We can now vectorize about 200 more loops in the test suite (though in many
cases the cost model instructs us not to). Results on x86-64 are a wash.
I have seen one degradation in ammp. Interestingly, the function in which we
now vectorize a loop is never executed, so we are probably seeing instruction
cache effects. There is a 2% improvement in h264ref, and one or another TSVC
loop kernel speeds up.
radar://13681598"
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@184724 91177308-0d34-0410-b5e6-96231b3b80d8
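
Editor's note: as I read the new memdep.ll tests, the rule being exercised is
that a true (write-then-read) loop-carried dependence of constant distance d
permits vectorization with any vector width up to d. A minimal C sketch of the
width-2-but-not-width-4 case follows; it is a hypothetical standalone example
written for this note (mirroring the f3_vec_len test in the diff below), not
part of the commit itself:

    /* Dependence distance 2: the store to A[i+2] is read back two
       iterations later as A[i].  A width-2 vector iteration loads
       A[i..i+1] and stores A[i+2..i+3], so each block reads only
       elements the previous block already wrote - legal.  A width-4
       block would load A[i+2..i+3] before the lanes that store them
       execute, breaking the dependence.  The test pins this down with
       -force-vector-width=2 (vectorizes) vs. =4 (must not). */
    void distance2(int *A) {
      for (int i = 0; i < 1024; ++i)
        A[i + 2] = A[i] + 1;
    }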
Diffstat (limited to 'test/Transforms')
-rw-r--r--  test/Transforms/LoopVectorize/12-12-11-if-conv.ll |   2
-rw-r--r--  test/Transforms/LoopVectorize/memdep.ll           | 222
-rw-r--r--  test/Transforms/LoopVectorize/runtime-check.ll    |   2
3 files changed, 224 insertions, 2 deletions
diff --git a/test/Transforms/LoopVectorize/12-12-11-if-conv.ll b/test/Transforms/LoopVectorize/12-12-11-if-conv.ll
index bab6300..6ef1010 100644
--- a/test/Transforms/LoopVectorize/12-12-11-if-conv.ll
+++ b/test/Transforms/LoopVectorize/12-12-11-if-conv.ll
@@ -30,7 +30,7 @@ if.then: ; preds = %for.body
 if.end: ; preds = %for.body, %if.then
   %z.0 = phi i32 [ %add1, %if.then ], [ 9, %for.body ]
   store i32 %z.0, i32* %arrayidx, align 4
-  %indvars.iv.next = add i64 %indvars.iv, 1
+  %indvars.iv.next = add nsw i64 %indvars.iv, 1
   %lftr.wideiv = trunc i64 %indvars.iv.next to i32
   %exitcond = icmp eq i32 %lftr.wideiv, %x
   br i1 %exitcond, label %for.end, label %for.body
diff --git a/test/Transforms/LoopVectorize/memdep.ll b/test/Transforms/LoopVectorize/memdep.ll
new file mode 100644
index 0000000..56e86a4
--- /dev/null
+++ b/test/Transforms/LoopVectorize/memdep.ll
@@ -0,0 +1,222 @@
+; RUN: opt < %s -loop-vectorize -force-vector-width=2 -force-vector-unroll=1 -S | FileCheck %s
+; RUN: opt < %s -loop-vectorize -force-vector-width=4 -force-vector-unroll=1 -S | FileCheck %s -check-prefix=WIDTH
+
+target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
+
+; Vectorization with dependence checks.
+
+; No plausible dependence - can be vectorized.
+; for (i = 0; i < 1024; ++i)
+;   A[i] = A[i + 1] + 1;
+
+; CHECK: f1_vec
+; CHECK: <2 x i32>
+
+define void @f1_vec(i32* %A) {
+entry:
+  br label %for.body
+
+for.body:
+  %indvars.iv = phi i32 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
+  %indvars.iv.next = add i32 %indvars.iv, 1
+  %arrayidx = getelementptr inbounds i32* %A, i32 %indvars.iv.next
+  %0 = load i32* %arrayidx, align 4
+  %add1 = add nsw i32 %0, 1
+  %arrayidx3 = getelementptr inbounds i32* %A, i32 %indvars.iv
+  store i32 %add1, i32* %arrayidx3, align 4
+  %exitcond = icmp ne i32 %indvars.iv.next, 1024
+  br i1 %exitcond, label %for.body, label %for.end
+
+for.end:
+  ret void
+}
+
+; Plausible dependence of distance 1 - can't be vectorized.
+; for (i = 0; i < 1024; ++i)
+;   A[i+1] = A[i] + 1;
+
+; CHECK: f2_novec
+; CHECK-NOT: <2 x i32>
+
+define void @f2_novec(i32* %A) {
+entry:
+  br label %for.body
+
+for.body:
+  %indvars.iv = phi i32 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
+  %arrayidx = getelementptr inbounds i32* %A, i32 %indvars.iv
+  %0 = load i32* %arrayidx, align 4
+  %add = add nsw i32 %0, 1
+  %indvars.iv.next = add i32 %indvars.iv, 1
+  %arrayidx3 = getelementptr inbounds i32* %A, i32 %indvars.iv.next
+  store i32 %add, i32* %arrayidx3, align 4
+  %exitcond = icmp ne i32 %indvars.iv.next, 1024
+  br i1 %exitcond, label %for.body, label %for.end
+
+for.end:
+  ret void
+}
+
+; Plausible dependence of distance 2 - can be vectorized with a width of 2.
+; for (i = 0; i < 1024; ++i)
+;   A[i+2] = A[i] + 1;
+
+; CHECK: f3_vec_len
+; CHECK: <2 x i32>
+
+; WIDTH: f3_vec_len
+; WIDTH-NOT: <4 x i32>
+
+define void @f3_vec_len(i32* %A) {
+entry:
+  br label %for.body
+
+for.body:
+  %i.01 = phi i32 [ 0, %entry ], [ %inc, %for.body ]
+  %idxprom = sext i32 %i.01 to i64
+  %arrayidx = getelementptr inbounds i32* %A, i64 %idxprom
+  %0 = load i32* %arrayidx, align 4
+  %add = add nsw i32 %0, 1
+  %add1 = add nsw i32 %i.01, 2
+  %idxprom2 = sext i32 %add1 to i64
+  %arrayidx3 = getelementptr inbounds i32* %A, i64 %idxprom2
+  store i32 %add, i32* %arrayidx3, align 4
+  %inc = add nsw i32 %i.01, 1
+  %cmp = icmp slt i32 %inc, 1024
+  br i1 %cmp, label %for.body, label %for.end
+
+for.end:
+  ret void
+}
+
+; Plausible dependence of distance 1 - cannot be vectorized (without reordering
+; accesses).
+; for (i = 0; i < 1024; ++i) {
+;   B[i] = A[i];
+;   A[i] = B[i + 1];
+; }
+
+; CHECK: f5
+; CHECK-NOT: <2 x i32>
+
+define void @f5(i32* %A, i32* %B) {
+entry:
+  br label %for.body
+
+for.body:
+  %indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
+  %arrayidx = getelementptr inbounds i32* %A, i64 %indvars.iv
+  %0 = load i32* %arrayidx, align 4
+  %arrayidx2 = getelementptr inbounds i32* %B, i64 %indvars.iv
+  store i32 %0, i32* %arrayidx2, align 4
+  %indvars.iv.next = add nsw i64 %indvars.iv, 1
+  %arrayidx4 = getelementptr inbounds i32* %B, i64 %indvars.iv.next
+  %1 = load i32* %arrayidx4, align 4
+  store i32 %1, i32* %arrayidx, align 4
+  %lftr.wideiv = trunc i64 %indvars.iv.next to i32
+  %exitcond = icmp ne i32 %lftr.wideiv, 1024
+  br i1 %exitcond, label %for.body, label %for.end
+
+for.end:
+  ret void
+}
+
+; Dependence through a phi node - must not vectorize.
+; for (i = 0; i < 1024; ++i) {
+;   a[i+1] = tmp;
+;   tmp = a[i];
+; }
+
+; CHECK: f6
+; CHECK-NOT: <2 x i32>
+
+define i32 @f6(i32* %a, i32 %tmp) {
+entry:
+  br label %for.body
+
+for.body:
+  %indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
+  %tmp.addr.08 = phi i32 [ %tmp, %entry ], [ %0, %for.body ]
+  %indvars.iv.next = add nsw i64 %indvars.iv, 1
+  %arrayidx = getelementptr inbounds i32* %a, i64 %indvars.iv.next
+  store i32 %tmp.addr.08, i32* %arrayidx, align 4
+  %arrayidx3 = getelementptr inbounds i32* %a, i64 %indvars.iv
+  %0 = load i32* %arrayidx3, align 4
+  %lftr.wideiv = trunc i64 %indvars.iv.next to i32
+  %exitcond = icmp ne i32 %lftr.wideiv, 1024
+  br i1 %exitcond, label %for.body, label %for.end
+
+for.end:
+  ret i32 undef
+}
+
+; Don't vectorize true loop carried dependencies that are not a multiple of the
+; vector width.
+; Example:
+;   for (int i = ...; ++i) {
+;     a[i] = a[i-3] + ...;
+; It is a bad idea to vectorize this loop because store-load forwarding will not
+; happen.
+;
+
+; CHECK: @nostoreloadforward
+; CHECK-NOT: <2 x i32>
+
+define void @nostoreloadforward(i32* %A) {
+entry:
+  br label %for.body
+
+for.body:
+  %indvars.iv = phi i64 [ 16, %entry ], [ %indvars.iv.next, %for.body ]
+  %0 = add nsw i64 %indvars.iv, -3
+  %arrayidx = getelementptr inbounds i32* %A, i64 %0
+  %1 = load i32* %arrayidx, align 4
+  %2 = add nsw i64 %indvars.iv, 4
+  %arrayidx2 = getelementptr inbounds i32* %A, i64 %2
+  %3 = load i32* %arrayidx2, align 4
+  %add3 = add nsw i32 %3, %1
+  %arrayidx5 = getelementptr inbounds i32* %A, i64 %indvars.iv
+  store i32 %add3, i32* %arrayidx5, align 4
+  %indvars.iv.next = add i64 %indvars.iv, 1
+  %lftr.wideiv = trunc i64 %indvars.iv.next to i32
+  %exitcond = icmp ne i32 %lftr.wideiv, 128
+  br i1 %exitcond, label %for.body, label %for.end
+
+for.end:
+  ret void
+}
+
+; Example:
+;   for (int i = ...; ++i) {
+;     a[i] = b[i];
+;     c[i] = a[i-3] + ...;
+; It is a bad idea to vectorize this loop because store-load forwarding will not
+; happen.
+;
+
+; CHECK: @nostoreloadforward2
+; CHECK-NOT: <2 x i32>
+
+define void @nostoreloadforward2(i32* noalias %A, i32* noalias %B, i32* noalias %C) {
+entry:
+  br label %for.body
+
+for.body:
+  %indvars.iv = phi i64 [ 16, %entry ], [ %indvars.iv.next, %for.body ]
+  %arrayidx = getelementptr inbounds i32* %B, i64 %indvars.iv
+  %0 = load i32* %arrayidx, align 4
+  %arrayidx2 = getelementptr inbounds i32* %A, i64 %indvars.iv
+  store i32 %0, i32* %arrayidx2, align 4
+  %1 = add nsw i64 %indvars.iv, -3
+  %arrayidx4 = getelementptr inbounds i32* %A, i64 %1
+  %2 = load i32* %arrayidx4, align 4
+  %arrayidx6 = getelementptr inbounds i32* %C, i64 %indvars.iv
+  store i32 %2, i32* %arrayidx6, align 4
+  %indvars.iv.next = add i64 %indvars.iv, 1
+  %lftr.wideiv = trunc i64 %indvars.iv.next to i32
+  %exitcond = icmp ne i32 %lftr.wideiv, 128
+  br i1 %exitcond, label %for.body, label %for.end
+
+for.end:
+  ret void
+}
diff --git a/test/Transforms/LoopVectorize/runtime-check.ll b/test/Transforms/LoopVectorize/runtime-check.ll
index 014c4fc..4772256 100644
--- a/test/Transforms/LoopVectorize/runtime-check.ll
+++ b/test/Transforms/LoopVectorize/runtime-check.ll
@@ -12,7 +12,7 @@ target triple = "x86_64-apple-macosx10.9.0"
 ;CHECK: for.body.preheader:
 ;CHECK: br i1 %cmp.zero, label %middle.block, label %vector.memcheck
 ;CHECK: vector.memcheck:
-;CHECK: br i1 %found.conflict, label %middle.block, label %vector.ph
+;CHECK: br i1 %memcheck.conflict, label %middle.block, label %vector.ph
 ;CHECK: load <4 x float>
 define i32 @foo(float* nocapture %a, float* nocapture %b, i32 %n) nounwind uwtable ssp {
 entry:
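
Editor's note: the two nostoreloadforward tests above encode a cost heuristic
rather than a legality rule - a distance-3 dependence is legal at width 2, yet
the commit still declines it. A hedged C sketch of the reasoning, written for
this note (hypothetical example mirroring @nostoreloadforward, not part of the
commit):

    /* Scalar execution satisfies the reload of a[i-3] cheaply through
       store-to-load forwarding from the store three iterations back.
       Vectorized at a width that does not divide the distance, a wide
       load straddles data written by a previous wide store at a
       different offset; forwarding fails and the load stalls until the
       store drains from the store buffer.  Hence the test's comment:
       reject true dependence distances that are not a multiple of the
       vector width. */
    void dist3(int *a, int n) {
      for (int i = 3; i < n; ++i)
        a[i] = a[i - 3] + 1;
    }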