author     Arnold Schwaighofer <aschwaighofer@apple.com>   2013-10-29 01:33:57 +0000
committer  Arnold Schwaighofer <aschwaighofer@apple.com>   2013-10-29 01:33:57 +0000
commit     c04d241d13820b33224b5cbd89a427fc08e5d1d9 (patch)
tree       dbf40b8b49ab3278f68615ad0af8db89758d9f07 /test/Transforms
parent     7e8cebf22d170769b0bf0c2a69309faa0e36ac4c (diff)
ARM cost model: Unaligned vectorized double stores are expensive

Updated a test case that assumed <2 x double> would vectorize; it now
uses <4 x float> instead.

radar://15338229

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@193574 91177308-0d34-0410-b5e6-96231b3b80d8
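For context (not part of the commit), the pattern the updated Swift cost model penalizes is a <2 x double> store whose alignment is below the vector's natural alignment. A minimal sketch follows; the function name and the 8-byte alignment are illustrative assumptions, not taken from the patch.

; Hypothetical sketch: an under-aligned <2 x double> store of the kind the
; cost model now treats as expensive (it expands to several uops on Swift).
define void @unaligned_v2f64_store(<2 x double>* %p, <2 x double> %v) {
  store <2 x double> %v, <2 x double>* %p, align 8
  ret void
}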
Diffstat (limited to 'test/Transforms')
-rw-r--r--  test/Transforms/LoopVectorize/ARM/width-detect.ll | 18
-rw-r--r--  test/Transforms/SLPVectorizer/ARM/memory.ll       | 20
2 files changed, 29 insertions, 9 deletions
diff --git a/test/Transforms/LoopVectorize/ARM/width-detect.ll b/test/Transforms/LoopVectorize/ARM/width-detect.ll
index c0795b6..99d7fa7 100644
--- a/test/Transforms/LoopVectorize/ARM/width-detect.ll
+++ b/test/Transforms/LoopVectorize/ARM/width-detect.ll
@@ -3,27 +3,27 @@
target datalayout = "e-p:32:32:32-i1:8:32-i8:8:32-i16:16:32-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:32:64-v128:32:128-a0:0:32-n32-S32"
target triple = "thumbv7-apple-ios3.0.0"
-;CHECK:foo_F64
-;CHECK: <2 x double>
+;CHECK:foo_F32
+;CHECK: <4 x float>
;CHECK:ret
-define double @foo_F64(double* nocapture %A, i32 %n) nounwind uwtable readonly ssp {
+define float @foo_F32(float* nocapture %A, i32 %n) nounwind uwtable readonly ssp {
%1 = icmp sgt i32 %n, 0
br i1 %1, label %.lr.ph, label %._crit_edge
.lr.ph: ; preds = %0, %.lr.ph
%indvars.iv = phi i64 [ %indvars.iv.next, %.lr.ph ], [ 0, %0 ]
- %prod.01 = phi double [ %4, %.lr.ph ], [ 0.000000e+00, %0 ]
- %2 = getelementptr inbounds double* %A, i64 %indvars.iv
- %3 = load double* %2, align 8
- %4 = fmul fast double %prod.01, %3
+ %prod.01 = phi float [ %4, %.lr.ph ], [ 0.000000e+00, %0 ]
+ %2 = getelementptr inbounds float* %A, i64 %indvars.iv
+ %3 = load float* %2, align 8
+ %4 = fmul fast float %prod.01, %3
%indvars.iv.next = add i64 %indvars.iv, 1
%lftr.wideiv = trunc i64 %indvars.iv.next to i32
%exitcond = icmp eq i32 %lftr.wideiv, %n
br i1 %exitcond, label %._crit_edge, label %.lr.ph
._crit_edge: ; preds = %.lr.ph, %0
- %prod.0.lcssa = phi double [ 0.000000e+00, %0 ], [ %4, %.lr.ph ]
- ret double %prod.0.lcssa
+ %prod.0.lcssa = phi float [ 0.000000e+00, %0 ], [ %4, %.lr.ph ]
+ ret float %prod.0.lcssa
}
;CHECK:foo_I8
diff --git a/test/Transforms/SLPVectorizer/ARM/memory.ll b/test/Transforms/SLPVectorizer/ARM/memory.ll
new file mode 100644
index 0000000..383c808
--- /dev/null
+++ b/test/Transforms/SLPVectorizer/ARM/memory.ll
@@ -0,0 +1,20 @@
+; RUN: opt < %s -basicaa -slp-vectorizer -S -mtriple=thumbv7-apple-ios3.0.0 -mcpu=swift | FileCheck %s
+
+target datalayout = "e-p:32:32:32-i1:8:32-i8:8:32-i16:16:32-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:32:64-v128:32:128-a0:0:32-n32-S32"
+
+; On Swift, unaligned <2 x double> stores need 4 uops and it is therefore
+; cheaper to do this with scalar code.
+
+; CHECK-LABEL: expensive_double_store
+; CHECK-NOT: load <2 x double>
+; CHECK-NOT: store <2 x double>
+define void @expensive_double_store(double* noalias %dst, double* noalias %src, i64 %count) {
+entry:
+ %0 = load double* %src, align 8
+ store double %0, double* %dst, align 8
+ %arrayidx2 = getelementptr inbounds double* %src, i64 1
+ %1 = load double* %arrayidx2, align 8
+ %arrayidx3 = getelementptr inbounds double* %dst, i64 1
+ store double %1, double* %arrayidx3, align 8
+ ret void
+}
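For reference (not part of the commit), the vector form that the SLP vectorizer would otherwise produce for @expensive_double_store, and that the stricter Swift cost model now rejects, looks roughly like the sketch below; the bitcasts, value names, and alignments are assumptions.

; Hypothetical sketch of the rejected vectorized form: one <2 x double> load
; and one <2 x double> store, each only 8-byte aligned, which the cost model
; now prices higher than the four scalar memory operations in the test above.
define void @expensive_double_store_vec(double* noalias %dst, double* noalias %src) {
entry:
  %src.v2 = bitcast double* %src to <2 x double>*
  %0 = load <2 x double>* %src.v2, align 8
  %dst.v2 = bitcast double* %dst to <2 x double>*
  store <2 x double> %0, <2 x double>* %dst.v2, align 8
  ret void
}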