aboutsummaryrefslogtreecommitdiffstats
path: root/test/Transforms/LoopVectorize
diff options
context:
space:
mode:
authorStephen Hines <srhines@google.com>2013-03-05 23:27:24 -0800
committerStephen Hines <srhines@google.com>2013-03-05 23:27:24 -0800
commit5adb136be579e8fff3734461580cb34d1d2983b8 (patch)
treebff1a422e9c9789df563aaf9a7e91e63e8ec0384 /test/Transforms/LoopVectorize
parent227a4a4ade38716ba9eb3205f48b52910f3b955e (diff)
parentb3201c5cf1e183d840f7c99ff779d57f1549d8e5 (diff)
downloadexternal_llvm-5adb136be579e8fff3734461580cb34d1d2983b8.zip
external_llvm-5adb136be579e8fff3734461580cb34d1d2983b8.tar.gz
external_llvm-5adb136be579e8fff3734461580cb34d1d2983b8.tar.bz2
Merge commit 'b3201c5cf1e183d840f7c99ff779d57f1549d8e5' into merge_20130226
Conflicts: include/llvm/Support/ELF.h lib/Support/DeltaAlgorithm.cpp Change-Id: I24a4fbce62eb39d924efee3c687b55e1e17b30cd
Diffstat (limited to 'test/Transforms/LoopVectorize')
-rw-r--r--test/Transforms/LoopVectorize/12-12-11-if-conv.ll2
-rw-r--r--test/Transforms/LoopVectorize/ARM/arm-unroll.ll32
-rw-r--r--test/Transforms/LoopVectorize/ARM/gcc-examples.ll60
-rw-r--r--test/Transforms/LoopVectorize/ARM/lit.local.cfg6
-rw-r--r--test/Transforms/LoopVectorize/ARM/mul-cast-vect.ll114
-rw-r--r--test/Transforms/LoopVectorize/ARM/width-detect.ll52
-rw-r--r--test/Transforms/LoopVectorize/X86/avx1.ll2
-rw-r--r--test/Transforms/LoopVectorize/X86/conversion-cost.ll2
-rw-r--r--test/Transforms/LoopVectorize/X86/gcc-examples.ll6
-rw-r--r--test/Transforms/LoopVectorize/X86/min-trip-count-switch.ll28
-rw-r--r--test/Transforms/LoopVectorize/X86/parallel-loops-after-reg2mem.ll52
-rw-r--r--test/Transforms/LoopVectorize/X86/parallel-loops.ll114
-rw-r--r--test/Transforms/LoopVectorize/X86/small-size.ll (renamed from test/Transforms/LoopVectorize/small-size.ll)2
-rw-r--r--test/Transforms/LoopVectorize/X86/unroll-small-loops.ll2
-rw-r--r--test/Transforms/LoopVectorize/X86/unroll_selection.ll71
-rw-r--r--test/Transforms/LoopVectorize/X86/vector_ptr_load_store.ll150
-rw-r--r--test/Transforms/LoopVectorize/bzip_reverse_loops.ll71
-rw-r--r--test/Transforms/LoopVectorize/calloc.ll2
-rw-r--r--test/Transforms/LoopVectorize/cast-induction.ll2
-rw-r--r--test/Transforms/LoopVectorize/cpp-new-array.ll2
-rw-r--r--test/Transforms/LoopVectorize/flags.ll2
-rw-r--r--test/Transforms/LoopVectorize/gcc-examples.ll4
-rw-r--r--test/Transforms/LoopVectorize/global_alias.ll1078
-rw-r--r--test/Transforms/LoopVectorize/i8-induction.ll35
-rw-r--r--test/Transforms/LoopVectorize/if-conversion-reduction.ll2
-rw-r--r--test/Transforms/LoopVectorize/if-conversion.ll2
-rw-r--r--test/Transforms/LoopVectorize/increment.ll2
-rw-r--r--test/Transforms/LoopVectorize/intrinsic.ll2
-rw-r--r--test/Transforms/LoopVectorize/no_int_induction.ll2
-rw-r--r--test/Transforms/LoopVectorize/nofloat.ll2
-rw-r--r--test/Transforms/LoopVectorize/non-const-n.ll2
-rw-r--r--test/Transforms/LoopVectorize/nsw-crash.ll25
-rw-r--r--test/Transforms/LoopVectorize/ptr_loops.ll74
-rw-r--r--test/Transforms/LoopVectorize/read-only.ll2
-rw-r--r--test/Transforms/LoopVectorize/reduction.ll2
-rw-r--r--test/Transforms/LoopVectorize/runtime-check.ll6
-rw-r--r--test/Transforms/LoopVectorize/same-base-access.ll2
-rw-r--r--test/Transforms/LoopVectorize/scalar-select.ll2
-rw-r--r--test/Transforms/LoopVectorize/simple-unroll.ll2
-rw-r--r--test/Transforms/LoopVectorize/small-loop.ll2
-rw-r--r--test/Transforms/LoopVectorize/struct_access.ll50
-rw-r--r--test/Transforms/LoopVectorize/write-only.ll2
42 files changed, 2044 insertions, 30 deletions
diff --git a/test/Transforms/LoopVectorize/12-12-11-if-conv.ll b/test/Transforms/LoopVectorize/12-12-11-if-conv.ll
index f285887..2dd7fe3 100644
--- a/test/Transforms/LoopVectorize/12-12-11-if-conv.ll
+++ b/test/Transforms/LoopVectorize/12-12-11-if-conv.ll
@@ -1,4 +1,4 @@
-; RUN: opt < %s -loop-vectorize -force-vector-unroll=1 -force-vector-width=4 -enable-if-conversion -dce -instcombine -licm -S | FileCheck %s
+; RUN: opt < %s -loop-vectorize -force-vector-unroll=1 -force-vector-width=4 -enable-if-conversion -dce -instcombine -S | FileCheck %s
target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
target triple = "x86_64-apple-macosx10.8.0"
diff --git a/test/Transforms/LoopVectorize/ARM/arm-unroll.ll b/test/Transforms/LoopVectorize/ARM/arm-unroll.ll
new file mode 100644
index 0000000..c8d307f
--- /dev/null
+++ b/test/Transforms/LoopVectorize/ARM/arm-unroll.ll
@@ -0,0 +1,32 @@
+; RUN: opt < %s -loop-vectorize -mtriple=thumbv7-apple-ios3.0.0 -S | FileCheck %s
+; RUN: opt < %s -loop-vectorize -mtriple=thumbv7-apple-ios3.0.0 -mcpu=swift -S | FileCheck %s --check-prefix=SWIFT
+
+target datalayout = "e-p:32:32:32-i1:8:32-i8:8:32-i16:16:32-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:32:64-v128:32:128-a0:0:32-n32-S32"
+target triple = "thumbv7-apple-ios3.0.0"
+
+;CHECK: @foo
+;CHECK: load <4 x i32>
+;CHECK-NOT: load <4 x i32>
+;CHECK: ret
+;SWIFT: @foo
+;SWIFT: load <4 x i32>
+;SWIFT: load <4 x i32>
+;SWIFT: ret
+define i32 @foo(i32* nocapture %A, i32 %n) nounwind readonly ssp {
+ %1 = icmp sgt i32 %n, 0
+ br i1 %1, label %.lr.ph, label %._crit_edge
+
+.lr.ph: ; preds = %0, %.lr.ph
+ %i.02 = phi i32 [ %5, %.lr.ph ], [ 0, %0 ]
+ %sum.01 = phi i32 [ %4, %.lr.ph ], [ 0, %0 ]
+ %2 = getelementptr inbounds i32* %A, i32 %i.02
+ %3 = load i32* %2, align 4
+ %4 = add nsw i32 %3, %sum.01
+ %5 = add nsw i32 %i.02, 1
+ %exitcond = icmp eq i32 %5, %n
+ br i1 %exitcond, label %._crit_edge, label %.lr.ph
+
+._crit_edge: ; preds = %.lr.ph, %0
+ %sum.0.lcssa = phi i32 [ 0, %0 ], [ %4, %.lr.ph ]
+ ret i32 %sum.0.lcssa
+}
diff --git a/test/Transforms/LoopVectorize/ARM/gcc-examples.ll b/test/Transforms/LoopVectorize/ARM/gcc-examples.ll
new file mode 100644
index 0000000..6a68e81
--- /dev/null
+++ b/test/Transforms/LoopVectorize/ARM/gcc-examples.ll
@@ -0,0 +1,60 @@
+; RUN: opt < %s -loop-vectorize -mtriple=thumbv7-apple-ios3.0.0 -mcpu=swift -S -dce | FileCheck %s
+
+target datalayout = "e-p:32:32:32-i1:8:32-i8:8:32-i16:16:32-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:32:64-v128:32:128-a0:0:32-n32-S32"
+target triple = "thumbv7-apple-ios3.0.0"
+
+@b = common global [2048 x i32] zeroinitializer, align 16
+@c = common global [2048 x i32] zeroinitializer, align 16
+@a = common global [2048 x i32] zeroinitializer, align 16
+
+; Select VF = 8;
+;CHECK: @example1
+;CHECK: load <4 x i32>
+;CHECK: add nsw <4 x i32>
+;CHECK: store <4 x i32>
+;CHECK: ret void
+define void @example1() nounwind uwtable ssp {
+ br label %1
+
+; <label>:1 ; preds = %1, %0
+ %indvars.iv = phi i64 [ 0, %0 ], [ %indvars.iv.next, %1 ]
+ %2 = getelementptr inbounds [2048 x i32]* @b, i64 0, i64 %indvars.iv
+ %3 = load i32* %2, align 4
+ %4 = getelementptr inbounds [2048 x i32]* @c, i64 0, i64 %indvars.iv
+ %5 = load i32* %4, align 4
+ %6 = add nsw i32 %5, %3
+ %7 = getelementptr inbounds [2048 x i32]* @a, i64 0, i64 %indvars.iv
+ store i32 %6, i32* %7, align 4
+ %indvars.iv.next = add i64 %indvars.iv, 1
+ %lftr.wideiv = trunc i64 %indvars.iv.next to i32
+ %exitcond = icmp eq i32 %lftr.wideiv, 256
+ br i1 %exitcond, label %8, label %1
+
+; <label>:8 ; preds = %1
+ ret void
+}
+
+;CHECK: @example10b
+;CHECK: load <4 x i16>
+;CHECK: sext <4 x i16>
+;CHECK: store <4 x i32>
+;CHECK: ret void
+define void @example10b(i16* noalias nocapture %sa, i16* noalias nocapture %sb, i16* noalias nocapture %sc, i32* noalias nocapture %ia, i32* noalias nocapture %ib, i32* noalias nocapture %ic) nounwind uwtable ssp {
+ br label %1
+
+; <label>:1 ; preds = %1, %0
+ %indvars.iv = phi i64 [ 0, %0 ], [ %indvars.iv.next, %1 ]
+ %2 = getelementptr inbounds i16* %sb, i64 %indvars.iv
+ %3 = load i16* %2, align 2
+ %4 = sext i16 %3 to i32
+ %5 = getelementptr inbounds i32* %ia, i64 %indvars.iv
+ store i32 %4, i32* %5, align 4
+ %indvars.iv.next = add i64 %indvars.iv, 1
+ %lftr.wideiv = trunc i64 %indvars.iv.next to i32
+ %exitcond = icmp eq i32 %lftr.wideiv, 1024
+ br i1 %exitcond, label %6, label %1
+
+; <label>:6 ; preds = %1
+ ret void
+}
+
diff --git a/test/Transforms/LoopVectorize/ARM/lit.local.cfg b/test/Transforms/LoopVectorize/ARM/lit.local.cfg
new file mode 100644
index 0000000..cb77b09
--- /dev/null
+++ b/test/Transforms/LoopVectorize/ARM/lit.local.cfg
@@ -0,0 +1,6 @@
+config.suffixes = ['.ll', '.c', '.cpp']
+
+targets = set(config.root.targets_to_build.split())
+if not 'ARM' in targets:
+ config.unsupported = True
+
diff --git a/test/Transforms/LoopVectorize/ARM/mul-cast-vect.ll b/test/Transforms/LoopVectorize/ARM/mul-cast-vect.ll
new file mode 100644
index 0000000..d2e3de2
--- /dev/null
+++ b/test/Transforms/LoopVectorize/ARM/mul-cast-vect.ll
@@ -0,0 +1,114 @@
+; RUN: opt < %s -cost-model -analyze -mtriple=armv7-linux-gnueabihf -mcpu=cortex-a9 | FileCheck --check-prefix=COST %s
+; To see the assembly output: llc -mcpu=cortex-a9 < %s | FileCheck --check-prefix=ASM %s
+; ASM lines below are only for reference, tests on that direction should go to tests/CodeGen/ARM
+
+; ModuleID = 'arm.ll'
+target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:64:128-a0:0:64-n32-S64"
+target triple = "armv7--linux-gnueabihf"
+
+%T216 = type <2 x i16>
+%T232 = type <2 x i32>
+%T264 = type <2 x i64>
+
+%T416 = type <4 x i16>
+%T432 = type <4 x i32>
+%T464 = type <4 x i64>
+
+define void @direct(%T432* %loadaddr, %T432* %loadaddr2, %T432* %storeaddr) {
+; COST: function 'direct':
+ %v0 = load %T432* %loadaddr
+; ASM: vld1.64
+ %v1 = load %T432* %loadaddr2
+; ASM: vld1.64
+ %r3 = mul %T432 %v0, %v1
+; COST: cost of 2 for instruction: {{.*}} mul <4 x i32>
+; ASM: vmul.i32
+ store %T432 %r3, %T432* %storeaddr
+; ASM: vst1.64
+ ret void
+}
+
+define void @ups1632(%T416* %loadaddr, %T416* %loadaddr2, %T432* %storeaddr) {
+; COST: function 'ups1632':
+ %v0 = load %T416* %loadaddr
+; ASM: vldr
+ %v1 = load %T416* %loadaddr2
+; ASM: vldr
+ %r1 = sext %T416 %v0 to %T432
+ %r2 = sext %T416 %v1 to %T432
+; COST: cost of 0 for instruction: {{.*}} sext <4 x i16> {{.*}} to <4 x i32>
+ %r3 = mul %T432 %r1, %r2
+; COST: cost of 2 for instruction: {{.*}} mul <4 x i32>
+; ASM: vmull.s16
+ store %T432 %r3, %T432* %storeaddr
+; ASM: vst1.64
+ ret void
+}
+
+define void @upu1632(%T416* %loadaddr, %T416* %loadaddr2, %T432* %storeaddr) {
+; COST: function 'upu1632':
+ %v0 = load %T416* %loadaddr
+; ASM: vldr
+ %v1 = load %T416* %loadaddr2
+; ASM: vldr
+ %r1 = zext %T416 %v0 to %T432
+ %r2 = zext %T416 %v1 to %T432
+; COST: cost of 0 for instruction: {{.*}} zext <4 x i16> {{.*}} to <4 x i32>
+ %r3 = mul %T432 %r1, %r2
+; COST: cost of 2 for instruction: {{.*}} mul <4 x i32>
+; ASM: vmull.u16
+ store %T432 %r3, %T432* %storeaddr
+; ASM: vst1.64
+ ret void
+}
+
+define void @ups3264(%T232* %loadaddr, %T232* %loadaddr2, %T264* %storeaddr) {
+; COST: function 'ups3264':
+ %v0 = load %T232* %loadaddr
+; ASM: vldr
+ %v1 = load %T232* %loadaddr2
+; ASM: vldr
+ %r3 = mul %T232 %v0, %v1
+; ASM: vmul.i32
+; COST: cost of 1 for instruction: {{.*}} mul <2 x i32>
+ %st = sext %T232 %r3 to %T264
+; ASM: vmovl.s32
+; COST: cost of 1 for instruction: {{.*}} sext <2 x i32> {{.*}} to <2 x i64>
+ store %T264 %st, %T264* %storeaddr
+; ASM: vst1.64
+ ret void
+}
+
+define void @upu3264(%T232* %loadaddr, %T232* %loadaddr2, %T264* %storeaddr) {
+; COST: function 'upu3264':
+ %v0 = load %T232* %loadaddr
+; ASM: vldr
+ %v1 = load %T232* %loadaddr2
+; ASM: vldr
+ %r3 = mul %T232 %v0, %v1
+; ASM: vmul.i32
+; COST: cost of 1 for instruction: {{.*}} mul <2 x i32>
+ %st = zext %T232 %r3 to %T264
+; ASM: vmovl.u32
+; COST: cost of 1 for instruction: {{.*}} zext <2 x i32> {{.*}} to <2 x i64>
+ store %T264 %st, %T264* %storeaddr
+; ASM: vst1.64
+ ret void
+}
+
+define void @dn3216(%T432* %loadaddr, %T432* %loadaddr2, %T416* %storeaddr) {
+; COST: function 'dn3216':
+ %v0 = load %T432* %loadaddr
+; ASM: vld1.64
+ %v1 = load %T432* %loadaddr2
+; ASM: vld1.64
+ %r3 = mul %T432 %v0, %v1
+; ASM: vmul.i32
+; COST: cost of 2 for instruction: {{.*}} mul <4 x i32>
+ %st = trunc %T432 %r3 to %T416
+; ASM: vmovn.i32
+; COST: cost of 1 for instruction: {{.*}} trunc <4 x i32> {{.*}} to <4 x i16>
+ store %T416 %st, %T416* %storeaddr
+; ASM: vstr
+ ret void
+}
diff --git a/test/Transforms/LoopVectorize/ARM/width-detect.ll b/test/Transforms/LoopVectorize/ARM/width-detect.ll
new file mode 100644
index 0000000..c0795b6
--- /dev/null
+++ b/test/Transforms/LoopVectorize/ARM/width-detect.ll
@@ -0,0 +1,52 @@
+; RUN: opt < %s -loop-vectorize -mtriple=thumbv7-apple-ios3.0.0 -S | FileCheck %s
+
+target datalayout = "e-p:32:32:32-i1:8:32-i8:8:32-i16:16:32-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:32:64-v128:32:128-a0:0:32-n32-S32"
+target triple = "thumbv7-apple-ios3.0.0"
+
+;CHECK:foo_F64
+;CHECK: <2 x double>
+;CHECK:ret
+define double @foo_F64(double* nocapture %A, i32 %n) nounwind uwtable readonly ssp {
+ %1 = icmp sgt i32 %n, 0
+ br i1 %1, label %.lr.ph, label %._crit_edge
+
+.lr.ph: ; preds = %0, %.lr.ph
+ %indvars.iv = phi i64 [ %indvars.iv.next, %.lr.ph ], [ 0, %0 ]
+ %prod.01 = phi double [ %4, %.lr.ph ], [ 0.000000e+00, %0 ]
+ %2 = getelementptr inbounds double* %A, i64 %indvars.iv
+ %3 = load double* %2, align 8
+ %4 = fmul fast double %prod.01, %3
+ %indvars.iv.next = add i64 %indvars.iv, 1
+ %lftr.wideiv = trunc i64 %indvars.iv.next to i32
+ %exitcond = icmp eq i32 %lftr.wideiv, %n
+ br i1 %exitcond, label %._crit_edge, label %.lr.ph
+
+._crit_edge: ; preds = %.lr.ph, %0
+ %prod.0.lcssa = phi double [ 0.000000e+00, %0 ], [ %4, %.lr.ph ]
+ ret double %prod.0.lcssa
+}
+
+;CHECK:foo_I8
+;CHECK: xor <16 x i8>
+;CHECK:ret
+define signext i8 @foo_I8(i8* nocapture %A, i32 %n) nounwind uwtable readonly ssp {
+ %1 = icmp sgt i32 %n, 0
+ br i1 %1, label %.lr.ph, label %._crit_edge
+
+.lr.ph: ; preds = %0, %.lr.ph
+ %indvars.iv = phi i64 [ %indvars.iv.next, %.lr.ph ], [ 0, %0 ]
+ %red.01 = phi i8 [ %4, %.lr.ph ], [ 0, %0 ]
+ %2 = getelementptr inbounds i8* %A, i64 %indvars.iv
+ %3 = load i8* %2, align 1
+ %4 = xor i8 %3, %red.01
+ %indvars.iv.next = add i64 %indvars.iv, 1
+ %lftr.wideiv = trunc i64 %indvars.iv.next to i32
+ %exitcond = icmp eq i32 %lftr.wideiv, %n
+ br i1 %exitcond, label %._crit_edge, label %.lr.ph
+
+._crit_edge: ; preds = %.lr.ph, %0
+ %red.0.lcssa = phi i8 [ 0, %0 ], [ %4, %.lr.ph ]
+ ret i8 %red.0.lcssa
+}
+
+
diff --git a/test/Transforms/LoopVectorize/X86/avx1.ll b/test/Transforms/LoopVectorize/X86/avx1.ll
index a2d176a..a85c6fe 100644
--- a/test/Transforms/LoopVectorize/X86/avx1.ll
+++ b/test/Transforms/LoopVectorize/X86/avx1.ll
@@ -27,7 +27,7 @@ define i32 @read_mod_write_single_ptr(float* nocapture %a, i32 %n) nounwind uwta
;CHECK: @read_mod_i64
-;CHECK: load <8 x i64>
+;CHECK: load <4 x i64>
;CHECK: ret i32
define i32 @read_mod_i64(i64* nocapture %a, i32 %n) nounwind uwtable ssp {
%1 = icmp sgt i32 %n, 0
diff --git a/test/Transforms/LoopVectorize/X86/conversion-cost.ll b/test/Transforms/LoopVectorize/X86/conversion-cost.ll
index 60c742e..23d9233 100644
--- a/test/Transforms/LoopVectorize/X86/conversion-cost.ll
+++ b/test/Transforms/LoopVectorize/X86/conversion-cost.ll
@@ -4,7 +4,7 @@ target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f3
target triple = "x86_64-apple-macosx10.8.0"
;CHECK: @conversion_cost1
-;CHECK: store <8 x i8>
+;CHECK: store <32 x i8>
;CHECK: ret
define i32 @conversion_cost1(i32 %n, i8* nocapture %A, float* nocapture %B) nounwind uwtable ssp {
%1 = icmp sgt i32 %n, 3
diff --git a/test/Transforms/LoopVectorize/X86/gcc-examples.ll b/test/Transforms/LoopVectorize/X86/gcc-examples.ll
index 0f21ba6..d2d0eac 100644
--- a/test/Transforms/LoopVectorize/X86/gcc-examples.ll
+++ b/test/Transforms/LoopVectorize/X86/gcc-examples.ll
@@ -1,5 +1,5 @@
-; RUN: opt < %s -loop-vectorize -mtriple=x86_64-apple-macosx10.8.0 -mcpu=corei7 -dce -instcombine -licm -S | FileCheck %s
-; RUN: opt < %s -loop-vectorize -mtriple=x86_64-apple-macosx10.8.0 -mcpu=corei7 -force-vector-unroll=0 -dce -instcombine -licm -S | FileCheck %s -check-prefix=UNROLL
+; RUN: opt < %s -loop-vectorize -mtriple=x86_64-apple-macosx10.8.0 -mcpu=corei7 -dce -instcombine -S | FileCheck %s
+; RUN: opt < %s -loop-vectorize -mtriple=x86_64-apple-macosx10.8.0 -mcpu=corei7 -force-vector-unroll=0 -dce -instcombine -S | FileCheck %s -check-prefix=UNROLL
target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
target triple = "x86_64-apple-macosx10.8.0"
@@ -53,8 +53,6 @@ define void @example1() nounwind uwtable ssp {
;UNROLL: @example10b
;UNROLL: load <4 x i16>
;UNROLL: load <4 x i16>
-;UNROLL: load <4 x i16>
-;UNROLL: store <4 x i32>
;UNROLL: store <4 x i32>
;UNROLL: store <4 x i32>
;UNROLL: ret void
diff --git a/test/Transforms/LoopVectorize/X86/min-trip-count-switch.ll b/test/Transforms/LoopVectorize/X86/min-trip-count-switch.ll
new file mode 100644
index 0000000..186fba8
--- /dev/null
+++ b/test/Transforms/LoopVectorize/X86/min-trip-count-switch.ll
@@ -0,0 +1,28 @@
+; RUN: opt < %s -loop-vectorize -force-vector-unroll=1 -vectorizer-min-trip-count=1 -force-vector-width=4 -dce -instcombine -S | FileCheck %s
+
+target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
+target triple = "x86_64-unknown-linux-gnu"
+
+; CHECK: <4 x float>
+define void @trivial_loop(float* nocapture %a) nounwind uwtable optsize {
+entry:
+ br label %for.body
+
+for.body: ; preds = %for.body, %entry
+ %indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
+ %arrayidx = getelementptr inbounds float* %a, i64 %indvars.iv
+ %0 = load float* %arrayidx, align 4, !tbaa !0
+ %add = fadd float %0, 1.000000e+00
+ store float %add, float* %arrayidx, align 4, !tbaa !0
+ %indvars.iv.next = add i64 %indvars.iv, 1
+ %lftr.wideiv = trunc i64 %indvars.iv.next to i32
+ %exitcond = icmp eq i32 %lftr.wideiv, 8
+ br i1 %exitcond, label %for.end, label %for.body
+
+for.end: ; preds = %for.body
+ ret void
+}
+
+!0 = metadata !{metadata !"float", metadata !1}
+!1 = metadata !{metadata !"omnipotent char", metadata !2}
+!2 = metadata !{metadata !"Simple C/C++ TBAA"}
diff --git a/test/Transforms/LoopVectorize/X86/parallel-loops-after-reg2mem.ll b/test/Transforms/LoopVectorize/X86/parallel-loops-after-reg2mem.ll
new file mode 100644
index 0000000..452d0df
--- /dev/null
+++ b/test/Transforms/LoopVectorize/X86/parallel-loops-after-reg2mem.ll
@@ -0,0 +1,52 @@
+; RUN: opt < %s -loop-vectorize -force-vector-unroll=1 -force-vector-width=4 -dce -instcombine -S | FileCheck %s
+
+target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
+target triple = "x86_64-unknown-linux-gnu"
+
+; The parallel loop has been invalidated by the new memory accesses introduced
+; by reg2mem (Loop::isParallel() starts to return false). Ensure the loop is
+; now non-vectorizable.
+
+;CHECK-NOT: <4 x i32>
+define void @parallel_loop(i32* nocapture %a, i32* nocapture %b) nounwind uwtable {
+entry:
+ %indvars.iv.next.reg2mem = alloca i64
+ %indvars.iv.reg2mem = alloca i64
+ %"reg2mem alloca point" = bitcast i32 0 to i32
+ store i64 0, i64* %indvars.iv.reg2mem
+ br label %for.body
+
+for.body: ; preds = %for.body.for.body_crit_edge, %entry
+ %indvars.iv.reload = load i64* %indvars.iv.reg2mem
+ %arrayidx = getelementptr inbounds i32* %b, i64 %indvars.iv.reload
+ %0 = load i32* %arrayidx, align 4, !tbaa !0, !llvm.mem.parallel_loop_access !3
+ %arrayidx2 = getelementptr inbounds i32* %a, i64 %indvars.iv.reload
+ %1 = load i32* %arrayidx2, align 4, !tbaa !0, !llvm.mem.parallel_loop_access !3
+ %idxprom3 = sext i32 %1 to i64
+ %arrayidx4 = getelementptr inbounds i32* %a, i64 %idxprom3
+ store i32 %0, i32* %arrayidx4, align 4, !tbaa !0, !llvm.mem.parallel_loop_access !3
+ %indvars.iv.next = add i64 %indvars.iv.reload, 1
+ ; A new store without the parallel metadata here:
+ store i64 %indvars.iv.next, i64* %indvars.iv.next.reg2mem
+ %indvars.iv.next.reload1 = load i64* %indvars.iv.next.reg2mem
+ %arrayidx6 = getelementptr inbounds i32* %b, i64 %indvars.iv.next.reload1
+ %2 = load i32* %arrayidx6, align 4, !tbaa !0, !llvm.mem.parallel_loop_access !3
+ store i32 %2, i32* %arrayidx2, align 4, !tbaa !0, !llvm.mem.parallel_loop_access !3
+ %indvars.iv.next.reload = load i64* %indvars.iv.next.reg2mem
+ %lftr.wideiv = trunc i64 %indvars.iv.next.reload to i32
+ %exitcond = icmp eq i32 %lftr.wideiv, 512
+ br i1 %exitcond, label %for.end, label %for.body.for.body_crit_edge, !llvm.loop.parallel !3
+
+for.body.for.body_crit_edge: ; preds = %for.body
+ %indvars.iv.next.reload2 = load i64* %indvars.iv.next.reg2mem
+ store i64 %indvars.iv.next.reload2, i64* %indvars.iv.reg2mem
+ br label %for.body
+
+for.end: ; preds = %for.body
+ ret void
+}
+
+!0 = metadata !{metadata !"int", metadata !1}
+!1 = metadata !{metadata !"omnipotent char", metadata !2}
+!2 = metadata !{metadata !"Simple C/C++ TBAA"}
+!3 = metadata !{metadata !3}
diff --git a/test/Transforms/LoopVectorize/X86/parallel-loops.ll b/test/Transforms/LoopVectorize/X86/parallel-loops.ll
new file mode 100644
index 0000000..f648722
--- /dev/null
+++ b/test/Transforms/LoopVectorize/X86/parallel-loops.ll
@@ -0,0 +1,114 @@
+; RUN: opt < %s -loop-vectorize -force-vector-unroll=1 -force-vector-width=4 -dce -instcombine -S | FileCheck %s
+
+target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
+target triple = "x86_64-unknown-linux-gnu"
+
+; A tricky loop:
+;
+; void loop(int *a, int *b) {
+; for (int i = 0; i < 512; ++i) {
+; a[a[i]] = b[i];
+; a[i] = b[i+1];
+; }
+;}
+
+;CHECK: @loop
+;CHECK-NOT: <4 x i32>
+define void @loop(i32* nocapture %a, i32* nocapture %b) nounwind uwtable {
+entry:
+ br label %for.body
+
+for.body: ; preds = %for.body, %entry
+ %indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
+ %arrayidx = getelementptr inbounds i32* %b, i64 %indvars.iv
+ %0 = load i32* %arrayidx, align 4, !tbaa !0
+ %arrayidx2 = getelementptr inbounds i32* %a, i64 %indvars.iv
+ %1 = load i32* %arrayidx2, align 4, !tbaa !0
+ %idxprom3 = sext i32 %1 to i64
+ %arrayidx4 = getelementptr inbounds i32* %a, i64 %idxprom3
+ store i32 %0, i32* %arrayidx4, align 4, !tbaa !0
+ %indvars.iv.next = add i64 %indvars.iv, 1
+ %arrayidx6 = getelementptr inbounds i32* %b, i64 %indvars.iv.next
+ %2 = load i32* %arrayidx6, align 4, !tbaa !0
+ store i32 %2, i32* %arrayidx2, align 4, !tbaa !0
+ %lftr.wideiv = trunc i64 %indvars.iv.next to i32
+ %exitcond = icmp eq i32 %lftr.wideiv, 512
+ br i1 %exitcond, label %for.end, label %for.body
+
+for.end: ; preds = %for.body
+ ret void
+}
+
+; The same loop with parallel loop metadata added to the loop branch
+; and the memory instructions.
+
+;CHECK: @parallel_loop
+;CHECK: <4 x i32>
+define void @parallel_loop(i32* nocapture %a, i32* nocapture %b) nounwind uwtable {
+entry:
+ br label %for.body
+
+for.body: ; preds = %for.body, %entry
+ %indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
+ %arrayidx = getelementptr inbounds i32* %b, i64 %indvars.iv
+ %0 = load i32* %arrayidx, align 4, !tbaa !0, !llvm.mem.parallel_loop_access !3
+ %arrayidx2 = getelementptr inbounds i32* %a, i64 %indvars.iv
+ %1 = load i32* %arrayidx2, align 4, !tbaa !0, !llvm.mem.parallel_loop_access !3
+ %idxprom3 = sext i32 %1 to i64
+ %arrayidx4 = getelementptr inbounds i32* %a, i64 %idxprom3
+ ; This store might have originated from inlining a function with a parallel
+ ; loop. Refers to a list with the "original loop reference" (!4) also included.
+ store i32 %0, i32* %arrayidx4, align 4, !tbaa !0, !llvm.mem.parallel_loop_access !5
+ %indvars.iv.next = add i64 %indvars.iv, 1
+ %arrayidx6 = getelementptr inbounds i32* %b, i64 %indvars.iv.next
+ %2 = load i32* %arrayidx6, align 4, !tbaa !0, !llvm.mem.parallel_loop_access !3
+ store i32 %2, i32* %arrayidx2, align 4, !tbaa !0, !llvm.mem.parallel_loop_access !3
+ %lftr.wideiv = trunc i64 %indvars.iv.next to i32
+ %exitcond = icmp eq i32 %lftr.wideiv, 512
+ br i1 %exitcond, label %for.end, label %for.body, !llvm.loop.parallel !3
+
+for.end: ; preds = %for.body
+ ret void
+}
+
+; The same loop with an illegal parallel loop metadata: the memory
+; accesses refer to a different loop's identifier.
+
+;CHECK: @mixed_metadata
+;CHECK-NOT: <4 x i32>
+
+define void @mixed_metadata(i32* nocapture %a, i32* nocapture %b) nounwind uwtable {
+entry:
+ br label %for.body
+
+for.body: ; preds = %for.body, %entry
+ %indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
+ %arrayidx = getelementptr inbounds i32* %b, i64 %indvars.iv
+ %0 = load i32* %arrayidx, align 4, !tbaa !0, !llvm.mem.parallel_loop_access !6
+ %arrayidx2 = getelementptr inbounds i32* %a, i64 %indvars.iv
+ %1 = load i32* %arrayidx2, align 4, !tbaa !0, !llvm.mem.parallel_loop_access !6
+ %idxprom3 = sext i32 %1 to i64
+ %arrayidx4 = getelementptr inbounds i32* %a, i64 %idxprom3
+ ; This refers to the loop marked with !7 which we are not in at the moment.
+ ; It should prevent detecting as a parallel loop.
+ store i32 %0, i32* %arrayidx4, align 4, !tbaa !0, !llvm.mem.parallel_loop_access !7
+ %indvars.iv.next = add i64 %indvars.iv, 1
+ %arrayidx6 = getelementptr inbounds i32* %b, i64 %indvars.iv.next
+ %2 = load i32* %arrayidx6, align 4, !tbaa !0, !llvm.mem.parallel_loop_access !6
+ store i32 %2, i32* %arrayidx2, align 4, !tbaa !0, !llvm.mem.parallel_loop_access !6
+ %lftr.wideiv = trunc i64 %indvars.iv.next to i32
+ %exitcond = icmp eq i32 %lftr.wideiv, 512
+ br i1 %exitcond, label %for.end, label %for.body, !llvm.loop.parallel !6
+
+for.end: ; preds = %for.body
+ ret void
+}
+
+!0 = metadata !{metadata !"int", metadata !1}
+!1 = metadata !{metadata !"omnipotent char", metadata !2}
+!2 = metadata !{metadata !"Simple C/C++ TBAA"}
+!3 = metadata !{metadata !3}
+!4 = metadata !{metadata !4}
+!5 = metadata !{metadata !3, metadata !4}
+!6 = metadata !{metadata !6}
+!7 = metadata !{metadata !7}
diff --git a/test/Transforms/LoopVectorize/small-size.ll b/test/Transforms/LoopVectorize/X86/small-size.ll
index 35b91bb..f390b33 100644
--- a/test/Transforms/LoopVectorize/small-size.ll
+++ b/test/Transforms/LoopVectorize/X86/small-size.ll
@@ -1,4 +1,4 @@
-; RUN: opt < %s -loop-vectorize -force-vector-unroll=1 -force-vector-width=4 -dce -instcombine -licm -S | FileCheck %s
+; RUN: opt < %s -loop-vectorize -force-vector-unroll=1 -force-vector-width=4 -dce -instcombine -S | FileCheck %s
target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
target triple = "x86_64-apple-macosx10.8.0"
diff --git a/test/Transforms/LoopVectorize/X86/unroll-small-loops.ll b/test/Transforms/LoopVectorize/X86/unroll-small-loops.ll
index 2075986..ef63a14 100644
--- a/test/Transforms/LoopVectorize/X86/unroll-small-loops.ll
+++ b/test/Transforms/LoopVectorize/X86/unroll-small-loops.ll
@@ -1,4 +1,4 @@
-; RUN: opt < %s -loop-vectorize -mtriple=x86_64-apple-macosx10.8.0 -mcpu=corei7-avx2 -force-vector-width=4 -force-vector-unroll=0 -dce -S | FileCheck %s
+; RUN: opt < %s -loop-vectorize -mtriple=x86_64-apple-macosx10.8.0 -mcpu=corei7-avx -force-vector-width=4 -force-vector-unroll=0 -dce -S | FileCheck %s
target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
target triple = "x86_64-apple-macosx10.8.0"
diff --git a/test/Transforms/LoopVectorize/X86/unroll_selection.ll b/test/Transforms/LoopVectorize/X86/unroll_selection.ll
new file mode 100644
index 0000000..2d7b663
--- /dev/null
+++ b/test/Transforms/LoopVectorize/X86/unroll_selection.ll
@@ -0,0 +1,71 @@
+; RUN: opt < %s -loop-vectorize -mtriple=x86_64-apple-macosx10.8.0 -mcpu=corei7-avx -force-vector-width=4 -force-vector-unroll=0 -dce -S | FileCheck %s
+
+target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
+target triple = "x86_64-apple-macosx10.8.0"
+
+; Don't unroll when we have register pressure.
+;CHECK: reg_pressure
+;CHECK: load <4 x double>
+;CHECK-NOT: load <4 x double>
+;CHECK: store <4 x double>
+;CHECK-NOT: store <4 x double>
+;CHECK: ret
+define void @reg_pressure(double* nocapture %A, i32 %n) nounwind uwtable ssp {
+ %1 = sext i32 %n to i64
+ br label %2
+
+; <label>:2 ; preds = %2, %0
+ %indvars.iv = phi i64 [ %indvars.iv.next, %2 ], [ %1, %0 ]
+ %3 = getelementptr inbounds double* %A, i64 %indvars.iv
+ %4 = load double* %3, align 8
+ %5 = fadd double %4, 3.000000e+00
+ %6 = fmul double %4, 2.000000e+00
+ %7 = fadd double %5, %6
+ %8 = fadd double %7, 2.000000e+00
+ %9 = fmul double %8, 5.000000e-01
+ %10 = fadd double %6, %9
+ %11 = fsub double %10, %5
+ %12 = fadd double %4, %11
+ %13 = fdiv double %8, %12
+ %14 = fmul double %13, %8
+ %15 = fmul double %6, %14
+ %16 = fmul double %5, %15
+ %17 = fadd double %16, -3.000000e+00
+ %18 = fsub double %4, %5
+ %19 = fadd double %6, %18
+ %20 = fadd double %13, %19
+ %21 = fadd double %20, %17
+ %22 = fadd double %21, 3.000000e+00
+ %23 = fmul double %4, %22
+ store double %23, double* %3, align 8
+ %indvars.iv.next = add i64 %indvars.iv, -1
+ %24 = trunc i64 %indvars.iv to i32
+ %25 = icmp eq i32 %24, 0
+ br i1 %25, label %26, label %2
+
+; <label>:26 ; preds = %2
+ ret void
+}
+
+; This is a small loop. Unroll it twice.
+;CHECK: small_loop
+;CHECK: xor
+;CHECK: xor
+;CHECK: ret
+define void @small_loop(i16* nocapture %A, i64 %n) nounwind uwtable ssp {
+ %1 = icmp eq i64 %n, 0
+ br i1 %1, label %._crit_edge, label %.lr.ph
+
+.lr.ph: ; preds = %0, %.lr.ph
+ %i.01 = phi i64 [ %5, %.lr.ph ], [ 0, %0 ]
+ %2 = getelementptr inbounds i16* %A, i64 %i.01
+ %3 = load i16* %2, align 2
+ %4 = xor i16 %3, 3
+ store i16 %4, i16* %2, align 2
+ %5 = add i64 %i.01, 1
+ %exitcond = icmp eq i64 %5, %n
+ br i1 %exitcond, label %._crit_edge, label %.lr.ph
+
+._crit_edge: ; preds = %.lr.ph, %0
+ ret void
+}
diff --git a/test/Transforms/LoopVectorize/X86/vector_ptr_load_store.ll b/test/Transforms/LoopVectorize/X86/vector_ptr_load_store.ll
new file mode 100644
index 0000000..59bb8d0
--- /dev/null
+++ b/test/Transforms/LoopVectorize/X86/vector_ptr_load_store.ll
@@ -0,0 +1,150 @@
+; RUN: opt -loop-vectorize -mcpu=corei7-avx -debug -S < %s 2>&1 | FileCheck %s
+; REQUIRES: asserts
+
+target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
+target triple = "x86_64-apple-macosx10.8.0"
+
+%0 = type { %0*, %1 }
+%1 = type { i8*, i32 }
+
+@p = global [2048 x [8 x i32*]] zeroinitializer, align 16
+@q = global [2048 x i16] zeroinitializer, align 16
+@r = global [2048 x i16] zeroinitializer, align 16
+
+; Tests for widest type
+; Ensure that we count the pointer store in the first test case. We have a
+; consecutive vector of pointers store, therefore we should count it towards the
+; widest vector count.
+;
+; CHECK: test_consecutive_store
+; CHECK: The Widest type: 64 bits
+define void @test_consecutive_store(%0**, %0**, %0** nocapture) nounwind ssp uwtable align 2 {
+ %4 = load %0** %2, align 8
+ %5 = icmp eq %0** %0, %1
+ br i1 %5, label %12, label %6
+
+; <label>:6 ; preds = %3
+ br label %7
+
+; <label>:7 ; preds = %7, %6
+ %8 = phi %0** [ %0, %6 ], [ %9, %7 ]
+ store %0* %4, %0** %8, align 8
+ %9 = getelementptr inbounds %0** %8, i64 1
+ %10 = icmp eq %0** %9, %1
+ br i1 %10, label %11, label %7
+
+; <label>:11 ; preds = %7
+ br label %12
+
+; <label>:12 ; preds = %11, %3
+ ret void
+}
+
+; However, if the store of a set of pointers is not to consecutive memory we do
+; NOT count the store towards the widest vector type.
+; In the test case below we add i16 types to store it in an array of pointer,
+; therefore the widest type should be i16.
+; int* p[2048][8];
+; short q[2048];
+; for (int y = 0; y < 8; ++y)
+; for (int i = 0; i < 1024; ++i) {
+; p[i][y] = (int*) (1 + q[i]);
+; }
+; CHECK: test_nonconsecutive_store
+; CHECK: The Widest type: 16 bits
+define void @test_nonconsecutive_store() nounwind ssp uwtable {
+ br label %1
+
+; <label>:1 ; preds = %14, %0
+ %2 = phi i64 [ 0, %0 ], [ %15, %14 ]
+ br label %3
+
+; <label>:3 ; preds = %3, %1
+ %4 = phi i64 [ 0, %1 ], [ %11, %3 ]
+ %5 = getelementptr inbounds [2048 x i16]* @q, i64 0, i64 %4
+ %6 = load i16* %5, align 2
+ %7 = sext i16 %6 to i64
+ %8 = add i64 %7, 1
+ %9 = inttoptr i64 %8 to i32*
+ %10 = getelementptr inbounds [2048 x [8 x i32*]]* @p, i64 0, i64 %4, i64 %2
+ store i32* %9, i32** %10, align 8
+ %11 = add i64 %4, 1
+ %12 = trunc i64 %11 to i32
+ %13 = icmp ne i32 %12, 1024
+ br i1 %13, label %3, label %14
+
+; <label>:14 ; preds = %3
+ %15 = add i64 %2, 1
+ %16 = trunc i64 %15 to i32
+ %17 = icmp ne i32 %16, 8
+ br i1 %17, label %1, label %18
+
+; <label>:18 ; preds = %14
+ ret void
+}
+
+
+@ia = global [1024 x i32*] zeroinitializer, align 16
+@ib = global [1024 x i32] zeroinitializer, align 16
+@ic = global [1024 x i8] zeroinitializer, align 16
+@p2 = global [2048 x [8 x i32*]] zeroinitializer, align 16
+@q2 = global [2048 x i16] zeroinitializer, align 16
+
+;; Now we check the same rules for loads. We should take consecutive loads of
+;; pointer types into account.
+; CHECK: test_consecutive_ptr_load
+; CHECK: The Widest type: 64 bits
+define i8 @test_consecutive_ptr_load() nounwind readonly ssp uwtable {
+ br label %1
+
+; <label>:1 ; preds = %1, %0
+ %2 = phi i64 [ 0, %0 ], [ %10, %1 ]
+ %3 = phi i8 [ 0, %0 ], [ %9, %1 ]
+ %4 = getelementptr inbounds [1024 x i32*]* @ia, i32 0, i64 %2
+ %5 = load i32** %4, align 4
+ %6 = ptrtoint i32* %5 to i64
+ %7 = trunc i64 %6 to i8
+ %8 = add i8 %3, 1
+ %9 = add i8 %7, %8
+ %10 = add i64 %2, 1
+ %11 = icmp ne i64 %10, 1024
+ br i1 %11, label %1, label %12
+
+; <label>:12 ; preds = %1
+ %13 = phi i8 [ %9, %1 ]
+ ret i8 %13
+}
+
+;; However, we should not take unconsecutive loads of pointers into account.
+; CHECK: test_nonconsecutive_ptr_load
+; CHECK: The Widest type: 16 bits
+define void @test_nonconsecutive_ptr_load() nounwind ssp uwtable {
+ br label %1
+
+; <label>:1 ; preds = %13, %0
+ %2 = phi i64 [ 0, %0 ], [ %14, %13 ]
+ br label %3
+
+; <label>:3 ; preds = %3, %1
+ %4 = phi i64 [ 0, %1 ], [ %10, %3 ]
+ %5 = getelementptr inbounds [2048 x [8 x i32*]]* @p2, i64 0, i64 %4, i64 %2
+ %6 = getelementptr inbounds [2048 x i16]* @q2, i64 0, i64 %4
+ %7 = load i32** %5, align 2
+ %8 = ptrtoint i32* %7 to i64
+ %9 = trunc i64 %8 to i16
+ store i16 %9, i16* %6, align 8
+ %10 = add i64 %4, 1
+ %11 = trunc i64 %10 to i32
+ %12 = icmp ne i32 %11, 1024
+ br i1 %12, label %3, label %13
+
+; <label>:13 ; preds = %3
+ %14 = add i64 %2, 1
+ %15 = trunc i64 %14 to i32
+ %16 = icmp ne i32 %15, 8
+ br i1 %16, label %1, label %17
+
+; <label>:17 ; preds = %13
+ ret void
+}
+
diff --git a/test/Transforms/LoopVectorize/bzip_reverse_loops.ll b/test/Transforms/LoopVectorize/bzip_reverse_loops.ll
new file mode 100644
index 0000000..431e422
--- /dev/null
+++ b/test/Transforms/LoopVectorize/bzip_reverse_loops.ll
@@ -0,0 +1,71 @@
+; RUN: opt < %s -loop-vectorize -force-vector-unroll=1 -force-vector-width=4 -dce -instcombine -S -enable-if-conversion | FileCheck %s
+
+target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
+target triple = "x86_64-apple-macosx10.8.0"
+
+;CHECK: fc
+;CHECK: load <4 x i16>
+;CHECK-NEXT: shufflevector <4 x i16>
+;CHECK: select <4 x i1>
+;CHECK: store <4 x i16>
+;CHECK: ret
+define void @fc(i16* nocapture %p, i32 %n, i32 %size) nounwind uwtable ssp {
+entry:
+ br label %do.body
+
+do.body: ; preds = %cond.end, %entry
+ %n.addr.0 = phi i32 [ %n, %entry ], [ %dec, %cond.end ]
+ %p.addr.0 = phi i16* [ %p, %entry ], [ %incdec.ptr, %cond.end ]
+ %incdec.ptr = getelementptr inbounds i16* %p.addr.0, i64 -1
+ %0 = load i16* %incdec.ptr, align 2, !tbaa !0
+ %conv = zext i16 %0 to i32
+ %cmp = icmp ult i32 %conv, %size
+ br i1 %cmp, label %cond.end, label %cond.true
+
+cond.true: ; preds = %do.body
+ %sub = sub i32 %conv, %size
+ %phitmp = trunc i32 %sub to i16
+ br label %cond.end
+
+cond.end: ; preds = %do.body, %cond.true
+ %cond = phi i16 [ %phitmp, %cond.true ], [ 0, %do.body ]
+ store i16 %cond, i16* %incdec.ptr, align 2, !tbaa !0
+ %dec = add i32 %n.addr.0, -1
+ %tobool = icmp eq i32 %dec, 0
+ br i1 %tobool, label %do.end, label %do.body
+
+do.end: ; preds = %cond.end
+ ret void
+}
+
+;CHECK: example1
+;CHECK: load <4 x i32>
+;CHECK-NEXT: shufflevector <4 x i32>
+;CHECK: select <4 x i1>
+;CHECK: store <4 x i32>
+;CHECK: ret
+define void @example1(i32* nocapture %a, i32 %n, i32 %wsize) nounwind uwtable ssp {
+entry:
+ br label %do.body
+
+do.body: ; preds = %do.body, %entry
+ %n.addr.0 = phi i32 [ %n, %entry ], [ %dec, %do.body ]
+ %p.0 = phi i32* [ %a, %entry ], [ %incdec.ptr, %do.body ]
+ %incdec.ptr = getelementptr inbounds i32* %p.0, i64 -1
+ %0 = load i32* %incdec.ptr, align 4, !tbaa !3
+ %cmp = icmp slt i32 %0, %wsize
+ %sub = sub nsw i32 %0, %wsize
+ %cond = select i1 %cmp, i32 0, i32 %sub
+ store i32 %cond, i32* %incdec.ptr, align 4, !tbaa !3
+ %dec = add nsw i32 %n.addr.0, -1
+ %tobool = icmp eq i32 %dec, 0
+ br i1 %tobool, label %do.end, label %do.body
+
+do.end: ; preds = %do.body
+ ret void
+}
+
+!0 = metadata !{metadata !"short", metadata !1}
+!1 = metadata !{metadata !"omnipotent char", metadata !2}
+!2 = metadata !{metadata !"Simple C/C++ TBAA"}
+!3 = metadata !{metadata !"int", metadata !1}
diff --git a/test/Transforms/LoopVectorize/calloc.ll b/test/Transforms/LoopVectorize/calloc.ll
index 55c1eba..08c84ef 100644
--- a/test/Transforms/LoopVectorize/calloc.ll
+++ b/test/Transforms/LoopVectorize/calloc.ll
@@ -1,4 +1,4 @@
-; RUN: opt < %s -loop-vectorize -force-vector-unroll=1 -force-vector-width=4 -dce -instcombine -licm -S | FileCheck %s
+; RUN: opt < %s -loop-vectorize -force-vector-unroll=1 -force-vector-width=4 -dce -instcombine -S | FileCheck %s
target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
target triple = "x86_64-apple-macosx10.9.0"
diff --git a/test/Transforms/LoopVectorize/cast-induction.ll b/test/Transforms/LoopVectorize/cast-induction.ll
index 5c090aa..2aa29ed 100644
--- a/test/Transforms/LoopVectorize/cast-induction.ll
+++ b/test/Transforms/LoopVectorize/cast-induction.ll
@@ -1,4 +1,4 @@
-; RUN: opt < %s -loop-vectorize -force-vector-unroll=1 -force-vector-width=4 -dce -instcombine -licm -S | FileCheck %s
+; RUN: opt < %s -loop-vectorize -force-vector-unroll=1 -force-vector-width=4 -dce -instcombine -S | FileCheck %s
; rdar://problem/12848162
diff --git a/test/Transforms/LoopVectorize/cpp-new-array.ll b/test/Transforms/LoopVectorize/cpp-new-array.ll
index 7cd608d..da0fb05 100644
--- a/test/Transforms/LoopVectorize/cpp-new-array.ll
+++ b/test/Transforms/LoopVectorize/cpp-new-array.ll
@@ -1,4 +1,4 @@
-; RUN: opt < %s -loop-vectorize -force-vector-unroll=1 -force-vector-width=4 -dce -instcombine -licm -S | FileCheck %s
+; RUN: opt < %s -loop-vectorize -force-vector-unroll=1 -force-vector-width=4 -dce -instcombine -S | FileCheck %s
target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
target triple = "x86_64-apple-macosx10.8.0"
diff --git a/test/Transforms/LoopVectorize/flags.ll b/test/Transforms/LoopVectorize/flags.ll
index b7f3815..656912e 100644
--- a/test/Transforms/LoopVectorize/flags.ll
+++ b/test/Transforms/LoopVectorize/flags.ll
@@ -1,4 +1,4 @@
-; RUN: opt < %s -loop-vectorize -force-vector-unroll=1 -force-vector-width=4 -dce -instcombine -licm -S | FileCheck %s
+; RUN: opt < %s -loop-vectorize -force-vector-unroll=1 -force-vector-width=4 -dce -instcombine -S | FileCheck %s
target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
target triple = "x86_64-apple-macosx10.8.0"
diff --git a/test/Transforms/LoopVectorize/gcc-examples.ll b/test/Transforms/LoopVectorize/gcc-examples.ll
index b8b125f..f335557 100644
--- a/test/Transforms/LoopVectorize/gcc-examples.ll
+++ b/test/Transforms/LoopVectorize/gcc-examples.ll
@@ -1,5 +1,5 @@
-; RUN: opt < %s -loop-vectorize -force-vector-width=4 -force-vector-unroll=1 -dce -instcombine -licm -S | FileCheck %s
-; RUN: opt < %s -loop-vectorize -force-vector-width=4 -force-vector-unroll=4 -dce -instcombine -licm -S | FileCheck %s -check-prefix=UNROLL
+; RUN: opt < %s -loop-vectorize -force-vector-width=4 -force-vector-unroll=1 -dce -instcombine -S | FileCheck %s
+; RUN: opt < %s -loop-vectorize -force-vector-width=4 -force-vector-unroll=4 -dce -instcombine -S | FileCheck %s -check-prefix=UNROLL
target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
target triple = "x86_64-apple-macosx10.8.0"
diff --git a/test/Transforms/LoopVectorize/global_alias.ll b/test/Transforms/LoopVectorize/global_alias.ll
new file mode 100644
index 0000000..24e698b
--- /dev/null
+++ b/test/Transforms/LoopVectorize/global_alias.ll
@@ -0,0 +1,1078 @@
+; RUN: opt < %s -O3 -loop-vectorize -force-vector-unroll=1 -force-vector-width=4 -dce -instcombine -S | FileCheck %s
+
+target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:64:128-a0:0:64-n32-S64"
+
+%struct.anon = type { [100 x i32], i32, [100 x i32] }
+%struct.anon.0 = type { [100 x [100 x i32]], i32, [100 x [100 x i32]] }
+
+@Foo = common global %struct.anon zeroinitializer, align 4
+@Bar = common global %struct.anon.0 zeroinitializer, align 4
+
+@PB = external global i32*
+@PA = external global i32*
+
+
+;; === First, the tests that should always vectorize, wither statically or by adding run-time checks ===
+
+
+; /// Different objects, positive induction, constant distance
+; int noAlias01 (int a) {
+; int i;
+; for (i=0; i<SIZE; i++)
+; Foo.A[i] = Foo.B[i] + a;
+; return Foo.A[a];
+; }
+; CHECK: define i32 @noAlias01
+; CHECK: add nsw <4 x i32>
+; CHECK ret
+
+define i32 @noAlias01(i32 %a) nounwind {
+entry:
+ %a.addr = alloca i32, align 4
+ %i = alloca i32, align 4
+ store i32 %a, i32* %a.addr, align 4
+ store i32 0, i32* %i, align 4
+ br label %for.cond
+
+for.cond: ; preds = %for.inc, %entry
+ %0 = load i32* %i, align 4
+ %cmp = icmp slt i32 %0, 100
+ br i1 %cmp, label %for.body, label %for.end
+
+for.body: ; preds = %for.cond
+ %1 = load i32* %i, align 4
+ %arrayidx = getelementptr inbounds [100 x i32]* getelementptr inbounds (%struct.anon* @Foo, i32 0, i32 2), i32 0, i32 %1
+ %2 = load i32* %arrayidx, align 4
+ %3 = load i32* %a.addr, align 4
+ %add = add nsw i32 %2, %3
+ %4 = load i32* %i, align 4
+ %arrayidx1 = getelementptr inbounds [100 x i32]* getelementptr inbounds (%struct.anon* @Foo, i32 0, i32 0), i32 0, i32 %4
+ store i32 %add, i32* %arrayidx1, align 4
+ br label %for.inc
+
+for.inc: ; preds = %for.body
+ %5 = load i32* %i, align 4
+ %inc = add nsw i32 %5, 1
+ store i32 %inc, i32* %i, align 4
+ br label %for.cond
+
+for.end: ; preds = %for.cond
+ %6 = load i32* %a.addr, align 4
+ %arrayidx2 = getelementptr inbounds [100 x i32]* getelementptr inbounds (%struct.anon* @Foo, i32 0, i32 0), i32 0, i32 %6
+ %7 = load i32* %arrayidx2, align 4
+ ret i32 %7
+}
+
+; /// Different objects, positive induction with widening slide
+; int noAlias02 (int a) {
+; int i;
+; for (i=0; i<SIZE-10; i++)
+; Foo.A[i] = Foo.B[i+10] + a;
+; return Foo.A[a];
+; }
+; CHECK: define i32 @noAlias02
+; CHECK: add nsw <4 x i32>
+; CHECK ret
+
+define i32 @noAlias02(i32 %a) {
+entry:
+ %a.addr = alloca i32, align 4
+ %i = alloca i32, align 4
+ store i32 %a, i32* %a.addr, align 4
+ store i32 0, i32* %i, align 4
+ br label %for.cond
+
+for.cond: ; preds = %for.inc, %entry
+ %0 = load i32* %i, align 4
+ %cmp = icmp slt i32 %0, 90
+ br i1 %cmp, label %for.body, label %for.end
+
+for.body: ; preds = %for.cond
+ %1 = load i32* %i, align 4
+ %add = add nsw i32 %1, 10
+ %arrayidx = getelementptr inbounds [100 x i32]* getelementptr inbounds (%struct.anon* @Foo, i32 0, i32 2), i32 0, i32 %add
+ %2 = load i32* %arrayidx, align 4
+ %3 = load i32* %a.addr, align 4
+ %add1 = add nsw i32 %2, %3
+ %4 = load i32* %i, align 4
+ %arrayidx2 = getelementptr inbounds [100 x i32]* getelementptr inbounds (%struct.anon* @Foo, i32 0, i32 0), i32 0, i32 %4
+ store i32 %add1, i32* %arrayidx2, align 4
+ br label %for.inc
+
+for.inc: ; preds = %for.body
+ %5 = load i32* %i, align 4
+ %inc = add nsw i32 %5, 1
+ store i32 %inc, i32* %i, align 4
+ br label %for.cond
+
+for.end: ; preds = %for.cond
+ %6 = load i32* %a.addr, align 4
+ %arrayidx3 = getelementptr inbounds [100 x i32]* getelementptr inbounds (%struct.anon* @Foo, i32 0, i32 0), i32 0, i32 %6
+ %7 = load i32* %arrayidx3, align 4
+ ret i32 %7
+}
+
+; /// Different objects, positive induction with shortening slide
+; int noAlias03 (int a) {
+; int i;
+; for (i=0; i<SIZE; i++)
+; Foo.A[i+10] = Foo.B[i] + a;
+; return Foo.A[a];
+; }
+; CHECK: define i32 @noAlias03
+; CHECK: add nsw <4 x i32>
+; CHECK ret
+
+define i32 @noAlias03(i32 %a) {
+entry:
+ %a.addr = alloca i32, align 4
+ %i = alloca i32, align 4
+ store i32 %a, i32* %a.addr, align 4
+ store i32 0, i32* %i, align 4
+ br label %for.cond
+
+for.cond: ; preds = %for.inc, %entry
+ %0 = load i32* %i, align 4
+ %cmp = icmp slt i32 %0, 100
+ br i1 %cmp, label %for.body, label %for.end
+
+for.body: ; preds = %for.cond
+ %1 = load i32* %i, align 4
+ %arrayidx = getelementptr inbounds [100 x i32]* getelementptr inbounds (%struct.anon* @Foo, i32 0, i32 2), i32 0, i32 %1
+ %2 = load i32* %arrayidx, align 4
+ %3 = load i32* %a.addr, align 4
+ %add = add nsw i32 %2, %3
+ %4 = load i32* %i, align 4
+ %add1 = add nsw i32 %4, 10
+ %arrayidx2 = getelementptr inbounds [100 x i32]* getelementptr inbounds (%struct.anon* @Foo, i32 0, i32 0), i32 0, i32 %add1
+ store i32 %add, i32* %arrayidx2, align 4
+ br label %for.inc
+
+for.inc: ; preds = %for.body
+ %5 = load i32* %i, align 4
+ %inc = add nsw i32 %5, 1
+ store i32 %inc, i32* %i, align 4
+ br label %for.cond
+
+for.end: ; preds = %for.cond
+ %6 = load i32* %a.addr, align 4
+ %arrayidx3 = getelementptr inbounds [100 x i32]* getelementptr inbounds (%struct.anon* @Foo, i32 0, i32 0), i32 0, i32 %6
+ %7 = load i32* %arrayidx3, align 4
+ ret i32 %7
+}
+
+; /// Pointer access, positive stride, run-time check added
+; int noAlias04 (int a) {
+; int i;
+; for (i=0; i<SIZE; i++)
+; *(PA+i) = *(PB+i) + a;
+; return *(PA+a);
+; }
+; CHECK: define i32 @noAlias04
+; CHECK-NOT: add nsw <4 x i32>
+; CHECK ret
+;
+; TODO: This test vectorizes (with run-time check) on real targets with -O3)
+; Check why it's not being vectorized even when forcing vectorization
+
+define i32 @noAlias04(i32 %a) #0 {
+entry:
+ %a.addr = alloca i32, align 4
+ %i = alloca i32, align 4
+ store i32 %a, i32* %a.addr, align 4
+ store i32 0, i32* %i, align 4
+ br label %for.cond
+
+for.cond: ; preds = %for.inc, %entry
+ %0 = load i32* %i, align 4
+ %cmp = icmp slt i32 %0, 100
+ br i1 %cmp, label %for.body, label %for.end
+
+for.body: ; preds = %for.cond
+ %1 = load i32** @PB, align 4
+ %2 = load i32* %i, align 4
+ %add.ptr = getelementptr inbounds i32* %1, i32 %2
+ %3 = load i32* %add.ptr, align 4
+ %4 = load i32* %a.addr, align 4
+ %add = add nsw i32 %3, %4
+ %5 = load i32** @PA, align 4
+ %6 = load i32* %i, align 4
+ %add.ptr1 = getelementptr inbounds i32* %5, i32 %6
+ store i32 %add, i32* %add.ptr1, align 4
+ br label %for.inc
+
+for.inc: ; preds = %for.body
+ %7 = load i32* %i, align 4
+ %inc = add nsw i32 %7, 1
+ store i32 %inc, i32* %i, align 4
+ br label %for.cond
+
+for.end: ; preds = %for.cond
+ %8 = load i32** @PA, align 4
+ %9 = load i32* %a.addr, align 4
+ %add.ptr2 = getelementptr inbounds i32* %8, i32 %9
+ %10 = load i32* %add.ptr2, align 4
+ ret i32 %10
+}
+
+; /// Different objects, positive induction, multi-array
+; int noAlias05 (int a) {
+; int i, N=10;
+; for (i=0; i<SIZE; i++)
+; Bar.A[N][i] = Bar.B[N][i] + a;
+; return Bar.A[N][a];
+; }
+; CHECK: define i32 @noAlias05
+; CHECK: add nsw <4 x i32>
+; CHECK ret
+
+define i32 @noAlias05(i32 %a) #0 {
+entry:
+ %a.addr = alloca i32, align 4
+ %i = alloca i32, align 4
+ %N = alloca i32, align 4
+ store i32 %a, i32* %a.addr, align 4
+ store i32 10, i32* %N, align 4
+ store i32 0, i32* %i, align 4
+ br label %for.cond
+
+for.cond: ; preds = %for.inc, %entry
+ %0 = load i32* %i, align 4
+ %cmp = icmp slt i32 %0, 100
+ br i1 %cmp, label %for.body, label %for.end
+
+for.body: ; preds = %for.cond
+ %1 = load i32* %i, align 4
+ %2 = load i32* %N, align 4
+ %arrayidx = getelementptr inbounds [100 x [100 x i32]]* getelementptr inbounds (%struct.anon.0* @Bar, i32 0, i32 2), i32 0, i32 %2
+ %arrayidx1 = getelementptr inbounds [100 x i32]* %arrayidx, i32 0, i32 %1
+ %3 = load i32* %arrayidx1, align 4
+ %4 = load i32* %a.addr, align 4
+ %add = add nsw i32 %3, %4
+ %5 = load i32* %i, align 4
+ %6 = load i32* %N, align 4
+ %arrayidx2 = getelementptr inbounds [100 x [100 x i32]]* getelementptr inbounds (%struct.anon.0* @Bar, i32 0, i32 0), i32 0, i32 %6
+ %arrayidx3 = getelementptr inbounds [100 x i32]* %arrayidx2, i32 0, i32 %5
+ store i32 %add, i32* %arrayidx3, align 4
+ br label %for.inc
+
+for.inc: ; preds = %for.body
+ %7 = load i32* %i, align 4
+ %inc = add nsw i32 %7, 1
+ store i32 %inc, i32* %i, align 4
+ br label %for.cond
+
+for.end: ; preds = %for.cond
+ %8 = load i32* %a.addr, align 4
+ %9 = load i32* %N, align 4
+ %arrayidx4 = getelementptr inbounds [100 x [100 x i32]]* getelementptr inbounds (%struct.anon.0* @Bar, i32 0, i32 0), i32 0, i32 %9
+ %arrayidx5 = getelementptr inbounds [100 x i32]* %arrayidx4, i32 0, i32 %8
+ %10 = load i32* %arrayidx5, align 4
+ ret i32 %10
+}
+
+; /// Same objects, positive induction, multi-array, different sub-elements
+; int noAlias06 (int a) {
+; int i, N=10;
+; for (i=0; i<SIZE; i++)
+; Bar.A[N][i] = Bar.A[N+1][i] + a;
+; return Bar.A[N][a];
+; }
+; CHECK: define i32 @noAlias06
+; CHECK: add nsw <4 x i32>
+; CHECK ret
+
+define i32 @noAlias06(i32 %a) #0 {
+entry:
+ %a.addr = alloca i32, align 4
+ %i = alloca i32, align 4
+ %N = alloca i32, align 4
+ store i32 %a, i32* %a.addr, align 4
+ store i32 10, i32* %N, align 4
+ store i32 0, i32* %i, align 4
+ br label %for.cond
+
+for.cond: ; preds = %for.inc, %entry
+ %0 = load i32* %i, align 4
+ %cmp = icmp slt i32 %0, 100
+ br i1 %cmp, label %for.body, label %for.end
+
+for.body: ; preds = %for.cond
+ %1 = load i32* %i, align 4
+ %2 = load i32* %N, align 4
+ %add = add nsw i32 %2, 1
+ %arrayidx = getelementptr inbounds [100 x [100 x i32]]* getelementptr inbounds (%struct.anon.0* @Bar, i32 0, i32 0), i32 0, i32 %add
+ %arrayidx1 = getelementptr inbounds [100 x i32]* %arrayidx, i32 0, i32 %1
+ %3 = load i32* %arrayidx1, align 4
+ %4 = load i32* %a.addr, align 4
+ %add2 = add nsw i32 %3, %4
+ %5 = load i32* %i, align 4
+ %6 = load i32* %N, align 4
+ %arrayidx3 = getelementptr inbounds [100 x [100 x i32]]* getelementptr inbounds (%struct.anon.0* @Bar, i32 0, i32 0), i32 0, i32 %6
+ %arrayidx4 = getelementptr inbounds [100 x i32]* %arrayidx3, i32 0, i32 %5
+ store i32 %add2, i32* %arrayidx4, align 4
+ br label %for.inc
+
+for.inc: ; preds = %for.body
+ %7 = load i32* %i, align 4
+ %inc = add nsw i32 %7, 1
+ store i32 %inc, i32* %i, align 4
+ br label %for.cond
+
+for.end: ; preds = %for.cond
+ %8 = load i32* %a.addr, align 4
+ %9 = load i32* %N, align 4
+ %arrayidx5 = getelementptr inbounds [100 x [100 x i32]]* getelementptr inbounds (%struct.anon.0* @Bar, i32 0, i32 0), i32 0, i32 %9
+ %arrayidx6 = getelementptr inbounds [100 x i32]* %arrayidx5, i32 0, i32 %8
+ %10 = load i32* %arrayidx6, align 4
+ ret i32 %10
+}
+
+; /// Different objects, negative induction, constant distance
+; int noAlias07 (int a) {
+; int i;
+; for (i=0; i<SIZE; i++)
+; Foo.A[SIZE-i-1] = Foo.B[SIZE-i-1] + a;
+; return Foo.A[a];
+; }
+; CHECK: define i32 @noAlias07
+; CHECK: sub nsw <4 x i32>
+; CHECK ret
+
+define i32 @noAlias07(i32 %a) #0 {
+entry:
+ %a.addr = alloca i32, align 4
+ %i = alloca i32, align 4
+ store i32 %a, i32* %a.addr, align 4
+ store i32 0, i32* %i, align 4
+ br label %for.cond
+
+for.cond: ; preds = %for.inc, %entry
+ %0 = load i32* %i, align 4
+ %cmp = icmp slt i32 %0, 100
+ br i1 %cmp, label %for.body, label %for.end
+
+for.body: ; preds = %for.cond
+ %1 = load i32* %i, align 4
+ %sub = sub nsw i32 100, %1
+ %sub1 = sub nsw i32 %sub, 1
+ %arrayidx = getelementptr inbounds [100 x i32]* getelementptr inbounds (%struct.anon* @Foo, i32 0, i32 2), i32 0, i32 %sub1
+ %2 = load i32* %arrayidx, align 4
+ %3 = load i32* %a.addr, align 4
+ %add = add nsw i32 %2, %3
+ %4 = load i32* %i, align 4
+ %sub2 = sub nsw i32 100, %4
+ %sub3 = sub nsw i32 %sub2, 1
+ %arrayidx4 = getelementptr inbounds [100 x i32]* getelementptr inbounds (%struct.anon* @Foo, i32 0, i32 0), i32 0, i32 %sub3
+ store i32 %add, i32* %arrayidx4, align 4
+ br label %for.inc
+
+for.inc: ; preds = %for.body
+ %5 = load i32* %i, align 4
+ %inc = add nsw i32 %5, 1
+ store i32 %inc, i32* %i, align 4
+ br label %for.cond
+
+for.end: ; preds = %for.cond
+ %6 = load i32* %a.addr, align 4
+ %arrayidx5 = getelementptr inbounds [100 x i32]* getelementptr inbounds (%struct.anon* @Foo, i32 0, i32 0), i32 0, i32 %6
+ %7 = load i32* %arrayidx5, align 4
+ ret i32 %7
+}
+
+; /// Different objects, negative induction, shortening slide
+; int noAlias08 (int a) {
+; int i;
+; for (i=0; i<SIZE-10; i++)
+; Foo.A[SIZE-i-1] = Foo.B[SIZE-i-10] + a;
+; return Foo.A[a];
+; }
+; CHECK: define i32 @noAlias08
+; CHECK: sub nsw <4 x i32>
+; CHECK ret
+
+define i32 @noAlias08(i32 %a) #0 {
+entry:
+ %a.addr = alloca i32, align 4
+ %i = alloca i32, align 4
+ store i32 %a, i32* %a.addr, align 4
+ store i32 0, i32* %i, align 4
+ br label %for.cond
+
+for.cond: ; preds = %for.inc, %entry
+ %0 = load i32* %i, align 4
+ %cmp = icmp slt i32 %0, 90
+ br i1 %cmp, label %for.body, label %for.end
+
+for.body: ; preds = %for.cond
+ %1 = load i32* %i, align 4
+ %sub = sub nsw i32 100, %1
+ %sub1 = sub nsw i32 %sub, 10
+ %arrayidx = getelementptr inbounds [100 x i32]* getelementptr inbounds (%struct.anon* @Foo, i32 0, i32 2), i32 0, i32 %sub1
+ %2 = load i32* %arrayidx, align 4
+ %3 = load i32* %a.addr, align 4
+ %add = add nsw i32 %2, %3
+ %4 = load i32* %i, align 4
+ %sub2 = sub nsw i32 100, %4
+ %sub3 = sub nsw i32 %sub2, 1
+ %arrayidx4 = getelementptr inbounds [100 x i32]* getelementptr inbounds (%struct.anon* @Foo, i32 0, i32 0), i32 0, i32 %sub3
+ store i32 %add, i32* %arrayidx4, align 4
+ br label %for.inc
+
+for.inc: ; preds = %for.body
+ %5 = load i32* %i, align 4
+ %inc = add nsw i32 %5, 1
+ store i32 %inc, i32* %i, align 4
+ br label %for.cond
+
+for.end: ; preds = %for.cond
+ %6 = load i32* %a.addr, align 4
+ %arrayidx5 = getelementptr inbounds [100 x i32]* getelementptr inbounds (%struct.anon* @Foo, i32 0, i32 0), i32 0, i32 %6
+ %7 = load i32* %arrayidx5, align 4
+ ret i32 %7
+}
+
+; /// Different objects, negative induction, widening slide
+; int noAlias09 (int a) {
+; int i;
+; for (i=0; i<SIZE; i++)
+; Foo.A[SIZE-i-10] = Foo.B[SIZE-i-1] + a;
+; return Foo.A[a];
+; }
+; CHECK: define i32 @noAlias09
+; CHECK: sub nsw <4 x i32>
+; CHECK ret
+
+define i32 @noAlias09(i32 %a) #0 {
+entry:
+ %a.addr = alloca i32, align 4
+ %i = alloca i32, align 4
+ store i32 %a, i32* %a.addr, align 4
+ store i32 0, i32* %i, align 4
+ br label %for.cond
+
+for.cond: ; preds = %for.inc, %entry
+ %0 = load i32* %i, align 4
+ %cmp = icmp slt i32 %0, 100
+ br i1 %cmp, label %for.body, label %for.end
+
+for.body: ; preds = %for.cond
+ %1 = load i32* %i, align 4
+ %sub = sub nsw i32 100, %1
+ %sub1 = sub nsw i32 %sub, 1
+ %arrayidx = getelementptr inbounds [100 x i32]* getelementptr inbounds (%struct.anon* @Foo, i32 0, i32 2), i32 0, i32 %sub1
+ %2 = load i32* %arrayidx, align 4
+ %3 = load i32* %a.addr, align 4
+ %add = add nsw i32 %2, %3
+ %4 = load i32* %i, align 4
+ %sub2 = sub nsw i32 100, %4
+ %sub3 = sub nsw i32 %sub2, 10
+ %arrayidx4 = getelementptr inbounds [100 x i32]* getelementptr inbounds (%struct.anon* @Foo, i32 0, i32 0), i32 0, i32 %sub3
+ store i32 %add, i32* %arrayidx4, align 4
+ br label %for.inc
+
+for.inc: ; preds = %for.body
+ %5 = load i32* %i, align 4
+ %inc = add nsw i32 %5, 1
+ store i32 %inc, i32* %i, align 4
+ br label %for.cond
+
+for.end: ; preds = %for.cond
+ %6 = load i32* %a.addr, align 4
+ %arrayidx5 = getelementptr inbounds [100 x i32]* getelementptr inbounds (%struct.anon* @Foo, i32 0, i32 0), i32 0, i32 %6
+ %7 = load i32* %arrayidx5, align 4
+ ret i32 %7
+}
+
+; /// Pointer access, negative stride, run-time check added
+; int noAlias10 (int a) {
+; int i;
+; for (i=0; i<SIZE; i++)
+; *(PA+SIZE-i-1) = *(PB+SIZE-i-1) + a;
+; return *(PA+a);
+; }
+; CHECK: define i32 @noAlias10
+; CHECK-NOT: sub nsw <4 x i32>
+; CHECK ret
+;
+; TODO: This test vectorizes (with run-time check) on real targets with -O3)
+; Check why it's not being vectorized even when forcing vectorization
+
+define i32 @noAlias10(i32 %a) #0 {
+entry:
+ %a.addr = alloca i32, align 4
+ %i = alloca i32, align 4
+ store i32 %a, i32* %a.addr, align 4
+ store i32 0, i32* %i, align 4
+ br label %for.cond
+
+for.cond: ; preds = %for.inc, %entry
+ %0 = load i32* %i, align 4
+ %cmp = icmp slt i32 %0, 100
+ br i1 %cmp, label %for.body, label %for.end
+
+for.body: ; preds = %for.cond
+ %1 = load i32** @PB, align 4
+ %add.ptr = getelementptr inbounds i32* %1, i32 100
+ %2 = load i32* %i, align 4
+ %idx.neg = sub i32 0, %2
+ %add.ptr1 = getelementptr inbounds i32* %add.ptr, i32 %idx.neg
+ %add.ptr2 = getelementptr inbounds i32* %add.ptr1, i32 -1
+ %3 = load i32* %add.ptr2, align 4
+ %4 = load i32* %a.addr, align 4
+ %add = add nsw i32 %3, %4
+ %5 = load i32** @PA, align 4
+ %add.ptr3 = getelementptr inbounds i32* %5, i32 100
+ %6 = load i32* %i, align 4
+ %idx.neg4 = sub i32 0, %6
+ %add.ptr5 = getelementptr inbounds i32* %add.ptr3, i32 %idx.neg4
+ %add.ptr6 = getelementptr inbounds i32* %add.ptr5, i32 -1
+ store i32 %add, i32* %add.ptr6, align 4
+ br label %for.inc
+
+for.inc: ; preds = %for.body
+ %7 = load i32* %i, align 4
+ %inc = add nsw i32 %7, 1
+ store i32 %inc, i32* %i, align 4
+ br label %for.cond
+
+for.end: ; preds = %for.cond
+ %8 = load i32** @PA, align 4
+ %9 = load i32* %a.addr, align 4
+ %add.ptr7 = getelementptr inbounds i32* %8, i32 %9
+ %10 = load i32* %add.ptr7, align 4
+ ret i32 %10
+}
+
+; /// Different objects, negative induction, multi-array
+; int noAlias11 (int a) {
+; int i, N=10;
+; for (i=0; i<SIZE; i++)
+; Bar.A[N][SIZE-i-1] = Bar.B[N][SIZE-i-1] + a;
+; return Bar.A[N][a];
+; }
+; CHECK: define i32 @noAlias11
+; CHECK: sub nsw <4 x i32>
+; CHECK ret
+
+define i32 @noAlias11(i32 %a) #0 {
+entry:
+ %a.addr = alloca i32, align 4
+ %i = alloca i32, align 4
+ %N = alloca i32, align 4
+ store i32 %a, i32* %a.addr, align 4
+ store i32 10, i32* %N, align 4
+ store i32 0, i32* %i, align 4
+ br label %for.cond
+
+for.cond: ; preds = %for.inc, %entry
+ %0 = load i32* %i, align 4
+ %cmp = icmp slt i32 %0, 100
+ br i1 %cmp, label %for.body, label %for.end
+
+for.body: ; preds = %for.cond
+ %1 = load i32* %i, align 4
+ %sub = sub nsw i32 100, %1
+ %sub1 = sub nsw i32 %sub, 1
+ %2 = load i32* %N, align 4
+ %arrayidx = getelementptr inbounds [100 x [100 x i32]]* getelementptr inbounds (%struct.anon.0* @Bar, i32 0, i32 2), i32 0, i32 %2
+ %arrayidx2 = getelementptr inbounds [100 x i32]* %arrayidx, i32 0, i32 %sub1
+ %3 = load i32* %arrayidx2, align 4
+ %4 = load i32* %a.addr, align 4
+ %add = add nsw i32 %3, %4
+ %5 = load i32* %i, align 4
+ %sub3 = sub nsw i32 100, %5
+ %sub4 = sub nsw i32 %sub3, 1
+ %6 = load i32* %N, align 4
+ %arrayidx5 = getelementptr inbounds [100 x [100 x i32]]* getelementptr inbounds (%struct.anon.0* @Bar, i32 0, i32 0), i32 0, i32 %6
+ %arrayidx6 = getelementptr inbounds [100 x i32]* %arrayidx5, i32 0, i32 %sub4
+ store i32 %add, i32* %arrayidx6, align 4
+ br label %for.inc
+
+for.inc: ; preds = %for.body
+ %7 = load i32* %i, align 4
+ %inc = add nsw i32 %7, 1
+ store i32 %inc, i32* %i, align 4
+ br label %for.cond
+
+for.end: ; preds = %for.cond
+ %8 = load i32* %a.addr, align 4
+ %9 = load i32* %N, align 4
+ %arrayidx7 = getelementptr inbounds [100 x [100 x i32]]* getelementptr inbounds (%struct.anon.0* @Bar, i32 0, i32 0), i32 0, i32 %9
+ %arrayidx8 = getelementptr inbounds [100 x i32]* %arrayidx7, i32 0, i32 %8
+ %10 = load i32* %arrayidx8, align 4
+ ret i32 %10
+}
+
+; /// Same objects, negative induction, multi-array, different sub-elements
+; int noAlias12 (int a) {
+; int i, N=10;
+; for (i=0; i<SIZE; i++)
+; Bar.A[N][SIZE-i-1] = Bar.A[N+1][SIZE-i-1] + a;
+; return Bar.A[N][a];
+; }
+; CHECK: define i32 @noAlias12
+; CHECK: sub nsw <4 x i32>
+; CHECK ret
+
+define i32 @noAlias12(i32 %a) #0 {
+entry:
+ %a.addr = alloca i32, align 4
+ %i = alloca i32, align 4
+ %N = alloca i32, align 4
+ store i32 %a, i32* %a.addr, align 4
+ store i32 10, i32* %N, align 4
+ store i32 0, i32* %i, align 4
+ br label %for.cond
+
+for.cond: ; preds = %for.inc, %entry
+ %0 = load i32* %i, align 4
+ %cmp = icmp slt i32 %0, 100
+ br i1 %cmp, label %for.body, label %for.end
+
+for.body: ; preds = %for.cond
+ %1 = load i32* %i, align 4
+ %sub = sub nsw i32 100, %1
+ %sub1 = sub nsw i32 %sub, 1
+ %2 = load i32* %N, align 4
+ %add = add nsw i32 %2, 1
+ %arrayidx = getelementptr inbounds [100 x [100 x i32]]* getelementptr inbounds (%struct.anon.0* @Bar, i32 0, i32 0), i32 0, i32 %add
+ %arrayidx2 = getelementptr inbounds [100 x i32]* %arrayidx, i32 0, i32 %sub1
+ %3 = load i32* %arrayidx2, align 4
+ %4 = load i32* %a.addr, align 4
+ %add3 = add nsw i32 %3, %4
+ %5 = load i32* %i, align 4
+ %sub4 = sub nsw i32 100, %5
+ %sub5 = sub nsw i32 %sub4, 1
+ %6 = load i32* %N, align 4
+ %arrayidx6 = getelementptr inbounds [100 x [100 x i32]]* getelementptr inbounds (%struct.anon.0* @Bar, i32 0, i32 0), i32 0, i32 %6
+ %arrayidx7 = getelementptr inbounds [100 x i32]* %arrayidx6, i32 0, i32 %sub5
+ store i32 %add3, i32* %arrayidx7, align 4
+ br label %for.inc
+
+for.inc: ; preds = %for.body
+ %7 = load i32* %i, align 4
+ %inc = add nsw i32 %7, 1
+ store i32 %inc, i32* %i, align 4
+ br label %for.cond
+
+for.end: ; preds = %for.cond
+ %8 = load i32* %a.addr, align 4
+ %9 = load i32* %N, align 4
+ %arrayidx8 = getelementptr inbounds [100 x [100 x i32]]* getelementptr inbounds (%struct.anon.0* @Bar, i32 0, i32 0), i32 0, i32 %9
+ %arrayidx9 = getelementptr inbounds [100 x i32]* %arrayidx8, i32 0, i32 %8
+ %10 = load i32* %arrayidx9, align 4
+ ret i32 %10
+}
+
+; /// Same objects, positive induction, constant distance, just enough for vector size
+; int noAlias13 (int a) {
+; int i;
+; for (i=0; i<SIZE; i++)
+; Foo.A[i] = Foo.A[i+4] + a;
+; return Foo.A[a];
+; }
+; CHECK: define i32 @noAlias13
+; CHECK: add nsw <4 x i32>
+; CHECK: ret
+
+define i32 @noAlias13(i32 %a) #0 {
+entry:
+ %a.addr = alloca i32, align 4
+ %i = alloca i32, align 4
+ store i32 %a, i32* %a.addr, align 4
+ store i32 0, i32* %i, align 4
+ br label %for.cond
+
+for.cond: ; preds = %for.inc, %entry
+ %0 = load i32* %i, align 4
+ %cmp = icmp slt i32 %0, 100
+ br i1 %cmp, label %for.body, label %for.end
+
+for.body: ; preds = %for.cond
+ %1 = load i32* %i, align 4
+ %add = add nsw i32 %1, 4
+ %arrayidx = getelementptr inbounds [100 x i32]* getelementptr inbounds (%struct.anon* @Foo, i32 0, i32 0), i32 0, i32 %add
+ %2 = load i32* %arrayidx, align 4
+ %3 = load i32* %a.addr, align 4
+ %add1 = add nsw i32 %2, %3
+ %4 = load i32* %i, align 4
+ %arrayidx2 = getelementptr inbounds [100 x i32]* getelementptr inbounds (%struct.anon* @Foo, i32 0, i32 0), i32 0, i32 %4
+ store i32 %add1, i32* %arrayidx2, align 4
+ br label %for.inc
+
+for.inc: ; preds = %for.body
+ %5 = load i32* %i, align 4
+ %inc = add nsw i32 %5, 1
+ store i32 %inc, i32* %i, align 4
+ br label %for.cond
+
+for.end: ; preds = %for.cond
+ %6 = load i32* %a.addr, align 4
+ %arrayidx3 = getelementptr inbounds [100 x i32]* getelementptr inbounds (%struct.anon* @Foo, i32 0, i32 0), i32 0, i32 %6
+ %7 = load i32* %arrayidx3, align 4
+ ret i32 %7
+}
+
+; /// Same objects, negative induction, constant distance, just enough for vector size
+; int noAlias14 (int a) {
+; int i;
+; for (i=0; i<SIZE; i++)
+; Foo.A[SIZE-i-1] = Foo.A[SIZE-i-5] + a;
+; return Foo.A[a];
+; }
+; CHECK: define i32 @noAlias14
+; CHECK: sub nsw <4 x i32>
+; CHECK: ret
+
+define i32 @noAlias14(i32 %a) #0 {
+entry:
+ %a.addr = alloca i32, align 4
+ %i = alloca i32, align 4
+ store i32 %a, i32* %a.addr, align 4
+ store i32 0, i32* %i, align 4
+ br label %for.cond
+
+for.cond: ; preds = %for.inc, %entry
+ %0 = load i32* %i, align 4
+ %cmp = icmp slt i32 %0, 100
+ br i1 %cmp, label %for.body, label %for.end
+
+for.body: ; preds = %for.cond
+ %1 = load i32* %i, align 4
+ %sub = sub nsw i32 100, %1
+ %sub1 = sub nsw i32 %sub, 5
+ %arrayidx = getelementptr inbounds [100 x i32]* getelementptr inbounds (%struct.anon* @Foo, i32 0, i32 0), i32 0, i32 %sub1
+ %2 = load i32* %arrayidx, align 4
+ %3 = load i32* %a.addr, align 4
+ %add = add nsw i32 %2, %3
+ %4 = load i32* %i, align 4
+ %sub2 = sub nsw i32 100, %4
+ %sub3 = sub nsw i32 %sub2, 1
+ %arrayidx4 = getelementptr inbounds [100 x i32]* getelementptr inbounds (%struct.anon* @Foo, i32 0, i32 0), i32 0, i32 %sub3
+ store i32 %add, i32* %arrayidx4, align 4
+ br label %for.inc
+
+for.inc: ; preds = %for.body
+ %5 = load i32* %i, align 4
+ %inc = add nsw i32 %5, 1
+ store i32 %inc, i32* %i, align 4
+ br label %for.cond
+
+for.end: ; preds = %for.cond
+ %6 = load i32* %a.addr, align 4
+ %arrayidx5 = getelementptr inbounds [100 x i32]* getelementptr inbounds (%struct.anon* @Foo, i32 0, i32 0), i32 0, i32 %6
+ %7 = load i32* %arrayidx5, align 4
+ ret i32 %7
+}
+
+
+;; === Now, the tests that we could vectorize with induction changes or run-time checks ===
+
+
+; /// Different objects, swapped induction, alias at the end
+; int mayAlias01 (int a) {
+; int i;
+; for (i=0; i<SIZE; i++)
+; Foo.A[i] = Foo.B[SIZE-i-1] + a;
+; return Foo.A[a];
+; }
+; CHECK: define i32 @mayAlias01
+; CHECK-NOT: add nsw <4 x i32>
+; CHECK: ret
+
+define i32 @mayAlias01(i32 %a) nounwind {
+entry:
+ %a.addr = alloca i32, align 4
+ %i = alloca i32, align 4
+ store i32 %a, i32* %a.addr, align 4
+ store i32 0, i32* %i, align 4
+ br label %for.cond
+
+for.cond: ; preds = %for.inc, %entry
+ %0 = load i32* %i, align 4
+ %cmp = icmp slt i32 %0, 100
+ br i1 %cmp, label %for.body, label %for.end
+
+for.body: ; preds = %for.cond
+ %1 = load i32* %i, align 4
+ %sub = sub nsw i32 100, %1
+ %sub1 = sub nsw i32 %sub, 1
+ %arrayidx = getelementptr inbounds [100 x i32]* getelementptr inbounds (%struct.anon* @Foo, i32 0, i32 2), i32 0, i32 %sub1
+ %2 = load i32* %arrayidx, align 4
+ %3 = load i32* %a.addr, align 4
+ %add = add nsw i32 %2, %3
+ %4 = load i32* %i, align 4
+ %arrayidx2 = getelementptr inbounds [100 x i32]* getelementptr inbounds (%struct.anon* @Foo, i32 0, i32 0), i32 0, i32 %4
+ store i32 %add, i32* %arrayidx2, align 4
+ br label %for.inc
+
+for.inc: ; preds = %for.body
+ %5 = load i32* %i, align 4
+ %inc = add nsw i32 %5, 1
+ store i32 %inc, i32* %i, align 4
+ br label %for.cond
+
+for.end: ; preds = %for.cond
+ %6 = load i32* %a.addr, align 4
+ %arrayidx3 = getelementptr inbounds [100 x i32]* getelementptr inbounds (%struct.anon* @Foo, i32 0, i32 0), i32 0, i32 %6
+ %7 = load i32* %arrayidx3, align 4
+ ret i32 %7
+}
+
+; /// Different objects, swapped induction, alias at the beginning
+; int mayAlias02 (int a) {
+; int i;
+; for (i=0; i<SIZE; i++)
+; Foo.A[SIZE-i-1] = Foo.B[i] + a;
+; return Foo.A[a];
+; }
+; CHECK: define i32 @mayAlias02
+; CHECK-NOT: add nsw <4 x i32>
+; CHECK: ret
+
+define i32 @mayAlias02(i32 %a) nounwind {
+entry:
+ %a.addr = alloca i32, align 4
+ %i = alloca i32, align 4
+ store i32 %a, i32* %a.addr, align 4
+ store i32 0, i32* %i, align 4
+ br label %for.cond
+
+for.cond: ; preds = %for.inc, %entry
+ %0 = load i32* %i, align 4
+ %cmp = icmp slt i32 %0, 100
+ br i1 %cmp, label %for.body, label %for.end
+
+for.body: ; preds = %for.cond
+ %1 = load i32* %i, align 4
+ %arrayidx = getelementptr inbounds [100 x i32]* getelementptr inbounds (%struct.anon* @Foo, i32 0, i32 2), i32 0, i32 %1
+ %2 = load i32* %arrayidx, align 4
+ %3 = load i32* %a.addr, align 4
+ %add = add nsw i32 %2, %3
+ %4 = load i32* %i, align 4
+ %sub = sub nsw i32 100, %4
+ %sub1 = sub nsw i32 %sub, 1
+ %arrayidx2 = getelementptr inbounds [100 x i32]* getelementptr inbounds (%struct.anon* @Foo, i32 0, i32 0), i32 0, i32 %sub1
+ store i32 %add, i32* %arrayidx2, align 4
+ br label %for.inc
+
+for.inc: ; preds = %for.body
+ %5 = load i32* %i, align 4
+ %inc = add nsw i32 %5, 1
+ store i32 %inc, i32* %i, align 4
+ br label %for.cond
+
+for.end: ; preds = %for.cond
+ %6 = load i32* %a.addr, align 4
+ %arrayidx3 = getelementptr inbounds [100 x i32]* getelementptr inbounds (%struct.anon* @Foo, i32 0, i32 0), i32 0, i32 %6
+ %7 = load i32* %arrayidx3, align 4
+ ret i32 %7
+}
+
+; /// Pointer access, run-time check added
+; int mayAlias03 (int a) {
+; int i;
+; for (i=0; i<SIZE; i++)
+; *(PA+i) = *(PB+SIZE-i-1) + a;
+; return *(PA+a);
+; }
+; CHECK: define i32 @mayAlias03
+; CHECK-NOT: add nsw <4 x i32>
+; CHECK: ret
+
+define i32 @mayAlias03(i32 %a) nounwind {
+entry:
+ %a.addr = alloca i32, align 4
+ %i = alloca i32, align 4
+ store i32 %a, i32* %a.addr, align 4
+ store i32 0, i32* %i, align 4
+ br label %for.cond
+
+for.cond: ; preds = %for.inc, %entry
+ %0 = load i32* %i, align 4
+ %cmp = icmp slt i32 %0, 100
+ br i1 %cmp, label %for.body, label %for.end
+
+for.body: ; preds = %for.cond
+ %1 = load i32** @PB, align 4
+ %add.ptr = getelementptr inbounds i32* %1, i32 100
+ %2 = load i32* %i, align 4
+ %idx.neg = sub i32 0, %2
+ %add.ptr1 = getelementptr inbounds i32* %add.ptr, i32 %idx.neg
+ %add.ptr2 = getelementptr inbounds i32* %add.ptr1, i32 -1
+ %3 = load i32* %add.ptr2, align 4
+ %4 = load i32* %a.addr, align 4
+ %add = add nsw i32 %3, %4
+ %5 = load i32** @PA, align 4
+ %6 = load i32* %i, align 4
+ %add.ptr3 = getelementptr inbounds i32* %5, i32 %6
+ store i32 %add, i32* %add.ptr3, align 4
+ br label %for.inc
+
+for.inc: ; preds = %for.body
+ %7 = load i32* %i, align 4
+ %inc = add nsw i32 %7, 1
+ store i32 %inc, i32* %i, align 4
+ br label %for.cond
+
+for.end: ; preds = %for.cond
+ %8 = load i32** @PA, align 4
+ %9 = load i32* %a.addr, align 4
+ %add.ptr4 = getelementptr inbounds i32* %8, i32 %9
+ %10 = load i32* %add.ptr4, align 4
+ ret i32 %10
+}
+
+
+;; === Finally, the tests that should only vectorize with care (or if we ignore undefined behaviour at all) ===
+
+
+; int mustAlias01 (int a) {
+; int i;
+; for (i=0; i<SIZE; i++)
+; Foo.A[i+10] = Foo.B[SIZE-i-1] + a;
+; return Foo.A[a];
+; }
+; CHECK: define i32 @mustAlias01
+; CHECK-NOT: add nsw <4 x i32>
+; CHECK: ret
+
+define i32 @mustAlias01(i32 %a) nounwind {
+entry:
+ %a.addr = alloca i32, align 4
+ %i = alloca i32, align 4
+ store i32 %a, i32* %a.addr, align 4
+ store i32 0, i32* %i, align 4
+ br label %for.cond
+
+for.cond: ; preds = %for.inc, %entry
+ %0 = load i32* %i, align 4
+ %cmp = icmp slt i32 %0, 100
+ br i1 %cmp, label %for.body, label %for.end
+
+for.body: ; preds = %for.cond
+ %1 = load i32* %i, align 4
+ %sub = sub nsw i32 100, %1
+ %sub1 = sub nsw i32 %sub, 1
+ %arrayidx = getelementptr inbounds [100 x i32]* getelementptr inbounds (%struct.anon* @Foo, i32 0, i32 2), i32 0, i32 %sub1
+ %2 = load i32* %arrayidx, align 4
+ %3 = load i32* %a.addr, align 4
+ %add = add nsw i32 %2, %3
+ %4 = load i32* %i, align 4
+ %add2 = add nsw i32 %4, 10
+ %arrayidx3 = getelementptr inbounds [100 x i32]* getelementptr inbounds (%struct.anon* @Foo, i32 0, i32 0), i32 0, i32 %add2
+ store i32 %add, i32* %arrayidx3, align 4
+ br label %for.inc
+
+for.inc: ; preds = %for.body
+ %5 = load i32* %i, align 4
+ %inc = add nsw i32 %5, 1
+ store i32 %inc, i32* %i, align 4
+ br label %for.cond
+
+for.end: ; preds = %for.cond
+ %6 = load i32* %a.addr, align 4
+ %arrayidx4 = getelementptr inbounds [100 x i32]* getelementptr inbounds (%struct.anon* @Foo, i32 0, i32 0), i32 0, i32 %6
+ %7 = load i32* %arrayidx4, align 4
+ ret i32 %7
+}
+
+; int mustAlias02 (int a) {
+; int i;
+; for (i=0; i<SIZE; i++)
+; Foo.A[i] = Foo.B[SIZE-i-10] + a;
+; return Foo.A[a];
+; }
+; CHECK: define i32 @mustAlias02
+; CHECK-NOT: add nsw <4 x i32>
+; CHECK: ret
+
+define i32 @mustAlias02(i32 %a) nounwind {
+entry:
+ %a.addr = alloca i32, align 4
+ %i = alloca i32, align 4
+ store i32 %a, i32* %a.addr, align 4
+ store i32 0, i32* %i, align 4
+ br label %for.cond
+
+for.cond: ; preds = %for.inc, %entry
+ %0 = load i32* %i, align 4
+ %cmp = icmp slt i32 %0, 100
+ br i1 %cmp, label %for.body, label %for.end
+
+for.body: ; preds = %for.cond
+ %1 = load i32* %i, align 4
+ %sub = sub nsw i32 100, %1
+ %sub1 = sub nsw i32 %sub, 10
+ %arrayidx = getelementptr inbounds [100 x i32]* getelementptr inbounds (%struct.anon* @Foo, i32 0, i32 2), i32 0, i32 %sub1
+ %2 = load i32* %arrayidx, align 4
+ %3 = load i32* %a.addr, align 4
+ %add = add nsw i32 %2, %3
+ %4 = load i32* %i, align 4
+ %arrayidx2 = getelementptr inbounds [100 x i32]* getelementptr inbounds (%struct.anon* @Foo, i32 0, i32 0), i32 0, i32 %4
+ store i32 %add, i32* %arrayidx2, align 4
+ br label %for.inc
+
+for.inc: ; preds = %for.body
+ %5 = load i32* %i, align 4
+ %inc = add nsw i32 %5, 1
+ store i32 %inc, i32* %i, align 4
+ br label %for.cond
+
+for.end: ; preds = %for.cond
+ %6 = load i32* %a.addr, align 4
+ %arrayidx3 = getelementptr inbounds [100 x i32]* getelementptr inbounds (%struct.anon* @Foo, i32 0, i32 0), i32 0, i32 %6
+ %7 = load i32* %arrayidx3, align 4
+ ret i32 %7
+}
+
+; int mustAlias03 (int a) {
+; int i;
+; for (i=0; i<SIZE; i++)
+; Foo.A[i+10] = Foo.B[SIZE-i-10] + a;
+; return Foo.A[a];
+; }
+; CHECK: define i32 @mustAlias03
+; CHECK-NOT: add nsw <4 x i32>
+; CHECK: ret
+
+define i32 @mustAlias03(i32 %a) nounwind {
+entry:
+ %a.addr = alloca i32, align 4
+ %i = alloca i32, align 4
+ store i32 %a, i32* %a.addr, align 4
+ store i32 0, i32* %i, align 4
+ br label %for.cond
+
+for.cond: ; preds = %for.inc, %entry
+ %0 = load i32* %i, align 4
+ %cmp = icmp slt i32 %0, 100
+ br i1 %cmp, label %for.body, label %for.end
+
+for.body: ; preds = %for.cond
+ %1 = load i32* %i, align 4
+ %sub = sub nsw i32 100, %1
+ %sub1 = sub nsw i32 %sub, 10
+ %arrayidx = getelementptr inbounds [100 x i32]* getelementptr inbounds (%struct.anon* @Foo, i32 0, i32 2), i32 0, i32 %sub1
+ %2 = load i32* %arrayidx, align 4
+ %3 = load i32* %a.addr, align 4
+ %add = add nsw i32 %2, %3
+ %4 = load i32* %i, align 4
+ %add2 = add nsw i32 %4, 10
+ %arrayidx3 = getelementptr inbounds [100 x i32]* getelementptr inbounds (%struct.anon* @Foo, i32 0, i32 0), i32 0, i32 %add2
+ store i32 %add, i32* %arrayidx3, align 4
+ br label %for.inc
+
+for.inc: ; preds = %for.body
+ %5 = load i32* %i, align 4
+ %inc = add nsw i32 %5, 1
+ store i32 %inc, i32* %i, align 4
+ br label %for.cond
+
+for.end: ; preds = %for.cond
+ %6 = load i32* %a.addr, align 4
+ %arrayidx4 = getelementptr inbounds [100 x i32]* getelementptr inbounds (%struct.anon* @Foo, i32 0, i32 0), i32 0, i32 %6
+ %7 = load i32* %arrayidx4, align 4
+ ret i32 %7
+}
diff --git a/test/Transforms/LoopVectorize/i8-induction.ll b/test/Transforms/LoopVectorize/i8-induction.ll
new file mode 100644
index 0000000..7759b70
--- /dev/null
+++ b/test/Transforms/LoopVectorize/i8-induction.ll
@@ -0,0 +1,35 @@
+; RUN: opt < %s -loop-vectorize -force-vector-unroll=1 -force-vector-width=4 -dce -instcombine -S
+
+target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
+target triple = "x86_64-apple-macosx10.8.0"
+
+@a = common global i8 0, align 1
+@b = common global i8 0, align 1
+
+define void @f() nounwind uwtable ssp {
+scalar.ph:
+ store i8 0, i8* inttoptr (i64 1 to i8*), align 1, !tbaa !0
+ %0 = load i8* @a, align 1, !tbaa !0
+ br label %for.body
+
+for.body:
+ %mul16 = phi i8 [ 0, %scalar.ph ], [ %mul, %for.body ] ; <------- i8 induction var.
+ %c.015 = phi i8 [ undef, %scalar.ph ], [ %conv8, %for.body ]
+ %conv2 = sext i8 %c.015 to i32
+ %tobool = icmp ne i8 %c.015, 0
+ %.sink = select i1 %tobool, i8 %c.015, i8 %0
+ %mul = mul i8 %mul16, %.sink
+ %add = add nsw i32 %conv2, 1
+ %conv8 = trunc i32 %add to i8
+ %sext = shl i32 %add, 24
+ %phitmp14 = icmp slt i32 %sext, 268435456
+ br i1 %phitmp14, label %for.body, label %for.end
+
+for.end: ; preds = %for.body
+ store i8 %mul, i8* @b, align 1, !tbaa !0
+ ret void
+}
+
+!0 = metadata !{metadata !"omnipotent char", metadata !1}
+!1 = metadata !{metadata !"Simple C/C++ TBAA"}
+
diff --git a/test/Transforms/LoopVectorize/if-conversion-reduction.ll b/test/Transforms/LoopVectorize/if-conversion-reduction.ll
index c6dc5d7..3a2d82e 100644
--- a/test/Transforms/LoopVectorize/if-conversion-reduction.ll
+++ b/test/Transforms/LoopVectorize/if-conversion-reduction.ll
@@ -1,4 +1,4 @@
-; RUN: opt < %s -loop-vectorize -force-vector-unroll=1 -force-vector-width=4 -enable-if-conversion -dce -instcombine -licm -S | FileCheck %s
+; RUN: opt < %s -loop-vectorize -force-vector-unroll=1 -force-vector-width=4 -enable-if-conversion -dce -instcombine -S | FileCheck %s
target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
target triple = "x86_64-apple-macosx10.9.0"
diff --git a/test/Transforms/LoopVectorize/if-conversion.ll b/test/Transforms/LoopVectorize/if-conversion.ll
index 28407dc..6e7c03a 100644
--- a/test/Transforms/LoopVectorize/if-conversion.ll
+++ b/test/Transforms/LoopVectorize/if-conversion.ll
@@ -1,4 +1,4 @@
-; RUN: opt < %s -loop-vectorize -force-vector-unroll=1 -force-vector-width=4 -enable-if-conversion -dce -instcombine -licm -S | FileCheck %s
+; RUN: opt < %s -loop-vectorize -force-vector-unroll=1 -force-vector-width=4 -enable-if-conversion -dce -instcombine -S | FileCheck %s
target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
target triple = "x86_64-apple-macosx10.9.0"
diff --git a/test/Transforms/LoopVectorize/increment.ll b/test/Transforms/LoopVectorize/increment.ll
index e24fb39..3fa6b19 100644
--- a/test/Transforms/LoopVectorize/increment.ll
+++ b/test/Transforms/LoopVectorize/increment.ll
@@ -1,4 +1,4 @@
-; RUN: opt < %s -loop-vectorize -force-vector-unroll=1 -force-vector-width=4 -dce -instcombine -licm -S | FileCheck %s
+; RUN: opt < %s -loop-vectorize -force-vector-unroll=1 -force-vector-width=4 -dce -instcombine -S | FileCheck %s
target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
target triple = "x86_64-apple-macosx10.8.0"
diff --git a/test/Transforms/LoopVectorize/intrinsic.ll b/test/Transforms/LoopVectorize/intrinsic.ll
index 49c8ecb..7d5a5d7 100644
--- a/test/Transforms/LoopVectorize/intrinsic.ll
+++ b/test/Transforms/LoopVectorize/intrinsic.ll
@@ -1,4 +1,4 @@
-; RUN: opt < %s -loop-vectorize -force-vector-unroll=1 -force-vector-width=4 -dce -instcombine -licm -S | FileCheck %s
+; RUN: opt < %s -loop-vectorize -force-vector-unroll=1 -force-vector-width=4 -dce -instcombine -S | FileCheck %s
target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
target triple = "x86_64-unknown-linux-gnu"
diff --git a/test/Transforms/LoopVectorize/no_int_induction.ll b/test/Transforms/LoopVectorize/no_int_induction.ll
index 6eab799..45aa8c7 100644
--- a/test/Transforms/LoopVectorize/no_int_induction.ll
+++ b/test/Transforms/LoopVectorize/no_int_induction.ll
@@ -1,4 +1,4 @@
-; RUN: opt < %s -loop-vectorize -force-vector-unroll=1 -force-vector-width=4 -dce -instcombine -licm -S | FileCheck %s
+; RUN: opt < %s -loop-vectorize -force-vector-unroll=1 -force-vector-width=4 -dce -instcombine -S | FileCheck %s
; int __attribute__((noinline)) sum_array(int *A, int n) {
; return std::accumulate(A, A + n, 0);
diff --git a/test/Transforms/LoopVectorize/nofloat.ll b/test/Transforms/LoopVectorize/nofloat.ll
index dbdec33..de23bf0 100644
--- a/test/Transforms/LoopVectorize/nofloat.ll
+++ b/test/Transforms/LoopVectorize/nofloat.ll
@@ -1,4 +1,4 @@
-; RUN: opt < %s -loop-vectorize -force-vector-unroll=1 -force-vector-width=4 -dce -instcombine -licm -S | FileCheck %s
+; RUN: opt < %s -loop-vectorize -force-vector-unroll=1 -force-vector-width=4 -dce -instcombine -S | FileCheck %s
; Make sure that we don't vectorize functions with 'noimplicitfloat' attributes.
diff --git a/test/Transforms/LoopVectorize/non-const-n.ll b/test/Transforms/LoopVectorize/non-const-n.ll
index 7e4cee4..8262a18 100644
--- a/test/Transforms/LoopVectorize/non-const-n.ll
+++ b/test/Transforms/LoopVectorize/non-const-n.ll
@@ -1,4 +1,4 @@
-; RUN: opt < %s -loop-vectorize -force-vector-unroll=1 -force-vector-width=4 -dce -instcombine -licm -S | FileCheck %s
+; RUN: opt < %s -loop-vectorize -force-vector-unroll=1 -force-vector-width=4 -dce -instcombine -S | FileCheck %s
target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
target triple = "x86_64-apple-macosx10.8.0"
diff --git a/test/Transforms/LoopVectorize/nsw-crash.ll b/test/Transforms/LoopVectorize/nsw-crash.ll
new file mode 100644
index 0000000..e5fad14
--- /dev/null
+++ b/test/Transforms/LoopVectorize/nsw-crash.ll
@@ -0,0 +1,25 @@
+; RUN: opt < %s -loop-vectorize -force-vector-unroll=1 -force-vector-width=4
+
+target datalayout =
+"e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
+target triple = "x86_64-apple-macosx10.7.0"
+
+define void @test() {
+entry:
+ br i1 undef, label %while.end, label %while.body.lr.ph
+
+while.body.lr.ph:
+ br label %while.body
+
+while.body:
+ %it.sroa.0.091 = phi i32* [ undef, %while.body.lr.ph ], [ %incdec.ptr.i, %while.body ]
+ %incdec.ptr.i = getelementptr inbounds i32* %it.sroa.0.091, i64 1
+ %inc32 = add i32 undef, 1 ; <------------- Make sure we don't set NSW flags to the undef.
+ %cmp.i11 = icmp eq i32* %incdec.ptr.i, undef
+ br i1 %cmp.i11, label %while.end, label %while.body
+
+while.end:
+ ret void
+}
+
+
diff --git a/test/Transforms/LoopVectorize/ptr_loops.ll b/test/Transforms/LoopVectorize/ptr_loops.ll
new file mode 100644
index 0000000..25599f8
--- /dev/null
+++ b/test/Transforms/LoopVectorize/ptr_loops.ll
@@ -0,0 +1,74 @@
+; RUN: opt < %s -loop-vectorize -force-vector-unroll=1 -force-vector-width=4 -dce -instcombine -S -enable-if-conversion | FileCheck %s
+
+target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
+target triple = "x86_64-apple-macosx10.8.0"
+
+@A = global [36 x i32] [i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 32, i32 33, i32 34, i32 35], align 16
+@B = global [36 x i32] [i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 32, i32 33, i32 34, i32 35], align 16
+
+;CHECK:_Z5test1v
+;CHECK: load <4 x i32>
+;CHECK: shufflevector <4 x i32>
+;CHECK: store <4 x i32>
+;CHECK: ret
+define i32 @_Z5test1v() nounwind uwtable ssp {
+ br label %1
+
+; <label>:1 ; preds = %0, %1
+ %p.02 = phi i32* [ getelementptr inbounds ([36 x i32]* @A, i64 0, i64 18), %0 ], [ %4, %1 ]
+ %b.01 = phi i32* [ getelementptr inbounds ([36 x i32]* @B, i64 0, i64 0), %0 ], [ %5, %1 ]
+ %2 = load i32* %b.01, align 4
+ %3 = shl nsw i32 %2, 1
+ store i32 %3, i32* %p.02, align 4
+ %4 = getelementptr inbounds i32* %p.02, i64 -1
+ %5 = getelementptr inbounds i32* %b.01, i64 1
+ %6 = icmp eq i32* %4, getelementptr ([36 x i32]* @A, i64 128102389400760775, i64 3)
+ br i1 %6, label %7, label %1
+
+; <label>:7 ; preds = %1
+ ret i32 0
+}
+
+;CHECK:_Z5test2v
+;CHECK: load <4 x i32>
+;CHECK: shufflevector <4 x i32>
+;CHECK: store <4 x i32>
+;CHECK: ret
+define i32 @_Z5test2v() nounwind uwtable ssp {
+ br label %1
+
+; <label>:1 ; preds = %0, %1
+ %p.02 = phi i32* [ getelementptr inbounds ([36 x i32]* @A, i64 0, i64 25), %0 ], [ %3, %1 ]
+ %b.01 = phi i32* [ getelementptr inbounds ([36 x i32]* @B, i64 0, i64 2), %0 ], [ %4, %1 ]
+ %2 = load i32* %b.01, align 4
+ store i32 %2, i32* %p.02, align 4
+ %3 = getelementptr inbounds i32* %p.02, i64 -1
+ %4 = getelementptr inbounds i32* %b.01, i64 1
+ %5 = icmp eq i32* %4, getelementptr inbounds ([36 x i32]* @A, i64 0, i64 18)
+ br i1 %5, label %6, label %1
+
+; <label>:6 ; preds = %1
+ ret i32 0
+}
+
+;CHECK:_Z5test3v
+;CHECK: load <4 x i32>
+;CHECK: shufflevector <4 x i32>
+;CHECK: store <4 x i32>
+;CHECK: ret
+define i32 @_Z5test3v() nounwind uwtable ssp {
+ br label %1
+
+; <label>:1 ; preds = %0, %1
+ %p.02 = phi i32* [ getelementptr inbounds ([36 x i32]* @A, i64 0, i64 29), %0 ], [ %3, %1 ]
+ %b.01 = phi i32* [ getelementptr inbounds ([36 x i32]* @B, i64 0, i64 5), %0 ], [ %4, %1 ]
+ %2 = load i32* %b.01, align 4
+ store i32 %2, i32* %p.02, align 4
+ %3 = getelementptr inbounds i32* %p.02, i64 -1
+ %4 = getelementptr inbounds i32* %b.01, i64 1
+ %5 = icmp eq i32* %3, getelementptr ([36 x i32]* @A, i64 128102389400760775, i64 3)
+ br i1 %5, label %6, label %1
+
+; <label>:6 ; preds = %1
+ ret i32 0
+}
diff --git a/test/Transforms/LoopVectorize/read-only.ll b/test/Transforms/LoopVectorize/read-only.ll
index c3c9035..bfaa6d4 100644
--- a/test/Transforms/LoopVectorize/read-only.ll
+++ b/test/Transforms/LoopVectorize/read-only.ll
@@ -1,4 +1,4 @@
-; RUN: opt < %s -loop-vectorize -force-vector-unroll=1 -force-vector-width=4 -dce -instcombine -licm -S | FileCheck %s
+; RUN: opt < %s -loop-vectorize -force-vector-unroll=1 -force-vector-width=4 -dce -instcombine -S | FileCheck %s
target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
target triple = "x86_64-apple-macosx10.8.0"
diff --git a/test/Transforms/LoopVectorize/reduction.ll b/test/Transforms/LoopVectorize/reduction.ll
index 129c20d..08b7b27 100644
--- a/test/Transforms/LoopVectorize/reduction.ll
+++ b/test/Transforms/LoopVectorize/reduction.ll
@@ -1,4 +1,4 @@
-; RUN: opt < %s -loop-vectorize -force-vector-unroll=1 -force-vector-width=4 -dce -instcombine -licm -S | FileCheck %s
+; RUN: opt < %s -loop-vectorize -force-vector-unroll=1 -force-vector-width=4 -dce -instcombine -S | FileCheck %s
target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
target triple = "x86_64-apple-macosx10.8.0"
diff --git a/test/Transforms/LoopVectorize/runtime-check.ll b/test/Transforms/LoopVectorize/runtime-check.ll
index 2852684..86098a6 100644
--- a/test/Transforms/LoopVectorize/runtime-check.ll
+++ b/test/Transforms/LoopVectorize/runtime-check.ll
@@ -1,4 +1,4 @@
-; RUN: opt < %s -loop-vectorize -force-vector-unroll=1 -force-vector-width=4 -dce -instcombine -licm -S | FileCheck %s
+; RUN: opt < %s -loop-vectorize -force-vector-unroll=1 -force-vector-width=4 -dce -instcombine -S | FileCheck %s
target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
target triple = "x86_64-apple-macosx10.9.0"
@@ -9,6 +9,10 @@ target triple = "x86_64-apple-macosx10.9.0"
; a[i] = b[i] * 3;
; }
+;CHECK: for.body.preheader:
+;CHECK: br i1 %cmp.zero, label %middle.block, label %vector.memcheck
+;CHECK: vector.memcheck:
+;CHECK: br i1 %found.conflict, label %middle.block, label %vector.ph
;CHECK: load <4 x float>
define i32 @foo(float* nocapture %a, float* nocapture %b, i32 %n) nounwind uwtable ssp {
entry:
diff --git a/test/Transforms/LoopVectorize/same-base-access.ll b/test/Transforms/LoopVectorize/same-base-access.ll
index 2a1f19d..1573893 100644
--- a/test/Transforms/LoopVectorize/same-base-access.ll
+++ b/test/Transforms/LoopVectorize/same-base-access.ll
@@ -1,4 +1,4 @@
-; RUN: opt < %s -loop-vectorize -force-vector-unroll=1 -force-vector-width=4 -dce -instcombine -licm -S -enable-if-conversion | FileCheck %s
+; RUN: opt < %s -loop-vectorize -force-vector-unroll=1 -force-vector-width=4 -dce -instcombine -S -enable-if-conversion | FileCheck %s
target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
target triple = "x86_64-apple-macosx10.9.0"
diff --git a/test/Transforms/LoopVectorize/scalar-select.ll b/test/Transforms/LoopVectorize/scalar-select.ll
index d72cd14..7a14d24 100644
--- a/test/Transforms/LoopVectorize/scalar-select.ll
+++ b/test/Transforms/LoopVectorize/scalar-select.ll
@@ -1,4 +1,4 @@
-; RUN: opt < %s -loop-vectorize -force-vector-unroll=1 -force-vector-width=4 -dce -instcombine -licm -S | FileCheck %s
+; RUN: opt < %s -loop-vectorize -force-vector-unroll=1 -force-vector-width=4 -dce -instcombine -S | FileCheck %s
target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
target triple = "x86_64-apple-macosx10.8.0"
diff --git a/test/Transforms/LoopVectorize/simple-unroll.ll b/test/Transforms/LoopVectorize/simple-unroll.ll
index 9825764..7e2dd5f 100644
--- a/test/Transforms/LoopVectorize/simple-unroll.ll
+++ b/test/Transforms/LoopVectorize/simple-unroll.ll
@@ -1,4 +1,4 @@
-; RUN: opt < %s -loop-vectorize -force-vector-width=4 -force-vector-unroll=2 -dce -instcombine -licm -S | FileCheck %s
+; RUN: opt < %s -loop-vectorize -force-vector-width=4 -force-vector-unroll=2 -dce -instcombine -S | FileCheck %s
target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
target triple = "x86_64-apple-macosx10.8.0"
diff --git a/test/Transforms/LoopVectorize/small-loop.ll b/test/Transforms/LoopVectorize/small-loop.ll
index ae784b3..fa83dba 100644
--- a/test/Transforms/LoopVectorize/small-loop.ll
+++ b/test/Transforms/LoopVectorize/small-loop.ll
@@ -1,4 +1,4 @@
-; RUN: opt < %s -loop-vectorize -force-vector-unroll=1 -force-vector-width=4 -dce -instcombine -licm -S | FileCheck %s
+; RUN: opt < %s -loop-vectorize -force-vector-unroll=1 -force-vector-width=4 -dce -instcombine -S | FileCheck %s
target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
target triple = "x86_64-apple-macosx10.8.0"
diff --git a/test/Transforms/LoopVectorize/struct_access.ll b/test/Transforms/LoopVectorize/struct_access.ll
new file mode 100644
index 0000000..de65d0d
--- /dev/null
+++ b/test/Transforms/LoopVectorize/struct_access.ll
@@ -0,0 +1,50 @@
+; RUN: opt < %s -loop-vectorize -force-vector-unroll=1 -force-vector-width=4 -instcombine -S | FileCheck %s
+
+target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
+target triple = "x86_64-apple-macosx10.9.0"
+
+%struct.coordinate = type { i32, i32 }
+
+; Make sure that we don't generate a wide load when accessing the struct.
+; struct coordinate {
+; int x;
+; int y;
+; };
+;
+;
+; int foo(struct coordinate *A, int n) {
+;
+; int sum = 0;
+; for (int i = 0; i < n; ++i)
+; sum += A[i].x;
+;
+; return sum;
+; }
+
+;CHECK: @foo
+;CHECK-NOT: load <4 x i32>
+;CHECK: ret
+define i32 @foo(%struct.coordinate* nocapture %A, i32 %n) nounwind uwtable readonly ssp {
+entry:
+ %cmp4 = icmp sgt i32 %n, 0
+ br i1 %cmp4, label %for.body, label %for.end
+
+for.body: ; preds = %entry, %for.body
+ %indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ 0, %entry ]
+ %sum.05 = phi i32 [ %add, %for.body ], [ 0, %entry ]
+ %x = getelementptr inbounds %struct.coordinate* %A, i64 %indvars.iv, i32 0
+ %0 = load i32* %x, align 4, !tbaa !0
+ %add = add nsw i32 %0, %sum.05
+ %indvars.iv.next = add i64 %indvars.iv, 1
+ %lftr.wideiv = trunc i64 %indvars.iv.next to i32
+ %exitcond = icmp eq i32 %lftr.wideiv, %n
+ br i1 %exitcond, label %for.end, label %for.body
+
+for.end: ; preds = %for.body, %entry
+ %sum.0.lcssa = phi i32 [ 0, %entry ], [ %add, %for.body ]
+ ret i32 %sum.0.lcssa
+}
+
+!0 = metadata !{metadata !"int", metadata !1}
+!1 = metadata !{metadata !"omnipotent char", metadata !2}
+!2 = metadata !{metadata !"Simple C/C++ TBAA"}
diff --git a/test/Transforms/LoopVectorize/write-only.ll b/test/Transforms/LoopVectorize/write-only.ll
index b42122b..54cbe8d 100644
--- a/test/Transforms/LoopVectorize/write-only.ll
+++ b/test/Transforms/LoopVectorize/write-only.ll
@@ -1,4 +1,4 @@
-; RUN: opt < %s -loop-vectorize -force-vector-unroll=1 -force-vector-width=4 -dce -instcombine -licm -S | FileCheck %s
+; RUN: opt < %s -loop-vectorize -force-vector-unroll=1 -force-vector-width=4 -dce -instcombine -S | FileCheck %s
target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
target triple = "x86_64-apple-macosx10.8.0"