Diffstat (limited to 'test/Transforms/SLPVectorizer/X86')
-rw-r--r--  test/Transforms/SLPVectorizer/X86/addsub.ll  252
-rw-r--r--  test/Transforms/SLPVectorizer/X86/align.ll  38
-rw-r--r--  test/Transforms/SLPVectorizer/X86/atomics.ll  16
-rw-r--r--  test/Transforms/SLPVectorizer/X86/bad_types.ll  2
-rw-r--r--  test/Transforms/SLPVectorizer/X86/barriercall.ll  6
-rw-r--r--  test/Transforms/SLPVectorizer/X86/call.ll  70
-rw-r--r--  test/Transforms/SLPVectorizer/X86/cast.ll  20
-rw-r--r--  test/Transforms/SLPVectorizer/X86/cmp_sel.ll  10
-rw-r--r--  test/Transforms/SLPVectorizer/X86/compare-reduce.ll  10
-rw-r--r--  test/Transforms/SLPVectorizer/X86/consecutive-access.ll  80
-rw-r--r--  test/Transforms/SLPVectorizer/X86/continue_vectorizing.ll  18
-rw-r--r--  test/Transforms/SLPVectorizer/X86/crash_7zip.ll  4
-rw-r--r--  test/Transforms/SLPVectorizer/X86/crash_binaryop.ll  2
-rw-r--r--  test/Transforms/SLPVectorizer/X86/crash_bullet.ll  10
-rw-r--r--  test/Transforms/SLPVectorizer/X86/crash_bullet3.ll  4
-rw-r--r--  test/Transforms/SLPVectorizer/X86/crash_cmpop.ll  6
-rw-r--r--  test/Transforms/SLPVectorizer/X86/crash_dequeue.ll  14
-rw-r--r--  test/Transforms/SLPVectorizer/X86/crash_gep.ll  6
-rw-r--r--  test/Transforms/SLPVectorizer/X86/crash_lencod.ll  14
-rw-r--r--  test/Transforms/SLPVectorizer/X86/crash_mandeltext.ll  12
-rw-r--r--  test/Transforms/SLPVectorizer/X86/crash_netbsd_decompress.ll  12
-rw-r--r--  test/Transforms/SLPVectorizer/X86/crash_scheduling.ll  4
-rw-r--r--  test/Transforms/SLPVectorizer/X86/crash_sim4b1.ll  4
-rw-r--r--  test/Transforms/SLPVectorizer/X86/crash_smallpt.ll  12
-rw-r--r--  test/Transforms/SLPVectorizer/X86/crash_vectorizeTree.ll  16
-rw-r--r--  test/Transforms/SLPVectorizer/X86/cross_block_slp.ll  12
-rw-r--r--  test/Transforms/SLPVectorizer/X86/cse.ll  86
-rw-r--r--  test/Transforms/SLPVectorizer/X86/cycle_dup.ll  18
-rw-r--r--  test/Transforms/SLPVectorizer/X86/debug_info.ll  56
-rw-r--r--  test/Transforms/SLPVectorizer/X86/diamond.ll  60
-rw-r--r--  test/Transforms/SLPVectorizer/X86/external_user.ll  12
-rw-r--r--  test/Transforms/SLPVectorizer/X86/extract.ll  18
-rw-r--r--  test/Transforms/SLPVectorizer/X86/extract_in_tree_user.ll  42
-rw-r--r--  test/Transforms/SLPVectorizer/X86/extractcost.ll  6
-rw-r--r--  test/Transforms/SLPVectorizer/X86/flag.ll  24
-rw-r--r--  test/Transforms/SLPVectorizer/X86/gep.ll  32
-rw-r--r--  test/Transforms/SLPVectorizer/X86/hoist.ll  16
-rw-r--r--  test/Transforms/SLPVectorizer/X86/horizontal.ll  194
-rw-r--r--  test/Transforms/SLPVectorizer/X86/implicitfloat.ll  14
-rw-r--r--  test/Transforms/SLPVectorizer/X86/in-tree-user.ll  10
-rw-r--r--  test/Transforms/SLPVectorizer/X86/intrinsic.ll  286
-rw-r--r--  test/Transforms/SLPVectorizer/X86/long_chains.ll  8
-rw-r--r--  test/Transforms/SLPVectorizer/X86/loopinvariant.ll  32
-rw-r--r--  test/Transforms/SLPVectorizer/X86/metadata.ll  28
-rw-r--r--  test/Transforms/SLPVectorizer/X86/multi_block.ll  10
-rw-r--r--  test/Transforms/SLPVectorizer/X86/multi_user.ll  18
-rw-r--r--  test/Transforms/SLPVectorizer/X86/odd_store.ll  16
-rw-r--r--  test/Transforms/SLPVectorizer/X86/operandorder.ll  184
-rw-r--r--  test/Transforms/SLPVectorizer/X86/opt.ll  14
-rw-r--r--  test/Transforms/SLPVectorizer/X86/ordering.ll  2
-rw-r--r--  test/Transforms/SLPVectorizer/X86/phi.ll  72
-rw-r--r--  test/Transforms/SLPVectorizer/X86/phi3.ll  2
-rw-r--r--  test/Transforms/SLPVectorizer/X86/phi_overalignedtype.ll  24
-rw-r--r--  test/Transforms/SLPVectorizer/X86/powof2div.ll  36
-rw-r--r--  test/Transforms/SLPVectorizer/X86/pr16628.ll  4
-rw-r--r--  test/Transforms/SLPVectorizer/X86/pr16899.ll  8
-rw-r--r--  test/Transforms/SLPVectorizer/X86/pr19657.ll  90
-rw-r--r--  test/Transforms/SLPVectorizer/X86/propagate_ir_flags.ll  210
-rw-r--r--  test/Transforms/SLPVectorizer/X86/reduction.ll  8
-rw-r--r--  test/Transforms/SLPVectorizer/X86/reduction2.ll  8
-rw-r--r--  test/Transforms/SLPVectorizer/X86/return.ll  30
-rw-r--r--  test/Transforms/SLPVectorizer/X86/rgb_phi.ll  22
-rw-r--r--  test/Transforms/SLPVectorizer/X86/saxpy.ll  44
-rw-r--r--  test/Transforms/SLPVectorizer/X86/scheduling.ll  42
-rw-r--r--  test/Transforms/SLPVectorizer/X86/simple-loop.ll  48
-rw-r--r--  test/Transforms/SLPVectorizer/X86/simplebb.ll  56
-rw-r--r--  test/Transforms/SLPVectorizer/X86/tiny-tree.ll  80
-rw-r--r--  test/Transforms/SLPVectorizer/X86/unreachable.ll  22
68 files changed, 1323 insertions(+), 1323 deletions(-)
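
Every hunk below applies the same mechanical rewrite: load and getelementptr instructions move from the old shorthand, where the pointee type was implied by the pointer operand, to the explicit form that names the pointee type as a separate first operand. A minimal before/after sketch of the rewrite (illustrative only; %p, %v, and %g are placeholder names, not taken from any file in this diff):

; old form: pointee type implied by the i32* operand
%v = load i32* %p
%g = getelementptr inbounds i32* %p, i64 1

; new form: pointee type spelled out as an explicit first operand
%v = load i32, i32* %p
%g = getelementptr inbounds i32, i32* %p, i64 1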
diff --git a/test/Transforms/SLPVectorizer/X86/addsub.ll b/test/Transforms/SLPVectorizer/X86/addsub.ll
index d082b07..7056a14 100644
--- a/test/Transforms/SLPVectorizer/X86/addsub.ll
+++ b/test/Transforms/SLPVectorizer/X86/addsub.ll
@@ -21,38 +21,38 @@ target triple = "x86_64-unknown-linux-gnu"
; Function Attrs: nounwind uwtable
define void @addsub() #0 {
entry:
- %0 = load i32* getelementptr inbounds ([4 x i32]* @b, i32 0, i64 0), align 4
- %1 = load i32* getelementptr inbounds ([4 x i32]* @c, i32 0, i64 0), align 4
+ %0 = load i32, i32* getelementptr inbounds ([4 x i32], [4 x i32]* @b, i32 0, i64 0), align 4
+ %1 = load i32, i32* getelementptr inbounds ([4 x i32], [4 x i32]* @c, i32 0, i64 0), align 4
%add = add nsw i32 %0, %1
- %2 = load i32* getelementptr inbounds ([4 x i32]* @d, i32 0, i64 0), align 4
- %3 = load i32* getelementptr inbounds ([4 x i32]* @e, i32 0, i64 0), align 4
+ %2 = load i32, i32* getelementptr inbounds ([4 x i32], [4 x i32]* @d, i32 0, i64 0), align 4
+ %3 = load i32, i32* getelementptr inbounds ([4 x i32], [4 x i32]* @e, i32 0, i64 0), align 4
%add1 = add nsw i32 %2, %3
%add2 = add nsw i32 %add, %add1
- store i32 %add2, i32* getelementptr inbounds ([4 x i32]* @a, i32 0, i64 0), align 4
- %4 = load i32* getelementptr inbounds ([4 x i32]* @b, i32 0, i64 1), align 4
- %5 = load i32* getelementptr inbounds ([4 x i32]* @c, i32 0, i64 1), align 4
+ store i32 %add2, i32* getelementptr inbounds ([4 x i32], [4 x i32]* @a, i32 0, i64 0), align 4
+ %4 = load i32, i32* getelementptr inbounds ([4 x i32], [4 x i32]* @b, i32 0, i64 1), align 4
+ %5 = load i32, i32* getelementptr inbounds ([4 x i32], [4 x i32]* @c, i32 0, i64 1), align 4
%add3 = add nsw i32 %4, %5
- %6 = load i32* getelementptr inbounds ([4 x i32]* @d, i32 0, i64 1), align 4
- %7 = load i32* getelementptr inbounds ([4 x i32]* @e, i32 0, i64 1), align 4
+ %6 = load i32, i32* getelementptr inbounds ([4 x i32], [4 x i32]* @d, i32 0, i64 1), align 4
+ %7 = load i32, i32* getelementptr inbounds ([4 x i32], [4 x i32]* @e, i32 0, i64 1), align 4
%add4 = add nsw i32 %6, %7
%sub = sub nsw i32 %add3, %add4
- store i32 %sub, i32* getelementptr inbounds ([4 x i32]* @a, i32 0, i64 1), align 4
- %8 = load i32* getelementptr inbounds ([4 x i32]* @b, i32 0, i64 2), align 4
- %9 = load i32* getelementptr inbounds ([4 x i32]* @c, i32 0, i64 2), align 4
+ store i32 %sub, i32* getelementptr inbounds ([4 x i32], [4 x i32]* @a, i32 0, i64 1), align 4
+ %8 = load i32, i32* getelementptr inbounds ([4 x i32], [4 x i32]* @b, i32 0, i64 2), align 4
+ %9 = load i32, i32* getelementptr inbounds ([4 x i32], [4 x i32]* @c, i32 0, i64 2), align 4
%add5 = add nsw i32 %8, %9
- %10 = load i32* getelementptr inbounds ([4 x i32]* @d, i32 0, i64 2), align 4
- %11 = load i32* getelementptr inbounds ([4 x i32]* @e, i32 0, i64 2), align 4
+ %10 = load i32, i32* getelementptr inbounds ([4 x i32], [4 x i32]* @d, i32 0, i64 2), align 4
+ %11 = load i32, i32* getelementptr inbounds ([4 x i32], [4 x i32]* @e, i32 0, i64 2), align 4
%add6 = add nsw i32 %10, %11
%add7 = add nsw i32 %add5, %add6
- store i32 %add7, i32* getelementptr inbounds ([4 x i32]* @a, i32 0, i64 2), align 4
- %12 = load i32* getelementptr inbounds ([4 x i32]* @b, i32 0, i64 3), align 4
- %13 = load i32* getelementptr inbounds ([4 x i32]* @c, i32 0, i64 3), align 4
+ store i32 %add7, i32* getelementptr inbounds ([4 x i32], [4 x i32]* @a, i32 0, i64 2), align 4
+ %12 = load i32, i32* getelementptr inbounds ([4 x i32], [4 x i32]* @b, i32 0, i64 3), align 4
+ %13 = load i32, i32* getelementptr inbounds ([4 x i32], [4 x i32]* @c, i32 0, i64 3), align 4
%add8 = add nsw i32 %12, %13
- %14 = load i32* getelementptr inbounds ([4 x i32]* @d, i32 0, i64 3), align 4
- %15 = load i32* getelementptr inbounds ([4 x i32]* @e, i32 0, i64 3), align 4
+ %14 = load i32, i32* getelementptr inbounds ([4 x i32], [4 x i32]* @d, i32 0, i64 3), align 4
+ %15 = load i32, i32* getelementptr inbounds ([4 x i32], [4 x i32]* @e, i32 0, i64 3), align 4
%add9 = add nsw i32 %14, %15
%sub10 = sub nsw i32 %add8, %add9
- store i32 %sub10, i32* getelementptr inbounds ([4 x i32]* @a, i32 0, i64 3), align 4
+ store i32 %sub10, i32* getelementptr inbounds ([4 x i32], [4 x i32]* @a, i32 0, i64 3), align 4
ret void
}
@@ -65,38 +65,38 @@ entry:
; Function Attrs: nounwind uwtable
define void @subadd() #0 {
entry:
- %0 = load i32* getelementptr inbounds ([4 x i32]* @b, i32 0, i64 0), align 4
- %1 = load i32* getelementptr inbounds ([4 x i32]* @c, i32 0, i64 0), align 4
+ %0 = load i32, i32* getelementptr inbounds ([4 x i32], [4 x i32]* @b, i32 0, i64 0), align 4
+ %1 = load i32, i32* getelementptr inbounds ([4 x i32], [4 x i32]* @c, i32 0, i64 0), align 4
%add = add nsw i32 %0, %1
- %2 = load i32* getelementptr inbounds ([4 x i32]* @d, i32 0, i64 0), align 4
- %3 = load i32* getelementptr inbounds ([4 x i32]* @e, i32 0, i64 0), align 4
+ %2 = load i32, i32* getelementptr inbounds ([4 x i32], [4 x i32]* @d, i32 0, i64 0), align 4
+ %3 = load i32, i32* getelementptr inbounds ([4 x i32], [4 x i32]* @e, i32 0, i64 0), align 4
%add1 = add nsw i32 %2, %3
%sub = sub nsw i32 %add, %add1
- store i32 %sub, i32* getelementptr inbounds ([4 x i32]* @a, i32 0, i64 0), align 4
- %4 = load i32* getelementptr inbounds ([4 x i32]* @b, i32 0, i64 1), align 4
- %5 = load i32* getelementptr inbounds ([4 x i32]* @c, i32 0, i64 1), align 4
+ store i32 %sub, i32* getelementptr inbounds ([4 x i32], [4 x i32]* @a, i32 0, i64 0), align 4
+ %4 = load i32, i32* getelementptr inbounds ([4 x i32], [4 x i32]* @b, i32 0, i64 1), align 4
+ %5 = load i32, i32* getelementptr inbounds ([4 x i32], [4 x i32]* @c, i32 0, i64 1), align 4
%add2 = add nsw i32 %4, %5
- %6 = load i32* getelementptr inbounds ([4 x i32]* @d, i32 0, i64 1), align 4
- %7 = load i32* getelementptr inbounds ([4 x i32]* @e, i32 0, i64 1), align 4
+ %6 = load i32, i32* getelementptr inbounds ([4 x i32], [4 x i32]* @d, i32 0, i64 1), align 4
+ %7 = load i32, i32* getelementptr inbounds ([4 x i32], [4 x i32]* @e, i32 0, i64 1), align 4
%add3 = add nsw i32 %6, %7
%add4 = add nsw i32 %add2, %add3
- store i32 %add4, i32* getelementptr inbounds ([4 x i32]* @a, i32 0, i64 1), align 4
- %8 = load i32* getelementptr inbounds ([4 x i32]* @b, i32 0, i64 2), align 4
- %9 = load i32* getelementptr inbounds ([4 x i32]* @c, i32 0, i64 2), align 4
+ store i32 %add4, i32* getelementptr inbounds ([4 x i32], [4 x i32]* @a, i32 0, i64 1), align 4
+ %8 = load i32, i32* getelementptr inbounds ([4 x i32], [4 x i32]* @b, i32 0, i64 2), align 4
+ %9 = load i32, i32* getelementptr inbounds ([4 x i32], [4 x i32]* @c, i32 0, i64 2), align 4
%add5 = add nsw i32 %8, %9
- %10 = load i32* getelementptr inbounds ([4 x i32]* @d, i32 0, i64 2), align 4
- %11 = load i32* getelementptr inbounds ([4 x i32]* @e, i32 0, i64 2), align 4
+ %10 = load i32, i32* getelementptr inbounds ([4 x i32], [4 x i32]* @d, i32 0, i64 2), align 4
+ %11 = load i32, i32* getelementptr inbounds ([4 x i32], [4 x i32]* @e, i32 0, i64 2), align 4
%add6 = add nsw i32 %10, %11
%sub7 = sub nsw i32 %add5, %add6
- store i32 %sub7, i32* getelementptr inbounds ([4 x i32]* @a, i32 0, i64 2), align 4
- %12 = load i32* getelementptr inbounds ([4 x i32]* @b, i32 0, i64 3), align 4
- %13 = load i32* getelementptr inbounds ([4 x i32]* @c, i32 0, i64 3), align 4
+ store i32 %sub7, i32* getelementptr inbounds ([4 x i32], [4 x i32]* @a, i32 0, i64 2), align 4
+ %12 = load i32, i32* getelementptr inbounds ([4 x i32], [4 x i32]* @b, i32 0, i64 3), align 4
+ %13 = load i32, i32* getelementptr inbounds ([4 x i32], [4 x i32]* @c, i32 0, i64 3), align 4
%add8 = add nsw i32 %12, %13
- %14 = load i32* getelementptr inbounds ([4 x i32]* @d, i32 0, i64 3), align 4
- %15 = load i32* getelementptr inbounds ([4 x i32]* @e, i32 0, i64 3), align 4
+ %14 = load i32, i32* getelementptr inbounds ([4 x i32], [4 x i32]* @d, i32 0, i64 3), align 4
+ %15 = load i32, i32* getelementptr inbounds ([4 x i32], [4 x i32]* @e, i32 0, i64 3), align 4
%add9 = add nsw i32 %14, %15
%add10 = add nsw i32 %add8, %add9
- store i32 %add10, i32* getelementptr inbounds ([4 x i32]* @a, i32 0, i64 3), align 4
+ store i32 %add10, i32* getelementptr inbounds ([4 x i32], [4 x i32]* @a, i32 0, i64 3), align 4
ret void
}
@@ -107,22 +107,22 @@ entry:
; Function Attrs: nounwind uwtable
define void @faddfsub() #0 {
entry:
- %0 = load float* getelementptr inbounds ([4 x float]* @fb, i32 0, i64 0), align 4
- %1 = load float* getelementptr inbounds ([4 x float]* @fc, i32 0, i64 0), align 4
+ %0 = load float, float* getelementptr inbounds ([4 x float], [4 x float]* @fb, i32 0, i64 0), align 4
+ %1 = load float, float* getelementptr inbounds ([4 x float], [4 x float]* @fc, i32 0, i64 0), align 4
%add = fadd float %0, %1
- store float %add, float* getelementptr inbounds ([4 x float]* @fa, i32 0, i64 0), align 4
- %2 = load float* getelementptr inbounds ([4 x float]* @fb, i32 0, i64 1), align 4
- %3 = load float* getelementptr inbounds ([4 x float]* @fc, i32 0, i64 1), align 4
+ store float %add, float* getelementptr inbounds ([4 x float], [4 x float]* @fa, i32 0, i64 0), align 4
+ %2 = load float, float* getelementptr inbounds ([4 x float], [4 x float]* @fb, i32 0, i64 1), align 4
+ %3 = load float, float* getelementptr inbounds ([4 x float], [4 x float]* @fc, i32 0, i64 1), align 4
%sub = fsub float %2, %3
- store float %sub, float* getelementptr inbounds ([4 x float]* @fa, i32 0, i64 1), align 4
- %4 = load float* getelementptr inbounds ([4 x float]* @fb, i32 0, i64 2), align 4
- %5 = load float* getelementptr inbounds ([4 x float]* @fc, i32 0, i64 2), align 4
+ store float %sub, float* getelementptr inbounds ([4 x float], [4 x float]* @fa, i32 0, i64 1), align 4
+ %4 = load float, float* getelementptr inbounds ([4 x float], [4 x float]* @fb, i32 0, i64 2), align 4
+ %5 = load float, float* getelementptr inbounds ([4 x float], [4 x float]* @fc, i32 0, i64 2), align 4
%add1 = fadd float %4, %5
- store float %add1, float* getelementptr inbounds ([4 x float]* @fa, i32 0, i64 2), align 4
- %6 = load float* getelementptr inbounds ([4 x float]* @fb, i32 0, i64 3), align 4
- %7 = load float* getelementptr inbounds ([4 x float]* @fc, i32 0, i64 3), align 4
+ store float %add1, float* getelementptr inbounds ([4 x float], [4 x float]* @fa, i32 0, i64 2), align 4
+ %6 = load float, float* getelementptr inbounds ([4 x float], [4 x float]* @fb, i32 0, i64 3), align 4
+ %7 = load float, float* getelementptr inbounds ([4 x float], [4 x float]* @fc, i32 0, i64 3), align 4
%sub2 = fsub float %6, %7
- store float %sub2, float* getelementptr inbounds ([4 x float]* @fa, i32 0, i64 3), align 4
+ store float %sub2, float* getelementptr inbounds ([4 x float], [4 x float]* @fa, i32 0, i64 3), align 4
ret void
}
@@ -133,22 +133,22 @@ entry:
; Function Attrs: nounwind uwtable
define void @fsubfadd() #0 {
entry:
- %0 = load float* getelementptr inbounds ([4 x float]* @fb, i32 0, i64 0), align 4
- %1 = load float* getelementptr inbounds ([4 x float]* @fc, i32 0, i64 0), align 4
+ %0 = load float, float* getelementptr inbounds ([4 x float], [4 x float]* @fb, i32 0, i64 0), align 4
+ %1 = load float, float* getelementptr inbounds ([4 x float], [4 x float]* @fc, i32 0, i64 0), align 4
%sub = fsub float %0, %1
- store float %sub, float* getelementptr inbounds ([4 x float]* @fa, i32 0, i64 0), align 4
- %2 = load float* getelementptr inbounds ([4 x float]* @fb, i32 0, i64 1), align 4
- %3 = load float* getelementptr inbounds ([4 x float]* @fc, i32 0, i64 1), align 4
+ store float %sub, float* getelementptr inbounds ([4 x float], [4 x float]* @fa, i32 0, i64 0), align 4
+ %2 = load float, float* getelementptr inbounds ([4 x float], [4 x float]* @fb, i32 0, i64 1), align 4
+ %3 = load float, float* getelementptr inbounds ([4 x float], [4 x float]* @fc, i32 0, i64 1), align 4
%add = fadd float %2, %3
- store float %add, float* getelementptr inbounds ([4 x float]* @fa, i32 0, i64 1), align 4
- %4 = load float* getelementptr inbounds ([4 x float]* @fb, i32 0, i64 2), align 4
- %5 = load float* getelementptr inbounds ([4 x float]* @fc, i32 0, i64 2), align 4
+ store float %add, float* getelementptr inbounds ([4 x float], [4 x float]* @fa, i32 0, i64 1), align 4
+ %4 = load float, float* getelementptr inbounds ([4 x float], [4 x float]* @fb, i32 0, i64 2), align 4
+ %5 = load float, float* getelementptr inbounds ([4 x float], [4 x float]* @fc, i32 0, i64 2), align 4
%sub1 = fsub float %4, %5
- store float %sub1, float* getelementptr inbounds ([4 x float]* @fa, i32 0, i64 2), align 4
- %6 = load float* getelementptr inbounds ([4 x float]* @fb, i32 0, i64 3), align 4
- %7 = load float* getelementptr inbounds ([4 x float]* @fc, i32 0, i64 3), align 4
+ store float %sub1, float* getelementptr inbounds ([4 x float], [4 x float]* @fa, i32 0, i64 2), align 4
+ %6 = load float, float* getelementptr inbounds ([4 x float], [4 x float]* @fb, i32 0, i64 3), align 4
+ %7 = load float, float* getelementptr inbounds ([4 x float], [4 x float]* @fc, i32 0, i64 3), align 4
%add2 = fadd float %6, %7
- store float %add2, float* getelementptr inbounds ([4 x float]* @fa, i32 0, i64 3), align 4
+ store float %add2, float* getelementptr inbounds ([4 x float], [4 x float]* @fa, i32 0, i64 3), align 4
ret void
}
@@ -159,22 +159,22 @@ entry:
; Function Attrs: nounwind uwtable
define void @No_faddfsub() #0 {
entry:
- %0 = load float* getelementptr inbounds ([4 x float]* @fb, i32 0, i64 0), align 4
- %1 = load float* getelementptr inbounds ([4 x float]* @fc, i32 0, i64 0), align 4
+ %0 = load float, float* getelementptr inbounds ([4 x float], [4 x float]* @fb, i32 0, i64 0), align 4
+ %1 = load float, float* getelementptr inbounds ([4 x float], [4 x float]* @fc, i32 0, i64 0), align 4
%add = fadd float %0, %1
- store float %add, float* getelementptr inbounds ([4 x float]* @fa, i32 0, i64 0), align 4
- %2 = load float* getelementptr inbounds ([4 x float]* @fb, i32 0, i64 1), align 4
- %3 = load float* getelementptr inbounds ([4 x float]* @fc, i32 0, i64 1), align 4
+ store float %add, float* getelementptr inbounds ([4 x float], [4 x float]* @fa, i32 0, i64 0), align 4
+ %2 = load float, float* getelementptr inbounds ([4 x float], [4 x float]* @fb, i32 0, i64 1), align 4
+ %3 = load float, float* getelementptr inbounds ([4 x float], [4 x float]* @fc, i32 0, i64 1), align 4
%add1 = fadd float %2, %3
- store float %add1, float* getelementptr inbounds ([4 x float]* @fa, i32 0, i64 1), align 4
- %4 = load float* getelementptr inbounds ([4 x float]* @fb, i32 0, i64 2), align 4
- %5 = load float* getelementptr inbounds ([4 x float]* @fc, i32 0, i64 2), align 4
+ store float %add1, float* getelementptr inbounds ([4 x float], [4 x float]* @fa, i32 0, i64 1), align 4
+ %4 = load float, float* getelementptr inbounds ([4 x float], [4 x float]* @fb, i32 0, i64 2), align 4
+ %5 = load float, float* getelementptr inbounds ([4 x float], [4 x float]* @fc, i32 0, i64 2), align 4
%add2 = fadd float %4, %5
- store float %add2, float* getelementptr inbounds ([4 x float]* @fa, i32 0, i64 2), align 4
- %6 = load float* getelementptr inbounds ([4 x float]* @fb, i32 0, i64 3), align 4
- %7 = load float* getelementptr inbounds ([4 x float]* @fc, i32 0, i64 3), align 4
+ store float %add2, float* getelementptr inbounds ([4 x float], [4 x float]* @fa, i32 0, i64 2), align 4
+ %6 = load float, float* getelementptr inbounds ([4 x float], [4 x float]* @fb, i32 0, i64 3), align 4
+ %7 = load float, float* getelementptr inbounds ([4 x float], [4 x float]* @fc, i32 0, i64 3), align 4
%sub = fsub float %6, %7
- store float %sub, float* getelementptr inbounds ([4 x float]* @fa, i32 0, i64 3), align 4
+ store float %sub, float* getelementptr inbounds ([4 x float], [4 x float]* @fa, i32 0, i64 3), align 4
ret void
}
@@ -189,22 +189,22 @@ entry:
; CHECK: %4 = fsub <4 x float> %1, %2
; CHECK: %5 = shufflevector <4 x float> %3, <4 x float> %4, <4 x i32> <i32 0, i32 5, i32 2, i32 7>
define void @reorder_alt() #0 {
- %1 = load float* getelementptr inbounds ([4 x float]* @fb, i32 0, i64 0), align 4
- %2 = load float* getelementptr inbounds ([4 x float]* @fa, i32 0, i64 0), align 4
+ %1 = load float, float* getelementptr inbounds ([4 x float], [4 x float]* @fb, i32 0, i64 0), align 4
+ %2 = load float, float* getelementptr inbounds ([4 x float], [4 x float]* @fa, i32 0, i64 0), align 4
%3 = fadd float %1, %2
- store float %3, float* getelementptr inbounds ([4 x float]* @fc, i32 0, i64 0), align 4
- %4 = load float* getelementptr inbounds ([4 x float]* @fa, i32 0, i64 1), align 4
- %5 = load float* getelementptr inbounds ([4 x float]* @fb, i32 0, i64 1), align 4
+ store float %3, float* getelementptr inbounds ([4 x float], [4 x float]* @fc, i32 0, i64 0), align 4
+ %4 = load float, float* getelementptr inbounds ([4 x float], [4 x float]* @fa, i32 0, i64 1), align 4
+ %5 = load float, float* getelementptr inbounds ([4 x float], [4 x float]* @fb, i32 0, i64 1), align 4
%6 = fsub float %4, %5
- store float %6, float* getelementptr inbounds ([4 x float]* @fc, i32 0, i64 1), align 4
- %7 = load float* getelementptr inbounds ([4 x float]* @fa, i32 0, i64 2), align 4
- %8 = load float* getelementptr inbounds ([4 x float]* @fb, i32 0, i64 2), align 4
+ store float %6, float* getelementptr inbounds ([4 x float], [4 x float]* @fc, i32 0, i64 1), align 4
+ %7 = load float, float* getelementptr inbounds ([4 x float], [4 x float]* @fa, i32 0, i64 2), align 4
+ %8 = load float, float* getelementptr inbounds ([4 x float], [4 x float]* @fb, i32 0, i64 2), align 4
%9 = fadd float %7, %8
- store float %9, float* getelementptr inbounds ([4 x float]* @fc, i32 0, i64 2), align 4
- %10 = load float* getelementptr inbounds ([4 x float]* @fa, i32 0, i64 3), align 4
- %11 = load float* getelementptr inbounds ([4 x float]* @fb, i32 0, i64 3), align 4
+ store float %9, float* getelementptr inbounds ([4 x float], [4 x float]* @fc, i32 0, i64 2), align 4
+ %10 = load float, float* getelementptr inbounds ([4 x float], [4 x float]* @fa, i32 0, i64 3), align 4
+ %11 = load float, float* getelementptr inbounds ([4 x float], [4 x float]* @fb, i32 0, i64 3), align 4
%12 = fsub float %10, %11
- store float %12, float* getelementptr inbounds ([4 x float]* @fc, i32 0, i64 3), align 4
+ store float %12, float* getelementptr inbounds ([4 x float], [4 x float]* @fc, i32 0, i64 3), align 4
ret void
}
@@ -222,30 +222,30 @@ define void @reorder_alt() #0 {
; CHECK: %8 = fsub <4 x float> %1, %6
; CHECK: %9 = shufflevector <4 x float> %7, <4 x float> %8, <4 x i32> <i32 0, i32 5, i32 2, i32 7>
define void @reorder_alt_subTree() #0 {
- %1 = load float* getelementptr inbounds ([4 x float]* @fa, i32 0, i64 0), align 4
- %2 = load float* getelementptr inbounds ([4 x float]* @fb, i32 0, i64 0), align 4
- %3 = load float* getelementptr inbounds ([4 x float]* @fd, i32 0, i64 0), align 4
+ %1 = load float, float* getelementptr inbounds ([4 x float], [4 x float]* @fa, i32 0, i64 0), align 4
+ %2 = load float, float* getelementptr inbounds ([4 x float], [4 x float]* @fb, i32 0, i64 0), align 4
+ %3 = load float, float* getelementptr inbounds ([4 x float], [4 x float]* @fd, i32 0, i64 0), align 4
%4 = fsub float %2, %3
%5 = fadd float %1, %4
- store float %5, float* getelementptr inbounds ([4 x float]* @fc, i32 0, i64 0), align 4
- %6 = load float* getelementptr inbounds ([4 x float]* @fa, i32 0, i64 1), align 4
- %7 = load float* getelementptr inbounds ([4 x float]* @fb, i32 0, i64 1), align 4
- %8 = load float* getelementptr inbounds ([4 x float]* @fd, i32 0, i64 1), align 4
+ store float %5, float* getelementptr inbounds ([4 x float], [4 x float]* @fc, i32 0, i64 0), align 4
+ %6 = load float, float* getelementptr inbounds ([4 x float], [4 x float]* @fa, i32 0, i64 1), align 4
+ %7 = load float, float* getelementptr inbounds ([4 x float], [4 x float]* @fb, i32 0, i64 1), align 4
+ %8 = load float, float* getelementptr inbounds ([4 x float], [4 x float]* @fd, i32 0, i64 1), align 4
%9 = fadd float %7, %8
%10 = fsub float %6, %9
- store float %10, float* getelementptr inbounds ([4 x float]* @fc, i32 0, i64 1), align 4
- %11 = load float* getelementptr inbounds ([4 x float]* @fa, i32 0, i64 2), align 4
- %12 = load float* getelementptr inbounds ([4 x float]* @fb, i32 0, i64 2), align 4
- %13 = load float* getelementptr inbounds ([4 x float]* @fd, i32 0, i64 2), align 4
+ store float %10, float* getelementptr inbounds ([4 x float], [4 x float]* @fc, i32 0, i64 1), align 4
+ %11 = load float, float* getelementptr inbounds ([4 x float], [4 x float]* @fa, i32 0, i64 2), align 4
+ %12 = load float, float* getelementptr inbounds ([4 x float], [4 x float]* @fb, i32 0, i64 2), align 4
+ %13 = load float, float* getelementptr inbounds ([4 x float], [4 x float]* @fd, i32 0, i64 2), align 4
%14 = fsub float %12, %13
%15 = fadd float %11, %14
- store float %15, float* getelementptr inbounds ([4 x float]* @fc, i32 0, i64 2), align 4
- %16 = load float* getelementptr inbounds ([4 x float]* @fa, i32 0, i64 3), align 4
- %17 = load float* getelementptr inbounds ([4 x float]* @fd, i32 0, i64 3), align 4
- %18 = load float* getelementptr inbounds ([4 x float]* @fb, i32 0, i64 3), align 4
+ store float %15, float* getelementptr inbounds ([4 x float], [4 x float]* @fc, i32 0, i64 2), align 4
+ %16 = load float, float* getelementptr inbounds ([4 x float], [4 x float]* @fa, i32 0, i64 3), align 4
+ %17 = load float, float* getelementptr inbounds ([4 x float], [4 x float]* @fd, i32 0, i64 3), align 4
+ %18 = load float, float* getelementptr inbounds ([4 x float], [4 x float]* @fb, i32 0, i64 3), align 4
%19 = fadd float %17, %18
%20 = fsub float %16, %19
- store float %20, float* getelementptr inbounds ([4 x float]* @fc, i32 0, i64 3), align 4
+ store float %20, float* getelementptr inbounds ([4 x float], [4 x float]* @fc, i32 0, i64 3), align 4
ret void
}
@@ -258,21 +258,21 @@ define void @reorder_alt_subTree() #0 {
; CHECK: fsub <2 x double>
; CHECK: shufflevector <2 x double>
define void @reorder_alt_rightsubTree(double* nocapture %c, double* noalias nocapture readonly %a, double* noalias nocapture readonly %b, double* noalias nocapture readonly %d) {
- %1 = load double* %a
- %2 = load double* %b
+ %1 = load double, double* %a
+ %2 = load double, double* %b
%3 = fadd double %1, %2
- %4 = load double* %d
+ %4 = load double, double* %d
%5 = fsub double %3, %4
store double %5, double* %c
- %6 = getelementptr inbounds double* %d, i64 1
- %7 = load double* %6
- %8 = getelementptr inbounds double* %a, i64 1
- %9 = load double* %8
- %10 = getelementptr inbounds double* %b, i64 1
- %11 = load double* %10
+ %6 = getelementptr inbounds double, double* %d, i64 1
+ %7 = load double, double* %6
+ %8 = getelementptr inbounds double, double* %a, i64 1
+ %9 = load double, double* %8
+ %10 = getelementptr inbounds double, double* %b, i64 1
+ %11 = load double, double* %10
%12 = fadd double %9, %11
%13 = fadd double %7, %12
- %14 = getelementptr inbounds double* %c, i64 1
+ %14 = getelementptr inbounds double, double* %c, i64 1
store double %13, double* %14
ret void
}
@@ -290,22 +290,22 @@ define void @reorder_alt_rightsubTree(double* nocapture %c, double* noalias noca
; CHECK-NOT: fsub <4 x float>
; CHECK-NOT: shufflevector
define void @no_vec_shuff_reorder() #0 {
- %1 = load float* getelementptr inbounds ([4 x float]* @fb, i32 0, i64 0), align 4
- %2 = load float* getelementptr inbounds ([4 x float]* @fa, i32 0, i64 0), align 4
+ %1 = load float, float* getelementptr inbounds ([4 x float], [4 x float]* @fb, i32 0, i64 0), align 4
+ %2 = load float, float* getelementptr inbounds ([4 x float], [4 x float]* @fa, i32 0, i64 0), align 4
%3 = fadd float %1, %2
- store float %3, float* getelementptr inbounds ([4 x float]* @fc, i32 0, i64 0), align 4
- %4 = load float* getelementptr inbounds ([4 x float]* @fa, i32 0, i64 1), align 4
- %5 = load float* getelementptr inbounds ([4 x float]* @fb, i32 0, i64 1), align 4
+ store float %3, float* getelementptr inbounds ([4 x float], [4 x float]* @fc, i32 0, i64 0), align 4
+ %4 = load float, float* getelementptr inbounds ([4 x float], [4 x float]* @fa, i32 0, i64 1), align 4
+ %5 = load float, float* getelementptr inbounds ([4 x float], [4 x float]* @fb, i32 0, i64 1), align 4
%6 = fsub float %4, %5
- store float %6, float* getelementptr inbounds ([4 x float]* @fc, i32 0, i64 1), align 4
- %7 = load float* getelementptr inbounds ([4 x float]* @fa, i32 0, i64 2), align 4
- %8 = load float* getelementptr inbounds ([4 x float]* @fb, i32 0, i64 2), align 4
+ store float %6, float* getelementptr inbounds ([4 x float], [4 x float]* @fc, i32 0, i64 1), align 4
+ %7 = load float, float* getelementptr inbounds ([4 x float], [4 x float]* @fa, i32 0, i64 2), align 4
+ %8 = load float, float* getelementptr inbounds ([4 x float], [4 x float]* @fb, i32 0, i64 2), align 4
%9 = fadd float %7, %8
- store float %9, float* getelementptr inbounds ([4 x float]* @fc, i32 0, i64 2), align 4
- %10 = load float* getelementptr inbounds ([4 x float]* @fb, i32 0, i64 3), align 4
- %11 = load float* getelementptr inbounds ([4 x float]* @fa, i32 0, i64 3), align 4
+ store float %9, float* getelementptr inbounds ([4 x float], [4 x float]* @fc, i32 0, i64 2), align 4
+ %10 = load float, float* getelementptr inbounds ([4 x float], [4 x float]* @fb, i32 0, i64 3), align 4
+ %11 = load float, float* getelementptr inbounds ([4 x float], [4 x float]* @fa, i32 0, i64 3), align 4
%12 = fsub float %10, %11
- store float %12, float* getelementptr inbounds ([4 x float]* @fc, i32 0, i64 3), align 4
+ store float %12, float* getelementptr inbounds ([4 x float], [4 x float]* @fc, i32 0, i64 3), align 4
ret void
}
diff --git a/test/Transforms/SLPVectorizer/X86/align.ll b/test/Transforms/SLPVectorizer/X86/align.ll
index ce80620..b74b709 100644
--- a/test/Transforms/SLPVectorizer/X86/align.ll
+++ b/test/Transforms/SLPVectorizer/X86/align.ll
@@ -8,16 +8,16 @@ target triple = "x86_64-apple-macosx10.8.0"
define void @test1(double* %a, double* %b, double* %c) {
entry:
%agg.tmp.i.i.sroa.0 = alloca [3 x double], align 16
-; CHECK: %[[V0:[0-9]+]] = load <2 x double>* %[[V2:[0-9]+]], align 8
- %i0 = load double* %a
- %i1 = load double* %b
+; CHECK: %[[V0:[0-9]+]] = load <2 x double>, <2 x double>* %[[V2:[0-9]+]], align 8
+ %i0 = load double, double* %a
+ %i1 = load double, double* %b
%mul = fmul double %i0, %i1
- %store1 = getelementptr inbounds [3 x double]* %agg.tmp.i.i.sroa.0, i64 0, i64 1
- %store2 = getelementptr inbounds [3 x double]* %agg.tmp.i.i.sroa.0, i64 0, i64 2
- %arrayidx3 = getelementptr inbounds double* %a, i64 1
- %i3 = load double* %arrayidx3, align 8
- %arrayidx4 = getelementptr inbounds double* %b, i64 1
- %i4 = load double* %arrayidx4, align 8
+ %store1 = getelementptr inbounds [3 x double], [3 x double]* %agg.tmp.i.i.sroa.0, i64 0, i64 1
+ %store2 = getelementptr inbounds [3 x double], [3 x double]* %agg.tmp.i.i.sroa.0, i64 0, i64 2
+ %arrayidx3 = getelementptr inbounds double, double* %a, i64 1
+ %i3 = load double, double* %arrayidx3, align 8
+ %arrayidx4 = getelementptr inbounds double, double* %b, i64 1
+ %i4 = load double, double* %arrayidx4, align 8
%mul5 = fmul double %i3, %i4
; CHECK: store <2 x double> %[[V1:[0-9]+]], <2 x double>* %[[V2:[0-9]+]], align 8
store double %mul, double* %store1
@@ -37,19 +37,19 @@ entry:
define void @test2(float * %a, float * %b) {
entry:
- %l0 = load float* %a
- %a1 = getelementptr inbounds float* %a, i64 1
- %l1 = load float* %a1
- %a2 = getelementptr inbounds float* %a, i64 2
- %l2 = load float* %a2
- %a3 = getelementptr inbounds float* %a, i64 3
- %l3 = load float* %a3
+ %l0 = load float, float* %a
+ %a1 = getelementptr inbounds float, float* %a, i64 1
+ %l1 = load float, float* %a1
+ %a2 = getelementptr inbounds float, float* %a, i64 2
+ %l2 = load float, float* %a2
+ %a3 = getelementptr inbounds float, float* %a, i64 3
+ %l3 = load float, float* %a3
store float %l0, float* %b
- %b1 = getelementptr inbounds float* %b, i64 1
+ %b1 = getelementptr inbounds float, float* %b, i64 1
store float %l1, float* %b1
- %b2 = getelementptr inbounds float* %b, i64 2
+ %b2 = getelementptr inbounds float, float* %b, i64 2
store float %l2, float* %b2
- %b3 = getelementptr inbounds float* %b, i64 3
+ %b3 = getelementptr inbounds float, float* %b, i64 3
store float %l3, float* %b3
ret void
}
diff --git a/test/Transforms/SLPVectorizer/X86/atomics.ll b/test/Transforms/SLPVectorizer/X86/atomics.ll
index 6cb322e..a48b793 100644
--- a/test/Transforms/SLPVectorizer/X86/atomics.ll
+++ b/test/Transforms/SLPVectorizer/X86/atomics.ll
@@ -18,14 +18,14 @@ target datalayout = "e-m:o-i64:64-f80:128-n8:16:32:64-S128"
; CHECK: store atomic i32
define void @test() {
entry:
- store i32 0, i32* getelementptr inbounds ([4 x i32]* @a, i64 0, i64 0), align 16
- store atomic i32 0, i32* getelementptr inbounds ([4 x i32]* @x, i64 0, i64 0) release, align 16
- store i32 0, i32* getelementptr inbounds ([4 x i32]* @a, i64 0, i64 1), align 4
- store atomic i32 1, i32* getelementptr inbounds ([4 x i32]* @x, i64 0, i64 1) release, align 4
- store i32 0, i32* getelementptr inbounds ([4 x i32]* @a, i64 0, i64 2), align 8
- store atomic i32 2, i32* getelementptr inbounds ([4 x i32]* @x, i64 0, i64 2) release, align 8
- store i32 0, i32* getelementptr inbounds ([4 x i32]* @a, i64 0, i64 3), align 4
- store atomic i32 3, i32* getelementptr inbounds ([4 x i32]* @x, i64 0, i64 3) release, align 4
+ store i32 0, i32* getelementptr inbounds ([4 x i32], [4 x i32]* @a, i64 0, i64 0), align 16
+ store atomic i32 0, i32* getelementptr inbounds ([4 x i32], [4 x i32]* @x, i64 0, i64 0) release, align 16
+ store i32 0, i32* getelementptr inbounds ([4 x i32], [4 x i32]* @a, i64 0, i64 1), align 4
+ store atomic i32 1, i32* getelementptr inbounds ([4 x i32], [4 x i32]* @x, i64 0, i64 1) release, align 4
+ store i32 0, i32* getelementptr inbounds ([4 x i32], [4 x i32]* @a, i64 0, i64 2), align 8
+ store atomic i32 2, i32* getelementptr inbounds ([4 x i32], [4 x i32]* @x, i64 0, i64 2) release, align 8
+ store i32 0, i32* getelementptr inbounds ([4 x i32], [4 x i32]* @a, i64 0, i64 3), align 4
+ store atomic i32 3, i32* getelementptr inbounds ([4 x i32], [4 x i32]* @x, i64 0, i64 3) release, align 4
ret void
}
diff --git a/test/Transforms/SLPVectorizer/X86/bad_types.ll b/test/Transforms/SLPVectorizer/X86/bad_types.ll
index 38ed18d..2d8f383 100644
--- a/test/Transforms/SLPVectorizer/X86/bad_types.ll
+++ b/test/Transforms/SLPVectorizer/X86/bad_types.ll
@@ -16,7 +16,7 @@ entry:
%b.cast = bitcast x86_mmx %b to i64
%a.and = and i64 %a.cast, 42
%b.and = and i64 %b.cast, 42
- %gep = getelementptr i64* %ptr, i32 1
+ %gep = getelementptr i64, i64* %ptr, i32 1
store i64 %a.and, i64* %ptr
store i64 %b.and, i64* %gep
ret void
diff --git a/test/Transforms/SLPVectorizer/X86/barriercall.ll b/test/Transforms/SLPVectorizer/X86/barriercall.ll
index bba2855..9def190 100644
--- a/test/Transforms/SLPVectorizer/X86/barriercall.ll
+++ b/test/Transforms/SLPVectorizer/X86/barriercall.ll
@@ -14,15 +14,15 @@ entry:
store i32 %add, i32* %A, align 4
%mul1 = mul nsw i32 %n, 9
%add2 = add nsw i32 %mul1, 9
- %arrayidx3 = getelementptr inbounds i32* %A, i64 1
+ %arrayidx3 = getelementptr inbounds i32, i32* %A, i64 1
store i32 %add2, i32* %arrayidx3, align 4
%mul4 = shl i32 %n, 3
%add5 = add nsw i32 %mul4, 9
- %arrayidx6 = getelementptr inbounds i32* %A, i64 2
+ %arrayidx6 = getelementptr inbounds i32, i32* %A, i64 2
store i32 %add5, i32* %arrayidx6, align 4
%mul7 = mul nsw i32 %n, 10
%add8 = add nsw i32 %mul7, 9
- %arrayidx9 = getelementptr inbounds i32* %A, i64 3
+ %arrayidx9 = getelementptr inbounds i32, i32* %A, i64 3
store i32 %add8, i32* %arrayidx9, align 4
ret i32 undef
}
diff --git a/test/Transforms/SLPVectorizer/X86/call.ll b/test/Transforms/SLPVectorizer/X86/call.ll
index 83d45c0..b76ac2c 100644
--- a/test/Transforms/SLPVectorizer/X86/call.ll
+++ b/test/Transforms/SLPVectorizer/X86/call.ll
@@ -15,18 +15,18 @@ declare i64 @round(i64)
; CHECK: ret void
define void @sin_libm(double* %a, double* %b, double* %c) {
entry:
- %i0 = load double* %a, align 8
- %i1 = load double* %b, align 8
+ %i0 = load double, double* %a, align 8
+ %i1 = load double, double* %b, align 8
%mul = fmul double %i0, %i1
%call = tail call double @sin(double %mul) nounwind readnone
- %arrayidx3 = getelementptr inbounds double* %a, i64 1
- %i3 = load double* %arrayidx3, align 8
- %arrayidx4 = getelementptr inbounds double* %b, i64 1
- %i4 = load double* %arrayidx4, align 8
+ %arrayidx3 = getelementptr inbounds double, double* %a, i64 1
+ %i3 = load double, double* %arrayidx3, align 8
+ %arrayidx4 = getelementptr inbounds double, double* %b, i64 1
+ %i4 = load double, double* %arrayidx4, align 8
%mul5 = fmul double %i3, %i4
%call5 = tail call double @sin(double %mul5) nounwind readnone
store double %call, double* %c, align 8
- %arrayidx5 = getelementptr inbounds double* %c, i64 1
+ %arrayidx5 = getelementptr inbounds double, double* %c, i64 1
store double %call5, double* %arrayidx5, align 8
ret void
}
@@ -36,18 +36,18 @@ entry:
; CHECK: ret void
define void @cos_libm(double* %a, double* %b, double* %c) {
entry:
- %i0 = load double* %a, align 8
- %i1 = load double* %b, align 8
+ %i0 = load double, double* %a, align 8
+ %i1 = load double, double* %b, align 8
%mul = fmul double %i0, %i1
%call = tail call double @cos(double %mul) nounwind readnone
- %arrayidx3 = getelementptr inbounds double* %a, i64 1
- %i3 = load double* %arrayidx3, align 8
- %arrayidx4 = getelementptr inbounds double* %b, i64 1
- %i4 = load double* %arrayidx4, align 8
+ %arrayidx3 = getelementptr inbounds double, double* %a, i64 1
+ %i3 = load double, double* %arrayidx3, align 8
+ %arrayidx4 = getelementptr inbounds double, double* %b, i64 1
+ %i4 = load double, double* %arrayidx4, align 8
%mul5 = fmul double %i3, %i4
%call5 = tail call double @cos(double %mul5) nounwind readnone
store double %call, double* %c, align 8
- %arrayidx5 = getelementptr inbounds double* %c, i64 1
+ %arrayidx5 = getelementptr inbounds double, double* %c, i64 1
store double %call5, double* %arrayidx5, align 8
ret void
}
@@ -57,18 +57,18 @@ entry:
; CHECK: ret void
define void @pow_libm(double* %a, double* %b, double* %c) {
entry:
- %i0 = load double* %a, align 8
- %i1 = load double* %b, align 8
+ %i0 = load double, double* %a, align 8
+ %i1 = load double, double* %b, align 8
%mul = fmul double %i0, %i1
%call = tail call double @pow(double %mul,double %mul) nounwind readnone
- %arrayidx3 = getelementptr inbounds double* %a, i64 1
- %i3 = load double* %arrayidx3, align 8
- %arrayidx4 = getelementptr inbounds double* %b, i64 1
- %i4 = load double* %arrayidx4, align 8
+ %arrayidx3 = getelementptr inbounds double, double* %a, i64 1
+ %i3 = load double, double* %arrayidx3, align 8
+ %arrayidx4 = getelementptr inbounds double, double* %b, i64 1
+ %i4 = load double, double* %arrayidx4, align 8
%mul5 = fmul double %i3, %i4
%call5 = tail call double @pow(double %mul5,double %mul5) nounwind readnone
store double %call, double* %c, align 8
- %arrayidx5 = getelementptr inbounds double* %c, i64 1
+ %arrayidx5 = getelementptr inbounds double, double* %c, i64 1
store double %call5, double* %arrayidx5, align 8
ret void
}
@@ -79,18 +79,18 @@ entry:
; CHECK: ret void
define void @exp2_libm(double* %a, double* %b, double* %c) {
entry:
- %i0 = load double* %a, align 8
- %i1 = load double* %b, align 8
+ %i0 = load double, double* %a, align 8
+ %i1 = load double, double* %b, align 8
%mul = fmul double %i0, %i1
%call = tail call double @exp2(double %mul) nounwind readnone
- %arrayidx3 = getelementptr inbounds double* %a, i64 1
- %i3 = load double* %arrayidx3, align 8
- %arrayidx4 = getelementptr inbounds double* %b, i64 1
- %i4 = load double* %arrayidx4, align 8
+ %arrayidx3 = getelementptr inbounds double, double* %a, i64 1
+ %i3 = load double, double* %arrayidx3, align 8
+ %arrayidx4 = getelementptr inbounds double, double* %b, i64 1
+ %i4 = load double, double* %arrayidx4, align 8
%mul5 = fmul double %i3, %i4
%call5 = tail call double @exp2(double %mul5) nounwind readnone
store double %call, double* %c, align 8
- %arrayidx5 = getelementptr inbounds double* %c, i64 1
+ %arrayidx5 = getelementptr inbounds double, double* %c, i64 1
store double %call5, double* %arrayidx5, align 8
ret void
}
@@ -102,18 +102,18 @@ entry:
; CHECK: ret void
define void @round_custom(i64* %a, i64* %b, i64* %c) {
entry:
- %i0 = load i64* %a, align 8
- %i1 = load i64* %b, align 8
+ %i0 = load i64, i64* %a, align 8
+ %i1 = load i64, i64* %b, align 8
%mul = mul i64 %i0, %i1
%call = tail call i64 @round(i64 %mul) nounwind readnone
- %arrayidx3 = getelementptr inbounds i64* %a, i64 1
- %i3 = load i64* %arrayidx3, align 8
- %arrayidx4 = getelementptr inbounds i64* %b, i64 1
- %i4 = load i64* %arrayidx4, align 8
+ %arrayidx3 = getelementptr inbounds i64, i64* %a, i64 1
+ %i3 = load i64, i64* %arrayidx3, align 8
+ %arrayidx4 = getelementptr inbounds i64, i64* %b, i64 1
+ %i4 = load i64, i64* %arrayidx4, align 8
%mul5 = mul i64 %i3, %i4
%call5 = tail call i64 @round(i64 %mul5) nounwind readnone
store i64 %call, i64* %c, align 8
- %arrayidx5 = getelementptr inbounds i64* %c, i64 1
+ %arrayidx5 = getelementptr inbounds i64, i64* %c, i64 1
store i64 %call5, i64* %arrayidx5, align 8
ret void
}
diff --git a/test/Transforms/SLPVectorizer/X86/cast.ll b/test/Transforms/SLPVectorizer/X86/cast.ll
index e340fba..044db5d 100644
--- a/test/Transforms/SLPVectorizer/X86/cast.ll
+++ b/test/Transforms/SLPVectorizer/X86/cast.ll
@@ -15,23 +15,23 @@ target triple = "x86_64-apple-macosx10.9.0"
;CHECK: store <4 x i32>
define i32 @foo(i32* noalias nocapture %A, i8* noalias nocapture %B) {
entry:
- %0 = load i8* %B, align 1
+ %0 = load i8, i8* %B, align 1
%conv = sext i8 %0 to i32
store i32 %conv, i32* %A, align 4
- %arrayidx2 = getelementptr inbounds i8* %B, i64 1
- %1 = load i8* %arrayidx2, align 1
+ %arrayidx2 = getelementptr inbounds i8, i8* %B, i64 1
+ %1 = load i8, i8* %arrayidx2, align 1
%conv3 = sext i8 %1 to i32
- %arrayidx4 = getelementptr inbounds i32* %A, i64 1
+ %arrayidx4 = getelementptr inbounds i32, i32* %A, i64 1
store i32 %conv3, i32* %arrayidx4, align 4
- %arrayidx5 = getelementptr inbounds i8* %B, i64 2
- %2 = load i8* %arrayidx5, align 1
+ %arrayidx5 = getelementptr inbounds i8, i8* %B, i64 2
+ %2 = load i8, i8* %arrayidx5, align 1
%conv6 = sext i8 %2 to i32
- %arrayidx7 = getelementptr inbounds i32* %A, i64 2
+ %arrayidx7 = getelementptr inbounds i32, i32* %A, i64 2
store i32 %conv6, i32* %arrayidx7, align 4
- %arrayidx8 = getelementptr inbounds i8* %B, i64 3
- %3 = load i8* %arrayidx8, align 1
+ %arrayidx8 = getelementptr inbounds i8, i8* %B, i64 3
+ %3 = load i8, i8* %arrayidx8, align 1
%conv9 = sext i8 %3 to i32
- %arrayidx10 = getelementptr inbounds i32* %A, i64 3
+ %arrayidx10 = getelementptr inbounds i32, i32* %A, i64 3
store i32 %conv9, i32* %arrayidx10, align 4
ret i32 undef
}
diff --git a/test/Transforms/SLPVectorizer/X86/cmp_sel.ll b/test/Transforms/SLPVectorizer/X86/cmp_sel.ll
index 0c124a7..a3e2b21 100644
--- a/test/Transforms/SLPVectorizer/X86/cmp_sel.ll
+++ b/test/Transforms/SLPVectorizer/X86/cmp_sel.ll
@@ -16,16 +16,16 @@ target triple = "x86_64-apple-macosx10.8.0"
;CHECK: ret i32 undef
define i32 @foo(double* noalias nocapture %A, double* noalias nocapture %B, double %G) {
entry:
- %arrayidx = getelementptr inbounds double* %B, i64 10
- %0 = load double* %arrayidx, align 8
+ %arrayidx = getelementptr inbounds double, double* %B, i64 10
+ %0 = load double, double* %arrayidx, align 8
%tobool = fcmp une double %0, 0.000000e+00
%cond = select i1 %tobool, double %G, double 1.000000e+00
store double %cond, double* %A, align 8
- %arrayidx2 = getelementptr inbounds double* %B, i64 11
- %1 = load double* %arrayidx2, align 8
+ %arrayidx2 = getelementptr inbounds double, double* %B, i64 11
+ %1 = load double, double* %arrayidx2, align 8
%tobool3 = fcmp une double %1, 0.000000e+00
%cond7 = select i1 %tobool3, double %G, double 1.000000e+00
- %arrayidx8 = getelementptr inbounds double* %A, i64 1
+ %arrayidx8 = getelementptr inbounds double, double* %A, i64 1
store double %cond7, double* %arrayidx8, align 8
ret i32 undef
}
diff --git a/test/Transforms/SLPVectorizer/X86/compare-reduce.ll b/test/Transforms/SLPVectorizer/X86/compare-reduce.ll
index 9653d18..3c457c4 100644
--- a/test/Transforms/SLPVectorizer/X86/compare-reduce.ll
+++ b/test/Transforms/SLPVectorizer/X86/compare-reduce.ll
@@ -21,14 +21,14 @@ entry:
for.body: ; preds = %for.inc, %entry
%indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.inc ]
%0 = shl nsw i64 %indvars.iv, 1
- %arrayidx = getelementptr inbounds double* %A, i64 %0
- %1 = load double* %arrayidx, align 8
+ %arrayidx = getelementptr inbounds double, double* %A, i64 %0
+ %1 = load double, double* %arrayidx, align 8
%mul1 = fmul double %conv, %1
%mul2 = fmul double %mul1, 7.000000e+00
%add = fadd double %mul2, 5.000000e+00
%2 = or i64 %0, 1
- %arrayidx6 = getelementptr inbounds double* %A, i64 %2
- %3 = load double* %arrayidx6, align 8
+ %arrayidx6 = getelementptr inbounds double, double* %A, i64 %2
+ %3 = load double, double* %arrayidx6, align 8
%mul8 = fmul double %conv, %3
%mul9 = fmul double %mul8, 4.000000e+00
%add10 = fadd double %mul9, 9.000000e+00
@@ -36,7 +36,7 @@ for.body: ; preds = %for.inc, %entry
br i1 %cmp11, label %if.then, label %for.inc
if.then: ; preds = %for.body
- %call = tail call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([6 x i8]* @.str, i64 0, i64 0))
+ %call = tail call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([6 x i8], [6 x i8]* @.str, i64 0, i64 0))
br label %for.inc
for.inc: ; preds = %for.body, %if.then
diff --git a/test/Transforms/SLPVectorizer/X86/consecutive-access.ll b/test/Transforms/SLPVectorizer/X86/consecutive-access.ll
index aa59429..1ad4d69 100644
--- a/test/Transforms/SLPVectorizer/X86/consecutive-access.ll
+++ b/test/Transforms/SLPVectorizer/X86/consecutive-access.ll
@@ -20,26 +20,26 @@ entry:
store i32 %u, i32* %u.addr, align 4
%mul = mul nsw i32 %u, 3
%idxprom = sext i32 %mul to i64
- %arrayidx = getelementptr inbounds [2000 x double]* @A, i32 0, i64 %idxprom
- %0 = load double* %arrayidx, align 8
- %arrayidx4 = getelementptr inbounds [2000 x double]* @B, i32 0, i64 %idxprom
- %1 = load double* %arrayidx4, align 8
+ %arrayidx = getelementptr inbounds [2000 x double], [2000 x double]* @A, i32 0, i64 %idxprom
+ %0 = load double, double* %arrayidx, align 8
+ %arrayidx4 = getelementptr inbounds [2000 x double], [2000 x double]* @B, i32 0, i64 %idxprom
+ %1 = load double, double* %arrayidx4, align 8
%add5 = fadd double %0, %1
store double %add5, double* %arrayidx, align 8
%add11 = add nsw i32 %mul, 1
%idxprom12 = sext i32 %add11 to i64
- %arrayidx13 = getelementptr inbounds [2000 x double]* @A, i32 0, i64 %idxprom12
- %2 = load double* %arrayidx13, align 8
- %arrayidx17 = getelementptr inbounds [2000 x double]* @B, i32 0, i64 %idxprom12
- %3 = load double* %arrayidx17, align 8
+ %arrayidx13 = getelementptr inbounds [2000 x double], [2000 x double]* @A, i32 0, i64 %idxprom12
+ %2 = load double, double* %arrayidx13, align 8
+ %arrayidx17 = getelementptr inbounds [2000 x double], [2000 x double]* @B, i32 0, i64 %idxprom12
+ %3 = load double, double* %arrayidx17, align 8
%add18 = fadd double %2, %3
store double %add18, double* %arrayidx13, align 8
%add24 = add nsw i32 %mul, 2
%idxprom25 = sext i32 %add24 to i64
- %arrayidx26 = getelementptr inbounds [2000 x double]* @A, i32 0, i64 %idxprom25
- %4 = load double* %arrayidx26, align 8
- %arrayidx30 = getelementptr inbounds [2000 x double]* @B, i32 0, i64 %idxprom25
- %5 = load double* %arrayidx30, align 8
+ %arrayidx26 = getelementptr inbounds [2000 x double], [2000 x double]* @A, i32 0, i64 %idxprom25
+ %4 = load double, double* %arrayidx26, align 8
+ %arrayidx30 = getelementptr inbounds [2000 x double], [2000 x double]* @B, i32 0, i64 %idxprom25
+ %5 = load double, double* %arrayidx30, align 8
%add31 = fadd double %4, %5
store double %add31, double* %arrayidx26, align 8
ret void
@@ -57,18 +57,18 @@ entry:
store i32 %u, i32* %u.addr, align 4
%mul = mul nsw i32 %u, 2
%idxprom = sext i32 %mul to i64
- %arrayidx = getelementptr inbounds [2000 x double]* @A, i32 0, i64 %idxprom
- %0 = load double* %arrayidx, align 8
- %arrayidx4 = getelementptr inbounds [2000 x double]* @B, i32 0, i64 %idxprom
- %1 = load double* %arrayidx4, align 8
+ %arrayidx = getelementptr inbounds [2000 x double], [2000 x double]* @A, i32 0, i64 %idxprom
+ %0 = load double, double* %arrayidx, align 8
+ %arrayidx4 = getelementptr inbounds [2000 x double], [2000 x double]* @B, i32 0, i64 %idxprom
+ %1 = load double, double* %arrayidx4, align 8
%add5 = fadd double %0, %1
store double %add5, double* %arrayidx, align 8
%add11 = add nsw i32 %mul, 1
%idxprom12 = sext i32 %add11 to i64
- %arrayidx13 = getelementptr inbounds [2000 x double]* @A, i32 0, i64 %idxprom12
- %2 = load double* %arrayidx13, align 8
- %arrayidx17 = getelementptr inbounds [2000 x double]* @B, i32 0, i64 %idxprom12
- %3 = load double* %arrayidx17, align 8
+ %arrayidx13 = getelementptr inbounds [2000 x double], [2000 x double]* @A, i32 0, i64 %idxprom12
+ %2 = load double, double* %arrayidx13, align 8
+ %arrayidx17 = getelementptr inbounds [2000 x double], [2000 x double]* @B, i32 0, i64 %idxprom12
+ %3 = load double, double* %arrayidx17, align 8
%add18 = fadd double %2, %3
store double %add18, double* %arrayidx13, align 8
ret void
@@ -84,34 +84,34 @@ entry:
store i32 %u, i32* %u.addr, align 4
%mul = mul nsw i32 %u, 4
%idxprom = sext i32 %mul to i64
- %arrayidx = getelementptr inbounds [2000 x float]* @C, i32 0, i64 %idxprom
- %0 = load float* %arrayidx, align 4
- %arrayidx4 = getelementptr inbounds [2000 x float]* @D, i32 0, i64 %idxprom
- %1 = load float* %arrayidx4, align 4
+ %arrayidx = getelementptr inbounds [2000 x float], [2000 x float]* @C, i32 0, i64 %idxprom
+ %0 = load float, float* %arrayidx, align 4
+ %arrayidx4 = getelementptr inbounds [2000 x float], [2000 x float]* @D, i32 0, i64 %idxprom
+ %1 = load float, float* %arrayidx4, align 4
%add5 = fadd float %0, %1
store float %add5, float* %arrayidx, align 4
%add11 = add nsw i32 %mul, 1
%idxprom12 = sext i32 %add11 to i64
- %arrayidx13 = getelementptr inbounds [2000 x float]* @C, i32 0, i64 %idxprom12
- %2 = load float* %arrayidx13, align 4
- %arrayidx17 = getelementptr inbounds [2000 x float]* @D, i32 0, i64 %idxprom12
- %3 = load float* %arrayidx17, align 4
+ %arrayidx13 = getelementptr inbounds [2000 x float], [2000 x float]* @C, i32 0, i64 %idxprom12
+ %2 = load float, float* %arrayidx13, align 4
+ %arrayidx17 = getelementptr inbounds [2000 x float], [2000 x float]* @D, i32 0, i64 %idxprom12
+ %3 = load float, float* %arrayidx17, align 4
%add18 = fadd float %2, %3
store float %add18, float* %arrayidx13, align 4
%add24 = add nsw i32 %mul, 2
%idxprom25 = sext i32 %add24 to i64
- %arrayidx26 = getelementptr inbounds [2000 x float]* @C, i32 0, i64 %idxprom25
- %4 = load float* %arrayidx26, align 4
- %arrayidx30 = getelementptr inbounds [2000 x float]* @D, i32 0, i64 %idxprom25
- %5 = load float* %arrayidx30, align 4
+ %arrayidx26 = getelementptr inbounds [2000 x float], [2000 x float]* @C, i32 0, i64 %idxprom25
+ %4 = load float, float* %arrayidx26, align 4
+ %arrayidx30 = getelementptr inbounds [2000 x float], [2000 x float]* @D, i32 0, i64 %idxprom25
+ %5 = load float, float* %arrayidx30, align 4
%add31 = fadd float %4, %5
store float %add31, float* %arrayidx26, align 4
%add37 = add nsw i32 %mul, 3
%idxprom38 = sext i32 %add37 to i64
- %arrayidx39 = getelementptr inbounds [2000 x float]* @C, i32 0, i64 %idxprom38
- %6 = load float* %arrayidx39, align 4
- %arrayidx43 = getelementptr inbounds [2000 x float]* @D, i32 0, i64 %idxprom38
- %7 = load float* %arrayidx43, align 4
+ %arrayidx39 = getelementptr inbounds [2000 x float], [2000 x float]* @C, i32 0, i64 %idxprom38
+ %6 = load float, float* %arrayidx39, align 4
+ %arrayidx43 = getelementptr inbounds [2000 x float], [2000 x float]* @D, i32 0, i64 %idxprom38
+ %7 = load float, float* %arrayidx43, align 4
%add44 = fadd float %6, %7
store float %add44, float* %arrayidx39, align 4
ret void
@@ -142,13 +142,13 @@ for.body: ; preds = %for.body.lr.ph, %fo
%1 = phi double [ 0.000000e+00, %for.body.lr.ph ], [ %add7, %for.body ]
%mul = mul nsw i32 %0, 2
%idxprom = sext i32 %mul to i64
- %arrayidx = getelementptr inbounds double* %A, i64 %idxprom
- %2 = load double* %arrayidx, align 8
+ %arrayidx = getelementptr inbounds double, double* %A, i64 %idxprom
+ %2 = load double, double* %arrayidx, align 8
%mul1 = fmul double 7.000000e+00, %2
%add = add nsw i32 %mul, 1
%idxprom3 = sext i32 %add to i64
- %arrayidx4 = getelementptr inbounds double* %A, i64 %idxprom3
- %3 = load double* %arrayidx4, align 8
+ %arrayidx4 = getelementptr inbounds double, double* %A, i64 %idxprom3
+ %3 = load double, double* %arrayidx4, align 8
%mul5 = fmul double 7.000000e+00, %3
%add6 = fadd double %mul1, %mul5
%add7 = fadd double %1, %add6
diff --git a/test/Transforms/SLPVectorizer/X86/continue_vectorizing.ll b/test/Transforms/SLPVectorizer/X86/continue_vectorizing.ll
index ed22574..ecae70e 100644
--- a/test/Transforms/SLPVectorizer/X86/continue_vectorizing.ll
+++ b/test/Transforms/SLPVectorizer/X86/continue_vectorizing.ll
@@ -9,21 +9,21 @@ target triple = "x86_64-apple-macosx10.8.0"
; CHECK: ret
define void @test1(double* %a, double* %b, double* %c, double* %d) {
entry:
- %i0 = load double* %a, align 8
- %i1 = load double* %b, align 8
+ %i0 = load double, double* %a, align 8
+ %i1 = load double, double* %b, align 8
%mul = fmul double %i0, %i1
- %arrayidx3 = getelementptr inbounds double* %a, i64 1
- %i3 = load double* %arrayidx3, align 8
- %arrayidx4 = getelementptr inbounds double* %b, i64 1
- %i4 = load double* %arrayidx4, align 8
+ %arrayidx3 = getelementptr inbounds double, double* %a, i64 1
+ %i3 = load double, double* %arrayidx3, align 8
+ %arrayidx4 = getelementptr inbounds double, double* %b, i64 1
+ %i4 = load double, double* %arrayidx4, align 8
%mul5 = fmul double %i3, %i4
store double %mul, double* %c, align 8
- %arrayidx5 = getelementptr inbounds double* %c, i64 1
+ %arrayidx5 = getelementptr inbounds double, double* %c, i64 1
store double %mul5, double* %arrayidx5, align 8
%0 = bitcast double* %a to <4 x i32>*
- %1 = load <4 x i32>* %0, align 8
+ %1 = load <4 x i32>, <4 x i32>* %0, align 8
%2 = bitcast double* %b to <4 x i32>*
- %3 = load <4 x i32>* %2, align 8
+ %3 = load <4 x i32>, <4 x i32>* %2, align 8
%4 = mul <4 x i32> %1, %3
%5 = bitcast double* %d to <4 x i32>*
store <4 x i32> %4, <4 x i32>* %5, align 8
diff --git a/test/Transforms/SLPVectorizer/X86/crash_7zip.ll b/test/Transforms/SLPVectorizer/X86/crash_7zip.ll
index 51b1c08..54e5d5a 100644
--- a/test/Transforms/SLPVectorizer/X86/crash_7zip.ll
+++ b/test/Transforms/SLPVectorizer/X86/crash_7zip.ll
@@ -8,8 +8,8 @@ target triple = "x86_64-apple-macosx10.8.0"
define fastcc void @LzmaDec_DecodeReal2(%struct.CLzmaDec.1.28.55.82.103.124.145.166.181.196.229.259.334* %p) {
entry:
- %range20.i = getelementptr inbounds %struct.CLzmaDec.1.28.55.82.103.124.145.166.181.196.229.259.334* %p, i64 0, i32 4
- %code21.i = getelementptr inbounds %struct.CLzmaDec.1.28.55.82.103.124.145.166.181.196.229.259.334* %p, i64 0, i32 5
+ %range20.i = getelementptr inbounds %struct.CLzmaDec.1.28.55.82.103.124.145.166.181.196.229.259.334, %struct.CLzmaDec.1.28.55.82.103.124.145.166.181.196.229.259.334* %p, i64 0, i32 4
+ %code21.i = getelementptr inbounds %struct.CLzmaDec.1.28.55.82.103.124.145.166.181.196.229.259.334, %struct.CLzmaDec.1.28.55.82.103.124.145.166.181.196.229.259.334* %p, i64 0, i32 5
br label %do.body66.i
do.body66.i: ; preds = %do.cond.i, %entry
diff --git a/test/Transforms/SLPVectorizer/X86/crash_binaryop.ll b/test/Transforms/SLPVectorizer/X86/crash_binaryop.ll
index dc99366..9046c35 100644
--- a/test/Transforms/SLPVectorizer/X86/crash_binaryop.ll
+++ b/test/Transforms/SLPVectorizer/X86/crash_binaryop.ll
@@ -7,7 +7,7 @@ target triple = "x86_64-apple-darwin13.3.0"
define i32 @fn1() {
entry:
- %init = load double* @a, align 8
+ %init = load double, double* @a, align 8
br label %loop
loop:
diff --git a/test/Transforms/SLPVectorizer/X86/crash_bullet.ll b/test/Transforms/SLPVectorizer/X86/crash_bullet.ll
index 3898921..1bad671 100644
--- a/test/Transforms/SLPVectorizer/X86/crash_bullet.ll
+++ b/test/Transforms/SLPVectorizer/X86/crash_bullet.ll
@@ -13,8 +13,8 @@ if.then: ; preds = %entry
ret void
if.else: ; preds = %entry
- %m_numConstraintRows4 = getelementptr inbounds %"struct.btTypedConstraint::btConstraintInfo1.17.157.357.417.477.960"* %info, i64 0, i32 0
- %nub5 = getelementptr inbounds %"struct.btTypedConstraint::btConstraintInfo1.17.157.357.417.477.960"* %info, i64 0, i32 1
+ %m_numConstraintRows4 = getelementptr inbounds %"struct.btTypedConstraint::btConstraintInfo1.17.157.357.417.477.960", %"struct.btTypedConstraint::btConstraintInfo1.17.157.357.417.477.960"* %info, i64 0, i32 0
+ %nub5 = getelementptr inbounds %"struct.btTypedConstraint::btConstraintInfo1.17.157.357.417.477.960", %"struct.btTypedConstraint::btConstraintInfo1.17.157.357.417.477.960"* %info, i64 0, i32 1
br i1 undef, label %land.lhs.true.i.1, label %if.then7.1
land.lhs.true.i.1: ; preds = %if.else
@@ -43,9 +43,9 @@ for.inc.1: ; preds = %if.then7.1, %land.l
define void @_ZN30GIM_TRIANGLE_CALCULATION_CACHE18triangle_collisionERK9btVector3S2_S2_fS2_S2_S2_fR25GIM_TRIANGLE_CONTACT_DATA(%class.GIM_TRIANGLE_CALCULATION_CACHE.9.34.69.94.119.144.179.189.264.284.332* %this) {
entry:
- %arrayidx26 = getelementptr inbounds %class.GIM_TRIANGLE_CALCULATION_CACHE.9.34.69.94.119.144.179.189.264.284.332* %this, i64 0, i32 2, i64 0, i32 0, i64 1
- %arrayidx36 = getelementptr inbounds %class.GIM_TRIANGLE_CALCULATION_CACHE.9.34.69.94.119.144.179.189.264.284.332* %this, i64 0, i32 2, i64 0, i32 0, i64 2
- %0 = load float* %arrayidx36, align 4
+ %arrayidx26 = getelementptr inbounds %class.GIM_TRIANGLE_CALCULATION_CACHE.9.34.69.94.119.144.179.189.264.284.332, %class.GIM_TRIANGLE_CALCULATION_CACHE.9.34.69.94.119.144.179.189.264.284.332* %this, i64 0, i32 2, i64 0, i32 0, i64 1
+ %arrayidx36 = getelementptr inbounds %class.GIM_TRIANGLE_CALCULATION_CACHE.9.34.69.94.119.144.179.189.264.284.332, %class.GIM_TRIANGLE_CALCULATION_CACHE.9.34.69.94.119.144.179.189.264.284.332* %this, i64 0, i32 2, i64 0, i32 0, i64 2
+ %0 = load float, float* %arrayidx36, align 4
%add587 = fadd float undef, undef
%sub600 = fsub float %add587, undef
store float %sub600, float* undef, align 4
diff --git a/test/Transforms/SLPVectorizer/X86/crash_bullet3.ll b/test/Transforms/SLPVectorizer/X86/crash_bullet3.ll
index 25c6545..8102769 100644
--- a/test/Transforms/SLPVectorizer/X86/crash_bullet3.ll
+++ b/test/Transforms/SLPVectorizer/X86/crash_bullet3.ll
@@ -59,9 +59,9 @@ if.end332: ; preds = %if.then329, %if.end
%dy276.1 = phi float [ undef, %if.then329 ], [ undef, %if.end327 ], [ 0x3F847AE140000000, %if.then291 ]
%sub334 = fsub float %add294, %dx272.1
%sub338 = fsub float %add297, %dy276.1
- %arrayidx.i.i606 = getelementptr inbounds %class.btVector3.23.221.463.485.507.573.595.683.727.749.815.837.991.1585.1607.1629.1651.1849.2047.2069.2091.2113* %vertices, i64 0, i32 0, i64 0
+ %arrayidx.i.i606 = getelementptr inbounds %class.btVector3.23.221.463.485.507.573.595.683.727.749.815.837.991.1585.1607.1629.1651.1849.2047.2069.2091.2113, %class.btVector3.23.221.463.485.507.573.595.683.727.749.815.837.991.1585.1607.1629.1651.1849.2047.2069.2091.2113* %vertices, i64 0, i32 0, i64 0
store float %sub334, float* %arrayidx.i.i606, align 4
- %arrayidx3.i607 = getelementptr inbounds %class.btVector3.23.221.463.485.507.573.595.683.727.749.815.837.991.1585.1607.1629.1651.1849.2047.2069.2091.2113* %vertices, i64 0, i32 0, i64 1
+ %arrayidx3.i607 = getelementptr inbounds %class.btVector3.23.221.463.485.507.573.595.683.727.749.815.837.991.1585.1607.1629.1651.1849.2047.2069.2091.2113, %class.btVector3.23.221.463.485.507.573.595.683.727.749.815.837.991.1585.1607.1629.1651.1849.2047.2069.2091.2113* %vertices, i64 0, i32 0, i64 1
store float %sub338, float* %arrayidx3.i607, align 4
br label %return
diff --git a/test/Transforms/SLPVectorizer/X86/crash_cmpop.ll b/test/Transforms/SLPVectorizer/X86/crash_cmpop.ll
index 18a96e5..f10c862 100644
--- a/test/Transforms/SLPVectorizer/X86/crash_cmpop.ll
+++ b/test/Transforms/SLPVectorizer/X86/crash_cmpop.ll
@@ -12,10 +12,10 @@ for.body:
%acc1.056 = phi float [ 0.000000e+00, %entry ], [ %add13, %for.body ]
%s1.055 = phi float [ 0.000000e+00, %entry ], [ %cond.i40, %for.body ]
%s0.054 = phi float [ 0.000000e+00, %entry ], [ %cond.i44, %for.body ]
- %arrayidx = getelementptr inbounds float* %src, i64 %indvars.iv
- %0 = load float* %arrayidx, align 4
+ %arrayidx = getelementptr inbounds float, float* %src, i64 %indvars.iv
+ %0 = load float, float* %arrayidx, align 4
%indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
- %arrayidx2 = getelementptr inbounds float* %dest, i64 %indvars.iv
+ %arrayidx2 = getelementptr inbounds float, float* %dest, i64 %indvars.iv
store float %acc1.056, float* %arrayidx2, align 4
%add = fadd float %s0.054, %0
%add3 = fadd float %s1.055, %0
diff --git a/test/Transforms/SLPVectorizer/X86/crash_dequeue.ll b/test/Transforms/SLPVectorizer/X86/crash_dequeue.ll
index ce01590..28b7aa3 100644
--- a/test/Transforms/SLPVectorizer/X86/crash_dequeue.ll
+++ b/test/Transforms/SLPVectorizer/X86/crash_dequeue.ll
@@ -7,13 +7,13 @@ target triple = "x86_64-apple-macosx10.8.0"
; Function Attrs: nounwind ssp uwtable
define void @_ZSt6uniqueISt15_Deque_iteratorIdRdPdEET_S4_S4_(%"struct.std::_Deque_iterator.4.157.174.208.259.276.344.731"* %__first, %"struct.std::_Deque_iterator.4.157.174.208.259.276.344.731"* nocapture %__last) {
entry:
- %_M_cur2.i.i = getelementptr inbounds %"struct.std::_Deque_iterator.4.157.174.208.259.276.344.731"* %__first, i64 0, i32 0
- %0 = load double** %_M_cur2.i.i, align 8
- %_M_first3.i.i = getelementptr inbounds %"struct.std::_Deque_iterator.4.157.174.208.259.276.344.731"* %__first, i64 0, i32 1
- %_M_cur2.i.i81 = getelementptr inbounds %"struct.std::_Deque_iterator.4.157.174.208.259.276.344.731"* %__last, i64 0, i32 0
- %1 = load double** %_M_cur2.i.i81, align 8
- %_M_first3.i.i83 = getelementptr inbounds %"struct.std::_Deque_iterator.4.157.174.208.259.276.344.731"* %__last, i64 0, i32 1
- %2 = load double** %_M_first3.i.i83, align 8
+ %_M_cur2.i.i = getelementptr inbounds %"struct.std::_Deque_iterator.4.157.174.208.259.276.344.731", %"struct.std::_Deque_iterator.4.157.174.208.259.276.344.731"* %__first, i64 0, i32 0
+ %0 = load double*, double** %_M_cur2.i.i, align 8
+ %_M_first3.i.i = getelementptr inbounds %"struct.std::_Deque_iterator.4.157.174.208.259.276.344.731", %"struct.std::_Deque_iterator.4.157.174.208.259.276.344.731"* %__first, i64 0, i32 1
+ %_M_cur2.i.i81 = getelementptr inbounds %"struct.std::_Deque_iterator.4.157.174.208.259.276.344.731", %"struct.std::_Deque_iterator.4.157.174.208.259.276.344.731"* %__last, i64 0, i32 0
+ %1 = load double*, double** %_M_cur2.i.i81, align 8
+ %_M_first3.i.i83 = getelementptr inbounds %"struct.std::_Deque_iterator.4.157.174.208.259.276.344.731", %"struct.std::_Deque_iterator.4.157.174.208.259.276.344.731"* %__last, i64 0, i32 1
+ %2 = load double*, double** %_M_first3.i.i83, align 8
br i1 undef, label %_ZSt13adjacent_findISt15_Deque_iteratorIdRdPdEET_S4_S4_.exit, label %while.cond.i.preheader
while.cond.i.preheader: ; preds = %entry
diff --git a/test/Transforms/SLPVectorizer/X86/crash_gep.ll b/test/Transforms/SLPVectorizer/X86/crash_gep.ll
index dd4034c..bd1e8f7 100644
--- a/test/Transforms/SLPVectorizer/X86/crash_gep.ll
+++ b/test/Transforms/SLPVectorizer/X86/crash_gep.ll
@@ -8,10 +8,10 @@ target triple = "x86_64-unknown-linux-gnu"
; Function Attrs: nounwind uwtable
define i32 @fn1() {
entry:
- %0 = load i64** @a, align 8
- %add.ptr = getelementptr inbounds i64* %0, i64 1
+ %0 = load i64*, i64** @a, align 8
+ %add.ptr = getelementptr inbounds i64, i64* %0, i64 1
%1 = ptrtoint i64* %add.ptr to i64
- %arrayidx = getelementptr inbounds i64* %0, i64 2
+ %arrayidx = getelementptr inbounds i64, i64* %0, i64 2
store i64 %1, i64* %arrayidx, align 8
%2 = ptrtoint i64* %arrayidx to i64
store i64 %2, i64* %add.ptr, align 8
diff --git a/test/Transforms/SLPVectorizer/X86/crash_lencod.ll b/test/Transforms/SLPVectorizer/X86/crash_lencod.ll
index c02e1fa..70b13fd 100644
--- a/test/Transforms/SLPVectorizer/X86/crash_lencod.ll
+++ b/test/Transforms/SLPVectorizer/X86/crash_lencod.ll
@@ -68,23 +68,23 @@ if.end103: ; preds = %if.else, %if.then88
define void @intrapred_luma() {
entry:
%conv153 = trunc i32 undef to i16
- %arrayidx154 = getelementptr inbounds [13 x i16]* undef, i64 0, i64 12
+ %arrayidx154 = getelementptr inbounds [13 x i16], [13 x i16]* undef, i64 0, i64 12
store i16 %conv153, i16* %arrayidx154, align 8
- %arrayidx155 = getelementptr inbounds [13 x i16]* undef, i64 0, i64 11
+ %arrayidx155 = getelementptr inbounds [13 x i16], [13 x i16]* undef, i64 0, i64 11
store i16 %conv153, i16* %arrayidx155, align 2
- %arrayidx156 = getelementptr inbounds [13 x i16]* undef, i64 0, i64 10
+ %arrayidx156 = getelementptr inbounds [13 x i16], [13 x i16]* undef, i64 0, i64 10
store i16 %conv153, i16* %arrayidx156, align 4
ret void
}
define fastcc void @dct36(double* %inbuf) {
entry:
- %arrayidx41 = getelementptr inbounds double* %inbuf, i64 2
- %arrayidx44 = getelementptr inbounds double* %inbuf, i64 1
- %0 = load double* %arrayidx44, align 8
+ %arrayidx41 = getelementptr inbounds double, double* %inbuf, i64 2
+ %arrayidx44 = getelementptr inbounds double, double* %inbuf, i64 1
+ %0 = load double, double* %arrayidx44, align 8
%add46 = fadd double %0, undef
store double %add46, double* %arrayidx41, align 8
- %1 = load double* %inbuf, align 8
+ %1 = load double, double* %inbuf, align 8
%add49 = fadd double %1, %0
store double %add49, double* %arrayidx44, align 8
ret void
diff --git a/test/Transforms/SLPVectorizer/X86/crash_mandeltext.ll b/test/Transforms/SLPVectorizer/X86/crash_mandeltext.ll
index d6915e2..f82343f 100644
--- a/test/Transforms/SLPVectorizer/X86/crash_mandeltext.ll
+++ b/test/Transforms/SLPVectorizer/X86/crash_mandeltext.ll
@@ -55,18 +55,18 @@ for.end48: ; preds = %for.end44
define void @zot(%struct.hoge* %arg) {
bb:
- %tmp = load double* undef, align 8
+ %tmp = load double, double* undef, align 8
%tmp1 = fsub double %tmp, undef
- %tmp2 = load double* undef, align 8
+ %tmp2 = load double, double* undef, align 8
%tmp3 = fsub double %tmp2, undef
%tmp4 = fmul double %tmp3, undef
%tmp5 = fmul double %tmp3, undef
%tmp6 = fsub double %tmp5, undef
- %tmp7 = getelementptr inbounds %struct.hoge* %arg, i64 0, i32 1
+ %tmp7 = getelementptr inbounds %struct.hoge, %struct.hoge* %arg, i64 0, i32 1
store double %tmp6, double* %tmp7, align 8
%tmp8 = fmul double %tmp1, undef
%tmp9 = fsub double %tmp8, undef
- %tmp10 = getelementptr inbounds %struct.hoge* %arg, i64 0, i32 2
+ %tmp10 = getelementptr inbounds %struct.hoge, %struct.hoge* %arg, i64 0, i32 2
store double %tmp9, double* %tmp10, align 8
br i1 undef, label %bb11, label %bb12
@@ -86,8 +86,8 @@ bb14: ; preds = %bb12, %bb11
define void @rc4_crypt(%struct.rc4_state.0.24* nocapture %s) {
entry:
- %x1 = getelementptr inbounds %struct.rc4_state.0.24* %s, i64 0, i32 0
- %y2 = getelementptr inbounds %struct.rc4_state.0.24* %s, i64 0, i32 1
+ %x1 = getelementptr inbounds %struct.rc4_state.0.24, %struct.rc4_state.0.24* %s, i64 0, i32 0
+ %y2 = getelementptr inbounds %struct.rc4_state.0.24, %struct.rc4_state.0.24* %s, i64 0, i32 1
br i1 undef, label %for.body, label %for.end
for.body: ; preds = %for.body, %entry
diff --git a/test/Transforms/SLPVectorizer/X86/crash_netbsd_decompress.ll b/test/Transforms/SLPVectorizer/X86/crash_netbsd_decompress.ll
index 8da3c34..e1df98d 100644
--- a/test/Transforms/SLPVectorizer/X86/crash_netbsd_decompress.ll
+++ b/test/Transforms/SLPVectorizer/X86/crash_netbsd_decompress.ll
@@ -13,14 +13,14 @@ target triple = "x86_64-apple-macosx10.8.0"
define i32 @fn1() {
entry:
- %0 = load i32* getelementptr inbounds (%struct.DState* @b, i32 0, i32 0), align 4
- %1 = load i32* getelementptr inbounds (%struct.DState* @b, i32 0, i32 1), align 4
- %2 = load i32* @d, align 4
+ %0 = load i32, i32* getelementptr inbounds (%struct.DState, %struct.DState* @b, i32 0, i32 0), align 4
+ %1 = load i32, i32* getelementptr inbounds (%struct.DState, %struct.DState* @b, i32 0, i32 1), align 4
+ %2 = load i32, i32* @d, align 4
%cond = icmp eq i32 %2, 0
br i1 %cond, label %sw.bb, label %save_state_and_return
sw.bb: ; preds = %entry
- %3 = load i32* @c, align 4
+ %3 = load i32, i32* @c, align 4
%and = and i32 %3, 7
store i32 %and, i32* @a, align 4
switch i32 %and, label %if.end [
@@ -34,8 +34,8 @@ if.end: ; preds = %sw.bb
save_state_and_return: ; preds = %sw.bb, %sw.bb, %if.end, %entry
%t.0 = phi i32 [ 0, %if.end ], [ %0, %entry ], [ %0, %sw.bb ], [ %0, %sw.bb ]
%f.0 = phi i32 [ 0, %if.end ], [ %1, %entry ], [ 0, %sw.bb ], [ 0, %sw.bb ]
- store i32 %t.0, i32* getelementptr inbounds (%struct.DState* @b, i32 0, i32 0), align 4
- store i32 %f.0, i32* getelementptr inbounds (%struct.DState* @b, i32 0, i32 1), align 4
+ store i32 %t.0, i32* getelementptr inbounds (%struct.DState, %struct.DState* @b, i32 0, i32 0), align 4
+ store i32 %f.0, i32* getelementptr inbounds (%struct.DState, %struct.DState* @b, i32 0, i32 1), align 4
ret i32 undef
}
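The hunks above also show the rewrite reaching constant getelementptr expressions used inline as load and store operands, where the pointee type (%struct.DState) is likewise named before the pointer. A small sketch of that constant-expression form, with a hypothetical global @g of a hypothetical type %struct.S:

  %struct.S = type { i32, i32 }
  @g = global %struct.S zeroinitializer

  ; old: %x = load i32* getelementptr inbounds (%struct.S* @g, i32 0, i32 1), align 4
  ; new:
  %x = load i32, i32* getelementptr inbounds (%struct.S, %struct.S* @g, i32 0, i32 1), align 4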
diff --git a/test/Transforms/SLPVectorizer/X86/crash_scheduling.ll b/test/Transforms/SLPVectorizer/X86/crash_scheduling.ll
index e6cc0f7..14a8b35 100644
--- a/test/Transforms/SLPVectorizer/X86/crash_scheduling.ll
+++ b/test/Transforms/SLPVectorizer/X86/crash_scheduling.ll
@@ -25,11 +25,11 @@ for.body:
%p3.addr.0258 = phi double [ %add, %bb1 ], [ %add28, %for.body ]
%vecinit.i.i237 = insertelement <2 x double> undef, double %t.0259, i32 0
%x13 = tail call i32 @_xfn(<2 x double> %vecinit.i.i237) #2
- %arrayidx = getelementptr inbounds [256 x i32]* %tab1, i64 0, i64 %indvars.iv266
+ %arrayidx = getelementptr inbounds [256 x i32], [256 x i32]* %tab1, i64 0, i64 %indvars.iv266
store i32 %x13, i32* %arrayidx, align 4, !tbaa !4
%vecinit.i.i = insertelement <2 x double> undef, double %p3.addr.0258, i32 0
%x14 = tail call i32 @_xfn(<2 x double> %vecinit.i.i) #2
- %arrayidx26 = getelementptr inbounds [256 x i32]* %tab2, i64 0, i64 %indvars.iv266
+ %arrayidx26 = getelementptr inbounds [256 x i32], [256 x i32]* %tab2, i64 0, i64 %indvars.iv266
store i32 %x14, i32* %arrayidx26, align 4, !tbaa !4
%add27 = fadd double %mul19, %t.0259
%add28 = fadd double %mul21, %p3.addr.0258
diff --git a/test/Transforms/SLPVectorizer/X86/crash_sim4b1.ll b/test/Transforms/SLPVectorizer/X86/crash_sim4b1.ll
index 0541545..5a576c2 100644
--- a/test/Transforms/SLPVectorizer/X86/crash_sim4b1.ll
+++ b/test/Transforms/SLPVectorizer/X86/crash_sim4b1.ll
@@ -34,13 +34,13 @@ land.rhs.lr.ph: ; preds = %if.then17
unreachable
if.end98: ; preds = %if.then17
- %from299 = getelementptr inbounds %struct._exon_t.12.103.220.363.480.649.740.857.1039.1065.1078.1091.1117.1130.1156.1169.1195.1221.1234.1286.1299.1312.1338.1429.1455.1468.1494.1520.1884.1897.1975.2066.2105.2170.2171* undef, i64 0, i32 1
+ %from299 = getelementptr inbounds %struct._exon_t.12.103.220.363.480.649.740.857.1039.1065.1078.1091.1117.1130.1156.1169.1195.1221.1234.1286.1299.1312.1338.1429.1455.1468.1494.1520.1884.1897.1975.2066.2105.2170.2171, %struct._exon_t.12.103.220.363.480.649.740.857.1039.1065.1078.1091.1117.1130.1156.1169.1195.1221.1234.1286.1299.1312.1338.1429.1455.1468.1494.1520.1884.1897.1975.2066.2105.2170.2171* undef, i64 0, i32 1
br i1 undef, label %land.lhs.true167, label %if.then103
if.then103: ; preds = %if.end98
%.sub100 = select i1 undef, i32 250, i32 undef
%mul114 = shl nsw i32 %.sub100, 2
- %from1115 = getelementptr inbounds %struct._exon_t.12.103.220.363.480.649.740.857.1039.1065.1078.1091.1117.1130.1156.1169.1195.1221.1234.1286.1299.1312.1338.1429.1455.1468.1494.1520.1884.1897.1975.2066.2105.2170.2171* undef, i64 0, i32 0
+ %from1115 = getelementptr inbounds %struct._exon_t.12.103.220.363.480.649.740.857.1039.1065.1078.1091.1117.1130.1156.1169.1195.1221.1234.1286.1299.1312.1338.1429.1455.1468.1494.1520.1884.1897.1975.2066.2105.2170.2171, %struct._exon_t.12.103.220.363.480.649.740.857.1039.1065.1078.1091.1117.1130.1156.1169.1195.1221.1234.1286.1299.1312.1338.1429.1455.1468.1494.1520.1884.1897.1975.2066.2105.2170.2171* undef, i64 0, i32 0
%cond125 = select i1 undef, i32 undef, i32 %mul114
br label %for.cond.i
diff --git a/test/Transforms/SLPVectorizer/X86/crash_smallpt.ll b/test/Transforms/SLPVectorizer/X86/crash_smallpt.ll
index 915c41b..273584c 100644
--- a/test/Transforms/SLPVectorizer/X86/crash_smallpt.ll
+++ b/test/Transforms/SLPVectorizer/X86/crash_smallpt.ll
@@ -21,10 +21,10 @@ invoke.cont: ; preds = %invoke.cont, %cond.
br i1 undef, label %arrayctor.cont, label %invoke.cont
arrayctor.cont: ; preds = %invoke.cont
- %agg.tmp99208.sroa.0.0.idx = getelementptr inbounds %struct.Ray.5.11.53.113.119.137.149.185.329.389.416* undef, i64 0, i32 0, i32 0
- %agg.tmp99208.sroa.1.8.idx388 = getelementptr inbounds %struct.Ray.5.11.53.113.119.137.149.185.329.389.416* undef, i64 0, i32 0, i32 1
- %agg.tmp101211.sroa.0.0.idx = getelementptr inbounds %struct.Ray.5.11.53.113.119.137.149.185.329.389.416* undef, i64 0, i32 1, i32 0
- %agg.tmp101211.sroa.1.8.idx390 = getelementptr inbounds %struct.Ray.5.11.53.113.119.137.149.185.329.389.416* undef, i64 0, i32 1, i32 1
+ %agg.tmp99208.sroa.0.0.idx = getelementptr inbounds %struct.Ray.5.11.53.113.119.137.149.185.329.389.416, %struct.Ray.5.11.53.113.119.137.149.185.329.389.416* undef, i64 0, i32 0, i32 0
+ %agg.tmp99208.sroa.1.8.idx388 = getelementptr inbounds %struct.Ray.5.11.53.113.119.137.149.185.329.389.416, %struct.Ray.5.11.53.113.119.137.149.185.329.389.416* undef, i64 0, i32 0, i32 1
+ %agg.tmp101211.sroa.0.0.idx = getelementptr inbounds %struct.Ray.5.11.53.113.119.137.149.185.329.389.416, %struct.Ray.5.11.53.113.119.137.149.185.329.389.416* undef, i64 0, i32 1, i32 0
+ %agg.tmp101211.sroa.1.8.idx390 = getelementptr inbounds %struct.Ray.5.11.53.113.119.137.149.185.329.389.416, %struct.Ray.5.11.53.113.119.137.149.185.329.389.416* undef, i64 0, i32 1, i32 1
br label %for.cond36.preheader
for.cond36.preheader: ; preds = %_Z5clampd.exit.1, %arrayctor.cont
@@ -89,9 +89,9 @@ if.then38: ; preds = %entry
%add4.i698 = fadd double undef, %add4.i719
%mul.i.i679 = fmul double undef, %add.i695
%mul2.i.i680 = fmul double undef, %add4.i698
- %agg.tmp74663.sroa.0.0.idx = getelementptr inbounds %struct.Ray.5.11.53.95.137.191.197.203.239.257.263.269.275.281.287.293.383.437.443.455.461.599.601* undef, i64 0, i32 1, i32 0
+ %agg.tmp74663.sroa.0.0.idx = getelementptr inbounds %struct.Ray.5.11.53.95.137.191.197.203.239.257.263.269.275.281.287.293.383.437.443.455.461.599.601, %struct.Ray.5.11.53.95.137.191.197.203.239.257.263.269.275.281.287.293.383.437.443.455.461.599.601* undef, i64 0, i32 1, i32 0
store double %mul.i.i679, double* %agg.tmp74663.sroa.0.0.idx, align 8
- %agg.tmp74663.sroa.1.8.idx943 = getelementptr inbounds %struct.Ray.5.11.53.95.137.191.197.203.239.257.263.269.275.281.287.293.383.437.443.455.461.599.601* undef, i64 0, i32 1, i32 1
+ %agg.tmp74663.sroa.1.8.idx943 = getelementptr inbounds %struct.Ray.5.11.53.95.137.191.197.203.239.257.263.269.275.281.287.293.383.437.443.455.461.599.601, %struct.Ray.5.11.53.95.137.191.197.203.239.257.263.269.275.281.287.293.383.437.443.455.461.599.601* undef, i64 0, i32 1, i32 1
store double %mul2.i.i680, double* %agg.tmp74663.sroa.1.8.idx943, align 8
br label %return
diff --git a/test/Transforms/SLPVectorizer/X86/crash_vectorizeTree.ll b/test/Transforms/SLPVectorizer/X86/crash_vectorizeTree.ll
index 9f1fb71..45ca99a 100644
--- a/test/Transforms/SLPVectorizer/X86/crash_vectorizeTree.ll
+++ b/test/Transforms/SLPVectorizer/X86/crash_vectorizeTree.ll
@@ -17,12 +17,12 @@ target triple = "x86_64-apple-macosx10.9.0"
;define fastcc void @bar() {
define void @bar() {
- %1 = getelementptr inbounds %0* undef, i64 0, i32 1, i32 0
- %2 = getelementptr inbounds %0* undef, i64 0, i32 1, i32 1
- %3 = getelementptr inbounds %0* undef, i64 0, i32 1, i32 0
- %4 = getelementptr inbounds %0* undef, i64 0, i32 1, i32 1
- %5 = getelementptr inbounds %0* undef, i64 0, i32 1, i32 0
- %6 = getelementptr inbounds %0* undef, i64 0, i32 1, i32 1
+ %1 = getelementptr inbounds %0, %0* undef, i64 0, i32 1, i32 0
+ %2 = getelementptr inbounds %0, %0* undef, i64 0, i32 1, i32 1
+ %3 = getelementptr inbounds %0, %0* undef, i64 0, i32 1, i32 0
+ %4 = getelementptr inbounds %0, %0* undef, i64 0, i32 1, i32 1
+ %5 = getelementptr inbounds %0, %0* undef, i64 0, i32 1, i32 0
+ %6 = getelementptr inbounds %0, %0* undef, i64 0, i32 1, i32 1
br label %7
; <label>:7 ; preds = %18, %17, %17, %0
@@ -30,8 +30,8 @@ define void @bar() {
%9 = phi double [ 1.800000e+01, %0 ], [ %10, %18 ], [ %10, %17 ], [ %10, %17 ]
store double %9, double* %1, align 8
store double %8, double* %2, align 8
- %10 = load double* %3, align 8
- %11 = load double* %4, align 8
+ %10 = load double, double* %3, align 8
+ %11 = load double, double* %4, align 8
br i1 undef, label %12, label %13
; <label>:12 ; preds = %7
diff --git a/test/Transforms/SLPVectorizer/X86/cross_block_slp.ll b/test/Transforms/SLPVectorizer/X86/cross_block_slp.ll
index 06c4b52..ea0064d 100644
--- a/test/Transforms/SLPVectorizer/X86/cross_block_slp.ll
+++ b/test/Transforms/SLPVectorizer/X86/cross_block_slp.ll
@@ -26,9 +26,9 @@ target triple = "x86_64-apple-macosx10.8.0"
;CHECK: ret
define i32 @foo(double* nocapture %A, float* nocapture %B, i32 %g) {
entry:
- %0 = load float* %B, align 4
- %arrayidx1 = getelementptr inbounds float* %B, i64 1
- %1 = load float* %arrayidx1, align 4
+ %0 = load float, float* %B, align 4
+ %arrayidx1 = getelementptr inbounds float, float* %B, i64 1
+ %1 = load float, float* %arrayidx1, align 4
%add = fadd float %0, 5.000000e+00
%add2 = fadd float %1, 8.000000e+00
%tobool = icmp eq i32 %g, 0
@@ -40,12 +40,12 @@ if.then:
if.end:
%conv = fpext float %add to double
- %2 = load double* %A, align 8
+ %2 = load double, double* %A, align 8
%add4 = fadd double %conv, %2
store double %add4, double* %A, align 8
%conv5 = fpext float %add2 to double
- %arrayidx6 = getelementptr inbounds double* %A, i64 1
- %3 = load double* %arrayidx6, align 8
+ %arrayidx6 = getelementptr inbounds double, double* %A, i64 1
+ %3 = load double, double* %arrayidx6, align 8
%add7 = fadd double %conv5, %3
store double %add7, double* %arrayidx6, align 8
ret i32 undef
diff --git a/test/Transforms/SLPVectorizer/X86/cse.ll b/test/Transforms/SLPVectorizer/X86/cse.ll
index d2ad7eb..9f56e21 100644
--- a/test/Transforms/SLPVectorizer/X86/cse.ll
+++ b/test/Transforms/SLPVectorizer/X86/cse.ll
@@ -21,23 +21,23 @@ target triple = "i386-apple-macosx10.8.0"
define i32 @test(double* nocapture %G) {
entry:
- %arrayidx = getelementptr inbounds double* %G, i64 5
- %0 = load double* %arrayidx, align 8
+ %arrayidx = getelementptr inbounds double, double* %G, i64 5
+ %0 = load double, double* %arrayidx, align 8
%mul = fmul double %0, 4.000000e+00
%add = fadd double %mul, 1.000000e+00
store double %add, double* %G, align 8
- %arrayidx2 = getelementptr inbounds double* %G, i64 6
- %1 = load double* %arrayidx2, align 8
+ %arrayidx2 = getelementptr inbounds double, double* %G, i64 6
+ %1 = load double, double* %arrayidx2, align 8
%mul3 = fmul double %1, 3.000000e+00
%add4 = fadd double %mul3, 6.000000e+00
- %arrayidx5 = getelementptr inbounds double* %G, i64 1
+ %arrayidx5 = getelementptr inbounds double, double* %G, i64 1
store double %add4, double* %arrayidx5, align 8
%add8 = fadd double %mul, 7.000000e+00
- %arrayidx9 = getelementptr inbounds double* %G, i64 2
+ %arrayidx9 = getelementptr inbounds double, double* %G, i64 2
store double %add8, double* %arrayidx9, align 8
%mul11 = fmul double %1, 4.000000e+00
%add12 = fadd double %mul11, 8.000000e+00
- %arrayidx13 = getelementptr inbounds double* %G, i64 3
+ %arrayidx13 = getelementptr inbounds double, double* %G, i64 3
store double %add12, double* %arrayidx13, align 8
ret i32 undef
}
@@ -55,26 +55,26 @@ entry:
;CHECK: ret
define i32 @foo(double* nocapture %A, i32 %n) {
entry:
- %0 = load double* %A, align 8
+ %0 = load double, double* %A, align 8
%mul = fmul double %0, 7.900000e+00
%conv = sitofp i32 %n to double
%mul1 = fmul double %conv, %mul
%add = fadd double %mul1, 6.000000e+00
store double %add, double* %A, align 8
- %arrayidx3 = getelementptr inbounds double* %A, i64 1
- %1 = load double* %arrayidx3, align 8
+ %arrayidx3 = getelementptr inbounds double, double* %A, i64 1
+ %1 = load double, double* %arrayidx3, align 8
%mul4 = fmul double %1, 7.700000e+00
%mul6 = fmul double %conv, %mul4
%add7 = fadd double %mul6, 2.000000e+00
store double %add7, double* %arrayidx3, align 8
- %arrayidx9 = getelementptr inbounds double* %A, i64 2
- %2 = load double* %arrayidx9, align 8
+ %arrayidx9 = getelementptr inbounds double, double* %A, i64 2
+ %2 = load double, double* %arrayidx9, align 8
%mul10 = fmul double %2, 7.600000e+00
%mul12 = fmul double %conv, %mul10
%add13 = fadd double %mul12, 3.000000e+00
store double %add13, double* %arrayidx9, align 8
- %arrayidx15 = getelementptr inbounds double* %A, i64 3
- %3 = load double* %arrayidx15, align 8
+ %arrayidx15 = getelementptr inbounds double, double* %A, i64 3
+ %3 = load double, double* %arrayidx15, align 8
%mul16 = fmul double %3, 7.400000e+00
%mul18 = fmul double %conv, %mul16
%add19 = fadd double %mul18, 4.000000e+00
@@ -101,31 +101,31 @@ entry:
; CHECK: ret
define i32 @test2(double* nocapture %G, i32 %k) {
%1 = icmp eq i32 %k, 0
- %2 = getelementptr inbounds double* %G, i64 5
- %3 = load double* %2, align 8
+ %2 = getelementptr inbounds double, double* %G, i64 5
+ %3 = load double, double* %2, align 8
%4 = fmul double %3, 4.000000e+00
br i1 %1, label %12, label %5
; <label>:5 ; preds = %0
%6 = fadd double %4, 1.000000e+00
store double %6, double* %G, align 8
- %7 = getelementptr inbounds double* %G, i64 6
- %8 = load double* %7, align 8
+ %7 = getelementptr inbounds double, double* %G, i64 6
+ %8 = load double, double* %7, align 8
%9 = fmul double %8, 3.000000e+00
%10 = fadd double %9, 6.000000e+00
- %11 = getelementptr inbounds double* %G, i64 1
+ %11 = getelementptr inbounds double, double* %G, i64 1
store double %10, double* %11, align 8
br label %20
; <label>:12 ; preds = %0
%13 = fadd double %4, 7.000000e+00
- %14 = getelementptr inbounds double* %G, i64 2
+ %14 = getelementptr inbounds double, double* %G, i64 2
store double %13, double* %14, align 8
- %15 = getelementptr inbounds double* %G, i64 6
- %16 = load double* %15, align 8
+ %15 = getelementptr inbounds double, double* %G, i64 6
+ %16 = load double, double* %15, align 8
%17 = fmul double %16, 3.000000e+00
%18 = fadd double %17, 8.000000e+00
- %19 = getelementptr inbounds double* %G, i64 3
+ %19 = getelementptr inbounds double, double* %G, i64 3
store double %18, double* %19, align 8
br label %20
@@ -147,26 +147,26 @@ define i32 @test2(double* nocapture %G, i32 %k) {
;CHECK: ret
define i32 @foo4(double* nocapture %A, i32 %n) {
entry:
- %0 = load double* %A, align 8
+ %0 = load double, double* %A, align 8
%mul = fmul double %0, 7.900000e+00
%conv = sitofp i32 %n to double
%mul1 = fmul double %conv, %mul
%add = fadd double %mul1, 6.000000e+00
store double %add, double* %A, align 8
- %arrayidx3 = getelementptr inbounds double* %A, i64 1
- %1 = load double* %arrayidx3, align 8
+ %arrayidx3 = getelementptr inbounds double, double* %A, i64 1
+ %1 = load double, double* %arrayidx3, align 8
%mul4 = fmul double %1, 7.900000e+00
%mul6 = fmul double %conv, %mul4
%add7 = fadd double %mul6, 6.000000e+00
store double %add7, double* %arrayidx3, align 8
- %arrayidx9 = getelementptr inbounds double* %A, i64 2
- %2 = load double* %arrayidx9, align 8
+ %arrayidx9 = getelementptr inbounds double, double* %A, i64 2
+ %2 = load double, double* %arrayidx9, align 8
%mul10 = fmul double %2, 7.900000e+00
%mul12 = fmul double %conv, %mul10
%add13 = fadd double %mul12, 6.000000e+00
store double %add13, double* %arrayidx9, align 8
- %arrayidx15 = getelementptr inbounds double* %A, i64 3
- %3 = load double* %arrayidx15, align 8
+ %arrayidx15 = getelementptr inbounds double, double* %A, i64 3
+ %3 = load double, double* %arrayidx15, align 8
%mul16 = fmul double %3, 7.900000e+00
%mul18 = fmul double %conv, %mul16
%add19 = fadd double %mul18, 6.000000e+00
@@ -189,24 +189,24 @@ entry:
;CHECK: ret
define i32 @partial_mrg(double* nocapture %A, i32 %n) {
entry:
- %0 = load double* %A, align 8
+ %0 = load double, double* %A, align 8
%conv = sitofp i32 %n to double
%mul = fmul double %conv, %0
store double %mul, double* %A, align 8
- %arrayidx2 = getelementptr inbounds double* %A, i64 1
- %1 = load double* %arrayidx2, align 8
+ %arrayidx2 = getelementptr inbounds double, double* %A, i64 1
+ %1 = load double, double* %arrayidx2, align 8
%mul4 = fmul double %conv, %1
store double %mul4, double* %arrayidx2, align 8
%cmp = icmp slt i32 %n, 4
br i1 %cmp, label %return, label %if.end
if.end: ; preds = %entry
- %arrayidx7 = getelementptr inbounds double* %A, i64 2
- %2 = load double* %arrayidx7, align 8
+ %arrayidx7 = getelementptr inbounds double, double* %A, i64 2
+ %2 = load double, double* %arrayidx7, align 8
%mul9 = fmul double %conv, %2
store double %mul9, double* %arrayidx7, align 8
- %arrayidx11 = getelementptr inbounds double* %A, i64 3
- %3 = load double* %arrayidx11, align 8
+ %arrayidx11 = getelementptr inbounds double, double* %A, i64 3
+ %3 = load double, double* %arrayidx11, align 8
%add = add nsw i32 %n, 4
%conv12 = sitofp i32 %add to double
%mul13 = fmul double %conv12, %3
@@ -227,19 +227,19 @@ entry:
br i1 undef, label %if.end13, label %if.end13
sw.epilog7: ; No predecessors!
- %.in = getelementptr inbounds %class.B.53.55* %this, i64 0, i32 0, i32 1
- %0 = load double* %.in, align 8
+ %.in = getelementptr inbounds %class.B.53.55, %class.B.53.55* %this, i64 0, i32 0, i32 1
+ %0 = load double, double* %.in, align 8
%add = fadd double undef, 0.000000e+00
%add6 = fadd double %add, %0
- %1 = load double* @a, align 8
+ %1 = load double, double* @a, align 8
%add8 = fadd double %1, 0.000000e+00
- %_dy = getelementptr inbounds %class.B.53.55* %this, i64 0, i32 0, i32 2
- %2 = load double* %_dy, align 8
+ %_dy = getelementptr inbounds %class.B.53.55, %class.B.53.55* %this, i64 0, i32 0, i32 2
+ %2 = load double, double* %_dy, align 8
%add10 = fadd double %add8, %2
br i1 undef, label %if.then12, label %if.end13
if.then12: ; preds = %sw.epilog7
- %3 = load double* undef, align 8
+ %3 = load double, double* undef, align 8
br label %if.end13
if.end13: ; preds = %if.then12, %sw.epilog7, %entry
diff --git a/test/Transforms/SLPVectorizer/X86/cycle_dup.ll b/test/Transforms/SLPVectorizer/X86/cycle_dup.ll
index bac2c3c..0a4e961 100644
--- a/test/Transforms/SLPVectorizer/X86/cycle_dup.ll
+++ b/test/Transforms/SLPVectorizer/X86/cycle_dup.ll
@@ -23,15 +23,15 @@ target triple = "x86_64-apple-macosx10.9.0"
;CHECK-NEXT:ret i32 undef
define i32 @foo(i32* nocapture %A) #0 {
entry:
- %0 = load i32* %A, align 4
- %arrayidx1 = getelementptr inbounds i32* %A, i64 1
- %1 = load i32* %arrayidx1, align 4
- %arrayidx2 = getelementptr inbounds i32* %A, i64 2
- %2 = load i32* %arrayidx2, align 4
- %arrayidx3 = getelementptr inbounds i32* %A, i64 3
- %3 = load i32* %arrayidx3, align 4
- %arrayidx4 = getelementptr inbounds i32* %A, i64 13
- %4 = load i32* %arrayidx4, align 4
+ %0 = load i32, i32* %A, align 4
+ %arrayidx1 = getelementptr inbounds i32, i32* %A, i64 1
+ %1 = load i32, i32* %arrayidx1, align 4
+ %arrayidx2 = getelementptr inbounds i32, i32* %A, i64 2
+ %2 = load i32, i32* %arrayidx2, align 4
+ %arrayidx3 = getelementptr inbounds i32, i32* %A, i64 3
+ %3 = load i32, i32* %arrayidx3, align 4
+ %arrayidx4 = getelementptr inbounds i32, i32* %A, i64 13
+ %4 = load i32, i32* %arrayidx4, align 4
%cmp24 = icmp sgt i32 %4, 0
br i1 %cmp24, label %for.body, label %for.end
diff --git a/test/Transforms/SLPVectorizer/X86/debug_info.ll b/test/Transforms/SLPVectorizer/X86/debug_info.ll
index 21f51d7..ccacbcc 100644
--- a/test/Transforms/SLPVectorizer/X86/debug_info.ll
+++ b/test/Transforms/SLPVectorizer/X86/debug_info.ll
@@ -15,7 +15,7 @@ target triple = "x86_64-apple-macosx10.7.0"
;CHECK: @depth
;CHECK: getelementptr inbounds {{.*}}, !dbg ![[LOC:[0-9]+]]
;CHECK: bitcast double* {{.*}}, !dbg ![[LOC]]
-;CHECK: load <2 x double>* {{.*}}, !dbg ![[LOC]]
+;CHECK: load <2 x double>, <2 x double>* {{.*}}, !dbg ![[LOC]]
;CHECK: store <2 x double> {{.*}}, !dbg ![[LOC2:[0-9]+]]
;CHECK: ret
;CHECK: ![[LOC]] = !MDLocation(line: 4, scope:
@@ -23,27 +23,27 @@ target triple = "x86_64-apple-macosx10.7.0"
define i32 @depth(double* nocapture %A, i32 %m) #0 {
entry:
- tail call void @llvm.dbg.value(metadata double* %A, i64 0, metadata !12, metadata !{}), !dbg !19
- tail call void @llvm.dbg.value(metadata i32 %m, i64 0, metadata !13, metadata !{}), !dbg !19
- tail call void @llvm.dbg.value(metadata i32 00, i64 0, metadata !14, metadata !{}), !dbg !21
- tail call void @llvm.dbg.value(metadata i32 02, i64 0, metadata !15, metadata !{}), !dbg !21
- tail call void @llvm.dbg.value(metadata i32 0, i64 0, metadata !16, metadata !{}), !dbg !23
+ tail call void @llvm.dbg.value(metadata double* %A, i64 0, metadata !12, metadata !MDExpression()), !dbg !19
+ tail call void @llvm.dbg.value(metadata i32 %m, i64 0, metadata !13, metadata !MDExpression()), !dbg !19
+ tail call void @llvm.dbg.value(metadata i32 00, i64 0, metadata !14, metadata !MDExpression()), !dbg !21
+ tail call void @llvm.dbg.value(metadata i32 02, i64 0, metadata !15, metadata !MDExpression()), !dbg !21
+ tail call void @llvm.dbg.value(metadata i32 0, i64 0, metadata !16, metadata !MDExpression()), !dbg !23
%cmp8 = icmp sgt i32 %m, 0, !dbg !23
br i1 %cmp8, label %for.body.lr.ph, label %for.end, !dbg !23
for.body.lr.ph: ; preds = %entry
- %arrayidx = getelementptr inbounds double* %A, i64 4, !dbg !24
- %0 = load double* %arrayidx, align 8, !dbg !24
- %arrayidx1 = getelementptr inbounds double* %A, i64 5, !dbg !29
- %1 = load double* %arrayidx1, align 8, !dbg !29
+ %arrayidx = getelementptr inbounds double, double* %A, i64 4, !dbg !24
+ %0 = load double, double* %arrayidx, align 8, !dbg !24
+ %arrayidx1 = getelementptr inbounds double, double* %A, i64 5, !dbg !29
+ %1 = load double, double* %arrayidx1, align 8, !dbg !29
br label %for.end, !dbg !23
for.end: ; preds = %for.body.lr.ph, %entry
%y1.0.lcssa = phi double [ %1, %for.body.lr.ph ], [ 1.000000e+00, %entry ]
%y0.0.lcssa = phi double [ %0, %for.body.lr.ph ], [ 0.000000e+00, %entry ]
- %arrayidx2 = getelementptr inbounds double* %A, i64 8, !dbg !30
+ %arrayidx2 = getelementptr inbounds double, double* %A, i64 8, !dbg !30
store double %y0.0.lcssa, double* %arrayidx2, align 8, !dbg !30
- %arrayidx3 = getelementptr inbounds double* %A, i64 9, !dbg !30
+ %arrayidx3 = getelementptr inbounds double, double* %A, i64 9, !dbg !30
store double %y1.0.lcssa, double* %arrayidx3, align 8, !dbg !30
ret i32 undef, !dbg !31
}
@@ -57,24 +57,24 @@ attributes #1 = { nounwind readnone }
!llvm.dbg.cu = !{!0}
!llvm.module.flags = !{!18, !32}
-!0 = !{!"0x11\0012\00clang version 3.4 (trunk 187335) (llvm/trunk 187335:187340M)\001\00\000\00\000", !1, !2, !2, !3, !2, !2} ; [ DW_TAG_compile_unit ] [/Users/nadav/file.c] [DW_LANG_C99]
-!1 = !{!"file.c", !"/Users/nadav"}
+!0 = !MDCompileUnit(language: DW_LANG_C99, producer: "clang version 3.4 (trunk 187335) (llvm/trunk 187335:187340M)", isOptimized: true, emissionKind: 0, file: !1, enums: !2, retainedTypes: !2, subprograms: !3, globals: !2, imports: !2)
+!1 = !MDFile(filename: "file.c", directory: "/Users/nadav")
!2 = !{i32 0}
!3 = !{!4}
-!4 = !{!"0x2e\00depth\00depth\00\001\000\001\000\006\00256\001\001", !1, !5, !6, null, i32 (double*, i32)* @depth, null, null, !11} ; [ DW_TAG_subprogram ] [line 1] [def] [depth]
-!5 = !{!"0x29", !1} ; [ DW_TAG_file_type ] [/Users/nadav/file.c]
-!6 = !{!"0x15\00\000\000\000\000\000\000", i32 0, null, null, !7, null, null, null} ; [ DW_TAG_subroutine_type ] [line 0, size 0, align 0, offset 0] [from ]
+!4 = !MDSubprogram(name: "depth", line: 1, isLocal: false, isDefinition: true, virtualIndex: 6, flags: DIFlagPrototyped, isOptimized: true, scopeLine: 1, file: !1, scope: !5, type: !6, function: i32 (double*, i32)* @depth, variables: !11)
+!5 = !MDFile(filename: "file.c", directory: "/Users/nadav")
+!6 = !MDSubroutineType(types: !7)
!7 = !{!8, !9, !8}
-!8 = !{!"0x24\00int\000\0032\0032\000\000\005", null, null} ; [ DW_TAG_base_type ] [int] [line 0, size 32, align 32, offset 0, enc DW_ATE_signed]
-!9 = !{!"0xf\00\000\0064\0064\000\000", null, null, !10} ; [ DW_TAG_pointer_type ] [line 0, size 64, align 64, offset 0] [from double]
-!10 = !{!"0x24\00double\000\0064\0064\000\000\004", null, null} ; [ DW_TAG_base_type ] [double] [line 0, size 64, align 64, offset 0, enc DW_ATE_float]
+!8 = !MDBasicType(tag: DW_TAG_base_type, name: "int", size: 32, align: 32, encoding: DW_ATE_signed)
+!9 = !MDDerivedType(tag: DW_TAG_pointer_type, size: 64, align: 64, baseType: !10)
+!10 = !MDBasicType(tag: DW_TAG_base_type, name: "double", size: 64, align: 64, encoding: DW_ATE_float)
!11 = !{!12, !13, !14, !15, !16}
-!12 = !{!"0x101\00A\0016777217\000", !4, !5, !9} ; [ DW_TAG_arg_variable ] [A] [line 1]
-!13 = !{!"0x101\00m\0033554433\000", !4, !5, !8} ; [ DW_TAG_arg_variable ] [m] [line 1]
-!14 = !{!"0x100\00y0\002\000", !4, !5, !10} ; [ DW_TAG_auto_variable ] [y0] [line 2]
-!15 = !{!"0x100\00y1\002\000", !4, !5, !10} ; [ DW_TAG_auto_variable ] [y1] [line 2]
-!16 = !{!"0x100\00i\003\000", !17, !5, !8} ; [ DW_TAG_auto_variable ] [i] [line 3]
-!17 = !{!"0xb\003\000\000", !1, !4} ; [ DW_TAG_lexical_block ] [/Users/nadav/file.c]
+!12 = !MDLocalVariable(tag: DW_TAG_arg_variable, name: "A", line: 1, arg: 1, scope: !4, file: !5, type: !9)
+!13 = !MDLocalVariable(tag: DW_TAG_arg_variable, name: "m", line: 1, arg: 2, scope: !4, file: !5, type: !8)
+!14 = !MDLocalVariable(tag: DW_TAG_auto_variable, name: "y0", line: 2, scope: !4, file: !5, type: !10)
+!15 = !MDLocalVariable(tag: DW_TAG_auto_variable, name: "y1", line: 2, scope: !4, file: !5, type: !10)
+!16 = !MDLocalVariable(tag: DW_TAG_auto_variable, name: "i", line: 3, scope: !17, file: !5, type: !8)
+!17 = distinct !MDLexicalBlock(line: 3, column: 0, file: !1, scope: !4)
!18 = !{i32 2, !"Dwarf Version", i32 2}
!19 = !MDLocation(line: 1, scope: !4)
!20 = !{double 0.000000e+00}
@@ -82,8 +82,8 @@ attributes #1 = { nounwind readnone }
!22 = !{double 1.000000e+00}
!23 = !MDLocation(line: 3, scope: !17)
!24 = !MDLocation(line: 4, scope: !25)
-!25 = !{!"0xb\003\000\001", !1, !17} ; [ DW_TAG_lexical_block ] [/Users/nadav/file.c]
+!25 = distinct !MDLexicalBlock(line: 3, column: 0, file: !1, scope: !17)
!29 = !MDLocation(line: 5, scope: !25)
!30 = !MDLocation(line: 7, scope: !4)
!31 = !MDLocation(line: 8, scope: !4)
-!32 = !{i32 1, !"Debug Info Version", i32 2}
+!32 = !{i32 1, !"Debug Info Version", i32 3}
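Besides the load and getelementptr updates, debug_info.ll also carries the parallel debug-info migration: packed-string metadata nodes become specialized MD nodes with named fields, llvm.dbg.value takes !MDExpression() in place of an empty node, and the module flag moves from Debug Info Version 2 to 3. One file node from the hunk above, in both forms:

  ; old packed-string form
  !5 = !{!"0x29", !1} ; [ DW_TAG_file_type ] [/Users/nadav/file.c]

  ; new specialized form
  !5 = !MDFile(filename: "file.c", directory: "/Users/nadav")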
diff --git a/test/Transforms/SLPVectorizer/X86/diamond.ll b/test/Transforms/SLPVectorizer/X86/diamond.ll
index 5135a92..4e2c02f 100644
--- a/test/Transforms/SLPVectorizer/X86/diamond.ll
+++ b/test/Transforms/SLPVectorizer/X86/diamond.ll
@@ -18,24 +18,24 @@ target triple = "x86_64-apple-macosx10.8.0"
; CHECK: ret
define i32 @foo(i32* noalias nocapture %B, i32* noalias nocapture %A, i32 %n, i32 %m) #0 {
entry:
- %0 = load i32* %A, align 4
+ %0 = load i32, i32* %A, align 4
%mul238 = add i32 %m, %n
%add = mul i32 %0, %mul238
store i32 %add, i32* %B, align 4
- %arrayidx4 = getelementptr inbounds i32* %A, i64 1
- %1 = load i32* %arrayidx4, align 4
+ %arrayidx4 = getelementptr inbounds i32, i32* %A, i64 1
+ %1 = load i32, i32* %arrayidx4, align 4
%add8 = mul i32 %1, %mul238
- %arrayidx9 = getelementptr inbounds i32* %B, i64 1
+ %arrayidx9 = getelementptr inbounds i32, i32* %B, i64 1
store i32 %add8, i32* %arrayidx9, align 4
- %arrayidx10 = getelementptr inbounds i32* %A, i64 2
- %2 = load i32* %arrayidx10, align 4
+ %arrayidx10 = getelementptr inbounds i32, i32* %A, i64 2
+ %2 = load i32, i32* %arrayidx10, align 4
%add14 = mul i32 %2, %mul238
- %arrayidx15 = getelementptr inbounds i32* %B, i64 2
+ %arrayidx15 = getelementptr inbounds i32, i32* %B, i64 2
store i32 %add14, i32* %arrayidx15, align 4
- %arrayidx16 = getelementptr inbounds i32* %A, i64 3
- %3 = load i32* %arrayidx16, align 4
+ %arrayidx16 = getelementptr inbounds i32, i32* %A, i64 3
+ %3 = load i32, i32* %arrayidx16, align 4
%add20 = mul i32 %3, %mul238
- %arrayidx21 = getelementptr inbounds i32* %B, i64 3
+ %arrayidx21 = getelementptr inbounds i32, i32* %B, i64 3
store i32 %add20, i32* %arrayidx21, align 4
ret i32 0
}
@@ -56,24 +56,24 @@ entry:
; CHECK-NEXT: ret
define i32 @extr_user(i32* noalias nocapture %B, i32* noalias nocapture %A, i32 %n, i32 %m) {
entry:
- %0 = load i32* %A, align 4
+ %0 = load i32, i32* %A, align 4
%mul238 = add i32 %m, %n
%add = mul i32 %0, %mul238
store i32 %add, i32* %B, align 4
- %arrayidx4 = getelementptr inbounds i32* %A, i64 1
- %1 = load i32* %arrayidx4, align 4
+ %arrayidx4 = getelementptr inbounds i32, i32* %A, i64 1
+ %1 = load i32, i32* %arrayidx4, align 4
%add8 = mul i32 %1, %mul238
- %arrayidx9 = getelementptr inbounds i32* %B, i64 1
+ %arrayidx9 = getelementptr inbounds i32, i32* %B, i64 1
store i32 %add8, i32* %arrayidx9, align 4
- %arrayidx10 = getelementptr inbounds i32* %A, i64 2
- %2 = load i32* %arrayidx10, align 4
+ %arrayidx10 = getelementptr inbounds i32, i32* %A, i64 2
+ %2 = load i32, i32* %arrayidx10, align 4
%add14 = mul i32 %2, %mul238
- %arrayidx15 = getelementptr inbounds i32* %B, i64 2
+ %arrayidx15 = getelementptr inbounds i32, i32* %B, i64 2
store i32 %add14, i32* %arrayidx15, align 4
- %arrayidx16 = getelementptr inbounds i32* %A, i64 3
- %3 = load i32* %arrayidx16, align 4
+ %arrayidx16 = getelementptr inbounds i32, i32* %A, i64 3
+ %3 = load i32, i32* %arrayidx16, align 4
%add20 = mul i32 %3, %mul238
- %arrayidx21 = getelementptr inbounds i32* %B, i64 3
+ %arrayidx21 = getelementptr inbounds i32, i32* %B, i64 3
store i32 %add20, i32* %arrayidx21, align 4
ret i32 %0 ;<--------- This value has multiple users
}
@@ -86,24 +86,24 @@ entry:
; CHECK-NEXT: ret
define i32 @extr_user1(i32* noalias nocapture %B, i32* noalias nocapture %A, i32 %n, i32 %m) {
entry:
- %0 = load i32* %A, align 4
+ %0 = load i32, i32* %A, align 4
%mul238 = add i32 %m, %n
%add = mul i32 %0, %mul238
store i32 %add, i32* %B, align 4
- %arrayidx4 = getelementptr inbounds i32* %A, i64 1
- %1 = load i32* %arrayidx4, align 4
+ %arrayidx4 = getelementptr inbounds i32, i32* %A, i64 1
+ %1 = load i32, i32* %arrayidx4, align 4
%add8 = mul i32 %1, %mul238
- %arrayidx9 = getelementptr inbounds i32* %B, i64 1
+ %arrayidx9 = getelementptr inbounds i32, i32* %B, i64 1
store i32 %add8, i32* %arrayidx9, align 4
- %arrayidx10 = getelementptr inbounds i32* %A, i64 2
- %2 = load i32* %arrayidx10, align 4
+ %arrayidx10 = getelementptr inbounds i32, i32* %A, i64 2
+ %2 = load i32, i32* %arrayidx10, align 4
%add14 = mul i32 %2, %mul238
- %arrayidx15 = getelementptr inbounds i32* %B, i64 2
+ %arrayidx15 = getelementptr inbounds i32, i32* %B, i64 2
store i32 %add14, i32* %arrayidx15, align 4
- %arrayidx16 = getelementptr inbounds i32* %A, i64 3
- %3 = load i32* %arrayidx16, align 4
+ %arrayidx16 = getelementptr inbounds i32, i32* %A, i64 3
+ %3 = load i32, i32* %arrayidx16, align 4
%add20 = mul i32 %3, %mul238
- %arrayidx21 = getelementptr inbounds i32* %B, i64 3
+ %arrayidx21 = getelementptr inbounds i32, i32* %B, i64 3
store i32 %add20, i32* %arrayidx21, align 4
ret i32 %1 ;<--------- This value has multiple users
}
diff --git a/test/Transforms/SLPVectorizer/X86/external_user.ll b/test/Transforms/SLPVectorizer/X86/external_user.ll
index 6d09aa6..bf2febd 100644
--- a/test/Transforms/SLPVectorizer/X86/external_user.ll
+++ b/test/Transforms/SLPVectorizer/X86/external_user.ll
@@ -33,9 +33,9 @@ target triple = "x86_64-apple-macosx10.8.0"
define double @ext_user(double* noalias nocapture %B, double* noalias nocapture %A, i32 %n, i32 %m) {
entry:
- %arrayidx = getelementptr inbounds double* %A, i64 1
- %0 = load double* %arrayidx, align 8
- %1 = load double* %A, align 8
+ %arrayidx = getelementptr inbounds double, double* %A, i64 1
+ %0 = load double, double* %arrayidx, align 8
+ %1 = load double, double* %A, align 8
br label %for.body
for.body: ; preds = %for.body, %entry
@@ -54,7 +54,7 @@ for.body: ; preds = %for.body, %entry
for.end: ; preds = %for.body
store double %add5, double* %B, align 8
- %arrayidx7 = getelementptr inbounds double* %B, i64 1
+ %arrayidx7 = getelementptr inbounds double, double* %B, i64 1
store double %add4, double* %arrayidx7, align 8
ret double %mul3
}
@@ -69,9 +69,9 @@ for.end: ; preds = %for.body
define i32 @needtogather(double *noalias %a, i32 *noalias %b, float * noalias %c,
i32 * noalias %d) {
entry:
- %0 = load i32* %d, align 4
+ %0 = load i32, i32* %d, align 4
%conv = sitofp i32 %0 to float
- %1 = load float* %c
+ %1 = load float, float* %c
%sub = fsub float 0.000000e+00, %1
%mul = fmul float %sub, 0.000000e+00
%add = fadd float %conv, %mul
diff --git a/test/Transforms/SLPVectorizer/X86/extract.ll b/test/Transforms/SLPVectorizer/X86/extract.ll
index f611fd4..9a6ee2a 100644
--- a/test/Transforms/SLPVectorizer/X86/extract.ll
+++ b/test/Transforms/SLPVectorizer/X86/extract.ll
@@ -9,11 +9,11 @@ target triple = "x86_64-apple-macosx10.8.0"
;CHECK: ret void
define void @fextr(double* %ptr) {
entry:
- %LD = load <2 x double>* undef
+ %LD = load <2 x double>, <2 x double>* undef
%V0 = extractelement <2 x double> %LD, i32 0
%V1 = extractelement <2 x double> %LD, i32 1
- %P0 = getelementptr inbounds double* %ptr, i64 0
- %P1 = getelementptr inbounds double* %ptr, i64 1
+ %P0 = getelementptr inbounds double, double* %ptr, i64 0
+ %P1 = getelementptr inbounds double, double* %ptr, i64 1
%A0 = fadd double %V0, 0.0
%A1 = fadd double %V1, 1.1
store double %A0, double* %P0, align 4
@@ -27,11 +27,11 @@ entry:
;CHECK: ret void
define void @fextr1(double* %ptr) {
entry:
- %LD = load <2 x double>* undef
+ %LD = load <2 x double>, <2 x double>* undef
%V0 = extractelement <2 x double> %LD, i32 0
%V1 = extractelement <2 x double> %LD, i32 1
- %P0 = getelementptr inbounds double* %ptr, i64 1 ; <--- incorrect order
- %P1 = getelementptr inbounds double* %ptr, i64 0
+ %P0 = getelementptr inbounds double, double* %ptr, i64 1 ; <--- incorrect order
+ %P1 = getelementptr inbounds double, double* %ptr, i64 0
%A0 = fadd double %V0, 1.2
%A1 = fadd double %V1, 3.4
store double %A0, double* %P0, align 4
@@ -45,11 +45,11 @@ entry:
;CHECK: ret void
define void @fextr2(double* %ptr) {
entry:
- %LD = load <4 x double>* undef
+ %LD = load <4 x double>, <4 x double>* undef
%V0 = extractelement <4 x double> %LD, i32 0 ; <--- invalid size.
%V1 = extractelement <4 x double> %LD, i32 1
- %P0 = getelementptr inbounds double* %ptr, i64 0
- %P1 = getelementptr inbounds double* %ptr, i64 1
+ %P0 = getelementptr inbounds double, double* %ptr, i64 0
+ %P1 = getelementptr inbounds double, double* %ptr, i64 1
%A0 = fadd double %V0, 5.5
%A1 = fadd double %V1, 6.6
store double %A0, double* %P0, align 4
diff --git a/test/Transforms/SLPVectorizer/X86/extract_in_tree_user.ll b/test/Transforms/SLPVectorizer/X86/extract_in_tree_user.ll
index 3628042..6e5415b 100644
--- a/test/Transforms/SLPVectorizer/X86/extract_in_tree_user.ll
+++ b/test/Transforms/SLPVectorizer/X86/extract_in_tree_user.ll
@@ -7,13 +7,13 @@ target datalayout = "e-m:o-i64:64-f80:128-n8:16:32:64-S128"
; Function Attrs: nounwind ssp uwtable
define i32 @fn1() {
entry:
- %0 = load i64** @a, align 8
- %add.ptr = getelementptr inbounds i64* %0, i64 11
+ %0 = load i64*, i64** @a, align 8
+ %add.ptr = getelementptr inbounds i64, i64* %0, i64 11
%1 = ptrtoint i64* %add.ptr to i64
store i64 %1, i64* %add.ptr, align 8
- %add.ptr1 = getelementptr inbounds i64* %0, i64 56
+ %add.ptr1 = getelementptr inbounds i64, i64* %0, i64 56
%2 = ptrtoint i64* %add.ptr1 to i64
- %arrayidx2 = getelementptr inbounds i64* %0, i64 12
+ %arrayidx2 = getelementptr inbounds i64, i64* %0, i64 12
store i64 %2, i64* %arrayidx2, align 8
ret i32 undef
; CHECK-LABEL: @fn1(
@@ -25,42 +25,42 @@ entry:
declare float @llvm.powi.f32(float, i32)
define void @fn2(i32* %a, i32* %b, float* %c) {
entry:
- %i0 = load i32* %a, align 4
- %i1 = load i32* %b, align 4
+ %i0 = load i32, i32* %a, align 4
+ %i1 = load i32, i32* %b, align 4
%add1 = add i32 %i0, %i1
%fp1 = sitofp i32 %add1 to float
%call1 = tail call float @llvm.powi.f32(float %fp1,i32 %add1) nounwind readnone
- %arrayidx2 = getelementptr inbounds i32* %a, i32 1
- %i2 = load i32* %arrayidx2, align 4
- %arrayidx3 = getelementptr inbounds i32* %b, i32 1
- %i3 = load i32* %arrayidx3, align 4
+ %arrayidx2 = getelementptr inbounds i32, i32* %a, i32 1
+ %i2 = load i32, i32* %arrayidx2, align 4
+ %arrayidx3 = getelementptr inbounds i32, i32* %b, i32 1
+ %i3 = load i32, i32* %arrayidx3, align 4
%add2 = add i32 %i2, %i3
%fp2 = sitofp i32 %add2 to float
%call2 = tail call float @llvm.powi.f32(float %fp2,i32 %add1) nounwind readnone
- %arrayidx4 = getelementptr inbounds i32* %a, i32 2
- %i4 = load i32* %arrayidx4, align 4
- %arrayidx5 = getelementptr inbounds i32* %b, i32 2
- %i5 = load i32* %arrayidx5, align 4
+ %arrayidx4 = getelementptr inbounds i32, i32* %a, i32 2
+ %i4 = load i32, i32* %arrayidx4, align 4
+ %arrayidx5 = getelementptr inbounds i32, i32* %b, i32 2
+ %i5 = load i32, i32* %arrayidx5, align 4
%add3 = add i32 %i4, %i5
%fp3 = sitofp i32 %add3 to float
%call3 = tail call float @llvm.powi.f32(float %fp3,i32 %add1) nounwind readnone
- %arrayidx6 = getelementptr inbounds i32* %a, i32 3
- %i6 = load i32* %arrayidx6, align 4
- %arrayidx7 = getelementptr inbounds i32* %b, i32 3
- %i7 = load i32* %arrayidx7, align 4
+ %arrayidx6 = getelementptr inbounds i32, i32* %a, i32 3
+ %i6 = load i32, i32* %arrayidx6, align 4
+ %arrayidx7 = getelementptr inbounds i32, i32* %b, i32 3
+ %i7 = load i32, i32* %arrayidx7, align 4
%add4 = add i32 %i6, %i7
%fp4 = sitofp i32 %add4 to float
%call4 = tail call float @llvm.powi.f32(float %fp4,i32 %add1) nounwind readnone
store float %call1, float* %c, align 4
- %arrayidx8 = getelementptr inbounds float* %c, i32 1
+ %arrayidx8 = getelementptr inbounds float, float* %c, i32 1
store float %call2, float* %arrayidx8, align 4
- %arrayidx9 = getelementptr inbounds float* %c, i32 2
+ %arrayidx9 = getelementptr inbounds float, float* %c, i32 2
store float %call3, float* %arrayidx9, align 4
- %arrayidx10 = getelementptr inbounds float* %c, i32 3
+ %arrayidx10 = getelementptr inbounds float, float* %c, i32 3
store float %call4, float* %arrayidx10, align 4
ret void
diff --git a/test/Transforms/SLPVectorizer/X86/extractcost.ll b/test/Transforms/SLPVectorizer/X86/extractcost.ll
index 01baf66..164ddf3 100644
--- a/test/Transforms/SLPVectorizer/X86/extractcost.ll
+++ b/test/Transforms/SLPVectorizer/X86/extractcost.ll
@@ -13,15 +13,15 @@ entry:
store i32 %add, i32* %A, align 4
%mul1 = mul nsw i32 %n, 9
%add2 = add nsw i32 %mul1, 9
- %arrayidx3 = getelementptr inbounds i32* %A, i64 1
+ %arrayidx3 = getelementptr inbounds i32, i32* %A, i64 1
store i32 %add2, i32* %arrayidx3, align 4
%mul4 = shl i32 %n, 3
%add5 = add nsw i32 %mul4, 9
- %arrayidx6 = getelementptr inbounds i32* %A, i64 2
+ %arrayidx6 = getelementptr inbounds i32, i32* %A, i64 2
store i32 %add5, i32* %arrayidx6, align 4
%mul7 = mul nsw i32 %n, 10
%add8 = add nsw i32 %mul7, 9
- %arrayidx9 = getelementptr inbounds i32* %A, i64 3
+ %arrayidx9 = getelementptr inbounds i32, i32* %A, i64 3
store i32 %add8, i32* %arrayidx9, align 4
%externaluse1 = add nsw i32 %add, %m
%externaluse2 = mul nsw i32 %add, %m ; we should add the extract cost only once and the store will be vectorized
diff --git a/test/Transforms/SLPVectorizer/X86/flag.ll b/test/Transforms/SLPVectorizer/X86/flag.ll
index 3ca5407..7db8d75 100644
--- a/test/Transforms/SLPVectorizer/X86/flag.ll
+++ b/test/Transforms/SLPVectorizer/X86/flag.ll
@@ -15,17 +15,17 @@ define i32 @rollable(i32* noalias nocapture %in, i32* noalias nocapture %out, i6
.lr.ph: ; preds = %0, %.lr.ph
%i.019 = phi i64 [ %26, %.lr.ph ], [ 0, %0 ]
%2 = shl i64 %i.019, 2
- %3 = getelementptr inbounds i32* %in, i64 %2
- %4 = load i32* %3, align 4
+ %3 = getelementptr inbounds i32, i32* %in, i64 %2
+ %4 = load i32, i32* %3, align 4
%5 = or i64 %2, 1
- %6 = getelementptr inbounds i32* %in, i64 %5
- %7 = load i32* %6, align 4
+ %6 = getelementptr inbounds i32, i32* %in, i64 %5
+ %7 = load i32, i32* %6, align 4
%8 = or i64 %2, 2
- %9 = getelementptr inbounds i32* %in, i64 %8
- %10 = load i32* %9, align 4
+ %9 = getelementptr inbounds i32, i32* %in, i64 %8
+ %10 = load i32, i32* %9, align 4
%11 = or i64 %2, 3
- %12 = getelementptr inbounds i32* %in, i64 %11
- %13 = load i32* %12, align 4
+ %12 = getelementptr inbounds i32, i32* %in, i64 %11
+ %13 = load i32, i32* %12, align 4
%14 = mul i32 %4, 7
%15 = add i32 %14, 7
%16 = mul i32 %7, 7
@@ -34,13 +34,13 @@ define i32 @rollable(i32* noalias nocapture %in, i32* noalias nocapture %out, i6
%19 = add i32 %18, 21
%20 = mul i32 %13, 7
%21 = add i32 %20, 28
- %22 = getelementptr inbounds i32* %out, i64 %2
+ %22 = getelementptr inbounds i32, i32* %out, i64 %2
store i32 %15, i32* %22, align 4
- %23 = getelementptr inbounds i32* %out, i64 %5
+ %23 = getelementptr inbounds i32, i32* %out, i64 %5
store i32 %17, i32* %23, align 4
- %24 = getelementptr inbounds i32* %out, i64 %8
+ %24 = getelementptr inbounds i32, i32* %out, i64 %8
store i32 %19, i32* %24, align 4
- %25 = getelementptr inbounds i32* %out, i64 %11
+ %25 = getelementptr inbounds i32, i32* %out, i64 %11
store i32 %21, i32* %25, align 4
%26 = add i64 %i.019, 1
%exitcond = icmp eq i64 %26, %n
diff --git a/test/Transforms/SLPVectorizer/X86/gep.ll b/test/Transforms/SLPVectorizer/X86/gep.ll
index 9e105ec..3f952d7 100644
--- a/test/Transforms/SLPVectorizer/X86/gep.ll
+++ b/test/Transforms/SLPVectorizer/X86/gep.ll
@@ -9,15 +9,15 @@ target datalayout = "e-m:o-i64:64-f80:128-n8:16:32:64-S128"
; CHECK-LABEL: foo1
; CHECK: <2 x i32*>
define void @foo1 ({ i32*, i32* }* noalias %x, { i32*, i32* }* noalias %y) {
- %1 = getelementptr inbounds { i32*, i32* }* %y, i64 0, i32 0
- %2 = load i32** %1, align 8
- %3 = getelementptr inbounds i32* %2, i64 16
- %4 = getelementptr inbounds { i32*, i32* }* %x, i64 0, i32 0
+ %1 = getelementptr inbounds { i32*, i32* }, { i32*, i32* }* %y, i64 0, i32 0
+ %2 = load i32*, i32** %1, align 8
+ %3 = getelementptr inbounds i32, i32* %2, i64 16
+ %4 = getelementptr inbounds { i32*, i32* }, { i32*, i32* }* %x, i64 0, i32 0
store i32* %3, i32** %4, align 8
- %5 = getelementptr inbounds { i32*, i32* }* %y, i64 0, i32 1
- %6 = load i32** %5, align 8
- %7 = getelementptr inbounds i32* %6, i64 16
- %8 = getelementptr inbounds { i32*, i32* }* %x, i64 0, i32 1
+ %5 = getelementptr inbounds { i32*, i32* }, { i32*, i32* }* %y, i64 0, i32 1
+ %6 = load i32*, i32** %5, align 8
+ %7 = getelementptr inbounds i32, i32* %6, i64 16
+ %8 = getelementptr inbounds { i32*, i32* }, { i32*, i32* }* %x, i64 0, i32 1
store i32* %7, i32** %8, align 8
ret void
}
@@ -27,15 +27,15 @@ define void @foo1 ({ i32*, i32* }* noalias %x, { i32*, i32* }* noalias %y) {
; CHECK-LABEL: foo2
; CHECK-NOT: <2 x i32*>
define void @foo2 ({ i32*, i32* }* noalias %x, { i32*, i32* }* noalias %y, i32 %i) {
- %1 = getelementptr inbounds { i32*, i32* }* %y, i64 0, i32 0
- %2 = load i32** %1, align 8
- %3 = getelementptr inbounds i32* %2, i32 %i
- %4 = getelementptr inbounds { i32*, i32* }* %x, i64 0, i32 0
+ %1 = getelementptr inbounds { i32*, i32* }, { i32*, i32* }* %y, i64 0, i32 0
+ %2 = load i32*, i32** %1, align 8
+ %3 = getelementptr inbounds i32, i32* %2, i32 %i
+ %4 = getelementptr inbounds { i32*, i32* }, { i32*, i32* }* %x, i64 0, i32 0
store i32* %3, i32** %4, align 8
- %5 = getelementptr inbounds { i32*, i32* }* %y, i64 0, i32 1
- %6 = load i32** %5, align 8
- %7 = getelementptr inbounds i32* %6, i32 %i
- %8 = getelementptr inbounds { i32*, i32* }* %x, i64 0, i32 1
+ %5 = getelementptr inbounds { i32*, i32* }, { i32*, i32* }* %y, i64 0, i32 1
+ %6 = load i32*, i32** %5, align 8
+ %7 = getelementptr inbounds i32, i32* %6, i32 %i
+ %8 = getelementptr inbounds { i32*, i32* }, { i32*, i32* }* %x, i64 0, i32 1
store i32* %7, i32** %8, align 8
ret void
}
diff --git a/test/Transforms/SLPVectorizer/X86/hoist.ll b/test/Transforms/SLPVectorizer/X86/hoist.ll
index 78c58f1..36c939b 100644
--- a/test/Transforms/SLPVectorizer/X86/hoist.ll
+++ b/test/Transforms/SLPVectorizer/X86/hoist.ll
@@ -30,23 +30,23 @@ entry:
for.body: ; preds = %entry, %for.body
%i.024 = phi i32 [ 0, %entry ], [ %add10, %for.body ]
- %arrayidx = getelementptr inbounds i32* %A, i32 %i.024
- %0 = load i32* %arrayidx, align 4
+ %arrayidx = getelementptr inbounds i32, i32* %A, i32 %i.024
+ %0 = load i32, i32* %arrayidx, align 4
%add = add nsw i32 %0, %n
store i32 %add, i32* %arrayidx, align 4
%add121 = or i32 %i.024, 1
- %arrayidx2 = getelementptr inbounds i32* %A, i32 %add121
- %1 = load i32* %arrayidx2, align 4
+ %arrayidx2 = getelementptr inbounds i32, i32* %A, i32 %add121
+ %1 = load i32, i32* %arrayidx2, align 4
%add3 = add nsw i32 %1, %k
store i32 %add3, i32* %arrayidx2, align 4
%add422 = or i32 %i.024, 2
- %arrayidx5 = getelementptr inbounds i32* %A, i32 %add422
- %2 = load i32* %arrayidx5, align 4
+ %arrayidx5 = getelementptr inbounds i32, i32* %A, i32 %add422
+ %2 = load i32, i32* %arrayidx5, align 4
%add6 = add nsw i32 %2, %n
store i32 %add6, i32* %arrayidx5, align 4
%add723 = or i32 %i.024, 3
- %arrayidx8 = getelementptr inbounds i32* %A, i32 %add723
- %3 = load i32* %arrayidx8, align 4
+ %arrayidx8 = getelementptr inbounds i32, i32* %A, i32 %add723
+ %3 = load i32, i32* %arrayidx8, align 4
%add9 = add nsw i32 %3, %k
store i32 %add9, i32* %arrayidx8, align 4
%add10 = add nsw i32 %i.024, 4
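; The or-based indexing above is an add in disguise: %i.024 starts at 0 and
; steps by 4, so its low two bits are always clear and or-ing in 1, 2, or 3
; is exactly an add. The migration leaves these arithmetic lines untouched;
; only the getelementptr/load lines that consume them gain a type operand:
;
;   %add121 = or i32 %i.024, 1   ; == add i32 %i.024, 1 when %i.024 is a multiple of 4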
diff --git a/test/Transforms/SLPVectorizer/X86/horizontal.ll b/test/Transforms/SLPVectorizer/X86/horizontal.ll
index 1836047..83b2e01 100644
--- a/test/Transforms/SLPVectorizer/X86/horizontal.ll
+++ b/test/Transforms/SLPVectorizer/X86/horizontal.ll
@@ -32,22 +32,22 @@ for.body:
%i.033 = phi i64 [ 0, %for.body.lr.ph ], [ %inc, %for.body ]
%sum.032 = phi float [ 0.000000e+00, %for.body.lr.ph ], [ %add17, %for.body ]
%mul = shl nsw i64 %i.033, 2
- %arrayidx = getelementptr inbounds float* %A, i64 %mul
- %1 = load float* %arrayidx, align 4
+ %arrayidx = getelementptr inbounds float, float* %A, i64 %mul
+ %1 = load float, float* %arrayidx, align 4
%mul2 = fmul float %1, 7.000000e+00
%add28 = or i64 %mul, 1
- %arrayidx4 = getelementptr inbounds float* %A, i64 %add28
- %2 = load float* %arrayidx4, align 4
+ %arrayidx4 = getelementptr inbounds float, float* %A, i64 %add28
+ %2 = load float, float* %arrayidx4, align 4
%mul5 = fmul float %2, 7.000000e+00
%add6 = fadd fast float %mul2, %mul5
%add829 = or i64 %mul, 2
- %arrayidx9 = getelementptr inbounds float* %A, i64 %add829
- %3 = load float* %arrayidx9, align 4
+ %arrayidx9 = getelementptr inbounds float, float* %A, i64 %add829
+ %3 = load float, float* %arrayidx9, align 4
%mul10 = fmul float %3, 7.000000e+00
%add11 = fadd fast float %add6, %mul10
%add1330 = or i64 %mul, 3
- %arrayidx14 = getelementptr inbounds float* %A, i64 %add1330
- %4 = load float* %arrayidx14, align 4
+ %arrayidx14 = getelementptr inbounds float, float* %A, i64 %add1330
+ %4 = load float, float* %arrayidx14, align 4
%mul15 = fmul float %4, 7.000000e+00
%add16 = fadd fast float %add11, %mul15
%add17 = fadd fast float %sum.032, %add16
@@ -85,13 +85,13 @@ entry:
br i1 %cmp38, label %for.body.lr.ph, label %for.end
for.body.lr.ph:
- %0 = load float* %B, align 4
- %arrayidx4 = getelementptr inbounds float* %B, i64 1
- %1 = load float* %arrayidx4, align 4
- %arrayidx9 = getelementptr inbounds float* %B, i64 2
- %2 = load float* %arrayidx9, align 4
- %arrayidx15 = getelementptr inbounds float* %B, i64 3
- %3 = load float* %arrayidx15, align 4
+ %0 = load float, float* %B, align 4
+ %arrayidx4 = getelementptr inbounds float, float* %B, i64 1
+ %1 = load float, float* %arrayidx4, align 4
+ %arrayidx9 = getelementptr inbounds float, float* %B, i64 2
+ %2 = load float, float* %arrayidx9, align 4
+ %arrayidx15 = getelementptr inbounds float, float* %B, i64 3
+ %3 = load float, float* %arrayidx15, align 4
%4 = sext i32 %n to i64
br label %for.body
@@ -99,22 +99,22 @@ for.body:
%i.040 = phi i64 [ 0, %for.body.lr.ph ], [ %inc, %for.body ]
%sum.039 = phi float [ 0.000000e+00, %for.body.lr.ph ], [ %mul21, %for.body ]
%mul = shl nsw i64 %i.040, 2
- %arrayidx2 = getelementptr inbounds float* %A, i64 %mul
- %5 = load float* %arrayidx2, align 4
+ %arrayidx2 = getelementptr inbounds float, float* %A, i64 %mul
+ %5 = load float, float* %arrayidx2, align 4
%mul3 = fmul float %0, %5
%add35 = or i64 %mul, 1
- %arrayidx6 = getelementptr inbounds float* %A, i64 %add35
- %6 = load float* %arrayidx6, align 4
+ %arrayidx6 = getelementptr inbounds float, float* %A, i64 %add35
+ %6 = load float, float* %arrayidx6, align 4
%mul7 = fmul float %1, %6
%add8 = fadd fast float %mul3, %mul7
%add1136 = or i64 %mul, 2
- %arrayidx12 = getelementptr inbounds float* %A, i64 %add1136
- %7 = load float* %arrayidx12, align 4
+ %arrayidx12 = getelementptr inbounds float, float* %A, i64 %add1136
+ %7 = load float, float* %arrayidx12, align 4
%mul13 = fmul float %2, %7
%add14 = fadd fast float %add8, %mul13
%add1737 = or i64 %mul, 3
- %arrayidx18 = getelementptr inbounds float* %A, i64 %add1737
- %8 = load float* %arrayidx18, align 4
+ %arrayidx18 = getelementptr inbounds float, float* %A, i64 %add1737
+ %8 = load float, float* %arrayidx18, align 4
%mul19 = fmul float %3, %8
%add20 = fadd fast float %add14, %mul19
%mul21 = fmul float %sum.039, %add20
@@ -157,23 +157,23 @@ entry:
br i1 %cmp81, label %for.body.lr.ph, label %for.end
for.body.lr.ph:
- %0 = load float* %B, align 4
- %arrayidx4 = getelementptr inbounds float* %B, i64 1
- %1 = load float* %arrayidx4, align 4
- %arrayidx9 = getelementptr inbounds float* %B, i64 2
- %2 = load float* %arrayidx9, align 4
- %arrayidx15 = getelementptr inbounds float* %B, i64 3
- %3 = load float* %arrayidx15, align 4
- %arrayidx21 = getelementptr inbounds float* %B, i64 4
- %4 = load float* %arrayidx21, align 4
- %arrayidx27 = getelementptr inbounds float* %B, i64 5
- %5 = load float* %arrayidx27, align 4
- %arrayidx33 = getelementptr inbounds float* %B, i64 6
- %6 = load float* %arrayidx33, align 4
- %arrayidx39 = getelementptr inbounds float* %B, i64 7
- %7 = load float* %arrayidx39, align 4
- %arrayidx45 = getelementptr inbounds float* %B, i64 8
- %8 = load float* %arrayidx45, align 4
+ %0 = load float, float* %B, align 4
+ %arrayidx4 = getelementptr inbounds float, float* %B, i64 1
+ %1 = load float, float* %arrayidx4, align 4
+ %arrayidx9 = getelementptr inbounds float, float* %B, i64 2
+ %2 = load float, float* %arrayidx9, align 4
+ %arrayidx15 = getelementptr inbounds float, float* %B, i64 3
+ %3 = load float, float* %arrayidx15, align 4
+ %arrayidx21 = getelementptr inbounds float, float* %B, i64 4
+ %4 = load float, float* %arrayidx21, align 4
+ %arrayidx27 = getelementptr inbounds float, float* %B, i64 5
+ %5 = load float, float* %arrayidx27, align 4
+ %arrayidx33 = getelementptr inbounds float, float* %B, i64 6
+ %6 = load float, float* %arrayidx33, align 4
+ %arrayidx39 = getelementptr inbounds float, float* %B, i64 7
+ %7 = load float, float* %arrayidx39, align 4
+ %arrayidx45 = getelementptr inbounds float, float* %B, i64 8
+ %8 = load float, float* %arrayidx45, align 4
%9 = sext i32 %n to i64
br label %for.body
@@ -181,47 +181,47 @@ for.body:
%i.083 = phi i64 [ 0, %for.body.lr.ph ], [ %inc, %for.body ]
%sum.082 = phi float [ 0.000000e+00, %for.body.lr.ph ], [ %add51, %for.body ]
%mul = mul nsw i64 %i.083, 6
- %arrayidx2 = getelementptr inbounds float* %A, i64 %mul
- %10 = load float* %arrayidx2, align 4
+ %arrayidx2 = getelementptr inbounds float, float* %A, i64 %mul
+ %10 = load float, float* %arrayidx2, align 4
%mul3 = fmul fast float %0, %10
%add80 = or i64 %mul, 1
- %arrayidx6 = getelementptr inbounds float* %A, i64 %add80
- %11 = load float* %arrayidx6, align 4
+ %arrayidx6 = getelementptr inbounds float, float* %A, i64 %add80
+ %11 = load float, float* %arrayidx6, align 4
%mul7 = fmul fast float %1, %11
%add8 = fadd fast float %mul3, %mul7
%add11 = add nsw i64 %mul, 2
- %arrayidx12 = getelementptr inbounds float* %A, i64 %add11
- %12 = load float* %arrayidx12, align 4
+ %arrayidx12 = getelementptr inbounds float, float* %A, i64 %add11
+ %12 = load float, float* %arrayidx12, align 4
%mul13 = fmul fast float %2, %12
%add14 = fadd fast float %add8, %mul13
%add17 = add nsw i64 %mul, 3
- %arrayidx18 = getelementptr inbounds float* %A, i64 %add17
- %13 = load float* %arrayidx18, align 4
+ %arrayidx18 = getelementptr inbounds float, float* %A, i64 %add17
+ %13 = load float, float* %arrayidx18, align 4
%mul19 = fmul fast float %3, %13
%add20 = fadd fast float %add14, %mul19
%add23 = add nsw i64 %mul, 4
- %arrayidx24 = getelementptr inbounds float* %A, i64 %add23
- %14 = load float* %arrayidx24, align 4
+ %arrayidx24 = getelementptr inbounds float, float* %A, i64 %add23
+ %14 = load float, float* %arrayidx24, align 4
%mul25 = fmul fast float %4, %14
%add26 = fadd fast float %add20, %mul25
%add29 = add nsw i64 %mul, 5
- %arrayidx30 = getelementptr inbounds float* %A, i64 %add29
- %15 = load float* %arrayidx30, align 4
+ %arrayidx30 = getelementptr inbounds float, float* %A, i64 %add29
+ %15 = load float, float* %arrayidx30, align 4
%mul31 = fmul fast float %5, %15
%add32 = fadd fast float %add26, %mul31
%add35 = add nsw i64 %mul, 6
- %arrayidx36 = getelementptr inbounds float* %A, i64 %add35
- %16 = load float* %arrayidx36, align 4
+ %arrayidx36 = getelementptr inbounds float, float* %A, i64 %add35
+ %16 = load float, float* %arrayidx36, align 4
%mul37 = fmul fast float %6, %16
%add38 = fadd fast float %add32, %mul37
%add41 = add nsw i64 %mul, 7
- %arrayidx42 = getelementptr inbounds float* %A, i64 %add41
- %17 = load float* %arrayidx42, align 4
+ %arrayidx42 = getelementptr inbounds float, float* %A, i64 %add41
+ %17 = load float, float* %arrayidx42, align 4
%mul43 = fmul fast float %7, %17
%add44 = fadd fast float %add38, %mul43
%add47 = add nsw i64 %mul, 8
- %arrayidx48 = getelementptr inbounds float* %A, i64 %add47
- %18 = load float* %arrayidx48, align 4
+ %arrayidx48 = getelementptr inbounds float, float* %A, i64 %add47
+ %18 = load float, float* %arrayidx48, align 4
%mul49 = fmul fast float %8, %18
%add50 = fadd fast float %add44, %mul49
%add51 = fadd fast float %sum.082, %add50
@@ -259,13 +259,13 @@ entry:
br i1 %cmp41, label %for.body.lr.ph, label %for.end
for.body.lr.ph:
- %0 = load float* %B, align 4
- %arrayidx4 = getelementptr inbounds float* %B, i64 1
- %1 = load float* %arrayidx4, align 4
- %arrayidx10 = getelementptr inbounds float* %B, i64 2
- %2 = load float* %arrayidx10, align 4
- %arrayidx16 = getelementptr inbounds float* %B, i64 3
- %3 = load float* %arrayidx16, align 4
+ %0 = load float, float* %B, align 4
+ %arrayidx4 = getelementptr inbounds float, float* %B, i64 1
+ %1 = load float, float* %arrayidx4, align 4
+ %arrayidx10 = getelementptr inbounds float, float* %B, i64 2
+ %2 = load float, float* %arrayidx10, align 4
+ %arrayidx16 = getelementptr inbounds float, float* %B, i64 3
+ %3 = load float, float* %arrayidx16, align 4
%4 = sext i32 %n to i64
br label %for.body
@@ -273,23 +273,23 @@ for.body:
%i.043 = phi i64 [ 0, %for.body.lr.ph ], [ %inc, %for.body ]
%sum.042 = phi float [ 0.000000e+00, %for.body.lr.ph ], [ %add21, %for.body ]
%mul = shl nsw i64 %i.043, 2
- %arrayidx2 = getelementptr inbounds float* %A, i64 %mul
- %5 = load float* %arrayidx2, align 4
+ %arrayidx2 = getelementptr inbounds float, float* %A, i64 %mul
+ %5 = load float, float* %arrayidx2, align 4
%mul3 = fmul fast float %0, %5
%add = fadd fast float %sum.042, %mul3
%add638 = or i64 %mul, 1
- %arrayidx7 = getelementptr inbounds float* %A, i64 %add638
- %6 = load float* %arrayidx7, align 4
+ %arrayidx7 = getelementptr inbounds float, float* %A, i64 %add638
+ %6 = load float, float* %arrayidx7, align 4
%mul8 = fmul fast float %1, %6
%add9 = fadd fast float %add, %mul8
%add1239 = or i64 %mul, 2
- %arrayidx13 = getelementptr inbounds float* %A, i64 %add1239
- %7 = load float* %arrayidx13, align 4
+ %arrayidx13 = getelementptr inbounds float, float* %A, i64 %add1239
+ %7 = load float, float* %arrayidx13, align 4
%mul14 = fmul fast float %2, %7
%add15 = fadd fast float %add9, %mul14
%add1840 = or i64 %mul, 3
- %arrayidx19 = getelementptr inbounds float* %A, i64 %add1840
- %8 = load float* %arrayidx19, align 4
+ %arrayidx19 = getelementptr inbounds float, float* %A, i64 %add1840
+ %8 = load float, float* %arrayidx19, align 4
%mul20 = fmul fast float %3, %8
%add21 = fadd fast float %add15, %mul20
%inc = add nsw i64 %i.043, 1
@@ -326,40 +326,40 @@ entry:
br i1 %cmp37, label %for.body.lr.ph, label %for.end
for.body.lr.ph:
- %arrayidx4 = getelementptr inbounds float* %B, i64 1
- %arrayidx9 = getelementptr inbounds float* %B, i64 2
- %arrayidx15 = getelementptr inbounds float* %B, i64 3
+ %arrayidx4 = getelementptr inbounds float, float* %B, i64 1
+ %arrayidx9 = getelementptr inbounds float, float* %B, i64 2
+ %arrayidx15 = getelementptr inbounds float, float* %B, i64 3
%0 = sext i32 %n to i64
br label %for.body
for.body:
%i.039 = phi i64 [ 0, %for.body.lr.ph ], [ %inc, %for.body ]
%C.addr.038 = phi float* [ %C, %for.body.lr.ph ], [ %incdec.ptr, %for.body ]
- %1 = load float* %B, align 4
+ %1 = load float, float* %B, align 4
%mul = shl nsw i64 %i.039, 2
- %arrayidx2 = getelementptr inbounds float* %A, i64 %mul
- %2 = load float* %arrayidx2, align 4
+ %arrayidx2 = getelementptr inbounds float, float* %A, i64 %mul
+ %2 = load float, float* %arrayidx2, align 4
%mul3 = fmul fast float %1, %2
- %3 = load float* %arrayidx4, align 4
+ %3 = load float, float* %arrayidx4, align 4
%add34 = or i64 %mul, 1
- %arrayidx6 = getelementptr inbounds float* %A, i64 %add34
- %4 = load float* %arrayidx6, align 4
+ %arrayidx6 = getelementptr inbounds float, float* %A, i64 %add34
+ %4 = load float, float* %arrayidx6, align 4
%mul7 = fmul fast float %3, %4
%add8 = fadd fast float %mul3, %mul7
- %5 = load float* %arrayidx9, align 4
+ %5 = load float, float* %arrayidx9, align 4
%add1135 = or i64 %mul, 2
- %arrayidx12 = getelementptr inbounds float* %A, i64 %add1135
- %6 = load float* %arrayidx12, align 4
+ %arrayidx12 = getelementptr inbounds float, float* %A, i64 %add1135
+ %6 = load float, float* %arrayidx12, align 4
%mul13 = fmul fast float %5, %6
%add14 = fadd fast float %add8, %mul13
- %7 = load float* %arrayidx15, align 4
+ %7 = load float, float* %arrayidx15, align 4
%add1736 = or i64 %mul, 3
- %arrayidx18 = getelementptr inbounds float* %A, i64 %add1736
- %8 = load float* %arrayidx18, align 4
+ %arrayidx18 = getelementptr inbounds float, float* %A, i64 %add1736
+ %8 = load float, float* %arrayidx18, align 4
%mul19 = fmul fast float %7, %8
%add20 = fadd fast float %add14, %mul19
store float %add20, float* %C.addr.038, align 4
- %incdec.ptr = getelementptr inbounds float* %C.addr.038, i64 1
+ %incdec.ptr = getelementptr inbounds float, float* %C.addr.038, i64 1
%inc = add nsw i64 %i.039, 1
%exitcond = icmp eq i64 %inc, %0
br i1 %exitcond, label %for.end, label %for.body
@@ -389,24 +389,24 @@ entry:
br i1 %cmp17, label %for.body.lr.ph, label %for.end
for.body.lr.ph:
- %0 = load double* %B, align 8
- %arrayidx4 = getelementptr inbounds double* %B, i64 1
- %1 = load double* %arrayidx4, align 8
+ %0 = load double, double* %B, align 8
+ %arrayidx4 = getelementptr inbounds double, double* %B, i64 1
+ %1 = load double, double* %arrayidx4, align 8
%2 = sext i32 %n to i64
br label %for.body
for.body:
%i.018 = phi i64 [ 0, %for.body.lr.ph ], [ %inc, %for.body ]
%mul = shl nsw i64 %i.018, 2
- %arrayidx2 = getelementptr inbounds double* %A, i64 %mul
- %3 = load double* %arrayidx2, align 8
+ %arrayidx2 = getelementptr inbounds double, double* %A, i64 %mul
+ %3 = load double, double* %arrayidx2, align 8
%mul3 = fmul fast double %0, %3
%add16 = or i64 %mul, 1
- %arrayidx6 = getelementptr inbounds double* %A, i64 %add16
- %4 = load double* %arrayidx6, align 8
+ %arrayidx6 = getelementptr inbounds double, double* %A, i64 %add16
+ %4 = load double, double* %arrayidx6, align 8
%mul7 = fmul fast double %1, %4
%add8 = fadd fast double %mul3, %mul7
- %arrayidx9 = getelementptr inbounds double* %C, i64 %i.018
+ %arrayidx9 = getelementptr inbounds double, double* %C, i64 %i.018
store double %add8, double* %arrayidx9, align 8
%inc = add nsw i64 %i.018, 1
%exitcond = icmp eq i64 %inc, %2
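; store instructions pass through every hunk above unchanged: a store already
; spells out the type of the value being stored, so the explicit-type
; migration adds no operand to it. Taken verbatim from the context lines
; above:
;
;   store double %add8, double* %arrayidx9, align 8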
diff --git a/test/Transforms/SLPVectorizer/X86/implicitfloat.ll b/test/Transforms/SLPVectorizer/X86/implicitfloat.ll
index f63f268..f7283f0 100644
--- a/test/Transforms/SLPVectorizer/X86/implicitfloat.ll
+++ b/test/Transforms/SLPVectorizer/X86/implicitfloat.ll
@@ -9,16 +9,16 @@ target triple = "x86_64-apple-macosx10.8.0"
; CHECK: ret
define void @test1(double* %a, double* %b, double* %c) noimplicitfloat { ; <------ noimplicitfloat attribute here!
entry:
- %i0 = load double* %a, align 8
- %i1 = load double* %b, align 8
+ %i0 = load double, double* %a, align 8
+ %i1 = load double, double* %b, align 8
%mul = fmul double %i0, %i1
- %arrayidx3 = getelementptr inbounds double* %a, i64 1
- %i3 = load double* %arrayidx3, align 8
- %arrayidx4 = getelementptr inbounds double* %b, i64 1
- %i4 = load double* %arrayidx4, align 8
+ %arrayidx3 = getelementptr inbounds double, double* %a, i64 1
+ %i3 = load double, double* %arrayidx3, align 8
+ %arrayidx4 = getelementptr inbounds double, double* %b, i64 1
+ %i4 = load double, double* %arrayidx4, align 8
%mul5 = fmul double %i3, %i4
store double %mul, double* %c, align 8
- %arrayidx5 = getelementptr inbounds double* %c, i64 1
+ %arrayidx5 = getelementptr inbounds double, double* %c, i64 1
store double %mul5, double* %arrayidx5, align 8
ret void
}
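; The noimplicitfloat attribute flagged above is the point of this test: the
; function is expected to stay scalar, since implicit vector-float use is
; forbidden. The migration rewrites its loads and getelementptrs like any
; other code while leaving the attribute list untouched (verbatim from above):
;
;   define void @test1(double* %a, double* %b, double* %c) noimplicitfloat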
diff --git a/test/Transforms/SLPVectorizer/X86/in-tree-user.ll b/test/Transforms/SLPVectorizer/X86/in-tree-user.ll
index 194a0fb..a638548 100644
--- a/test/Transforms/SLPVectorizer/X86/in-tree-user.ll
+++ b/test/Transforms/SLPVectorizer/X86/in-tree-user.ll
@@ -19,15 +19,15 @@ entry:
for.body: ; preds = %for.inc, %entry
%indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.inc ]
%0 = shl nsw i64 %indvars.iv, 1
- %arrayidx = getelementptr inbounds double* %A, i64 %0
- %1 = load double* %arrayidx, align 8
+ %arrayidx = getelementptr inbounds double, double* %A, i64 %0
+ %1 = load double, double* %arrayidx, align 8
%mul1 = fmul double %conv, %1
%mul2 = fmul double %mul1, 7.000000e+00
%add = fadd double %mul2, 5.000000e+00
%InTreeUser = fadd double %add, %add ; <------------------ In tree user.
%2 = or i64 %0, 1
- %arrayidx6 = getelementptr inbounds double* %A, i64 %2
- %3 = load double* %arrayidx6, align 8
+ %arrayidx6 = getelementptr inbounds double, double* %A, i64 %2
+ %3 = load double, double* %arrayidx6, align 8
%mul8 = fmul double %conv, %3
%mul9 = fmul double %mul8, 4.000000e+00
%add10 = fadd double %mul9, 9.000000e+00
@@ -35,7 +35,7 @@ for.body: ; preds = %for.inc, %entry
br i1 %cmp11, label %if.then, label %for.inc
if.then: ; preds = %for.body
- %call = tail call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([6 x i8]* @.str, i64 0, i64 0))
+ %call = tail call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([6 x i8], [6 x i8]* @.str, i64 0, i64 0))
br label %for.inc
for.inc: ; preds = %for.body, %if.then
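; Constant-expression getelementptrs get the same rewrite as the instruction
; form, as the @printf argument above shows: the source element type is
; repeated before the pointer operand. A sketch with a hypothetical global
; @g (not part of this test):
;
;   @g = constant [6 x i8] c"abcde\00"
;   before:  i8* getelementptr inbounds ([6 x i8]* @g, i64 0, i64 0)
;   after:   i8* getelementptr inbounds ([6 x i8], [6 x i8]* @g, i64 0, i64 0)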
diff --git a/test/Transforms/SLPVectorizer/X86/intrinsic.ll b/test/Transforms/SLPVectorizer/X86/intrinsic.ll
index 937252f..cc5a4af 100644
--- a/test/Transforms/SLPVectorizer/X86/intrinsic.ll
+++ b/test/Transforms/SLPVectorizer/X86/intrinsic.ll
@@ -13,18 +13,18 @@ declare double @llvm.fabs.f64(double) nounwind readnone
;CHECK: ret
define void @vec_fabs_f64(double* %a, double* %b, double* %c) {
entry:
- %i0 = load double* %a, align 8
- %i1 = load double* %b, align 8
+ %i0 = load double, double* %a, align 8
+ %i1 = load double, double* %b, align 8
%mul = fmul double %i0, %i1
%call = tail call double @llvm.fabs.f64(double %mul) nounwind readnone
- %arrayidx3 = getelementptr inbounds double* %a, i64 1
- %i3 = load double* %arrayidx3, align 8
- %arrayidx4 = getelementptr inbounds double* %b, i64 1
- %i4 = load double* %arrayidx4, align 8
+ %arrayidx3 = getelementptr inbounds double, double* %a, i64 1
+ %i3 = load double, double* %arrayidx3, align 8
+ %arrayidx4 = getelementptr inbounds double, double* %b, i64 1
+ %i4 = load double, double* %arrayidx4, align 8
%mul5 = fmul double %i3, %i4
%call5 = tail call double @llvm.fabs.f64(double %mul5) nounwind readnone
store double %call, double* %c, align 8
- %arrayidx5 = getelementptr inbounds double* %c, i64 1
+ %arrayidx5 = getelementptr inbounds double, double* %c, i64 1
store double %call5, double* %arrayidx5, align 8
ret void
}
@@ -39,33 +39,33 @@ declare float @llvm.copysign.f32(float, float) nounwind readnone
;CHECK: ret
define void @vec_copysign_f32(float* %a, float* %b, float* noalias %c) {
entry:
- %0 = load float* %a, align 4
- %1 = load float* %b, align 4
+ %0 = load float, float* %a, align 4
+ %1 = load float, float* %b, align 4
%call0 = tail call float @llvm.copysign.f32(float %0, float %1) nounwind readnone
store float %call0, float* %c, align 4
- %ix2 = getelementptr inbounds float* %a, i64 1
- %2 = load float* %ix2, align 4
- %ix3 = getelementptr inbounds float* %b, i64 1
- %3 = load float* %ix3, align 4
+ %ix2 = getelementptr inbounds float, float* %a, i64 1
+ %2 = load float, float* %ix2, align 4
+ %ix3 = getelementptr inbounds float, float* %b, i64 1
+ %3 = load float, float* %ix3, align 4
%call1 = tail call float @llvm.copysign.f32(float %2, float %3) nounwind readnone
- %c1 = getelementptr inbounds float* %c, i64 1
+ %c1 = getelementptr inbounds float, float* %c, i64 1
store float %call1, float* %c1, align 4
- %ix4 = getelementptr inbounds float* %a, i64 2
- %4 = load float* %ix4, align 4
- %ix5 = getelementptr inbounds float* %b, i64 2
- %5 = load float* %ix5, align 4
+ %ix4 = getelementptr inbounds float, float* %a, i64 2
+ %4 = load float, float* %ix4, align 4
+ %ix5 = getelementptr inbounds float, float* %b, i64 2
+ %5 = load float, float* %ix5, align 4
%call2 = tail call float @llvm.copysign.f32(float %4, float %5) nounwind readnone
- %c2 = getelementptr inbounds float* %c, i64 2
+ %c2 = getelementptr inbounds float, float* %c, i64 2
store float %call2, float* %c2, align 4
- %ix6 = getelementptr inbounds float* %a, i64 3
- %6 = load float* %ix6, align 4
- %ix7 = getelementptr inbounds float* %b, i64 3
- %7 = load float* %ix7, align 4
+ %ix6 = getelementptr inbounds float, float* %a, i64 3
+ %6 = load float, float* %ix6, align 4
+ %ix7 = getelementptr inbounds float, float* %b, i64 3
+ %7 = load float, float* %ix7, align 4
%call3 = tail call float @llvm.copysign.f32(float %6, float %7) nounwind readnone
- %c3 = getelementptr inbounds float* %c, i64 3
+ %c3 = getelementptr inbounds float, float* %c, i64 3
store float %call3, float* %c3, align 4
ret void
@@ -75,38 +75,38 @@ declare i32 @llvm.bswap.i32(i32) nounwind readnone
define void @vec_bswap_i32(i32* %a, i32* %b, i32* %c) {
entry:
- %i0 = load i32* %a, align 4
- %i1 = load i32* %b, align 4
+ %i0 = load i32, i32* %a, align 4
+ %i1 = load i32, i32* %b, align 4
%add1 = add i32 %i0, %i1
%call1 = tail call i32 @llvm.bswap.i32(i32 %add1) nounwind readnone
- %arrayidx2 = getelementptr inbounds i32* %a, i32 1
- %i2 = load i32* %arrayidx2, align 4
- %arrayidx3 = getelementptr inbounds i32* %b, i32 1
- %i3 = load i32* %arrayidx3, align 4
+ %arrayidx2 = getelementptr inbounds i32, i32* %a, i32 1
+ %i2 = load i32, i32* %arrayidx2, align 4
+ %arrayidx3 = getelementptr inbounds i32, i32* %b, i32 1
+ %i3 = load i32, i32* %arrayidx3, align 4
%add2 = add i32 %i2, %i3
%call2 = tail call i32 @llvm.bswap.i32(i32 %add2) nounwind readnone
- %arrayidx4 = getelementptr inbounds i32* %a, i32 2
- %i4 = load i32* %arrayidx4, align 4
- %arrayidx5 = getelementptr inbounds i32* %b, i32 2
- %i5 = load i32* %arrayidx5, align 4
+ %arrayidx4 = getelementptr inbounds i32, i32* %a, i32 2
+ %i4 = load i32, i32* %arrayidx4, align 4
+ %arrayidx5 = getelementptr inbounds i32, i32* %b, i32 2
+ %i5 = load i32, i32* %arrayidx5, align 4
%add3 = add i32 %i4, %i5
%call3 = tail call i32 @llvm.bswap.i32(i32 %add3) nounwind readnone
- %arrayidx6 = getelementptr inbounds i32* %a, i32 3
- %i6 = load i32* %arrayidx6, align 4
- %arrayidx7 = getelementptr inbounds i32* %b, i32 3
- %i7 = load i32* %arrayidx7, align 4
+ %arrayidx6 = getelementptr inbounds i32, i32* %a, i32 3
+ %i6 = load i32, i32* %arrayidx6, align 4
+ %arrayidx7 = getelementptr inbounds i32, i32* %b, i32 3
+ %i7 = load i32, i32* %arrayidx7, align 4
%add4 = add i32 %i6, %i7
%call4 = tail call i32 @llvm.bswap.i32(i32 %add4) nounwind readnone
store i32 %call1, i32* %c, align 4
- %arrayidx8 = getelementptr inbounds i32* %c, i32 1
+ %arrayidx8 = getelementptr inbounds i32, i32* %c, i32 1
store i32 %call2, i32* %arrayidx8, align 4
- %arrayidx9 = getelementptr inbounds i32* %c, i32 2
+ %arrayidx9 = getelementptr inbounds i32, i32* %c, i32 2
store i32 %call3, i32* %arrayidx9, align 4
- %arrayidx10 = getelementptr inbounds i32* %c, i32 3
+ %arrayidx10 = getelementptr inbounds i32, i32* %c, i32 3
store i32 %call4, i32* %arrayidx10, align 4
ret void
@@ -122,38 +122,38 @@ declare i32 @llvm.ctlz.i32(i32,i1) nounwind readnone
define void @vec_ctlz_i32(i32* %a, i32* %b, i32* %c, i1) {
entry:
- %i0 = load i32* %a, align 4
- %i1 = load i32* %b, align 4
+ %i0 = load i32, i32* %a, align 4
+ %i1 = load i32, i32* %b, align 4
%add1 = add i32 %i0, %i1
%call1 = tail call i32 @llvm.ctlz.i32(i32 %add1,i1 true) nounwind readnone
- %arrayidx2 = getelementptr inbounds i32* %a, i32 1
- %i2 = load i32* %arrayidx2, align 4
- %arrayidx3 = getelementptr inbounds i32* %b, i32 1
- %i3 = load i32* %arrayidx3, align 4
+ %arrayidx2 = getelementptr inbounds i32, i32* %a, i32 1
+ %i2 = load i32, i32* %arrayidx2, align 4
+ %arrayidx3 = getelementptr inbounds i32, i32* %b, i32 1
+ %i3 = load i32, i32* %arrayidx3, align 4
%add2 = add i32 %i2, %i3
%call2 = tail call i32 @llvm.ctlz.i32(i32 %add2,i1 true) nounwind readnone
- %arrayidx4 = getelementptr inbounds i32* %a, i32 2
- %i4 = load i32* %arrayidx4, align 4
- %arrayidx5 = getelementptr inbounds i32* %b, i32 2
- %i5 = load i32* %arrayidx5, align 4
+ %arrayidx4 = getelementptr inbounds i32, i32* %a, i32 2
+ %i4 = load i32, i32* %arrayidx4, align 4
+ %arrayidx5 = getelementptr inbounds i32, i32* %b, i32 2
+ %i5 = load i32, i32* %arrayidx5, align 4
%add3 = add i32 %i4, %i5
%call3 = tail call i32 @llvm.ctlz.i32(i32 %add3,i1 true) nounwind readnone
- %arrayidx6 = getelementptr inbounds i32* %a, i32 3
- %i6 = load i32* %arrayidx6, align 4
- %arrayidx7 = getelementptr inbounds i32* %b, i32 3
- %i7 = load i32* %arrayidx7, align 4
+ %arrayidx6 = getelementptr inbounds i32, i32* %a, i32 3
+ %i6 = load i32, i32* %arrayidx6, align 4
+ %arrayidx7 = getelementptr inbounds i32, i32* %b, i32 3
+ %i7 = load i32, i32* %arrayidx7, align 4
%add4 = add i32 %i6, %i7
%call4 = tail call i32 @llvm.ctlz.i32(i32 %add4,i1 true) nounwind readnone
store i32 %call1, i32* %c, align 4
- %arrayidx8 = getelementptr inbounds i32* %c, i32 1
+ %arrayidx8 = getelementptr inbounds i32, i32* %c, i32 1
store i32 %call2, i32* %arrayidx8, align 4
- %arrayidx9 = getelementptr inbounds i32* %c, i32 2
+ %arrayidx9 = getelementptr inbounds i32, i32* %c, i32 2
store i32 %call3, i32* %arrayidx9, align 4
- %arrayidx10 = getelementptr inbounds i32* %c, i32 3
+ %arrayidx10 = getelementptr inbounds i32, i32* %c, i32 3
store i32 %call4, i32* %arrayidx10, align 4
ret void
@@ -167,38 +167,38 @@ entry:
define void @vec_ctlz_i32_neg(i32* %a, i32* %b, i32* %c, i1) {
entry:
- %i0 = load i32* %a, align 4
- %i1 = load i32* %b, align 4
+ %i0 = load i32, i32* %a, align 4
+ %i1 = load i32, i32* %b, align 4
%add1 = add i32 %i0, %i1
%call1 = tail call i32 @llvm.ctlz.i32(i32 %add1,i1 true) nounwind readnone
- %arrayidx2 = getelementptr inbounds i32* %a, i32 1
- %i2 = load i32* %arrayidx2, align 4
- %arrayidx3 = getelementptr inbounds i32* %b, i32 1
- %i3 = load i32* %arrayidx3, align 4
+ %arrayidx2 = getelementptr inbounds i32, i32* %a, i32 1
+ %i2 = load i32, i32* %arrayidx2, align 4
+ %arrayidx3 = getelementptr inbounds i32, i32* %b, i32 1
+ %i3 = load i32, i32* %arrayidx3, align 4
%add2 = add i32 %i2, %i3
%call2 = tail call i32 @llvm.ctlz.i32(i32 %add2,i1 false) nounwind readnone
- %arrayidx4 = getelementptr inbounds i32* %a, i32 2
- %i4 = load i32* %arrayidx4, align 4
- %arrayidx5 = getelementptr inbounds i32* %b, i32 2
- %i5 = load i32* %arrayidx5, align 4
+ %arrayidx4 = getelementptr inbounds i32, i32* %a, i32 2
+ %i4 = load i32, i32* %arrayidx4, align 4
+ %arrayidx5 = getelementptr inbounds i32, i32* %b, i32 2
+ %i5 = load i32, i32* %arrayidx5, align 4
%add3 = add i32 %i4, %i5
%call3 = tail call i32 @llvm.ctlz.i32(i32 %add3,i1 true) nounwind readnone
- %arrayidx6 = getelementptr inbounds i32* %a, i32 3
- %i6 = load i32* %arrayidx6, align 4
- %arrayidx7 = getelementptr inbounds i32* %b, i32 3
- %i7 = load i32* %arrayidx7, align 4
+ %arrayidx6 = getelementptr inbounds i32, i32* %a, i32 3
+ %i6 = load i32, i32* %arrayidx6, align 4
+ %arrayidx7 = getelementptr inbounds i32, i32* %b, i32 3
+ %i7 = load i32, i32* %arrayidx7, align 4
%add4 = add i32 %i6, %i7
%call4 = tail call i32 @llvm.ctlz.i32(i32 %add4,i1 false) nounwind readnone
store i32 %call1, i32* %c, align 4
- %arrayidx8 = getelementptr inbounds i32* %c, i32 1
+ %arrayidx8 = getelementptr inbounds i32, i32* %c, i32 1
store i32 %call2, i32* %arrayidx8, align 4
- %arrayidx9 = getelementptr inbounds i32* %c, i32 2
+ %arrayidx9 = getelementptr inbounds i32, i32* %c, i32 2
store i32 %call3, i32* %arrayidx9, align 4
- %arrayidx10 = getelementptr inbounds i32* %c, i32 3
+ %arrayidx10 = getelementptr inbounds i32, i32* %c, i32 3
store i32 %call4, i32* %arrayidx10, align 4
ret void
@@ -212,38 +212,38 @@ declare i32 @llvm.cttz.i32(i32,i1) nounwind readnone
define void @vec_cttz_i32(i32* %a, i32* %b, i32* %c, i1) {
entry:
- %i0 = load i32* %a, align 4
- %i1 = load i32* %b, align 4
+ %i0 = load i32, i32* %a, align 4
+ %i1 = load i32, i32* %b, align 4
%add1 = add i32 %i0, %i1
%call1 = tail call i32 @llvm.cttz.i32(i32 %add1,i1 true) nounwind readnone
- %arrayidx2 = getelementptr inbounds i32* %a, i32 1
- %i2 = load i32* %arrayidx2, align 4
- %arrayidx3 = getelementptr inbounds i32* %b, i32 1
- %i3 = load i32* %arrayidx3, align 4
+ %arrayidx2 = getelementptr inbounds i32, i32* %a, i32 1
+ %i2 = load i32, i32* %arrayidx2, align 4
+ %arrayidx3 = getelementptr inbounds i32, i32* %b, i32 1
+ %i3 = load i32, i32* %arrayidx3, align 4
%add2 = add i32 %i2, %i3
%call2 = tail call i32 @llvm.cttz.i32(i32 %add2,i1 true) nounwind readnone
- %arrayidx4 = getelementptr inbounds i32* %a, i32 2
- %i4 = load i32* %arrayidx4, align 4
- %arrayidx5 = getelementptr inbounds i32* %b, i32 2
- %i5 = load i32* %arrayidx5, align 4
+ %arrayidx4 = getelementptr inbounds i32, i32* %a, i32 2
+ %i4 = load i32, i32* %arrayidx4, align 4
+ %arrayidx5 = getelementptr inbounds i32, i32* %b, i32 2
+ %i5 = load i32, i32* %arrayidx5, align 4
%add3 = add i32 %i4, %i5
%call3 = tail call i32 @llvm.cttz.i32(i32 %add3,i1 true) nounwind readnone
- %arrayidx6 = getelementptr inbounds i32* %a, i32 3
- %i6 = load i32* %arrayidx6, align 4
- %arrayidx7 = getelementptr inbounds i32* %b, i32 3
- %i7 = load i32* %arrayidx7, align 4
+ %arrayidx6 = getelementptr inbounds i32, i32* %a, i32 3
+ %i6 = load i32, i32* %arrayidx6, align 4
+ %arrayidx7 = getelementptr inbounds i32, i32* %b, i32 3
+ %i7 = load i32, i32* %arrayidx7, align 4
%add4 = add i32 %i6, %i7
%call4 = tail call i32 @llvm.cttz.i32(i32 %add4,i1 true) nounwind readnone
store i32 %call1, i32* %c, align 4
- %arrayidx8 = getelementptr inbounds i32* %c, i32 1
+ %arrayidx8 = getelementptr inbounds i32, i32* %c, i32 1
store i32 %call2, i32* %arrayidx8, align 4
- %arrayidx9 = getelementptr inbounds i32* %c, i32 2
+ %arrayidx9 = getelementptr inbounds i32, i32* %c, i32 2
store i32 %call3, i32* %arrayidx9, align 4
- %arrayidx10 = getelementptr inbounds i32* %c, i32 3
+ %arrayidx10 = getelementptr inbounds i32, i32* %c, i32 3
store i32 %call4, i32* %arrayidx10, align 4
ret void
@@ -257,38 +257,38 @@ entry:
define void @vec_cttz_i32_neg(i32* %a, i32* %b, i32* %c, i1) {
entry:
- %i0 = load i32* %a, align 4
- %i1 = load i32* %b, align 4
+ %i0 = load i32, i32* %a, align 4
+ %i1 = load i32, i32* %b, align 4
%add1 = add i32 %i0, %i1
%call1 = tail call i32 @llvm.cttz.i32(i32 %add1,i1 true) nounwind readnone
- %arrayidx2 = getelementptr inbounds i32* %a, i32 1
- %i2 = load i32* %arrayidx2, align 4
- %arrayidx3 = getelementptr inbounds i32* %b, i32 1
- %i3 = load i32* %arrayidx3, align 4
+ %arrayidx2 = getelementptr inbounds i32, i32* %a, i32 1
+ %i2 = load i32, i32* %arrayidx2, align 4
+ %arrayidx3 = getelementptr inbounds i32, i32* %b, i32 1
+ %i3 = load i32, i32* %arrayidx3, align 4
%add2 = add i32 %i2, %i3
%call2 = tail call i32 @llvm.cttz.i32(i32 %add2,i1 false) nounwind readnone
- %arrayidx4 = getelementptr inbounds i32* %a, i32 2
- %i4 = load i32* %arrayidx4, align 4
- %arrayidx5 = getelementptr inbounds i32* %b, i32 2
- %i5 = load i32* %arrayidx5, align 4
+ %arrayidx4 = getelementptr inbounds i32, i32* %a, i32 2
+ %i4 = load i32, i32* %arrayidx4, align 4
+ %arrayidx5 = getelementptr inbounds i32, i32* %b, i32 2
+ %i5 = load i32, i32* %arrayidx5, align 4
%add3 = add i32 %i4, %i5
%call3 = tail call i32 @llvm.cttz.i32(i32 %add3,i1 true) nounwind readnone
- %arrayidx6 = getelementptr inbounds i32* %a, i32 3
- %i6 = load i32* %arrayidx6, align 4
- %arrayidx7 = getelementptr inbounds i32* %b, i32 3
- %i7 = load i32* %arrayidx7, align 4
+ %arrayidx6 = getelementptr inbounds i32, i32* %a, i32 3
+ %i6 = load i32, i32* %arrayidx6, align 4
+ %arrayidx7 = getelementptr inbounds i32, i32* %b, i32 3
+ %i7 = load i32, i32* %arrayidx7, align 4
%add4 = add i32 %i6, %i7
%call4 = tail call i32 @llvm.cttz.i32(i32 %add4,i1 false) nounwind readnone
store i32 %call1, i32* %c, align 4
- %arrayidx8 = getelementptr inbounds i32* %c, i32 1
+ %arrayidx8 = getelementptr inbounds i32, i32* %c, i32 1
store i32 %call2, i32* %arrayidx8, align 4
- %arrayidx9 = getelementptr inbounds i32* %c, i32 2
+ %arrayidx9 = getelementptr inbounds i32, i32* %c, i32 2
store i32 %call3, i32* %arrayidx9, align 4
- %arrayidx10 = getelementptr inbounds i32* %c, i32 3
+ %arrayidx10 = getelementptr inbounds i32, i32* %c, i32 3
store i32 %call4, i32* %arrayidx10, align 4
ret void
@@ -300,38 +300,38 @@ entry:
declare float @llvm.powi.f32(float, i32)
define void @vec_powi_f32(float* %a, float* %b, float* %c, i32 %P) {
entry:
- %i0 = load float* %a, align 4
- %i1 = load float* %b, align 4
+ %i0 = load float, float* %a, align 4
+ %i1 = load float, float* %b, align 4
%add1 = fadd float %i0, %i1
%call1 = tail call float @llvm.powi.f32(float %add1,i32 %P) nounwind readnone
- %arrayidx2 = getelementptr inbounds float* %a, i32 1
- %i2 = load float* %arrayidx2, align 4
- %arrayidx3 = getelementptr inbounds float* %b, i32 1
- %i3 = load float* %arrayidx3, align 4
+ %arrayidx2 = getelementptr inbounds float, float* %a, i32 1
+ %i2 = load float, float* %arrayidx2, align 4
+ %arrayidx3 = getelementptr inbounds float, float* %b, i32 1
+ %i3 = load float, float* %arrayidx3, align 4
%add2 = fadd float %i2, %i3
%call2 = tail call float @llvm.powi.f32(float %add2,i32 %P) nounwind readnone
- %arrayidx4 = getelementptr inbounds float* %a, i32 2
- %i4 = load float* %arrayidx4, align 4
- %arrayidx5 = getelementptr inbounds float* %b, i32 2
- %i5 = load float* %arrayidx5, align 4
+ %arrayidx4 = getelementptr inbounds float, float* %a, i32 2
+ %i4 = load float, float* %arrayidx4, align 4
+ %arrayidx5 = getelementptr inbounds float, float* %b, i32 2
+ %i5 = load float, float* %arrayidx5, align 4
%add3 = fadd float %i4, %i5
%call3 = tail call float @llvm.powi.f32(float %add3,i32 %P) nounwind readnone
- %arrayidx6 = getelementptr inbounds float* %a, i32 3
- %i6 = load float* %arrayidx6, align 4
- %arrayidx7 = getelementptr inbounds float* %b, i32 3
- %i7 = load float* %arrayidx7, align 4
+ %arrayidx6 = getelementptr inbounds float, float* %a, i32 3
+ %i6 = load float, float* %arrayidx6, align 4
+ %arrayidx7 = getelementptr inbounds float, float* %b, i32 3
+ %i7 = load float, float* %arrayidx7, align 4
%add4 = fadd float %i6, %i7
%call4 = tail call float @llvm.powi.f32(float %add4,i32 %P) nounwind readnone
store float %call1, float* %c, align 4
- %arrayidx8 = getelementptr inbounds float* %c, i32 1
+ %arrayidx8 = getelementptr inbounds float, float* %c, i32 1
store float %call2, float* %arrayidx8, align 4
- %arrayidx9 = getelementptr inbounds float* %c, i32 2
+ %arrayidx9 = getelementptr inbounds float, float* %c, i32 2
store float %call3, float* %arrayidx9, align 4
- %arrayidx10 = getelementptr inbounds float* %c, i32 3
+ %arrayidx10 = getelementptr inbounds float, float* %c, i32 3
store float %call4, float* %arrayidx10, align 4
ret void
@@ -346,38 +346,38 @@ entry:
define void @vec_powi_f32_neg(float* %a, float* %b, float* %c, i32 %P, i32 %Q) {
entry:
- %i0 = load float* %a, align 4
- %i1 = load float* %b, align 4
+ %i0 = load float, float* %a, align 4
+ %i1 = load float, float* %b, align 4
%add1 = fadd float %i0, %i1
%call1 = tail call float @llvm.powi.f32(float %add1,i32 %P) nounwind readnone
- %arrayidx2 = getelementptr inbounds float* %a, i32 1
- %i2 = load float* %arrayidx2, align 4
- %arrayidx3 = getelementptr inbounds float* %b, i32 1
- %i3 = load float* %arrayidx3, align 4
+ %arrayidx2 = getelementptr inbounds float, float* %a, i32 1
+ %i2 = load float, float* %arrayidx2, align 4
+ %arrayidx3 = getelementptr inbounds float, float* %b, i32 1
+ %i3 = load float, float* %arrayidx3, align 4
%add2 = fadd float %i2, %i3
%call2 = tail call float @llvm.powi.f32(float %add2,i32 %Q) nounwind readnone
- %arrayidx4 = getelementptr inbounds float* %a, i32 2
- %i4 = load float* %arrayidx4, align 4
- %arrayidx5 = getelementptr inbounds float* %b, i32 2
- %i5 = load float* %arrayidx5, align 4
+ %arrayidx4 = getelementptr inbounds float, float* %a, i32 2
+ %i4 = load float, float* %arrayidx4, align 4
+ %arrayidx5 = getelementptr inbounds float, float* %b, i32 2
+ %i5 = load float, float* %arrayidx5, align 4
%add3 = fadd float %i4, %i5
%call3 = tail call float @llvm.powi.f32(float %add3,i32 %P) nounwind readnone
- %arrayidx6 = getelementptr inbounds float* %a, i32 3
- %i6 = load float* %arrayidx6, align 4
- %arrayidx7 = getelementptr inbounds float* %b, i32 3
- %i7 = load float* %arrayidx7, align 4
+ %arrayidx6 = getelementptr inbounds float, float* %a, i32 3
+ %i6 = load float, float* %arrayidx6, align 4
+ %arrayidx7 = getelementptr inbounds float, float* %b, i32 3
+ %i7 = load float, float* %arrayidx7, align 4
%add4 = fadd float %i6, %i7
%call4 = tail call float @llvm.powi.f32(float %add4,i32 %Q) nounwind readnone
store float %call1, float* %c, align 4
- %arrayidx8 = getelementptr inbounds float* %c, i32 1
+ %arrayidx8 = getelementptr inbounds float, float* %c, i32 1
store float %call2, float* %arrayidx8, align 4
- %arrayidx9 = getelementptr inbounds float* %c, i32 2
+ %arrayidx9 = getelementptr inbounds float, float* %c, i32 2
store float %call3, float* %arrayidx9, align 4
- %arrayidx10 = getelementptr inbounds float* %c, i32 3
+ %arrayidx10 = getelementptr inbounds float, float* %c, i32 3
store float %call4, float* %arrayidx10, align 4
ret void
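; Note which instructions these hunks do not touch: the intrinsic calls keep
; their existing syntax, since only load and getelementptr grow an explicit
; type operand in this change. Verbatim context line from above:
;
;   %call1 = tail call i32 @llvm.bswap.i32(i32 %add1) nounwind readnone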
diff --git a/test/Transforms/SLPVectorizer/X86/long_chains.ll b/test/Transforms/SLPVectorizer/X86/long_chains.ll
index 5af3e6d..f87dabf 100644
--- a/test/Transforms/SLPVectorizer/X86/long_chains.ll
+++ b/test/Transforms/SLPVectorizer/X86/long_chains.ll
@@ -13,9 +13,9 @@ target triple = "x86_64-apple-macosx10.8.0"
; CHECK: ret
define i32 @test(double* nocapture %A, i8* nocapture %B) {
entry:
- %0 = load i8* %B, align 1
- %arrayidx1 = getelementptr inbounds i8* %B, i64 1
- %1 = load i8* %arrayidx1, align 1
+ %0 = load i8, i8* %B, align 1
+ %arrayidx1 = getelementptr inbounds i8, i8* %B, i64 1
+ %1 = load i8, i8* %arrayidx1, align 1
%add = add i8 %0, 3
%add4 = add i8 %1, 3
%conv6 = sitofp i8 %add to double
@@ -41,7 +41,7 @@ entry:
%mul25 = fmul double %add22, %add22
%add26 = fadd double %mul25, 1.000000e+00
store double %add24, double* %A, align 8
- %arrayidx28 = getelementptr inbounds double* %A, i64 1
+ %arrayidx28 = getelementptr inbounds double, double* %A, i64 1
store double %add26, double* %arrayidx28, align 8
ret i32 undef
}
diff --git a/test/Transforms/SLPVectorizer/X86/loopinvariant.ll b/test/Transforms/SLPVectorizer/X86/loopinvariant.ll
index bc12926..0c16c34 100644
--- a/test/Transforms/SLPVectorizer/X86/loopinvariant.ll
+++ b/test/Transforms/SLPVectorizer/X86/loopinvariant.ll
@@ -18,43 +18,43 @@ entry:
for.body: ; preds = %entry, %for.body
%indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ 0, %entry ]
- %arrayidx = getelementptr inbounds i32* %A, i64 %indvars.iv
- %0 = load i32* %arrayidx, align 4
+ %arrayidx = getelementptr inbounds i32, i32* %A, i64 %indvars.iv
+ %0 = load i32, i32* %arrayidx, align 4
%add1 = add nsw i32 %0, %n
store i32 %add1, i32* %arrayidx, align 4
%1 = or i64 %indvars.iv, 1
- %arrayidx4 = getelementptr inbounds i32* %A, i64 %1
- %2 = load i32* %arrayidx4, align 4
+ %arrayidx4 = getelementptr inbounds i32, i32* %A, i64 %1
+ %2 = load i32, i32* %arrayidx4, align 4
%add5 = add nsw i32 %2, %n
store i32 %add5, i32* %arrayidx4, align 4
%3 = or i64 %indvars.iv, 2
- %arrayidx8 = getelementptr inbounds i32* %A, i64 %3
- %4 = load i32* %arrayidx8, align 4
+ %arrayidx8 = getelementptr inbounds i32, i32* %A, i64 %3
+ %4 = load i32, i32* %arrayidx8, align 4
%add9 = add nsw i32 %4, %n
store i32 %add9, i32* %arrayidx8, align 4
%5 = or i64 %indvars.iv, 3
- %arrayidx12 = getelementptr inbounds i32* %A, i64 %5
- %6 = load i32* %arrayidx12, align 4
+ %arrayidx12 = getelementptr inbounds i32, i32* %A, i64 %5
+ %6 = load i32, i32* %arrayidx12, align 4
%add13 = add nsw i32 %6, %n
store i32 %add13, i32* %arrayidx12, align 4
%7 = or i64 %indvars.iv, 4
- %arrayidx16 = getelementptr inbounds i32* %A, i64 %7
- %8 = load i32* %arrayidx16, align 4
+ %arrayidx16 = getelementptr inbounds i32, i32* %A, i64 %7
+ %8 = load i32, i32* %arrayidx16, align 4
%add17 = add nsw i32 %8, %n
store i32 %add17, i32* %arrayidx16, align 4
%9 = or i64 %indvars.iv, 5
- %arrayidx20 = getelementptr inbounds i32* %A, i64 %9
- %10 = load i32* %arrayidx20, align 4
+ %arrayidx20 = getelementptr inbounds i32, i32* %A, i64 %9
+ %10 = load i32, i32* %arrayidx20, align 4
%add21 = add nsw i32 %10, %n
store i32 %add21, i32* %arrayidx20, align 4
%11 = or i64 %indvars.iv, 6
- %arrayidx24 = getelementptr inbounds i32* %A, i64 %11
- %12 = load i32* %arrayidx24, align 4
+ %arrayidx24 = getelementptr inbounds i32, i32* %A, i64 %11
+ %12 = load i32, i32* %arrayidx24, align 4
%add25 = add nsw i32 %12, %n
store i32 %add25, i32* %arrayidx24, align 4
%13 = or i64 %indvars.iv, 7
- %arrayidx28 = getelementptr inbounds i32* %A, i64 %13
- %14 = load i32* %arrayidx28, align 4
+ %arrayidx28 = getelementptr inbounds i32, i32* %A, i64 %13
+ %14 = load i32, i32* %arrayidx28, align 4
%add29 = add nsw i32 %14, %n
store i32 %add29, i32* %arrayidx28, align 4
%indvars.iv.next = add i64 %indvars.iv, 8
diff --git a/test/Transforms/SLPVectorizer/X86/metadata.ll b/test/Transforms/SLPVectorizer/X86/metadata.ll
index e021cca..ebef6b5 100644
--- a/test/Transforms/SLPVectorizer/X86/metadata.ll
+++ b/test/Transforms/SLPVectorizer/X86/metadata.ll
@@ -12,16 +12,16 @@ target triple = "x86_64-apple-macosx10.8.0"
define void @test1(double* %a, double* %b, double* %c) {
entry:
- %i0 = load double* %a, align 8, !tbaa !4
- %i1 = load double* %b, align 8, !tbaa !4
+ %i0 = load double, double* %a, align 8, !tbaa !4
+ %i1 = load double, double* %b, align 8, !tbaa !4
%mul = fmul double %i0, %i1, !fpmath !0
- %arrayidx3 = getelementptr inbounds double* %a, i64 1
- %i3 = load double* %arrayidx3, align 8, !tbaa !4
- %arrayidx4 = getelementptr inbounds double* %b, i64 1
- %i4 = load double* %arrayidx4, align 8, !tbaa !4
+ %arrayidx3 = getelementptr inbounds double, double* %a, i64 1
+ %i3 = load double, double* %arrayidx3, align 8, !tbaa !4
+ %arrayidx4 = getelementptr inbounds double, double* %b, i64 1
+ %i4 = load double, double* %arrayidx4, align 8, !tbaa !4
%mul5 = fmul double %i3, %i4, !fpmath !0
store double %mul, double* %c, align 8, !tbaa !4
- %arrayidx5 = getelementptr inbounds double* %c, i64 1
+ %arrayidx5 = getelementptr inbounds double, double* %c, i64 1
store double %mul5, double* %arrayidx5, align 8, !tbaa !4
ret void
}
@@ -35,17 +35,17 @@ entry:
define void @test2(double* %a, double* %b, i8* %e) {
entry:
- %i0 = load double* %a, align 8, !tbaa !4
- %i1 = load double* %b, align 8, !tbaa !4
+ %i0 = load double, double* %a, align 8, !tbaa !4
+ %i1 = load double, double* %b, align 8, !tbaa !4
%mul = fmul double %i0, %i1, !fpmath !1
- %arrayidx3 = getelementptr inbounds double* %a, i64 1
- %i3 = load double* %arrayidx3, align 8, !tbaa !4
- %arrayidx4 = getelementptr inbounds double* %b, i64 1
- %i4 = load double* %arrayidx4, align 8, !tbaa !4
+ %arrayidx3 = getelementptr inbounds double, double* %a, i64 1
+ %i3 = load double, double* %arrayidx3, align 8, !tbaa !4
+ %arrayidx4 = getelementptr inbounds double, double* %b, i64 1
+ %i4 = load double, double* %arrayidx4, align 8, !tbaa !4
%mul5 = fmul double %i3, %i4, !fpmath !1
%c = bitcast i8* %e to double*
store double %mul, double* %c, align 8, !tbaa !4
- %carrayidx5 = getelementptr inbounds i8* %e, i64 8
+ %carrayidx5 = getelementptr inbounds i8, i8* %e, i64 8
%arrayidx5 = bitcast i8* %carrayidx5 to double*
store double %mul5, double* %arrayidx5, align 8, !tbaa !4
ret void
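; Metadata attachments ride along unchanged: the !tbaa and !fpmath tags above
; keep their trailing position after the new type operand is inserted, e.g.
; this rewritten load (verbatim from the hunk above):
;
;   %i0 = load double, double* %a, align 8, !tbaa !4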
diff --git a/test/Transforms/SLPVectorizer/X86/multi_block.ll b/test/Transforms/SLPVectorizer/X86/multi_block.ll
index 2f1cc74..993054a 100644
--- a/test/Transforms/SLPVectorizer/X86/multi_block.ll
+++ b/test/Transforms/SLPVectorizer/X86/multi_block.ll
@@ -25,9 +25,9 @@ target triple = "x86_64-apple-macosx10.7.0"
;CHECK: store <2 x double>
;CHECK: ret
define i32 @bar(double* nocapture %A, i32 %d) {
- %1 = load double* %A, align 8
- %2 = getelementptr inbounds double* %A, i64 1
- %3 = load double* %2, align 8
+ %1 = load double, double* %A, align 8
+ %2 = getelementptr inbounds double, double* %A, i64 1
+ %3 = load double, double* %2, align 8
%4 = fptrunc double %1 to float
%5 = fptrunc double %3 to float
%6 = icmp eq i32 %d, 0
@@ -42,11 +42,11 @@ define i32 @bar(double* nocapture %A, i32 %d) {
%11 = fadd float %5, 5.000000e+00
%12 = fpext float %10 to double
%13 = fadd double %12, 9.000000e+00
- %14 = getelementptr inbounds double* %A, i64 8
+ %14 = getelementptr inbounds double, double* %A, i64 8
store double %13, double* %14, align 8
%15 = fpext float %11 to double
%16 = fadd double %15, 5.000000e+00
- %17 = getelementptr inbounds double* %A, i64 9
+ %17 = getelementptr inbounds double, double* %A, i64 9
store double %16, double* %17, align 8
ret i32 undef
}
diff --git a/test/Transforms/SLPVectorizer/X86/multi_user.ll b/test/Transforms/SLPVectorizer/X86/multi_user.ll
index 63a77e4..3197f6d 100644
--- a/test/Transforms/SLPVectorizer/X86/multi_user.ll
+++ b/test/Transforms/SLPVectorizer/X86/multi_user.ll
@@ -20,27 +20,27 @@ target triple = "x86_64-apple-macosx10.7.0"
define i32 @foo(i32* nocapture %A, i32 %n) {
%1 = mul nsw i32 %n, 5
%2 = add nsw i32 %1, 7
- %3 = load i32* %A, align 4
+ %3 = load i32, i32* %A, align 4
%4 = add nsw i32 %2, %3
store i32 %4, i32* %A, align 4
%5 = add nsw i32 %1, 8
- %6 = getelementptr inbounds i32* %A, i64 1
- %7 = load i32* %6, align 4
+ %6 = getelementptr inbounds i32, i32* %A, i64 1
+ %7 = load i32, i32* %6, align 4
%8 = add nsw i32 %5, %7
store i32 %8, i32* %6, align 4
%9 = add nsw i32 %1, 9
- %10 = getelementptr inbounds i32* %A, i64 2
- %11 = load i32* %10, align 4
+ %10 = getelementptr inbounds i32, i32* %A, i64 2
+ %11 = load i32, i32* %10, align 4
%12 = add nsw i32 %9, %11
store i32 %12, i32* %10, align 4
%13 = add nsw i32 %1, 10
- %14 = getelementptr inbounds i32* %A, i64 3
- %15 = load i32* %14, align 4
+ %14 = getelementptr inbounds i32, i32* %A, i64 3
+ %15 = load i32, i32* %14, align 4
%16 = add nsw i32 %13, %15
store i32 %16, i32* %14, align 4
%17 = add nsw i32 %1, 11
- %18 = getelementptr inbounds i32* %A, i64 4
- %19 = load i32* %18, align 4
+ %18 = getelementptr inbounds i32, i32* %A, i64 4
+ %19 = load i32, i32* %18, align 4
%20 = add nsw i32 %17, %19
store i32 %20, i32* %18, align 4
ret i32 undef
diff --git a/test/Transforms/SLPVectorizer/X86/odd_store.ll b/test/Transforms/SLPVectorizer/X86/odd_store.ll
index 027f601..25f049a 100644
--- a/test/Transforms/SLPVectorizer/X86/odd_store.ll
+++ b/test/Transforms/SLPVectorizer/X86/odd_store.ll
@@ -18,28 +18,28 @@ target triple = "x86_64-apple-macosx10.8.0"
;CHECK-NOT: store <3 x i8>
;CHECK: ret
define i32 @foo(i8* noalias nocapture %A, float* noalias nocapture %B, float %T) {
- %1 = getelementptr inbounds float* %B, i64 10
- %2 = load float* %1, align 4
+ %1 = getelementptr inbounds float, float* %B, i64 10
+ %2 = load float, float* %1, align 4
%3 = fmul float %2, %T
%4 = fpext float %3 to double
%5 = fadd double %4, 4.000000e+00
%6 = fptosi double %5 to i8
store i8 %6, i8* %A, align 1
- %7 = getelementptr inbounds float* %B, i64 11
- %8 = load float* %7, align 4
+ %7 = getelementptr inbounds float, float* %B, i64 11
+ %8 = load float, float* %7, align 4
%9 = fmul float %8, %T
%10 = fpext float %9 to double
%11 = fadd double %10, 5.000000e+00
%12 = fptosi double %11 to i8
- %13 = getelementptr inbounds i8* %A, i64 1
+ %13 = getelementptr inbounds i8, i8* %A, i64 1
store i8 %12, i8* %13, align 1
- %14 = getelementptr inbounds float* %B, i64 12
- %15 = load float* %14, align 4
+ %14 = getelementptr inbounds float, float* %B, i64 12
+ %15 = load float, float* %14, align 4
%16 = fmul float %15, %T
%17 = fpext float %16 to double
%18 = fadd double %17, 6.000000e+00
%19 = fptosi double %18 to i8
- %20 = getelementptr inbounds i8* %A, i64 2
+ %20 = getelementptr inbounds i8, i8* %A, i64 2
store i8 %19, i8* %20, align 1
ret i32 undef
}
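; In operandorder.ll below, the FileCheck expectations are updated along with
; the IR: a vectorized load is printed with the same explicit result type, so
; the patterns become, e.g.:
;
;   ; CHECK: load <4 x float>, <4 x float>*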
diff --git a/test/Transforms/SLPVectorizer/X86/operandorder.ll b/test/Transforms/SLPVectorizer/X86/operandorder.ll
index cd446f0..109540b 100644
--- a/test/Transforms/SLPVectorizer/X86/operandorder.ll
+++ b/test/Transforms/SLPVectorizer/X86/operandorder.ll
@@ -13,12 +13,12 @@ target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f3
define void @shuffle_operands1(double * noalias %from, double * noalias %to,
double %v1, double %v2) {
- %from_1 = getelementptr double *%from, i64 1
- %v0_1 = load double * %from
- %v0_2 = load double * %from_1
+ %from_1 = getelementptr double, double *%from, i64 1
+ %v0_1 = load double , double * %from
+ %v0_2 = load double , double * %from_1
%v1_1 = fadd double %v0_1, %v1
%v1_2 = fadd double %v2, %v0_2
- %to_2 = getelementptr double * %to, i64 1
+ %to_2 = getelementptr double, double * %to, i64 1
store double %v1_1, double *%to
store double %v1_2, double *%to_2
ret void
@@ -35,12 +35,12 @@ br label %lp
lp:
%p = phi double [ 1.000000e+00, %lp ], [ 0.000000e+00, %entry ]
- %from_1 = getelementptr double *%from, i64 1
- %v0_1 = load double * %from
- %v0_2 = load double * %from_1
+ %from_1 = getelementptr double, double *%from, i64 1
+ %v0_1 = load double , double * %from
+ %v0_2 = load double , double * %from_1
%v1_1 = fadd double %v0_1, %p
%v1_2 = fadd double %v0_1, %v0_2
- %to_2 = getelementptr double * %to, i64 1
+ %to_2 = getelementptr double, double * %to, i64 1
store double %v1_1, double *%to
store double %v1_2, double *%to_2
br i1 undef, label %lp, label %ext
@@ -60,12 +60,12 @@ br label %lp
lp:
%p = phi double [ 1.000000e+00, %lp ], [ 0.000000e+00, %entry ]
- %from_1 = getelementptr double *%from, i64 1
- %v0_1 = load double * %from
- %v0_2 = load double * %from_1
+ %from_1 = getelementptr double, double *%from, i64 1
+ %v0_1 = load double , double * %from
+ %v0_2 = load double , double * %from_1
%v1_1 = fadd double %p, %v0_1
%v1_2 = fadd double %v0_2, %v0_1
- %to_2 = getelementptr double * %to, i64 1
+ %to_2 = getelementptr double, double * %to, i64 1
store double %v1_1, double *%to
store double %v1_2, double *%to_2
br i1 undef, label %lp, label %ext
@@ -85,12 +85,12 @@ br label %lp
lp:
%p = phi double [ 1.000000e+00, %lp ], [ 0.000000e+00, %entry ]
- %from_1 = getelementptr double *%from, i64 1
- %v0_1 = load double * %from
- %v0_2 = load double * %from_1
+ %from_1 = getelementptr double, double *%from, i64 1
+ %v0_1 = load double , double * %from
+ %v0_2 = load double , double * %from_1
%v1_1 = fadd double %p, %v0_1
%v1_2 = fadd double %v0_1, %v0_2
- %to_2 = getelementptr double * %to, i64 1
+ %to_2 = getelementptr double, double * %to, i64 1
store double %v1_1, double *%to
store double %v1_2, double *%to_2
br i1 undef, label %lp, label %ext
@@ -111,12 +111,12 @@ br label %lp
lp:
%p = phi double [ 1.000000e+00, %lp ], [ 0.000000e+00, %entry ]
- %from_1 = getelementptr double *%from, i64 1
- %v0_1 = load double * %from
- %v0_2 = load double * %from_1
+ %from_1 = getelementptr double, double *%from, i64 1
+ %v0_1 = load double , double * %from
+ %v0_2 = load double , double * %from_1
%v1_1 = fadd double %v0_2, %v0_1
%v1_2 = fadd double %p, %v0_1
- %to_2 = getelementptr double * %to, i64 1
+ %to_2 = getelementptr double, double * %to, i64 1
store double %v1_1, double *%to
store double %v1_2, double *%to_2
br i1 undef, label %lp, label %ext
@@ -136,12 +136,12 @@ br label %lp
lp:
%p = phi double [ 1.000000e+00, %lp ], [ 0.000000e+00, %entry ]
- %from_1 = getelementptr double *%from, i64 1
- %v0_1 = load double * %from
- %v0_2 = load double * %from_1
+ %from_1 = getelementptr double, double *%from, i64 1
+ %v0_1 = load double , double * %from
+ %v0_2 = load double , double * %from_1
%v1_1 = fadd double %v0_1, %v0_2
%v1_2 = fadd double %p, %v0_1
- %to_2 = getelementptr double * %to, i64 1
+ %to_2 = getelementptr double, double * %to, i64 1
store double %v1_1, double *%to
store double %v1_2, double *%to_2
br i1 undef, label %lp, label %ext
@@ -162,12 +162,12 @@ br label %lp
lp:
%p = phi double [ 1.000000e+00, %lp ], [ 0.000000e+00, %entry ]
- %from_1 = getelementptr double *%from, i64 1
- %v0_1 = load double * %from
- %v0_2 = load double * %from_1
+ %from_1 = getelementptr double, double *%from, i64 1
+ %v0_1 = load double , double * %from
+ %v0_2 = load double , double * %from_1
%v1_1 = fadd double %v0_1, %v0_2
%v1_2 = fadd double %v0_1, %p
- %to_2 = getelementptr double * %to, i64 1
+ %to_2 = getelementptr double, double * %to, i64 1
store double %v1_1, double *%to
store double %v1_2, double *%to_2
br i1 undef, label %lp, label %ext
@@ -181,7 +181,7 @@ ext:
; CHECK-LABEL: good_load_order
-; CHECK: %[[V1:[0-9]+]] = load <4 x float>*
+; CHECK: %[[V1:[0-9]+]] = load <4 x float>, <4 x float>*
; CHECK: %[[V2:[0-9]+]] = insertelement <4 x float> undef, float %1, i32 0
; CHECK: %[[V3:[0-9]+]] = shufflevector <4 x float> %[[V2]], <4 x float> %[[V1]], <4 x i32> <i32 0, i32 4, i32 5, i32 6>
; CHECK: = fmul <4 x float> %[[V1]], %[[V3]]
@@ -193,36 +193,36 @@ entry:
br label %for.cond1.preheader
for.cond1.preheader:
- %0 = load float* getelementptr inbounds ([32000 x float]* @a, i64 0, i64 0), align 16
+ %0 = load float, float* getelementptr inbounds ([32000 x float], [32000 x float]* @a, i64 0, i64 0), align 16
br label %for.body3
for.body3:
%1 = phi float [ %0, %for.cond1.preheader ], [ %10, %for.body3 ]
%indvars.iv = phi i64 [ 0, %for.cond1.preheader ], [ %indvars.iv.next, %for.body3 ]
%2 = add nsw i64 %indvars.iv, 1
- %arrayidx = getelementptr inbounds [32000 x float]* @a, i64 0, i64 %2
- %3 = load float* %arrayidx, align 4
- %arrayidx5 = getelementptr inbounds [32000 x float]* @a, i64 0, i64 %indvars.iv
+ %arrayidx = getelementptr inbounds [32000 x float], [32000 x float]* @a, i64 0, i64 %2
+ %3 = load float, float* %arrayidx, align 4
+ %arrayidx5 = getelementptr inbounds [32000 x float], [32000 x float]* @a, i64 0, i64 %indvars.iv
%mul6 = fmul float %3, %1
store float %mul6, float* %arrayidx5, align 4
%4 = add nsw i64 %indvars.iv, 2
- %arrayidx11 = getelementptr inbounds [32000 x float]* @a, i64 0, i64 %4
- %5 = load float* %arrayidx11, align 4
+ %arrayidx11 = getelementptr inbounds [32000 x float], [32000 x float]* @a, i64 0, i64 %4
+ %5 = load float, float* %arrayidx11, align 4
%mul15 = fmul float %5, %3
store float %mul15, float* %arrayidx, align 4
%6 = add nsw i64 %indvars.iv, 3
- %arrayidx21 = getelementptr inbounds [32000 x float]* @a, i64 0, i64 %6
- %7 = load float* %arrayidx21, align 4
+ %arrayidx21 = getelementptr inbounds [32000 x float], [32000 x float]* @a, i64 0, i64 %6
+ %7 = load float, float* %arrayidx21, align 4
%mul25 = fmul float %7, %5
store float %mul25, float* %arrayidx11, align 4
%8 = add nsw i64 %indvars.iv, 4
- %arrayidx31 = getelementptr inbounds [32000 x float]* @a, i64 0, i64 %8
- %9 = load float* %arrayidx31, align 4
+ %arrayidx31 = getelementptr inbounds [32000 x float], [32000 x float]* @a, i64 0, i64 %8
+ %9 = load float, float* %arrayidx31, align 4
%mul35 = fmul float %9, %7
store float %mul35, float* %arrayidx21, align 4
%indvars.iv.next = add nuw nsw i64 %indvars.iv, 5
- %arrayidx41 = getelementptr inbounds [32000 x float]* @a, i64 0, i64 %indvars.iv.next
- %10 = load float* %arrayidx41, align 4
+ %arrayidx41 = getelementptr inbounds [32000 x float], [32000 x float]* @a, i64 0, i64 %indvars.iv.next
+ %10 = load float, float* %arrayidx41, align 4
%mul45 = fmul float %10, %9
store float %mul45, float* %arrayidx31, align 4
%11 = trunc i64 %indvars.iv.next to i32
@@ -238,19 +238,19 @@ for.end:
; c[1] = b[1]+a[1]; // swapped b[1] and a[1]
; CHECK-LABEL: load_reorder_double
-; CHECK: load <2 x double>*
+; CHECK: load <2 x double>, <2 x double>*
; CHECK: fadd <2 x double>
define void @load_reorder_double(double* nocapture %c, double* noalias nocapture readonly %a, double* noalias nocapture readonly %b){
- %1 = load double* %a
- %2 = load double* %b
+ %1 = load double, double* %a
+ %2 = load double, double* %b
%3 = fadd double %1, %2
store double %3, double* %c
- %4 = getelementptr inbounds double* %b, i64 1
- %5 = load double* %4
- %6 = getelementptr inbounds double* %a, i64 1
- %7 = load double* %6
+ %4 = getelementptr inbounds double, double* %b, i64 1
+ %5 = load double, double* %4
+ %6 = getelementptr inbounds double, double* %a, i64 1
+ %7 = load double, double* %6
%8 = fadd double %5, %7
- %9 = getelementptr inbounds double* %c, i64 1
+ %9 = getelementptr inbounds double, double* %c, i64 1
store double %8, double* %9
ret void
}
@@ -262,33 +262,33 @@ define void @load_reorder_double(double* nocapture %c, double* noalias nocapture
; c[3] = a[3]+b[3];
; CHECK-LABEL: load_reorder_float
-; CHECK: load <4 x float>*
+; CHECK: load <4 x float>, <4 x float>*
; CHECK: fadd <4 x float>
define void @load_reorder_float(float* nocapture %c, float* noalias nocapture readonly %a, float* noalias nocapture readonly %b){
- %1 = load float* %a
- %2 = load float* %b
+ %1 = load float, float* %a
+ %2 = load float, float* %b
%3 = fadd float %1, %2
store float %3, float* %c
- %4 = getelementptr inbounds float* %b, i64 1
- %5 = load float* %4
- %6 = getelementptr inbounds float* %a, i64 1
- %7 = load float* %6
+ %4 = getelementptr inbounds float, float* %b, i64 1
+ %5 = load float, float* %4
+ %6 = getelementptr inbounds float, float* %a, i64 1
+ %7 = load float, float* %6
%8 = fadd float %5, %7
- %9 = getelementptr inbounds float* %c, i64 1
+ %9 = getelementptr inbounds float, float* %c, i64 1
store float %8, float* %9
- %10 = getelementptr inbounds float* %a, i64 2
- %11 = load float* %10
- %12 = getelementptr inbounds float* %b, i64 2
- %13 = load float* %12
+ %10 = getelementptr inbounds float, float* %a, i64 2
+ %11 = load float, float* %10
+ %12 = getelementptr inbounds float, float* %b, i64 2
+ %13 = load float, float* %12
%14 = fadd float %11, %13
- %15 = getelementptr inbounds float* %c, i64 2
+ %15 = getelementptr inbounds float, float* %c, i64 2
store float %14, float* %15
- %16 = getelementptr inbounds float* %a, i64 3
- %17 = load float* %16
- %18 = getelementptr inbounds float* %b, i64 3
- %19 = load float* %18
+ %16 = getelementptr inbounds float, float* %a, i64 3
+ %17 = load float, float* %16
+ %18 = getelementptr inbounds float, float* %b, i64 3
+ %19 = load float, float* %18
%20 = fadd float %17, %19
- %21 = getelementptr inbounds float* %c, i64 3
+ %21 = getelementptr inbounds float, float* %c, i64 3
store float %20, float* %21
ret void
}
@@ -300,45 +300,45 @@ define void @load_reorder_float(float* nocapture %c, float* noalias nocapture re
; a[3] = (b[3]+c[3])+d[3];
; CHECK-LABEL: opcode_reorder
-; CHECK: load <4 x float>*
+; CHECK: load <4 x float>, <4 x float>*
; CHECK: fadd <4 x float>
define void @opcode_reorder(float* noalias nocapture %a, float* noalias nocapture readonly %b,
float* noalias nocapture readonly %c, float* noalias nocapture readonly %d){
- %1 = load float* %b
- %2 = load float* %c
+ %1 = load float, float* %b
+ %2 = load float, float* %c
%3 = fadd float %1, %2
- %4 = load float* %d
+ %4 = load float, float* %d
%5 = fadd float %3, %4
store float %5, float* %a
- %6 = getelementptr inbounds float* %d, i64 1
- %7 = load float* %6
- %8 = getelementptr inbounds float* %b, i64 1
- %9 = load float* %8
- %10 = getelementptr inbounds float* %c, i64 1
- %11 = load float* %10
+ %6 = getelementptr inbounds float, float* %d, i64 1
+ %7 = load float, float* %6
+ %8 = getelementptr inbounds float, float* %b, i64 1
+ %9 = load float, float* %8
+ %10 = getelementptr inbounds float, float* %c, i64 1
+ %11 = load float, float* %10
%12 = fadd float %9, %11
%13 = fadd float %7, %12
- %14 = getelementptr inbounds float* %a, i64 1
+ %14 = getelementptr inbounds float, float* %a, i64 1
store float %13, float* %14
- %15 = getelementptr inbounds float* %b, i64 2
- %16 = load float* %15
- %17 = getelementptr inbounds float* %c, i64 2
- %18 = load float* %17
+ %15 = getelementptr inbounds float, float* %b, i64 2
+ %16 = load float, float* %15
+ %17 = getelementptr inbounds float, float* %c, i64 2
+ %18 = load float, float* %17
%19 = fadd float %16, %18
- %20 = getelementptr inbounds float* %d, i64 2
- %21 = load float* %20
+ %20 = getelementptr inbounds float, float* %d, i64 2
+ %21 = load float, float* %20
%22 = fadd float %19, %21
- %23 = getelementptr inbounds float* %a, i64 2
+ %23 = getelementptr inbounds float, float* %a, i64 2
store float %22, float* %23
- %24 = getelementptr inbounds float* %b, i64 3
- %25 = load float* %24
- %26 = getelementptr inbounds float* %c, i64 3
- %27 = load float* %26
+ %24 = getelementptr inbounds float, float* %b, i64 3
+ %25 = load float, float* %24
+ %26 = getelementptr inbounds float, float* %c, i64 3
+ %27 = load float, float* %26
%28 = fadd float %25, %27
- %29 = getelementptr inbounds float* %d, i64 3
- %30 = load float* %29
+ %29 = getelementptr inbounds float, float* %d, i64 3
+ %30 = load float, float* %29
%31 = fadd float %28, %30
- %32 = getelementptr inbounds float* %a, i64 3
+ %32 = getelementptr inbounds float, float* %a, i64 3
store float %31, float* %32
ret void
}
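The rewrite applied throughout these hunks is mechanical: `load` now spells out the result type before the pointer operand, and `getelementptr` takes the pointee type as an explicit first argument. A minimal before/after sketch of the pattern (hypothetical values `%v`, `%q`, `%p`, not taken from any test above):

; old form, as removed by the '-' lines:
;   %v = load double* %p, align 8
;   %q = getelementptr inbounds double* %p, i64 1
; new form, as introduced by the '+' lines:
;   %v = load double, double* %p, align 8
;   %q = getelementptr inbounds double, double* %p, i64 1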
diff --git a/test/Transforms/SLPVectorizer/X86/opt.ll b/test/Transforms/SLPVectorizer/X86/opt.ll
index 14137c1..824e999 100644
--- a/test/Transforms/SLPVectorizer/X86/opt.ll
+++ b/test/Transforms/SLPVectorizer/X86/opt.ll
@@ -15,16 +15,16 @@ target triple = "x86_64-apple-macosx10.8.0"
define void @test1(double* %a, double* %b, double* %c) {
entry:
- %i0 = load double* %a, align 8
- %i1 = load double* %b, align 8
+ %i0 = load double, double* %a, align 8
+ %i1 = load double, double* %b, align 8
%mul = fmul double %i0, %i1
- %arrayidx3 = getelementptr inbounds double* %a, i64 1
- %i3 = load double* %arrayidx3, align 8
- %arrayidx4 = getelementptr inbounds double* %b, i64 1
- %i4 = load double* %arrayidx4, align 8
+ %arrayidx3 = getelementptr inbounds double, double* %a, i64 1
+ %i3 = load double, double* %arrayidx3, align 8
+ %arrayidx4 = getelementptr inbounds double, double* %b, i64 1
+ %i4 = load double, double* %arrayidx4, align 8
%mul5 = fmul double %i3, %i4
store double %mul, double* %c, align 8
- %arrayidx5 = getelementptr inbounds double* %c, i64 1
+ %arrayidx5 = getelementptr inbounds double, double* %c, i64 1
store double %mul5, double* %arrayidx5, align 8
ret void
}
diff --git a/test/Transforms/SLPVectorizer/X86/ordering.ll b/test/Transforms/SLPVectorizer/X86/ordering.ll
index d2ecd45..0fa72c9 100644
--- a/test/Transforms/SLPVectorizer/X86/ordering.ll
+++ b/test/Transforms/SLPVectorizer/X86/ordering.ll
@@ -5,7 +5,7 @@ target triple = "x86_64-apple-macosx10.8.0"
define void @updateModelQPFrame(i32 %m_Bits) {
entry:
- %0 = load double* undef, align 8
+ %0 = load double, double* undef, align 8
%mul = fmul double undef, %0
%mul2 = fmul double undef, %mul
%mul4 = fmul double %0, %mul2
diff --git a/test/Transforms/SLPVectorizer/X86/phi.ll b/test/Transforms/SLPVectorizer/X86/phi.ll
index 0c53b60..ef94467 100644
--- a/test/Transforms/SLPVectorizer/X86/phi.ll
+++ b/test/Transforms/SLPVectorizer/X86/phi.ll
@@ -29,17 +29,17 @@ entry:
br i1 %tobool, label %if.else, label %if.end
if.else: ; preds = %entry
- %arrayidx = getelementptr inbounds double* %A, i64 10
- %0 = load double* %arrayidx, align 8
- %arrayidx1 = getelementptr inbounds double* %A, i64 11
- %1 = load double* %arrayidx1, align 8
+ %arrayidx = getelementptr inbounds double, double* %A, i64 10
+ %0 = load double, double* %arrayidx, align 8
+ %arrayidx1 = getelementptr inbounds double, double* %A, i64 11
+ %1 = load double, double* %arrayidx1, align 8
br label %if.end
if.end: ; preds = %entry, %if.else
%A0.0 = phi double [ %0, %if.else ], [ 3.000000e+00, %entry ]
%A1.0 = phi double [ %1, %if.else ], [ 5.000000e+00, %entry ]
store double %A0.0, double* %A, align 8
- %arrayidx3 = getelementptr inbounds double* %A, i64 1
+ %arrayidx3 = getelementptr inbounds double, double* %A, i64 1
store double %A1.0, double* %arrayidx3, align 8
ret i32 undef
}
@@ -69,9 +69,9 @@ if.end: ; preds = %entry, %if.else
;CHECK: ret
define i32 @foo2(double* noalias nocapture %B, double* noalias nocapture %A, i32 %n, i32 %m) #0 {
entry:
- %arrayidx = getelementptr inbounds double* %A, i64 1
- %0 = load double* %arrayidx, align 8
- %1 = load double* %A, align 8
+ %arrayidx = getelementptr inbounds double, double* %A, i64 1
+ %0 = load double, double* %arrayidx, align 8
+ %1 = load double, double* %A, align 8
br label %for.body
for.body: ; preds = %for.body, %entry
@@ -90,7 +90,7 @@ for.body: ; preds = %for.body, %entry
for.end: ; preds = %for.body
store double %add5, double* %B, align 8
- %arrayidx7 = getelementptr inbounds double* %B, i64 1
+ %arrayidx7 = getelementptr inbounds double, double* %B, i64 1
store double %add4, double* %arrayidx7, align 8
ret i32 0
}
@@ -123,15 +123,15 @@ for.end: ; preds = %for.body
define float @foo3(float* nocapture readonly %A) #0 {
entry:
- %0 = load float* %A, align 4
- %arrayidx1 = getelementptr inbounds float* %A, i64 1
- %1 = load float* %arrayidx1, align 4
- %arrayidx2 = getelementptr inbounds float* %A, i64 2
- %2 = load float* %arrayidx2, align 4
- %arrayidx3 = getelementptr inbounds float* %A, i64 3
- %3 = load float* %arrayidx3, align 4
- %arrayidx4 = getelementptr inbounds float* %A, i64 4
- %4 = load float* %arrayidx4, align 4
+ %0 = load float, float* %A, align 4
+ %arrayidx1 = getelementptr inbounds float, float* %A, i64 1
+ %1 = load float, float* %arrayidx1, align 4
+ %arrayidx2 = getelementptr inbounds float, float* %A, i64 2
+ %2 = load float, float* %arrayidx2, align 4
+ %arrayidx3 = getelementptr inbounds float, float* %A, i64 3
+ %3 = load float, float* %arrayidx3, align 4
+ %arrayidx4 = getelementptr inbounds float, float* %A, i64 4
+ %4 = load float, float* %arrayidx4, align 4
br label %for.body
for.body: ; preds = %for.body, %entry
@@ -148,18 +148,18 @@ for.body: ; preds = %for.body, %entry
%mul10 = fmul float %5, 8.000000e+00
%add11 = fadd float %G.053, %mul10
%7 = add nsw i64 %indvars.iv, 2
- %arrayidx14 = getelementptr inbounds float* %A, i64 %7
- %8 = load float* %arrayidx14, align 4
+ %arrayidx14 = getelementptr inbounds float, float* %A, i64 %7
+ %8 = load float, float* %arrayidx14, align 4
%mul15 = fmul float %8, 9.000000e+00
%add16 = fadd float %B.054, %mul15
%indvars.iv.next = add nuw nsw i64 %indvars.iv, 3
- %arrayidx19 = getelementptr inbounds float* %A, i64 %indvars.iv.next
- %9 = load float* %arrayidx19, align 4
+ %arrayidx19 = getelementptr inbounds float, float* %A, i64 %indvars.iv.next
+ %9 = load float, float* %arrayidx19, align 4
%mul20 = fmul float %9, 1.000000e+01
%add21 = fadd float %Y.055, %mul20
%10 = add nsw i64 %indvars.iv, 4
- %arrayidx24 = getelementptr inbounds float* %A, i64 %10
- %11 = load float* %arrayidx24, align 4
+ %arrayidx24 = getelementptr inbounds float, float* %A, i64 %10
+ %11 = load float, float* %arrayidx24, align 4
%mul25 = fmul float %11, 1.100000e+01
%add26 = fadd float %P.056, %mul25
%12 = trunc i64 %indvars.iv.next to i32
@@ -215,22 +215,22 @@ define void @test(x86_fp80* %i1, x86_fp80* %i2, x86_fp80* %o) {
; We disable the vectorization of x86_fp80 for now.
entry:
- %i1.0 = load x86_fp80* %i1, align 16
- %i1.gep1 = getelementptr x86_fp80* %i1, i64 1
- %i1.1 = load x86_fp80* %i1.gep1, align 16
-; CHECK: load x86_fp80*
-; CHECK: load x86_fp80*
+ %i1.0 = load x86_fp80, x86_fp80* %i1, align 16
+ %i1.gep1 = getelementptr x86_fp80, x86_fp80* %i1, i64 1
+ %i1.1 = load x86_fp80, x86_fp80* %i1.gep1, align 16
+; CHECK: load x86_fp80, x86_fp80*
+; CHECK: load x86_fp80, x86_fp80*
; CHECK-NOT: insertelement <2 x x86_fp80>
; CHECK-NOT: insertelement <2 x x86_fp80>
br i1 undef, label %then, label %end
then:
- %i2.gep0 = getelementptr inbounds x86_fp80* %i2, i64 0
- %i2.0 = load x86_fp80* %i2.gep0, align 16
- %i2.gep1 = getelementptr inbounds x86_fp80* %i2, i64 1
- %i2.1 = load x86_fp80* %i2.gep1, align 16
-; CHECK: load x86_fp80*
-; CHECK: load x86_fp80*
+ %i2.gep0 = getelementptr inbounds x86_fp80, x86_fp80* %i2, i64 0
+ %i2.0 = load x86_fp80, x86_fp80* %i2.gep0, align 16
+ %i2.gep1 = getelementptr inbounds x86_fp80, x86_fp80* %i2, i64 1
+ %i2.1 = load x86_fp80, x86_fp80* %i2.gep1, align 16
+; CHECK: load x86_fp80, x86_fp80*
+; CHECK: load x86_fp80, x86_fp80*
; CHECK-NOT: insertelement <2 x x86_fp80>
; CHECK-NOT: insertelement <2 x x86_fp80>
br label %end
@@ -242,7 +242,7 @@ end:
; CHECK-NOT: extractelement <2 x x86_fp80>
; CHECK-NOT: extractelement <2 x x86_fp80>
store x86_fp80 %phi0, x86_fp80* %o, align 16
- %o.gep1 = getelementptr inbounds x86_fp80* %o, i64 1
+ %o.gep1 = getelementptr inbounds x86_fp80, x86_fp80* %o, i64 1
store x86_fp80 %phi1, x86_fp80* %o.gep1, align 16
ret void
}
diff --git a/test/Transforms/SLPVectorizer/X86/phi3.ll b/test/Transforms/SLPVectorizer/X86/phi3.ll
index fd8d361..6162830 100644
--- a/test/Transforms/SLPVectorizer/X86/phi3.ll
+++ b/test/Transforms/SLPVectorizer/X86/phi3.ll
@@ -12,7 +12,7 @@ declare %struct.GPar.0.16.26* @Rf_gpptr(...)
define void @Rf_GReset() {
entry:
%sub = fsub double -0.000000e+00, undef
- %0 = load double* @d, align 8
+ %0 = load double, double* @d, align 8
%sub1 = fsub double -0.000000e+00, %0
br i1 icmp eq (%struct.GPar.0.16.26* (...)* inttoptr (i64 115 to %struct.GPar.0.16.26* (...)*), %struct.GPar.0.16.26* (...)* @Rf_gpptr), label %if.then, label %if.end7
diff --git a/test/Transforms/SLPVectorizer/X86/phi_overalignedtype.ll b/test/Transforms/SLPVectorizer/X86/phi_overalignedtype.ll
index 520e672..fa08eff 100644
--- a/test/Transforms/SLPVectorizer/X86/phi_overalignedtype.ll
+++ b/test/Transforms/SLPVectorizer/X86/phi_overalignedtype.ll
@@ -12,22 +12,22 @@ define void @test(double* %i1, double* %i2, double* %o) {
; size is less than the alignment, and through various different GEP formations.
entry:
- %i1.0 = load double* %i1, align 16
- %i1.gep1 = getelementptr double* %i1, i64 1
- %i1.1 = load double* %i1.gep1, align 16
-; CHECK: load double*
-; CHECK: load double*
+ %i1.0 = load double, double* %i1, align 16
+ %i1.gep1 = getelementptr double, double* %i1, i64 1
+ %i1.1 = load double, double* %i1.gep1, align 16
+; CHECK: load double, double*
+; CHECK: load double, double*
; CHECK: insertelement <2 x double>
; CHECK: insertelement <2 x double>
br i1 undef, label %then, label %end
then:
- %i2.gep0 = getelementptr inbounds double* %i2, i64 0
- %i2.0 = load double* %i2.gep0, align 16
- %i2.gep1 = getelementptr inbounds double* %i2, i64 1
- %i2.1 = load double* %i2.gep1, align 16
-; CHECK: load double*
-; CHECK: load double*
+ %i2.gep0 = getelementptr inbounds double, double* %i2, i64 0
+ %i2.0 = load double, double* %i2.gep0, align 16
+ %i2.gep1 = getelementptr inbounds double, double* %i2, i64 1
+ %i2.1 = load double, double* %i2.gep1, align 16
+; CHECK: load double, double*
+; CHECK: load double, double*
; CHECK: insertelement <2 x double>
; CHECK: insertelement <2 x double>
br label %end
@@ -39,7 +39,7 @@ end:
; CHECK: extractelement <2 x double>
; CHECK: extractelement <2 x double>
store double %phi0, double* %o, align 16
- %o.gep1 = getelementptr inbounds double* %o, i64 1
+ %o.gep1 = getelementptr inbounds double, double* %o, i64 1
store double %phi1, double* %o.gep1, align 16
ret void
}
diff --git a/test/Transforms/SLPVectorizer/X86/powof2div.ll b/test/Transforms/SLPVectorizer/X86/powof2div.ll
index 7aa1efd..a97b870 100644
--- a/test/Transforms/SLPVectorizer/X86/powof2div.ll
+++ b/test/Transforms/SLPVectorizer/X86/powof2div.ll
@@ -4,39 +4,39 @@ target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
target triple = "x86_64-unknown-linux-gnu"
;CHECK-LABEL: @powof2div(
-;CHECK: load <4 x i32>*
+;CHECK: load <4 x i32>, <4 x i32>*
;CHECK: add nsw <4 x i32>
;CHECK: sdiv <4 x i32>
define void @powof2div(i32* noalias nocapture %a, i32* noalias nocapture readonly %b, i32* noalias nocapture readonly %c){
entry:
- %0 = load i32* %b, align 4
- %1 = load i32* %c, align 4
+ %0 = load i32, i32* %b, align 4
+ %1 = load i32, i32* %c, align 4
%add = add nsw i32 %1, %0
%div = sdiv i32 %add, 2
store i32 %div, i32* %a, align 4
- %arrayidx3 = getelementptr inbounds i32* %b, i64 1
- %2 = load i32* %arrayidx3, align 4
- %arrayidx4 = getelementptr inbounds i32* %c, i64 1
- %3 = load i32* %arrayidx4, align 4
+ %arrayidx3 = getelementptr inbounds i32, i32* %b, i64 1
+ %2 = load i32, i32* %arrayidx3, align 4
+ %arrayidx4 = getelementptr inbounds i32, i32* %c, i64 1
+ %3 = load i32, i32* %arrayidx4, align 4
%add5 = add nsw i32 %3, %2
%div6 = sdiv i32 %add5, 2
- %arrayidx7 = getelementptr inbounds i32* %a, i64 1
+ %arrayidx7 = getelementptr inbounds i32, i32* %a, i64 1
store i32 %div6, i32* %arrayidx7, align 4
- %arrayidx8 = getelementptr inbounds i32* %b, i64 2
- %4 = load i32* %arrayidx8, align 4
- %arrayidx9 = getelementptr inbounds i32* %c, i64 2
- %5 = load i32* %arrayidx9, align 4
+ %arrayidx8 = getelementptr inbounds i32, i32* %b, i64 2
+ %4 = load i32, i32* %arrayidx8, align 4
+ %arrayidx9 = getelementptr inbounds i32, i32* %c, i64 2
+ %5 = load i32, i32* %arrayidx9, align 4
%add10 = add nsw i32 %5, %4
%div11 = sdiv i32 %add10, 2
- %arrayidx12 = getelementptr inbounds i32* %a, i64 2
+ %arrayidx12 = getelementptr inbounds i32, i32* %a, i64 2
store i32 %div11, i32* %arrayidx12, align 4
- %arrayidx13 = getelementptr inbounds i32* %b, i64 3
- %6 = load i32* %arrayidx13, align 4
- %arrayidx14 = getelementptr inbounds i32* %c, i64 3
- %7 = load i32* %arrayidx14, align 4
+ %arrayidx13 = getelementptr inbounds i32, i32* %b, i64 3
+ %6 = load i32, i32* %arrayidx13, align 4
+ %arrayidx14 = getelementptr inbounds i32, i32* %c, i64 3
+ %7 = load i32, i32* %arrayidx14, align 4
%add15 = add nsw i32 %7, %6
%div16 = sdiv i32 %add15, 2
- %arrayidx17 = getelementptr inbounds i32* %a, i64 3
+ %arrayidx17 = getelementptr inbounds i32, i32* %a, i64 3
store i32 %div16, i32* %arrayidx17, align 4
ret void
}
diff --git a/test/Transforms/SLPVectorizer/X86/pr16628.ll b/test/Transforms/SLPVectorizer/X86/pr16628.ll
index 3f9d775..c22ed34 100644
--- a/test/Transforms/SLPVectorizer/X86/pr16628.ll
+++ b/test/Transforms/SLPVectorizer/X86/pr16628.ll
@@ -11,10 +11,10 @@ target triple = "x86_64-apple-macosx10.9.0"
define void @f() {
entry:
%call = tail call i32 (...)* @g()
- %0 = load i32* @c, align 4
+ %0 = load i32, i32* @c, align 4
%lnot = icmp eq i32 %0, 0
%lnot.ext = zext i1 %lnot to i32
- %1 = load i16* @a, align 2
+ %1 = load i16, i16* @a, align 2
%lnot2 = icmp eq i16 %1, 0
%lnot.ext3 = zext i1 %lnot2 to i32
%or = or i32 %lnot.ext3, %lnot.ext
diff --git a/test/Transforms/SLPVectorizer/X86/pr16899.ll b/test/Transforms/SLPVectorizer/X86/pr16899.ll
index c642f3c..0de14ec 100644
--- a/test/Transforms/SLPVectorizer/X86/pr16899.ll
+++ b/test/Transforms/SLPVectorizer/X86/pr16899.ll
@@ -7,10 +7,10 @@ target triple = "i386--netbsd"
; Function Attrs: noreturn nounwind readonly
define i32 @fn1() #0 {
entry:
- %0 = load i32** @a, align 4, !tbaa !4
- %1 = load i32* %0, align 4, !tbaa !5
- %arrayidx1 = getelementptr inbounds i32* %0, i32 1
- %2 = load i32* %arrayidx1, align 4, !tbaa !5
+ %0 = load i32*, i32** @a, align 4, !tbaa !4
+ %1 = load i32, i32* %0, align 4, !tbaa !5
+ %arrayidx1 = getelementptr inbounds i32, i32* %0, i32 1
+ %2 = load i32, i32* %arrayidx1, align 4, !tbaa !5
br label %do.body
do.body: ; preds = %do.body, %entry
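The same rule covers loads of pointer values, as in the load of `@a` above: both the loaded type and the pointer-to-pointer operand type are spelled out. A short sketch under the same assumptions (`%pp` is a hypothetical i32**):

;   %p = load i32*, i32** %pp, align 8   ; loaded value has type i32*
;   %v = load i32, i32* %p, align 4      ; loaded value has type i32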
diff --git a/test/Transforms/SLPVectorizer/X86/pr19657.ll b/test/Transforms/SLPVectorizer/X86/pr19657.ll
index 9352308..a687aec 100644
--- a/test/Transforms/SLPVectorizer/X86/pr19657.ll
+++ b/test/Transforms/SLPVectorizer/X86/pr19657.ll
@@ -3,7 +3,7 @@
target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
target triple = "x86_64-unknown-linux-gnu"
-;CHECK: load <2 x double>*
+;CHECK: load <2 x double>, <2 x double>*
;CHECK: fadd <2 x double>
;CHECK: store <2 x double>
@@ -11,61 +11,61 @@ target triple = "x86_64-unknown-linux-gnu"
define void @foo(double* %x) #0 {
%1 = alloca double*, align 8
store double* %x, double** %1, align 8
- %2 = load double** %1, align 8
- %3 = getelementptr inbounds double* %2, i64 0
- %4 = load double* %3, align 8
- %5 = load double** %1, align 8
- %6 = getelementptr inbounds double* %5, i64 0
- %7 = load double* %6, align 8
+ %2 = load double*, double** %1, align 8
+ %3 = getelementptr inbounds double, double* %2, i64 0
+ %4 = load double, double* %3, align 8
+ %5 = load double*, double** %1, align 8
+ %6 = getelementptr inbounds double, double* %5, i64 0
+ %7 = load double, double* %6, align 8
%8 = fadd double %4, %7
- %9 = load double** %1, align 8
- %10 = getelementptr inbounds double* %9, i64 0
- %11 = load double* %10, align 8
+ %9 = load double*, double** %1, align 8
+ %10 = getelementptr inbounds double, double* %9, i64 0
+ %11 = load double, double* %10, align 8
%12 = fadd double %8, %11
- %13 = load double** %1, align 8
- %14 = getelementptr inbounds double* %13, i64 0
+ %13 = load double*, double** %1, align 8
+ %14 = getelementptr inbounds double, double* %13, i64 0
store double %12, double* %14, align 8
- %15 = load double** %1, align 8
- %16 = getelementptr inbounds double* %15, i64 1
- %17 = load double* %16, align 8
- %18 = load double** %1, align 8
- %19 = getelementptr inbounds double* %18, i64 1
- %20 = load double* %19, align 8
+ %15 = load double*, double** %1, align 8
+ %16 = getelementptr inbounds double, double* %15, i64 1
+ %17 = load double, double* %16, align 8
+ %18 = load double*, double** %1, align 8
+ %19 = getelementptr inbounds double, double* %18, i64 1
+ %20 = load double, double* %19, align 8
%21 = fadd double %17, %20
- %22 = load double** %1, align 8
- %23 = getelementptr inbounds double* %22, i64 1
- %24 = load double* %23, align 8
+ %22 = load double*, double** %1, align 8
+ %23 = getelementptr inbounds double, double* %22, i64 1
+ %24 = load double, double* %23, align 8
%25 = fadd double %21, %24
- %26 = load double** %1, align 8
- %27 = getelementptr inbounds double* %26, i64 1
+ %26 = load double*, double** %1, align 8
+ %27 = getelementptr inbounds double, double* %26, i64 1
store double %25, double* %27, align 8
- %28 = load double** %1, align 8
- %29 = getelementptr inbounds double* %28, i64 2
- %30 = load double* %29, align 8
- %31 = load double** %1, align 8
- %32 = getelementptr inbounds double* %31, i64 2
- %33 = load double* %32, align 8
+ %28 = load double*, double** %1, align 8
+ %29 = getelementptr inbounds double, double* %28, i64 2
+ %30 = load double, double* %29, align 8
+ %31 = load double*, double** %1, align 8
+ %32 = getelementptr inbounds double, double* %31, i64 2
+ %33 = load double, double* %32, align 8
%34 = fadd double %30, %33
- %35 = load double** %1, align 8
- %36 = getelementptr inbounds double* %35, i64 2
- %37 = load double* %36, align 8
+ %35 = load double*, double** %1, align 8
+ %36 = getelementptr inbounds double, double* %35, i64 2
+ %37 = load double, double* %36, align 8
%38 = fadd double %34, %37
- %39 = load double** %1, align 8
- %40 = getelementptr inbounds double* %39, i64 2
+ %39 = load double*, double** %1, align 8
+ %40 = getelementptr inbounds double, double* %39, i64 2
store double %38, double* %40, align 8
- %41 = load double** %1, align 8
- %42 = getelementptr inbounds double* %41, i64 3
- %43 = load double* %42, align 8
- %44 = load double** %1, align 8
- %45 = getelementptr inbounds double* %44, i64 3
- %46 = load double* %45, align 8
+ %41 = load double*, double** %1, align 8
+ %42 = getelementptr inbounds double, double* %41, i64 3
+ %43 = load double, double* %42, align 8
+ %44 = load double*, double** %1, align 8
+ %45 = getelementptr inbounds double, double* %44, i64 3
+ %46 = load double, double* %45, align 8
%47 = fadd double %43, %46
- %48 = load double** %1, align 8
- %49 = getelementptr inbounds double* %48, i64 3
- %50 = load double* %49, align 8
+ %48 = load double*, double** %1, align 8
+ %49 = getelementptr inbounds double, double* %48, i64 3
+ %50 = load double, double* %49, align 8
%51 = fadd double %47, %50
- %52 = load double** %1, align 8
- %53 = getelementptr inbounds double* %52, i64 3
+ %52 = load double*, double** %1, align 8
+ %53 = getelementptr inbounds double, double* %52, i64 3
store double %51, double* %53, align 8
ret void
}
diff --git a/test/Transforms/SLPVectorizer/X86/propagate_ir_flags.ll b/test/Transforms/SLPVectorizer/X86/propagate_ir_flags.ll
index 3843ef7..a3b0c8f 100644
--- a/test/Transforms/SLPVectorizer/X86/propagate_ir_flags.ll
+++ b/test/Transforms/SLPVectorizer/X86/propagate_ir_flags.ll
@@ -10,15 +10,15 @@ target triple = "x86_64-unknown-unknown"
; CHECK-LABEL: @exact(
; CHECK: lshr exact <4 x i32>
define void @exact(i32* %x) {
- %idx1 = getelementptr inbounds i32* %x, i64 0
- %idx2 = getelementptr inbounds i32* %x, i64 1
- %idx3 = getelementptr inbounds i32* %x, i64 2
- %idx4 = getelementptr inbounds i32* %x, i64 3
+ %idx1 = getelementptr inbounds i32, i32* %x, i64 0
+ %idx2 = getelementptr inbounds i32, i32* %x, i64 1
+ %idx3 = getelementptr inbounds i32, i32* %x, i64 2
+ %idx4 = getelementptr inbounds i32, i32* %x, i64 3
- %load1 = load i32* %idx1, align 4
- %load2 = load i32* %idx2, align 4
- %load3 = load i32* %idx3, align 4
- %load4 = load i32* %idx4, align 4
+ %load1 = load i32, i32* %idx1, align 4
+ %load2 = load i32, i32* %idx2, align 4
+ %load3 = load i32, i32* %idx3, align 4
+ %load4 = load i32, i32* %idx4, align 4
%op1 = lshr exact i32 %load1, 1
%op2 = lshr exact i32 %load2, 1
@@ -36,15 +36,15 @@ define void @exact(i32* %x) {
; CHECK-LABEL: @not_exact(
; CHECK: lshr <4 x i32>
define void @not_exact(i32* %x) {
- %idx1 = getelementptr inbounds i32* %x, i64 0
- %idx2 = getelementptr inbounds i32* %x, i64 1
- %idx3 = getelementptr inbounds i32* %x, i64 2
- %idx4 = getelementptr inbounds i32* %x, i64 3
+ %idx1 = getelementptr inbounds i32, i32* %x, i64 0
+ %idx2 = getelementptr inbounds i32, i32* %x, i64 1
+ %idx3 = getelementptr inbounds i32, i32* %x, i64 2
+ %idx4 = getelementptr inbounds i32, i32* %x, i64 3
- %load1 = load i32* %idx1, align 4
- %load2 = load i32* %idx2, align 4
- %load3 = load i32* %idx3, align 4
- %load4 = load i32* %idx4, align 4
+ %load1 = load i32, i32* %idx1, align 4
+ %load2 = load i32, i32* %idx2, align 4
+ %load3 = load i32, i32* %idx3, align 4
+ %load4 = load i32, i32* %idx4, align 4
%op1 = lshr exact i32 %load1, 1
%op2 = lshr i32 %load2, 1
@@ -62,15 +62,15 @@ define void @not_exact(i32* %x) {
; CHECK-LABEL: @nsw(
; CHECK: add nsw <4 x i32>
define void @nsw(i32* %x) {
- %idx1 = getelementptr inbounds i32* %x, i64 0
- %idx2 = getelementptr inbounds i32* %x, i64 1
- %idx3 = getelementptr inbounds i32* %x, i64 2
- %idx4 = getelementptr inbounds i32* %x, i64 3
+ %idx1 = getelementptr inbounds i32, i32* %x, i64 0
+ %idx2 = getelementptr inbounds i32, i32* %x, i64 1
+ %idx3 = getelementptr inbounds i32, i32* %x, i64 2
+ %idx4 = getelementptr inbounds i32, i32* %x, i64 3
- %load1 = load i32* %idx1, align 4
- %load2 = load i32* %idx2, align 4
- %load3 = load i32* %idx3, align 4
- %load4 = load i32* %idx4, align 4
+ %load1 = load i32, i32* %idx1, align 4
+ %load2 = load i32, i32* %idx2, align 4
+ %load3 = load i32, i32* %idx3, align 4
+ %load4 = load i32, i32* %idx4, align 4
%op1 = add nsw i32 %load1, 1
%op2 = add nsw i32 %load2, 1
@@ -88,15 +88,15 @@ define void @nsw(i32* %x) {
; CHECK-LABEL: @not_nsw(
; CHECK: add <4 x i32>
define void @not_nsw(i32* %x) {
- %idx1 = getelementptr inbounds i32* %x, i64 0
- %idx2 = getelementptr inbounds i32* %x, i64 1
- %idx3 = getelementptr inbounds i32* %x, i64 2
- %idx4 = getelementptr inbounds i32* %x, i64 3
+ %idx1 = getelementptr inbounds i32, i32* %x, i64 0
+ %idx2 = getelementptr inbounds i32, i32* %x, i64 1
+ %idx3 = getelementptr inbounds i32, i32* %x, i64 2
+ %idx4 = getelementptr inbounds i32, i32* %x, i64 3
- %load1 = load i32* %idx1, align 4
- %load2 = load i32* %idx2, align 4
- %load3 = load i32* %idx3, align 4
- %load4 = load i32* %idx4, align 4
+ %load1 = load i32, i32* %idx1, align 4
+ %load2 = load i32, i32* %idx2, align 4
+ %load3 = load i32, i32* %idx3, align 4
+ %load4 = load i32, i32* %idx4, align 4
%op1 = add nsw i32 %load1, 1
%op2 = add nsw i32 %load2, 1
@@ -114,15 +114,15 @@ define void @not_nsw(i32* %x) {
; CHECK-LABEL: @nuw(
; CHECK: add nuw <4 x i32>
define void @nuw(i32* %x) {
- %idx1 = getelementptr inbounds i32* %x, i64 0
- %idx2 = getelementptr inbounds i32* %x, i64 1
- %idx3 = getelementptr inbounds i32* %x, i64 2
- %idx4 = getelementptr inbounds i32* %x, i64 3
+ %idx1 = getelementptr inbounds i32, i32* %x, i64 0
+ %idx2 = getelementptr inbounds i32, i32* %x, i64 1
+ %idx3 = getelementptr inbounds i32, i32* %x, i64 2
+ %idx4 = getelementptr inbounds i32, i32* %x, i64 3
- %load1 = load i32* %idx1, align 4
- %load2 = load i32* %idx2, align 4
- %load3 = load i32* %idx3, align 4
- %load4 = load i32* %idx4, align 4
+ %load1 = load i32, i32* %idx1, align 4
+ %load2 = load i32, i32* %idx2, align 4
+ %load3 = load i32, i32* %idx3, align 4
+ %load4 = load i32, i32* %idx4, align 4
%op1 = add nuw i32 %load1, 1
%op2 = add nuw i32 %load2, 1
@@ -140,15 +140,15 @@ define void @nuw(i32* %x) {
; CHECK-LABEL: @not_nuw(
; CHECK: add <4 x i32>
define void @not_nuw(i32* %x) {
- %idx1 = getelementptr inbounds i32* %x, i64 0
- %idx2 = getelementptr inbounds i32* %x, i64 1
- %idx3 = getelementptr inbounds i32* %x, i64 2
- %idx4 = getelementptr inbounds i32* %x, i64 3
+ %idx1 = getelementptr inbounds i32, i32* %x, i64 0
+ %idx2 = getelementptr inbounds i32, i32* %x, i64 1
+ %idx3 = getelementptr inbounds i32, i32* %x, i64 2
+ %idx4 = getelementptr inbounds i32, i32* %x, i64 3
- %load1 = load i32* %idx1, align 4
- %load2 = load i32* %idx2, align 4
- %load3 = load i32* %idx3, align 4
- %load4 = load i32* %idx4, align 4
+ %load1 = load i32, i32* %idx1, align 4
+ %load2 = load i32, i32* %idx2, align 4
+ %load3 = load i32, i32* %idx3, align 4
+ %load4 = load i32, i32* %idx4, align 4
%op1 = add nuw i32 %load1, 1
%op2 = add i32 %load2, 1
@@ -166,15 +166,15 @@ define void @not_nuw(i32* %x) {
; CHECK-LABEL: @nnan(
; CHECK: fadd nnan <4 x float>
define void @nnan(float* %x) {
- %idx1 = getelementptr inbounds float* %x, i64 0
- %idx2 = getelementptr inbounds float* %x, i64 1
- %idx3 = getelementptr inbounds float* %x, i64 2
- %idx4 = getelementptr inbounds float* %x, i64 3
+ %idx1 = getelementptr inbounds float, float* %x, i64 0
+ %idx2 = getelementptr inbounds float, float* %x, i64 1
+ %idx3 = getelementptr inbounds float, float* %x, i64 2
+ %idx4 = getelementptr inbounds float, float* %x, i64 3
- %load1 = load float* %idx1, align 4
- %load2 = load float* %idx2, align 4
- %load3 = load float* %idx3, align 4
- %load4 = load float* %idx4, align 4
+ %load1 = load float, float* %idx1, align 4
+ %load2 = load float, float* %idx2, align 4
+ %load3 = load float, float* %idx3, align 4
+ %load4 = load float, float* %idx4, align 4
%op1 = fadd fast nnan float %load1, 1.0
%op2 = fadd nnan ninf float %load2, 1.0
@@ -192,15 +192,15 @@ define void @nnan(float* %x) {
; CHECK-LABEL: @not_nnan(
; CHECK: fadd <4 x float>
define void @not_nnan(float* %x) {
- %idx1 = getelementptr inbounds float* %x, i64 0
- %idx2 = getelementptr inbounds float* %x, i64 1
- %idx3 = getelementptr inbounds float* %x, i64 2
- %idx4 = getelementptr inbounds float* %x, i64 3
+ %idx1 = getelementptr inbounds float, float* %x, i64 0
+ %idx2 = getelementptr inbounds float, float* %x, i64 1
+ %idx3 = getelementptr inbounds float, float* %x, i64 2
+ %idx4 = getelementptr inbounds float, float* %x, i64 3
- %load1 = load float* %idx1, align 4
- %load2 = load float* %idx2, align 4
- %load3 = load float* %idx3, align 4
- %load4 = load float* %idx4, align 4
+ %load1 = load float, float* %idx1, align 4
+ %load2 = load float, float* %idx2, align 4
+ %load3 = load float, float* %idx3, align 4
+ %load4 = load float, float* %idx4, align 4
%op1 = fadd nnan float %load1, 1.0
%op2 = fadd ninf float %load2, 1.0
@@ -218,15 +218,15 @@ define void @not_nnan(float* %x) {
; CHECK-LABEL: @only_fast(
; CHECK: fadd fast <4 x float>
define void @only_fast(float* %x) {
- %idx1 = getelementptr inbounds float* %x, i64 0
- %idx2 = getelementptr inbounds float* %x, i64 1
- %idx3 = getelementptr inbounds float* %x, i64 2
- %idx4 = getelementptr inbounds float* %x, i64 3
+ %idx1 = getelementptr inbounds float, float* %x, i64 0
+ %idx2 = getelementptr inbounds float, float* %x, i64 1
+ %idx3 = getelementptr inbounds float, float* %x, i64 2
+ %idx4 = getelementptr inbounds float, float* %x, i64 3
- %load1 = load float* %idx1, align 4
- %load2 = load float* %idx2, align 4
- %load3 = load float* %idx3, align 4
- %load4 = load float* %idx4, align 4
+ %load1 = load float, float* %idx1, align 4
+ %load2 = load float, float* %idx2, align 4
+ %load3 = load float, float* %idx3, align 4
+ %load4 = load float, float* %idx4, align 4
%op1 = fadd fast nnan float %load1, 1.0
%op2 = fadd fast nnan ninf float %load2, 1.0
@@ -244,15 +244,15 @@ define void @only_fast(float* %x) {
; CHECK-LABEL: @only_arcp(
; CHECK: fadd arcp <4 x float>
define void @only_arcp(float* %x) {
- %idx1 = getelementptr inbounds float* %x, i64 0
- %idx2 = getelementptr inbounds float* %x, i64 1
- %idx3 = getelementptr inbounds float* %x, i64 2
- %idx4 = getelementptr inbounds float* %x, i64 3
+ %idx1 = getelementptr inbounds float, float* %x, i64 0
+ %idx2 = getelementptr inbounds float, float* %x, i64 1
+ %idx3 = getelementptr inbounds float, float* %x, i64 2
+ %idx4 = getelementptr inbounds float, float* %x, i64 3
- %load1 = load float* %idx1, align 4
- %load2 = load float* %idx2, align 4
- %load3 = load float* %idx3, align 4
- %load4 = load float* %idx4, align 4
+ %load1 = load float, float* %idx1, align 4
+ %load2 = load float, float* %idx2, align 4
+ %load3 = load float, float* %idx3, align 4
+ %load4 = load float, float* %idx4, align 4
%op1 = fadd fast float %load1, 1.0
%op2 = fadd fast float %load2, 1.0
@@ -271,15 +271,15 @@ define void @only_arcp(float* %x) {
; CHECK: add nsw <4 x i32>
; CHECK: sub nsw <4 x i32>
define void @addsub_all_nsw(i32* %x) {
- %idx1 = getelementptr inbounds i32* %x, i64 0
- %idx2 = getelementptr inbounds i32* %x, i64 1
- %idx3 = getelementptr inbounds i32* %x, i64 2
- %idx4 = getelementptr inbounds i32* %x, i64 3
+ %idx1 = getelementptr inbounds i32, i32* %x, i64 0
+ %idx2 = getelementptr inbounds i32, i32* %x, i64 1
+ %idx3 = getelementptr inbounds i32, i32* %x, i64 2
+ %idx4 = getelementptr inbounds i32, i32* %x, i64 3
- %load1 = load i32* %idx1, align 4
- %load2 = load i32* %idx2, align 4
- %load3 = load i32* %idx3, align 4
- %load4 = load i32* %idx4, align 4
+ %load1 = load i32, i32* %idx1, align 4
+ %load2 = load i32, i32* %idx2, align 4
+ %load3 = load i32, i32* %idx3, align 4
+ %load4 = load i32, i32* %idx4, align 4
%op1 = add nsw i32 %load1, 1
%op2 = sub nsw i32 %load2, 1
@@ -298,15 +298,15 @@ define void @addsub_all_nsw(i32* %x) {
; CHECK: add nsw <4 x i32>
; CHECK: sub <4 x i32>
define void @addsub_some_nsw(i32* %x) {
- %idx1 = getelementptr inbounds i32* %x, i64 0
- %idx2 = getelementptr inbounds i32* %x, i64 1
- %idx3 = getelementptr inbounds i32* %x, i64 2
- %idx4 = getelementptr inbounds i32* %x, i64 3
+ %idx1 = getelementptr inbounds i32, i32* %x, i64 0
+ %idx2 = getelementptr inbounds i32, i32* %x, i64 1
+ %idx3 = getelementptr inbounds i32, i32* %x, i64 2
+ %idx4 = getelementptr inbounds i32, i32* %x, i64 3
- %load1 = load i32* %idx1, align 4
- %load2 = load i32* %idx2, align 4
- %load3 = load i32* %idx3, align 4
- %load4 = load i32* %idx4, align 4
+ %load1 = load i32, i32* %idx1, align 4
+ %load2 = load i32, i32* %idx2, align 4
+ %load3 = load i32, i32* %idx3, align 4
+ %load4 = load i32, i32* %idx4, align 4
%op1 = add nsw i32 %load1, 1
%op2 = sub nsw i32 %load2, 1
@@ -325,15 +325,15 @@ define void @addsub_some_nsw(i32* %x) {
; CHECK: add <4 x i32>
; CHECK: sub <4 x i32>
define void @addsub_no_nsw(i32* %x) {
- %idx1 = getelementptr inbounds i32* %x, i64 0
- %idx2 = getelementptr inbounds i32* %x, i64 1
- %idx3 = getelementptr inbounds i32* %x, i64 2
- %idx4 = getelementptr inbounds i32* %x, i64 3
-
- %load1 = load i32* %idx1, align 4
- %load2 = load i32* %idx2, align 4
- %load3 = load i32* %idx3, align 4
- %load4 = load i32* %idx4, align 4
+ %idx1 = getelementptr inbounds i32, i32* %x, i64 0
+ %idx2 = getelementptr inbounds i32, i32* %x, i64 1
+ %idx3 = getelementptr inbounds i32, i32* %x, i64 2
+ %idx4 = getelementptr inbounds i32, i32* %x, i64 3
+
+ %load1 = load i32, i32* %idx1, align 4
+ %load2 = load i32, i32* %idx2, align 4
+ %load3 = load i32, i32* %idx3, align 4
+ %load4 = load i32, i32* %idx4, align 4
%op1 = add i32 %load1, 1
%op2 = sub nsw i32 %load2, 1
diff --git a/test/Transforms/SLPVectorizer/X86/reduction.ll b/test/Transforms/SLPVectorizer/X86/reduction.ll
index 70b7c3a..4c5f126 100644
--- a/test/Transforms/SLPVectorizer/X86/reduction.ll
+++ b/test/Transforms/SLPVectorizer/X86/reduction.ll
@@ -23,12 +23,12 @@ for.body: ; preds = %entry, %for.body
%i.015 = phi i32 [ %inc, %for.body ], [ 0, %entry ]
%sum.014 = phi double [ %add6, %for.body ], [ 0.000000e+00, %entry ]
%mul = shl nsw i32 %i.015, 1
- %arrayidx = getelementptr inbounds double* %A, i32 %mul
- %0 = load double* %arrayidx, align 4
+ %arrayidx = getelementptr inbounds double, double* %A, i32 %mul
+ %0 = load double, double* %arrayidx, align 4
%mul1 = fmul double %0, 7.000000e+00
%add12 = or i32 %mul, 1
- %arrayidx3 = getelementptr inbounds double* %A, i32 %add12
- %1 = load double* %arrayidx3, align 4
+ %arrayidx3 = getelementptr inbounds double, double* %A, i32 %add12
+ %1 = load double, double* %arrayidx3, align 4
%mul4 = fmul double %1, 7.000000e+00
%add5 = fadd double %mul1, %mul4
%add6 = fadd double %sum.014, %add5
diff --git a/test/Transforms/SLPVectorizer/X86/reduction2.ll b/test/Transforms/SLPVectorizer/X86/reduction2.ll
index f21e86c..507a61a 100644
--- a/test/Transforms/SLPVectorizer/X86/reduction2.ll
+++ b/test/Transforms/SLPVectorizer/X86/reduction2.ll
@@ -13,13 +13,13 @@ define double @foo(double* nocapture %D) {
%i.02 = phi i32 [ 0, %0 ], [ %10, %1 ]
%sum.01 = phi double [ 0.000000e+00, %0 ], [ %9, %1 ]
%2 = shl nsw i32 %i.02, 1
- %3 = getelementptr inbounds double* %D, i32 %2
- %4 = load double* %3, align 4
+ %3 = getelementptr inbounds double, double* %D, i32 %2
+ %4 = load double, double* %3, align 4
%A4 = fmul double %4, %4
%A42 = fmul double %A4, %A4
%5 = or i32 %2, 1
- %6 = getelementptr inbounds double* %D, i32 %5
- %7 = load double* %6, align 4
+ %6 = getelementptr inbounds double, double* %D, i32 %5
+ %7 = load double, double* %6, align 4
%A7 = fmul double %7, %7
%A72 = fmul double %A7, %A7
%8 = fadd double %A42, %A72
diff --git a/test/Transforms/SLPVectorizer/X86/return.ll b/test/Transforms/SLPVectorizer/X86/return.ll
index 1a81c23..e2b2637 100644
--- a/test/Transforms/SLPVectorizer/X86/return.ll
+++ b/test/Transforms/SLPVectorizer/X86/return.ll
@@ -13,17 +13,17 @@ target triple = "x86_64--linux-gnu"
; }
; CHECK-LABEL: @return1
-; CHECK: %0 = load <2 x double>*
-; CHECK: %1 = load <2 x double>*
+; CHECK: %0 = load <2 x double>, <2 x double>*
+; CHECK: %1 = load <2 x double>, <2 x double>*
; CHECK: %2 = fadd <2 x double>
define double @return1() {
entry:
- %a0 = load double* getelementptr inbounds ([4 x double]* @a, i32 0, i32 0), align 8
- %b0 = load double* getelementptr inbounds ([4 x double]* @b, i32 0, i32 0), align 8
+ %a0 = load double, double* getelementptr inbounds ([4 x double], [4 x double]* @a, i32 0, i32 0), align 8
+ %b0 = load double, double* getelementptr inbounds ([4 x double], [4 x double]* @b, i32 0, i32 0), align 8
%add0 = fadd double %a0, %b0
- %a1 = load double* getelementptr inbounds ([4 x double]* @a, i32 0, i32 1), align 8
- %b1 = load double* getelementptr inbounds ([4 x double]* @b, i32 0, i32 1), align 8
+ %a1 = load double, double* getelementptr inbounds ([4 x double], [4 x double]* @a, i32 0, i32 1), align 8
+ %b1 = load double, double* getelementptr inbounds ([4 x double], [4 x double]* @b, i32 0, i32 1), align 8
%add1 = fadd double %a1, %b1
%add2 = fadd double %add0, %add1
ret double %add2
@@ -34,20 +34,20 @@ entry:
; }
; CHECK-LABEL: @return2
-; CHECK: %1 = load <2 x double>*
-; CHECK: %3 = load <2 x double>* %2
+; CHECK: %1 = load <2 x double>, <2 x double>*
+; CHECK: %3 = load <2 x double>, <2 x double>* %2
; CHECK: %4 = fadd <2 x double> %1, %3
define double @return2(double* nocapture readonly %x) {
entry:
- %x0 = load double* %x, align 4
- %arrayidx1 = getelementptr inbounds double* %x, i32 2
- %x2 = load double* %arrayidx1, align 4
+ %x0 = load double, double* %x, align 4
+ %arrayidx1 = getelementptr inbounds double, double* %x, i32 2
+ %x2 = load double, double* %arrayidx1, align 4
%add3 = fadd double %x0, %x2
- %arrayidx2 = getelementptr inbounds double* %x, i32 1
- %x1 = load double* %arrayidx2, align 4
- %arrayidx3 = getelementptr inbounds double* %x, i32 3
- %x3 = load double* %arrayidx3, align 4
+ %arrayidx2 = getelementptr inbounds double, double* %x, i32 1
+ %x1 = load double, double* %arrayidx2, align 4
+ %arrayidx3 = getelementptr inbounds double, double* %x, i32 3
+ %x3 = load double, double* %arrayidx3, align 4
%add4 = fadd double %x1, %x3
%add5 = fadd double %add3, %add4
ret double %add5
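Because FileCheck matches the printed IR textually, the `; CHECK:` patterns in these tests have to be updated in lockstep with the new syntax; a stale pattern such as `load <2 x double>*` would no longer match the vectorized output. A sketch of the corresponding pattern change, mirroring the return.ll hunks above:

;   before: ; CHECK: %0 = load <2 x double>*
;   after:  ; CHECK: %0 = load <2 x double>, <2 x double>*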
diff --git a/test/Transforms/SLPVectorizer/X86/rgb_phi.ll b/test/Transforms/SLPVectorizer/X86/rgb_phi.ll
index 6aea5d3..0bdb7da 100644
--- a/test/Transforms/SLPVectorizer/X86/rgb_phi.ll
+++ b/test/Transforms/SLPVectorizer/X86/rgb_phi.ll
@@ -33,11 +33,11 @@ target triple = "i386-apple-macosx10.9.0"
define float @foo(float* nocapture readonly %A) {
entry:
- %0 = load float* %A, align 4
- %arrayidx1 = getelementptr inbounds float* %A, i64 1
- %1 = load float* %arrayidx1, align 4
- %arrayidx2 = getelementptr inbounds float* %A, i64 2
- %2 = load float* %arrayidx2, align 4
+ %0 = load float, float* %A, align 4
+ %arrayidx1 = getelementptr inbounds float, float* %A, i64 1
+ %1 = load float, float* %arrayidx1, align 4
+ %arrayidx2 = getelementptr inbounds float, float* %A, i64 2
+ %2 = load float, float* %arrayidx2, align 4
br label %for.body
for.body: ; preds = %for.body.for.body_crit_edge, %entry
@@ -49,13 +49,13 @@ for.body: ; preds = %for.body.for.body_c
%mul = fmul float %3, 7.000000e+00
%add4 = fadd float %R.030, %mul
%4 = add nsw i64 %indvars.iv, 1
- %arrayidx7 = getelementptr inbounds float* %A, i64 %4
- %5 = load float* %arrayidx7, align 4
+ %arrayidx7 = getelementptr inbounds float, float* %A, i64 %4
+ %5 = load float, float* %arrayidx7, align 4
%mul8 = fmul float %5, 8.000000e+00
%add9 = fadd float %G.031, %mul8
%6 = add nsw i64 %indvars.iv, 2
- %arrayidx12 = getelementptr inbounds float* %A, i64 %6
- %7 = load float* %arrayidx12, align 4
+ %arrayidx12 = getelementptr inbounds float, float* %A, i64 %6
+ %7 = load float, float* %arrayidx12, align 4
%mul13 = fmul float %7, 9.000000e+00
%add14 = fadd float %B.032, %mul13
%indvars.iv.next = add i64 %indvars.iv, 3
@@ -64,8 +64,8 @@ for.body: ; preds = %for.body.for.body_c
br i1 %cmp, label %for.body.for.body_crit_edge, label %for.end
for.body.for.body_crit_edge: ; preds = %for.body
- %arrayidx3.phi.trans.insert = getelementptr inbounds float* %A, i64 %indvars.iv.next
- %.pre = load float* %arrayidx3.phi.trans.insert, align 4
+ %arrayidx3.phi.trans.insert = getelementptr inbounds float, float* %A, i64 %indvars.iv.next
+ %.pre = load float, float* %arrayidx3.phi.trans.insert, align 4
br label %for.body
for.end: ; preds = %for.body
diff --git a/test/Transforms/SLPVectorizer/X86/saxpy.ll b/test/Transforms/SLPVectorizer/X86/saxpy.ll
index 4b39d46..a9ca093 100644
--- a/test/Transforms/SLPVectorizer/X86/saxpy.ll
+++ b/test/Transforms/SLPVectorizer/X86/saxpy.ll
@@ -9,35 +9,35 @@ target triple = "x86_64-apple-macosx10.8.0"
;CHECK: ret
define void @SAXPY(i32* noalias nocapture %x, i32* noalias nocapture %y, i32 %a, i64 %i) {
- %1 = getelementptr inbounds i32* %x, i64 %i
- %2 = load i32* %1, align 4
+ %1 = getelementptr inbounds i32, i32* %x, i64 %i
+ %2 = load i32, i32* %1, align 4
%3 = mul nsw i32 %2, %a
- %4 = getelementptr inbounds i32* %y, i64 %i
- %5 = load i32* %4, align 4
+ %4 = getelementptr inbounds i32, i32* %y, i64 %i
+ %5 = load i32, i32* %4, align 4
%6 = add nsw i32 %3, %5
store i32 %6, i32* %1, align 4
%7 = add i64 %i, 1
- %8 = getelementptr inbounds i32* %x, i64 %7
- %9 = load i32* %8, align 4
+ %8 = getelementptr inbounds i32, i32* %x, i64 %7
+ %9 = load i32, i32* %8, align 4
%10 = mul nsw i32 %9, %a
- %11 = getelementptr inbounds i32* %y, i64 %7
- %12 = load i32* %11, align 4
+ %11 = getelementptr inbounds i32, i32* %y, i64 %7
+ %12 = load i32, i32* %11, align 4
%13 = add nsw i32 %10, %12
store i32 %13, i32* %8, align 4
%14 = add i64 %i, 2
- %15 = getelementptr inbounds i32* %x, i64 %14
- %16 = load i32* %15, align 4
+ %15 = getelementptr inbounds i32, i32* %x, i64 %14
+ %16 = load i32, i32* %15, align 4
%17 = mul nsw i32 %16, %a
- %18 = getelementptr inbounds i32* %y, i64 %14
- %19 = load i32* %18, align 4
+ %18 = getelementptr inbounds i32, i32* %y, i64 %14
+ %19 = load i32, i32* %18, align 4
%20 = add nsw i32 %17, %19
store i32 %20, i32* %15, align 4
%21 = add i64 %i, 3
- %22 = getelementptr inbounds i32* %x, i64 %21
- %23 = load i32* %22, align 4
+ %22 = getelementptr inbounds i32, i32* %x, i64 %21
+ %23 = load i32, i32* %22, align 4
%24 = mul nsw i32 %23, %a
- %25 = getelementptr inbounds i32* %y, i64 %21
- %26 = load i32* %25, align 4
+ %25 = getelementptr inbounds i32, i32* %y, i64 %21
+ %26 = load i32, i32* %25, align 4
%27 = add nsw i32 %24, %26
store i32 %27, i32* %22, align 4
ret void
@@ -46,15 +46,15 @@ define void @SAXPY(i32* noalias nocapture %x, i32* noalias nocapture %y, i32 %a,
; Make sure we don't crash on this one.
define void @SAXPY_crash(i32* noalias nocapture %x, i32* noalias nocapture %y, i64 %i) {
%1 = add i64 %i, 1
- %2 = getelementptr inbounds i32* %x, i64 %1
- %3 = getelementptr inbounds i32* %y, i64 %1
- %4 = load i32* %3, align 4
+ %2 = getelementptr inbounds i32, i32* %x, i64 %1
+ %3 = getelementptr inbounds i32, i32* %y, i64 %1
+ %4 = load i32, i32* %3, align 4
%5 = add nsw i32 undef, %4
store i32 %5, i32* %2, align 4
%6 = add i64 %i, 2
- %7 = getelementptr inbounds i32* %x, i64 %6
- %8 = getelementptr inbounds i32* %y, i64 %6
- %9 = load i32* %8, align 4
+ %7 = getelementptr inbounds i32, i32* %x, i64 %6
+ %8 = getelementptr inbounds i32, i32* %y, i64 %6
+ %9 = load i32, i32* %8, align 4
%10 = add nsw i32 undef, %9
store i32 %10, i32* %7, align 4
ret void
diff --git a/test/Transforms/SLPVectorizer/X86/scheduling.ll b/test/Transforms/SLPVectorizer/X86/scheduling.ll
index 3b3bd80..33bdc6a 100644
--- a/test/Transforms/SLPVectorizer/X86/scheduling.ll
+++ b/test/Transforms/SLPVectorizer/X86/scheduling.ll
@@ -24,43 +24,43 @@ for.body: ; preds = %for.body, %entry
%indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
%a.088 = phi i32 [ 0, %entry ], [ %add52, %for.body ]
%1 = shl i64 %indvars.iv, 3
- %arrayidx = getelementptr inbounds i32* %diff, i64 %1
- %2 = load i32* %arrayidx, align 4
+ %arrayidx = getelementptr inbounds i32, i32* %diff, i64 %1
+ %2 = load i32, i32* %arrayidx, align 4
%3 = or i64 %1, 4
- %arrayidx2 = getelementptr inbounds i32* %diff, i64 %3
- %4 = load i32* %arrayidx2, align 4
+ %arrayidx2 = getelementptr inbounds i32, i32* %diff, i64 %3
+ %4 = load i32, i32* %arrayidx2, align 4
%add3 = add nsw i32 %4, %2
- %arrayidx6 = getelementptr inbounds [8 x [8 x i32]]* %m2, i64 0, i64 %indvars.iv, i64 0
+ %arrayidx6 = getelementptr inbounds [8 x [8 x i32]], [8 x [8 x i32]]* %m2, i64 0, i64 %indvars.iv, i64 0
store i32 %add3, i32* %arrayidx6, align 16
%add10 = add nsw i32 %add3, %a.088
%5 = or i64 %1, 1
- %arrayidx13 = getelementptr inbounds i32* %diff, i64 %5
- %6 = load i32* %arrayidx13, align 4
+ %arrayidx13 = getelementptr inbounds i32, i32* %diff, i64 %5
+ %6 = load i32, i32* %arrayidx13, align 4
%7 = or i64 %1, 5
- %arrayidx16 = getelementptr inbounds i32* %diff, i64 %7
- %8 = load i32* %arrayidx16, align 4
+ %arrayidx16 = getelementptr inbounds i32, i32* %diff, i64 %7
+ %8 = load i32, i32* %arrayidx16, align 4
%add17 = add nsw i32 %8, %6
- %arrayidx20 = getelementptr inbounds [8 x [8 x i32]]* %m2, i64 0, i64 %indvars.iv, i64 1
+ %arrayidx20 = getelementptr inbounds [8 x [8 x i32]], [8 x [8 x i32]]* %m2, i64 0, i64 %indvars.iv, i64 1
store i32 %add17, i32* %arrayidx20, align 4
%add24 = add nsw i32 %add10, %add17
%9 = or i64 %1, 2
- %arrayidx27 = getelementptr inbounds i32* %diff, i64 %9
- %10 = load i32* %arrayidx27, align 4
+ %arrayidx27 = getelementptr inbounds i32, i32* %diff, i64 %9
+ %10 = load i32, i32* %arrayidx27, align 4
%11 = or i64 %1, 6
- %arrayidx30 = getelementptr inbounds i32* %diff, i64 %11
- %12 = load i32* %arrayidx30, align 4
+ %arrayidx30 = getelementptr inbounds i32, i32* %diff, i64 %11
+ %12 = load i32, i32* %arrayidx30, align 4
%add31 = add nsw i32 %12, %10
- %arrayidx34 = getelementptr inbounds [8 x [8 x i32]]* %m2, i64 0, i64 %indvars.iv, i64 2
+ %arrayidx34 = getelementptr inbounds [8 x [8 x i32]], [8 x [8 x i32]]* %m2, i64 0, i64 %indvars.iv, i64 2
store i32 %add31, i32* %arrayidx34, align 8
%add38 = add nsw i32 %add24, %add31
%13 = or i64 %1, 3
- %arrayidx41 = getelementptr inbounds i32* %diff, i64 %13
- %14 = load i32* %arrayidx41, align 4
+ %arrayidx41 = getelementptr inbounds i32, i32* %diff, i64 %13
+ %14 = load i32, i32* %arrayidx41, align 4
%15 = or i64 %1, 7
- %arrayidx44 = getelementptr inbounds i32* %diff, i64 %15
- %16 = load i32* %arrayidx44, align 4
+ %arrayidx44 = getelementptr inbounds i32, i32* %diff, i64 %15
+ %16 = load i32, i32* %arrayidx44, align 4
%add45 = add nsw i32 %16, %14
- %arrayidx48 = getelementptr inbounds [8 x [8 x i32]]* %m2, i64 0, i64 %indvars.iv, i64 3
+ %arrayidx48 = getelementptr inbounds [8 x [8 x i32]], [8 x [8 x i32]]* %m2, i64 0, i64 %indvars.iv, i64 3
store i32 %add45, i32* %arrayidx48, align 4
%add52 = add nsw i32 %add38, %add45
%indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
@@ -68,7 +68,7 @@ for.body: ; preds = %for.body, %entry
br i1 %exitcond, label %for.end, label %for.body
for.end: ; preds = %for.body
- %arraydecay = getelementptr inbounds [8 x [8 x i32]]* %m2, i64 0, i64 0
+ %arraydecay = getelementptr inbounds [8 x [8 x i32]], [8 x [8 x i32]]* %m2, i64 0, i64 0
call void @ff([8 x i32]* %arraydecay) #1
ret i32 %add52
}
diff --git a/test/Transforms/SLPVectorizer/X86/simple-loop.ll b/test/Transforms/SLPVectorizer/X86/simple-loop.ll
index 0111b94..c9bb884 100644
--- a/test/Transforms/SLPVectorizer/X86/simple-loop.ll
+++ b/test/Transforms/SLPVectorizer/X86/simple-loop.ll
@@ -11,18 +11,18 @@ define i32 @rollable(i32* noalias nocapture %in, i32* noalias nocapture %out, i6
.lr.ph: ; preds = %0, %.lr.ph
%i.019 = phi i64 [ %26, %.lr.ph ], [ 0, %0 ]
%2 = shl i64 %i.019, 2
- %3 = getelementptr inbounds i32* %in, i64 %2
+ %3 = getelementptr inbounds i32, i32* %in, i64 %2
;CHECK:load <4 x i32>
- %4 = load i32* %3, align 4
+ %4 = load i32, i32* %3, align 4
%5 = or i64 %2, 1
- %6 = getelementptr inbounds i32* %in, i64 %5
- %7 = load i32* %6, align 4
+ %6 = getelementptr inbounds i32, i32* %in, i64 %5
+ %7 = load i32, i32* %6, align 4
%8 = or i64 %2, 2
- %9 = getelementptr inbounds i32* %in, i64 %8
- %10 = load i32* %9, align 4
+ %9 = getelementptr inbounds i32, i32* %in, i64 %8
+ %10 = load i32, i32* %9, align 4
%11 = or i64 %2, 3
- %12 = getelementptr inbounds i32* %in, i64 %11
- %13 = load i32* %12, align 4
+ %12 = getelementptr inbounds i32, i32* %in, i64 %11
+ %13 = load i32, i32* %12, align 4
;CHECK:mul <4 x i32>
%14 = mul i32 %4, 7
;CHECK:add <4 x i32>
@@ -33,14 +33,14 @@ define i32 @rollable(i32* noalias nocapture %in, i32* noalias nocapture %out, i6
%19 = add i32 %18, 21
%20 = mul i32 %13, 7
%21 = add i32 %20, 28
- %22 = getelementptr inbounds i32* %out, i64 %2
+ %22 = getelementptr inbounds i32, i32* %out, i64 %2
;CHECK:store <4 x i32>
store i32 %15, i32* %22, align 4
- %23 = getelementptr inbounds i32* %out, i64 %5
+ %23 = getelementptr inbounds i32, i32* %out, i64 %5
store i32 %17, i32* %23, align 4
- %24 = getelementptr inbounds i32* %out, i64 %8
+ %24 = getelementptr inbounds i32, i32* %out, i64 %8
store i32 %19, i32* %24, align 4
- %25 = getelementptr inbounds i32* %out, i64 %11
+ %25 = getelementptr inbounds i32, i32* %out, i64 %11
store i32 %21, i32* %25, align 4
%26 = add i64 %i.019, 1
%exitcond = icmp eq i64 %26, %n
@@ -61,17 +61,17 @@ define i32 @unrollable(i32* %in, i32* %out, i64 %n) nounwind ssp uwtable {
.lr.ph: ; preds = %0, %.lr.ph
%i.019 = phi i64 [ %26, %.lr.ph ], [ 0, %0 ]
%2 = shl i64 %i.019, 2
- %3 = getelementptr inbounds i32* %in, i64 %2
- %4 = load i32* %3, align 4
+ %3 = getelementptr inbounds i32, i32* %in, i64 %2
+ %4 = load i32, i32* %3, align 4
%5 = or i64 %2, 1
- %6 = getelementptr inbounds i32* %in, i64 %5
- %7 = load i32* %6, align 4
+ %6 = getelementptr inbounds i32, i32* %in, i64 %5
+ %7 = load i32, i32* %6, align 4
%8 = or i64 %2, 2
- %9 = getelementptr inbounds i32* %in, i64 %8
- %10 = load i32* %9, align 4
+ %9 = getelementptr inbounds i32, i32* %in, i64 %8
+ %10 = load i32, i32* %9, align 4
%11 = or i64 %2, 3
- %12 = getelementptr inbounds i32* %in, i64 %11
- %13 = load i32* %12, align 4
+ %12 = getelementptr inbounds i32, i32* %in, i64 %11
+ %13 = load i32, i32* %12, align 4
%14 = mul i32 %4, 7
%15 = add i32 %14, 7
%16 = mul i32 %7, 7
@@ -80,14 +80,14 @@ define i32 @unrollable(i32* %in, i32* %out, i64 %n) nounwind ssp uwtable {
%19 = add i32 %18, 21
%20 = mul i32 %13, 7
%21 = add i32 %20, 28
- %22 = getelementptr inbounds i32* %out, i64 %2
+ %22 = getelementptr inbounds i32, i32* %out, i64 %2
store i32 %15, i32* %22, align 4
- %23 = getelementptr inbounds i32* %out, i64 %5
+ %23 = getelementptr inbounds i32, i32* %out, i64 %5
store i32 %17, i32* %23, align 4
%barrier = call i32 @goo(i32 0) ; <---------------- memory barrier.
- %24 = getelementptr inbounds i32* %out, i64 %8
+ %24 = getelementptr inbounds i32, i32* %out, i64 %8
store i32 %19, i32* %24, align 4
- %25 = getelementptr inbounds i32* %out, i64 %11
+ %25 = getelementptr inbounds i32, i32* %out, i64 %11
store i32 %21, i32* %25, align 4
%26 = add i64 %i.019, 1
%exitcond = icmp eq i64 %26, %n
diff --git a/test/Transforms/SLPVectorizer/X86/simplebb.ll b/test/Transforms/SLPVectorizer/X86/simplebb.ll
index 7d682e5..a5d9ad9 100644
--- a/test/Transforms/SLPVectorizer/X86/simplebb.ll
+++ b/test/Transforms/SLPVectorizer/X86/simplebb.ll
@@ -9,16 +9,16 @@ target triple = "x86_64-apple-macosx10.8.0"
; CHECK: ret
define void @test1(double* %a, double* %b, double* %c) {
entry:
- %i0 = load double* %a, align 8
- %i1 = load double* %b, align 8
+ %i0 = load double, double* %a, align 8
+ %i1 = load double, double* %b, align 8
%mul = fmul double %i0, %i1
- %arrayidx3 = getelementptr inbounds double* %a, i64 1
- %i3 = load double* %arrayidx3, align 8
- %arrayidx4 = getelementptr inbounds double* %b, i64 1
- %i4 = load double* %arrayidx4, align 8
+ %arrayidx3 = getelementptr inbounds double, double* %a, i64 1
+ %i3 = load double, double* %arrayidx3, align 8
+ %arrayidx4 = getelementptr inbounds double, double* %b, i64 1
+ %i4 = load double, double* %arrayidx4, align 8
%mul5 = fmul double %i3, %i4
store double %mul, double* %c, align 8
- %arrayidx5 = getelementptr inbounds double* %c, i64 1
+ %arrayidx5 = getelementptr inbounds double, double* %c, i64 1
store double %mul5, double* %arrayidx5, align 8
ret void
}
@@ -29,17 +29,17 @@ entry:
; CHECK: ret
define void @test2(double* %a, double* %b, i8* %e) {
entry:
- %i0 = load double* %a, align 8
- %i1 = load double* %b, align 8
+ %i0 = load double, double* %a, align 8
+ %i1 = load double, double* %b, align 8
%mul = fmul double %i0, %i1
- %arrayidx3 = getelementptr inbounds double* %a, i64 1
- %i3 = load double* %arrayidx3, align 8
- %arrayidx4 = getelementptr inbounds double* %b, i64 1
- %i4 = load double* %arrayidx4, align 8
+ %arrayidx3 = getelementptr inbounds double, double* %a, i64 1
+ %i3 = load double, double* %arrayidx3, align 8
+ %arrayidx4 = getelementptr inbounds double, double* %b, i64 1
+ %i4 = load double, double* %arrayidx4, align 8
%mul5 = fmul double %i3, %i4
%c = bitcast i8* %e to double*
store double %mul, double* %c, align 8
- %carrayidx5 = getelementptr inbounds i8* %e, i64 8
+ %carrayidx5 = getelementptr inbounds i8, i8* %e, i64 8
%arrayidx5 = bitcast i8* %carrayidx5 to double*
store double %mul5, double* %arrayidx5, align 8
ret void
@@ -52,16 +52,16 @@ entry:
; CHECK: ret
define void @test_volatile_load(double* %a, double* %b, double* %c) {
entry:
- %i0 = load volatile double* %a, align 8
- %i1 = load volatile double* %b, align 8
+ %i0 = load volatile double, double* %a, align 8
+ %i1 = load volatile double, double* %b, align 8
%mul = fmul double %i0, %i1
- %arrayidx3 = getelementptr inbounds double* %a, i64 1
- %i3 = load double* %arrayidx3, align 8
- %arrayidx4 = getelementptr inbounds double* %b, i64 1
- %i4 = load double* %arrayidx4, align 8
+ %arrayidx3 = getelementptr inbounds double, double* %a, i64 1
+ %i3 = load double, double* %arrayidx3, align 8
+ %arrayidx4 = getelementptr inbounds double, double* %b, i64 1
+ %i4 = load double, double* %arrayidx4, align 8
%mul5 = fmul double %i3, %i4
store double %mul, double* %c, align 8
- %arrayidx5 = getelementptr inbounds double* %c, i64 1
+ %arrayidx5 = getelementptr inbounds double, double* %c, i64 1
store double %mul5, double* %arrayidx5, align 8
ret void
}
@@ -72,16 +72,16 @@ entry:
; CHECK: ret
define void @test_volatile_store(double* %a, double* %b, double* %c) {
entry:
- %i0 = load double* %a, align 8
- %i1 = load double* %b, align 8
+ %i0 = load double, double* %a, align 8
+ %i1 = load double, double* %b, align 8
%mul = fmul double %i0, %i1
- %arrayidx3 = getelementptr inbounds double* %a, i64 1
- %i3 = load double* %arrayidx3, align 8
- %arrayidx4 = getelementptr inbounds double* %b, i64 1
- %i4 = load double* %arrayidx4, align 8
+ %arrayidx3 = getelementptr inbounds double, double* %a, i64 1
+ %i3 = load double, double* %arrayidx3, align 8
+ %arrayidx4 = getelementptr inbounds double, double* %b, i64 1
+ %i4 = load double, double* %arrayidx4, align 8
%mul5 = fmul double %i3, %i4
store volatile double %mul, double* %c, align 8
- %arrayidx5 = getelementptr inbounds double* %c, i64 1
+ %arrayidx5 = getelementptr inbounds double, double* %c, i64 1
store volatile double %mul5, double* %arrayidx5, align 8
ret void
}
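As the volatile variants above show, instruction qualifiers are untouched by the rewrite: `volatile` keeps its position after the `load` keyword and the explicit result type slots in after it, while `store` needs no change at all because its value operand already carries a type (store lines appear only as context throughout this diff). A sketch with a hypothetical %p:

    %v = load volatile double, double* %p, align 8
    store volatile double %v, double* %p, align 8
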
diff --git a/test/Transforms/SLPVectorizer/X86/tiny-tree.ll b/test/Transforms/SLPVectorizer/X86/tiny-tree.ll
index 10c3130..6c93222 100644
--- a/test/Transforms/SLPVectorizer/X86/tiny-tree.ll
+++ b/test/Transforms/SLPVectorizer/X86/tiny-tree.ll
@@ -17,14 +17,14 @@ for.body: ; preds = %entry, %for.body
%i.015 = phi i64 [ %inc, %for.body ], [ 0, %entry ]
%dst.addr.014 = phi double* [ %add.ptr4, %for.body ], [ %dst, %entry ]
%src.addr.013 = phi double* [ %add.ptr, %for.body ], [ %src, %entry ]
- %0 = load double* %src.addr.013, align 8
+ %0 = load double, double* %src.addr.013, align 8
store double %0, double* %dst.addr.014, align 8
- %arrayidx2 = getelementptr inbounds double* %src.addr.013, i64 1
- %1 = load double* %arrayidx2, align 8
- %arrayidx3 = getelementptr inbounds double* %dst.addr.014, i64 1
+ %arrayidx2 = getelementptr inbounds double, double* %src.addr.013, i64 1
+ %1 = load double, double* %arrayidx2, align 8
+ %arrayidx3 = getelementptr inbounds double, double* %dst.addr.014, i64 1
store double %1, double* %arrayidx3, align 8
- %add.ptr = getelementptr inbounds double* %src.addr.013, i64 %i.015
- %add.ptr4 = getelementptr inbounds double* %dst.addr.014, i64 %i.015
+ %add.ptr = getelementptr inbounds double, double* %src.addr.013, i64 %i.015
+ %add.ptr4 = getelementptr inbounds double, double* %dst.addr.014, i64 %i.015
%inc = add i64 %i.015, 1
%exitcond = icmp eq i64 %inc, %count
br i1 %exitcond, label %for.end, label %for.body
@@ -47,22 +47,22 @@ for.body: ; preds = %entry, %for.body
%i.023 = phi i64 [ %inc, %for.body ], [ 0, %entry ]
%dst.addr.022 = phi float* [ %add.ptr8, %for.body ], [ %dst, %entry ]
%src.addr.021 = phi float* [ %add.ptr, %for.body ], [ %src, %entry ]
- %0 = load float* %src.addr.021, align 4
+ %0 = load float, float* %src.addr.021, align 4
store float %0, float* %dst.addr.022, align 4
- %arrayidx2 = getelementptr inbounds float* %src.addr.021, i64 1
- %1 = load float* %arrayidx2, align 4
- %arrayidx3 = getelementptr inbounds float* %dst.addr.022, i64 1
+ %arrayidx2 = getelementptr inbounds float, float* %src.addr.021, i64 1
+ %1 = load float, float* %arrayidx2, align 4
+ %arrayidx3 = getelementptr inbounds float, float* %dst.addr.022, i64 1
store float %1, float* %arrayidx3, align 4
- %arrayidx4 = getelementptr inbounds float* %src.addr.021, i64 2
- %2 = load float* %arrayidx4, align 4
- %arrayidx5 = getelementptr inbounds float* %dst.addr.022, i64 2
+ %arrayidx4 = getelementptr inbounds float, float* %src.addr.021, i64 2
+ %2 = load float, float* %arrayidx4, align 4
+ %arrayidx5 = getelementptr inbounds float, float* %dst.addr.022, i64 2
store float %2, float* %arrayidx5, align 4
- %arrayidx6 = getelementptr inbounds float* %src.addr.021, i64 3
- %3 = load float* %arrayidx6, align 4
- %arrayidx7 = getelementptr inbounds float* %dst.addr.022, i64 3
+ %arrayidx6 = getelementptr inbounds float, float* %src.addr.021, i64 3
+ %3 = load float, float* %arrayidx6, align 4
+ %arrayidx7 = getelementptr inbounds float, float* %dst.addr.022, i64 3
store float %3, float* %arrayidx7, align 4
- %add.ptr = getelementptr inbounds float* %src.addr.021, i64 %i.023
- %add.ptr8 = getelementptr inbounds float* %dst.addr.022, i64 %i.023
+ %add.ptr = getelementptr inbounds float, float* %src.addr.021, i64 %i.023
+ %add.ptr8 = getelementptr inbounds float, float* %dst.addr.022, i64 %i.023
%inc = add i64 %i.023, 1
%exitcond = icmp eq i64 %inc, %count
br i1 %exitcond, label %for.end, label %for.body
@@ -85,14 +85,14 @@ for.body: ; preds = %entry, %for.body
%i.015 = phi i64 [ %inc, %for.body ], [ 0, %entry ]
%dst.addr.014 = phi double* [ %add.ptr4, %for.body ], [ %dst, %entry ]
%src.addr.013 = phi double* [ %add.ptr, %for.body ], [ %src, %entry ]
- %0 = load double* %src.addr.013, align 8
+ %0 = load double, double* %src.addr.013, align 8
store double %0, double* %dst.addr.014, align 8
- %arrayidx2 = getelementptr inbounds double* %src.addr.013, i64 2
- %1 = load double* %arrayidx2, align 8
- %arrayidx3 = getelementptr inbounds double* %dst.addr.014, i64 1
+ %arrayidx2 = getelementptr inbounds double, double* %src.addr.013, i64 2
+ %1 = load double, double* %arrayidx2, align 8
+ %arrayidx3 = getelementptr inbounds double, double* %dst.addr.014, i64 1
store double %1, double* %arrayidx3, align 8
- %add.ptr = getelementptr inbounds double* %src.addr.013, i64 %i.015
- %add.ptr4 = getelementptr inbounds double* %dst.addr.014, i64 %i.015
+ %add.ptr = getelementptr inbounds double, double* %src.addr.013, i64 %i.015
+ %add.ptr4 = getelementptr inbounds double, double* %dst.addr.014, i64 %i.015
%inc = add i64 %i.015, 1
%exitcond = icmp eq i64 %inc, %count
br i1 %exitcond, label %for.end, label %for.body
@@ -115,22 +115,22 @@ for.body: ; preds = %entry, %for.body
%i.023 = phi i64 [ %inc, %for.body ], [ 0, %entry ]
%dst.addr.022 = phi float* [ %add.ptr8, %for.body ], [ %dst, %entry ]
%src.addr.021 = phi float* [ %add.ptr, %for.body ], [ %src, %entry ]
- %0 = load float* %src.addr.021, align 4
+ %0 = load float, float* %src.addr.021, align 4
store float %0, float* %dst.addr.022, align 4
- %arrayidx2 = getelementptr inbounds float* %src.addr.021, i64 4
- %1 = load float* %arrayidx2, align 4
- %arrayidx3 = getelementptr inbounds float* %dst.addr.022, i64 1
+ %arrayidx2 = getelementptr inbounds float, float* %src.addr.021, i64 4
+ %1 = load float, float* %arrayidx2, align 4
+ %arrayidx3 = getelementptr inbounds float, float* %dst.addr.022, i64 1
store float %1, float* %arrayidx3, align 4
- %arrayidx4 = getelementptr inbounds float* %src.addr.021, i64 2
- %2 = load float* %arrayidx4, align 4
- %arrayidx5 = getelementptr inbounds float* %dst.addr.022, i64 2
+ %arrayidx4 = getelementptr inbounds float, float* %src.addr.021, i64 2
+ %2 = load float, float* %arrayidx4, align 4
+ %arrayidx5 = getelementptr inbounds float, float* %dst.addr.022, i64 2
store float %2, float* %arrayidx5, align 4
- %arrayidx6 = getelementptr inbounds float* %src.addr.021, i64 3
- %3 = load float* %arrayidx6, align 4
- %arrayidx7 = getelementptr inbounds float* %dst.addr.022, i64 3
+ %arrayidx6 = getelementptr inbounds float, float* %src.addr.021, i64 3
+ %3 = load float, float* %arrayidx6, align 4
+ %arrayidx7 = getelementptr inbounds float, float* %dst.addr.022, i64 3
store float %3, float* %arrayidx7, align 4
- %add.ptr = getelementptr inbounds float* %src.addr.021, i64 %i.023
- %add.ptr8 = getelementptr inbounds float* %dst.addr.022, i64 %i.023
+ %add.ptr = getelementptr inbounds float, float* %src.addr.021, i64 %i.023
+ %add.ptr8 = getelementptr inbounds float, float* %dst.addr.022, i64 %i.023
%inc = add i64 %i.023, 1
%exitcond = icmp eq i64 %inc, %count
br i1 %exitcond, label %for.end, label %for.body
@@ -143,13 +143,13 @@ for.end: ; preds = %for.body, %entry
; CHECK-LABEL: store_splat
; CHECK: store <4 x float>
define void @store_splat(float*, float) {
- %3 = getelementptr inbounds float* %0, i64 0
+ %3 = getelementptr inbounds float, float* %0, i64 0
store float %1, float* %3, align 4
- %4 = getelementptr inbounds float* %0, i64 1
+ %4 = getelementptr inbounds float, float* %0, i64 1
store float %1, float* %4, align 4
- %5 = getelementptr inbounds float* %0, i64 2
+ %5 = getelementptr inbounds float, float* %0, i64 2
store float %1, float* %5, align 4
- %6 = getelementptr inbounds float* %0, i64 3
+ %6 = getelementptr inbounds float, float* %0, i64 3
store float %1, float* %6, align 4
ret void
}
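The tiny-tree tests also confirm that the transformation is independent of the index operands: whether the `getelementptr` index is a constant (`i64 1`) or a loop-carried value (`i64 %i.015`), only the leading source element type is added. A sketch with hypothetical %base and %i:

    %p.const = getelementptr inbounds float, float* %base, i64 3
    %p.var = getelementptr inbounds float, float* %base, i64 %i
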
diff --git a/test/Transforms/SLPVectorizer/X86/unreachable.ll b/test/Transforms/SLPVectorizer/X86/unreachable.ll
index 8d60957..f29f69d 100644
--- a/test/Transforms/SLPVectorizer/X86/unreachable.ll
+++ b/test/Transforms/SLPVectorizer/X86/unreachable.ll
@@ -11,16 +11,16 @@ entry:
br label %bb2

bb1: ; an unreachable block
- %t3 = getelementptr inbounds i32* %x, i64 4
- %t4 = load i32* %t3, align 4
- %t5 = getelementptr inbounds i32* %x, i64 5
- %t6 = load i32* %t5, align 4
+ %t3 = getelementptr inbounds i32, i32* %x, i64 4
+ %t4 = load i32, i32* %t3, align 4
+ %t5 = getelementptr inbounds i32, i32* %x, i64 5
+ %t6 = load i32, i32* %t5, align 4
%bad = fadd float %bad, 0.000000e+00 ; <- an instruction with self dependency,
; but legal in unreachable code
- %t7 = getelementptr inbounds i32* %x, i64 6
- %t8 = load i32* %t7, align 4
- %t9 = getelementptr inbounds i32* %x, i64 7
- %t10 = load i32* %t9, align 4
+ %t7 = getelementptr inbounds i32, i32* %x, i64 6
+ %t8 = load i32, i32* %t7, align 4
+ %t9 = getelementptr inbounds i32, i32* %x, i64 7
+ %t10 = load i32, i32* %t9, align 4
br label %bb2

bb2:
@@ -29,11 +29,11 @@ bb2:
%t3.0 = phi i32 [ %t8, %bb1 ], [ 2, %entry ]
%t4.0 = phi i32 [ %t10, %bb1 ], [ 2, %entry ]
store i32 %t1.0, i32* %x, align 4
- %t12 = getelementptr inbounds i32* %x, i64 1
+ %t12 = getelementptr inbounds i32, i32* %x, i64 1
store i32 %t2.0, i32* %t12, align 4
- %t13 = getelementptr inbounds i32* %x, i64 2
+ %t13 = getelementptr inbounds i32, i32* %x, i64 2
store i32 %t3.0, i32* %t13, align 4
- %t14 = getelementptr inbounds i32* %x, i64 3
+ %t14 = getelementptr inbounds i32, i32* %x, i64 3
store i32 %t4.0, i32* %t14, align 4
ret void
}