author     Stephen Hines <srhines@google.com>    2014-12-01 14:51:49 -0800
committer  Stephen Hines <srhines@google.com>    2014-12-02 16:08:10 -0800
commit     37ed9c199ca639565f6ce88105f9e39e898d82d0 (patch)
tree       8fb36d3910e3ee4c4e1b7422f4f017108efc52f5 /test/Transforms/SLPVectorizer/X86
parent     d2327b22152ced7bc46dc629fc908959e8a52d03 (diff)
download   external_llvm-37ed9c199ca639565f6ce88105f9e39e898d82d0.zip
           external_llvm-37ed9c199ca639565f6ce88105f9e39e898d82d0.tar.gz
           external_llvm-37ed9c199ca639565f6ce88105f9e39e898d82d0.tar.bz2
Update aosp/master LLVM for rebase to r222494.
Change-Id: Ic787f5e0124df789bd26f3f24680f45e678eef2d
Diffstat (limited to 'test/Transforms/SLPVectorizer/X86')
21 files changed, 859 insertions, 43 deletions
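Every test touched or added by this change follows the same FileCheck pattern: a RUN line pipes the file's IR through opt with -slp-vectorizer (usually together with -basicaa, a target triple, and an -mcpu), and CHECK lines match the vectorized output. As a minimal sketch of that pattern (a hypothetical test written for illustration, not part of this commit), packing a pair of adjacent double operations would be checked like this:

; RUN: opt < %s -basicaa -slp-vectorizer -S -mtriple=x86_64-unknown-linux-gnu -mcpu=corei7-avx | FileCheck %s

; Illustrative only: two adjacent loads, fadds, and consecutive stores are
; expected to be packed into <2 x double> operations.
; CHECK-LABEL: @pair_add(
; CHECK: load <2 x double>
; CHECK: fadd <2 x double>
; CHECK: store <2 x double>
define void @pair_add(double* noalias %a, double* noalias %b) {
entry:
  %a0 = load double* %a, align 8
  %a1.ptr = getelementptr inbounds double* %a, i64 1
  %a1 = load double* %a1.ptr, align 8
  %b0 = load double* %b, align 8
  %b1.ptr = getelementptr inbounds double* %b, i64 1
  %b1 = load double* %b1.ptr, align 8
  %add0 = fadd double %a0, %b0
  %add1 = fadd double %a1, %b1
  store double %add0, double* %a, align 8
  store double %add1, double* %a1.ptr, align 8
  ret void
}

The diffs below use the same ingredients: most of the edits adjust CHECK lines for IR flags (nsw, fast) that the vectorizer now propagates, fix alignment expectations, update debug-info metadata syntax, and add regression tests for scheduler and vectorizeTree crashes.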
diff --git a/test/Transforms/SLPVectorizer/X86/addsub.ll b/test/Transforms/SLPVectorizer/X86/addsub.ll index 8303bc8..174d400 100644 --- a/test/Transforms/SLPVectorizer/X86/addsub.ll +++ b/test/Transforms/SLPVectorizer/X86/addsub.ll @@ -12,9 +12,9 @@ target triple = "x86_64-unknown-linux-gnu" @fa = common global [4 x float] zeroinitializer, align 16 ; CHECK-LABEL: @addsub -; CHECK: %5 = add <4 x i32> %3, %4 -; CHECK: %6 = add <4 x i32> %2, %5 -; CHECK: %7 = sub <4 x i32> %2, %5 +; CHECK: %5 = add nsw <4 x i32> %3, %4 +; CHECK: %6 = add nsw <4 x i32> %2, %5 +; CHECK: %7 = sub nsw <4 x i32> %2, %5 ; CHECK: %8 = shufflevector <4 x i32> %6, <4 x i32> %7, <4 x i32> <i32 0, i32 5, i32 2, i32 7> ; Function Attrs: nounwind uwtable @@ -56,9 +56,9 @@ entry: } ; CHECK-LABEL: @subadd -; CHECK: %5 = add <4 x i32> %3, %4 -; CHECK: %6 = sub <4 x i32> %2, %5 -; CHECK: %7 = add <4 x i32> %2, %5 +; CHECK: %5 = add nsw <4 x i32> %3, %4 +; CHECK: %6 = sub nsw <4 x i32> %2, %5 +; CHECK: %7 = add nsw <4 x i32> %2, %5 ; CHECK: %8 = shufflevector <4 x i32> %6, <4 x i32> %7, <4 x i32> <i32 0, i32 5, i32 2, i32 7> ; Function Attrs: nounwind uwtable diff --git a/test/Transforms/SLPVectorizer/X86/align.ll b/test/Transforms/SLPVectorizer/X86/align.ll index f586573..ce80620 100644 --- a/test/Transforms/SLPVectorizer/X86/align.ll +++ b/test/Transforms/SLPVectorizer/X86/align.ll @@ -4,7 +4,7 @@ target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f3 target triple = "x86_64-apple-macosx10.8.0" ; Simple 3-pair chain with loads and stores -; CHECK: test1 +; CHECK-LABEL: @test1 define void @test1(double* %a, double* %b, double* %c) { entry: %agg.tmp.i.i.sroa.0 = alloca [3 x double], align 16 @@ -25,3 +25,31 @@ entry: ; CHECK: ret ret void } + +; Float has 4 byte abi alignment on x86_64. We must use the alignmnet of the +; value being loaded/stored not the alignment of the pointer type. 
+ +; CHECK-LABEL: @test2 +; CHECK-NOT: align 8 +; CHECK: load <4 x float>{{.*}}, align 4 +; CHECK: store <4 x float>{{.*}}, align 4 +; CHECK: ret + +define void @test2(float * %a, float * %b) { +entry: + %l0 = load float* %a + %a1 = getelementptr inbounds float* %a, i64 1 + %l1 = load float* %a1 + %a2 = getelementptr inbounds float* %a, i64 2 + %l2 = load float* %a2 + %a3 = getelementptr inbounds float* %a, i64 3 + %l3 = load float* %a3 + store float %l0, float* %b + %b1 = getelementptr inbounds float* %b, i64 1 + store float %l1, float* %b1 + %b2 = getelementptr inbounds float* %b, i64 2 + store float %l2, float* %b2 + %b3 = getelementptr inbounds float* %b, i64 3 + store float %l3, float* %b3 + ret void +} diff --git a/test/Transforms/SLPVectorizer/X86/crash_binaryop.ll b/test/Transforms/SLPVectorizer/X86/crash_binaryop.ll new file mode 100644 index 0000000..dc99366 --- /dev/null +++ b/test/Transforms/SLPVectorizer/X86/crash_binaryop.ll @@ -0,0 +1,41 @@ +; RUN: opt < %s -basicaa -slp-vectorizer -S -mtriple=x86_64-apple-macosx10.8.0 -mcpu=corei7 + +target datalayout = "e-m:o-i64:64-f80:128-n8:16:32:64-S128" +target triple = "x86_64-apple-darwin13.3.0" + +@a = common global double 0.000000e+00, align 8 + +define i32 @fn1() { +entry: + %init = load double* @a, align 8 + br label %loop + +loop: + %phi = phi double [ %add2, %loop ], [ %init, %entry ] + %postadd1_phi = phi double [ %postadd1, %loop ], [ %init, %entry ] + %postadd2_phi = phi double [ %postadd2, %loop ], [ %init, %entry ] + %add1 = fadd double %postadd1_phi, undef + %add2 = fadd double %postadd2_phi, %phi + %mul2 = fmul double %add2, 0.000000e+00 + %binaryop_B = fadd double %postadd1_phi, %mul2 + %mul1 = fmul double %add1, 0.000000e+00 + %tmp = fadd double %postadd2_phi, 0.000000e+00 + + ; tryToVectorize() starts with this binary instruction. + ; At the same time vectorization wraps around the loop, vectorizes + ; postadd1/2 and eventually binary_V and tmp. So binary_V itself is replaced + ; with a vector instruction. + ; The SLPVectorizer crashed because it tried to use binary_V + ; after vectorization to re-arrange instructions. 
+ %binary_V = fadd double %mul1, %binaryop_B + + %postadd1 = fadd double %binary_V, 0.000000e+00 + %postadd2 = fadd double %tmp, 1.000000e+00 + %tobool = fcmp une double %postadd1, 0.000000e+00 + br i1 %tobool, label %exit, label %loop + +exit: + ret i32 1 +} + + diff --git a/test/Transforms/SLPVectorizer/X86/crash_gep.ll b/test/Transforms/SLPVectorizer/X86/crash_gep.ll new file mode 100644 index 0000000..dd4034c --- /dev/null +++ b/test/Transforms/SLPVectorizer/X86/crash_gep.ll @@ -0,0 +1,19 @@ +; RUN: opt < %s -basicaa -slp-vectorizer -dce -S -mtriple=x86_64-unknown-linux-gnu + +target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128" +target triple = "x86_64-unknown-linux-gnu" + +@a = common global i64* null, align 8 + +; Function Attrs: nounwind uwtable +define i32 @fn1() { +entry: + %0 = load i64** @a, align 8 + %add.ptr = getelementptr inbounds i64* %0, i64 1 + %1 = ptrtoint i64* %add.ptr to i64 + %arrayidx = getelementptr inbounds i64* %0, i64 2 + store i64 %1, i64* %arrayidx, align 8 + %2 = ptrtoint i64* %arrayidx to i64 + store i64 %2, i64* %add.ptr, align 8 + ret i32 undef +} diff --git a/test/Transforms/SLPVectorizer/X86/crash_scheduling.ll b/test/Transforms/SLPVectorizer/X86/crash_scheduling.ll new file mode 100644 index 0000000..dddc1be --- /dev/null +++ b/test/Transforms/SLPVectorizer/X86/crash_scheduling.ll @@ -0,0 +1,47 @@ +; RUN: opt < %s -basicaa -slp-vectorizer -S -mtriple=x86_64-apple-macosx10.8.0 -mcpu=corei7 + +target datalayout = "e-m:o-i64:64-f80:128-n8:16:32:64-S128" +target triple = "x86_64-apple-darwin13.3.0" + +define void @_foo(double %p1, double %p2, double %p3) #0 { +entry: + %tab1 = alloca [256 x i32], align 16 + %tab2 = alloca [256 x i32], align 16 + br label %bb1 + + +bb1: + %mul19 = fmul double %p1, 1.638400e+04 + %mul20 = fmul double %p3, 1.638400e+04 + %add = fadd double %mul20, 8.192000e+03 + %mul21 = fmul double %p2, 1.638400e+04 + ; The SLPVectorizer crashed when scheduling this block after it inserted an + ; insertelement instruction (during vectorizing the for.body block) at this position. 
+ br label %for.body + +for.body: + %indvars.iv266 = phi i64 [ 0, %bb1 ], [ %indvars.iv.next267, %for.body ] + %t.0259 = phi double [ 0.000000e+00, %bb1 ], [ %add27, %for.body ] + %p3.addr.0258 = phi double [ %add, %bb1 ], [ %add28, %for.body ] + %vecinit.i.i237 = insertelement <2 x double> undef, double %t.0259, i32 0 + %x13 = tail call i32 @_xfn(<2 x double> %vecinit.i.i237) #2 + %arrayidx = getelementptr inbounds [256 x i32]* %tab1, i64 0, i64 %indvars.iv266 + store i32 %x13, i32* %arrayidx, align 4, !tbaa !4 + %vecinit.i.i = insertelement <2 x double> undef, double %p3.addr.0258, i32 0 + %x14 = tail call i32 @_xfn(<2 x double> %vecinit.i.i) #2 + %arrayidx26 = getelementptr inbounds [256 x i32]* %tab2, i64 0, i64 %indvars.iv266 + store i32 %x14, i32* %arrayidx26, align 4, !tbaa !4 + %add27 = fadd double %mul19, %t.0259 + %add28 = fadd double %mul21, %p3.addr.0258 + %indvars.iv.next267 = add nuw nsw i64 %indvars.iv266, 1 + %exitcond = icmp eq i64 %indvars.iv.next267, 256 + br i1 %exitcond, label %return, label %for.body + +return: + ret void +} + +declare i32 @_xfn(<2 x double>) #4 + +!3 = metadata !{metadata !"int", metadata !4, i64 0} +!4 = metadata !{metadata !3, metadata !3, i64 0} diff --git a/test/Transforms/SLPVectorizer/X86/crash_vectorizeTree.ll b/test/Transforms/SLPVectorizer/X86/crash_vectorizeTree.ll index c7ec98a..9f1fb71 100644 --- a/test/Transforms/SLPVectorizer/X86/crash_vectorizeTree.ll +++ b/test/Transforms/SLPVectorizer/X86/crash_vectorizeTree.ll @@ -1,4 +1,4 @@ -; RUN: opt -slp-vectorizer -mtriple=x86_64-apple-macosx10.9.0 -mcpu=corei7-avx -S < %s | FileCheck %s +; RUN: opt -basicaa -slp-vectorizer -mtriple=x86_64-apple-macosx10.9.0 -mcpu=corei7-avx -S < %s | FileCheck %s target datalayout = "e-m:o-i64:64-f80:128-n8:16:32:64-S128" target triple = "x86_64-apple-macosx10.9.0" diff --git a/test/Transforms/SLPVectorizer/X86/cycle_dup.ll b/test/Transforms/SLPVectorizer/X86/cycle_dup.ll index fba3549..bac2c3c 100644 --- a/test/Transforms/SLPVectorizer/X86/cycle_dup.ll +++ b/test/Transforms/SLPVectorizer/X86/cycle_dup.ll @@ -15,7 +15,7 @@ target triple = "x86_64-apple-macosx10.9.0" ;CHECK: bitcast i32* %A to <4 x i32>* ;CHECK-NEXT: load <4 x i32> ;CHECK: phi <4 x i32> -;CHECK-NEXT: mul <4 x i32> +;CHECK-NEXT: mul nsw <4 x i32> ;CHECK-NOT: mul ;CHECK: phi <4 x i32> ;CHECK: bitcast i32* %A to <4 x i32>* diff --git a/test/Transforms/SLPVectorizer/X86/debug_info.ll b/test/Transforms/SLPVectorizer/X86/debug_info.ll index f4e68f2..1046087 100644 --- a/test/Transforms/SLPVectorizer/X86/debug_info.ll +++ b/test/Transforms/SLPVectorizer/X86/debug_info.ll @@ -23,11 +23,11 @@ target triple = "x86_64-apple-macosx10.7.0" define i32 @depth(double* nocapture %A, i32 %m) #0 { entry: - tail call void @llvm.dbg.value(metadata !{double* %A}, i64 0, metadata !12), !dbg !19 - tail call void @llvm.dbg.value(metadata !{i32 %m}, i64 0, metadata !13), !dbg !19 - tail call void @llvm.dbg.value(metadata !20, i64 0, metadata !14), !dbg !21 - tail call void @llvm.dbg.value(metadata !22, i64 0, metadata !15), !dbg !21 - tail call void @llvm.dbg.value(metadata !2, i64 0, metadata !16), !dbg !23 + tail call void @llvm.dbg.value(metadata !{double* %A}, i64 0, metadata !12, metadata !{}), !dbg !19 + tail call void @llvm.dbg.value(metadata !{i32 %m}, i64 0, metadata !13, metadata !{}), !dbg !19 + tail call void @llvm.dbg.value(metadata !20, i64 0, metadata !14, metadata !{}), !dbg !21 + tail call void @llvm.dbg.value(metadata !22, i64 0, metadata !15, metadata !{}), !dbg !21 + tail call void 
@llvm.dbg.value(metadata !2, i64 0, metadata !16, metadata !{}), !dbg !23 %cmp8 = icmp sgt i32 %m, 0, !dbg !23 br i1 %cmp8, label %for.body.lr.ph, label %for.end, !dbg !23 @@ -49,7 +49,7 @@ for.end: ; preds = %for.body.lr.ph, %en } ; Function Attrs: nounwind readnone -declare void @llvm.dbg.value(metadata, i64, metadata) #1 +declare void @llvm.dbg.value(metadata, i64, metadata, metadata) #1 attributes #0 = { nounwind ssp uwtable "less-precise-fpmad"="false" "no-frame-pointer-elim"="true" "no-frame-pointer-elim-non-leaf"="true" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" } attributes #1 = { nounwind readnone } @@ -57,24 +57,24 @@ attributes #1 = { nounwind readnone } !llvm.dbg.cu = !{!0} !llvm.module.flags = !{!18, !32} -!0 = metadata !{i32 786449, metadata !1, i32 12, metadata !"clang version 3.4 (trunk 187335) (llvm/trunk 187335:187340M)", i1 true, metadata !"", i32 0, metadata !2, metadata !2, metadata !3, metadata !2, metadata !2, metadata !""} ; [ DW_TAG_compile_unit ] [/Users/nadav/file.c] [DW_LANG_C99] +!0 = metadata !{metadata !"0x11\0012\00clang version 3.4 (trunk 187335) (llvm/trunk 187335:187340M)\001\00\000\00\000", metadata !1, metadata !2, metadata !2, metadata !3, metadata !2, metadata !2} ; [ DW_TAG_compile_unit ] [/Users/nadav/file.c] [DW_LANG_C99] !1 = metadata !{metadata !"file.c", metadata !"/Users/nadav"} !2 = metadata !{i32 0} !3 = metadata !{metadata !4} -!4 = metadata !{i32 786478, metadata !1, metadata !5, metadata !"depth", metadata !"depth", metadata !"", i32 1, metadata !6, i1 false, i1 true, i32 0, i32 0, null, i32 256, i1 true, i32 (double*, i32)* @depth, null, null, metadata !11, i32 1} ; [ DW_TAG_subprogram ] [line 1] [def] [depth] -!5 = metadata !{i32 786473, metadata !1} ; [ DW_TAG_file_type ] [/Users/nadav/file.c] -!6 = metadata !{i32 786453, i32 0, null, metadata !"", i32 0, i64 0, i64 0, i64 0, i32 0, null, metadata !7, i32 0, null, null, null} ; [ DW_TAG_subroutine_type ] [line 0, size 0, align 0, offset 0] [from ] +!4 = metadata !{metadata !"0x2e\00depth\00depth\00\001\000\001\000\006\00256\001\001", metadata !1, metadata !5, metadata !6, null, i32 (double*, i32)* @depth, null, null, metadata !11} ; [ DW_TAG_subprogram ] [line 1] [def] [depth] +!5 = metadata !{metadata !"0x29", metadata !1} ; [ DW_TAG_file_type ] [/Users/nadav/file.c] +!6 = metadata !{metadata !"0x15\00\000\000\000\000\000\000", i32 0, null, null, metadata !7, null, null, null} ; [ DW_TAG_subroutine_type ] [line 0, size 0, align 0, offset 0] [from ] !7 = metadata !{metadata !8, metadata !9, metadata !8} -!8 = metadata !{i32 786468, null, null, metadata !"int", i32 0, i64 32, i64 32, i64 0, i32 0, i32 5} ; [ DW_TAG_base_type ] [int] [line 0, size 32, align 32, offset 0, enc DW_ATE_signed] -!9 = metadata !{i32 786447, null, null, metadata !"", i32 0, i64 64, i64 64, i64 0, i32 0, metadata !10} ; [ DW_TAG_pointer_type ] [line 0, size 64, align 64, offset 0] [from double] -!10 = metadata !{i32 786468, null, null, metadata !"double", i32 0, i64 64, i64 64, i64 0, i32 0, i32 4} ; [ DW_TAG_base_type ] [double] [line 0, size 64, align 64, offset 0, enc DW_ATE_float] +!8 = metadata !{metadata !"0x24\00int\000\0032\0032\000\000\005", null, null} ; [ DW_TAG_base_type ] [int] [line 0, size 32, align 32, offset 0, enc DW_ATE_signed] +!9 = metadata !{metadata !"0xf\00\000\0064\0064\000\000", null, null, metadata !10} ; [ DW_TAG_pointer_type ] [line 0, size 64, align 64, offset 0] [from double] +!10 = metadata 
!{metadata !"0x24\00double\000\0064\0064\000\000\004", null, null} ; [ DW_TAG_base_type ] [double] [line 0, size 64, align 64, offset 0, enc DW_ATE_float] !11 = metadata !{metadata !12, metadata !13, metadata !14, metadata !15, metadata !16} -!12 = metadata !{i32 786689, metadata !4, metadata !"A", metadata !5, i32 16777217, metadata !9, i32 0, i32 0} ; [ DW_TAG_arg_variable ] [A] [line 1] -!13 = metadata !{i32 786689, metadata !4, metadata !"m", metadata !5, i32 33554433, metadata !8, i32 0, i32 0} ; [ DW_TAG_arg_variable ] [m] [line 1] -!14 = metadata !{i32 786688, metadata !4, metadata !"y0", metadata !5, i32 2, metadata !10, i32 0, i32 0} ; [ DW_TAG_auto_variable ] [y0] [line 2] -!15 = metadata !{i32 786688, metadata !4, metadata !"y1", metadata !5, i32 2, metadata !10, i32 0, i32 0} ; [ DW_TAG_auto_variable ] [y1] [line 2] -!16 = metadata !{i32 786688, metadata !17, metadata !"i", metadata !5, i32 3, metadata !8, i32 0, i32 0} ; [ DW_TAG_auto_variable ] [i] [line 3] -!17 = metadata !{i32 786443, metadata !1, metadata !4, i32 3, i32 0, i32 0} ; [ DW_TAG_lexical_block ] [/Users/nadav/file.c] +!12 = metadata !{metadata !"0x101\00A\0016777217\000", metadata !4, metadata !5, metadata !9} ; [ DW_TAG_arg_variable ] [A] [line 1] +!13 = metadata !{metadata !"0x101\00m\0033554433\000", metadata !4, metadata !5, metadata !8} ; [ DW_TAG_arg_variable ] [m] [line 1] +!14 = metadata !{metadata !"0x100\00y0\002\000", metadata !4, metadata !5, metadata !10} ; [ DW_TAG_auto_variable ] [y0] [line 2] +!15 = metadata !{metadata !"0x100\00y1\002\000", metadata !4, metadata !5, metadata !10} ; [ DW_TAG_auto_variable ] [y1] [line 2] +!16 = metadata !{metadata !"0x100\00i\003\000", metadata !17, metadata !5, metadata !8} ; [ DW_TAG_auto_variable ] [i] [line 3] +!17 = metadata !{metadata !"0xb\003\000\000", metadata !1, metadata !4} ; [ DW_TAG_lexical_block ] [/Users/nadav/file.c] !18 = metadata !{i32 2, metadata !"Dwarf Version", i32 2} !19 = metadata !{i32 1, i32 0, metadata !4, null} !20 = metadata !{double 0.000000e+00} @@ -82,8 +82,8 @@ attributes #1 = { nounwind readnone } !22 = metadata !{double 1.000000e+00} !23 = metadata !{i32 3, i32 0, metadata !17, null} !24 = metadata !{i32 4, i32 0, metadata !25, null} -!25 = metadata !{i32 786443, metadata !1, metadata !17, i32 3, i32 0, i32 1} ; [ DW_TAG_lexical_block ] [/Users/nadav/file.c] +!25 = metadata !{metadata !"0xb\003\000\001", metadata !1, metadata !17} ; [ DW_TAG_lexical_block ] [/Users/nadav/file.c] !29 = metadata !{i32 5, i32 0, metadata !25, null} !30 = metadata !{i32 7, i32 0, metadata !4, null} -!31 = metadata !{i32 8, i32 0, metadata !4, null} ; [ DW_TAG_imported_declaration ] -!32 = metadata !{i32 1, metadata !"Debug Info Version", i32 1} +!31 = metadata !{i32 8, i32 0, metadata !4, null} +!32 = metadata !{i32 1, metadata !"Debug Info Version", i32 2} diff --git a/test/Transforms/SLPVectorizer/X86/extract_in_tree_user.ll b/test/Transforms/SLPVectorizer/X86/extract_in_tree_user.ll new file mode 100644 index 0000000..3628042 --- /dev/null +++ b/test/Transforms/SLPVectorizer/X86/extract_in_tree_user.ll @@ -0,0 +1,70 @@ +; RUN: opt < %s -basicaa -slp-vectorizer -S -mtriple=i386-apple-macosx10.9.0 -mcpu=corei7-avx | FileCheck %s + +target datalayout = "e-m:o-i64:64-f80:128-n8:16:32:64-S128" + +@a = common global i64* null, align 8 + +; Function Attrs: nounwind ssp uwtable +define i32 @fn1() { +entry: + %0 = load i64** @a, align 8 + %add.ptr = getelementptr inbounds i64* %0, i64 11 + %1 = ptrtoint i64* %add.ptr to i64 + store i64 %1, i64* %add.ptr, 
align 8 + %add.ptr1 = getelementptr inbounds i64* %0, i64 56 + %2 = ptrtoint i64* %add.ptr1 to i64 + %arrayidx2 = getelementptr inbounds i64* %0, i64 12 + store i64 %2, i64* %arrayidx2, align 8 + ret i32 undef +; CHECK-LABEL: @fn1( +; CHECK: extractelement <2 x i64*> +; CHECK: ret +} + + +declare float @llvm.powi.f32(float, i32) +define void @fn2(i32* %a, i32* %b, float* %c) { +entry: + %i0 = load i32* %a, align 4 + %i1 = load i32* %b, align 4 + %add1 = add i32 %i0, %i1 + %fp1 = sitofp i32 %add1 to float + %call1 = tail call float @llvm.powi.f32(float %fp1,i32 %add1) nounwind readnone + + %arrayidx2 = getelementptr inbounds i32* %a, i32 1 + %i2 = load i32* %arrayidx2, align 4 + %arrayidx3 = getelementptr inbounds i32* %b, i32 1 + %i3 = load i32* %arrayidx3, align 4 + %add2 = add i32 %i2, %i3 + %fp2 = sitofp i32 %add2 to float + %call2 = tail call float @llvm.powi.f32(float %fp2,i32 %add1) nounwind readnone + + %arrayidx4 = getelementptr inbounds i32* %a, i32 2 + %i4 = load i32* %arrayidx4, align 4 + %arrayidx5 = getelementptr inbounds i32* %b, i32 2 + %i5 = load i32* %arrayidx5, align 4 + %add3 = add i32 %i4, %i5 + %fp3 = sitofp i32 %add3 to float + %call3 = tail call float @llvm.powi.f32(float %fp3,i32 %add1) nounwind readnone + + %arrayidx6 = getelementptr inbounds i32* %a, i32 3 + %i6 = load i32* %arrayidx6, align 4 + %arrayidx7 = getelementptr inbounds i32* %b, i32 3 + %i7 = load i32* %arrayidx7, align 4 + %add4 = add i32 %i6, %i7 + %fp4 = sitofp i32 %add4 to float + %call4 = tail call float @llvm.powi.f32(float %fp4,i32 %add1) nounwind readnone + + store float %call1, float* %c, align 4 + %arrayidx8 = getelementptr inbounds float* %c, i32 1 + store float %call2, float* %arrayidx8, align 4 + %arrayidx9 = getelementptr inbounds float* %c, i32 2 + store float %call3, float* %arrayidx9, align 4 + %arrayidx10 = getelementptr inbounds float* %c, i32 3 + store float %call4, float* %arrayidx10, align 4 + ret void + +; CHECK-LABEL: @fn2( +; CHECK: extractelement <4 x i32> +; CHECK: ret +} diff --git a/test/Transforms/SLPVectorizer/X86/hoist.ll b/test/Transforms/SLPVectorizer/X86/hoist.ll index 5074cea..78c58f1 100644 --- a/test/Transforms/SLPVectorizer/X86/hoist.ll +++ b/test/Transforms/SLPVectorizer/X86/hoist.ll @@ -21,7 +21,7 @@ target triple = "i386-apple-macosx10.9.0" ; loop body: ;CHECK: phi ;CHECK: load <4 x i32> -;CHECK: add <4 x i32> +;CHECK: add nsw <4 x i32> ;CHECK: store <4 x i32> ;CHECK: ret define i32 @foo(i32* nocapture %A, i32 %n, i32 %k) { diff --git a/test/Transforms/SLPVectorizer/X86/horizontal.ll b/test/Transforms/SLPVectorizer/X86/horizontal.ll index 8f91951..1836047 100644 --- a/test/Transforms/SLPVectorizer/X86/horizontal.ll +++ b/test/Transforms/SLPVectorizer/X86/horizontal.ll @@ -148,7 +148,7 @@ for.end: ; } ; CHECK-LABEL: long_red -; CHECK: fmul <4 x float> +; CHECK: fmul fast <4 x float> ; CHECK: shufflevector <4 x float> define i32 @long_red(float* noalias %A, float* noalias %B, i32 %n) { @@ -250,7 +250,7 @@ for.end: ; } ; CHECK-LABEL: chain_red -; CHECK: fmul <4 x float> +; CHECK: fmul fast <4 x float> ; CHECK: shufflevector <4 x float> define i32 @chain_red(float* noalias %A, float* noalias %B, i32 %n) { @@ -317,7 +317,7 @@ for.end: ; } ; CHECK-LABEL: store_red -; CHECK: fmul <4 x float> +; CHECK: fmul fast <4 x float> ; CHECK: shufflevector <4 x float> define i32 @store_red(float* noalias %A, float* noalias %B, float* noalias %C, i32 %n) { @@ -379,7 +379,7 @@ for.end: ; } ; STORE-LABEL: store_red_double -; STORE: fmul <2 x double> +; STORE: fmul fast <2 x double> 
; STORE: extractelement <2 x double> ; STORE: extractelement <2 x double> diff --git a/test/Transforms/SLPVectorizer/X86/in-tree-user.ll b/test/Transforms/SLPVectorizer/X86/in-tree-user.ll index 3115232..194a0fb 100644 --- a/test/Transforms/SLPVectorizer/X86/in-tree-user.ll +++ b/test/Transforms/SLPVectorizer/X86/in-tree-user.ll @@ -5,9 +5,11 @@ target triple = "x86_64-apple-macosx10.7.0" @.str = private unnamed_addr constant [6 x i8] c"bingo\00", align 1 -; We can't vectorize when the roots are used inside the tree. +; Uses inside the tree must be scheduled after the corresponding tree bundle. ;CHECK-LABEL: @in_tree_user( -;CHECK-NOT: load <2 x double> +;CHECK: load <2 x double> +;CHECK: fadd <2 x double> +;CHECK: InTreeUser = fadd ;CHECK: ret define void @in_tree_user(double* nocapture %A, i32 %n) { entry: @@ -22,7 +24,7 @@ for.body: ; preds = %for.inc, %entry %mul1 = fmul double %conv, %1 %mul2 = fmul double %mul1, 7.000000e+00 %add = fadd double %mul2, 5.000000e+00 - %BadValue = fadd double %add, %add ; <------------------ In tree user. + %InTreeUser = fadd double %add, %add ; <------------------ In tree user. %2 = or i64 %0, 1 %arrayidx6 = getelementptr inbounds double* %A, i64 %2 %3 = load double* %arrayidx6, align 8 @@ -43,6 +45,7 @@ for.inc: ; preds = %for.body, %if.then br i1 %exitcond, label %for.end, label %for.body for.end: ; preds = %for.inc + store double %InTreeUser, double* %A, align 8 ; Avoid dead code elimination of the InTreeUser. ret void } diff --git a/test/Transforms/SLPVectorizer/X86/insert-element-build-vector.ll b/test/Transforms/SLPVectorizer/X86/insert-element-build-vector.ll index 9eda29f..0221613 100644 --- a/test/Transforms/SLPVectorizer/X86/insert-element-build-vector.ll +++ b/test/Transforms/SLPVectorizer/X86/insert-element-build-vector.ll @@ -35,6 +35,49 @@ define <4 x float> @simple_select(<4 x float> %a, <4 x float> %b, <4 x i32> %c) ret <4 x float> %rd } +declare void @llvm.assume(i1) nounwind + +; This entire tree is ephemeral, don't vectorize any of it. 
+define <4 x float> @simple_select_eph(<4 x float> %a, <4 x float> %b, <4 x i32> %c) #0 { +; CHECK-LABEL: @simple_select_eph( +; CHECK-NOT: icmp ne <4 x i32> +; CHECK-NOT: select <4 x i1> + %c0 = extractelement <4 x i32> %c, i32 0 + %c1 = extractelement <4 x i32> %c, i32 1 + %c2 = extractelement <4 x i32> %c, i32 2 + %c3 = extractelement <4 x i32> %c, i32 3 + %a0 = extractelement <4 x float> %a, i32 0 + %a1 = extractelement <4 x float> %a, i32 1 + %a2 = extractelement <4 x float> %a, i32 2 + %a3 = extractelement <4 x float> %a, i32 3 + %b0 = extractelement <4 x float> %b, i32 0 + %b1 = extractelement <4 x float> %b, i32 1 + %b2 = extractelement <4 x float> %b, i32 2 + %b3 = extractelement <4 x float> %b, i32 3 + %cmp0 = icmp ne i32 %c0, 0 + %cmp1 = icmp ne i32 %c1, 0 + %cmp2 = icmp ne i32 %c2, 0 + %cmp3 = icmp ne i32 %c3, 0 + %s0 = select i1 %cmp0, float %a0, float %b0 + %s1 = select i1 %cmp1, float %a1, float %b1 + %s2 = select i1 %cmp2, float %a2, float %b2 + %s3 = select i1 %cmp3, float %a3, float %b3 + %ra = insertelement <4 x float> undef, float %s0, i32 0 + %rb = insertelement <4 x float> %ra, float %s1, i32 1 + %rc = insertelement <4 x float> %rb, float %s2, i32 2 + %rd = insertelement <4 x float> %rc, float %s3, i32 3 + %q0 = extractelement <4 x float> %rd, i32 0 + %q1 = extractelement <4 x float> %rd, i32 1 + %q2 = extractelement <4 x float> %rd, i32 2 + %q3 = extractelement <4 x float> %rd, i32 3 + %q4 = fadd float %q0, %q1 + %q5 = fadd float %q2, %q3 + %q6 = fadd float %q4, %q5 + %qi = fcmp olt float %q6, %q5 + call void @llvm.assume(i1 %qi) + ret <4 x float> undef +} + ; Insert in an order different from the vector indices to make sure it ; doesn't matter define <4 x float> @simple_select_insert_out_of_order(<4 x float> %a, <4 x float> %b, <4 x i32> %c) #0 { diff --git a/test/Transforms/SLPVectorizer/X86/loopinvariant.ll b/test/Transforms/SLPVectorizer/X86/loopinvariant.ll index aef2479..bc12926 100644 --- a/test/Transforms/SLPVectorizer/X86/loopinvariant.ll +++ b/test/Transforms/SLPVectorizer/X86/loopinvariant.ll @@ -5,10 +5,10 @@ target triple = "x86_64-apple-macosx10.8.0" ;CHECK-LABEL: @foo( ;CHECK: load <4 x i32> -;CHECK: add <4 x i32> +;CHECK: add nsw <4 x i32> ;CHECK: store <4 x i32> ;CHECK: load <4 x i32> -;CHECK: add <4 x i32> +;CHECK: add nsw <4 x i32> ;CHECK: store <4 x i32> ;CHECK: ret define i32 @foo(i32* nocapture %A, i32 %n) #0 { diff --git a/test/Transforms/SLPVectorizer/X86/multi_user.ll b/test/Transforms/SLPVectorizer/X86/multi_user.ll index cab9994..63a77e4 100644 --- a/test/Transforms/SLPVectorizer/X86/multi_user.ll +++ b/test/Transforms/SLPVectorizer/X86/multi_user.ll @@ -14,7 +14,7 @@ target triple = "x86_64-apple-macosx10.7.0" ;CHECK-LABEL: @foo( ;CHECK: insertelement <4 x i32> ;CHECK: load <4 x i32> -;CHECK: add <4 x i32> +;CHECK: add nsw <4 x i32> ;CHECK: store <4 x i32> ;CHECK: ret define i32 @foo(i32* nocapture %A, i32 %n) { diff --git a/test/Transforms/SLPVectorizer/X86/powof2div.ll b/test/Transforms/SLPVectorizer/X86/powof2div.ll new file mode 100644 index 0000000..7aa1efd --- /dev/null +++ b/test/Transforms/SLPVectorizer/X86/powof2div.ll @@ -0,0 +1,43 @@ +; RUN: opt < %s -basicaa -slp-vectorizer -S -mtriple=x86_64-unknown-linux-gnu -mcpu=corei7-avx | FileCheck %s + +target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128" +target triple = "x86_64-unknown-linux-gnu" + +;CHECK-LABEL: @powof2div( +;CHECK: load <4 x i32>* +;CHECK: add nsw <4 x i32> +;CHECK: sdiv <4 x i32> +define void @powof2div(i32* noalias nocapture %a, i32* noalias nocapture 
readonly %b, i32* noalias nocapture readonly %c){ +entry: + %0 = load i32* %b, align 4 + %1 = load i32* %c, align 4 + %add = add nsw i32 %1, %0 + %div = sdiv i32 %add, 2 + store i32 %div, i32* %a, align 4 + %arrayidx3 = getelementptr inbounds i32* %b, i64 1 + %2 = load i32* %arrayidx3, align 4 + %arrayidx4 = getelementptr inbounds i32* %c, i64 1 + %3 = load i32* %arrayidx4, align 4 + %add5 = add nsw i32 %3, %2 + %div6 = sdiv i32 %add5, 2 + %arrayidx7 = getelementptr inbounds i32* %a, i64 1 + store i32 %div6, i32* %arrayidx7, align 4 + %arrayidx8 = getelementptr inbounds i32* %b, i64 2 + %4 = load i32* %arrayidx8, align 4 + %arrayidx9 = getelementptr inbounds i32* %c, i64 2 + %5 = load i32* %arrayidx9, align 4 + %add10 = add nsw i32 %5, %4 + %div11 = sdiv i32 %add10, 2 + %arrayidx12 = getelementptr inbounds i32* %a, i64 2 + store i32 %div11, i32* %arrayidx12, align 4 + %arrayidx13 = getelementptr inbounds i32* %b, i64 3 + %6 = load i32* %arrayidx13, align 4 + %arrayidx14 = getelementptr inbounds i32* %c, i64 3 + %7 = load i32* %arrayidx14, align 4 + %add15 = add nsw i32 %7, %6 + %div16 = sdiv i32 %add15, 2 + %arrayidx17 = getelementptr inbounds i32* %a, i64 3 + store i32 %div16, i32* %arrayidx17, align 4 + ret void +} + diff --git a/test/Transforms/SLPVectorizer/X86/propagate_ir_flags.ll b/test/Transforms/SLPVectorizer/X86/propagate_ir_flags.ll new file mode 100644 index 0000000..3843ef7 --- /dev/null +++ b/test/Transforms/SLPVectorizer/X86/propagate_ir_flags.ll @@ -0,0 +1,350 @@ +; RUN: opt < %s -basicaa -slp-vectorizer -S | FileCheck %s + +; Check propagation of optional IR flags (PR20802). For a flag to +; propagate from scalar instructions to their vector replacement, +; *all* scalar instructions must have the flag. + +target datalayout = "e-m:o-i64:64-f80:128-n8:16:32:64-S128" +target triple = "x86_64-unknown-unknown" + +; CHECK-LABEL: @exact( +; CHECK: lshr exact <4 x i32> +define void @exact(i32* %x) { + %idx1 = getelementptr inbounds i32* %x, i64 0 + %idx2 = getelementptr inbounds i32* %x, i64 1 + %idx3 = getelementptr inbounds i32* %x, i64 2 + %idx4 = getelementptr inbounds i32* %x, i64 3 + + %load1 = load i32* %idx1, align 4 + %load2 = load i32* %idx2, align 4 + %load3 = load i32* %idx3, align 4 + %load4 = load i32* %idx4, align 4 + + %op1 = lshr exact i32 %load1, 1 + %op2 = lshr exact i32 %load2, 1 + %op3 = lshr exact i32 %load3, 1 + %op4 = lshr exact i32 %load4, 1 + + store i32 %op1, i32* %idx1, align 4 + store i32 %op2, i32* %idx2, align 4 + store i32 %op3, i32* %idx3, align 4 + store i32 %op4, i32* %idx4, align 4 + + ret void +} + +; CHECK-LABEL: @not_exact( +; CHECK: lshr <4 x i32> +define void @not_exact(i32* %x) { + %idx1 = getelementptr inbounds i32* %x, i64 0 + %idx2 = getelementptr inbounds i32* %x, i64 1 + %idx3 = getelementptr inbounds i32* %x, i64 2 + %idx4 = getelementptr inbounds i32* %x, i64 3 + + %load1 = load i32* %idx1, align 4 + %load2 = load i32* %idx2, align 4 + %load3 = load i32* %idx3, align 4 + %load4 = load i32* %idx4, align 4 + + %op1 = lshr exact i32 %load1, 1 + %op2 = lshr i32 %load2, 1 + %op3 = lshr exact i32 %load3, 1 + %op4 = lshr exact i32 %load4, 1 + + store i32 %op1, i32* %idx1, align 4 + store i32 %op2, i32* %idx2, align 4 + store i32 %op3, i32* %idx3, align 4 + store i32 %op4, i32* %idx4, align 4 + + ret void +} + +; CHECK-LABEL: @nsw( +; CHECK: add nsw <4 x i32> +define void @nsw(i32* %x) { + %idx1 = getelementptr inbounds i32* %x, i64 0 + %idx2 = getelementptr inbounds i32* %x, i64 1 + %idx3 = getelementptr inbounds i32* %x, i64 2 + %idx4 = 
getelementptr inbounds i32* %x, i64 3 + + %load1 = load i32* %idx1, align 4 + %load2 = load i32* %idx2, align 4 + %load3 = load i32* %idx3, align 4 + %load4 = load i32* %idx4, align 4 + + %op1 = add nsw i32 %load1, 1 + %op2 = add nsw i32 %load2, 1 + %op3 = add nsw i32 %load3, 1 + %op4 = add nsw i32 %load4, 1 + + store i32 %op1, i32* %idx1, align 4 + store i32 %op2, i32* %idx2, align 4 + store i32 %op3, i32* %idx3, align 4 + store i32 %op4, i32* %idx4, align 4 + + ret void +} + +; CHECK-LABEL: @not_nsw( +; CHECK: add <4 x i32> +define void @not_nsw(i32* %x) { + %idx1 = getelementptr inbounds i32* %x, i64 0 + %idx2 = getelementptr inbounds i32* %x, i64 1 + %idx3 = getelementptr inbounds i32* %x, i64 2 + %idx4 = getelementptr inbounds i32* %x, i64 3 + + %load1 = load i32* %idx1, align 4 + %load2 = load i32* %idx2, align 4 + %load3 = load i32* %idx3, align 4 + %load4 = load i32* %idx4, align 4 + + %op1 = add nsw i32 %load1, 1 + %op2 = add nsw i32 %load2, 1 + %op3 = add nsw i32 %load3, 1 + %op4 = add i32 %load4, 1 + + store i32 %op1, i32* %idx1, align 4 + store i32 %op2, i32* %idx2, align 4 + store i32 %op3, i32* %idx3, align 4 + store i32 %op4, i32* %idx4, align 4 + + ret void +} + +; CHECK-LABEL: @nuw( +; CHECK: add nuw <4 x i32> +define void @nuw(i32* %x) { + %idx1 = getelementptr inbounds i32* %x, i64 0 + %idx2 = getelementptr inbounds i32* %x, i64 1 + %idx3 = getelementptr inbounds i32* %x, i64 2 + %idx4 = getelementptr inbounds i32* %x, i64 3 + + %load1 = load i32* %idx1, align 4 + %load2 = load i32* %idx2, align 4 + %load3 = load i32* %idx3, align 4 + %load4 = load i32* %idx4, align 4 + + %op1 = add nuw i32 %load1, 1 + %op2 = add nuw i32 %load2, 1 + %op3 = add nuw i32 %load3, 1 + %op4 = add nuw i32 %load4, 1 + + store i32 %op1, i32* %idx1, align 4 + store i32 %op2, i32* %idx2, align 4 + store i32 %op3, i32* %idx3, align 4 + store i32 %op4, i32* %idx4, align 4 + + ret void +} + +; CHECK-LABEL: @not_nuw( +; CHECK: add <4 x i32> +define void @not_nuw(i32* %x) { + %idx1 = getelementptr inbounds i32* %x, i64 0 + %idx2 = getelementptr inbounds i32* %x, i64 1 + %idx3 = getelementptr inbounds i32* %x, i64 2 + %idx4 = getelementptr inbounds i32* %x, i64 3 + + %load1 = load i32* %idx1, align 4 + %load2 = load i32* %idx2, align 4 + %load3 = load i32* %idx3, align 4 + %load4 = load i32* %idx4, align 4 + + %op1 = add nuw i32 %load1, 1 + %op2 = add i32 %load2, 1 + %op3 = add i32 %load3, 1 + %op4 = add nuw i32 %load4, 1 + + store i32 %op1, i32* %idx1, align 4 + store i32 %op2, i32* %idx2, align 4 + store i32 %op3, i32* %idx3, align 4 + store i32 %op4, i32* %idx4, align 4 + + ret void +} + +; CHECK-LABEL: @nnan( +; CHECK: fadd nnan <4 x float> +define void @nnan(float* %x) { + %idx1 = getelementptr inbounds float* %x, i64 0 + %idx2 = getelementptr inbounds float* %x, i64 1 + %idx3 = getelementptr inbounds float* %x, i64 2 + %idx4 = getelementptr inbounds float* %x, i64 3 + + %load1 = load float* %idx1, align 4 + %load2 = load float* %idx2, align 4 + %load3 = load float* %idx3, align 4 + %load4 = load float* %idx4, align 4 + + %op1 = fadd fast nnan float %load1, 1.0 + %op2 = fadd nnan ninf float %load2, 1.0 + %op3 = fadd nsz nnan float %load3, 1.0 + %op4 = fadd arcp nnan float %load4, 1.0 + + store float %op1, float* %idx1, align 4 + store float %op2, float* %idx2, align 4 + store float %op3, float* %idx3, align 4 + store float %op4, float* %idx4, align 4 + + ret void +} + +; CHECK-LABEL: @not_nnan( +; CHECK: fadd <4 x float> +define void @not_nnan(float* %x) { + %idx1 = getelementptr inbounds float* %x, 
i64 0 + %idx2 = getelementptr inbounds float* %x, i64 1 + %idx3 = getelementptr inbounds float* %x, i64 2 + %idx4 = getelementptr inbounds float* %x, i64 3 + + %load1 = load float* %idx1, align 4 + %load2 = load float* %idx2, align 4 + %load3 = load float* %idx3, align 4 + %load4 = load float* %idx4, align 4 + + %op1 = fadd nnan float %load1, 1.0 + %op2 = fadd ninf float %load2, 1.0 + %op3 = fadd nsz float %load3, 1.0 + %op4 = fadd arcp float %load4, 1.0 + + store float %op1, float* %idx1, align 4 + store float %op2, float* %idx2, align 4 + store float %op3, float* %idx3, align 4 + store float %op4, float* %idx4, align 4 + + ret void +} + +; CHECK-LABEL: @only_fast( +; CHECK: fadd fast <4 x float> +define void @only_fast(float* %x) { + %idx1 = getelementptr inbounds float* %x, i64 0 + %idx2 = getelementptr inbounds float* %x, i64 1 + %idx3 = getelementptr inbounds float* %x, i64 2 + %idx4 = getelementptr inbounds float* %x, i64 3 + + %load1 = load float* %idx1, align 4 + %load2 = load float* %idx2, align 4 + %load3 = load float* %idx3, align 4 + %load4 = load float* %idx4, align 4 + + %op1 = fadd fast nnan float %load1, 1.0 + %op2 = fadd fast nnan ninf float %load2, 1.0 + %op3 = fadd fast nsz nnan float %load3, 1.0 + %op4 = fadd arcp nnan fast float %load4, 1.0 + + store float %op1, float* %idx1, align 4 + store float %op2, float* %idx2, align 4 + store float %op3, float* %idx3, align 4 + store float %op4, float* %idx4, align 4 + + ret void +} + +; CHECK-LABEL: @only_arcp( +; CHECK: fadd arcp <4 x float> +define void @only_arcp(float* %x) { + %idx1 = getelementptr inbounds float* %x, i64 0 + %idx2 = getelementptr inbounds float* %x, i64 1 + %idx3 = getelementptr inbounds float* %x, i64 2 + %idx4 = getelementptr inbounds float* %x, i64 3 + + %load1 = load float* %idx1, align 4 + %load2 = load float* %idx2, align 4 + %load3 = load float* %idx3, align 4 + %load4 = load float* %idx4, align 4 + + %op1 = fadd fast float %load1, 1.0 + %op2 = fadd fast float %load2, 1.0 + %op3 = fadd fast float %load3, 1.0 + %op4 = fadd arcp float %load4, 1.0 + + store float %op1, float* %idx1, align 4 + store float %op2, float* %idx2, align 4 + store float %op3, float* %idx3, align 4 + store float %op4, float* %idx4, align 4 + + ret void +} + +; CHECK-LABEL: @addsub_all_nsw +; CHECK: add nsw <4 x i32> +; CHECK: sub nsw <4 x i32> +define void @addsub_all_nsw(i32* %x) { + %idx1 = getelementptr inbounds i32* %x, i64 0 + %idx2 = getelementptr inbounds i32* %x, i64 1 + %idx3 = getelementptr inbounds i32* %x, i64 2 + %idx4 = getelementptr inbounds i32* %x, i64 3 + + %load1 = load i32* %idx1, align 4 + %load2 = load i32* %idx2, align 4 + %load3 = load i32* %idx3, align 4 + %load4 = load i32* %idx4, align 4 + + %op1 = add nsw i32 %load1, 1 + %op2 = sub nsw i32 %load2, 1 + %op3 = add nsw i32 %load3, 1 + %op4 = sub nsw i32 %load4, 1 + + store i32 %op1, i32* %idx1, align 4 + store i32 %op2, i32* %idx2, align 4 + store i32 %op3, i32* %idx3, align 4 + store i32 %op4, i32* %idx4, align 4 + + ret void +} + +; CHECK-LABEL: @addsub_some_nsw +; CHECK: add nsw <4 x i32> +; CHECK: sub <4 x i32> +define void @addsub_some_nsw(i32* %x) { + %idx1 = getelementptr inbounds i32* %x, i64 0 + %idx2 = getelementptr inbounds i32* %x, i64 1 + %idx3 = getelementptr inbounds i32* %x, i64 2 + %idx4 = getelementptr inbounds i32* %x, i64 3 + + %load1 = load i32* %idx1, align 4 + %load2 = load i32* %idx2, align 4 + %load3 = load i32* %idx3, align 4 + %load4 = load i32* %idx4, align 4 + + %op1 = add nsw i32 %load1, 1 + %op2 = sub nsw i32 %load2, 1 + 
%op3 = add nsw i32 %load3, 1 + %op4 = sub i32 %load4, 1 + + store i32 %op1, i32* %idx1, align 4 + store i32 %op2, i32* %idx2, align 4 + store i32 %op3, i32* %idx3, align 4 + store i32 %op4, i32* %idx4, align 4 + + ret void +} + +; CHECK-LABEL: @addsub_no_nsw +; CHECK: add <4 x i32> +; CHECK: sub <4 x i32> +define void @addsub_no_nsw(i32* %x) { + %idx1 = getelementptr inbounds i32* %x, i64 0 + %idx2 = getelementptr inbounds i32* %x, i64 1 + %idx3 = getelementptr inbounds i32* %x, i64 2 + %idx4 = getelementptr inbounds i32* %x, i64 3 + + %load1 = load i32* %idx1, align 4 + %load2 = load i32* %idx2, align 4 + %load3 = load i32* %idx3, align 4 + %load4 = load i32* %idx4, align 4 + + %op1 = add i32 %load1, 1 + %op2 = sub nsw i32 %load2, 1 + %op3 = add nsw i32 %load3, 1 + %op4 = sub i32 %load4, 1 + + store i32 %op1, i32* %idx1, align 4 + store i32 %op2, i32* %idx2, align 4 + store i32 %op3, i32* %idx3, align 4 + store i32 %op4, i32* %idx4, align 4 + + ret void +} + diff --git a/test/Transforms/SLPVectorizer/X86/return.ll b/test/Transforms/SLPVectorizer/X86/return.ll new file mode 100644 index 0000000..1a81c23 --- /dev/null +++ b/test/Transforms/SLPVectorizer/X86/return.ll @@ -0,0 +1,54 @@ +; RUN: opt < %s -basicaa -slp-vectorizer -S | FileCheck %s +target datalayout = "e-m:e-p:32:32-f64:32:64-f80:32-n8:16:32-S128" +target triple = "x86_64--linux-gnu" + +@a = common global [4 x double] zeroinitializer, align 8 +@b = common global [4 x double] zeroinitializer, align 8 + +; [4], b[4]; +; double foo() { +; double sum =0; +; sum = (a[0]+b[0]) + (a[1]+b[1]); +; return sum; +; } + +; CHECK-LABEL: @return1 +; CHECK: %0 = load <2 x double>* +; CHECK: %1 = load <2 x double>* +; CHECK: %2 = fadd <2 x double> + +define double @return1() { +entry: + %a0 = load double* getelementptr inbounds ([4 x double]* @a, i32 0, i32 0), align 8 + %b0 = load double* getelementptr inbounds ([4 x double]* @b, i32 0, i32 0), align 8 + %add0 = fadd double %a0, %b0 + %a1 = load double* getelementptr inbounds ([4 x double]* @a, i32 0, i32 1), align 8 + %b1 = load double* getelementptr inbounds ([4 x double]* @b, i32 0, i32 1), align 8 + %add1 = fadd double %a1, %b1 + %add2 = fadd double %add0, %add1 + ret double %add2 +} + +; double hadd(double *x) { +; return ((x[0] + x[2]) + (x[1] + x[3])); +; } + +; CHECK-LABEL: @return2 +; CHECK: %1 = load <2 x double>* +; CHECK: %3 = load <2 x double>* %2 +; CHECK: %4 = fadd <2 x double> %1, %3 + +define double @return2(double* nocapture readonly %x) { +entry: + %x0 = load double* %x, align 4 + %arrayidx1 = getelementptr inbounds double* %x, i32 2 + %x2 = load double* %arrayidx1, align 4 + %add3 = fadd double %x0, %x2 + %arrayidx2 = getelementptr inbounds double* %x, i32 1 + %x1 = load double* %arrayidx2, align 4 + %arrayidx3 = getelementptr inbounds double* %x, i32 3 + %x3 = load double* %arrayidx3, align 4 + %add4 = fadd double %x1, %x3 + %add5 = fadd double %add3, %add4 + ret double %add5 +} diff --git a/test/Transforms/SLPVectorizer/X86/saxpy.ll b/test/Transforms/SLPVectorizer/X86/saxpy.ll index 4626341..4b39d46 100644 --- a/test/Transforms/SLPVectorizer/X86/saxpy.ll +++ b/test/Transforms/SLPVectorizer/X86/saxpy.ll @@ -5,7 +5,7 @@ target triple = "x86_64-apple-macosx10.8.0" ; SLP vectorization example from http://cs.stanford.edu/people/eschkufz/research/asplos291-schkufza.pdf ;CHECK: SAXPY -;CHECK: mul <4 x i32> +;CHECK: mul nsw <4 x i32> ;CHECK: ret define void @SAXPY(i32* noalias nocapture %x, i32* noalias nocapture %y, i32 %a, i64 %i) { diff --git 
a/test/Transforms/SLPVectorizer/X86/scheduling.ll b/test/Transforms/SLPVectorizer/X86/scheduling.ll new file mode 100644 index 0000000..3b3bd80 --- /dev/null +++ b/test/Transforms/SLPVectorizer/X86/scheduling.ll @@ -0,0 +1,78 @@ +; RUN: opt < %s -basicaa -slp-vectorizer -S -mtriple=i386-apple-macosx10.8.0 -mcpu=corei7-avx | FileCheck %s + +target datalayout = "e-m:o-i64:64-f80:128-n8:16:32:64-S128" +target triple = "x86_64-apple-macosx10.9.0" + +;CHECK-LABEL: @foo +;CHECK: load <4 x i32> +;CHECK: load <4 x i32> +;CHECK: %[[S1:.+]] = add nsw <4 x i32> +;CHECK-DAG: store <4 x i32> %[[S1]] +;CHECK-DAG: %[[A1:.+]] = add nsw i32 +;CHECK-DAG: %[[A2:.+]] = add nsw i32 %[[A1]] +;CHECK-DAG: %[[A3:.+]] = add nsw i32 %[[A2]] +;CHECK-DAG: %[[A4:.+]] = add nsw i32 %[[A3]] +;CHECK: ret i32 %[[A4]] + +define i32 @foo(i32* nocapture readonly %diff) #0 { +entry: + %m2 = alloca [8 x [8 x i32]], align 16 + %0 = bitcast [8 x [8 x i32]]* %m2 to i8* + br label %for.body + +for.body: ; preds = %for.body, %entry + %indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ] + %a.088 = phi i32 [ 0, %entry ], [ %add52, %for.body ] + %1 = shl i64 %indvars.iv, 3 + %arrayidx = getelementptr inbounds i32* %diff, i64 %1 + %2 = load i32* %arrayidx, align 4 + %3 = or i64 %1, 4 + %arrayidx2 = getelementptr inbounds i32* %diff, i64 %3 + %4 = load i32* %arrayidx2, align 4 + %add3 = add nsw i32 %4, %2 + %arrayidx6 = getelementptr inbounds [8 x [8 x i32]]* %m2, i64 0, i64 %indvars.iv, i64 0 + store i32 %add3, i32* %arrayidx6, align 16 + %add10 = add nsw i32 %add3, %a.088 + %5 = or i64 %1, 1 + %arrayidx13 = getelementptr inbounds i32* %diff, i64 %5 + %6 = load i32* %arrayidx13, align 4 + %7 = or i64 %1, 5 + %arrayidx16 = getelementptr inbounds i32* %diff, i64 %7 + %8 = load i32* %arrayidx16, align 4 + %add17 = add nsw i32 %8, %6 + %arrayidx20 = getelementptr inbounds [8 x [8 x i32]]* %m2, i64 0, i64 %indvars.iv, i64 1 + store i32 %add17, i32* %arrayidx20, align 4 + %add24 = add nsw i32 %add10, %add17 + %9 = or i64 %1, 2 + %arrayidx27 = getelementptr inbounds i32* %diff, i64 %9 + %10 = load i32* %arrayidx27, align 4 + %11 = or i64 %1, 6 + %arrayidx30 = getelementptr inbounds i32* %diff, i64 %11 + %12 = load i32* %arrayidx30, align 4 + %add31 = add nsw i32 %12, %10 + %arrayidx34 = getelementptr inbounds [8 x [8 x i32]]* %m2, i64 0, i64 %indvars.iv, i64 2 + store i32 %add31, i32* %arrayidx34, align 8 + %add38 = add nsw i32 %add24, %add31 + %13 = or i64 %1, 3 + %arrayidx41 = getelementptr inbounds i32* %diff, i64 %13 + %14 = load i32* %arrayidx41, align 4 + %15 = or i64 %1, 7 + %arrayidx44 = getelementptr inbounds i32* %diff, i64 %15 + %16 = load i32* %arrayidx44, align 4 + %add45 = add nsw i32 %16, %14 + %arrayidx48 = getelementptr inbounds [8 x [8 x i32]]* %m2, i64 0, i64 %indvars.iv, i64 3 + store i32 %add45, i32* %arrayidx48, align 4 + %add52 = add nsw i32 %add38, %add45 + %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1 + %exitcond = icmp eq i64 %indvars.iv.next, 8 + br i1 %exitcond, label %for.end, label %for.body + +for.end: ; preds = %for.body + %arraydecay = getelementptr inbounds [8 x [8 x i32]]* %m2, i64 0, i64 0 + call void @ff([8 x i32]* %arraydecay) #1 + ret i32 %add52 +} + +declare void @ff([8 x i32]*) #2 + + diff --git a/test/Transforms/SLPVectorizer/X86/unreachable.ll b/test/Transforms/SLPVectorizer/X86/unreachable.ll new file mode 100644 index 0000000..8d60957 --- /dev/null +++ b/test/Transforms/SLPVectorizer/X86/unreachable.ll @@ -0,0 +1,40 @@ +; RUN: opt < %s -basicaa -slp-vectorizer -S 
-mtriple=x86_64-apple-macosx10.8.0 -mcpu=corei7 + +; Check if the SLPVectorizer does not crash when handling +; unreachable blocks with unscheduleable instructions. + +target datalayout = "e-m:o-i64:64-f80:128-n8:16:32:64-S128" +target triple = "x86_64-apple-macosx10.9.0" + +define void @foo(i32* nocapture %x) #0 { +entry: + br label %bb2 + +bb1: ; an unreachable block + %t3 = getelementptr inbounds i32* %x, i64 4 + %t4 = load i32* %t3, align 4 + %t5 = getelementptr inbounds i32* %x, i64 5 + %t6 = load i32* %t5, align 4 + %bad = fadd float %bad, 0.000000e+00 ; <- an instruction with self dependency, + ; but legal in unreachable code + %t7 = getelementptr inbounds i32* %x, i64 6 + %t8 = load i32* %t7, align 4 + %t9 = getelementptr inbounds i32* %x, i64 7 + %t10 = load i32* %t9, align 4 + br label %bb2 + +bb2: + %t1.0 = phi i32 [ %t4, %bb1 ], [ 2, %entry ] + %t2.0 = phi i32 [ %t6, %bb1 ], [ 2, %entry ] + %t3.0 = phi i32 [ %t8, %bb1 ], [ 2, %entry ] + %t4.0 = phi i32 [ %t10, %bb1 ], [ 2, %entry ] + store i32 %t1.0, i32* %x, align 4 + %t12 = getelementptr inbounds i32* %x, i64 1 + store i32 %t2.0, i32* %t12, align 4 + %t13 = getelementptr inbounds i32* %x, i64 2 + store i32 %t3.0, i32* %t13, align 4 + %t14 = getelementptr inbounds i32* %x, i64 3 + store i32 %t4.0, i32* %t14, align 4 + ret void +} + |