author    Stephen Hines <srhines@google.com>  2014-07-21 00:45:20 -0700
committer Stephen Hines <srhines@google.com>  2014-07-25 00:48:57 -0700
commit    cd81d94322a39503e4a3e87b6ee03d4fcb3465fb (patch)
tree      81b7dd2bb4370a392f31d332a566c903b5744764 /test/CodeGen/X86
parent    0c5f13c0c4499eaf42ab5e9e2ceabd4e20e36861 (diff)
Update LLVM for rebase to r212749.
Includes a cherry-pick of: r212948 - fixes a small issue with atomic calls

Change-Id: Ib97bd980b59f18142a69506400911a6009d9df18
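Much of the mechanical churn in the atomics tests below comes from the updated cmpxchg instruction, which now returns a { value, success } pair instead of the bare loaded value. A minimal sketch of the migration pattern repeated throughout this diff (%ptr, %old and %new are hypothetical names):

    ; old form (no longer valid IR): cmpxchg returned the loaded value directly
    ;   %r = cmpxchg i64* %ptr, i64 %old, i64 %new monotonic monotonic
    ; new form: cmpxchg returns a { i64, i1 } pair; tests extract field 0
    %pair = cmpxchg i64* %ptr, i64 %old, i64 %new monotonic monotonic
    %r = extractvalue { i64, i1 } %pair, 0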
Diffstat (limited to 'test/CodeGen/X86')
-rw-r--r-- test/CodeGen/X86/2007-05-05-Personality.ll | 6
-rw-r--r-- test/CodeGen/X86/2007-09-18-ShuffleXformBug.ll | 30
-rw-r--r-- test/CodeGen/X86/2008-03-12-ThreadLocalAlias.ll | 2
-rw-r--r-- test/CodeGen/X86/2009-06-03-Win64SpillXMM.ll | 4
-rw-r--r-- test/CodeGen/X86/2010-01-08-Atomic64Bug.ll | 4
-rw-r--r-- test/CodeGen/X86/2010-06-01-DeadArg-DbgInfo.ll | 2
-rw-r--r-- test/CodeGen/X86/2010-10-08-cmpxchg8b.ll | 3
-rw-r--r-- test/CodeGen/X86/2011-01-24-DbgValue-Before-Use.ll | 2
-rw-r--r-- test/CodeGen/X86/2012-11-30-misched-dbg.ll | 18
-rw-r--r-- test/CodeGen/X86/2014-05-29-factorial.ll | 24
-rw-r--r-- test/CodeGen/X86/2014-05-30-CombineAddNSW.ll | 20
-rw-r--r-- test/CodeGen/X86/Atomics-64.ll | 62
-rw-r--r-- test/CodeGen/X86/GC/lit.local.cfg | 3
-rw-r--r-- test/CodeGen/X86/aliases.ll | 33
-rw-r--r-- test/CodeGen/X86/atom-fixup-lea4.ll | 23
-rw-r--r-- test/CodeGen/X86/atomic-load-store-wide.ll | 2
-rw-r--r-- test/CodeGen/X86/atomic-minmax-i6432.ll | 65
-rw-r--r-- test/CodeGen/X86/atomic-ops-ancient-64.ll | 43
-rw-r--r-- test/CodeGen/X86/atomic128.ll | 316
-rw-r--r-- test/CodeGen/X86/atomic16.ll | 77
-rw-r--r-- test/CodeGen/X86/atomic32.ll | 80
-rw-r--r-- test/CodeGen/X86/atomic64.ll | 41
-rw-r--r-- test/CodeGen/X86/atomic6432.ll | 92
-rw-r--r-- test/CodeGen/X86/atomic8.ll | 79
-rw-r--r-- test/CodeGen/X86/atomic_op.ll | 21
-rw-r--r-- test/CodeGen/X86/avx-blend.ll | 44
-rw-r--r-- test/CodeGen/X86/avx-intel-ocl.ll | 62
-rw-r--r-- test/CodeGen/X86/avx-intrinsics-x86.ll | 24
-rw-r--r-- test/CodeGen/X86/avx-shuffle.ll | 8
-rw-r--r-- test/CodeGen/X86/avx-splat.ll | 9
-rw-r--r-- test/CodeGen/X86/avx-vperm2f128.ll | 2
-rw-r--r-- test/CodeGen/X86/avx-vshufp.ll | 10
-rw-r--r-- test/CodeGen/X86/avx2-shuffle.ll | 18
-rw-r--r-- test/CodeGen/X86/avx512-cvt.ll | 8
-rw-r--r-- test/CodeGen/X86/avx512-inc-dec.ll | 13
-rw-r--r-- test/CodeGen/X86/avx512-intrinsics.ll | 68
-rw-r--r-- test/CodeGen/X86/avx512-nontemporal.ll | 19
-rw-r--r-- test/CodeGen/X86/avx512-shuffle.ll | 62
-rw-r--r-- test/CodeGen/X86/bswap-vector.ll | 29
-rw-r--r-- test/CodeGen/X86/cmp.ll | 13
-rw-r--r-- test/CodeGen/X86/cmpxchg-i1.ll | 87
-rw-r--r-- test/CodeGen/X86/cmpxchg-i128-i1.ll | 83
-rw-r--r-- test/CodeGen/X86/coalescer-remat.ll | 3
-rw-r--r-- test/CodeGen/X86/coff-comdat.ll | 92
-rw-r--r-- test/CodeGen/X86/coff-comdat2.ll | 9
-rw-r--r-- test/CodeGen/X86/coff-comdat3.ll | 8
-rw-r--r-- test/CodeGen/X86/combine-64bit-vec-binop.ll | 273
-rw-r--r-- test/CodeGen/X86/combine-or.ll | 8
-rw-r--r-- test/CodeGen/X86/combine-vec-shuffle-2.ll | 164
-rw-r--r-- test/CodeGen/X86/computeKnownBits_urem.ll | 14
-rw-r--r-- test/CodeGen/X86/cvt16.ll | 64
-rw-r--r-- test/CodeGen/X86/dagcombine-and-setcc.ll | 47
-rw-r--r-- test/CodeGen/X86/dbg-changes-codegen-branch-folding.ll | 169
-rw-r--r-- test/CodeGen/X86/dllexport-x86_64.ll | 2
-rw-r--r-- test/CodeGen/X86/elf-comdat.ll | 11
-rw-r--r-- test/CodeGen/X86/elf-comdat2.ll | 12
-rw-r--r-- test/CodeGen/X86/fast-isel-args-fail2.ll | 10
-rw-r--r-- test/CodeGen/X86/fast-isel-args.ll | 24
-rw-r--r-- test/CodeGen/X86/fast-isel-branch_weights.ll | 19
-rw-r--r-- test/CodeGen/X86/fast-isel-cmp-branch2.ll | 294
-rw-r--r-- test/CodeGen/X86/fast-isel-cmp-branch3.ll | 470
-rw-r--r-- test/CodeGen/X86/fast-isel-cmp.ll | 689
-rw-r--r-- test/CodeGen/X86/fast-isel-fold-mem.ll | 12
-rw-r--r-- test/CodeGen/X86/fast-isel-select-cmov.ll | 62
-rw-r--r-- test/CodeGen/X86/fast-isel-select-cmov2.ll | 255
-rw-r--r-- test/CodeGen/X86/fast-isel-select-cmp.ll | 50
-rw-r--r-- test/CodeGen/X86/fast-isel-select-pseudo-cmov.ll | 138
-rw-r--r-- test/CodeGen/X86/fast-isel-select-sse.ll | 391
-rw-r--r-- test/CodeGen/X86/fast-isel-select.ll | 4
-rw-r--r-- test/CodeGen/X86/fast-isel-sse12-fptoint.ll | 54
-rw-r--r-- test/CodeGen/X86/float-asmprint.ll | 5
-rw-r--r-- test/CodeGen/X86/frameaddr.ll | 44
-rw-r--r-- test/CodeGen/X86/gcc_except_table.ll | 10
-rw-r--r-- test/CodeGen/X86/haddsub-2.ll | 802
-rw-r--r-- test/CodeGen/X86/haddsub-undef.ll | 325
-rw-r--r-- test/CodeGen/X86/i8-umulo.ll | 24
-rw-r--r-- test/CodeGen/X86/jump_table_alias.ll | 33
-rw-r--r-- test/CodeGen/X86/jump_table_bitcast.ll | 46
-rw-r--r-- test/CodeGen/X86/jump_tables.ll | 272
-rw-r--r-- test/CodeGen/X86/libcall-sret.ll | 28
-rw-r--r-- test/CodeGen/X86/lit.local.cfg | 3
-rw-r--r-- test/CodeGen/X86/lower-bitcast.ll | 103
-rw-r--r-- test/CodeGen/X86/macho-comdat.ll | 6
-rw-r--r-- test/CodeGen/X86/null-streamer.ll | 18
-rw-r--r-- test/CodeGen/X86/pr20020.ll | 73
-rw-r--r-- test/CodeGen/X86/pr20088.ll | 9
-rw-r--r-- test/CodeGen/X86/pr5145.ll | 16
-rw-r--r-- test/CodeGen/X86/pshufd-combine-crash.ll | 14
-rw-r--r-- test/CodeGen/X86/rdpmc.ll | 22
-rw-r--r-- test/CodeGen/X86/shift-parts.ll | 4
-rw-r--r-- test/CodeGen/X86/sqrt.ll | 26
-rw-r--r-- test/CodeGen/X86/sse2-intrinsics-x86.ll | 27
-rw-r--r-- test/CodeGen/X86/sse3-avx-addsub-2.ll | 318
-rw-r--r-- test/CodeGen/X86/sse3-avx-addsub.ll | 296
-rw-r--r-- test/CodeGen/X86/sse41-blend.ll | 18
-rw-r--r-- test/CodeGen/X86/sse41.ll | 22
-rw-r--r-- test/CodeGen/X86/stackmap-fast-isel.ll | 165
-rw-r--r-- test/CodeGen/X86/stackmap-liveness.ll | 153
-rw-r--r-- test/CodeGen/X86/swizzle-2.ll | 515
-rw-r--r-- test/CodeGen/X86/swizzle-avx2.ll | 91
-rw-r--r-- test/CodeGen/X86/testb-je-fusion.ll | 20
-rw-r--r-- test/CodeGen/X86/vec_cast2.ll | 27
-rw-r--r-- test/CodeGen/X86/vec_splat.ll | 20
-rw-r--r-- test/CodeGen/X86/vec_split.ll | 33
-rw-r--r-- test/CodeGen/X86/vector-gep.ll | 2
-rw-r--r-- test/CodeGen/X86/vector-idiv.ll | 42
-rw-r--r-- test/CodeGen/X86/vector-shuffle-128-v16.ll | 196
-rw-r--r-- test/CodeGen/X86/vector-shuffle-128-v2.ll | 219
-rw-r--r-- test/CodeGen/X86/vector-shuffle-128-v4.ll | 170
-rw-r--r-- test/CodeGen/X86/vector-shuffle-128-v8.ll | 493
-rw-r--r-- test/CodeGen/X86/vector-shuffle-combining.ll | 119
-rw-r--r-- test/CodeGen/X86/vselect.ll | 14
-rw-r--r-- test/CodeGen/X86/widen_cast-4.ll | 25
-rw-r--r-- test/CodeGen/X86/widen_cast-6.ll | 6
-rw-r--r-- test/CodeGen/X86/widen_conversions.ll | 18
-rw-r--r-- test/CodeGen/X86/widen_shuffle-1.ll | 4
-rw-r--r-- test/CodeGen/X86/win64_eh.ll | 170
-rw-r--r-- test/CodeGen/X86/x86-64-double-shifts-Oz-Os-O2.ll | 6
-rw-r--r-- test/CodeGen/X86/x86-64-frameaddr.ll | 15
-rw-r--r-- test/CodeGen/X86/x86-64-static-relo-movl.ll | 24
-rw-r--r-- test/CodeGen/X86/x86-frameaddr.ll | 9
-rw-r--r-- test/CodeGen/X86/x86-frameaddr2.ll | 9
-rw-r--r-- test/CodeGen/X86/x86-upgrade-avx-vbroadcast.ll | 41
-rw-r--r-- test/CodeGen/X86/xaluo.ll | 743
124 files changed, 10256 insertions, 616 deletions
diff --git a/test/CodeGen/X86/2007-05-05-Personality.ll b/test/CodeGen/X86/2007-05-05-Personality.ll
index 5b8fe72..b99c58c 100644
--- a/test/CodeGen/X86/2007-05-05-Personality.ll
+++ b/test/CodeGen/X86/2007-05-05-Personality.ll
@@ -1,12 +1,14 @@
; RUN: llc < %s -mtriple=i686-pc-linux-gnu -o - | FileCheck %s --check-prefix=LIN
-; RUN: llc < %s -mtriple=x86_64-pc-windows-gnu -o - | FileCheck %s --check-prefix=LIN
; RUN: llc < %s -mtriple=i386-pc-mingw32 -o - | FileCheck %s --check-prefix=WIN
; RUN: llc < %s -mtriple=i686-pc-windows-gnu -o - | FileCheck %s --check-prefix=WIN
+; RUN: llc < %s -mtriple=x86_64-pc-windows-gnu -o - | FileCheck %s --check-prefix=WIN64
; LIN: .cfi_personality 0, __gnat_eh_personality
; LIN: .cfi_lsda 0, .Lexception0
; WIN: .cfi_personality 0, ___gnat_eh_personality
; WIN: .cfi_lsda 0, Lexception0
+; WIN64: .seh_handler __gnat_eh_personality
+; WIN64: .seh_handlerdata
@error = external global i8
@@ -15,7 +17,7 @@ entry:
invoke void @raise()
to label %eh_then unwind label %unwind
-unwind: ; preds = %entry
+unwind: ; preds = %entry
%eh_ptr = landingpad { i8*, i32 } personality i8* bitcast (i32 (...)* @__gnat_eh_personality to i8*)
catch i8* @error
%eh_select = extractvalue { i8*, i32 } %eh_ptr, 1
diff --git a/test/CodeGen/X86/2007-09-18-ShuffleXformBug.ll b/test/CodeGen/X86/2007-09-18-ShuffleXformBug.ll
deleted file mode 100644
index 0ae1897..0000000
--- a/test/CodeGen/X86/2007-09-18-ShuffleXformBug.ll
+++ /dev/null
@@ -1,30 +0,0 @@
-; RUN: llc < %s -march=x86 -mattr=+sse2 | grep -- -86
-
-define i16 @f(<4 x float>* %tmp116117.i1061.i) nounwind {
-entry:
- alloca [4 x <4 x float>] ; <[4 x <4 x float>]*>:0 [#uses=167]
- alloca [4 x <4 x float>] ; <[4 x <4 x float>]*>:1 [#uses=170]
- alloca [4 x <4 x i32>] ; <[4 x <4 x i32>]*>:2 [#uses=12]
- %.sub6235.i = getelementptr [4 x <4 x float>]* %0, i32 0, i32 0 ; <<4 x float>*> [#uses=76]
- %.sub.i = getelementptr [4 x <4 x float>]* %1, i32 0, i32 0 ; <<4 x float>*> [#uses=59]
-
- %tmp124.i1062.i = getelementptr <4 x float>* %tmp116117.i1061.i, i32 63 ; <<4 x float>*> [#uses=1]
- %tmp125.i1063.i = load <4 x float>* %tmp124.i1062.i ; <<4 x float>> [#uses=5]
- %tmp828.i1077.i = shufflevector <4 x float> %tmp125.i1063.i, <4 x float> undef, <4 x i32> < i32 1, i32 1, i32 1, i32 1 > ; <<4 x float>> [#uses=4]
- %tmp704.i1085.i = load <4 x float>* %.sub6235.i ; <<4 x float>> [#uses=1]
- %tmp712.i1086.i = call <4 x float> @llvm.x86.sse.max.ps( <4 x float> %tmp704.i1085.i, <4 x float> %tmp828.i1077.i ) ; <<4 x float>> [#uses=1]
- store <4 x float> %tmp712.i1086.i, <4 x float>* %.sub.i
-
- %tmp2587.i1145.gep.i = getelementptr [4 x <4 x float>]* %1, i32 0, i32 0, i32 2 ; <float*> [#uses=1]
- %tmp5334.i = load float* %tmp2587.i1145.gep.i ; <float> [#uses=5]
- %tmp2723.i1170.i = insertelement <4 x float> undef, float %tmp5334.i, i32 2 ; <<4 x float>> [#uses=5]
- store <4 x float> %tmp2723.i1170.i, <4 x float>* %.sub6235.i
-
- %tmp1406.i1367.i = shufflevector <4 x float> %tmp2723.i1170.i, <4 x float> undef, <4 x i32> < i32 2, i32 2, i32 2, i32 2 > ; <<4 x float>> [#uses=1]
- %tmp84.i1413.i = load <4 x float>* %.sub6235.i ; <<4 x float>> [#uses=1]
- %tmp89.i1415.i = fmul <4 x float> %tmp84.i1413.i, %tmp1406.i1367.i ; <<4 x float>> [#uses=1]
- store <4 x float> %tmp89.i1415.i, <4 x float>* %.sub.i
- ret i16 0
-}
-
-declare <4 x float> @llvm.x86.sse.max.ps(<4 x float>, <4 x float>)
diff --git a/test/CodeGen/X86/2008-03-12-ThreadLocalAlias.ll b/test/CodeGen/X86/2008-03-12-ThreadLocalAlias.ll
index e64375a..a0106d7 100644
--- a/test/CodeGen/X86/2008-03-12-ThreadLocalAlias.ll
+++ b/test/CodeGen/X86/2008-03-12-ThreadLocalAlias.ll
@@ -8,7 +8,7 @@ target triple = "i386-pc-linux-gnu"
@__resp = thread_local global %struct.__res_state* @_res ; <%struct.__res_state**> [#uses=1]
@_res = global %struct.__res_state zeroinitializer, section ".bss" ; <%struct.__res_state*> [#uses=1]
-@__libc_resp = hidden alias %struct.__res_state** @__resp ; <%struct.__res_state**> [#uses=2]
+@__libc_resp = hidden thread_local alias %struct.__res_state** @__resp ; <%struct.__res_state**> [#uses=2]
define i32 @foo() {
; CHECK-LABEL: foo:
diff --git a/test/CodeGen/X86/2009-06-03-Win64SpillXMM.ll b/test/CodeGen/X86/2009-06-03-Win64SpillXMM.ll
index 1259cf4..dfb98bb 100644
--- a/test/CodeGen/X86/2009-06-03-Win64SpillXMM.ll
+++ b/test/CodeGen/X86/2009-06-03-Win64SpillXMM.ll
@@ -1,7 +1,7 @@
; RUN: llc -mcpu=generic -mtriple=x86_64-mingw32 < %s | FileCheck %s
; CHECK: subq $40, %rsp
-; CHECK: movaps %xmm8, (%rsp)
-; CHECK: movaps %xmm7, 16(%rsp)
+; CHECK: movaps %xmm8, 16(%rsp)
+; CHECK: movaps %xmm7, (%rsp)
define i32 @a() nounwind {
entry:
diff --git a/test/CodeGen/X86/2010-01-08-Atomic64Bug.ll b/test/CodeGen/X86/2010-01-08-Atomic64Bug.ll
index f9bf310..850f678 100644
--- a/test/CodeGen/X86/2010-01-08-Atomic64Bug.ll
+++ b/test/CodeGen/X86/2010-01-08-Atomic64Bug.ll
@@ -11,9 +11,9 @@ entry:
; CHECK: movl 4([[REG]]), %edx
; CHECK: LBB0_1:
; CHECK: movl %eax, %ebx
-; CHECK: addl {{%[a-z]+}}, %ebx
+; CHECK: addl $1, %ebx
; CHECK: movl %edx, %ecx
-; CHECK: adcl {{%[a-z]+}}, %ecx
+; CHECK: adcl $0, %ecx
; CHECK: lock
; CHECK-NEXT: cmpxchg8b ([[REG]])
; CHECK-NEXT: jne
diff --git a/test/CodeGen/X86/2010-06-01-DeadArg-DbgInfo.ll b/test/CodeGen/X86/2010-06-01-DeadArg-DbgInfo.ll
index b45ac22..4181c26 100644
--- a/test/CodeGen/X86/2010-06-01-DeadArg-DbgInfo.ll
+++ b/test/CodeGen/X86/2010-06-01-DeadArg-DbgInfo.ll
@@ -24,7 +24,7 @@ declare void @llvm.dbg.value(metadata, i64, metadata) nounwind readnone
!llvm.dbg.lv = !{!0, !14, !15, !16, !17, !24, !25, !28}
!0 = metadata !{i32 786689, metadata !1, metadata !"this", metadata !3, i32 11, metadata !12, i32 0, null} ; [ DW_TAG_arg_variable ]
-!1 = metadata !{i32 786478, metadata !31, metadata !2, metadata !"bar", metadata !"bar", metadata !"_ZN3foo3barEi", i32 11, metadata !9, i1 false, i1 true, i32 0, i32 0, null, i32 0, i1 true, i32 (%struct.foo*, i32)* @_ZN3foo3bazEi, null, null, null, i32 11} ; [ DW_TAG_subprogram ]
+!1 = metadata !{i32 786478, metadata !31, metadata !2, metadata !"bar", metadata !"bar", metadata !"_ZN3foo3barEi", i32 11, metadata !9, i1 false, i1 true, i32 0, i32 0, null, i32 0, i1 true, i32 (%struct.foo*, i32)* null, null, null, null, i32 11} ; [ DW_TAG_subprogram ]
!2 = metadata !{i32 786451, metadata !31, metadata !3, metadata !"foo", i32 3, i64 32, i64 32, i64 0, i32 0, null, metadata !5, i32 0, null, null, null} ; [ DW_TAG_structure_type ] [foo] [line 3, size 32, align 32, offset 0] [def] [from ]
!3 = metadata !{i32 786473, metadata !31} ; [ DW_TAG_file_type ]
!4 = metadata !{i32 786449, metadata !31, i32 4, metadata !"4.2.1 LLVM build", i1 true, metadata !"", i32 0, metadata !32, metadata !32, metadata !33, null, null, metadata !""} ; [ DW_TAG_compile_unit ]
diff --git a/test/CodeGen/X86/2010-10-08-cmpxchg8b.ll b/test/CodeGen/X86/2010-10-08-cmpxchg8b.ll
index f69cedc..ebf51a5 100644
--- a/test/CodeGen/X86/2010-10-08-cmpxchg8b.ll
+++ b/test/CodeGen/X86/2010-10-08-cmpxchg8b.ll
@@ -18,7 +18,8 @@ entry:
loop:
; CHECK: lock
; CHECK-NEXT: cmpxchg8b
- %r = cmpxchg i64* %ptr, i64 0, i64 1 monotonic monotonic
+ %pair = cmpxchg i64* %ptr, i64 0, i64 1 monotonic monotonic
+ %r = extractvalue { i64, i1 } %pair, 0
%stored1 = icmp eq i64 %r, 0
br i1 %stored1, label %loop, label %continue
continue:
diff --git a/test/CodeGen/X86/2011-01-24-DbgValue-Before-Use.ll b/test/CodeGen/X86/2011-01-24-DbgValue-Before-Use.ll
index f016528..625a351 100644
--- a/test/CodeGen/X86/2011-01-24-DbgValue-Before-Use.ll
+++ b/test/CodeGen/X86/2011-01-24-DbgValue-Before-Use.ll
@@ -8,11 +8,11 @@ target triple = "x86_64-apple-darwin10.0.0"
; CHECK: DW_TAG_subprogram
; CHECK: DW_TAG_variable
; CHECK: DW_TAG_variable
+; CHECK-NEXT: DW_AT_location
; CHECK-NEXT: DW_AT_name {{.*}} "z_s"
; CHECK-NEXT: DW_AT_decl_file
; CHECK-NEXT: DW_AT_decl_line
; CHECK-NEXT: DW_AT_type{{.*}}{[[TYPE:.*]]}
-; CHECK-NEXT: DW_AT_location
; CHECK: [[TYPE]]:
; CHECK-NEXT: DW_AT_name {{.*}} "int"
diff --git a/test/CodeGen/X86/2012-11-30-misched-dbg.ll b/test/CodeGen/X86/2012-11-30-misched-dbg.ll
index 650839a..36667de 100644
--- a/test/CodeGen/X86/2012-11-30-misched-dbg.ll
+++ b/test/CodeGen/X86/2012-11-30-misched-dbg.ll
@@ -69,15 +69,15 @@ declare i32 @__sprintf_chk(i8*, i32, i64, i8*, ...)
!1 = metadata !{metadata !2}
!2 = metadata !{}
!4 = metadata !{i32 786688, metadata !5, metadata !"num1", metadata !14, i32 815, metadata !15, i32 0, i32 0} ; [ DW_TAG_auto_variable ] [num1] [line 815]
-!5 = metadata !{i32 786443, metadata !6, i32 815, i32 0, metadata !14, i32 177} ; [ DW_TAG_lexical_block ] [MultiSource/Benchmarks/MiBench/consumer-typeset/z19.c]
-!6 = metadata !{i32 786443, metadata !7, i32 812, i32 0, metadata !14, i32 176} ; [ DW_TAG_lexical_block ] [MultiSource/Benchmarks/MiBench/consumer-typeset/z19.c]
-!7 = metadata !{i32 786443, metadata !8, i32 807, i32 0, metadata !14, i32 175} ; [ DW_TAG_lexical_block ] [MultiSource/Benchmarks/MiBench/consumer-typeset/z19.c]
-!8 = metadata !{i32 786443, metadata !9, i32 440, i32 0, metadata !14, i32 94} ; [ DW_TAG_lexical_block ] [MultiSource/Benchmarks/MiBench/consumer-typeset/z19.c]
-!9 = metadata !{i32 786443, metadata !10, i32 435, i32 0, metadata !14, i32 91} ; [ DW_TAG_lexical_block ] [MultiSource/Benchmarks/MiBench/consumer-typeset/z19.c]
-!10 = metadata !{i32 786443, metadata !11, i32 434, i32 0, metadata !14, i32 90} ; [ DW_TAG_lexical_block ] [MultiSource/Benchmarks/MiBench/consumer-typeset/z19.c]
-!11 = metadata !{i32 786443, metadata !12, i32 250, i32 0, metadata !14, i32 24} ; [ DW_TAG_lexical_block ] [MultiSource/Benchmarks/MiBench/consumer-typeset/z19.c]
-!12 = metadata !{i32 786443, metadata !13, i32 249, i32 0, metadata !14, i32 23} ; [ DW_TAG_lexical_block ] [MultiSource/Benchmarks/MiBench/consumer-typeset/z19.c]
-!13 = metadata !{i32 786443, metadata !2, i32 221, i32 0, metadata !14, i32 19} ; [ DW_TAG_lexical_block ] [MultiSource/Benchmarks/MiBench/consumer-typeset/z19.c]
+!5 = metadata !{i32 786443, metadata !14, metadata !6, i32 815, i32 0, i32 177} ; [ DW_TAG_lexical_block ] [MultiSource/Benchmarks/MiBench/consumer-typeset/z19.c]
+!6 = metadata !{i32 786443, metadata !14, metadata !7, i32 812, i32 0, i32 176} ; [ DW_TAG_lexical_block ] [MultiSource/Benchmarks/MiBench/consumer-typeset/z19.c]
+!7 = metadata !{i32 786443, metadata !14, metadata !8, i32 807, i32 0, i32 175} ; [ DW_TAG_lexical_block ] [MultiSource/Benchmarks/MiBench/consumer-typeset/z19.c]
+!8 = metadata !{i32 786443, metadata !14, metadata !9, i32 440, i32 0, i32 94} ; [ DW_TAG_lexical_block ] [MultiSource/Benchmarks/MiBench/consumer-typeset/z19.c]
+!9 = metadata !{i32 786443, metadata !14, metadata !10, i32 435, i32 0, i32 91} ; [ DW_TAG_lexical_block ] [MultiSource/Benchmarks/MiBench/consumer-typeset/z19.c]
+!10 = metadata !{i32 786443, metadata !14, metadata !11, i32 434, i32 0, i32 90} ; [ DW_TAG_lexical_block ] [MultiSource/Benchmarks/MiBench/consumer-typeset/z19.c]
+!11 = metadata !{i32 786443, metadata !14, metadata !12, i32 250, i32 0, i32 24} ; [ DW_TAG_lexical_block ] [MultiSource/Benchmarks/MiBench/consumer-typeset/z19.c]
+!12 = metadata !{i32 786443, metadata !14, metadata !13, i32 249, i32 0, i32 23} ; [ DW_TAG_lexical_block ] [MultiSource/Benchmarks/MiBench/consumer-typeset/z19.c]
+!13 = metadata !{i32 786443, metadata !14, metadata !2, i32 221, i32 0, i32 19} ; [ DW_TAG_lexical_block ] [MultiSource/Benchmarks/MiBench/consumer-typeset/z19.c]
!14 = metadata !{i32 786473, metadata !19} ; [ DW_TAG_file_type ]
!15 = metadata !{i32 786433, null, null, metadata !"", i32 0, i64 160, i64 8, i32 0, i32 0, metadata !16, metadata !17, i32 0, null, null, null} ; [ DW_TAG_array_type ] [line 0, size 160, align 8, offset 0] [from char]
!16 = metadata !{i32 786468, null, null, metadata !"char", i32 0, i64 8, i64 8, i64 0, i32 0, i32 6} ; [ DW_TAG_base_type ] [char] [line 0, size 8, align 8, offset 0, enc DW_ATE_signed_char]
diff --git a/test/CodeGen/X86/2014-05-29-factorial.ll b/test/CodeGen/X86/2014-05-29-factorial.ll
new file mode 100644
index 0000000..987a21d
--- /dev/null
+++ b/test/CodeGen/X86/2014-05-29-factorial.ll
@@ -0,0 +1,24 @@
+; RUN: llc < %s -march=x86-64 | FileCheck %s
+; CHECK: decq [[X:%rdi|%rcx]]
+; CHECK-NOT: testq [[X]], [[X]]
+
+define i64 @fact2(i64 %x) {
+entry:
+ br label %while.body
+
+while.body:
+ %result.06 = phi i64 [ %mul, %while.body ], [ 1, %entry ]
+ %x.addr.05 = phi i64 [ %dec, %while.body ], [ %x, %entry ]
+ %mul = mul nsw i64 %result.06, %x.addr.05
+ %dec = add nsw i64 %x.addr.05, -1
+ %cmp = icmp sgt i64 %dec, 0
+ br i1 %cmp, label %while.body, label %while.end.loopexit
+
+while.end.loopexit:
+ %mul.lcssa = phi i64 [ %mul, %while.body ]
+ br label %while.end
+
+while.end:
+ %result.0.lcssa = phi i64 [ %mul.lcssa, %while.end.loopexit ]
+ ret i64 %result.0.lcssa
+}
diff --git a/test/CodeGen/X86/2014-05-30-CombineAddNSW.ll b/test/CodeGen/X86/2014-05-30-CombineAddNSW.ll
new file mode 100644
index 0000000..4580795
--- /dev/null
+++ b/test/CodeGen/X86/2014-05-30-CombineAddNSW.ll
@@ -0,0 +1,20 @@
+; RUN: llc < %s -march=x86-64 | FileCheck %s
+; CHECK: addl
+
+; The two additions are the same, but have different flags.
+; In theory this code should never be generated by the frontend, but this
+; test checks that two otherwise identical instructions with different
+; flags actually produce two different nodes.
+;
+; Without the flags, the combiner would see that the two operands of the
+; sub are identical and optimize the result into a register clear
+; (the final result would be 0). With the different flags, though, the
+; combiner needs to keep both the add and the sub nodes: the two adds
+; are distinct nodes, so it cannot assume that subtracting one from the
+; other yields 0.
+define i32 @foo(i32 %a, i32 %b) {
+ %1 = add i32 %a, %b
+ %2 = add nsw i32 %a, %b
+ %3 = sub i32 %1, %2
+ ret i32 %3
+}
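For contrast, a hedged sketch (not part of this commit) of the same function with matching flags on both additions; the two adds then CSE to a single node, so the combiner is free to fold the sub to zero (@foo_same_flags is a hypothetical name):

    define i32 @foo_same_flags(i32 %a, i32 %b) {
      %1 = add i32 %a, %b
      %2 = add i32 %a, %b   ; identical to %1, so both become one node
      %3 = sub i32 %1, %2   ; the combiner can fold this to 0
      ret i32 %3
    }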
diff --git a/test/CodeGen/X86/Atomics-64.ll b/test/CodeGen/X86/Atomics-64.ll
index c274688..c392e94 100644
--- a/test/CodeGen/X86/Atomics-64.ll
+++ b/test/CodeGen/X86/Atomics-64.ll
@@ -1,5 +1,5 @@
; RUN: llc < %s -march=x86-64 > %t.x86-64
-; RUN: llc < %s -march=x86 > %t.x86
+; RUN: llc < %s -march=x86 -mattr=cx16 > %t.x86
target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128"
target triple = "x86_64-apple-darwin8"
@@ -704,7 +704,8 @@ entry:
%3 = zext i8 %2 to i32
%4 = trunc i32 %3 to i8
%5 = trunc i32 %1 to i8
- %6 = cmpxchg i8* @sc, i8 %4, i8 %5 monotonic monotonic
+ %pair6 = cmpxchg i8* @sc, i8 %4, i8 %5 monotonic monotonic
+ %6 = extractvalue { i8, i1 } %pair6, 0
store i8 %6, i8* @sc, align 1
%7 = load i8* @sc, align 1
%8 = zext i8 %7 to i32
@@ -712,7 +713,8 @@ entry:
%10 = zext i8 %9 to i32
%11 = trunc i32 %10 to i8
%12 = trunc i32 %8 to i8
- %13 = cmpxchg i8* @uc, i8 %11, i8 %12 monotonic monotonic
+ %pair13 = cmpxchg i8* @uc, i8 %11, i8 %12 monotonic monotonic
+ %13 = extractvalue { i8, i1 } %pair13, 0
store i8 %13, i8* @uc, align 1
%14 = load i8* @sc, align 1
%15 = sext i8 %14 to i16
@@ -722,7 +724,8 @@ entry:
%19 = bitcast i8* bitcast (i16* @ss to i8*) to i16*
%20 = trunc i32 %18 to i16
%21 = trunc i32 %16 to i16
- %22 = cmpxchg i16* %19, i16 %20, i16 %21 monotonic monotonic
+ %pair22 = cmpxchg i16* %19, i16 %20, i16 %21 monotonic monotonic
+ %22 = extractvalue { i16, i1 } %pair22, 0
store i16 %22, i16* @ss, align 2
%23 = load i8* @sc, align 1
%24 = sext i8 %23 to i16
@@ -732,49 +735,56 @@ entry:
%28 = bitcast i8* bitcast (i16* @us to i8*) to i16*
%29 = trunc i32 %27 to i16
%30 = trunc i32 %25 to i16
- %31 = cmpxchg i16* %28, i16 %29, i16 %30 monotonic monotonic
+ %pair31 = cmpxchg i16* %28, i16 %29, i16 %30 monotonic monotonic
+ %31 = extractvalue { i16, i1 } %pair31, 0
store i16 %31, i16* @us, align 2
%32 = load i8* @sc, align 1
%33 = sext i8 %32 to i32
%34 = load i8* @uc, align 1
%35 = zext i8 %34 to i32
%36 = bitcast i8* bitcast (i32* @si to i8*) to i32*
- %37 = cmpxchg i32* %36, i32 %35, i32 %33 monotonic monotonic
+ %pair37 = cmpxchg i32* %36, i32 %35, i32 %33 monotonic monotonic
+ %37 = extractvalue { i32, i1 } %pair37, 0
store i32 %37, i32* @si, align 4
%38 = load i8* @sc, align 1
%39 = sext i8 %38 to i32
%40 = load i8* @uc, align 1
%41 = zext i8 %40 to i32
%42 = bitcast i8* bitcast (i32* @ui to i8*) to i32*
- %43 = cmpxchg i32* %42, i32 %41, i32 %39 monotonic monotonic
+ %pair43 = cmpxchg i32* %42, i32 %41, i32 %39 monotonic monotonic
+ %43 = extractvalue { i32, i1 } %pair43, 0
store i32 %43, i32* @ui, align 4
%44 = load i8* @sc, align 1
%45 = sext i8 %44 to i64
%46 = load i8* @uc, align 1
%47 = zext i8 %46 to i64
%48 = bitcast i8* bitcast (i64* @sl to i8*) to i64*
- %49 = cmpxchg i64* %48, i64 %47, i64 %45 monotonic monotonic
+ %pair49 = cmpxchg i64* %48, i64 %47, i64 %45 monotonic monotonic
+ %49 = extractvalue { i64, i1 } %pair49, 0
store i64 %49, i64* @sl, align 8
%50 = load i8* @sc, align 1
%51 = sext i8 %50 to i64
%52 = load i8* @uc, align 1
%53 = zext i8 %52 to i64
%54 = bitcast i8* bitcast (i64* @ul to i8*) to i64*
- %55 = cmpxchg i64* %54, i64 %53, i64 %51 monotonic monotonic
+ %pair55 = cmpxchg i64* %54, i64 %53, i64 %51 monotonic monotonic
+ %55 = extractvalue { i64, i1 } %pair55, 0
store i64 %55, i64* @ul, align 8
%56 = load i8* @sc, align 1
%57 = sext i8 %56 to i64
%58 = load i8* @uc, align 1
%59 = zext i8 %58 to i64
%60 = bitcast i8* bitcast (i64* @sll to i8*) to i64*
- %61 = cmpxchg i64* %60, i64 %59, i64 %57 monotonic monotonic
+ %pair61 = cmpxchg i64* %60, i64 %59, i64 %57 monotonic monotonic
+ %61 = extractvalue { i64, i1 } %pair61, 0
store i64 %61, i64* @sll, align 8
%62 = load i8* @sc, align 1
%63 = sext i8 %62 to i64
%64 = load i8* @uc, align 1
%65 = zext i8 %64 to i64
%66 = bitcast i8* bitcast (i64* @ull to i8*) to i64*
- %67 = cmpxchg i64* %66, i64 %65, i64 %63 monotonic monotonic
+ %pair67 = cmpxchg i64* %66, i64 %65, i64 %63 monotonic monotonic
+ %67 = extractvalue { i64, i1 } %pair67, 0
store i64 %67, i64* @ull, align 8
%68 = load i8* @sc, align 1
%69 = zext i8 %68 to i32
@@ -782,7 +792,8 @@ entry:
%71 = zext i8 %70 to i32
%72 = trunc i32 %71 to i8
%73 = trunc i32 %69 to i8
- %74 = cmpxchg i8* @sc, i8 %72, i8 %73 monotonic monotonic
+ %pair74 = cmpxchg i8* @sc, i8 %72, i8 %73 monotonic monotonic
+ %74 = extractvalue { i8, i1 } %pair74, 0
%75 = icmp eq i8 %74, %72
%76 = zext i1 %75 to i8
%77 = zext i8 %76 to i32
@@ -793,7 +804,8 @@ entry:
%81 = zext i8 %80 to i32
%82 = trunc i32 %81 to i8
%83 = trunc i32 %79 to i8
- %84 = cmpxchg i8* @uc, i8 %82, i8 %83 monotonic monotonic
+ %pair84 = cmpxchg i8* @uc, i8 %82, i8 %83 monotonic monotonic
+ %84 = extractvalue { i8, i1 } %pair84, 0
%85 = icmp eq i8 %84, %82
%86 = zext i1 %85 to i8
%87 = zext i8 %86 to i32
@@ -805,7 +817,8 @@ entry:
%92 = zext i8 %91 to i32
%93 = trunc i32 %92 to i8
%94 = trunc i32 %90 to i8
- %95 = cmpxchg i8* bitcast (i16* @ss to i8*), i8 %93, i8 %94 monotonic monotonic
+ %pair95 = cmpxchg i8* bitcast (i16* @ss to i8*), i8 %93, i8 %94 monotonic monotonic
+ %95 = extractvalue { i8, i1 } %pair95, 0
%96 = icmp eq i8 %95, %93
%97 = zext i1 %96 to i8
%98 = zext i8 %97 to i32
@@ -817,7 +830,8 @@ entry:
%103 = zext i8 %102 to i32
%104 = trunc i32 %103 to i8
%105 = trunc i32 %101 to i8
- %106 = cmpxchg i8* bitcast (i16* @us to i8*), i8 %104, i8 %105 monotonic monotonic
+ %pair106 = cmpxchg i8* bitcast (i16* @us to i8*), i8 %104, i8 %105 monotonic monotonic
+ %106 = extractvalue { i8, i1 } %pair106, 0
%107 = icmp eq i8 %106, %104
%108 = zext i1 %107 to i8
%109 = zext i8 %108 to i32
@@ -828,7 +842,8 @@ entry:
%113 = zext i8 %112 to i32
%114 = trunc i32 %113 to i8
%115 = trunc i32 %111 to i8
- %116 = cmpxchg i8* bitcast (i32* @si to i8*), i8 %114, i8 %115 monotonic monotonic
+ %pair116 = cmpxchg i8* bitcast (i32* @si to i8*), i8 %114, i8 %115 monotonic monotonic
+ %116 = extractvalue { i8, i1 } %pair116, 0
%117 = icmp eq i8 %116, %114
%118 = zext i1 %117 to i8
%119 = zext i8 %118 to i32
@@ -839,7 +854,8 @@ entry:
%123 = zext i8 %122 to i32
%124 = trunc i32 %123 to i8
%125 = trunc i32 %121 to i8
- %126 = cmpxchg i8* bitcast (i32* @ui to i8*), i8 %124, i8 %125 monotonic monotonic
+ %pair126 = cmpxchg i8* bitcast (i32* @ui to i8*), i8 %124, i8 %125 monotonic monotonic
+ %126 = extractvalue { i8, i1 } %pair126, 0
%127 = icmp eq i8 %126, %124
%128 = zext i1 %127 to i8
%129 = zext i8 %128 to i32
@@ -850,7 +866,8 @@ entry:
%133 = zext i8 %132 to i64
%134 = trunc i64 %133 to i8
%135 = trunc i64 %131 to i8
- %136 = cmpxchg i8* bitcast (i64* @sl to i8*), i8 %134, i8 %135 monotonic monotonic
+ %pair136 = cmpxchg i8* bitcast (i64* @sl to i8*), i8 %134, i8 %135 monotonic monotonic
+ %136 = extractvalue { i8, i1 } %pair136, 0
%137 = icmp eq i8 %136, %134
%138 = zext i1 %137 to i8
%139 = zext i8 %138 to i32
@@ -861,7 +878,8 @@ entry:
%143 = zext i8 %142 to i64
%144 = trunc i64 %143 to i8
%145 = trunc i64 %141 to i8
- %146 = cmpxchg i8* bitcast (i64* @ul to i8*), i8 %144, i8 %145 monotonic monotonic
+ %pair146 = cmpxchg i8* bitcast (i64* @ul to i8*), i8 %144, i8 %145 monotonic monotonic
+ %146 = extractvalue { i8, i1 } %pair146, 0
%147 = icmp eq i8 %146, %144
%148 = zext i1 %147 to i8
%149 = zext i8 %148 to i32
@@ -872,7 +890,8 @@ entry:
%153 = zext i8 %152 to i64
%154 = trunc i64 %153 to i8
%155 = trunc i64 %151 to i8
- %156 = cmpxchg i8* bitcast (i64* @sll to i8*), i8 %154, i8 %155 monotonic monotonic
+ %pair156 = cmpxchg i8* bitcast (i64* @sll to i8*), i8 %154, i8 %155 monotonic monotonic
+ %156 = extractvalue { i8, i1 } %pair156, 0
%157 = icmp eq i8 %156, %154
%158 = zext i1 %157 to i8
%159 = zext i8 %158 to i32
@@ -883,7 +902,8 @@ entry:
%163 = zext i8 %162 to i64
%164 = trunc i64 %163 to i8
%165 = trunc i64 %161 to i8
- %166 = cmpxchg i8* bitcast (i64* @ull to i8*), i8 %164, i8 %165 monotonic monotonic
+ %pair166 = cmpxchg i8* bitcast (i64* @ull to i8*), i8 %164, i8 %165 monotonic monotonic
+ %166 = extractvalue { i8, i1 } %pair166, 0
%167 = icmp eq i8 %166, %164
%168 = zext i1 %167 to i8
%169 = zext i8 %168 to i32
diff --git a/test/CodeGen/X86/GC/lit.local.cfg b/test/CodeGen/X86/GC/lit.local.cfg
index ba763cf..e71f3cc 100644
--- a/test/CodeGen/X86/GC/lit.local.cfg
+++ b/test/CodeGen/X86/GC/lit.local.cfg
@@ -1,4 +1,3 @@
-targets = set(config.root.targets_to_build.split())
-if not 'X86' in targets:
+if not 'X86' in config.root.targets:
config.unsupported = True
diff --git a/test/CodeGen/X86/aliases.ll b/test/CodeGen/X86/aliases.ll
index 8487c60..bf55644 100644
--- a/test/CodeGen/X86/aliases.ll
+++ b/test/CodeGen/X86/aliases.ll
@@ -1,4 +1,20 @@
-; RUN: llc < %s -mtriple=i686-pc-linux-gnu -asm-verbose=false | FileCheck %s
+; RUN: llc < %s -mtriple=i686-pc-linux-gnu -asm-verbose=false \
+; RUN: -relocation-model=pic | FileCheck %s
+
+@thread_var = thread_local global i32 42, align 4
+@thread_alias = thread_local(localdynamic) alias i32* @thread_var
+
+; CHECK-LABEL: get_thread_var
+define i32* @get_thread_var() {
+; CHECK: leal thread_var@TLSGD
+ ret i32* @thread_var
+}
+
+; CHECK-LABEL: get_thread_alias
+define i32* @get_thread_alias() {
+; CHECK: leal thread_alias@TLSLD
+ ret i32* @thread_alias
+}
@bar = global i32 42
@@ -22,7 +38,7 @@ define i32 @foo_f() {
@bar_i = alias internal i32* @bar
; CHECK-DAG: .globl A
-@A = alias i64, i32* @bar
+@A = alias bitcast (i32* @bar to i64*)
; CHECK-DAG: .globl bar_h
; CHECK-DAG: .hidden bar_h
@@ -32,6 +48,19 @@ define i32 @foo_f() {
; CHECK-DAG: .protected bar_p
@bar_p = protected alias i32* @bar
+; CHECK-DAG: test2 = bar+4
+@test2 = alias getelementptr(i32 *@bar, i32 1)
+
+; CHECK-DAG: test3 = 42
+@test3 = alias inttoptr(i32 42 to i32*)
+
+; CHECK-DAG: test4 = bar
+@test4 = alias inttoptr(i64 ptrtoint (i32* @bar to i64) to i32*)
+
+; CHECK-DAG: test5 = test2-bar
+@test5 = alias inttoptr(i32 sub (i32 ptrtoint (i32* @test2 to i32),
+ i32 ptrtoint (i32* @bar to i32)) to i32*)
+
; CHECK-DAG: .globl test
define i32 @test() {
entry:
diff --git a/test/CodeGen/X86/atom-fixup-lea4.ll b/test/CodeGen/X86/atom-fixup-lea4.ll
new file mode 100644
index 0000000..668574b
--- /dev/null
+++ b/test/CodeGen/X86/atom-fixup-lea4.ll
@@ -0,0 +1,23 @@
+; RUN: llc < %s -mcpu=atom -mtriple=x86_64-linux
+
+%struct.ValueWrapper = type { double }
+%struct.ValueWrapper.6 = type { %struct.ValueWrapper.7 }
+%struct.ValueWrapper.7 = type { %struct.ValueWrapper.8 }
+%struct.ValueWrapper.8 = type { %struct.ValueWrapper }
+
+; Function Attrs: uwtable
+define linkonce_odr void @_ZN12ValueWrapperIS_IS_IS_IdEEEEC2Ev(%struct.ValueWrapper.6* %this) unnamed_addr #0 align 2 {
+entry:
+ %this.addr = alloca %struct.ValueWrapper.6*, align 8
+ store %struct.ValueWrapper.6* %this, %struct.ValueWrapper.6** %this.addr, align 8
+ %this1 = load %struct.ValueWrapper.6** %this.addr
+ %value = getelementptr inbounds %struct.ValueWrapper.6* %this1, i32 0, i32 0
+ call void @_ZN12ValueWrapperIS_IS_IdEEEC2Ev(%struct.ValueWrapper.7* %value)
+ ret void
+}
+
+; Function Attrs: uwtable
+declare void @_ZN12ValueWrapperIS_IS_IdEEEC2Ev(%struct.ValueWrapper.7*) unnamed_addr #0 align 2
+
+attributes #0 = { uwtable "less-precise-fpmad"="false" "no-frame-pointer-elim"="true" "no-frame-pointer-elim-non-leaf" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" }
+
diff --git a/test/CodeGen/X86/atomic-load-store-wide.ll b/test/CodeGen/X86/atomic-load-store-wide.ll
index 17e04f0..7352d5a 100644
--- a/test/CodeGen/X86/atomic-load-store-wide.ll
+++ b/test/CodeGen/X86/atomic-load-store-wide.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=x86 -verify-machineinstrs | FileCheck %s
+; RUN: llc < %s -mcpu=corei7 -march=x86 -verify-machineinstrs | FileCheck %s
; 64-bit load/store on x86-32
; FIXME: The generated code can be substantially improved.
diff --git a/test/CodeGen/X86/atomic-minmax-i6432.ll b/test/CodeGen/X86/atomic-minmax-i6432.ll
index 1cfbc49..ffb7a3f 100644
--- a/test/CodeGen/X86/atomic-minmax-i6432.ll
+++ b/test/CodeGen/X86/atomic-minmax-i6432.ll
@@ -1,6 +1,5 @@
-; RUN: llc -march=x86 -mattr=+cmov -mtriple=i386-pc-linux -verify-machineinstrs < %s | FileCheck %s -check-prefix=LINUX
-; RUN: llc -march=x86 -mattr=-cmov -mtriple=i386-pc-linux -verify-machineinstrs < %s | FileCheck %s -check-prefix=NOCMOV
-; RUN: llc -march=x86 -mtriple=i386-macosx -relocation-model=pic -verify-machineinstrs < %s | FileCheck %s -check-prefix=PIC
+; RUN: llc -march=x86 -mattr=+cmov,cx16 -mtriple=i386-pc-linux -verify-machineinstrs < %s | FileCheck %s -check-prefix=LINUX
+; RUN: llc -march=x86 -mattr=cx16 -mtriple=i386-macosx -relocation-model=pic -verify-machineinstrs < %s | FileCheck %s -check-prefix=PIC
@sc64 = external global i64
@@ -9,87 +8,39 @@ define void @atomic_maxmin_i6432() {
%1 = atomicrmw max i64* @sc64, i64 5 acquire
; LINUX: [[LABEL:.LBB[0-9]+_[0-9]+]]
; LINUX: cmpl
-; LINUX: setl
-; LINUX: cmpl
-; LINUX: setl
+; LINUX: seta
; LINUX: cmovne
; LINUX: cmovne
; LINUX: lock
; LINUX-NEXT: cmpxchg8b
; LINUX: jne [[LABEL]]
-; NOCMOV: [[LABEL:.LBB[0-9]+_[0-9]+]]
-; NOCMOV: cmpl
-; NOCMOV: setl
-; NOCMOV: cmpl
-; NOCMOV: setl
-; NOCMOV: jne
-; NOCMOV: jne
-; NOCMOV: lock
-; NOCMOV-NEXT: cmpxchg8b
-; NOCMOV: jne [[LABEL]]
%2 = atomicrmw min i64* @sc64, i64 6 acquire
; LINUX: [[LABEL:.LBB[0-9]+_[0-9]+]]
; LINUX: cmpl
-; LINUX: setg
-; LINUX: cmpl
-; LINUX: setg
+; LINUX: setb
; LINUX: cmovne
; LINUX: cmovne
; LINUX: lock
; LINUX-NEXT: cmpxchg8b
; LINUX: jne [[LABEL]]
-; NOCMOV: [[LABEL:.LBB[0-9]+_[0-9]+]]
-; NOCMOV: cmpl
-; NOCMOV: setg
-; NOCMOV: cmpl
-; NOCMOV: setg
-; NOCMOV: jne
-; NOCMOV: jne
-; NOCMOV: lock
-; NOCMOV-NEXT: cmpxchg8b
-; NOCMOV: jne [[LABEL]]
%3 = atomicrmw umax i64* @sc64, i64 7 acquire
; LINUX: [[LABEL:.LBB[0-9]+_[0-9]+]]
; LINUX: cmpl
-; LINUX: setb
-; LINUX: cmpl
-; LINUX: setb
+; LINUX: seta
; LINUX: cmovne
; LINUX: cmovne
; LINUX: lock
; LINUX-NEXT: cmpxchg8b
; LINUX: jne [[LABEL]]
-; NOCMOV: [[LABEL:.LBB[0-9]+_[0-9]+]]
-; NOCMOV: cmpl
-; NOCMOV: setb
-; NOCMOV: cmpl
-; NOCMOV: setb
-; NOCMOV: jne
-; NOCMOV: jne
-; NOCMOV: lock
-; NOCMOV-NEXT: cmpxchg8b
-; NOCMOV: jne [[LABEL]]
%4 = atomicrmw umin i64* @sc64, i64 8 acquire
; LINUX: [[LABEL:.LBB[0-9]+_[0-9]+]]
; LINUX: cmpl
-; LINUX: seta
-; LINUX: cmpl
-; LINUX: seta
+; LINUX: setb
; LINUX: cmovne
; LINUX: cmovne
; LINUX: lock
; LINUX-NEXT: cmpxchg8b
; LINUX: jne [[LABEL]]
-; NOCMOV: [[LABEL:.LBB[0-9]+_[0-9]+]]
-; NOCMOV: cmpl
-; NOCMOV: seta
-; NOCMOV: cmpl
-; NOCMOV: seta
-; NOCMOV: jne
-; NOCMOV: jne
-; NOCMOV: lock
-; NOCMOV-NEXT: cmpxchg8b
-; NOCMOV: jne [[LABEL]]
ret void
}
@@ -98,8 +49,8 @@ define void @atomic_maxmin_i6432() {
define void @tf_bug(i8* %ptr) nounwind {
; PIC-LABEL: tf_bug:
-; PIC: movl _id-L1$pb(
-; PIC: movl (_id-L1$pb)+4(
+; PIC-DAG: movl _id-L1$pb(
+; PIC-DAG: movl (_id-L1$pb)+4(
%tmp1 = atomicrmw add i64* @id, i64 1 seq_cst
%tmp2 = add i64 %tmp1, 1
%tmp3 = bitcast i8* %ptr to i64*
diff --git a/test/CodeGen/X86/atomic-ops-ancient-64.ll b/test/CodeGen/X86/atomic-ops-ancient-64.ll
new file mode 100644
index 0000000..18749b9
--- /dev/null
+++ b/test/CodeGen/X86/atomic-ops-ancient-64.ll
@@ -0,0 +1,43 @@
+; RUN: llc -mtriple=i386-linux-gnu %s -o - | FileCheck %s
+
+define i64 @test_add(i64* %addr, i64 %inc) {
+; CHECK-LABEL: test_add:
+; CHECK: calll __sync_fetch_and_add_8
+ %old = atomicrmw add i64* %addr, i64 %inc seq_cst
+ ret i64 %old
+}
+
+define i64 @test_sub(i64* %addr, i64 %inc) {
+; CHECK-LABEL: test_sub:
+; CHECK: calll __sync_fetch_and_sub_8
+ %old = atomicrmw sub i64* %addr, i64 %inc seq_cst
+ ret i64 %old
+}
+
+define i64 @test_and(i64* %andr, i64 %inc) {
+; CHECK-LABEL: test_and:
+; CHECK: calll __sync_fetch_and_and_8
+ %old = atomicrmw and i64* %andr, i64 %inc seq_cst
+ ret i64 %old
+}
+
+define i64 @test_or(i64* %orr, i64 %inc) {
+; CHECK-LABEL: test_or:
+; CHECK: calll __sync_fetch_and_or_8
+ %old = atomicrmw or i64* %orr, i64 %inc seq_cst
+ ret i64 %old
+}
+
+define i64 @test_xor(i64* %xorr, i64 %inc) {
+; CHECK-LABEL: test_xor:
+; CHECK: calll __sync_fetch_and_xor_8
+ %old = atomicrmw xor i64* %xorr, i64 %inc seq_cst
+ ret i64 %old
+}
+
+define i64 @test_nand(i64* %nandr, i64 %inc) {
+; CHECK-LABEL: test_nand:
+; CHECK: calll __sync_fetch_and_nand_8
+ %old = atomicrmw nand i64* %nandr, i64 %inc seq_cst
+ ret i64 %old
+}
diff --git a/test/CodeGen/X86/atomic128.ll b/test/CodeGen/X86/atomic128.ll
new file mode 100644
index 0000000..741d290
--- /dev/null
+++ b/test/CodeGen/X86/atomic128.ll
@@ -0,0 +1,316 @@
+; RUN: llc < %s -mtriple=x86_64-apple-macosx10.9 -verify-machineinstrs -mattr=cx16 | FileCheck %s
+
+@var = global i128 0
+
+define i128 @val_compare_and_swap(i128* %p, i128 %oldval, i128 %newval) {
+; CHECK-LABEL: val_compare_and_swap:
+; CHECK: movq %rsi, %rax
+; CHECK: movq %rcx, %rbx
+; CHECK: movq %r8, %rcx
+; CHECK: lock
+; CHECK: cmpxchg16b (%rdi)
+
+ %pair = cmpxchg i128* %p, i128 %oldval, i128 %newval acquire acquire
+ %val = extractvalue { i128, i1 } %pair, 0
+ ret i128 %val
+}
+
+define void @fetch_and_nand(i128* %p, i128 %bits) {
+; CHECK-LABEL: fetch_and_nand:
+; CHECK-DAG: movq %rdx, [[INCHI:%[a-z0-9]+]]
+; CHECK-DAG: movq (%rdi), %rax
+; CHECK-DAG: movq 8(%rdi), %rdx
+
+; CHECK: [[LOOP:.?LBB[0-9]+_[0-9]+]]:
+; CHECK: movq %rdx, %rcx
+; CHECK: andq [[INCHI]], %rcx
+; CHECK: movq %rax, %rbx
+ ; The INCLO equivalent comes in via %rsi, so it makes sense for it to stay there.
+; CHECK: andq %rsi, %rbx
+; CHECK: notq %rbx
+; CHECK: notq %rcx
+; CHECK: lock
+; CHECK: cmpxchg16b (%rdi)
+; CHECK: jne [[LOOP]]
+
+; CHECK: movq %rax, _var
+; CHECK: movq %rdx, _var+8
+ %val = atomicrmw nand i128* %p, i128 %bits release
+ store i128 %val, i128* @var, align 16
+ ret void
+}
+
+define void @fetch_and_or(i128* %p, i128 %bits) {
+; CHECK-LABEL: fetch_and_or:
+; CHECK-DAG: movq %rdx, [[INCHI:%[a-z0-9]+]]
+; CHECK-DAG: movq (%rdi), %rax
+; CHECK-DAG: movq 8(%rdi), %rdx
+
+; CHECK: [[LOOP:.?LBB[0-9]+_[0-9]+]]:
+; CHECK: movq %rax, %rbx
+ ; The INCLO equivalent comes in via %rsi, so it makes sense for it to stay there.
+; CHECK: orq %rsi, %rbx
+; CHECK: movq %rdx, %rcx
+; CHECK: orq [[INCHI]], %rcx
+; CHECK: lock
+; CHECK: cmpxchg16b (%rdi)
+; CHECK: jne [[LOOP]]
+
+; CHECK: movq %rax, _var
+; CHECK: movq %rdx, _var+8
+
+ %val = atomicrmw or i128* %p, i128 %bits seq_cst
+ store i128 %val, i128* @var, align 16
+ ret void
+}
+
+define void @fetch_and_add(i128* %p, i128 %bits) {
+; CHECK-LABEL: fetch_and_add:
+; CHECK-DAG: movq %rdx, [[INCHI:%[a-z0-9]+]]
+; CHECK-DAG: movq (%rdi), %rax
+; CHECK-DAG: movq 8(%rdi), %rdx
+
+; CHECK: [[LOOP:.?LBB[0-9]+_[0-9]+]]:
+; CHECK: movq %rax, %rbx
+ ; The INCLO equivalent comes in via %rsi, so it makes sense for it to stay there.
+; CHECK: addq %rsi, %rbx
+; CHECK: movq %rdx, %rcx
+; CHECK: adcq [[INCHI]], %rcx
+; CHECK: lock
+; CHECK: cmpxchg16b (%rdi)
+; CHECK: jne [[LOOP]]
+
+; CHECK: movq %rax, _var
+; CHECK: movq %rdx, _var+8
+
+ %val = atomicrmw add i128* %p, i128 %bits seq_cst
+ store i128 %val, i128* @var, align 16
+ ret void
+}
+
+define void @fetch_and_sub(i128* %p, i128 %bits) {
+; CHECK-LABEL: fetch_and_sub:
+; CHECK-DAG: movq %rdx, [[INCHI:%[a-z0-9]+]]
+; CHECK-DAG: movq (%rdi), %rax
+; CHECK-DAG: movq 8(%rdi), %rdx
+
+; CHECK: [[LOOP:.?LBB[0-9]+_[0-9]+]]:
+; CHECK: movq %rax, %rbx
+ ; The INCLO equivalent comes in via %rsi, so it makes sense for it to stay there.
+; CHECK: subq %rsi, %rbx
+; CHECK: movq %rdx, %rcx
+; CHECK: sbbq [[INCHI]], %rcx
+; CHECK: lock
+; CHECK: cmpxchg16b (%rdi)
+; CHECK: jne [[LOOP]]
+
+; CHECK: movq %rax, _var
+; CHECK: movq %rdx, _var+8
+
+ %val = atomicrmw sub i128* %p, i128 %bits seq_cst
+ store i128 %val, i128* @var, align 16
+ ret void
+}
+
+define void @fetch_and_min(i128* %p, i128 %bits) {
+; CHECK-LABEL: fetch_and_min:
+; CHECK-DAG: movq %rdx, [[INCHI:%[a-z0-9]+]]
+; CHECK-DAG: movq (%rdi), %rax
+; CHECK-DAG: movq 8(%rdi), %rdx
+
+; CHECK: [[LOOP:.?LBB[0-9]+_[0-9]+]]:
+; CHECK: cmpq %rsi, %rax
+; CHECK: setbe [[CMP:%[a-z0-9]+]]
+; CHECK: cmpq [[INCHI]], %rdx
+; CHECK: setle [[HICMP:%[a-z0-9]+]]
+; CHECK: je [[USE_LO:.?LBB[0-9]+_[0-9]+]]
+
+; CHECK: movb [[HICMP]], [[CMP]]
+; CHECK: [[USE_LO]]:
+; CHECK: testb [[CMP]], [[CMP]]
+; CHECK: movq %rsi, %rbx
+; CHECK: cmovneq %rax, %rbx
+; CHECK: movq [[INCHI]], %rcx
+; CHECK: cmovneq %rdx, %rcx
+; CHECK: lock
+; CHECK: cmpxchg16b (%rdi)
+; CHECK: jne [[LOOP]]
+
+; CHECK: movq %rax, _var
+; CHECK: movq %rdx, _var+8
+
+ %val = atomicrmw min i128* %p, i128 %bits seq_cst
+ store i128 %val, i128* @var, align 16
+ ret void
+}
+
+define void @fetch_and_max(i128* %p, i128 %bits) {
+; CHECK-LABEL: fetch_and_max:
+; CHECK-DAG: movq %rdx, [[INCHI:%[a-z0-9]+]]
+; CHECK-DAG: movq (%rdi), %rax
+; CHECK-DAG: movq 8(%rdi), %rdx
+
+; CHECK: [[LOOP:.?LBB[0-9]+_[0-9]+]]:
+; CHECK: cmpq %rsi, %rax
+; CHECK: setae [[CMP:%[a-z0-9]+]]
+; CHECK: cmpq [[INCHI]], %rdx
+; CHECK: setge [[HICMP:%[a-z0-9]+]]
+; CHECK: je [[USE_LO:.?LBB[0-9]+_[0-9]+]]
+
+; CHECK: movb [[HICMP]], [[CMP]]
+; CHECK: [[USE_LO]]:
+; CHECK: testb [[CMP]], [[CMP]]
+; CHECK: movq %rsi, %rbx
+; CHECK: cmovneq %rax, %rbx
+; CHECK: movq [[INCHI]], %rcx
+; CHECK: cmovneq %rdx, %rcx
+; CHECK: lock
+; CHECK: cmpxchg16b (%rdi)
+; CHECK: jne [[LOOP]]
+
+; CHECK: movq %rax, _var
+; CHECK: movq %rdx, _var+8
+
+ %val = atomicrmw max i128* %p, i128 %bits seq_cst
+ store i128 %val, i128* @var, align 16
+ ret void
+}
+
+define void @fetch_and_umin(i128* %p, i128 %bits) {
+; CHECK-LABEL: fetch_and_umin:
+; CHECK-DAG: movq %rdx, [[INCHI:%[a-z0-9]+]]
+; CHECK-DAG: movq (%rdi), %rax
+; CHECK-DAG: movq 8(%rdi), %rdx
+
+; CHECK: [[LOOP:.?LBB[0-9]+_[0-9]+]]:
+; CHECK: cmpq %rsi, %rax
+; CHECK: setbe [[CMP:%[a-z0-9]+]]
+; CHECK: cmpq [[INCHI]], %rdx
+; CHECK: setbe [[HICMP:%[a-z0-9]+]]
+; CHECK: je [[USE_LO:.?LBB[0-9]+_[0-9]+]]
+
+; CHECK: movb [[HICMP]], [[CMP]]
+; CHECK: [[USE_LO]]:
+; CHECK: testb [[CMP]], [[CMP]]
+; CHECK: movq %rsi, %rbx
+; CHECK: cmovneq %rax, %rbx
+; CHECK: movq [[INCHI]], %rcx
+; CHECK: cmovneq %rdx, %rcx
+; CHECK: lock
+; CHECK: cmpxchg16b (%rdi)
+; CHECK: jne [[LOOP]]
+
+; CHECK: movq %rax, _var
+; CHECK: movq %rdx, _var+8
+
+ %val = atomicrmw umin i128* %p, i128 %bits seq_cst
+ store i128 %val, i128* @var, align 16
+ ret void
+}
+
+define void @fetch_and_umax(i128* %p, i128 %bits) {
+; CHECK-LABEL: fetch_and_umax:
+; CHECK-DAG: movq %rdx, [[INCHI:%[a-z0-9]+]]
+; CHECK-DAG: movq (%rdi), %rax
+; CHECK-DAG: movq 8(%rdi), %rdx
+
+; CHECK: [[LOOP:.?LBB[0-9]+_[0-9]+]]:
+; CHECK: cmpq %rax, %rsi
+; CHECK: setb [[CMP:%[a-z0-9]+]]
+; CHECK: cmpq [[INCHI]], %rdx
+; CHECK: seta [[HICMP:%[a-z0-9]+]]
+; CHECK: je [[USE_LO:.?LBB[0-9]+_[0-9]+]]
+
+; CHECK: movb [[HICMP]], [[CMP]]
+; CHECK: [[USE_LO]]:
+; CHECK: testb [[CMP]], [[CMP]]
+; CHECK: movq %rsi, %rbx
+; CHECK: cmovneq %rax, %rbx
+; CHECK: movq [[INCHI]], %rcx
+; CHECK: cmovneq %rdx, %rcx
+; CHECK: lock
+; CHECK: cmpxchg16b (%rdi)
+; CHECK: jne [[LOOP]]
+
+; CHECK: movq %rax, _var
+; CHECK: movq %rdx, _var+8
+
+ %val = atomicrmw umax i128* %p, i128 %bits seq_cst
+ store i128 %val, i128* @var, align 16
+ ret void
+}
+
+define i128 @atomic_load_seq_cst(i128* %p) {
+; CHECK-LABEL: atomic_load_seq_cst:
+; CHECK: xorl %eax, %eax
+; CHECK: xorl %edx, %edx
+; CHECK: xorl %ebx, %ebx
+; CHECK: xorl %ecx, %ecx
+; CHECK: lock
+; CHECK: cmpxchg16b (%rdi)
+
+ %r = load atomic i128* %p seq_cst, align 16
+ ret i128 %r
+}
+
+define i128 @atomic_load_relaxed(i128* %p) {
+; CHECK-LABEL: atomic_load_relaxed:
+; CHECK: xorl %eax, %eax
+; CHECK: xorl %edx, %edx
+; CHECK: xorl %ebx, %ebx
+; CHECK: xorl %ecx, %ecx
+; CHECK: lock
+; CHECK: cmpxchg16b (%rdi)
+
+ %r = load atomic i128* %p monotonic, align 16
+ ret i128 %r
+}
+
+define void @atomic_store_seq_cst(i128* %p, i128 %in) {
+; CHECK-LABEL: atomic_store_seq_cst:
+; CHECK: movq %rdx, %rcx
+; CHECK: movq %rsi, %rbx
+; CHECK: movq (%rdi), %rax
+; CHECK: movq 8(%rdi), %rdx
+
+; CHECK: [[LOOP:.?LBB[0-9]+_[0-9]+]]:
+; CHECK: lock
+; CHECK: cmpxchg16b (%rdi)
+; CHECK: jne [[LOOP]]
+; CHECK-NOT: callq ___sync_lock_test_and_set_16
+
+ store atomic i128 %in, i128* %p seq_cst, align 16
+ ret void
+}
+
+define void @atomic_store_release(i128* %p, i128 %in) {
+; CHECK-LABEL: atomic_store_release:
+; CHECK: movq %rdx, %rcx
+; CHECK: movq %rsi, %rbx
+; CHECK: movq (%rdi), %rax
+; CHECK: movq 8(%rdi), %rdx
+
+; CHECK: [[LOOP:.?LBB[0-9]+_[0-9]+]]:
+; CHECK: lock
+; CHECK: cmpxchg16b (%rdi)
+; CHECK: jne [[LOOP]]
+
+ store atomic i128 %in, i128* %p release, align 16
+ ret void
+}
+
+define void @atomic_store_relaxed(i128* %p, i128 %in) {
+; CHECK-LABEL: atomic_store_relaxed:
+; CHECK: movq %rdx, %rcx
+; CHECK: movq %rsi, %rbx
+; CHECK: movq (%rdi), %rax
+; CHECK: movq 8(%rdi), %rdx
+
+; CHECK: [[LOOP:.?LBB[0-9]+_[0-9]+]]:
+; CHECK: lock
+; CHECK: cmpxchg16b (%rdi)
+; CHECK: jne [[LOOP]]
+
+ store atomic i128 %in, i128* %p unordered, align 16
+ ret void
+}
diff --git a/test/CodeGen/X86/atomic16.ll b/test/CodeGen/X86/atomic16.ll
index 45d3ff4..faaa4c4 100644
--- a/test/CodeGen/X86/atomic16.ll
+++ b/test/CodeGen/X86/atomic16.ll
@@ -4,8 +4,8 @@
@sc16 = external global i16
define void @atomic_fetch_add16() nounwind {
-; X64: atomic_fetch_add16
-; X32: atomic_fetch_add16
+; X64-LABEL: atomic_fetch_add16
+; X32-LABEL: atomic_fetch_add16
entry:
; 32-bit
%t1 = atomicrmw add i16* @sc16, i16 1 acquire
@@ -34,8 +34,8 @@ entry:
}
define void @atomic_fetch_sub16() nounwind {
-; X64: atomic_fetch_sub16
-; X32: atomic_fetch_sub16
+; X64-LABEL: atomic_fetch_sub16
+; X32-LABEL: atomic_fetch_sub16
%t1 = atomicrmw sub i16* @sc16, i16 1 acquire
; X64: lock
; X64: decw
@@ -62,18 +62,18 @@ define void @atomic_fetch_sub16() nounwind {
}
define void @atomic_fetch_and16() nounwind {
-; X64: atomic_fetch_and16
-; X32: atomic_fetch_and16
+; X64-LABEL: atomic_fetch_and16
+; X32-LABEL: atomic_fetch_and16
%t1 = atomicrmw and i16* @sc16, i16 3 acquire
; X64: lock
; X64: andw $3, {{.*}} # encoding: [0xf0,0x66
; X32: lock
; X32: andw $3
%t2 = atomicrmw and i16* @sc16, i16 5 acquire
-; X64: andw
+; X64: andl
; X64: lock
; X64: cmpxchgw
-; X32: andw
+; X32: andl
; X32: lock
; X32: cmpxchgw
%t3 = atomicrmw and i16* @sc16, i16 %t2 acquire
@@ -87,18 +87,18 @@ define void @atomic_fetch_and16() nounwind {
}
define void @atomic_fetch_or16() nounwind {
-; X64: atomic_fetch_or16
-; X32: atomic_fetch_or16
+; X64-LABEL: atomic_fetch_or16
+; X32-LABEL: atomic_fetch_or16
%t1 = atomicrmw or i16* @sc16, i16 3 acquire
; X64: lock
; X64: orw $3, {{.*}} # encoding: [0xf0,0x66
; X32: lock
; X32: orw $3
%t2 = atomicrmw or i16* @sc16, i16 5 acquire
-; X64: orw
+; X64: orl
; X64: lock
; X64: cmpxchgw
-; X32: orw
+; X32: orl
; X32: lock
; X32: cmpxchgw
%t3 = atomicrmw or i16* @sc16, i16 %t2 acquire
@@ -112,18 +112,18 @@ define void @atomic_fetch_or16() nounwind {
}
define void @atomic_fetch_xor16() nounwind {
-; X64: atomic_fetch_xor16
-; X32: atomic_fetch_xor16
+; X64-LABEL: atomic_fetch_xor16
+; X32-LABEL: atomic_fetch_xor16
%t1 = atomicrmw xor i16* @sc16, i16 3 acquire
; X64: lock
; X64: xorw $3, {{.*}} # encoding: [0xf0,0x66
; X32: lock
; X32: xorw $3
%t2 = atomicrmw xor i16* @sc16, i16 5 acquire
-; X64: xorw
+; X64: xorl
; X64: lock
; X64: cmpxchgw
-; X32: xorw
+; X32: xorl
; X32: lock
; X32: cmpxchgw
%t3 = atomicrmw xor i16* @sc16, i16 %t2 acquire
@@ -137,15 +137,15 @@ define void @atomic_fetch_xor16() nounwind {
}
define void @atomic_fetch_nand16(i16 %x) nounwind {
-; X64: atomic_fetch_nand16
-; X32: atomic_fetch_nand16
+; X64-LABEL: atomic_fetch_nand16
+; X32-LABEL: atomic_fetch_nand16
%t1 = atomicrmw nand i16* @sc16, i16 %x acquire
-; X64: andw
-; X64: notw
+; X64: andl
+; X64: notl
; X64: lock
; X64: cmpxchgw
-; X32: andw
-; X32: notw
+; X32: andl
+; X32: notl
; X32: lock
; X32: cmpxchgw
ret void
@@ -155,12 +155,16 @@ define void @atomic_fetch_nand16(i16 %x) nounwind {
define void @atomic_fetch_max16(i16 %x) nounwind {
%t1 = atomicrmw max i16* @sc16, i16 %x acquire
-; X64: cmpw
+; X64: movswl
+; X64: movswl
+; X64: subl
; X64: cmov
; X64: lock
; X64: cmpxchgw
-; X32: cmpw
+; X32: movswl
+; X32: movswl
+; X32: subl
; X32: cmov
; X32: lock
; X32: cmpxchgw
@@ -171,12 +175,16 @@ define void @atomic_fetch_max16(i16 %x) nounwind {
define void @atomic_fetch_min16(i16 %x) nounwind {
%t1 = atomicrmw min i16* @sc16, i16 %x acquire
-; X64: cmpw
+; X64: movswl
+; X64: movswl
+; X64: subl
; X64: cmov
; X64: lock
; X64: cmpxchgw
-; X32: cmpw
+; X32: movswl
+; X32: movswl
+; X32: subl
; X32: cmov
; X32: lock
; X32: cmpxchgw
@@ -187,12 +195,16 @@ define void @atomic_fetch_min16(i16 %x) nounwind {
define void @atomic_fetch_umax16(i16 %x) nounwind {
%t1 = atomicrmw umax i16* @sc16, i16 %x acquire
-; X64: cmpw
+; X64: movzwl
+; X64: movzwl
+; X64: subl
; X64: cmov
; X64: lock
; X64: cmpxchgw
-; X32: cmpw
+; X32: movzwl
+; X32: movzwl
+; X32: subl
; X32: cmov
; X32: lock
; X32: cmpxchgw
@@ -203,11 +215,16 @@ define void @atomic_fetch_umax16(i16 %x) nounwind {
define void @atomic_fetch_umin16(i16 %x) nounwind {
%t1 = atomicrmw umin i16* @sc16, i16 %x acquire
-; X64: cmpw
+; X64: movzwl
+; X64: movzwl
+; X64: subl
; X64: cmov
; X64: lock
; X64: cmpxchgw
-; X32: cmpw
+
+; X32: movzwl
+; X32: movzwl
+; X32: subl
; X32: cmov
; X32: lock
; X32: cmpxchgw
diff --git a/test/CodeGen/X86/atomic32.ll b/test/CodeGen/X86/atomic32.ll
index 474c0e6..4f2cbe0 100644
--- a/test/CodeGen/X86/atomic32.ll
+++ b/test/CodeGen/X86/atomic32.ll
@@ -5,8 +5,8 @@
@sc32 = external global i32
define void @atomic_fetch_add32() nounwind {
-; X64: atomic_fetch_add32
-; X32: atomic_fetch_add32
+; X64-LABEL: atomic_fetch_add32:
+; X32-LABEL: atomic_fetch_add32:
entry:
; 32-bit
%t1 = atomicrmw add i32* @sc32, i32 1 acquire
@@ -35,8 +35,8 @@ entry:
}
define void @atomic_fetch_sub32() nounwind {
-; X64: atomic_fetch_sub32
-; X32: atomic_fetch_sub32
+; X64-LABEL: atomic_fetch_sub32:
+; X32-LABEL: atomic_fetch_sub32:
%t1 = atomicrmw sub i32* @sc32, i32 1 acquire
; X64: lock
; X64: decl
@@ -63,8 +63,8 @@ define void @atomic_fetch_sub32() nounwind {
}
define void @atomic_fetch_and32() nounwind {
-; X64: atomic_fetch_and32
-; X32: atomic_fetch_and32
+; X64-LABEL: atomic_fetch_and32:
+; X32-LABEL: atomic_fetch_and32:
%t1 = atomicrmw and i32* @sc32, i32 3 acquire
; X64: lock
; X64: andl $3
@@ -88,8 +88,8 @@ define void @atomic_fetch_and32() nounwind {
}
define void @atomic_fetch_or32() nounwind {
-; X64: atomic_fetch_or32
-; X32: atomic_fetch_or32
+; X64-LABEL: atomic_fetch_or32:
+; X32-LABEL: atomic_fetch_or32:
%t1 = atomicrmw or i32* @sc32, i32 3 acquire
; X64: lock
; X64: orl $3
@@ -113,8 +113,8 @@ define void @atomic_fetch_or32() nounwind {
}
define void @atomic_fetch_xor32() nounwind {
-; X64: atomic_fetch_xor32
-; X32: atomic_fetch_xor32
+; X64-LABEL: atomic_fetch_xor32:
+; X32-LABEL: atomic_fetch_xor32:
%t1 = atomicrmw xor i32* @sc32, i32 3 acquire
; X64: lock
; X64: xorl $3
@@ -138,8 +138,8 @@ define void @atomic_fetch_xor32() nounwind {
}
define void @atomic_fetch_nand32(i32 %x) nounwind {
-; X64: atomic_fetch_nand32
-; X32: atomic_fetch_nand32
+; X64-LABEL: atomic_fetch_nand32:
+; X32-LABEL: atomic_fetch_nand32:
%t1 = atomicrmw nand i32* @sc32, i32 %x acquire
; X64: andl
; X64: notl
@@ -155,19 +155,22 @@ define void @atomic_fetch_nand32(i32 %x) nounwind {
}
define void @atomic_fetch_max32(i32 %x) nounwind {
+; X64-LABEL: atomic_fetch_max32:
+; X32-LABEL: atomic_fetch_max32:
+
%t1 = atomicrmw max i32* @sc32, i32 %x acquire
-; X64: cmpl
+; X64: subl
; X64: cmov
; X64: lock
; X64: cmpxchgl
-; X32: cmpl
+; X32: subl
; X32: cmov
; X32: lock
; X32: cmpxchgl
-; NOCMOV: cmpl
-; NOCMOV: jl
+; NOCMOV: subl
+; NOCMOV: jge
; NOCMOV: lock
; NOCMOV: cmpxchgl
ret void
@@ -177,19 +180,23 @@ define void @atomic_fetch_max32(i32 %x) nounwind {
}
define void @atomic_fetch_min32(i32 %x) nounwind {
+; X64-LABEL: atomic_fetch_min32:
+; X32-LABEL: atomic_fetch_min32:
+; NOCMOV-LABEL: atomic_fetch_min32:
+
%t1 = atomicrmw min i32* @sc32, i32 %x acquire
-; X64: cmpl
+; X64: subl
; X64: cmov
; X64: lock
; X64: cmpxchgl
-; X32: cmpl
+; X32: subl
; X32: cmov
; X32: lock
; X32: cmpxchgl
-; NOCMOV: cmpl
-; NOCMOV: jg
+; NOCMOV: subl
+; NOCMOV: jle
; NOCMOV: lock
; NOCMOV: cmpxchgl
ret void
@@ -199,19 +206,23 @@ define void @atomic_fetch_min32(i32 %x) nounwind {
}
define void @atomic_fetch_umax32(i32 %x) nounwind {
+; X64-LABEL: atomic_fetch_umax32:
+; X32-LABEL: atomic_fetch_umax32:
+; NOCMOV-LABEL: atomic_fetch_umax32:
+
%t1 = atomicrmw umax i32* @sc32, i32 %x acquire
-; X64: cmpl
+; X64: subl
; X64: cmov
; X64: lock
; X64: cmpxchgl
-; X32: cmpl
+; X32: subl
; X32: cmov
; X32: lock
; X32: cmpxchgl
-; NOCMOV: cmpl
-; NOCMOV: jb
+; NOCMOV: subl
+; NOCMOV: ja
; NOCMOV: lock
; NOCMOV: cmpxchgl
ret void
@@ -221,19 +232,23 @@ define void @atomic_fetch_umax32(i32 %x) nounwind {
}
define void @atomic_fetch_umin32(i32 %x) nounwind {
+; X64-LABEL: atomic_fetch_umin32:
+; X32-LABEL: atomic_fetch_umin32:
+; NOCMOV-LABEL: atomic_fetch_umin32:
+
%t1 = atomicrmw umin i32* @sc32, i32 %x acquire
-; X64: cmpl
+; X64: subl
; X64: cmov
; X64: lock
; X64: cmpxchgl
-; X32: cmpl
+; X32: subl
; X32: cmov
; X32: lock
; X32: cmpxchgl
-; NOCMOV: cmpl
-; NOCMOV: ja
+; NOCMOV: subl
+; NOCMOV: jb
; NOCMOV: lock
; NOCMOV: cmpxchgl
ret void
@@ -243,6 +258,9 @@ define void @atomic_fetch_umin32(i32 %x) nounwind {
}
define void @atomic_fetch_cmpxchg32() nounwind {
+; X64-LABEL: atomic_fetch_cmpxchg32:
+; X32-LABEL: atomic_fetch_cmpxchg32:
+
%t1 = cmpxchg i32* @sc32, i32 0, i32 1 acquire acquire
; X64: lock
; X64: cmpxchgl
@@ -254,6 +272,9 @@ define void @atomic_fetch_cmpxchg32() nounwind {
}
define void @atomic_fetch_store32(i32 %x) nounwind {
+; X64-LABEL: atomic_fetch_store32:
+; X32-LABEL: atomic_fetch_store32:
+
store atomic i32 %x, i32* @sc32 release, align 4
; X64-NOT: lock
; X64: movl
@@ -265,6 +286,9 @@ define void @atomic_fetch_store32(i32 %x) nounwind {
}
define void @atomic_fetch_swap32(i32 %x) nounwind {
+; X64-LABEL: atomic_fetch_swap32:
+; X32-LABEL: atomic_fetch_swap32:
+
%t1 = atomicrmw xchg i32* @sc32, i32 %x acquire
; X64-NOT: lock
; X64: xchgl
diff --git a/test/CodeGen/X86/atomic64.ll b/test/CodeGen/X86/atomic64.ll
index 4f55edc..11b4e68 100644
--- a/test/CodeGen/X86/atomic64.ll
+++ b/test/CodeGen/X86/atomic64.ll
@@ -3,7 +3,8 @@
@sc64 = external global i64
define void @atomic_fetch_add64() nounwind {
-; X64: atomic_fetch_add64
+; X64-LABEL: atomic_fetch_add64:
+; X32-LABEL: atomic_fetch_add64:
entry:
%t1 = atomicrmw add i64* @sc64, i64 1 acquire
; X64: lock
@@ -22,7 +23,8 @@ entry:
}
define void @atomic_fetch_sub64() nounwind {
-; X64: atomic_fetch_sub64
+; X64-LABEL: atomic_fetch_sub64:
+; X32-LABEL: atomic_fetch_sub64:
%t1 = atomicrmw sub i64* @sc64, i64 1 acquire
; X64: lock
; X64: decq
@@ -40,7 +42,8 @@ define void @atomic_fetch_sub64() nounwind {
}
define void @atomic_fetch_and64() nounwind {
-; X64: atomic_fetch_and64
+; X64-LABEL: atomic_fetch_and64:
+; X32-LABEL: atomic_fetch_and64:
%t1 = atomicrmw and i64* @sc64, i64 3 acquire
; X64: lock
; X64: andq $3
@@ -56,7 +59,8 @@ define void @atomic_fetch_and64() nounwind {
}
define void @atomic_fetch_or64() nounwind {
-; X64: atomic_fetch_or64
+; X64-LABEL: atomic_fetch_or64:
+; X32-LABEL: atomic_fetch_or64:
%t1 = atomicrmw or i64* @sc64, i64 3 acquire
; X64: lock
; X64: orq $3
@@ -72,7 +76,8 @@ define void @atomic_fetch_or64() nounwind {
}
define void @atomic_fetch_xor64() nounwind {
-; X64: atomic_fetch_xor64
+; X64-LABEL: atomic_fetch_xor64:
+; X32-LABEL: atomic_fetch_xor64:
%t1 = atomicrmw xor i64* @sc64, i64 3 acquire
; X64: lock
; X64: xorq $3
@@ -88,8 +93,8 @@ define void @atomic_fetch_xor64() nounwind {
}
define void @atomic_fetch_nand64(i64 %x) nounwind {
-; X64: atomic_fetch_nand64
-; X32: atomic_fetch_nand64
+; X64-LABEL: atomic_fetch_nand64:
+; X32-LABEL: atomic_fetch_nand64:
%t1 = atomicrmw nand i64* @sc64, i64 %x acquire
; X64: andq
; X64: notq
@@ -107,8 +112,10 @@ define void @atomic_fetch_nand64(i64 %x) nounwind {
}
define void @atomic_fetch_max64(i64 %x) nounwind {
+; X64-LABEL: atomic_fetch_max64:
+; X32-LABEL: atomic_fetch_max64:
%t1 = atomicrmw max i64* @sc64, i64 %x acquire
-; X64: cmpq
+; X64: subq
; X64: cmov
; X64: lock
; X64: cmpxchgq
@@ -126,8 +133,10 @@ define void @atomic_fetch_max64(i64 %x) nounwind {
}
define void @atomic_fetch_min64(i64 %x) nounwind {
+; X64-LABEL: atomic_fetch_min64:
+; X32-LABEL: atomic_fetch_min64:
%t1 = atomicrmw min i64* @sc64, i64 %x acquire
-; X64: cmpq
+; X64: subq
; X64: cmov
; X64: lock
; X64: cmpxchgq
@@ -145,8 +154,10 @@ define void @atomic_fetch_min64(i64 %x) nounwind {
}
define void @atomic_fetch_umax64(i64 %x) nounwind {
+; X64-LABEL: atomic_fetch_umax64:
+; X32-LABEL: atomic_fetch_umax64:
%t1 = atomicrmw umax i64* @sc64, i64 %x acquire
-; X64: cmpq
+; X64: subq
; X64: cmov
; X64: lock
; X64: cmpxchgq
@@ -164,8 +175,10 @@ define void @atomic_fetch_umax64(i64 %x) nounwind {
}
define void @atomic_fetch_umin64(i64 %x) nounwind {
+; X64-LABEL: atomic_fetch_umin64:
+; X32-LABEL: atomic_fetch_umin64:
%t1 = atomicrmw umin i64* @sc64, i64 %x acquire
-; X64: cmpq
+; X64: subq
; X64: cmov
; X64: lock
; X64: cmpxchgq
@@ -183,6 +196,8 @@ define void @atomic_fetch_umin64(i64 %x) nounwind {
}
define void @atomic_fetch_cmpxchg64() nounwind {
+; X64-LABEL: atomic_fetch_cmpxchg64:
+; X32-LABEL: atomic_fetch_cmpxchg64:
%t1 = cmpxchg i64* @sc64, i64 0, i64 1 acquire acquire
; X64: lock
; X64: cmpxchgq
@@ -194,6 +209,8 @@ define void @atomic_fetch_cmpxchg64() nounwind {
}
define void @atomic_fetch_store64(i64 %x) nounwind {
+; X64-LABEL: atomic_fetch_store64:
+; X32-LABEL: atomic_fetch_store64:
store atomic i64 %x, i64* @sc64 release, align 8
; X64-NOT: lock
; X64: movq
@@ -205,6 +222,8 @@ define void @atomic_fetch_store64(i64 %x) nounwind {
}
define void @atomic_fetch_swap64(i64 %x) nounwind {
+; X64-LABEL: atomic_fetch_swap64:
+; X32-LABEL: atomic_fetch_swap64:
%t1 = atomicrmw xchg i64* @sc64, i64 %x acquire
; X64-NOT: lock
; X64: xchgq
diff --git a/test/CodeGen/X86/atomic6432.ll b/test/CodeGen/X86/atomic6432.ll
index c0f7267..1c4b0f4 100644
--- a/test/CodeGen/X86/atomic6432.ll
+++ b/test/CodeGen/X86/atomic6432.ll
@@ -3,7 +3,8 @@
@sc64 = external global i64
define void @atomic_fetch_add64() nounwind {
-; X32: atomic_fetch_add64
+; X64-LABEL: atomic_fetch_add64:
+; X32-LABEL: atomic_fetch_add64:
entry:
%t1 = atomicrmw add i64* @sc64, i64 1 acquire
; X32: addl
@@ -30,20 +31,21 @@ entry:
}
define void @atomic_fetch_sub64() nounwind {
-; X32: atomic_fetch_sub64
+; X64-LABEL: atomic_fetch_sub64:
+; X32-LABEL: atomic_fetch_sub64:
%t1 = atomicrmw sub i64* @sc64, i64 1 acquire
-; X32: subl
-; X32: sbbl
+; X32: addl $-1
+; X32: adcl $-1
; X32: lock
; X32: cmpxchg8b
%t2 = atomicrmw sub i64* @sc64, i64 3 acquire
-; X32: subl
-; X32: sbbl
+; X32: addl $-3
+; X32: adcl $-1
; X32: lock
; X32: cmpxchg8b
%t3 = atomicrmw sub i64* @sc64, i64 5 acquire
-; X32: subl
-; X32: sbbl
+; X32: addl $-5
+; X32: adcl $-1
; X32: lock
; X32: cmpxchg8b
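+; Subtracting an immediate is now emitted as adding its negation split
+; across the lo/hi register pair: for -5 the low word gets addl $-5 and
+; the high word adcl $-1 (the sign extension of the constant).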
%t4 = atomicrmw sub i64* @sc64, i64 %t3 acquire
@@ -56,15 +58,16 @@ define void @atomic_fetch_sub64() nounwind {
}
define void @atomic_fetch_and64() nounwind {
-; X32: atomic_fetch_and64
+; X64-LABEL: atomic_fetch_and64:
+; X32-LABEL: atomic_fetch_and64:
%t1 = atomicrmw and i64* @sc64, i64 3 acquire
-; X32: andl
-; X32: andl
+; X32: andl $3
+; X32-NOT: andl
; X32: lock
; X32: cmpxchg8b
- %t2 = atomicrmw and i64* @sc64, i64 5 acquire
-; X32: andl
-; X32: andl
+ %t2 = atomicrmw and i64* @sc64, i64 4294967297 acquire
+; X32: andl $1
+; X32: andl $1
; X32: lock
; X32: cmpxchg8b
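+; 4294967297 is 0x100000001, so both the low and the high word are
+; masked with andl $1, matching the two checks above.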
%t3 = atomicrmw and i64* @sc64, i64 %t2 acquire
@@ -77,15 +80,16 @@ define void @atomic_fetch_and64() nounwind {
}
define void @atomic_fetch_or64() nounwind {
-; X32: atomic_fetch_or64
+; X64-LABEL: atomic_fetch_or64:
+; X32-LABEL: atomic_fetch_or64:
%t1 = atomicrmw or i64* @sc64, i64 3 acquire
-; X32: orl
-; X32: orl
+; X32: orl $3
+; X32-NOT: orl
; X32: lock
; X32: cmpxchg8b
- %t2 = atomicrmw or i64* @sc64, i64 5 acquire
-; X32: orl
-; X32: orl
+ %t2 = atomicrmw or i64* @sc64, i64 4294967297 acquire
+; X32: orl $1
+; X32: orl $1
; X32: lock
; X32: cmpxchg8b
%t3 = atomicrmw or i64* @sc64, i64 %t2 acquire
@@ -98,15 +102,16 @@ define void @atomic_fetch_or64() nounwind {
}
define void @atomic_fetch_xor64() nounwind {
-; X32: atomic_fetch_xor64
+; X64-LABEL: atomic_fetch_xor64:
+; X32-LABEL: atomic_fetch_xor64:
%t1 = atomicrmw xor i64* @sc64, i64 3 acquire
; X32: xorl
-; X32: xorl
+; X32-NOT: xorl
; X32: lock
; X32: cmpxchg8b
- %t2 = atomicrmw xor i64* @sc64, i64 5 acquire
-; X32: xorl
-; X32: xorl
+ %t2 = atomicrmw xor i64* @sc64, i64 4294967297 acquire
+; X32: xorl $1
+; X32: xorl $1
; X32: lock
; X32: cmpxchg8b
%t3 = atomicrmw xor i64* @sc64, i64 %t2 acquire
@@ -119,7 +124,8 @@ define void @atomic_fetch_xor64() nounwind {
}
define void @atomic_fetch_nand64(i64 %x) nounwind {
-; X32: atomic_fetch_nand64
+; X64-LABEL: atomic_fetch_nand64:
+; X32-LABEL: atomic_fetch_nand64:
%t1 = atomicrmw nand i64* @sc64, i64 %x acquire
; X32: andl
; X32: andl
@@ -132,10 +138,11 @@ define void @atomic_fetch_nand64(i64 %x) nounwind {
}
define void @atomic_fetch_max64(i64 %x) nounwind {
+; X64-LABEL: atomic_fetch_max64:
+; X32-LABEL: atomic_fetch_max64:
%t1 = atomicrmw max i64* @sc64, i64 %x acquire
-; X32: cmpl
-; X32: cmpl
-; X32: cmov
+; X32: subl
+; X32: subl
; X32: cmov
; X32: cmov
; X32: lock
@@ -145,10 +152,11 @@ define void @atomic_fetch_max64(i64 %x) nounwind {
}
define void @atomic_fetch_min64(i64 %x) nounwind {
+; X64-LABEL: atomic_fetch_min64:
+; X32-LABEL: atomic_fetch_min64:
%t1 = atomicrmw min i64* @sc64, i64 %x acquire
-; X32: cmpl
-; X32: cmpl
-; X32: cmov
+; X32: subl
+; X32: subl
; X32: cmov
; X32: cmov
; X32: lock
@@ -158,10 +166,11 @@ define void @atomic_fetch_min64(i64 %x) nounwind {
}
define void @atomic_fetch_umax64(i64 %x) nounwind {
+; X64-LABEL: atomic_fetch_umax64:
+; X32-LABEL: atomic_fetch_umax64:
%t1 = atomicrmw umax i64* @sc64, i64 %x acquire
-; X32: cmpl
-; X32: cmpl
-; X32: cmov
+; X32: subl
+; X32: subl
; X32: cmov
; X32: cmov
; X32: lock
@@ -171,10 +180,11 @@ define void @atomic_fetch_umax64(i64 %x) nounwind {
}
define void @atomic_fetch_umin64(i64 %x) nounwind {
+; X64-LABEL: atomic_fetch_umin64:
+; X32-LABEL: atomic_fetch_umin64:
%t1 = atomicrmw umin i64* @sc64, i64 %x acquire
-; X32: cmpl
-; X32: cmpl
-; X32: cmov
+; X32: subl
+; X32: subl
; X32: cmov
; X32: cmov
; X32: lock
@@ -184,6 +194,8 @@ define void @atomic_fetch_umin64(i64 %x) nounwind {
}
define void @atomic_fetch_cmpxchg64() nounwind {
+; X64-LABEL: atomic_fetch_cmpxchg64:
+; X32-LABEL: atomic_fetch_cmpxchg64:
%t1 = cmpxchg i64* @sc64, i64 0, i64 1 acquire acquire
; X32: lock
; X32: cmpxchg8b
@@ -192,6 +204,8 @@ define void @atomic_fetch_cmpxchg64() nounwind {
}
define void @atomic_fetch_store64(i64 %x) nounwind {
+; X64-LABEL: atomic_fetch_store64:
+; X32-LABEL: atomic_fetch_store64:
store atomic i64 %x, i64* @sc64 release, align 8
; X32: lock
; X32: cmpxchg8b
@@ -200,6 +214,8 @@ define void @atomic_fetch_store64(i64 %x) nounwind {
}
define void @atomic_fetch_swap64(i64 %x) nounwind {
+; X64-LABEL: atomic_fetch_swap64:
+; X32-LABEL: atomic_fetch_swap64:
%t1 = atomicrmw xchg i64* @sc64, i64 %x acquire
; X32: lock
; X32: xchg8b
diff --git a/test/CodeGen/X86/atomic8.ll b/test/CodeGen/X86/atomic8.ll
index 203b26f..5eef9b2 100644
--- a/test/CodeGen/X86/atomic8.ll
+++ b/test/CodeGen/X86/atomic8.ll
@@ -4,8 +4,8 @@
@sc8 = external global i8
define void @atomic_fetch_add8() nounwind {
-; X64: atomic_fetch_add8
-; X32: atomic_fetch_add8
+; X64-LABEL: atomic_fetch_add8:
+; X32-LABEL: atomic_fetch_add8:
entry:
; 32-bit
%t1 = atomicrmw add i8* @sc8, i8 1 acquire
@@ -34,8 +34,8 @@ entry:
}
define void @atomic_fetch_sub8() nounwind {
-; X64: atomic_fetch_sub8
-; X32: atomic_fetch_sub8
+; X64-LABEL: atomic_fetch_sub8:
+; X32-LABEL: atomic_fetch_sub8:
%t1 = atomicrmw sub i8* @sc8, i8 1 acquire
; X64: lock
; X64: decb
@@ -62,8 +62,8 @@ define void @atomic_fetch_sub8() nounwind {
}
define void @atomic_fetch_and8() nounwind {
-; X64: atomic_fetch_and8
-; X32: atomic_fetch_and8
+; X64-LABEL: atomic_fetch_and8:
+; X32-LABEL: atomic_fetch_and8:
%t1 = atomicrmw and i8* @sc8, i8 3 acquire
; X64: lock
; X64: andb $3
@@ -87,8 +87,8 @@ define void @atomic_fetch_and8() nounwind {
}
define void @atomic_fetch_or8() nounwind {
-; X64: atomic_fetch_or8
-; X32: atomic_fetch_or8
+; X64-LABEL: atomic_fetch_or8:
+; X32-LABEL: atomic_fetch_or8:
%t1 = atomicrmw or i8* @sc8, i8 3 acquire
; X64: lock
; X64: orb $3
@@ -112,8 +112,8 @@ define void @atomic_fetch_or8() nounwind {
}
define void @atomic_fetch_xor8() nounwind {
-; X64: atomic_fetch_xor8
-; X32: atomic_fetch_xor8
+; X64-LABEL: atomic_fetch_xor8:
+; X32-LABEL: atomic_fetch_xor8:
%t1 = atomicrmw xor i8* @sc8, i8 3 acquire
; X64: lock
; X64: xorb $3
@@ -137,8 +137,8 @@ define void @atomic_fetch_xor8() nounwind {
}
define void @atomic_fetch_nand8(i8 %x) nounwind {
-; X64: atomic_fetch_nand8
-; X32: atomic_fetch_nand8
+; X64-LABEL: atomic_fetch_nand8:
+; X32-LABEL: atomic_fetch_nand8:
%t1 = atomicrmw nand i8* @sc8, i8 %x acquire
; X64: andb
; X64: notb
@@ -154,14 +154,18 @@ define void @atomic_fetch_nand8(i8 %x) nounwind {
}
define void @atomic_fetch_max8(i8 %x) nounwind {
+; X64-LABEL: atomic_fetch_max8:
+; X32-LABEL: atomic_fetch_max8:
%t1 = atomicrmw max i8* @sc8, i8 %x acquire
-; X64: cmpb
-; X64: cmov
+; X64: movsbl
+; X64: movsbl
+; X64: subl
; X64: lock
; X64: cmpxchgb
-; X32: cmpb
-; X32: cmov
+; X32: movsbl
+; X32: movsbl
+; X32: subl
; X32: lock
; X32: cmpxchgb
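+; i8 min/max is promoted to a 32-bit operation: the signed variants
+; sign-extend both operands with movsbl before the subl, while the
+; unsigned variants below zero-extend with movzbl.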
ret void
@@ -170,14 +174,18 @@ define void @atomic_fetch_max8(i8 %x) nounwind {
}
define void @atomic_fetch_min8(i8 %x) nounwind {
+; X64-LABEL: atomic_fetch_min8:
+; X32-LABEL: atomic_fetch_min8:
%t1 = atomicrmw min i8* @sc8, i8 %x acquire
-; X64: cmpb
-; X64: cmov
+; X64: movsbl
+; X64: movsbl
+; X64: subl
; X64: lock
; X64: cmpxchgb
-; X32: cmpb
-; X32: cmov
+; X32: movsbl
+; X32: movsbl
+; X32: subl
; X32: lock
; X32: cmpxchgb
ret void
@@ -186,14 +194,18 @@ define void @atomic_fetch_min8(i8 %x) nounwind {
}
define void @atomic_fetch_umax8(i8 %x) nounwind {
+; X64-LABEL: atomic_fetch_umax8:
+; X32-LABEL: atomic_fetch_umax8:
%t1 = atomicrmw umax i8* @sc8, i8 %x acquire
-; X64: cmpb
-; X64: cmov
+; X64: movzbl
+; X64: movzbl
+; X64: subl
; X64: lock
; X64: cmpxchgb
-; X32: cmpb
-; X32: cmov
+; X32: movzbl
+; X32: movzbl
+; X32: subl
; X32: lock
; X32: cmpxchgb
ret void
@@ -202,13 +214,18 @@ define void @atomic_fetch_umax8(i8 %x) nounwind {
}
define void @atomic_fetch_umin8(i8 %x) nounwind {
+; X64-LABEL: atomic_fetch_umin8:
+; X32-LABEL: atomic_fetch_umin8:
%t1 = atomicrmw umin i8* @sc8, i8 %x acquire
-; X64: cmpb
-; X64: cmov
+; X64: movzbl
+; X64: movzbl
+; X64: subl
; X64: lock
; X64: cmpxchgb
-; X32: cmpb
-; X32: cmov
+; X32: movzbl
+; X32: movzbl
+; X32: subl
; X32: lock
; X32: cmpxchgb
ret void
@@ -217,6 +234,8 @@ define void @atomic_fetch_umin8(i8 %x) nounwind {
}
define void @atomic_fetch_cmpxchg8() nounwind {
+; X64-LABEL: atomic_fetch_cmpxchg8:
+; X32-LABEL: atomic_fetch_cmpxchg8:
%t1 = cmpxchg i8* @sc8, i8 0, i8 1 acquire acquire
; X64: lock
; X64: cmpxchgb
@@ -228,6 +247,8 @@ define void @atomic_fetch_cmpxchg8() nounwind {
}
define void @atomic_fetch_store8(i8 %x) nounwind {
+; X64-LABEL: atomic_fetch_store8:
+; X32-LABEL: atomic_fetch_store8:
store atomic i8 %x, i8* @sc8 release, align 4
; X64-NOT: lock
; X64: movb
@@ -239,6 +260,8 @@ define void @atomic_fetch_store8(i8 %x) nounwind {
}
define void @atomic_fetch_swap8(i8 %x) nounwind {
+; X64-LABEL: atomic_fetch_swap8:
+; X32-LABEL: atomic_fetch_swap8:
%t1 = atomicrmw xchg i8* @sc8, i8 %x acquire
; X64-NOT: lock
; X64: xchgb
diff --git a/test/CodeGen/X86/atomic_op.ll b/test/CodeGen/X86/atomic_op.ll
index b3045ed..d0ab28a 100644
--- a/test/CodeGen/X86/atomic_op.ll
+++ b/test/CodeGen/X86/atomic_op.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -mcpu=generic -march=x86 -mattr=+cmov -verify-machineinstrs | FileCheck %s
+; RUN: llc < %s -mcpu=generic -march=x86 -mattr=+cmov,+cx16 -verify-machineinstrs | FileCheck %s
target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:128:128"
@@ -101,26 +101,28 @@ entry:
%neg1 = sub i32 0, 10 ; <i32> [#uses=1]
; CHECK: lock
; CHECK: cmpxchgl
- %16 = cmpxchg i32* %val2, i32 %neg1, i32 1 monotonic monotonic
+ %pair16 = cmpxchg i32* %val2, i32 %neg1, i32 1 monotonic monotonic
+ %16 = extractvalue { i32, i1 } %pair16, 0
store i32 %16, i32* %old
; CHECK: lock
; CHECK: cmpxchgl
- %17 = cmpxchg i32* %val2, i32 1976, i32 1 monotonic monotonic
+ %pair17 = cmpxchg i32* %val2, i32 1976, i32 1 monotonic monotonic
+ %17 = extractvalue { i32, i1 } %pair17, 0
store i32 %17, i32* %old
; CHECK: movl [[R17atomic:.*]], %eax
- ; CHECK: movl $1401, %[[R17mask:[a-z]*]]
- ; CHECK: andl %eax, %[[R17mask]]
- ; CHECK: notl %[[R17mask]]
+ ; CHECK: movl %eax, %[[R17mask:[a-z]*]]
+ ; CHECK: notl %[[R17mask]]
+ ; CHECK: orl $-1402, %[[R17mask]]
; CHECK: lock
; CHECK: cmpxchgl %[[R17mask]], [[R17atomic]]
; CHECK: jne
; CHECK: movl %eax,
%18 = atomicrmw nand i32* %val2, i32 1401 monotonic
store i32 %18, i32* %old
- ; CHECK: andl
- ; CHECK: andl
; CHECK: notl
; CHECK: notl
+ ; CHECK: orl $252645135
+ ; CHECK: orl $252645135
; CHECK: lock
; CHECK: cmpxchg8b
%19 = atomicrmw nand i64* %temp64, i64 17361641481138401520 monotonic
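+ ; The nand checks above follow from De Morgan's law, ~(x & c) == ~x | ~c:
+ ; ~1401 == -1402 gives the orl $-1402, and both 32-bit halves of
+ ; ~17361641481138401520 are 0x0F0F0F0F == 252645135.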
@@ -133,6 +135,7 @@ entry:
; CHECK: lock
; CHECK: cmpxchgl %{{.*}}, %gs:(%{{.*}})
- %0 = cmpxchg i32 addrspace(256)* %P, i32 0, i32 1 monotonic monotonic
+ %pair0 = cmpxchg i32 addrspace(256)* %P, i32 0, i32 1 monotonic monotonic
+ %0 = extractvalue { i32, i1 } %pair0, 0
ret void
}
diff --git a/test/CodeGen/X86/avx-blend.ll b/test/CodeGen/X86/avx-blend.ll
index e21c7a0..d2a22d7 100644
--- a/test/CodeGen/X86/avx-blend.ll
+++ b/test/CodeGen/X86/avx-blend.ll
@@ -110,7 +110,7 @@ define <8 x i64> @vsel_i648(<8 x i64> %v1, <8 x i64> %v2) {
;CHECK-LABEL: vsel_double4:
;CHECK-NOT: vinsertf128
-;CHECK: vshufpd $10
+;CHECK: vblendpd $10
;CHECK-NEXT: ret
define <4 x double> @vsel_double4(<4 x double> %v1, <4 x double> %v2) {
%vsel = select <4 x i1> <i1 true, i1 false, i1 true, i1 false>, <4 x double> %v1, <4 x double> %v2
@@ -158,3 +158,45 @@ define <8 x float> @constant_blendvps_avx(<8 x float> %xyzw, <8 x float> %abcd)
declare <8 x float> @llvm.x86.avx.blendv.ps.256(<8 x float>, <8 x float>, <8 x float>)
declare <4 x double> @llvm.x86.avx.blendv.pd.256(<4 x double>, <4 x double>, <4 x double>)
+
+;; 4 tests for shufflevectors that optimize to blend + immediate
+; CHECK-LABEL: @blend_shufflevector_4xfloat
+define <4 x float> @blend_shufflevector_4xfloat(<4 x float> %a, <4 x float> %b) {
+; Equivalent select mask is <i1 true, i1 false, i1 true, i1 false>.
+; Big endian representation is 0101 = 5.
+; '1' means take the first argument, '0' means take the second argument.
+; This is the opposite of the Intel syntax, so we expect the
+; inverted mask: 1010 = 10.
+; According to the ABI:
+; a is in xmm0 => first argument is xmm0.
+; b is in xmm1 => second argument is xmm1.
+; Result is in xmm0 => destination argument.
+; CHECK: vblendps $10, %xmm1, %xmm0, %xmm0
+; CHECK: ret
+ %1 = shufflevector <4 x float> %a, <4 x float> %b, <4 x i32> <i32 0, i32 5, i32 2, i32 7>
+ ret <4 x float> %1
+}
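+; The same encoding gives the immediates below: in the 8 x float test,
+; lanes 1-5 and 7 come from %b, so the mask is 0b10111110 = 190; in the
+; 4 x double test lane 1 gives 0b0010 = 2, and in the 4 x i64 test
+; lanes 0, 2 and 3 give 0b1101 = 13.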
+
+; CHECK-LABEL: @blend_shufflevector_8xfloat
+define <8 x float> @blend_shufflevector_8xfloat(<8 x float> %a, <8 x float> %b) {
+; CHECK: vblendps $190, %ymm1, %ymm0, %ymm0
+; CHECK: ret
+ %1 = shufflevector <8 x float> %a, <8 x float> %b, <8 x i32> <i32 0, i32 9, i32 10, i32 11, i32 12, i32 13, i32 6, i32 15>
+ ret <8 x float> %1
+}
+
+; CHECK-LABEL: @blend_shufflevector_4xdouble
+define <4 x double> @blend_shufflevector_4xdouble(<4 x double> %a, <4 x double> %b) {
+; CHECK: vblendpd $2, %ymm1, %ymm0, %ymm0
+; CHECK: ret
+ %1 = shufflevector <4 x double> %a, <4 x double> %b, <4 x i32> <i32 0, i32 5, i32 2, i32 3>
+ ret <4 x double> %1
+}
+
+; CHECK-LABEL: @blend_shufflevector_4xi64
+define <4 x i64> @blend_shufflevector_4xi64(<4 x i64> %a, <4 x i64> %b) {
+; CHECK: vblendpd $13, %ymm1, %ymm0, %ymm0
+; CHECK: ret
+ %1 = shufflevector <4 x i64> %a, <4 x i64> %b, <4 x i32> <i32 4, i32 1, i32 6, i32 7>
+ ret <4 x i64> %1
+}
diff --git a/test/CodeGen/X86/avx-intel-ocl.ll b/test/CodeGen/X86/avx-intel-ocl.ll
index 7337815..3e051bf 100644
--- a/test/CodeGen/X86/avx-intel-ocl.ll
+++ b/test/CodeGen/X86/avx-intel-ocl.ll
@@ -7,21 +7,21 @@ declare <16 x float> @func_float16_ptr(<16 x float>, <16 x float> *)
declare <16 x float> @func_float16(<16 x float>, <16 x float>)
declare i32 @func_int(i32, i32)
-; WIN64: testf16_inp
+; WIN64-LABEL: testf16_inp
; WIN64: vaddps {{.*}}, {{%ymm[0-1]}}
; WIN64: vaddps {{.*}}, {{%ymm[0-1]}}
; WIN64: leaq {{.*}}(%rsp), %rcx
; WIN64: call
; WIN64: ret
-; X32: testf16_inp
+; X32-LABEL: testf16_inp
; X32: movl %eax, (%esp)
; X32: vaddps {{.*}}, {{%ymm[0-1]}}
; X32: vaddps {{.*}}, {{%ymm[0-1]}}
; X32: call
; X32: ret
-; X64: testf16_inp
+; X64-LABEL: testf16_inp
; X64: vaddps {{.*}}, {{%ymm[0-1]}}
; X64: vaddps {{.*}}, {{%ymm[0-1]}}
; X64: leaq {{.*}}(%rsp), %rdi
@@ -41,14 +41,14 @@ define <16 x float> @testf16_inp(<16 x float> %a, <16 x float> %b) nounwind {
;test calling conventions - preserved registers
; preserved ymm6-ymm15
-; WIN64: testf16_regs
+; WIN64-LABEL: testf16_regs
; WIN64: call
; WIN64: vaddps {{%ymm[6-7]}}, {{%ymm[0-1]}}, {{%ymm[0-1]}}
; WIN64: vaddps {{%ymm[6-7]}}, {{%ymm[0-1]}}, {{%ymm[0-1]}}
; WIN64: ret
; preserved ymm8-ymm15
-; X64: testf16_regs
+; X64-LABEL: testf16_regs
; X64: call
; X64: vaddps {{%ymm[8-9]}}, {{%ymm[0-1]}}, {{%ymm[0-1]}}
; X64: vaddps {{%ymm[8-9]}}, {{%ymm[0-1]}}, {{%ymm[0-1]}}
@@ -65,28 +65,30 @@ define <16 x float> @testf16_regs(<16 x float> %a, <16 x float> %b) nounwind {
}
; test calling conventions - prolog and epilog
-; WIN64: vmovaps {{%ymm([6-9]|1[0-5])}}, {{.*(%rsp).*}} # 32-byte Spill
-; WIN64: vmovaps {{%ymm([6-9]|1[0-5])}}, {{.*(%rsp).*}} # 32-byte Spill
-; WIN64: vmovaps {{%ymm([6-9]|1[0-5])}}, {{.*(%rsp).*}} # 32-byte Spill
-; WIN64: vmovaps {{%ymm([6-9]|1[0-5])}}, {{.*(%rsp).*}} # 32-byte Spill
-; WIN64: vmovaps {{%ymm([6-9]|1[0-5])}}, {{.*(%rsp).*}} # 32-byte Spill
-; WIN64: vmovaps {{%ymm([6-9]|1[0-5])}}, {{.*(%rsp).*}} # 32-byte Spill
-; WIN64: vmovaps {{%ymm([6-9]|1[0-5])}}, {{.*(%rsp).*}} # 32-byte Spill
-; WIN64: vmovaps {{%ymm([6-9]|1[0-5])}}, {{.*(%rsp).*}} # 32-byte Spill
-; WIN64: vmovaps {{%ymm([6-9]|1[0-5])}}, {{.*(%rsp).*}} # 32-byte Spill
-; WIN64: vmovaps {{%ymm([6-9]|1[0-5])}}, {{.*(%rsp).*}} # 32-byte Spill
+; WIN64-LABEL: test_prolog_epilog
+; WIN64: vmovaps {{%ymm([6-9]|1[0-5])}}, {{.*(%rbp).*}} # 32-byte Spill
+; WIN64: vmovaps {{%ymm([6-9]|1[0-5])}}, {{.*(%rbp).*}} # 32-byte Spill
+; WIN64: vmovaps {{%ymm([6-9]|1[0-5])}}, {{.*(%rbp).*}} # 32-byte Spill
+; WIN64: vmovaps {{%ymm([6-9]|1[0-5])}}, {{.*(%rbp).*}} # 32-byte Spill
+; WIN64: vmovaps {{%ymm([6-9]|1[0-5])}}, {{.*(%rbp).*}} # 32-byte Spill
+; WIN64: vmovaps {{%ymm([6-9]|1[0-5])}}, {{.*(%rbp).*}} # 32-byte Spill
+; WIN64: vmovaps {{%ymm([6-9]|1[0-5])}}, {{.*(%rbp).*}} # 32-byte Spill
+; WIN64: vmovaps {{%ymm([6-9]|1[0-5])}}, {{.*(%rbp).*}} # 32-byte Spill
+; WIN64: vmovaps {{%ymm([6-9]|1[0-5])}}, {{.*(%rbp).*}} # 32-byte Spill
+; WIN64: vmovaps {{%ymm([6-9]|1[0-5])}}, {{.*(%rbp).*}} # 32-byte Spill
; WIN64: call
-; WIN64: vmovaps {{.*(%rsp).*}}, {{%ymm([6-9]|1[0-5])}} # 32-byte Reload
-; WIN64: vmovaps {{.*(%rsp).*}}, {{%ymm([6-9]|1[0-5])}} # 32-byte Reload
-; WIN64: vmovaps {{.*(%rsp).*}}, {{%ymm([6-9]|1[0-5])}} # 32-byte Reload
-; WIN64: vmovaps {{.*(%rsp).*}}, {{%ymm([6-9]|1[0-5])}} # 32-byte Reload
-; WIN64: vmovaps {{.*(%rsp).*}}, {{%ymm([6-9]|1[0-5])}} # 32-byte Reload
-; WIN64: vmovaps {{.*(%rsp).*}}, {{%ymm([6-9]|1[0-5])}} # 32-byte Reload
-; WIN64: vmovaps {{.*(%rsp).*}}, {{%ymm([6-9]|1[0-5])}} # 32-byte Reload
-; WIN64: vmovaps {{.*(%rsp).*}}, {{%ymm([6-9]|1[0-5])}} # 32-byte Reload
-; WIN64: vmovaps {{.*(%rsp).*}}, {{%ymm([6-9]|1[0-5])}} # 32-byte Reload
-; WIN64: vmovaps {{.*(%rsp).*}}, {{%ymm([6-9]|1[0-5])}} # 32-byte Reload
-
+; WIN64: vmovaps {{.*(%rbp).*}}, {{%ymm([6-9]|1[0-5])}} # 32-byte Reload
+; WIN64: vmovaps {{.*(%rbp).*}}, {{%ymm([6-9]|1[0-5])}} # 32-byte Reload
+; WIN64: vmovaps {{.*(%rbp).*}}, {{%ymm([6-9]|1[0-5])}} # 32-byte Reload
+; WIN64: vmovaps {{.*(%rbp).*}}, {{%ymm([6-9]|1[0-5])}} # 32-byte Reload
+; WIN64: vmovaps {{.*(%rbp).*}}, {{%ymm([6-9]|1[0-5])}} # 32-byte Reload
+; WIN64: vmovaps {{.*(%rbp).*}}, {{%ymm([6-9]|1[0-5])}} # 32-byte Reload
+; WIN64: vmovaps {{.*(%rbp).*}}, {{%ymm([6-9]|1[0-5])}} # 32-byte Reload
+; WIN64: vmovaps {{.*(%rbp).*}}, {{%ymm([6-9]|1[0-5])}} # 32-byte Reload
+; WIN64: vmovaps {{.*(%rbp).*}}, {{%ymm([6-9]|1[0-5])}} # 32-byte Reload
+; WIN64: vmovaps {{.*(%rbp).*}}, {{%ymm([6-9]|1[0-5])}} # 32-byte Reload
+
+; X64-LABEL: test_prolog_epilog
; X64: vmovups {{%ymm([8-9]|1[0-5])}}, {{.*}}(%rsp) ## 32-byte Folded Spill
; X64: vmovups {{%ymm([8-9]|1[0-5])}}, {{.*}}(%rsp) ## 32-byte Folded Spill
; X64: vmovups {{%ymm([8-9]|1[0-5])}}, {{.*}}(%rsp) ## 32-byte Folded Spill
@@ -111,12 +113,14 @@ define intel_ocl_bicc <16 x float> @test_prolog_epilog(<16 x float> %a, <16 x fl
; test functions with integer parameters
; pass parameters on stack for 32-bit platform
+; X32-LABEL: test_int
; X32: movl {{.*}}, 4(%esp)
; X32: movl {{.*}}, (%esp)
; X32: call
; X32: addl {{.*}}, %eax
; pass parameters in registers for 64-bit platform
+; X64-LABEL: test_int
; X64: leal {{.*}}, %edi
; X64: movl {{.*}}, %esi
; X64: call
@@ -128,21 +132,21 @@ define i32 @test_int(i32 %a, i32 %b) nounwind {
ret i32 %c
}
-; WIN64: test_float4
+; WIN64-LABEL: test_float4
; WIN64-NOT: vzeroupper
; WIN64: call
; WIN64-NOT: vzeroupper
; WIN64: call
; WIN64: ret
-; X64: test_float4
+; X64-LABEL: test_float4
; X64-NOT: vzeroupper
; X64: call
; X64-NOT: vzeroupper
; X64: call
; X64: ret
-; X32: test_float4
+; X32-LABEL: test_float4
; X32: vzeroupper
; X32: call
; X32: vzeroupper
diff --git a/test/CodeGen/X86/avx-intrinsics-x86.ll b/test/CodeGen/X86/avx-intrinsics-x86.ll
index 0be83f6..ce31161 100644
--- a/test/CodeGen/X86/avx-intrinsics-x86.ll
+++ b/test/CodeGen/X86/avx-intrinsics-x86.ll
@@ -2219,14 +2219,6 @@ define void @test_x86_avx_storeu_ps_256(i8* %a0, <8 x float> %a1) {
declare void @llvm.x86.avx.storeu.ps.256(i8*, <8 x float>) nounwind
-define <4 x double> @test_x86_avx_vbroadcast_sd_256(i8* %a0) {
- ; CHECK: vbroadcastsd
- %res = call <4 x double> @llvm.x86.avx.vbroadcast.sd.256(i8* %a0) ; <<4 x double>> [#uses=1]
- ret <4 x double> %res
-}
-declare <4 x double> @llvm.x86.avx.vbroadcast.sd.256(i8*) nounwind readonly
-
-
define <4 x double> @test_x86_avx_vbroadcastf128_pd_256(i8* %a0) {
; CHECK: vbroadcastf128
%res = call <4 x double> @llvm.x86.avx.vbroadcastf128.pd.256(i8* %a0) ; <<4 x double>> [#uses=1]
@@ -2243,22 +2235,6 @@ define <8 x float> @test_x86_avx_vbroadcastf128_ps_256(i8* %a0) {
declare <8 x float> @llvm.x86.avx.vbroadcastf128.ps.256(i8*) nounwind readonly
-define <4 x float> @test_x86_avx_vbroadcast_ss(i8* %a0) {
- ; CHECK: vbroadcastss
- %res = call <4 x float> @llvm.x86.avx.vbroadcast.ss(i8* %a0) ; <<4 x float>> [#uses=1]
- ret <4 x float> %res
-}
-declare <4 x float> @llvm.x86.avx.vbroadcast.ss(i8*) nounwind readonly
-
-
-define <8 x float> @test_x86_avx_vbroadcast_ss_256(i8* %a0) {
- ; CHECK: vbroadcastss
- %res = call <8 x float> @llvm.x86.avx.vbroadcast.ss.256(i8* %a0) ; <<8 x float>> [#uses=1]
- ret <8 x float> %res
-}
-declare <8 x float> @llvm.x86.avx.vbroadcast.ss.256(i8*) nounwind readonly
-
-
define <2 x double> @test_x86_avx_vextractf128_pd_256(<4 x double> %a0) {
; CHECK: vextractf128
%res = call <2 x double> @llvm.x86.avx.vextractf128.pd.256(<4 x double> %a0, i8 7) ; <<2 x double>> [#uses=1]
diff --git a/test/CodeGen/X86/avx-shuffle.ll b/test/CodeGen/X86/avx-shuffle.ll
index f407ba4..4a996d7 100644
--- a/test/CodeGen/X86/avx-shuffle.ll
+++ b/test/CodeGen/X86/avx-shuffle.ll
@@ -5,8 +5,10 @@ define <4 x float> @test1(<4 x float> %a) nounwind {
%b = shufflevector <4 x float> zeroinitializer, <4 x float> %a, <4 x i32> <i32 2, i32 5, i32 undef, i32 undef>
ret <4 x float> %b
; CHECK-LABEL: test1:
-; CHECK: vshufps
-; CHECK: vpshufd
+;; TODO: This test could be improved by removing the xor instruction and
+;; having vinsertps zero out the needed elements.
+; CHECK: vxorps
+; CHECK: vinsertps
}
; rdar://10538417
@@ -23,7 +25,7 @@ define <4 x i64> @test3(<4 x i64> %a, <4 x i64> %b) nounwind {
%c = shufflevector <4 x i64> %a, <4 x i64> %b, <4 x i32> <i32 4, i32 5, i32 2, i32 undef>
ret <4 x i64> %c
; CHECK-LABEL: test3:
-; CHECK: vperm2f128
+; CHECK: vblendpd
; CHECK: ret
}
diff --git a/test/CodeGen/X86/avx-splat.ll b/test/CodeGen/X86/avx-splat.ll
index 5d07815..b1b2f8b 100644
--- a/test/CodeGen/X86/avx-splat.ll
+++ b/test/CodeGen/X86/avx-splat.ll
@@ -43,13 +43,10 @@ entry:
ret <4 x double> %vecinit6.i
}
-; Test this simple opt:
+; Test that this turns into a broadcast:
; shuffle (scalar_to_vector (load (ptr + 4))), undef, <0, 0, 0, 0>
-; To:
-; shuffle (vload ptr)), undef, <1, 1, 1, 1>
-; CHECK: vmovdqa
-; CHECK-NEXT: vpshufd $-1
-; CHECK-NEXT: vinsertf128 $1
+;
+; CHECK: vbroadcastss
define <8 x float> @funcE() nounwind {
allocas:
%udx495 = alloca [18 x [18 x float]], align 32
diff --git a/test/CodeGen/X86/avx-vperm2f128.ll b/test/CodeGen/X86/avx-vperm2f128.ll
index caa21e5..c20775b 100644
--- a/test/CodeGen/X86/avx-vperm2f128.ll
+++ b/test/CodeGen/X86/avx-vperm2f128.ll
@@ -9,7 +9,7 @@ entry:
}
; CHECK: _B
-; CHECK: vperm2f128 $48
+; CHECK: vblendps $240
define <8 x float> @B(<8 x float> %a, <8 x float> %b) nounwind uwtable readnone ssp {
entry:
%shuffle = shufflevector <8 x float> %a, <8 x float> %b, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 12, i32 13, i32 14, i32 15>
diff --git a/test/CodeGen/X86/avx-vshufp.ll b/test/CodeGen/X86/avx-vshufp.ll
index 45883b7..ad3dbc1 100644
--- a/test/CodeGen/X86/avx-vshufp.ll
+++ b/test/CodeGen/X86/avx-vshufp.ll
@@ -32,14 +32,14 @@ entry:
ret <8 x i32> %shuffle
}
-; CHECK: vshufpd $10, %ymm
+; CHECK: vblendpd $10, %ymm
define <4 x double> @B(<4 x double> %a, <4 x double> %b) nounwind uwtable readnone ssp {
entry:
%shuffle = shufflevector <4 x double> %a, <4 x double> %b, <4 x i32> <i32 0, i32 5, i32 2, i32 7>
ret <4 x double> %shuffle
}
-; CHECK: vshufpd $10, (%{{.*}}), %ymm
+; CHECK: vblendpd $10, (%{{.*}}), %ymm
define <4 x double> @B2(<4 x double>* %a, <4 x double>* %b) nounwind uwtable readnone ssp {
entry:
%a2 = load <4 x double>* %a
@@ -48,14 +48,14 @@ entry:
ret <4 x double> %shuffle
}
-; CHECK: vshufpd $10, %ymm
+; CHECK: vblendpd $10, %ymm
define <4 x i64> @B3(<4 x i64> %a, <4 x i64> %b) nounwind uwtable readnone ssp {
entry:
%shuffle = shufflevector <4 x i64> %a, <4 x i64> %b, <4 x i32> <i32 0, i32 5, i32 2, i32 7>
ret <4 x i64> %shuffle
}
-; CHECK: vshufpd $10, (%{{.*}}), %ymm
+; CHECK: vblendpd $10, (%{{.*}}), %ymm
define <4 x i64> @B4(<4 x i64>* %a, <4 x i64>* %b) nounwind uwtable readnone ssp {
entry:
%a2 = load <4 x i64>* %a
@@ -71,7 +71,7 @@ entry:
ret <8 x float> %shuffle
}
-; CHECK: vshufpd $2, %ymm
+; CHECK: vblendpd $2, %ymm
define <4 x double> @D(<4 x double> %a, <4 x double> %b) nounwind uwtable readnone ssp {
entry:
%shuffle = shufflevector <4 x double> %a, <4 x double> %b, <4 x i32> <i32 0, i32 5, i32 2, i32 undef>
diff --git a/test/CodeGen/X86/avx2-shuffle.ll b/test/CodeGen/X86/avx2-shuffle.ll
index 0e6dd29..185b989 100644
--- a/test/CodeGen/X86/avx2-shuffle.ll
+++ b/test/CodeGen/X86/avx2-shuffle.ll
@@ -60,6 +60,24 @@ define <4 x i64> @blend_test4(<4 x i64> %a, <4 x i64> %b) nounwind alwaysinline
ret <4 x i64> %t
}
+;; 2 tests for shufflevectors that optimize to blend + immediate
+; CHECK-LABEL: @blend_test5
+; CHECK: vpblendd $10, %xmm1, %xmm0, %xmm0
+; CHECK: ret
+define <4 x i32> @blend_test5(<4 x i32> %a, <4 x i32> %b) {
+ %1 = shufflevector <4 x i32> %a, <4 x i32> %b, <4 x i32> <i32 0, i32 5, i32 2, i32 7>
+ ret <4 x i32> %1
+}
+
+; CHECK-LABEL: @blend_test6
+; CHECK: vpblendw $134, %ymm1, %ymm0, %ymm0
+; CHECK: ret
+define <16 x i16> @blend_test6(<16 x i16> %a, <16 x i16> %b) {
+ %1 = shufflevector <16 x i16> %a, <16 x i16> %b, <16 x i32> <i32 0, i32 17, i32 18, i32 3, i32 4, i32 5, i32 6, i32 23,
+ i32 8, i32 25, i32 26, i32 11, i32 12, i32 13, i32 14, i32 31>
+ ret <16 x i16> %1
+}
+
; CHECK: vpshufhw $27, %ymm
define <16 x i16> @vpshufhw(<16 x i16> %src1) nounwind uwtable readnone ssp {
entry:
diff --git a/test/CodeGen/X86/avx512-cvt.ll b/test/CodeGen/X86/avx512-cvt.ll
index 2476ea1..f5cda96 100644
--- a/test/CodeGen/X86/avx512-cvt.ll
+++ b/test/CodeGen/X86/avx512-cvt.ll
@@ -192,6 +192,14 @@ define <16 x double> @uitof64(<16 x i32> %a) nounwind {
ret <16 x double> %b
}
+; CHECK-LABEL: uitof64_256
+; CHECK: vcvtudq2pd
+; CHECK: ret
+define <4 x double> @uitof64_256(<4 x i32> %a) nounwind {
+ %b = uitofp <4 x i32> %a to <4 x double>
+ ret <4 x double> %b
+}
+
; CHECK-LABEL: uitof32
; CHECK: vcvtudq2ps
; CHECK: ret
diff --git a/test/CodeGen/X86/avx512-inc-dec.ll b/test/CodeGen/X86/avx512-inc-dec.ll
new file mode 100644
index 0000000..f04ca87
--- /dev/null
+++ b/test/CodeGen/X86/avx512-inc-dec.ll
@@ -0,0 +1,13 @@
+; RUN: llc < %s -march=x86-64 -mtriple=x86_64-apple-darwin -mcpu=knl | FileCheck %s
+
+;CHECK-LABEL: test
+;CHECK-NOT: dec
+;CHECK-NOT: inc
+;CHECK: ret
+define i32 @test(i32 %a, i32 %b) {
+ %a1 = add i32 %a, -1
+ %b1 = add i32 %b, 1
+ %res = mul i32 %a1, %b1
+ ret i32 %res
+}
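+; KNL is modeled with slow inc/dec (they only partially update EFLAGS),
+; so the -1/+1 adds above should be emitted as add instructions instead.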
+
diff --git a/test/CodeGen/X86/avx512-intrinsics.ll b/test/CodeGen/X86/avx512-intrinsics.ll
index e19841a..18cfcfe 100644
--- a/test/CodeGen/X86/avx512-intrinsics.ll
+++ b/test/CodeGen/X86/avx512-intrinsics.ll
@@ -311,7 +311,6 @@ define <8 x i64> @test_conflict_q(<8 x i64> %a) {
declare <8 x i64> @llvm.x86.avx512.mask.conflict.q.512(<8 x i64>, <8 x i64>, i8) nounwind readonly
-
define <16 x i32> @test_maskz_conflict_d(<16 x i32> %a, i16 %mask) {
; CHECK: vpconflictd
%res = call <16 x i32> @llvm.x86.avx512.mask.conflict.d.512(<16 x i32> %a, <16 x i32> zeroinitializer, i16 %mask)
@@ -324,6 +323,57 @@ define <8 x i64> @test_mask_conflict_q(<8 x i64> %a, <8 x i64> %b, i8 %mask) {
ret <8 x i64> %res
}
+define <16 x i32> @test_lzcnt_d(<16 x i32> %a) {
+ ; CHECK: movw $-1, %ax
+ ; CHECK: vpxor
+ ; CHECK: vplzcntd
+ %res = call <16 x i32> @llvm.x86.avx512.mask.lzcnt.d.512(<16 x i32> %a, <16 x i32> zeroinitializer, i16 -1)
+ ret <16 x i32> %res
+}
+
+declare <16 x i32> @llvm.x86.avx512.mask.lzcnt.d.512(<16 x i32>, <16 x i32>, i16) nounwind readonly
+
+define <8 x i64> @test_lzcnt_q(<8 x i64> %a) {
+ ; CHECK: movb $-1, %al
+ ; CHECK: vpxor
+ ; CHECK: vplzcntq
+ %res = call <8 x i64> @llvm.x86.avx512.mask.lzcnt.q.512(<8 x i64> %a, <8 x i64> zeroinitializer, i8 -1)
+ ret <8 x i64> %res
+}
+
+declare <8 x i64> @llvm.x86.avx512.mask.lzcnt.q.512(<8 x i64>, <8 x i64>, i8) nounwind readonly
+
+
+define <16 x i32> @test_mask_lzcnt_d(<16 x i32> %a, <16 x i32> %b, i16 %mask) {
+ ; CHECK: vplzcntd
+ %res = call <16 x i32> @llvm.x86.avx512.mask.lzcnt.d.512(<16 x i32> %a, <16 x i32> %b, i16 %mask)
+ ret <16 x i32> %res
+}
+
+define <8 x i64> @test_mask_lzcnt_q(<8 x i64> %a, <8 x i64> %b, i8 %mask) {
+ ; CHECK: vplzcntq
+ %res = call <8 x i64> @llvm.x86.avx512.mask.lzcnt.q.512(<8 x i64> %a, <8 x i64> %b, i8 %mask)
+ ret <8 x i64> %res
+}
+
+define <16 x i32> @test_ctlz_d(<16 x i32> %a) {
+ ; CHECK-LABEL: test_ctlz_d
+ ; CHECK: vplzcntd
+ %res = call <16 x i32> @llvm.ctlz.v16i32(<16 x i32> %a, i1 false)
+ ret <16 x i32> %res
+}
+
+declare <16 x i32> @llvm.ctlz.v16i32(<16 x i32>, i1) nounwind readonly
+
+define <8 x i64> @test_ctlz_q(<8 x i64> %a) {
+ ; CHECK-LABEL: test_ctlz_q
+ ; CHECK: vplzcntq
+ %res = call <8 x i64> @llvm.ctlz.v8i64(<8 x i64> %a, i1 false)
+ ret <8 x i64> %res
+}
+
+declare <8 x i64> @llvm.ctlz.v8i64(<8 x i64>, i1) nounwind readonly
+
define <16 x float> @test_x86_mask_blend_ps_512(i16 %a0, <16 x float> %a1, <16 x float> %a2) {
; CHECK: vblendmps
%res = call <16 x float> @llvm.x86.avx512.mask.blend.ps.512(<16 x float> %a1, <16 x float> %a2, i16 %a0) ; <<16 x float>> [#uses=1]
@@ -544,4 +594,20 @@ define <16 x float> @test_vpermt2ps(<16 x float>%x, <16 x float>%y, <16 x i32>%p
ret <16 x float> %res
}
+define <16 x float> @test_vpermt2ps_mask(<16 x float>%x, <16 x float>%y, <16 x i32>%perm, i16 %mask) {
+; CHECK-LABEL: test_vpermt2ps_mask:
+; CHECK: vpermt2ps %zmm1, %zmm2, %zmm0 {%k1} ## encoding: [0x62,0xf2,0x6d,0x49,0x7f,0xc1]
+ %res = call <16 x float> @llvm.x86.avx512.mask.vpermt.ps.512(<16 x i32>%perm, <16 x float>%x, <16 x float>%y, i16 %mask)
+ ret <16 x float> %res
+}
+
declare <16 x float> @llvm.x86.avx512.mask.vpermt.ps.512(<16 x i32>, <16 x float>, <16 x float>, i16)
+
+define <8 x i64> @test_vmovntdqa(i8 *%x) {
+; CHECK-LABEL: test_vmovntdqa:
+; CHECK: vmovntdqa (%rdi), %zmm0 ## encoding: [0x62,0xf2,0x7d,0x48,0x2a,0x07]
+ %res = call <8 x i64> @llvm.x86.avx512.movntdqa(i8* %x)
+ ret <8 x i64> %res
+}
+
+declare <8 x i64> @llvm.x86.avx512.movntdqa(i8*)
diff --git a/test/CodeGen/X86/avx512-nontemporal.ll b/test/CodeGen/X86/avx512-nontemporal.ll
new file mode 100644
index 0000000..ef50cdb
--- /dev/null
+++ b/test/CodeGen/X86/avx512-nontemporal.ll
@@ -0,0 +1,19 @@
+; RUN: llc < %s -march=x86-64 -mattr=+avx512f | FileCheck %s
+
+define void @f(<16 x float> %A, <16 x float> %AA, i8* %B, <8 x double> %C, <8 x double> %CC, i32 %D, <8 x i64> %E, <8 x i64> %EE) {
+; CHECK: vmovntps %z
+ %cast = bitcast i8* %B to <16 x float>*
+ %A2 = fadd <16 x float> %A, %AA
+ store <16 x float> %A2, <16 x float>* %cast, align 64, !nontemporal !0
+; CHECK: vmovntdq %z
+ %cast1 = bitcast i8* %B to <8 x i64>*
+ %E2 = add <8 x i64> %E, %EE
+ store <8 x i64> %E2, <8 x i64>* %cast1, align 64, !nontemporal !0
+; CHECK: vmovntpd %z
+ %cast2 = bitcast i8* %B to <8 x double>*
+ %C2 = fadd <8 x double> %C, %CC
+ store <8 x double> %C2, <8 x double>* %cast2, align 64, !nontemporal !0
+ ret void
+}
+
+!0 = metadata !{i32 1}
diff --git a/test/CodeGen/X86/avx512-shuffle.ll b/test/CodeGen/X86/avx512-shuffle.ll
index 23ddc3a..b99e89a 100644
--- a/test/CodeGen/X86/avx512-shuffle.ll
+++ b/test/CodeGen/X86/avx512-shuffle.ll
@@ -56,6 +56,16 @@ define <8 x double> @test5(<8 x double> %a, <8 x double> %b) nounwind {
ret <8 x double> %c
}
+; The reg variant of vpermt2 with a writemask
+; CHECK-LABEL: test5m:
+; CHECK: vpermt2pd {{.* {%k[1-7]} {z}}}
+define <8 x double> @test5m(<8 x double> %a, <8 x double> %b, i8 %mask) nounwind {
+ %c = shufflevector <8 x double> %a, <8 x double> %b, <8 x i32> <i32 2, i32 8, i32 0, i32 1, i32 6, i32 10, i32 4, i32 5>
+ %m = bitcast i8 %mask to <8 x i1>
+ %res = select <8 x i1> %m, <8 x double> %c, <8 x double> zeroinitializer
+ ret <8 x double> %res
+}
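+; The bitcast of the scalar mask to <8 x i1> plus the select against
+; zeroinitializer is the IR idiom for a zeroing writemask; the backend
+; folds it into the {%k} {z} operand checked above.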
+
; CHECK-LABEL: test6:
; CHECK: vpermq $30
; CHECK: ret
@@ -72,6 +82,27 @@ define <8 x i64> @test7(<8 x i64> %a, <8 x i64> %b) nounwind {
ret <8 x i64> %c
}
+; The reg variant of vpermt2 with a writemask
+; CHECK-LABEL: test7m:
+; CHECK: vpermt2q {{.* {%k[1-7]} {z}}}
+define <8 x i64> @test7m(<8 x i64> %a, <8 x i64> %b, i8 %mask) nounwind {
+ %c = shufflevector <8 x i64> %a, <8 x i64> %b, <8 x i32> <i32 2, i32 8, i32 0, i32 1, i32 6, i32 10, i32 4, i32 5>
+ %m = bitcast i8 %mask to <8 x i1>
+ %res = select <8 x i1> %m, <8 x i64> %c, <8 x i64> zeroinitializer
+ ret <8 x i64> %res
+}
+
+; The mem variant of vpermt2 with a writemask
+; CHECK-LABEL: test7mm:
+; CHECK: vpermt2q {{\(.*\).* {%k[1-7]} {z}}}
+define <8 x i64> @test7mm(<8 x i64> %a, <8 x i64> *%pb, i8 %mask) nounwind {
+ %b = load <8 x i64>* %pb
+ %c = shufflevector <8 x i64> %a, <8 x i64> %b, <8 x i32> <i32 2, i32 8, i32 0, i32 1, i32 6, i32 10, i32 4, i32 5>
+ %m = bitcast i8 %mask to <8 x i1>
+ %res = select <8 x i1> %m, <8 x i64> %c, <8 x i64> zeroinitializer
+ ret <8 x i64> %res
+}
+
; CHECK-LABEL: test8:
; CHECK: vpermt2d
; CHECK: ret
@@ -80,6 +111,27 @@ define <16 x i32> @test8(<16 x i32> %a, <16 x i32> %b) nounwind {
ret <16 x i32> %c
}
+; The reg variant of vpermt2 with a writemask
+; CHECK-LABEL: test8m:
+; CHECK: vpermt2d {{.* {%k[1-7]} {z}}}
+define <16 x i32> @test8m(<16 x i32> %a, <16 x i32> %b, i16 %mask) nounwind {
+ %c = shufflevector <16 x i32> %a, <16 x i32> %b, <16 x i32> <i32 15, i32 31, i32 14, i32 22, i32 13, i32 29, i32 4, i32 28, i32 11, i32 27, i32 10, i32 26, i32 9, i32 25, i32 8, i32 24>
+ %m = bitcast i16 %mask to <16 x i1>
+ %res = select <16 x i1> %m, <16 x i32> %c, <16 x i32> zeroinitializer
+ ret <16 x i32> %res
+}
+
+; The mem variant of vpermt2 with a writemask
+; CHECK-LABEL: test8mm:
+; CHECK: vpermt2d {{\(.*\).* {%k[1-7]} {z}}}
+define <16 x i32> @test8mm(<16 x i32> %a, <16 x i32> *%pb, i16 %mask) nounwind {
+ %b = load <16 x i32>* %pb
+ %c = shufflevector <16 x i32> %a, <16 x i32> %b, <16 x i32> <i32 15, i32 31, i32 14, i32 22, i32 13, i32 29, i32 4, i32 28, i32 11, i32 27, i32 10, i32 26, i32 9, i32 25, i32 8, i32 24>
+ %m = bitcast i16 %mask to <16 x i1>
+ %res = select <16 x i1> %m, <16 x i32> %c, <16 x i32> zeroinitializer
+ ret <16 x i32> %res
+}
+
; CHECK-LABEL: test9:
; CHECK: vpermt2ps
; CHECK: ret
@@ -88,6 +140,16 @@ define <16 x float> @test9(<16 x float> %a, <16 x float> %b) nounwind {
ret <16 x float> %c
}
+; The reg variant of vpermt2 with a writemask
+; CHECK-LABEL: test9m:
+; CHECK: vpermt2ps {{.*}} {%k{{.}}} {z}
+define <16 x float> @test9m(<16 x float> %a, <16 x float> %b, i16 %mask) nounwind {
+ %c = shufflevector <16 x float> %a, <16 x float> %b, <16 x i32> <i32 15, i32 31, i32 14, i32 22, i32 13, i32 29, i32 4, i32 28, i32 11, i32 27, i32 10, i32 26, i32 9, i32 25, i32 8, i32 24>
+ %m = bitcast i16 %mask to <16 x i1>
+ %res = select <16 x i1> %m, <16 x float> %c, <16 x float> zeroinitializer
+ ret <16 x float> %res
+}
+
; CHECK-LABEL: test10:
; CHECK: vpermt2ps (
; CHECK: ret
diff --git a/test/CodeGen/X86/bswap-vector.ll b/test/CodeGen/X86/bswap-vector.ll
index 3c931db..9dc960d 100644
--- a/test/CodeGen/X86/bswap-vector.ll
+++ b/test/CodeGen/X86/bswap-vector.ll
@@ -1,6 +1,7 @@
; RUN: llc < %s -mcpu=x86-64 | FileCheck %s -check-prefix=CHECK-NOSSSE3
; RUN: llc < %s -mcpu=core2 | FileCheck %s -check-prefix=CHECK-SSSE3
; RUN: llc < %s -mcpu=core-avx2 | FileCheck %s -check-prefix=CHECK-AVX2
+; RUN: llc < %s -mcpu=core-avx2 -x86-experimental-vector-widening-legalization | FileCheck %s -check-prefix=CHECK-WIDE-AVX2
target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
target triple = "x86_64-unknown-linux-gnu"
@@ -31,6 +32,10 @@ entry:
; CHECK-AVX2-LABEL: @test1
; CHECK-AVX2: vpshufb
; CHECK-AVX2-NEXT: retq
+
+; CHECK-WIDE-AVX2-LABEL: @test1
+; CHECK-WIDE-AVX2: vpshufb
+; CHECK-WIDE-AVX2-NEXT: retq
}
define <4 x i32> @test2(<4 x i32> %v) #0 {
@@ -52,6 +57,10 @@ entry:
; CHECK-AVX2-LABEL: @test2
; CHECK-AVX2: vpshufb
; CHECK-AVX2-NEXT: retq
+
+; CHECK-WIDE-AVX2-LABEL: @test2
+; CHECK-WIDE-AVX2: vpshufb
+; CHECK-WIDE-AVX2-NEXT: retq
}
define <2 x i64> @test3(<2 x i64> %v) #0 {
@@ -71,6 +80,10 @@ entry:
; CHECK-AVX2-LABEL: @test3
; CHECK-AVX2: vpshufb
; CHECK-AVX2-NEXT: retq
+
+; CHECK-WIDE-AVX2-LABEL: @test3
+; CHECK-WIDE-AVX2: vpshufb
+; CHECK-WIDE-AVX2-NEXT: retq
}
declare <16 x i16> @llvm.bswap.v16i16(<16 x i16>)
@@ -90,6 +103,10 @@ entry:
; CHECK-AVX2-LABEL: @test4
; CHECK-AVX2: vpshufb
; CHECK-AVX2-NEXT: retq
+
+; CHECK-WIDE-AVX2-LABEL: @test4
+; CHECK-WIDE-AVX2: vpshufb
+; CHECK-WIDE-AVX2-NEXT: retq
}
define <8 x i32> @test5(<8 x i32> %v) #0 {
@@ -105,6 +122,10 @@ entry:
; CHECK-AVX2-LABEL: @test5
; CHECK-AVX2: vpshufb
; CHECK-AVX2-NEXT: retq
+
+; CHECK-WIDE-AVX2-LABEL: @test5
+; CHECK-WIDE-AVX2: vpshufb
+; CHECK-WIDE-AVX2-NEXT: retq
}
define <4 x i64> @test6(<4 x i64> %v) #0 {
@@ -120,6 +141,10 @@ entry:
; CHECK-AVX2-LABEL: @test6
; CHECK-AVX2: vpshufb
; CHECK-AVX2-NEXT: retq
+
+; CHECK-WIDE-AVX2-LABEL: @test6
+; CHECK-WIDE-AVX2: vpshufb
+; CHECK-WIDE-AVX2-NEXT: retq
}
declare <4 x i16> @llvm.bswap.v4i16(<4 x i16>)
@@ -138,6 +163,10 @@ entry:
; CHECK-AVX2: vpshufb
; CHECK-AVX2: vpsrld $16
; CHECK-AVX2-NEXT: retq
+
+; CHECK-WIDE-AVX2-LABEL: @test7
+; CHECK-WIDE-AVX2: vpshufb
+; CHECK-WIDE-AVX2-NEXT: retq
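+; With widening legalization the v4i16 input stays a 128-bit vector and
+; is byte-swapped in place, so the vpsrld $16 fixup needed after
+; promotion to v4i32 is gone.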
}
attributes #0 = { nounwind uwtable }
diff --git a/test/CodeGen/X86/cmp.ll b/test/CodeGen/X86/cmp.ll
index cdcdc96..149d537 100644
--- a/test/CodeGen/X86/cmp.ll
+++ b/test/CodeGen/X86/cmp.ll
@@ -198,3 +198,16 @@ define i32 @test14(i32 %mask, i32 %base, i32 %intra) #0 {
; CHECK: shrl $7, %edi
; CHECK-NEXT: cmovnsl %edx, %esi
}
+
+; PR19964
+define zeroext i1 @test15(i32 %bf.load, i32 %n) {
+ %bf.lshr = lshr i32 %bf.load, 16
+ %cmp2 = icmp eq i32 %bf.lshr, 0
+ %cmp5 = icmp uge i32 %bf.lshr, %n
+ %.cmp5 = or i1 %cmp2, %cmp5
+ ret i1 %.cmp5
+
+; CHECK-LABEL: test15:
+; CHECK: shrl $16, %edi
+; CHECK: cmpl %esi, %edi
+}
diff --git a/test/CodeGen/X86/cmpxchg-i1.ll b/test/CodeGen/X86/cmpxchg-i1.ll
new file mode 100644
index 0000000..a21ab59
--- /dev/null
+++ b/test/CodeGen/X86/cmpxchg-i1.ll
@@ -0,0 +1,87 @@
+; RUN: llc -mtriple=x86_64 -o - %s | FileCheck %s
+
+define i1 @try_cmpxchg(i32* %addr, i32 %desired, i32 %new) {
+; CHECK-LABEL: try_cmpxchg:
+; CHECK: cmpxchgl
+; CHECK-NOT: cmp
+; CHECK: sete %al
+; CHECK: retq
+ %pair = cmpxchg i32* %addr, i32 %desired, i32 %new seq_cst seq_cst
+ %success = extractvalue { i32, i1 } %pair, 1
+ ret i1 %success
+}
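+; cmpxchg itself sets ZF on success, so the i1 result comes straight
+; from sete with no separate comparison; that is what the CHECK-NOT
+; lines in this file guard against.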
+
+define void @cmpxchg_flow(i64* %addr, i64 %desired, i64 %new) {
+; CHECK-LABEL: cmpxchg_flow:
+; CHECK: cmpxchgq
+; CHECK-NOT: cmp
+; CHECK-NOT: set
+; CHECK: {{jne|je}}
+ %pair = cmpxchg i64* %addr, i64 %desired, i64 %new seq_cst seq_cst
+ %success = extractvalue { i64, i1 } %pair, 1
+ br i1 %success, label %true, label %false
+
+true:
+ call void @foo()
+ ret void
+
+false:
+ call void @bar()
+ ret void
+}
+
+define i64 @cmpxchg_sext(i32* %addr, i32 %desired, i32 %new) {
+; CHECK-LABEL: cmpxchg_sext:
+; CHECK-DAG: cmpxchgl
+; CHECK-NOT: cmpl
+; CHECK: sete %al
+; CHECK: retq
+ %pair = cmpxchg i32* %addr, i32 %desired, i32 %new seq_cst seq_cst
+ %success = extractvalue { i32, i1 } %pair, 1
+ %mask = sext i1 %success to i64
+ ret i64 %mask
+}
+
+define i32 @cmpxchg_zext(i32* %addr, i32 %desired, i32 %new) {
+; CHECK-LABEL: cmpxchg_zext:
+; CHECK: cmpxchgl
+; CHECK-NOT: cmp
+; CHECK: sete [[BYTE:%[a-z0-9]+]]
+; CHECK: movzbl [[BYTE]], %eax
+ %pair = cmpxchg i32* %addr, i32 %desired, i32 %new seq_cst seq_cst
+ %success = extractvalue { i32, i1 } %pair, 1
+ %mask = zext i1 %success to i32
+ ret i32 %mask
+}
+
+
+define i32 @cmpxchg_use_eflags_and_val(i32* %addr, i32 %offset) {
+; CHECK-LABEL: cmpxchg_use_eflags_and_val:
+; CHECK: movl (%rdi), %e[[OLDVAL:[a-z0-9]+]]
+
+; CHECK: [[LOOPBB:.?LBB[0-9]+_[0-9]+]]:
+; CHECK: leal (%r[[OLDVAL]],%rsi), [[NEW:%[a-z0-9]+]]
+; CHECK: cmpxchgl [[NEW]], (%rdi)
+; CHECK-NOT: cmpl
+; CHECK: jne [[LOOPBB]]
+
+ ; Result already in %eax
+; CHECK: retq
+entry:
+ %init = load atomic i32* %addr seq_cst, align 4
+ br label %loop
+
+loop:
+ %old = phi i32 [%init, %entry], [%oldval, %loop]
+ %new = add i32 %old, %offset
+ %pair = cmpxchg i32* %addr, i32 %old, i32 %new seq_cst seq_cst
+ %oldval = extractvalue { i32, i1 } %pair, 0
+ %success = extractvalue { i32, i1 } %pair, 1
+ br i1 %success, label %done, label %loop
+
+done:
+ ret i32 %oldval
+}
+
+declare void @foo()
+declare void @bar()
diff --git a/test/CodeGen/X86/cmpxchg-i128-i1.ll b/test/CodeGen/X86/cmpxchg-i128-i1.ll
new file mode 100644
index 0000000..4dd3001
--- /dev/null
+++ b/test/CodeGen/X86/cmpxchg-i128-i1.ll
@@ -0,0 +1,83 @@
+; RUN: llc -mcpu=core-avx2 -mtriple=x86_64 -o - %s | FileCheck %s
+
+define i1 @try_cmpxchg(i128* %addr, i128 %desired, i128 %new) {
+; CHECK-LABEL: try_cmpxchg:
+; CHECK: cmpxchg16b
+; CHECK-NOT: cmp
+; CHECK: sete %al
+; CHECK: retq
+ %pair = cmpxchg i128* %addr, i128 %desired, i128 %new seq_cst seq_cst
+ %success = extractvalue { i128, i1 } %pair, 1
+ ret i1 %success
+}
+
+define void @cmpxchg_flow(i128* %addr, i128 %desired, i128 %new) {
+; CHECK-LABEL: cmpxchg_flow:
+; CHECK: cmpxchg16b
+; CHECK-NOT: cmp
+; CHECK-NOT: set
+; CHECK: {{jne|je}}
+ %pair = cmpxchg i128* %addr, i128 %desired, i128 %new seq_cst seq_cst
+ %success = extractvalue { i128, i1 } %pair, 1
+ br i1 %success, label %true, label %false
+
+true:
+ call void @foo()
+ ret void
+
+false:
+ call void @bar()
+ ret void
+}
+
+; Can't use the flags here because cmpxchg16b only sets ZF.
+define i1 @cmpxchg_arithcmp(i128* %addr, i128 %desired, i128 %new) {
+; CHECK-LABEL: cmpxchg_arithcmp:
+; CHECK: cmpxchg16b
+; CHECK: cmpq
+; CHECK: retq
+ %pair = cmpxchg i128* %addr, i128 %desired, i128 %new seq_cst seq_cst
+ %oldval = extractvalue { i128, i1 } %pair, 0
+ %success = icmp sge i128 %oldval, %desired
+ ret i1 %success
+}
+
+define i128 @cmpxchg_zext(i128* %addr, i128 %desired, i128 %new) {
+; CHECK-LABEL: cmpxchg_zext:
+; CHECK: cmpxchg16b
+; CHECK-NOT: cmpq
+; CHECK: sete [[BYTE:%[a-z0-9]+]]
+; CHECK: movzbl [[BYTE]], %eax
+ %pair = cmpxchg i128* %addr, i128 %desired, i128 %new seq_cst seq_cst
+ %success = extractvalue { i128, i1 } %pair, 1
+ %mask = zext i1 %success to i128
+ ret i128 %mask
+}
+
+
+define i128 @cmpxchg_use_eflags_and_val(i128* %addr, i128 %offset) {
+; CHECK-LABEL: cmpxchg_use_eflags_and_val:
+
+; CHECK: cmpxchg16b
+; CHECK-NOT: cmpq
+; CHECK: jne
+entry:
+ %init = load atomic i128* %addr seq_cst, align 16
+ br label %loop
+
+loop:
+ %old = phi i128 [%init, %entry], [%oldval, %loop]
+ %new = add i128 %old, %offset
+
+ %pair = cmpxchg i128* %addr, i128 %old, i128 %new seq_cst seq_cst
+ %oldval = extractvalue { i128, i1 } %pair, 0
+ %success = extractvalue { i128, i1 } %pair, 1
+
+ br i1 %success, label %done, label %loop
+
+done:
+ ret i128 %old
+}
+
+declare void @foo()
+declare void @bar()
diff --git a/test/CodeGen/X86/coalescer-remat.ll b/test/CodeGen/X86/coalescer-remat.ll
index 468b70b..bb08a0e 100644
--- a/test/CodeGen/X86/coalescer-remat.ll
+++ b/test/CodeGen/X86/coalescer-remat.ll
@@ -5,7 +5,8 @@
define i32 @main() nounwind {
entry:
- %0 = cmpxchg i64* @val, i64 0, i64 1 monotonic monotonic
+ %t0 = cmpxchg i64* @val, i64 0, i64 1 monotonic monotonic
+ %0 = extractvalue { i64, i1 } %t0, 0
%1 = tail call i32 (i8*, ...)* @printf(i8* getelementptr ([7 x i8]* @"\01LC", i32 0, i64 0), i64 %0) nounwind
ret i32 0
}
diff --git a/test/CodeGen/X86/coff-comdat.ll b/test/CodeGen/X86/coff-comdat.ll
new file mode 100644
index 0000000..bf27b2f
--- /dev/null
+++ b/test/CodeGen/X86/coff-comdat.ll
@@ -0,0 +1,92 @@
+; RUN: llc -mtriple i386-pc-win32 < %s | FileCheck %s
+
+$f1 = comdat any
+@v1 = global i32 0, comdat $f1
+define void @f1() comdat $f1 {
+ ret void
+}
+
+$f2 = comdat exactmatch
+@v2 = global i32 0, comdat $f2
+define void @f2() comdat $f2 {
+ ret void
+}
+
+$f3 = comdat largest
+@v3 = global i32 0, comdat $f3
+define void @f3() comdat $f3 {
+ ret void
+}
+
+$f4 = comdat noduplicates
+@v4 = global i32 0, comdat $f4
+define void @f4() comdat $f4 {
+ ret void
+}
+
+$f5 = comdat samesize
+@v5 = global i32 0, comdat $f5
+define void @f5() comdat $f5 {
+ ret void
+}
+
+$f6 = comdat samesize
+@v6 = global i32 0, comdat $f6
+@f6 = global i32 0, comdat $f6
+
+$"\01@f7@0" = comdat any
+define x86_fastcallcc void @"\01@v7@0"() comdat $"\01@f7@0" {
+ ret void
+}
+define x86_fastcallcc void @"\01@f7@0"() comdat $"\01@f7@0" {
+ ret void
+}
+
+$f8 = comdat any
+define x86_fastcallcc void @v8() comdat $f8 {
+ ret void
+}
+define x86_fastcallcc void @f8() comdat $f8 {
+ ret void
+}
+
+$vftable = comdat largest
+
+@some_name = private unnamed_addr constant [2 x i8*] zeroinitializer, comdat $vftable
+@vftable = alias getelementptr([2 x i8*]* @some_name, i32 0, i32 1)
+
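+; Each comdat selection kind maps onto a COFF section selection:
+; any -> discard, exactmatch -> same_contents, largest -> largest,
+; noduplicates -> one_only, samesize -> same_size; non-key members of a
+; comdat are emitted as "associative" with the key symbol's section.
+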
+; CHECK: .section .text,"xr",discard,_f1
+; CHECK: .globl _f1
+; CHECK: .section .text,"xr",same_contents,_f2
+; CHECK: .globl _f2
+; CHECK: .section .text,"xr",largest,_f3
+; CHECK: .globl _f3
+; CHECK: .section .text,"xr",one_only,_f4
+; CHECK: .globl _f4
+; CHECK: .section .text,"xr",same_size,_f5
+; CHECK: .globl _f5
+; CHECK: .section .text,"xr",associative,@f7@0
+; CHECK: .globl @v7@0
+; CHECK: .section .text,"xr",discard,@f7@0
+; CHECK: .globl @f7@0
+; CHECK: .section .text,"xr",associative,@f8@0
+; CHECK: .globl @v8@0
+; CHECK: .section .text,"xr",discard,@f8@0
+; CHECK: .globl @f8@0
+; CHECK: .section .bss,"bw",associative,_f1
+; CHECK: .globl _v1
+; CHECK: .section .bss,"bw",associative,_f2
+; CHECK: .globl _v2
+; CHECK: .section .bss,"bw",associative,_f3
+; CHECK: .globl _v3
+; CHECK: .section .bss,"bw",associative,_f4
+; CHECK: .globl _v4
+; CHECK: .section .bss,"bw",associative,_f5
+; CHECK: .globl _v5
+; CHECK: .section .bss,"bw",associative,_f6
+; CHECK: .globl _v6
+; CHECK: .section .bss,"bw",same_size,_f6
+; CHECK: .globl _f6
+; CHECK: .section .rdata,"rd",largest,_vftable
+; CHECK: .globl _vftable
+; CHECK: _vftable = L_some_name+4
diff --git a/test/CodeGen/X86/coff-comdat2.ll b/test/CodeGen/X86/coff-comdat2.ll
new file mode 100644
index 0000000..6744b5b
--- /dev/null
+++ b/test/CodeGen/X86/coff-comdat2.ll
@@ -0,0 +1,9 @@
+; RUN: not llc %s -o /dev/null 2>&1 | FileCheck %s
+
+target datalayout = "e-m:w-p:32:32-i64:64-f80:32-n8:16:32-S32"
+target triple = "i686-pc-windows-msvc"
+
+$foo = comdat largest
+@foo = global i32 0
+@bar = global i32 0, comdat $foo
+; CHECK: Associative COMDAT symbol 'foo' is not a key for it's COMDAT.
diff --git a/test/CodeGen/X86/coff-comdat3.ll b/test/CodeGen/X86/coff-comdat3.ll
new file mode 100644
index 0000000..76e464b
--- /dev/null
+++ b/test/CodeGen/X86/coff-comdat3.ll
@@ -0,0 +1,8 @@
+; RUN: not llc %s -o /dev/null 2>&1 | FileCheck %s
+
+target datalayout = "e-m:w-p:32:32-i64:64-f80:32-n8:16:32-S32"
+target triple = "i686-pc-windows-msvc"
+
+$foo = comdat largest
+@bar = global i32 0, comdat $foo
+; CHECK: Associative COMDAT symbol 'foo' does not exist.
diff --git a/test/CodeGen/X86/combine-64bit-vec-binop.ll b/test/CodeGen/X86/combine-64bit-vec-binop.ll
new file mode 100644
index 0000000..8440fda
--- /dev/null
+++ b/test/CodeGen/X86/combine-64bit-vec-binop.ll
@@ -0,0 +1,273 @@
+; RUN: llc < %s -march=x86-64 -mcpu=corei7 -mtriple=x86_64-unknown-linux-gnu | FileCheck %s -check-prefix=CHECK -check-prefix=SSE41
+; RUN: llc < %s -march=x86-64 -mcpu=corei7-avx -mtriple=x86_64-unknown-linux-gnu | FileCheck %s -check-prefix=CHECK -check-prefix=AVX
+
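+; All of these exercise binary ops on illegal 64-bit vector types
+; obtained by bitcasting a double; each is expected to widen to a single
+; 128-bit SSE4.1/AVX instruction instead of being scalarized.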
+
+define double @test1_add(double %A, double %B) {
+ %1 = bitcast double %A to <2 x i32>
+ %2 = bitcast double %B to <2 x i32>
+ %add = add <2 x i32> %1, %2
+ %3 = bitcast <2 x i32> %add to double
+ ret double %3
+}
+; CHECK-LABEL: test1_add
+; SSE41: paddd
+; AVX: vpaddd
+; CHECK-NEXT: ret
+
+
+define double @test2_add(double %A, double %B) {
+ %1 = bitcast double %A to <4 x i16>
+ %2 = bitcast double %B to <4 x i16>
+ %add = add <4 x i16> %1, %2
+ %3 = bitcast <4 x i16> %add to double
+ ret double %3
+}
+; CHECK-LABEL: test2_add
+; SSE41: paddw
+; AVX: vpaddw
+; CHECK-NEXT: ret
+
+define double @test3_add(double %A, double %B) {
+ %1 = bitcast double %A to <8 x i8>
+ %2 = bitcast double %B to <8 x i8>
+ %add = add <8 x i8> %1, %2
+ %3 = bitcast <8 x i8> %add to double
+ ret double %3
+}
+; CHECK-LABEL: test3_add
+; SSE41: paddb
+; AVX: vpaddb
+; CHECK-NEXT: ret
+
+
+define double @test1_sub(double %A, double %B) {
+ %1 = bitcast double %A to <2 x i32>
+ %2 = bitcast double %B to <2 x i32>
+ %sub = sub <2 x i32> %1, %2
+ %3 = bitcast <2 x i32> %sub to double
+ ret double %3
+}
+; CHECK-LABEL: test1_sub
+; SSE41: psubd
+; AVX: vpsubd
+; CHECK-NEXT: ret
+
+
+define double @test2_sub(double %A, double %B) {
+ %1 = bitcast double %A to <4 x i16>
+ %2 = bitcast double %B to <4 x i16>
+ %sub = sub <4 x i16> %1, %2
+ %3 = bitcast <4 x i16> %sub to double
+ ret double %3
+}
+; CHECK-LABEL: test2_sub
+; SSE41: psubw
+; AVX: vpsubw
+; CHECK-NEXT: ret
+
+
+define double @test3_sub(double %A, double %B) {
+ %1 = bitcast double %A to <8 x i8>
+ %2 = bitcast double %B to <8 x i8>
+ %sub = sub <8 x i8> %1, %2
+ %3 = bitcast <8 x i8> %sub to double
+ ret double %3
+}
+; CHECK-LABEL: test3_sub
+; SSE41: psubb
+; AVX: vpsubb
+; CHECK-NEXT: ret
+
+
+define double @test1_mul(double %A, double %B) {
+ %1 = bitcast double %A to <2 x i32>
+ %2 = bitcast double %B to <2 x i32>
+ %mul = mul <2 x i32> %1, %2
+ %3 = bitcast <2 x i32> %mul to double
+ ret double %3
+}
+; CHECK-LABEL: test1_mul
+; SSE41: pmulld
+; AVX: vpmulld
+; CHECK-NEXT: ret
+
+
+define double @test2_mul(double %A, double %B) {
+ %1 = bitcast double %A to <4 x i16>
+ %2 = bitcast double %B to <4 x i16>
+ %mul = mul <4 x i16> %1, %2
+ %3 = bitcast <4 x i16> %mul to double
+ ret double %3
+}
+; CHECK-LABEL: test2_mul
+; SSE41: pmullw
+; AVX: vpmullw
+; CHECK-NEXT: ret
+
+; There is no legal ISD::MUL with type MVT::v8i8.
+define double @test3_mul(double %A, double %B) {
+ %1 = bitcast double %A to <8 x i8>
+ %2 = bitcast double %B to <8 x i8>
+ %mul = mul <8 x i8> %1, %2
+ %3 = bitcast <8 x i8> %mul to double
+ ret double %3
+}
+; CHECK-LABEL: test3_mul
+; CHECK: pmullw
+; CHECK-NEXT: pshufb
+; CHECK-NEXT: ret
+
+
+define double @test1_and(double %A, double %B) {
+ %1 = bitcast double %A to <2 x i32>
+ %2 = bitcast double %B to <2 x i32>
+ %and = and <2 x i32> %1, %2
+ %3 = bitcast <2 x i32> %and to double
+ ret double %3
+}
+; CHECK-LABEL: test1_and
+; SSE41: andps
+; AVX: vandps
+; CHECK-NEXT: ret
+
+
+define double @test2_and(double %A, double %B) {
+ %1 = bitcast double %A to <4 x i16>
+ %2 = bitcast double %B to <4 x i16>
+ %and = and <4 x i16> %1, %2
+ %3 = bitcast <4 x i16> %and to double
+ ret double %3
+}
+; CHECK-LABEL: test2_and
+; SSE41: andps
+; AVX: vandps
+; CHECK-NEXT: ret
+
+
+define double @test3_and(double %A, double %B) {
+ %1 = bitcast double %A to <8 x i8>
+ %2 = bitcast double %B to <8 x i8>
+ %and = and <8 x i8> %1, %2
+ %3 = bitcast <8 x i8> %and to double
+ ret double %3
+}
+; CHECK-LABEL: test3_and
+; SSE41: andps
+; AVX: vandps
+; CHECK-NEXT: ret
+
+
+define double @test1_or(double %A, double %B) {
+ %1 = bitcast double %A to <2 x i32>
+ %2 = bitcast double %B to <2 x i32>
+ %or = or <2 x i32> %1, %2
+ %3 = bitcast <2 x i32> %or to double
+ ret double %3
+}
+; CHECK-LABEL: test1_or
+; SSE41: orps
+; AVX: vorps
+; CHECK-NEXT: ret
+
+
+define double @test2_or(double %A, double %B) {
+ %1 = bitcast double %A to <4 x i16>
+ %2 = bitcast double %B to <4 x i16>
+ %or = or <4 x i16> %1, %2
+ %3 = bitcast <4 x i16> %or to double
+ ret double %3
+}
+; CHECK-LABEL: test2_or
+; SSE41: orps
+; AVX: vorps
+; CHECK-NEXT: ret
+
+
+define double @test3_or(double %A, double %B) {
+ %1 = bitcast double %A to <8 x i8>
+ %2 = bitcast double %B to <8 x i8>
+ %or = or <8 x i8> %1, %2
+ %3 = bitcast <8 x i8> %or to double
+ ret double %3
+}
+; CHECK-LABEL: test3_or
+; SSE41: orps
+; AVX: vorps
+; CHECK-NEXT: ret
+
+
+define double @test1_xor(double %A, double %B) {
+ %1 = bitcast double %A to <2 x i32>
+ %2 = bitcast double %B to <2 x i32>
+ %xor = xor <2 x i32> %1, %2
+ %3 = bitcast <2 x i32> %xor to double
+ ret double %3
+}
+; CHECK-LABEL: test1_xor
+; SSE41: xorps
+; AVX: vxorps
+; CHECK-NEXT: ret
+
+
+define double @test2_xor(double %A, double %B) {
+ %1 = bitcast double %A to <4 x i16>
+ %2 = bitcast double %B to <4 x i16>
+ %xor = xor <4 x i16> %1, %2
+ %3 = bitcast <4 x i16> %xor to double
+ ret double %3
+}
+; CHECK-LABEL: test2_xor
+; SSE41: xorps
+; AVX: vxorps
+; CHECK-NEXT: ret
+
+
+define double @test3_xor(double %A, double %B) {
+ %1 = bitcast double %A to <8 x i8>
+ %2 = bitcast double %B to <8 x i8>
+ %xor = xor <8 x i8> %1, %2
+ %3 = bitcast <8 x i8> %xor to double
+ ret double %3
+}
+; CHECK-LABEL: test3_xor
+; SSE41: xorps
+; AVX: vxorps
+; CHECK-NEXT: ret
+
+
+define double @test_fadd(double %A, double %B) {
+ %1 = bitcast double %A to <2 x float>
+ %2 = bitcast double %B to <2 x float>
+ %add = fadd <2 x float> %1, %2
+ %3 = bitcast <2 x float> %add to double
+ ret double %3
+}
+; CHECK-LABEL: test_fadd
+; SSE41: addps
+; AVX: vaddps
+; CHECK-NEXT: ret
+
+define double @test_fsub(double %A, double %B) {
+ %1 = bitcast double %A to <2 x float>
+ %2 = bitcast double %B to <2 x float>
+ %sub = fsub <2 x float> %1, %2
+ %3 = bitcast <2 x float> %sub to double
+ ret double %3
+}
+; CHECK-LABEL: test_fsub
+; SSE41: subps
+; AVX: vsubps
+; CHECK-NEXT: ret
+
+define double @test_fmul(double %A, double %B) {
+ %1 = bitcast double %A to <2 x float>
+ %2 = bitcast double %B to <2 x float>
+ %mul = fmul <2 x float> %1, %2
+ %3 = bitcast <2 x float> %mul to double
+ ret double %3
+}
+; CHECK-LABEL: test_fmul
+; SSE41: mulps
+; AVX: vmulps
+; CHECK-NEXT: ret
+
diff --git a/test/CodeGen/X86/combine-or.ll b/test/CodeGen/X86/combine-or.ll
index c1ce533..ff807b9 100644
--- a/test/CodeGen/X86/combine-or.ll
+++ b/test/CodeGen/X86/combine-or.ll
@@ -25,7 +25,7 @@ define <4 x i32> @test2(<4 x i32> %a, <4 x i32> %b) {
}
; CHECK-LABEL: test2
; CHECK-NOT: xorps
-; CHECK: shufps
+; CHECK: movsd
; CHECK: ret
@@ -74,7 +74,7 @@ define <4 x i32> @test6(<4 x i32> %a, <4 x i32> %b) {
}
; CHECK-LABEL: test6
; CHECK-NOT: xorps
-; CHECK: shufps
+; CHECK: blendps $12
; CHECK-NEXT: ret
@@ -86,7 +86,7 @@ define <4 x i32> @test7(<4 x i32> %a, <4 x i32> %b) {
}
; CHECK-LABEL: test7
; CHECK-NOT: xorps
-; CHECK: shufps
+; CHECK: blendps $12
; CHECK-NEXT: ret
@@ -111,7 +111,7 @@ define <4 x i32> @test9(<4 x i32> %a, <4 x i32> %b) {
}
; CHECK-LABEL: test9
; CHECK-NOT: xorps
-; CHECK: shufps
+; CHECK: movsd
; CHECK: ret
diff --git a/test/CodeGen/X86/combine-vec-shuffle-2.ll b/test/CodeGen/X86/combine-vec-shuffle-2.ll
new file mode 100644
index 0000000..7ab7f80
--- /dev/null
+++ b/test/CodeGen/X86/combine-vec-shuffle-2.ll
@@ -0,0 +1,164 @@
+; RUN: llc < %s -mtriple=x86_64-unknown-linux-gnu -mcpu=corei7 | FileCheck %s
+
+; Check that the DAGCombiner correctly folds each of the shuffle pairs below
+; according to one of the following rules:
+;  1. shuffle(shuffle(x, y), undef) -> x
+;  2. shuffle(shuffle(x, y), undef) -> y
+;  3. shuffle(shuffle(x, y), undef) -> shuffle(x, undef)
+;  4. shuffle(shuffle(x, y), undef) -> shuffle(undef, y)
+;
+; Rules 3 and 4 are applied only if the resulting shuffle mask is legal.
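+;
+; For illustration, consider test1 below: the inner shuffle produces
+; <A[0], B[0], A[3], A[1]>, and the outer mask <2,4,0,3> then selects
+; <A[3], undef, A[0], A[1]>. Only elements of %A survive, so rule 3 applies
+; and the pair folds to a single pshufd of %A with mask [3,0,0,1] (the undef
+; lane defaults to 0; immediate 3 + (1 << 6) = 67).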
+
+define <4 x i32> @test1(<4 x i32> %A, <4 x i32> %B) {
+ %1 = shufflevector <4 x i32> %A, <4 x i32> %B, <4 x i32> <i32 0, i32 4, i32 3, i32 1>
+ %2 = shufflevector <4 x i32> %1, <4 x i32> undef, <4 x i32> <i32 2, i32 4, i32 0, i32 3>
+ ret <4 x i32> %2
+}
+; CHECK-LABEL: test1
+; Mask: [3,0,0,1]
+; CHECK: pshufd $67
+; CHECK-NEXT: ret
+
+
+define <4 x i32> @test2(<4 x i32> %A, <4 x i32> %B) {
+ %1 = shufflevector <4 x i32> %A, <4 x i32> %B, <4 x i32> <i32 0, i32 5, i32 2, i32 3>
+ %2 = shufflevector <4 x i32> %1, <4 x i32> undef, <4 x i32> <i32 2, i32 4, i32 0, i32 3>
+ ret <4 x i32> %2
+}
+; CHECK-LABEL: test2
+; Mask: [2,0,0,3]
+; CHECK: pshufd $-62
+; CHECK-NEXT: ret
+
+
+define <4 x i32> @test3(<4 x i32> %A, <4 x i32> %B) {
+ %1 = shufflevector <4 x i32> %A, <4 x i32> %B, <4 x i32> <i32 0, i32 6, i32 2, i32 3>
+ %2 = shufflevector <4 x i32> %1, <4 x i32> undef, <4 x i32> <i32 2, i32 4, i32 0, i32 3>
+ ret <4 x i32> %2
+}
+; CHECK-LABEL: test3
+; Mask: [2,0,0,3]
+; CHECK: pshufd $-62
+; CHECK-NEXT: ret
+
+
+define <4 x i32> @test4(<4 x i32> %A, <4 x i32> %B) {
+ %1 = shufflevector <4 x i32> %A, <4 x i32> %B, <4 x i32> <i32 0, i32 4, i32 7, i32 1>
+ %2 = shufflevector <4 x i32> %1, <4 x i32> undef, <4 x i32> <i32 4, i32 4, i32 0, i32 3>
+ ret <4 x i32> %2
+}
+; CHECK-LABEL: test4
+; Mask: [0,0,0,1]
+; CHECK: pshufd $64
+; CHECK-NEXT: ret
+
+
+define <4 x i32> @test5(<4 x i32> %A, <4 x i32> %B) {
+ %1 = shufflevector <4 x i32> %A, <4 x i32> %B, <4 x i32> <i32 5, i32 5, i32 2, i32 3>
+ %2 = shufflevector <4 x i32> %1, <4 x i32> undef, <4 x i32> <i32 2, i32 4, i32 4, i32 3>
+ ret <4 x i32> %2
+}
+; CHECK-LABEL: test5
+; Mask: [1,1]
+; CHECK: movhlps
+; CHECK-NEXT: ret
+
+
+define <4 x i32> @test6(<4 x i32> %A, <4 x i32> %B) {
+ %1 = shufflevector <4 x i32> %A, <4 x i32> %B, <4 x i32> <i32 0, i32 6, i32 2, i32 4>
+ %2 = shufflevector <4 x i32> %1, <4 x i32> undef, <4 x i32> <i32 2, i32 4, i32 0, i32 4>
+ ret <4 x i32> %2
+}
+; CHECK-LABEL: test6
+; Mask: [2,0,0,0]
+; CHECK: pshufd $2
+; CHECK-NEXT: ret
+
+
+define <4 x i32> @test7(<4 x i32> %A, <4 x i32> %B) {
+ %1 = shufflevector <4 x i32> %A, <4 x i32> %B, <4 x i32> <i32 0, i32 5, i32 2, i32 7>
+ %2 = shufflevector <4 x i32> %1, <4 x i32> undef, <4 x i32> <i32 0, i32 2, i32 0, i32 2>
+ ret <4 x i32> %2
+}
+; CHECK-LABEL: test7
+; Mask: [0,2,0,2]
+; CHECK: pshufd $-120
+; CHECK-NEXT: ret
+
+
+define <4 x i32> @test8(<4 x i32> %A, <4 x i32> %B) {
+ %1 = shufflevector <4 x i32> %A, <4 x i32> %B, <4 x i32> <i32 4, i32 1, i32 6, i32 3>
+ %2 = shufflevector <4 x i32> %1, <4 x i32> undef, <4 x i32> <i32 1, i32 4, i32 3, i32 4>
+ ret <4 x i32> %2
+}
+; CHECK-LABEL: test8
+; Mask: [1,0,3,0]
+; CHECK: pshufd $49
+; CHECK-NEXT: ret
+
+
+define <4 x i32> @test9(<4 x i32> %A, <4 x i32> %B) {
+ %1 = shufflevector <4 x i32> %A, <4 x i32> %B, <4 x i32> <i32 1, i32 3, i32 2, i32 5>
+ %2 = shufflevector <4 x i32> %1, <4 x i32> undef, <4 x i32> <i32 0, i32 1, i32 4, i32 2>
+ ret <4 x i32> %2
+}
+; CHECK-LABEL: test9
+; Mask: [1,3,0,2]
+; CHECK: pshufd $-115
+; CHECK-NEXT: ret
+
+
+define <4 x i32> @test10(<4 x i32> %A, <4 x i32> %B) {
+ %1 = shufflevector <4 x i32> %A, <4 x i32> %B, <4 x i32> <i32 1, i32 1, i32 5, i32 5>
+ %2 = shufflevector <4 x i32> %1, <4 x i32> undef, <4 x i32> <i32 0, i32 4, i32 1, i32 4>
+ ret <4 x i32> %2
+}
+; CHECK-LABEL: test10
+; Mask: [1,0,1,0]
+; CHECK: pshufd $17
+; CHECK-NEXT: ret
+
+
+define <4 x i32> @test11(<4 x i32> %A, <4 x i32> %B) {
+ %1 = shufflevector <4 x i32> %A, <4 x i32> %B, <4 x i32> <i32 1, i32 2, i32 5, i32 4>
+ %2 = shufflevector <4 x i32> %1, <4 x i32> undef, <4 x i32> <i32 0, i32 4, i32 1, i32 0>
+ ret <4 x i32> %2
+}
+; CHECK-LABEL: test11
+; Mask: [1,0,2,1]
+; CHECK: pshufd $97
+; CHECK-NEXT: ret
+
+
+define <4 x i32> @test12(<4 x i32> %A, <4 x i32> %B) {
+ %1 = shufflevector <4 x i32> %A, <4 x i32> %B, <4 x i32> <i32 0, i32 0, i32 2, i32 4>
+ %2 = shufflevector <4 x i32> %1, <4 x i32> undef, <4 x i32> <i32 1, i32 4, i32 0, i32 4>
+ ret <4 x i32> %2
+}
+; CHECK-LABEL: test12
+; Mask: [0,0,0,0]
+; CHECK: pshufd $0
+; CHECK-NEXT: ret
+
+
+; The following pair of shuffles is folded into vector %A.
+define <4 x i32> @test13(<4 x i32> %A, <4 x i32> %B) {
+ %1 = shufflevector <4 x i32> %A, <4 x i32> %B, <4 x i32> <i32 1, i32 4, i32 2, i32 6>
+ %2 = shufflevector <4 x i32> %1, <4 x i32> undef, <4 x i32> <i32 4, i32 0, i32 2, i32 4>
+ ret <4 x i32> %2
+}
+; CHECK-LABEL: test13
+; CHECK-NOT: pshufd
+; CHECK: ret
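+; (Composition sketch: the inner shuffle yields <A[1], B[0], A[2], B[2]>;
+; the outer mask <4,0,2,4> then selects <undef, A[1], A[2], undef>, which is
+; an identity shuffle of %A with undefs, so the pair folds to %A.)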
+
+
+; The following pair of shuffles is folded into vector %B.
+define <4 x i32> @test14(<4 x i32> %A, <4 x i32> %B) {
+ %1 = shufflevector <4 x i32> %A, <4 x i32> %B, <4 x i32> <i32 0, i32 6, i32 2, i32 4>
+ %2 = shufflevector <4 x i32> %1, <4 x i32> undef, <4 x i32> <i32 3, i32 4, i32 1, i32 4>
+ ret <4 x i32> %2
+}
+; CHECK-LABEL: test14
+; CHECK-NOT: pshufd
+; CHECK: ret
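+; (Here the surviving elements are <B[0], undef, B[2], undef>, an identity
+; shuffle of %B with undefs, so the pair folds to %B.)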
+
diff --git a/test/CodeGen/X86/computeKnownBits_urem.ll b/test/CodeGen/X86/computeKnownBits_urem.ll
new file mode 100644
index 0000000..9902e6f
--- /dev/null
+++ b/test/CodeGen/X86/computeKnownBits_urem.ll
@@ -0,0 +1,14 @@
+; RUN: llc -mtriple=x86_64-linux < %s | FileCheck %s
+define i32 @main() #0 {
+entry:
+ %a = alloca i32, align 4
+ store i32 1, i32* %a, align 4
+ %0 = load i32* %a, align 4
+ %or = or i32 1, %0
+ %and = and i32 1, %or
+ %rem = urem i32 %and, 1
+ %add = add i32 %rem, 1
+ ret i32 %add
+}
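+; All inputs are known constants: (1 | 1) & 1 == 1, 1 urem 1 == 0, and
+; 0 + 1 == 1, so computeKnownBits lets the whole body fold to: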
+; CHECK: $1, %eax
+; CHECK-NEXT: retq
diff --git a/test/CodeGen/X86/cvt16.ll b/test/CodeGen/X86/cvt16.ll
new file mode 100644
index 0000000..951b5c3
--- /dev/null
+++ b/test/CodeGen/X86/cvt16.ll
@@ -0,0 +1,64 @@
+; RUN: llc < %s -march=x86-64 -mtriple=x86_64-unknown-linux-gnu -mcpu=corei7 -mattr=-f16c | FileCheck %s -check-prefix=CHECK -check-prefix=LIBCALL
+; RUN: llc < %s -march=x86-64 -mtriple=x86_64-unknown-linux-gnu -mcpu=corei7 -mattr=+f16c | FileCheck %s -check-prefix=CHECK -check-prefix=F16C
+; RUN: llc < %s -march=x86-64 -mtriple=x86_64-unknown-linux-gnu -mcpu=corei7 -soft-float=1 -mattr=-f16c | FileCheck %s -check-prefix=CHECK -check-prefix=SOFTFLOAT
+; RUN: llc < %s -march=x86-64 -mtriple=x86_64-unknown-linux-gnu -mcpu=corei7 -soft-float=1 -mattr=+f16c | FileCheck %s -check-prefix=CHECK -check-prefix=SOFTFLOAT
+
+; This is a test for float to half-float conversions on x86-64.
+;
+; If the -soft-float flag is set, or if there is no F16C support, then:
+; 1) half-float to float conversions are translated into calls to
+;    __gnu_h2f_ieee, which is defined by the compiler runtime library;
+; 2) float to half-float conversions are translated into calls to
+;    __gnu_f2h_ieee, which is expected to be defined by the compiler
+;    runtime library.
+;
+; Otherwise (when F16C support is available):
+; 1) half-float to float conversions are translated into vcvtph2ps
+;    instructions;
+; 2) float to half-float conversions are translated into vcvtps2ph
+;    instructions.
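+;
+; The tests below exercise this through the llvm.convert.to.fp16 and
+; llvm.convert.from.fp16 intrinsics, the IR-level entry points for these
+; conversions.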
+
+
+define void @test1(float %src, i16* %dest) {
+ %1 = tail call i16 @llvm.convert.to.fp16(float %src)
+ store i16 %1, i16* %dest, align 2
+ ret void
+}
+; CHECK-LABEL: test1
+; LIBCALL: callq __gnu_f2h_ieee
+; SOFTFLOAT: callq __gnu_f2h_ieee
+; F16C: vcvtps2ph
+; CHECK: ret
+
+
+define float @test2(i16* nocapture %src) {
+ %1 = load i16* %src, align 2
+ %2 = tail call float @llvm.convert.from.fp16(i16 %1)
+ ret float %2
+}
+; CHECK-LABEL: test2:
+; LIBCALL: jmp __gnu_h2f_ieee
+; SOFTFLOAT: callq __gnu_h2f_ieee
+; F16C: vcvtph2ps
+; F16C: ret
+
+
+define float @test3(float %src) nounwind uwtable readnone {
+ %1 = tail call i16 @llvm.convert.to.fp16(float %src)
+ %2 = tail call float @llvm.convert.from.fp16(i16 %1)
+ ret float %2
+}
+
+; CHECK-LABEL: test3:
+; LIBCALL: callq __gnu_f2h_ieee
+; LIBCALL: jmp __gnu_h2f_ieee
+; SOFTFLOAT: callq __gnu_f2h_ieee
+; SOFTFLOAT: callq __gnu_h2f_ieee
+; F16C: vcvtps2ph
+; F16C-NEXT: vcvtph2ps
+; F16C: ret
+
+declare float @llvm.convert.from.fp16(i16) nounwind readnone
+declare i16 @llvm.convert.to.fp16(float) nounwind readnone
+
diff --git a/test/CodeGen/X86/dagcombine-and-setcc.ll b/test/CodeGen/X86/dagcombine-and-setcc.ll
new file mode 100644
index 0000000..e7336a9
--- /dev/null
+++ b/test/CodeGen/X86/dagcombine-and-setcc.ll
@@ -0,0 +1,47 @@
+; RUN: llc < %s | FileCheck %s
+
+target datalayout = "e-m:o-i64:64-f80:128-n8:16:32:64-S128"
+target triple = "x86_64-apple-macosx10.10.0"
+
+; Function Attrs: nounwind
+declare i32 @printf(i8* nocapture readonly, ...)
+
+; On X86, true is 1 and false is 0, so we cannot perform the combine:
+; (and (setgt X, true), (setgt Y, true)) -> (setgt (or X, Y), true)
+; The combine is only valid when true is represented as -1 (all bits set).
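+;
+; A worked counterexample with true == 1 (hypothetical values): X = 2, Y = 1
+; gives (X > 1) && (Y > 1) == false, but ((X | Y) = 3) > 1 == true. With
+; true == -1, "X > -1" merely tests that the sign bit of X is clear, and
+; that test does distribute over OR.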
+
+
+;CHECK: cmpl
+;CHECK: setg
+;CHECK: cmpl
+;CHECK: setg
+;CHECK: andb
+
+@.str = private unnamed_addr constant [4 x i8] c"%d\0A\00", align 1
+; Function Attrs: optsize ssp uwtable
+define i32 @foo(i32 %a, i32 %b, i32 * %c) {
+if.else429:
+ %cmp.i1144 = icmp eq i32* %c, null
+ %cmp430 = icmp slt i32 %a, 2
+ %cmp432 = icmp slt i32 %b, 2
+ %or.cond710 = or i1 %cmp430, %cmp432
+ %or.cond710.not = xor i1 %or.cond710, true
+ %brmerge1448 = or i1 %cmp.i1144, %or.cond710.not
+ br i1 %brmerge1448, label %ret1, label %ret2
+
+ret1:
+ ret i32 0
+
+ret2:
+ ret i32 1
+}
+
+define i32 @main(i32 %argc, i8** nocapture readnone %argv) {
+ %res = alloca i32, align 4
+ %t = call i32 @foo(i32 1, i32 2, i32* %res) #3
+ %v = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([4 x i8]* @.str, i64 0, i64 0), i32 %t)
+ ret i32 0
+}
+
+
+
diff --git a/test/CodeGen/X86/dbg-changes-codegen-branch-folding.ll b/test/CodeGen/X86/dbg-changes-codegen-branch-folding.ll
index 23f8335..4912213 100644
--- a/test/CodeGen/X86/dbg-changes-codegen-branch-folding.ll
+++ b/test/CodeGen/X86/dbg-changes-codegen-branch-folding.ll
@@ -52,58 +52,153 @@ define void @_Z3barii(i32 %param1, i32 %param2) #0 {
entry:
%var1 = alloca %struct.AAA3, align 1
%var2 = alloca %struct.AAA3, align 1
- %tobool = icmp eq i32 %param2, 0
- br i1 %tobool, label %if.end, label %if.then
+ tail call void @llvm.dbg.value(metadata !{i32 %param1}, i64 0, metadata !30), !dbg !47
+ tail call void @llvm.dbg.value(metadata !{i32 %param2}, i64 0, metadata !31), !dbg !47
+ tail call void @llvm.dbg.value(metadata !48, i64 0, metadata !32), !dbg !49
+ %tobool = icmp eq i32 %param2, 0, !dbg !50
+ br i1 %tobool, label %if.end, label %if.then, !dbg !50
if.then: ; preds = %entry
- %call = call i8* @_Z5i2stri(i32 %param2)
- br label %if.end
+ %call = tail call i8* @_Z5i2stri(i32 %param2), !dbg !52
+ tail call void @llvm.dbg.value(metadata !{i8* %call}, i64 0, metadata !32), !dbg !49
+ br label %if.end, !dbg !54
if.end: ; preds = %entry, %if.then
- call void @llvm.dbg.value(metadata !{%struct.AAA3* %var1}, i64 0, metadata !60)
- call void @llvm.dbg.value(metadata !62, i64 0, metadata !63)
- %arraydecay.i = getelementptr inbounds %struct.AAA3* %var1, i64 0, i32 0, i64 0
- call void @_Z3fooPcjPKc(i8* %arraydecay.i, i32 4, i8* getelementptr inbounds ([1 x i8]* @.str, i64 0, i64 0))
- call void @llvm.dbg.declare(metadata !{%struct.AAA3* %var2}, metadata !38)
- %arraydecay.i5 = getelementptr inbounds %struct.AAA3* %var2, i64 0, i32 0, i64 0
- call void @_Z3fooPcjPKc(i8* %arraydecay.i5, i32 4, i8* getelementptr inbounds ([1 x i8]* @.str, i64 0, i64 0))
- %tobool1 = icmp eq i32 %param1, 0
- br i1 %tobool1, label %if.else, label %if.then2
+ tail call void @llvm.dbg.value(metadata !{%struct.AAA3* %var1}, i64 0, metadata !33), !dbg !55
+ tail call void @llvm.dbg.value(metadata !{%struct.AAA3* %var1}, i64 0, metadata !56), !dbg !57
+ tail call void @llvm.dbg.value(metadata !58, i64 0, metadata !59), !dbg !60
+ %arraydecay.i = getelementptr inbounds %struct.AAA3* %var1, i64 0, i32 0, i64 0, !dbg !61
+ call void @_Z3fooPcjPKc(i8* %arraydecay.i, i32 4, i8* getelementptr inbounds ([1 x i8]* @.str, i64 0, i64 0)), !dbg !61
+ call void @llvm.dbg.value(metadata !{%struct.AAA3* %var2}, i64 0, metadata !34), !dbg !63
+ call void @llvm.dbg.value(metadata !{%struct.AAA3* %var2}, i64 0, metadata !64), !dbg !65
+ call void @llvm.dbg.value(metadata !58, i64 0, metadata !66), !dbg !67
+ %arraydecay.i5 = getelementptr inbounds %struct.AAA3* %var2, i64 0, i32 0, i64 0, !dbg !68
+ call void @_Z3fooPcjPKc(i8* %arraydecay.i5, i32 4, i8* getelementptr inbounds ([1 x i8]* @.str, i64 0, i64 0)), !dbg !68
+ %tobool1 = icmp eq i32 %param1, 0, !dbg !69
+ call void @llvm.dbg.value(metadata !{%struct.AAA3* %var2}, i64 0, metadata !34), !dbg !63
+ br i1 %tobool1, label %if.else, label %if.then2, !dbg !69
if.then2: ; preds = %if.end
- call void @_Z3fooPcjPKc(i8* %arraydecay.i5, i32 4, i8* getelementptr inbounds ([2 x i8]* @.str1, i64 0, i64 0))
- br label %if.end3
+ call void @llvm.dbg.value(metadata !{%struct.AAA3* %var2}, i64 0, metadata !71), !dbg !73
+ call void @llvm.dbg.value(metadata !74, i64 0, metadata !75), !dbg !76
+ call void @_Z3fooPcjPKc(i8* %arraydecay.i5, i32 4, i8* getelementptr inbounds ([2 x i8]* @.str1, i64 0, i64 0)), !dbg !76
+ br label %if.end3, !dbg !72
if.else: ; preds = %if.end
- call void @_Z3fooPcjPKc(i8* %arraydecay.i5, i32 4, i8* getelementptr inbounds ([2 x i8]* @.str2, i64 0, i64 0))
+ call void @llvm.dbg.value(metadata !{%struct.AAA3* %var2}, i64 0, metadata !77), !dbg !79
+ call void @llvm.dbg.value(metadata !80, i64 0, metadata !81), !dbg !82
+ call void @_Z3fooPcjPKc(i8* %arraydecay.i5, i32 4, i8* getelementptr inbounds ([2 x i8]* @.str2, i64 0, i64 0)), !dbg !82
br label %if.end3
if.end3: ; preds = %if.else, %if.then2
- call void @_Z3fooPcjPKc(i8* %arraydecay.i, i32 4, i8* getelementptr inbounds ([1 x i8]* @.str, i64 0, i64 0))
- ret void
+ call void @llvm.dbg.value(metadata !{%struct.AAA3* %var1}, i64 0, metadata !33), !dbg !55
+ call void @llvm.dbg.value(metadata !{%struct.AAA3* %var1}, i64 0, metadata !83), !dbg !85
+ call void @llvm.dbg.value(metadata !58, i64 0, metadata !86), !dbg !87
+ call void @_Z3fooPcjPKc(i8* %arraydecay.i, i32 4, i8* getelementptr inbounds ([1 x i8]* @.str, i64 0, i64 0)), !dbg !87
+ ret void, !dbg !88
}
-; Function Attrs: nounwind readnone
-declare void @llvm.dbg.declare(metadata, metadata) #1
-
-declare i8* @_Z5i2stri(i32) #2
+declare i8* @_Z5i2stri(i32) #1
-declare void @_Z3fooPcjPKc(i8*, i32, i8*) #2
+declare void @_Z3fooPcjPKc(i8*, i32, i8*) #1
; Function Attrs: nounwind readnone
-declare void @llvm.dbg.value(metadata, i64, metadata) #1
+declare void @llvm.dbg.value(metadata, i64, metadata) #2
attributes #0 = { uwtable "less-precise-fpmad"="false" "no-frame-pointer-elim"="false" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" }
-attributes #1 = { nounwind readnone }
-attributes #2 = { "less-precise-fpmad"="false" "no-frame-pointer-elim"="false" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" }
-
-!llvm.module.flags = !{!48, !49}
-!llvm.ident = !{!50}
-
-!38 = metadata !{i32 786688, null, metadata !"var2", null, i32 20, null, i32 0, i32 0} ; [ DW_TAG_auto_variable ] [var2] [line 20]
-!48 = metadata !{i32 2, metadata !"Dwarf Version", i32 4}
-!49 = metadata !{i32 1, metadata !"Debug Info Version", i32 1}
-!50 = metadata !{metadata !"clang version 3.5 (202418)"}
-!60 = metadata !{i32 786689, null, metadata !"this", null, i32 16777216, null, i32 1088, null} ; [ DW_TAG_arg_variable ] [this] [line 0]
-!62 = metadata !{i8* getelementptr inbounds ([1 x i8]* @.str, i64 0, i64 0)}
-!63 = metadata !{i32 786689, null, metadata !"value", null, i32 33554439, null, i32 0, null} ; [ DW_TAG_arg_variable ] [value] [line 7]
+attributes #1 = { "less-precise-fpmad"="false" "no-frame-pointer-elim"="false" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" }
+attributes #2 = { nounwind readnone }
+
+!llvm.dbg.cu = !{!0}
+!llvm.module.flags = !{!44, !45}
+!llvm.ident = !{!46}
+
+!0 = metadata !{i32 786449, metadata !1, i32 4, metadata !"clang version 3.5.0 ", i1 true, metadata !"", i32 0, metadata !2, metadata !3, metadata !23, metadata !2, metadata !2, metadata !"", i32 1} ; [ DW_TAG_compile_unit ] [/tmp/dbginfo/dbg-changes-codegen-branch-folding.cpp] [DW_LANG_C_plus_plus]
+!1 = metadata !{metadata !"dbg-changes-codegen-branch-folding.cpp", metadata !"/tmp/dbginfo"}
+!2 = metadata !{}
+!3 = metadata !{metadata !4}
+!4 = metadata !{i32 786451, metadata !1, null, metadata !"AAA3", i32 4, i64 32, i64 8, i32 0, i32 0, null, metadata !5, i32 0, null, null, metadata !"_ZTS4AAA3"} ; [ DW_TAG_structure_type ] [AAA3] [line 4, size 32, align 8, offset 0] [def] [from ]
+!5 = metadata !{metadata !6, metadata !11, metadata !17, metadata !18}
+!6 = metadata !{i32 786445, metadata !1, metadata !"_ZTS4AAA3", metadata !"text", i32 8, i64 32, i64 8, i64 0, i32 0, metadata !7} ; [ DW_TAG_member ] [text] [line 8, size 32, align 8, offset 0] [from ]
+!7 = metadata !{i32 786433, null, null, metadata !"", i32 0, i64 32, i64 8, i32 0, i32 0, metadata !8, metadata !9, i32 0, null, null, null} ; [ DW_TAG_array_type ] [line 0, size 32, align 8, offset 0] [from char]
+!8 = metadata !{i32 786468, null, null, metadata !"char", i32 0, i64 8, i64 8, i64 0, i32 0, i32 6} ; [ DW_TAG_base_type ] [char] [line 0, size 8, align 8, offset 0, enc DW_ATE_signed_char]
+!9 = metadata !{metadata !10}
+!10 = metadata !{i32 786465, i64 0, i64 4} ; [ DW_TAG_subrange_type ] [0, 3]
+!11 = metadata !{i32 786478, metadata !1, metadata !"_ZTS4AAA3", metadata !"AAA3", metadata !"AAA3", metadata !"", i32 5, metadata !12, i1 false, i1 false, i32 0, i32 0, null, i32 256, i1 true, null, null, i32 0, null, i32 5} ; [ DW_TAG_subprogram ] [line 5] [AAA3]
+!12 = metadata !{i32 786453, i32 0, null, metadata !"", i32 0, i64 0, i64 0, i64 0, i32 0, null, metadata !13, i32 0, null, null, null} ; [ DW_TAG_subroutine_type ] [line 0, size 0, align 0, offset 0] [from ]
+!13 = metadata !{null, metadata !14, metadata !15}
+!14 = metadata !{i32 786447, null, null, metadata !"", i32 0, i64 64, i64 64, i64 0, i32 1088, metadata !"_ZTS4AAA3"} ; [ DW_TAG_pointer_type ] [line 0, size 64, align 64, offset 0] [artificial] [from _ZTS4AAA3]
+!15 = metadata !{i32 786447, null, null, metadata !"", i32 0, i64 64, i64 64, i64 0, i32 0, metadata !16} ; [ DW_TAG_pointer_type ] [line 0, size 64, align 64, offset 0] [from ]
+!16 = metadata !{i32 786470, null, null, metadata !"", i32 0, i64 0, i64 0, i64 0, i32 0, metadata !8} ; [ DW_TAG_const_type ] [line 0, size 0, align 0, offset 0] [from char]
+!17 = metadata !{i32 786478, metadata !1, metadata !"_ZTS4AAA3", metadata !"operator=", metadata !"operator=", metadata !"_ZN4AAA3aSEPKc", i32 6, metadata !12, i1 false, i1 false, i32 0, i32 0, null, i32 256, i1 true, null, null, i32 0, null, i32 6} ; [ DW_TAG_subprogram ] [line 6] [operator=]
+!18 = metadata !{i32 786478, metadata !1, metadata !"_ZTS4AAA3", metadata !"operator const char *", metadata !"operator const char *", metadata !"_ZNK4AAA3cvPKcEv", i32 7, metadata !19, i1 false, i1 false, i32 0, i32 0, null, i32 256, i1 true, null, null, i32 0, null, i32 7} ; [ DW_TAG_subprogram ] [line 7] [operator const char *]
+!19 = metadata !{i32 786453, i32 0, null, metadata !"", i32 0, i64 0, i64 0, i64 0, i32 0, null, metadata !20, i32 0, null, null, null} ; [ DW_TAG_subroutine_type ] [line 0, size 0, align 0, offset 0] [from ]
+!20 = metadata !{metadata !15, metadata !21}
+!21 = metadata !{i32 786447, null, null, metadata !"", i32 0, i64 64, i64 64, i64 0, i32 1088, metadata !22} ; [ DW_TAG_pointer_type ] [line 0, size 64, align 64, offset 0] [artificial] [from ]
+!22 = metadata !{i32 786470, null, null, metadata !"", i32 0, i64 0, i64 0, i64 0, i32 0, metadata !"_ZTS4AAA3"} ; [ DW_TAG_const_type ] [line 0, size 0, align 0, offset 0] [from _ZTS4AAA3]
+!23 = metadata !{metadata !24, metadata !35, metadata !40}
+!24 = metadata !{i32 786478, metadata !1, metadata !25, metadata !"bar", metadata !"bar", metadata !"_Z3barii", i32 11, metadata !26, i1 false, i1 true, i32 0, i32 0, null, i32 256, i1 true, void (i32, i32)* @_Z3barii, null, null, metadata !29, i32 11} ; [ DW_TAG_subprogram ] [line 11] [def] [bar]
+!25 = metadata !{i32 786473, metadata !1} ; [ DW_TAG_file_type ] [/tmp/dbginfo/dbg-changes-codegen-branch-folding.cpp]
+!26 = metadata !{i32 786453, i32 0, null, metadata !"", i32 0, i64 0, i64 0, i64 0, i32 0, null, metadata !27, i32 0, null, null, null} ; [ DW_TAG_subroutine_type ] [line 0, size 0, align 0, offset 0] [from ]
+!27 = metadata !{null, metadata !28, metadata !28}
+!28 = metadata !{i32 786468, null, null, metadata !"int", i32 0, i64 32, i64 32, i64 0, i32 0, i32 5} ; [ DW_TAG_base_type ] [int] [line 0, size 32, align 32, offset 0, enc DW_ATE_signed]
+!29 = metadata !{metadata !30, metadata !31, metadata !32, metadata !33, metadata !34}
+!30 = metadata !{i32 786689, metadata !24, metadata !"param1", metadata !25, i32 16777227, metadata !28, i32 0, i32 0} ; [ DW_TAG_arg_variable ] [param1] [line 11]
+!31 = metadata !{i32 786689, metadata !24, metadata !"param2", metadata !25, i32 33554443, metadata !28, i32 0, i32 0} ; [ DW_TAG_arg_variable ] [param2] [line 11]
+!32 = metadata !{i32 786688, metadata !24, metadata !"temp", metadata !25, i32 12, metadata !15, i32 0, i32 0} ; [ DW_TAG_auto_variable ] [temp] [line 12]
+!33 = metadata !{i32 786688, metadata !24, metadata !"var1", metadata !25, i32 17, metadata !"_ZTS4AAA3", i32 0, i32 0} ; [ DW_TAG_auto_variable ] [var1] [line 17]
+!34 = metadata !{i32 786688, metadata !24, metadata !"var2", metadata !25, i32 18, metadata !"_ZTS4AAA3", i32 0, i32 0} ; [ DW_TAG_auto_variable ] [var2] [line 18]
+!35 = metadata !{i32 786478, metadata !1, metadata !"_ZTS4AAA3", metadata !"operator=", metadata !"operator=", metadata !"_ZN4AAA3aSEPKc", i32 6, metadata !12, i1 false, i1 true, i32 0, i32 0, null, i32 256, i1 true, null, null, metadata !17, metadata !36, i32 6} ; [ DW_TAG_subprogram ] [line 6] [def] [operator=]
+!36 = metadata !{metadata !37, metadata !39}
+!37 = metadata !{i32 786689, metadata !35, metadata !"this", null, i32 16777216, metadata !38, i32 1088, i32 0} ; [ DW_TAG_arg_variable ] [this] [line 0]
+!38 = metadata !{i32 786447, null, null, metadata !"", i32 0, i64 64, i64 64, i64 0, i32 0, metadata !"_ZTS4AAA3"} ; [ DW_TAG_pointer_type ] [line 0, size 64, align 64, offset 0] [from _ZTS4AAA3]
+!39 = metadata !{i32 786689, metadata !35, metadata !"value", metadata !25, i32 33554438, metadata !15, i32 0, i32 0} ; [ DW_TAG_arg_variable ] [value] [line 6]
+!40 = metadata !{i32 786478, metadata !1, metadata !"_ZTS4AAA3", metadata !"AAA3", metadata !"AAA3", metadata !"_ZN4AAA3C2EPKc", i32 5, metadata !12, i1 false, i1 true, i32 0, i32 0, null, i32 256, i1 true, null, null, metadata !11, metadata !41, i32 5} ; [ DW_TAG_subprogram ] [line 5] [def] [AAA3]
+!41 = metadata !{metadata !42, metadata !43}
+!42 = metadata !{i32 786689, metadata !40, metadata !"this", null, i32 16777216, metadata !38, i32 1088, i32 0} ; [ DW_TAG_arg_variable ] [this] [line 0]
+!43 = metadata !{i32 786689, metadata !40, metadata !"value", metadata !25, i32 33554437, metadata !15, i32 0, i32 0} ; [ DW_TAG_arg_variable ] [value] [line 5]
+!44 = metadata !{i32 2, metadata !"Dwarf Version", i32 4}
+!45 = metadata !{i32 2, metadata !"Debug Info Version", i32 1}
+!46 = metadata !{metadata !"clang version 3.5.0 "}
+!47 = metadata !{i32 11, i32 0, metadata !24, null}
+!48 = metadata !{i8* null}
+!49 = metadata !{i32 12, i32 0, metadata !24, null}
+!50 = metadata !{i32 14, i32 0, metadata !51, null}
+!51 = metadata !{i32 786443, metadata !1, metadata !24, i32 14, i32 0, i32 0, i32 0} ; [ DW_TAG_lexical_block ] [/tmp/dbginfo/dbg-changes-codegen-branch-folding.cpp]
+!52 = metadata !{i32 15, i32 0, metadata !53, null}
+!53 = metadata !{i32 786443, metadata !1, metadata !51, i32 14, i32 0, i32 0, i32 1} ; [ DW_TAG_lexical_block ] [/tmp/dbginfo/dbg-changes-codegen-branch-folding.cpp]
+!54 = metadata !{i32 16, i32 0, metadata !53, null}
+!55 = metadata !{i32 17, i32 0, metadata !24, null}
+!56 = metadata !{i32 786689, metadata !40, metadata !"this", null, i32 16777216, metadata !38, i32 1088, metadata !55} ; [ DW_TAG_arg_variable ] [this] [line 0]
+!57 = metadata !{i32 0, i32 0, metadata !40, metadata !55}
+!58 = metadata !{i8* getelementptr inbounds ([1 x i8]* @.str, i64 0, i64 0)}
+!59 = metadata !{i32 786689, metadata !40, metadata !"value", metadata !25, i32 33554437, metadata !15, i32 0, metadata !55} ; [ DW_TAG_arg_variable ] [value] [line 5]
+!60 = metadata !{i32 5, i32 0, metadata !40, metadata !55}
+!61 = metadata !{i32 5, i32 0, metadata !62, metadata !55}
+!62 = metadata !{i32 786443, metadata !1, metadata !40, i32 5, i32 0, i32 0, i32 3} ; [ DW_TAG_lexical_block ] [/tmp/dbginfo/dbg-changes-codegen-branch-folding.cpp]
+!63 = metadata !{i32 18, i32 0, metadata !24, null}
+!64 = metadata !{i32 786689, metadata !40, metadata !"this", null, i32 16777216, metadata !38, i32 1088, metadata !63} ; [ DW_TAG_arg_variable ] [this] [line 0]
+!65 = metadata !{i32 0, i32 0, metadata !40, metadata !63}
+!66 = metadata !{i32 786689, metadata !40, metadata !"value", metadata !25, i32 33554437, metadata !15, i32 0, metadata !63} ; [ DW_TAG_arg_variable ] [value] [line 5]
+!67 = metadata !{i32 5, i32 0, metadata !40, metadata !63}
+!68 = metadata !{i32 5, i32 0, metadata !62, metadata !63}
+!69 = metadata !{i32 20, i32 0, metadata !70, null}
+!70 = metadata !{i32 786443, metadata !1, metadata !24, i32 20, i32 0, i32 0, i32 2} ; [ DW_TAG_lexical_block ] [/tmp/dbginfo/dbg-changes-codegen-branch-folding.cpp]
+!71 = metadata !{i32 786689, metadata !35, metadata !"this", null, i32 16777216, metadata !38, i32 1088, metadata !72} ; [ DW_TAG_arg_variable ] [this] [line 0]
+!72 = metadata !{i32 21, i32 0, metadata !70, null}
+!73 = metadata !{i32 0, i32 0, metadata !35, metadata !72}
+!74 = metadata !{i8* getelementptr inbounds ([2 x i8]* @.str1, i64 0, i64 0)}
+!75 = metadata !{i32 786689, metadata !35, metadata !"value", metadata !25, i32 33554438, metadata !15, i32 0, metadata !72} ; [ DW_TAG_arg_variable ] [value] [line 6]
+!76 = metadata !{i32 6, i32 0, metadata !35, metadata !72}
+!77 = metadata !{i32 786689, metadata !35, metadata !"this", null, i32 16777216, metadata !38, i32 1088, metadata !78} ; [ DW_TAG_arg_variable ] [this] [line 0]
+!78 = metadata !{i32 23, i32 0, metadata !70, null}
+!79 = metadata !{i32 0, i32 0, metadata !35, metadata !78}
+!80 = metadata !{i8* getelementptr inbounds ([2 x i8]* @.str2, i64 0, i64 0)}
+!81 = metadata !{i32 786689, metadata !35, metadata !"value", metadata !25, i32 33554438, metadata !15, i32 0, metadata !78} ; [ DW_TAG_arg_variable ] [value] [line 6]
+!82 = metadata !{i32 6, i32 0, metadata !35, metadata !78}
+!83 = metadata !{i32 786689, metadata !35, metadata !"this", null, i32 16777216, metadata !38, i32 1088, metadata !84} ; [ DW_TAG_arg_variable ] [this] [line 0]
+!84 = metadata !{i32 24, i32 0, metadata !24, null}
+!85 = metadata !{i32 0, i32 0, metadata !35, metadata !84}
+!86 = metadata !{i32 786689, metadata !35, metadata !"value", metadata !25, i32 33554438, metadata !15, i32 0, metadata !84} ; [ DW_TAG_arg_variable ] [value] [line 6]
+!87 = metadata !{i32 6, i32 0, metadata !35, metadata !84}
+!88 = metadata !{i32 25, i32 0, metadata !24, null}
diff --git a/test/CodeGen/X86/dllexport-x86_64.ll b/test/CodeGen/X86/dllexport-x86_64.ll
index f4dec4f..0d5afa1 100644
--- a/test/CodeGen/X86/dllexport-x86_64.ll
+++ b/test/CodeGen/X86/dllexport-x86_64.ll
@@ -73,7 +73,7 @@ define weak_odr dllexport void @weak1() {
@weak_alias = dllexport alias weak_odr void()* @f1
@blob = global [6 x i8] c"\B8*\00\00\00\C3", section ".text", align 16
-@blob_alias = dllexport alias i32 (), [6 x i8]* @blob
+@blob_alias = dllexport alias bitcast ([6 x i8]* @blob to i32 ()*)
; CHECK: .section .drectve
; WIN32: " /EXPORT:Var1,DATA"
diff --git a/test/CodeGen/X86/elf-comdat.ll b/test/CodeGen/X86/elf-comdat.ll
new file mode 100644
index 0000000..c7e6df7
--- /dev/null
+++ b/test/CodeGen/X86/elf-comdat.ll
@@ -0,0 +1,11 @@
+; RUN: llc -mtriple x86_64-pc-linux-gnu < %s | FileCheck %s
+
+$f = comdat any
+@v = global i32 0, comdat $f
+define void @f() comdat $f {
+ ret void
+}
+; CHECK: .section .text.f,"axG",@progbits,f,comdat
+; CHECK: .globl f
+; CHECK: .section .bss.v,"aGw",@nobits,f,comdat
+; CHECK: .globl v
diff --git a/test/CodeGen/X86/elf-comdat2.ll b/test/CodeGen/X86/elf-comdat2.ll
new file mode 100644
index 0000000..209da39
--- /dev/null
+++ b/test/CodeGen/X86/elf-comdat2.ll
@@ -0,0 +1,12 @@
+; RUN: llc -mtriple x86_64-pc-linux-gnu < %s | FileCheck %s
+
+$foo = comdat any
+@bar = global i32 42, comdat $foo
+@foo = global i32 42
+
+; CHECK: .type bar,@object
+; CHECK-NEXT: .section .data.bar,"aGw",@progbits,foo,comdat
+; CHECK-NEXT: .globl bar
+; CHECK: .type foo,@object
+; CHECK-NEXT: .data
+; CHECK-NEXT: .globl foo
diff --git a/test/CodeGen/X86/fast-isel-args-fail2.ll b/test/CodeGen/X86/fast-isel-args-fail2.ll
new file mode 100644
index 0000000..08de472
--- /dev/null
+++ b/test/CodeGen/X86/fast-isel-args-fail2.ll
@@ -0,0 +1,10 @@
+; RUN: not --crash llc < %s -fast-isel -fast-isel-abort-args -mtriple=x86_64-apple-darwin10
+; REQUIRES: asserts
+
+%struct.s0 = type { x86_fp80, x86_fp80 }
+
+; FastISel cannot handle this case yet. Make sure that we abort.
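+; (Presumably it is the byval aggregate of two x86_fp80s that FastISel cannot
+; lower; with -fast-isel-abort-args this becomes the crash the RUN line
+; expects.)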
+define i8* @args_fail(%struct.s0* byval nocapture readonly align 16 %y) {
+ %1 = bitcast %struct.s0* %y to i8*
+ ret i8* %1
+}
diff --git a/test/CodeGen/X86/fast-isel-args.ll b/test/CodeGen/X86/fast-isel-args.ll
index 0f36265..8c86a9c 100644
--- a/test/CodeGen/X86/fast-isel-args.ll
+++ b/test/CodeGen/X86/fast-isel-args.ll
@@ -23,3 +23,27 @@ entry:
%add2 = add nsw i64 %add, %conv1
ret i64 %add2
}
+
+define float @t4(float %a, float %b, float %c, float %d, float %e, float %f, float %g, float %h) {
+entry:
+ %add1 = fadd float %a, %b
+ %add2 = fadd float %c, %d
+ %add3 = fadd float %e, %f
+ %add4 = fadd float %g, %h
+ %add5 = fadd float %add1, %add2
+ %add6 = fadd float %add3, %add4
+ %add7 = fadd float %add5, %add6
+ ret float %add7
+}
+
+define double @t5(double %a, double %b, double %c, double %d, double %e, double %f, double %g, double %h) {
+entry:
+ %add1 = fadd double %a, %b
+ %add2 = fadd double %c, %d
+ %add3 = fadd double %e, %f
+ %add4 = fadd double %g, %h
+ %add5 = fadd double %add1, %add2
+ %add6 = fadd double %add3, %add4
+ %add7 = fadd double %add5, %add6
+ ret double %add7
+}
diff --git a/test/CodeGen/X86/fast-isel-branch_weights.ll b/test/CodeGen/X86/fast-isel-branch_weights.ll
new file mode 100644
index 0000000..bc41395
--- /dev/null
+++ b/test/CodeGen/X86/fast-isel-branch_weights.ll
@@ -0,0 +1,19 @@
+; RUN: llc < %s -mtriple=x86_64-apple-darwin10 | FileCheck %s
+; RUN: llc < %s -fast-isel -fast-isel-abort -mtriple=x86_64-apple-darwin10 | FileCheck %s
+
+; Test that the basic blocks are reordered according to their branch weights.
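+; The !prof weights below (0 for %fail, 2147483647 for %success) mark
+; %success as overwhelmingly likely, so it is expected to be laid out first
+; even though the branch names %fail first.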
+define i64 @branch_weights_test(i64 %a, i64 %b) {
+; CHECK-LABEL: branch_weights_test
+; CHECK-LABEL: success
+; CHECK-LABEL: fail
+ %1 = icmp ult i64 %a, %b
+ br i1 %1, label %fail, label %success, !prof !0
+
+fail:
+ ret i64 -1
+
+success:
+ ret i64 0
+}
+
+!0 = metadata !{metadata !"branch_weights", i32 0, i32 2147483647}
diff --git a/test/CodeGen/X86/fast-isel-cmp-branch2.ll b/test/CodeGen/X86/fast-isel-cmp-branch2.ll
new file mode 100644
index 0000000..7e45c49
--- /dev/null
+++ b/test/CodeGen/X86/fast-isel-cmp-branch2.ll
@@ -0,0 +1,294 @@
+; RUN: llc < %s -mtriple=x86_64-apple-darwin10 | FileCheck %s
+; RUN: llc < %s -fast-isel -fast-isel-abort -mtriple=x86_64-apple-darwin10 | FileCheck %s
+
+define i32 @fcmp_oeq(float %x, float %y) {
+; CHECK-LABEL: fcmp_oeq
+; CHECK: ucomiss %xmm1, %xmm0
+; CHECK-NEXT: jne {{LBB.+_1}}
+; CHECK-NEXT: jnp {{LBB.+_2}}
+ %1 = fcmp oeq float %x, %y
+ br i1 %1, label %bb1, label %bb2
+bb2:
+ ret i32 1
+bb1:
+ ret i32 0
+}
+
+define i32 @fcmp_ogt(float %x, float %y) {
+; CHECK-LABEL: fcmp_ogt
+; CHECK: ucomiss %xmm1, %xmm0
+; CHECK-NEXT: jbe {{LBB.+_1}}
+ %1 = fcmp ogt float %x, %y
+ br i1 %1, label %bb1, label %bb2
+bb2:
+ ret i32 1
+bb1:
+ ret i32 0
+}
+
+define i32 @fcmp_oge(float %x, float %y) {
+; CHECK-LABEL: fcmp_oge
+; CHECK: ucomiss %xmm1, %xmm0
+; CHECK-NEXT: jb {{LBB.+_1}}
+ %1 = fcmp oge float %x, %y
+ br i1 %1, label %bb1, label %bb2
+bb2:
+ ret i32 1
+bb1:
+ ret i32 0
+}
+
+define i32 @fcmp_olt(float %x, float %y) {
+; CHECK-LABEL: fcmp_olt
+; CHECK: ucomiss %xmm0, %xmm1
+; CHECK-NEXT: jbe {{LBB.+_1}}
+ %1 = fcmp olt float %x, %y
+ br i1 %1, label %bb1, label %bb2
+bb2:
+ ret i32 1
+bb1:
+ ret i32 0
+}
+
+define i32 @fcmp_ole(float %x, float %y) {
+; CHECK-LABEL: fcmp_ole
+; CHECK: ucomiss %xmm0, %xmm1
+; CHECK-NEXT: jb {{LBB.+_1}}
+ %1 = fcmp ole float %x, %y
+ br i1 %1, label %bb1, label %bb2
+bb2:
+ ret i32 1
+bb1:
+ ret i32 0
+}
+
+define i32 @fcmp_one(float %x, float %y) {
+; CHECK-LABEL: fcmp_one
+; CHECK: ucomiss %xmm1, %xmm0
+; CHECK-NEXT: je {{LBB.+_1}}
+ %1 = fcmp one float %x, %y
+ br i1 %1, label %bb1, label %bb2
+bb2:
+ ret i32 1
+bb1:
+ ret i32 0
+}
+
+define i32 @fcmp_ord(float %x, float %y) {
+; CHECK-LABEL: fcmp_ord
+; CHECK: ucomiss %xmm1, %xmm0
+; CHECK-NEXT: jp {{LBB.+_1}}
+ %1 = fcmp ord float %x, %y
+ br i1 %1, label %bb1, label %bb2
+bb2:
+ ret i32 1
+bb1:
+ ret i32 0
+}
+
+define i32 @fcmp_uno(float %x, float %y) {
+; CHECK-LABEL: fcmp_uno
+; CHECK: ucomiss %xmm1, %xmm0
+; CHECK-NEXT: jp {{LBB.+_2}}
+ %1 = fcmp uno float %x, %y
+ br i1 %1, label %bb1, label %bb2
+bb2:
+ ret i32 1
+bb1:
+ ret i32 0
+}
+
+define i32 @fcmp_ueq(float %x, float %y) {
+; CHECK-LABEL: fcmp_ueq
+; CHECK: ucomiss %xmm1, %xmm0
+; CHECK-NEXT: je {{LBB.+_2}}
+ %1 = fcmp ueq float %x, %y
+ br i1 %1, label %bb1, label %bb2
+bb2:
+ ret i32 1
+bb1:
+ ret i32 0
+}
+
+define i32 @fcmp_ugt(float %x, float %y) {
+; CHECK-LABEL: fcmp_ugt
+; CHECK: ucomiss %xmm0, %xmm1
+; CHECK-NEXT: jae {{LBB.+_1}}
+ %1 = fcmp ugt float %x, %y
+ br i1 %1, label %bb1, label %bb2
+bb2:
+ ret i32 1
+bb1:
+ ret i32 0
+}
+
+define i32 @fcmp_uge(float %x, float %y) {
+; CHECK-LABEL: fcmp_uge
+; CHECK: ucomiss %xmm0, %xmm1
+; CHECK-NEXT: ja {{LBB.+_1}}
+ %1 = fcmp uge float %x, %y
+ br i1 %1, label %bb1, label %bb2
+bb2:
+ ret i32 1
+bb1:
+ ret i32 0
+}
+
+define i32 @fcmp_ult(float %x, float %y) {
+; CHECK-LABEL: fcmp_ult
+; CHECK: ucomiss %xmm1, %xmm0
+; CHECK-NEXT: jae {{LBB.+_1}}
+ %1 = fcmp ult float %x, %y
+ br i1 %1, label %bb1, label %bb2
+bb2:
+ ret i32 1
+bb1:
+ ret i32 0
+}
+
+define i32 @fcmp_ule(float %x, float %y) {
+; CHECK-LABEL: fcmp_ule
+; CHECK: ucomiss %xmm1, %xmm0
+; CHECK-NEXT: ja {{LBB.+_1}}
+ %1 = fcmp ule float %x, %y
+ br i1 %1, label %bb1, label %bb2
+bb2:
+ ret i32 1
+bb1:
+ ret i32 0
+}
+
+define i32 @fcmp_une(float %x, float %y) {
+; CHECK-LABEL: fcmp_une
+; CHECK: ucomiss %xmm1, %xmm0
+; CHECK-NEXT: jne {{LBB.+_2}}
+; CHECK-NEXT: jp {{LBB.+_2}}
+; CHECK-NEXT: jmp {{LBB.+_1}}
+ %1 = fcmp une float %x, %y
+ br i1 %1, label %bb1, label %bb2
+bb2:
+ ret i32 1
+bb1:
+ ret i32 0
+}
+
+define i32 @icmp_eq(i32 %x, i32 %y) {
+; CHECK-LABEL: icmp_eq
+; CHECK: cmpl %esi, %edi
+; CHECK-NEXT: jne {{LBB.+_1}}
+ %1 = icmp eq i32 %x, %y
+ br i1 %1, label %bb1, label %bb2
+bb2:
+ ret i32 1
+bb1:
+ ret i32 0
+}
+
+define i32 @icmp_ne(i32 %x, i32 %y) {
+; CHECK-LABEL: icmp_ne
+; CHECK: cmpl %esi, %edi
+; CHECK-NEXT: je {{LBB.+_1}}
+ %1 = icmp ne i32 %x, %y
+ br i1 %1, label %bb1, label %bb2
+bb2:
+ ret i32 1
+bb1:
+ ret i32 0
+}
+
+define i32 @icmp_ugt(i32 %x, i32 %y) {
+; CHECK-LABEL: icmp_ugt
+; CHECK: cmpl %esi, %edi
+; CHECK-NEXT: jbe {{LBB.+_1}}
+ %1 = icmp ugt i32 %x, %y
+ br i1 %1, label %bb1, label %bb2
+bb2:
+ ret i32 1
+bb1:
+ ret i32 0
+}
+
+define i32 @icmp_uge(i32 %x, i32 %y) {
+; CHECK-LABEL: icmp_uge
+; CHECK: cmpl %esi, %edi
+; CHECK-NEXT: jb {{LBB.+_1}}
+ %1 = icmp uge i32 %x, %y
+ br i1 %1, label %bb1, label %bb2
+bb2:
+ ret i32 1
+bb1:
+ ret i32 0
+}
+
+define i32 @icmp_ult(i32 %x, i32 %y) {
+; CHECK-LABEL: icmp_ult
+; CHECK: cmpl %esi, %edi
+; CHECK-NEXT: jae {{LBB.+_1}}
+ %1 = icmp ult i32 %x, %y
+ br i1 %1, label %bb1, label %bb2
+bb2:
+ ret i32 1
+bb1:
+ ret i32 0
+}
+
+define i32 @icmp_ule(i32 %x, i32 %y) {
+; CHECK-LABEL: icmp_ule
+; CHECK: cmpl %esi, %edi
+; CHECK-NEXT: ja {{LBB.+_1}}
+ %1 = icmp ule i32 %x, %y
+ br i1 %1, label %bb1, label %bb2
+bb2:
+ ret i32 1
+bb1:
+ ret i32 0
+}
+
+define i32 @icmp_sgt(i32 %x, i32 %y) {
+; CHECK-LABEL: icmp_sgt
+; CHECK: cmpl %esi, %edi
+; CHECK-NEXT: jle {{LBB.+_1}}
+ %1 = icmp sgt i32 %x, %y
+ br i1 %1, label %bb1, label %bb2
+bb2:
+ ret i32 1
+bb1:
+ ret i32 0
+}
+
+define i32 @icmp_sge(i32 %x, i32 %y) {
+; CHECK-LABEL: icmp_sge
+; CHECK: cmpl %esi, %edi
+; CHECK-NEXT: jl {{LBB.+_1}}
+ %1 = icmp sge i32 %x, %y
+ br i1 %1, label %bb1, label %bb2
+bb2:
+ ret i32 1
+bb1:
+ ret i32 0
+}
+
+define i32 @icmp_slt(i32 %x, i32 %y) {
+; CHECK-LABEL: icmp_slt
+; CHECK: cmpl %esi, %edi
+; CHECK-NEXT: jge {{LBB.+_1}}
+ %1 = icmp slt i32 %x, %y
+ br i1 %1, label %bb1, label %bb2
+bb2:
+ ret i32 1
+bb1:
+ ret i32 0
+}
+
+define i32 @icmp_sle(i32 %x, i32 %y) {
+; CHECK-LABEL: icmp_sle
+; CHECK: cmpl %esi, %edi
+; CHECK-NEXT: jg {{LBB.+_1}}
+ %1 = icmp sle i32 %x, %y
+ br i1 %1, label %bb1, label %bb2
+bb2:
+ ret i32 1
+bb1:
+ ret i32 0
+}
+
diff --git a/test/CodeGen/X86/fast-isel-cmp-branch3.ll b/test/CodeGen/X86/fast-isel-cmp-branch3.ll
new file mode 100644
index 0000000..a3f6851
--- /dev/null
+++ b/test/CodeGen/X86/fast-isel-cmp-branch3.ll
@@ -0,0 +1,470 @@
+; RUN: llc < %s -fast-isel -fast-isel-abort -mtriple=x86_64-apple-darwin10 | FileCheck %s
+
+define i32 @fcmp_oeq1(float %x) {
+; CHECK-LABEL: fcmp_oeq1
+; CHECK: ucomiss %xmm0, %xmm0
+; CHECK-NEXT: jp {{LBB.+_1}}
+ %1 = fcmp oeq float %x, %x
+ br i1 %1, label %bb1, label %bb2
+bb2:
+ ret i32 1
+bb1:
+ ret i32 0
+}
+
+define i32 @fcmp_oeq2(float %x) {
+; CHECK-LABEL: fcmp_oeq2
+; CHECK: xorps %xmm1, %xmm1
+; CHECK-NEXT: ucomiss %xmm1, %xmm0
+; CHECK-NEXT: jne {{LBB.+_1}}
+; CHECK-NEXT: jnp {{LBB.+_2}}
+ %1 = fcmp oeq float %x, 0.000000e+00
+ br i1 %1, label %bb1, label %bb2
+bb2:
+ ret i32 1
+bb1:
+ ret i32 0
+}
+
+define i32 @fcmp_ogt1(float %x) {
+; CHECK-LABEL: fcmp_ogt1
+; CHECK-NOT: ucomiss
+; CHECK: movl $1, %eax
+ %1 = fcmp ogt float %x, %x
+ br i1 %1, label %bb1, label %bb2
+bb2:
+ ret i32 1
+bb1:
+ ret i32 0
+}
+
+define i32 @fcmp_ogt2(float %x) {
+; CHECK-LABEL: fcmp_ogt2
+; CHECK: xorps %xmm1, %xmm1
+; CHECK-NEXT: ucomiss %xmm1, %xmm0
+; CHECK-NEXT: jbe {{LBB.+_1}}
+ %1 = fcmp ogt float %x, 0.000000e+00
+ br i1 %1, label %bb1, label %bb2
+bb2:
+ ret i32 1
+bb1:
+ ret i32 0
+}
+
+define i32 @fcmp_oge1(float %x) {
+; CHECK-LABEL: fcmp_oge1
+; CHECK: ucomiss %xmm0, %xmm0
+; CHECK-NEXT: jp {{LBB.+_1}}
+ %1 = fcmp oge float %x, %x
+ br i1 %1, label %bb1, label %bb2
+bb2:
+ ret i32 1
+bb1:
+ ret i32 0
+}
+
+define i32 @fcmp_oge2(float %x) {
+; CHECK-LABEL: fcmp_oge2
+; CHECK: xorps %xmm1, %xmm1
+; CHECK-NEXT: ucomiss %xmm1, %xmm0
+; CHECK-NEXT: jb {{LBB.+_1}}
+ %1 = fcmp oge float %x, 0.000000e+00
+ br i1 %1, label %bb1, label %bb2
+bb2:
+ ret i32 1
+bb1:
+ ret i32 0
+}
+
+define i32 @fcmp_olt1(float %x) {
+; CHECK-LABEL: fcmp_olt1
+; CHECK-NOT: ucomiss
+; CHECK: movl $1, %eax
+ %1 = fcmp olt float %x, %x
+ br i1 %1, label %bb1, label %bb2
+bb2:
+ ret i32 1
+bb1:
+ ret i32 0
+}
+
+define i32 @fcmp_olt2(float %x) {
+; CHECK-LABEL: fcmp_olt2
+; CHECK: xorps %xmm1, %xmm1
+; CHECK-NEXT: ucomiss %xmm0, %xmm1
+; CHECK-NEXT: jbe {{LBB.+_1}}
+ %1 = fcmp olt float %x, 0.000000e+00
+ br i1 %1, label %bb1, label %bb2
+bb2:
+ ret i32 1
+bb1:
+ ret i32 0
+}
+
+define i32 @fcmp_ole1(float %x) {
+; CHECK-LABEL: fcmp_ole1
+; CHECK: ucomiss %xmm0, %xmm0
+; CHECK-NEXT: jp {{LBB.+_1}}
+ %1 = fcmp ole float %x, %x
+ br i1 %1, label %bb1, label %bb2
+bb2:
+ ret i32 1
+bb1:
+ ret i32 0
+}
+
+define i32 @fcmp_ole2(float %x) {
+; CHECK-LABEL: fcmp_ole2
+; CHECK: xorps %xmm1, %xmm1
+; CHECK-NEXT: ucomiss %xmm0, %xmm1
+; CHECK-NEXT: jb {{LBB.+_1}}
+ %1 = fcmp ole float %x, 0.000000e+00
+ br i1 %1, label %bb1, label %bb2
+bb2:
+ ret i32 1
+bb1:
+ ret i32 0
+}
+
+define i32 @fcmp_one1(float %x) {
+; CHECK-LABEL: fcmp_one1
+; CHECK-NOT: ucomiss
+; CHECK: movl $1, %eax
+ %1 = fcmp one float %x, %x
+ br i1 %1, label %bb1, label %bb2
+bb2:
+ ret i32 1
+bb1:
+ ret i32 0
+}
+
+define i32 @fcmp_one2(float %x) {
+; CHECK-LABEL: fcmp_one2
+; CHECK: xorps %xmm1, %xmm1
+; CHECK-NEXT: ucomiss %xmm1, %xmm0
+; CHECK-NEXT: je {{LBB.+_1}}
+ %1 = fcmp one float %x, 0.000000e+00
+ br i1 %1, label %bb1, label %bb2
+bb2:
+ ret i32 1
+bb1:
+ ret i32 0
+}
+
+define i32 @fcmp_ord1(float %x) {
+; CHECK-LABEL: fcmp_ord1
+; CHECK: ucomiss %xmm0, %xmm0
+; CHECK-NEXT: jp {{LBB.+_1}}
+ %1 = fcmp ord float %x, %x
+ br i1 %1, label %bb1, label %bb2
+bb2:
+ ret i32 1
+bb1:
+ ret i32 0
+}
+
+define i32 @fcmp_ord2(float %x) {
+; CHECK-LABEL: fcmp_ord2
+; CHECK: ucomiss %xmm0, %xmm0
+; CHECK-NEXT: jp {{LBB.+_1}}
+ %1 = fcmp ord float %x, 0.000000e+00
+ br i1 %1, label %bb1, label %bb2
+bb2:
+ ret i32 1
+bb1:
+ ret i32 0
+}
+
+define i32 @fcmp_uno1(float %x) {
+; CHECK-LABEL: fcmp_uno1
+; CHECK: ucomiss %xmm0, %xmm0
+; CHECK-NEXT: jp {{LBB.+_2}}
+ %1 = fcmp uno float %x, %x
+ br i1 %1, label %bb1, label %bb2
+bb2:
+ ret i32 1
+bb1:
+ ret i32 0
+}
+
+define i32 @fcmp_uno2(float %x) {
+; CHECK-LABEL: fcmp_uno2
+; CHECK: ucomiss %xmm0, %xmm0
+; CHECK-NEXT: jp {{LBB.+_2}}
+ %1 = fcmp uno float %x, 0.000000e+00
+ br i1 %1, label %bb1, label %bb2
+bb2:
+ ret i32 1
+bb1:
+ ret i32 0
+}
+
+define i32 @fcmp_ueq1(float %x) {
+; CHECK-LABEL: fcmp_ueq1
+; CHECK-NOT: ucomiss
+ %1 = fcmp ueq float %x, %x
+ br i1 %1, label %bb1, label %bb2
+bb2:
+ ret i32 1
+bb1:
+ ret i32 0
+}
+
+define i32 @fcmp_ueq2(float %x) {
+; CHECK-LABEL: fcmp_ueq2
+; CHECK: xorps %xmm1, %xmm1
+; CHECK-NEXT: ucomiss %xmm1, %xmm0
+; CHECK-NEXT: je {{LBB.+_2}}
+ %1 = fcmp ueq float %x, 0.000000e+00
+ br i1 %1, label %bb1, label %bb2
+bb2:
+ ret i32 1
+bb1:
+ ret i32 0
+}
+
+define i32 @fcmp_ugt1(float %x) {
+; CHECK-LABEL: fcmp_ugt1
+; CHECK: ucomiss %xmm0, %xmm0
+; CHECK-NEXT: jnp {{LBB.+_1}}
+ %1 = fcmp ugt float %x, %x
+ br i1 %1, label %bb1, label %bb2
+bb2:
+ ret i32 1
+bb1:
+ ret i32 0
+}
+
+define i32 @fcmp_ugt2(float %x) {
+; CHECK-LABEL: fcmp_ugt2
+; CHECK: xorps %xmm1, %xmm1
+; CHECK-NEXT: ucomiss %xmm0, %xmm1
+; CHECK-NEXT: jae {{LBB.+_1}}
+ %1 = fcmp ugt float %x, 0.000000e+00
+ br i1 %1, label %bb1, label %bb2
+bb2:
+ ret i32 1
+bb1:
+ ret i32 0
+}
+
+define i32 @fcmp_uge1(float %x) {
+; CHECK-LABEL: fcmp_uge1
+; CHECK-NOT: ucomiss
+ %1 = fcmp uge float %x, %x
+ br i1 %1, label %bb1, label %bb2
+bb2:
+ ret i32 1
+bb1:
+ ret i32 0
+}
+
+define i32 @fcmp_uge2(float %x) {
+; CHECK-LABEL: fcmp_uge2
+; CHECK: xorps %xmm1, %xmm1
+; CHECK-NEXT: ucomiss %xmm0, %xmm1
+; CHECK-NEXT: ja {{LBB.+_1}}
+ %1 = fcmp uge float %x, 0.000000e+00
+ br i1 %1, label %bb1, label %bb2
+bb2:
+ ret i32 1
+bb1:
+ ret i32 0
+}
+
+define i32 @fcmp_ult1(float %x) {
+; CHECK-LABEL: fcmp_ult1
+; CHECK: ucomiss %xmm0, %xmm0
+; CHECK-NEXT: jnp {{LBB.+_1}}
+ %1 = fcmp ult float %x, %x
+ br i1 %1, label %bb1, label %bb2
+bb2:
+ ret i32 1
+bb1:
+ ret i32 0
+}
+
+define i32 @fcmp_ult2(float %x) {
+; CHECK-LABEL: fcmp_ult2
+; CHECK: xorps %xmm1, %xmm1
+; CHECK-NEXT: ucomiss %xmm1, %xmm0
+; CHECK-NEXT: jae {{LBB.+_1}}
+ %1 = fcmp ult float %x, 0.000000e+00
+ br i1 %1, label %bb1, label %bb2
+bb2:
+ ret i32 1
+bb1:
+ ret i32 0
+}
+
+define i32 @fcmp_ule1(float %x) {
+; CHECK-LABEL: fcmp_ule1
+; CHECK-NOT: ucomiss
+ %1 = fcmp ule float %x, %x
+ br i1 %1, label %bb1, label %bb2
+bb2:
+ ret i32 1
+bb1:
+ ret i32 0
+}
+
+define i32 @fcmp_ule2(float %x) {
+; CHECK-LABEL: fcmp_ule2
+; CHECK: xorps %xmm1, %xmm1
+; CHECK-NEXT: ucomiss %xmm1, %xmm0
+; CHECK-NEXT: ja {{LBB.+_1}}
+ %1 = fcmp ule float %x, 0.000000e+00
+ br i1 %1, label %bb1, label %bb2
+bb2:
+ ret i32 1
+bb1:
+ ret i32 0
+}
+
+define i32 @fcmp_une1(float %x) {
+; CHECK-LABEL: fcmp_une1
+; CHECK: ucomiss %xmm0, %xmm0
+; CHECK-NEXT: jnp {{LBB.+_1}}
+ %1 = fcmp une float %x, %x
+ br i1 %1, label %bb1, label %bb2
+bb2:
+ ret i32 1
+bb1:
+ ret i32 0
+}
+
+define i32 @fcmp_une2(float %x) {
+; CHECK-LABEL: fcmp_une2
+; CHECK: xorps %xmm1, %xmm1
+; CHECK-NEXT: ucomiss %xmm1, %xmm0
+; CHECK-NEXT: jne {{LBB.+_2}}
+; CHECK-NEXT: jp {{LBB.+_2}}
+; CHECK-NEXT: jmp {{LBB.+_1}}
+ %1 = fcmp une float %x, 0.000000e+00
+ br i1 %1, label %bb1, label %bb2
+bb2:
+ ret i32 1
+bb1:
+ ret i32 0
+}
+
+define i32 @icmp_eq(i32 %x) {
+; CHECK-LABEL: icmp_eq
+; CHECK-NOT: cmpl
+; CHECK: movl $0, %eax
+ %1 = icmp eq i32 %x, %x
+ br i1 %1, label %bb1, label %bb2
+bb2:
+ ret i32 1
+bb1:
+ ret i32 0
+}
+
+define i32 @icmp_ne(i32 %x) {
+; CHECK-LABEL: icmp_ne
+; CHECK-NOT: cmpl
+; CHECK: movl $1, %eax
+ %1 = icmp ne i32 %x, %x
+ br i1 %1, label %bb1, label %bb2
+bb2:
+ ret i32 1
+bb1:
+ ret i32 0
+}
+
+define i32 @icmp_ugt(i32 %x) {
+; CHECK-LABEL: icmp_ugt
+; CHECK-NOT: cmpl
+; CHECK: movl $1, %eax
+ %1 = icmp ugt i32 %x, %x
+ br i1 %1, label %bb1, label %bb2
+bb2:
+ ret i32 1
+bb1:
+ ret i32 0
+}
+
+define i32 @icmp_uge(i32 %x) {
+; CHECK-LABEL: icmp_uge
+; CHECK-NOT: cmpl
+; CHECK: movl $0, %eax
+ %1 = icmp uge i32 %x, %x
+ br i1 %1, label %bb1, label %bb2
+bb2:
+ ret i32 1
+bb1:
+ ret i32 0
+}
+
+define i32 @icmp_ult(i32 %x) {
+; CHECK-LABEL: icmp_ult
+; CHECK-NOT: cmpl
+; CHECK: movl $1, %eax
+ %1 = icmp ult i32 %x, %x
+ br i1 %1, label %bb1, label %bb2
+bb2:
+ ret i32 1
+bb1:
+ ret i32 0
+}
+
+define i32 @icmp_ule(i32 %x) {
+; CHECK-LABEL: icmp_ule
+; CHECK-NOT: cmpl
+; CHECK: movl $0, %eax
+ %1 = icmp ule i32 %x, %x
+ br i1 %1, label %bb1, label %bb2
+bb2:
+ ret i32 1
+bb1:
+ ret i32 0
+}
+
+define i32 @icmp_sgt(i32 %x) {
+; CHECK-LABEL: icmp_sgt
+; CHECK-NOT: cmpl
+; CHECK: movl $1, %eax
+ %1 = icmp sgt i32 %x, %x
+ br i1 %1, label %bb1, label %bb2
+bb2:
+ ret i32 1
+bb1:
+ ret i32 0
+}
+
+define i32 @icmp_sge(i32 %x) {
+; CHECK-LABEL: icmp_sge
+; CHECK-NOT: cmpl
+; CHECK: movl $0, %eax
+ %1 = icmp sge i32 %x, %x
+ br i1 %1, label %bb1, label %bb2
+bb2:
+ ret i32 1
+bb1:
+ ret i32 0
+}
+
+define i32 @icmp_slt(i32 %x) {
+; CHECK-LABEL: icmp_slt
+; CHECK-NOT: cmpl
+; CHECK: movl $1, %eax
+ %1 = icmp slt i32 %x, %x
+ br i1 %1, label %bb1, label %bb2
+bb2:
+ ret i32 1
+bb1:
+ ret i32 0
+}
+
+define i32 @icmp_sle(i32 %x) {
+; CHECK-LABEL: icmp_sle
+; CHECK-NOT: cmpl
+; CHECK: movl $0, %eax
+ %1 = icmp sle i32 %x, %x
+ br i1 %1, label %bb1, label %bb2
+bb2:
+ ret i32 1
+bb1:
+ ret i32 0
+}
+
diff --git a/test/CodeGen/X86/fast-isel-cmp.ll b/test/CodeGen/X86/fast-isel-cmp.ll
new file mode 100644
index 0000000..1b72cfc
--- /dev/null
+++ b/test/CodeGen/X86/fast-isel-cmp.ll
@@ -0,0 +1,689 @@
+; RUN: llc < %s -mtriple=x86_64-apple-darwin10 | FileCheck %s --check-prefix=SDAG
+; RUN: llc < %s -fast-isel -fast-isel-abort -mtriple=x86_64-apple-darwin10 | FileCheck %s --check-prefix=FAST
+
+define zeroext i1 @fcmp_oeq(float %x, float %y) {
+; SDAG-LABEL: fcmp_oeq
+; SDAG: cmpeqss %xmm1, %xmm0
+; SDAG-NEXT: movd %xmm0, %eax
+; SDAG-NEXT: andl $1, %eax
+; FAST-LABEL: fcmp_oeq
+; FAST: ucomiss %xmm1, %xmm0
+; FAST-NEXT: sete %al
+; FAST-NEXT: setnp %cl
+; FAST-NEXT: andb %al, %cl
+ %1 = fcmp oeq float %x, %y
+ ret i1 %1
+}
+
+define zeroext i1 @fcmp_ogt(float %x, float %y) {
+; SDAG-LABEL: fcmp_ogt
+; SDAG: ucomiss %xmm1, %xmm0
+; SDAG-NEXT: seta %al
+; FAST-LABEL: fcmp_ogt
+; FAST: ucomiss %xmm1, %xmm0
+; FAST-NEXT: seta %al
+ %1 = fcmp ogt float %x, %y
+ ret i1 %1
+}
+
+define zeroext i1 @fcmp_oge(float %x, float %y) {
+; SDAG-LABEL: fcmp_oge
+; SDAG: ucomiss %xmm1, %xmm0
+; SDAG-NEXT: setae %al
+; FAST-LABEL: fcmp_oge
+; FAST: ucomiss %xmm1, %xmm0
+; FAST-NEXT: setae %al
+ %1 = fcmp oge float %x, %y
+ ret i1 %1
+}
+
+define zeroext i1 @fcmp_olt(float %x, float %y) {
+; SDAG-LABEL: fcmp_olt
+; SDAG: ucomiss %xmm0, %xmm1
+; SDAG-NEXT: seta %al
+; FAST-LABEL: fcmp_olt
+; FAST: ucomiss %xmm0, %xmm1
+; FAST-NEXT: seta %al
+ %1 = fcmp olt float %x, %y
+ ret i1 %1
+}
+
+define zeroext i1 @fcmp_ole(float %x, float %y) {
+; SDAG-LABEL: fcmp_ole
+; SDAG: ucomiss %xmm0, %xmm1
+; SDAG-NEXT: setae %al
+; FAST-LABEL: fcmp_ole
+; FAST: ucomiss %xmm0, %xmm1
+; FAST-NEXT: setae %al
+ %1 = fcmp ole float %x, %y
+ ret i1 %1
+}
+
+define zeroext i1 @fcmp_one(float %x, float %y) {
+; SDAG-LABEL: fcmp_one
+; SDAG: ucomiss %xmm1, %xmm0
+; SDAG-NEXT: setne %al
+; FAST-LABEL: fcmp_one
+; FAST: ucomiss %xmm1, %xmm0
+; FAST-NEXT: setne %al
+ %1 = fcmp one float %x, %y
+ ret i1 %1
+}
+
+define zeroext i1 @fcmp_ord(float %x, float %y) {
+; SDAG-LABEL: fcmp_ord
+; SDAG: ucomiss %xmm1, %xmm0
+; SDAG-NEXT: setnp %al
+; FAST-LABEL: fcmp_ord
+; FAST: ucomiss %xmm1, %xmm0
+; FAST-NEXT: setnp %al
+ %1 = fcmp ord float %x, %y
+ ret i1 %1
+}
+
+define zeroext i1 @fcmp_uno(float %x, float %y) {
+; SDAG-LABEL: fcmp_uno
+; SDAG: ucomiss %xmm1, %xmm0
+; SDAG-NEXT: setp %al
+; FAST-LABEL: fcmp_uno
+; FAST: ucomiss %xmm1, %xmm0
+; FAST-NEXT: setp %al
+ %1 = fcmp uno float %x, %y
+ ret i1 %1
+}
+
+define zeroext i1 @fcmp_ueq(float %x, float %y) {
+; SDAG-LABEL: fcmp_ueq
+; SDAG: ucomiss %xmm1, %xmm0
+; SDAG-NEXT: sete %al
+; FAST-LABEL: fcmp_ueq
+; FAST: ucomiss %xmm1, %xmm0
+; FAST-NEXT: sete %al
+ %1 = fcmp ueq float %x, %y
+ ret i1 %1
+}
+
+define zeroext i1 @fcmp_ugt(float %x, float %y) {
+; SDAG-LABEL: fcmp_ugt
+; SDAG: ucomiss %xmm0, %xmm1
+; SDAG-NEXT: setb %al
+; FAST-LABEL: fcmp_ugt
+; FAST: ucomiss %xmm0, %xmm1
+; FAST-NEXT: setb %al
+ %1 = fcmp ugt float %x, %y
+ ret i1 %1
+}
+
+define zeroext i1 @fcmp_uge(float %x, float %y) {
+; SDAG-LABEL: fcmp_uge
+; SDAG: ucomiss %xmm0, %xmm1
+; SDAG-NEXT: setbe %al
+; FAST-LABEL: fcmp_uge
+; FAST: ucomiss %xmm0, %xmm1
+; FAST-NEXT: setbe %al
+ %1 = fcmp uge float %x, %y
+ ret i1 %1
+}
+
+define zeroext i1 @fcmp_ult(float %x, float %y) {
+; SDAG-LABEL: fcmp_ult
+; SDAG: ucomiss %xmm1, %xmm0
+; SDAG-NEXT: setb %al
+; FAST-LABEL: fcmp_ult
+; FAST: ucomiss %xmm1, %xmm0
+; FAST-NEXT: setb %al
+ %1 = fcmp ult float %x, %y
+ ret i1 %1
+}
+
+define zeroext i1 @fcmp_ule(float %x, float %y) {
+; SDAG-LABEL: fcmp_ule
+; SDAG: ucomiss %xmm1, %xmm0
+; SDAG-NEXT: setbe %al
+; FAST-LABEL: fcmp_ule
+; FAST: ucomiss %xmm1, %xmm0
+; FAST-NEXT: setbe %al
+ %1 = fcmp ule float %x, %y
+ ret i1 %1
+}
+
+define zeroext i1 @fcmp_une(float %x, float %y) {
+; SDAG-LABEL: fcmp_une
+; SDAG: cmpneqss %xmm1, %xmm0
+; SDAG-NEXT: movd %xmm0, %eax
+; SDAG-NEXT: andl $1, %eax
+; FAST-LABEL: fcmp_une
+; FAST: ucomiss %xmm1, %xmm0
+; FAST-NEXT: setne %al
+; FAST-NEXT: setp %cl
+; FAST-NEXT: orb %al, %cl
+ %1 = fcmp une float %x, %y
+ ret i1 %1
+}
+
+define zeroext i1 @icmp_eq(i32 %x, i32 %y) {
+; SDAG-LABEL: icmp_eq
+; SDAG: cmpl %esi, %edi
+; SDAG-NEXT: sete %al
+; FAST-LABEL: icmp_eq
+; FAST: cmpl %esi, %edi
+; FAST-NEXT: sete %al
+ %1 = icmp eq i32 %x, %y
+ ret i1 %1
+}
+
+define zeroext i1 @icmp_ne(i32 %x, i32 %y) {
+; SDAG-LABEL: icmp_ne
+; SDAG: cmpl %esi, %edi
+; SDAG-NEXT: setne %al
+; FAST-LABEL: icmp_ne
+; FAST: cmpl %esi, %edi
+; FAST-NEXT: setne %al
+ %1 = icmp ne i32 %x, %y
+ ret i1 %1
+}
+
+define zeroext i1 @icmp_ugt(i32 %x, i32 %y) {
+; SDAG-LABEL: icmp_ugt
+; SDAG: cmpl %edi, %esi
+; SDAG-NEXT: setb %al
+; FAST-LABEL: icmp_ugt
+; FAST: cmpl %esi, %edi
+; FAST-NEXT: seta %al
+ %1 = icmp ugt i32 %x, %y
+ ret i1 %1
+}
+
+define zeroext i1 @icmp_uge(i32 %x, i32 %y) {
+; SDAG-LABEL: icmp_uge
+; SDAG: cmpl %esi, %edi
+; SDAG-NEXT: setae %al
+; FAST-LABEL: icmp_uge
+; FAST: cmpl %esi, %edi
+; FAST-NEXT: setae %al
+ %1 = icmp uge i32 %x, %y
+ ret i1 %1
+}
+
+define zeroext i1 @icmp_ult(i32 %x, i32 %y) {
+; SDAG-LABEL: icmp_ult
+; SDAG: cmpl %esi, %edi
+; SDAG-NEXT: setb %al
+; FAST-LABEL: icmp_ult
+; FAST: cmpl %esi, %edi
+; FAST-NEXT: setb %al
+ %1 = icmp ult i32 %x, %y
+ ret i1 %1
+}
+
+define zeroext i1 @icmp_ule(i32 %x, i32 %y) {
+; SDAG-LABEL: icmp_ule
+; SDAG: cmpl %esi, %edi
+; SDAG-NEXT: setbe %al
+; FAST-LABEL: icmp_ule
+; FAST: cmpl %esi, %edi
+; FAST-NEXT: setbe %al
+ %1 = icmp ule i32 %x, %y
+ ret i1 %1
+}
+
+define zeroext i1 @icmp_sgt(i32 %x, i32 %y) {
+; SDAG-LABEL: icmp_sgt
+; SDAG: cmpl %esi, %edi
+; SDAG-NEXT: setg %al
+; FAST-LABEL: icmp_sgt
+; FAST: cmpl %esi, %edi
+; FAST-NEXT: setg %al
+ %1 = icmp sgt i32 %x, %y
+ ret i1 %1
+}
+
+define zeroext i1 @icmp_sge(i32 %x, i32 %y) {
+; SDAG-LABEL: icmp_sge
+; SDAG: cmpl %esi, %edi
+; SDAG-NEXT: setge %al
+; FAST-LABEL: icmp_sge
+; FAST: cmpl %esi, %edi
+; FAST-NEXT: setge %al
+ %1 = icmp sge i32 %x, %y
+ ret i1 %1
+}
+
+define zeroext i1 @icmp_slt(i32 %x, i32 %y) {
+; SDAG-LABEL: icmp_slt
+; SDAG: cmpl %esi, %edi
+; SDAG-NEXT: setl %al
+; FAST-LABEL: icmp_slt
+; FAST: cmpl %esi, %edi
+; FAST-NEXT: setl %al
+ %1 = icmp slt i32 %x, %y
+ ret i1 %1
+}
+
+define zeroext i1 @icmp_sle(i32 %x, i32 %y) {
+; SDAG-LABEL: icmp_sle
+; SDAG: cmpl %esi, %edi
+; SDAG-NEXT: setle %al
+; FAST-LABEL: icmp_sle
+; FAST: cmpl %esi, %edi
+; FAST-NEXT: setle %al
+ %1 = icmp sle i32 %x, %y
+ ret i1 %1
+}
+
+; Test cmp folding and condition optimization.
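+; With identical operands the result depends only on whether the input is
+; NaN: oeq/oge/ole (and ord) reduce to a parity check (setnp), ugt/ult/une
+; (and uno) reduce to setp, ogt/olt/one fold to the constant false, and
+; ueq/uge/ule fold to the constant true.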
+define zeroext i1 @fcmp_oeq2(float %x) {
+; SDAG-LABEL: fcmp_oeq2
+; SDAG: ucomiss %xmm0, %xmm0
+; SDAG-NEXT: setnp %al
+; FAST-LABEL: fcmp_oeq2
+; FAST: ucomiss %xmm0, %xmm0
+; FAST-NEXT: setnp %al
+ %1 = fcmp oeq float %x, %x
+ ret i1 %1
+}
+
+define zeroext i1 @fcmp_oeq3(float %x) {
+; SDAG-LABEL: fcmp_oeq3
+; SDAG: xorps %xmm1, %xmm1
+; SDAG-NEXT: cmpeqss %xmm1, %xmm0
+; SDAG-NEXT: movd %xmm0, %eax
+; SDAG-NEXT: andl $1, %eax
+; FAST-LABEL: fcmp_oeq3
+; FAST: xorps %xmm1, %xmm1
+; FAST-NEXT: ucomiss %xmm1, %xmm0
+; FAST-NEXT: sete %al
+; FAST-NEXT: setnp %cl
+; FAST-NEXT: andb %al, %cl
+ %1 = fcmp oeq float %x, 0.000000e+00
+ ret i1 %1
+}
+
+define zeroext i1 @fcmp_ogt2(float %x) {
+; SDAG-LABEL: fcmp_ogt2
+; SDAG: xorl %eax, %eax
+; FAST-LABEL: fcmp_ogt2
+; FAST: xorl %eax, %eax
+ %1 = fcmp ogt float %x, %x
+ ret i1 %1
+}
+
+define zeroext i1 @fcmp_ogt3(float %x) {
+; SDAG-LABEL: fcmp_ogt3
+; SDAG: xorps %xmm1, %xmm1
+; SDAG-NEXT: ucomiss %xmm1, %xmm0
+; SDAG-NEXT: seta %al
+; FAST-LABEL: fcmp_ogt3
+; FAST: xorps %xmm1, %xmm1
+; FAST-NEXT: ucomiss %xmm1, %xmm0
+; FAST-NEXT: seta %al
+ %1 = fcmp ogt float %x, 0.000000e+00
+ ret i1 %1
+}
+
+define zeroext i1 @fcmp_oge2(float %x) {
+; SDAG-LABEL: fcmp_oge2
+; SDAG: ucomiss %xmm0, %xmm0
+; SDAG-NEXT: setnp %al
+; FAST-LABEL: fcmp_oge2
+; FAST: ucomiss %xmm0, %xmm0
+; FAST-NEXT: setnp %al
+ %1 = fcmp oge float %x, %x
+ ret i1 %1
+}
+
+define zeroext i1 @fcmp_oge3(float %x) {
+; SDAG-LABEL: fcmp_oge3
+; SDAG: xorps %xmm1, %xmm1
+; SDAG-NEXT: ucomiss %xmm1, %xmm0
+; SDAG-NEXT: setae %al
+; FAST-LABEL: fcmp_oge3
+; FAST: xorps %xmm1, %xmm1
+; FAST-NEXT: ucomiss %xmm1, %xmm0
+; FAST-NEXT: setae %al
+ %1 = fcmp oge float %x, 0.000000e+00
+ ret i1 %1
+}
+
+define zeroext i1 @fcmp_olt2(float %x) {
+; SDAG-LABEL: fcmp_olt2
+; SDAG: xorl %eax, %eax
+; FAST-LABEL: fcmp_olt2
+; FAST: xorl %eax, %eax
+ %1 = fcmp olt float %x, %x
+ ret i1 %1
+}
+
+define zeroext i1 @fcmp_olt3(float %x) {
+; SDAG-LABEL: fcmp_olt3
+; SDAG: xorps %xmm1, %xmm1
+; SDAG-NEXT: ucomiss %xmm0, %xmm1
+; SDAG-NEXT: seta %al
+; FAST-LABEL: fcmp_olt3
+; FAST: xorps %xmm1, %xmm1
+; FAST-NEXT: ucomiss %xmm0, %xmm1
+; FAST-NEXT: seta %al
+ %1 = fcmp olt float %x, 0.000000e+00
+ ret i1 %1
+}
+
+define zeroext i1 @fcmp_ole2(float %x) {
+; SDAG-LABEL: fcmp_ole2
+; SDAG: ucomiss %xmm0, %xmm0
+; SDAG-NEXT: setnp %al
+; FAST-LABEL: fcmp_ole2
+; FAST: ucomiss %xmm0, %xmm0
+; FAST-NEXT: setnp %al
+ %1 = fcmp ole float %x, %x
+ ret i1 %1
+}
+
+define zeroext i1 @fcmp_ole3(float %x) {
+; SDAG-LABEL: fcmp_ole3
+; SDAG: xorps %xmm1, %xmm1
+; SDAG-NEXT: ucomiss %xmm0, %xmm1
+; SDAG-NEXT: setae %al
+; FAST-LABEL: fcmp_ole3
+; FAST: xorps %xmm1, %xmm1
+; FAST-NEXT: ucomiss %xmm0, %xmm1
+; FAST-NEXT: setae %al
+ %1 = fcmp ole float %x, 0.000000e+00
+ ret i1 %1
+}
+
+define zeroext i1 @fcmp_one2(float %x) {
+; SDAG-LABEL: fcmp_one2
+; SDAG: xorl %eax, %eax
+; FAST-LABEL: fcmp_one2
+; FAST: xorl %eax, %eax
+ %1 = fcmp one float %x, %x
+ ret i1 %1
+}
+
+define zeroext i1 @fcmp_one3(float %x) {
+; SDAG-LABEL: fcmp_one3
+; SDAG: xorps %xmm1, %xmm1
+; SDAG-NEXT: ucomiss %xmm1, %xmm0
+; SDAG-NEXT: setne %al
+; FAST-LABEL: fcmp_one3
+; FAST: xorps %xmm1, %xmm1
+; FAST-NEXT: ucomiss %xmm1, %xmm0
+; FAST-NEXT: setne %al
+ %1 = fcmp one float %x, 0.000000e+00
+ ret i1 %1
+}
+
+define zeroext i1 @fcmp_ord2(float %x) {
+; SDAG-LABEL: fcmp_ord2
+; SDAG: ucomiss %xmm0, %xmm0
+; SDAG-NEXT: setnp %al
+; FAST-LABEL: fcmp_ord2
+; FAST: ucomiss %xmm0, %xmm0
+; FAST-NEXT: setnp %al
+ %1 = fcmp ord float %x, %x
+ ret i1 %1
+}
+
+define zeroext i1 @fcmp_ord3(float %x) {
+; SDAG-LABEL: fcmp_ord3
+; SDAG: ucomiss %xmm0, %xmm0
+; SDAG-NEXT: setnp %al
+; FAST-LABEL: fcmp_ord3
+; FAST: ucomiss %xmm0, %xmm0
+; FAST-NEXT: setnp %al
+ %1 = fcmp ord float %x, 0.000000e+00
+ ret i1 %1
+}
+
+define zeroext i1 @fcmp_uno2(float %x) {
+; SDAG-LABEL: fcmp_uno2
+; SDAG: ucomiss %xmm0, %xmm0
+; SDAG-NEXT: setp %al
+; FAST-LABEL: fcmp_uno2
+; FAST: ucomiss %xmm0, %xmm0
+; FAST-NEXT: setp %al
+ %1 = fcmp uno float %x, %x
+ ret i1 %1
+}
+
+define zeroext i1 @fcmp_uno3(float %x) {
+; SDAG-LABEL: fcmp_uno3
+; SDAG: ucomiss %xmm0, %xmm0
+; SDAG-NEXT: setp %al
+; FAST-LABEL: fcmp_uno3
+; FAST: ucomiss %xmm0, %xmm0
+; FAST-NEXT: setp %al
+ %1 = fcmp uno float %x, 0.000000e+00
+ ret i1 %1
+}
+
+define zeroext i1 @fcmp_ueq2(float %x) {
+; SDAG-LABEL: fcmp_ueq2
+; SDAG: movb $1, %al
+; FAST-LABEL: fcmp_ueq2
+; FAST: movb $1, %al
+ %1 = fcmp ueq float %x, %x
+ ret i1 %1
+}
+
+define zeroext i1 @fcmp_ueq3(float %x) {
+; SDAG-LABEL: fcmp_ueq3
+; SDAG: xorps %xmm1, %xmm1
+; SDAG-NEXT: ucomiss %xmm1, %xmm0
+; SDAG-NEXT: sete %al
+; FAST-LABEL: fcmp_ueq3
+; FAST: xorps %xmm1, %xmm1
+; FAST-NEXT: ucomiss %xmm1, %xmm0
+; FAST-NEXT: sete %al
+ %1 = fcmp ueq float %x, 0.000000e+00
+ ret i1 %1
+}
+
+define zeroext i1 @fcmp_ugt2(float %x) {
+; SDAG-LABEL: fcmp_ugt2
+; SDAG: ucomiss %xmm0, %xmm0
+; SDAG-NEXT: setp %al
+; FAST-LABEL: fcmp_ugt2
+; FAST: ucomiss %xmm0, %xmm0
+; FAST-NEXT: setp %al
+ %1 = fcmp ugt float %x, %x
+ ret i1 %1
+}
+
+define zeroext i1 @fcmp_ugt3(float %x) {
+; SDAG-LABEL: fcmp_ugt3
+; SDAG: xorps %xmm1, %xmm1
+; SDAG-NEXT: ucomiss %xmm0, %xmm1
+; SDAG-NEXT: setb %al
+; FAST-LABEL: fcmp_ugt3
+; FAST: xorps %xmm1, %xmm1
+; FAST-NEXT: ucomiss %xmm0, %xmm1
+; FAST-NEXT: setb %al
+ %1 = fcmp ugt float %x, 0.000000e+00
+ ret i1 %1
+}
+
+define zeroext i1 @fcmp_uge2(float %x) {
+; SDAG-LABEL: fcmp_uge2
+; SDAG: movb $1, %al
+; FAST-LABEL: fcmp_uge2
+; FAST: movb $1, %al
+ %1 = fcmp uge float %x, %x
+ ret i1 %1
+}
+
+define zeroext i1 @fcmp_uge3(float %x) {
+; SDAG-LABEL: fcmp_uge3
+; SDAG: xorps %xmm1, %xmm1
+; SDAG-NEXT: ucomiss %xmm0, %xmm1
+; SDAG-NEXT: setbe %al
+; FAST-LABEL: fcmp_uge3
+; FAST: xorps %xmm1, %xmm1
+; FAST-NEXT: ucomiss %xmm0, %xmm1
+; FAST-NEXT: setbe %al
+ %1 = fcmp uge float %x, 0.000000e+00
+ ret i1 %1
+}
+
+define zeroext i1 @fcmp_ult2(float %x) {
+; SDAG-LABEL: fcmp_ult2
+; SDAG: ucomiss %xmm0, %xmm0
+; SDAG-NEXT: setp %al
+; FAST-LABEL: fcmp_ult2
+; FAST: ucomiss %xmm0, %xmm0
+; FAST-NEXT: setp %al
+ %1 = fcmp ult float %x, %x
+ ret i1 %1
+}
+
+define zeroext i1 @fcmp_ult3(float %x) {
+; SDAG-LABEL: fcmp_ult3
+; SDAG: xorps %xmm1, %xmm1
+; SDAG-NEXT: ucomiss %xmm1, %xmm0
+; SDAG-NEXT: setb %al
+; FAST-LABEL: fcmp_ult3
+; FAST: xorps %xmm1, %xmm1
+; FAST-NEXT: ucomiss %xmm1, %xmm0
+; FAST-NEXT: setb %al
+ %1 = fcmp ult float %x, 0.000000e+00
+ ret i1 %1
+}
+
+define zeroext i1 @fcmp_ule2(float %x) {
+; SDAG-LABEL: fcmp_ule2
+; SDAG: movb $1, %al
+; FAST-LABEL: fcmp_ule2
+; FAST: movb $1, %al
+ %1 = fcmp ule float %x, %x
+ ret i1 %1
+}
+
+define zeroext i1 @fcmp_ule3(float %x) {
+; SDAG-LABEL: fcmp_ule3
+; SDAG: xorps %xmm1, %xmm1
+; SDAG-NEXT: ucomiss %xmm1, %xmm0
+; SDAG-NEXT: setbe %al
+; FAST-LABEL: fcmp_ule3
+; FAST: xorps %xmm1, %xmm1
+; FAST-NEXT: ucomiss %xmm1, %xmm0
+; FAST-NEXT: setbe %al
+ %1 = fcmp ule float %x, 0.000000e+00
+ ret i1 %1
+}
+
+define zeroext i1 @fcmp_une2(float %x) {
+; SDAG-LABEL: fcmp_une2
+; SDAG: ucomiss %xmm0, %xmm0
+; SDAG-NEXT: setp %al
+; FAST-LABEL: fcmp_une2
+; FAST: ucomiss %xmm0, %xmm0
+; FAST-NEXT: setp %al
+ %1 = fcmp une float %x, %x
+ ret i1 %1
+}
+
+define zeroext i1 @fcmp_une3(float %x) {
+; SDAG-LABEL: fcmp_une3
+; SDAG: xorps %xmm1, %xmm1
+; SDAG-NEXT: cmpneqss %xmm1, %xmm0
+; SDAG-NEXT: movd %xmm0, %eax
+; SDAG-NEXT: andl $1, %eax
+; FAST-LABEL: fcmp_une3
+; FAST: xorps %xmm1, %xmm1
+; FAST-NEXT: ucomiss %xmm1, %xmm0
+; FAST-NEXT: setne %al
+; FAST-NEXT: setp %cl
+; FAST-NEXT: orb %al, %cl
+ %1 = fcmp une float %x, 0.000000e+00
+ ret i1 %1
+}
+
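+; Integer self-compares fold to a constant: eq, uge, ule, sge, and sle are
+; always true; ne and the strict orderings are always false.
+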
+define zeroext i1 @icmp_eq2(i32 %x) {
+; SDAG-LABEL: icmp_eq2
+; SDAG: movb $1, %al
+; FAST-LABEL: icmp_eq2
+; FAST: movb $1, %al
+ %1 = icmp eq i32 %x, %x
+ ret i1 %1
+}
+
+define zeroext i1 @icmp_ne2(i32 %x) {
+; SDAG-LABEL: icmp_ne2
+; SDAG: xorl %eax, %eax
+; FAST-LABEL: icmp_ne2
+; FAST: xorl %eax, %eax
+ %1 = icmp ne i32 %x, %x
+ ret i1 %1
+}
+
+define zeroext i1 @icmp_ugt2(i32 %x) {
+; SDAG-LABEL: icmp_ugt2
+; SDAG: xorl %eax, %eax
+; FAST-LABEL: icmp_ugt2
+; FAST: xorl %eax, %eax
+ %1 = icmp ugt i32 %x, %x
+ ret i1 %1
+}
+
+define zeroext i1 @icmp_uge2(i32 %x) {
+; SDAG-LABEL: icmp_uge2
+; SDAG: movb $1, %al
+; FAST-LABEL: icmp_uge2
+; FAST: movb $1, %al
+ %1 = icmp uge i32 %x, %x
+ ret i1 %1
+}
+
+define zeroext i1 @icmp_ult2(i32 %x) {
+; SDAG-LABEL: icmp_ult2
+; SDAG: xorl %eax, %eax
+; FAST-LABEL: icmp_ult2
+; FAST: xorl %eax, %eax
+ %1 = icmp ult i32 %x, %x
+ ret i1 %1
+}
+
+define zeroext i1 @icmp_ule2(i32 %x) {
+; SDAG-LABEL: icmp_ule2
+; SDAG: movb $1, %al
+; FAST-LABEL: icmp_ule2
+; FAST: movb $1, %al
+ %1 = icmp ule i32 %x, %x
+ ret i1 %1
+}
+
+define zeroext i1 @icmp_sgt2(i32 %x) {
+; SDAG-LABEL: icmp_sgt2
+; SDAG: xorl %eax, %eax
+; FAST-LABEL: icmp_sgt2
+; FAST: xorl %eax, %eax
+ %1 = icmp sgt i32 %x, %x
+ ret i1 %1
+}
+
+define zeroext i1 @icmp_sge2(i32 %x) {
+; SDAG-LABEL: icmp_sge2
+; SDAG: movb $1, %al
+; FAST-LABEL: icmp_sge2
+; FAST: movb $1, %al
+ %1 = icmp sge i32 %x, %x
+ ret i1 %1
+}
+
+define zeroext i1 @icmp_slt2(i32 %x) {
+; SDAG-LABEL: icmp_slt2
+; SDAG: xorl %eax, %eax
+; FAST-LABEL: icmp_slt2
+; FAST: xorl %eax, %eax
+ %1 = icmp slt i32 %x, %x
+ ret i1 %1
+}
+
+define zeroext i1 @icmp_sle2(i32 %x) {
+; SDAG-LABEL: icmp_sle2
+; SDAG: movb $1, %al
+; FAST-LABEL: icmp_sle2
+; FAST: movb $1, %al
+ %1 = icmp sle i32 %x, %x
+ ret i1 %1
+}
+
diff --git a/test/CodeGen/X86/fast-isel-fold-mem.ll b/test/CodeGen/X86/fast-isel-fold-mem.ll
new file mode 100644
index 0000000..a945779
--- /dev/null
+++ b/test/CodeGen/X86/fast-isel-fold-mem.ll
@@ -0,0 +1,12 @@
+; RUN: llc < %s -mtriple=x86_64-apple-darwin | FileCheck %s
+; RUN: llc < %s -fast-isel -fast-isel-abort -mtriple=x86_64-apple-darwin | FileCheck %s
+
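+; The load should be folded into the add as a memory operand rather than
+; materialized with a separate mov.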
+define i64 @fold_load(i64* %a, i64 %b) {
+; CHECK-LABEL: fold_load
+; CHECK: addq (%rdi), %rsi
+; CHECK-NEXT: movq %rsi, %rax
+ %1 = load i64* %a, align 8
+ %2 = add i64 %1, %b
+ ret i64 %2
+}
+
diff --git a/test/CodeGen/X86/fast-isel-select-cmov.ll b/test/CodeGen/X86/fast-isel-select-cmov.ll
new file mode 100644
index 0000000..8008e28
--- /dev/null
+++ b/test/CodeGen/X86/fast-isel-select-cmov.ll
@@ -0,0 +1,62 @@
+; RUN: llc < %s -fast-isel -fast-isel-abort -mtriple=x86_64-apple-darwin10 | FileCheck %s
+
+; Test conditional move for the supported types (i16, i32, and i64) and
+; condition input (argument or cmp). Currently i8 is not supported.
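+; (x86 has no 8-bit form of cmov, so i8 selects take a different lowering.)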
+
+define zeroext i16 @select_cmov_i16(i1 zeroext %cond, i16 zeroext %a, i16 zeroext %b) {
+; CHECK-LABEL: select_cmov_i16
+; CHECK: testb $1, %dil
+; CHECK-NEXT: cmovew %dx, %si
+; CHECK-NEXT: movzwl %si, %eax
+ %1 = select i1 %cond, i16 %a, i16 %b
+ ret i16 %1
+}
+
+define zeroext i16 @select_cmp_cmov_i16(i16 zeroext %a, i16 zeroext %b) {
+; CHECK-LABEL: select_cmp_cmov_i16
+; CHECK: cmpw %si, %di
+; CHECK-NEXT: cmovbw %di, %si
+; CHECK-NEXT: movzwl %si, %eax
+ %1 = icmp ult i16 %a, %b
+ %2 = select i1 %1, i16 %a, i16 %b
+ ret i16 %2
+}
+
+define i32 @select_cmov_i32(i1 zeroext %cond, i32 %a, i32 %b) {
+; CHECK-LABEL: select_cmov_i32
+; CHECK: testb $1, %dil
+; CHECK-NEXT: cmovel %edx, %esi
+; CHECK-NEXT: movl %esi, %eax
+ %1 = select i1 %cond, i32 %a, i32 %b
+ ret i32 %1
+}
+
+define i32 @select_cmp_cmov_i32(i32 %a, i32 %b) {
+; CHECK-LABEL: select_cmp_cmov_i32
+; CHECK: cmpl %esi, %edi
+; CHECK-NEXT: cmovbl %edi, %esi
+; CHECK-NEXT: movl %esi, %eax
+ %1 = icmp ult i32 %a, %b
+ %2 = select i1 %1, i32 %a, i32 %b
+ ret i32 %2
+}
+
+define i64 @select_cmov_i64(i1 zeroext %cond, i64 %a, i64 %b) {
+; CHECK-LABEL: select_cmov_i64
+; CHECK: testb $1, %dil
+; CHECK-NEXT: cmoveq %rdx, %rsi
+; CHECK-NEXT: movq %rsi, %rax
+ %1 = select i1 %cond, i64 %a, i64 %b
+ ret i64 %1
+}
+
+define i64 @select_cmp_cmov_i64(i64 %a, i64 %b) {
+; CHECK-LABEL: select_cmp_cmov_i64
+; CHECK: cmpq %rsi, %rdi
+; CHECK-NEXT: cmovbq %rdi, %rsi
+; CHECK-NEXT: movq %rsi, %rax
+ %1 = icmp ult i64 %a, %b
+ %2 = select i1 %1, i64 %a, i64 %b
+ ret i64 %2
+}
+
diff --git a/test/CodeGen/X86/fast-isel-select-cmov2.ll b/test/CodeGen/X86/fast-isel-select-cmov2.ll
new file mode 100644
index 0000000..658098f
--- /dev/null
+++ b/test/CodeGen/X86/fast-isel-select-cmov2.ll
@@ -0,0 +1,255 @@
+; RUN: llc < %s -mtriple=x86_64-apple-darwin10 | FileCheck %s
+; RUN: llc < %s -mtriple=x86_64-apple-darwin10 -fast-isel -fast-isel-abort | FileCheck %s
+
+; Test all the cmp predicates that can feed an integer conditional move.
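+; Most predicates map to a single cmov condition code, in some cases with the
+; compare operands swapped (olt, ole, ugt, uge); oeq and une also need a
+; parity check, and false/true fold to a plain move.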
+
+define i64 @select_fcmp_false_cmov(double %a, double %b, i64 %c, i64 %d) {
+; CHECK-LABEL: select_fcmp_false_cmov
+; CHECK: movq %rsi, %rax
+; CHECK-NEXT: retq
+ %1 = fcmp false double %a, %b
+ %2 = select i1 %1, i64 %c, i64 %d
+ ret i64 %2
+}
+
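+; ucomisd sets ZF, PF, and CF, and an unordered result sets all three; an
+; ordered-equal test therefore needs both ZF = 1 (sete) and PF = 0 (setnp).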
+define i64 @select_fcmp_oeq_cmov(double %a, double %b, i64 %c, i64 %d) {
+; CHECK-LABEL: select_fcmp_oeq_cmov
+; CHECK: ucomisd %xmm1, %xmm0
+; CHECK-NEXT: setnp %al
+; CHECK-NEXT: sete %cl
+; CHECK-NEXT: testb %al, %cl
+; CHECK-NEXT: cmoveq %rsi, %rdi
+ %1 = fcmp oeq double %a, %b
+ %2 = select i1 %1, i64 %c, i64 %d
+ ret i64 %2
+}
+
+define i64 @select_fcmp_ogt_cmov(double %a, double %b, i64 %c, i64 %d) {
+; CHECK-LABEL: select_fcmp_ogt_cmov
+; CHECK: ucomisd %xmm1, %xmm0
+; CHECK-NEXT: cmovbeq %rsi, %rdi
+ %1 = fcmp ogt double %a, %b
+ %2 = select i1 %1, i64 %c, i64 %d
+ ret i64 %2
+}
+
+define i64 @select_fcmp_oge_cmov(double %a, double %b, i64 %c, i64 %d) {
+; CHECK-LABEL: select_fcmp_oge_cmov
+; CHECK: ucomisd %xmm1, %xmm0
+; CHECK-NEXT: cmovbq %rsi, %rdi
+ %1 = fcmp oge double %a, %b
+ %2 = select i1 %1, i64 %c, i64 %d
+ ret i64 %2
+}
+
+define i64 @select_fcmp_olt_cmov(double %a, double %b, i64 %c, i64 %d) {
+; CHECK-LABEL: select_fcmp_olt_cmov
+; CHECK: ucomisd %xmm0, %xmm1
+; CHECK-NEXT: cmovbeq %rsi, %rdi
+ %1 = fcmp olt double %a, %b
+ %2 = select i1 %1, i64 %c, i64 %d
+ ret i64 %2
+}
+
+define i64 @select_fcmp_ole_cmov(double %a, double %b, i64 %c, i64 %d) {
+; CHECK-LABEL: select_fcmp_ole_cmov
+; CHECK: ucomisd %xmm0, %xmm1
+; CHECK-NEXT: cmovbq %rsi, %rdi
+ %1 = fcmp ole double %a, %b
+ %2 = select i1 %1, i64 %c, i64 %d
+ ret i64 %2
+}
+
+define i64 @select_fcmp_one_cmov(double %a, double %b, i64 %c, i64 %d) {
+; CHECK-LABEL: select_fcmp_one_cmov
+; CHECK: ucomisd %xmm1, %xmm0
+; CHECK-NEXT: cmoveq %rsi, %rdi
+ %1 = fcmp one double %a, %b
+ %2 = select i1 %1, i64 %c, i64 %d
+ ret i64 %2
+}
+
+define i64 @select_fcmp_ord_cmov(double %a, double %b, i64 %c, i64 %d) {
+; CHECK-LABEL: select_fcmp_ord_cmov
+; CHECK: ucomisd %xmm1, %xmm0
+; CHECK-NEXT: cmovpq %rsi, %rdi
+ %1 = fcmp ord double %a, %b
+ %2 = select i1 %1, i64 %c, i64 %d
+ ret i64 %2
+}
+
+define i64 @select_fcmp_uno_cmov(double %a, double %b, i64 %c, i64 %d) {
+; CHECK-LABEL: select_fcmp_uno_cmov
+; CHECK: ucomisd %xmm1, %xmm0
+; CHECK-NEXT: cmovnpq %rsi, %rdi
+ %1 = fcmp uno double %a, %b
+ %2 = select i1 %1, i64 %c, i64 %d
+ ret i64 %2
+}
+
+define i64 @select_fcmp_ueq_cmov(double %a, double %b, i64 %c, i64 %d) {
+; CHECK-LABEL: select_fcmp_ueq_cmov
+; CHECK: ucomisd %xmm1, %xmm0
+; CHECK-NEXT: cmovneq %rsi, %rdi
+ %1 = fcmp ueq double %a, %b
+ %2 = select i1 %1, i64 %c, i64 %d
+ ret i64 %2
+}
+
+define i64 @select_fcmp_ugt_cmov(double %a, double %b, i64 %c, i64 %d) {
+; CHECK-LABEL: select_fcmp_ugt_cmov
+; CHECK: ucomisd %xmm0, %xmm1
+; CHECK-NEXT: cmovaeq %rsi, %rdi
+ %1 = fcmp ugt double %a, %b
+ %2 = select i1 %1, i64 %c, i64 %d
+ ret i64 %2
+}
+
+define i64 @select_fcmp_uge_cmov(double %a, double %b, i64 %c, i64 %d) {
+; CHECK-LABEL: select_fcmp_uge_cmov
+; CHECK: ucomisd %xmm0, %xmm1
+; CHECK-NEXT: cmovaq %rsi, %rdi
+ %1 = fcmp uge double %a, %b
+ %2 = select i1 %1, i64 %c, i64 %d
+ ret i64 %2
+}
+
+define i64 @select_fcmp_ult_cmov(double %a, double %b, i64 %c, i64 %d) {
+; CHECK-LABEL: select_fcmp_ult_cmov
+; CHECK: ucomisd %xmm1, %xmm0
+; CHECK-NEXT: cmovaeq %rsi, %rdi
+ %1 = fcmp ult double %a, %b
+ %2 = select i1 %1, i64 %c, i64 %d
+ ret i64 %2
+}
+
+define i64 @select_fcmp_ule_cmov(double %a, double %b, i64 %c, i64 %d) {
+; CHECK-LABEL: select_fcmp_ule_cmov
+; CHECK: ucomisd %xmm1, %xmm0
+; CHECK-NEXT: cmovaq %rsi, %rdi
+ %1 = fcmp ule double %a, %b
+ %2 = select i1 %1, i64 %c, i64 %d
+ ret i64 %2
+}
+
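+; une is the complement of oeq: true when not equal (setne) or unordered
+; (setp).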
+define i64 @select_fcmp_une_cmov(double %a, double %b, i64 %c, i64 %d) {
+; CHECK-LABEL: select_fcmp_une_cmov
+; CHECK: ucomisd %xmm1, %xmm0
+; CHECK-NEXT: setp %al
+; CHECK-NEXT: setne %cl
+; CHECK-NEXT: orb %al, %cl
+; CHECK-NEXT: cmoveq %rsi, %rdi
+ %1 = fcmp une double %a, %b
+ %2 = select i1 %1, i64 %c, i64 %d
+ ret i64 %2
+}
+
+define i64 @select_fcmp_true_cmov(double %a, double %b, i64 %c, i64 %d) {
+; CHECK-LABEL: select_fcmp_true_cmov
+; CHECK: movq %rdi, %rax
+ %1 = fcmp true double %a, %b
+ %2 = select i1 %1, i64 %c, i64 %d
+ ret i64 %2
+}
+
+define i64 @select_icmp_eq_cmov(i64 %a, i64 %b, i64 %c, i64 %d) {
+; CHECK-LABEL: select_icmp_eq_cmov
+; CHECK: cmpq %rsi, %rdi
+; CHECK-NEXT: cmovneq %rcx, %rdx
+; CHECK-NEXT: movq %rdx, %rax
+ %1 = icmp eq i64 %a, %b
+ %2 = select i1 %1, i64 %c, i64 %d
+ ret i64 %2
+}
+
+define i64 @select_icmp_ne_cmov(i64 %a, i64 %b, i64 %c, i64 %d) {
+; CHECK-LABEL: select_icmp_ne_cmov
+; CHECK: cmpq %rsi, %rdi
+; CHECK-NEXT: cmoveq %rcx, %rdx
+; CHECK-NEXT: movq %rdx, %rax
+ %1 = icmp ne i64 %a, %b
+ %2 = select i1 %1, i64 %c, i64 %d
+ ret i64 %2
+}
+
+define i64 @select_icmp_ugt_cmov(i64 %a, i64 %b, i64 %c, i64 %d) {
+; CHECK-LABEL: select_icmp_ugt_cmov
+; CHECK: cmpq %rsi, %rdi
+; CHECK-NEXT: cmovbeq %rcx, %rdx
+; CHECK-NEXT: movq %rdx, %rax
+ %1 = icmp ugt i64 %a, %b
+ %2 = select i1 %1, i64 %c, i64 %d
+ ret i64 %2
+}
+
+
+define i64 @select_icmp_uge_cmov(i64 %a, i64 %b, i64 %c, i64 %d) {
+; CHECK-LABEL: select_icmp_uge_cmov
+; CHECK: cmpq %rsi, %rdi
+; CHECK-NEXT: cmovbq %rcx, %rdx
+; CHECK-NEXT: movq %rdx, %rax
+ %1 = icmp uge i64 %a, %b
+ %2 = select i1 %1, i64 %c, i64 %d
+ ret i64 %2
+}
+
+define i64 @select_icmp_ult_cmov(i64 %a, i64 %b, i64 %c, i64 %d) {
+; CHECK-LABEL: select_icmp_ult_cmov
+; CHECK: cmpq %rsi, %rdi
+; CHECK-NEXT: cmovaeq %rcx, %rdx
+; CHECK-NEXT: movq %rdx, %rax
+ %1 = icmp ult i64 %a, %b
+ %2 = select i1 %1, i64 %c, i64 %d
+ ret i64 %2
+}
+
+define i64 @select_icmp_ule_cmov(i64 %a, i64 %b, i64 %c, i64 %d) {
+; CHECK-LABEL: select_icmp_ule_cmov
+; CHECK: cmpq %rsi, %rdi
+; CHECK-NEXT: cmovaq %rcx, %rdx
+; CHECK-NEXT: movq %rdx, %rax
+ %1 = icmp ule i64 %a, %b
+ %2 = select i1 %1, i64 %c, i64 %d
+ ret i64 %2
+}
+
+define i64 @select_icmp_sgt_cmov(i64 %a, i64 %b, i64 %c, i64 %d) {
+; CHECK-LABEL: select_icmp_sgt_cmov
+; CHECK: cmpq %rsi, %rdi
+; CHECK-NEXT: cmovleq %rcx, %rdx
+; CHECK-NEXT: movq %rdx, %rax
+ %1 = icmp sgt i64 %a, %b
+ %2 = select i1 %1, i64 %c, i64 %d
+ ret i64 %2
+}
+
+define i64 @select_icmp_sge_cmov(i64 %a, i64 %b, i64 %c, i64 %d) {
+; CHECK-LABEL: select_icmp_sge_cmov
+; CHECK: cmpq %rsi, %rdi
+; CHECK-NEXT: cmovlq %rcx, %rdx
+; CHECK-NEXT: movq %rdx, %rax
+ %1 = icmp sge i64 %a, %b
+ %2 = select i1 %1, i64 %c, i64 %d
+ ret i64 %2
+}
+
+define i64 @select_icmp_slt_cmov(i64 %a, i64 %b, i64 %c, i64 %d) {
+; CHECK-LABEL: select_icmp_slt_cmov
+; CHECK: cmpq %rsi, %rdi
+; CHECK-NEXT: cmovgeq %rcx, %rdx
+; CHECK-NEXT: movq %rdx, %rax
+ %1 = icmp slt i64 %a, %b
+ %2 = select i1 %1, i64 %c, i64 %d
+ ret i64 %2
+}
+
+define i64 @select_icmp_sle_cmov(i64 %a, i64 %b, i64 %c, i64 %d) {
+; CHECK-LABEL: select_icmp_sle_cmov
+; CHECK: cmpq %rsi, %rdi
+; CHECK-NEXT: cmovgq %rcx, %rdx
+; CHECK-NEXT: movq %rdx, %rax
+ %1 = icmp sle i64 %a, %b
+ %2 = select i1 %1, i64 %c, i64 %d
+ ret i64 %2
+}
+
diff --git a/test/CodeGen/X86/fast-isel-select-cmp.ll b/test/CodeGen/X86/fast-isel-select-cmp.ll
new file mode 100644
index 0000000..1af30e9
--- /dev/null
+++ b/test/CodeGen/X86/fast-isel-select-cmp.ll
@@ -0,0 +1,50 @@
+; RUN: llc < %s -O0 -mtriple=x86_64-apple-darwin10 | FileCheck %s
+
+; Test that the cmp is not folded into the select when the instructions are
+; in different basic blocks.
+
+define i32 @select_cmp_cmov_i32(i32 %a, i32 %b) {
+; CHECK-LABEL: select_cmp_cmov_i32
+; CHECK-LABEL: continue
+; CHECK-NOT: cmp
+ %1 = icmp ult i32 %a, %b
+ br i1 %1, label %continue, label %exit
+
+continue:
+ %2 = select i1 %1, i32 %a, i32 %b
+ ret i32 %2
+
+exit:
+ ret i32 -1
+}
+
+define float @select_fcmp_oeq_f32(float %a, float %b, float %c, float %d) {
+; CHECK-LABEL: select_fcmp_oeq_f32
+; CHECK-LABEL: continue
+; CHECK-NOT: cmp
+ %1 = fcmp oeq float %a, %b
+ br i1 %1, label %continue, label %exit
+
+continue:
+ %2 = select i1 %1, float %c, float %d
+ ret float %2
+
+exit:
+ ret float -1.0
+}
+
+define float @select_fcmp_one_f32(float %a, float %b, float %c, float %d) {
+; CHECK-LABEL: select_fcmp_one_f32
+; CHECK-LABEL: continue
+; CHECK-NOT: ucomi
+ %1 = fcmp one float %a, %b
+ br i1 %1, label %continue, label %exit
+
+continue:
+ %2 = select i1 %1, float %c, float %d
+ ret float %2
+
+exit:
+ ret float -1.0
+}
+
diff --git a/test/CodeGen/X86/fast-isel-select-pseudo-cmov.ll b/test/CodeGen/X86/fast-isel-select-pseudo-cmov.ll
new file mode 100644
index 0000000..1ec4d64
--- /dev/null
+++ b/test/CodeGen/X86/fast-isel-select-pseudo-cmov.ll
@@ -0,0 +1,138 @@
+; RUN: llc < %s -mtriple=x86_64-apple-darwin10 | FileCheck %s
+; RUN: llc < %s -mtriple=x86_64-apple-darwin10 -fast-isel -fast-isel-abort | FileCheck %s
+; RUN: llc < %s -mtriple=x86_64-apple-darwin10 -mcpu=corei7-avx | FileCheck %s
+; RUN: llc < %s -mtriple=x86_64-apple-darwin10 -fast-isel -fast-isel-abort -mcpu=corei7-avx | FileCheck %s
+
+
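+; SSE has no conditional move for scalar floats, so these selects are expanded
+; into a compare, a conditional branch, and a register move.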
+define float @select_fcmp_one_f32(float %a, float %b, float %c, float %d) {
+; CHECK-LABEL: select_fcmp_one_f32
+; CHECK: ucomiss %xmm1, %xmm0
+; CHECK-NEXT: jne [[BB:LBB[0-9]+_2]]
+; CHECK: [[BB]]
+; CHECK-NEXT: movaps %xmm2, %xmm0
+ %1 = fcmp one float %a, %b
+ %2 = select i1 %1, float %c, float %d
+ ret float %2
+}
+
+define double @select_fcmp_one_f64(double %a, double %b, double %c, double %d) {
+; CHECK-LABEL: select_fcmp_one_f64
+; CHECK: ucomisd %xmm1, %xmm0
+; CHECK-NEXT: jne [[BB:LBB[0-9]+_2]]
+; CHECK: [[BB]]
+; CHECK-NEXT: movaps %xmm2, %xmm0
+ %1 = fcmp one double %a, %b
+ %2 = select i1 %1, double %c, double %d
+ ret double %2
+}
+
+define float @select_icmp_eq_f32(i64 %a, i64 %b, float %c, float %d) {
+; CHECK-LABEL: select_icmp_eq_f32
+; CHECK: cmpq %rsi, %rdi
+; CHECK-NEXT: je [[BB:LBB[0-9]+_2]]
+; CHECK: [[BB]]
+; CHECK-NEXT: retq
+ %1 = icmp eq i64 %a, %b
+ %2 = select i1 %1, float %c, float %d
+ ret float %2
+}
+
+define float @select_icmp_ne_f32(i64 %a, i64 %b, float %c, float %d) {
+; CHECK-LABEL: select_icmp_ne_f32
+; CHECK: cmpq %rsi, %rdi
+; CHECK-NEXT: jne [[BB:LBB[0-9]+_2]]
+; CHECK: [[BB]]
+; CHECK-NEXT: retq
+ %1 = icmp ne i64 %a, %b
+ %2 = select i1 %1, float %c, float %d
+ ret float %2
+}
+
+define float @select_icmp_ugt_f32(i64 %a, i64 %b, float %c, float %d) {
+; CHECK-LABEL: select_icmp_ugt_f32
+; CHECK: cmpq %rsi, %rdi
+; CHECK-NEXT: ja [[BB:LBB[0-9]+_2]]
+; CHECK: [[BB]]
+; CHECK-NEXT: retq
+ %1 = icmp ugt i64 %a, %b
+ %2 = select i1 %1, float %c, float %d
+ ret float %2
+}
+
+define float @select_icmp_uge_f32(i64 %a, i64 %b, float %c, float %d) {
+; CHECK-LABEL: select_icmp_uge_f32
+; CHECK: cmpq %rsi, %rdi
+; CHECK-NEXT: jae [[BB:LBB[0-9]+_2]]
+; CHECK: [[BB]]
+; CHECK-NEXT: retq
+ %1 = icmp uge i64 %a, %b
+ %2 = select i1 %1, float %c, float %d
+ ret float %2
+}
+
+define float @select_icmp_ult_f32(i64 %a, i64 %b, float %c, float %d) {
+; CHECK-LABEL: select_icmp_ult_f32
+; CHECK: cmpq %rsi, %rdi
+; CHECK-NEXT: jb [[BB:LBB[0-9]+_2]]
+; CHECK: [[BB]]
+; CHECK-NEXT: retq
+ %1 = icmp ult i64 %a, %b
+ %2 = select i1 %1, float %c, float %d
+ ret float %2
+}
+
+define float @select_icmp_ule_f32(i64 %a, i64 %b, float %c, float %d) {
+; CHECK-LABEL: select_icmp_ule_f32
+; CHECK: cmpq %rsi, %rdi
+; CHECK-NEXT: jbe [[BB:LBB[0-9]+_2]]
+; CHECK: [[BB]]
+; CHECK-NEXT: retq
+ %1 = icmp ule i64 %a, %b
+ %2 = select i1 %1, float %c, float %d
+ ret float %2
+}
+
+define float @select_icmp_sgt_f32(i64 %a, i64 %b, float %c, float %d) {
+; CHECK-LABEL: select_icmp_sgt_f32
+; CHECK: cmpq %rsi, %rdi
+; CHECK-NEXT: jg [[BB:LBB[0-9]+_2]]
+; CHECK: [[BB]]
+; CHECK-NEXT: retq
+ %1 = icmp sgt i64 %a, %b
+ %2 = select i1 %1, float %c, float %d
+ ret float %2
+}
+
+define float @select_icmp_sge_f32(i64 %a, i64 %b, float %c, float %d) {
+; CHECK-LABEL: select_icmp_sge_f32
+; CHECK: cmpq %rsi, %rdi
+; CHECK-NEXT: jge [[BB:LBB[0-9]+_2]]
+; CHECK: [[BB]]
+; CHECK-NEXT: retq
+ %1 = icmp sge i64 %a, %b
+ %2 = select i1 %1, float %c, float %d
+ ret float %2
+}
+
+define float @select_icmp_slt_f32(i64 %a, i64 %b, float %c, float %d) {
+; CHECK-LABEL: select_icmp_slt_f32
+; CHECK: cmpq %rsi, %rdi
+; CHECK-NEXT: jl [[BB:LBB[0-9]+_2]]
+; CHECK: [[BB]]
+; CHECK-NEXT: retq
+ %1 = icmp slt i64 %a, %b
+ %2 = select i1 %1, float %c, float %d
+ ret float %2
+}
+
+define float @select_icmp_sle_f32(i64 %a, i64 %b, float %c, float %d) {
+; CHECK-LABEL: select_icmp_sle_f32
+; CHECK: cmpq %rsi, %rdi
+; CHECK-NEXT: jle [[BB:LBB[0-9]+_2]]
+; CHECK: [[BB]]
+; CHECK-NEXT: retq
+ %1 = icmp sle i64 %a, %b
+ %2 = select i1 %1, float %c, float %d
+ ret float %2
+}
+
diff --git a/test/CodeGen/X86/fast-isel-select-sse.ll b/test/CodeGen/X86/fast-isel-select-sse.ll
new file mode 100644
index 0000000..3c03a03
--- /dev/null
+++ b/test/CodeGen/X86/fast-isel-select-sse.ll
@@ -0,0 +1,391 @@
+; RUN: llc < %s -mtriple=x86_64-apple-darwin10 | FileCheck %s
+; RUN: llc < %s -mtriple=x86_64-apple-darwin10 -fast-isel -fast-isel-abort | FileCheck %s
+; RUN: llc < %s -mtriple=x86_64-apple-darwin10 -mcpu=corei7-avx | FileCheck %s --check-prefix=AVX
+; RUN: llc < %s -mtriple=x86_64-apple-darwin10 -fast-isel -fast-isel-abort -mcpu=corei7-avx | FileCheck %s --check-prefix=AVX
+
+; Test all cmp predicates that can be used with SSE.
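+; Each select becomes a cmp{cond}ss/sd mask of all ones or all zeros that
+; blends the two operands with and/andn/or; ogt, oge, ult, and ule are
+; handled by swapping the compare operands.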
+
+define float @select_fcmp_oeq_f32(float %a, float %b, float %c, float %d) {
+; CHECK-LABEL: select_fcmp_oeq_f32
+; CHECK: cmpeqss %xmm1, %xmm0
+; CHECK-NEXT: andps %xmm0, %xmm2
+; CHECK-NEXT: andnps %xmm3, %xmm0
+; CHECK-NEXT: orps %xmm2, %xmm0
+; AVX-LABEL: select_fcmp_oeq_f32
+; AVX: vcmpeqss %xmm1, %xmm0, %xmm0
+; AVX-NEXT: vandps %xmm2, %xmm0, %xmm1
+; AVX-NEXT: vandnps %xmm3, %xmm0, %xmm0
+; AVX-NEXT: vorps %xmm1, %xmm0, %xmm0
+ %1 = fcmp oeq float %a, %b
+ %2 = select i1 %1, float %c, float %d
+ ret float %2
+}
+
+define double @select_fcmp_oeq_f64(double %a, double %b, double %c, double %d) {
+; CHECK-LABEL: select_fcmp_oeq_f64
+; CHECK: cmpeqsd %xmm1, %xmm0
+; CHECK-NEXT: andpd %xmm0, %xmm2
+; CHECK-NEXT: andnpd %xmm3, %xmm0
+; CHECK-NEXT: orpd %xmm2, %xmm0
+; AVX-LABEL: select_fcmp_oeq_f64
+; AVX: vcmpeqsd %xmm1, %xmm0, %xmm0
+; AVX-NEXT: vandpd %xmm2, %xmm0, %xmm1
+; AVX-NEXT: vandnpd %xmm3, %xmm0, %xmm0
+; AVX-NEXT: vorpd %xmm1, %xmm0, %xmm0
+ %1 = fcmp oeq double %a, %b
+ %2 = select i1 %1, double %c, double %d
+ ret double %2
+}
+
+define float @select_fcmp_ogt_f32(float %a, float %b, float %c, float %d) {
+; CHECK-LABEL: select_fcmp_ogt_f32
+; CHECK: cmpltss %xmm0, %xmm1
+; CHECK-NEXT: andps %xmm1, %xmm2
+; CHECK-NEXT: andnps %xmm3, %xmm1
+; CHECK-NEXT: orps %xmm2, %xmm1
+; AVX-LABEL: select_fcmp_ogt_f32
+; AVX: vcmpltss %xmm0, %xmm1, %xmm0
+; AVX-NEXT: vandps %xmm2, %xmm0, %xmm1
+; AVX-NEXT: vandnps %xmm3, %xmm0, %xmm0
+; AVX-NEXT: vorps %xmm1, %xmm0, %xmm0
+ %1 = fcmp ogt float %a, %b
+ %2 = select i1 %1, float %c, float %d
+ ret float %2
+}
+
+define double @select_fcmp_ogt_f64(double %a, double %b, double %c, double %d) {
+; CHECK-LABEL: select_fcmp_ogt_f64
+; CHECK: cmpltsd %xmm0, %xmm1
+; CHECK-NEXT: andpd %xmm1, %xmm2
+; CHECK-NEXT: andnpd %xmm3, %xmm1
+; CHECK-NEXT: orpd %xmm2, %xmm1
+; AVX-LABEL: select_fcmp_ogt_f64
+; AVX: vcmpltsd %xmm0, %xmm1, %xmm0
+; AVX-NEXT: vandpd %xmm2, %xmm0, %xmm1
+; AVX-NEXT: vandnpd %xmm3, %xmm0, %xmm0
+; AVX-NEXT: vorpd %xmm1, %xmm0, %xmm0
+ %1 = fcmp ogt double %a, %b
+ %2 = select i1 %1, double %c, double %d
+ ret double %2
+}
+
+define float @select_fcmp_oge_f32(float %a, float %b, float %c, float %d) {
+; CHECK-LABEL: select_fcmp_oge_f32
+; CHECK: cmpless %xmm0, %xmm1
+; CHECK-NEXT: andps %xmm1, %xmm2
+; CHECK-NEXT: andnps %xmm3, %xmm1
+; CHECK-NEXT: orps %xmm2, %xmm1
+; AVX-LABEL: select_fcmp_oge_f32
+; AVX: vcmpless %xmm0, %xmm1, %xmm0
+; AVX-NEXT: vandps %xmm2, %xmm0, %xmm1
+; AVX-NEXT: vandnps %xmm3, %xmm0, %xmm0
+; AVX-NEXT: vorps %xmm1, %xmm0, %xmm0
+ %1 = fcmp oge float %a, %b
+ %2 = select i1 %1, float %c, float %d
+ ret float %2
+}
+
+define double @select_fcmp_oge_f64(double %a, double %b, double %c, double %d) {
+; CHECK-LABEL: select_fcmp_oge_f64
+; CHECK: cmplesd %xmm0, %xmm1
+; CHECK-NEXT: andpd %xmm1, %xmm2
+; CHECK-NEXT: andnpd %xmm3, %xmm1
+; CHECK-NEXT: orpd %xmm2, %xmm1
+; AVX-LABEL: select_fcmp_oge_f64
+; AVX: vcmplesd %xmm0, %xmm1, %xmm0
+; AVX-NEXT: vandpd %xmm2, %xmm0, %xmm1
+; AVX-NEXT: vandnpd %xmm3, %xmm0, %xmm0
+; AVX-NEXT: vorpd %xmm1, %xmm0, %xmm0
+ %1 = fcmp oge double %a, %b
+ %2 = select i1 %1, double %c, double %d
+ ret double %2
+}
+
+define float @select_fcmp_olt_f32(float %a, float %b, float %c, float %d) {
+; CHECK-LABEL: select_fcmp_olt_f32
+; CHECK: cmpltss %xmm1, %xmm0
+; CHECK-NEXT: andps %xmm0, %xmm2
+; CHECK-NEXT: andnps %xmm3, %xmm0
+; CHECK-NEXT: orps %xmm2, %xmm0
+; AVX-LABEL: select_fcmp_olt_f32
+; AVX: vcmpltss %xmm1, %xmm0, %xmm0
+; AVX-NEXT: vandps %xmm2, %xmm0, %xmm1
+; AVX-NEXT: vandnps %xmm3, %xmm0, %xmm0
+; AVX-NEXT: vorps %xmm1, %xmm0, %xmm0
+ %1 = fcmp olt float %a, %b
+ %2 = select i1 %1, float %c, float %d
+ ret float %2
+}
+
+define double @select_fcmp_olt_f64(double %a, double %b, double %c, double %d) {
+; CHECK-LABEL: select_fcmp_olt_f64
+; CHECK: cmpltsd %xmm1, %xmm0
+; CHECK-NEXT: andpd %xmm0, %xmm2
+; CHECK-NEXT: andnpd %xmm3, %xmm0
+; CHECK-NEXT: orpd %xmm2, %xmm0
+; AVX-LABEL: select_fcmp_olt_f64
+; AVX: vcmpltsd %xmm1, %xmm0, %xmm0
+; AVX-NEXT: vandpd %xmm2, %xmm0, %xmm1
+; AVX-NEXT: vandnpd %xmm3, %xmm0, %xmm0
+; AVX-NEXT: vorpd %xmm1, %xmm0, %xmm0
+ %1 = fcmp olt double %a, %b
+ %2 = select i1 %1, double %c, double %d
+ ret double %2
+}
+
+define float @select_fcmp_ole_f32(float %a, float %b, float %c, float %d) {
+; CHECK-LABEL: select_fcmp_ole_f32
+; CHECK: cmpless %xmm1, %xmm0
+; CHECK-NEXT: andps %xmm0, %xmm2
+; CHECK-NEXT: andnps %xmm3, %xmm0
+; CHECK-NEXT: orps %xmm2, %xmm0
+; AVX-LABEL: select_fcmp_ole_f32
+; AVX: vcmpless %xmm1, %xmm0, %xmm0
+; AVX-NEXT: vandps %xmm2, %xmm0, %xmm1
+; AVX-NEXT: vandnps %xmm3, %xmm0, %xmm0
+; AVX-NEXT: vorps %xmm1, %xmm0, %xmm0
+ %1 = fcmp ole float %a, %b
+ %2 = select i1 %1, float %c, float %d
+ ret float %2
+}
+
+define double @select_fcmp_ole_f64(double %a, double %b, double %c, double %d) {
+; CHECK-LABEL: select_fcmp_ole_f64
+; CHECK: cmplesd %xmm1, %xmm0
+; CHECK-NEXT: andpd %xmm0, %xmm2
+; CHECK-NEXT: andnpd %xmm3, %xmm0
+; CHECK-NEXT: orpd %xmm2, %xmm0
+; AVX-LABEL: select_fcmp_ole_f64
+; AVX: vcmplesd %xmm1, %xmm0, %xmm0
+; AVX-NEXT: vandpd %xmm2, %xmm0, %xmm1
+; AVX-NEXT: vandnpd %xmm3, %xmm0, %xmm0
+; AVX-NEXT: vorpd %xmm1, %xmm0, %xmm0
+ %1 = fcmp ole double %a, %b
+ %2 = select i1 %1, double %c, double %d
+ ret double %2
+}
+
+define float @select_fcmp_ord_f32(float %a, float %b, float %c, float %d) {
+; CHECK-LABEL: select_fcmp_ord_f32
+; CHECK: cmpordss %xmm1, %xmm0
+; CHECK-NEXT: andps %xmm0, %xmm2
+; CHECK-NEXT: andnps %xmm3, %xmm0
+; CHECK-NEXT: orps %xmm2, %xmm0
+; AVX-LABEL: select_fcmp_ord_f32
+; AVX: vcmpordss %xmm1, %xmm0, %xmm0
+; AVX-NEXT: vandps %xmm2, %xmm0, %xmm1
+; AVX-NEXT: vandnps %xmm3, %xmm0, %xmm0
+; AVX-NEXT: vorps %xmm1, %xmm0, %xmm0
+ %1 = fcmp ord float %a, %b
+ %2 = select i1 %1, float %c, float %d
+ ret float %2
+}
+
+define double @select_fcmp_ord_f64(double %a, double %b, double %c, double %d) {
+; CHECK-LABEL: select_fcmp_ord_f64
+; CHECK: cmpordsd %xmm1, %xmm0
+; CHECK-NEXT: andpd %xmm0, %xmm2
+; CHECK-NEXT: andnpd %xmm3, %xmm0
+; CHECK-NEXT: orpd %xmm2, %xmm0
+; AVX-LABEL: select_fcmp_ord_f64
+; AVX: vcmpordsd %xmm1, %xmm0, %xmm0
+; AVX-NEXT: vandpd %xmm2, %xmm0, %xmm1
+; AVX-NEXT: vandnpd %xmm3, %xmm0, %xmm0
+; AVX-NEXT: vorpd %xmm1, %xmm0, %xmm0
+ %1 = fcmp ord double %a, %b
+ %2 = select i1 %1, double %c, double %d
+ ret double %2
+}
+
+define float @select_fcmp_uno_f32(float %a, float %b, float %c, float %d) {
+; CHECK-LABEL: select_fcmp_uno_f32
+; CHECK: cmpunordss %xmm1, %xmm0
+; CHECK-NEXT: andps %xmm0, %xmm2
+; CHECK-NEXT: andnps %xmm3, %xmm0
+; CHECK-NEXT: orps %xmm2, %xmm0
+; AVX-LABEL: select_fcmp_uno_f32
+; AVX: vcmpunordss %xmm1, %xmm0, %xmm0
+; AVX-NEXT: vandps %xmm2, %xmm0, %xmm1
+; AVX-NEXT: vandnps %xmm3, %xmm0, %xmm0
+; AVX-NEXT: vorps %xmm1, %xmm0, %xmm0
+ %1 = fcmp uno float %a, %b
+ %2 = select i1 %1, float %c, float %d
+ ret float %2
+}
+
+define double @select_fcmp_uno_f64(double %a, double %b, double %c, double %d) {
+; CHECK-LABEL: select_fcmp_uno_f64
+; CHECK: cmpunordsd %xmm1, %xmm0
+; CHECK-NEXT: andpd %xmm0, %xmm2
+; CHECK-NEXT: andnpd %xmm3, %xmm0
+; CHECK-NEXT: orpd %xmm2, %xmm0
+; AVX-LABEL: select_fcmp_uno_f64
+; AVX: vcmpunordsd %xmm1, %xmm0, %xmm0
+; AVX-NEXT: vandpd %xmm2, %xmm0, %xmm1
+; AVX-NEXT: vandnpd %xmm3, %xmm0, %xmm0
+; AVX-NEXT: vorpd %xmm1, %xmm0, %xmm0
+ %1 = fcmp uno double %a, %b
+ %2 = select i1 %1, double %c, double %d
+ ret double %2
+}
+
+define float @select_fcmp_ugt_f32(float %a, float %b, float %c, float %d) {
+; CHECK-LABEL: select_fcmp_ugt_f32
+; CHECK: cmpnless %xmm1, %xmm0
+; CHECK-NEXT: andps %xmm0, %xmm2
+; CHECK-NEXT: andnps %xmm3, %xmm0
+; CHECK-NEXT: orps %xmm2, %xmm0
+; AVX-LABEL: select_fcmp_ugt_f32
+; AVX: vcmpnless %xmm1, %xmm0, %xmm0
+; AVX-NEXT: vandps %xmm2, %xmm0, %xmm1
+; AVX-NEXT: vandnps %xmm3, %xmm0, %xmm0
+; AVX-NEXT: vorps %xmm1, %xmm0, %xmm0
+ %1 = fcmp ugt float %a, %b
+ %2 = select i1 %1, float %c, float %d
+ ret float %2
+}
+
+define double @select_fcmp_ugt_f64(double %a, double %b, double %c, double %d) {
+; CHECK-LABEL: select_fcmp_ugt_f64
+; CHECK: cmpnlesd %xmm1, %xmm0
+; CHECK-NEXT: andpd %xmm0, %xmm2
+; CHECK-NEXT: andnpd %xmm3, %xmm0
+; CHECK-NEXT: orpd %xmm2, %xmm0
+; AVX-LABEL: select_fcmp_ugt_f64
+; AVX: vcmpnlesd %xmm1, %xmm0, %xmm0
+; AVX-NEXT: vandpd %xmm2, %xmm0, %xmm1
+; AVX-NEXT: vandnpd %xmm3, %xmm0, %xmm0
+; AVX-NEXT: vorpd %xmm1, %xmm0, %xmm0
+ %1 = fcmp ugt double %a, %b
+ %2 = select i1 %1, double %c, double %d
+ ret double %2
+}
+
+define float @select_fcmp_uge_f32(float %a, float %b, float %c, float %d) {
+; CHECK-LABEL: select_fcmp_uge_f32
+; CHECK: cmpnltss %xmm1, %xmm0
+; CHECK-NEXT: andps %xmm0, %xmm2
+; CHECK-NEXT: andnps %xmm3, %xmm0
+; CHECK-NEXT: orps %xmm2, %xmm0
+; AVX-LABEL: select_fcmp_uge_f32
+; AVX: vcmpnltss %xmm1, %xmm0, %xmm0
+; AVX-NEXT: vandps %xmm2, %xmm0, %xmm1
+; AVX-NEXT: vandnps %xmm3, %xmm0, %xmm0
+; AVX-NEXT: vorps %xmm1, %xmm0, %xmm0
+ %1 = fcmp uge float %a, %b
+ %2 = select i1 %1, float %c, float %d
+ ret float %2
+}
+
+define double @select_fcmp_uge_f64(double %a, double %b, double %c, double %d) {
+; CHECK-LABEL: select_fcmp_uge_f64
+; CHECK: cmpnltsd %xmm1, %xmm0
+; CHECK-NEXT: andpd %xmm0, %xmm2
+; CHECK-NEXT: andnpd %xmm3, %xmm0
+; CHECK-NEXT: orpd %xmm2, %xmm0
+; AVX-LABEL: select_fcmp_uge_f64
+; AVX: vcmpnltsd %xmm1, %xmm0, %xmm0
+; AVX-NEXT: vandpd %xmm2, %xmm0, %xmm1
+; AVX-NEXT: vandnpd %xmm3, %xmm0, %xmm0
+; AVX-NEXT: vorpd %xmm1, %xmm0, %xmm0
+ %1 = fcmp uge double %a, %b
+ %2 = select i1 %1, double %c, double %d
+ ret double %2
+}
+
+define float @select_fcmp_ult_f32(float %a, float %b, float %c, float %d) {
+; CHECK-LABEL: select_fcmp_ult_f32
+; CHECK: cmpnless %xmm0, %xmm1
+; CHECK-NEXT: andps %xmm1, %xmm2
+; CHECK-NEXT: andnps %xmm3, %xmm1
+; CHECK-NEXT: orps %xmm2, %xmm1
+; AVX-LABEL: select_fcmp_ult_f32
+; AVX: vcmpnless %xmm0, %xmm1, %xmm0
+; AVX-NEXT: vandps %xmm2, %xmm0, %xmm1
+; AVX-NEXT: vandnps %xmm3, %xmm0, %xmm0
+; AVX-NEXT: vorps %xmm1, %xmm0, %xmm0
+ %1 = fcmp ult float %a, %b
+ %2 = select i1 %1, float %c, float %d
+ ret float %2
+}
+
+define double @select_fcmp_ult_f64(double %a, double %b, double %c, double %d) {
+; CHECK-LABEL: select_fcmp_ult_f64
+; CHECK: cmpnlesd %xmm0, %xmm1
+; CHECK-NEXT: andpd %xmm1, %xmm2
+; CHECK-NEXT: andnpd %xmm3, %xmm1
+; CHECK-NEXT: orpd %xmm2, %xmm1
+; AVX-LABEL: select_fcmp_ult_f64
+; AVX: vcmpnlesd %xmm0, %xmm1, %xmm0
+; AVX-NEXT: vandpd %xmm2, %xmm0, %xmm1
+; AVX-NEXT: vandnpd %xmm3, %xmm0, %xmm0
+; AVX-NEXT: vorpd %xmm1, %xmm0, %xmm0
+ %1 = fcmp ult double %a, %b
+ %2 = select i1 %1, double %c, double %d
+ ret double %2
+}
+
+define float @select_fcmp_ule_f32(float %a, float %b, float %c, float %d) {
+; CHECK-LABEL: select_fcmp_ule_f32
+; CHECK: cmpnltss %xmm0, %xmm1
+; CHECK-NEXT: andps %xmm1, %xmm2
+; CHECK-NEXT: andnps %xmm3, %xmm1
+; CHECK-NEXT: orps %xmm2, %xmm1
+; AVX-LABEL: select_fcmp_ule_f32
+; AVX: vcmpnltss %xmm0, %xmm1, %xmm0
+; AVX-NEXT: vandps %xmm2, %xmm0, %xmm1
+; AVX-NEXT: vandnps %xmm3, %xmm0, %xmm0
+; AVX-NEXT: vorps %xmm1, %xmm0, %xmm0
+ %1 = fcmp ule float %a, %b
+ %2 = select i1 %1, float %c, float %d
+ ret float %2
+}
+
+define double @select_fcmp_ule_f64(double %a, double %b, double %c, double %d) {
+; CHECK-LABEL: select_fcmp_ule_f64
+; CHECK: cmpnltsd %xmm0, %xmm1
+; CHECK-NEXT: andpd %xmm1, %xmm2
+; CHECK-NEXT: andnpd %xmm3, %xmm1
+; CHECK-NEXT: orpd %xmm2, %xmm1
+; AVX-LABEL: select_fcmp_ule_f64
+; AVX: vcmpnltsd %xmm0, %xmm1, %xmm0
+; AVX-NEXT: vandpd %xmm2, %xmm0, %xmm1
+; AVX-NEXT: vandnpd %xmm3, %xmm0, %xmm0
+; AVX-NEXT: vorpd %xmm1, %xmm0, %xmm0
+ %1 = fcmp ule double %a, %b
+ %2 = select i1 %1, double %c, double %d
+ ret double %2
+}
+
+define float @select_fcmp_une_f32(float %a, float %b, float %c, float %d) {
+; CHECK-LABEL: select_fcmp_une_f32
+; CHECK: cmpneqss %xmm1, %xmm0
+; CHECK-NEXT: andps %xmm0, %xmm2
+; CHECK-NEXT: andnps %xmm3, %xmm0
+; CHECK-NEXT: orps %xmm2, %xmm0
+; AVX-LABEL: select_fcmp_une_f32
+; AVX: vcmpneqss %xmm1, %xmm0, %xmm0
+; AVX-NEXT: vandps %xmm2, %xmm0, %xmm1
+; AVX-NEXT: vandnps %xmm3, %xmm0, %xmm0
+; AVX-NEXT: vorps %xmm1, %xmm0, %xmm0
+ %1 = fcmp une float %a, %b
+ %2 = select i1 %1, float %c, float %d
+ ret float %2
+}
+
+define double @select_fcmp_une_f64(double %a, double %b, double %c, double %d) {
+; CHECK-LABEL: select_fcmp_une_f64
+; CHECK: cmpneqsd %xmm1, %xmm0
+; CHECK-NEXT: andpd %xmm0, %xmm2
+; CHECK-NEXT: andnpd %xmm3, %xmm0
+; CHECK-NEXT: orpd %xmm2, %xmm0
+; AVX-LABEL: select_fcmp_une_f64
+; AVX: vcmpneqsd %xmm1, %xmm0, %xmm0
+; AVX-NEXT: vandpd %xmm2, %xmm0, %xmm1
+; AVX-NEXT: vandnpd %xmm3, %xmm0, %xmm0
+; AVX-NEXT: vorpd %xmm1, %xmm0, %xmm0
+ %1 = fcmp une double %a, %b
+ %2 = select i1 %1, double %c, double %d
+ ret double %2
+}
+
diff --git a/test/CodeGen/X86/fast-isel-select.ll b/test/CodeGen/X86/fast-isel-select.ll
index 53158bc..7b3c99f 100644
--- a/test/CodeGen/X86/fast-isel-select.ll
+++ b/test/CodeGen/X86/fast-isel-select.ll
@@ -4,10 +4,10 @@
; lsb is zero.
; <rdar://problem/15651765>
-; CHECK-LABEL: fastisel_select:
+; CHECK-LABEL: fastisel_select:
; CHECK: subb {{%[a-z0-9]+}}, [[RES:%[a-z0-9]+]]
; CHECK: testb $1, [[RES]]
-; CHECK: cmovel
+; CHECK: cmovnel %edi, %esi
define i32 @fastisel_select(i1 %exchSub2211_, i1 %trunc_8766) {
%shuffleInternal15257_8932 = sub i1 %exchSub2211_, %trunc_8766
%counter_diff1345 = select i1 %shuffleInternal15257_8932, i32 1204476887, i32 0
diff --git a/test/CodeGen/X86/fast-isel-sse12-fptoint.ll b/test/CodeGen/X86/fast-isel-sse12-fptoint.ll
new file mode 100644
index 0000000..769c987
--- /dev/null
+++ b/test/CodeGen/X86/fast-isel-sse12-fptoint.ll
@@ -0,0 +1,54 @@
+; RUN: llc < %s -mtriple=x86_64-apple-darwin -mattr=-avx,+sse2 -fast-isel -fast-isel-abort | FileCheck %s --check-prefix=SSE
+; RUN: llc < %s -mtriple=x86_64-apple-darwin -mattr=-avx2,+avx -fast-isel -fast-isel-abort | FileCheck %s --check-prefix=AVX
+
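+; The scalar input is widened to a vector only to feed the intrinsic; the
+; scalar cvttss2si/cvttsd2si form should still be selected.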
+define i32 @cvt_test1(float %a) {
+; SSE-LABEL: cvt_test1
+; SSE: cvttss2si %xmm0, %eax
+; AVX-LABEL: cvt_test1
+; AVX: vcvttss2si %xmm0, %eax
+ %1 = insertelement <4 x float> undef, float %a, i32 0
+ %2 = insertelement <4 x float> %1, float 0.000000e+00, i32 1
+ %3 = insertelement <4 x float> %2, float 0.000000e+00, i32 2
+ %4 = insertelement <4 x float> %3, float 0.000000e+00, i32 3
+ %5 = call i32 @llvm.x86.sse.cvttss2si(<4 x float> %4)
+ ret i32 %5
+}
+declare i32 @llvm.x86.sse.cvttss2si(<4 x float>) nounwind readnone
+
+define i64 @cvt_test2(float %a) {
+; SSE-LABEL: cvt_test2
+; SSE: cvttss2si %xmm0, %rax
+; AVX-LABEL: cvt_test2
+; AVX: vcvttss2si %xmm0, %rax
+ %1 = insertelement <4 x float> undef, float %a, i32 0
+ %2 = insertelement <4 x float> %1, float 0.000000e+00, i32 1
+ %3 = insertelement <4 x float> %2, float 0.000000e+00, i32 2
+ %4 = insertelement <4 x float> %3, float 0.000000e+00, i32 3
+ %5 = call i64 @llvm.x86.sse.cvttss2si64(<4 x float> %4)
+ ret i64 %5
+}
+declare i64 @llvm.x86.sse.cvttss2si64(<4 x float>) nounwind readnone
+
+define i32 @cvt_test3(double %a) {
+; SSE-LABEL: cvt_test3
+; SSE: cvttsd2si %xmm0, %eax
+; AVX-LABEL: cvt_test3
+; AVX: vcvttsd2si %xmm0, %eax
+ %1 = insertelement <2 x double> undef, double %a, i32 0
+ %2 = insertelement <2 x double> %1, double 0.000000e+00, i32 1
+ %3 = call i32 @llvm.x86.sse2.cvttsd2si(<2 x double> %2)
+ ret i32 %3
+}
+declare i32 @llvm.x86.sse2.cvttsd2si(<2 x double>) nounwind readnone
+
+define i64 @cvt_test4(double %a) {
+; SSE-LABEL: cvt_test4
+; SSE: cvttsd2si %xmm0, %rax
+; AVX-LABEL: cvt_test4
+; AVX: vcvttsd2si %xmm0, %rax
+ %1 = insertelement <2 x double> undef, double %a, i32 0
+ %2 = insertelement <2 x double> %1, double 0.000000e+00, i32 1
+ %3 = call i64 @llvm.x86.sse2.cvttsd2si64(<2 x double> %2)
+ ret i64 %3
+}
+declare i64 @llvm.x86.sse2.cvttsd2si64(<2 x double>) nounwind readnone
diff --git a/test/CodeGen/X86/float-asmprint.ll b/test/CodeGen/X86/float-asmprint.ll
index 4aeae7f..5de9700 100644
--- a/test/CodeGen/X86/float-asmprint.ll
+++ b/test/CodeGen/X86/float-asmprint.ll
@@ -16,8 +16,9 @@
; CHECK-NEXT: .size
; CHECK: varppc128:
-; CHECK-NEXT: .quad 0 # ppc_fp128 -0
-; CHECK-NEXT: .quad -9223372036854775808
+; For ppc_fp128, the high double always comes first.
+; CHECK-NEXT: .quad -9223372036854775808 # ppc_fp128 -0
+; CHECK-NEXT: .quad 0
; CHECK-NEXT: .size
; CHECK: var80:
diff --git a/test/CodeGen/X86/frameaddr.ll b/test/CodeGen/X86/frameaddr.ll
new file mode 100644
index 0000000..6c1ca25
--- /dev/null
+++ b/test/CodeGen/X86/frameaddr.ll
@@ -0,0 +1,44 @@
+; RUN: llc < %s -march=x86 | FileCheck %s --check-prefix=CHECK-32
+; RUN: llc < %s -march=x86 -fast-isel -fast-isel-abort | FileCheck %s --check-prefix=CHECK-32
+; RUN: llc < %s -march=x86-64 | FileCheck %s --check-prefix=CHECK-64
+; RUN: llc < %s -march=x86-64 -fast-isel -fast-isel-abort | FileCheck %s --check-prefix=CHECK-64
+
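+; llvm.frameaddress(0) returns the current frame pointer; each additional
+; level is reached by loading the saved frame pointer at offset 0 of the
+; frame, so test2 performs two loads for a depth of 2.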
+define i8* @test1() nounwind {
+entry:
+; CHECK-32-LABEL: test1
+; CHECK-32: push
+; CHECK-32-NEXT: movl %esp, %ebp
+; CHECK-32-NEXT: movl %ebp, %eax
+; CHECK-32-NEXT: pop
+; CHECK-32-NEXT: ret
+; CHECK-64-LABEL: test1
+; CHECK-64: push
+; CHECK-64-NEXT: movq %rsp, %rbp
+; CHECK-64-NEXT: movq %rbp, %rax
+; CHECK-64-NEXT: pop
+; CHECK-64-NEXT: ret
+ %0 = tail call i8* @llvm.frameaddress(i32 0)
+ ret i8* %0
+}
+
+define i8* @test2() nounwind {
+entry:
+; CHECK-32-LABEL: test2
+; CHECK-32: push
+; CHECK-32-NEXT: movl %esp, %ebp
+; CHECK-32-NEXT: movl (%ebp), %eax
+; CHECK-32-NEXT: movl (%eax), %eax
+; CHECK-32-NEXT: pop
+; CHECK-32-NEXT: ret
+; CHECK-64-LABEL: test2
+; CHECK-64: push
+; CHECK-64-NEXT: movq %rsp, %rbp
+; CHECK-64-NEXT: movq (%rbp), %rax
+; CHECK-64-NEXT: movq (%rax), %rax
+; CHECK-64-NEXT: pop
+; CHECK-64-NEXT: ret
+ %0 = tail call i8* @llvm.frameaddress(i32 2)
+ ret i8* %0
+}
+
+declare i8* @llvm.frameaddress(i32) nounwind readnone
diff --git a/test/CodeGen/X86/gcc_except_table.ll b/test/CodeGen/X86/gcc_except_table.ll
index 8c328ec..a732eb1 100644
--- a/test/CodeGen/X86/gcc_except_table.ll
+++ b/test/CodeGen/X86/gcc_except_table.ll
@@ -13,14 +13,14 @@ define i32 @main() uwtable optsize ssp {
; APPLE: GCC_except_table0:
; APPLE: Lexception0:
-; MINGW64: .cfi_startproc
-; MINGW64: .cfi_personality 0, __gxx_personality_v0
-; MINGW64: .cfi_lsda 0, .Lexception0
-; MINGW64: .cfi_def_cfa_offset 16
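+; Win64 describes the personality and LSDA with SEH directives instead of
+; DWARF CFI.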
+; MINGW64: .seh_proc
+; MINGW64: .seh_handler __gxx_personality_v0
+; MINGW64: .seh_setframe 5, 0
; MINGW64: callq _Unwind_Resume
-; MINGW64: .cfi_endproc
+; MINGW64: .seh_handlerdata
; MINGW64: GCC_except_table0:
; MINGW64: Lexception0:
+; MINGW64: .seh_endproc
; MINGW32: .cfi_startproc
; MINGW32: .cfi_personality 0, ___gxx_personality_v0
diff --git a/test/CodeGen/X86/haddsub-2.ll b/test/CodeGen/X86/haddsub-2.ll
new file mode 100644
index 0000000..ff939a9
--- /dev/null
+++ b/test/CodeGen/X86/haddsub-2.ll
@@ -0,0 +1,802 @@
+; RUN: llc < %s -march=x86-64 -mattr=+sse2,+sse3 | FileCheck %s -check-prefix=CHECK -check-prefix=SSE3
+; RUN: llc < %s -march=x86-64 -mattr=+sse2,+sse3,+ssse3 | FileCheck %s -check-prefix=CHECK -check-prefix=SSSE3
+; RUN: llc < %s -march=x86-64 -mcpu=corei7-avx | FileCheck %s -check-prefix=CHECK -check-prefix=AVX
+; RUN: llc < %s -march=x86-64 -mcpu=core-avx2 | FileCheck %s -check-prefix=CHECK -check-prefix=AVX2
+
+
+
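+; haddps computes [a0+a1, a2+a3, b0+b1, b2+b3], so these extract/add/insert
+; chains should be matched to a single horizontal operation.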
+define <4 x float> @hadd_ps_test1(<4 x float> %A, <4 x float> %B) {
+ %vecext = extractelement <4 x float> %A, i32 0
+ %vecext1 = extractelement <4 x float> %A, i32 1
+ %add = fadd float %vecext, %vecext1
+ %vecinit = insertelement <4 x float> undef, float %add, i32 0
+ %vecext2 = extractelement <4 x float> %A, i32 2
+ %vecext3 = extractelement <4 x float> %A, i32 3
+ %add4 = fadd float %vecext2, %vecext3
+ %vecinit5 = insertelement <4 x float> %vecinit, float %add4, i32 1
+ %vecext6 = extractelement <4 x float> %B, i32 0
+ %vecext7 = extractelement <4 x float> %B, i32 1
+ %add8 = fadd float %vecext6, %vecext7
+ %vecinit9 = insertelement <4 x float> %vecinit5, float %add8, i32 2
+ %vecext10 = extractelement <4 x float> %B, i32 2
+ %vecext11 = extractelement <4 x float> %B, i32 3
+ %add12 = fadd float %vecext10, %vecext11
+ %vecinit13 = insertelement <4 x float> %vecinit9, float %add12, i32 3
+ ret <4 x float> %vecinit13
+}
+; CHECK-LABEL: hadd_ps_test1
+; CHECK: haddps
+; CHECK-NEXT: ret
+
+
+define <4 x float> @hadd_ps_test2(<4 x float> %A, <4 x float> %B) {
+ %vecext = extractelement <4 x float> %A, i32 2
+ %vecext1 = extractelement <4 x float> %A, i32 3
+ %add = fadd float %vecext, %vecext1
+ %vecinit = insertelement <4 x float> undef, float %add, i32 1
+ %vecext2 = extractelement <4 x float> %A, i32 0
+ %vecext3 = extractelement <4 x float> %A, i32 1
+ %add4 = fadd float %vecext2, %vecext3
+ %vecinit5 = insertelement <4 x float> %vecinit, float %add4, i32 0
+ %vecext6 = extractelement <4 x float> %B, i32 2
+ %vecext7 = extractelement <4 x float> %B, i32 3
+ %add8 = fadd float %vecext6, %vecext7
+ %vecinit9 = insertelement <4 x float> %vecinit5, float %add8, i32 3
+ %vecext10 = extractelement <4 x float> %B, i32 0
+ %vecext11 = extractelement <4 x float> %B, i32 1
+ %add12 = fadd float %vecext10, %vecext11
+ %vecinit13 = insertelement <4 x float> %vecinit9, float %add12, i32 2
+ ret <4 x float> %vecinit13
+}
+; CHECK-LABEL: hadd_ps_test2
+; CHECK: haddps
+; CHECK-NEXT: ret
+
+
+define <4 x float> @hsub_ps_test1(<4 x float> %A, <4 x float> %B) {
+ %vecext = extractelement <4 x float> %A, i32 0
+ %vecext1 = extractelement <4 x float> %A, i32 1
+ %sub = fsub float %vecext, %vecext1
+ %vecinit = insertelement <4 x float> undef, float %sub, i32 0
+ %vecext2 = extractelement <4 x float> %A, i32 2
+ %vecext3 = extractelement <4 x float> %A, i32 3
+ %sub4 = fsub float %vecext2, %vecext3
+ %vecinit5 = insertelement <4 x float> %vecinit, float %sub4, i32 1
+ %vecext6 = extractelement <4 x float> %B, i32 0
+ %vecext7 = extractelement <4 x float> %B, i32 1
+ %sub8 = fsub float %vecext6, %vecext7
+ %vecinit9 = insertelement <4 x float> %vecinit5, float %sub8, i32 2
+ %vecext10 = extractelement <4 x float> %B, i32 2
+ %vecext11 = extractelement <4 x float> %B, i32 3
+ %sub12 = fsub float %vecext10, %vecext11
+ %vecinit13 = insertelement <4 x float> %vecinit9, float %sub12, i32 3
+ ret <4 x float> %vecinit13
+}
+; CHECK-LABEL: hsub_ps_test1
+; CHECK: hsubps
+; CHECK-NEXT: ret
+
+
+define <4 x float> @hsub_ps_test2(<4 x float> %A, <4 x float> %B) {
+ %vecext = extractelement <4 x float> %A, i32 2
+ %vecext1 = extractelement <4 x float> %A, i32 3
+ %sub = fsub float %vecext, %vecext1
+ %vecinit = insertelement <4 x float> undef, float %sub, i32 1
+ %vecext2 = extractelement <4 x float> %A, i32 0
+ %vecext3 = extractelement <4 x float> %A, i32 1
+ %sub4 = fsub float %vecext2, %vecext3
+ %vecinit5 = insertelement <4 x float> %vecinit, float %sub4, i32 0
+ %vecext6 = extractelement <4 x float> %B, i32 2
+ %vecext7 = extractelement <4 x float> %B, i32 3
+ %sub8 = fsub float %vecext6, %vecext7
+ %vecinit9 = insertelement <4 x float> %vecinit5, float %sub8, i32 3
+ %vecext10 = extractelement <4 x float> %B, i32 0
+ %vecext11 = extractelement <4 x float> %B, i32 1
+ %sub12 = fsub float %vecext10, %vecext11
+ %vecinit13 = insertelement <4 x float> %vecinit9, float %sub12, i32 2
+ ret <4 x float> %vecinit13
+}
+; CHECK-LABEL: hsub_ps_test2
+; CHECK: hsubps
+; CHECK-NEXT: ret
+
+
+define <4 x i32> @phadd_d_test1(<4 x i32> %A, <4 x i32> %B) {
+ %vecext = extractelement <4 x i32> %A, i32 0
+ %vecext1 = extractelement <4 x i32> %A, i32 1
+ %add = add i32 %vecext, %vecext1
+ %vecinit = insertelement <4 x i32> undef, i32 %add, i32 0
+ %vecext2 = extractelement <4 x i32> %A, i32 2
+ %vecext3 = extractelement <4 x i32> %A, i32 3
+ %add4 = add i32 %vecext2, %vecext3
+ %vecinit5 = insertelement <4 x i32> %vecinit, i32 %add4, i32 1
+ %vecext6 = extractelement <4 x i32> %B, i32 0
+ %vecext7 = extractelement <4 x i32> %B, i32 1
+ %add8 = add i32 %vecext6, %vecext7
+ %vecinit9 = insertelement <4 x i32> %vecinit5, i32 %add8, i32 2
+ %vecext10 = extractelement <4 x i32> %B, i32 2
+ %vecext11 = extractelement <4 x i32> %B, i32 3
+ %add12 = add i32 %vecext10, %vecext11
+ %vecinit13 = insertelement <4 x i32> %vecinit9, i32 %add12, i32 3
+ ret <4 x i32> %vecinit13
+}
+; CHECK-LABEL: phadd_d_test1
+; SSE3-NOT: phaddd
+; SSSE3: phaddd
+; AVX: vphaddd
+; AVX2: vphaddd
+; CHECK: ret
+
+
+define <4 x i32> @phadd_d_test2(<4 x i32> %A, <4 x i32> %B) {
+ %vecext = extractelement <4 x i32> %A, i32 2
+ %vecext1 = extractelement <4 x i32> %A, i32 3
+ %add = add i32 %vecext, %vecext1
+ %vecinit = insertelement <4 x i32> undef, i32 %add, i32 1
+ %vecext2 = extractelement <4 x i32> %A, i32 0
+ %vecext3 = extractelement <4 x i32> %A, i32 1
+ %add4 = add i32 %vecext2, %vecext3
+ %vecinit5 = insertelement <4 x i32> %vecinit, i32 %add4, i32 0
+ %vecext6 = extractelement <4 x i32> %B, i32 3
+ %vecext7 = extractelement <4 x i32> %B, i32 2
+ %add8 = add i32 %vecext6, %vecext7
+ %vecinit9 = insertelement <4 x i32> %vecinit5, i32 %add8, i32 3
+ %vecext10 = extractelement <4 x i32> %B, i32 1
+ %vecext11 = extractelement <4 x i32> %B, i32 0
+ %add12 = add i32 %vecext10, %vecext11
+ %vecinit13 = insertelement <4 x i32> %vecinit9, i32 %add12, i32 2
+ ret <4 x i32> %vecinit13
+}
+; CHECK-LABEL: phadd_d_test2
+; SSE3-NOT: phaddd
+; SSSE3: phaddd
+; AVX: vphaddd
+; AVX2: vphaddd
+; CHECK: ret
+
+
+define <4 x i32> @phsub_d_test1(<4 x i32> %A, <4 x i32> %B) {
+ %vecext = extractelement <4 x i32> %A, i32 0
+ %vecext1 = extractelement <4 x i32> %A, i32 1
+ %sub = sub i32 %vecext, %vecext1
+ %vecinit = insertelement <4 x i32> undef, i32 %sub, i32 0
+ %vecext2 = extractelement <4 x i32> %A, i32 2
+ %vecext3 = extractelement <4 x i32> %A, i32 3
+ %sub4 = sub i32 %vecext2, %vecext3
+ %vecinit5 = insertelement <4 x i32> %vecinit, i32 %sub4, i32 1
+ %vecext6 = extractelement <4 x i32> %B, i32 0
+ %vecext7 = extractelement <4 x i32> %B, i32 1
+ %sub8 = sub i32 %vecext6, %vecext7
+ %vecinit9 = insertelement <4 x i32> %vecinit5, i32 %sub8, i32 2
+ %vecext10 = extractelement <4 x i32> %B, i32 2
+ %vecext11 = extractelement <4 x i32> %B, i32 3
+ %sub12 = sub i32 %vecext10, %vecext11
+ %vecinit13 = insertelement <4 x i32> %vecinit9, i32 %sub12, i32 3
+ ret <4 x i32> %vecinit13
+}
+; CHECK-LABEL: phsub_d_test1
+; SSE3-NOT: phsubd
+; SSSE3: phsubd
+; AVX: vphsubd
+; AVX2: vphsubd
+; CHECK: ret
+
+
+define <4 x i32> @phsub_d_test2(<4 x i32> %A, <4 x i32> %B) {
+ %vecext = extractelement <4 x i32> %A, i32 2
+ %vecext1 = extractelement <4 x i32> %A, i32 3
+ %sub = sub i32 %vecext, %vecext1
+ %vecinit = insertelement <4 x i32> undef, i32 %sub, i32 1
+ %vecext2 = extractelement <4 x i32> %A, i32 0
+ %vecext3 = extractelement <4 x i32> %A, i32 1
+ %sub4 = sub i32 %vecext2, %vecext3
+ %vecinit5 = insertelement <4 x i32> %vecinit, i32 %sub4, i32 0
+ %vecext6 = extractelement <4 x i32> %B, i32 2
+ %vecext7 = extractelement <4 x i32> %B, i32 3
+ %sub8 = sub i32 %vecext6, %vecext7
+ %vecinit9 = insertelement <4 x i32> %vecinit5, i32 %sub8, i32 3
+ %vecext10 = extractelement <4 x i32> %B, i32 0
+ %vecext11 = extractelement <4 x i32> %B, i32 1
+ %sub12 = sub i32 %vecext10, %vecext11
+ %vecinit13 = insertelement <4 x i32> %vecinit9, i32 %sub12, i32 2
+ ret <4 x i32> %vecinit13
+}
+; CHECK-LABEL: phsub_d_test2
+; SSE3-NOT: phsubd
+; SSSE3: phsubd
+; AVX: vphsubd
+; AVX2: vphsubd
+; CHECK: ret
+
+
+define <2 x double> @hadd_pd_test1(<2 x double> %A, <2 x double> %B) {
+ %vecext = extractelement <2 x double> %A, i32 0
+ %vecext1 = extractelement <2 x double> %A, i32 1
+ %add = fadd double %vecext, %vecext1
+ %vecinit = insertelement <2 x double> undef, double %add, i32 0
+ %vecext2 = extractelement <2 x double> %B, i32 0
+ %vecext3 = extractelement <2 x double> %B, i32 1
+ %add2 = fadd double %vecext2, %vecext3
+ %vecinit2 = insertelement <2 x double> %vecinit, double %add2, i32 1
+ ret <2 x double> %vecinit2
+}
+; CHECK-LABEL: hadd_pd_test1
+; CHECK: haddpd
+; CHECK-NEXT: ret
+
+
+define <2 x double> @hadd_pd_test2(<2 x double> %A, <2 x double> %B) {
+ %vecext = extractelement <2 x double> %A, i32 1
+ %vecext1 = extractelement <2 x double> %A, i32 0
+ %add = fadd double %vecext, %vecext1
+ %vecinit = insertelement <2 x double> undef, double %add, i32 0
+ %vecext2 = extractelement <2 x double> %B, i32 1
+ %vecext3 = extractelement <2 x double> %B, i32 0
+ %add2 = fadd double %vecext2, %vecext3
+ %vecinit2 = insertelement <2 x double> %vecinit, double %add2, i32 1
+ ret <2 x double> %vecinit2
+}
+; CHECK-LABEL: hadd_pd_test2
+; CHECK: haddpd
+; CHECK-NEXT: ret
+
+
+define <2 x double> @hsub_pd_test1(<2 x double> %A, <2 x double> %B) {
+ %vecext = extractelement <2 x double> %A, i32 0
+ %vecext1 = extractelement <2 x double> %A, i32 1
+ %sub = fsub double %vecext, %vecext1
+ %vecinit = insertelement <2 x double> undef, double %sub, i32 0
+ %vecext2 = extractelement <2 x double> %B, i32 0
+ %vecext3 = extractelement <2 x double> %B, i32 1
+ %sub2 = fsub double %vecext2, %vecext3
+ %vecinit2 = insertelement <2 x double> %vecinit, double %sub2, i32 1
+ ret <2 x double> %vecinit2
+}
+; CHECK-LABEL: hsub_pd_test1
+; CHECK: hsubpd
+; CHECK-NEXT: ret
+
+
+define <2 x double> @hsub_pd_test2(<2 x double> %A, <2 x double> %B) {
+ %vecext = extractelement <2 x double> %B, i32 0
+ %vecext1 = extractelement <2 x double> %B, i32 1
+ %sub = fsub double %vecext, %vecext1
+ %vecinit = insertelement <2 x double> undef, double %sub, i32 1
+ %vecext2 = extractelement <2 x double> %A, i32 0
+ %vecext3 = extractelement <2 x double> %A, i32 1
+ %sub2 = fsub double %vecext2, %vecext3
+ %vecinit2 = insertelement <2 x double> %vecinit, double %sub2, i32 0
+ ret <2 x double> %vecinit2
+}
+; CHECK-LABEL: hsub_pd_test2
+; CHECK: hsubpd
+; CHECK-NEXT: ret
+
+
+define <4 x double> @avx_vhadd_pd_test(<4 x double> %A, <4 x double> %B) {
+ %vecext = extractelement <4 x double> %A, i32 0
+ %vecext1 = extractelement <4 x double> %A, i32 1
+ %add = fadd double %vecext, %vecext1
+ %vecinit = insertelement <4 x double> undef, double %add, i32 0
+ %vecext2 = extractelement <4 x double> %A, i32 2
+ %vecext3 = extractelement <4 x double> %A, i32 3
+ %add4 = fadd double %vecext2, %vecext3
+ %vecinit5 = insertelement <4 x double> %vecinit, double %add4, i32 1
+ %vecext6 = extractelement <4 x double> %B, i32 0
+ %vecext7 = extractelement <4 x double> %B, i32 1
+ %add8 = fadd double %vecext6, %vecext7
+ %vecinit9 = insertelement <4 x double> %vecinit5, double %add8, i32 2
+ %vecext10 = extractelement <4 x double> %B, i32 2
+ %vecext11 = extractelement <4 x double> %B, i32 3
+ %add12 = fadd double %vecext10, %vecext11
+ %vecinit13 = insertelement <4 x double> %vecinit9, double %add12, i32 3
+ ret <4 x double> %vecinit13
+}
+; CHECK-LABEL: avx_vhadd_pd_test
+; SSE3: haddpd
+; SSE3-NEXT: haddpd
+; SSSE3: haddpd
+; SSSE3: haddpd
+; AVX: vhaddpd
+; AVX: vhaddpd
+; AVX2: vhaddpd
+; AVX2: vhaddpd
+; CHECK: ret
+
+
+define <4 x double> @avx_vhsub_pd_test(<4 x double> %A, <4 x double> %B) {
+ %vecext = extractelement <4 x double> %A, i32 0
+ %vecext1 = extractelement <4 x double> %A, i32 1
+ %sub = fsub double %vecext, %vecext1
+ %vecinit = insertelement <4 x double> undef, double %sub, i32 0
+ %vecext2 = extractelement <4 x double> %A, i32 2
+ %vecext3 = extractelement <4 x double> %A, i32 3
+ %sub4 = fsub double %vecext2, %vecext3
+ %vecinit5 = insertelement <4 x double> %vecinit, double %sub4, i32 1
+ %vecext6 = extractelement <4 x double> %B, i32 0
+ %vecext7 = extractelement <4 x double> %B, i32 1
+ %sub8 = fsub double %vecext6, %vecext7
+ %vecinit9 = insertelement <4 x double> %vecinit5, double %sub8, i32 2
+ %vecext10 = extractelement <4 x double> %B, i32 2
+ %vecext11 = extractelement <4 x double> %B, i32 3
+ %sub12 = fsub double %vecext10, %vecext11
+ %vecinit13 = insertelement <4 x double> %vecinit9, double %sub12, i32 3
+ ret <4 x double> %vecinit13
+}
+; CHECK-LABEL: avx_vhsub_pd_test
+; SSE3: hsubpd
+; SSE3-NEXT: hsubpd
+; SSSE3: hsubpd
+; SSSE3-NEXT: hsubpd
+; AVX: vhsubpd
+; AVX: vhsubpd
+; AVX2: vhsubpd
+; AVX2: vhsubpd
+; CHECK: ret
+
+
+define <8 x i32> @avx2_vphadd_d_test(<8 x i32> %A, <8 x i32> %B) {
+ %vecext = extractelement <8 x i32> %A, i32 0
+ %vecext1 = extractelement <8 x i32> %A, i32 1
+ %add = add i32 %vecext, %vecext1
+ %vecinit = insertelement <8 x i32> undef, i32 %add, i32 0
+ %vecext2 = extractelement <8 x i32> %A, i32 2
+ %vecext3 = extractelement <8 x i32> %A, i32 3
+ %add4 = add i32 %vecext2, %vecext3
+ %vecinit5 = insertelement <8 x i32> %vecinit, i32 %add4, i32 1
+ %vecext6 = extractelement <8 x i32> %A, i32 4
+ %vecext7 = extractelement <8 x i32> %A, i32 5
+ %add8 = add i32 %vecext6, %vecext7
+ %vecinit9 = insertelement <8 x i32> %vecinit5, i32 %add8, i32 2
+ %vecext10 = extractelement <8 x i32> %A, i32 6
+ %vecext11 = extractelement <8 x i32> %A, i32 7
+ %add12 = add i32 %vecext10, %vecext11
+ %vecinit13 = insertelement <8 x i32> %vecinit9, i32 %add12, i32 3
+ %vecext14 = extractelement <8 x i32> %B, i32 0
+ %vecext15 = extractelement <8 x i32> %B, i32 1
+ %add16 = add i32 %vecext14, %vecext15
+ %vecinit17 = insertelement <8 x i32> %vecinit13, i32 %add16, i32 4
+ %vecext18 = extractelement <8 x i32> %B, i32 2
+ %vecext19 = extractelement <8 x i32> %B, i32 3
+ %add20 = add i32 %vecext18, %vecext19
+ %vecinit21 = insertelement <8 x i32> %vecinit17, i32 %add20, i32 5
+ %vecext22 = extractelement <8 x i32> %B, i32 4
+ %vecext23 = extractelement <8 x i32> %B, i32 5
+ %add24 = add i32 %vecext22, %vecext23
+ %vecinit25 = insertelement <8 x i32> %vecinit21, i32 %add24, i32 6
+ %vecext26 = extractelement <8 x i32> %B, i32 6
+ %vecext27 = extractelement <8 x i32> %B, i32 7
+ %add28 = add i32 %vecext26, %vecext27
+ %vecinit29 = insertelement <8 x i32> %vecinit25, i32 %add28, i32 7
+ ret <8 x i32> %vecinit29
+}
+; CHECK-LABEL: avx2_vphadd_d_test
+; SSE3-NOT: phaddd
+; SSSE3: phaddd
+; SSSE3-NEXT: phaddd
+; AVX: vphaddd
+; AVX: vphaddd
+; AVX2: vphaddd
+; AVX2: vphaddd
+; CHECK: ret
+
+define <16 x i16> @avx2_vphadd_w_test(<16 x i16> %a, <16 x i16> %b) {
+ %vecext = extractelement <16 x i16> %a, i32 0
+ %vecext1 = extractelement <16 x i16> %a, i32 1
+ %add = add i16 %vecext, %vecext1
+ %vecinit = insertelement <16 x i16> undef, i16 %add, i32 0
+ %vecext4 = extractelement <16 x i16> %a, i32 2
+ %vecext6 = extractelement <16 x i16> %a, i32 3
+ %add8 = add i16 %vecext4, %vecext6
+ %vecinit10 = insertelement <16 x i16> %vecinit, i16 %add8, i32 1
+ %vecext11 = extractelement <16 x i16> %a, i32 4
+ %vecext13 = extractelement <16 x i16> %a, i32 5
+ %add15 = add i16 %vecext11, %vecext13
+ %vecinit17 = insertelement <16 x i16> %vecinit10, i16 %add15, i32 2
+ %vecext18 = extractelement <16 x i16> %a, i32 6
+ %vecext20 = extractelement <16 x i16> %a, i32 7
+ %add22 = add i16 %vecext18, %vecext20
+ %vecinit24 = insertelement <16 x i16> %vecinit17, i16 %add22, i32 3
+ %vecext25 = extractelement <16 x i16> %a, i32 8
+ %vecext27 = extractelement <16 x i16> %a, i32 9
+ %add29 = add i16 %vecext25, %vecext27
+ %vecinit31 = insertelement <16 x i16> %vecinit24, i16 %add29, i32 4
+ %vecext32 = extractelement <16 x i16> %a, i32 10
+ %vecext34 = extractelement <16 x i16> %a, i32 11
+ %add36 = add i16 %vecext32, %vecext34
+ %vecinit38 = insertelement <16 x i16> %vecinit31, i16 %add36, i32 5
+ %vecext39 = extractelement <16 x i16> %a, i32 12
+ %vecext41 = extractelement <16 x i16> %a, i32 13
+ %add43 = add i16 %vecext39, %vecext41
+ %vecinit45 = insertelement <16 x i16> %vecinit38, i16 %add43, i32 6
+ %vecext46 = extractelement <16 x i16> %a, i32 14
+ %vecext48 = extractelement <16 x i16> %a, i32 15
+ %add50 = add i16 %vecext46, %vecext48
+ %vecinit52 = insertelement <16 x i16> %vecinit45, i16 %add50, i32 7
+ %vecext53 = extractelement <16 x i16> %b, i32 0
+ %vecext55 = extractelement <16 x i16> %b, i32 1
+ %add57 = add i16 %vecext53, %vecext55
+ %vecinit59 = insertelement <16 x i16> %vecinit52, i16 %add57, i32 8
+ %vecext60 = extractelement <16 x i16> %b, i32 2
+ %vecext62 = extractelement <16 x i16> %b, i32 3
+ %add64 = add i16 %vecext60, %vecext62
+ %vecinit66 = insertelement <16 x i16> %vecinit59, i16 %add64, i32 9
+ %vecext67 = extractelement <16 x i16> %b, i32 4
+ %vecext69 = extractelement <16 x i16> %b, i32 5
+ %add71 = add i16 %vecext67, %vecext69
+ %vecinit73 = insertelement <16 x i16> %vecinit66, i16 %add71, i32 10
+ %vecext74 = extractelement <16 x i16> %b, i32 6
+ %vecext76 = extractelement <16 x i16> %b, i32 7
+ %add78 = add i16 %vecext74, %vecext76
+ %vecinit80 = insertelement <16 x i16> %vecinit73, i16 %add78, i32 11
+ %vecext81 = extractelement <16 x i16> %b, i32 8
+ %vecext83 = extractelement <16 x i16> %b, i32 9
+ %add85 = add i16 %vecext81, %vecext83
+ %vecinit87 = insertelement <16 x i16> %vecinit80, i16 %add85, i32 12
+ %vecext88 = extractelement <16 x i16> %b, i32 10
+ %vecext90 = extractelement <16 x i16> %b, i32 11
+ %add92 = add i16 %vecext88, %vecext90
+ %vecinit94 = insertelement <16 x i16> %vecinit87, i16 %add92, i32 13
+ %vecext95 = extractelement <16 x i16> %b, i32 12
+ %vecext97 = extractelement <16 x i16> %b, i32 13
+ %add99 = add i16 %vecext95, %vecext97
+ %vecinit101 = insertelement <16 x i16> %vecinit94, i16 %add99, i32 14
+ %vecext102 = extractelement <16 x i16> %b, i32 14
+ %vecext104 = extractelement <16 x i16> %b, i32 15
+ %add106 = add i16 %vecext102, %vecext104
+ %vecinit108 = insertelement <16 x i16> %vecinit101, i16 %add106, i32 15
+ ret <16 x i16> %vecinit108
+}
+; CHECK-LABEL: avx2_vphadd_w_test
+; SSE3-NOT: phaddw
+; SSSE3: phaddw
+; SSSE3-NEXT: phaddw
+; AVX: vphaddw
+; AVX: vphaddw
+; AVX2: vphaddw
+; AVX2: vphaddw
+; CHECK: ret
+
+
+; Verify that we don't select horizontal subs in the following functions.
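+; In each function at least one element is computed with its operands in
+; reversed order (e.g. B[1]-B[0] rather than B[0]-B[1]), so the combined
+; pattern no longer matches a horizontal subtract.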
+
+define <4 x i32> @not_a_hsub_1(<4 x i32> %A, <4 x i32> %B) {
+ %vecext = extractelement <4 x i32> %A, i32 0
+ %vecext1 = extractelement <4 x i32> %A, i32 1
+ %sub = sub i32 %vecext, %vecext1
+ %vecinit = insertelement <4 x i32> undef, i32 %sub, i32 0
+ %vecext2 = extractelement <4 x i32> %A, i32 2
+ %vecext3 = extractelement <4 x i32> %A, i32 3
+ %sub4 = sub i32 %vecext2, %vecext3
+ %vecinit5 = insertelement <4 x i32> %vecinit, i32 %sub4, i32 1
+ %vecext6 = extractelement <4 x i32> %B, i32 1
+ %vecext7 = extractelement <4 x i32> %B, i32 0
+ %sub8 = sub i32 %vecext6, %vecext7
+ %vecinit9 = insertelement <4 x i32> %vecinit5, i32 %sub8, i32 2
+ %vecext10 = extractelement <4 x i32> %B, i32 3
+ %vecext11 = extractelement <4 x i32> %B, i32 2
+ %sub12 = sub i32 %vecext10, %vecext11
+ %vecinit13 = insertelement <4 x i32> %vecinit9, i32 %sub12, i32 3
+ ret <4 x i32> %vecinit13
+}
+; CHECK-LABEL: not_a_hsub_1
+; CHECK-NOT: phsubd
+; CHECK: ret
+
+
+define <4 x float> @not_a_hsub_2(<4 x float> %A, <4 x float> %B) {
+ %vecext = extractelement <4 x float> %A, i32 2
+ %vecext1 = extractelement <4 x float> %A, i32 3
+ %sub = fsub float %vecext, %vecext1
+ %vecinit = insertelement <4 x float> undef, float %sub, i32 1
+ %vecext2 = extractelement <4 x float> %A, i32 0
+ %vecext3 = extractelement <4 x float> %A, i32 1
+ %sub4 = fsub float %vecext2, %vecext3
+ %vecinit5 = insertelement <4 x float> %vecinit, float %sub4, i32 0
+ %vecext6 = extractelement <4 x float> %B, i32 3
+ %vecext7 = extractelement <4 x float> %B, i32 2
+ %sub8 = fsub float %vecext6, %vecext7
+ %vecinit9 = insertelement <4 x float> %vecinit5, float %sub8, i32 3
+ %vecext10 = extractelement <4 x float> %B, i32 0
+ %vecext11 = extractelement <4 x float> %B, i32 1
+ %sub12 = fsub float %vecext10, %vecext11
+ %vecinit13 = insertelement <4 x float> %vecinit9, float %sub12, i32 2
+ ret <4 x float> %vecinit13
+}
+; CHECK-LABEL: not_a_hsub_2
+; CHECK-NOT: hsubps
+; CHECK: ret
+
+
+define <2 x double> @not_a_hsub_3(<2 x double> %A, <2 x double> %B) {
+ %vecext = extractelement <2 x double> %B, i32 0
+ %vecext1 = extractelement <2 x double> %B, i32 1
+ %sub = fsub double %vecext, %vecext1
+ %vecinit = insertelement <2 x double> undef, double %sub, i32 1
+ %vecext2 = extractelement <2 x double> %A, i32 1
+ %vecext3 = extractelement <2 x double> %A, i32 0
+ %sub2 = fsub double %vecext2, %vecext3
+ %vecinit2 = insertelement <2 x double> %vecinit, double %sub2, i32 0
+ ret <2 x double> %vecinit2
+}
+; CHECK-LABEL: not_a_hsub_3
+; CHECK-NOT: hsubpd
+; CHECK: ret
+
+
+; Test AVX horizontal add/sub of packed single/double precision
+; floating point values from 256-bit vectors.
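+; Note that vhaddps/vhsubps and vhaddpd/vhsubpd operate per 128-bit lane, so
+; for inputs %a and %b the expected <8 x float> result layout is
+;   [a0+a1, a2+a3, b0+b1, b2+b3 | a4+a5, a6+a7, b4+b5, b6+b7]
+; which is exactly how the functions below assemble their return values.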
+
+define <8 x float> @avx_vhadd_ps(<8 x float> %a, <8 x float> %b) {
+ %vecext = extractelement <8 x float> %a, i32 0
+ %vecext1 = extractelement <8 x float> %a, i32 1
+ %add = fadd float %vecext, %vecext1
+ %vecinit = insertelement <8 x float> undef, float %add, i32 0
+ %vecext2 = extractelement <8 x float> %a, i32 2
+ %vecext3 = extractelement <8 x float> %a, i32 3
+ %add4 = fadd float %vecext2, %vecext3
+ %vecinit5 = insertelement <8 x float> %vecinit, float %add4, i32 1
+ %vecext6 = extractelement <8 x float> %b, i32 0
+ %vecext7 = extractelement <8 x float> %b, i32 1
+ %add8 = fadd float %vecext6, %vecext7
+ %vecinit9 = insertelement <8 x float> %vecinit5, float %add8, i32 2
+ %vecext10 = extractelement <8 x float> %b, i32 2
+ %vecext11 = extractelement <8 x float> %b, i32 3
+ %add12 = fadd float %vecext10, %vecext11
+ %vecinit13 = insertelement <8 x float> %vecinit9, float %add12, i32 3
+ %vecext14 = extractelement <8 x float> %a, i32 4
+ %vecext15 = extractelement <8 x float> %a, i32 5
+ %add16 = fadd float %vecext14, %vecext15
+ %vecinit17 = insertelement <8 x float> %vecinit13, float %add16, i32 4
+ %vecext18 = extractelement <8 x float> %a, i32 6
+ %vecext19 = extractelement <8 x float> %a, i32 7
+ %add20 = fadd float %vecext18, %vecext19
+ %vecinit21 = insertelement <8 x float> %vecinit17, float %add20, i32 5
+ %vecext22 = extractelement <8 x float> %b, i32 4
+ %vecext23 = extractelement <8 x float> %b, i32 5
+ %add24 = fadd float %vecext22, %vecext23
+ %vecinit25 = insertelement <8 x float> %vecinit21, float %add24, i32 6
+ %vecext26 = extractelement <8 x float> %b, i32 6
+ %vecext27 = extractelement <8 x float> %b, i32 7
+ %add28 = fadd float %vecext26, %vecext27
+ %vecinit29 = insertelement <8 x float> %vecinit25, float %add28, i32 7
+ ret <8 x float> %vecinit29
+}
+; CHECK-LABEL: avx_vhadd_ps
+; SSE3: haddps
+; SSE3-NEXT: haddps
+; SSSE3: haddps
+; SSSE3-NEXT: haddps
+; AVX: vhaddps
+; AVX2: vhaddps
+; CHECK: ret
+
+
+define <8 x float> @avx_vhsub_ps(<8 x float> %a, <8 x float> %b) {
+ %vecext = extractelement <8 x float> %a, i32 0
+ %vecext1 = extractelement <8 x float> %a, i32 1
+ %sub = fsub float %vecext, %vecext1
+ %vecinit = insertelement <8 x float> undef, float %sub, i32 0
+ %vecext2 = extractelement <8 x float> %a, i32 2
+ %vecext3 = extractelement <8 x float> %a, i32 3
+ %sub4 = fsub float %vecext2, %vecext3
+ %vecinit5 = insertelement <8 x float> %vecinit, float %sub4, i32 1
+ %vecext6 = extractelement <8 x float> %b, i32 0
+ %vecext7 = extractelement <8 x float> %b, i32 1
+ %sub8 = fsub float %vecext6, %vecext7
+ %vecinit9 = insertelement <8 x float> %vecinit5, float %sub8, i32 2
+ %vecext10 = extractelement <8 x float> %b, i32 2
+ %vecext11 = extractelement <8 x float> %b, i32 3
+ %sub12 = fsub float %vecext10, %vecext11
+ %vecinit13 = insertelement <8 x float> %vecinit9, float %sub12, i32 3
+ %vecext14 = extractelement <8 x float> %a, i32 4
+ %vecext15 = extractelement <8 x float> %a, i32 5
+ %sub16 = fsub float %vecext14, %vecext15
+ %vecinit17 = insertelement <8 x float> %vecinit13, float %sub16, i32 4
+ %vecext18 = extractelement <8 x float> %a, i32 6
+ %vecext19 = extractelement <8 x float> %a, i32 7
+ %sub20 = fsub float %vecext18, %vecext19
+ %vecinit21 = insertelement <8 x float> %vecinit17, float %sub20, i32 5
+ %vecext22 = extractelement <8 x float> %b, i32 4
+ %vecext23 = extractelement <8 x float> %b, i32 5
+ %sub24 = fsub float %vecext22, %vecext23
+ %vecinit25 = insertelement <8 x float> %vecinit21, float %sub24, i32 6
+ %vecext26 = extractelement <8 x float> %b, i32 6
+ %vecext27 = extractelement <8 x float> %b, i32 7
+ %sub28 = fsub float %vecext26, %vecext27
+ %vecinit29 = insertelement <8 x float> %vecinit25, float %sub28, i32 7
+ ret <8 x float> %vecinit29
+}
+; CHECK-LABEL: avx_vhsub_ps
+; SSE3: hsubps
+; SSE3-NEXT: hsubps
+; SSSE3: hsubps
+; SSSE3-NEXT: hsubps
+; AVX: vhsubps
+; AVX2: vhsubps
+; CHECK: ret
+
+
+define <4 x double> @avx_hadd_pd(<4 x double> %a, <4 x double> %b) {
+ %vecext = extractelement <4 x double> %a, i32 0
+ %vecext1 = extractelement <4 x double> %a, i32 1
+ %add = fadd double %vecext, %vecext1
+ %vecinit = insertelement <4 x double> undef, double %add, i32 0
+ %vecext2 = extractelement <4 x double> %b, i32 0
+ %vecext3 = extractelement <4 x double> %b, i32 1
+ %add4 = fadd double %vecext2, %vecext3
+ %vecinit5 = insertelement <4 x double> %vecinit, double %add4, i32 1
+ %vecext6 = extractelement <4 x double> %a, i32 2
+ %vecext7 = extractelement <4 x double> %a, i32 3
+ %add8 = fadd double %vecext6, %vecext7
+ %vecinit9 = insertelement <4 x double> %vecinit5, double %add8, i32 2
+ %vecext10 = extractelement <4 x double> %b, i32 2
+ %vecext11 = extractelement <4 x double> %b, i32 3
+ %add12 = fadd double %vecext10, %vecext11
+ %vecinit13 = insertelement <4 x double> %vecinit9, double %add12, i32 3
+ ret <4 x double> %vecinit13
+}
+; CHECK-LABEL: avx_hadd_pd
+; SSE3: haddpd
+; SSE3-NEXT: haddpd
+; SSSE3: haddpd
+; SSSE3-NEXT: haddpd
+; AVX: vhaddpd
+; AVX2: vhaddpd
+; CHECK: ret
+
+
+define <4 x double> @avx_hsub_pd(<4 x double> %a, <4 x double> %b) {
+ %vecext = extractelement <4 x double> %a, i32 0
+ %vecext1 = extractelement <4 x double> %a, i32 1
+ %sub = fsub double %vecext, %vecext1
+ %vecinit = insertelement <4 x double> undef, double %sub, i32 0
+ %vecext2 = extractelement <4 x double> %b, i32 0
+ %vecext3 = extractelement <4 x double> %b, i32 1
+ %sub4 = fsub double %vecext2, %vecext3
+ %vecinit5 = insertelement <4 x double> %vecinit, double %sub4, i32 1
+ %vecext6 = extractelement <4 x double> %a, i32 2
+ %vecext7 = extractelement <4 x double> %a, i32 3
+ %sub8 = fsub double %vecext6, %vecext7
+ %vecinit9 = insertelement <4 x double> %vecinit5, double %sub8, i32 2
+ %vecext10 = extractelement <4 x double> %b, i32 2
+ %vecext11 = extractelement <4 x double> %b, i32 3
+ %sub12 = fsub double %vecext10, %vecext11
+ %vecinit13 = insertelement <4 x double> %vecinit9, double %sub12, i32 3
+ ret <4 x double> %vecinit13
+}
+; CHECK-LABEL: avx_hsub_pd
+; SSE3: hsubpd
+; SSE3-NEXT: hsubpd
+; SSSE3: hsubpd
+; SSSE3-NEXT: hsubpd
+; AVX: vhsubpd
+; AVX2: vhsubpd
+; CHECK: ret
+
+
+; Test AVX2 horizontal add of packed integer values from 256-bit vectors.
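+; As with the floating point variants, vphaddd and vphaddw operate per 128-bit
+; lane, which is why these functions interleave sums of %a and %b within each
+; 128-bit half of the result.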
+
+define <8 x i32> @avx2_hadd_d(<8 x i32> %a, <8 x i32> %b) {
+ %vecext = extractelement <8 x i32> %a, i32 0
+ %vecext1 = extractelement <8 x i32> %a, i32 1
+ %add = add i32 %vecext, %vecext1
+ %vecinit = insertelement <8 x i32> undef, i32 %add, i32 0
+ %vecext2 = extractelement <8 x i32> %a, i32 2
+ %vecext3 = extractelement <8 x i32> %a, i32 3
+ %add4 = add i32 %vecext2, %vecext3
+ %vecinit5 = insertelement <8 x i32> %vecinit, i32 %add4, i32 1
+ %vecext6 = extractelement <8 x i32> %b, i32 0
+ %vecext7 = extractelement <8 x i32> %b, i32 1
+ %add8 = add i32 %vecext6, %vecext7
+ %vecinit9 = insertelement <8 x i32> %vecinit5, i32 %add8, i32 2
+ %vecext10 = extractelement <8 x i32> %b, i32 2
+ %vecext11 = extractelement <8 x i32> %b, i32 3
+ %add12 = add i32 %vecext10, %vecext11
+ %vecinit13 = insertelement <8 x i32> %vecinit9, i32 %add12, i32 3
+ %vecext14 = extractelement <8 x i32> %a, i32 4
+ %vecext15 = extractelement <8 x i32> %a, i32 5
+ %add16 = add i32 %vecext14, %vecext15
+ %vecinit17 = insertelement <8 x i32> %vecinit13, i32 %add16, i32 4
+ %vecext18 = extractelement <8 x i32> %a, i32 6
+ %vecext19 = extractelement <8 x i32> %a, i32 7
+ %add20 = add i32 %vecext18, %vecext19
+ %vecinit21 = insertelement <8 x i32> %vecinit17, i32 %add20, i32 5
+ %vecext22 = extractelement <8 x i32> %b, i32 4
+ %vecext23 = extractelement <8 x i32> %b, i32 5
+ %add24 = add i32 %vecext22, %vecext23
+ %vecinit25 = insertelement <8 x i32> %vecinit21, i32 %add24, i32 6
+ %vecext26 = extractelement <8 x i32> %b, i32 6
+ %vecext27 = extractelement <8 x i32> %b, i32 7
+ %add28 = add i32 %vecext26, %vecext27
+ %vecinit29 = insertelement <8 x i32> %vecinit25, i32 %add28, i32 7
+ ret <8 x i32> %vecinit29
+}
+; CHECK-LABEL: avx2_hadd_d
+; SSE3-NOT: phaddd
+; SSSE3: phaddd
+; SSSE3-NEXT: phaddd
+; AVX: vphaddd
+; AVX: vphaddd
+; AVX2: vphaddd
+; AVX2-NOT: vphaddd
+; CHECK: ret
+
+
+define <16 x i16> @avx2_hadd_w(<16 x i16> %a, <16 x i16> %b) {
+ %vecext = extractelement <16 x i16> %a, i32 0
+ %vecext1 = extractelement <16 x i16> %a, i32 1
+ %add = add i16 %vecext, %vecext1
+ %vecinit = insertelement <16 x i16> undef, i16 %add, i32 0
+ %vecext4 = extractelement <16 x i16> %a, i32 2
+ %vecext6 = extractelement <16 x i16> %a, i32 3
+ %add8 = add i16 %vecext4, %vecext6
+ %vecinit10 = insertelement <16 x i16> %vecinit, i16 %add8, i32 1
+ %vecext11 = extractelement <16 x i16> %a, i32 4
+ %vecext13 = extractelement <16 x i16> %a, i32 5
+ %add15 = add i16 %vecext11, %vecext13
+ %vecinit17 = insertelement <16 x i16> %vecinit10, i16 %add15, i32 2
+ %vecext18 = extractelement <16 x i16> %a, i32 6
+ %vecext20 = extractelement <16 x i16> %a, i32 7
+ %add22 = add i16 %vecext18, %vecext20
+ %vecinit24 = insertelement <16 x i16> %vecinit17, i16 %add22, i32 3
+ %vecext25 = extractelement <16 x i16> %a, i32 8
+ %vecext27 = extractelement <16 x i16> %a, i32 9
+ %add29 = add i16 %vecext25, %vecext27
+ %vecinit31 = insertelement <16 x i16> %vecinit24, i16 %add29, i32 8
+ %vecext32 = extractelement <16 x i16> %a, i32 10
+ %vecext34 = extractelement <16 x i16> %a, i32 11
+ %add36 = add i16 %vecext32, %vecext34
+ %vecinit38 = insertelement <16 x i16> %vecinit31, i16 %add36, i32 9
+ %vecext39 = extractelement <16 x i16> %a, i32 12
+ %vecext41 = extractelement <16 x i16> %a, i32 13
+ %add43 = add i16 %vecext39, %vecext41
+ %vecinit45 = insertelement <16 x i16> %vecinit38, i16 %add43, i32 10
+ %vecext46 = extractelement <16 x i16> %a, i32 14
+ %vecext48 = extractelement <16 x i16> %a, i32 15
+ %add50 = add i16 %vecext46, %vecext48
+ %vecinit52 = insertelement <16 x i16> %vecinit45, i16 %add50, i32 11
+ %vecext53 = extractelement <16 x i16> %b, i32 0
+ %vecext55 = extractelement <16 x i16> %b, i32 1
+ %add57 = add i16 %vecext53, %vecext55
+ %vecinit59 = insertelement <16 x i16> %vecinit52, i16 %add57, i32 4
+ %vecext60 = extractelement <16 x i16> %b, i32 2
+ %vecext62 = extractelement <16 x i16> %b, i32 3
+ %add64 = add i16 %vecext60, %vecext62
+ %vecinit66 = insertelement <16 x i16> %vecinit59, i16 %add64, i32 5
+ %vecext67 = extractelement <16 x i16> %b, i32 4
+ %vecext69 = extractelement <16 x i16> %b, i32 5
+ %add71 = add i16 %vecext67, %vecext69
+ %vecinit73 = insertelement <16 x i16> %vecinit66, i16 %add71, i32 6
+ %vecext74 = extractelement <16 x i16> %b, i32 6
+ %vecext76 = extractelement <16 x i16> %b, i32 7
+ %add78 = add i16 %vecext74, %vecext76
+ %vecinit80 = insertelement <16 x i16> %vecinit73, i16 %add78, i32 7
+ %vecext81 = extractelement <16 x i16> %b, i32 8
+ %vecext83 = extractelement <16 x i16> %b, i32 9
+ %add85 = add i16 %vecext81, %vecext83
+ %vecinit87 = insertelement <16 x i16> %vecinit80, i16 %add85, i32 12
+ %vecext88 = extractelement <16 x i16> %b, i32 10
+ %vecext90 = extractelement <16 x i16> %b, i32 11
+ %add92 = add i16 %vecext88, %vecext90
+ %vecinit94 = insertelement <16 x i16> %vecinit87, i16 %add92, i32 13
+ %vecext95 = extractelement <16 x i16> %b, i32 12
+ %vecext97 = extractelement <16 x i16> %b, i32 13
+ %add99 = add i16 %vecext95, %vecext97
+ %vecinit101 = insertelement <16 x i16> %vecinit94, i16 %add99, i32 14
+ %vecext102 = extractelement <16 x i16> %b, i32 14
+ %vecext104 = extractelement <16 x i16> %b, i32 15
+ %add106 = add i16 %vecext102, %vecext104
+ %vecinit108 = insertelement <16 x i16> %vecinit101, i16 %add106, i32 15
+ ret <16 x i16> %vecinit108
+}
+; CHECK-LABEL: avx2_hadd_w
+; SSE3-NOT: phaddw
+; SSSE3: phaddw
+; SSSE3-NEXT: phaddw
+; AVX: vphaddw
+; AVX: vphaddw
+; AVX2: vphaddw
+; AVX2-NOT: vphaddw
+; CHECK: ret
+
diff --git a/test/CodeGen/X86/haddsub-undef.ll b/test/CodeGen/X86/haddsub-undef.ll
new file mode 100644
index 0000000..954a9d9
--- /dev/null
+++ b/test/CodeGen/X86/haddsub-undef.ll
@@ -0,0 +1,325 @@
+; RUN: llc < %s -march=x86-64 -mcpu=corei7 -mattr=+ssse3 | FileCheck %s -check-prefix=CHECK -check-prefix=SSE
+; RUN: llc < %s -march=x86-64 -mcpu=corei7-avx | FileCheck %s -check-prefix=CHECK -check-prefix=AVX
+; RUN: llc < %s -march=x86-64 -mcpu=core-avx2 | FileCheck %s -check-prefix=CHECK -check-prefix=AVX2
+
+; Verify that we correctly fold horizontal binops even in the presence of UNDEFs.
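+; Elements that are never written stay undef, so a horizontal add whose
+; corresponding lanes are don't-care values is still a valid expansion.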
+
+define <4 x float> @test1_undef(<4 x float> %a, <4 x float> %b) {
+ %vecext = extractelement <4 x float> %a, i32 0
+ %vecext1 = extractelement <4 x float> %a, i32 1
+ %add = fadd float %vecext, %vecext1
+ %vecinit = insertelement <4 x float> undef, float %add, i32 0
+ %vecext2 = extractelement <4 x float> %a, i32 2
+ %vecext3 = extractelement <4 x float> %a, i32 3
+ %add4 = fadd float %vecext2, %vecext3
+ %vecinit5 = insertelement <4 x float> %vecinit, float %add4, i32 1
+ %vecext10 = extractelement <4 x float> %b, i32 2
+ %vecext11 = extractelement <4 x float> %b, i32 3
+ %add12 = fadd float %vecext10, %vecext11
+ %vecinit13 = insertelement <4 x float> %vecinit5, float %add12, i32 3
+ ret <4 x float> %vecinit13
+}
+; CHECK-LABEL: test1_undef
+; SSE: haddps
+; AVX: vhaddps
+; AVX2: vhaddps
+; CHECK-NEXT: ret
+
+
+define <4 x float> @test2_undef(<4 x float> %a, <4 x float> %b) {
+ %vecext = extractelement <4 x float> %a, i32 0
+ %vecext1 = extractelement <4 x float> %a, i32 1
+ %add = fadd float %vecext, %vecext1
+ %vecinit = insertelement <4 x float> undef, float %add, i32 0
+ %vecext6 = extractelement <4 x float> %b, i32 0
+ %vecext7 = extractelement <4 x float> %b, i32 1
+ %add8 = fadd float %vecext6, %vecext7
+ %vecinit9 = insertelement <4 x float> %vecinit, float %add8, i32 2
+ %vecext10 = extractelement <4 x float> %b, i32 2
+ %vecext11 = extractelement <4 x float> %b, i32 3
+ %add12 = fadd float %vecext10, %vecext11
+ %vecinit13 = insertelement <4 x float> %vecinit9, float %add12, i32 3
+ ret <4 x float> %vecinit13
+}
+; CHECK-LABEL: test2_undef
+; SSE: haddps
+; AVX: vhaddps
+; AVX2: vhaddps
+; CHECK-NEXT: ret
+
+
+define <4 x float> @test3_undef(<4 x float> %a, <4 x float> %b) {
+ %vecext = extractelement <4 x float> %a, i32 0
+ %vecext1 = extractelement <4 x float> %a, i32 1
+ %add = fadd float %vecext, %vecext1
+ %vecinit = insertelement <4 x float> undef, float %add, i32 0
+ %vecext2 = extractelement <4 x float> %a, i32 2
+ %vecext3 = extractelement <4 x float> %a, i32 3
+ %add4 = fadd float %vecext2, %vecext3
+ %vecinit5 = insertelement <4 x float> %vecinit, float %add4, i32 1
+ %vecext6 = extractelement <4 x float> %b, i32 0
+ %vecext7 = extractelement <4 x float> %b, i32 1
+ %add8 = fadd float %vecext6, %vecext7
+ %vecinit9 = insertelement <4 x float> %vecinit5, float %add8, i32 2
+ ret <4 x float> %vecinit9
+}
+; CHECK-LABEL: test3_undef
+; SSE: haddps
+; AVX: vhaddps
+; AVX2: vhaddps
+; CHECK-NEXT: ret
+
+
+define <4 x float> @test4_undef(<4 x float> %a, <4 x float> %b) {
+ %vecext = extractelement <4 x float> %a, i32 0
+ %vecext1 = extractelement <4 x float> %a, i32 1
+ %add = fadd float %vecext, %vecext1
+ %vecinit = insertelement <4 x float> undef, float %add, i32 0
+ ret <4 x float> %vecinit
+}
+; CHECK-LABEL: test4_undef
+; CHECK-NOT: haddps
+; CHECK: ret
+
+
+define <2 x double> @test5_undef(<2 x double> %a, <2 x double> %b) {
+ %vecext = extractelement <2 x double> %a, i32 0
+ %vecext1 = extractelement <2 x double> %a, i32 1
+ %add = fadd double %vecext, %vecext1
+ %vecinit = insertelement <2 x double> undef, double %add, i32 0
+ ret <2 x double> %vecinit
+}
+; CHECK-LABEL: test5_undef
+; CHECK-NOT: haddpd
+; CHECK: ret
+
+
+define <4 x float> @test6_undef(<4 x float> %a, <4 x float> %b) {
+ %vecext = extractelement <4 x float> %a, i32 0
+ %vecext1 = extractelement <4 x float> %a, i32 1
+ %add = fadd float %vecext, %vecext1
+ %vecinit = insertelement <4 x float> undef, float %add, i32 0
+ %vecext2 = extractelement <4 x float> %a, i32 2
+ %vecext3 = extractelement <4 x float> %a, i32 3
+ %add4 = fadd float %vecext2, %vecext3
+ %vecinit5 = insertelement <4 x float> %vecinit, float %add4, i32 1
+ ret <4 x float> %vecinit5
+}
+; CHECK-LABEL: test6_undef
+; SSE: haddps
+; AVX: vhaddps
+; AVX2: vhaddps
+; CHECK-NEXT: ret
+
+
+define <4 x float> @test7_undef(<4 x float> %a, <4 x float> %b) {
+ %vecext = extractelement <4 x float> %b, i32 0
+ %vecext1 = extractelement <4 x float> %b, i32 1
+ %add = fadd float %vecext, %vecext1
+ %vecinit = insertelement <4 x float> undef, float %add, i32 2
+ %vecext2 = extractelement <4 x float> %b, i32 2
+ %vecext3 = extractelement <4 x float> %b, i32 3
+ %add4 = fadd float %vecext2, %vecext3
+ %vecinit5 = insertelement <4 x float> %vecinit, float %add4, i32 3
+ ret <4 x float> %vecinit5
+}
+; CHECK-LABEL: test7_undef
+; SSE: haddps
+; AVX: vhaddps
+; AVX2: vhaddps
+; CHECK-NEXT: ret
+
+
+define <4 x float> @test8_undef(<4 x float> %a, <4 x float> %b) {
+ %vecext = extractelement <4 x float> %a, i32 0
+ %vecext1 = extractelement <4 x float> %a, i32 1
+ %add = fadd float %vecext, %vecext1
+ %vecinit = insertelement <4 x float> undef, float %add, i32 0
+ %vecext2 = extractelement <4 x float> %a, i32 2
+ %vecext3 = extractelement <4 x float> %a, i32 3
+ %add4 = fadd float %vecext2, %vecext3
+ %vecinit5 = insertelement <4 x float> %vecinit, float %add4, i32 2
+ ret <4 x float> %vecinit5
+}
+; CHECK-LABEL: test8_undef
+; CHECK-NOT: haddps
+; CHECK: ret
+
+
+define <4 x float> @test9_undef(<4 x float> %a, <4 x float> %b) {
+ %vecext = extractelement <4 x float> %a, i32 0
+ %vecext1 = extractelement <4 x float> %a, i32 1
+ %add = fadd float %vecext, %vecext1
+ %vecinit = insertelement <4 x float> undef, float %add, i32 0
+ %vecext2 = extractelement <4 x float> %b, i32 2
+ %vecext3 = extractelement <4 x float> %b, i32 3
+ %add4 = fadd float %vecext2, %vecext3
+ %vecinit5 = insertelement <4 x float> %vecinit, float %add4, i32 3
+ ret <4 x float> %vecinit5
+}
+; CHECK-LABEL: test9_undef
+; CHECK: haddps
+; CHECK-NEXT: ret
+
+define <8 x float> @test10_undef(<8 x float> %a, <8 x float> %b) {
+ %vecext = extractelement <8 x float> %a, i32 0
+ %vecext1 = extractelement <8 x float> %a, i32 1
+ %add = fadd float %vecext, %vecext1
+ %vecinit = insertelement <8 x float> undef, float %add, i32 0
+ %vecext2 = extractelement <8 x float> %b, i32 2
+ %vecext3 = extractelement <8 x float> %b, i32 3
+ %add4 = fadd float %vecext2, %vecext3
+ %vecinit5 = insertelement <8 x float> %vecinit, float %add4, i32 3
+ ret <8 x float> %vecinit5
+}
+; CHECK-LABEL: test10_undef
+; SSE: haddps
+; AVX: vhaddps
+; AVX2: vhaddps
+; CHECK-NOT: haddps
+; CHECK: ret
+
+define <8 x float> @test11_undef(<8 x float> %a, <8 x float> %b) {
+ %vecext = extractelement <8 x float> %a, i32 0
+ %vecext1 = extractelement <8 x float> %a, i32 1
+ %add = fadd float %vecext, %vecext1
+ %vecinit = insertelement <8 x float> undef, float %add, i32 0
+ %vecext2 = extractelement <8 x float> %b, i32 4
+ %vecext3 = extractelement <8 x float> %b, i32 5
+ %add4 = fadd float %vecext2, %vecext3
+ %vecinit5 = insertelement <8 x float> %vecinit, float %add4, i32 6
+ ret <8 x float> %vecinit5
+}
+; CHECK-LABEL: test11_undef
+; SSE-NOT: haddps
+; AVX: vhaddps
+; AVX2: vhaddps
+; CHECK: ret
+
+define <8 x float> @test12_undef(<8 x float> %a, <8 x float> %b) {
+ %vecext = extractelement <8 x float> %a, i32 0
+ %vecext1 = extractelement <8 x float> %a, i32 1
+ %add = fadd float %vecext, %vecext1
+ %vecinit = insertelement <8 x float> undef, float %add, i32 0
+ %vecext2 = extractelement <8 x float> %a, i32 2
+ %vecext3 = extractelement <8 x float> %a, i32 3
+ %add4 = fadd float %vecext2, %vecext3
+ %vecinit5 = insertelement <8 x float> %vecinit, float %add4, i32 1
+ ret <8 x float> %vecinit5
+}
+; CHECK-LABEL: test12_undef
+; SSE: haddps
+; AVX: vhaddps
+; AVX2: vhaddps
+; CHECK-NOT: haddps
+; CHECK: ret
+
+define <8 x float> @test13_undef(<8 x float> %a, <8 x float> %b) {
+ %vecext = extractelement <8 x float> %a, i32 0
+ %vecext1 = extractelement <8 x float> %a, i32 1
+ %add1 = fadd float %vecext, %vecext1
+ %vecinit1 = insertelement <8 x float> undef, float %add1, i32 0
+ %vecext2 = extractelement <8 x float> %a, i32 2
+ %vecext3 = extractelement <8 x float> %a, i32 3
+ %add2 = fadd float %vecext2, %vecext3
+ %vecinit2 = insertelement <8 x float> %vecinit1, float %add2, i32 1
+ %vecext4 = extractelement <8 x float> %a, i32 4
+ %vecext5 = extractelement <8 x float> %a, i32 5
+ %add3 = fadd float %vecext4, %vecext5
+ %vecinit3 = insertelement <8 x float> %vecinit2, float %add3, i32 2
+ %vecext6 = extractelement <8 x float> %a, i32 6
+ %vecext7 = extractelement <8 x float> %a, i32 7
+ %add4 = fadd float %vecext6, %vecext7
+ %vecinit4 = insertelement <8 x float> %vecinit3, float %add4, i32 3
+ ret <8 x float> %vecinit4
+}
+; CHECK-LABEL: test13_undef
+; SSE: haddps
+; SSE-NOT: haddps
+; AVX: vhaddps
+; AVX2: vhaddps
+; CHECK-NOT: haddps
+; CHECK: ret
+
+define <8 x i32> @test14_undef(<8 x i32> %a, <8 x i32> %b) {
+ %vecext = extractelement <8 x i32> %a, i32 0
+ %vecext1 = extractelement <8 x i32> %a, i32 1
+ %add = add i32 %vecext, %vecext1
+ %vecinit = insertelement <8 x i32> undef, i32 %add, i32 0
+ %vecext2 = extractelement <8 x i32> %b, i32 2
+ %vecext3 = extractelement <8 x i32> %b, i32 3
+ %add4 = add i32 %vecext2, %vecext3
+ %vecinit5 = insertelement <8 x i32> %vecinit, i32 %add4, i32 3
+ ret <8 x i32> %vecinit5
+}
+; CHECK-LABEL: test14_undef
+; SSE: phaddd
+; AVX: vphaddd
+; AVX2: vphaddd
+; CHECK-NOT: phaddd
+; CHECK: ret
+
+; On AVX2, the following sequence can be folded into a single horizontal add.
+; If the subtarget doesn't support AVX2, then we emit two scalar adds followed
+; by vector inserts rather than two packed integer horizontal adds.
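+; A sketch of the AVX2 lowering: a single
+;   vphaddd %ymm1, %ymm0, %ymm0
+; already places a[0]+a[1] in element 0 (low lane) and b[4]+b[5] in element 6
+; (high lane), covering both inserted sums at once.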
+define <8 x i32> @test15_undef(<8 x i32> %a, <8 x i32> %b) {
+ %vecext = extractelement <8 x i32> %a, i32 0
+ %vecext1 = extractelement <8 x i32> %a, i32 1
+ %add = add i32 %vecext, %vecext1
+ %vecinit = insertelement <8 x i32> undef, i32 %add, i32 0
+ %vecext2 = extractelement <8 x i32> %b, i32 4
+ %vecext3 = extractelement <8 x i32> %b, i32 5
+ %add4 = add i32 %vecext2, %vecext3
+ %vecinit5 = insertelement <8 x i32> %vecinit, i32 %add4, i32 6
+ ret <8 x i32> %vecinit5
+}
+; CHECK-LABEL: test15_undef
+; SSE-NOT: phaddd
+; AVX-NOT: vphaddd
+; AVX2: vphaddd
+; CHECK: ret
+
+define <8 x i32> @test16_undef(<8 x i32> %a, <8 x i32> %b) {
+ %vecext = extractelement <8 x i32> %a, i32 0
+ %vecext1 = extractelement <8 x i32> %a, i32 1
+ %add = add i32 %vecext, %vecext1
+ %vecinit = insertelement <8 x i32> undef, i32 %add, i32 0
+ %vecext2 = extractelement <8 x i32> %a, i32 2
+ %vecext3 = extractelement <8 x i32> %a, i32 3
+ %add4 = add i32 %vecext2, %vecext3
+ %vecinit5 = insertelement <8 x i32> %vecinit, i32 %add4, i32 1
+ ret <8 x i32> %vecinit5
+}
+; CHECK-LABEL: test16_undef
+; SSE: phaddd
+; AVX: vphaddd
+; AVX2: vphaddd
+; CHECK-NOT: haddps
+; CHECK: ret
+
+define <8 x i32> @test17_undef(<8 x i32> %a, <8 x i32> %b) {
+ %vecext = extractelement <8 x i32> %a, i32 0
+ %vecext1 = extractelement <8 x i32> %a, i32 1
+ %add1 = add i32 %vecext, %vecext1
+ %vecinit1 = insertelement <8 x i32> undef, i32 %add1, i32 0
+ %vecext2 = extractelement <8 x i32> %a, i32 2
+ %vecext3 = extractelement <8 x i32> %a, i32 3
+ %add2 = add i32 %vecext2, %vecext3
+ %vecinit2 = insertelement <8 x i32> %vecinit1, i32 %add2, i32 1
+ %vecext4 = extractelement <8 x i32> %a, i32 4
+ %vecext5 = extractelement <8 x i32> %a, i32 5
+ %add3 = add i32 %vecext4, %vecext5
+ %vecinit3 = insertelement <8 x i32> %vecinit2, i32 %add3, i32 2
+ %vecext6 = extractelement <8 x i32> %a, i32 6
+ %vecext7 = extractelement <8 x i32> %a, i32 7
+ %add4 = add i32 %vecext6, %vecext7
+ %vecinit4 = insertelement <8 x i32> %vecinit3, i32 %add4, i32 3
+ ret <8 x i32> %vecinit4
+}
+; CHECK-LABEL: test17_undef
+; SSE: phaddd
+; AVX: vphaddd
+; AVX2: vphaddd
+; CHECK-NOT: haddps
+; CHECK: ret
+
diff --git a/test/CodeGen/X86/i8-umulo.ll b/test/CodeGen/X86/i8-umulo.ll
new file mode 100644
index 0000000..ba846f3
--- /dev/null
+++ b/test/CodeGen/X86/i8-umulo.ll
@@ -0,0 +1,24 @@
+; RUN: llc -mcpu=generic -march=x86 < %s | FileCheck %s
+; PR19858
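+; The expected lowering widens the i8 multiply to 16 bits (imulw) and detects
+; unsigned overflow by testing whether the high byte of the product is nonzero.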
+
+declare {i8, i1} @llvm.umul.with.overflow.i8(i8 %a, i8 %b)
+define i8 @testumulo(i32 %argc) {
+; CHECK: imulw
+; CHECK: testb %{{.+}}, %{{.+}}
+; CHECK: je [[NOOVERFLOWLABEL:.+]]
+; CHECK: {{.*}}[[NOOVERFLOWLABEL]]:
+; CHECK-NEXT: movb
+; CHECK-NEXT: retl
+top:
+ %RHS = trunc i32 %argc to i8
+ %umul = call { i8, i1 } @llvm.umul.with.overflow.i8(i8 25, i8 %RHS)
+ %ex = extractvalue { i8, i1 } %umul, 1
+ br i1 %ex, label %overflow, label %nooverflow
+
+overflow:
+ ret i8 %RHS
+
+nooverflow:
+ %umul.value = extractvalue { i8, i1 } %umul, 0
+ ret i8 %umul.value
+}
diff --git a/test/CodeGen/X86/jump_table_alias.ll b/test/CodeGen/X86/jump_table_alias.ll
new file mode 100644
index 0000000..f3691fd
--- /dev/null
+++ b/test/CodeGen/X86/jump_table_alias.ll
@@ -0,0 +1,33 @@
+; RUN: llc <%s -jump-table-type=single | FileCheck %s
+target triple = "x86_64-unknown-linux-gnu"
+define i32 @f() unnamed_addr jumptable {
+entry:
+ ret i32 0
+}
+
+@i = alias internal i32 ()* @f
+@j = alias i32 ()* @f
+
+define i32 @main(i32 %argc, i8** %argv) {
+ %temp = alloca i32 ()*, align 8
+ store i32 ()* @i, i32()** %temp, align 8
+; CHECK: movq $__llvm_jump_instr_table_0_1
+ %1 = load i32 ()** %temp, align 8
+; CHECK: movl $__llvm_jump_instr_table_0_1
+ %2 = call i32 ()* %1()
+ %3 = call i32 ()* @i()
+; CHECK: callq i
+ %4 = call i32 ()* @j()
+; CHECK: callq j
+ ret i32 %3
+}
+
+; There should only be one table, even though there are two GlobalAliases,
+; because they both alias the same value.
+
+; CHECK: .globl __llvm_jump_instr_table_0_1
+; CHECK: .align 8, 0x90
+; CHECK: .type __llvm_jump_instr_table_0_1,@function
+; CHECK: __llvm_jump_instr_table_0_1:
+; CHECK: jmp f@PLT
+
diff --git a/test/CodeGen/X86/jump_table_bitcast.ll b/test/CodeGen/X86/jump_table_bitcast.ll
new file mode 100644
index 0000000..33a798f
--- /dev/null
+++ b/test/CodeGen/X86/jump_table_bitcast.ll
@@ -0,0 +1,46 @@
+; RUN: llc <%s -jump-table-type=single | FileCheck %s
+target triple = "x86_64-unknown-linux-gnu"
+define i32 @f() unnamed_addr jumptable {
+ ret i32 0
+}
+
+define i32 @g(i8* %a) unnamed_addr jumptable {
+ ret i32 0
+}
+
+define void @h(void ()* %func) unnamed_addr jumptable {
+ ret void
+}
+
+define i32 @main() {
+ %g = alloca i32 (...)*, align 8
+ store i32 (...)* bitcast (i32 ()* @f to i32 (...)*), i32 (...)** %g, align 8
+; CHECK: movq $__llvm_jump_instr_table_0_[[ENTRY:1|2|3]], (%rsp)
+; CHECK: movl $__llvm_jump_instr_table_0_[[ENTRY]], %ecx
+ %1 = load i32 (...)** %g, align 8
+ %call = call i32 (...)* %1()
+ call void (void ()*)* @h(void ()* bitcast (void (void ()*)* @h to void ()*))
+; CHECK: movl $__llvm_jump_instr_table_0_{{1|2|3}}, %edi
+; CHECK: callq h
+
+ %a = call i32 (i32*)* bitcast (i32 (i8*)* @g to i32(i32*)*)(i32* null)
+; CHECK: callq g
+ ret i32 %a
+}
+
+; CHECK: .globl __llvm_jump_instr_table_0_1
+; CHECK: .align 8, 0x90
+; CHECK: .type __llvm_jump_instr_table_0_1,@function
+; CHECK: __llvm_jump_instr_table_0_1:
+; CHECK: jmp {{f|g|h}}@PLT
+; CHECK: .globl __llvm_jump_instr_table_0_2
+; CHECK: .align 8, 0x90
+; CHECK: .type __llvm_jump_instr_table_0_2,@function
+; CHECK: __llvm_jump_instr_table_0_2:
+; CHECK: jmp {{f|g|h}}@PLT
+; CHECK: .globl __llvm_jump_instr_table_0_3
+; CHECK: .align 8, 0x90
+; CHECK: .type __llvm_jump_instr_table_0_3,@function
+; CHECK: __llvm_jump_instr_table_0_3:
+; CHECK: jmp {{f|g|h}}@PLT
+
diff --git a/test/CodeGen/X86/jump_tables.ll b/test/CodeGen/X86/jump_tables.ll
new file mode 100644
index 0000000..5a0aed0
--- /dev/null
+++ b/test/CodeGen/X86/jump_tables.ll
@@ -0,0 +1,272 @@
+; RUN: llc <%s -jump-table-type=single | FileCheck --check-prefix=SINGLE %s
+; RUN: llc <%s -jump-table-type=arity | FileCheck --check-prefix=ARITY %s
+; RUN: llc <%s -jump-table-type=simplified | FileCheck --check-prefix=SIMPL %s
+; RUN: llc <%s -jump-table-type=full | FileCheck --check-prefix=FULL %s
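+; The four -jump-table-type modes differ in how jumptable functions are grouped:
+; 'single' emits one table for all of them, 'arity' one table per argument
+; count, 'simplified' one table per simplified function type, and 'full' one
+; table per distinct function signature.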
+
+target triple = "x86_64-unknown-linux-gnu"
+
+%struct.fun_struct = type { i32 (...)* }
+
+define void @indirect_fun() unnamed_addr jumptable {
+ ret void
+}
+
+define void @indirect_fun_match() unnamed_addr jumptable {
+ ret void
+}
+
+define i32 @indirect_fun_i32() unnamed_addr jumptable {
+ ret i32 0
+}
+
+define i32 @indirect_fun_i32_1(i32 %a) unnamed_addr jumptable {
+ ret i32 %a
+}
+
+define i32 @indirect_fun_i32_2(i32 %a, i32 %b) unnamed_addr jumptable {
+ ret i32 %a
+}
+
+define i32* @indirect_fun_i32S_2(i32* %a, i32 %b) unnamed_addr jumptable {
+ ret i32* %a
+}
+
+define void @indirect_fun_struct(%struct.fun_struct %fs) unnamed_addr jumptable {
+ ret void
+}
+
+define void @indirect_fun_fun(i32 (...)* %fun, i32 %a) unnamed_addr jumptable {
+ ret void
+}
+
+define i32 @indirect_fun_fun_ret(i32 (...)* %fun, i32 %a) unnamed_addr jumptable {
+ ret i32 %a
+}
+
+define void @indirect_fun_array([19 x i8] %a) unnamed_addr jumptable {
+ ret void
+}
+
+define void @indirect_fun_vec(<3 x i32> %a) unnamed_addr jumptable {
+ ret void
+}
+
+define void @indirect_fun_vec_2(<4 x float> %a) unnamed_addr jumptable {
+ ret void
+}
+
+define i32 @m(void ()* %fun) {
+ call void ()* %fun()
+ ret i32 0
+}
+
+define void ()* @get_fun() {
+ ret void ()* @indirect_fun
+; SINGLE: movl $__llvm_jump_instr_table_0_
+; ARITY: movl $__llvm_jump_instr_table_
+; SIMPL: movl $__llvm_jump_instr_table_
+; FULL: movl $__llvm_jump_instr_table_
+}
+
+define i32 @main(i32 %argc, i8** %argv) {
+ %f = call void ()* ()* @get_fun()
+ %a = call i32 @m(void ()* %f)
+ ret i32 %a
+}
+
+; SINGLE-DAG: .globl __llvm_jump_instr_table_0_1
+; SINGLE-DAG: .align 8, 0x90
+; SINGLE-DAG: .type __llvm_jump_instr_table_0_1,@function
+; SINGLE-DAG: __llvm_jump_instr_table_0_1:
+; SINGLE-DAG: jmp indirect_fun_array@PLT
+; SINGLE-DAG: .globl __llvm_jump_instr_table_0_2
+; SINGLE-DAG: .align 8, 0x90
+; SINGLE-DAG: .type __llvm_jump_instr_table_0_2,@function
+; SINGLE-DAG: __llvm_jump_instr_table_0_2:
+; SINGLE-DAG: jmp indirect_fun_i32_2@PLT
+; SINGLE-DAG: .globl __llvm_jump_instr_table_0_3
+; SINGLE-DAG: .align 8, 0x90
+; SINGLE-DAG: .type __llvm_jump_instr_table_0_3,@function
+; SINGLE-DAG: __llvm_jump_instr_table_0_3:
+; SINGLE-DAG: jmp indirect_fun_vec_2@PLT
+; SINGLE-DAG: .globl __llvm_jump_instr_table_0_4
+; SINGLE-DAG: .align 8, 0x90
+; SINGLE-DAG: .type __llvm_jump_instr_table_0_4,@function
+; SINGLE-DAG: __llvm_jump_instr_table_0_4:
+; SINGLE-DAG: jmp indirect_fun_i32S_2@PLT
+; SINGLE-DAG: .globl __llvm_jump_instr_table_0_5
+; SINGLE-DAG: .align 8, 0x90
+; SINGLE-DAG: .type __llvm_jump_instr_table_0_5,@function
+; SINGLE-DAG: __llvm_jump_instr_table_0_5:
+; SINGLE-DAG: jmp indirect_fun_struct@PLT
+; SINGLE-DAG: .globl __llvm_jump_instr_table_0_6
+; SINGLE-DAG: .align 8, 0x90
+; SINGLE-DAG: .type __llvm_jump_instr_table_0_6,@function
+; SINGLE-DAG: __llvm_jump_instr_table_0_6:
+; SINGLE-DAG: jmp indirect_fun_i32_1@PLT
+; SINGLE-DAG: .globl __llvm_jump_instr_table_0_7
+; SINGLE-DAG: .align 8, 0x90
+; SINGLE-DAG: .type __llvm_jump_instr_table_0_7,@function
+; SINGLE-DAG: __llvm_jump_instr_table_0_7:
+; SINGLE-DAG: jmp indirect_fun_i32@PLT
+; SINGLE-DAG: .globl __llvm_jump_instr_table_0_8
+; SINGLE-DAG: .align 8, 0x90
+; SINGLE-DAG: .type __llvm_jump_instr_table_0_8,@function
+; SINGLE-DAG: __llvm_jump_instr_table_0_8:
+; SINGLE-DAG: jmp indirect_fun_fun@PLT
+; SINGLE-DAG: .globl __llvm_jump_instr_table_0_9
+; SINGLE-DAG: .align 8, 0x90
+; SINGLE-DAG: .type __llvm_jump_instr_table_0_9,@function
+; SINGLE-DAG: __llvm_jump_instr_table_0_9:
+; SINGLE-DAG: jmp indirect_fun_fun_ret@PLT
+; SINGLE-DAG: .globl __llvm_jump_instr_table_0_10
+; SINGLE-DAG: .align 8, 0x90
+; SINGLE-DAG: .type __llvm_jump_instr_table_0_10,@function
+; SINGLE-DAG: __llvm_jump_instr_table_0_10:
+; SINGLE-DAG: jmp indirect_fun@PLT
+; SINGLE-DAG: .globl __llvm_jump_instr_table_0_11
+; SINGLE-DAG: .align 8, 0x90
+; SINGLE-DAG: .type __llvm_jump_instr_table_0_11,@function
+; SINGLE-DAG: __llvm_jump_instr_table_0_11:
+; SINGLE-DAG: jmp indirect_fun_match@PLT
+; SINGLE-DAG: .globl __llvm_jump_instr_table_0_12
+; SINGLE-DAG: .align 8, 0x90
+; SINGLE-DAG: .type __llvm_jump_instr_table_0_12,@function
+; SINGLE-DAG: __llvm_jump_instr_table_0_12:
+; SINGLE-DAG: jmp indirect_fun_vec@PLT
+; SINGLE-DAG: .align 8, 0x90
+; SINGLE-DAG: ud2
+; SINGLE-DAG: .align 8, 0x90
+; SINGLE-DAG: ud2
+; SINGLE-DAG: .align 8, 0x90
+; SINGLE-DAG: ud2
+; SINGLE-DAG: .align 8, 0x90
+; SINGLE-DAG: ud2
+
+
+; ARITY-DAG: .globl __llvm_jump_instr_table_2_1
+; ARITY-DAG: .align 8, 0x90
+; ARITY-DAG: .type __llvm_jump_instr_table_2_1,@function
+; ARITY-DAG: __llvm_jump_instr_table_2_1:
+; ARITY-DAG: jmp indirect_fun{{.*}}@PLT
+; ARITY-DAG: .align 8, 0x90
+; ARITY-DAG: ud2
+; ARITY-DAG: .globl __llvm_jump_instr_table_0_1
+; ARITY-DAG: .align 8, 0x90
+; ARITY-DAG: .type __llvm_jump_instr_table_0_1,@function
+; ARITY-DAG: __llvm_jump_instr_table_0_1:
+; ARITY-DAG: jmp indirect_fun{{.*}}@PLT
+; ARITY-DAG: .globl __llvm_jump_instr_table_1_1
+; ARITY-DAG: .align 8, 0x90
+; ARITY-DAG: .type __llvm_jump_instr_table_1_1,@function
+; ARITY-DAG: __llvm_jump_instr_table_1_1:
+; ARITY-DAG: jmp indirect_fun{{.*}}@PLT
+
+; SIMPL-DAG: .globl __llvm_jump_instr_table_2_1
+; SIMPL-DAG: .align 8, 0x90
+; SIMPL-DAG: .type __llvm_jump_instr_table_2_1,@function
+; SIMPL-DAG: __llvm_jump_instr_table_2_1:
+; SIMPL-DAG: jmp indirect_fun{{.*}}@PLT
+; SIMPL-DAG: .align 8, 0x90
+; SIMPL-DAG: ud2
+; SIMPL-DAG: .globl __llvm_jump_instr_table_0_1
+; SIMPL-DAG: .align 8, 0x90
+; SIMPL-DAG: .type __llvm_jump_instr_table_0_1,@function
+; SIMPL-DAG: __llvm_jump_instr_table_0_1:
+; SIMPL-DAG: jmp indirect_fun{{.*}}@PLT
+; SIMPL-DAG: .globl __llvm_jump_instr_table_1_1
+; SIMPL-DAG: .align 8, 0x90
+; SIMPL-DAG: .type __llvm_jump_instr_table_1_1,@function
+; SIMPL-DAG: __llvm_jump_instr_table_1_1:
+; SIMPL-DAG: jmp indirect_fun{{.*}}@PLT
+; SIMPL-DAG: .globl __llvm_jump_instr_table_3_1
+; SIMPL-DAG: .align 8, 0x90
+; SIMPL-DAG: .type __llvm_jump_instr_table_3_1,@function
+; SIMPL-DAG: __llvm_jump_instr_table_3_1:
+; SIMPL-DAG: jmp indirect_fun{{.*}}@PLT
+; SIMPL-DAG: .globl __llvm_jump_instr_table_4_1
+; SIMPL-DAG: .align 8, 0x90
+; SIMPL-DAG: .type __llvm_jump_instr_table_4_1,@function
+; SIMPL-DAG: __llvm_jump_instr_table_4_1:
+; SIMPL-DAG: jmp indirect_fun{{.*}}@PLT
+
+
+; FULL-DAG: .globl __llvm_jump_instr_table_10_1
+; FULL-DAG: .align 8, 0x90
+; FULL-DAG: .type __llvm_jump_instr_table_10_1,@function
+; FULL-DAG:__llvm_jump_instr_table_10_1:
+; FULL-DAG: jmp indirect_fun_i32_1@PLT
+; FULL-DAG: .align 8, 0x90
+; FULL-DAG: ud2
+; FULL-DAG: .globl __llvm_jump_instr_table_9_1
+; FULL-DAG: .align 8, 0x90
+; FULL-DAG: .type __llvm_jump_instr_table_9_1,@function
+; FULL-DAG:__llvm_jump_instr_table_9_1:
+; FULL-DAG: jmp indirect_fun_i32_2@PLT
+; FULL-DAG: .align 8, 0x90
+; FULL-DAG: ud2
+; FULL-DAG: .globl __llvm_jump_instr_table_7_1
+; FULL-DAG: .align 8, 0x90
+; FULL-DAG: .type __llvm_jump_instr_table_7_1,@function
+; FULL-DAG:__llvm_jump_instr_table_7_1:
+; FULL-DAG: jmp indirect_fun_i32S_2@PLT
+; FULL-DAG: .align 8, 0x90
+; FULL-DAG: ud2
+; FULL-DAG: .globl __llvm_jump_instr_table_3_1
+; FULL-DAG: .align 8, 0x90
+; FULL-DAG: .type __llvm_jump_instr_table_3_1,@function
+; FULL-DAG:__llvm_jump_instr_table_3_1:
+; FULL-DAG: jmp indirect_fun_vec_2@PLT
+; FULL-DAG: .align 8, 0x90
+; FULL-DAG: ud2
+; FULL-DAG: .globl __llvm_jump_instr_table_2_1
+; FULL-DAG: .align 8, 0x90
+; FULL-DAG: .type __llvm_jump_instr_table_2_1,@function
+; FULL-DAG:__llvm_jump_instr_table_2_1:
+; FULL-DAG: jmp indirect_fun@PLT
+; FULL-DAG: .align 8, 0x90
+; FULL-DAG: ud2
+; FULL-DAG: .align 8, 0x90
+; FULL-DAG: ud2
+; FULL-DAG: .globl __llvm_jump_instr_table_8_1
+; FULL-DAG: .align 8, 0x90
+; FULL-DAG: .type __llvm_jump_instr_table_8_1,@function
+; FULL-DAG:__llvm_jump_instr_table_8_1:
+; FULL-DAG: jmp indirect_fun_i32@PLT
+; FULL-DAG: .align 8, 0x90
+; FULL-DAG: ud2
+; FULL-DAG: .globl __llvm_jump_instr_table_1_1
+; FULL-DAG: .align 8, 0x90
+; FULL-DAG: .type __llvm_jump_instr_table_1_1,@function
+; FULL-DAG:__llvm_jump_instr_table_1_1:
+; FULL-DAG: jmp indirect_fun_array@PLT
+; FULL-DAG: .align 8, 0x90
+; FULL-DAG: ud2
+; FULL-DAG: .globl __llvm_jump_instr_table_0_1
+; FULL-DAG: .align 8, 0x90
+; FULL-DAG: .type __llvm_jump_instr_table_0_1,@function
+; FULL-DAG:__llvm_jump_instr_table_0_1:
+; FULL-DAG: jmp indirect_fun_vec@PLT
+; FULL-DAG: .align 8, 0x90
+; FULL-DAG: ud2
+; FULL-DAG: .globl __llvm_jump_instr_table_6_1
+; FULL-DAG: .align 8, 0x90
+; FULL-DAG: .type __llvm_jump_instr_table_6_1,@function
+; FULL-DAG:__llvm_jump_instr_table_6_1:
+; FULL-DAG: jmp indirect_fun_struct@PLT
+; FULL-DAG: .align 8, 0x90
+; FULL-DAG: ud2
+; FULL-DAG: .globl __llvm_jump_instr_table_5_1
+; FULL-DAG: .align 8, 0x90
+; FULL-DAG: .type __llvm_jump_instr_table_5_1,@function
+; FULL-DAG:__llvm_jump_instr_table_5_1:
+; FULL-DAG: jmp indirect_fun_fun@PLT
+; FULL-DAG: .align 8, 0x90
+; FULL-DAG: ud2
+; FULL-DAG: .globl __llvm_jump_instr_table_4_1
+; FULL-DAG: .align 8, 0x90
+; FULL-DAG: .type __llvm_jump_instr_table_4_1,@function
+; FULL-DAG:__llvm_jump_instr_table_4_1:
+; FULL-DAG: jmp indirect_fun_fun_ret@PLT
+; FULL-DAG: .align 8, 0x90
+; FULL-DAG: ud2
diff --git a/test/CodeGen/X86/libcall-sret.ll b/test/CodeGen/X86/libcall-sret.ll
new file mode 100644
index 0000000..67b99ac
--- /dev/null
+++ b/test/CodeGen/X86/libcall-sret.ll
@@ -0,0 +1,28 @@
+; RUN: llc -mtriple=i686-linux-gnu -o - %s | FileCheck %s
+
+@var = global i128 0
+
+; We were trying to convert the i128 operation into a libcall, but failing to
+; perform sret demotion when we couldn't return the result in registers. Make
+; sure we marshal the return properly:
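+; After demotion the call is roughly void @__multi3(i128* sret, i128 %l, i128 %r),
+; with the result pointer passed as a hidden first stack argument.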
+
+define void @test_sret_libcall(i128 %l, i128 %r) {
+; CHECK-LABEL: test_sret_libcall:
+
+ ; Stack for call: 4 (sret ptr) + 16 (i128 %l) + 16 (i128 %r) = 36 bytes, so
+ ; the next logical (aligned) place for the actual sret data is %esp + 40.
+; CHECK: leal 40(%esp), [[SRET_ADDR:%[a-z]+]]
+; CHECK: movl [[SRET_ADDR]], (%esp)
+; CHECK: calll __multi3
+; CHECK-DAG: movl 40(%esp), [[RES0:%[a-z]+]]
+; CHECK-DAG: movl 44(%esp), [[RES1:%[a-z]+]]
+; CHECK-DAG: movl 48(%esp), [[RES2:%[a-z]+]]
+; CHECK-DAG: movl 52(%esp), [[RES3:%[a-z]+]]
+; CHECK-DAG: movl [[RES0]], var
+; CHECK-DAG: movl [[RES1]], var+4
+; CHECK-DAG: movl [[RES2]], var+8
+; CHECK-DAG: movl [[RES3]], var+12
+ %prod = mul i128 %l, %r
+ store i128 %prod, i128* @var
+ ret void
+}
diff --git a/test/CodeGen/X86/lit.local.cfg b/test/CodeGen/X86/lit.local.cfg
index 3d91b03..8ed58f1 100644
--- a/test/CodeGen/X86/lit.local.cfg
+++ b/test/CodeGen/X86/lit.local.cfg
@@ -6,7 +6,6 @@
# cleanly.
config.suffixes = ['.ll', '.test', '.txt']
-targets = set(config.root.targets_to_build.split())
-if not 'X86' in targets:
+if not 'X86' in config.root.targets:
config.unsupported = True
diff --git a/test/CodeGen/X86/lower-bitcast.ll b/test/CodeGen/X86/lower-bitcast.ll
index b9b29a5..f47161e 100644
--- a/test/CodeGen/X86/lower-bitcast.ll
+++ b/test/CodeGen/X86/lower-bitcast.ll
@@ -1,4 +1,5 @@
; RUN: llc < %s -march=x86-64 -mcpu=core2 -mattr=+sse2 | FileCheck %s
+; RUN: llc < %s -march=x86-64 -mcpu=core2 -mattr=+sse2 -x86-experimental-vector-widening-legalization | FileCheck %s --check-prefix=CHECK-WIDE
define double @test1(double %A) {
@@ -9,14 +10,19 @@ define double @test1(double %A) {
}
; FIXME: Ideally we should be able to fold the entire body of @test1 into a
; single paddd instruction. At the moment we produce the sequence
-; pshufd+paddq+pshufd.
-
+; pshufd+paddq+pshufd. This is fixed with the widening legalization.
+;
; CHECK-LABEL: test1
; CHECK-NOT: movsd
; CHECK: pshufd
-; CHECK-NEXT: paddq
+; CHECK-NEXT: paddd
; CHECK-NEXT: pshufd
; CHECK-NEXT: ret
+;
+; CHECK-WIDE-LABEL: test1
+; CHECK-WIDE-NOT: movsd
+; CHECK-WIDE: paddd
+; CHECK-WIDE-NEXT: ret
define double @test2(double %A, double %B) {
@@ -26,17 +32,15 @@ define double @test2(double %A, double %B) {
%3 = bitcast <2 x i32> %add to double
ret double %3
}
-; FIXME: Ideally we should be able to fold the entire body of @test2 into a
-; single 'paddd %xmm1, %xmm0' instruction. At the moment we produce the
-; sequence pshufd+pshufd+paddq+pshufd.
-
; CHECK-LABEL: test2
; CHECK-NOT: movsd
-; CHECK: pshufd
-; CHECK-NEXT: pshufd
-; CHECK-NEXT: paddq
-; CHECK-NEXT: pshufd
+; CHECK: paddd
; CHECK-NEXT: ret
+;
+; CHECK-WIDE-LABEL: test2
+; CHECK-WIDE-NOT: movsd
+; CHECK-WIDE: paddd
+; CHECK-WIDE-NEXT: ret
define i64 @test3(i64 %A) {
@@ -50,6 +54,12 @@ define i64 @test3(i64 %A) {
; CHECK: addps
; CHECK-NOT: pshufd
; CHECK: ret
+;
+; CHECK-WIDE-LABEL: test3
+; CHECK-WIDE-NOT: pshufd
+; CHECK-WIDE: addps
+; CHECK-WIDE-NOT: pshufd
+; CHECK-WIDE: ret
define i64 @test4(i64 %A) {
@@ -59,13 +69,20 @@ define i64 @test4(i64 %A) {
ret i64 %2
}
; FIXME: At the moment we still produce the sequence pshufd+paddq+pshufd.
-; Ideally, we should fold that sequence into a single paddd.
-
+; Ideally, we should fold that sequence into a single paddd. This is fixed with
+; the widening legalization.
+;
; CHECK-LABEL: test4
; CHECK: pshufd
; CHECK-NEXT: paddq
; CHECK-NEXT: pshufd
; CHECK: ret
+;
+; CHECK-WIDE-LABEL: test4
+; CHECK-WIDE: movd %{{rdi|rcx}},
+; CHECK-WIDE-NEXT: paddd
+; CHECK-WIDE-NEXT: movd {{.*}}, %rax
+; CHECK-WIDE: ret
define double @test5(double %A) {
@@ -77,6 +94,10 @@ define double @test5(double %A) {
; CHECK-LABEL: test5
; CHECK: addps
; CHECK-NEXT: ret
+;
+; CHECK-WIDE-LABEL: test5
+; CHECK-WIDE: addps
+; CHECK-WIDE-NEXT: ret
define double @test6(double %A) {
@@ -86,14 +107,20 @@ define double @test6(double %A) {
ret double %2
}
; FIXME: Ideally we should be able to fold the entire body of @test6 into a
-; single paddw instruction.
-
+; single paddw instruction. This is fixed with the widening legalization.
+;
; CHECK-LABEL: test6
; CHECK-NOT: movsd
; CHECK: punpcklwd
-; CHECK-NEXT: paddd
+; CHECK-NEXT: paddw
; CHECK-NEXT: pshufb
; CHECK-NEXT: ret
+;
+; CHECK-WIDE-LABEL: test6
+; CHECK-WIDE-NOT: mov
+; CHECK-WIDE-NOT: punpcklwd
+; CHECK-WIDE: paddw
+; CHECK-WIDE-NEXT: ret
define double @test7(double %A, double %B) {
@@ -103,17 +130,17 @@ define double @test7(double %A, double %B) {
%3 = bitcast <4 x i16> %add to double
ret double %3
}
-; FIXME: Ideally we should be able to fold the entire body of @test7 into a
-; single 'paddw %xmm1, %xmm0' instruction. At the moment we produce the
-; sequence pshufd+pshufd+paddd+pshufd.
-
; CHECK-LABEL: test7
; CHECK-NOT: movsd
-; CHECK: punpcklwd
-; CHECK-NEXT: punpcklwd
-; CHECK-NEXT: paddd
-; CHECK-NEXT: pshufb
+; CHECK-NOT: punpcklwd
+; CHECK: paddw
; CHECK-NEXT: ret
+;
+; CHECK-WIDE-LABEL: test7
+; CHECK-WIDE-NOT: movsd
+; CHECK-WIDE-NOT: punpcklwd
+; CHECK-WIDE: paddw
+; CHECK-WIDE-NEXT: ret
define double @test8(double %A) {
@@ -124,14 +151,20 @@ define double @test8(double %A) {
}
; FIXME: Ideally we should be able to fold the entire body of @test8 into a
; single paddb instruction. At the moment we produce the sequence
-; pshufd+paddw+pshufd.
-
+; pshufd+paddw+pshufd. This is fixed with the widening legalization.
+;
; CHECK-LABEL: test8
; CHECK-NOT: movsd
; CHECK: punpcklbw
-; CHECK-NEXT: paddw
+; CHECK-NEXT: paddb
; CHECK-NEXT: pshufb
; CHECK-NEXT: ret
+;
+; CHECK-WIDE-LABEL: test8
+; CHECK-WIDE-NOT: movsd
+; CHECK-WIDE-NOT: punpcklbw
+; CHECK-WIDE: paddb
+; CHECK-WIDE-NEXT: ret
define double @test9(double %A, double %B) {
@@ -141,15 +174,15 @@ define double @test9(double %A, double %B) {
%3 = bitcast <8 x i8> %add to double
ret double %3
}
-; FIXME: Ideally we should be able to fold the entire body of @test9 into a
-; single 'paddb %xmm1, %xmm0' instruction. At the moment we produce the
-; sequence pshufd+pshufd+paddw+pshufd.
-
; CHECK-LABEL: test9
; CHECK-NOT: movsd
-; CHECK: punpcklbw
-; CHECK-NEXT: punpcklbw
-; CHECK-NEXT: paddw
-; CHECK-NEXT: pshufb
+; CHECK-NOT: punpcklbw
+; CHECK: paddb
; CHECK-NEXT: ret
+;
+; CHECK-WIDE-LABEL: test9
+; CHECK-WIDE-NOT: movsd
+; CHECK-WIDE-NOT: punpcklbw
+; CHECK-WIDE: paddb
+; CHECK-WIDE-NEXT: ret
diff --git a/test/CodeGen/X86/macho-comdat.ll b/test/CodeGen/X86/macho-comdat.ll
new file mode 100644
index 0000000..3c2d997
--- /dev/null
+++ b/test/CodeGen/X86/macho-comdat.ll
@@ -0,0 +1,6 @@
+; RUN: not llc -mtriple x86_64-apple-darwin < %s 2> %t
+; RUN: FileCheck < %t %s
+
+$f = comdat any
+@v = global i32 0, comdat $f
+; CHECK: LLVM ERROR: MachO doesn't support COMDATs, 'f' cannot be lowered.
diff --git a/test/CodeGen/X86/null-streamer.ll b/test/CodeGen/X86/null-streamer.ll
index 7c0e82f..fa77fcb 100644
--- a/test/CodeGen/X86/null-streamer.ll
+++ b/test/CodeGen/X86/null-streamer.ll
@@ -1,6 +1,7 @@
; Check the MCNullStreamer operates correctly, at least on a minimal test case.
;
; RUN: llc -filetype=null -o %t -march=x86 %s
+; RUN: llc -filetype=null -o %t -mtriple=i686-cygwin %s
define void @f0() {
ret void
@@ -9,3 +10,20 @@ define void @f0() {
define void @f1() {
ret void
}
+
+!llvm.dbg.cu = !{!0}
+!llvm.module.flags = !{!11, !13}
+
+!0 = metadata !{i32 786449, metadata !1, i32 4, metadata !" ", i1 true, metadata !"", i32 0, metadata !2, metadata !2, metadata !3, metadata !9, metadata !2, metadata !""}
+!1 = metadata !{metadata !"", metadata !""}
+!2 = metadata !{}
+!3 = metadata !{metadata !4}
+!4 = metadata !{i32 786478, metadata !1, metadata !5, metadata !"", metadata !"", metadata !"", i32 2, metadata !6, i1 false, i1 true, i32 0, i32 0, null, i32 256, i1 true, i32 ()* null, null, null, metadata !2, i32 2}
+!5 = metadata !{i32 786473, metadata !1}
+!6 = metadata !{i32 786453, i32 0, null, metadata !"", i32 0, i64 0, i64 0, i64 0, i32 0, null, metadata !7, i32 0, null, null, null}
+!7 = metadata !{metadata !8}
+!8 = metadata !{i32 786468, null, null, metadata !"", i32 0, i64 32, i64 32, i64 0, i32 0, i32 5}
+!9 = metadata !{metadata !10}
+!10 = metadata !{i32 786484, i32 0, null, metadata !"i", metadata !"i", metadata !"_ZL1i", metadata !5, i32 1, metadata !8, i32 1, i32 1, null, null}
+!11 = metadata !{i32 2, metadata !"Dwarf Version", i32 3}
+!13 = metadata !{i32 1, metadata !"Debug Info Version", i32 1}
diff --git a/test/CodeGen/X86/pr20020.ll b/test/CodeGen/X86/pr20020.ll
new file mode 100644
index 0000000..83dae36
--- /dev/null
+++ b/test/CodeGen/X86/pr20020.ll
@@ -0,0 +1,73 @@
+; RUN: llc < %s -mtriple=x86_64-apple-macosx -disable-lsr -post-RA-scheduler=1 -break-anti-dependencies=critical | FileCheck %s
+
+; In PR20020, the critical anti-dependency breaker algorithm mistakenly
+; changes the register operands of an 'xorl %eax, %eax' to 'xorl %ecx, %ecx'
+; and then immediately reloads %rcx with a value based on the wrong %rax.
+
+; CHECK-NOT: xorl %ecx, %ecx
+; CHECK: leaq 1(%rax), %rcx
+
+
+%struct.planet = type { double, double, double }
+
+; Function Attrs: nounwind ssp uwtable
+define void @advance(i32 %nbodies, %struct.planet* nocapture %bodies) #0 {
+entry:
+ %cmp4 = icmp sgt i32 %nbodies, 0
+ br i1 %cmp4, label %for.body.preheader, label %for.end38
+
+for.body.preheader: ; preds = %entry
+ %gep = getelementptr %struct.planet* %bodies, i64 1, i32 1
+ %gep13 = bitcast double* %gep to %struct.planet*
+ %0 = add i32 %nbodies, -1
+ br label %for.body
+
+for.body: ; preds = %for.body.preheader, %for.inc20
+ %iv19 = phi i32 [ %0, %for.body.preheader ], [ %iv.next, %for.inc20 ]
+ %iv = phi %struct.planet* [ %gep13, %for.body.preheader ], [ %gep14, %for.inc20 ]
+ %iv9 = phi i64 [ %iv.next10, %for.inc20 ], [ 0, %for.body.preheader ]
+ %iv.next10 = add nuw nsw i64 %iv9, 1
+ %1 = trunc i64 %iv.next10 to i32
+ %cmp22 = icmp slt i32 %1, %nbodies
+ br i1 %cmp22, label %for.body3.lr.ph, label %for.inc20
+
+for.body3.lr.ph: ; preds = %for.body
+ %x = getelementptr inbounds %struct.planet* %bodies, i64 %iv9, i32 0
+ %y = getelementptr inbounds %struct.planet* %bodies, i64 %iv9, i32 1
+ %vx = getelementptr inbounds %struct.planet* %bodies, i64 %iv9, i32 2
+ br label %for.body3
+
+for.body3: ; preds = %for.body3, %for.body3.lr.ph
+ %iv20 = phi i32 [ %iv.next21, %for.body3 ], [ %iv19, %for.body3.lr.ph ]
+ %iv15 = phi %struct.planet* [ %gep16, %for.body3 ], [ %iv, %for.body3.lr.ph ]
+ %iv1517 = bitcast %struct.planet* %iv15 to double*
+ %2 = load double* %x, align 8
+ %gep18 = getelementptr double* %iv1517, i64 -1
+ %3 = load double* %gep18, align 8
+ %sub = fsub double %2, %3
+ %4 = load double* %y, align 8
+ %5 = load double* %iv1517, align 8
+ %sub8 = fsub double %4, %5
+ %add10 = fadd double %sub, %sub8
+ %call = tail call double @sqrt(double %sub8) #2
+ store double %add10, double* %vx, align 8
+ %gep16 = getelementptr %struct.planet* %iv15, i64 1
+ %iv.next21 = add i32 %iv20, -1
+ %exitcond = icmp eq i32 %iv.next21, 0
+ br i1 %exitcond, label %for.inc20, label %for.body3
+
+for.inc20: ; preds = %for.body3, %for.body
+ %lftr.wideiv11 = trunc i64 %iv.next10 to i32
+ %gep14 = getelementptr %struct.planet* %iv, i64 1
+ %iv.next = add i32 %iv19, -1
+ %exitcond12 = icmp eq i32 %lftr.wideiv11, %nbodies
+ br i1 %exitcond12, label %for.end38, label %for.body
+
+for.end38: ; preds = %for.inc20, %entry
+ ret void
+}
+
+; Function Attrs: nounwind
+declare double @sqrt(double) #1
+
+attributes #0 = { "no-frame-pointer-elim-non-leaf" }
diff --git a/test/CodeGen/X86/pr20088.ll b/test/CodeGen/X86/pr20088.ll
new file mode 100644
index 0000000..3a82962
--- /dev/null
+++ b/test/CodeGen/X86/pr20088.ll
@@ -0,0 +1,9 @@
+; RUN: llc < %s -march=x86-64 -mattr=+avx | FileCheck %s
+
+declare <16 x i8> @llvm.x86.sse41.pblendvb(<16 x i8>, <16 x i8>, <16 x i8>)
+
+define <16 x i8> @foo(<16 x i8> %x) {
+; CHECK: vpblendvb
+ %res = call <16 x i8> @llvm.x86.sse41.pblendvb(<16 x i8> zeroinitializer, <16 x i8> <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>, <16 x i8> %x)
+ ret <16 x i8> %res;
+}
diff --git a/test/CodeGen/X86/pr5145.ll b/test/CodeGen/X86/pr5145.ll
index d048db8..32a797b 100644
--- a/test/CodeGen/X86/pr5145.ll
+++ b/test/CodeGen/X86/pr5145.ll
@@ -5,29 +5,29 @@ define void @atomic_maxmin_i8() {
; CHECK: atomic_maxmin_i8
%1 = atomicrmw max i8* @sc8, i8 5 acquire
; CHECK: [[LABEL1:\.?LBB[0-9]+_[0-9]+]]:
-; CHECK: cmpb
-; CHECK: cmovl
+; CHECK: movsbl
+; CHECK: cmpl
; CHECK: lock
; CHECK-NEXT: cmpxchgb
; CHECK: jne [[LABEL1]]
%2 = atomicrmw min i8* @sc8, i8 6 acquire
; CHECK: [[LABEL3:\.?LBB[0-9]+_[0-9]+]]:
-; CHECK: cmpb
-; CHECK: cmovg
+; CHECK: movsbl
+; CHECK: cmpl
; CHECK: lock
; CHECK-NEXT: cmpxchgb
; CHECK: jne [[LABEL3]]
%3 = atomicrmw umax i8* @sc8, i8 7 acquire
; CHECK: [[LABEL5:\.?LBB[0-9]+_[0-9]+]]:
-; CHECK: cmpb
-; CHECK: cmovb
+; CHECK: movzbl
+; CHECK: cmpl
; CHECK: lock
; CHECK-NEXT: cmpxchgb
; CHECK: jne [[LABEL5]]
%4 = atomicrmw umin i8* @sc8, i8 8 acquire
; CHECK: [[LABEL7:\.?LBB[0-9]+_[0-9]+]]:
-; CHECK: cmpb
-; CHECK: cmova
+; CHECK: movzbl
+; CHECK: cmpl
; CHECK: lock
; CHECK-NEXT: cmpxchgb
; CHECK: jne [[LABEL7]]
diff --git a/test/CodeGen/X86/pshufd-combine-crash.ll b/test/CodeGen/X86/pshufd-combine-crash.ll
new file mode 100644
index 0000000..84c69e3
--- /dev/null
+++ b/test/CodeGen/X86/pshufd-combine-crash.ll
@@ -0,0 +1,14 @@
+; RUN: llc < %s -march=x86-64 -mcpu=corei7 -debug
+
+; REQUIRES: asserts
+
+; Test that the dag combiner doesn't assert if we try to replace a sequence of two
+; v4f32 X86ISD::PSHUFD nodes with a single PSHUFD.
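+; The two <3,0,2,1> masks compose to the single mask <1,3,2,0>, so the pair can
+; legitimately be replaced by one PSHUFD.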
+
+
+define <4 x float> @test(<4 x float> %V) {
+ %1 = shufflevector <4 x float> %V, <4 x float> undef, <4 x i32> <i32 3, i32 0, i32 2, i32 1>
+ %2 = shufflevector <4 x float> %1, <4 x float> undef, <4 x i32> <i32 3, i32 0, i32 2, i32 1>
+ ret <4 x float> %2
+}
+
diff --git a/test/CodeGen/X86/rdpmc.ll b/test/CodeGen/X86/rdpmc.ll
new file mode 100644
index 0000000..7f1ca46
--- /dev/null
+++ b/test/CodeGen/X86/rdpmc.ll
@@ -0,0 +1,22 @@
+; RUN: llc < %s -march=x86-64 -mcpu=generic | FileCheck %s --check-prefix=CHECK --check-prefix=X86-64
+; RUN: llc < %s -march=x86 -mcpu=generic | FileCheck %s --check-prefix=CHECK --check-prefix=X86
+
+; Verify that we correctly lower the "Read Performance-Monitoring Counters"
+; x86 builtin.
+
+
+define i64 @test_builtin_read_pmc(i32 %ID) {
+ %1 = tail call i64 @llvm.x86.rdpmc(i32 %ID)
+ ret i64 %1
+}
+; CHECK-LABEL: test_builtin_read_pmc
+; CHECK: rdpmc
+; X86-NOT: shlq
+; X86-NOT: or
+; X86-64: shlq
+; X86-64: or
+; CHECK-NOT: mov
+; CHECK: ret
+
+declare i64 @llvm.x86.rdpmc(i32 %ID)
+
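On x86-64, RDPMC returns the counter split across EDX:EAX, which is why the
64-bit run expects a shlq/or pair while the 32-bit run must not need one. A
sketch of the same composition in C with GCC/Clang inline assembly (not the
intrinsic's actual lowering):

#include <stdint.h>

/* RDPMC takes the counter index in ECX and returns EDX:EAX; the 64-bit
   result is assembled exactly as the shlq/or the test checks for. */
static uint64_t read_pmc(uint32_t id) {
    uint32_t lo, hi;
    __asm__ __volatile__("rdpmc" : "=a"(lo), "=d"(hi) : "c"(id));
    return ((uint64_t)hi << 32) | lo;
}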
diff --git a/test/CodeGen/X86/shift-parts.ll b/test/CodeGen/X86/shift-parts.ll
index ce4f538..ddad307 100644
--- a/test/CodeGen/X86/shift-parts.ll
+++ b/test/CodeGen/X86/shift-parts.ll
@@ -1,10 +1,12 @@
-; RUN: llc < %s -march=x86-64 | grep shrdq
+; RUN: llc -march=x86-64 < %s | FileCheck %s
; PR4736
%0 = type { i32, i8, [35 x i8] }
@g_144 = external global %0, align 8 ; <%0*> [#uses=1]
+; CHECK: shrdq
+
define i32 @int87(i32 %uint64p_8) nounwind {
entry:
%srcval4 = load i320* bitcast (%0* @g_144 to i320*), align 8 ; <i320> [#uses=1]
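The expected shrdq is the double-precision shift: it funnels bits from a
second register into the one being shifted. For a 128-bit value held in
hi:lo, the low 64 bits of a right shift by n can be written as the sketch
below (the function name is ours):

#include <stdint.h>

/* shrd semantics for 0 < n < 64: low word shifted right, with the
   vacated bits filled from the high word. */
static uint64_t shrd64(uint64_t lo, uint64_t hi, unsigned n) {
    return (lo >> n) | (hi << (64 - n));
}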
diff --git a/test/CodeGen/X86/sqrt.ll b/test/CodeGen/X86/sqrt.ll
new file mode 100644
index 0000000..be7c6e8
--- /dev/null
+++ b/test/CodeGen/X86/sqrt.ll
@@ -0,0 +1,26 @@
+; RUN: llc < %s -mtriple=x86_64-apple-darwin -mattr=-avx,+sse2 | FileCheck %s --check-prefix=SSE2
+; RUN: llc < %s -mtriple=x86_64-apple-darwin -mattr=-avx,+sse2 -fast-isel -fast-isel-abort | FileCheck %s --check-prefix=SSE2
+; RUN: llc < %s -mtriple=x86_64-apple-darwin -mattr=-avx2,+avx | FileCheck %s --check-prefix=AVX
+; RUN: llc < %s -mtriple=x86_64-apple-darwin -mattr=-avx2,+avx -fast-isel -fast-isel-abort | FileCheck %s --check-prefix=AVX
+
+define float @test_sqrt_f32(float %a) {
+; SSE2-LABEL: test_sqrt_f32
+; SSE2: sqrtss %xmm0, %xmm0
+; AVX-LABEL: test_sqrt_f32
+; AVX: vsqrtss %xmm0, %xmm0
+ %res = call float @llvm.sqrt.f32(float %a)
+ ret float %res
+}
+declare float @llvm.sqrt.f32(float) nounwind readnone
+
+define double @test_sqrt_f64(double %a) {
+; SSE2-LABEL: test_sqrt_f64
+; SSE2: sqrtsd %xmm0, %xmm0
+; AVX-LABEL: test_sqrt_f64
+; AVX: vsqrtsd %xmm0, %xmm0
+ %res = call double @llvm.sqrt.f64(double %a)
+ ret double %res
+}
+declare double @llvm.sqrt.f64(double) nounwind readnone
+
+
diff --git a/test/CodeGen/X86/sse2-intrinsics-x86.ll b/test/CodeGen/X86/sse2-intrinsics-x86.ll
index cfc892d..c906ecd 100644
--- a/test/CodeGen/X86/sse2-intrinsics-x86.ll
+++ b/test/CodeGen/X86/sse2-intrinsics-x86.ll
@@ -717,3 +717,30 @@ define void @test_x86_sse2_pause() {
ret void
}
declare void @llvm.x86.sse2.pause() nounwind
+
+define <4 x i32> @test_x86_sse2_pshuf_d(<4 x i32> %a) {
+; CHECK-LABEL: test_x86_sse2_pshuf_d:
+; CHECK: pshufd $27
+entry:
+ %res = call <4 x i32> @llvm.x86.sse2.pshuf.d(<4 x i32> %a, i8 27) nounwind readnone
+ ret <4 x i32> %res
+}
+declare <4 x i32> @llvm.x86.sse2.pshuf.d(<4 x i32>, i8) nounwind readnone
+
+define <8 x i16> @test_x86_sse2_pshufl_w(<8 x i16> %a) {
+; CHECK-LABEL: test_x86_sse2_pshufl_w:
+; CHECK: pshuflw $27
+entry:
+ %res = call <8 x i16> @llvm.x86.sse2.pshufl.w(<8 x i16> %a, i8 27) nounwind readnone
+ ret <8 x i16> %res
+}
+declare <8 x i16> @llvm.x86.sse2.pshufl.w(<8 x i16>, i8) nounwind readnone
+
+define <8 x i16> @test_x86_sse2_pshufh_w(<8 x i16> %a) {
+; CHECK-LABEL: test_x86_sse2_pshufh_w:
+; CHECK: pshufhw $27
+entry:
+ %res = call <8 x i16> @llvm.x86.sse2.pshufh.w(<8 x i16> %a, i8 27) nounwind readnone
+ ret <8 x i16> %res
+}
+declare <8 x i16> @llvm.x86.sse2.pshufh.w(<8 x i16>, i8) nounwind readnone
diff --git a/test/CodeGen/X86/sse3-avx-addsub-2.ll b/test/CodeGen/X86/sse3-avx-addsub-2.ll
new file mode 100644
index 0000000..b7706cc
--- /dev/null
+++ b/test/CodeGen/X86/sse3-avx-addsub-2.ll
@@ -0,0 +1,318 @@
+; RUN: llc < %s -march=x86-64 -mtriple=x86_64-unknown-linux-gnu -mcpu=corei7 | FileCheck %s -check-prefix=CHECK -check-prefix=SSE
+; RUN: llc < %s -march=x86-64 -mtriple=x86_64-unknown-linux-gnu -mcpu=corei7-avx | FileCheck %s -check-prefix=CHECK -check-prefix=AVX
+
+
+; Verify that we correctly generate 'addsub' instructions from a sequence of
+; vector extracts + float add/sub + vector inserts (a scalar reference model
+; follows this file's diff).
+
+define <4 x float> @test1(<4 x float> %A, <4 x float> %B) {
+ %1 = extractelement <4 x float> %A, i32 0
+ %2 = extractelement <4 x float> %B, i32 0
+ %sub = fsub float %1, %2
+ %3 = extractelement <4 x float> %A, i32 2
+ %4 = extractelement <4 x float> %B, i32 2
+ %sub2 = fsub float %3, %4
+ %5 = extractelement <4 x float> %A, i32 1
+ %6 = extractelement <4 x float> %B, i32 1
+ %add = fadd float %5, %6
+ %7 = extractelement <4 x float> %A, i32 3
+ %8 = extractelement <4 x float> %B, i32 3
+ %add2 = fadd float %7, %8
+ %vecinsert1 = insertelement <4 x float> undef, float %add, i32 1
+ %vecinsert2 = insertelement <4 x float> %vecinsert1, float %add2, i32 3
+ %vecinsert3 = insertelement <4 x float> %vecinsert2, float %sub, i32 0
+ %vecinsert4 = insertelement <4 x float> %vecinsert3, float %sub2, i32 2
+ ret <4 x float> %vecinsert4
+}
+; CHECK-LABEL: test1
+; SSE: addsubps
+; AVX: vaddsubps
+; CHECK-NEXT: ret
+
+
+define <4 x float> @test2(<4 x float> %A, <4 x float> %B) {
+ %1 = extractelement <4 x float> %A, i32 2
+ %2 = extractelement <4 x float> %B, i32 2
+ %sub2 = fsub float %1, %2
+ %3 = extractelement <4 x float> %A, i32 3
+ %4 = extractelement <4 x float> %B, i32 3
+ %add2 = fadd float %3, %4
+ %vecinsert1 = insertelement <4 x float> undef, float %sub2, i32 2
+ %vecinsert2 = insertelement <4 x float> %vecinsert1, float %add2, i32 3
+ ret <4 x float> %vecinsert2
+}
+; CHECK-LABEL: test2
+; SSE: addsubps
+; AVX: vaddsubps
+; CHECK-NEXT: ret
+
+
+define <4 x float> @test3(<4 x float> %A, <4 x float> %B) {
+ %1 = extractelement <4 x float> %A, i32 0
+ %2 = extractelement <4 x float> %B, i32 0
+ %sub = fsub float %1, %2
+ %3 = extractelement <4 x float> %A, i32 3
+ %4 = extractelement <4 x float> %B, i32 3
+ %add = fadd float %4, %3
+ %vecinsert1 = insertelement <4 x float> undef, float %sub, i32 0
+ %vecinsert2 = insertelement <4 x float> %vecinsert1, float %add, i32 3
+ ret <4 x float> %vecinsert2
+}
+; CHECK-LABEL: test3
+; SSE: addsubps
+; AVX: vaddsubps
+; CHECK-NEXT: ret
+
+
+define <4 x float> @test4(<4 x float> %A, <4 x float> %B) {
+ %1 = extractelement <4 x float> %A, i32 2
+ %2 = extractelement <4 x float> %B, i32 2
+ %sub = fsub float %1, %2
+ %3 = extractelement <4 x float> %A, i32 1
+ %4 = extractelement <4 x float> %B, i32 1
+ %add = fadd float %3, %4
+ %vecinsert1 = insertelement <4 x float> undef, float %sub, i32 2
+ %vecinsert2 = insertelement <4 x float> %vecinsert1, float %add, i32 1
+ ret <4 x float> %vecinsert2
+}
+; CHECK-LABEL: test4
+; SSE: addsubps
+; AVX: vaddsubps
+; CHECK-NEXT: ret
+
+
+define <4 x float> @test5(<4 x float> %A, <4 x float> %B) {
+ %1 = extractelement <4 x float> %A, i32 0
+ %2 = extractelement <4 x float> %B, i32 0
+ %sub2 = fsub float %1, %2
+ %3 = extractelement <4 x float> %A, i32 1
+ %4 = extractelement <4 x float> %B, i32 1
+ %add2 = fadd float %3, %4
+ %vecinsert1 = insertelement <4 x float> undef, float %sub2, i32 0
+ %vecinsert2 = insertelement <4 x float> %vecinsert1, float %add2, i32 1
+ ret <4 x float> %vecinsert2
+}
+; CHECK-LABEL: test5
+; SSE: addsubps
+; AVX: vaddsubps
+; CHECK-NEXT: ret
+
+
+define <4 x float> @test6(<4 x float> %A, <4 x float> %B) {
+ %1 = extractelement <4 x float> %A, i32 0
+ %2 = extractelement <4 x float> %B, i32 0
+ %sub = fsub float %1, %2
+ %3 = extractelement <4 x float> %A, i32 2
+ %4 = extractelement <4 x float> %B, i32 2
+ %sub2 = fsub float %3, %4
+ %5 = extractelement <4 x float> %A, i32 1
+ %6 = extractelement <4 x float> %B, i32 1
+ %add = fadd float %5, %6
+ %7 = extractelement <4 x float> %A, i32 3
+ %8 = extractelement <4 x float> %B, i32 3
+ %add2 = fadd float %7, %8
+ %vecinsert1 = insertelement <4 x float> undef, float %add, i32 1
+ %vecinsert2 = insertelement <4 x float> %vecinsert1, float %add2, i32 3
+ %vecinsert3 = insertelement <4 x float> %vecinsert2, float %sub, i32 0
+ %vecinsert4 = insertelement <4 x float> %vecinsert3, float %sub2, i32 2
+ ret <4 x float> %vecinsert4
+}
+; CHECK-LABEL: test6
+; SSE: addsubps
+; AVX: vaddsubps
+; CHECK-NEXT: ret
+
+
+define <4 x double> @test7(<4 x double> %A, <4 x double> %B) {
+ %1 = extractelement <4 x double> %A, i32 0
+ %2 = extractelement <4 x double> %B, i32 0
+ %sub = fsub double %1, %2
+ %3 = extractelement <4 x double> %A, i32 2
+ %4 = extractelement <4 x double> %B, i32 2
+ %sub2 = fsub double %3, %4
+ %5 = extractelement <4 x double> %A, i32 1
+ %6 = extractelement <4 x double> %B, i32 1
+ %add = fadd double %5, %6
+ %7 = extractelement <4 x double> %A, i32 3
+ %8 = extractelement <4 x double> %B, i32 3
+ %add2 = fadd double %7, %8
+ %vecinsert1 = insertelement <4 x double> undef, double %add, i32 1
+ %vecinsert2 = insertelement <4 x double> %vecinsert1, double %add2, i32 3
+ %vecinsert3 = insertelement <4 x double> %vecinsert2, double %sub, i32 0
+ %vecinsert4 = insertelement <4 x double> %vecinsert3, double %sub2, i32 2
+ ret <4 x double> %vecinsert4
+}
+; CHECK-LABEL: test7
+; SSE: addsubpd
+; SSE-NEXT: addsubpd
+; AVX: vaddsubpd
+; AVX-NOT: vaddsubpd
+; CHECK: ret
+
+
+define <2 x double> @test8(<2 x double> %A, <2 x double> %B) {
+ %1 = extractelement <2 x double> %A, i32 0
+ %2 = extractelement <2 x double> %B, i32 0
+ %sub = fsub double %1, %2
+ %3 = extractelement <2 x double> %A, i32 1
+ %4 = extractelement <2 x double> %B, i32 1
+ %add = fadd double %3, %4
+ %vecinsert1 = insertelement <2 x double> undef, double %sub, i32 0
+ %vecinsert2 = insertelement <2 x double> %vecinsert1, double %add, i32 1
+ ret <2 x double> %vecinsert2
+}
+; CHECK-LABEL: test8
+; SSE: addsubpd
+; AVX: vaddsubpd
+; CHECK: ret
+
+
+define <8 x float> @test9(<8 x float> %A, <8 x float> %B) {
+ %1 = extractelement <8 x float> %A, i32 0
+ %2 = extractelement <8 x float> %B, i32 0
+ %sub = fsub float %1, %2
+ %3 = extractelement <8 x float> %A, i32 2
+ %4 = extractelement <8 x float> %B, i32 2
+ %sub2 = fsub float %3, %4
+ %5 = extractelement <8 x float> %A, i32 1
+ %6 = extractelement <8 x float> %B, i32 1
+ %add = fadd float %5, %6
+ %7 = extractelement <8 x float> %A, i32 3
+ %8 = extractelement <8 x float> %B, i32 3
+ %add2 = fadd float %7, %8
+ %9 = extractelement <8 x float> %A, i32 4
+ %10 = extractelement <8 x float> %B, i32 4
+ %sub3 = fsub float %9, %10
+ %11 = extractelement <8 x float> %A, i32 6
+ %12 = extractelement <8 x float> %B, i32 6
+ %sub4 = fsub float %11, %12
+ %13 = extractelement <8 x float> %A, i32 5
+ %14 = extractelement <8 x float> %B, i32 5
+ %add3 = fadd float %13, %14
+ %15 = extractelement <8 x float> %A, i32 7
+ %16 = extractelement <8 x float> %B, i32 7
+ %add4 = fadd float %15, %16
+ %vecinsert1 = insertelement <8 x float> undef, float %add, i32 1
+ %vecinsert2 = insertelement <8 x float> %vecinsert1, float %add2, i32 3
+ %vecinsert3 = insertelement <8 x float> %vecinsert2, float %sub, i32 0
+ %vecinsert4 = insertelement <8 x float> %vecinsert3, float %sub2, i32 2
+ %vecinsert5 = insertelement <8 x float> %vecinsert4, float %add3, i32 5
+ %vecinsert6 = insertelement <8 x float> %vecinsert5, float %add4, i32 7
+ %vecinsert7 = insertelement <8 x float> %vecinsert6, float %sub3, i32 4
+ %vecinsert8 = insertelement <8 x float> %vecinsert7, float %sub4, i32 6
+ ret <8 x float> %vecinsert8
+}
+; CHECK-LABEL: test9
+; SSE: addsubps
+; SSE-NEXT: addsubps
+; AVX: vaddsubps
+; AVX-NOT: vaddsubps
+; CHECK: ret
+
+
+; Verify that we don't generate an addsub instruction for any of the
+; following functions.
+define <4 x float> @test10(<4 x float> %A, <4 x float> %B) {
+ %1 = extractelement <4 x float> %A, i32 0
+ %2 = extractelement <4 x float> %B, i32 0
+ %sub = fsub float %1, %2
+ %vecinsert1 = insertelement <4 x float> undef, float %sub, i32 0
+ ret <4 x float> %vecinsert1
+}
+; CHECK-LABEL: test10
+; CHECK-NOT: addsubps
+; CHECK: ret
+
+
+define <4 x float> @test11(<4 x float> %A, <4 x float> %B) {
+ %1 = extractelement <4 x float> %A, i32 2
+ %2 = extractelement <4 x float> %B, i32 2
+ %sub = fsub float %1, %2
+ %vecinsert1 = insertelement <4 x float> undef, float %sub, i32 2
+ ret <4 x float> %vecinsert1
+}
+; CHECK-LABEL: test11
+; CHECK-NOT: addsubps
+; CHECK: ret
+
+
+define <4 x float> @test12(<4 x float> %A, <4 x float> %B) {
+ %1 = extractelement <4 x float> %A, i32 1
+ %2 = extractelement <4 x float> %B, i32 1
+ %add = fadd float %1, %2
+ %vecinsert1 = insertelement <4 x float> undef, float %add, i32 1
+ ret <4 x float> %vecinsert1
+}
+; CHECK-LABEL: test12
+; CHECK-NOT: addsubps
+; CHECK: ret
+
+
+define <4 x float> @test13(<4 x float> %A, <4 x float> %B) {
+ %1 = extractelement <4 x float> %A, i32 3
+ %2 = extractelement <4 x float> %B, i32 3
+ %add = fadd float %1, %2
+ %vecinsert1 = insertelement <4 x float> undef, float %add, i32 3
+ ret <4 x float> %vecinsert1
+}
+; CHECK-LABEL: test13
+; CHECK-NOT: addsubps
+; CHECK: ret
+
+
+define <4 x float> @test14(<4 x float> %A, <4 x float> %B) {
+ %1 = extractelement <4 x float> %A, i32 0
+ %2 = extractelement <4 x float> %B, i32 0
+ %sub = fsub float %1, %2
+ %3 = extractelement <4 x float> %A, i32 2
+ %4 = extractelement <4 x float> %B, i32 2
+ %sub2 = fsub float %3, %4
+ %vecinsert1 = insertelement <4 x float> undef, float %sub, i32 0
+ %vecinsert2 = insertelement <4 x float> %vecinsert1, float %sub2, i32 2
+ ret <4 x float> %vecinsert2
+}
+; CHECK-LABEL: test14
+; CHECK-NOT: addsubps
+; CHECK: ret
+
+
+define <4 x float> @test15(<4 x float> %A, <4 x float> %B) {
+ %1 = extractelement <4 x float> %A, i32 1
+ %2 = extractelement <4 x float> %B, i32 1
+ %add = fadd float %1, %2
+ %3 = extractelement <4 x float> %A, i32 3
+ %4 = extractelement <4 x float> %B, i32 3
+ %add2 = fadd float %3, %4
+ %vecinsert1 = insertelement <4 x float> undef, float %add, i32 1
+ %vecinsert2 = insertelement <4 x float> %vecinsert1, float %add2, i32 3
+ ret <4 x float> %vecinsert2
+}
+; CHECK-LABEL: test15
+; CHECK-NOT: addsubps
+; CHECK: ret
+
+
+define <4 x float> @test16(<4 x float> %A, <4 x float> %B) {
+ %1 = extractelement <4 x float> %A, i32 0
+ %2 = extractelement <4 x float> %B, i32 0
+ %sub = fsub float %1, undef
+ %3 = extractelement <4 x float> %A, i32 2
+ %4 = extractelement <4 x float> %B, i32 2
+ %sub2 = fsub float %3, %4
+ %5 = extractelement <4 x float> %A, i32 1
+ %6 = extractelement <4 x float> %B, i32 1
+ %add = fadd float %5, undef
+ %7 = extractelement <4 x float> %A, i32 3
+ %8 = extractelement <4 x float> %B, i32 3
+ %add2 = fadd float %7, %8
+ %vecinsert1 = insertelement <4 x float> undef, float %add, i32 1
+ %vecinsert2 = insertelement <4 x float> %vecinsert1, float %add2, i32 3
+ %vecinsert3 = insertelement <4 x float> %vecinsert2, float %sub, i32 0
+ %vecinsert4 = insertelement <4 x float> %vecinsert3, float %sub2, i32 2
+ ret <4 x float> %vecinsert4
+}
+; CHECK-LABEL: test16
+; CHECK-NOT: addsubps
+; CHECK: ret
+
+
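As a scalar reference for the pattern these tests match (a model of the
documented ADDSUBPS behavior, not of the matcher itself): even lanes
subtract, odd lanes add.

/* Scalar model of ADDSUBPS: dst[i] = a[i] - b[i] for even i,
   a[i] + b[i] for odd i. The tests above build exactly this pattern
   out of extracts, scalar fadd/fsub, and inserts. */
static void addsub_ps_ref(float dst[4], const float a[4], const float b[4]) {
    for (int i = 0; i < 4; ++i)
        dst[i] = (i & 1) ? a[i] + b[i] : a[i] - b[i];
}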
diff --git a/test/CodeGen/X86/sse3-avx-addsub.ll b/test/CodeGen/X86/sse3-avx-addsub.ll
new file mode 100644
index 0000000..8b66743
--- /dev/null
+++ b/test/CodeGen/X86/sse3-avx-addsub.ll
@@ -0,0 +1,296 @@
+; RUN: llc < %s -march=x86-64 -mcpu=corei7 | FileCheck %s -check-prefix=SSE -check-prefix=CHECK
+; RUN: llc < %s -march=x86-64 -mcpu=corei7-avx | FileCheck %s -check-prefix=AVX -check-prefix=CHECK
+
+; Test ADDSUB ISel patterns.
+
+; The functions below were generated from the following source:
+;
+; typedef double double2 __attribute__((ext_vector_type(2)));
+; typedef double double4 __attribute__((ext_vector_type(4)));
+; typedef float float4 __attribute__((ext_vector_type(4)));
+; typedef float float8 __attribute__((ext_vector_type(8)));
+;
+; float4 test1(float4 A, float4 B) {
+; float4 X = A - B;
+; float4 Y = A + B;
+; return (float4){X[0], Y[1], X[2], Y[3]};
+; }
+;
+; float8 test2(float8 A, float8 B) {
+; float8 X = A - B;
+; float8 Y = A + B;
+; return (float8){X[0], Y[1], X[2], Y[3], X[4], Y[5], X[6], Y[7]};
+; }
+;
+; double4 test3(double4 A, double4 B) {
+; double4 X = A - B;
+; double4 Y = A + B;
+; return (double4){X[0], Y[1], X[2], Y[3]};
+; }
+;
+; double2 test4(double2 A, double2 B) {
+; double2 X = A - B;
+; double2 Y = A + B;
+; return (double2){X[0], Y[1]};
+; }
+
+define <4 x float> @test1(<4 x float> %A, <4 x float> %B) {
+ %sub = fsub <4 x float> %A, %B
+ %add = fadd <4 x float> %A, %B
+ %vecinit6 = shufflevector <4 x float> %sub, <4 x float> %add, <4 x i32> <i32 0, i32 5, i32 2, i32 7>
+ ret <4 x float> %vecinit6
+}
+; CHECK-LABEL: test1
+; SSE: addsubps
+; AVX: vaddsubps
+; CHECK-NEXT: ret
+
+
+define <8 x float> @test2(<8 x float> %A, <8 x float> %B) {
+ %sub = fsub <8 x float> %A, %B
+ %add = fadd <8 x float> %A, %B
+ %vecinit14 = shufflevector <8 x float> %sub, <8 x float> %add, <8 x i32> <i32 0, i32 9, i32 2, i32 11, i32 4, i32 13, i32 6, i32 15>
+ ret <8 x float> %vecinit14
+}
+; CHECK-LABEL: test2
+; SSE: addsubps
+; SSE-NEXT: addsubps
+; AVX: vaddsubps
+; AVX-NOT: vaddsubps
+; CHECK: ret
+
+
+define <4 x double> @test3(<4 x double> %A, <4 x double> %B) {
+ %sub = fsub <4 x double> %A, %B
+ %add = fadd <4 x double> %A, %B
+ %vecinit6 = shufflevector <4 x double> %sub, <4 x double> %add, <4 x i32> <i32 0, i32 5, i32 2, i32 7>
+ ret <4 x double> %vecinit6
+}
+; CHECK-LABEL: test3
+; SSE: addsubpd
+; SSE: addsubpd
+; AVX: vaddsubpd
+; AVX-NOT: vaddsubpd
+; CHECK: ret
+
+
+define <2 x double> @test4(<2 x double> %A, <2 x double> %B) #0 {
+ %add = fadd <2 x double> %A, %B
+ %sub = fsub <2 x double> %A, %B
+ %vecinit2 = shufflevector <2 x double> %sub, <2 x double> %add, <2 x i32> <i32 0, i32 3>
+ ret <2 x double> %vecinit2
+}
+; CHECK-LABEL: test4
+; SSE: addsubpd
+; AVX: vaddsubpd
+; CHECK-NEXT: ret
+
+
+define <4 x float> @test1b(<4 x float> %A, <4 x float>* %B) {
+ %1 = load <4 x float>* %B
+ %add = fadd <4 x float> %A, %1
+ %sub = fsub <4 x float> %A, %1
+ %vecinit6 = shufflevector <4 x float> %sub, <4 x float> %add, <4 x i32> <i32 0, i32 5, i32 2, i32 7>
+ ret <4 x float> %vecinit6
+}
+; CHECK-LABEL: test1b
+; SSE: addsubps
+; AVX: vaddsubps
+; CHECK-NEXT: ret
+
+
+define <8 x float> @test2b(<8 x float> %A, <8 x float>* %B) {
+ %1 = load <8 x float>* %B
+ %add = fadd <8 x float> %A, %1
+ %sub = fsub <8 x float> %A, %1
+ %vecinit14 = shufflevector <8 x float> %sub, <8 x float> %add, <8 x i32> <i32 0, i32 9, i32 2, i32 11, i32 4, i32 13, i32 6, i32 15>
+ ret <8 x float> %vecinit14
+}
+; CHECK-LABEL: test2b
+; SSE: addsubps
+; SSE-NEXT: addsubps
+; AVX: vaddsubps
+; AVX-NOT: vaddsubps
+; CHECK: ret
+
+
+define <4 x double> @test3b(<4 x double> %A, <4 x double>* %B) {
+ %1 = load <4 x double>* %B
+ %add = fadd <4 x double> %A, %1
+ %sub = fsub <4 x double> %A, %1
+ %vecinit6 = shufflevector <4 x double> %sub, <4 x double> %add, <4 x i32> <i32 0, i32 5, i32 2, i32 7>
+ ret <4 x double> %vecinit6
+}
+; CHECK-LABEL: test3b
+; SSE: addsubpd
+; SSE: addsubpd
+; AVX: vaddsubpd
+; AVX-NOT: vaddsubpd
+; CHECK: ret
+
+
+define <2 x double> @test4b(<2 x double> %A, <2 x double>* %B) {
+ %1 = load <2 x double>* %B
+ %sub = fsub <2 x double> %A, %1
+ %add = fadd <2 x double> %A, %1
+ %vecinit2 = shufflevector <2 x double> %sub, <2 x double> %add, <2 x i32> <i32 0, i32 3>
+ ret <2 x double> %vecinit2
+}
+; CHECK-LABEL: test4b
+; SSE: addsubpd
+; AVX: vaddsubpd
+; CHECK-NEXT: ret
+
+; The functions below were generated from the following source. These put the
+; add in the even lanes, so an extra negation is needed before ADDSUB applies;
+; see the sketch after this file's diff.
+;
+; float4 test1(float4 A, float4 B) {
+; float4 X = A + B;
+; float4 Y = A - B;
+; return (float4){X[0], Y[1], X[2], Y[3]};
+; }
+;
+; float8 test2(float8 A, float8 B) {
+; float8 X = A + B;
+; float8 Y = A - B;
+; return (float8){X[0], Y[1], X[2], Y[3], X[4], Y[5], X[6], Y[7]};
+; }
+;
+; double4 test3(double4 A, double4 B) {
+; double4 X = A + B;
+; double4 Y = A - B;
+; return (double4){X[0], Y[1], X[2], Y[3]};
+; }
+;
+; double2 test4(double2 A, double2 B) {
+; double2 X = A + B;
+; double2 Y = A - B;
+; return (double2){X[0], Y[1]};
+; }
+
+define <4 x float> @test5(<4 x float> %A, <4 x float> %B) {
+ %sub = fsub <4 x float> %A, %B
+ %add = fadd <4 x float> %A, %B
+ %vecinit6 = shufflevector <4 x float> %add, <4 x float> %sub, <4 x i32> <i32 0, i32 5, i32 2, i32 7>
+ ret <4 x float> %vecinit6
+}
+; CHECK-LABEL: test5
+; SSE: xorps
+; SSE-NEXT: addsubps
+; AVX: vxorps
+; AVX-NEXT: vaddsubps
+; CHECK: ret
+
+
+define <8 x float> @test6(<8 x float> %A, <8 x float> %B) {
+ %sub = fsub <8 x float> %A, %B
+ %add = fadd <8 x float> %A, %B
+ %vecinit14 = shufflevector <8 x float> %add, <8 x float> %sub, <8 x i32> <i32 0, i32 9, i32 2, i32 11, i32 4, i32 13, i32 6, i32 15>
+ ret <8 x float> %vecinit14
+}
+; CHECK-LABEL: test6
+; SSE: xorps
+; SSE-NEXT: addsubps
+; SSE: xorps
+; SSE-NEXT: addsubps
+; AVX: vxorps
+; AVX-NEXT: vaddsubps
+; AVX-NOT: vxorps
+; AVX-NOT: vaddsubps
+; CHECK: ret
+
+
+define <4 x double> @test7(<4 x double> %A, <4 x double> %B) {
+ %sub = fsub <4 x double> %A, %B
+ %add = fadd <4 x double> %A, %B
+ %vecinit6 = shufflevector <4 x double> %add, <4 x double> %sub, <4 x i32> <i32 0, i32 5, i32 2, i32 7>
+ ret <4 x double> %vecinit6
+}
+; CHECK-LABEL: test7
+; SSE: xorpd
+; SSE-NEXT: addsubpd
+; SSE: xorpd
+; SSE-NEXT: addsubpd
+; AVX: vxorpd
+; AVX-NEXT: vaddsubpd
+; AVX-NOT: vxorpd
+; AVX-NOT: vaddsubpd
+; CHECK: ret
+
+
+define <2 x double> @test8(<2 x double> %A, <2 x double> %B) #0 {
+ %add = fadd <2 x double> %A, %B
+ %sub = fsub <2 x double> %A, %B
+ %vecinit2 = shufflevector <2 x double> %add, <2 x double> %sub, <2 x i32> <i32 0, i32 3>
+ ret <2 x double> %vecinit2
+}
+; CHECK-LABEL: test8
+; SSE: xorpd
+; SSE-NEXT: addsubpd
+; AVX: vxorpd
+; AVX-NEXT: vaddsubpd
+; CHECK: ret
+
+
+define <4 x float> @test5b(<4 x float> %A, <4 x float> %B) {
+ %sub = fsub <4 x float> %A, %B
+ %add = fadd <4 x float> %B, %A
+ %vecinit6 = shufflevector <4 x float> %add, <4 x float> %sub, <4 x i32> <i32 0, i32 5, i32 2, i32 7>
+ ret <4 x float> %vecinit6
+}
+; CHECK-LABEL: test5b
+; SSE: xorps
+; SSE-NEXT: addsubps
+; AVX: vxorps
+; AVX-NEXT: vaddsubps
+; CHECK: ret
+
+
+define <8 x float> @test6b(<8 x float> %A, <8 x float> %B) {
+ %sub = fsub <8 x float> %A, %B
+ %add = fadd <8 x float> %B, %A
+ %vecinit14 = shufflevector <8 x float> %add, <8 x float> %sub, <8 x i32> <i32 0, i32 9, i32 2, i32 11, i32 4, i32 13, i32 6, i32 15>
+ ret <8 x float> %vecinit14
+}
+; CHECK-LABEL: test6b
+; SSE: xorps
+; SSE-NEXT: addsubps
+; SSE: xorps
+; SSE-NEXT: addsubps
+; AVX: vxorps
+; AVX-NEXT: vaddsubps
+; AVX-NOT: vxorps
+; AVX-NOT: vaddsubps
+; CHECK: ret
+
+
+define <4 x double> @test7b(<4 x double> %A, <4 x double> %B) {
+ %sub = fsub <4 x double> %A, %B
+ %add = fadd <4 x double> %B, %A
+ %vecinit6 = shufflevector <4 x double> %add, <4 x double> %sub, <4 x i32> <i32 0, i32 5, i32 2, i32 7>
+ ret <4 x double> %vecinit6
+}
+; CHECK-LABEL: test7b
+; SSE: xorpd
+; SSE-NEXT: addsubpd
+; SSE: xorpd
+; SSE-NEXT: addsubpd
+; AVX: vxorpd
+; AVX-NEXT: vaddsubpd
+; AVX-NOT: vxorpd
+; AVX-NOT: vaddsubpd
+; CHECK: ret
+
+
+define <2 x double> @test8b(<2 x double> %A, <2 x double> %B) #0 {
+ %add = fadd <2 x double> %B, %A
+ %sub = fsub <2 x double> %A, %B
+ %vecinit2 = shufflevector <2 x double> %add, <2 x double> %sub, <2 x i32> <i32 0, i32 3>
+ ret <2 x double> %vecinit2
+}
+; CHECK-LABEL: test8b
+; SSE: xorpd
+; SSE-NEXT: addsubpd
+; AVX: vxorpd
+; AVX-NEXT: vaddsubpd
+; CHECK: ret
+
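Tests 5 through 8b invert the lane pattern (add in even lanes, subtract in
odd lanes), which ADDSUB cannot express directly; the xorps the CHECK lines
expect negates B first so the native pattern reappears, since A + B equals
A - (-B) and A - B equals A + (-B). A scalar sketch of that rewrite (names
are ours):

/* The SUBADD pattern via negation: flip B's signs (the xorps with a
   sign-bit mask), then apply the plain ADDSUB pattern. */
static void subadd_ps_ref(float dst[4], const float a[4], const float b[4]) {
    for (int i = 0; i < 4; ++i) {
        float nb = -b[i];                         /* xorps */
        dst[i] = (i & 1) ? a[i] + nb : a[i] - nb; /* addsubps */
    }
}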
diff --git a/test/CodeGen/X86/sse41-blend.ll b/test/CodeGen/X86/sse41-blend.ll
index 8ad7987..3a48121 100644
--- a/test/CodeGen/X86/sse41-blend.ll
+++ b/test/CodeGen/X86/sse41-blend.ll
@@ -117,6 +117,24 @@ define <16 x i8> @constant_pblendvb(<16 x i8> %xyzw, <16 x i8> %abcd) {
%1 = select <16 x i1> <i1 false, i1 false, i1 true, i1 false, i1 true, i1 true, i1 true, i1 false, i1 false, i1 false, i1 true, i1 false, i1 true, i1 true, i1 true, i1 false>, <16 x i8> %xyzw, <16 x i8> %abcd
ret <16 x i8> %1
}
+
declare <16 x i8> @llvm.x86.sse41.pblendvb(<16 x i8>, <16 x i8>, <16 x i8>)
declare <4 x float> @llvm.x86.sse41.blendvps(<4 x float>, <4 x float>, <4 x float>)
declare <2 x double> @llvm.x86.sse41.blendvpd(<2 x double>, <2 x double>, <2 x double>)
+
+;; Two tests for shufflevectors that are optimized into a blend with an
+;; immediate mask.
+; CHECK-LABEL: @blend_shufflevector_4xfloat
+; CHECK: blendps $6, %xmm1, %xmm0
+; CHECK: ret
+define <4 x float> @blend_shufflevector_4xfloat(<4 x float> %a, <4 x float> %b) {
+ %1 = shufflevector <4 x float> %a, <4 x float> %b, <4 x i32> <i32 0, i32 5, i32 6, i32 3>
+ ret <4 x float> %1
+}
+
+; CHECK-LABEL: @blend_shufflevector_8xi16
+; CHECK: pblendw $134, %xmm1, %xmm0
+; CHECK: ret
+define <8 x i16> @blend_shufflevector_8xi16(<8 x i16> %a, <8 x i16> %b) {
+ %1 = shufflevector <8 x i16> %a, <8 x i16> %b, <8 x i32> <i32 0, i32 9, i32 10, i32 3, i32 4, i32 5, i32 6, i32 15>
+ ret <8 x i16> %1
+}
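Both expected immediates follow the same rule: bit i of the blend immediate
selects the second source for lane i. A small helper showing the derivation
(the function name is ours, for illustration):

/* Derive a blendps/pblendw immediate from a shufflevector mask:
   lanes whose index reaches into the second vector set their bit.
   {0,5,6,3} over 4 lanes -> 0b0110 = 6;
   {0,9,10,3,4,5,6,15} over 8 lanes -> 0b10000110 = 134. */
static unsigned blend_imm(const int *mask, int lanes) {
    unsigned imm = 0;
    for (int i = 0; i < lanes; ++i)
        if (mask[i] >= lanes)
            imm |= 1u << i;
    return imm;
}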
diff --git a/test/CodeGen/X86/sse41.ll b/test/CodeGen/X86/sse41.ll
index a3c6201..6726a3e 100644
--- a/test/CodeGen/X86/sse41.ll
+++ b/test/CodeGen/X86/sse41.ll
@@ -692,3 +692,25 @@ define <4 x float> @insertps_from_broadcast_multiple_use(<4 x float> %a, <4 x fl
%13 = fadd <4 x float> %11, %12
ret <4 x float> %13
}
+
+define <4 x float> @insertps_with_undefs(<4 x float> %a, float* %b) {
+; CHECK-LABEL: insertps_with_undefs:
+; CHECK-NOT: shufps
+; CHECK: insertps $32, %xmm0
+; CHECK: ret
+ %1 = load float* %b, align 4
+ %2 = insertelement <4 x float> undef, float %1, i32 0
+ %result = shufflevector <4 x float> %a, <4 x float> %2, <4 x i32> <i32 4, i32 undef, i32 0, i32 7>
+ ret <4 x float> %result
+}
+
+; Test for a bug in X86ISelLowering.cpp:getINSERTPS where we were using
+; the destination index to change the load, instead of the source index.
+define <4 x float> @pr20087(<4 x float> %a, <4 x float> *%ptr) {
+; CHECK-LABEL: pr20087:
+; CHECK: insertps $48
+; CHECK: ret
+  %load = load <4 x float>* %ptr
+ %ret = shufflevector <4 x float> %load, <4 x float> %a, <4 x i32> <i32 4, i32 undef, i32 6, i32 2>
+ ret <4 x float> %ret
+}
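The INSERTPS immediate packs three fields, which is what made the
source/destination mix-up above possible: bits 7:6 select the source lane,
bits 5:4 the destination lane, and bits 3:0 a zero mask. So $32 inserts
source lane 0 into destination lane 2, and $48 inserts source lane 0 into
destination lane 3. A decoding sketch (names are ours):

/* Decode an INSERTPS imm8 into its three fields. */
static void insertps_decode(unsigned imm, int *src, int *dst, int *zmask) {
    *src   = (imm >> 6) & 3;  /* COUNT_S: source lane */
    *dst   = (imm >> 4) & 3;  /* COUNT_D: destination lane */
    *zmask = imm & 0xF;       /* ZMASK: lanes forced to zero */
}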
diff --git a/test/CodeGen/X86/stackmap-fast-isel.ll b/test/CodeGen/X86/stackmap-fast-isel.ll
new file mode 100644
index 0000000..0b7e6db
--- /dev/null
+++ b/test/CodeGen/X86/stackmap-fast-isel.ll
@@ -0,0 +1,165 @@
+; RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=corei7 -disable-fp-elim | FileCheck %s
+; RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=corei7 -disable-fp-elim -fast-isel -fast-isel-abort | FileCheck %s
+
+; CHECK-LABEL: .section __LLVM_STACKMAPS,__llvm_stackmaps
+; CHECK-NEXT: __LLVM_StackMaps:
+; Header
+; CHECK-NEXT: .byte 1
+; CHECK-NEXT: .byte 0
+; CHECK-NEXT: .short 0
+; Num Functions
+; CHECK-NEXT: .long 4
+; Num LargeConstants
+; CHECK-NEXT: .long 3
+; Num Callsites
+; CHECK-NEXT: .long 7
+
+; Functions and stack size
+; CHECK-NEXT: .quad _constantargs
+; CHECK-NEXT: .quad 8
+; CHECK-NEXT: .quad _liveConstant
+; CHECK-NEXT: .quad 8
+; CHECK-NEXT: .quad _directFrameIdx
+; CHECK-NEXT: .quad 40
+; CHECK-NEXT: .quad _longid
+; CHECK-NEXT: .quad 8
+
+; Large Constants
+; CHECK-NEXT: .quad 2147483648
+; CHECK-NEXT: .quad 4294967295
+; CHECK-NEXT: .quad 4294967296
+
+; Callsites
+; Constant arguments
+;
+; CHECK-NEXT: .quad 1
+; CHECK-NEXT: .long L{{.*}}-_constantargs
+; CHECK-NEXT: .short 0
+; CHECK-NEXT: .short 12
+; SmallConstant
+; CHECK-NEXT: .byte 4
+; CHECK-NEXT: .byte 8
+; CHECK-NEXT: .short 0
+; CHECK-NEXT: .long -1
+; SmallConstant
+; CHECK-NEXT: .byte 4
+; CHECK-NEXT: .byte 8
+; CHECK-NEXT: .short 0
+; CHECK-NEXT: .long -1
+; SmallConstant
+; CHECK-NEXT: .byte 4
+; CHECK-NEXT: .byte 8
+; CHECK-NEXT: .short 0
+; CHECK-NEXT: .long 65536
+; SmallConstant
+; CHECK-NEXT: .byte 4
+; CHECK-NEXT: .byte 8
+; CHECK-NEXT: .short 0
+; CHECK-NEXT: .long 2000000000
+; SmallConstant
+; CHECK-NEXT: .byte 4
+; CHECK-NEXT: .byte 8
+; CHECK-NEXT: .short 0
+; CHECK-NEXT: .long 2147483647
+; SmallConstant
+; CHECK-NEXT: .byte 4
+; CHECK-NEXT: .byte 8
+; CHECK-NEXT: .short 0
+; CHECK-NEXT: .long -1
+; SmallConstant
+; CHECK-NEXT: .byte 4
+; CHECK-NEXT: .byte 8
+; CHECK-NEXT: .short 0
+; CHECK-NEXT: .long -1
+; SmallConstant
+; CHECK-NEXT: .byte 4
+; CHECK-NEXT: .byte 8
+; CHECK-NEXT: .short 0
+; CHECK-NEXT: .long 0
+; LargeConstant at index 0
+; CHECK-NEXT: .byte 5
+; CHECK-NEXT: .byte 8
+; CHECK-NEXT: .short 0
+; CHECK-NEXT: .long 0
+; LargeConstant at index 1
+; CHECK-NEXT: .byte 5
+; CHECK-NEXT: .byte 8
+; CHECK-NEXT: .short 0
+; CHECK-NEXT: .long 1
+; LargeConstant at index 2
+; CHECK-NEXT: .byte 5
+; CHECK-NEXT: .byte 8
+; CHECK-NEXT: .short 0
+; CHECK-NEXT: .long 2
+; SmallConstant
+; CHECK-NEXT: .byte 4
+; CHECK-NEXT: .byte 8
+; CHECK-NEXT: .short 0
+; CHECK-NEXT: .long -1
+
+define void @constantargs() {
+entry:
+ tail call void (i64, i32, ...)* @llvm.experimental.stackmap(i64 1, i32 15, i16 65535, i16 -1, i32 65536, i32 2000000000, i32 2147483647, i32 -1, i32 4294967295, i32 4294967296, i64 2147483648, i64 4294967295, i64 4294967296, i64 -1)
+ ret void
+}
+
+; Map a constant value.
+;
+; CHECK-LABEL: .long L{{.*}}-_liveConstant
+; CHECK-NEXT: .short 0
+; 1 location
+; CHECK-NEXT: .short 1
+; Loc 0: SmallConstant
+; CHECK-NEXT: .byte 4
+; CHECK-NEXT: .byte 8
+; CHECK-NEXT: .short 0
+; CHECK-NEXT: .long 33
+
+define void @liveConstant() {
+ tail call void (i64, i32, ...)* @llvm.experimental.stackmap(i64 15, i32 5, i32 33)
+ ret void
+}
+
+; Directly map an alloca's address.
+;
+; Callsite 16
+; CHECK-LABEL: .long L{{.*}}-_directFrameIdx
+; CHECK-NEXT: .short 0
+; 1 location
+; CHECK-NEXT: .short 1
+; Loc 0: Direct RBP - ofs
+; CHECK-NEXT: .byte 2
+; CHECK-NEXT: .byte 8
+; CHECK-NEXT: .short 6
+; CHECK-NEXT: .long
+
+define void @directFrameIdx() {
+entry:
+ %metadata1 = alloca i64, i32 3, align 8
+ store i64 11, i64* %metadata1
+ store i64 12, i64* %metadata1
+ store i64 13, i64* %metadata1
+ call void (i64, i32, ...)* @llvm.experimental.stackmap(i64 16, i32 0, i64* %metadata1)
+ ret void
+}
+
+; Test a 64-bit ID.
+;
+; CHECK: .quad 4294967295
+; CHECK-LABEL: .long L{{.*}}-_longid
+; CHECK: .quad 4294967296
+; CHECK-LABEL: .long L{{.*}}-_longid
+; CHECK: .quad 9223372036854775807
+; CHECK-LABEL: .long L{{.*}}-_longid
+; CHECK: .quad -1
+; CHECK-LABEL: .long L{{.*}}-_longid
+define void @longid() {
+entry:
+ tail call void (i64, i32, ...)* @llvm.experimental.stackmap(i64 4294967295, i32 0)
+ tail call void (i64, i32, ...)* @llvm.experimental.stackmap(i64 4294967296, i32 0)
+ tail call void (i64, i32, ...)* @llvm.experimental.stackmap(i64 9223372036854775807, i32 0)
+ tail call void (i64, i32, ...)* @llvm.experimental.stackmap(i64 -1, i32 0)
+ ret void
+}
+
+declare void @llvm.experimental.stackmap(i64, i32, ...)
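Each .byte/.byte/.short/.long group checked above is one stackmap location
record. A hedged C view of its shape, inferred from the checks themselves
(the struct and field names are ours, not LLVM's):

#include <stdint.h>

/* One location record in the stackmap section. Types seen above:
   2 = Direct (frame address), 4 = SmallConstant, 5 = index into the
   large-constant pool. */
struct stackmap_location {
    uint8_t  type;      /* .byte           */
    uint8_t  size;      /* .byte 8         */
    uint16_t dwarf_reg; /* .short; 6 is RBP for the Direct entry */
    int32_t  value;     /* .long: constant, pool index, or frame offset */
};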
diff --git a/test/CodeGen/X86/stackmap-liveness.ll b/test/CodeGen/X86/stackmap-liveness.ll
index 9ce5254..897595d 100644
--- a/test/CodeGen/X86/stackmap-liveness.ll
+++ b/test/CodeGen/X86/stackmap-liveness.ll
@@ -1,6 +1,5 @@
-; RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=corei7-avx -disable-fp-elim | FileCheck %s
-; RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=corei7-avx -disable-fp-elim -enable-stackmap-liveness| FileCheck -check-prefix=STACK %s
-; RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=corei7-avx -disable-fp-elim -enable-patchpoint-liveness| FileCheck -check-prefix=PATCH %s
+; RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=corei7-avx -disable-fp-elim -enable-patchpoint-liveness=false | FileCheck %s
+; RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=corei7-avx -disable-fp-elim | FileCheck -check-prefix=PATCH %s
;
; Note: Print verbose stackmaps using -debug-only=stackmaps.
@@ -37,36 +36,21 @@ entry:
; Align
; CHECK-NEXT: .align 3
-; StackMap 1 (stackmap liveness information enabled)
-; STACK-LABEL: .long L{{.*}}-_stackmap_liveness
-; STACK-NEXT: .short 0
-; STACK-NEXT: .short 0
-; Padding
-; STACK-NEXT: .short 0
-; Num LiveOut Entries: 2
-; STACK-NEXT: .short 2
-; LiveOut Entry 1: %RSP (8 bytes)
-; STACK-NEXT: .short 7
-; STACK-NEXT: .byte 0
-; STACK-NEXT: .byte 8
-; LiveOut Entry 2: %YMM2 (16 bytes) --> %XMM2
-; STACK-NEXT: .short 19
-; STACK-NEXT: .byte 0
-; STACK-NEXT: .byte 16
-; Align
-; STACK-NEXT: .align 3
-
; StackMap 1 (patchpoint liveness information enabled)
; PATCH-LABEL: .long L{{.*}}-_stackmap_liveness
; PATCH-NEXT: .short 0
; PATCH-NEXT: .short 0
; Padding
; PATCH-NEXT: .short 0
-; Num LiveOut Entries: 0
-; PATCH-NEXT: .short 0
+; Num LiveOut Entries: 1
+; PATCH-NEXT: .short 1
+; LiveOut Entry 1: %YMM2 (16 bytes) --> %XMM2
+; PATCH-NEXT: .short 19
+; PATCH-NEXT: .byte 0
+; PATCH-NEXT: .byte 16
; Align
; PATCH-NEXT: .align 3
- call void (i64, i32, ...)* @llvm.experimental.stackmap(i64 1, i32 5)
+ call anyregcc void (i64, i32, i8*, i32, ...)* @llvm.experimental.patchpoint.void(i64 1, i32 12, i8* null, i32 0)
%a2 = call i64 asm sideeffect "", "={r8}"() nounwind
%a3 = call i8 asm sideeffect "", "={ah}"() nounwind
%a4 = call <4 x double> asm sideeffect "", "={ymm0}"() nounwind
@@ -83,52 +67,37 @@ entry:
; Align
; CHECK-NEXT: .align 3
-; StackMap 2 (stackmap liveness information enabled)
-; STACK-LABEL: .long L{{.*}}-_stackmap_liveness
-; STACK-NEXT: .short 0
-; STACK-NEXT: .short 0
-; Padding
-; STACK-NEXT: .short 0
-; Num LiveOut Entries: 6
-; STACK-NEXT: .short 6
-; LiveOut Entry 1: %RAX (1 bytes) --> %AL or %AH
-; STACK-NEXT: .short 0
-; STACK-NEXT: .byte 0
-; STACK-NEXT: .byte 1
-; LiveOut Entry 2: %RSP (8 bytes)
-; STACK-NEXT: .short 7
-; STACK-NEXT: .byte 0
-; STACK-NEXT: .byte 8
-; LiveOut Entry 3: %R8 (8 bytes)
-; STACK-NEXT: .short 8
-; STACK-NEXT: .byte 0
-; STACK-NEXT: .byte 8
-; LiveOut Entry 4: %YMM0 (32 bytes)
-; STACK-NEXT: .short 17
-; STACK-NEXT: .byte 0
-; STACK-NEXT: .byte 32
-; LiveOut Entry 5: %YMM1 (32 bytes)
-; STACK-NEXT: .short 18
-; STACK-NEXT: .byte 0
-; STACK-NEXT: .byte 32
-; LiveOut Entry 6: %YMM2 (16 bytes) --> %XMM2
-; STACK-NEXT: .short 19
-; STACK-NEXT: .byte 0
-; STACK-NEXT: .byte 16
-; Align
-; STACK-NEXT: .align 3
-
; StackMap 2 (patchpoint liveness information enabled)
; PATCH-LABEL: .long L{{.*}}-_stackmap_liveness
; PATCH-NEXT: .short 0
; PATCH-NEXT: .short 0
; Padding
; PATCH-NEXT: .short 0
-; Num LiveOut Entries: 0
+; Num LiveOut Entries: 5
+; PATCH-NEXT: .short 5
+; LiveOut Entry 1: %RAX (1 bytes) --> %AL or %AH
; PATCH-NEXT: .short 0
+; PATCH-NEXT: .byte 0
+; PATCH-NEXT: .byte 1
+; LiveOut Entry 2: %R8 (8 bytes)
+; PATCH-NEXT: .short 8
+; PATCH-NEXT: .byte 0
+; PATCH-NEXT: .byte 8
+; LiveOut Entry 3: %YMM0 (32 bytes)
+; PATCH-NEXT: .short 17
+; PATCH-NEXT: .byte 0
+; PATCH-NEXT: .byte 32
+; LiveOut Entry 4: %YMM1 (32 bytes)
+; PATCH-NEXT: .short 18
+; PATCH-NEXT: .byte 0
+; PATCH-NEXT: .byte 32
+; LiveOut Entry 5: %YMM2 (16 bytes) --> %XMM2
+; PATCH-NEXT: .short 19
+; PATCH-NEXT: .byte 0
+; PATCH-NEXT: .byte 16
; Align
; PATCH-NEXT: .align 3
- call void (i64, i32, ...)* @llvm.experimental.stackmap(i64 2, i32 5)
+ call anyregcc void (i64, i32, i8*, i32, ...)* @llvm.experimental.patchpoint.void(i64 2, i32 12, i8* null, i32 0)
call void asm sideeffect "", "{r8},{ah},{ymm0},{ymm1}"(i64 %a2, i8 %a3, <4 x double> %a4, <4 x double> %a5) nounwind
; StackMap 3 (no liveness information available)
@@ -142,36 +111,25 @@ entry:
; Align
; CHECK-NEXT: .align 3
-; StackMap 3 (stackmap liveness information enabled)
-; STACK-LABEL: .long L{{.*}}-_stackmap_liveness
-; STACK-NEXT: .short 0
-; STACK-NEXT: .short 0
-; Padding
-; STACK-NEXT: .short 0
-; Num LiveOut Entries: 2
-; STACK-NEXT: .short 2
-; LiveOut Entry 1: %RSP (8 bytes)
-; STACK-NEXT: .short 7
-; STACK-NEXT: .byte 0
-; STACK-NEXT: .byte 8
-; LiveOut Entry 2: %YMM2 (16 bytes) --> %XMM2
-; STACK-NEXT: .short 19
-; STACK-NEXT: .byte 0
-; STACK-NEXT: .byte 16
-; Align
-; STACK-NEXT: .align 3
-
; StackMap 3 (patchpoint liveness information enabled)
; PATCH-LABEL: .long L{{.*}}-_stackmap_liveness
; PATCH-NEXT: .short 0
; PATCH-NEXT: .short 0
; Padding
; PATCH-NEXT: .short 0
-; Num LiveOut Entries: 0
-; PATCH-NEXT: .short 0
+; Num LiveOut Entries: 2
+; PATCH-NEXT: .short 2
+; LiveOut Entry 1: %RSP (8 bytes)
+; PATCH-NEXT: .short 7
+; PATCH-NEXT: .byte 0
+; PATCH-NEXT: .byte 8
+; LiveOut Entry 2: %YMM2 (16 bytes) --> %XMM2
+; PATCH-NEXT: .short 19
+; PATCH-NEXT: .byte 0
+; PATCH-NEXT: .byte 16
; Align
; PATCH-NEXT: .align 3
- call void (i64, i32, ...)* @llvm.experimental.stackmap(i64 3, i32 5)
+ call anyregcc void (i64, i32, i8*, i32, ...)* @llvm.experimental.patchpoint.void(i64 3, i32 12, i8* null, i32 0)
call void asm sideeffect "", "{xmm2}"(<2 x double> %a1) nounwind
ret void
}
@@ -179,33 +137,6 @@ entry:
define void @mixed_liveness() {
entry:
%a1 = call <2 x double> asm sideeffect "", "={xmm2}"() nounwind
-; StackMap 4 (stackmap liveness information enabled)
-; STACK-LABEL: .long L{{.*}}-_mixed_liveness
-; STACK-NEXT: .short 0
-; STACK-NEXT: .short 0
-; Padding
-; STACK-NEXT: .short 0
-; Num LiveOut Entries: 1
-; STACK-NEXT: .short 1
-; LiveOut Entry 1: %YMM2 (16 bytes) --> %XMM2
-; STACK-NEXT: .short 19
-; STACK-NEXT: .byte 0
-; STACK-NEXT: .byte 16
-; Align
-; STACK-NEXT: .align 3
-
-
-; StackMap 5 (stackmap liveness information enabled)
-; STACK-LABEL: .long L{{.*}}-_mixed_liveness
-; STACK-NEXT: .short 0
-; STACK-NEXT: .short 0
-; Padding
-; STACK-NEXT: .short 0
-; Num LiveOut Entries: 0
-; STACK-NEXT: .short 0
-; Align
-; STACK-NEXT: .align 3
-
; StackMap 4 (patchpoint liveness information enabled)
; PATCH-LABEL: .long L{{.*}}-_mixed_liveness
; PATCH-NEXT: .short 0
diff --git a/test/CodeGen/X86/swizzle-2.ll b/test/CodeGen/X86/swizzle-2.ll
new file mode 100644
index 0000000..4b1f903
--- /dev/null
+++ b/test/CodeGen/X86/swizzle-2.ll
@@ -0,0 +1,515 @@
+; RUN: llc < %s -mtriple=x86_64-unknown-linux-gnu -mcpu=corei7 | FileCheck %s
+
+; Test that we correctly fold a shuffle that performs a swizzle of another
+; shuffle node according to the rule
+; shuffle (shuffle (x, undef, M0), undef, M1) -> shuffle(x, undef, M2)
+;
+; We only do this if the resulting mask is legal, to avoid introducing an
+; illegal shuffle that would be expanded into a sub-optimal sequence of
+; instructions during the lowering stage (a worked example of the mask
+; composition follows this file's diff).
+
+
+define <4 x i32> @swizzle_1(<4 x i32> %v) {
+ %1 = shufflevector <4 x i32> %v, <4 x i32> undef, <4 x i32> <i32 3, i32 2, i32 0, i32 1>
+ %2 = shufflevector <4 x i32> %1, <4 x i32> undef, <4 x i32> <i32 3, i32 2, i32 0, i32 1>
+ ret <4 x i32> %2
+}
+; CHECK-LABEL: swizzle_1
+; Mask: [1,0,3,2]
+; CHECK: pshufd $-79
+; CHECK-NOT: pshufd
+; CHECK-NEXT: ret
+
+
+define <4 x i32> @swizzle_2(<4 x i32> %v) {
+ %1 = shufflevector <4 x i32> %v, <4 x i32> undef, <4 x i32> <i32 3, i32 1, i32 0, i32 2>
+ %2 = shufflevector <4 x i32> %1, <4 x i32> undef, <4 x i32> <i32 3, i32 1, i32 0, i32 2>
+ ret <4 x i32> %2
+}
+; CHECK-LABEL: swizzle_2
+; Mask: [2,1,3,0]
+; CHECK: pshufd $54
+; CHECK-NOT: pshufd
+; CHECK-NEXT: ret
+
+
+define <4 x i32> @swizzle_3(<4 x i32> %v) {
+ %1 = shufflevector <4 x i32> %v, <4 x i32> undef, <4 x i32> <i32 2, i32 3, i32 1, i32 0>
+ %2 = shufflevector <4 x i32> %1, <4 x i32> undef, <4 x i32> <i32 2, i32 3, i32 1, i32 0>
+ ret <4 x i32> %2
+}
+; CHECK-LABEL: swizzle_3
+; Mask: [1,0,3,2]
+; CHECK: pshufd $-79
+; CHECK-NOT: pshufd
+; CHECK-NEXT: ret
+
+
+define <4 x i32> @swizzle_4(<4 x i32> %v) {
+ %1 = shufflevector <4 x i32> %v, <4 x i32> undef, <4 x i32> <i32 2, i32 1, i32 3, i32 0>
+ %2 = shufflevector <4 x i32> %1, <4 x i32> undef, <4 x i32> <i32 2, i32 1, i32 3, i32 0>
+ ret <4 x i32> %2
+}
+; CHECK-LABEL: swizzle_4
+; Mask: [3,1,0,2]
+; CHECK: pshufd $-121
+; CHECK-NOT: pshufd
+; CHECK-NEXT: ret
+
+
+define <4 x i32> @swizzle_5(<4 x i32> %v) {
+ %1 = shufflevector <4 x i32> %v, <4 x i32> undef, <4 x i32> <i32 1, i32 2, i32 3, i32 0>
+ %2 = shufflevector <4 x i32> %1, <4 x i32> undef, <4 x i32> <i32 1, i32 2, i32 3, i32 0>
+ ret <4 x i32> %2
+}
+; CHECK-LABEL: swizzle_5
+; Mask: [2,3,0,1]
+; CHECK: pshufd $78
+; CHECK-NOT: pshufd
+; CHECK-NEXT: ret
+
+
+define <4 x i32> @swizzle_6(<4 x i32> %v) {
+ %1 = shufflevector <4 x i32> %v, <4 x i32> undef, <4 x i32> <i32 1, i32 2, i32 0, i32 3>
+ %2 = shufflevector <4 x i32> %1, <4 x i32> undef, <4 x i32> <i32 1, i32 2, i32 0, i32 3>
+ ret <4 x i32> %2
+}
+; CHECK-LABEL: swizzle_6
+; Mask: [2,0,1,3]
+; CHECK: pshufd $-46
+; CHECK-NOT: pshufd
+; CHECK-NEXT: ret
+
+
+define <4 x i32> @swizzle_7(<4 x i32> %v) {
+ %1 = shufflevector <4 x i32> %v, <4 x i32> undef, <4 x i32> <i32 0, i32 3, i32 1, i32 2>
+ %2 = shufflevector <4 x i32> %1, <4 x i32> undef, <4 x i32> <i32 0, i32 3, i32 1, i32 2>
+ ret <4 x i32> %2
+}
+; CHECK-LABEL: swizzle_7
+; Mask: [0,2,3,1]
+; CHECK: pshufd $120
+; CHECK-NOT: pshufd
+; CHECK-NEXT: ret
+
+
+define <4 x i32> @swizzle_8(<4 x i32> %v) {
+ %1 = shufflevector <4 x i32> %v, <4 x i32> undef, <4 x i32> <i32 3, i32 0, i32 2, i32 1>
+ %2 = shufflevector <4 x i32> %1, <4 x i32> undef, <4 x i32> <i32 3, i32 0, i32 2, i32 1>
+ ret <4 x i32> %2
+}
+; CHECK-LABEL: swizzle_8
+; Mask: [1,3,2,0]
+; CHECK: pshufd $45
+; CHECK-NOT: pshufd
+; CHECK-NEXT: ret
+
+
+define <4 x i32> @swizzle_9(<4 x i32> %v) {
+ %1 = shufflevector <4 x i32> %v, <4 x i32> undef, <4 x i32> <i32 3, i32 0, i32 1, i32 2>
+ %2 = shufflevector <4 x i32> %1, <4 x i32> undef, <4 x i32> <i32 3, i32 0, i32 1, i32 2>
+ ret <4 x i32> %2
+}
+; CHECK-LABEL: swizzle_9
+; Mask: [2,3,0,1]
+; CHECK: pshufd $78
+; CHECK-NOT: pshufd
+; CHECK-NEXT: ret
+
+
+define <4 x i32> @swizzle_10(<4 x i32> %v) {
+ %1 = shufflevector <4 x i32> %v, <4 x i32> undef, <4 x i32> <i32 2, i32 0, i32 1, i32 3>
+ %2 = shufflevector <4 x i32> %1, <4 x i32> undef, <4 x i32> <i32 2, i32 0, i32 1, i32 3>
+ ret <4 x i32> %2
+}
+; CHECK-LABEL: swizzle_10
+; Mask: [1,2,0,3]
+; CHECK: pshufd $-55
+; CHECK-NOT: pshufd
+; CHECK-NEXT: ret
+
+
+define <4 x i32> @swizzle_11(<4 x i32> %v) {
+ %1 = shufflevector <4 x i32> %v, <4 x i32> undef, <4 x i32> <i32 2, i32 0, i32 3, i32 1>
+ %2 = shufflevector <4 x i32> %1, <4 x i32> undef, <4 x i32> <i32 2, i32 0, i32 3, i32 1>
+ ret <4 x i32> %2
+}
+; CHECK-LABEL: swizzle_11
+; Mask: [3,2,1,0]
+; CHECK: pshufd $27
+; CHECK-NOT: pshufd
+; CHECK-NEXT: ret
+
+
+define <4 x i32> @swizzle_12(<4 x i32> %v) {
+ %1 = shufflevector <4 x i32> %v, <4 x i32> undef, <4 x i32> <i32 0, i32 2, i32 3, i32 1>
+ %2 = shufflevector <4 x i32> %1, <4 x i32> undef, <4 x i32> <i32 0, i32 2, i32 3, i32 1>
+ ret <4 x i32> %2
+}
+; CHECK-LABEL: swizzle_12
+; Mask: [0,3,1,2]
+; CHECK: pshufd $-100
+; CHECK-NOT: pshufd
+; CHECK-NEXT: ret
+
+
+define <4 x i32> @swizzle_13(<4 x i32> %v) {
+ %1 = shufflevector <4 x i32> %v, <4 x i32> undef, <4 x i32> <i32 1, i32 3, i32 0, i32 2>
+ %2 = shufflevector <4 x i32> %1, <4 x i32> undef, <4 x i32> <i32 1, i32 3, i32 0, i32 2>
+ ret <4 x i32> %2
+}
+; CHECK-LABEL: swizzle_13
+; Mask: [3,2,1,0]
+; CHECK: pshufd $27
+; CHECK-NOT: pshufd
+; CHECK-NEXT: ret
+
+
+define <4 x i32> @swizzle_14(<4 x i32> %v) {
+ %1 = shufflevector <4 x i32> %v, <4 x i32> undef, <4 x i32> <i32 1, i32 3, i32 2, i32 0>
+ %2 = shufflevector <4 x i32> %1, <4 x i32> undef, <4 x i32> <i32 1, i32 3, i32 2, i32 0>
+ ret <4 x i32> %2
+}
+; CHECK-LABEL: swizzle_14
+; Mask: [3,0,2,1]
+; CHECK: pshufd $99
+; CHECK-NOT: pshufd
+; CHECK-NEXT: ret
+
+
+define <4 x float> @swizzle_15(<4 x float> %v) {
+ %1 = shufflevector <4 x float> %v, <4 x float> undef, <4 x i32> <i32 3, i32 2, i32 0, i32 1>
+ %2 = shufflevector <4 x float> %1, <4 x float> undef, <4 x i32> <i32 3, i32 2, i32 0, i32 1>
+ ret <4 x float> %2
+}
+; CHECK-LABEL: swizzle_15
+; Mask: [1,0,3,2]
+; CHECK: pshufd $-79
+; CHECK-NOT: pshufd
+; CHECK-NEXT: ret
+
+
+define <4 x float> @swizzle_16(<4 x float> %v) {
+ %1 = shufflevector <4 x float> %v, <4 x float> undef, <4 x i32> <i32 3, i32 1, i32 0, i32 2>
+ %2 = shufflevector <4 x float> %1, <4 x float> undef, <4 x i32> <i32 3, i32 1, i32 0, i32 2>
+ ret <4 x float> %2
+}
+; CHECK-LABEL: swizzle_16
+; Mask: [2,1,3,0]
+; CHECK: pshufd $54
+; CHECK-NOT: pshufd
+; CHECK-NEXT: ret
+
+
+define <4 x float> @swizzle_17(<4 x float> %v) {
+ %1 = shufflevector <4 x float> %v, <4 x float> undef, <4 x i32> <i32 2, i32 3, i32 1, i32 0>
+ %2 = shufflevector <4 x float> %1, <4 x float> undef, <4 x i32> <i32 2, i32 3, i32 1, i32 0>
+ ret <4 x float> %2
+}
+; CHECK-LABEL: swizzle_17
+; Mask: [1,0,3,2]
+; CHECK: pshufd $-79
+; CHECK-NOT: pshufd
+; CHECK-NEXT: ret
+
+
+define <4 x float> @swizzle_18(<4 x float> %v) {
+ %1 = shufflevector <4 x float> %v, <4 x float> undef, <4 x i32> <i32 2, i32 1, i32 3, i32 0>
+ %2 = shufflevector <4 x float> %1, <4 x float> undef, <4 x i32> <i32 2, i32 1, i32 3, i32 0>
+ ret <4 x float> %2
+}
+; CHECK-LABEL: swizzle_18
+; Mask: [3,1,0,2]
+; CHECK: pshufd $-121
+; CHECK-NOT: pshufd
+; CHECK-NEXT: ret
+
+
+define <4 x float> @swizzle_19(<4 x float> %v) {
+ %1 = shufflevector <4 x float> %v, <4 x float> undef, <4 x i32> <i32 1, i32 2, i32 3, i32 0>
+ %2 = shufflevector <4 x float> %1, <4 x float> undef, <4 x i32> <i32 1, i32 2, i32 3, i32 0>
+ ret <4 x float> %2
+}
+; CHECK-LABEL: swizzle_19
+; Mask: [2,3,0,1]
+; CHECK: pshufd $78
+; CHECK-NOT: pshufd
+; CHECK-NEXT: ret
+
+
+define <4 x float> @swizzle_20(<4 x float> %v) {
+ %1 = shufflevector <4 x float> %v, <4 x float> undef, <4 x i32> <i32 1, i32 2, i32 0, i32 3>
+ %2 = shufflevector <4 x float> %1, <4 x float> undef, <4 x i32> <i32 1, i32 2, i32 0, i32 3>
+ ret <4 x float> %2
+}
+; CHECK-LABEL: swizzle_20
+; Mask: [2,0,1,3]
+; CHECK: pshufd $-46
+; CHECK-NOT: pshufd
+; CHECK-NEXT: ret
+
+
+define <4 x float> @swizzle_21(<4 x float> %v) {
+ %1 = shufflevector <4 x float> %v, <4 x float> undef, <4 x i32> <i32 0, i32 3, i32 1, i32 2>
+ %2 = shufflevector <4 x float> %1, <4 x float> undef, <4 x i32> <i32 0, i32 3, i32 1, i32 2>
+ ret <4 x float> %2
+}
+; CHECK-LABEL: swizzle_21
+; Mask: [0,2,3,1]
+; CHECK: pshufd $120
+; CHECK-NOT: pshufd
+; CHECK-NEXT: ret
+
+
+define <4 x float> @swizzle_22(<4 x float> %v) {
+ %1 = shufflevector <4 x float> %v, <4 x float> undef, <4 x i32> <i32 3, i32 0, i32 2, i32 1>
+ %2 = shufflevector <4 x float> %1, <4 x float> undef, <4 x i32> <i32 3, i32 0, i32 2, i32 1>
+ ret <4 x float> %2
+}
+; CHECK-LABEL: swizzle_22
+; Mask: [1,3,2,0]
+; CHECK: pshufd $45
+; CHECK-NOT: pshufd
+; CHECK-NEXT: ret
+
+
+define <4 x float> @swizzle_23(<4 x float> %v) {
+ %1 = shufflevector <4 x float> %v, <4 x float> undef, <4 x i32> <i32 3, i32 0, i32 1, i32 2>
+ %2 = shufflevector <4 x float> %1, <4 x float> undef, <4 x i32> <i32 3, i32 0, i32 1, i32 2>
+ ret <4 x float> %2
+}
+; CHECK-LABEL: swizzle_23
+; Mask: [2,3,0,1]
+; CHECK: pshufd $78
+; CHECK-NOT: pshufd
+; CHECK-NEXT: ret
+
+
+define <4 x float> @swizzle_24(<4 x float> %v) {
+ %1 = shufflevector <4 x float> %v, <4 x float> undef, <4 x i32> <i32 2, i32 0, i32 1, i32 3>
+ %2 = shufflevector <4 x float> %1, <4 x float> undef, <4 x i32> <i32 2, i32 0, i32 1, i32 3>
+ ret <4 x float> %2
+}
+; CHECK-LABEL: swizzle_24
+; Mask: [1,2,0,3]
+; CHECK: pshufd $-55
+; CHECK-NOT: pshufd
+; CHECK-NEXT: ret
+
+
+define <4 x float> @swizzle_25(<4 x float> %v) {
+ %1 = shufflevector <4 x float> %v, <4 x float> undef, <4 x i32> <i32 2, i32 0, i32 3, i32 1>
+ %2 = shufflevector <4 x float> %1, <4 x float> undef, <4 x i32> <i32 2, i32 0, i32 3, i32 1>
+ ret <4 x float> %2
+}
+; CHECK-LABEL: swizzle_25
+; Mask: [3,2,1,0]
+; CHECK: pshufd $27
+; CHECK-NOT: pshufd
+; CHECK-NEXT: ret
+
+
+define <4 x float> @swizzle_26(<4 x float> %v) {
+ %1 = shufflevector <4 x float> %v, <4 x float> undef, <4 x i32> <i32 0, i32 2, i32 3, i32 1>
+ %2 = shufflevector <4 x float> %1, <4 x float> undef, <4 x i32> <i32 0, i32 2, i32 3, i32 1>
+ ret <4 x float> %2
+}
+; CHECK-LABEL: swizzle_26
+; Mask: [0,3,1,2]
+; CHECK: pshufd $-100
+; CHECK-NOT: pshufd
+; CHECK-NEXT: ret
+
+
+define <4 x float> @swizzle_27(<4 x float> %v) {
+ %1 = shufflevector <4 x float> %v, <4 x float> undef, <4 x i32> <i32 1, i32 3, i32 0, i32 2>
+ %2 = shufflevector <4 x float> %1, <4 x float> undef, <4 x i32> <i32 1, i32 3, i32 0, i32 2>
+ ret <4 x float> %2
+}
+; CHECK-LABEL: swizzle_27
+; Mask: [3,2,1,0]
+; CHECK: pshufd $27
+; CHECK-NOT: pshufd
+; CHECK-NEXT: ret
+
+
+define <4 x float> @swizzle_28(<4 x float> %v) {
+ %1 = shufflevector <4 x float> %v, <4 x float> undef, <4 x i32> <i32 1, i32 3, i32 2, i32 0>
+ %2 = shufflevector <4 x float> %1, <4 x float> undef, <4 x i32> <i32 1, i32 3, i32 2, i32 0>
+ ret <4 x float> %2
+}
+; CHECK-LABEL: swizzle_28
+; Mask: [3,0,2,1]
+; CHECK: pshufd $99
+; CHECK-NOT: pshufd
+; CHECK-NEXT: ret
+
+
+define <4 x float> @swizzle_29(<4 x float> %v) {
+ %1 = shufflevector <4 x float> %v, <4 x float> undef, <4 x i32> <i32 3, i32 1, i32 2, i32 0>
+ %2 = shufflevector <4 x float> %1, <4 x float> undef, <4 x i32> <i32 1, i32 0, i32 2, i32 3>
+ ret <4 x float> %2
+}
+; CHECK-LABEL: swizzle_29
+; Mask: [1,3,2,0]
+; CHECK: pshufd $45
+; CHECK-NOT: pshufd
+; CHECK-NEXT: ret
+
+; Make sure that we combine the shuffles from each function below into a single
+; legal shuffle (either pshuflw or pshufb depending on the masks).
+
+define <8 x i16> @swizzle_30(<8 x i16> %v) {
+ %1 = shufflevector <8 x i16> %v, <8 x i16> undef, <8 x i32> <i32 3, i32 1, i32 2, i32 0, i32 7, i32 5, i32 6, i32 4>
+ %2 = shufflevector <8 x i16> %1, <8 x i16> undef, <8 x i32> <i32 1, i32 0, i32 2, i32 3, i32 7, i32 5, i32 6, i32 4>
+ ret <8 x i16> %2
+}
+; CHECK-LABEL: swizzle_30
+; Mask: [1,3,2,0,4,5,6,7]
+; CHECK: pshuflw $45
+; CHECK-NOT: pshufb
+; CHECK-NEXT: ret
+
+
+define <8 x i16> @swizzle_31(<8 x i16> %v) {
+ %1 = shufflevector <8 x i16> %v, <8 x i16> undef, <8 x i32> <i32 3, i32 0, i32 2, i32 1, i32 7, i32 5, i32 6, i32 4>
+ %2 = shufflevector <8 x i16> %1, <8 x i16> undef, <8 x i32> <i32 3, i32 0, i32 2, i32 1, i32 7, i32 5, i32 6, i32 4>
+ ret <8 x i16> %2
+}
+; CHECK-LABEL: swizzle_31
+; Mask: [1,3,2,0,4,5,6,7]
+; CHECK: pshuflw $45
+; CHECK-NOT: pshufb
+; CHECK: ret
+
+
+define <8 x i16> @swizzle_32(<8 x i16> %v) {
+ %1 = shufflevector <8 x i16> %v, <8 x i16> undef, <8 x i32> <i32 1, i32 2, i32 3, i32 0, i32 7, i32 5, i32 6, i32 4>
+ %2 = shufflevector <8 x i16> %1, <8 x i16> undef, <8 x i32> <i32 1, i32 2, i32 3, i32 0, i32 7, i32 5, i32 6, i32 4>
+ ret <8 x i16> %2
+}
+; CHECK-LABEL: swizzle_32
+; Mask: [2,3,0,1,4,5,6,7] --> equivalent to pshufd mask [1,0,2,3]
+; CHECK: pshufd $-31
+; CHECK-NOT: pshufb
+; CHECK: ret
+
+define <8 x i16> @swizzle_33(<8 x i16> %v) {
+ %1 = shufflevector <8 x i16> %v, <8 x i16> undef, <8 x i32> <i32 4, i32 6, i32 5, i32 7, i32 2, i32 3, i32 1, i32 0>
+ %2 = shufflevector <8 x i16> %1, <8 x i16> undef, <8 x i32> <i32 4, i32 6, i32 5, i32 7, i32 2, i32 3, i32 1, i32 0>
+ ret <8 x i16> %2
+}
+; CHECK-LABEL: swizzle_33
+; CHECK: pshufb
+; CHECK-NOT: pshufb
+; CHECK-NOT: shufpd
+; CHECK: ret
+
+
+define <8 x i16> @swizzle_34(<8 x i16> %v) {
+ %1 = shufflevector <8 x i16> %v, <8 x i16> undef, <8 x i32> <i32 4, i32 7, i32 6, i32 5, i32 1, i32 2, i32 0, i32 3>
+ %2 = shufflevector <8 x i16> %1, <8 x i16> undef, <8 x i32> <i32 4, i32 7, i32 6, i32 5, i32 1, i32 2, i32 0, i32 3>
+ ret <8 x i16> %2
+}
+; CHECK-LABEL: swizzle_34
+; CHECK: pshufb
+; CHECK-NOT: pshufb
+; CHECK-NOT: shufpd
+; CHECK: ret
+
+
+define <8 x i16> @swizzle_35(<8 x i16> %v) {
+ %1 = shufflevector <8 x i16> %v, <8 x i16> undef, <8 x i32> <i32 7, i32 4, i32 6, i32 5, i32 1, i32 3, i32 0, i32 2>
+ %2 = shufflevector <8 x i16> %1, <8 x i16> undef, <8 x i32> <i32 7, i32 4, i32 6, i32 5, i32 1, i32 3, i32 0, i32 2>
+ ret <8 x i16> %2
+}
+; CHECK-LABEL: swizzle_35
+; CHECK: pshufb
+; CHECK-NOT: pshufb
+; CHECK: ret
+
+
+define <8 x i16> @swizzle_36(<8 x i16> %v) {
+ %1 = shufflevector <8 x i16> %v, <8 x i16> undef, <8 x i32> <i32 4, i32 6, i32 7, i32 5, i32 0, i32 1, i32 3, i32 2>
+ %2 = shufflevector <8 x i16> %1, <8 x i16> undef, <8 x i32> <i32 4, i32 6, i32 7, i32 5, i32 0, i32 1, i32 3, i32 2>
+ ret <8 x i16> %2
+}
+; CHECK-LABEL: swizzle_36
+; CHECK: pshufb
+; CHECK-NOT: pshufb
+; CHECK-NOT: shufpd
+; CHECK: ret
+
+
+define <8 x i16> @swizzle_37(<8 x i16> %v) {
+ %1 = shufflevector <8 x i16> %v, <8 x i16> undef, <8 x i32> <i32 1, i32 0, i32 3, i32 2, i32 7, i32 5, i32 6, i32 4>
+ %2 = shufflevector <8 x i16> %1, <8 x i16> undef, <8 x i32> <i32 1, i32 0, i32 3, i32 2, i32 7, i32 4, i32 6, i32 5>
+ ret <8 x i16> %2
+}
+; CHECK-LABEL: swizzle_37
+; Mask: [0,1,2,3,4,7,6,5]
+; CHECK: pshufhw $108
+; CHECK-NOT: pshufb
+; CHECK: ret
+
+
+define <8 x i16> @swizzle_38(<8 x i16> %v) {
+ %1 = shufflevector <8 x i16> %v, <8 x i16> undef, <8 x i32> <i32 5, i32 6, i32 4, i32 7, i32 0, i32 2, i32 1, i32 3>
+ %2 = shufflevector <8 x i16> %1, <8 x i16> undef, <8 x i32> <i32 5, i32 6, i32 4, i32 7, i32 0, i32 2, i32 1, i32 3>
+ ret <8 x i16> %2
+}
+; CHECK-LABEL: swizzle_38
+; CHECK: pshufb
+; CHECK-NOT: pshufb
+; CHECK-NOT: shufpd
+; CHECK: ret
+
+
+define <8 x i16> @swizzle_39(<8 x i16> %v) {
+ %1 = shufflevector <8 x i16> %v, <8 x i16> undef, <8 x i32> <i32 5, i32 4, i32 6, i32 7, i32 3, i32 2, i32 1, i32 0>
+ %2 = shufflevector <8 x i16> %1, <8 x i16> undef, <8 x i32> <i32 5, i32 4, i32 6, i32 7, i32 3, i32 2, i32 1, i32 0>
+ ret <8 x i16> %2
+}
+; CHECK-LABEL: swizzle_39
+; CHECK: pshufb
+; CHECK-NOT: pshufb
+; CHECK-NOT: shufpd
+; CHECK: ret
+
+
+define <8 x i16> @swizzle_40(<8 x i16> %v) {
+ %1 = shufflevector <8 x i16> %v, <8 x i16> undef, <8 x i32> <i32 6, i32 4, i32 7, i32 5, i32 1, i32 0, i32 3, i32 2>
+ %2 = shufflevector <8 x i16> %1, <8 x i16> undef, <8 x i32> <i32 6, i32 4, i32 7, i32 5, i32 1, i32 0, i32 3, i32 2>
+ ret <8 x i16> %2
+}
+; CHECK-LABEL: swizzle_40
+; CHECK: pshufb
+; CHECK-NOT: pshufb
+; CHECK-NOT: shufpd
+; CHECK: ret
+
+
+define <8 x i16> @swizzle_41(<8 x i16> %v) {
+ %1 = shufflevector <8 x i16> %v, <8 x i16> undef, <8 x i32> <i32 6, i32 7, i32 5, i32 4, i32 0, i32 1, i32 3, i32 2>
+ %2 = shufflevector <8 x i16> %1, <8 x i16> undef, <8 x i32> <i32 6, i32 7, i32 5, i32 4, i32 0, i32 1, i32 3, i32 2>
+ ret <8 x i16> %2
+}
+; CHECK-LABEL: swizzle_41
+; CHECK: pshufb
+; CHECK-NOT: pshufb
+; CHECK-NOT: shufpd
+; CHECK: ret
+
+
+define <8 x i16> @swizzle_42(<8 x i16> %v) {
+ %1 = shufflevector <8 x i16> %v, <8 x i16> undef, <8 x i32> <i32 0, i32 1, i32 3, i32 2, i32 7, i32 6, i32 4, i32 5>
+ %2 = shufflevector <8 x i16> %1, <8 x i16> undef, <8 x i32> <i32 0, i32 1, i32 3, i32 2, i32 7, i32 6, i32 4, i32 5>
+ ret <8 x i16> %2
+}
+; CHECK-LABEL: swizzle_42
+; Mask: [0,1,2,3,5,4,7,6]
+; CHECK: pshufhw $-79
+; CHECK-NOT: pshufb
+; CHECK: ret
+
+
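Each expected pshufd immediate above can be reproduced by composing the two
masks and packing two bits per lane (lane 0 in the lowest bits); FileCheck
prints the byte as a signed value. A worked example for swizzle_1:

#include <stdio.h>

/* Compose swizzle_1's masks (M0 = M1 = {3,2,0,1}) into M2[i] = M0[M1[i]]
   and pack the PSHUFD imm8: here M2 = {1,0,3,2}, so imm8 = 0xB1 = -79. */
int main(void) {
    const int M0[4] = {3, 2, 0, 1}, M1[4] = {3, 2, 0, 1};
    int imm = 0;
    for (int i = 0; i < 4; ++i)
        imm |= M0[M1[i]] << (2 * i);
    printf("imm8 = %d\n", (signed char)imm); /* -79 */
    return 0;
}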
diff --git a/test/CodeGen/X86/swizzle-avx2.ll b/test/CodeGen/X86/swizzle-avx2.ll
new file mode 100644
index 0000000..29dfa6c
--- /dev/null
+++ b/test/CodeGen/X86/swizzle-avx2.ll
@@ -0,0 +1,91 @@
+; RUN: llc < %s -march=x86-64 -mtriple=x86_64-unknown-linux-gnu -mcpu=core-avx2 | FileCheck %s
+
+; Test that we correctly fold a shuffle that performs a swizzle of another
+; shuffle node according to the rule
+; shuffle (shuffle (x, undef, M0), undef, M1) -> shuffle(x, undef, M2)
+;
+; We only do this if the resulting mask is legal, to avoid introducing an
+; illegal shuffle that would be expanded into a sub-optimal sequence of
+; instructions during the lowering stage.
+
+; Check that we produce a single vector permute / shuffle in all cases.
+
+define <8 x i32> @swizzle_1(<8 x i32> %v) {
+ %1 = shufflevector <8 x i32> %v, <8 x i32> undef, <8 x i32> <i32 3, i32 1, i32 2, i32 0, i32 7, i32 5, i32 6, i32 4>
+ %2 = shufflevector <8 x i32> %1, <8 x i32> undef, <8 x i32> <i32 1, i32 0, i32 2, i32 3, i32 7, i32 5, i32 6, i32 4>
+ ret <8 x i32> %2
+}
+; CHECK-LABEL: swizzle_1
+; CHECK: vpermd
+; CHECK-NOT: vpermd
+; CHECK: ret
+
+
+define <8 x i32> @swizzle_2(<8 x i32> %v) {
+ %1 = shufflevector <8 x i32> %v, <8 x i32> undef, <8 x i32> <i32 6, i32 7, i32 4, i32 5, i32 0, i32 1, i32 2, i32 3>
+ %2 = shufflevector <8 x i32> %1, <8 x i32> undef, <8 x i32> <i32 6, i32 7, i32 4, i32 5, i32 0, i32 1, i32 2, i32 3>
+ ret <8 x i32> %2
+}
+; CHECK-LABEL: swizzle_2
+; CHECK: vpshufd $78
+; CHECK-NOT: vpermd
+; CHECK-NOT: vpshufd
+; CHECK: ret
+
+
+define <8 x i32> @swizzle_3(<8 x i32> %v) {
+ %1 = shufflevector <8 x i32> %v, <8 x i32> undef, <8 x i32> <i32 4, i32 5, i32 6, i32 7, i32 2, i32 3, i32 0, i32 1>
+ %2 = shufflevector <8 x i32> %1, <8 x i32> undef, <8 x i32> <i32 4, i32 5, i32 6, i32 7, i32 2, i32 3, i32 0, i32 1>
+ ret <8 x i32> %2
+}
+; CHECK-LABEL: swizzle_3
+; CHECK: vpshufd $78
+; CHECK-NOT: vpermd
+; CHECK-NOT: vpshufd
+; CHECK: ret
+
+
+define <8 x i32> @swizzle_4(<8 x i32> %v) {
+ %1 = shufflevector <8 x i32> %v, <8 x i32> undef, <8 x i32> <i32 4, i32 7, i32 5, i32 6, i32 3, i32 2, i32 0, i32 1>
+ %2 = shufflevector <8 x i32> %1, <8 x i32> undef, <8 x i32> <i32 4, i32 7, i32 5, i32 6, i32 3, i32 2, i32 0, i32 1>
+ ret <8 x i32> %2
+}
+; CHECK-LABEL: swizzle_4
+; CHECK: vpermd
+; CHECK-NOT: vpermd
+; CHECK: ret
+
+
+define <8 x i32> @swizzle_5(<8 x i32> %v) {
+ %1 = shufflevector <8 x i32> %v, <8 x i32> undef, <8 x i32> <i32 7, i32 4, i32 6, i32 5, i32 0, i32 2, i32 1, i32 3>
+ %2 = shufflevector <8 x i32> %1, <8 x i32> undef, <8 x i32> <i32 7, i32 4, i32 6, i32 5, i32 0, i32 2, i32 1, i32 3>
+ ret <8 x i32> %2
+}
+; CHECK-LABEL: swizzle_5
+; CHECK: vpermd
+; CHECK-NOT: vpermd
+; CHECK: ret
+
+
+define <8 x i32> @swizzle_6(<8 x i32> %v) {
+ %1 = shufflevector <8 x i32> %v, <8 x i32> undef, <8 x i32> <i32 2, i32 1, i32 3, i32 0, i32 4, i32 7, i32 6, i32 5>
+ %2 = shufflevector <8 x i32> %1, <8 x i32> undef, <8 x i32> <i32 2, i32 1, i32 3, i32 0, i32 4, i32 7, i32 6, i32 5>
+ ret <8 x i32> %2
+}
+; CHECK-LABEL: swizzle_6
+; CHECK: vpermd
+; CHECK-NOT: vpermd
+; CHECK: ret
+
+
+define <8 x i32> @swizzle_7(<8 x i32> %v) {
+ %1 = shufflevector <8 x i32> %v, <8 x i32> undef, <8 x i32> <i32 0, i32 3, i32 1, i32 2, i32 5, i32 4, i32 6, i32 7>
+ %2 = shufflevector <8 x i32> %1, <8 x i32> undef, <8 x i32> <i32 0, i32 3, i32 1, i32 2, i32 5, i32 4, i32 6, i32 7>
+ ret <8 x i32> %2
+}
+; CHECK-LABEL: swizzle_7
+; CHECK: vpermd
+; CHECK-NOT: vpermd
+; CHECK: ret
+
+
diff --git a/test/CodeGen/X86/testb-je-fusion.ll b/test/CodeGen/X86/testb-je-fusion.ll
new file mode 100644
index 0000000..9e946ae
--- /dev/null
+++ b/test/CodeGen/X86/testb-je-fusion.ll
@@ -0,0 +1,20 @@
+; RUN: llc < %s -march=x86-64 -mcpu=corei7-avx | FileCheck %s
+
+; testb should be scheduled right before je to enable macro-fusion.
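+; (Background note, not itself checked here: on fusion-capable Intel cores a
+; test/cmp and the following conditional branch can be macro-fused into a
+; single uop only when the two instructions are adjacent, which is why the
+; scheduling matters.)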
+
+; CHECK: testb $2, %{{[abcd]}}h
+; CHECK-NEXT: je
+
+define i32 @check_flag(i32 %flags, ...) nounwind {
+entry:
+ %and = and i32 %flags, 512
+ %tobool = icmp eq i32 %and, 0
+ br i1 %tobool, label %if.end, label %if.then
+
+if.then:
+ br label %if.end
+
+if.end:
+ %hasflag = phi i32 [ 1, %if.then ], [ 0, %entry ]
+ ret i32 %hasflag
+}
diff --git a/test/CodeGen/X86/vec_cast2.ll b/test/CodeGen/X86/vec_cast2.ll
index 5f6e7a8..1a6c05d 100644
--- a/test/CodeGen/X86/vec_cast2.ll
+++ b/test/CodeGen/X86/vec_cast2.ll
@@ -1,8 +1,20 @@
; RUN: llc < %s -mtriple=i386-apple-darwin10 -mcpu=corei7-avx -mattr=+avx | FileCheck %s
+; RUN: llc < %s -mtriple=i386-apple-darwin10 -mcpu=corei7-avx -mattr=+avx -x86-experimental-vector-widening-legalization | FileCheck %s --check-prefix=CHECK-WIDE
;CHECK-LABEL: foo1_8:
;CHECK: vcvtdq2ps
;CHECK: ret
+;
+;CHECK-WIDE-LABEL: foo1_8:
+;CHECK-WIDE: vpmovzxbd %xmm0, %xmm1
+;CHECK-WIDE-NEXT: vpslld $24, %xmm1, %xmm1
+;CHECK-WIDE-NEXT: vpsrad $24, %xmm1, %xmm1
+;CHECK-WIDE-NEXT: vpshufb {{.*}}, %xmm0, %xmm0
+;CHECK-WIDE-NEXT: vpslld $24, %xmm0, %xmm0
+;CHECK-WIDE-NEXT: vpsrad $24, %xmm0, %xmm0
+;CHECK-WIDE-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
+;CHECK-WIDE-NEXT: vcvtdq2ps %ymm0, %ymm0
+;CHECK-WIDE-NEXT: ret
define <8 x float> @foo1_8(<8 x i8> %src) {
%res = sitofp <8 x i8> %src to <8 x float>
ret <8 x float> %res
@@ -11,6 +23,13 @@ define <8 x float> @foo1_8(<8 x i8> %src) {
;CHECK-LABEL: foo1_4:
;CHECK: vcvtdq2ps
;CHECK: ret
+;
+;CHECK-WIDE-LABEL: foo1_4:
+;CHECK-WIDE: vpmovzxbd %xmm0, %xmm0
+;CHECK-WIDE-NEXT: vpslld $24, %xmm0, %xmm0
+;CHECK-WIDE-NEXT: vpsrad $24, %xmm0, %xmm0
+;CHECK-WIDE-NEXT: vcvtdq2ps %xmm0, %xmm0
+;CHECK-WIDE-NEXT: ret
define <4 x float> @foo1_4(<4 x i8> %src) {
%res = sitofp <4 x i8> %src to <4 x float>
ret <4 x float> %res
@@ -19,6 +38,10 @@ define <4 x float> @foo1_4(<4 x i8> %src) {
;CHECK-LABEL: foo2_8:
;CHECK: vcvtdq2ps
;CHECK: ret
+;
+;CHECK-WIDE-LABEL: foo2_8:
+;CHECK-WIDE: vcvtdq2ps %ymm{{.*}}, %ymm{{.*}}
+;CHECK-WIDE: ret
define <8 x float> @foo2_8(<8 x i8> %src) {
%res = uitofp <8 x i8> %src to <8 x float>
ret <8 x float> %res
@@ -27,6 +50,10 @@ define <8 x float> @foo2_8(<8 x i8> %src) {
;CHECK-LABEL: foo2_4:
;CHECK: vcvtdq2ps
;CHECK: ret
+;
+;CHECK-WIDE-LABEL: foo2_4:
+;CHECK-WIDE: vcvtdq2ps %xmm{{.*}}, %xmm{{.*}}
+;CHECK-WIDE: ret
define <4 x float> @foo2_4(<4 x i8> %src) {
%res = uitofp <4 x i8> %src to <4 x float>
ret <4 x float> %res
diff --git a/test/CodeGen/X86/vec_splat.ll b/test/CodeGen/X86/vec_splat.ll
index a02e383..28f2a90 100644
--- a/test/CodeGen/X86/vec_splat.ll
+++ b/test/CodeGen/X86/vec_splat.ll
@@ -1,5 +1,6 @@
; RUN: llc < %s -march=x86 -mcpu=pentium4 -mattr=+sse2 | FileCheck %s -check-prefix=SSE2
; RUN: llc < %s -march=x86 -mcpu=pentium4 -mattr=+sse3 | FileCheck %s -check-prefix=SSE3
+; RUN: llc < %s -march=x86-64 -mattr=+avx | FileCheck %s -check-prefix=AVX
define void @test_v4sf(<4 x float>* %P, <4 x float>* %Q, float %X) nounwind {
%tmp = insertelement <4 x float> zeroinitializer, float %X, i32 0 ; <<4 x float>> [#uses=1]
@@ -37,6 +38,23 @@ define void @test_v2sd(<2 x double>* %P, <2 x double>* %Q, double %X) nounwind {
define <4 x float> @load_extract_splat(<4 x float>* nocapture readonly %ptr, i64 %i, i64 %j) nounwind {
%1 = getelementptr inbounds <4 x float>* %ptr, i64 %i
%2 = load <4 x float>* %1, align 16
+ %3 = trunc i64 %j to i32
+ %4 = extractelement <4 x float> %2, i32 %3
+ %5 = insertelement <4 x float> undef, float %4, i32 0
+ %6 = insertelement <4 x float> %5, float %4, i32 1
+ %7 = insertelement <4 x float> %6, float %4, i32 2
+ %8 = insertelement <4 x float> %7, float %4, i32 3
+ ret <4 x float> %8
+
+; AVX-LABEL: load_extract_splat
+; AVX-NOT: rsp
+; AVX: vbroadcastss
+}
+
+; Fold the extract of a loaded vector element into the load's address
+; computation. This avoids spilling the vector to the stack.
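+; A rough sketch of the expected lowering (exact registers depend on register
+; allocation): the variable lane index becomes part of the addressing mode,
+; so the splat is a single broadcast from memory, e.g. something like
+;   vbroadcastss (%rax,%rdx,4), %xmm0
+; instead of a store of the whole vector followed by a scalar reload.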
+define <4 x float> @load_extract_splat1(<4 x float>* nocapture readonly %ptr, i64 %i, i64 %j) nounwind {
+ %1 = getelementptr inbounds <4 x float>* %ptr, i64 %i
+ %2 = load <4 x float>* %1, align 16
%3 = extractelement <4 x float> %2, i64 %j
%4 = insertelement <4 x float> undef, float %3, i32 0
%5 = insertelement <4 x float> %4, float %3, i32 1
@@ -44,7 +62,7 @@ define <4 x float> @load_extract_splat(<4 x float>* nocapture readonly %ptr, i64
%7 = insertelement <4 x float> %6, float %3, i32 3
ret <4 x float> %7
-; AVX-LABEL: load_extract_splat
+; AVX-LABEL: load_extract_splat1
; AVX-NOT: movs
; AVX: vbroadcastss
}
diff --git a/test/CodeGen/X86/vec_split.ll b/test/CodeGen/X86/vec_split.ll
index f9e7c20..bc2c663 100644
--- a/test/CodeGen/X86/vec_split.ll
+++ b/test/CodeGen/X86/vec_split.ll
@@ -40,3 +40,36 @@ define <32 x i16> @split32(<32 x i16> %a, <32 x i16> %b, <32 x i8> %__mask) {
%2 = select <32 x i1> %1, <32 x i16> %a, <32 x i16> %b
ret <32 x i16> %2
}
+
+; PR19492
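+; Background: each of the two i128 lanes of the vector add lowers to an
+; addq/adcq pair, and the final reduction add contributes a third pair,
+; matching the three pairs checked per configuration below.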
+define i128 @split128(<2 x i128> %a, <2 x i128> %b) {
+; SSE4-LABEL: split128:
+; SSE4: addq
+; SSE4: adcq
+; SSE4: addq
+; SSE4: adcq
+; SSE4: addq
+; SSE4: adcq
+; SSE4: ret
+; AVX1-LABEL: split128:
+; AVX1: addq
+; AVX1: adcq
+; AVX1: addq
+; AVX1: adcq
+; AVX1: addq
+; AVX1: adcq
+; AVX1: ret
+; AVX2-LABEL: split128:
+; AVX2: addq
+; AVX2: adcq
+; AVX2: addq
+; AVX2: adcq
+; AVX2: addq
+; AVX2: adcq
+; AVX2: ret
+ %add = add nsw <2 x i128> %a, %b
+ %rdx.shuf = shufflevector <2 x i128> %add, <2 x i128> undef, <2 x i32> <i32 undef, i32 0>
+ %bin.rdx = add <2 x i128> %add, %rdx.shuf
+ %e = extractelement <2 x i128> %bin.rdx, i32 1
+ ret i128 %e
+}
diff --git a/test/CodeGen/X86/vector-gep.ll b/test/CodeGen/X86/vector-gep.ll
index 9c68f44..3f7ee3a 100644
--- a/test/CodeGen/X86/vector-gep.ll
+++ b/test/CodeGen/X86/vector-gep.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=x86 -mcpu=corei7-avx | FileCheck %s
+; RUN: llc < %s -mtriple=i686-linux -mcpu=corei7-avx | FileCheck %s
; RUN: opt -instsimplify -disable-output < %s
;CHECK-LABEL: AGEP0:
diff --git a/test/CodeGen/X86/vector-idiv.ll b/test/CodeGen/X86/vector-idiv.ll
index 4c30184..b6d43e9 100644
--- a/test/CodeGen/X86/vector-idiv.ll
+++ b/test/CodeGen/X86/vector-idiv.ll
@@ -8,7 +8,7 @@ define <4 x i32> @test1(<4 x i32> %a) {
; SSE41-LABEL: test1:
; SSE41: pmuludq
-; SSE41: pshufd $57
+; SSE41: pshufd $49
; SSE41: pmuludq
; SSE41: shufps $-35
; SSE41: psubd
@@ -18,7 +18,7 @@ define <4 x i32> @test1(<4 x i32> %a) {
; AVX-LABEL: test1:
; AVX: vpmuludq
-; AVX: vpshufd $57
+; AVX: vpshufd $49
; AVX: vpmuludq
; AVX: vshufps $-35
; AVX: vpsubd
@@ -32,11 +32,11 @@ define <8 x i32> @test2(<8 x i32> %a) {
ret <8 x i32> %div
; AVX-LABEL: test2:
-; AVX: vpermd
+; AVX: vpbroadcastd
+; AVX: vpalignr $4
; AVX: vpmuludq
-; AVX: vshufps $-35
; AVX: vpmuludq
-; AVX: vshufps $-35
+; AVX: vpblendd $170
; AVX: vpsubd
; AVX: vpsrld $1
; AVX: vpadd
@@ -107,6 +107,12 @@ define <16 x i16> @test6(<16 x i16> %a) {
define <16 x i8> @test7(<16 x i8> %a) {
%div = sdiv <16 x i8> %a, <i8 7, i8 7, i8 7, i8 7,i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7,i8 7, i8 7, i8 7, i8 7>
ret <16 x i8> %div
+
+; FIXME: scalarized
+; SSE41-LABEL: test7:
+; SSE41: pext
+; AVX-LABEL: test7:
+; AVX: pext
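+; (Background note: the FIXME refers to the sdiv of <16 x i8> currently being
+; scalarized, presumably via per-byte extract (pextrb), divide, and reinsert;
+; the pext check above matches those extracts.)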
}
define <4 x i32> @test8(<4 x i32> %a) {
@@ -115,8 +121,8 @@ define <4 x i32> @test8(<4 x i32> %a) {
; SSE41-LABEL: test8:
; SSE41: pmuldq
-; SSE41: pshufd $57
-; SSE41-NOT: pshufd $57
+; SSE41: pshufd $49
+; SSE41-NOT: pshufd $49
; SSE41: pmuldq
; SSE41: shufps $-35
; SSE41: pshufd $-40
@@ -130,8 +136,8 @@ define <4 x i32> @test8(<4 x i32> %a) {
; SSE: pand
; SSE: paddd
; SSE: pmuludq
-; SSE: pshufd $57
-; SSE-NOT: pshufd $57
+; SSE: pshufd $49
+; SSE-NOT: pshufd $49
; SSE: pmuludq
; SSE: shufps $-35
; SSE: pshufd $-40
@@ -143,8 +149,8 @@ define <4 x i32> @test8(<4 x i32> %a) {
; AVX-LABEL: test8:
; AVX: vpmuldq
-; AVX: vpshufd $57
-; AVX-NOT: vpshufd $57
+; AVX: vpshufd $49
+; AVX-NOT: vpshufd $49
; AVX: vpmuldq
; AVX: vshufps $-35
; AVX: vpshufd $-40
@@ -159,12 +165,11 @@ define <8 x i32> @test9(<8 x i32> %a) {
ret <8 x i32> %div
; AVX-LABEL: test9:
+; AVX: vpalignr $4
; AVX: vpbroadcastd
; AVX: vpmuldq
-; AVX: vshufps $-35
; AVX: vpmuldq
-; AVX: vshufps $-35
-; AVX: vpshufd $-40
+; AVX: vpblendd $170
; AVX: vpadd
; AVX: vpsrld $31
; AVX: vpsrad $2
@@ -177,10 +182,10 @@ define <8 x i32> @test10(<8 x i32> %a) {
; AVX-LABEL: test10:
; AVX: vpbroadcastd
+; AVX: vpalignr $4
; AVX: vpmuludq
-; AVX: vshufps $-35
; AVX: vpmuludq
-; AVX: vshufps $-35
+; AVX: vpblendd $170
; AVX: vpsubd
; AVX: vpsrld $1
; AVX: vpadd
@@ -193,12 +198,11 @@ define <8 x i32> @test11(<8 x i32> %a) {
ret <8 x i32> %rem
; AVX-LABEL: test11:
+; AVX: vpalignr $4
; AVX: vpbroadcastd
; AVX: vpmuldq
-; AVX: vshufps $-35
; AVX: vpmuldq
-; AVX: vshufps $-35
-; AVX: vpshufd $-40
+; AVX: vpblendd $170
; AVX: vpadd
; AVX: vpsrld $31
; AVX: vpsrad $2
diff --git a/test/CodeGen/X86/vector-shuffle-128-v16.ll b/test/CodeGen/X86/vector-shuffle-128-v16.ll
new file mode 100644
index 0000000..4da7e42
--- /dev/null
+++ b/test/CodeGen/X86/vector-shuffle-128-v16.ll
@@ -0,0 +1,196 @@
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=x86-64 -x86-experimental-vector-shuffle-lowering | FileCheck %s --check-prefix=CHECK-SSE2
+
+target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
+target triple = "x86_64-unknown-unknown"
+
+define <16 x i8> @shuffle_v16i8_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00(<16 x i8> %a, <16 x i8> %b) {
+; CHECK-SSE2-LABEL: @shuffle_v16i8_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00
+; CHECK-SSE2: # BB#0:
+; CHECK-SSE2-NEXT: punpcklbw %xmm0, %xmm0
+; CHECK-SSE2-NEXT: pshufd {{.*}} # xmm0 = xmm0[0,1,0,3]
+; CHECK-SSE2-NEXT: pshuflw {{.*}} # xmm0 = xmm0[0,0,0,0,4,5,6,7]
+; CHECK-SSE2-NEXT: pshufhw {{.*}} # xmm0 = xmm0[0,1,2,3,4,4,4,4]
+; CHECK-SSE2-NEXT: retq
+ %shuffle = shufflevector <16 x i8> %a, <16 x i8> %b, <16 x i32> <i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0>
+ ret <16 x i8> %shuffle
+}
+
+define <16 x i8> @shuffle_v16i8_00_00_00_00_00_00_00_00_01_01_01_01_01_01_01_01(<16 x i8> %a, <16 x i8> %b) {
+; CHECK-SSE2-LABEL: @shuffle_v16i8_00_00_00_00_00_00_00_00_01_01_01_01_01_01_01_01
+; CHECK-SSE2: # BB#0:
+; CHECK-SSE2-NEXT: punpcklbw %xmm0, %xmm0
+; CHECK-SSE2-NEXT: pshufd {{.*}} # xmm0 = xmm0[0,1,0,3]
+; CHECK-SSE2-NEXT: pshuflw {{.*}} # xmm0 = xmm0[0,0,0,0,4,5,6,7]
+; CHECK-SSE2-NEXT: pshufhw {{.*}} # xmm0 = xmm0[0,1,2,3,5,5,5,5]
+; CHECK-SSE2-NEXT: retq
+ %shuffle = shufflevector <16 x i8> %a, <16 x i8> %b, <16 x i32> <i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
+ ret <16 x i8> %shuffle
+}
+
+define <16 x i8> @shuffle_v16i8_00_00_00_00_00_00_00_00_08_08_08_08_08_08_08_08(<16 x i8> %a, <16 x i8> %b) {
+; CHECK-SSE2-LABEL: @shuffle_v16i8_00_00_00_00_00_00_00_00_08_08_08_08_08_08_08_08
+; CHECK-SSE2: # BB#0:
+; CHECK-SSE2-NEXT: pshufd {{.*}} # xmm0 = xmm0[0,2,2,3]
+; CHECK-SSE2-NEXT: pshuflw {{.*}} # xmm0 = xmm0[0,2,2,2,4,5,6,7]
+; CHECK-SSE2-NEXT: punpcklbw %xmm0, %xmm0
+; CHECK-SSE2-NEXT: pshuflw {{.*}} # xmm0 = xmm0[0,0,0,0,4,5,6,7]
+; CHECK-SSE2-NEXT: pshufhw {{.*}} # xmm0 = xmm0[0,1,2,3,6,6,6,6]
+; CHECK-SSE2-NEXT: retq
+ %shuffle = shufflevector <16 x i8> %a, <16 x i8> %b, <16 x i32> <i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 8, i32 8, i32 8, i32 8, i32 8, i32 8, i32 8, i32 8>
+ ret <16 x i8> %shuffle
+}
+
+define <16 x i8> @shuffle_v16i8_00_00_00_00_01_01_01_01_02_02_02_02_03_03_03_03(<16 x i8> %a, <16 x i8> %b) {
+; CHECK-SSE2-LABEL: @shuffle_v16i8_00_00_00_00_01_01_01_01_02_02_02_02_03_03_03_03
+; CHECK-SSE2: # BB#0:
+; CHECK-SSE2-NEXT: punpcklbw %xmm0, %xmm0
+; CHECK-SSE2-NEXT: punpcklwd %xmm0, %xmm0
+; CHECK-SSE2-NEXT: retq
+ %shuffle = shufflevector <16 x i8> %a, <16 x i8> %b, <16 x i32> <i32 0, i32 0, i32 0, i32 0, i32 1, i32 1, i32 1, i32 1, i32 2, i32 2, i32 2, i32 2, i32 3, i32 3, i32 3, i32 3>
+ ret <16 x i8> %shuffle
+}
+
+define <16 x i8> @shuffle_v16i8_04_04_04_04_05_05_05_05_06_06_06_06_07_07_07_07(<16 x i8> %a, <16 x i8> %b) {
+; CHECK-SSE2-LABEL: @shuffle_v16i8_04_04_04_04_05_05_05_05_06_06_06_06_07_07_07_07
+; CHECK-SSE2: # BB#0:
+; CHECK-SSE2-NEXT: punpcklbw %xmm0, %xmm0
+; CHECK-SSE2-NEXT: punpckhwd %xmm0, %xmm0
+; CHECK-SSE2-NEXT: retq
+ %shuffle = shufflevector <16 x i8> %a, <16 x i8> %b, <16 x i32> <i32 4, i32 4, i32 4, i32 4, i32 5, i32 5, i32 5, i32 5, i32 6, i32 6, i32 6, i32 6, i32 7, i32 7, i32 7, i32 7>
+ ret <16 x i8> %shuffle
+}
+
+define <16 x i8> @shuffle_v16i8_00_00_00_00_04_04_04_04_08_08_08_08_12_12_12_12(<16 x i8> %a, <16 x i8> %b) {
+; CHECK-SSE2-LABEL: @shuffle_v16i8_00_00_00_00_04_04_04_04_08_08_08_08_12_12_12_12
+; CHECK-SSE2: # BB#0:
+; CHECK-SSE2-NEXT: pshuflw {{.*}} # xmm0 = xmm0[0,2,2,3,4,5,6,7]
+; CHECK-SSE2-NEXT: pshufhw {{.*}} # xmm0 = xmm0[0,1,2,3,4,6,6,7]
+; CHECK-SSE2-NEXT: pshufd {{.*}} # xmm0 = xmm0[0,2,2,3]
+; CHECK-SSE2-NEXT: punpcklbw %xmm0, %xmm0
+; CHECK-SSE2-NEXT: pshuflw {{.*}} # xmm0 = xmm0[0,0,2,2,4,5,6,7]
+; CHECK-SSE2-NEXT: pshufhw {{.*}} # xmm0 = xmm0[0,1,2,3,4,4,6,6]
+; CHECK-SSE2-NEXT: retq
+ %shuffle = shufflevector <16 x i8> %a, <16 x i8> %b, <16 x i32> <i32 0, i32 0, i32 0, i32 0, i32 4, i32 4, i32 4, i32 4, i32 8, i32 8, i32 8, i32 8, i32 12, i32 12, i32 12, i32 12>
+ ret <16 x i8> %shuffle
+}
+
+define <16 x i8> @shuffle_v16i8_00_00_01_01_02_02_03_03_04_04_05_05_06_06_07_07(<16 x i8> %a, <16 x i8> %b) {
+; CHECK-SSE2-LABEL: @shuffle_v16i8_00_00_01_01_02_02_03_03_04_04_05_05_06_06_07_07
+; CHECK-SSE2: # BB#0:
+; CHECK-SSE2-NEXT: punpcklbw %xmm0, %xmm0
+; CHECK-SSE2-NEXT: retq
+ %shuffle = shufflevector <16 x i8> %a, <16 x i8> %b, <16 x i32> <i32 0, i32 0, i32 1, i32 1, i32 2, i32 2, i32 3, i32 3, i32 4, i32 4, i32 5, i32 5, i32 6, i32 6, i32 7, i32 7>
+ ret <16 x i8> %shuffle
+}
+
+define <16 x i8> @shuffle_v16i8_0101010101010101(<16 x i8> %a, <16 x i8> %b) {
+; CHECK-SSE2-LABEL: @shuffle_v16i8_0101010101010101
+; CHECK-SSE2: pshufd {{.*}} # xmm0 = xmm0[0,1,0,3]
+; CHECK-SSE2-NEXT: pshuflw {{.*}} # xmm0 = xmm0[0,0,0,0,4,5,6,7]
+; CHECK-SSE2-NEXT: pshufhw {{.*}} # xmm0 = xmm0[0,1,2,3,4,4,4,4]
+; CHECK-SSE2-NEXT: retq
+ %shuffle = shufflevector <16 x i8> %a, <16 x i8> %b, <16 x i32> <i32 0, i32 1, i32 0, i32 1, i32 0, i32 1, i32 0, i32 1, i32 0, i32 1, i32 0, i32 1, i32 0, i32 1, i32 0, i32 1>
+ ret <16 x i8> %shuffle
+}
+
+define <16 x i8> @shuffle_v16i8_00_16_01_17_02_18_03_19_04_20_05_21_06_22_07_23(<16 x i8> %a, <16 x i8> %b) {
+; CHECK-SSE2-LABEL: @shuffle_v16i8_00_16_01_17_02_18_03_19_04_20_05_21_06_22_07_23
+; CHECK-SSE2: punpcklbw %xmm1, %xmm0
+; CHECK-SSE2-NEXT: retq
+ %shuffle = shufflevector <16 x i8> %a, <16 x i8> %b, <16 x i32> <i32 0, i32 16, i32 1, i32 17, i32 2, i32 18, i32 3, i32 19, i32 4, i32 20, i32 5, i32 21, i32 6, i32 22, i32 7, i32 23>
+ ret <16 x i8> %shuffle
+}
+
+define <16 x i8> @shuffle_v16i8_16_00_16_01_16_02_16_03_16_04_16_05_16_06_16_07(<16 x i8> %a, <16 x i8> %b) {
+; CHECK-SSE2-LABEL: @shuffle_v16i8_16_00_16_01_16_02_16_03_16_04_16_05_16_06_16_07
+; CHECK-SSE2: # BB#0:
+; CHECK-SSE2-NEXT: punpcklbw %xmm1, %xmm1
+; CHECK-SSE2-NEXT: pshuflw {{.*}} # xmm1 = xmm1[0,0,0,0,4,5,6,7]
+; CHECK-SSE2-NEXT: punpcklbw %xmm0, %xmm1
+; CHECK-SSE2-NEXT: movdqa %xmm1, %xmm0
+; CHECK-SSE2-NEXT: retq
+ %shuffle = shufflevector <16 x i8> %a, <16 x i8> %b, <16 x i32> <i32 16, i32 0, i32 16, i32 1, i32 16, i32 2, i32 16, i32 3, i32 16, i32 4, i32 16, i32 5, i32 16, i32 6, i32 16, i32 7>
+ ret <16 x i8> %shuffle
+}
+
+define <16 x i8> @shuffle_v16i8_03_02_01_00_07_06_05_04_11_10_09_08_15_14_13_12(<16 x i8> %a, <16 x i8> %b) {
+; CHECK-SSE2-LABEL: @shuffle_v16i8_03_02_01_00_07_06_05_04_11_10_09_08_15_14_13_12
+; CHECK-SSE2: pxor %xmm1, %xmm1
+; CHECK-SSE2-NEXT: movdqa %xmm0, %xmm2
+; CHECK-SSE2-NEXT: punpckhbw %xmm1, %xmm2
+; CHECK-SSE2-NEXT: pshuflw {{.*}} # xmm2 = xmm2[3,2,1,0,4,5,6,7]
+; CHECK-SSE2-NEXT: pshufhw {{.*}} # xmm2 = xmm2[0,1,2,3,7,6,5,4]
+; CHECK-SSE2-NEXT: punpcklbw %xmm1, %xmm0
+; CHECK-SSE2-NEXT: pshuflw {{.*}} # xmm0 = xmm0[3,2,1,0,4,5,6,7]
+; CHECK-SSE2-NEXT: pshufhw {{.*}} # xmm0 = xmm0[0,1,2,3,7,6,5,4]
+; CHECK-SSE2-NEXT: packuswb %xmm2, %xmm0
+; CHECK-SSE2-NEXT: retq
+ %shuffle = shufflevector <16 x i8> %a, <16 x i8> %b, <16 x i32> <i32 3, i32 2, i32 1, i32 0, i32 7, i32 6, i32 5, i32 4, i32 11, i32 10, i32 9, i32 8, i32 15, i32 14, i32 13, i32 12>
+ ret <16 x i8> %shuffle
+}
+
+define <16 x i8> @shuffle_v16i8_03_02_01_00_07_06_05_04_19_18_17_16_23_22_21_20(<16 x i8> %a, <16 x i8> %b) {
+; CHECK-SSE2-LABEL: @shuffle_v16i8_03_02_01_00_07_06_05_04_19_18_17_16_23_22_21_20
+; CHECK-SSE2: pxor %xmm2, %xmm2
+; CHECK-SSE2-NEXT: punpcklbw %xmm2, %xmm1
+; CHECK-SSE2-NEXT: pshuflw {{.*}} # xmm1 = xmm1[3,2,1,0,4,5,6,7]
+; CHECK-SSE2-NEXT: pshufhw {{.*}} # xmm1 = xmm1[0,1,2,3,7,6,5,4]
+; CHECK-SSE2-NEXT: punpcklbw %xmm2, %xmm0
+; CHECK-SSE2-NEXT: pshuflw {{.*}} # xmm0 = xmm0[3,2,1,0,4,5,6,7]
+; CHECK-SSE2-NEXT: pshufhw {{.*}} # xmm0 = xmm0[0,1,2,3,7,6,5,4]
+; CHECK-SSE2-NEXT: packuswb %xmm1, %xmm0
+; CHECK-SSE2-NEXT: retq
+ %shuffle = shufflevector <16 x i8> %a, <16 x i8> %b, <16 x i32> <i32 3, i32 2, i32 1, i32 0, i32 7, i32 6, i32 5, i32 4, i32 19, i32 18, i32 17, i32 16, i32 23, i32 22, i32 21, i32 20>
+ ret <16 x i8> %shuffle
+}
+
+define <16 x i8> @shuffle_v16i8_03_02_01_00_31_30_29_28_11_10_09_08_23_22_21_20(<16 x i8> %a, <16 x i8> %b) {
+; CHECK-SSE2-LABEL: @shuffle_v16i8_03_02_01_00_31_30_29_28_11_10_09_08_23_22_21_20
+; CHECK-SSE2: pxor %xmm2, %xmm2
+; CHECK-SSE2-NEXT: movdqa %xmm1, %xmm3
+; CHECK-SSE2-NEXT: punpcklbw %xmm2, %xmm3
+; CHECK-SSE2-NEXT: pshufhw {{.*}} # xmm3 = xmm3[0,1,2,3,7,6,5,4]
+; CHECK-SSE2-NEXT: movdqa %xmm0, %xmm4
+; CHECK-SSE2-NEXT: punpckhbw %xmm2, %xmm4
+; CHECK-SSE2-NEXT: pshuflw {{.*}} # xmm4 = xmm4[3,2,1,0,4,5,6,7]
+; CHECK-SSE2-NEXT: shufpd {{.*}} # xmm4 = xmm4[0],xmm3[1]
+; CHECK-SSE2-NEXT: punpckhbw %xmm2, %xmm1
+; CHECK-SSE2-NEXT: pshufhw {{.*}} # xmm1 = xmm1[0,1,2,3,7,6,5,4]
+; CHECK-SSE2-NEXT: punpcklbw %xmm2, %xmm0
+; CHECK-SSE2-NEXT: pshuflw {{.*}} # xmm0 = xmm0[3,2,1,0,4,5,6,7]
+; CHECK-SSE2-NEXT: shufpd {{.*}} # xmm0 = xmm0[0],xmm1[1]
+; CHECK-SSE2-NEXT: packuswb %xmm4, %xmm0
+; CHECK-SSE2-NEXT: retq
+ %shuffle = shufflevector <16 x i8> %a, <16 x i8> %b, <16 x i32> <i32 3, i32 2, i32 1, i32 0, i32 31, i32 30, i32 29, i32 28, i32 11, i32 10, i32 9, i32 8, i32 23, i32 22, i32 21, i32 20>
+ ret <16 x i8> %shuffle
+}
+
+define <16 x i8> @zext_to_v8i16_shuffle(<16 x i8> %a) {
+; CHECK-SSE2-LABEL: @zext_to_v8i16_shuffle
+; CHECK-SSE2: pxor %xmm1, %xmm1
+; CHECK-SSE2-NEXT: punpcklbw %xmm1, %xmm0
+ %shuffle = shufflevector <16 x i8> %a, <16 x i8> zeroinitializer, <16 x i32> <i32 0, i32 17, i32 1, i32 19, i32 2, i32 21, i32 3, i32 23, i32 4, i32 25, i32 5, i32 27, i32 6, i32 29, i32 7, i32 31>
+ ret <16 x i8> %shuffle
+}
+
+define <16 x i8> @zext_to_v4i32_shuffle(<16 x i8> %a) {
+; CHECK-SSE2-LABEL: @zext_to_v4i32_shuffle
+; CHECK-SSE2: pxor %xmm1, %xmm1
+; CHECK-SSE2-NEXT: punpcklbw %xmm1, %xmm0
+; CHECK-SSE2-NEXT: punpcklbw %xmm1, %xmm0
+ %shuffle = shufflevector <16 x i8> %a, <16 x i8> zeroinitializer, <16 x i32> <i32 0, i32 17, i32 18, i32 19, i32 1, i32 21, i32 22, i32 23, i32 2, i32 25, i32 26, i32 27, i32 3, i32 29, i32 30, i32 31>
+ ret <16 x i8> %shuffle
+}
+
+define <16 x i8> @trunc_v4i32_shuffle(<16 x i8> %a) {
+; CHECK-SSE2-LABEL: @trunc_v4i32_shuffle
+; CHECK-SSE2: # BB#0:
+; CHECK-SSE2-NEXT: pand
+; CHECK-SSE2-NEXT: pshuflw {{.*}} # xmm0 = xmm0[0,2,2,3,4,5,6,7]
+; CHECK-SSE2-NEXT: pshufhw {{.*}} # xmm0 = xmm0[0,1,2,3,4,6,6,7]
+; CHECK-SSE2-NEXT: pshufd {{.*}} # xmm0 = xmm0[0,2,2,3]
+; CHECK-SSE2-NEXT: packuswb %xmm0, %xmm0
+; CHECK-SSE2-NEXT: retq
+ %shuffle = shufflevector <16 x i8> %a, <16 x i8> undef, <16 x i32> <i32 0, i32 4, i32 8, i32 12, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+ ret <16 x i8> %shuffle
+}
diff --git a/test/CodeGen/X86/vector-shuffle-128-v2.ll b/test/CodeGen/X86/vector-shuffle-128-v2.ll
new file mode 100644
index 0000000..78b4ee7
--- /dev/null
+++ b/test/CodeGen/X86/vector-shuffle-128-v2.ll
@@ -0,0 +1,219 @@
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=x86-64 -x86-experimental-vector-shuffle-lowering | FileCheck %s --check-prefix=CHECK-SSE2
+
+target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
+target triple = "x86_64-unknown-unknown"
+
+define <2 x i64> @shuffle_v2i64_00(<2 x i64> %a, <2 x i64> %b) {
+; CHECK-SSE2-LABEL: @shuffle_v2i64_00
+; CHECK-SSE2: pshufd {{.*}} # xmm0 = xmm0[0,1,0,1]
+; CHECK-SSE2-NEXT: retq
+ %shuffle = shufflevector <2 x i64> %a, <2 x i64> %b, <2 x i32> <i32 0, i32 0>
+ ret <2 x i64> %shuffle
+}
+define <2 x i64> @shuffle_v2i64_10(<2 x i64> %a, <2 x i64> %b) {
+; CHECK-SSE2-LABEL: @shuffle_v2i64_10
+; CHECK-SSE2: pshufd {{.*}} # xmm0 = xmm0[2,3,0,1]
+; CHECK-SSE2-NEXT: retq
+ %shuffle = shufflevector <2 x i64> %a, <2 x i64> %b, <2 x i32> <i32 1, i32 0>
+ ret <2 x i64> %shuffle
+}
+define <2 x i64> @shuffle_v2i64_11(<2 x i64> %a, <2 x i64> %b) {
+; CHECK-SSE2-LABEL: @shuffle_v2i64_11
+; CHECK-SSE2: pshufd {{.*}} # xmm0 = xmm0[2,3,2,3]
+; CHECK-SSE2-NEXT: retq
+ %shuffle = shufflevector <2 x i64> %a, <2 x i64> %b, <2 x i32> <i32 1, i32 1>
+ ret <2 x i64> %shuffle
+}
+define <2 x i64> @shuffle_v2i64_22(<2 x i64> %a, <2 x i64> %b) {
+; CHECK-SSE2-LABEL: @shuffle_v2i64_22
+; CHECK-SSE2: pshufd {{.*}} # xmm0 = xmm1[0,1,0,1]
+; CHECK-SSE2-NEXT: retq
+ %shuffle = shufflevector <2 x i64> %a, <2 x i64> %b, <2 x i32> <i32 2, i32 2>
+ ret <2 x i64> %shuffle
+}
+define <2 x i64> @shuffle_v2i64_32(<2 x i64> %a, <2 x i64> %b) {
+; CHECK-SSE2-LABEL: @shuffle_v2i64_32
+; CHECK-SSE2: pshufd {{.*}} # xmm0 = xmm1[2,3,0,1]
+; CHECK-SSE2-NEXT: retq
+ %shuffle = shufflevector <2 x i64> %a, <2 x i64> %b, <2 x i32> <i32 3, i32 2>
+ ret <2 x i64> %shuffle
+}
+define <2 x i64> @shuffle_v2i64_33(<2 x i64> %a, <2 x i64> %b) {
+; CHECK-SSE2-LABEL: @shuffle_v2i64_33
+; CHECK-SSE2: pshufd {{.*}} # xmm0 = xmm1[2,3,2,3]
+; CHECK-SSE2-NEXT: retq
+ %shuffle = shufflevector <2 x i64> %a, <2 x i64> %b, <2 x i32> <i32 3, i32 3>
+ ret <2 x i64> %shuffle
+}
+
+define <2 x double> @shuffle_v2f64_00(<2 x double> %a, <2 x double> %b) {
+; CHECK-SSE2-LABEL: @shuffle_v2f64_00
+; CHECK-SSE2: shufpd {{.*}} # xmm0 = xmm0[0,0]
+; CHECK-SSE2-NEXT: retq
+ %shuffle = shufflevector <2 x double> %a, <2 x double> %b, <2 x i32> <i32 0, i32 0>
+ ret <2 x double> %shuffle
+}
+define <2 x double> @shuffle_v2f64_10(<2 x double> %a, <2 x double> %b) {
+; CHECK-SSE2-LABEL: @shuffle_v2f64_10
+; CHECK-SSE2: shufpd {{.*}} # xmm0 = xmm0[1,0]
+; CHECK-SSE2-NEXT: retq
+ %shuffle = shufflevector <2 x double> %a, <2 x double> %b, <2 x i32> <i32 1, i32 0>
+ ret <2 x double> %shuffle
+}
+define <2 x double> @shuffle_v2f64_11(<2 x double> %a, <2 x double> %b) {
+; CHECK-SSE2-LABEL: @shuffle_v2f64_11
+; CHECK-SSE2: shufpd {{.*}} # xmm0 = xmm0[1,1]
+; CHECK-SSE2-NEXT: retq
+ %shuffle = shufflevector <2 x double> %a, <2 x double> %b, <2 x i32> <i32 1, i32 1>
+ ret <2 x double> %shuffle
+}
+define <2 x double> @shuffle_v2f64_22(<2 x double> %a, <2 x double> %b) {
+; FIXME: Should these use movapd + shufpd to remove a domain change at the cost
+; of a mov?
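+; (Background for the FIXME: pshufd executes an integer-domain shuffle on
+; floating-point data, which can incur a bypass-delay penalty on some
+; microarchitectures; movapd + shufpd would stay in the FP domain at the
+; cost of an extra register copy.)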
+;
+; CHECK-SSE2-LABEL: @shuffle_v2f64_22
+; CHECK-SSE2: pshufd {{.*}} # xmm0 = xmm1[0,1,0,1]
+; CHECK-SSE2-NEXT: retq
+ %shuffle = shufflevector <2 x double> %a, <2 x double> %b, <2 x i32> <i32 2, i32 2>
+ ret <2 x double> %shuffle
+}
+define <2 x double> @shuffle_v2f64_32(<2 x double> %a, <2 x double> %b) {
+; CHECK-SSE2-LABEL: @shuffle_v2f64_32
+; CHECK-SSE2: pshufd {{.*}} # xmm0 = xmm1[2,3,0,1]
+; CHECK-SSE2-NEXT: retq
+ %shuffle = shufflevector <2 x double> %a, <2 x double> %b, <2 x i32> <i32 3, i32 2>
+ ret <2 x double> %shuffle
+}
+define <2 x double> @shuffle_v2f64_33(<2 x double> %a, <2 x double> %b) {
+; CHECK-SSE2-LABEL: @shuffle_v2f64_33
+; CHECK-SSE2: pshufd {{.*}} # xmm0 = xmm1[2,3,2,3]
+; CHECK-SSE2-NEXT: retq
+ %shuffle = shufflevector <2 x double> %a, <2 x double> %b, <2 x i32> <i32 3, i32 3>
+ ret <2 x double> %shuffle
+}
+
+
+define <2 x i64> @shuffle_v2i64_02(<2 x i64> %a, <2 x i64> %b) {
+; CHECK-SSE2-LABEL: @shuffle_v2i64_02
+; CHECK-SSE2: shufpd {{.*}} # xmm0 = xmm0[0],xmm1[0]
+; CHECK-SSE2-NEXT: retq
+ %shuffle = shufflevector <2 x i64> %a, <2 x i64> %b, <2 x i32> <i32 0, i32 2>
+ ret <2 x i64> %shuffle
+}
+define <2 x i64> @shuffle_v2i64_02_copy(<2 x i64> %nonce, <2 x i64> %a, <2 x i64> %b) {
+; CHECK-SSE2-LABEL: @shuffle_v2i64_02_copy
+; CHECK-SSE2: shufpd {{.*}} # xmm1 = xmm1[0],xmm2[0]
+; CHECK-SSE2-NEXT: movapd %xmm1, %xmm0
+; CHECK-SSE2-NEXT: retq
+ %shuffle = shufflevector <2 x i64> %a, <2 x i64> %b, <2 x i32> <i32 0, i32 2>
+ ret <2 x i64> %shuffle
+}
+define <2 x i64> @shuffle_v2i64_03(<2 x i64> %a, <2 x i64> %b) {
+; CHECK-SSE2-LABEL: @shuffle_v2i64_03
+; CHECK-SSE2: shufpd {{.*}} # xmm0 = xmm0[0],xmm1[1]
+; CHECK-SSE2-NEXT: retq
+ %shuffle = shufflevector <2 x i64> %a, <2 x i64> %b, <2 x i32> <i32 0, i32 3>
+ ret <2 x i64> %shuffle
+}
+define <2 x i64> @shuffle_v2i64_03_copy(<2 x i64> %nonce, <2 x i64> %a, <2 x i64> %b) {
+; CHECK-SSE2-LABEL: @shuffle_v2i64_03_copy
+; CHECK-SSE2: shufpd {{.*}} # xmm1 = xmm1[0],xmm2[1]
+; CHECK-SSE2-NEXT: movapd %xmm1, %xmm0
+; CHECK-SSE2-NEXT: retq
+ %shuffle = shufflevector <2 x i64> %a, <2 x i64> %b, <2 x i32> <i32 0, i32 3>
+ ret <2 x i64> %shuffle
+}
+define <2 x i64> @shuffle_v2i64_12(<2 x i64> %a, <2 x i64> %b) {
+; CHECK-SSE2-LABEL: @shuffle_v2i64_12
+; CHECK-SSE2: shufpd {{.*}} # xmm0 = xmm0[1],xmm1[0]
+; CHECK-SSE2-NEXT: retq
+ %shuffle = shufflevector <2 x i64> %a, <2 x i64> %b, <2 x i32> <i32 1, i32 2>
+ ret <2 x i64> %shuffle
+}
+define <2 x i64> @shuffle_v2i64_12_copy(<2 x i64> %nonce, <2 x i64> %a, <2 x i64> %b) {
+; CHECK-SSE2-LABEL: @shuffle_v2i64_12_copy
+; CHECK-SSE2: shufpd {{.*}} # xmm1 = xmm1[1],xmm2[0]
+; CHECK-SSE2-NEXT: movapd %xmm1, %xmm0
+; CHECK-SSE2-NEXT: retq
+ %shuffle = shufflevector <2 x i64> %a, <2 x i64> %b, <2 x i32> <i32 1, i32 2>
+ ret <2 x i64> %shuffle
+}
+define <2 x i64> @shuffle_v2i64_13(<2 x i64> %a, <2 x i64> %b) {
+; CHECK-SSE2-LABEL: @shuffle_v2i64_13
+; CHECK-SSE2: shufpd {{.*}} # xmm0 = xmm0[1],xmm1[1]
+; CHECK-SSE2-NEXT: retq
+ %shuffle = shufflevector <2 x i64> %a, <2 x i64> %b, <2 x i32> <i32 1, i32 3>
+ ret <2 x i64> %shuffle
+}
+define <2 x i64> @shuffle_v2i64_13_copy(<2 x i64> %nonce, <2 x i64> %a, <2 x i64> %b) {
+; CHECK-SSE2-LABEL: @shuffle_v2i64_13_copy
+; CHECK-SSE2: shufpd {{.*}} # xmm1 = xmm1[1],xmm2[1]
+; CHECK-SSE2-NEXT: movapd %xmm1, %xmm0
+; CHECK-SSE2-NEXT: retq
+ %shuffle = shufflevector <2 x i64> %a, <2 x i64> %b, <2 x i32> <i32 1, i32 3>
+ ret <2 x i64> %shuffle
+}
+define <2 x i64> @shuffle_v2i64_20(<2 x i64> %a, <2 x i64> %b) {
+; CHECK-SSE2-LABEL: @shuffle_v2i64_20
+; CHECK-SSE2: shufpd {{.*}} # xmm1 = xmm1[0],xmm0[0]
+; CHECK-SSE2-NEXT: movapd %xmm1, %xmm0
+; CHECK-SSE2-NEXT: retq
+ %shuffle = shufflevector <2 x i64> %a, <2 x i64> %b, <2 x i32> <i32 2, i32 0>
+ ret <2 x i64> %shuffle
+}
+define <2 x i64> @shuffle_v2i64_20_copy(<2 x i64> %nonce, <2 x i64> %a, <2 x i64> %b) {
+; CHECK-SSE2-LABEL: @shuffle_v2i64_20_copy
+; CHECK-SSE2: shufpd {{.*}} # xmm2 = xmm2[0],xmm1[0]
+; CHECK-SSE2-NEXT: movapd %xmm2, %xmm0
+; CHECK-SSE2-NEXT: retq
+ %shuffle = shufflevector <2 x i64> %a, <2 x i64> %b, <2 x i32> <i32 2, i32 0>
+ ret <2 x i64> %shuffle
+}
+define <2 x i64> @shuffle_v2i64_21(<2 x i64> %a, <2 x i64> %b) {
+; CHECK-SSE2-LABEL: @shuffle_v2i64_21
+; CHECK-SSE2: shufpd {{.*}} # xmm1 = xmm1[0],xmm0[1]
+; CHECK-SSE2-NEXT: movapd %xmm1, %xmm0
+; CHECK-SSE2-NEXT: retq
+ %shuffle = shufflevector <2 x i64> %a, <2 x i64> %b, <2 x i32> <i32 2, i32 1>
+ ret <2 x i64> %shuffle
+}
+define <2 x i64> @shuffle_v2i64_21_copy(<2 x i64> %nonce, <2 x i64> %a, <2 x i64> %b) {
+; CHECK-SSE2-LABEL: @shuffle_v2i64_21_copy
+; CHECK-SSE2: shufpd {{.*}} # xmm2 = xmm2[0],xmm1[1]
+; CHECK-SSE2-NEXT: movapd %xmm2, %xmm0
+; CHECK-SSE2-NEXT: retq
+ %shuffle = shufflevector <2 x i64> %a, <2 x i64> %b, <2 x i32> <i32 2, i32 1>
+ ret <2 x i64> %shuffle
+}
+define <2 x i64> @shuffle_v2i64_30(<2 x i64> %a, <2 x i64> %b) {
+; CHECK-SSE2-LABEL: @shuffle_v2i64_30
+; CHECK-SSE2: shufpd {{.*}} # xmm1 = xmm1[1],xmm0[0]
+; CHECK-SSE2-NEXT: movapd %xmm1, %xmm0
+; CHECK-SSE2-NEXT: retq
+ %shuffle = shufflevector <2 x i64> %a, <2 x i64> %b, <2 x i32> <i32 3, i32 0>
+ ret <2 x i64> %shuffle
+}
+define <2 x i64> @shuffle_v2i64_30_copy(<2 x i64> %nonce, <2 x i64> %a, <2 x i64> %b) {
+; CHECK-SSE2-LABEL: @shuffle_v2i64_30_copy
+; CHECK-SSE2: shufpd {{.*}} # xmm2 = xmm2[1],xmm1[0]
+; CHECK-SSE2-NEXT: movapd %xmm2, %xmm0
+; CHECK-SSE2-NEXT: retq
+ %shuffle = shufflevector <2 x i64> %a, <2 x i64> %b, <2 x i32> <i32 3, i32 0>
+ ret <2 x i64> %shuffle
+}
+define <2 x i64> @shuffle_v2i64_31(<2 x i64> %a, <2 x i64> %b) {
+; CHECK-SSE2-LABEL: @shuffle_v2i64_31
+; CHECK-SSE2: shufpd {{.*}} # xmm1 = xmm1[1],xmm0[1]
+; CHECK-SSE2-NEXT: movapd %xmm1, %xmm0
+; CHECK-SSE2-NEXT: retq
+ %shuffle = shufflevector <2 x i64> %a, <2 x i64> %b, <2 x i32> <i32 3, i32 1>
+ ret <2 x i64> %shuffle
+}
+define <2 x i64> @shuffle_v2i64_31_copy(<2 x i64> %nonce, <2 x i64> %a, <2 x i64> %b) {
+; CHECK-SSE2-LABEL: @shuffle_v2i64_31_copy
+; CHECK-SSE2: shufpd {{.*}} # xmm2 = xmm2[1],xmm1[1]
+; CHECK-SSE2-NEXT: movapd %xmm2, %xmm0
+; CHECK-SSE2-NEXT: retq
+ %shuffle = shufflevector <2 x i64> %a, <2 x i64> %b, <2 x i32> <i32 3, i32 1>
+ ret <2 x i64> %shuffle
+}
diff --git a/test/CodeGen/X86/vector-shuffle-128-v4.ll b/test/CodeGen/X86/vector-shuffle-128-v4.ll
new file mode 100644
index 0000000..7d496fa
--- /dev/null
+++ b/test/CodeGen/X86/vector-shuffle-128-v4.ll
@@ -0,0 +1,170 @@
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=x86-64 -x86-experimental-vector-shuffle-lowering | FileCheck %s --check-prefix=CHECK-SSE2
+
+target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
+target triple = "x86_64-unknown-unknown"
+
+define <4 x i32> @shuffle_v4i32_0001(<4 x i32> %a, <4 x i32> %b) {
+; CHECK-SSE2-LABEL: @shuffle_v4i32_0001
+; CHECK-SSE2: pshufd {{.*}} # xmm0 = xmm0[0,0,0,1]
+; CHECK-SSE2-NEXT: retq
+ %shuffle = shufflevector <4 x i32> %a, <4 x i32> %b, <4 x i32> <i32 0, i32 0, i32 0, i32 1>
+ ret <4 x i32> %shuffle
+}
+define <4 x i32> @shuffle_v4i32_0020(<4 x i32> %a, <4 x i32> %b) {
+; CHECK-SSE2-LABEL: @shuffle_v4i32_0020
+; CHECK-SSE2: pshufd {{.*}} # xmm0 = xmm0[0,0,2,0]
+; CHECK-SSE2-NEXT: retq
+ %shuffle = shufflevector <4 x i32> %a, <4 x i32> %b, <4 x i32> <i32 0, i32 0, i32 2, i32 0>
+ ret <4 x i32> %shuffle
+}
+define <4 x i32> @shuffle_v4i32_0300(<4 x i32> %a, <4 x i32> %b) {
+; CHECK-SSE2-LABEL: @shuffle_v4i32_0300
+; CHECK-SSE2: pshufd {{.*}} # xmm0 = xmm0[0,3,0,0]
+; CHECK-SSE2-NEXT: retq
+ %shuffle = shufflevector <4 x i32> %a, <4 x i32> %b, <4 x i32> <i32 0, i32 3, i32 0, i32 0>
+ ret <4 x i32> %shuffle
+}
+define <4 x i32> @shuffle_v4i32_1000(<4 x i32> %a, <4 x i32> %b) {
+; CHECK-SSE2-LABEL: @shuffle_v4i32_1000
+; CHECK-SSE2: pshufd {{.*}} # xmm0 = xmm0[1,0,0,0]
+; CHECK-SSE2-NEXT: retq
+ %shuffle = shufflevector <4 x i32> %a, <4 x i32> %b, <4 x i32> <i32 1, i32 0, i32 0, i32 0>
+ ret <4 x i32> %shuffle
+}
+define <4 x i32> @shuffle_v4i32_2200(<4 x i32> %a, <4 x i32> %b) {
+; CHECK-SSE2-LABEL: @shuffle_v4i32_2200
+; CHECK-SSE2: pshufd {{.*}} # xmm0 = xmm0[2,2,0,0]
+; CHECK-SSE2-NEXT: retq
+ %shuffle = shufflevector <4 x i32> %a, <4 x i32> %b, <4 x i32> <i32 2, i32 2, i32 0, i32 0>
+ ret <4 x i32> %shuffle
+}
+define <4 x i32> @shuffle_v4i32_3330(<4 x i32> %a, <4 x i32> %b) {
+; CHECK-SSE2-LABEL: @shuffle_v4i32_3330
+; CHECK-SSE2: pshufd {{.*}} # xmm0 = xmm0[3,3,3,0]
+; CHECK-SSE2-NEXT: retq
+ %shuffle = shufflevector <4 x i32> %a, <4 x i32> %b, <4 x i32> <i32 3, i32 3, i32 3, i32 0>
+ ret <4 x i32> %shuffle
+}
+define <4 x i32> @shuffle_v4i32_3210(<4 x i32> %a, <4 x i32> %b) {
+; CHECK-SSE2-LABEL: @shuffle_v4i32_3210
+; CHECK-SSE2: pshufd {{.*}} # xmm0 = xmm0[3,2,1,0]
+; CHECK-SSE2-NEXT: retq
+ %shuffle = shufflevector <4 x i32> %a, <4 x i32> %b, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
+ ret <4 x i32> %shuffle
+}
+
+define <4 x float> @shuffle_v4f32_0001(<4 x float> %a, <4 x float> %b) {
+; CHECK-SSE2-LABEL: @shuffle_v4f32_0001
+; CHECK-SSE2: shufps {{.*}} # xmm0 = xmm0[0,0,0,1]
+; CHECK-SSE2-NEXT: retq
+ %shuffle = shufflevector <4 x float> %a, <4 x float> %b, <4 x i32> <i32 0, i32 0, i32 0, i32 1>
+ ret <4 x float> %shuffle
+}
+define <4 x float> @shuffle_v4f32_0020(<4 x float> %a, <4 x float> %b) {
+; CHECK-SSE2-LABEL: @shuffle_v4f32_0020
+; CHECK-SSE2: shufps {{.*}} # xmm0 = xmm0[0,0,2,0]
+; CHECK-SSE2-NEXT: retq
+ %shuffle = shufflevector <4 x float> %a, <4 x float> %b, <4 x i32> <i32 0, i32 0, i32 2, i32 0>
+ ret <4 x float> %shuffle
+}
+define <4 x float> @shuffle_v4f32_0300(<4 x float> %a, <4 x float> %b) {
+; CHECK-SSE2-LABEL: @shuffle_v4f32_0300
+; CHECK-SSE2: shufps {{.*}} # xmm0 = xmm0[0,3,0,0]
+; CHECK-SSE2-NEXT: retq
+ %shuffle = shufflevector <4 x float> %a, <4 x float> %b, <4 x i32> <i32 0, i32 3, i32 0, i32 0>
+ ret <4 x float> %shuffle
+}
+define <4 x float> @shuffle_v4f32_1000(<4 x float> %a, <4 x float> %b) {
+; CHECK-SSE2-LABEL: @shuffle_v4f32_1000
+; CHECK-SSE2: shufps {{.*}} # xmm0 = xmm0[1,0,0,0]
+; CHECK-SSE2-NEXT: retq
+ %shuffle = shufflevector <4 x float> %a, <4 x float> %b, <4 x i32> <i32 1, i32 0, i32 0, i32 0>
+ ret <4 x float> %shuffle
+}
+define <4 x float> @shuffle_v4f32_2200(<4 x float> %a, <4 x float> %b) {
+; CHECK-SSE2-LABEL: @shuffle_v4f32_2200
+; CHECK-SSE2: shufps {{.*}} # xmm0 = xmm0[2,2,0,0]
+; CHECK-SSE2-NEXT: retq
+ %shuffle = shufflevector <4 x float> %a, <4 x float> %b, <4 x i32> <i32 2, i32 2, i32 0, i32 0>
+ ret <4 x float> %shuffle
+}
+define <4 x float> @shuffle_v4f32_3330(<4 x float> %a, <4 x float> %b) {
+; CHECK-SSE2-LABEL: @shuffle_v4f32_3330
+; CHECK-SSE2: shufps {{.*}} # xmm0 = xmm0[3,3,3,0]
+; CHECK-SSE2-NEXT: retq
+ %shuffle = shufflevector <4 x float> %a, <4 x float> %b, <4 x i32> <i32 3, i32 3, i32 3, i32 0>
+ ret <4 x float> %shuffle
+}
+define <4 x float> @shuffle_v4f32_3210(<4 x float> %a, <4 x float> %b) {
+; CHECK-SSE2-LABEL: @shuffle_v4f32_3210
+; CHECK-SSE2: shufps {{.*}} # xmm0 = xmm0[3,2,1,0]
+; CHECK-SSE2-NEXT: retq
+ %shuffle = shufflevector <4 x float> %a, <4 x float> %b, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
+ ret <4 x float> %shuffle
+}
+
+define <4 x i32> @shuffle_v4i32_0124(<4 x i32> %a, <4 x i32> %b) {
+; CHECK-SSE2-LABEL: @shuffle_v4i32_0124
+; CHECK-SSE2: shufps {{.*}} # xmm1 = xmm1[0,0],xmm0[2,0]
+; CHECK-SSE2-NEXT: shufps {{.*}} # xmm0 = xmm0[0,1],xmm1[2,0]
+; CHECK-SSE2-NEXT: retq
+ %shuffle = shufflevector <4 x i32> %a, <4 x i32> %b, <4 x i32> <i32 0, i32 1, i32 2, i32 4>
+ ret <4 x i32> %shuffle
+}
+define <4 x i32> @shuffle_v4i32_0142(<4 x i32> %a, <4 x i32> %b) {
+; CHECK-SSE2-LABEL: @shuffle_v4i32_0142
+; CHECK-SSE2: shufps {{.*}} # xmm1 = xmm1[0,0],xmm0[2,0]
+; CHECK-SSE2-NEXT: shufps {{.*}} # xmm0 = xmm0[0,1],xmm1[0,2]
+; CHECK-SSE2-NEXT: retq
+ %shuffle = shufflevector <4 x i32> %a, <4 x i32> %b, <4 x i32> <i32 0, i32 1, i32 4, i32 2>
+ ret <4 x i32> %shuffle
+}
+define <4 x i32> @shuffle_v4i32_0412(<4 x i32> %a, <4 x i32> %b) {
+; CHECK-SSE2-LABEL: @shuffle_v4i32_0412
+; CHECK-SSE2: shufps {{.*}} # xmm1 = xmm1[0,0],xmm0[0,0]
+; CHECK-SSE2-NEXT: shufps {{.*}} # xmm1 = xmm1[2,0],xmm0[1,2]
+; CHECK-SSE2-NEXT: movaps %xmm1, %xmm0
+; CHECK-SSE2-NEXT: retq
+ %shuffle = shufflevector <4 x i32> %a, <4 x i32> %b, <4 x i32> <i32 0, i32 4, i32 1, i32 2>
+ ret <4 x i32> %shuffle
+}
+define <4 x i32> @shuffle_v4i32_4012(<4 x i32> %a, <4 x i32> %b) {
+; CHECK-SSE2-LABEL: @shuffle_v4i32_4012
+; CHECK-SSE2: shufps {{.*}} # xmm1 = xmm1[0,0],xmm0[0,0]
+; CHECK-SSE2-NEXT: shufps {{.*}} # xmm1 = xmm1[0,2],xmm0[1,2]
+; CHECK-SSE2-NEXT: movaps %xmm1, %xmm0
+; CHECK-SSE2-NEXT: retq
+ %shuffle = shufflevector <4 x i32> %a, <4 x i32> %b, <4 x i32> <i32 4, i32 0, i32 1, i32 2>
+ ret <4 x i32> %shuffle
+}
+define <4 x i32> @shuffle_v4i32_0145(<4 x i32> %a, <4 x i32> %b) {
+; CHECK-SSE2-LABEL: @shuffle_v4i32_0145
+; CHECK-SSE2: shufpd {{.*}} # xmm0 = xmm0[0],xmm1[0]
+; CHECK-SSE2-NEXT: retq
+ %shuffle = shufflevector <4 x i32> %a, <4 x i32> %b, <4 x i32> <i32 0, i32 1, i32 4, i32 5>
+ ret <4 x i32> %shuffle
+}
+define <4 x i32> @shuffle_v4i32_0451(<4 x i32> %a, <4 x i32> %b) {
+; CHECK-SSE2-LABEL: @shuffle_v4i32_0451
+; CHECK-SSE2: shufps {{.*}} # xmm0 = xmm0[0,1],xmm1[0,1]
+; CHECK-SSE2-NEXT: shufps {{.*}} # xmm0 = xmm0[0,2,3,1]
+; CHECK-SSE2-NEXT: retq
+ %shuffle = shufflevector <4 x i32> %a, <4 x i32> %b, <4 x i32> <i32 0, i32 4, i32 5, i32 1>
+ ret <4 x i32> %shuffle
+}
+define <4 x i32> @shuffle_v4i32_4501(<4 x i32> %a, <4 x i32> %b) {
+; CHECK-SSE2-LABEL: @shuffle_v4i32_4501
+; CHECK-SSE2: shufpd {{.*}} # xmm1 = xmm1[0],xmm0[0]
+; CHECK-SSE2-NEXT: movapd %xmm1, %xmm0
+; CHECK-SSE2-NEXT: retq
+ %shuffle = shufflevector <4 x i32> %a, <4 x i32> %b, <4 x i32> <i32 4, i32 5, i32 0, i32 1>
+ ret <4 x i32> %shuffle
+}
+define <4 x i32> @shuffle_v4i32_4015(<4 x i32> %a, <4 x i32> %b) {
+; CHECK-SSE2-LABEL: @shuffle_v4i32_4015
+; CHECK-SSE2: shufps {{.*}} # xmm0 = xmm0[0,1],xmm1[0,1]
+; CHECK-SSE2-NEXT: shufps {{.*}} # xmm0 = xmm0[2,0,1,3]
+; CHECK-SSE2-NEXT: retq
+ %shuffle = shufflevector <4 x i32> %a, <4 x i32> %b, <4 x i32> <i32 4, i32 0, i32 1, i32 5>
+ ret <4 x i32> %shuffle
+}
diff --git a/test/CodeGen/X86/vector-shuffle-128-v8.ll b/test/CodeGen/X86/vector-shuffle-128-v8.ll
new file mode 100644
index 0000000..5d1922a
--- /dev/null
+++ b/test/CodeGen/X86/vector-shuffle-128-v8.ll
@@ -0,0 +1,493 @@
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=x86-64 -x86-experimental-vector-shuffle-lowering | FileCheck %s --check-prefix=CHECK-SSE2
+
+target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
+target triple = "x86_64-unknown-unknown"
+
+define <8 x i16> @shuffle_v8i16_01012323(<8 x i16> %a, <8 x i16> %b) {
+; CHECK-SSE2-LABEL: @shuffle_v8i16_01012323
+; CHECK-SSE2: # BB#0:
+; CHECK-SSE2-NEXT: pshufd {{.*}} # xmm0 = xmm0[0,0,1,1]
+; CHECK-SSE2-NEXT: retq
+ %shuffle = shufflevector <8 x i16> %a, <8 x i16> %b, <8 x i32> <i32 0, i32 1, i32 0, i32 1, i32 2, i32 3, i32 2, i32 3>
+ ret <8 x i16> %shuffle
+}
+define <8 x i16> @shuffle_v8i16_67452301(<8 x i16> %a, <8 x i16> %b) {
+; CHECK-SSE2-LABEL: @shuffle_v8i16_67452301
+; CHECK-SSE2: # BB#0:
+; CHECK-SSE2-NEXT: pshufd {{.*}} # xmm0 = xmm0[3,2,1,0]
+; CHECK-SSE2-NEXT: retq
+ %shuffle = shufflevector <8 x i16> %a, <8 x i16> %b, <8 x i32> <i32 6, i32 7, i32 4, i32 5, i32 2, i32 3, i32 0, i32 1>
+ ret <8 x i16> %shuffle
+}
+define <8 x i16> @shuffle_v8i16_456789AB(<8 x i16> %a, <8 x i16> %b) {
+; CHECK-SSE2-LABEL: @shuffle_v8i16_456789AB
+; CHECK-SSE2: # BB#0:
+; CHECK-SSE2: shufpd {{.*}} # xmm0 = xmm0[1],xmm1[0]
+; CHECK-SSE2-NEXT: retq
+ %shuffle = shufflevector <8 x i16> %a, <8 x i16> %b, <8 x i32> <i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11>
+ ret <8 x i16> %shuffle
+}
+
+define <8 x i16> @shuffle_v8i16_00000000(<8 x i16> %a, <8 x i16> %b) {
+; CHECK-SSE2-LABEL: @shuffle_v8i16_00000000
+; CHECK-SSE2: # BB#0:
+; CHECK-SSE2-NEXT: pshufd {{.*}} # xmm0 = xmm0[0,1,0,3]
+; CHECK-SSE2-NEXT: pshuflw {{.*}} # xmm0 = xmm0[0,0,0,0,4,5,6,7]
+; CHECK-SSE2-NEXT: pshufhw {{.*}} # xmm0 = xmm0[0,1,2,3,4,4,4,4]
+; CHECK-SSE2-NEXT: retq
+ %shuffle = shufflevector <8 x i16> %a, <8 x i16> %b, <8 x i32> <i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0>
+ ret <8 x i16> %shuffle
+}
+define <8 x i16> @shuffle_v8i16_00004444(<8 x i16> %a, <8 x i16> %b) {
+; CHECK-SSE2-LABEL: @shuffle_v8i16_00004444
+; CHECK-SSE2: # BB#0:
+; CHECK-SSE2-NEXT: pshuflw {{.*}} # xmm0 = xmm0[0,0,0,0,4,5,6,7]
+; CHECK-SSE2-NEXT: pshufhw {{.*}} # xmm0 = xmm0[0,1,2,3,4,4,4,4]
+; CHECK-SSE2-NEXT: retq
+ %shuffle = shufflevector <8 x i16> %a, <8 x i16> %b, <8 x i32> <i32 0, i32 0, i32 0, i32 0, i32 4, i32 4, i32 4, i32 4>
+ ret <8 x i16> %shuffle
+}
+define <8 x i16> @shuffle_v8i16_31206745(<8 x i16> %a, <8 x i16> %b) {
+; CHECK-SSE2-LABEL: @shuffle_v8i16_31206745
+; CHECK-SSE2: # BB#0:
+; CHECK-SSE2-NEXT: pshuflw {{.*}} # xmm0 = xmm0[3,1,2,0,4,5,6,7]
+; CHECK-SSE2-NEXT: pshufd {{.*}} # xmm0 = xmm0[0,1,3,2]
+; CHECK-SSE2-NEXT: retq
+ %shuffle = shufflevector <8 x i16> %a, <8 x i16> %b, <8 x i32> <i32 3, i32 1, i32 2, i32 0, i32 6, i32 7, i32 4, i32 5>
+ ret <8 x i16> %shuffle
+}
+define <8 x i16> @shuffle_v8i16_44440000(<8 x i16> %a, <8 x i16> %b) {
+; CHECK-SSE2-LABEL: @shuffle_v8i16_44440000
+; CHECK-SSE2: # BB#0:
+; CHECK-SSE2-NEXT: pshufd {{.*}} # xmm0 = xmm0[2,1,0,3]
+; CHECK-SSE2-NEXT: pshuflw {{.*}} # xmm0 = xmm0[0,0,0,0,4,5,6,7]
+; CHECK-SSE2-NEXT: pshufhw {{.*}} # xmm0 = xmm0[0,1,2,3,4,4,4,4]
+; CHECK-SSE2-NEXT: retq
+ %shuffle = shufflevector <8 x i16> %a, <8 x i16> %b, <8 x i32> <i32 4, i32 4, i32 4, i32 4, i32 0, i32 0, i32 0, i32 0>
+ ret <8 x i16> %shuffle
+}
+define <8 x i16> @shuffle_v8i16_75643120(<8 x i16> %a, <8 x i16> %b) {
+; CHECK-SSE2-LABEL: @shuffle_v8i16_75643120
+; CHECK-SSE2: # BB#0:
+; CHECK-SSE2-NEXT: pshufd {{.*}} # xmm0 = xmm0[2,3,0,1]
+; CHECK-SSE2-NEXT: pshuflw {{.*}} # xmm0 = xmm0[3,1,2,0,4,5,6,7]
+; CHECK-SSE2-NEXT: pshufhw {{.*}} # xmm0 = xmm0[0,1,2,3,7,5,6,4]
+; CHECK-SSE2-NEXT: retq
+ %shuffle = shufflevector <8 x i16> %a, <8 x i16> %b, <8 x i32> <i32 7, i32 5, i32 6, i32 4, i32 3, i32 1, i32 2, i32 0>
+ ret <8 x i16> %shuffle
+}
+
+define <8 x i16> @shuffle_v8i16_10545410(<8 x i16> %a, <8 x i16> %b) {
+; CHECK-SSE2-LABEL: @shuffle_v8i16_10545410
+; CHECK-SSE2: # BB#0:
+; CHECK-SSE2-NEXT: pshufd {{.*}} # xmm0 = xmm0[0,2,2,0]
+; CHECK-SSE2-NEXT: pshuflw {{.*}} # xmm0 = xmm0[1,0,3,2,4,5,6,7]
+; CHECK-SSE2-NEXT: pshufhw {{.*}} # xmm0 = xmm0[0,1,2,3,5,4,7,6]
+; CHECK-SSE2-NEXT: retq
+ %shuffle = shufflevector <8 x i16> %a, <8 x i16> %b, <8 x i32> <i32 1, i32 0, i32 5, i32 4, i32 5, i32 4, i32 1, i32 0>
+ ret <8 x i16> %shuffle
+}
+define <8 x i16> @shuffle_v8i16_54105410(<8 x i16> %a, <8 x i16> %b) {
+; CHECK-SSE2-LABEL: @shuffle_v8i16_54105410
+; CHECK-SSE2: # BB#0:
+; CHECK-SSE2-NEXT: pshufd {{.*}} # xmm0 = xmm0[0,2,2,0]
+; CHECK-SSE2-NEXT: pshuflw {{.*}} # xmm0 = xmm0[3,2,1,0,4,5,6,7]
+; CHECK-SSE2-NEXT: pshufhw {{.*}} # xmm0 = xmm0[0,1,2,3,5,4,7,6]
+; CHECK-SSE2-NEXT: retq
+ %shuffle = shufflevector <8 x i16> %a, <8 x i16> %b, <8 x i32> <i32 5, i32 4, i32 1, i32 0, i32 5, i32 4, i32 1, i32 0>
+ ret <8 x i16> %shuffle
+}
+define <8 x i16> @shuffle_v8i16_54101054(<8 x i16> %a, <8 x i16> %b) {
+; CHECK-SSE2-LABEL: @shuffle_v8i16_54101054
+; CHECK-SSE2: # BB#0:
+; CHECK-SSE2-NEXT: pshufd {{.*}} # xmm0 = xmm0[0,2,2,0]
+; CHECK-SSE2-NEXT: pshuflw {{.*}} # xmm0 = xmm0[3,2,1,0,4,5,6,7]
+; CHECK-SSE2-NEXT: pshufhw {{.*}} # xmm0 = xmm0[0,1,2,3,7,6,5,4]
+; CHECK-SSE2-NEXT: retq
+ %shuffle = shufflevector <8 x i16> %a, <8 x i16> %b, <8 x i32> <i32 5, i32 4, i32 1, i32 0, i32 1, i32 0, i32 5, i32 4>
+ ret <8 x i16> %shuffle
+}
+define <8 x i16> @shuffle_v8i16_04400440(<8 x i16> %a, <8 x i16> %b) {
+; CHECK-SSE2-LABEL: @shuffle_v8i16_04400440
+; CHECK-SSE2: # BB#0:
+; CHECK-SSE2-NEXT: pshufd {{.*}} # xmm0 = xmm0[0,2,2,0]
+; CHECK-SSE2-NEXT: pshuflw {{.*}} # xmm0 = xmm0[0,2,2,0,4,5,6,7]
+; CHECK-SSE2-NEXT: pshufhw {{.*}} # xmm0 = xmm0[0,1,2,3,6,4,4,6]
+; CHECK-SSE2-NEXT: retq
+ %shuffle = shufflevector <8 x i16> %a, <8 x i16> %b, <8 x i32> <i32 0, i32 4, i32 4, i32 0, i32 0, i32 4, i32 4, i32 0>
+ ret <8 x i16> %shuffle
+}
+define <8 x i16> @shuffle_v8i16_40044004(<8 x i16> %a, <8 x i16> %b) {
+; CHECK-SSE2-LABEL: @shuffle_v8i16_40044004
+; CHECK-SSE2: # BB#0:
+; CHECK-SSE2-NEXT: pshufd {{.*}} # xmm0 = xmm0[0,2,2,0]
+; CHECK-SSE2-NEXT: pshuflw {{.*}} # xmm0 = xmm0[2,0,0,2,4,5,6,7]
+; CHECK-SSE2-NEXT: pshufhw {{.*}} # xmm0 = xmm0[0,1,2,3,4,6,6,4]
+; CHECK-SSE2-NEXT: retq
+ %shuffle = shufflevector <8 x i16> %a, <8 x i16> %b, <8 x i32> <i32 4, i32 0, i32 0, i32 4, i32 4, i32 0, i32 0, i32 4>
+ ret <8 x i16> %shuffle
+}
+
+define <8 x i16> @shuffle_v8i16_26405173(<8 x i16> %a, <8 x i16> %b) {
+; CHECK-SSE2-LABEL: @shuffle_v8i16_26405173
+; CHECK-SSE2: # BB#0:
+; CHECK-SSE2-NEXT: pshuflw {{.*}} # xmm0 = xmm0[0,2,1,3,4,5,6,7]
+; CHECK-SSE2-NEXT: pshufhw {{.*}} # xmm0 = xmm0[0,1,2,3,7,5,4,6]
+; CHECK-SSE2-NEXT: pshufd {{.*}} # xmm0 = xmm0[0,3,2,1]
+; CHECK-SSE2-NEXT: pshuflw {{.*}} # xmm0 = xmm0[1,3,2,0,4,5,6,7]
+; CHECK-SSE2-NEXT: pshufhw {{.*}} # xmm0 = xmm0[0,1,2,3,5,6,4,7]
+; CHECK-SSE2-NEXT: retq
+ %shuffle = shufflevector <8 x i16> %a, <8 x i16> %b, <8 x i32> <i32 2, i32 6, i32 4, i32 0, i32 5, i32 1, i32 7, i32 3>
+ ret <8 x i16> %shuffle
+}
+define <8 x i16> @shuffle_v8i16_20645173(<8 x i16> %a, <8 x i16> %b) {
+; CHECK-SSE2-LABEL: @shuffle_v8i16_20645173
+; CHECK-SSE2: # BB#0:
+; CHECK-SSE2-NEXT: pshuflw {{.*}} # xmm0 = xmm0[0,2,1,3,4,5,6,7]
+; CHECK-SSE2-NEXT: pshufhw {{.*}} # xmm0 = xmm0[0,1,2,3,7,5,4,6]
+; CHECK-SSE2-NEXT: pshufd {{.*}} # xmm0 = xmm0[0,3,2,1]
+; CHECK-SSE2-NEXT: pshuflw {{.*}} # xmm0 = xmm0[1,0,3,2,4,5,6,7]
+; CHECK-SSE2-NEXT: pshufhw {{.*}} # xmm0 = xmm0[0,1,2,3,5,6,4,7]
+; CHECK-SSE2-NEXT: retq
+ %shuffle = shufflevector <8 x i16> %a, <8 x i16> %b, <8 x i32> <i32 2, i32 0, i32 6, i32 4, i32 5, i32 1, i32 7, i32 3>
+ ret <8 x i16> %shuffle
+}
+define <8 x i16> @shuffle_v8i16_26401375(<8 x i16> %a, <8 x i16> %b) {
+; CHECK-SSE2-LABEL: @shuffle_v8i16_26401375
+; CHECK-SSE2: # BB#0:
+; CHECK-SSE2-NEXT: pshuflw {{.*}} # xmm0 = xmm0[0,2,1,3,4,5,6,7]
+; CHECK-SSE2-NEXT: pshufhw {{.*}} # xmm0 = xmm0[0,1,2,3,7,5,4,6]
+; CHECK-SSE2-NEXT: pshufd {{.*}} # xmm0 = xmm0[0,3,1,2]
+; CHECK-SSE2-NEXT: pshuflw {{.*}} # xmm0 = xmm0[1,3,2,0,4,5,6,7]
+; CHECK-SSE2-NEXT: retq
+ %shuffle = shufflevector <8 x i16> %a, <8 x i16> %b, <8 x i32> <i32 2, i32 6, i32 4, i32 0, i32 1, i32 3, i32 7, i32 5>
+ ret <8 x i16> %shuffle
+}
+
+define <8 x i16> @shuffle_v8i16_00444444(<8 x i16> %a, <8 x i16> %b) {
+; CHECK-SSE2-LABEL: @shuffle_v8i16_00444444
+; CHECK-SSE2: # BB#0:
+; CHECK-SSE2-NEXT: pshufd {{.*}} # xmm0 = xmm0[0,2,2,3]
+; CHECK-SSE2-NEXT: pshuflw {{.*}} # xmm0 = xmm0[0,0,2,2,4,5,6,7]
+; CHECK-SSE2-NEXT: pshufhw {{.*}} # xmm0 = xmm0[0,1,2,3,4,4,4,4]
+; CHECK-SSE2-NEXT: retq
+ %shuffle = shufflevector <8 x i16> %a, <8 x i16> %b, <8 x i32> <i32 0, i32 0, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4>
+ ret <8 x i16> %shuffle
+}
+define <8 x i16> @shuffle_v8i16_44004444(<8 x i16> %a, <8 x i16> %b) {
+; CHECK-SSE2-LABEL: @shuffle_v8i16_44004444
+; CHECK-SSE2: # BB#0:
+; CHECK-SSE2-NEXT: pshufd {{.*}} # xmm0 = xmm0[0,2,2,3]
+; CHECK-SSE2-NEXT: pshuflw {{.*}} # xmm0 = xmm0[2,2,0,0,4,5,6,7]
+; CHECK-SSE2-NEXT: pshufhw {{.*}} # xmm0 = xmm0[0,1,2,3,4,4,4,4]
+; CHECK-SSE2-NEXT: retq
+ %shuffle = shufflevector <8 x i16> %a, <8 x i16> %b, <8 x i32> <i32 4, i32 4, i32 0, i32 0, i32 4, i32 4, i32 4, i32 4>
+ ret <8 x i16> %shuffle
+}
+define <8 x i16> @shuffle_v8i16_04404444(<8 x i16> %a, <8 x i16> %b) {
+; CHECK-SSE2-LABEL: @shuffle_v8i16_04404444
+; CHECK-SSE2: # BB#0:
+; CHECK-SSE2-NEXT: pshufd {{.*}} # xmm0 = xmm0[0,2,2,3]
+; CHECK-SSE2-NEXT: pshuflw {{.*}} # xmm0 = xmm0[0,2,2,0,4,5,6,7]
+; CHECK-SSE2-NEXT: pshufhw {{.*}} # xmm0 = xmm0[0,1,2,3,4,4,4,4]
+; CHECK-SSE2-NEXT: retq
+ %shuffle = shufflevector <8 x i16> %a, <8 x i16> %b, <8 x i32> <i32 0, i32 4, i32 4, i32 0, i32 4, i32 4, i32 4, i32 4>
+ ret <8 x i16> %shuffle
+}
+define <8 x i16> @shuffle_v8i16_04400000(<8 x i16> %a, <8 x i16> %b) {
+; CHECK-SSE2-LABEL: @shuffle_v8i16_04400000
+; CHECK-SSE2: # BB#0:
+; CHECK-SSE2-NEXT: pshufd {{.*}} # xmm0 = xmm0[0,2,0,3]
+; CHECK-SSE2-NEXT: pshuflw {{.*}} # xmm0 = xmm0[0,2,2,0,4,5,6,7]
+; CHECK-SSE2-NEXT: pshufhw {{.*}} # xmm0 = xmm0[0,1,2,3,4,4,4,4]
+; CHECK-SSE2-NEXT: retq
+ %shuffle = shufflevector <8 x i16> %a, <8 x i16> %b, <8 x i32> <i32 0, i32 4, i32 4, i32 0, i32 0, i32 0, i32 0, i32 0>
+ ret <8 x i16> %shuffle
+}
+define <8 x i16> @shuffle_v8i16_04404567(<8 x i16> %a, <8 x i16> %b) {
+; CHECK-SSE2-LABEL: @shuffle_v8i16_04404567
+; CHECK-SSE2: # BB#0:
+; CHECK-SSE2-NEXT: pshufd {{.*}} # xmm0 = xmm0[0,2,2,3]
+; CHECK-SSE2-NEXT: pshuflw {{.*}} # xmm0 = xmm0[0,2,2,0,4,5,6,7]
+; CHECK-SSE2-NEXT: retq
+ %shuffle = shufflevector <8 x i16> %a, <8 x i16> %b, <8 x i32> <i32 0, i32 4, i32 4, i32 0, i32 4, i32 5, i32 6, i32 7>
+ ret <8 x i16> %shuffle
+}
+
+define <8 x i16> @shuffle_v8i16_0X444444(<8 x i16> %a, <8 x i16> %b) {
+; CHECK-SSE2-LABEL: @shuffle_v8i16_0X444444
+; CHECK-SSE2: # BB#0:
+; CHECK-SSE2-NEXT: pshufd {{.*}} # xmm0 = xmm0[0,2,2,3]
+; CHECK-SSE2-NEXT: pshuflw {{.*}} # xmm0 = xmm0[0,1,2,2,4,5,6,7]
+; CHECK-SSE2-NEXT: pshufhw {{.*}} # xmm0 = xmm0[0,1,2,3,4,4,4,4]
+; CHECK-SSE2-NEXT: retq
+ %shuffle = shufflevector <8 x i16> %a, <8 x i16> %b, <8 x i32> <i32 0, i32 undef, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4>
+ ret <8 x i16> %shuffle
+}
+define <8 x i16> @shuffle_v8i16_44X04444(<8 x i16> %a, <8 x i16> %b) {
+; CHECK-SSE2-LABEL: @shuffle_v8i16_44X04444
+; CHECK-SSE2: # BB#0:
+; CHECK-SSE2-NEXT: pshufd {{.*}} # xmm0 = xmm0[0,2,2,3]
+; CHECK-SSE2-NEXT: pshuflw {{.*}} # xmm0 = xmm0[2,2,2,0,4,5,6,7]
+; CHECK-SSE2-NEXT: pshufhw {{.*}} # xmm0 = xmm0[0,1,2,3,4,4,4,4]
+; CHECK-SSE2-NEXT: retq
+ %shuffle = shufflevector <8 x i16> %a, <8 x i16> %b, <8 x i32> <i32 4, i32 4, i32 undef, i32 0, i32 4, i32 4, i32 4, i32 4>
+ ret <8 x i16> %shuffle
+}
+define <8 x i16> @shuffle_v8i16_X4404444(<8 x i16> %a, <8 x i16> %b) {
+; CHECK-SSE2-LABEL: @shuffle_v8i16_X4404444
+; CHECK-SSE2: # BB#0:
+; CHECK-SSE2-NEXT: pshufd {{.*}} # xmm0 = xmm0[0,2,2,3]
+; CHECK-SSE2-NEXT: pshuflw {{.*}} # xmm0 = xmm0[0,2,2,0,4,5,6,7]
+; CHECK-SSE2-NEXT: pshufhw {{.*}} # xmm0 = xmm0[0,1,2,3,4,4,4,4]
+; CHECK-SSE2-NEXT: retq
+ %shuffle = shufflevector <8 x i16> %a, <8 x i16> %b, <8 x i32> <i32 undef, i32 4, i32 4, i32 0, i32 4, i32 4, i32 4, i32 4>
+ ret <8 x i16> %shuffle
+}
+
+define <8 x i16> @shuffle_v8i16_0127XXXX(<8 x i16> %a, <8 x i16> %b) {
+; CHECK-SSE2-LABEL: @shuffle_v8i16_0127XXXX
+; CHECK-SSE2: # BB#0:
+; CHECK-SSE2-NEXT: pshufd {{.*}} # xmm0 = xmm0[0,2,1,3]
+; CHECK-SSE2-NEXT: pshufhw {{.*}} # xmm0 = xmm0[0,1,2,3,4,7,6,7]
+; CHECK-SSE2-NEXT: pshufd {{.*}} # xmm0 = xmm0[0,2,2,3]
+; CHECK-SSE2-NEXT: retq
+ %shuffle = shufflevector <8 x i16> %a, <8 x i16> %b, <8 x i32> <i32 0, i32 1, i32 2, i32 7, i32 undef, i32 undef, i32 undef, i32 undef>
+ ret <8 x i16> %shuffle
+}
+
+define <8 x i16> @shuffle_v8i16_XXXX4563(<8 x i16> %a, <8 x i16> %b) {
+; CHECK-SSE2-LABEL: @shuffle_v8i16_XXXX4563
+; CHECK-SSE2: # BB#0:
+; CHECK-SSE2-NEXT: pshufd {{.*}} # xmm0 = xmm0[3,1,2,0]
+; CHECK-SSE2-NEXT: pshuflw {{.*}} # xmm0 = xmm0[0,3,2,3,4,5,6,7]
+; CHECK-SSE2-NEXT: pshufd {{.*}} # xmm0 = xmm0[0,1,2,0]
+; CHECK-SSE2-NEXT: retq
+ %shuffle = shufflevector <8 x i16> %a, <8 x i16> %b, <8 x i32> <i32 undef, i32 undef, i32 undef, i32 undef, i32 4, i32 5, i32 6, i32 3>
+ ret <8 x i16> %shuffle
+}
+
+define <8 x i16> @shuffle_v8i16_4563XXXX(<8 x i16> %a, <8 x i16> %b) {
+; CHECK-SSE2-LABEL: @shuffle_v8i16_4563XXXX
+; CHECK-SSE2: # BB#0:
+; CHECK-SSE2-NEXT: pshufd {{.*}} # xmm0 = xmm0[3,1,2,0]
+; CHECK-SSE2-NEXT: pshuflw {{.*}} # xmm0 = xmm0[0,3,2,3,4,5,6,7]
+; CHECK-SSE2-NEXT: pshufd {{.*}} # xmm0 = xmm0[2,0,2,3]
+; CHECK-SSE2-NEXT: retq
+ %shuffle = shufflevector <8 x i16> %a, <8 x i16> %b, <8 x i32> <i32 4, i32 5, i32 6, i32 3, i32 undef, i32 undef, i32 undef, i32 undef>
+ ret <8 x i16> %shuffle
+}
+
+define <8 x i16> @shuffle_v8i16_01274563(<8 x i16> %a, <8 x i16> %b) {
+; CHECK-SSE2-LABEL: @shuffle_v8i16_01274563
+; CHECK-SSE2: # BB#0:
+; CHECK-SSE2-NEXT: pshufd {{.*}} # xmm0 = xmm0[0,2,1,3]
+; CHECK-SSE2-NEXT: pshufhw {{.*}} # xmm0 = xmm0[0,1,2,3,6,5,4,7]
+; CHECK-SSE2-NEXT: pshufd {{.*}} # xmm0 = xmm0[0,3,1,2]
+; CHECK-SSE2-NEXT: retq
+ %shuffle = shufflevector <8 x i16> %a, <8 x i16> %b, <8 x i32> <i32 0, i32 1, i32 2, i32 7, i32 4, i32 5, i32 6, i32 3>
+ ret <8 x i16> %shuffle
+}
+
+define <8 x i16> @shuffle_v8i16_45630127(<8 x i16> %a, <8 x i16> %b) {
+; CHECK-SSE2-LABEL: @shuffle_v8i16_45630127
+; CHECK-SSE2: # BB#0:
+; CHECK-SSE2-NEXT: pshufd {{.*}} # xmm0 = xmm0[3,1,2,0]
+; CHECK-SSE2-NEXT: pshuflw {{.*}} # xmm0 = xmm0[0,3,1,2,4,5,6,7]
+; CHECK-SSE2-NEXT: pshufd {{.*}} # xmm0 = xmm0[2,0,1,3]
+; CHECK-SSE2-NEXT: pshufhw {{.*}} # xmm0 = xmm0[0,1,2,3,6,7,5,4]
+; CHECK-SSE2-NEXT: retq
+ %shuffle = shufflevector <8 x i16> %a, <8 x i16> %b, <8 x i32> <i32 4, i32 5, i32 6, i32 3, i32 0, i32 1, i32 2, i32 7>
+ ret <8 x i16> %shuffle
+}
+
+define <8 x i16> @shuffle_v8i16_08192a3b(<8 x i16> %a, <8 x i16> %b) {
+; CHECK-SSE2-LABEL: @shuffle_v8i16_08192a3b
+; CHECK-SSE2: # BB#0:
+; CHECK-SSE2-NEXT: punpcklwd %xmm1, %xmm0
+; CHECK-SSE2-NEXT: retq
+ %shuffle = shufflevector <8 x i16> %a, <8 x i16> %b, <8 x i32> <i32 0, i32 8, i32 1, i32 9, i32 2, i32 10, i32 3, i32 11>
+ ret <8 x i16> %shuffle
+}
+
+define <8 x i16> @shuffle_v8i16_0c1d2e3f(<8 x i16> %a, <8 x i16> %b) {
+; CHECK-SSE2-LABEL: @shuffle_v8i16_0c1d2e3f
+; CHECK-SSE2: # BB#0:
+; CHECK-SSE2-NEXT: pshufd {{.*}} # xmm1 = xmm1[2,3,2,3]
+; CHECK-SSE2-NEXT: punpcklwd %xmm1, %xmm0
+; CHECK-SSE2-NEXT: retq
+ %shuffle = shufflevector <8 x i16> %a, <8 x i16> %b, <8 x i32> <i32 0, i32 12, i32 1, i32 13, i32 2, i32 14, i32 3, i32 15>
+ ret <8 x i16> %shuffle
+}
+
+define <8 x i16> @shuffle_v8i16_4c5d6e7f(<8 x i16> %a, <8 x i16> %b) {
+; CHECK-SSE2-LABEL: @shuffle_v8i16_4c5d6e7f
+; CHECK-SSE2: # BB#0:
+; CHECK-SSE2-NEXT: pshufd {{.*}} # xmm1 = xmm1[2,3,2,3]
+; CHECK-SSE2-NEXT: pshufd {{.*}} # xmm0 = xmm0[2,3,2,3]
+; CHECK-SSE2-NEXT: punpcklwd %xmm1, %xmm0
+; CHECK-SSE2-NEXT: retq
+ %shuffle = shufflevector <8 x i16> %a, <8 x i16> %b, <8 x i32> <i32 4, i32 12, i32 5, i32 13, i32 6, i32 14, i32 7, i32 15>
+ ret <8 x i16> %shuffle
+}
+
+define <8 x i16> @shuffle_v8i16_48596a7b(<8 x i16> %a, <8 x i16> %b) {
+; CHECK-SSE2-LABEL: @shuffle_v8i16_48596a7b
+; CHECK-SSE2: # BB#0:
+; CHECK-SSE2-NEXT: pshufd {{.*}} # xmm0 = xmm0[2,3,2,3]
+; CHECK-SSE2-NEXT: punpcklwd %xmm1, %xmm0
+; CHECK-SSE2-NEXT: retq
+ %shuffle = shufflevector <8 x i16> %a, <8 x i16> %b, <8 x i32> <i32 4, i32 8, i32 5, i32 9, i32 6, i32 10, i32 7, i32 11>
+ ret <8 x i16> %shuffle
+}
+
+define <8 x i16> @shuffle_v8i16_08196e7f(<8 x i16> %a, <8 x i16> %b) {
+; CHECK-SSE2-LABEL: @shuffle_v8i16_08196e7f
+; CHECK-SSE2: # BB#0:
+; CHECK-SSE2-NEXT: pshufd {{.*}} # xmm1 = xmm1[0,3,2,3]
+; CHECK-SSE2-NEXT: pshufd {{.*}} # xmm0 = xmm0[0,3,2,3]
+; CHECK-SSE2-NEXT: punpcklwd %xmm1, %xmm0
+; CHECK-SSE2-NEXT: retq
+ %shuffle = shufflevector <8 x i16> %a, <8 x i16> %b, <8 x i32> <i32 0, i32 8, i32 1, i32 9, i32 6, i32 14, i32 7, i32 15>
+ ret <8 x i16> %shuffle
+}
+
+define <8 x i16> @shuffle_v8i16_0c1d6879(<8 x i16> %a, <8 x i16> %b) {
+; CHECK-SSE2-LABEL: @shuffle_v8i16_0c1d6879
+; CHECK-SSE2: # BB#0:
+; CHECK-SSE2-NEXT: pshufd {{.*}} # xmm1 = xmm1[2,0,2,3]
+; CHECK-SSE2-NEXT: pshufd {{.*}} # xmm0 = xmm0[0,3,2,3]
+; CHECK-SSE2-NEXT: punpcklwd %xmm1, %xmm0
+; CHECK-SSE2-NEXT: retq
+ %shuffle = shufflevector <8 x i16> %a, <8 x i16> %b, <8 x i32> <i32 0, i32 12, i32 1, i32 13, i32 6, i32 8, i32 7, i32 9>
+ ret <8 x i16> %shuffle
+}
+
+define <8 x i16> @shuffle_v8i16_109832ba(<8 x i16> %a, <8 x i16> %b) {
+; CHECK-SSE2-LABEL: @shuffle_v8i16_109832ba
+; CHECK-SSE2: # BB#0:
+; CHECK-SSE2-NEXT: punpcklwd %xmm1, %xmm0
+; CHECK-SSE2-NEXT: pshuflw {{.*}} # xmm1 = xmm0[2,0,3,1,4,5,6,7]
+; CHECK-SSE2-NEXT: pshufd {{.*}} # xmm0 = xmm0[2,3,2,3]
+; CHECK-SSE2-NEXT: pshuflw {{.*}} # xmm0 = xmm0[2,0,3,1,4,5,6,7]
+; CHECK-SSE2-NEXT: punpcklqdq %xmm0, %xmm1
+; CHECK-SSE2-NEXT: movdqa %xmm1, %xmm0
+; CHECK-SSE2-NEXT: retq
+ %shuffle = shufflevector <8 x i16> %a, <8 x i16> %b, <8 x i32> <i32 1, i32 0, i32 9, i32 8, i32 3, i32 2, i32 11, i32 10>
+ ret <8 x i16> %shuffle
+}
+
+define <8 x i16> @shuffle_v8i16_8091a2b3(<8 x i16> %a, <8 x i16> %b) {
+; CHECK-SSE2-LABEL: @shuffle_v8i16_8091a2b3
+; CHECK-SSE2: # BB#0:
+; CHECK-SSE2-NEXT: punpcklwd %xmm0, %xmm1
+; CHECK-SSE2-NEXT: movdqa %xmm1, %xmm0
+; CHECK-SSE2-NEXT: retq
+ %shuffle = shufflevector <8 x i16> %a, <8 x i16> %b, <8 x i32> <i32 8, i32 0, i32 9, i32 1, i32 10, i32 2, i32 11, i32 3>
+ ret <8 x i16> %shuffle
+}
+define <8 x i16> @shuffle_v8i16_c4d5e6f7(<8 x i16> %a, <8 x i16> %b) {
+; CHECK-SSE2-LABEL: @shuffle_v8i16_c4d5e6f7
+; CHECK-SSE2: # BB#0:
+; CHECK-SSE2-NEXT: pshufd {{.*}} # xmm2 = xmm0[2,3,2,3]
+; CHECK-SSE2-NEXT: pshufd {{.*}} # xmm0 = xmm1[2,3,2,3]
+; CHECK-SSE2-NEXT: punpcklwd %xmm2, %xmm0
+; CHECK-SSE2-NEXT: retq
+ %shuffle = shufflevector <8 x i16> %a, <8 x i16> %b, <8 x i32> <i32 12, i32 4, i32 13, i32 5, i32 14, i32 6, i32 15, i32 7>
+ ret <8 x i16> %shuffle
+}
+
+define <8 x i16> @shuffle_v8i16_0213cedf(<8 x i16> %a, <8 x i16> %b) {
+; CHECK-SSE2-LABEL: @shuffle_v8i16_0213cedf
+; CHECK-SSE2: # BB#0:
+; CHECK-SSE2-NEXT: pshuflw {{.*}} # xmm0 = xmm0[0,2,1,3,4,5,6,7]
+; CHECK-SSE2-NEXT: pshufd {{.*}} # xmm1 = xmm1[2,3,2,3]
+; CHECK-SSE2-NEXT: pshuflw {{.*}} # xmm1 = xmm1[0,2,1,3,4,5,6,7]
+; CHECK-SSE2-NEXT: punpcklqdq %xmm1, %xmm0
+; CHECK-SSE2-NEXT: retq
+ %shuffle = shufflevector <8 x i16> %a, <8 x i16> %b, <8 x i32> <i32 0, i32 2, i32 1, i32 3, i32 12, i32 14, i32 13, i32 15>
+ ret <8 x i16> %shuffle
+}
+
+define <8 x i16> @shuffle_v8i16_032dXXXX(<8 x i16> %a, <8 x i16> %b) {
+; CHECK-SSE2-LABEL: @shuffle_v8i16_032dXXXX
+; CHECK-SSE2: # BB#0:
+; CHECK-SSE2-NEXT: pshufd {{.*}} # xmm1 = xmm1[2,1,2,3]
+; CHECK-SSE2-NEXT: punpcklwd %xmm1, %xmm0
+; CHECK-SSE2-NEXT: pshuflw {{.*}} # xmm0 = xmm0[0,3,2,3,4,5,6,7]
+; CHECK-SSE2-NEXT: pshufhw {{.*}} # xmm0 = xmm0[0,1,2,3,4,6,6,7]
+; CHECK-SSE2-NEXT: pshufd {{.*}} # xmm0 = xmm0[0,2,2,3]
+; CHECK-SSE2-NEXT: pshuflw {{.*}} # xmm0 = xmm0[0,3,2,1,4,5,6,7]
+; CHECK-SSE2-NEXT: retq
+ %shuffle = shufflevector <8 x i16> %a, <8 x i16> %b, <8 x i32> <i32 0, i32 3, i32 2, i32 13, i32 undef, i32 undef, i32 undef, i32 undef>
+ ret <8 x i16> %shuffle
+}
+define <8 x i16> @shuffle_v8i16_XXXcXXXX(<8 x i16> %a, <8 x i16> %b) {
+; CHECK-SSE2-LABEL: @shuffle_v8i16_XXXcXXXX
+; CHECK-SSE2: # BB#0:
+; CHECK-SSE2-NEXT: pshufd {{.*}} # xmm0 = xmm1[2,1,2,3]
+; CHECK-SSE2-NEXT: pshuflw {{.*}} # xmm0 = xmm0[0,1,2,1,4,5,6,7]
+; CHECK-SSE2-NEXT: retq
+ %shuffle = shufflevector <8 x i16> %a, <8 x i16> %b, <8 x i32> <i32 undef, i32 undef, i32 undef, i32 13, i32 undef, i32 undef, i32 undef, i32 undef>
+ ret <8 x i16> %shuffle
+}
+
+define <8 x i16> @shuffle_v8i16_012dXXXX(<8 x i16> %a, <8 x i16> %b) {
+; CHECK-SSE2-LABEL: @shuffle_v8i16_012dXXXX
+; CHECK-SSE2: # BB#0:
+; CHECK-SSE2-NEXT: pshufd {{.*}} # xmm1 = xmm1[2,1,2,3]
+; CHECK-SSE2-NEXT: punpcklwd %xmm1, %xmm0
+; CHECK-SSE2-NEXT: pshufd {{.*}} # xmm0 = xmm0[3,1,2,0]
+; CHECK-SSE2-NEXT: pshufhw {{.*}} # xmm0 = xmm0[0,1,2,3,4,6,6,7]
+; CHECK-SSE2-NEXT: pshufd {{.*}} # xmm0 = xmm0[2,1,2,3]
+; CHECK-SSE2-NEXT: pshuflw {{.*}} # xmm0 = xmm0[1,2,0,3,4,5,6,7]
+; CHECK-SSE2-NEXT: retq
+ %shuffle = shufflevector <8 x i16> %a, <8 x i16> %b, <8 x i32> <i32 0, i32 1, i32 2, i32 13, i32 undef, i32 undef, i32 undef, i32 undef>
+ ret <8 x i16> %shuffle
+}
+
+define <8 x i16> @shuffle_v8i16_XXXXcde3(<8 x i16> %a, <8 x i16> %b) {
+; CHECK-SSE2-LABEL: @shuffle_v8i16_XXXXcde3
+; CHECK-SSE2: # BB#0:
+; CHECK-SSE2-NEXT: pshufd {{.*}} # xmm0 = xmm0[0,1,2,1]
+; CHECK-SSE2-NEXT: punpckhwd %xmm0, %xmm1
+; CHECK-SSE2-NEXT: pshuflw {{.*}} # xmm0 = xmm1[0,2,2,3,4,5,6,7]
+; CHECK-SSE2-NEXT: pshufhw {{.*}} # xmm0 = xmm0[0,1,2,3,4,7,6,7]
+; CHECK-SSE2-NEXT: pshufd {{.*}} # xmm0 = xmm0[0,1,0,2]
+; CHECK-SSE2-NEXT: retq
+ %shuffle = shufflevector <8 x i16> %a, <8 x i16> %b, <8 x i32> <i32 undef, i32 undef, i32 undef, i32 undef, i32 12, i32 13, i32 14, i32 3>
+ ret <8 x i16> %shuffle
+}
+
+define <8 x i16> @shuffle_v8i16_cde3XXXX(<8 x i16> %a, <8 x i16> %b) {
+; CHECK-SSE2-LABEL: @shuffle_v8i16_cde3XXXX
+; CHECK-SSE2: # BB#0:
+; CHECK-SSE2-NEXT: pshufd {{.*}} # xmm0 = xmm0[0,1,2,1]
+; CHECK-SSE2-NEXT: punpckhwd %xmm0, %xmm1
+; CHECK-SSE2-NEXT: pshuflw {{.*}} # xmm0 = xmm1[0,2,2,3,4,5,6,7]
+; CHECK-SSE2-NEXT: pshufhw {{.*}} # xmm0 = xmm0[0,1,2,3,4,7,6,7]
+; CHECK-SSE2-NEXT: pshufd {{.*}} # xmm0 = xmm0[0,2,2,3]
+; CHECK-SSE2-NEXT: retq
+ %shuffle = shufflevector <8 x i16> %a, <8 x i16> %b, <8 x i32> <i32 12, i32 13, i32 14, i32 3, i32 undef, i32 undef, i32 undef, i32 undef>
+ ret <8 x i16> %shuffle
+}
+
+define <8 x i16> @shuffle_v8i16_012dcde3(<8 x i16> %a, <8 x i16> %b) {
+; CHECK-SSE2-LABEL: @shuffle_v8i16_012dcde3
+; CHECK-SSE2: # BB#0:
+; CHECK-SSE2-NEXT: pshufd {{.*}} # xmm2 = xmm0[0,1,2,1]
+; CHECK-SSE2-NEXT: pshufd {{.*}} # xmm3 = xmm1[2,1,2,3]
+; CHECK-SSE2-NEXT: punpckhwd %xmm2, %xmm1
+; CHECK-SSE2-NEXT: pshuflw {{.*}} # xmm1 = xmm1[0,2,2,3,4,5,6,7]
+; CHECK-SSE2-NEXT: pshufhw {{.*}} # xmm1 = xmm1[0,1,2,3,4,7,6,7]
+; CHECK-SSE2-NEXT: pshufd {{.*}} # xmm1 = xmm1[0,2,2,3]
+; CHECK-SSE2-NEXT: punpcklwd %xmm3, %xmm0
+; CHECK-SSE2-NEXT: pshufd {{.*}} # xmm0 = xmm0[3,1,2,0]
+; CHECK-SSE2-NEXT: pshufhw {{.*}} # xmm0 = xmm0[0,1,2,3,4,6,6,7]
+; CHECK-SSE2-NEXT: pshufd {{.*}} # xmm0 = xmm0[2,1,2,3]
+; CHECK-SSE2-NEXT: pshuflw {{.*}} # xmm0 = xmm0[1,2,0,3,4,5,6,7]
+; CHECK-SSE2-NEXT: punpcklqdq %xmm1, %xmm0
+; CHECK-SSE2-NEXT: retq
+ %shuffle = shufflevector <8 x i16> %a, <8 x i16> %b, <8 x i32> <i32 0, i32 1, i32 2, i32 13, i32 12, i32 13, i32 14, i32 3>
+ ret <8 x i16> %shuffle
+}
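For reference (an editorial aside, not part of the patch): a shufflevector mask indexes the concatenation of both operands, so indices 0-7 select lanes of %a and indices 8-15 select lanes of %b. A minimal sketch with a hypothetical function name, using the interleaving mask that SSE2 lowers to a single punpcklwd, as in the tests above:

define <8 x i16> @interleave_low_halves(<8 x i16> %a, <8 x i16> %b) {
  ; Produces a0,b0,a1,b1,a2,b2,a3,b3 -- indices 8-15 refer to %b.
  %r = shufflevector <8 x i16> %a, <8 x i16> %b, <8 x i32> <i32 0, i32 8, i32 1, i32 9, i32 2, i32 10, i32 3, i32 11>
  ret <8 x i16> %r
}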
diff --git a/test/CodeGen/X86/vector-shuffle-combining.ll b/test/CodeGen/X86/vector-shuffle-combining.ll
new file mode 100644
index 0000000..e60ecb7
--- /dev/null
+++ b/test/CodeGen/X86/vector-shuffle-combining.ll
@@ -0,0 +1,119 @@
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=x86-64 -x86-experimental-vector-shuffle-lowering | FileCheck %s --check-prefix=CHECK-SSE2
+
+target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
+target triple = "x86_64-unknown-unknown"
+
+declare <4 x i32> @llvm.x86.sse2.pshuf.d(<4 x i32>, i8)
+declare <8 x i16> @llvm.x86.sse2.pshufl.w(<8 x i16>, i8)
+declare <8 x i16> @llvm.x86.sse2.pshufh.w(<8 x i16>, i8)
+
+define <4 x i32> @combine_pshufd1(<4 x i32> %a) {
+; CHECK-SSE2-LABEL: @combine_pshufd1
+; CHECK-SSE2: # BB#0:
+; CHECK-SSE2-NEXT: retq
+ %b = call <4 x i32> @llvm.x86.sse2.pshuf.d(<4 x i32> %a, i8 27)
+ %c = call <4 x i32> @llvm.x86.sse2.pshuf.d(<4 x i32> %b, i8 27)
+ ret <4 x i32> %c
+}
+
+define <4 x i32> @combine_pshufd2(<4 x i32> %a) {
+; CHECK-SSE2-LABEL: @combine_pshufd2
+; CHECK-SSE2: # BB#0:
+; CHECK-SSE2-NEXT: retq
+ %b = call <4 x i32> @llvm.x86.sse2.pshuf.d(<4 x i32> %a, i8 27)
+ %b.cast = bitcast <4 x i32> %b to <8 x i16>
+ %c = call <8 x i16> @llvm.x86.sse2.pshufl.w(<8 x i16> %b.cast, i8 -28)
+ %c.cast = bitcast <8 x i16> %c to <4 x i32>
+ %d = call <4 x i32> @llvm.x86.sse2.pshuf.d(<4 x i32> %c.cast, i8 27)
+ ret <4 x i32> %d
+}
+
+define <4 x i32> @combine_pshufd3(<4 x i32> %a) {
+; CHECK-SSE2-LABEL: @combine_pshufd3
+; CHECK-SSE2: # BB#0:
+; CHECK-SSE2-NEXT: retq
+ %b = call <4 x i32> @llvm.x86.sse2.pshuf.d(<4 x i32> %a, i8 27)
+ %b.cast = bitcast <4 x i32> %b to <8 x i16>
+ %c = call <8 x i16> @llvm.x86.sse2.pshufh.w(<8 x i16> %b.cast, i8 -28)
+ %c.cast = bitcast <8 x i16> %c to <4 x i32>
+ %d = call <4 x i32> @llvm.x86.sse2.pshuf.d(<4 x i32> %c.cast, i8 27)
+ ret <4 x i32> %d
+}
+
+define <4 x i32> @combine_pshufd4(<4 x i32> %a) {
+; CHECK-SSE2-LABEL: @combine_pshufd4
+; CHECK-SSE2: # BB#0:
+; CHECK-SSE2-NEXT: pshufhw {{.*}} # xmm0 = xmm0[0,1,2,3,7,6,5,4]
+; CHECK-SSE2-NEXT: retq
+ %b = call <4 x i32> @llvm.x86.sse2.pshuf.d(<4 x i32> %a, i8 -31)
+ %b.cast = bitcast <4 x i32> %b to <8 x i16>
+ %c = call <8 x i16> @llvm.x86.sse2.pshufh.w(<8 x i16> %b.cast, i8 27)
+ %c.cast = bitcast <8 x i16> %c to <4 x i32>
+ %d = call <4 x i32> @llvm.x86.sse2.pshuf.d(<4 x i32> %c.cast, i8 -31)
+ ret <4 x i32> %d
+}
+
+define <4 x i32> @combine_pshufd5(<4 x i32> %a) {
+; CHECK-SSE2-LABEL: @combine_pshufd5
+; CHECK-SSE2: # BB#0:
+; CHECK-SSE2-NEXT: pshuflw {{.*}} # xmm0 = xmm0[3,2,1,0,4,5,6,7]
+; CHECK-SSE2-NEXT: retq
+ %b = call <4 x i32> @llvm.x86.sse2.pshuf.d(<4 x i32> %a, i8 -76)
+ %b.cast = bitcast <4 x i32> %b to <8 x i16>
+ %c = call <8 x i16> @llvm.x86.sse2.pshufl.w(<8 x i16> %b.cast, i8 27)
+ %c.cast = bitcast <8 x i16> %c to <4 x i32>
+ %d = call <4 x i32> @llvm.x86.sse2.pshuf.d(<4 x i32> %c.cast, i8 -76)
+ ret <4 x i32> %d
+}
+
+define <4 x i32> @combine_pshufd6(<4 x i32> %a) {
+; CHECK-SSE2-LABEL: @combine_pshufd6
+; CHECK-SSE2: # BB#0:
+; CHECK-SSE2-NEXT: pshufd $0
+; CHECK-SSE2-NEXT: retq
+ %b = call <4 x i32> @llvm.x86.sse2.pshuf.d(<4 x i32> %a, i8 0)
+ %c = call <4 x i32> @llvm.x86.sse2.pshuf.d(<4 x i32> %b, i8 8)
+ ret <4 x i32> %c
+}
+
+define <8 x i16> @combine_pshuflw1(<8 x i16> %a) {
+; CHECK-SSE2-LABEL: @combine_pshuflw1
+; CHECK-SSE2: # BB#0:
+; CHECK-SSE2-NEXT: retq
+ %b = call <8 x i16> @llvm.x86.sse2.pshufl.w(<8 x i16> %a, i8 27)
+ %c = call <8 x i16> @llvm.x86.sse2.pshufl.w(<8 x i16> %b, i8 27)
+ ret <8 x i16> %c
+}
+
+define <8 x i16> @combine_pshuflw2(<8 x i16> %a) {
+; CHECK-SSE2-LABEL: @combine_pshuflw2
+; CHECK-SSE2: # BB#0:
+; CHECK-SSE2-NEXT: retq
+ %b = call <8 x i16> @llvm.x86.sse2.pshufl.w(<8 x i16> %a, i8 27)
+ %c = call <8 x i16> @llvm.x86.sse2.pshufh.w(<8 x i16> %b, i8 -28)
+ %d = call <8 x i16> @llvm.x86.sse2.pshufl.w(<8 x i16> %c, i8 27)
+ ret <8 x i16> %d
+}
+
+define <8 x i16> @combine_pshuflw3(<8 x i16> %a) {
+; CHECK-SSE2-LABEL: @combine_pshuflw3
+; CHECK-SSE2: # BB#0:
+; CHECK-SSE2-NEXT: pshufhw {{.*}} # xmm0 = xmm0[0,1,2,3,7,6,5,4]
+; CHECK-SSE2-NEXT: retq
+ %b = call <8 x i16> @llvm.x86.sse2.pshufl.w(<8 x i16> %a, i8 27)
+ %c = call <8 x i16> @llvm.x86.sse2.pshufh.w(<8 x i16> %b, i8 27)
+ %d = call <8 x i16> @llvm.x86.sse2.pshufl.w(<8 x i16> %c, i8 27)
+ ret <8 x i16> %d
+}
+
+define <8 x i16> @combine_pshufhw1(<8 x i16> %a) {
+; CHECK-SSE2-LABEL: @combine_pshufhw1
+; CHECK-SSE2: # BB#0:
+; CHECK-SSE2-NEXT: pshuflw {{.*}} # xmm0 = xmm0[3,2,1,0,4,5,6,7]
+; CHECK-SSE2-NEXT: retq
+ %b = call <8 x i16> @llvm.x86.sse2.pshufh.w(<8 x i16> %a, i8 27)
+ %c = call <8 x i16> @llvm.x86.sse2.pshufl.w(<8 x i16> %b, i8 27)
+ %d = call <8 x i16> @llvm.x86.sse2.pshufh.w(<8 x i16> %c, i8 27)
+ ret <8 x i16> %d
+}
+
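A note on the immediates (an editorial aside, not part of the patch): the pshufd/pshuflw/pshufhw immediate packs four 2-bit lane selectors, low bits first, so 27 (0b00011011) selects lanes [3,2,1,0]. That permutation is its own inverse, which is why the paired calls above fold away completely. A minimal sketch of the same fold written with a generic shufflevector (hypothetical function name):

define <4 x i32> @reverse_twice(<4 x i32> %a) {
  ; Reversing the lanes twice is the identity, so this folds to %a.
  %b = shufflevector <4 x i32> %a, <4 x i32> undef, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
  %c = shufflevector <4 x i32> %b, <4 x i32> undef, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
  ret <4 x i32> %c
}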
diff --git a/test/CodeGen/X86/vselect.ll b/test/CodeGen/X86/vselect.ll
index 0cf03fc..42cf06a 100644
--- a/test/CodeGen/X86/vselect.ll
+++ b/test/CodeGen/X86/vselect.ll
@@ -262,3 +262,17 @@ define <2 x i64> @test25(<2 x i64> %a, <2 x i64> %b) {
; CHECK: movsd
; CHECK: ret
+define <4 x float> @select_of_shuffles_0(<2 x float> %a0, <2 x float> %b0, <2 x float> %a1, <2 x float> %b1) {
+; CHECK-LABEL: select_of_shuffles_0
+; CHECK-DAG: movlhps %xmm2, [[REGA:%xmm[0-9]+]]
+; CHECK-DAG: movlhps %xmm3, [[REGB:%xmm[0-9]+]]
+; CHECK: subps [[REGB]], [[REGA]]
+ %1 = shufflevector <2 x float> %a0, <2 x float> undef, <4 x i32> <i32 0, i32 1, i32 undef, i32 undef>
+ %2 = shufflevector <2 x float> %a1, <2 x float> undef, <4 x i32> <i32 undef, i32 undef, i32 0, i32 1>
+ %3 = select <4 x i1> <i1 false, i1 false, i1 true, i1 true>, <4 x float> %2, <4 x float> %1
+ %4 = shufflevector <2 x float> %b0, <2 x float> undef, <4 x i32> <i32 0, i32 1, i32 undef, i32 undef>
+ %5 = shufflevector <2 x float> %b1, <2 x float> undef, <4 x i32> <i32 undef, i32 undef, i32 0, i32 1>
+ %6 = select <4 x i1> <i1 false, i1 false, i1 true, i1 true>, <4 x float> %5, <4 x float> %4
+ %7 = fsub <4 x float> %3, %6
+ ret <4 x float> %7
+}
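Worth noting (an editorial aside, not part of the patch): a select with a constant lane mask is just another shuffle. The <false, false, true, true> masks above take lanes 0-1 from the first shuffle and lanes 2-3 from the second, so each %a/%b pair reduces to the single shuffle sketched below (hypothetical function name), which is what the movlhps checks rely on:

define <4 x float> @select_as_shuffle(<4 x float> %lo, <4 x float> %hi) {
  ; Lanes 0-1 from %lo, lanes 2-3 from %hi (indices 4-7 refer to %hi).
  %r = shufflevector <4 x float> %lo, <4 x float> %hi, <4 x i32> <i32 0, i32 1, i32 6, i32 7>
  ret <4 x float> %r
}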
diff --git a/test/CodeGen/X86/widen_cast-4.ll b/test/CodeGen/X86/widen_cast-4.ll
index 1bc06a7..19b84f1 100644
--- a/test/CodeGen/X86/widen_cast-4.ll
+++ b/test/CodeGen/X86/widen_cast-4.ll
@@ -1,8 +1,9 @@
; RUN: llc < %s -march=x86 -mattr=+sse4.2 | FileCheck %s
-; CHECK: psraw
-; CHECK: psraw
+; RUN: llc < %s -march=x86 -mattr=+sse4.2 -x86-experimental-vector-widening-legalization | FileCheck %s --check-prefix=CHECK-WIDE
define void @update(i64* %dst_i, i64* %src_i, i32 %n) nounwind {
+; CHECK-LABEL: update:
+; CHECK-WIDE-LABEL: update:
entry:
%dst_i.addr = alloca i64* ; <i64**> [#uses=2]
%src_i.addr = alloca i64* ; <i64**> [#uses=2]
@@ -44,6 +45,26 @@ forbody: ; preds = %forcond
%shr = ashr <8 x i8> %add, < i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2 > ; <<8 x i8>> [#uses=1]
store <8 x i8> %shr, <8 x i8>* %arrayidx10
br label %forinc
+; CHECK: %forbody
+; CHECK: pmovzxbw
+; CHECK-NEXT: paddw
+; CHECK-NEXT: psllw $8
+; CHECK-NEXT: psraw $8
+; CHECK-NEXT: psraw $2
+; CHECK-NEXT: pshufb
+; CHECK-NEXT: movlpd
+;
+; FIXME: We shouldn't require both a movd and an insert.
+; CHECK-WIDE: %forbody
+; CHECK-WIDE: movd
+; CHECK-WIDE-NEXT: pinsrd
+; CHECK-WIDE-NEXT: paddb
+; CHECK-WIDE-NEXT: psrlw $2
+; CHECK-WIDE-NEXT: pand
+; CHECK-WIDE-NEXT: pxor
+; CHECK-WIDE-NEXT: psubb
+; CHECK-WIDE-NEXT: pextrd
+; CHECK-WIDE-NEXT: movd
forinc: ; preds = %forbody
%tmp15 = load i32* %i ; <i32> [#uses=1]
diff --git a/test/CodeGen/X86/widen_cast-6.ll b/test/CodeGen/X86/widen_cast-6.ll
index 7c06ad8..46d8dd7 100644
--- a/test/CodeGen/X86/widen_cast-6.ll
+++ b/test/CodeGen/X86/widen_cast-6.ll
@@ -1,9 +1,13 @@
; RUN: llc < %s -march=x86 -mattr=+sse4.1 | FileCheck %s
-; CHECK: movd
; Test bit convert that requires widening in the operand.
define i32 @return_v2hi() nounwind {
+; CHECK-LABEL: @return_v2hi
+; CHECK: pushl
+; CHECK-NEXT: xorl %eax, %eax
+; CHECK-NEXT: popl
+; CHECK-NEXT: ret
entry:
%retval12 = bitcast <2 x i16> zeroinitializer to i32 ; <i32> [#uses=1]
ret i32 %retval12
diff --git a/test/CodeGen/X86/widen_conversions.ll b/test/CodeGen/X86/widen_conversions.ll
new file mode 100644
index 0000000..522ab47
--- /dev/null
+++ b/test/CodeGen/X86/widen_conversions.ll
@@ -0,0 +1,18 @@
+; RUN: llc < %s -mcpu=x86-64 -x86-experimental-vector-widening-legalization -x86-experimental-vector-shuffle-lowering | FileCheck %s
+
+target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
+target triple = "x86_64-unknown-unknown"
+
+define <4 x i32> @zext_v4i8_to_v4i32(<4 x i8>* %ptr) {
+; CHECK-LABEL: zext_v4i8_to_v4i32:
+;
+; CHECK: movd (%{{.*}}), %[[X:xmm[0-9]+]]
+; CHECK-NEXT: pxor %[[Z:xmm[0-9]+]], %[[Z]]
+; CHECK-NEXT: punpcklbw %[[Z]], %[[X]]
+; CHECK-NEXT: punpcklbw %[[Z]], %[[X]]
+; CHECK-NEXT: ret
+
+ %val = load <4 x i8>* %ptr
+ %ext = zext <4 x i8> %val to <4 x i32>
+ ret <4 x i32> %ext
+}
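As background (an editorial aside, not part of the patch): each punpcklbw against a zeroed register inserts a zero byte after every data byte, so two rounds in a row carry i8 lanes all the way to i32, matching the checks above. The same widening written directly in IR (hypothetical function name):

define <4 x i32> @zext_in_two_steps(<4 x i8> %v) {
  ; Widen in two stages, mirroring the two unpack instructions.
  %w = zext <4 x i8> %v to <4 x i16>
  %x = zext <4 x i16> %w to <4 x i32>
  ret <4 x i32> %x
}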
diff --git a/test/CodeGen/X86/widen_shuffle-1.ll b/test/CodeGen/X86/widen_shuffle-1.ll
index 803402b..a355b75 100644
--- a/test/CodeGen/X86/widen_shuffle-1.ll
+++ b/test/CodeGen/X86/widen_shuffle-1.ll
@@ -33,7 +33,9 @@ entry:
define void @shuf3(<4 x float> %tmp10, <4 x float> %vecinit15, <4 x float>* %dst) nounwind {
entry:
; CHECK-LABEL: shuf3:
-; CHECK: shufps
+; CHECK-NOT: movlhps
+; CHECK-NOT: shufps
+; CHECK: pshufd
%shuffle.i.i.i12 = shufflevector <4 x float> %tmp10, <4 x float> %vecinit15, <4 x i32> <i32 0, i32 1, i32 4, i32 5>
%tmp25.i.i = shufflevector <4 x float> %shuffle.i.i.i12, <4 x float> undef, <3 x i32> <i32 0, i32 1, i32 2>
%tmp1.i.i = shufflevector <3 x float> %tmp25.i.i, <3 x float> zeroinitializer, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
diff --git a/test/CodeGen/X86/win64_eh.ll b/test/CodeGen/X86/win64_eh.ll
new file mode 100644
index 0000000..f1f874e
--- /dev/null
+++ b/test/CodeGen/X86/win64_eh.ll
@@ -0,0 +1,170 @@
+; RUN: llc < %s -O0 -mcpu=corei7 -mtriple=x86_64-pc-win32 | FileCheck %s -check-prefix=WIN64
+; RUN: llc < %s -O0 -mcpu=corei7 -mtriple=x86_64-pc-mingw32 | FileCheck %s -check-prefix=WIN64
+
+; Check function without prolog
+define void @foo0() uwtable {
+entry:
+ ret void
+}
+; WIN64-LABEL: foo0:
+; WIN64: .seh_proc foo0
+; WIN64: .seh_endprologue
+; WIN64: ret
+; WIN64: .seh_endproc
+
+; Checks a small stack allocation
+define void @foo1() uwtable {
+entry:
+ %baz = alloca [2000 x i16], align 2
+ ret void
+}
+; WIN64-LABEL: foo1:
+; WIN64: .seh_proc foo1
+; WIN64: subq $4000, %rsp
+; WIN64: .seh_stackalloc 4000
+; WIN64: .seh_endprologue
+; WIN64: addq $4000, %rsp
+; WIN64: ret
+; WIN64: .seh_endproc
+
+; Checks a stack allocation requiring a call to __chkstk/___chkstk_ms
+define void @foo2() uwtable {
+entry:
+ %baz = alloca [4000 x i16], align 2
+ ret void
+}
+; WIN64-LABEL: foo2:
+; WIN64: .seh_proc foo2
+; WIN64: movabsq $8000, %rax
+; WIN64: callq {{__chkstk|___chkstk_ms}}
+; WIN64: subq %rax, %rsp
+; WIN64: .seh_stackalloc 8000
+; WIN64: .seh_endprologue
+; WIN64: addq $8000, %rsp
+; WIN64: ret
+; WIN64: .seh_endproc
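The dividing line between foo1 and foo2 (an editorial aside, not part of the patch): Win64 probes the stack one 4 KiB page at a time, so a frame that reaches a full page is expected to go through __chkstk/___chkstk_ms instead of a bare subq. A minimal sketch of a frame just over the threshold (hypothetical function name and size):

define void @just_over_one_page() uwtable {
entry:
  ; 4097 bytes exceeds one 4096-byte page, so a probe call is expected.
  %buf = alloca [4097 x i8], align 1
  ret void
}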
+
+
+; Checks stack push
+define i32 @foo3(i32 %f_arg, i32 %e_arg, i32 %d_arg, i32 %c_arg, i32 %b_arg, i32 %a_arg) uwtable {
+entry:
+ %a = alloca i32
+ %b = alloca i32
+ %c = alloca i32
+ %d = alloca i32
+ %e = alloca i32
+ %f = alloca i32
+ store i32 %a_arg, i32* %a
+ store i32 %b_arg, i32* %b
+ store i32 %c_arg, i32* %c
+ store i32 %d_arg, i32* %d
+ store i32 %e_arg, i32* %e
+ store i32 %f_arg, i32* %f
+ %tmp = load i32* %a
+ %tmp1 = mul i32 %tmp, 2
+ %tmp2 = load i32* %b
+ %tmp3 = mul i32 %tmp2, 3
+ %tmp4 = add i32 %tmp1, %tmp3
+ %tmp5 = load i32* %c
+ %tmp6 = mul i32 %tmp5, 5
+ %tmp7 = add i32 %tmp4, %tmp6
+ %tmp8 = load i32* %d
+ %tmp9 = mul i32 %tmp8, 7
+ %tmp10 = add i32 %tmp7, %tmp9
+ %tmp11 = load i32* %e
+ %tmp12 = mul i32 %tmp11, 11
+ %tmp13 = add i32 %tmp10, %tmp12
+ %tmp14 = load i32* %f
+ %tmp15 = mul i32 %tmp14, 13
+ %tmp16 = add i32 %tmp13, %tmp15
+ ret i32 %tmp16
+}
+; WIN64-LABEL: foo3:
+; WIN64: .seh_proc foo3
+; WIN64: pushq %rsi
+; WIN64: .seh_pushreg 6
+; WIN64: subq $24, %rsp
+; WIN64: .seh_stackalloc 24
+; WIN64: .seh_endprologue
+; WIN64: addq $24, %rsp
+; WIN64: popq %rsi
+; WIN64: ret
+; WIN64: .seh_endproc
+
+
+; Check emission of eh handler and handler data
+declare i32 @_d_eh_personality(i32, i32, i64, i8*, i8*)
+declare void @_d_eh_resume_unwind(i8*)
+
+declare i32 @bar()
+
+define i32 @foo4() #0 {
+entry:
+ %step = alloca i32, align 4
+ store i32 0, i32* %step
+ %tmp = load i32* %step
+
+ %tmp1 = invoke i32 @bar()
+ to label %finally unwind label %landingpad
+
+finally:
+ store i32 1, i32* %step
+ br label %endtryfinally
+
+landingpad:
+ %landing_pad = landingpad { i8*, i32 } personality i32 (i32, i32, i64, i8*, i8*)* @_d_eh_personality
+ cleanup
+ %tmp3 = extractvalue { i8*, i32 } %landing_pad, 0
+ store i32 2, i32* %step
+ call void @_d_eh_resume_unwind(i8* %tmp3)
+ unreachable
+
+endtryfinally:
+ %tmp10 = load i32* %step
+ ret i32 %tmp10
+}
+; WIN64-LABEL: foo4:
+; WIN64: .seh_proc foo4
+; WIN64: .seh_handler _d_eh_personality, @unwind, @except
+; WIN64: subq $56, %rsp
+; WIN64: .seh_stackalloc 56
+; WIN64: .seh_endprologue
+; WIN64: addq $56, %rsp
+; WIN64: ret
+; WIN64: .seh_handlerdata
+; WIN64: .seh_endproc
+
+
+; Check stack re-alignment and xmm spilling
+define void @foo5() uwtable {
+entry:
+ %s = alloca i32, align 64
+ call void asm sideeffect "", "~{rbx},~{rdi},~{xmm6},~{xmm7}"()
+ ret void
+}
+; WIN64-LABEL: foo5:
+; WIN64: .seh_proc foo5
+; WIN64: pushq %rbp
+; WIN64: .seh_pushreg 5
+; WIN64: movq %rsp, %rbp
+; WIN64: pushq %rdi
+; WIN64: .seh_pushreg 7
+; WIN64: pushq %rbx
+; WIN64: .seh_pushreg 3
+; WIN64: andq $-64, %rsp
+; WIN64: subq $128, %rsp
+; WIN64: .seh_stackalloc 48
+; WIN64: .seh_setframe 5, 64
+; WIN64: movaps %xmm7, -32(%rbp) # 16-byte Spill
+; WIN64: movaps %xmm6, -48(%rbp) # 16-byte Spill
+; WIN64: .seh_savexmm 6, 16
+; WIN64: .seh_savexmm 7, 32
+; WIN64: .seh_endprologue
+; WIN64: movaps -48(%rbp), %xmm6 # 16-byte Reload
+; WIN64: movaps -32(%rbp), %xmm7 # 16-byte Reload
+; WIN64: leaq -16(%rbp), %rsp
+; WIN64: popq %rbx
+; WIN64: popq %rdi
+; WIN64: popq %rbp
+; WIN64: retq
+; WIN64: .seh_endproc
diff --git a/test/CodeGen/X86/x86-64-double-shifts-Oz-Os-O2.ll b/test/CodeGen/X86/x86-64-double-shifts-Oz-Os-O2.ll
index 5d7a10b..08d0257 100644
--- a/test/CodeGen/X86/x86-64-double-shifts-Oz-Os-O2.ll
+++ b/test/CodeGen/X86/x86-64-double-shifts-Oz-Os-O2.ll
@@ -3,7 +3,7 @@
; clang -Oz -c test1.cpp -emit-llvm -S -o
; Verify that we generate the shld instruction when we are optimizing for size,
; even for X86_64 processors that are known to have poor latency double
-; precision shift instuctions.
+; precision shift instructions.
; uint64_t lshift10(uint64_t a, uint64_t b)
; {
; return (a << 10) | (b >> 54);
@@ -25,7 +25,7 @@ attributes #0 = { minsize nounwind optsize readnone uwtable "less-precise-fpmad"
; clang -Os -c test2.cpp -emit-llvm -S
; Verify that we generate the shld instruction when we are optimizing for size,
; even for X86_64 processors that are known to have poor latency double
-; precision shift instuctions.
+; precision shift instructions.
; uint64_t lshift11(uint64_t a, uint64_t b)
; {
; return (a << 11) | (b >> 53);
@@ -46,7 +46,7 @@ attributes #1 = { nounwind optsize readnone uwtable "less-precise-fpmad"="false"
; clang -O2 -c test2.cpp -emit-llvm -S
; Verify that we do not generate the shld instruction when we are not optimizing
; for size for X86_64 processors that are known to have poor latency double
-; precision shift instuctions.
+; precision shift instructions.
; uint64_t lshift12(uint64_t a, uint64_t b)
; {
; return (a << 12) | (b >> 52);
diff --git a/test/CodeGen/X86/x86-64-frameaddr.ll b/test/CodeGen/X86/x86-64-frameaddr.ll
deleted file mode 100644
index 7d36a7a..0000000
--- a/test/CodeGen/X86/x86-64-frameaddr.ll
+++ /dev/null
@@ -1,15 +0,0 @@
-; RUN: llc < %s -march=x86-64 | FileCheck %s
-
-; CHECK: stack_end_address
-; CHECK: {{movq.+rbp.*$}}
-; CHECK: {{movq.+rbp.*$}}
-; CHECK: ret
-
-define i64* @stack_end_address() nounwind {
-entry:
- tail call i8* @llvm.frameaddress( i32 0 )
- bitcast i8* %0 to i64*
- ret i64* %1
-}
-
-declare i8* @llvm.frameaddress(i32) nounwind readnone
diff --git a/test/CodeGen/X86/x86-64-static-relo-movl.ll b/test/CodeGen/X86/x86-64-static-relo-movl.ll
new file mode 100644
index 0000000..71e52bb
--- /dev/null
+++ b/test/CodeGen/X86/x86-64-static-relo-movl.ll
@@ -0,0 +1,24 @@
+; RUN: llc -mtriple=x86_64-pc-win32-macho -relocation-model=static -O0 < %s | FileCheck %s
+
+; Ensure that we generate a movabsq, not a movl, for a static relocation
+; when compiling for 64 bit.
+
+%struct.MatchInfo = type [64 x i64]
+
+@NO_MATCH = internal constant %struct.MatchInfo zeroinitializer, align 8
+
+define void @setup() {
+ %pending = alloca %struct.MatchInfo, align 8
+ %t = bitcast %struct.MatchInfo* %pending to i8*
+ call void @llvm.memcpy.p0i8.p0i8.i64(i8* %t, i8* bitcast (%struct.MatchInfo* @NO_MATCH to i8*), i64 512, i32 8, i1 false)
+ %u = getelementptr inbounds %struct.MatchInfo* %pending, i32 0, i32 2
+ %v = load i64* %u, align 8
+ br label %done
+done:
+ ret void
+
+ ; CHECK: movabsq $_NO_MATCH, {{.*}}
+}
+
+; Function Attrs: nounwind
+declare void @llvm.memcpy.p0i8.p0i8.i64(i8*, i8*, i64, i32, i1)
diff --git a/test/CodeGen/X86/x86-frameaddr.ll b/test/CodeGen/X86/x86-frameaddr.ll
deleted file mode 100644
index d595874..0000000
--- a/test/CodeGen/X86/x86-frameaddr.ll
+++ /dev/null
@@ -1,9 +0,0 @@
-; RUN: llc < %s -march=x86 | grep mov | grep ebp
-
-define i8* @t() nounwind {
-entry:
- %0 = tail call i8* @llvm.frameaddress(i32 0)
- ret i8* %0
-}
-
-declare i8* @llvm.frameaddress(i32) nounwind readnone
diff --git a/test/CodeGen/X86/x86-frameaddr2.ll b/test/CodeGen/X86/x86-frameaddr2.ll
deleted file mode 100644
index c509115..0000000
--- a/test/CodeGen/X86/x86-frameaddr2.ll
+++ /dev/null
@@ -1,9 +0,0 @@
-; RUN: llc < %s -march=x86 | grep mov | count 3
-
-define i8* @t() nounwind {
-entry:
- %0 = tail call i8* @llvm.frameaddress(i32 2)
- ret i8* %0
-}
-
-declare i8* @llvm.frameaddress(i32) nounwind readnone
diff --git a/test/CodeGen/X86/x86-upgrade-avx-vbroadcast.ll b/test/CodeGen/X86/x86-upgrade-avx-vbroadcast.ll
new file mode 100644
index 0000000..d885f1c
--- /dev/null
+++ b/test/CodeGen/X86/x86-upgrade-avx-vbroadcast.ll
@@ -0,0 +1,41 @@
+; RUN: llc -mattr=+avx < %s | FileCheck %s
+
+; Check that we properly upgrade the AVX vbroadcast intrinsics to IR. The
+; expectation is that we should still get the original instruction back that
+; maps to the intrinsic.
+
+target datalayout = "e-m:o-i64:64-f80:128-n8:16:32:64-S128"
+target triple = "x86_64-apple-macosx10.9.0"
+
+; CHECK-LABEL: test_mm_broadcast_ss:
+define <4 x float> @test_mm_broadcast_ss(float* readonly %__a){
+entry:
+ %0 = bitcast float* %__a to i8*
+; CHECK: vbroadcastss (%{{.*}}), %xmm
+ %1 = tail call <4 x float> @llvm.x86.avx.vbroadcast.ss(i8* %0)
+ ret <4 x float> %1
+}
+
+; CHECK-LABEL: test_mm256_broadcast_sd:
+define <4 x double> @test_mm256_broadcast_sd(double* readonly %__a) {
+entry:
+ %0 = bitcast double* %__a to i8*
+; CHECK: vbroadcastsd (%{{.*}}), %ymm
+ %1 = tail call <4 x double> @llvm.x86.avx.vbroadcast.sd.256(i8* %0)
+ ret <4 x double> %1
+}
+
+; CHECK-LABEL: test_mm256_broadcast_ss:
+define <8 x float> @test_mm256_broadcast_ss(float* readonly %__a) {
+entry:
+ %0 = bitcast float* %__a to i8*
+; CHECK: vbroadcastss (%{{.*}}), %ymm
+ %1 = tail call <8 x float> @llvm.x86.avx.vbroadcast.ss.256(i8* %0)
+ ret <8 x float> %1
+}
+
+declare <8 x float> @llvm.x86.avx.vbroadcast.ss.256(i8*)
+
+declare <4 x double> @llvm.x86.avx.vbroadcast.sd.256(i8*)
+
+declare <4 x float> @llvm.x86.avx.vbroadcast.ss(i8*)
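For context (an editorial aside, not part of the patch): the auto-upgrade rewrites these intrinsics as an explicit scalar load followed by a zero-mask splat shuffle, which the AVX patterns then match back into vbroadcastss/vbroadcastsd. A minimal sketch of the upgraded shape (hypothetical function name):

define <4 x float> @splat_from_load(float* %p) {
entry:
  %s = load float* %p
  %v = insertelement <4 x float> undef, float %s, i32 0
  ; A zeroinitializer mask broadcasts lane 0 into every lane.
  %r = shufflevector <4 x float> %v, <4 x float> undef, <4 x i32> zeroinitializer
  ret <4 x float> %r
}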
diff --git a/test/CodeGen/X86/xaluo.ll b/test/CodeGen/X86/xaluo.ll
new file mode 100644
index 0000000..f078631
--- /dev/null
+++ b/test/CodeGen/X86/xaluo.ll
@@ -0,0 +1,743 @@
+; RUN: llc -mtriple=x86_64-darwin-unknown < %s | FileCheck %s --check-prefix=DAG
+; RUN: llc -mtriple=x86_64-darwin-unknown -fast-isel -fast-isel-abort < %s | FileCheck %s --check-prefix=FAST
+; RUN: llc -mtriple=x86_64-darwin-unknown < %s | FileCheck %s
+; RUN: llc -mtriple=x86_64-darwin-unknown -fast-isel -fast-isel-abort < %s | FileCheck %s
+
+;
+; Get the actual value of the overflow bit.
+;
+; SADDO reg, reg
+define zeroext i1 @saddo.i8(i8 signext %v1, i8 signext %v2, i8* %res) {
+entry:
+; DAG-LABEL: saddo.i8
+; DAG: addb %sil, %dil
+; DAG-NEXT: seto %al
+; FAST-LABEL: saddo.i8
+; FAST: addb %sil, %dil
+; FAST-NEXT: seto %al
+ %t = call {i8, i1} @llvm.sadd.with.overflow.i8(i8 %v1, i8 %v2)
+ %val = extractvalue {i8, i1} %t, 0
+ %obit = extractvalue {i8, i1} %t, 1
+ store i8 %val, i8* %res
+ ret i1 %obit
+}
+
+define zeroext i1 @saddo.i16(i16 %v1, i16 %v2, i16* %res) {
+entry:
+; DAG-LABEL: saddo.i16
+; DAG: addw %si, %di
+; DAG-NEXT: seto %al
+; FAST-LABEL: saddo.i16
+; FAST: addw %si, %di
+; FAST-NEXT: seto %al
+ %t = call {i16, i1} @llvm.sadd.with.overflow.i16(i16 %v1, i16 %v2)
+ %val = extractvalue {i16, i1} %t, 0
+ %obit = extractvalue {i16, i1} %t, 1
+ store i16 %val, i16* %res
+ ret i1 %obit
+}
+
+define zeroext i1 @saddo.i32(i32 %v1, i32 %v2, i32* %res) {
+entry:
+; DAG-LABEL: saddo.i32
+; DAG: addl %esi, %edi
+; DAG-NEXT: seto %al
+; FAST-LABEL: saddo.i32
+; FAST: addl %esi, %edi
+; FAST-NEXT: seto %al
+ %t = call {i32, i1} @llvm.sadd.with.overflow.i32(i32 %v1, i32 %v2)
+ %val = extractvalue {i32, i1} %t, 0
+ %obit = extractvalue {i32, i1} %t, 1
+ store i32 %val, i32* %res
+ ret i1 %obit
+}
+
+define zeroext i1 @saddo.i64(i64 %v1, i64 %v2, i64* %res) {
+entry:
+; DAG-LABEL: saddo.i64
+; DAG: addq %rsi, %rdi
+; DAG-NEXT: seto %al
+; FAST-LABEL: saddo.i64
+; FAST: addq %rsi, %rdi
+; FAST-NEXT: seto %al
+ %t = call {i64, i1} @llvm.sadd.with.overflow.i64(i64 %v1, i64 %v2)
+ %val = extractvalue {i64, i1} %t, 0
+ %obit = extractvalue {i64, i1} %t, 1
+ store i64 %val, i64* %res
+ ret i1 %obit
+}
+
+; SADDO reg, imm | imm, reg
+; FIXME: INC isn't supported in FastISel yet
+define zeroext i1 @saddo.i64imm1(i64 %v1, i64* %res) {
+entry:
+; DAG-LABEL: saddo.i64imm1
+; DAG: incq %rdi
+; DAG-NEXT: seto %al
+; FAST-LABEL: saddo.i64imm1
+; FAST: addq $1, %rdi
+; FAST-NEXT: seto %al
+ %t = call {i64, i1} @llvm.sadd.with.overflow.i64(i64 %v1, i64 1)
+ %val = extractvalue {i64, i1} %t, 0
+ %obit = extractvalue {i64, i1} %t, 1
+ store i64 %val, i64* %res
+ ret i1 %obit
+}
+
+; FIXME: DAG doesn't optimize immediates on the LHS.
+define zeroext i1 @saddo.i64imm2(i64 %v1, i64* %res) {
+entry:
+; DAG-LABEL: saddo.i64imm2
+; DAG: mov
+; DAG-NEXT: addq
+; DAG-NEXT: seto
+; FAST-LABEL: saddo.i64imm2
+; FAST: addq $1, %rdi
+; FAST-NEXT: seto %al
+ %t = call {i64, i1} @llvm.sadd.with.overflow.i64(i64 1, i64 %v1)
+ %val = extractvalue {i64, i1} %t, 0
+ %obit = extractvalue {i64, i1} %t, 1
+ store i64 %val, i64* %res
+ ret i1 %obit
+}
+
+; Check boundary conditions for large immediates.
+define zeroext i1 @saddo.i64imm3(i64 %v1, i64* %res) {
+entry:
+; DAG-LABEL: saddo.i64imm3
+; DAG: addq $-2147483648, %rdi
+; DAG-NEXT: seto %al
+; FAST-LABEL: saddo.i64imm3
+; FAST: addq $-2147483648, %rdi
+; FAST-NEXT: seto %al
+ %t = call {i64, i1} @llvm.sadd.with.overflow.i64(i64 %v1, i64 -2147483648)
+ %val = extractvalue {i64, i1} %t, 0
+ %obit = extractvalue {i64, i1} %t, 1
+ store i64 %val, i64* %res
+ ret i1 %obit
+}
+
+define zeroext i1 @saddo.i64imm4(i64 %v1, i64* %res) {
+entry:
+; DAG-LABEL: saddo.i64imm4
+; DAG: movabsq $-21474836489, %[[REG:[a-z]+]]
+; DAG-NEXT: addq %rdi, %[[REG]]
+; DAG-NEXT: seto
+; FAST-LABEL: saddo.i64imm4
+; FAST: movabsq $-21474836489, %[[REG:[a-z]+]]
+; FAST-NEXT: addq %rdi, %[[REG]]
+; FAST-NEXT: seto
+ %t = call {i64, i1} @llvm.sadd.with.overflow.i64(i64 %v1, i64 -21474836489)
+ %val = extractvalue {i64, i1} %t, 0
+ %obit = extractvalue {i64, i1} %t, 1
+ store i64 %val, i64* %res
+ ret i1 %obit
+}
+
+define zeroext i1 @saddo.i64imm5(i64 %v1, i64* %res) {
+entry:
+; DAG-LABEL: saddo.i64imm5
+; DAG: addq $2147483647, %rdi
+; DAG-NEXT: seto
+; FAST-LABEL: saddo.i64imm5
+; FAST: addq $2147483647, %rdi
+; FAST-NEXT: seto
+ %t = call {i64, i1} @llvm.sadd.with.overflow.i64(i64 %v1, i64 2147483647)
+ %val = extractvalue {i64, i1} %t, 0
+ %obit = extractvalue {i64, i1} %t, 1
+ store i64 %val, i64* %res
+ ret i1 %obit
+}
+
+; TODO: FastISel shouldn't use movabsq.
+define zeroext i1 @saddo.i64imm6(i64 %v1, i64* %res) {
+entry:
+; DAG-LABEL: saddo.i64imm6
+; DAG: movl $2147483648, %ecx
+; DAG: addq %rdi, %rcx
+; DAG-NEXT: seto
+; FAST-LABEL: saddo.i64imm6
+; FAST: movabsq $2147483648, %[[REG:[a-z]+]]
+; FAST: addq %rdi, %[[REG]]
+; FAST-NEXT: seto
+ %t = call {i64, i1} @llvm.sadd.with.overflow.i64(i64 %v1, i64 2147483648)
+ %val = extractvalue {i64, i1} %t, 0
+ %obit = extractvalue {i64, i1} %t, 1
+ store i64 %val, i64* %res
+ ret i1 %obit
+}
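A note on the boundaries being tested (an editorial aside, not part of the patch): x86-64 add-with-immediate sign-extends a 32-bit field, so constants in [-2147483648, 2147483647] fold directly into addq, while anything outside that range must first be materialized in a register. A minimal sketch at the two edges (hypothetical function names):

define i64 @fits_imm32(i64 %v) {
  ; 2147483647 is the largest immediate addq accepts directly.
  %r = add i64 %v, 2147483647
  ret i64 %r
}

define i64 @needs_materialization(i64 %v) {
  ; One past INT32_MAX no longer fits the sign-extended field.
  %r = add i64 %v, 2147483648
  ret i64 %r
}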
+
+; UADDO
+define zeroext i1 @uaddo.i32(i32 %v1, i32 %v2, i32* %res) {
+entry:
+; DAG-LABEL: uaddo.i32
+; DAG: addl %esi, %edi
+; DAG-NEXT: setb %al
+; FAST-LABEL: uaddo.i32
+; FAST: addl %esi, %edi
+; FAST-NEXT: setb %al
+ %t = call {i32, i1} @llvm.uadd.with.overflow.i32(i32 %v1, i32 %v2)
+ %val = extractvalue {i32, i1} %t, 0
+ %obit = extractvalue {i32, i1} %t, 1
+ store i32 %val, i32* %res
+ ret i1 %obit
+}
+
+define zeroext i1 @uaddo.i64(i64 %v1, i64 %v2, i64* %res) {
+entry:
+; DAG-LABEL: uaddo.i64
+; DAG: addq %rsi, %rdi
+; DAG-NEXT: setb %al
+; FAST-LABEL: uaddo.i64
+; FAST: addq %rsi, %rdi
+; FAST-NEXT: setb %al
+ %t = call {i64, i1} @llvm.uadd.with.overflow.i64(i64 %v1, i64 %v2)
+ %val = extractvalue {i64, i1} %t, 0
+ %obit = extractvalue {i64, i1} %t, 1
+ store i64 %val, i64* %res
+ ret i1 %obit
+}
+
+; SSUBO
+define zeroext i1 @ssubo.i32(i32 %v1, i32 %v2, i32* %res) {
+entry:
+; DAG-LABEL: ssubo.i32
+; DAG: subl %esi, %edi
+; DAG-NEXT: seto %al
+; FAST-LABEL: ssubo.i32
+; FAST: subl %esi, %edi
+; FAST-NEXT: seto %al
+ %t = call {i32, i1} @llvm.ssub.with.overflow.i32(i32 %v1, i32 %v2)
+ %val = extractvalue {i32, i1} %t, 0
+ %obit = extractvalue {i32, i1} %t, 1
+ store i32 %val, i32* %res
+ ret i1 %obit
+}
+
+define zeroext i1 @ssubo.i64(i64 %v1, i64 %v2, i64* %res) {
+entry:
+; DAG-LABEL: ssubo.i64
+; DAG: subq %rsi, %rdi
+; DAG-NEXT: seto %al
+; FAST-LABEL: ssubo.i64
+; FAST: subq %rsi, %rdi
+; FAST-NEXT: seto %al
+ %t = call {i64, i1} @llvm.ssub.with.overflow.i64(i64 %v1, i64 %v2)
+ %val = extractvalue {i64, i1} %t, 0
+ %obit = extractvalue {i64, i1} %t, 1
+ store i64 %val, i64* %res
+ ret i1 %obit
+}
+
+; USUBO
+define zeroext i1 @usubo.i32(i32 %v1, i32 %v2, i32* %res) {
+entry:
+; DAG-LABEL: usubo.i32
+; DAG: subl %esi, %edi
+; DAG-NEXT: setb %al
+; FAST-LABEL: usubo.i32
+; FAST: subl %esi, %edi
+; FAST-NEXT: setb %al
+ %t = call {i32, i1} @llvm.usub.with.overflow.i32(i32 %v1, i32 %v2)
+ %val = extractvalue {i32, i1} %t, 0
+ %obit = extractvalue {i32, i1} %t, 1
+ store i32 %val, i32* %res
+ ret i1 %obit
+}
+
+define zeroext i1 @usubo.i64(i64 %v1, i64 %v2, i64* %res) {
+entry:
+; DAG-LABEL: usubo.i64
+; DAG: subq %rsi, %rdi
+; DAG-NEXT: setb %al
+; FAST-LABEL: usubo.i64
+; FAST: subq %rsi, %rdi
+; FAST-NEXT: setb %al
+ %t = call {i64, i1} @llvm.usub.with.overflow.i64(i64 %v1, i64 %v2)
+ %val = extractvalue {i64, i1} %t, 0
+ %obit = extractvalue {i64, i1} %t, 1
+ store i64 %val, i64* %res
+ ret i1 %obit
+}
+
+; SMULO
+define zeroext i1 @smulo.i8(i8 %v1, i8 %v2, i8* %res) {
+entry:
+; FAST-LABEL: smulo.i8
+; FAST: movb %dil, %al
+; FAST-NEXT: imulb %sil
+; FAST-NEXT: seto %cl
+ %t = call {i8, i1} @llvm.smul.with.overflow.i8(i8 %v1, i8 %v2)
+ %val = extractvalue {i8, i1} %t, 0
+ %obit = extractvalue {i8, i1} %t, 1
+ store i8 %val, i8* %res
+ ret i1 %obit
+}
+
+define zeroext i1 @smulo.i16(i16 %v1, i16 %v2, i16* %res) {
+entry:
+; DAG-LABEL: smulo.i16
+; DAG: imulw %si, %di
+; DAG-NEXT: seto %al
+; FAST-LABEL: smulo.i16
+; FAST: imulw %si, %di
+; FAST-NEXT: seto %al
+ %t = call {i16, i1} @llvm.smul.with.overflow.i16(i16 %v1, i16 %v2)
+ %val = extractvalue {i16, i1} %t, 0
+ %obit = extractvalue {i16, i1} %t, 1
+ store i16 %val, i16* %res
+ ret i1 %obit
+}
+
+define zeroext i1 @smulo.i32(i32 %v1, i32 %v2, i32* %res) {
+entry:
+; DAG-LABEL: smulo.i32
+; DAG: imull %esi, %edi
+; DAG-NEXT: seto %al
+; FAST-LABEL: smulo.i32
+; FAST: imull %esi, %edi
+; FAST-NEXT: seto %al
+ %t = call {i32, i1} @llvm.smul.with.overflow.i32(i32 %v1, i32 %v2)
+ %val = extractvalue {i32, i1} %t, 0
+ %obit = extractvalue {i32, i1} %t, 1
+ store i32 %val, i32* %res
+ ret i1 %obit
+}
+
+define zeroext i1 @smulo.i64(i64 %v1, i64 %v2, i64* %res) {
+entry:
+; DAG-LABEL: smulo.i64
+; DAG: imulq %rsi, %rdi
+; DAG-NEXT: seto %al
+; FAST-LABEL: smulo.i64
+; FAST: imulq %rsi, %rdi
+; FAST-NEXT: seto %al
+ %t = call {i64, i1} @llvm.smul.with.overflow.i64(i64 %v1, i64 %v2)
+ %val = extractvalue {i64, i1} %t, 0
+ %obit = extractvalue {i64, i1} %t, 1
+ store i64 %val, i64* %res
+ ret i1 %obit
+}
+
+; UMULO
+define zeroext i1 @umulo.i8(i8 %v1, i8 %v2, i8* %res) {
+entry:
+; FAST-LABEL: umulo.i8
+; FAST: movb %dil, %al
+; FAST-NEXT: mulb %sil
+; FAST-NEXT: seto %cl
+ %t = call {i8, i1} @llvm.umul.with.overflow.i8(i8 %v1, i8 %v2)
+ %val = extractvalue {i8, i1} %t, 0
+ %obit = extractvalue {i8, i1} %t, 1
+ store i8 %val, i8* %res
+ ret i1 %obit
+}
+
+define zeroext i1 @umulo.i16(i16 %v1, i16 %v2, i16* %res) {
+entry:
+; DAG-LABEL: umulo.i16
+; DAG: mulw %si
+; DAG-NEXT: seto
+; FAST-LABEL: umulo.i16
+; FAST: mulw %si
+; FAST-NEXT: seto
+ %t = call {i16, i1} @llvm.umul.with.overflow.i16(i16 %v1, i16 %v2)
+ %val = extractvalue {i16, i1} %t, 0
+ %obit = extractvalue {i16, i1} %t, 1
+ store i16 %val, i16* %res
+ ret i1 %obit
+}
+
+define zeroext i1 @umulo.i32(i32 %v1, i32 %v2, i32* %res) {
+entry:
+; DAG-LABEL: umulo.i32
+; DAG: mull %esi
+; DAG-NEXT: seto
+; FAST-LABEL: umulo.i32
+; FAST: mull %esi
+; FAST-NEXT: seto
+ %t = call {i32, i1} @llvm.umul.with.overflow.i32(i32 %v1, i32 %v2)
+ %val = extractvalue {i32, i1} %t, 0
+ %obit = extractvalue {i32, i1} %t, 1
+ store i32 %val, i32* %res
+ ret i1 %obit
+}
+
+define zeroext i1 @umulo.i64(i64 %v1, i64 %v2, i64* %res) {
+entry:
+; DAG-LABEL: umulo.i64
+; DAG: mulq %rsi
+; DAG-NEXT: seto
+; FAST-LABEL: umulo.i64
+; FAST: mulq %rsi
+; FAST-NEXT: seto
+ %t = call {i64, i1} @llvm.umul.with.overflow.i64(i64 %v1, i64 %v2)
+ %val = extractvalue {i64, i1} %t, 0
+ %obit = extractvalue {i64, i1} %t, 1
+ store i64 %val, i64* %res
+ ret i1 %obit
+}
+
+;
+; Check the use of the overflow bit in combination with a select instruction.
+;
+define i32 @saddo.select.i32(i32 %v1, i32 %v2) {
+entry:
+; CHECK-LABEL: saddo.select.i32
+; CHECK: addl %esi, %eax
+; CHECK-NEXT: cmovol %edi, %esi
+ %t = call {i32, i1} @llvm.sadd.with.overflow.i32(i32 %v1, i32 %v2)
+ %obit = extractvalue {i32, i1} %t, 1
+ %ret = select i1 %obit, i32 %v1, i32 %v2
+ ret i32 %ret
+}
+
+define i64 @saddo.select.i64(i64 %v1, i64 %v2) {
+entry:
+; CHECK-LABEL: saddo.select.i64
+; CHECK: addq %rsi, %rax
+; CHECK-NEXT: cmovoq %rdi, %rsi
+ %t = call {i64, i1} @llvm.sadd.with.overflow.i64(i64 %v1, i64 %v2)
+ %obit = extractvalue {i64, i1} %t, 1
+ %ret = select i1 %obit, i64 %v1, i64 %v2
+ ret i64 %ret
+}
+
+define i32 @uaddo.select.i32(i32 %v1, i32 %v2) {
+entry:
+; CHECK-LABEL: uaddo.select.i32
+; CHECK: addl %esi, %eax
+; CHECK-NEXT: cmovbl %edi, %esi
+ %t = call {i32, i1} @llvm.uadd.with.overflow.i32(i32 %v1, i32 %v2)
+ %obit = extractvalue {i32, i1} %t, 1
+ %ret = select i1 %obit, i32 %v1, i32 %v2
+ ret i32 %ret
+}
+
+define i64 @uaddo.select.i64(i64 %v1, i64 %v2) {
+entry:
+; CHECK-LABEL: uaddo.select.i64
+; CHECK: addq %rsi, %rax
+; CHECK-NEXT: cmovbq %rdi, %rsi
+ %t = call {i64, i1} @llvm.uadd.with.overflow.i64(i64 %v1, i64 %v2)
+ %obit = extractvalue {i64, i1} %t, 1
+ %ret = select i1 %obit, i64 %v1, i64 %v2
+ ret i64 %ret
+}
+
+define i32 @ssubo.select.i32(i32 %v1, i32 %v2) {
+entry:
+; CHECK-LABEL: ssubo.select.i32
+; CHECK: cmpl %esi, %edi
+; CHECK-NEXT: cmovol %edi, %esi
+ %t = call {i32, i1} @llvm.ssub.with.overflow.i32(i32 %v1, i32 %v2)
+ %obit = extractvalue {i32, i1} %t, 1
+ %ret = select i1 %obit, i32 %v1, i32 %v2
+ ret i32 %ret
+}
+
+define i64 @ssubo.select.i64(i64 %v1, i64 %v2) {
+entry:
+; CHECK-LABEL: ssubo.select.i64
+; CHECK: cmpq %rsi, %rdi
+; CHECK-NEXT: cmovoq %rdi, %rsi
+ %t = call {i64, i1} @llvm.ssub.with.overflow.i64(i64 %v1, i64 %v2)
+ %obit = extractvalue {i64, i1} %t, 1
+ %ret = select i1 %obit, i64 %v1, i64 %v2
+ ret i64 %ret
+}
+
+define i32 @usubo.select.i32(i32 %v1, i32 %v2) {
+entry:
+; CHECK-LABEL: usubo.select.i32
+; CHECK: cmpl %esi, %edi
+; CHECK-NEXT: cmovbl %edi, %esi
+ %t = call {i32, i1} @llvm.usub.with.overflow.i32(i32 %v1, i32 %v2)
+ %obit = extractvalue {i32, i1} %t, 1
+ %ret = select i1 %obit, i32 %v1, i32 %v2
+ ret i32 %ret
+}
+
+define i64 @usubo.select.i64(i64 %v1, i64 %v2) {
+entry:
+; CHECK-LABEL: usubo.select.i64
+; CHECK: cmpq %rsi, %rdi
+; CHECK-NEXT: cmovbq %rdi, %rsi
+ %t = call {i64, i1} @llvm.usub.with.overflow.i64(i64 %v1, i64 %v2)
+ %obit = extractvalue {i64, i1} %t, 1
+ %ret = select i1 %obit, i64 %v1, i64 %v2
+ ret i64 %ret
+}
+
+define i32 @smulo.select.i32(i32 %v1, i32 %v2) {
+entry:
+; CHECK-LABEL: smulo.select.i32
+; CHECK: imull %esi, %eax
+; CHECK-NEXT: cmovol %edi, %esi
+ %t = call {i32, i1} @llvm.smul.with.overflow.i32(i32 %v1, i32 %v2)
+ %obit = extractvalue {i32, i1} %t, 1
+ %ret = select i1 %obit, i32 %v1, i32 %v2
+ ret i32 %ret
+}
+
+define i64 @smulo.select.i64(i64 %v1, i64 %v2) {
+entry:
+; CHECK-LABEL: smulo.select.i64
+; CHECK: imulq %rsi, %rax
+; CHECK-NEXT: cmovoq %rdi, %rsi
+ %t = call {i64, i1} @llvm.smul.with.overflow.i64(i64 %v1, i64 %v2)
+ %obit = extractvalue {i64, i1} %t, 1
+ %ret = select i1 %obit, i64 %v1, i64 %v2
+ ret i64 %ret
+}
+
+define i32 @umulo.select.i32(i32 %v1, i32 %v2) {
+entry:
+; CHECK-LABEL: umulo.select.i32
+; CHECK: mull %esi
+; CHECK-NEXT: cmovol %edi, %esi
+ %t = call {i32, i1} @llvm.umul.with.overflow.i32(i32 %v1, i32 %v2)
+ %obit = extractvalue {i32, i1} %t, 1
+ %ret = select i1 %obit, i32 %v1, i32 %v2
+ ret i32 %ret
+}
+
+define i64 @umulo.select.i64(i64 %v1, i64 %v2) {
+entry:
+; CHECK-LABEL: umulo.select.i64
+; CHECK: mulq %rsi
+; CHECK-NEXT: cmovoq %rdi, %rsi
+ %t = call {i64, i1} @llvm.umul.with.overflow.i64(i64 %v1, i64 %v2)
+ %obit = extractvalue {i64, i1} %t, 1
+ %ret = select i1 %obit, i64 %v1, i64 %v2
+ ret i64 %ret
+}
+
+
+;
+; Check the use of the overflow bit in combination with a branch instruction.
+;
+define zeroext i1 @saddo.br.i32(i32 %v1, i32 %v2) {
+entry:
+; CHECK-LABEL: saddo.br.i32
+; CHECK: addl %esi, %edi
+; CHECK-NEXT: jo
+ %t = call {i32, i1} @llvm.sadd.with.overflow.i32(i32 %v1, i32 %v2)
+ %val = extractvalue {i32, i1} %t, 0
+ %obit = extractvalue {i32, i1} %t, 1
+ br i1 %obit, label %overflow, label %continue, !prof !0
+
+overflow:
+ ret i1 false
+
+continue:
+ ret i1 true
+}
+
+define zeroext i1 @saddo.br.i64(i64 %v1, i64 %v2) {
+entry:
+; CHECK-LABEL: saddo.br.i64
+; CHECK: addq %rsi, %rdi
+; CHECK-NEXT: jo
+ %t = call {i64, i1} @llvm.sadd.with.overflow.i64(i64 %v1, i64 %v2)
+ %val = extractvalue {i64, i1} %t, 0
+ %obit = extractvalue {i64, i1} %t, 1
+ br i1 %obit, label %overflow, label %continue, !prof !0
+
+overflow:
+ ret i1 false
+
+continue:
+ ret i1 true
+}
+
+define zeroext i1 @uaddo.br.i32(i32 %v1, i32 %v2) {
+entry:
+; CHECK-LABEL: uaddo.br.i32
+; CHECK: addl %esi, %edi
+; CHECK-NEXT: jb
+ %t = call {i32, i1} @llvm.uadd.with.overflow.i32(i32 %v1, i32 %v2)
+ %val = extractvalue {i32, i1} %t, 0
+ %obit = extractvalue {i32, i1} %t, 1
+ br i1 %obit, label %overflow, label %continue, !prof !0
+
+overflow:
+ ret i1 false
+
+continue:
+ ret i1 true
+}
+
+define zeroext i1 @uaddo.br.i64(i64 %v1, i64 %v2) {
+entry:
+; CHECK-LABEL: uaddo.br.i64
+; CHECK: addq %rsi, %rdi
+; CHECK-NEXT: jb
+ %t = call {i64, i1} @llvm.uadd.with.overflow.i64(i64 %v1, i64 %v2)
+ %val = extractvalue {i64, i1} %t, 0
+ %obit = extractvalue {i64, i1} %t, 1
+ br i1 %obit, label %overflow, label %continue, !prof !0
+
+overflow:
+ ret i1 false
+
+continue:
+ ret i1 true
+}
+
+define zeroext i1 @ssubo.br.i32(i32 %v1, i32 %v2) {
+entry:
+; CHECK-LABEL: ssubo.br.i32
+; CHECK: cmpl %esi, %edi
+; CHECK-NEXT: jo
+ %t = call {i32, i1} @llvm.ssub.with.overflow.i32(i32 %v1, i32 %v2)
+ %val = extractvalue {i32, i1} %t, 0
+ %obit = extractvalue {i32, i1} %t, 1
+ br i1 %obit, label %overflow, label %continue, !prof !0
+
+overflow:
+ ret i1 false
+
+continue:
+ ret i1 true
+}
+
+define zeroext i1 @ssubo.br.i64(i64 %v1, i64 %v2) {
+entry:
+; CHECK-LABEL: ssubo.br.i64
+; CHECK: cmpq %rsi, %rdi
+; CHECK-NEXT: jo
+ %t = call {i64, i1} @llvm.ssub.with.overflow.i64(i64 %v1, i64 %v2)
+ %val = extractvalue {i64, i1} %t, 0
+ %obit = extractvalue {i64, i1} %t, 1
+ br i1 %obit, label %overflow, label %continue, !prof !0
+
+overflow:
+ ret i1 false
+
+continue:
+ ret i1 true
+}
+
+define zeroext i1 @usubo.br.i32(i32 %v1, i32 %v2) {
+entry:
+; CHECK-LABEL: usubo.br.i32
+; CHECK: cmpl %esi, %edi
+; CHECK-NEXT: jb
+ %t = call {i32, i1} @llvm.usub.with.overflow.i32(i32 %v1, i32 %v2)
+ %val = extractvalue {i32, i1} %t, 0
+ %obit = extractvalue {i32, i1} %t, 1
+ br i1 %obit, label %overflow, label %continue, !prof !0
+
+overflow:
+ ret i1 false
+
+continue:
+ ret i1 true
+}
+
+define zeroext i1 @usubo.br.i64(i64 %v1, i64 %v2) {
+entry:
+; CHECK-LABEL: usubo.br.i64
+; CHECK: cmpq %rsi, %rdi
+; CHECK-NEXT: jb
+ %t = call {i64, i1} @llvm.usub.with.overflow.i64(i64 %v1, i64 %v2)
+ %val = extractvalue {i64, i1} %t, 0
+ %obit = extractvalue {i64, i1} %t, 1
+ br i1 %obit, label %overflow, label %continue, !prof !0
+
+overflow:
+ ret i1 false
+
+continue:
+ ret i1 true
+}
+
+define zeroext i1 @smulo.br.i32(i32 %v1, i32 %v2) {
+entry:
+; CHECK-LABEL: smulo.br.i32
+; CHECK: imull %esi, %edi
+; CHECK-NEXT: jo
+ %t = call {i32, i1} @llvm.smul.with.overflow.i32(i32 %v1, i32 %v2)
+ %val = extractvalue {i32, i1} %t, 0
+ %obit = extractvalue {i32, i1} %t, 1
+ br i1 %obit, label %overflow, label %continue, !prof !0
+
+overflow:
+ ret i1 false
+
+continue:
+ ret i1 true
+}
+
+define zeroext i1 @smulo.br.i64(i64 %v1, i64 %v2) {
+entry:
+; CHECK-LABEL: smulo.br.i64
+; CHECK: imulq %rsi, %rdi
+; CHECK-NEXT: jo
+ %t = call {i64, i1} @llvm.smul.with.overflow.i64(i64 %v1, i64 %v2)
+ %val = extractvalue {i64, i1} %t, 0
+ %obit = extractvalue {i64, i1} %t, 1
+ br i1 %obit, label %overflow, label %continue, !prof !0
+
+overflow:
+ ret i1 false
+
+continue:
+ ret i1 true
+}
+
+define zeroext i1 @umulo.br.i32(i32 %v1, i32 %v2) {
+entry:
+; CHECK-LABEL: umulo.br.i32
+; CHECK: mull %esi
+; CHECK-NEXT: jo
+ %t = call {i32, i1} @llvm.umul.with.overflow.i32(i32 %v1, i32 %v2)
+ %val = extractvalue {i32, i1} %t, 0
+ %obit = extractvalue {i32, i1} %t, 1
+ br i1 %obit, label %overflow, label %continue, !prof !0
+
+overflow:
+ ret i1 false
+
+continue:
+ ret i1 true
+}
+
+define zeroext i1 @umulo.br.i64(i64 %v1, i64 %v2) {
+entry:
+; CHECK-LABEL: umulo.br.i64
+; CHECK: mulq %rsi
+; CHECK-NEXT: jo
+ %t = call {i64, i1} @llvm.umul.with.overflow.i64(i64 %v1, i64 %v2)
+ %val = extractvalue {i64, i1} %t, 0
+ %obit = extractvalue {i64, i1} %t, 1
+ br i1 %obit, label %overflow, label %continue, !prof !0
+
+overflow:
+ ret i1 false
+
+continue:
+ ret i1 true
+}
+
+declare {i8, i1} @llvm.sadd.with.overflow.i8 (i8, i8 ) nounwind readnone
+declare {i16, i1} @llvm.sadd.with.overflow.i16(i16, i16) nounwind readnone
+declare {i32, i1} @llvm.sadd.with.overflow.i32(i32, i32) nounwind readnone
+declare {i64, i1} @llvm.sadd.with.overflow.i64(i64, i64) nounwind readnone
+declare {i32, i1} @llvm.uadd.with.overflow.i32(i32, i32) nounwind readnone
+declare {i64, i1} @llvm.uadd.with.overflow.i64(i64, i64) nounwind readnone
+declare {i32, i1} @llvm.ssub.with.overflow.i32(i32, i32) nounwind readnone
+declare {i64, i1} @llvm.ssub.with.overflow.i64(i64, i64) nounwind readnone
+declare {i32, i1} @llvm.usub.with.overflow.i32(i32, i32) nounwind readnone
+declare {i64, i1} @llvm.usub.with.overflow.i64(i64, i64) nounwind readnone
+declare {i8, i1} @llvm.smul.with.overflow.i8 (i8, i8 ) nounwind readnone
+declare {i16, i1} @llvm.smul.with.overflow.i16(i16, i16) nounwind readnone
+declare {i32, i1} @llvm.smul.with.overflow.i32(i32, i32) nounwind readnone
+declare {i64, i1} @llvm.smul.with.overflow.i64(i64, i64) nounwind readnone
+declare {i8, i1} @llvm.umul.with.overflow.i8 (i8, i8 ) nounwind readnone
+declare {i16, i1} @llvm.umul.with.overflow.i16(i16, i16) nounwind readnone
+declare {i32, i1} @llvm.umul.with.overflow.i32(i32, i32) nounwind readnone
+declare {i64, i1} @llvm.umul.with.overflow.i64(i64, i64) nounwind readnone
+
+!0 = metadata !{metadata !"branch_weights", i32 0, i32 2147483647}
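One last aside (not part of the patch): the !prof weights above give the first successor (%overflow) weight 0 and %continue the maximum weight, so block placement keeps the hot path as the fall-through after each jo/jb. A minimal sketch of the annotation in isolation (hypothetical function name):

define i32 @cold_error_path(i1 %failed) {
entry:
  ; Weight 0 for %error, maximum for %ok: %error is laid out cold.
  br i1 %failed, label %error, label %ok, !prof !0
error:
  ret i32 -1
ok:
  ret i32 0
}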