author    Stephen Hines <srhines@google.com>  2015-03-23 12:10:34 -0700
committer Stephen Hines <srhines@google.com>  2015-03-23 12:10:34 -0700
commit    ebe69fe11e48d322045d5949c83283927a0d790b (patch)
tree      c92f1907a6b8006628a4b01615f38264d29834ea /test/CodeGen
parent    b7d2e72b02a4cb8034f32f8247a2558d2434e121 (diff)
Update aosp/master LLVM for rebase to r230699.
Change-Id: I2b5be30509658cb8266be782de0ab24f9099f9b9
Diffstat (limited to 'test/CodeGen'; each entry lists file mode, path, then the number of lines changed)
-rw-r--r--test/CodeGen/AArch64/aarch64-2014-08-11-MachineCombinerCrash.ll122
-rw-r--r--test/CodeGen/AArch64/aarch64-2014-12-02-combine-soften.ll16
-rw-r--r--test/CodeGen/AArch64/addsub-shifted.ll14
-rw-r--r--test/CodeGen/AArch64/analyze-branch.ll4
-rw-r--r--test/CodeGen/AArch64/analyzecmp.ll8
-rw-r--r--test/CodeGen/AArch64/argument-blocks.ll197
-rw-r--r--test/CodeGen/AArch64/arm64-2011-03-17-AsmPrinterCrash.ll46
-rw-r--r--test/CodeGen/AArch64/arm64-2012-05-22-LdStOptBug.ll10
-rw-r--r--test/CodeGen/AArch64/arm64-2012-06-06-FPToUI.ll8
-rw-r--r--test/CodeGen/AArch64/arm64-aapcs-be.ll18
-rw-r--r--test/CodeGen/AArch64/arm64-abi_align.ll10
-rw-r--r--test/CodeGen/AArch64/arm64-atomic-128.ll24
-rw-r--r--test/CodeGen/AArch64/arm64-ccmp-heuristics.ll8
-rw-r--r--test/CodeGen/AArch64/arm64-cse.ll2
-rw-r--r--test/CodeGen/AArch64/arm64-fastcc-tailcall.ll6
-rw-r--r--test/CodeGen/AArch64/arm64-fixed-point-scalar-cvt-dagcombine.ll2
-rw-r--r--test/CodeGen/AArch64/arm64-fold-address.ll10
-rw-r--r--test/CodeGen/AArch64/arm64-indexed-vector-ldst-2.ll8
-rw-r--r--test/CodeGen/AArch64/arm64-ldp.ll85
-rw-r--r--test/CodeGen/AArch64/arm64-named-reg-alloc.ll2
-rw-r--r--test/CodeGen/AArch64/arm64-named-reg-notareg.ll2
-rw-r--r--test/CodeGen/AArch64/arm64-neon-copy.ll2
-rw-r--r--test/CodeGen/AArch64/arm64-neon-select_cc.ll15
-rw-r--r--test/CodeGen/AArch64/arm64-platform-reg.ll15
-rw-r--r--test/CodeGen/AArch64/arm64-popcnt.ll20
-rw-r--r--test/CodeGen/AArch64/arm64-prefetch.ll8
-rw-r--r--test/CodeGen/AArch64/arm64-promote-const.ll118
-rw-r--r--test/CodeGen/AArch64/arm64-st1.ll192
-rw-r--r--test/CodeGen/AArch64/arm64-stackmap-nops.ll15
-rw-r--r--test/CodeGen/AArch64/arm64-stackpointer.ll2
-rw-r--r--test/CodeGen/AArch64/arm64-tls-dynamics.ll8
-rw-r--r--test/CodeGen/AArch64/arm64-triv-disjoint-mem-access.ll14
-rw-r--r--test/CodeGen/AArch64/arm64-variadic-aapcs.ll16
-rw-r--r--test/CodeGen/AArch64/arm64-vshuffle.ll26
-rw-r--r--test/CodeGen/AArch64/bitcast-v2i8.ll15
-rw-r--r--test/CodeGen/AArch64/br-to-eh-lpad.ll78
-rw-r--r--test/CodeGen/AArch64/combine-comparisons-by-cse.ll1
-rw-r--r--test/CodeGen/AArch64/compiler-ident.ll2
-rw-r--r--test/CodeGen/AArch64/cpus.ll1
-rw-r--r--test/CodeGen/AArch64/dp-3source.ll15
-rw-r--r--test/CodeGen/AArch64/f16-convert.ll15
-rw-r--r--test/CodeGen/AArch64/fast-isel-branch-cond-split.ll42
-rw-r--r--test/CodeGen/AArch64/fast-isel-branch_weights.ll2
-rw-r--r--test/CodeGen/AArch64/fast-isel-memcpy.ll15
-rw-r--r--test/CodeGen/AArch64/fast-isel-tbz.ll162
-rw-r--r--test/CodeGen/AArch64/fdiv-combine.ll94
-rw-r--r--test/CodeGen/AArch64/fp16-v8-instructions.ll8
-rw-r--r--test/CodeGen/AArch64/fpimm.ll23
-rw-r--r--test/CodeGen/AArch64/func-argpassing.ll10
-rw-r--r--test/CodeGen/AArch64/func-calls.ll12
-rw-r--r--test/CodeGen/AArch64/ghc-cc.ll89
-rw-r--r--test/CodeGen/AArch64/global-merge-1.ll1
-rw-r--r--test/CodeGen/AArch64/global-merge-2.ll1
-rw-r--r--test/CodeGen/AArch64/implicit-sret.ll13
-rw-r--r--test/CodeGen/AArch64/large_shift.ll21
-rw-r--r--test/CodeGen/AArch64/machine_cse_impdef_killflags.ll26
-rw-r--r--test/CodeGen/AArch64/neon-scalar-copy.ll126
-rw-r--r--test/CodeGen/AArch64/or-combine.ll44
-rw-r--r--test/CodeGen/AArch64/ragreedy-csr.ll48
-rw-r--r--test/CodeGen/AArch64/remat.ll1
-rw-r--r--test/CodeGen/AArch64/setcc-type-mismatch.ll11
-rw-r--r--test/CodeGen/ARM/2007-05-09-tailmerge-2.ll9
-rw-r--r--test/CodeGen/ARM/2007-05-22-tailmerge-3.ll23
-rw-r--r--test/CodeGen/ARM/2009-10-16-Scope.ll22
-rw-r--r--test/CodeGen/ARM/2010-04-15-ScavengerDebugValue.ll34
-rw-r--r--test/CodeGen/ARM/2010-06-25-Thumb2ITInvalidIterator.ll62
-rw-r--r--test/CodeGen/ARM/2010-08-04-StackVariable.ll110
-rw-r--r--test/CodeGen/ARM/2011-01-19-MergedGlobalDbg.ll120
-rw-r--r--test/CodeGen/ARM/2011-04-12-FastRegAlloc.ll2
-rw-r--r--test/CodeGen/ARM/2011-05-04-MultipleLandingPadSuccs.ll10
-rw-r--r--test/CodeGen/ARM/2011-08-02-MergedGlobalDbg.ll114
-rw-r--r--test/CodeGen/ARM/2012-04-24-SplitEHCriticalEdge.ll8
-rw-r--r--test/CodeGen/ARM/2012-08-04-DtripleSpillReload.ll2
-rw-r--r--test/CodeGen/ARM/2012-09-25-InlineAsmScalarToVectorConv.ll2
-rw-r--r--test/CodeGen/ARM/2012-09-25-InlineAsmScalarToVectorConv2.ll2
-rw-r--r--test/CodeGen/ARM/2014-08-04-muls-it.ll4
-rw-r--r--test/CodeGen/ARM/2015-01-21-thumbv4t-ldstr-opt.ll48
-rw-r--r--test/CodeGen/ARM/Windows/read-only-data.ll2
-rw-r--r--test/CodeGen/ARM/Windows/stack-probe-non-default.ll27
-rw-r--r--test/CodeGen/ARM/Windows/structors.ll2
-rw-r--r--test/CodeGen/ARM/aggregate-padding.ll101
-rw-r--r--test/CodeGen/ARM/alloc-no-stack-realign.ll78
-rw-r--r--test/CodeGen/ARM/arm-abi-attr.ll10
-rw-r--r--test/CodeGen/ARM/atomic-64bit.ll2
-rw-r--r--test/CodeGen/ARM/atomic-ops-v8.ll2
-rw-r--r--test/CodeGen/ARM/big-endian-neon-extend.ll81
-rw-r--r--test/CodeGen/ARM/build-attributes-encoding.s2
-rw-r--r--test/CodeGen/ARM/build-attributes.ll702
-rw-r--r--test/CodeGen/ARM/coalesce-dbgvalue.ll64
-rw-r--r--test/CodeGen/ARM/coalesce-subregs.ll1
-rw-r--r--test/CodeGen/ARM/crc32.ll58
-rw-r--r--test/CodeGen/ARM/cse-ldrlit.ll4
-rw-r--r--test/CodeGen/ARM/cse-libcalls.ll6
-rw-r--r--test/CodeGen/ARM/dagcombine-concatvector.ll2
-rw-r--r--test/CodeGen/ARM/debug-frame-vararg.ll82
-rw-r--r--test/CodeGen/ARM/debug-frame.ll70
-rw-r--r--test/CodeGen/ARM/debug-info-arg.ll80
-rw-r--r--test/CodeGen/ARM/debug-info-blocks.ll346
-rw-r--r--test/CodeGen/ARM/debug-info-branch-folding.ll118
-rw-r--r--test/CodeGen/ARM/debug-info-d16-reg.ll122
-rw-r--r--test/CodeGen/ARM/debug-info-qreg.ll122
-rw-r--r--test/CodeGen/ARM/debug-info-s16-reg.ll135
-rw-r--r--test/CodeGen/ARM/debug-info-sreg2.ll44
-rw-r--r--test/CodeGen/ARM/debug-segmented-stacks.ll68
-rw-r--r--test/CodeGen/ARM/dyn-stackalloc.ll14
-rw-r--r--test/CodeGen/ARM/emit-big-cst.ll2
-rw-r--r--test/CodeGen/ARM/fold-stack-adjust.ll2
-rw-r--r--test/CodeGen/ARM/frame-register.ll6
-rw-r--r--test/CodeGen/ARM/ghc-tcreturn-lowered.ll21
-rw-r--r--test/CodeGen/ARM/global-merge-1.ll10
-rw-r--r--test/CodeGen/ARM/globals.ll1
-rw-r--r--test/CodeGen/ARM/ifcvt-branch-weight-bug.ll4
-rw-r--r--test/CodeGen/ARM/ifcvt-branch-weight.ll4
-rw-r--r--test/CodeGen/ARM/inline-diagnostics.ll2
-rw-r--r--test/CodeGen/ARM/interrupt-attr.ll14
-rw-r--r--test/CodeGen/ARM/isel-v8i32-crash.ll26
-rw-r--r--test/CodeGen/ARM/krait-cpu-div-attribute.ll36
-rw-r--r--test/CodeGen/ARM/longMAC.ll41
-rw-r--r--test/CodeGen/ARM/memcpy-inline.ll15
-rw-r--r--test/CodeGen/ARM/metadata-default.ll4
-rw-r--r--test/CodeGen/ARM/metadata-short-enums.ll4
-rw-r--r--test/CodeGen/ARM/metadata-short-wchar.ll4
-rw-r--r--test/CodeGen/ARM/named-reg-alloc.ll2
-rw-r--r--test/CodeGen/ARM/named-reg-notareg.ll2
-rw-r--r--test/CodeGen/ARM/none-macho-v4t.ll8
-rw-r--r--test/CodeGen/ARM/null-streamer.ll2
-rw-r--r--test/CodeGen/ARM/odr_comdat.ll16
-rw-r--r--test/CodeGen/ARM/out-of-registers.ll4
-rw-r--r--test/CodeGen/ARM/section-name.ll2
-rw-r--r--test/CodeGen/ARM/setcc-type-mismatch.ll11
-rw-r--r--test/CodeGen/ARM/sjlj-prepare-critical-edge.ll128
-rw-r--r--test/CodeGen/ARM/spill-q.ll2
-rw-r--r--test/CodeGen/ARM/stack-alignment.ll164
-rw-r--r--test/CodeGen/ARM/stack_guard_remat.ll2
-rw-r--r--test/CodeGen/ARM/stackpointer.ll2
-rw-r--r--test/CodeGen/ARM/sub-cmp-peephole.ll60
-rw-r--r--test/CodeGen/ARM/tail-call-weak.ll19
-rw-r--r--test/CodeGen/ARM/tail-call.ll5
-rw-r--r--test/CodeGen/ARM/tail-merge-branch-weight.ll6
-rw-r--r--test/CodeGen/ARM/taildup-branch-weight.ll4
-rw-r--r--test/CodeGen/ARM/thumb1-varalloc.ll32
-rw-r--r--test/CodeGen/ARM/thumb1_return_sequence.ll48
-rw-r--r--test/CodeGen/ARM/thumb_indirect_calls.ll40
-rw-r--r--test/CodeGen/ARM/tls1.ll14
-rw-r--r--test/CodeGen/ARM/vdup.ll6
-rw-r--r--test/CodeGen/ARM/vector-DAGCombine.ll8
-rw-r--r--test/CodeGen/ARM/vector-load.ll253
-rw-r--r--test/CodeGen/ARM/vector-store.ll258
-rw-r--r--test/CodeGen/ARM/vfp-regs-dwarf.ll20
-rw-r--r--test/CodeGen/ARM/vld1.ll9
-rw-r--r--test/CodeGen/ARM/vst1.ll10
-rw-r--r--test/CodeGen/BPF/alu8.ll46
-rw-r--r--test/CodeGen/BPF/atomics.ll20
-rw-r--r--test/CodeGen/BPF/basictest.ll28
-rw-r--r--test/CodeGen/BPF/byval.ll27
-rw-r--r--test/CodeGen/BPF/cc_args.ll96
-rw-r--r--test/CodeGen/BPF/cc_ret.ll48
-rw-r--r--test/CodeGen/BPF/cmp.ll119
-rw-r--r--test/CodeGen/BPF/ex1.ll46
-rw-r--r--test/CodeGen/BPF/intrinsics.ll50
-rw-r--r--test/CodeGen/BPF/lit.local.cfg2
-rw-r--r--test/CodeGen/BPF/load.ll43
-rw-r--r--test/CodeGen/BPF/loops.ll111
-rw-r--r--test/CodeGen/BPF/many_args1.ll12
-rw-r--r--test/CodeGen/BPF/many_args2.ll15
-rw-r--r--test/CodeGen/BPF/sanity.ll117
-rw-r--r--test/CodeGen/BPF/setcc.ll99
-rw-r--r--test/CodeGen/BPF/shifts.ll101
-rw-r--r--test/CodeGen/BPF/sockex2.ll326
-rw-r--r--test/CodeGen/BPF/struct_ret1.ll17
-rw-r--r--test/CodeGen/BPF/struct_ret2.ll12
-rw-r--r--test/CodeGen/BPF/vararg1.ll9
-rw-r--r--test/CodeGen/Generic/MachineBranchProb.ll2
-rw-r--r--test/CodeGen/Generic/dbg_value.ll4
-rw-r--r--test/CodeGen/Generic/empty-phi.ll19
-rw-r--r--test/CodeGen/Generic/overloaded-intrinsic-name.ll57
-rw-r--r--test/CodeGen/Generic/print-machineinstrs.ll2
-rw-r--r--test/CodeGen/Hexagon/BranchPredict.ll4
-rw-r--r--test/CodeGen/Hexagon/always-ext.ll3
-rw-r--r--test/CodeGen/Hexagon/block-addr.ll2
-rw-r--r--test/CodeGen/Hexagon/cext-check.ll8
-rw-r--r--test/CodeGen/Hexagon/cmp-not.ll50
-rw-r--r--test/CodeGen/Hexagon/cmp-to-predreg.ll4
-rw-r--r--test/CodeGen/Hexagon/dadd.ll2
-rw-r--r--test/CodeGen/Hexagon/dmul.ll2
-rw-r--r--test/CodeGen/Hexagon/dsub.ll2
-rw-r--r--test/CodeGen/Hexagon/dualstore.ll17
-rw-r--r--test/CodeGen/Hexagon/hwloop-dbg.ll60
-rw-r--r--test/CodeGen/Hexagon/idxload-with-zero-offset.ll40
-rw-r--r--test/CodeGen/Hexagon/intrinsics/alu32_alu.ll202
-rw-r--r--test/CodeGen/Hexagon/intrinsics/alu32_perm.ll104
-rw-r--r--test/CodeGen/Hexagon/intrinsics/cr.ll132
-rw-r--r--test/CodeGen/Hexagon/intrinsics/xtype_alu.ll1020
-rw-r--r--test/CodeGen/Hexagon/intrinsics/xtype_bit.ll329
-rw-r--r--test/CodeGen/Hexagon/intrinsics/xtype_complex.ll349
-rw-r--r--test/CodeGen/Hexagon/intrinsics/xtype_fp.ll388
-rw-r--r--test/CodeGen/Hexagon/intrinsics/xtype_mpy.ll1525
-rw-r--r--test/CodeGen/Hexagon/intrinsics/xtype_perm.ll252
-rw-r--r--test/CodeGen/Hexagon/intrinsics/xtype_pred.ll351
-rw-r--r--test/CodeGen/Hexagon/intrinsics/xtype_shift.ll723
-rw-r--r--test/CodeGen/Hexagon/newvaluestore.ll2
-rw-r--r--test/CodeGen/Hexagon/pred-absolute-store.ll2
-rw-r--r--test/CodeGen/Hexagon/struct_args_large.ll2
-rw-r--r--test/CodeGen/Inputs/DbgValueOtherTargets.ll30
-rw-r--r--test/CodeGen/Mips/2008-08-01-AsmInline.ll2
-rw-r--r--test/CodeGen/Mips/2009-11-16-CstPoolLoad.ll8
-rw-r--r--test/CodeGen/Mips/Fast-ISel/callabi.ll2
-rw-r--r--test/CodeGen/Mips/Fast-ISel/overflt.ll64
-rw-r--r--test/CodeGen/Mips/Fast-ISel/retabi.ll80
-rw-r--r--test/CodeGen/Mips/abiflags32.ll2
-rw-r--r--test/CodeGen/Mips/atomic.ll53
-rw-r--r--test/CodeGen/Mips/blockaddr.ll8
-rw-r--r--test/CodeGen/Mips/brsize3.ll2
-rw-r--r--test/CodeGen/Mips/brsize3a.ll2
-rw-r--r--test/CodeGen/Mips/cconv/arguments-float.ll12
-rw-r--r--test/CodeGen/Mips/cconv/arguments-fp128.ll8
-rw-r--r--test/CodeGen/Mips/cconv/arguments-hard-float-varargs.ll12
-rw-r--r--test/CodeGen/Mips/cconv/arguments-hard-float.ll12
-rw-r--r--test/CodeGen/Mips/cconv/arguments-hard-fp128.ll8
-rw-r--r--test/CodeGen/Mips/cconv/arguments-struct.ll41
-rw-r--r--test/CodeGen/Mips/cconv/arguments-varargs-small-structs-byte.ll282
-rw-r--r--test/CodeGen/Mips/cconv/arguments-varargs-small-structs-combinations.ll149
-rw-r--r--test/CodeGen/Mips/cconv/arguments-varargs-small-structs-multiple-args.ll161
-rw-r--r--test/CodeGen/Mips/cconv/arguments-varargs.ll12
-rw-r--r--test/CodeGen/Mips/cconv/arguments.ll12
-rw-r--r--test/CodeGen/Mips/cconv/callee-saved-float.ll24
-rw-r--r--test/CodeGen/Mips/cconv/callee-saved.ll24
-rw-r--r--test/CodeGen/Mips/cconv/memory-layout.ll12
-rw-r--r--test/CodeGen/Mips/cconv/reserved-space.ll12
-rw-r--r--test/CodeGen/Mips/cconv/return-float.ll12
-rw-r--r--test/CodeGen/Mips/cconv/return-hard-float.ll12
-rw-r--r--test/CodeGen/Mips/cconv/return-hard-fp128.ll8
-rw-r--r--test/CodeGen/Mips/cconv/return-hard-struct-f128.ll8
-rw-r--r--test/CodeGen/Mips/cconv/return-struct.ll12
-rw-r--r--test/CodeGen/Mips/cconv/return.ll12
-rw-r--r--test/CodeGen/Mips/cconv/stack-alignment.ll12
-rw-r--r--test/CodeGen/Mips/ci2.ll2
-rw-r--r--test/CodeGen/Mips/const1.ll2
-rw-r--r--test/CodeGen/Mips/const4a.ll2
-rw-r--r--test/CodeGen/Mips/const6.ll2
-rw-r--r--test/CodeGen/Mips/const6a.ll2
-rw-r--r--test/CodeGen/Mips/fcmp.ll90
-rw-r--r--test/CodeGen/Mips/fcopysign-f32-f64.ll6
-rw-r--r--test/CodeGen/Mips/fcopysign.ll6
-rw-r--r--test/CodeGen/Mips/fmadd1.ll76
-rw-r--r--test/CodeGen/Mips/fp-indexed-ls.ll8
-rw-r--r--test/CodeGen/Mips/fptr2.ll20
-rw-r--r--test/CodeGen/Mips/fpxx.ll8
-rw-r--r--test/CodeGen/Mips/global-address.ll8
-rw-r--r--test/CodeGen/Mips/inlineasm-assembler-directives.ll23
-rw-r--r--test/CodeGen/Mips/inlineasm-cnstrnt-reg.ll6
-rw-r--r--test/CodeGen/Mips/inlineasm-cnstrnt-reg64.ll2
-rw-r--r--test/CodeGen/Mips/inlineasm64.ll2
-rw-r--r--test/CodeGen/Mips/inlineasmmemop.ll35
-rw-r--r--test/CodeGen/Mips/largeimmprinting.ll4
-rw-r--r--test/CodeGen/Mips/lcb2.ll22
-rw-r--r--test/CodeGen/Mips/lcb3c.ll4
-rw-r--r--test/CodeGen/Mips/lcb4a.ll16
-rw-r--r--test/CodeGen/Mips/lcb5.ll36
-rw-r--r--test/CodeGen/Mips/llvm-ir/add.ll123
-rw-r--r--test/CodeGen/Mips/llvm-ir/and.ll99
-rw-r--r--test/CodeGen/Mips/llvm-ir/ashr.ll200
-rw-r--r--test/CodeGen/Mips/llvm-ir/call.ll4
-rw-r--r--test/CodeGen/Mips/llvm-ir/indirectbr.ll4
-rw-r--r--test/CodeGen/Mips/llvm-ir/lshr.ll188
-rw-r--r--test/CodeGen/Mips/llvm-ir/mul.ll109
-rw-r--r--test/CodeGen/Mips/llvm-ir/or.ll99
-rw-r--r--test/CodeGen/Mips/llvm-ir/ret.ll4
-rw-r--r--test/CodeGen/Mips/llvm-ir/sdiv.ll144
-rw-r--r--test/CodeGen/Mips/llvm-ir/select.ll712
-rw-r--r--test/CodeGen/Mips/llvm-ir/shl.ll200
-rw-r--r--test/CodeGen/Mips/llvm-ir/srem.ll139
-rw-r--r--test/CodeGen/Mips/llvm-ir/sub.ll122
-rw-r--r--test/CodeGen/Mips/llvm-ir/udiv.ll116
-rw-r--r--test/CodeGen/Mips/llvm-ir/urem.ll155
-rw-r--r--test/CodeGen/Mips/llvm-ir/xor.ll99
-rw-r--r--test/CodeGen/Mips/load-store-left-right.ll16
-rw-r--r--test/CodeGen/Mips/longbranch.ll11
-rw-r--r--test/CodeGen/Mips/mbrsize4a.ll2
-rw-r--r--test/CodeGen/Mips/micromips-and16.ll18
-rw-r--r--test/CodeGen/Mips/micromips-atomic.ll2
-rw-r--r--test/CodeGen/Mips/micromips-atomic1.ll29
-rw-r--r--test/CodeGen/Mips/micromips-compact-branches.ll19
-rw-r--r--test/CodeGen/Mips/micromips-compact-jump.ll11
-rw-r--r--test/CodeGen/Mips/micromips-delay-slot-jr.ll46
-rw-r--r--test/CodeGen/Mips/micromips-delay-slot.ll14
-rw-r--r--test/CodeGen/Mips/micromips-li.ll18
-rw-r--r--test/CodeGen/Mips/micromips-or16.ll18
-rw-r--r--test/CodeGen/Mips/micromips-sw-lw-16.ll27
-rw-r--r--test/CodeGen/Mips/micromips-xor16.ll18
-rw-r--r--test/CodeGen/Mips/mips64-sret.ll2
-rw-r--r--test/CodeGen/Mips/mips64directive.ll4
-rw-r--r--test/CodeGen/Mips/mips64ext.ll4
-rw-r--r--test/CodeGen/Mips/mips64extins.ll2
-rw-r--r--test/CodeGen/Mips/mips64fpimm0.ll4
-rw-r--r--test/CodeGen/Mips/mips64fpldst.ll8
-rw-r--r--test/CodeGen/Mips/mips64intldst.ll8
-rw-r--r--test/CodeGen/Mips/mips64sinttofpsf.ll15
-rw-r--r--test/CodeGen/Mips/named-register-n32.ll18
-rw-r--r--test/CodeGen/Mips/named-register-n64.ll17
-rw-r--r--test/CodeGen/Mips/named-register-o32.ll17
-rw-r--r--test/CodeGen/Mips/no-odd-spreg-msa.ll131
-rw-r--r--test/CodeGen/Mips/octeon.ll152
-rw-r--r--test/CodeGen/Mips/powif64_16.ll8
-rw-r--r--test/CodeGen/Mips/remat-immed-load.ll4
-rw-r--r--test/CodeGen/Mips/start-asm-file.ll16
-rw-r--r--test/CodeGen/NVPTX/annotations.ll29
-rw-r--r--test/CodeGen/NVPTX/bug21465.ll2
-rw-r--r--test/CodeGen/NVPTX/bug22246.ll14
-rw-r--r--test/CodeGen/NVPTX/bug22322.ll62
-rw-r--r--test/CodeGen/NVPTX/call-with-alloca-buffer.ll2
-rw-r--r--test/CodeGen/NVPTX/calling-conv.ll2
-rw-r--r--test/CodeGen/NVPTX/fma-assoc.ll25
-rw-r--r--test/CodeGen/NVPTX/fma.ll25
-rw-r--r--test/CodeGen/NVPTX/generic-to-nvvm.ll2
-rw-r--r--test/CodeGen/NVPTX/i1-global.ll2
-rw-r--r--test/CodeGen/NVPTX/i1-param.ll2
-rw-r--r--test/CodeGen/NVPTX/managed.ll2
-rw-r--r--test/CodeGen/NVPTX/noduplicate-syncthreads.ll4
-rw-r--r--test/CodeGen/NVPTX/nounroll.ll37
-rw-r--r--test/CodeGen/NVPTX/nvcl-param-align.ll16
-rw-r--r--test/CodeGen/NVPTX/refl1.ll2
-rw-r--r--test/CodeGen/NVPTX/simple-call.ll2
-rw-r--r--test/CodeGen/NVPTX/surf-read-cuda.ll6
-rw-r--r--test/CodeGen/NVPTX/surf-read.ll4
-rw-r--r--test/CodeGen/NVPTX/surf-write-cuda.ll6
-rw-r--r--test/CodeGen/NVPTX/surf-write.ll4
-rw-r--r--test/CodeGen/NVPTX/tex-read-cuda.ll6
-rw-r--r--test/CodeGen/NVPTX/tex-read.ll6
-rw-r--r--test/CodeGen/NVPTX/texsurf-queries.ll4
-rw-r--r--test/CodeGen/NVPTX/vector-global.ll9
-rw-r--r--test/CodeGen/NVPTX/weak-linkage.ll8
-rw-r--r--test/CodeGen/PowerPC/2007-03-24-cntlzd.ll10
-rw-r--r--test/CodeGen/PowerPC/2011-12-05-NoSpillDupCR.ll2
-rw-r--r--test/CodeGen/PowerPC/2011-12-06-SpillAndRestoreCR.ll2
-rw-r--r--test/CodeGen/PowerPC/Frames-large.ll9
-rw-r--r--test/CodeGen/PowerPC/aa-tbaa.ll6
-rw-r--r--test/CodeGen/PowerPC/add-fi.ll24
-rw-r--r--test/CodeGen/PowerPC/addi-licm.ll64
-rw-r--r--test/CodeGen/PowerPC/arr-fp-arg-no-copy.ll23
-rw-r--r--test/CodeGen/PowerPC/asm-Zy.ll2
-rw-r--r--test/CodeGen/PowerPC/asm-constraints.ll13
-rw-r--r--test/CodeGen/PowerPC/bperm.ll279
-rw-r--r--test/CodeGen/PowerPC/cc.ll2
-rw-r--r--test/CodeGen/PowerPC/cmpb-ppc32.ll50
-rw-r--r--test/CodeGen/PowerPC/cmpb.ll204
-rw-r--r--test/CodeGen/PowerPC/code-align.ll110
-rw-r--r--test/CodeGen/PowerPC/constants-i64.ll84
-rw-r--r--test/CodeGen/PowerPC/crsave.ll4
-rw-r--r--test/CodeGen/PowerPC/ctrloops.ll25
-rw-r--r--test/CodeGen/PowerPC/dbg.ll44
-rw-r--r--test/CodeGen/PowerPC/early-ret2.ll2
-rw-r--r--test/CodeGen/PowerPC/fast-isel-const.ll27
-rw-r--r--test/CodeGen/PowerPC/fdiv-combine.ll39
-rw-r--r--test/CodeGen/PowerPC/flt-preinc.ll40
-rw-r--r--test/CodeGen/PowerPC/fma-assoc.ll79
-rw-r--r--test/CodeGen/PowerPC/fma-ext.ll93
-rw-r--r--test/CodeGen/PowerPC/fp-to-int-ext.ll69
-rw-r--r--test/CodeGen/PowerPC/fp-to-int-to-fp.ll70
-rw-r--r--test/CodeGen/PowerPC/glob-comp-aa-crash.ll14
-rw-r--r--test/CodeGen/PowerPC/i1-ext-fold.ll54
-rw-r--r--test/CodeGen/PowerPC/ia-mem-r0.ll94
-rw-r--r--test/CodeGen/PowerPC/ia-neg-const.ll25
-rw-r--r--test/CodeGen/PowerPC/in-asm-f64-reg.ll2
-rw-r--r--test/CodeGen/PowerPC/inlineasm-i64-reg.ll2
-rw-r--r--test/CodeGen/PowerPC/lbz-from-ld-shift.ll18
-rw-r--r--test/CodeGen/PowerPC/ld-st-upd.ll19
-rw-r--r--test/CodeGen/PowerPC/ldtoc-inv.ll39
-rw-r--r--test/CodeGen/PowerPC/loop-data-prefetch.ll29
-rw-r--r--test/CodeGen/PowerPC/mult-alt-generic-powerpc.ll2
-rw-r--r--test/CodeGen/PowerPC/mult-alt-generic-powerpc64.ll2
-rw-r--r--test/CodeGen/PowerPC/named-reg-alloc-r0.ll2
-rw-r--r--test/CodeGen/PowerPC/named-reg-alloc-r1-64.ll2
-rw-r--r--test/CodeGen/PowerPC/named-reg-alloc-r1.ll2
-rw-r--r--test/CodeGen/PowerPC/named-reg-alloc-r13-64.ll2
-rw-r--r--test/CodeGen/PowerPC/named-reg-alloc-r13.ll2
-rw-r--r--test/CodeGen/PowerPC/named-reg-alloc-r2-64.ll11
-rw-r--r--test/CodeGen/PowerPC/named-reg-alloc-r2.ll8
-rw-r--r--test/CodeGen/PowerPC/no-extra-fp-conv-ldst.ll96
-rw-r--r--test/CodeGen/PowerPC/no-pref-jumps.ll36
-rw-r--r--test/CodeGen/PowerPC/p8-isel-sched.ll33
-rw-r--r--test/CodeGen/PowerPC/post-ra-ec.ll47
-rw-r--r--test/CodeGen/PowerPC/ppc32-cyclecounter.ll20
-rw-r--r--test/CodeGen/PowerPC/ppc32-lshrti3.ll2
-rw-r--r--test/CodeGen/PowerPC/ppc32-pic-large.ll12
-rw-r--r--test/CodeGen/PowerPC/ppc32-pic.ll16
-rw-r--r--test/CodeGen/PowerPC/ppc64-anyregcc-crash.ll19
-rw-r--r--test/CodeGen/PowerPC/ppc64-anyregcc.ll367
-rw-r--r--test/CodeGen/PowerPC/ppc64-calls.ll19
-rw-r--r--test/CodeGen/PowerPC/ppc64-elf-abi.ll8
-rw-r--r--test/CodeGen/PowerPC/ppc64-fastcc-fast-isel.ll56
-rw-r--r--test/CodeGen/PowerPC/ppc64-fastcc.ll540
-rw-r--r--test/CodeGen/PowerPC/ppc64-func-desc-hoist.ll47
-rw-r--r--test/CodeGen/PowerPC/ppc64-gep-opt.ll157
-rw-r--r--test/CodeGen/PowerPC/ppc64-icbt-pwr7.ll19
-rw-r--r--test/CodeGen/PowerPC/ppc64-icbt-pwr8.ll16
-rw-r--r--test/CodeGen/PowerPC/ppc64-nonfunc-calls.ll69
-rw-r--r--test/CodeGen/PowerPC/ppc64-patchpoint.ll97
-rw-r--r--test/CodeGen/PowerPC/ppc64-r2-alloc.ll81
-rw-r--r--test/CodeGen/PowerPC/ppc64-stackmap-nops.ll24
-rw-r--r--test/CodeGen/PowerPC/ppc64-stackmap.ll289
-rw-r--r--test/CodeGen/PowerPC/ppc64-vaarg-int.ll2
-rw-r--r--test/CodeGen/PowerPC/ppc64le-aggregates.ll49
-rw-r--r--test/CodeGen/PowerPC/ppc64le-calls.ll4
-rw-r--r--test/CodeGen/PowerPC/ppc64le-localentry.ll5
-rw-r--r--test/CodeGen/PowerPC/ppcf128-endian.ll2
-rw-r--r--test/CodeGen/PowerPC/pr17168.ll932
-rw-r--r--test/CodeGen/PowerPC/preincprep-invoke.ll50
-rw-r--r--test/CodeGen/PowerPC/qpx-bv-sint.ll33
-rw-r--r--test/CodeGen/PowerPC/qpx-bv.ll37
-rw-r--r--test/CodeGen/PowerPC/qpx-func-clobber.ll22
-rw-r--r--test/CodeGen/PowerPC/qpx-load.ll26
-rw-r--r--test/CodeGen/PowerPC/qpx-recipest.ll194
-rw-r--r--test/CodeGen/PowerPC/qpx-rounding-ops.ll109
-rw-r--r--test/CodeGen/PowerPC/qpx-s-load.ll26
-rw-r--r--test/CodeGen/PowerPC/qpx-s-sel.ll144
-rw-r--r--test/CodeGen/PowerPC/qpx-s-store.ll25
-rw-r--r--test/CodeGen/PowerPC/qpx-sel.ll152
-rw-r--r--test/CodeGen/PowerPC/qpx-store.ll25
-rw-r--r--test/CodeGen/PowerPC/qpx-unalperm.ll64
-rw-r--r--test/CodeGen/PowerPC/retaddr2.ll24
-rw-r--r--test/CodeGen/PowerPC/rlwimi-and.ll6
-rw-r--r--test/CodeGen/PowerPC/rlwimi2.ll4
-rw-r--r--test/CodeGen/PowerPC/rm-zext.ll89
-rw-r--r--test/CodeGen/PowerPC/sdiv-pow2.ll67
-rw-r--r--test/CodeGen/PowerPC/stack-realign.ll8
-rw-r--r--test/CodeGen/PowerPC/subreg-postra-2.ll175
-rw-r--r--test/CodeGen/PowerPC/subreg-postra.ll168
-rw-r--r--test/CodeGen/PowerPC/tls-cse.ll52
-rw-r--r--test/CodeGen/PowerPC/tls-pic.ll16
-rw-r--r--test/CodeGen/PowerPC/tls-store2.ll11
-rw-r--r--test/CodeGen/PowerPC/toc-load-sched-bug.ll96
-rw-r--r--test/CodeGen/PowerPC/unwind-dw2-g.ll24
-rw-r--r--test/CodeGen/PowerPC/vec-abi-align.ll32
-rw-r--r--test/CodeGen/PowerPC/vec_clz.ll40
-rw-r--r--test/CodeGen/PowerPC/vec_misaligned.ll2
-rw-r--r--test/CodeGen/PowerPC/vec_popcnt.ll72
-rw-r--r--test/CodeGen/PowerPC/vec_shuffle_le.ll2
-rw-r--r--test/CodeGen/PowerPC/vec_veqv_vnand_vorc.ll29
-rw-r--r--test/CodeGen/PowerPC/vsel-prom.ll23
-rw-r--r--test/CodeGen/PowerPC/vsx-args.ll1
-rw-r--r--test/CodeGen/PowerPC/vsx-fma-m.ll112
-rw-r--r--test/CodeGen/PowerPC/vsx-infl-copy1.ll133
-rw-r--r--test/CodeGen/PowerPC/vsx-infl-copy2.ll114
-rw-r--r--test/CodeGen/PowerPC/vsx-ldst-builtin-le.ll172
-rw-r--r--test/CodeGen/PowerPC/vsx-ldst.ll10
-rw-r--r--test/CodeGen/PowerPC/vsx-p8.ll25
-rw-r--r--test/CodeGen/PowerPC/vsx-self-copy.ll1
-rw-r--r--test/CodeGen/PowerPC/vsx-spill-norwstore.ll63
-rw-r--r--test/CodeGen/PowerPC/vsx-spill.ll31
-rw-r--r--test/CodeGen/PowerPC/vsx.ll501
-rw-r--r--test/CodeGen/PowerPC/vsx_insert_extract_le.ll52
-rw-r--r--test/CodeGen/PowerPC/vsx_shuffle_le.ll207
-rw-r--r--test/CodeGen/PowerPC/xxleqv_xxlnand_xxlorc.ll52
-rw-r--r--test/CodeGen/PowerPC/zext-free.ll37
-rw-r--r--test/CodeGen/R600/128bit-kernel-args.ll33
-rw-r--r--test/CodeGen/R600/32-bit-local-address-space.ll5
-rw-r--r--test/CodeGen/R600/64bit-kernel-args.ll13
-rw-r--r--test/CodeGen/R600/add-debug.ll3
-rw-r--r--test/CodeGen/R600/add.ll143
-rw-r--r--test/CodeGen/R600/add_i64.ll2
-rw-r--r--test/CodeGen/R600/address-space.ll6
-rw-r--r--test/CodeGen/R600/and.ll154
-rw-r--r--test/CodeGen/R600/anyext.ll3
-rw-r--r--test/CodeGen/R600/array-ptr-calc-i32.ll4
-rw-r--r--test/CodeGen/R600/array-ptr-calc-i64.ll2
-rw-r--r--test/CodeGen/R600/atomic_cmp_swap_local.ll85
-rw-r--r--test/CodeGen/R600/atomic_load_add.ll3
-rw-r--r--test/CodeGen/R600/atomic_load_sub.ll3
-rw-r--r--test/CodeGen/R600/basic-branch.ll3
-rw-r--r--test/CodeGen/R600/basic-loop.ll4
-rw-r--r--test/CodeGen/R600/bfi_int.ll31
-rw-r--r--test/CodeGen/R600/bitcast.ll3
-rw-r--r--test/CodeGen/R600/bswap.ll3
-rw-r--r--test/CodeGen/R600/build_vector.ll45
-rw-r--r--test/CodeGen/R600/call.ll3
-rw-r--r--test/CodeGen/R600/call_fs.ll16
-rw-r--r--test/CodeGen/R600/cf_end.ll10
-rw-r--r--test/CodeGen/R600/codegen-prepare-addrmode-sext.ll2
-rw-r--r--test/CodeGen/R600/commute_modifiers.ll14
-rw-r--r--test/CodeGen/R600/concat_vectors.ll3
-rw-r--r--test/CodeGen/R600/copy-illegal-type.ll3
-rw-r--r--test/CodeGen/R600/copy-to-reg.ll3
-rw-r--r--test/CodeGen/R600/ctlz_zero_undef.ll3
-rw-r--r--test/CodeGen/R600/ctpop.ll177
-rw-r--r--test/CodeGen/R600/ctpop64.ll77
-rw-r--r--test/CodeGen/R600/cttz_zero_undef.ll3
-rw-r--r--test/CodeGen/R600/cvt_f32_ubyte.ll39
-rw-r--r--test/CodeGen/R600/cvt_flr_i32_f32.ll86
-rw-r--r--test/CodeGen/R600/cvt_rpi_i32_f32.ll83
-rw-r--r--test/CodeGen/R600/default-fp-mode.ll21
-rw-r--r--test/CodeGen/R600/ds-negative-offset-addressing-mode-loop.ll4
-rw-r--r--test/CodeGen/R600/ds_read2.ll2
-rw-r--r--test/CodeGen/R600/ds_read2_offset_order.ll45
-rw-r--r--test/CodeGen/R600/ds_read2st64.ll6
-rw-r--r--test/CodeGen/R600/ds_write2.ll28
-rw-r--r--test/CodeGen/R600/ds_write2st64.ll16
-rw-r--r--test/CodeGen/R600/elf.ll26
-rw-r--r--test/CodeGen/R600/elf.r600.ll18
-rw-r--r--test/CodeGen/R600/empty-function.ll3
-rw-r--r--test/CodeGen/R600/endcf-loop-header.ll34
-rw-r--r--test/CodeGen/R600/extload-private.ll46
-rw-r--r--test/CodeGen/R600/extload.ll94
-rw-r--r--test/CodeGen/R600/extract_vector_elt_i16.ll3
-rw-r--r--test/CodeGen/R600/fabs.f64.ll2
-rw-r--r--test/CodeGen/R600/fabs.ll35
-rw-r--r--test/CodeGen/R600/fadd.ll3
-rw-r--r--test/CodeGen/R600/fadd64.ll3
-rw-r--r--test/CodeGen/R600/fceil.ll3
-rw-r--r--test/CodeGen/R600/fceil64.ll20
-rw-r--r--test/CodeGen/R600/fcmp64.ll13
-rw-r--r--test/CodeGen/R600/fconst64.ll3
-rw-r--r--test/CodeGen/R600/fcopysign.f32.ll21
-rw-r--r--test/CodeGen/R600/fcopysign.f64.ll23
-rw-r--r--test/CodeGen/R600/fdiv.f64.ll96
-rw-r--r--test/CodeGen/R600/fdiv.ll3
-rw-r--r--test/CodeGen/R600/fdiv64.ll14
-rw-r--r--test/CodeGen/R600/ffloor.f64.ll106
-rw-r--r--test/CodeGen/R600/ffloor.ll131
-rw-r--r--test/CodeGen/R600/flat-address-space.ll6
-rw-r--r--test/CodeGen/R600/floor.ll7
-rw-r--r--test/CodeGen/R600/fma-combine.ll368
-rw-r--r--test/CodeGen/R600/fma.f64.ll3
-rw-r--r--test/CodeGen/R600/fma.ll2
-rw-r--r--test/CodeGen/R600/fmax3.ll9
-rw-r--r--test/CodeGen/R600/fmax_legacy.f64.ll67
-rw-r--r--test/CodeGen/R600/fmax_legacy.ll51
-rw-r--r--test/CodeGen/R600/fmaxnum.f64.ll3
-rw-r--r--test/CodeGen/R600/fmaxnum.ll3
-rw-r--r--test/CodeGen/R600/fmin3.ll10
-rw-r--r--test/CodeGen/R600/fmin_legacy.f64.ll77
-rw-r--r--test/CodeGen/R600/fmin_legacy.ll51
-rw-r--r--test/CodeGen/R600/fminnum.f64.ll3
-rw-r--r--test/CodeGen/R600/fminnum.ll3
-rw-r--r--test/CodeGen/R600/fmul.ll3
-rw-r--r--test/CodeGen/R600/fmul64.ll3
-rw-r--r--test/CodeGen/R600/fmuladd.ll18
-rw-r--r--test/CodeGen/R600/fnearbyint.ll5
-rw-r--r--test/CodeGen/R600/fneg-fabs.f64.ll9
-rw-r--r--test/CodeGen/R600/fneg-fabs.ll3
-rw-r--r--test/CodeGen/R600/fneg.f64.ll27
-rw-r--r--test/CodeGen/R600/fneg.ll24
-rw-r--r--test/CodeGen/R600/fp-classify.ll131
-rw-r--r--test/CodeGen/R600/fp16_to_fp.ll3
-rw-r--r--test/CodeGen/R600/fp32_to_fp16.ll3
-rw-r--r--test/CodeGen/R600/fp_to_sint.f64.ll4
-rw-r--r--test/CodeGen/R600/fp_to_sint.ll18
-rw-r--r--test/CodeGen/R600/fp_to_uint.f64.ll4
-rw-r--r--test/CodeGen/R600/fp_to_uint.ll32
-rw-r--r--test/CodeGen/R600/fpext.ll44
-rw-r--r--test/CodeGen/R600/fptrunc.ll44
-rw-r--r--test/CodeGen/R600/frem.ll63
-rw-r--r--test/CodeGen/R600/fsqrt.ll7
-rw-r--r--test/CodeGen/R600/fsub.ll3
-rw-r--r--test/CodeGen/R600/fsub64.ll107
-rw-r--r--test/CodeGen/R600/ftrunc.f64.ll9
-rw-r--r--test/CodeGen/R600/ftrunc.ll3
-rw-r--r--test/CodeGen/R600/gep-address-space.ll5
-rw-r--r--test/CodeGen/R600/global-directive.ll3
-rw-r--r--test/CodeGen/R600/global-extload-i1.ll302
-rw-r--r--test/CodeGen/R600/global-extload-i16.ll302
-rw-r--r--test/CodeGen/R600/global-extload-i32.ll457
-rw-r--r--test/CodeGen/R600/global-extload-i8.ll299
-rw-r--r--test/CodeGen/R600/global-zero-initializer.ll3
-rw-r--r--test/CodeGen/R600/global_atomics.ll82
-rw-r--r--test/CodeGen/R600/gv-const-addrspace-fail.ll2
-rw-r--r--test/CodeGen/R600/gv-const-addrspace.ll8
-rw-r--r--test/CodeGen/R600/half.ll3
-rw-r--r--test/CodeGen/R600/hsa.ll14
-rw-r--r--test/CodeGen/R600/i1-copy-implicit-def.ll3
-rw-r--r--test/CodeGen/R600/i1-copy-phi.ll3
-rw-r--r--test/CodeGen/R600/icmp64.ll3
-rw-r--r--test/CodeGen/R600/imm.ll488
-rw-r--r--test/CodeGen/R600/indirect-addressing-si.ll3
-rw-r--r--test/CodeGen/R600/indirect-private-64.ll6
-rw-r--r--test/CodeGen/R600/infinite-loop.ll3
-rw-r--r--test/CodeGen/R600/inline-asm.ll12
-rw-r--r--test/CodeGen/R600/inline-calls.ll3
-rw-r--r--test/CodeGen/R600/input-mods.ll18
-rw-r--r--test/CodeGen/R600/insert_subreg.ll3
-rw-r--r--test/CodeGen/R600/insert_vector_elt.ll3
-rw-r--r--test/CodeGen/R600/kernel-args.ll540
-rw-r--r--test/CodeGen/R600/large-alloca.ll3
-rw-r--r--test/CodeGen/R600/large-constant-initializer.ll3
-rw-r--r--test/CodeGen/R600/lds-initializer.ll3
-rw-r--r--test/CodeGen/R600/lds-zero-initializer.ll3
-rw-r--r--test/CodeGen/R600/llvm.AMDGPU.abs.ll3
-rw-r--r--test/CodeGen/R600/llvm.AMDGPU.barrier.global.ll4
-rw-r--r--test/CodeGen/R600/llvm.AMDGPU.barrier.local.ll5
-rw-r--r--test/CodeGen/R600/llvm.AMDGPU.bfe.i32.ll3
-rw-r--r--test/CodeGen/R600/llvm.AMDGPU.bfe.u32.ll3
-rw-r--r--test/CodeGen/R600/llvm.AMDGPU.bfi.ll3
-rw-r--r--test/CodeGen/R600/llvm.AMDGPU.bfm.ll3
-rw-r--r--test/CodeGen/R600/llvm.AMDGPU.brev.ll3
-rw-r--r--test/CodeGen/R600/llvm.AMDGPU.clamp.ll3
-rw-r--r--test/CodeGen/R600/llvm.AMDGPU.class.ll497
-rw-r--r--test/CodeGen/R600/llvm.AMDGPU.cvt_f32_ubyte.ll3
-rw-r--r--test/CodeGen/R600/llvm.AMDGPU.div_fixup.ll22
-rw-r--r--test/CodeGen/R600/llvm.AMDGPU.div_fmas.ll166
-rw-r--r--test/CodeGen/R600/llvm.AMDGPU.div_scale.ll87
-rw-r--r--test/CodeGen/R600/llvm.AMDGPU.fract.ll3
-rw-r--r--test/CodeGen/R600/llvm.AMDGPU.imad24.ll3
-rw-r--r--test/CodeGen/R600/llvm.AMDGPU.imax.ll5
-rw-r--r--test/CodeGen/R600/llvm.AMDGPU.imin.ll5
-rw-r--r--test/CodeGen/R600/llvm.AMDGPU.imul24.ll3
-rw-r--r--test/CodeGen/R600/llvm.AMDGPU.kill.ll5
-rw-r--r--test/CodeGen/R600/llvm.AMDGPU.ldexp.ll3
-rw-r--r--test/CodeGen/R600/llvm.AMDGPU.legacy.rsq.ll2
-rw-r--r--test/CodeGen/R600/llvm.AMDGPU.rcp.f64.ll5
-rw-r--r--test/CodeGen/R600/llvm.AMDGPU.rcp.ll9
-rw-r--r--test/CodeGen/R600/llvm.AMDGPU.rsq.clamped.f64.ll14
-rw-r--r--test/CodeGen/R600/llvm.AMDGPU.rsq.clamped.ll11
-rw-r--r--test/CodeGen/R600/llvm.AMDGPU.rsq.ll3
-rw-r--r--test/CodeGen/R600/llvm.AMDGPU.trig_preop.ll3
-rw-r--r--test/CodeGen/R600/llvm.AMDGPU.trunc.ll13
-rw-r--r--test/CodeGen/R600/llvm.AMDGPU.umad24.ll4
-rw-r--r--test/CodeGen/R600/llvm.AMDGPU.umax.ll5
-rw-r--r--test/CodeGen/R600/llvm.AMDGPU.umin.ll5
-rw-r--r--test/CodeGen/R600/llvm.AMDGPU.umul24.ll3
-rw-r--r--test/CodeGen/R600/llvm.SI.fs.interp.constant.ll21
-rw-r--r--test/CodeGen/R600/llvm.SI.fs.interp.ll30
-rw-r--r--test/CodeGen/R600/llvm.SI.gather4.ll3
-rw-r--r--test/CodeGen/R600/llvm.SI.getlod.ll3
-rw-r--r--test/CodeGen/R600/llvm.SI.image.ll3
-rw-r--r--test/CodeGen/R600/llvm.SI.image.sample.ll23
-rw-r--r--test/CodeGen/R600/llvm.SI.image.sample.o.ll23
-rw-r--r--test/CodeGen/R600/llvm.SI.imageload.ll9
-rw-r--r--test/CodeGen/R600/llvm.SI.load.dword.ll53
-rw-r--r--test/CodeGen/R600/llvm.SI.resinfo.ll3
-rw-r--r--test/CodeGen/R600/llvm.SI.sample-masked.ll3
-rw-r--r--test/CodeGen/R600/llvm.SI.sample.ll3
-rw-r--r--test/CodeGen/R600/llvm.SI.sampled.ll3
-rw-r--r--test/CodeGen/R600/llvm.SI.sendmsg-m0.ll20
-rw-r--r--test/CodeGen/R600/llvm.SI.sendmsg.ll3
-rw-r--r--test/CodeGen/R600/llvm.SI.tbuffer.store.ll3
-rw-r--r--test/CodeGen/R600/llvm.SI.tid.ll8
-rw-r--r--test/CodeGen/R600/llvm.amdgpu.kilp.ll5
-rw-r--r--test/CodeGen/R600/llvm.amdgpu.lrp.ll3
-rw-r--r--test/CodeGen/R600/llvm.cos.ll3
-rw-r--r--test/CodeGen/R600/llvm.exp2.ll91
-rw-r--r--test/CodeGen/R600/llvm.floor.ll54
-rw-r--r--test/CodeGen/R600/llvm.log2.ll91
-rw-r--r--test/CodeGen/R600/llvm.memcpy.ll37
-rw-r--r--test/CodeGen/R600/llvm.rint.f64.ll5
-rw-r--r--test/CodeGen/R600/llvm.rint.ll3
-rw-r--r--test/CodeGen/R600/llvm.round.f64.ll74
-rw-r--r--test/CodeGen/R600/llvm.round.ll78
-rw-r--r--test/CodeGen/R600/llvm.sin.ll6
-rw-r--r--test/CodeGen/R600/llvm.sqrt.ll59
-rw-r--r--test/CodeGen/R600/llvm.trunc.ll13
-rw-r--r--test/CodeGen/R600/load-i1.ll74
-rw-r--r--test/CodeGen/R600/load.ll597
-rw-r--r--test/CodeGen/R600/load.vec.ll21
-rw-r--r--test/CodeGen/R600/load64.ll3
-rw-r--r--test/CodeGen/R600/local-64.ll53
-rw-r--r--test/CodeGen/R600/local-atomics.ll271
-rw-r--r--test/CodeGen/R600/local-atomics64.ll237
-rw-r--r--test/CodeGen/R600/local-memory-two-objects.ll38
-rw-r--r--test/CodeGen/R600/local-memory.ll8
-rw-r--r--test/CodeGen/R600/loop-address.ll8
-rw-r--r--test/CodeGen/R600/loop-idiom.ll3
-rw-r--r--test/CodeGen/R600/lshl.ll3
-rw-r--r--test/CodeGen/R600/lshr.ll3
-rw-r--r--test/CodeGen/R600/m0-spill.ll3
-rw-r--r--test/CodeGen/R600/mad-combine.ll567
-rw-r--r--test/CodeGen/R600/mad-sub.ll6
-rw-r--r--test/CodeGen/R600/mad_int24.ll3
-rw-r--r--test/CodeGen/R600/mad_uint24.ll3
-rw-r--r--test/CodeGen/R600/madak.ll193
-rw-r--r--test/CodeGen/R600/madmk.ll181
-rw-r--r--test/CodeGen/R600/max.ll2
-rw-r--r--test/CodeGen/R600/max3.ll2
-rw-r--r--test/CodeGen/R600/min.ll23
-rw-r--r--test/CodeGen/R600/min3.ll2
-rw-r--r--test/CodeGen/R600/missing-store.ll2
-rw-r--r--test/CodeGen/R600/mubuf.ll73
-rw-r--r--test/CodeGen/R600/mul.ll3
-rw-r--r--test/CodeGen/R600/mul_int24.ll3
-rw-r--r--test/CodeGen/R600/mul_uint24.ll3
-rw-r--r--test/CodeGen/R600/mulhu.ll5
-rw-r--r--test/CodeGen/R600/no-initializer-constant-addrspace.ll3
-rw-r--r--test/CodeGen/R600/no-shrink-extloads.ll191
-rw-r--r--test/CodeGen/R600/operand-folding.ll113
-rw-r--r--test/CodeGen/R600/operand-spacing.ll13
-rw-r--r--test/CodeGen/R600/or.ll59
-rw-r--r--test/CodeGen/R600/private-memory-atomics.ll3
-rw-r--r--test/CodeGen/R600/private-memory-broken.ll3
-rw-r--r--test/CodeGen/R600/private-memory.ll12
-rw-r--r--test/CodeGen/R600/r600-encoding.ll12
-rw-r--r--test/CodeGen/R600/register-count-comments.ll2
-rw-r--r--test/CodeGen/R600/reorder-stores.ll3
-rw-r--r--test/CodeGen/R600/rotl.i64.ll27
-rw-r--r--test/CodeGen/R600/rotl.ll3
-rw-r--r--test/CodeGen/R600/rotr.i64.ll27
-rw-r--r--test/CodeGen/R600/rotr.ll3
-rw-r--r--test/CodeGen/R600/rsq.ll40
-rw-r--r--test/CodeGen/R600/s_movk_i32.ll3
-rw-r--r--test/CodeGen/R600/saddo.ll3
-rw-r--r--test/CodeGen/R600/salu-to-valu.ll2
-rw-r--r--test/CodeGen/R600/scalar_to_vector.ll3
-rw-r--r--test/CodeGen/R600/schedule-global-loads.ll4
-rw-r--r--test/CodeGen/R600/schedule-kernel-arg-loads.ll10
-rw-r--r--test/CodeGen/R600/schedule-vs-if-nested-loop-failure.ll3
-rw-r--r--test/CodeGen/R600/scratch-buffer.ll87
-rw-r--r--test/CodeGen/R600/sdiv.ll5
-rw-r--r--test/CodeGen/R600/sdivrem24.ll3
-rw-r--r--test/CodeGen/R600/sdivrem64.ll225
-rw-r--r--test/CodeGen/R600/select-i1.ll3
-rw-r--r--test/CodeGen/R600/select-vectors.ll3
-rw-r--r--test/CodeGen/R600/select64.ll20
-rw-r--r--test/CodeGen/R600/selectcc-opt.ll3
-rw-r--r--test/CodeGen/R600/selectcc.ll3
-rw-r--r--test/CodeGen/R600/setcc-opt.ll237
-rw-r--r--test/CodeGen/R600/setcc.ll96
-rw-r--r--test/CodeGen/R600/setcc64.ll46
-rw-r--r--test/CodeGen/R600/seto.ll3
-rw-r--r--test/CodeGen/R600/setuo.ll3
-rw-r--r--test/CodeGen/R600/sext-in-reg.ll2
-rw-r--r--test/CodeGen/R600/sgpr-control-flow.ll43
-rw-r--r--test/CodeGen/R600/sgpr-copy-duplicate-operand.ll3
-rw-r--r--test/CodeGen/R600/sgpr-copy.ll11
-rw-r--r--test/CodeGen/R600/shl.ll228
-rw-r--r--test/CodeGen/R600/shl_add_constant.ll2
-rw-r--r--test/CodeGen/R600/shl_add_ptr.ll14
-rw-r--r--test/CodeGen/R600/si-annotate-cf-assertion.ll3
-rw-r--r--test/CodeGen/R600/si-lod-bias.ll7
-rw-r--r--test/CodeGen/R600/si-sgpr-spill.ll6
-rw-r--r--test/CodeGen/R600/si-triv-disjoint-mem-access.ll18
-rw-r--r--test/CodeGen/R600/si-vector-hang.ll21
-rw-r--r--test/CodeGen/R600/sign_extend.ll8
-rw-r--r--test/CodeGen/R600/simplify-demanded-bits-build-pair.ll3
-rw-r--r--test/CodeGen/R600/sint_to_fp.f64.ll15
-rw-r--r--test/CodeGen/R600/sint_to_fp.ll3
-rw-r--r--test/CodeGen/R600/smrd.ll58
-rw-r--r--test/CodeGen/R600/split-scalar-i64-add.ll2
-rw-r--r--test/CodeGen/R600/sra.ll271
-rw-r--r--test/CodeGen/R600/srem.ll64
-rw-r--r--test/CodeGen/R600/srl.ll279
-rw-r--r--test/CodeGen/R600/ssubo.ll3
-rw-r--r--test/CodeGen/R600/store-barrier.ll4
-rw-r--r--test/CodeGen/R600/store-v3i32.ll3
-rw-r--r--test/CodeGen/R600/store-v3i64.ll3
-rw-r--r--test/CodeGen/R600/store-vector-ptrs.ll3
-rw-r--r--test/CodeGen/R600/store.ll328
-rw-r--r--test/CodeGen/R600/store.r600.ll10
-rw-r--r--test/CodeGen/R600/sub.ll87
-rw-r--r--test/CodeGen/R600/subreg-coalescer-crash.ll109
-rw-r--r--test/CodeGen/R600/swizzle-export.ll18
-rw-r--r--test/CodeGen/R600/trunc-cmp-constant.ll170
-rw-r--r--test/CodeGen/R600/trunc-store-i1.ll3
-rw-r--r--test/CodeGen/R600/trunc.ll35
-rw-r--r--test/CodeGen/R600/tti-unroll-prefs.ll58
-rw-r--r--test/CodeGen/R600/uaddo.ll3
-rw-r--r--test/CodeGen/R600/udiv.ll27
-rw-r--r--test/CodeGen/R600/udivrem.ll55
-rw-r--r--test/CodeGen/R600/udivrem24.ll3
-rw-r--r--test/CodeGen/R600/udivrem64.ll149
-rw-r--r--test/CodeGen/R600/uint_to_fp.f64.ll111
-rw-r--r--test/CodeGen/R600/uint_to_fp.ll36
-rw-r--r--test/CodeGen/R600/unaligned-load-store.ll207
-rw-r--r--test/CodeGen/R600/unhandled-loop-condition-assertion.ll3
-rw-r--r--test/CodeGen/R600/urecip.ll3
-rw-r--r--test/CodeGen/R600/urem.ll100
-rw-r--r--test/CodeGen/R600/use-sgpr-multiple-times.ll73
-rw-r--r--test/CodeGen/R600/usubo.ll5
-rw-r--r--test/CodeGen/R600/v_cndmask.ll3
-rw-r--r--test/CodeGen/R600/valu-i1.ll153
-rw-r--r--test/CodeGen/R600/vector-alloca.ll6
-rw-r--r--test/CodeGen/R600/vertex-fetch-encoding.ll16
-rw-r--r--test/CodeGen/R600/vop-shrink.ll3
-rw-r--r--test/CodeGen/R600/vselect.ll59
-rw-r--r--test/CodeGen/R600/wait.ll12
-rw-r--r--test/CodeGen/R600/work-item-intrinsics.ll72
-rw-r--r--test/CodeGen/R600/wrong-transalu-pos-fix.ll6
-rw-r--r--test/CodeGen/R600/xor.ll108
-rw-r--r--test/CodeGen/R600/zero_extend.ll31
-rw-r--r--test/CodeGen/SPARC/2008-10-10-InlineAsmMemoryOperand.ll2
-rw-r--r--test/CodeGen/SPARC/float.ll4
-rw-r--r--test/CodeGen/SPARC/fp128.ll32
-rw-r--r--test/CodeGen/SPARC/inlineasm.ll2
-rw-r--r--test/CodeGen/SPARC/mult-alt-generic-sparc.ll2
-rw-r--r--test/CodeGen/SPARC/setjmp.ll10
-rw-r--r--test/CodeGen/SystemZ/alias-01.ll6
-rw-r--r--test/CodeGen/SystemZ/and-08.ll10
-rw-r--r--test/CodeGen/SystemZ/asm-01.ll2
-rw-r--r--test/CodeGen/SystemZ/asm-02.ll2
-rw-r--r--test/CodeGen/SystemZ/asm-03.ll2
-rw-r--r--test/CodeGen/SystemZ/asm-04.ll2
-rw-r--r--test/CodeGen/SystemZ/asm-05.ll2
-rw-r--r--test/CodeGen/SystemZ/asm-06.ll2
-rw-r--r--test/CodeGen/SystemZ/asm-07.ll2
-rw-r--r--test/CodeGen/SystemZ/asm-08.ll2
-rw-r--r--test/CodeGen/SystemZ/asm-09.ll2
-rw-r--r--test/CodeGen/SystemZ/asm-10.ll2
-rw-r--r--test/CodeGen/SystemZ/asm-11.ll2
-rw-r--r--test/CodeGen/SystemZ/asm-12.ll2
-rw-r--r--test/CodeGen/SystemZ/asm-13.ll2
-rw-r--r--test/CodeGen/SystemZ/asm-14.ll2
-rw-r--r--test/CodeGen/SystemZ/asm-15.ll2
-rw-r--r--test/CodeGen/SystemZ/asm-16.ll2
-rw-r--r--test/CodeGen/SystemZ/asm-17.ll2
-rw-r--r--test/CodeGen/SystemZ/asm-18.ll2
-rw-r--r--test/CodeGen/SystemZ/fp-cmp-04.ll2
-rw-r--r--test/CodeGen/SystemZ/int-cmp-44.ll2
-rw-r--r--test/CodeGen/SystemZ/int-cmp-45.ll2
-rw-r--r--test/CodeGen/SystemZ/memchr-02.ll2
-rw-r--r--test/CodeGen/SystemZ/memcpy-02.ll10
-rw-r--r--test/CodeGen/SystemZ/tls-01.ll6
-rw-r--r--test/CodeGen/SystemZ/tls-02.ll18
-rw-r--r--test/CodeGen/SystemZ/tls-03.ll23
-rw-r--r--test/CodeGen/SystemZ/tls-04.ll28
-rw-r--r--test/CodeGen/SystemZ/tls-05.ll15
-rw-r--r--test/CodeGen/SystemZ/tls-06.ll17
-rw-r--r--test/CodeGen/SystemZ/tls-07.ll16
-rw-r--r--test/CodeGen/Thumb/2010-07-15-debugOrdering.ll212
-rw-r--r--test/CodeGen/Thumb/fastcc.ll2
-rw-r--r--test/CodeGen/Thumb/iabs.ll2
-rw-r--r--test/CodeGen/Thumb/stack-access.ll74
-rw-r--r--test/CodeGen/Thumb/stm-merge.ll9
-rw-r--r--test/CodeGen/Thumb/vargs.ll16
-rw-r--r--test/CodeGen/Thumb2/aligned-spill.ll8
-rw-r--r--test/CodeGen/Thumb2/constant-islands-jump-table.ll47
-rw-r--r--test/CodeGen/Thumb2/constant-islands-new-island-padding.ll42
-rw-r--r--test/CodeGen/Thumb2/ifcvt-neon.ll6
-rw-r--r--test/CodeGen/Thumb2/thumb2-cmn.ll2
-rw-r--r--test/CodeGen/Thumb2/thumb2-spill-q.ll2
-rw-r--r--test/CodeGen/X86/2006-05-22-FPSetEQ.ll9
-rw-r--r--test/CodeGen/X86/2006-10-07-ScalarSSEMiscompile.ll15
-rw-r--r--test/CodeGen/X86/2007-04-25-MMX-PADDQ.ll64
-rw-r--r--test/CodeGen/X86/2007-06-15-IntToMMX.ll19
-rw-r--r--test/CodeGen/X86/2008-10-06-MMXISelBug.ll12
-rw-r--r--test/CodeGen/X86/2009-01-25-NoSSE.ll4
-rw-r--r--test/CodeGen/X86/2009-02-12-DebugInfoVLA.ll54
-rw-r--r--test/CodeGen/X86/2009-06-05-ScalarToVectorByteMMX.ll9
-rw-r--r--test/CodeGen/X86/2009-06-07-ExpandMMXBitcast.ll10
-rw-r--r--test/CodeGen/X86/2009-06-18-movlp-shuffle-register.ll2
-rw-r--r--test/CodeGen/X86/2009-08-02-mmx-scalar-to-vector.ll12
-rw-r--r--test/CodeGen/X86/2009-10-16-Scope.ll22
-rw-r--r--test/CodeGen/X86/2010-01-18-DbgValue.ll46
-rw-r--r--test/CodeGen/X86/2010-02-01-DbgValueCrash.ll36
-rw-r--r--test/CodeGen/X86/2010-02-11-NonTemporal.ll2
-rw-r--r--test/CodeGen/X86/2010-02-19-TailCallRetAddrBug.ll22
-rw-r--r--test/CodeGen/X86/2010-04-23-mmx-movdq2q.ll100
-rw-r--r--test/CodeGen/X86/2010-05-05-LocalAllocEarlyClobber.ll2
-rw-r--r--test/CodeGen/X86/2010-05-25-DotDebugLoc.ll142
-rw-r--r--test/CodeGen/X86/2010-05-26-DotDebugLoc.ll80
-rw-r--r--test/CodeGen/X86/2010-05-28-Crash.ll48
-rw-r--r--test/CodeGen/X86/2010-06-01-DeadArg-DbgInfo.ll74
-rw-r--r--test/CodeGen/X86/2010-06-15-FastAllocEarlyCLobber.ll2
-rw-r--r--test/CodeGen/X86/2010-06-25-asm-RA-crash.ll2
-rw-r--r--test/CodeGen/X86/2010-06-28-FastAllocTiedOperand.ll2
-rw-r--r--test/CodeGen/X86/2010-07-06-DbgCrash.ll36
-rw-r--r--test/CodeGen/X86/2010-08-04-StackVariable.ll110
-rw-r--r--test/CodeGen/X86/2010-09-16-EmptyFilename.ll36
-rw-r--r--test/CodeGen/X86/2010-09-16-asmcrash.ll2
-rw-r--r--test/CodeGen/X86/2010-11-02-DbgParameter.ll42
-rw-r--r--test/CodeGen/X86/2011-01-24-DbgValue-Before-Use.ll78
-rw-r--r--test/CodeGen/X86/2011-06-14-mmx-inlineasm.ll4
-rw-r--r--test/CodeGen/X86/2011-10-19-widen_vselect.ll2
-rw-r--r--test/CodeGen/X86/2011-11-30-or.ll14
-rw-r--r--test/CodeGen/X86/2012-01-16-mfence-nosse-flags.ll2
-rw-r--r--test/CodeGen/X86/2012-05-19-avx2-store.ll13
-rw-r--r--test/CodeGen/X86/2012-07-15-broadcastfold.ll1
-rw-r--r--test/CodeGen/X86/2012-11-30-handlemove-dbg.ll26
-rw-r--r--test/CodeGen/X86/2012-11-30-misched-dbg.ll68
-rw-r--r--test/CodeGen/X86/2012-11-30-regpres-dbg.ll22
-rw-r--r--test/CodeGen/X86/2013-10-14-FastISel-incorrect-vreg.ll6
-rw-r--r--test/CodeGen/X86/MachineBranchProb.ll2
-rw-r--r--test/CodeGen/X86/MachineSink-DbgValue.ll52
-rw-r--r--test/CodeGen/X86/MergeConsecutiveStores.ll133
-rw-r--r--test/CodeGen/X86/StackColoring-dbg.ll14
-rw-r--r--test/CodeGen/X86/SwizzleShuff.ll15
-rw-r--r--test/CodeGen/X86/asm-label.ll6
-rw-r--r--test/CodeGen/X86/atomic16.ll24
-rw-r--r--test/CodeGen/X86/avx-cvt.ll17
-rw-r--r--test/CodeGen/X86/avx-intrinsics-x86-upgrade.ll14
-rw-r--r--test/CodeGen/X86/avx-intrinsics-x86.ll32
-rw-r--r--test/CodeGen/X86/avx-splat.ll6
-rw-r--r--test/CodeGen/X86/avx-trunc.ll6
-rw-r--r--test/CodeGen/X86/avx-vperm2x128.ll19
-rw-r--r--test/CodeGen/X86/avx.ll2
-rw-r--r--test/CodeGen/X86/avx1-stack-reload-folding.ll68
-rw-r--r--test/CodeGen/X86/avx2-conversions.ll2
-rw-r--r--test/CodeGen/X86/avx2-gather.ll27
-rw-r--r--test/CodeGen/X86/avx2-intrinsics-x86-upgrade.ll31
-rw-r--r--test/CodeGen/X86/avx2-intrinsics-x86.ll32
-rw-r--r--test/CodeGen/X86/avx2-nontemporal.ll2
-rw-r--r--test/CodeGen/X86/avx2-pmovxrm-intrinsics.ll110
-rw-r--r--test/CodeGen/X86/avx2-vbroadcast.ll2
-rw-r--r--test/CodeGen/X86/avx512-arith.ll190
-rw-r--r--test/CodeGen/X86/avx512-fma-intrinsics.ll351
-rwxr-xr-xtest/CodeGen/X86/avx512-i1test.ll45
-rw-r--r--test/CodeGen/X86/avx512-insert-extract.ll19
-rw-r--r--test/CodeGen/X86/avx512-intel-ocl.ll105
-rw-r--r--test/CodeGen/X86/avx512-intrinsics.ll692
-rw-r--r--test/CodeGen/X86/avx512-logic.ll101
-rw-r--r--test/CodeGen/X86/avx512-mask-op.ll84
-rw-r--r--test/CodeGen/X86/avx512-nontemporal.ll2
-rw-r--r--test/CodeGen/X86/avx512-round.ll106
-rw-r--r--test/CodeGen/X86/avx512-vbroadcast.ll120
-rw-r--r--test/CodeGen/X86/avx512-vec-cmp.ll18
-rw-r--r--test/CodeGen/X86/avx512bw-arith.ll102
-rw-r--r--test/CodeGen/X86/avx512bw-intrinsics.ll186
-rw-r--r--test/CodeGen/X86/avx512bw-vec-cmp.ll8
-rw-r--r--test/CodeGen/X86/avx512bwvl-arith.ll206
-rw-r--r--test/CodeGen/X86/avx512bwvl-intrinsics.ll657
-rw-r--r--test/CodeGen/X86/avx512er-intrinsics.ll41
-rw-r--r--test/CodeGen/X86/avx512vl-arith.ll794
-rw-r--r--test/CodeGen/X86/avx512vl-intrinsics.ll555
-rw-r--r--test/CodeGen/X86/avx512vl-logic.ll137
-rw-r--r--test/CodeGen/X86/avx512vl-nontemporal.ll2
-rw-r--r--test/CodeGen/X86/avx512vl-vec-cmp.ll16
-rw-r--r--test/CodeGen/X86/barrier.ll3
-rw-r--r--test/CodeGen/X86/bitcast-mmx.ll77
-rw-r--r--test/CodeGen/X86/block-placement.ll6
-rw-r--r--test/CodeGen/X86/break-avx-dep.ll29
-rw-r--r--test/CodeGen/X86/break-false-dep.ll201
-rw-r--r--test/CodeGen/X86/break-sse-dep.ll62
-rw-r--r--test/CodeGen/X86/bswap-vector.ll366
-rw-r--r--test/CodeGen/X86/chain_order.ll16
-rw-r--r--test/CodeGen/X86/clobber-fi0.ll2
-rw-r--r--test/CodeGen/X86/cmov.ll2
-rw-r--r--test/CodeGen/X86/cmpxchg-clobber-flags.ll29
-rw-r--r--test/CodeGen/X86/coalesce_commute_subreg.ll51
-rw-r--r--test/CodeGen/X86/coalescer-dce.ll2
-rw-r--r--test/CodeGen/X86/codegen-prepare-extload.ll348
-rw-r--r--test/CodeGen/X86/coff-comdat.ll50
-rw-r--r--test/CodeGen/X86/coff-comdat2.ll2
-rw-r--r--test/CodeGen/X86/coff-comdat3.ll2
-rw-r--r--test/CodeGen/X86/combine-and.ll148
-rw-r--r--test/CodeGen/X86/combine-or.ll46
-rw-r--r--test/CodeGen/X86/commute-clmul.ll60
-rw-r--r--test/CodeGen/X86/commute-fcmp.ll340
-rw-r--r--test/CodeGen/X86/commute-xop.ll184
-rw-r--r--test/CodeGen/X86/compact-unwind.ll88
-rw-r--r--test/CodeGen/X86/constant-combines.ll35
-rw-r--r--test/CodeGen/X86/constant-hoisting-optnone.ll21
-rw-r--r--test/CodeGen/X86/copysign-constant-magnitude.ll105
-rw-r--r--test/CodeGen/X86/copysign-zero.ll14
-rw-r--r--test/CodeGen/X86/cppeh-catch-all.ll83
-rw-r--r--test/CodeGen/X86/cppeh-catch-scalar.ll123
-rw-r--r--test/CodeGen/X86/cppeh-frame-vars.ll261
-rw-r--r--test/CodeGen/X86/cpus.ll35
-rw-r--r--test/CodeGen/X86/crash-O0.ll22
-rw-r--r--test/CodeGen/X86/crash.ll4
-rw-r--r--test/CodeGen/X86/dbg-changes-codegen-branch-folding.ll214
-rw-r--r--test/CodeGen/X86/dbg-changes-codegen.ll14
-rw-r--r--test/CodeGen/X86/dbg-combine.ll113
-rw-r--r--test/CodeGen/X86/dllexport-x86_64.ll7
-rw-r--r--test/CodeGen/X86/dllexport.ll12
-rw-r--r--test/CodeGen/X86/dwarf-comp-dir.ll14
-rw-r--r--test/CodeGen/X86/dwarf-eh-prepare.ll51
-rw-r--r--test/CodeGen/X86/elf-comdat.ll4
-rw-r--r--test/CodeGen/X86/elf-comdat2.ll2
-rw-r--r--test/CodeGen/X86/equiv_with_fndef.ll10
-rw-r--r--test/CodeGen/X86/equiv_with_vardef.ll8
-rw-r--r--test/CodeGen/X86/extractelement-load.ll14
-rw-r--r--test/CodeGen/X86/f16c-intrinsics.ll17
-rw-r--r--test/CodeGen/X86/fast-isel-branch_weights.ll2
-rw-r--r--test/CodeGen/X86/fast-isel-call-bool.ll18
-rw-r--r--test/CodeGen/X86/fast-isel-cmp-branch.ll2
-rw-r--r--test/CodeGen/X86/fast-isel-double-half-convertion.ll23
-rw-r--r--test/CodeGen/X86/fast-isel-float-half-convertion.ll28
-rw-r--r--test/CodeGen/X86/fast-isel-fptrunc-fpext.ll65
-rw-r--r--test/CodeGen/X86/fast-isel-gep.ll2
-rw-r--r--test/CodeGen/X86/fast-isel-int-float-conversion.ll45
-rw-r--r--test/CodeGen/X86/fastmath-float-half-conversion.ll52
-rw-r--r--test/CodeGen/X86/float-conv-elim.ll32
-rw-r--r--test/CodeGen/X86/fold-load-unops.ll57
-rw-r--r--test/CodeGen/X86/fold-tied-op.ll168
-rw-r--r--test/CodeGen/X86/fold-vex.ll39
-rw-r--r--test/CodeGen/X86/force-align-stack-alloca.ll4
-rw-r--r--test/CodeGen/X86/fp-double-rounding.ll31
-rw-r--r--test/CodeGen/X86/fpstack-debuginstr-kill.ll54
-rw-r--r--test/CodeGen/X86/frameaddr.ll28
-rw-r--r--test/CodeGen/X86/frameallocate.ll43
-rw-r--r--test/CodeGen/X86/gather-addresses.ll83
-rw-r--r--test/CodeGen/X86/gcc_except_table.ll2
-rw-r--r--test/CodeGen/X86/ghc-cc.ll10
-rw-r--r--test/CodeGen/X86/ghc-cc64.ll10
-rw-r--r--test/CodeGen/X86/global-sections-comdat.ll46
-rw-r--r--test/CodeGen/X86/global-sections.ll92
-rw-r--r--test/CodeGen/X86/hoist-invariant-load.ll2
-rw-r--r--test/CodeGen/X86/huge-stack-offset.ll59
-rw-r--r--test/CodeGen/X86/i1narrowfail.ll10
-rw-r--r--test/CodeGen/X86/ident-metadata.ll4
-rw-r--r--test/CodeGen/X86/imul.ll110
-rw-r--r--test/CodeGen/X86/imul64-lea.ll25
-rw-r--r--test/CodeGen/X86/inalloca-ctor.ll8
-rw-r--r--test/CodeGen/X86/inalloca-invoke.ll4
-rw-r--r--test/CodeGen/X86/inalloca-stdcall.ll3
-rw-r--r--test/CodeGen/X86/init-priority.ll51
-rw-r--r--test/CodeGen/X86/inline-asm-flag-clobber.ll2
-rw-r--r--test/CodeGen/X86/insertps-O0-bug.ll52
-rw-r--r--test/CodeGen/X86/large-code-model-isel.ll13
-rw-r--r--test/CodeGen/X86/lea-2.ll2
-rw-r--r--test/CodeGen/X86/logical-load-fold.ll53
-rw-r--r--test/CodeGen/X86/lower-vec-shift-2.ll157
-rw-r--r--test/CodeGen/X86/lzcnt-tzcnt.ll131
-rw-r--r--test/CodeGen/X86/macho-comdat.ll2
-rw-r--r--test/CodeGen/X86/masked_memop.ll219
-rw-r--r--test/CodeGen/X86/mem-intrin-base-reg.ll2
-rw-r--r--test/CodeGen/X86/misched-code-difference-with-debug.ll90
-rw-r--r--test/CodeGen/X86/misched-copy.ll8
-rw-r--r--test/CodeGen/X86/misched-crash.ll2
-rw-r--r--test/CodeGen/X86/mmx-arg-passing-x86-64.ll56
-rw-r--r--test/CodeGen/X86/mmx-arg-passing.ll45
-rw-r--r--test/CodeGen/X86/mmx-arg-passing2.ll28
-rw-r--r--test/CodeGen/X86/mmx-arith.ll543
-rw-r--r--test/CodeGen/X86/mmx-bitcast-to-i64.ll31
-rw-r--r--test/CodeGen/X86/mmx-bitcast.ll109
-rw-r--r--test/CodeGen/X86/mmx-emms.ll11
-rw-r--r--test/CodeGen/X86/mmx-fold-load.ll282
-rw-r--r--test/CodeGen/X86/mmx-insert-element.ll9
-rw-r--r--test/CodeGen/X86/mmx-intrinsics.ll (renamed from test/CodeGen/X86/mmx-builtins.ll)9
-rw-r--r--test/CodeGen/X86/mmx-pinsrw.ll17
-rw-r--r--test/CodeGen/X86/mmx-punpckhdq.ll31
-rw-r--r--test/CodeGen/X86/mmx-s2v.ll15
-rw-r--r--test/CodeGen/X86/mmx-shift.ll39
-rw-r--r--test/CodeGen/X86/mmx-shuffle.ll31
-rw-r--r--test/CodeGen/X86/movntdq-no-avx.ll2
-rw-r--r--test/CodeGen/X86/movtopush.ll346
-rw-r--r--test/CodeGen/X86/musttail-fastcall.ll109
-rw-r--r--test/CodeGen/X86/musttail-varargs.ll21
-rw-r--r--test/CodeGen/X86/named-reg-alloc.ll2
-rw-r--r--test/CodeGen/X86/named-reg-notareg.ll2
-rw-r--r--test/CodeGen/X86/no-compact-unwind.ll64
-rw-r--r--test/CodeGen/X86/non-unique-sections.ll15
-rw-r--r--test/CodeGen/X86/nontemporal-2.ll2
-rw-r--r--test/CodeGen/X86/nontemporal.ll2
-rw-r--r--test/CodeGen/X86/norex-subreg.ll4
-rw-r--r--test/CodeGen/X86/nosse-varargs.ll7
-rw-r--r--test/CodeGen/X86/null-streamer.ll26
-rw-r--r--test/CodeGen/X86/objc-gc-module-flags.ll8
-rw-r--r--test/CodeGen/X86/odr_comdat.ll16
-rw-r--r--test/CodeGen/X86/palignr.ll4
-rw-r--r--test/CodeGen/X86/peep-test-2.ll2
-rw-r--r--test/CodeGen/X86/phys_subreg_coalesce-3.ll2
-rw-r--r--test/CodeGen/X86/pic_jumptable.ll2
-rw-r--r--test/CodeGen/X86/pmul.ll121
-rw-r--r--test/CodeGen/X86/pointer-vector.ll3
-rw-r--r--test/CodeGen/X86/pr11468.ll2
-rw-r--r--test/CodeGen/X86/pr12360.ll2
-rw-r--r--test/CodeGen/X86/pr15267.ll75
-rw-r--r--test/CodeGen/X86/pr18846.ll12
-rw-r--r--test/CodeGen/X86/pr21792.ll41
-rw-r--r--test/CodeGen/X86/pr22019.ll23
-rw-r--r--test/CodeGen/X86/pr22103.ll19
-rw-r--r--test/CodeGen/X86/pre-ra-sched.ll2
-rw-r--r--test/CodeGen/X86/prefixdata.ll9
-rw-r--r--test/CodeGen/X86/prologuedata.ll17
-rw-r--r--test/CodeGen/X86/pshufb-mask-comments.ll22
-rw-r--r--test/CodeGen/X86/psubus.ll316
-rw-r--r--test/CodeGen/X86/ragreedy-bug.ll48
-rw-r--r--test/CodeGen/X86/ragreedy-hoist-spill.ll19
-rw-r--r--test/CodeGen/X86/regalloc-reconcile-broken-hints.ll145
-rw-r--r--test/CodeGen/X86/remat-phys-dead.ll2
-rw-r--r--test/CodeGen/X86/scalar_sse_minmax.ll61
-rw-r--r--test/CodeGen/X86/scev-interchange.ll2
-rw-r--r--test/CodeGen/X86/segmented-stacks.ll123
-rw-r--r--test/CodeGen/X86/seh-basic.ll175
-rw-r--r--test/CodeGen/X86/seh-catch-all.ll33
-rw-r--r--test/CodeGen/X86/seh-filter.ll21
-rwxr-xr-xtest/CodeGen/X86/seh-finally.ll45
-rw-r--r--test/CodeGen/X86/seh-safe-div.ll197
-rw-r--r--test/CodeGen/X86/selectiondag-crash.ll15
-rw-r--r--test/CodeGen/X86/shrink-compare.ll148
-rw-r--r--test/CodeGen/X86/sibcall-4.ll4
-rw-r--r--test/CodeGen/X86/sibcall-5.ll2
-rw-r--r--test/CodeGen/X86/sibcall-win64.ll42
-rw-r--r--test/CodeGen/X86/sibcall.ll87
-rw-r--r--test/CodeGen/X86/sincos-opt.ll5
-rw-r--r--test/CodeGen/X86/sink-blockfreq.ll6
-rw-r--r--test/CodeGen/X86/sink-hoist.ll2
-rw-r--r--test/CodeGen/X86/sjlj-baseptr.ll37
-rw-r--r--test/CodeGen/X86/slow-div.ll28
-rw-r--r--test/CodeGen/X86/slow-incdec.ll8
-rw-r--r--test/CodeGen/X86/small-byval-memcpy.ll41
-rw-r--r--test/CodeGen/X86/splat-const.ll40
-rw-r--r--test/CodeGen/X86/sret-implicit.ll10
-rw-r--r--test/CodeGen/X86/sse-domains.ll42
-rw-r--r--test/CodeGen/X86/sse-minmax.ll152
-rw-r--r--test/CodeGen/X86/sse-scalar-fp-arith.ll149
-rw-r--r--test/CodeGen/X86/sse-unaligned-mem-feature.ll (renamed from test/CodeGen/X86/2010-01-07-UAMemFeature.ll)6
-rw-r--r--test/CodeGen/X86/sse2-intrinsics-x86-upgrade.ll31
-rw-r--r--test/CodeGen/X86/sse2-intrinsics-x86.ll32
-rw-r--r--test/CodeGen/X86/sse2.ll16
-rw-r--r--test/CodeGen/X86/sse3.ll51
-rw-r--r--test/CodeGen/X86/sse41-pmovxrm-intrinsics.ll123
-rw-r--r--test/CodeGen/X86/sse41.ll288
-rw-r--r--test/CodeGen/X86/sse4a.ll1
-rw-r--r--test/CodeGen/X86/sse_partial_update.ll66
-rw-r--r--test/CodeGen/X86/stack-align.ll22
-rw-r--r--test/CodeGen/X86/stack-folding-fp-avx1.ll1811
-rw-r--r--test/CodeGen/X86/stack-folding-fp-sse42.ll1089
-rw-r--r--test/CodeGen/X86/stack-folding-int-avx1.ll1152
-rw-r--r--test/CodeGen/X86/stack-folding-int-avx2.ll1200
-rw-r--r--test/CodeGen/X86/stack-folding-int-sse42.ll1143
-rw-r--r--test/CodeGen/X86/stack-folding-xop.ll718
-rw-r--r--test/CodeGen/X86/stack-probe-size.ll78
-rw-r--r--test/CodeGen/X86/stack-protector-dbginfo.ll144
-rw-r--r--test/CodeGen/X86/stack-protector-weight.ll36
-rw-r--r--test/CodeGen/X86/stackpointer.ll2
-rw-r--r--test/CodeGen/X86/statepoint-call-lowering.ll104
-rw-r--r--test/CodeGen/X86/statepoint-forward.ll107
-rw-r--r--test/CodeGen/X86/statepoint-stack-usage.ll60
-rw-r--r--test/CodeGen/X86/statepoint-stackmap-format.ll109
-rw-r--r--test/CodeGen/X86/switch-bt.ll58
-rw-r--r--test/CodeGen/X86/switch-default-only.ll14
-rw-r--r--test/CodeGen/X86/switch-jump-table.ll52
-rw-r--r--test/CodeGen/X86/tail-call-win64.ll36
-rw-r--r--test/CodeGen/X86/tailcall-64.ll4
-rw-r--r--test/CodeGen/X86/tailcall-returndup-void.ll10
-rw-r--r--test/CodeGen/X86/tls-models.ll8
-rw-r--r--test/CodeGen/X86/trap.ll20
-rw-r--r--test/CodeGen/X86/uint_to_fp-2.ll2
-rw-r--r--test/CodeGen/X86/unaligned-32-byte-memops.ll288
-rw-r--r--test/CodeGen/X86/unknown-location.ll26
-rw-r--r--test/CodeGen/X86/utf16-cfstrings.ll8
-rw-r--r--test/CodeGen/X86/v2f32.ll6
-rw-r--r--test/CodeGen/X86/vaargs.ll2
-rw-r--r--test/CodeGen/X86/vec-loadsingles-alignment.ll35
-rw-r--r--test/CodeGen/X86/vec_cast2.ll33
-rw-r--r--test/CodeGen/X86/vec_clear.ll13
-rw-r--r--test/CodeGen/X86/vec_compare.ll52
-rw-r--r--test/CodeGen/X86/vec_extract-avx.ll82
-rw-r--r--test/CodeGen/X86/vec_extract-mmx.ll71
-rw-r--r--test/CodeGen/X86/vec_fabs.ll2
-rw-r--r--test/CodeGen/X86/vec_fneg.ll2
-rw-r--r--test/CodeGen/X86/vec_insert-5.ll36
-rw-r--r--test/CodeGen/X86/vec_insert-mmx.ll58
-rw-r--r--test/CodeGen/X86/vec_loadsingles.ll153
-rw-r--r--test/CodeGen/X86/vec_split.ll6
-rw-r--r--test/CodeGen/X86/vector-blend.ll391
-rw-r--r--test/CodeGen/X86/vector-ctpop.ll159
-rw-r--r--test/CodeGen/X86/vector-idiv.ll436
-rw-r--r--test/CodeGen/X86/vector-sext.ll226
-rw-r--r--test/CodeGen/X86/vector-shuffle-128-v16.ll636
-rw-r--r--test/CodeGen/X86/vector-shuffle-128-v2.ll187
-rw-r--r--test/CodeGen/X86/vector-shuffle-128-v4.ll819
-rw-r--r--test/CodeGen/X86/vector-shuffle-128-v8.ll664
-rw-r--r--test/CodeGen/X86/vector-shuffle-256-v16.ll475
-rw-r--r--test/CodeGen/X86/vector-shuffle-256-v32.ll728
-rw-r--r--test/CodeGen/X86/vector-shuffle-256-v4.ll268
-rw-r--r--test/CodeGen/X86/vector-shuffle-256-v8.ll505
-rw-r--r--test/CodeGen/X86/vector-shuffle-512-v16.ll40
-rw-r--r--test/CodeGen/X86/vector-shuffle-512-v8.ll172
-rw-r--r--test/CodeGen/X86/vector-shuffle-combining.ll1154
-rw-r--r--test/CodeGen/X86/vector-shuffle-mmx.ll106
-rw-r--r--test/CodeGen/X86/vector-shuffle-sse1.ll34
-rw-r--r--test/CodeGen/X86/vector-trunc.ll223
-rw-r--r--test/CodeGen/X86/vector-zext.ll371
-rw-r--r--test/CodeGen/X86/vector-zmov.ll37
-rw-r--r--test/CodeGen/X86/viabs.ll8
-rw-r--r--test/CodeGen/X86/vselect-2.ll53
-rw-r--r--test/CodeGen/X86/vselect-avx.ll31
-rw-r--r--test/CodeGen/X86/vselect-minmax.ll2790
-rw-r--r--test/CodeGen/X86/vselect.ll47
-rw-r--r--test/CodeGen/X86/vshift-4.ll2
-rw-r--r--test/CodeGen/X86/vshift-6.ll2
-rw-r--r--test/CodeGen/X86/widen_conversions.ll2
-rw-r--r--test/CodeGen/X86/widen_load-0.ll2
-rw-r--r--test/CodeGen/X86/widen_load-1.ll4
-rw-r--r--test/CodeGen/X86/widen_load-2.ll20
-rw-r--r--test/CodeGen/X86/widen_shuffle-1.ll4
-rw-r--r--test/CodeGen/X86/win64_alloca_dynalloca.ll38
-rw-r--r--test/CodeGen/X86/win64_call_epi.ll2
-rw-r--r--test/CodeGen/X86/win64_eh.ll41
-rw-r--r--test/CodeGen/X86/win64_frame.ll122
-rw-r--r--test/CodeGen/X86/win_chkstk.ll5
-rw-r--r--test/CodeGen/X86/win_cst_pool.ll10
-rw-r--r--test/CodeGen/X86/win_eh_prepare.ll80
-rw-r--r--test/CodeGen/X86/x32-lea-1.ll10
-rw-r--r--test/CodeGen/X86/x86-64-and-mask.ll2
-rw-r--r--test/CodeGen/X86/x86-64-baseptr.ll26
-rw-r--r--test/CodeGen/X86/x86-64-psub.ll2
-rw-r--r--test/CodeGen/X86/x86-inline-asm-validation.ll34
-rw-r--r--test/CodeGen/X86/x86-shifts.ll2
-rw-r--r--test/CodeGen/X86/xaluo.ll2
-rw-r--r--test/CodeGen/X86/xop-intrinsics-x86_64.ll130
-rw-r--r--test/CodeGen/X86/xor.ll6
-rw-r--r--test/CodeGen/XCore/dwarf_debug.ll28
1182 files changed, 65040 insertions, 12539 deletions
diff --git a/test/CodeGen/AArch64/aarch64-2014-08-11-MachineCombinerCrash.ll b/test/CodeGen/AArch64/aarch64-2014-08-11-MachineCombinerCrash.ll
index 4da33a0..73ee522 100644
--- a/test/CodeGen/AArch64/aarch64-2014-08-11-MachineCombinerCrash.ll
+++ b/test/CodeGen/AArch64/aarch64-2014-08-11-MachineCombinerCrash.ll
@@ -16,7 +16,7 @@ for.body: ; preds = %for.body, %entry
%add53 = add nsw i64 %n1, 0, !dbg !52
%add55 = add nsw i64 %n1, 0, !dbg !53
%mul63 = mul nsw i64 %add53, -20995, !dbg !54
- tail call void @llvm.dbg.value(metadata !{i64 %mul63}, i64 0, metadata !30, metadata !{metadata !"0x102"}), !dbg !55
+ tail call void @llvm.dbg.value(metadata i64 %mul63, i64 0, metadata !30, metadata !{!"0x102"}), !dbg !55
%mul65 = mul nsw i64 %add55, -3196, !dbg !56
%add67 = add nsw i64 0, %mul65, !dbg !57
%add80 = add i64 0, 1024, !dbg !58
@@ -44,63 +44,63 @@ attributes #1 = { nounwind readnone }
!llvm.module.flags = !{!36, !37}
!llvm.ident = !{!38}
-!0 = metadata !{metadata !"0x11\0012\00clang version 3.6.0 \001\00\000\00\001", metadata !1, metadata !2, metadata !2, metadata !3, metadata !2, metadata !2} ; [ DW_TAG_compile_unit ] [] [] []
-!1 = metadata !{metadata !"test.c", metadata !""}
-!2 = metadata !{}
-!3 = metadata !{metadata !4}
-!4 = metadata !{metadata !"0x2e\00\00\00\00140\000\001\000\006\00256\001\00141", metadata !1, metadata !5, metadata !6, null, void ()* @test, null, null, metadata !12} ; [ DW_TAG_subprogram ] [] [] [def] [scope 141] []
-!5 = metadata !{metadata !"0x29", metadata !1} ; [ DW_TAG_file_type ] [] []
-!6 = metadata !{metadata !"0x15\00\000\000\000\000\000\000", i32 0, null, null, metadata !7, null, null, null} ; [ DW_TAG_subroutine_type ] [] [] [from ]
-!7 = metadata !{null, metadata !8}
-!8 = metadata !{metadata !"0xf\00\000\0064\0064\000\000", null, null, metadata !9} ; [ DW_TAG_pointer_type ] [] [] []
-!9 = metadata !{metadata !"0x16\00\0030\000\000\000\000", metadata !10, null, metadata !11} ; [ DW_TAG_typedef ] [] [] [] [from int]
-!10 = metadata !{metadata !"", metadata !""}
-!11 = metadata !{metadata !"0x24\00int\000\0032\0032\000\000\005", null, null} ; [ DW_TAG_base_type ] [] [int] []
-!12 = metadata !{metadata !13, metadata !14, metadata !18, metadata !19, metadata !20, metadata !21, metadata !22, metadata !23, metadata !24, metadata !25, metadata !26, metadata !27, metadata !28, metadata !29, metadata !30, metadata !31, metadata !32, metadata !33, metadata !34, metadata !35}
-!13 = metadata !{metadata !"0x101\00\0016777356\000", metadata !4, metadata !5, metadata !8} ; [ DW_TAG_arg_variable ] [] [data] []
-!14 = metadata !{metadata !"0x100\00\00142\000", metadata !4, metadata !5, metadata !15} ; [ DW_TAG_auto_variable ] [] [] []
-!15 = metadata !{metadata !"0x16\00\00183\000\000\000\000", metadata !16, null, metadata !17} ; [ DW_TAG_typedef ] [] [INT32] [] [from long int]
-!16 = metadata !{metadata !"", metadata !""}
-!17 = metadata !{metadata !"0x24\00\000\0064\0064\000\000\005", null, null} ; [ DW_TAG_base_type ] [] [long int] []
-!18 = metadata !{metadata !"0x100\00\00142\000", metadata !4, metadata !5, metadata !15} ; [ DW_TAG_auto_variable ] [] [] []
-!19 = metadata !{metadata !"0x100\00\00142\000", metadata !4, metadata !5, metadata !15} ; [ DW_TAG_auto_variable ] [] [] []
-!20 = metadata !{metadata !"0x100\00\00142\000", metadata !4, metadata !5, metadata !15} ; [ DW_TAG_auto_variable ] [] [] []
-!21 = metadata !{metadata !"0x100\00\00142\000", metadata !4, metadata !5, metadata !15} ; [ DW_TAG_auto_variable ] [] [] []
-!22 = metadata !{metadata !"0x100\00\00142\000", metadata !4, metadata !5, metadata !15} ; [ DW_TAG_auto_variable ] [] [] []
-!23 = metadata !{metadata !"0x100\00\00142\000", metadata !4, metadata !5, metadata !15} ; [ DW_TAG_auto_variable ] [] [] []
-!24 = metadata !{metadata !"0x100\00\00142\000", metadata !4, metadata !5, metadata !15} ; [ DW_TAG_auto_variable ] [ ] [] []
-!25 = metadata !{metadata !"0x100\00\00143\000", metadata !4, metadata !5, metadata !15} ; [ DW_TAG_auto_variable ] [ ] [] []
-!26 = metadata !{metadata !"0x100\00\00143\000", metadata !4, metadata !5, metadata !15} ; [ DW_TAG_auto_variable ] [ ] [] []
-!27 = metadata !{metadata !"0x100\00\00143\000", metadata !4, metadata !5, metadata !15} ; [ DW_TAG_auto_variable ] [ ] [] []
-!28 = metadata !{metadata !"0x100\00\00143\000", metadata !4, metadata !5, metadata !15} ; [ DW_TAG_auto_variable ] [ ] [] []
-!29 = metadata !{metadata !"0x100\00\00144\000", metadata !4, metadata !5, metadata !15} ; [ DW_TAG_auto_variable ] [ ] [] []
-!30 = metadata !{metadata !"0x100\00\00144\000", metadata !4, metadata !5, metadata !15} ; [ DW_TAG_auto_variable ] [ ] [] []
-!31 = metadata !{metadata !"0x100\00\00144\000", metadata !4, metadata !5, metadata !15} ; [ DW_TAG_auto_variable ] [ ] [] []
-!32 = metadata !{metadata !"0x100\00\00144\000", metadata !4, metadata !5, metadata !15} ; [ DW_TAG_auto_variable ] [ ] [] []
-!33 = metadata !{metadata !"0x100\00\00144\000", metadata !4, metadata !5, metadata !15} ; [ DW_TAG_auto_variable ] [ ] [] []
-!34 = metadata !{metadata !"0x100\00\00145\000", metadata !4, metadata !5, metadata !8} ; [ DW_TAG_auto_variable ] [ ] [] []
-!35 = metadata !{metadata !"0x100\00\00146\000", metadata !4, metadata !5, metadata !11} ; [ DW_TAG_auto_variable ] [ ] [] []
-!36 = metadata !{i32 2, metadata !"Dwarf Version", i32 4}
-!37 = metadata !{i32 2, metadata !"Debug Info Version", i32 2}
-!38 = metadata !{metadata !"clang version 3.6.0 "}
-!39 = metadata !{i32 154, i32 8, metadata !40, null}
-!40 = metadata !{metadata !"0xb\00154\008\002", metadata !1, metadata !41} ; [ DW_TAG_lexical_block ] [ ] []
-!41 = metadata !{metadata !"0xb\00154\008\001", metadata !1, metadata !42} ; [ DW_TAG_lexical_block ] [ ] []
-!42 = metadata !{metadata !"0xb\00154\003\000", metadata !1, metadata !4} ; [ DW_TAG_lexical_block ] [ ] []
-!43 = metadata !{i32 157, i32 5, metadata !44, null}
-!44 = metadata !{metadata !"0xb\00154\0042\000", metadata !1, metadata !42} ; [ DW_TAG_lexical_block ] [ ] []
-!45 = metadata !{i32 159, i32 5, metadata !44, null}
-!46 = metadata !{metadata !47, metadata !47, i64 0}
-!47 = metadata !{metadata !"int", metadata !48, i64 0}
-!48 = metadata !{metadata !"omnipotent char", metadata !49, i64 0}
-!49 = metadata !{metadata !"Simple C/C++ TBAA"}
-!50 = metadata !{i32 160, i32 5, metadata !44, null}
-!51 = metadata !{i32 161, i32 5, metadata !44, null}
-!52 = metadata !{i32 188, i32 5, metadata !44, null}
-!53 = metadata !{i32 190, i32 5, metadata !44, null}
-!54 = metadata !{i32 198, i32 5, metadata !44, null}
-!55 = metadata !{i32 144, i32 13, metadata !4, null}
-!56 = metadata !{i32 200, i32 5, metadata !44, null}
-!57 = metadata !{i32 203, i32 5, metadata !44, null}
-!58 = metadata !{i32 207, i32 5, metadata !44, null}
-!59 = metadata !{i32 208, i32 5, metadata !44, null}
+!0 = !{!"0x11\0012\00clang version 3.6.0 \001\00\000\00\001", !1, !2, !2, !3, !2, !2} ; [ DW_TAG_compile_unit ] [] [] []
+!1 = !{!"test.c", !""}
+!2 = !{}
+!3 = !{!4}
+!4 = !{!"0x2e\00\00\00\00140\000\001\000\006\00256\001\00141", !1, !5, !6, null, void ()* @test, null, null, !12} ; [ DW_TAG_subprogram ] [] [] [def] [scope 141] []
+!5 = !{!"0x29", !1} ; [ DW_TAG_file_type ] [] []
+!6 = !{!"0x15\00\000\000\000\000\000\000", i32 0, null, null, !7, null, null, null} ; [ DW_TAG_subroutine_type ] [] [] [from ]
+!7 = !{null, !8}
+!8 = !{!"0xf\00\000\0064\0064\000\000", null, null, !9} ; [ DW_TAG_pointer_type ] [] [] []
+!9 = !{!"0x16\00\0030\000\000\000\000", !10, null, !11} ; [ DW_TAG_typedef ] [] [] [] [from int]
+!10 = !{!"", !""}
+!11 = !{!"0x24\00int\000\0032\0032\000\000\005", null, null} ; [ DW_TAG_base_type ] [] [int] []
+!12 = !{!13, !14, !18, !19, !20, !21, !22, !23, !24, !25, !26, !27, !28, !29, !30, !31, !32, !33, !34, !35}
+!13 = !{!"0x101\00\0016777356\000", !4, !5, !8} ; [ DW_TAG_arg_variable ] [] [data] []
+!14 = !{!"0x100\00\00142\000", !4, !5, !15} ; [ DW_TAG_auto_variable ] [] [] []
+!15 = !{!"0x16\00\00183\000\000\000\000", !16, null, !17} ; [ DW_TAG_typedef ] [] [INT32] [] [from long int]
+!16 = !{!"", !""}
+!17 = !{!"0x24\00\000\0064\0064\000\000\005", null, null} ; [ DW_TAG_base_type ] [] [long int] []
+!18 = !{!"0x100\00\00142\000", !4, !5, !15} ; [ DW_TAG_auto_variable ] [] [] []
+!19 = !{!"0x100\00\00142\000", !4, !5, !15} ; [ DW_TAG_auto_variable ] [] [] []
+!20 = !{!"0x100\00\00142\000", !4, !5, !15} ; [ DW_TAG_auto_variable ] [] [] []
+!21 = !{!"0x100\00\00142\000", !4, !5, !15} ; [ DW_TAG_auto_variable ] [] [] []
+!22 = !{!"0x100\00\00142\000", !4, !5, !15} ; [ DW_TAG_auto_variable ] [] [] []
+!23 = !{!"0x100\00\00142\000", !4, !5, !15} ; [ DW_TAG_auto_variable ] [] [] []
+!24 = !{!"0x100\00\00142\000", !4, !5, !15} ; [ DW_TAG_auto_variable ] [ ] [] []
+!25 = !{!"0x100\00\00143\000", !4, !5, !15} ; [ DW_TAG_auto_variable ] [ ] [] []
+!26 = !{!"0x100\00\00143\000", !4, !5, !15} ; [ DW_TAG_auto_variable ] [ ] [] []
+!27 = !{!"0x100\00\00143\000", !4, !5, !15} ; [ DW_TAG_auto_variable ] [ ] [] []
+!28 = !{!"0x100\00\00143\000", !4, !5, !15} ; [ DW_TAG_auto_variable ] [ ] [] []
+!29 = !{!"0x100\00\00144\000", !4, !5, !15} ; [ DW_TAG_auto_variable ] [ ] [] []
+!30 = !{!"0x100\00\00144\000", !4, !5, !15} ; [ DW_TAG_auto_variable ] [ ] [] []
+!31 = !{!"0x100\00\00144\000", !4, !5, !15} ; [ DW_TAG_auto_variable ] [ ] [] []
+!32 = !{!"0x100\00\00144\000", !4, !5, !15} ; [ DW_TAG_auto_variable ] [ ] [] []
+!33 = !{!"0x100\00\00144\000", !4, !5, !15} ; [ DW_TAG_auto_variable ] [ ] [] []
+!34 = !{!"0x100\00\00145\000", !4, !5, !8} ; [ DW_TAG_auto_variable ] [ ] [] []
+!35 = !{!"0x100\00\00146\000", !4, !5, !11} ; [ DW_TAG_auto_variable ] [ ] [] []
+!36 = !{i32 2, !"Dwarf Version", i32 4}
+!37 = !{i32 2, !"Debug Info Version", i32 2}
+!38 = !{!"clang version 3.6.0 "}
+!39 = !MDLocation(line: 154, column: 8, scope: !40)
+!40 = !{!"0xb\00154\008\002", !1, !41} ; [ DW_TAG_lexical_block ] [ ] []
+!41 = !{!"0xb\00154\008\001", !1, !42} ; [ DW_TAG_lexical_block ] [ ] []
+!42 = !{!"0xb\00154\003\000", !1, !4} ; [ DW_TAG_lexical_block ] [ ] []
+!43 = !MDLocation(line: 157, column: 5, scope: !44)
+!44 = !{!"0xb\00154\0042\000", !1, !42} ; [ DW_TAG_lexical_block ] [ ] []
+!45 = !MDLocation(line: 159, column: 5, scope: !44)
+!46 = !{!47, !47, i64 0}
+!47 = !{!"int", !48, i64 0}
+!48 = !{!"omnipotent char", !49, i64 0}
+!49 = !{!"Simple C/C++ TBAA"}
+!50 = !MDLocation(line: 160, column: 5, scope: !44)
+!51 = !MDLocation(line: 161, column: 5, scope: !44)
+!52 = !MDLocation(line: 188, column: 5, scope: !44)
+!53 = !MDLocation(line: 190, column: 5, scope: !44)
+!54 = !MDLocation(line: 198, column: 5, scope: !44)
+!55 = !MDLocation(line: 144, column: 13, scope: !4)
+!56 = !MDLocation(line: 200, column: 5, scope: !44)
+!57 = !MDLocation(line: 203, column: 5, scope: !44)
+!58 = !MDLocation(line: 207, column: 5, scope: !44)
+!59 = !MDLocation(line: 208, column: 5, scope: !44)
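The hunk above is purely the metadata syntax migration: the "metadata" keyword is dropped from node definitions and references, and debug locations move from generic tuples to the dedicated !MDLocation form. A minimal before/after sketch (node names hypothetical, not from this test):

!0 = metadata !{metadata !"name", i32 1}            ; old spelling
!0 = !{!"name", i32 1}                              ; new spelling
!1 = metadata !{i32 10, i32 4, metadata !0, null}   ; old debug-location tuple
!1 = !MDLocation(line: 10, column: 4, scope: !0)    ; new dedicated form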
diff --git a/test/CodeGen/AArch64/aarch64-2014-12-02-combine-soften.ll b/test/CodeGen/AArch64/aarch64-2014-12-02-combine-soften.ll
new file mode 100644
index 0000000..4553251
--- /dev/null
+++ b/test/CodeGen/AArch64/aarch64-2014-12-02-combine-soften.ll
@@ -0,0 +1,16 @@
+;RUN: llc <%s -mattr=-neon -mattr=-fp-armv8 | FileCheck %s
+target datalayout = "e-m:e-i64:64-i128:128-n32:64-S128"
+target triple = "aarch64"
+
+@t = common global i32 0, align 4
+@x = common global i32 0, align 4
+
+define void @foo() {
+entry:
+;CHECK-LABEL: foo:
+;CHECK: __floatsisf
+ %0 = load i32* @x, align 4
+ %conv = sitofp i32 %0 to float
+ store float %conv, float* bitcast (i32* @t to float*), align 4
+ ret void
+}
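With NEON and fp-armv8 both subtracted there is no hardware FP to lower the sitofp above, so it must be softened into a libcall; the CHECK line pins the standard i32-to-float conversion routine. Its shape, as a declaration (signature taken from the usual soft-float runtime, assumed rather than quoted from this test):

declare float @__floatsisf(i32)   ; soft-float i32 -> float conversion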
diff --git a/test/CodeGen/AArch64/addsub-shifted.ll b/test/CodeGen/AArch64/addsub-shifted.ll
index 0a93edd..1d963f4 100644
--- a/test/CodeGen/AArch64/addsub-shifted.ll
+++ b/test/CodeGen/AArch64/addsub-shifted.ll
@@ -190,7 +190,7 @@ define void @test_asr_arith(i32 %lhs32, i32 %rhs32, i64 %lhs64, i64 %rhs64) {
; CHECK: ret
}
-define i32 @test_cmp(i32 %lhs32, i32 %rhs32, i64 %lhs64, i64 %rhs64) {
+define void @test_cmp(i32 %lhs32, i32 %rhs32, i64 %lhs64, i64 %rhs64, i32 %v) {
; CHECK-LABEL: test_cmp:
%shift1 = shl i32 %rhs32, 13
@@ -199,40 +199,46 @@ define i32 @test_cmp(i32 %lhs32, i32 %rhs32, i64 %lhs64, i64 %rhs64) {
; CHECK: cmp {{w[0-9]+}}, {{w[0-9]+}}, lsl #13
t2:
+ store volatile i32 %v, i32* @var32
%shift2 = lshr i32 %rhs32, 20
%tst2 = icmp ne i32 %lhs32, %shift2
br i1 %tst2, label %t3, label %end
; CHECK: cmp {{w[0-9]+}}, {{w[0-9]+}}, lsr #20
t3:
+ store volatile i32 %v, i32* @var32
%shift3 = ashr i32 %rhs32, 9
%tst3 = icmp ne i32 %lhs32, %shift3
br i1 %tst3, label %t4, label %end
; CHECK: cmp {{w[0-9]+}}, {{w[0-9]+}}, asr #9
t4:
+ store volatile i32 %v, i32* @var32
%shift4 = shl i64 %rhs64, 43
%tst4 = icmp uge i64 %lhs64, %shift4
br i1 %tst4, label %t5, label %end
; CHECK: cmp {{x[0-9]+}}, {{x[0-9]+}}, lsl #43
t5:
+ store volatile i32 %v, i32* @var32
%shift5 = lshr i64 %rhs64, 20
%tst5 = icmp ne i64 %lhs64, %shift5
br i1 %tst5, label %t6, label %end
; CHECK: cmp {{x[0-9]+}}, {{x[0-9]+}}, lsr #20
t6:
+ store volatile i32 %v, i32* @var32
%shift6 = ashr i64 %rhs64, 59
%tst6 = icmp ne i64 %lhs64, %shift6
br i1 %tst6, label %t7, label %end
; CHECK: cmp {{x[0-9]+}}, {{x[0-9]+}}, asr #59
t7:
- ret i32 1
-end:
+ store volatile i32 %v, i32* @var32
+ br label %end
- ret i32 0
+end:
+ ret void
; CHECK: ret
}
diff --git a/test/CodeGen/AArch64/analyze-branch.ll b/test/CodeGen/AArch64/analyze-branch.ll
index 6616b27..932cd75 100644
--- a/test/CodeGen/AArch64/analyze-branch.ll
+++ b/test/CodeGen/AArch64/analyze-branch.ll
@@ -7,8 +7,8 @@ declare void @test_true()
declare void @test_false()
 ; !0 corresponds to a branch being taken, !1 to not being taken.
-!0 = metadata !{metadata !"branch_weights", i32 64, i32 4}
-!1 = metadata !{metadata !"branch_weights", i32 4, i32 64}
+!0 = !{!"branch_weights", i32 64, i32 4}
+!1 = !{!"branch_weights", i32 4, i32 64}
define void @test_Bcc_fallthrough_taken(i32 %in) nounwind {
; CHECK-LABEL: test_Bcc_fallthrough_taken:
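The weights only matter once a terminator consumes them through !prof metadata; a minimal sketch of how the branches in this file are expected to reference !0 (condition value and labels hypothetical):

  %tst = icmp eq i32 %in, 42
  br i1 %tst, label %taken, label %untaken, !prof !0   ; 64:4 in favour of %taken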
diff --git a/test/CodeGen/AArch64/analyzecmp.ll b/test/CodeGen/AArch64/analyzecmp.ll
index 8962505..0b3bcd8 100644
--- a/test/CodeGen/AArch64/analyzecmp.ll
+++ b/test/CodeGen/AArch64/analyzecmp.ll
@@ -1,9 +1,9 @@
; RUN: llc -O3 -mcpu=cortex-a57 < %s | FileCheck %s
-; CHECK-LABLE: @test
-; CHECK: tst [[CMP:x[0-9]+]], #0x8000000000000000
-; CHECK: csel [[R0:x[0-9]+]], [[S0:x[0-9]+]], [[S1:x[0-9]+]], eq
-; CHECK: csel [[R1:x[0-9]+]], [[S2:x[0-9]+]], [[S3:x[0-9]+]], eq
+; CHECK-LABEL: @test
+; CHECK: and
+; CHECK: csel
+; CHECK: csel
target datalayout = "e-m:e-i64:64-i128:128-n32:64-S128"
target triple = "arm64--linux-gnueabi"
diff --git a/test/CodeGen/AArch64/argument-blocks.ll b/test/CodeGen/AArch64/argument-blocks.ll
new file mode 100644
index 0000000..f1dcfa6
--- /dev/null
+++ b/test/CodeGen/AArch64/argument-blocks.ll
@@ -0,0 +1,197 @@
+; RUN: llc -mtriple=aarch64-apple-ios7.0 -o - %s | FileCheck %s --check-prefix=CHECK --check-prefix=CHECK-DARWINPCS
+; RUN: llc -mtriple=aarch64-linux-gnu -o - %s | FileCheck %s --check-prefix=CHECK --check-prefix=CHECK-AAPCS
+
+declare void @callee(...)
+
+define float @test_hfa_regs(float, [2 x float] %in) {
+; CHECK-LABEL: test_hfa_regs:
+; CHECK: fadd s0, s1, s2
+
+ %lhs = extractvalue [2 x float] %in, 0
+ %rhs = extractvalue [2 x float] %in, 1
+ %sum = fadd float %lhs, %rhs
+ ret float %sum
+}
+
+; Check that the array gets allocated to a contiguous block on the stack (rather
+; than the default of 2 8-byte slots).
+define float @test_hfa_block([7 x float], [2 x float] %in) {
+; CHECK-LABEL: test_hfa_block:
+; CHECK: ldp [[LHS:s[0-9]+]], [[RHS:s[0-9]+]], [sp]
+; CHECK: fadd s0, [[LHS]], [[RHS]]
+
+ %lhs = extractvalue [2 x float] %in, 0
+ %rhs = extractvalue [2 x float] %in, 1
+ %sum = fadd float %lhs, %rhs
+ ret float %sum
+}
+
+; Check that an HFA prevents backfilling of VFP registers (i.e. %rhs must go on
+; the stack rather than in s7).
+define float @test_hfa_block_consume([7 x float], [2 x float] %in, float %rhs) {
+; CHECK-LABEL: test_hfa_block_consume:
+; CHECK-DAG: ldr [[LHS:s[0-9]+]], [sp]
+; CHECK-DAG: ldr [[RHS:s[0-9]+]], [sp, #8]
+; CHECK: fadd s0, [[LHS]], [[RHS]]
+
+ %lhs = extractvalue [2 x float] %in, 0
+ %sum = fadd float %lhs, %rhs
+ ret float %sum
+}
+
+define float @test_hfa_stackalign([8 x float], [1 x float], [2 x float] %in) {
+; CHECK-LABEL: test_hfa_stackalign:
+; CHECK-AAPCS: ldp [[LHS:s[0-9]+]], [[RHS:s[0-9]+]], [sp, #8]
+; CHECK-DARWINPCS: ldp [[LHS:s[0-9]+]], [[RHS:s[0-9]+]], [sp, #4]
+; CHECK: fadd s0, [[LHS]], [[RHS]]
+ %lhs = extractvalue [2 x float] %in, 0
+ %rhs = extractvalue [2 x float] %in, 1
+ %sum = fadd float %lhs, %rhs
+ ret float %sum
+}
+
+; An HFA that ends up on the stack should not have any effect on where
+; integer-based arguments go.
+define i64 @test_hfa_ignores_gprs([7 x float], [2 x float] %in, i64, i64 %res) {
+; CHECK-LABEL: test_hfa_ignores_gprs:
+; CHECK: mov x0, x1
+ ret i64 %res
+}
+
+; [2 x float] should not be promoted to double by the Darwin varargs handling,
+; but should go in an 8-byte aligned slot.
+define void @test_varargs_stackalign() {
+; CHECK-LABEL: test_varargs_stackalign:
+; CHECK-DARWINPCS: stp {{w[0-9]+}}, {{w[0-9]+}}, [sp, #16]
+
+ call void(...)* @callee([3 x float] undef, [2 x float] [float 1.0, float 2.0])
+ ret void
+}
+
+define i64 @test_smallstruct_block([7 x i64], [2 x i64] %in) {
+; CHECK-LABEL: test_smallstruct_block:
+; CHECK: ldp [[LHS:x[0-9]+]], [[RHS:x[0-9]+]], [sp]
+; CHECK: add x0, [[LHS]], [[RHS]]
+ %lhs = extractvalue [2 x i64] %in, 0
+ %rhs = extractvalue [2 x i64] %in, 1
+ %sum = add i64 %lhs, %rhs
+ ret i64 %sum
+}
+
+; Check that a small struct prevents backfilling of registers (i.e. %rhs
+; must go on the stack rather than in x7).
+define i64 @test_smallstruct_block_consume([7 x i64], [2 x i64] %in, i64 %rhs) {
+; CHECK-LABEL: test_smallstruct_block_consume:
+; CHECK-DAG: ldr [[LHS:x[0-9]+]], [sp]
+; CHECK-DAG: ldr [[RHS:x[0-9]+]], [sp, #16]
+; CHECK: add x0, [[LHS]], [[RHS]]
+
+ %lhs = extractvalue [2 x i64] %in, 0
+ %sum = add i64 %lhs, %rhs
+ ret i64 %sum
+}
+
+define <1 x i64> @test_v1i64_blocked([7 x double], [2 x <1 x i64>] %in) {
+; CHECK-LABEL: test_v1i64_blocked:
+; CHECK: ldr d0, [sp]
+ %val = extractvalue [2 x <1 x i64>] %in, 0
+ ret <1 x i64> %val
+}
+
+define <1 x double> @test_v1f64_blocked([7 x double], [2 x <1 x double>] %in) {
+; CHECK-LABEL: test_v1f64_blocked:
+; CHECK: ldr d0, [sp]
+ %val = extractvalue [2 x <1 x double>] %in, 0
+ ret <1 x double> %val
+}
+
+define <2 x i32> @test_v2i32_blocked([7 x double], [2 x <2 x i32>] %in) {
+; CHECK-LABEL: test_v2i32_blocked:
+; CHECK: ldr d0, [sp]
+ %val = extractvalue [2 x <2 x i32>] %in, 0
+ ret <2 x i32> %val
+}
+
+define <2 x float> @test_v2f32_blocked([7 x double], [2 x <2 x float>] %in) {
+; CHECK-LABEL: test_v2f32_blocked:
+; CHECK: ldr d0, [sp]
+ %val = extractvalue [2 x <2 x float>] %in, 0
+ ret <2 x float> %val
+}
+
+define <4 x i16> @test_v4i16_blocked([7 x double], [2 x <4 x i16>] %in) {
+; CHECK-LABEL: test_v4i16_blocked:
+; CHECK: ldr d0, [sp]
+ %val = extractvalue [2 x <4 x i16>] %in, 0
+ ret <4 x i16> %val
+}
+
+define <4 x half> @test_v4f16_blocked([7 x double], [2 x <4 x half>] %in) {
+; CHECK-LABEL: test_v4f16_blocked:
+; CHECK: ldr d0, [sp]
+ %val = extractvalue [2 x <4 x half>] %in, 0
+ ret <4 x half> %val
+}
+
+define <8 x i8> @test_v8i8_blocked([7 x double], [2 x <8 x i8>] %in) {
+; CHECK-LABEL: test_v8i8_blocked:
+; CHECK: ldr d0, [sp]
+ %val = extractvalue [2 x <8 x i8>] %in, 0
+ ret <8 x i8> %val
+}
+
+define <2 x i64> @test_v2i64_blocked([7 x double], [2 x <2 x i64>] %in) {
+; CHECK-LABEL: test_v2i64_blocked:
+; CHECK: ldr q0, [sp]
+ %val = extractvalue [2 x <2 x i64>] %in, 0
+ ret <2 x i64> %val
+}
+
+define <2 x double> @test_v2f64_blocked([7 x double], [2 x <2 x double>] %in) {
+; CHECK-LABEL: test_v2f64_blocked:
+; CHECK: ldr q0, [sp]
+ %val = extractvalue [2 x <2 x double>] %in, 0
+ ret <2 x double> %val
+}
+
+define <4 x i32> @test_v4i32_blocked([7 x double], [2 x <4 x i32>] %in) {
+; CHECK-LABEL: test_v4i32_blocked:
+; CHECK: ldr q0, [sp]
+ %val = extractvalue [2 x <4 x i32>] %in, 0
+ ret <4 x i32> %val
+}
+
+define <4 x float> @test_v4f32_blocked([7 x double], [2 x <4 x float>] %in) {
+; CHECK-LABEL: test_v4f32_blocked:
+; CHECK: ldr q0, [sp]
+ %val = extractvalue [2 x <4 x float>] %in, 0
+ ret <4 x float> %val
+}
+
+define <8 x i16> @test_v8i16_blocked([7 x double], [2 x <8 x i16>] %in) {
+; CHECK-LABEL: test_v8i16_blocked:
+; CHECK: ldr q0, [sp]
+ %val = extractvalue [2 x <8 x i16>] %in, 0
+ ret <8 x i16> %val
+}
+
+define <8 x half> @test_v8f16_blocked([7 x double], [2 x <8 x half>] %in) {
+; CHECK-LABEL: test_v8f16_blocked:
+; CHECK: ldr q0, [sp]
+ %val = extractvalue [2 x <8 x half>] %in, 0
+ ret <8 x half> %val
+}
+
+define <16 x i8> @test_v16i8_blocked([7 x double], [2 x <16 x i8>] %in) {
+; CHECK-LABEL: test_v16i8_blocked:
+; CHECK: ldr q0, [sp]
+ %val = extractvalue [2 x <16 x i8>] %in, 0
+ ret <16 x i8> %val
+}
+
+define half @test_f16_blocked([7 x double], [2 x half] %in) {
+; CHECK-LABEL: test_f16_blocked:
+; CHECK: ldr h0, [sp]
+ %val = extractvalue [2 x half] %in, 0
+ ret half %val
+}
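Taken together these tests pin down the argument-block rule: an HFA or short aggregate is allocated to registers as a single unit, and once any part of it would spill, the whole aggregate moves to a contiguous stack block and the skipped registers are not backfilled by later arguments. A caller-side sketch of the spilled case (a hypothetical caller, not part of the test file):

define float @call_consume_sketch() {
  ; s0-s6 are consumed by the filler; [2 x float] cannot be split across s7
  ; and the stack, so it lands at [sp] and the trailing float skips s7 too.
  %res = call float @test_hfa_block_consume([7 x float] undef,
                                            [2 x float] [float 1.0, float 2.0],
                                            float 3.0)
  ret float %res
}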
diff --git a/test/CodeGen/AArch64/arm64-2011-03-17-AsmPrinterCrash.ll b/test/CodeGen/AArch64/arm64-2011-03-17-AsmPrinterCrash.ll
index e57a8c9..8b88c0b 100644
--- a/test/CodeGen/AArch64/arm64-2011-03-17-AsmPrinterCrash.ll
+++ b/test/CodeGen/AArch64/arm64-2011-03-17-AsmPrinterCrash.ll
@@ -11,7 +11,7 @@ if.then24: ; preds = %entry
unreachable
if.else295: ; preds = %entry
- call void @llvm.dbg.declare(metadata !{i32* %do_tab_convert}, metadata !16, metadata !{metadata !"0x102"}), !dbg !18
+ call void @llvm.dbg.declare(metadata i32* %do_tab_convert, metadata !16, metadata !{!"0x102"}), !dbg !18
store i32 0, i32* %do_tab_convert, align 4, !dbg !19
unreachable
}
@@ -21,25 +21,25 @@ declare void @llvm.dbg.declare(metadata, metadata, metadata) nounwind readnone
!llvm.dbg.gv = !{!0}
!llvm.dbg.sp = !{!1, !7, !10, !11, !12}
-!0 = metadata !{metadata !"0x34\00vsplive\00vsplive\00\00617\001\001", metadata !1, metadata !2, metadata !6, null, null} ; [ DW_TAG_variable ]
-!1 = metadata !{metadata !"0x2e\00drt_vsprintf\00drt_vsprintf\00\00616\000\001\000\006\00256\000\000", metadata !20, metadata !2, metadata !4, null, null, null, null, null} ; [ DW_TAG_subprogram ]
-!2 = metadata !{metadata !"0x29", metadata !20} ; [ DW_TAG_file_type ]
-!3 = metadata !{metadata !"0x11\0012\00clang version 3.0 (http://llvm.org/git/clang.git git:/git/puzzlebox/clang.git/ c4d1aea01c4444eb81bdbf391f1be309127c3cf1)\001\00\000\00\000", metadata !20, metadata !21, metadata !21, null, null, null} ; [ DW_TAG_compile_unit ]
-!4 = metadata !{metadata !"0x15\00\000\000\000\000\000\000", metadata !20, metadata !2, null, metadata !5, i32 0} ; [ DW_TAG_subroutine_type ]
-!5 = metadata !{metadata !6}
-!6 = metadata !{metadata !"0x24\00int\000\0032\0032\000\000\005", null, metadata !3} ; [ DW_TAG_base_type ]
-!7 = metadata !{metadata !"0x2e\00putc_mem\00putc_mem\00\0030\001\001\000\006\00256\000\000", metadata !20, metadata !2, metadata !8, null, null, null, null, null} ; [ DW_TAG_subprogram ]
-!8 = metadata !{metadata !"0x15\00\000\000\000\000\000\000", metadata !20, metadata !2, null, metadata !9, i32 0} ; [ DW_TAG_subroutine_type ]
-!9 = metadata !{null}
-!10 = metadata !{metadata !"0x2e\00print_double\00print_double\00\00203\001\001\000\006\00256\000\000", metadata !20, metadata !2, metadata !4, null, null, null, null, null} ; [ DW_TAG_subprogram ]
-!11 = metadata !{metadata !"0x2e\00print_number\00print_number\00\0075\001\001\000\006\00256\000\000", metadata !20, metadata !2, metadata !4, i32 0, null, null, null, null} ; [ DW_TAG_subprogram ]
-!12 = metadata !{metadata !"0x2e\00get_flags\00get_flags\00\00508\001\001\000\006\00256\000\000", metadata !20, metadata !2, metadata !8, null, null, null, null, null} ; [ DW_TAG_subprogram ]
-!13 = metadata !{i32 653, i32 5, metadata !14, null}
-!14 = metadata !{metadata !"0xb\00652\0035\002", metadata !20, metadata !15} ; [ DW_TAG_lexical_block ]
-!15 = metadata !{metadata !"0xb\00616\001\000", metadata !20, metadata !1} ; [ DW_TAG_lexical_block ]
-!16 = metadata !{metadata !"0x100\00do_tab_convert\00853\000", metadata !17, metadata !2, metadata !6} ; [ DW_TAG_auto_variable ]
-!17 = metadata !{metadata !"0xb\00850\0012\0033", metadata !20, metadata !14} ; [ DW_TAG_lexical_block ]
-!18 = metadata !{i32 853, i32 11, metadata !17, null}
-!19 = metadata !{i32 853, i32 29, metadata !17, null}
-!20 = metadata !{metadata !"print.i", metadata !"/Volumes/Ebi/echeng/radars/r9146594"}
-!21 = metadata !{i32 0}
+!0 = !{!"0x34\00vsplive\00vsplive\00\00617\001\001", !1, !2, !6, null, null} ; [ DW_TAG_variable ]
+!1 = !{!"0x2e\00drt_vsprintf\00drt_vsprintf\00\00616\000\001\000\006\00256\000\000", !20, !2, !4, null, null, null, null, null} ; [ DW_TAG_subprogram ]
+!2 = !{!"0x29", !20} ; [ DW_TAG_file_type ]
+!3 = !{!"0x11\0012\00clang version 3.0 (http://llvm.org/git/clang.git git:/git/puzzlebox/clang.git/ c4d1aea01c4444eb81bdbf391f1be309127c3cf1)\001\00\000\00\000", !20, !21, !21, null, null, null} ; [ DW_TAG_compile_unit ]
+!4 = !{!"0x15\00\000\000\000\000\000\000", !20, !2, null, !5, i32 0} ; [ DW_TAG_subroutine_type ]
+!5 = !{!6}
+!6 = !{!"0x24\00int\000\0032\0032\000\000\005", null, !3} ; [ DW_TAG_base_type ]
+!7 = !{!"0x2e\00putc_mem\00putc_mem\00\0030\001\001\000\006\00256\000\000", !20, !2, !8, null, null, null, null, null} ; [ DW_TAG_subprogram ]
+!8 = !{!"0x15\00\000\000\000\000\000\000", !20, !2, null, !9, i32 0} ; [ DW_TAG_subroutine_type ]
+!9 = !{null}
+!10 = !{!"0x2e\00print_double\00print_double\00\00203\001\001\000\006\00256\000\000", !20, !2, !4, null, null, null, null, null} ; [ DW_TAG_subprogram ]
+!11 = !{!"0x2e\00print_number\00print_number\00\0075\001\001\000\006\00256\000\000", !20, !2, !4, i32 0, null, null, null, null} ; [ DW_TAG_subprogram ]
+!12 = !{!"0x2e\00get_flags\00get_flags\00\00508\001\001\000\006\00256\000\000", !20, !2, !8, null, null, null, null, null} ; [ DW_TAG_subprogram ]
+!13 = !MDLocation(line: 653, column: 5, scope: !14)
+!14 = !{!"0xb\00652\0035\002", !20, !15} ; [ DW_TAG_lexical_block ]
+!15 = !{!"0xb\00616\001\000", !20, !1} ; [ DW_TAG_lexical_block ]
+!16 = !{!"0x100\00do_tab_convert\00853\000", !17, !2, !6} ; [ DW_TAG_auto_variable ]
+!17 = !{!"0xb\00850\0012\0033", !20, !14} ; [ DW_TAG_lexical_block ]
+!18 = !MDLocation(line: 853, column: 11, scope: !17)
+!19 = !MDLocation(line: 853, column: 29, scope: !17)
+!20 = !{!"print.i", !"/Volumes/Ebi/echeng/radars/r9146594"}
+!21 = !{i32 0}
diff --git a/test/CodeGen/AArch64/arm64-2012-05-22-LdStOptBug.ll b/test/CodeGen/AArch64/arm64-2012-05-22-LdStOptBug.ll
index 4b037db..b5b1b70 100644
--- a/test/CodeGen/AArch64/arm64-2012-05-22-LdStOptBug.ll
+++ b/test/CodeGen/AArch64/arm64-2012-05-22-LdStOptBug.ll
@@ -43,8 +43,8 @@ entry:
!llvm.module.flags = !{!0, !1, !2, !3}
-!0 = metadata !{i32 1, metadata !"Objective-C Version", i32 2}
-!1 = metadata !{i32 1, metadata !"Objective-C Image Info Version", i32 0}
-!2 = metadata !{i32 1, metadata !"Objective-C Image Info Section", metadata !"__DATA, __objc_imageinfo, regular, no_dead_strip"}
-!3 = metadata !{i32 4, metadata !"Objective-C Garbage Collection", i32 0}
-!4 = metadata !{}
+!0 = !{i32 1, !"Objective-C Version", i32 2}
+!1 = !{i32 1, !"Objective-C Image Info Version", i32 0}
+!2 = !{i32 1, !"Objective-C Image Info Section", !"__DATA, __objc_imageinfo, regular, no_dead_strip"}
+!3 = !{i32 4, !"Objective-C Garbage Collection", i32 0}
+!4 = !{}
diff --git a/test/CodeGen/AArch64/arm64-2012-06-06-FPToUI.ll b/test/CodeGen/AArch64/arm64-2012-06-06-FPToUI.ll
index 7d880f3..4db1f59 100644
--- a/test/CodeGen/AArch64/arm64-2012-06-06-FPToUI.ll
+++ b/test/CodeGen/AArch64/arm64-2012-06-06-FPToUI.ll
@@ -61,7 +61,7 @@ entry:
!llvm.module.flags = !{!0, !1, !2, !3}
-!0 = metadata !{i32 1, metadata !"Objective-C Version", i32 2}
-!1 = metadata !{i32 1, metadata !"Objective-C Image Info Version", i32 0}
-!2 = metadata !{i32 1, metadata !"Objective-C Image Info Section", metadata !"__DATA, __objc_imageinfo, regular, no_dead_strip"}
-!3 = metadata !{i32 4, metadata !"Objective-C Garbage Collection", i32 0}
+!0 = !{i32 1, !"Objective-C Version", i32 2}
+!1 = !{i32 1, !"Objective-C Image Info Version", i32 0}
+!2 = !{i32 1, !"Objective-C Image Info Section", !"__DATA, __objc_imageinfo, regular, no_dead_strip"}
+!3 = !{i32 4, !"Objective-C Garbage Collection", i32 0}
diff --git a/test/CodeGen/AArch64/arm64-aapcs-be.ll b/test/CodeGen/AArch64/arm64-aapcs-be.ll
index 77e2b0f..f27570a 100644
--- a/test/CodeGen/AArch64/arm64-aapcs-be.ll
+++ b/test/CodeGen/AArch64/arm64-aapcs-be.ll
@@ -21,4 +21,20 @@ entry:
; CHECK-DAG: strh w{{[0-9]}}, [sp, #14]
; CHECK-DAG: strb w{{[0-9]}}, [sp, #7]
ret i32 %call
-} \ No newline at end of file
+}
+
+define float @test_block_addr([8 x float], [1 x float] %in) {
+; CHECK-LABEL: test_block_addr:
+; CHECK: ldr s0, [sp]
+ %val = extractvalue [1 x float] %in, 0
+ ret float %val
+}
+
+define void @test_block_addr_callee() {
+; CHECK-LABEL: test_block_addr_callee:
+; CHECK: str {{[a-z0-9]+}}, [sp]
+; CHECK: bl test_block_addr
+ %val = insertvalue [1 x float] undef, float 0.0, 0
+ call float @test_block_addr([8 x float] undef, [1 x float] %val)
+ ret void
+}
diff --git a/test/CodeGen/AArch64/arm64-abi_align.ll b/test/CodeGen/AArch64/arm64-abi_align.ll
index deb740e..e03d7fa 100644
--- a/test/CodeGen/AArch64/arm64-abi_align.ll
+++ b/test/CodeGen/AArch64/arm64-abi_align.ll
@@ -527,8 +527,8 @@ attributes #3 = { nounwind "fp-contract-model"="standard" "relocation-model"="pi
attributes #4 = { nounwind }
attributes #5 = { nobuiltin }
-!0 = metadata !{metadata !"int", metadata !1}
-!1 = metadata !{metadata !"omnipotent char", metadata !2}
-!2 = metadata !{metadata !"Simple C/C++ TBAA"}
-!3 = metadata !{metadata !"short", metadata !1}
-!4 = metadata !{i64 0, i64 4, metadata !0, i64 4, i64 2, metadata !3, i64 8, i64 4, metadata !0, i64 12, i64 2, metadata !3, i64 16, i64 4, metadata !0, i64 20, i64 2, metadata !3}
+!0 = !{!"int", !1}
+!1 = !{!"omnipotent char", !2}
+!2 = !{!"Simple C/C++ TBAA"}
+!3 = !{!"short", !1}
+!4 = !{i64 0, i64 4, !0, i64 4, i64 2, !3, i64 8, i64 4, !0, i64 12, i64 2, !3, i64 16, i64 4, !0, i64 20, i64 2, !3}
diff --git a/test/CodeGen/AArch64/arm64-atomic-128.ll b/test/CodeGen/AArch64/arm64-atomic-128.ll
index 3377849..642d72a 100644
--- a/test/CodeGen/AArch64/arm64-atomic-128.ll
+++ b/test/CodeGen/AArch64/arm64-atomic-128.ll
@@ -29,8 +29,7 @@ define void @fetch_and_nand(i128* %p, i128 %bits) {
; CHECK: stlxp [[SCRATCH_RES:w[0-9]+]], [[SCRATCH_REGLO]], [[SCRATCH_REGHI]], [x0]
; CHECK: cbnz [[SCRATCH_RES]], [[LABEL]]
-; CHECK-DAG: str [[DEST_REGHI]]
-; CHECK-DAG: str [[DEST_REGLO]]
+; CHECK-DAG: stp [[DEST_REGLO]], [[DEST_REGHI]]
%val = atomicrmw nand i128* %p, i128 %bits release
store i128 %val, i128* @var, align 16
ret void
@@ -45,8 +44,7 @@ define void @fetch_and_or(i128* %p, i128 %bits) {
; CHECK: stlxp [[SCRATCH_RES:w[0-9]+]], [[SCRATCH_REGLO]], [[SCRATCH_REGHI]], [x0]
; CHECK: cbnz [[SCRATCH_RES]], [[LABEL]]
-; CHECK-DAG: str [[DEST_REGHI]]
-; CHECK-DAG: str [[DEST_REGLO]]
+; CHECK-DAG: stp [[DEST_REGLO]], [[DEST_REGHI]]
%val = atomicrmw or i128* %p, i128 %bits seq_cst
store i128 %val, i128* @var, align 16
ret void
@@ -61,8 +59,7 @@ define void @fetch_and_add(i128* %p, i128 %bits) {
; CHECK: stlxp [[SCRATCH_RES:w[0-9]+]], [[SCRATCH_REGLO]], [[SCRATCH_REGHI]], [x0]
; CHECK: cbnz [[SCRATCH_RES]], [[LABEL]]
-; CHECK-DAG: str [[DEST_REGHI]]
-; CHECK-DAG: str [[DEST_REGLO]]
+; CHECK-DAG: stp [[DEST_REGLO]], [[DEST_REGHI]]
%val = atomicrmw add i128* %p, i128 %bits seq_cst
store i128 %val, i128* @var, align 16
ret void
@@ -77,8 +74,7 @@ define void @fetch_and_sub(i128* %p, i128 %bits) {
; CHECK: stlxp [[SCRATCH_RES:w[0-9]+]], [[SCRATCH_REGLO]], [[SCRATCH_REGHI]], [x0]
; CHECK: cbnz [[SCRATCH_RES]], [[LABEL]]
-; CHECK-DAG: str [[DEST_REGHI]]
-; CHECK-DAG: str [[DEST_REGLO]]
+; CHECK-DAG: stp [[DEST_REGLO]], [[DEST_REGHI]]
%val = atomicrmw sub i128* %p, i128 %bits seq_cst
store i128 %val, i128* @var, align 16
ret void
@@ -99,8 +95,7 @@ define void @fetch_and_min(i128* %p, i128 %bits) {
; CHECK: stlxp [[SCRATCH_RES:w[0-9]+]], [[SCRATCH_REGLO]], [[SCRATCH_REGHI]], [x0]
; CHECK: cbnz [[SCRATCH_RES]], [[LABEL]]
-; CHECK-DAG: str [[DEST_REGHI]]
-; CHECK-DAG: str [[DEST_REGLO]]
+; CHECK-DAG: stp [[DEST_REGLO]], [[DEST_REGHI]]
%val = atomicrmw min i128* %p, i128 %bits seq_cst
store i128 %val, i128* @var, align 16
ret void
@@ -121,8 +116,7 @@ define void @fetch_and_max(i128* %p, i128 %bits) {
; CHECK: stlxp [[SCRATCH_RES:w[0-9]+]], [[SCRATCH_REGLO]], [[SCRATCH_REGHI]], [x0]
; CHECK: cbnz [[SCRATCH_RES]], [[LABEL]]
-; CHECK-DAG: str [[DEST_REGHI]]
-; CHECK-DAG: str [[DEST_REGLO]]
+; CHECK-DAG: stp [[DEST_REGLO]], [[DEST_REGHI]]
%val = atomicrmw max i128* %p, i128 %bits seq_cst
store i128 %val, i128* @var, align 16
ret void
@@ -143,8 +137,7 @@ define void @fetch_and_umin(i128* %p, i128 %bits) {
; CHECK: stlxp [[SCRATCH_RES:w[0-9]+]], [[SCRATCH_REGLO]], [[SCRATCH_REGHI]], [x0]
; CHECK: cbnz [[SCRATCH_RES]], [[LABEL]]
-; CHECK-DAG: str [[DEST_REGHI]]
-; CHECK-DAG: str [[DEST_REGLO]]
+; CHECK-DAG: stp [[DEST_REGLO]], [[DEST_REGHI]]
%val = atomicrmw umin i128* %p, i128 %bits seq_cst
store i128 %val, i128* @var, align 16
ret void
@@ -165,8 +158,7 @@ define void @fetch_and_umax(i128* %p, i128 %bits) {
; CHECK: stlxp [[SCRATCH_RES:w[0-9]+]], [[SCRATCH_REGLO]], [[SCRATCH_REGHI]], [x0]
; CHECK: cbnz [[SCRATCH_RES]], [[LABEL]]
-; CHECK-DAG: str [[DEST_REGHI]]
-; CHECK-DAG: str [[DEST_REGLO]]
+; CHECK-DAG: stp [[DEST_REGLO]], [[DEST_REGHI]]
%val = atomicrmw umax i128* %p, i128 %bits seq_cst
store i128 %val, i128* @var, align 16
ret void
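Every hunk in this file makes the same check update: the two x-register halves of the atomicrmw result are now stored to @var with a single store-pair instead of two independent str instructions. In the checks' capture names (address register illustrative):

;   before:  str [[DEST_REGHI]], ...
;            str [[DEST_REGLO]], ...
;   after:   stp [[DEST_REGLO]], [[DEST_REGHI]], [xN]   ; both halves, one store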
diff --git a/test/CodeGen/AArch64/arm64-ccmp-heuristics.ll b/test/CodeGen/AArch64/arm64-ccmp-heuristics.ll
index 664a26c..b032d9c 100644
--- a/test/CodeGen/AArch64/arm64-ccmp-heuristics.ll
+++ b/test/CodeGen/AArch64/arm64-ccmp-heuristics.ll
@@ -184,7 +184,7 @@ declare hidden fastcc i32 @Maze1Mech(i64, i64, i64, i64, i64, i32, i32) nounwind
; Materializable
declare hidden fastcc void @CleanNet(i64) nounwind ssp
-!0 = metadata !{metadata !"long", metadata !1}
-!1 = metadata !{metadata !"omnipotent char", metadata !2}
-!2 = metadata !{metadata !"Simple C/C++ TBAA"}
-!3 = metadata !{metadata !"any pointer", metadata !1}
+!0 = !{!"long", !1}
+!1 = !{!"omnipotent char", !2}
+!2 = !{!"Simple C/C++ TBAA"}
+!3 = !{!"any pointer", !1}
diff --git a/test/CodeGen/AArch64/arm64-cse.ll b/test/CodeGen/AArch64/arm64-cse.ll
index b74ece8..508df7c 100644
--- a/test/CodeGen/AArch64/arm64-cse.ll
+++ b/test/CodeGen/AArch64/arm64-cse.ll
@@ -1,4 +1,4 @@
-; RUN: llc -O3 < %s -aarch64-atomic-cfg-tidy=0 -aarch64-gep-opt=false | FileCheck %s
+; RUN: llc -O3 < %s -aarch64-atomic-cfg-tidy=0 -aarch64-gep-opt=false -verify-machineinstrs | FileCheck %s
target triple = "arm64-apple-ios"
; rdar://12462006
diff --git a/test/CodeGen/AArch64/arm64-fastcc-tailcall.ll b/test/CodeGen/AArch64/arm64-fastcc-tailcall.ll
index 8a744c5..a9b8024 100644
--- a/test/CodeGen/AArch64/arm64-fastcc-tailcall.ll
+++ b/test/CodeGen/AArch64/arm64-fastcc-tailcall.ll
@@ -19,6 +19,6 @@ define internal fastcc void @callee(i32* nocapture %p, i32 %a) nounwind optsize
ret void
}
-!0 = metadata !{metadata !"int", metadata !1}
-!1 = metadata !{metadata !"omnipotent char", metadata !2}
-!2 = metadata !{metadata !"Simple C/C++ TBAA"}
+!0 = !{!"int", !1}
+!1 = !{!"omnipotent char", !2}
+!2 = !{!"Simple C/C++ TBAA"}
diff --git a/test/CodeGen/AArch64/arm64-fixed-point-scalar-cvt-dagcombine.ll b/test/CodeGen/AArch64/arm64-fixed-point-scalar-cvt-dagcombine.ll
index e51c38b..e41e19e 100644
--- a/test/CodeGen/AArch64/arm64-fixed-point-scalar-cvt-dagcombine.ll
+++ b/test/CodeGen/AArch64/arm64-fixed-point-scalar-cvt-dagcombine.ll
@@ -6,7 +6,7 @@
; rdar://11855286
define double @foo0(<2 x i64> %a) nounwind {
; CHECK: scvtf.2d [[REG:v[0-9]+]], v0, #9
-; CHECK-NEXT: ins.d v0[0], [[REG]][1]
+; CHECK-NEXT: mov d0, [[REG]][1]
%vecext = extractelement <2 x i64> %a, i32 1
%fcvt_n = tail call double @llvm.aarch64.neon.vcvtfxs2fp.f64.i64(i64 %vecext, i32 9)
ret double %fcvt_n
diff --git a/test/CodeGen/AArch64/arm64-fold-address.ll b/test/CodeGen/AArch64/arm64-fold-address.ll
index 96cc3e9..1f0b918 100644
--- a/test/CodeGen/AArch64/arm64-fold-address.ll
+++ b/test/CodeGen/AArch64/arm64-fold-address.ll
@@ -72,8 +72,8 @@ entry:
!llvm.module.flags = !{!0, !1, !2, !3}
-!0 = metadata !{i32 1, metadata !"Objective-C Version", i32 2}
-!1 = metadata !{i32 1, metadata !"Objective-C Image Info Version", i32 0}
-!2 = metadata !{i32 1, metadata !"Objective-C Image Info Section", metadata !"__DATA, __objc_imageinfo, regular, no_dead_strip"}
-!3 = metadata !{i32 4, metadata !"Objective-C Garbage Collection", i32 0}
-!4 = metadata !{}
+!0 = !{i32 1, !"Objective-C Version", i32 2}
+!1 = !{i32 1, !"Objective-C Image Info Version", i32 0}
+!2 = !{i32 1, !"Objective-C Image Info Section", !"__DATA, __objc_imageinfo, regular, no_dead_strip"}
+!3 = !{i32 4, !"Objective-C Garbage Collection", i32 0}
+!4 = !{}
diff --git a/test/CodeGen/AArch64/arm64-indexed-vector-ldst-2.ll b/test/CodeGen/AArch64/arm64-indexed-vector-ldst-2.ll
index c118f10..917911a 100644
--- a/test/CodeGen/AArch64/arm64-indexed-vector-ldst-2.ll
+++ b/test/CodeGen/AArch64/arm64-indexed-vector-ldst-2.ll
@@ -34,7 +34,7 @@ declare i64 @llvm.objectsize.i64.p0i8(i8*, i1) #1
attributes #0 = { nounwind ssp "less-precise-fpmad"="false" "no-frame-pointer-elim"="false" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" }
attributes #1 = { nounwind readnone }
-!1 = metadata !{metadata !2, metadata !2, i64 0}
-!2 = metadata !{metadata !"double", metadata !3, i64 0}
-!3 = metadata !{metadata !"omnipotent char", metadata !4, i64 0}
-!4 = metadata !{metadata !"Simple C/C++ TBAA"}
+!1 = !{!2, !2, i64 0}
+!2 = !{!"double", !3, i64 0}
+!3 = !{!"omnipotent char", !4, i64 0}
+!4 = !{!"Simple C/C++ TBAA"}
diff --git a/test/CodeGen/AArch64/arm64-ldp.ll b/test/CodeGen/AArch64/arm64-ldp.ll
index 5a98626..a9fa4ca 100644
--- a/test/CodeGen/AArch64/arm64-ldp.ll
+++ b/test/CodeGen/AArch64/arm64-ldp.ll
@@ -12,6 +12,18 @@ define i32 @ldp_int(i32* %p) nounwind {
ret i32 %add
}
+; CHECK: ldp_sext_int
+; CHECK: ldpsw
+define i64 @ldp_sext_int(i32* %p) nounwind {
+ %tmp = load i32* %p, align 4
+ %add.ptr = getelementptr inbounds i32* %p, i64 1
+ %tmp1 = load i32* %add.ptr, align 4
+ %sexttmp = sext i32 %tmp to i64
+ %sexttmp1 = sext i32 %tmp1 to i64
+ %add = add nsw i64 %sexttmp1, %sexttmp
+ ret i64 %add
+}
+
; CHECK: ldp_long
; CHECK: ldp
define i64 @ldp_long(i64* %p) nounwind {
@@ -56,6 +68,21 @@ define i32 @ldur_int(i32* %a) nounwind {
ret i32 %tmp3
}
+define i64 @ldur_sext_int(i32* %a) nounwind {
+; LDUR_CHK: ldur_sext_int
+; LDUR_CHK: ldpsw [[DST1:x[0-9]+]], [[DST2:x[0-9]+]], [x0, #-8]
+; LDUR_CHK-NEXT: add x{{[0-9]+}}, [[DST2]], [[DST1]]
+; LDUR_CHK-NEXT: ret
+ %p1 = getelementptr inbounds i32* %a, i32 -1
+ %tmp1 = load i32* %p1, align 2
+ %p2 = getelementptr inbounds i32* %a, i32 -2
+ %tmp2 = load i32* %p2, align 2
+ %sexttmp1 = sext i32 %tmp1 to i64
+ %sexttmp2 = sext i32 %tmp2 to i64
+ %tmp3 = add i64 %sexttmp1, %sexttmp2
+ ret i64 %tmp3
+}
+
define i64 @ldur_long(i64* %a) nounwind ssp {
; LDUR_CHK: ldur_long
; LDUR_CHK: ldp [[DST1:x[0-9]+]], [[DST2:x[0-9]+]], [x0, #-16]
@@ -110,6 +137,22 @@ define i64 @pairUpBarelyIn(i64* %a) nounwind ssp {
ret i64 %tmp3
}
+define i64 @pairUpBarelyInSext(i32* %a) nounwind ssp {
+; LDUR_CHK: pairUpBarelyInSext
+; LDUR_CHK-NOT: ldur
+; LDUR_CHK: ldpsw [[DST1:x[0-9]+]], [[DST2:x[0-9]+]], [x0, #-256]
+; LDUR_CHK-NEXT: add x{{[0-9]+}}, [[DST2]], [[DST1]]
+; LDUR_CHK-NEXT: ret
+ %p1 = getelementptr inbounds i32* %a, i64 -63
+ %tmp1 = load i32* %p1, align 2
+ %p2 = getelementptr inbounds i32* %a, i64 -64
+ %tmp2 = load i32* %p2, align 2
+ %sexttmp1 = sext i32 %tmp1 to i64
+ %sexttmp2 = sext i32 %tmp2 to i64
+ %tmp3 = add i64 %sexttmp1, %sexttmp2
+ ret i64 %tmp3
+}
+
define i64 @pairUpBarelyOut(i64* %a) nounwind ssp {
; LDUR_CHK: pairUpBarelyOut
; LDUR_CHK-NOT: ldp
@@ -125,6 +168,23 @@ define i64 @pairUpBarelyOut(i64* %a) nounwind ssp {
ret i64 %tmp3
}
+define i64 @pairUpBarelyOutSext(i32* %a) nounwind ssp {
+; LDUR_CHK: pairUpBarelyOutSext
+; LDUR_CHK-NOT: ldp
+; Don't be fragile about which loads or manipulations of the base register
+; are used---just check that there isn't an ldp before the add
+; LDUR_CHK: add
+; LDUR_CHK-NEXT: ret
+ %p1 = getelementptr inbounds i32* %a, i64 -64
+ %tmp1 = load i32* %p1, align 2
+ %p2 = getelementptr inbounds i32* %a, i64 -65
+ %tmp2 = load i32* %p2, align 2
+ %sexttmp1 = sext i32 %tmp1 to i64
+ %sexttmp2 = sext i32 %tmp2 to i64
+ %tmp3 = add i64 %sexttmp1, %sexttmp2
+ ret i64 %tmp3
+}
+
define i64 @pairUpNotAligned(i64* %a) nounwind ssp {
; LDUR_CHK: pairUpNotAligned
; LDUR_CHK-NOT: ldp
@@ -147,3 +207,28 @@ define i64 @pairUpNotAligned(i64* %a) nounwind ssp {
%tmp3 = add i64 %tmp1, %tmp2
ret i64 %tmp3
}
+
+define i64 @pairUpNotAlignedSext(i32* %a) nounwind ssp {
+; LDUR_CHK: pairUpNotAlignedSext
+; LDUR_CHK-NOT: ldp
+; LDUR_CHK: ldursw
+; LDUR_CHK-NEXT: ldursw
+; LDUR_CHK-NEXT: add
+; LDUR_CHK-NEXT: ret
+ %p1 = getelementptr inbounds i32* %a, i64 -18
+ %bp1 = bitcast i32* %p1 to i8*
+ %bp1p1 = getelementptr inbounds i8* %bp1, i64 1
+ %dp1 = bitcast i8* %bp1p1 to i32*
+ %tmp1 = load i32* %dp1, align 1
+
+ %p2 = getelementptr inbounds i32* %a, i64 -17
+ %bp2 = bitcast i32* %p2 to i8*
+ %bp2p1 = getelementptr inbounds i8* %bp2, i64 1
+ %dp2 = bitcast i8* %bp2p1 to i32*
+ %tmp2 = load i32* %dp2, align 1
+
+ %sexttmp1 = sext i32 %tmp1 to i64
+ %sexttmp2 = sext i32 %tmp2 to i64
+ %tmp3 = add i64 %sexttmp1, %sexttmp2
+ ret i64 %tmp3
+}
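The barely-in/barely-out pairs probe the ldpsw addressing window: the pair offset is a 7-bit signed immediate scaled by the 4-byte access size, so only byte offsets -256..252 are encodable. Worked out for the two boundary tests (indices from the tests above, arithmetic added here):

;   pairUpBarelyInSext:  index -64 * 4 bytes = -256  -> encodable, ldpsw is formed
;   pairUpBarelyOutSext: index -65 * 4 bytes = -260  -> out of range, no pair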
diff --git a/test/CodeGen/AArch64/arm64-named-reg-alloc.ll b/test/CodeGen/AArch64/arm64-named-reg-alloc.ll
index d86d2e6..0c56454 100644
--- a/test/CodeGen/AArch64/arm64-named-reg-alloc.ll
+++ b/test/CodeGen/AArch64/arm64-named-reg-alloc.ll
@@ -11,4 +11,4 @@ entry:
declare i32 @llvm.read_register.i32(metadata) nounwind
-!0 = metadata !{metadata !"x5\00"}
+!0 = !{!"x5\00"}
diff --git a/test/CodeGen/AArch64/arm64-named-reg-notareg.ll b/test/CodeGen/AArch64/arm64-named-reg-notareg.ll
index 3ca14c4..759bc15 100644
--- a/test/CodeGen/AArch64/arm64-named-reg-notareg.ll
+++ b/test/CodeGen/AArch64/arm64-named-reg-notareg.ll
@@ -10,4 +10,4 @@ entry:
declare i32 @llvm.read_register.i32(metadata) nounwind
-!0 = metadata !{metadata !"notareg\00"}
+!0 = !{!"notareg\00"}
diff --git a/test/CodeGen/AArch64/arm64-neon-copy.ll b/test/CodeGen/AArch64/arm64-neon-copy.ll
index 1cfba82..4a92c3d 100644
--- a/test/CodeGen/AArch64/arm64-neon-copy.ll
+++ b/test/CodeGen/AArch64/arm64-neon-copy.ll
@@ -188,7 +188,7 @@ define <2 x float> @ins4f2(<4 x float> %tmp1, <2 x float> %tmp2) {
define <1 x double> @ins2f1(<2 x double> %tmp1, <1 x double> %tmp2) {
; CHECK-LABEL: ins2f1:
-; CHECK: ins {{v[0-9]+}}.d[0], {{v[0-9]+}}.d[1]
+; CHECK: mov {{d[0-9]+}}, {{v[0-9]+}}.d[1]
%tmp3 = extractelement <2 x double> %tmp1, i32 1
%tmp4 = insertelement <1 x double> %tmp2, double %tmp3, i32 0
ret <1 x double> %tmp4
diff --git a/test/CodeGen/AArch64/arm64-neon-select_cc.ll b/test/CodeGen/AArch64/arm64-neon-select_cc.ll
index 95c582a..d334c08 100644
--- a/test/CodeGen/AArch64/arm64-neon-select_cc.ll
+++ b/test/CodeGen/AArch64/arm64-neon-select_cc.ll
@@ -204,3 +204,18 @@ define <2 x double> @test_select_cc_v2f64(double %a, double %b, <2 x double> %c,
%e = select i1 %cmp31, <2 x double> %c, <2 x double> %d
ret <2 x double> %e
}
+
+; Special case: when the select condition is an icmp with i1 operands, don't
+; do the comparison on vectors.
+; Part of PR21549.
+define <2 x i32> @test_select_cc_v2i32_icmpi1(i1 %cc, <2 x i32> %a, <2 x i32> %b) {
+; CHECK-LABEL: test_select_cc_v2i32_icmpi1:
+; CHECK: tst w0, #0x1
+; CHECK: csetm [[MASK:w[0-9]+]], ne
+; CHECK: dup [[DUPMASK:v[0-9]+]].2s, [[MASK]]
+; CHECK: bsl [[DUPMASK]].8b, v0.8b, v1.8b
+; CHECK: mov v0.16b, [[DUPMASK]].16b
+ %cmp = icmp ne i1 %cc, 0
+ %e = select i1 %cmp, <2 x i32> %a, <2 x i32> %b
+ ret <2 x i32> %e
+}
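The checked sequence is a scalar mask broadcast rather than a vector compare: csetm materializes the i1 as an all-ones-or-zero word, dup splats it, and bsl blends with it. The same computation spelled out in IR (an equivalence sketch, not the contents of the test):

  %mask = sext i1 %cmp to i32                    ; csetm: 0 or -1
  %m0 = insertelement <2 x i32> undef, i32 %mask, i32 0
  %m = shufflevector <2 x i32> %m0, <2 x i32> undef, <2 x i32> zeroinitializer ; dup
  %ta = and <2 x i32> %a, %m
  %notm = xor <2 x i32> %m, <i32 -1, i32 -1>
  %tb = and <2 x i32> %b, %notm
  %e = or <2 x i32> %ta, %tb                     ; bsl: (a & m) | (b & ~m)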
diff --git a/test/CodeGen/AArch64/arm64-platform-reg.ll b/test/CodeGen/AArch64/arm64-platform-reg.ll
index 651c793..b0d3ee0 100644
--- a/test/CodeGen/AArch64/arm64-platform-reg.ll
+++ b/test/CodeGen/AArch64/arm64-platform-reg.ll
@@ -1,4 +1,5 @@
-; RUN: llc -mtriple=arm64-apple-ios -o - %s | FileCheck %s --check-prefix=CHECK-DARWIN
+; RUN: llc -mtriple=arm64-apple-ios -o - %s | FileCheck %s --check-prefix=CHECK-RESERVE-X18
+; RUN: llc -mtriple=arm64-freebsd-gnu -aarch64-reserve-x18 -o - %s | FileCheck %s --check-prefix=CHECK-RESERVE-X18
; RUN: llc -mtriple=arm64-linux-gnu -o - %s | FileCheck %s
; x18 is reserved as a platform register on Darwin but not on other
@@ -16,11 +17,11 @@ define void @keep_live() {
; CHECK: ldr x18
; CHECK: str x18
-; CHECK-DARWIN-NOT: ldr fp
-; CHECK-DARWIN-NOT: ldr x18
-; CHECK-DARWIN: Spill
-; CHECK-DARWIN-NOT: ldr fp
-; CHECK-DARWIN-NOT: ldr x18
-; CHECK-DARWIN: ret
+; CHECK-RESERVE-X18-NOT: ldr fp
+; CHECK-RESERVE-X18-NOT: ldr x18
+; CHECK-RESERVE-X18: Spill
+; CHECK-RESERVE-X18-NOT: ldr fp
+; CHECK-RESERVE-X18-NOT: ldr x18
+; CHECK-RESERVE-X18: ret
ret void
}
diff --git a/test/CodeGen/AArch64/arm64-popcnt.ll b/test/CodeGen/AArch64/arm64-popcnt.ll
index 117ab3a..b0b529a 100644
--- a/test/CodeGen/AArch64/arm64-popcnt.ll
+++ b/test/CodeGen/AArch64/arm64-popcnt.ll
@@ -4,7 +4,8 @@
define i32 @cnt32_advsimd(i32 %x) nounwind readnone {
%cnt = tail call i32 @llvm.ctpop.i32(i32 %x)
ret i32 %cnt
-; CHECK: fmov s0, w0
+; CHECK: ubfx x{{[0-9]+}}
+; CHECK: fmov d0, x{{[0-9]+}}
; CHECK: cnt.8b v0, v0
; CHECK: uaddlv.8b h0, v0
; CHECK: fmov w0, s0
@@ -15,7 +16,24 @@ define i32 @cnt32_advsimd(i32 %x) nounwind readnone {
; CHECK-NONEON: and w{{[0-9]+}}, w{{[0-9]+}}, #0x33333333
; CHECK-NONEON: and w{{[0-9]+}}, w{{[0-9]+}}, #0xf0f0f0f
; CHECK-NONEON: mul
+}
+define i32 @cnt32_advsimd_2(<2 x i32> %x) {
+ %1 = extractelement <2 x i32> %x, i64 0
+ %2 = tail call i32 @llvm.ctpop.i32(i32 %1)
+ ret i32 %2
+; CHECK: fmov w0, s0
+; CHECK: fmov d0, x0
+; CHECK: cnt.8b v0, v0
+; CHECK: uaddlv.8b h0, v0
+; CHECK: fmov w0, s0
+; CHECK: ret
+; CHECK-NONEON-LABEL: cnt32_advsimd_2
+; CHECK-NONEON-NOT: 8b
+; CHECK-NONEON: and w{{[0-9]+}}, w{{[0-9]+}}, #0x55555555
+; CHECK-NONEON: and w{{[0-9]+}}, w{{[0-9]+}}, #0x33333333
+; CHECK-NONEON: and w{{[0-9]+}}, w{{[0-9]+}}, #0xf0f0f0f
+; CHECK-NONEON: mul
}
define i64 @cnt64_advsimd(i64 %x) nounwind readnone {
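The CHECK-NONEON masks are the classic SWAR population count: fold bit pairs, then nibbles, then bytes, and let a multiply accumulate the byte sums. As straight-line IR (a sketch of the expansion's shape, not copied from llc output):

  %t1 = lshr i32 %x, 1
  %t2 = and i32 %t1, 1431655765   ; 0x55555555
  %t3 = sub i32 %x, %t2           ; 2-bit counts
  %t4 = lshr i32 %t3, 2
  %t5 = and i32 %t4, 858993459    ; 0x33333333
  %t6 = and i32 %t3, 858993459
  %t7 = add i32 %t5, %t6          ; 4-bit counts
  %t8 = lshr i32 %t7, 4
  %t9 = add i32 %t7, %t8
  %t10 = and i32 %t9, 252645135   ; 0x0f0f0f0f: per-byte counts
  %t11 = mul i32 %t10, 16843009   ; 0x01010101 sums all bytes into byte 3
  %cnt = lshr i32 %t11, 24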
diff --git a/test/CodeGen/AArch64/arm64-prefetch.ll b/test/CodeGen/AArch64/arm64-prefetch.ll
index 9dc6301..aac3515 100644
--- a/test/CodeGen/AArch64/arm64-prefetch.ll
+++ b/test/CodeGen/AArch64/arm64-prefetch.ll
@@ -117,7 +117,7 @@ entry:
declare void @llvm.prefetch(i8* nocapture, i32, i32, i32) nounwind
-!0 = metadata !{metadata !"int", metadata !1}
-!1 = metadata !{metadata !"omnipotent char", metadata !2}
-!2 = metadata !{metadata !"Simple C/C++ TBAA"}
-!3 = metadata !{metadata !"any pointer", metadata !1}
+!0 = !{!"int", !1}
+!1 = !{!"omnipotent char", !2}
+!2 = !{!"Simple C/C++ TBAA"}
+!3 = !{!"any pointer", !1}
diff --git a/test/CodeGen/AArch64/arm64-promote-const.ll b/test/CodeGen/AArch64/arm64-promote-const.ll
index 380ff55..5dd92a7 100644
--- a/test/CodeGen/AArch64/arm64-promote-const.ll
+++ b/test/CodeGen/AArch64/arm64-promote-const.ll
@@ -41,8 +41,7 @@ entry:
; PROMOTED-LABEL: test2:
 ; In stress mode, constant vectors are promoted
; PROMOTED: adrp [[PAGEADDR:x[0-9]+]], [[CSTV1:__PromotedConst[0-9]+]]@PAGE
-; PROMOTED: add [[BASEADDR:x[0-9]+]], [[PAGEADDR]], [[CSTV1]]@PAGEOFF
-; PROMOTED: ldr q[[REGNUM:[0-9]+]], {{\[}}[[BASEADDR]]]
+; PROMOTED: ldr q[[REGNUM:[0-9]+]], {{\[}}[[PAGEADDR]], [[CSTV1]]@PAGEOFF]
; Destination register is defined by ABI
; PROMOTED-NEXT: add.16b v0, v0, v[[REGNUM]]
; PROMOTED-NEXT: mla.16b v0, v0, v[[REGNUM]]
@@ -64,51 +63,23 @@ entry:
ret <16 x i8> %add.i9
}
-; Two different uses of the sane constant in two different basic blocks,
+; Two different uses of the same constant in two different basic blocks,
; one dominates the other
define <16 x i8> @test3(<16 x i8> %arg, i32 %path) {
; PROMOTED-LABEL: test3:
 ; In stress mode, constant vectors are promoted
 ; Since the constant is the same as in the previous function,
; the same address must be used
-; PROMOTED: adrp [[PAGEADDR:x[0-9]+]], [[CSTV1]]@PAGE
-; PROMOTED: add [[BASEADDR:x[0-9]+]], [[PAGEADDR]], [[CSTV1]]@PAGEOFF
-; PROMOTED-NEXT: ldr q[[REGNUM:[0-9]+]], {{\[}}[[BASEADDR]]]
-; Destination register is defined by ABI
-; PROMOTED-NEXT: add.16b v0, v0, v[[REGNUM]]
-; PROMOTED-NEXT: cbnz w0, [[LABEL:LBB.*]]
-; Next BB
-; PROMOTED: adrp [[PAGEADDR:x[0-9]+]], [[CSTV2:__PromotedConst[0-9]+]]@PAGE
-; PROMOTED: add [[BASEADDR:x[0-9]+]], [[PAGEADDR]], [[CSTV2]]@PAGEOFF
-; PROMOTED-NEXT: ldr q[[REGNUM]], {{\[}}[[BASEADDR]]]
-; Next BB
-; PROMOTED-NEXT: [[LABEL]]:
-; PROMOTED-NEXT: mul.16b [[DESTV:v[0-9]+]], v0, v[[REGNUM]]
-; PROMOTED-NEXT: add.16b v0, v0, [[DESTV]]
-; PROMOTED-NEXT: ret
+; PROMOTED: ldr
+; PROMOTED: ldr
+; PROMOTED-NOT: ldr
+; PROMOTED: ret
; REGULAR-LABEL: test3:
-; Regular mode does not elimitate common sub expression by its own.
-; In other words, the same loads appears several times.
-; REGULAR: adrp [[PAGEADDR:x[0-9]+]], [[CSTLABEL1:lCP.*]]@PAGE
-; REGULAR-NEXT: ldr q[[REGNUM:[0-9]+]], {{\[}}[[PAGEADDR]], [[CSTLABEL1]]@PAGEOFF]
-; Destination register is defined by ABI
-; REGULAR-NEXT: add.16b v0, v0, v[[REGNUM]]
-; REGULAR-NEXT: cbz w0, [[LABELelse:LBB.*]]
-; Next BB
-; Redundant load
-; REGULAR: adrp [[PAGEADDR:x[0-9]+]], [[CSTLABEL1]]@PAGE
-; REGULAR-NEXT: ldr q[[REGNUM]], {{\[}}[[PAGEADDR]], [[CSTLABEL1]]@PAGEOFF]
-; REGULAR-NEXT: b [[LABELend:LBB.*]]
-; Next BB
-; REGULAR-NEXT: [[LABELelse]]
-; REGULAR-NEXT: adrp [[PAGEADDR:x[0-9]+]], [[CSTLABEL2:lCP.*]]@PAGE
-; REGULAR-NEXT: ldr q[[REGNUM]], {{\[}}[[PAGEADDR]], [[CSTLABEL2]]@PAGEOFF]
-; Next BB
-; REGULAR-NEXT: [[LABELend]]:
-; REGULAR-NEXT: mul.16b [[DESTV:v[0-9]+]], v0, v[[REGNUM]]
-; REGULAR-NEXT: add.16b v0, v0, [[DESTV]]
-; REGULAR-NEXT: ret
+; REGULAR: ldr
+; REGULAR: ldr
+; REGULAR-NOT: ldr
+; REGULAR: ret
entry:
%add.i = add <16 x i8> %arg, <i8 -40, i8 -93, i8 -118, i8 -99, i8 -75, i8 -105, i8 74, i8 -110, i8 62, i8 -115, i8 -119, i8 -120, i8 34, i8 -124, i8 0, i8 -128>
%tobool = icmp eq i32 %path, 0
@@ -135,34 +106,14 @@ define <16 x i8> @test4(<16 x i8> %arg, i32 %path) {
 ; In stress mode, constant vectors are promoted
 ; Since the constant is the same as in the previous function,
; the same address must be used
-; PROMOTED: adrp [[PAGEADDR:x[0-9]+]], [[CSTV1]]@PAGE
-; PROMOTED: add [[BASEADDR:x[0-9]+]], [[PAGEADDR]], [[CSTV1]]@PAGEOFF
-; PROMOTED-NEXT: ldr q[[REGNUM:[0-9]+]], {{\[}}[[BASEADDR]]]
-; Destination register is defined by ABI
-; PROMOTED-NEXT: add.16b v0, v0, v[[REGNUM]]
-; PROMOTED-NEXT: cbz w0, [[LABEL:LBB.*]]
-; Next BB
-; PROMOTED: mul.16b v0, v0, v[[REGNUM]]
-; Next BB
-; PROMOTED-NEXT: [[LABEL]]:
-; PROMOTED-NEXT: ret
-
+; PROMOTED: ldr
+; PROMOTED-NOT: ldr
+; PROMOTED: ret
; REGULAR-LABEL: test4:
-; REGULAR: adrp [[PAGEADDR:x[0-9]+]], [[CSTLABEL3:lCP.*]]@PAGE
-; REGULAR-NEXT: ldr q[[REGNUM:[0-9]+]], {{\[}}[[PAGEADDR]], [[CSTLABEL3]]@PAGEOFF]
-; Destination register is defined by ABI
-; REGULAR-NEXT: add.16b v0, v0, v[[REGNUM]]
-; REGULAR-NEXT: cbz w0, [[LABEL:LBB.*]]
-; Next BB
-; Redundant expression
-; REGULAR: adrp [[PAGEADDR:x[0-9]+]], [[CSTLABEL3]]@PAGE
-; REGULAR-NEXT: ldr q[[REGNUM:[0-9]+]], {{\[}}[[PAGEADDR]], [[CSTLABEL3]]@PAGEOFF]
-; Destination register is defined by ABI
-; REGULAR-NEXT: mul.16b v0, v0, v[[REGNUM]]
-; Next BB
-; REGULAR-NEXT: [[LABEL]]:
-; REGULAR-NEXT: ret
+; REGULAR: ldr
+; REGULAR-NOT: ldr
+; REGULAR: ret
entry:
%add.i = add <16 x i8> %arg, <i8 -40, i8 -93, i8 -118, i8 -99, i8 -75, i8 -105, i8 74, i8 -110, i8 62, i8 -115, i8 -119, i8 -120, i8 34, i8 -124, i8 0, i8 -128>
%tobool = icmp eq i32 %path, 0
@@ -184,40 +135,13 @@ define <16 x i8> @test5(<16 x i8> %arg, i32 %path) {
 ; In stress mode, constant vectors are promoted
 ; Since the constant is the same as in the previous function,
; the same address must be used
-; PROMOTED: adrp [[PAGEADDR:x[0-9]+]], [[CSTV1]]@PAGE
-; PROMOTED: add [[BASEADDR:x[0-9]+]], [[PAGEADDR]], [[CSTV1]]@PAGEOFF
-; PROMOTED-NEXT: ldr q[[REGNUM:[0-9]+]], {{\[}}[[BASEADDR]]]
-; PROMOTED-NEXT: cbz w0, [[LABEL:LBB.*]]
-; Next BB
-; PROMOTED: add.16b [[DESTV:v[0-9]+]], v0, v[[REGNUM]]
-; PROMOTED-NEXT: mul.16b v[[REGNUM]], [[DESTV]], v[[REGNUM]]
-; Next BB
-; PROMOTED-NEXT: [[LABEL]]:
-; PROMOTED-NEXT: mul.16b [[TMP1:v[0-9]+]], v[[REGNUM]], v[[REGNUM]]
-; PROMOTED-NEXT: mul.16b [[TMP2:v[0-9]+]], [[TMP1]], [[TMP1]]
-; PROMOTED-NEXT: mul.16b [[TMP3:v[0-9]+]], [[TMP2]], [[TMP2]]
-; PROMOTED-NEXT: mul.16b v0, [[TMP3]], [[TMP3]]
-; PROMOTED-NEXT: ret
+; PROMOTED: ldr
+; PROMOTED-NOT: ldr
+; PROMOTED: ret
; REGULAR-LABEL: test5:
-; REGULAR: cbz w0, [[LABELelse:LBB.*]]
-; Next BB
-; REGULAR: adrp [[PAGEADDR:x[0-9]+]], [[CSTLABEL:lCP.*]]@PAGE
-; REGULAR-NEXT: ldr q[[REGNUM:[0-9]+]], {{\[}}[[PAGEADDR]], [[CSTLABEL]]@PAGEOFF]
-; REGULAR-NEXT: add.16b [[DESTV:v[0-9]+]], v0, v[[REGNUM]]
-; REGULAR-NEXT: mul.16b v[[DESTREGNUM:[0-9]+]], [[DESTV]], v[[REGNUM]]
-; REGULAR-NEXT: b [[LABELend:LBB.*]]
-; Next BB
-; REGULAR-NEXT: [[LABELelse]]
-; REGULAR-NEXT: adrp [[PAGEADDR:x[0-9]+]], [[CSTLABEL:lCP.*]]@PAGE
-; REGULAR-NEXT: ldr q[[DESTREGNUM]], {{\[}}[[PAGEADDR]], [[CSTLABEL]]@PAGEOFF]
-; Next BB
-; REGULAR-NEXT: [[LABELend]]:
-; REGULAR-NEXT: mul.16b [[TMP1:v[0-9]+]], v[[DESTREGNUM]], v[[DESTREGNUM]]
-; REGULAR-NEXT: mul.16b [[TMP2:v[0-9]+]], [[TMP1]], [[TMP1]]
-; REGULAR-NEXT: mul.16b [[TMP3:v[0-9]+]], [[TMP2]], [[TMP2]]
-; REGULAR-NEXT: mul.16b v0, [[TMP3]], [[TMP3]]
-; REGULAR-NEXT: ret
+; REGULAR: ldr
+; REGULAR: ret
entry:
%tobool = icmp eq i32 %path, 0
br i1 %tobool, label %if.end, label %if.then
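The first hunk shows the codegen change itself: the @PAGEOFF add is folded into the ldr addressing mode, so materializing a pooled constant takes two instructions instead of three; the remaining hunks relax checks that had hard-coded the old three-instruction sequence. The fold, sketched (register and symbol names illustrative):

;   before:  adrp x8, __PromotedConst@PAGE
;            add  x8, x8, __PromotedConst@PAGEOFF
;            ldr  q0, [x8]
;   after:   adrp x8, __PromotedConst@PAGE
;            ldr  q0, [x8, __PromotedConst@PAGEOFF]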
diff --git a/test/CodeGen/AArch64/arm64-st1.ll b/test/CodeGen/AArch64/arm64-st1.ll
index 4370484..76d52f4 100644
--- a/test/CodeGen/AArch64/arm64-st1.ll
+++ b/test/CodeGen/AArch64/arm64-st1.ll
@@ -8,6 +8,26 @@ define void @st1lane_16b(<16 x i8> %A, i8* %D) {
ret void
}
+define void @st1lane_ro_16b(<16 x i8> %A, i8* %D, i64 %offset) {
+; CHECK-LABEL: st1lane_ro_16b
+; CHECK: add x[[XREG:[0-9]+]], x0, x1
+; CHECK: st1.b { v0 }[1], [x[[XREG]]]
+ %ptr = getelementptr i8* %D, i64 %offset
+ %tmp = extractelement <16 x i8> %A, i32 1
+ store i8 %tmp, i8* %ptr
+ ret void
+}
+
+define void @st1lane0_ro_16b(<16 x i8> %A, i8* %D, i64 %offset) {
+; CHECK-LABEL: st1lane0_ro_16b
+; CHECK: add x[[XREG:[0-9]+]], x0, x1
+; CHECK: st1.b { v0 }[0], [x[[XREG]]]
+ %ptr = getelementptr i8* %D, i64 %offset
+ %tmp = extractelement <16 x i8> %A, i32 0
+ store i8 %tmp, i8* %ptr
+ ret void
+}
+
define void @st1lane_8h(<8 x i16> %A, i16* %D) {
; CHECK-LABEL: st1lane_8h
; CHECK: st1.h
@@ -16,6 +36,25 @@ define void @st1lane_8h(<8 x i16> %A, i16* %D) {
ret void
}
+define void @st1lane_ro_8h(<8 x i16> %A, i16* %D, i64 %offset) {
+; CHECK-LABEL: st1lane_ro_8h
+; CHECK: add x[[XREG:[0-9]+]], x0, x1
+; CHECK: st1.h { v0 }[1], [x[[XREG]]]
+ %ptr = getelementptr i16* %D, i64 %offset
+ %tmp = extractelement <8 x i16> %A, i32 1
+ store i16 %tmp, i16* %ptr
+ ret void
+}
+
+define void @st1lane0_ro_8h(<8 x i16> %A, i16* %D, i64 %offset) {
+; CHECK-LABEL: st1lane0_ro_8h
+; CHECK: str h0, [x0, x1, lsl #1]
+ %ptr = getelementptr i16* %D, i64 %offset
+ %tmp = extractelement <8 x i16> %A, i32 0
+ store i16 %tmp, i16* %ptr
+ ret void
+}
+
define void @st1lane_4s(<4 x i32> %A, i32* %D) {
; CHECK-LABEL: st1lane_4s
; CHECK: st1.s
@@ -24,6 +63,25 @@ define void @st1lane_4s(<4 x i32> %A, i32* %D) {
ret void
}
+define void @st1lane_ro_4s(<4 x i32> %A, i32* %D, i64 %offset) {
+; CHECK-LABEL: st1lane_ro_4s
+; CHECK: add x[[XREG:[0-9]+]], x0, x1
+; CHECK: st1.s { v0 }[1], [x[[XREG]]]
+ %ptr = getelementptr i32* %D, i64 %offset
+ %tmp = extractelement <4 x i32> %A, i32 1
+ store i32 %tmp, i32* %ptr
+ ret void
+}
+
+define void @st1lane0_ro_4s(<4 x i32> %A, i32* %D, i64 %offset) {
+; CHECK-LABEL: st1lane0_ro_4s
+; CHECK: str s0, [x0, x1, lsl #2]
+ %ptr = getelementptr i32* %D, i64 %offset
+ %tmp = extractelement <4 x i32> %A, i32 0
+ store i32 %tmp, i32* %ptr
+ ret void
+}
+
define void @st1lane_4s_float(<4 x float> %A, float* %D) {
; CHECK-LABEL: st1lane_4s_float
; CHECK: st1.s
@@ -32,6 +90,25 @@ define void @st1lane_4s_float(<4 x float> %A, float* %D) {
ret void
}
+define void @st1lane_ro_4s_float(<4 x float> %A, float* %D, i64 %offset) {
+; CHECK-LABEL: st1lane_ro_4s_float
+; CHECK: add x[[XREG:[0-9]+]], x0, x1
+; CHECK: st1.s { v0 }[1], [x[[XREG]]]
+ %ptr = getelementptr float* %D, i64 %offset
+ %tmp = extractelement <4 x float> %A, i32 1
+ store float %tmp, float* %ptr
+ ret void
+}
+
+define void @st1lane0_ro_4s_float(<4 x float> %A, float* %D, i64 %offset) {
+; CHECK-LABEL: st1lane0_ro_4s_float
+; CHECK: str s0, [x0, x1, lsl #2]
+ %ptr = getelementptr float* %D, i64 %offset
+ %tmp = extractelement <4 x float> %A, i32 0
+ store float %tmp, float* %ptr
+ ret void
+}
+
define void @st1lane_2d(<2 x i64> %A, i64* %D) {
; CHECK-LABEL: st1lane_2d
; CHECK: st1.d
@@ -40,6 +117,25 @@ define void @st1lane_2d(<2 x i64> %A, i64* %D) {
ret void
}
+define void @st1lane_ro_2d(<2 x i64> %A, i64* %D, i64 %offset) {
+; CHECK-LABEL: st1lane_ro_2d
+; CHECK: add x[[XREG:[0-9]+]], x0, x1
+; CHECK: st1.d { v0 }[1], [x[[XREG]]]
+ %ptr = getelementptr i64* %D, i64 %offset
+ %tmp = extractelement <2 x i64> %A, i32 1
+ store i64 %tmp, i64* %ptr
+ ret void
+}
+
+define void @st1lane0_ro_2d(<2 x i64> %A, i64* %D, i64 %offset) {
+; CHECK-LABEL: st1lane0_ro_2d
+; CHECK: str d0, [x0, x1, lsl #3]
+ %ptr = getelementptr i64* %D, i64 %offset
+ %tmp = extractelement <2 x i64> %A, i32 0
+ store i64 %tmp, i64* %ptr
+ ret void
+}
+
define void @st1lane_2d_double(<2 x double> %A, double* %D) {
; CHECK-LABEL: st1lane_2d_double
; CHECK: st1.d
@@ -48,6 +144,25 @@ define void @st1lane_2d_double(<2 x double> %A, double* %D) {
ret void
}
+define void @st1lane_ro_2d_double(<2 x double> %A, double* %D, i64 %offset) {
+; CHECK-LABEL: st1lane_ro_2d_double
+; CHECK: add x[[XREG:[0-9]+]], x0, x1
+; CHECK: st1.d { v0 }[1], [x[[XREG]]]
+ %ptr = getelementptr double* %D, i64 %offset
+ %tmp = extractelement <2 x double> %A, i32 1
+ store double %tmp, double* %ptr
+ ret void
+}
+
+define void @st1lane0_ro_2d_double(<2 x double> %A, double* %D, i64 %offset) {
+; CHECK-LABEL: st1lane0_ro_2d_double
+; CHECK: str d0, [x0, x1, lsl #3]
+ %ptr = getelementptr double* %D, i64 %offset
+ %tmp = extractelement <2 x double> %A, i32 0
+ store double %tmp, double* %ptr
+ ret void
+}
+
define void @st1lane_8b(<8 x i8> %A, i8* %D) {
; CHECK-LABEL: st1lane_8b
; CHECK: st1.b
@@ -56,6 +171,26 @@ define void @st1lane_8b(<8 x i8> %A, i8* %D) {
ret void
}
+define void @st1lane_ro_8b(<8 x i8> %A, i8* %D, i64 %offset) {
+; CHECK-LABEL: st1lane_ro_8b
+; CHECK: add x[[XREG:[0-9]+]], x0, x1
+; CHECK: st1.b { v0 }[1], [x[[XREG]]]
+ %ptr = getelementptr i8* %D, i64 %offset
+ %tmp = extractelement <8 x i8> %A, i32 1
+ store i8 %tmp, i8* %ptr
+ ret void
+}
+
+define void @st1lane0_ro_8b(<8 x i8> %A, i8* %D, i64 %offset) {
+; CHECK-LABEL: st1lane0_ro_8b
+; CHECK: add x[[XREG:[0-9]+]], x0, x1
+; CHECK: st1.b { v0 }[0], [x[[XREG]]]
+ %ptr = getelementptr i8* %D, i64 %offset
+ %tmp = extractelement <8 x i8> %A, i32 0
+ store i8 %tmp, i8* %ptr
+ ret void
+}
+
define void @st1lane_4h(<4 x i16> %A, i16* %D) {
; CHECK-LABEL: st1lane_4h
; CHECK: st1.h
@@ -64,6 +199,25 @@ define void @st1lane_4h(<4 x i16> %A, i16* %D) {
ret void
}
+define void @st1lane_ro_4h(<4 x i16> %A, i16* %D, i64 %offset) {
+; CHECK-LABEL: st1lane_ro_4h
+; CHECK: add x[[XREG:[0-9]+]], x0, x1
+; CHECK: st1.h { v0 }[1], [x[[XREG]]]
+ %ptr = getelementptr i16* %D, i64 %offset
+ %tmp = extractelement <4 x i16> %A, i32 1
+ store i16 %tmp, i16* %ptr
+ ret void
+}
+
+define void @st1lane0_ro_4h(<4 x i16> %A, i16* %D, i64 %offset) {
+; CHECK-LABEL: st1lane0_ro_4h
+; CHECK: str h0, [x0, x1, lsl #1]
+ %ptr = getelementptr i16* %D, i64 %offset
+ %tmp = extractelement <4 x i16> %A, i32 0
+ store i16 %tmp, i16* %ptr
+ ret void
+}
+
define void @st1lane_2s(<2 x i32> %A, i32* %D) {
; CHECK-LABEL: st1lane_2s
; CHECK: st1.s
@@ -72,6 +226,25 @@ define void @st1lane_2s(<2 x i32> %A, i32* %D) {
ret void
}
+define void @st1lane_ro_2s(<2 x i32> %A, i32* %D, i64 %offset) {
+; CHECK-LABEL: st1lane_ro_2s
+; CHECK: add x[[XREG:[0-9]+]], x0, x1
+; CHECK: st1.s { v0 }[1], [x[[XREG]]]
+ %ptr = getelementptr i32* %D, i64 %offset
+ %tmp = extractelement <2 x i32> %A, i32 1
+ store i32 %tmp, i32* %ptr
+ ret void
+}
+
+define void @st1lane0_ro_2s(<2 x i32> %A, i32* %D, i64 %offset) {
+; CHECK-LABEL: st1lane0_ro_2s
+; CHECK: str s0, [x0, x1, lsl #2]
+ %ptr = getelementptr i32* %D, i64 %offset
+ %tmp = extractelement <2 x i32> %A, i32 0
+ store i32 %tmp, i32* %ptr
+ ret void
+}
+
define void @st1lane_2s_float(<2 x float> %A, float* %D) {
; CHECK-LABEL: st1lane_2s_float
; CHECK: st1.s
@@ -80,6 +253,25 @@ define void @st1lane_2s_float(<2 x float> %A, float* %D) {
ret void
}
+define void @st1lane_ro_2s_float(<2 x float> %A, float* %D, i64 %offset) {
+; CHECK-LABEL: st1lane_ro_2s_float
+; CHECK: add x[[XREG:[0-9]+]], x0, x1
+; CHECK: st1.s { v0 }[1], [x[[XREG]]]
+ %ptr = getelementptr float* %D, i64 %offset
+ %tmp = extractelement <2 x float> %A, i32 1
+ store float %tmp, float* %ptr
+ ret void
+}
+
+define void @st1lane0_ro_2s_float(<2 x float> %A, float* %D, i64 %offset) {
+; CHECK-LABEL: st1lane0_ro_2s_float
+; CHECK: str s0, [x0, x1, lsl #2]
+ %ptr = getelementptr float* %D, i64 %offset
+ %tmp = extractelement <2 x float> %A, i32 0
+ store float %tmp, float* %ptr
+ ret void
+}
+
define void @st2lane_16b(<16 x i8> %A, <16 x i8> %B, i8* %D) {
; CHECK-LABEL: st2lane_16b
; CHECK: st2.b
diff --git a/test/CodeGen/AArch64/arm64-stackmap-nops.ll b/test/CodeGen/AArch64/arm64-stackmap-nops.ll
new file mode 100644
index 0000000..5915b64
--- /dev/null
+++ b/test/CodeGen/AArch64/arm64-stackmap-nops.ll
@@ -0,0 +1,15 @@
+; RUN: llc < %s -mtriple=arm64-apple-darwin | FileCheck %s
+
+define void @test_shadow_optimization() {
+entry:
+; Expect 8 bytes' worth of nops here rather than 16: with the shadow
+; optimization in place, 8 bytes will be consumed by the frame teardown and
+; return instruction.
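+; A back-of-the-envelope sketch of that arithmetic (assuming 4-byte
+; instructions): 16 requested shadow bytes - 8 epilogue bytes = 8 bytes,
+; i.e. exactly the two nops checked below.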
+; CHECK-LABEL: test_shadow_optimization:
+; CHECK: nop
+; CHECK-NEXT: nop
+; CHECK-NOT: nop
+ tail call void (i64, i32, ...)* @llvm.experimental.stackmap(i64 0, i32 16)
+ ret void
+}
+
+declare void @llvm.experimental.stackmap(i64, i32, ...)
diff --git a/test/CodeGen/AArch64/arm64-stackpointer.ll b/test/CodeGen/AArch64/arm64-stackpointer.ll
index 581faf1..a33de8c 100644
--- a/test/CodeGen/AArch64/arm64-stackpointer.ll
+++ b/test/CodeGen/AArch64/arm64-stackpointer.ll
@@ -21,4 +21,4 @@ declare void @llvm.write_register.i64(metadata, i64) nounwind
; register unsigned long current_stack_pointer asm("sp");
; CHECK-NOT: .asciz "sp"
-!0 = metadata !{metadata !"sp\00"}
+!0 = !{!"sp\00"}
diff --git a/test/CodeGen/AArch64/arm64-tls-dynamics.ll b/test/CodeGen/AArch64/arm64-tls-dynamics.ll
index e8a83fd..30ea63b 100644
--- a/test/CodeGen/AArch64/arm64-tls-dynamics.ll
+++ b/test/CodeGen/AArch64/arm64-tls-dynamics.ll
@@ -20,7 +20,7 @@ define i32 @test_generaldynamic() {
; CHECK: mrs x[[TP:[0-9]+]], TPIDR_EL0
; CHECK: ldr w0, [x[[TP]], x0]
-; CHECK-RELOC: R_AARCH64_TLSDESC_ADR_PAGE
+; CHECK-RELOC: R_AARCH64_TLSDESC_ADR_PAGE21
; CHECK-RELOC: R_AARCH64_TLSDESC_ADD_LO12_NC
; CHECK-RELOC: R_AARCH64_TLSDESC_LD64_LO12_NC
; CHECK-RELOC: R_AARCH64_TLSDESC_CALL
@@ -43,7 +43,7 @@ define i32* @test_generaldynamic_addr() {
; CHECK: mrs [[TP:x[0-9]+]], TPIDR_EL0
; CHECK: add x0, [[TP]], x0
-; CHECK-RELOC: R_AARCH64_TLSDESC_ADR_PAGE
+; CHECK-RELOC: R_AARCH64_TLSDESC_ADR_PAGE21
; CHECK-RELOC: R_AARCH64_TLSDESC_ADD_LO12_NC
; CHECK-RELOC: R_AARCH64_TLSDESC_LD64_LO12_NC
; CHECK-RELOC: R_AARCH64_TLSDESC_CALL
@@ -73,7 +73,7 @@ define i32 @test_localdynamic() {
; CHECK: ldr w0, [x[[TPIDR]], x[[TPREL]]]
-; CHECK-RELOC: R_AARCH64_TLSDESC_ADR_PAGE
+; CHECK-RELOC: R_AARCH64_TLSDESC_ADR_PAGE21
; CHECK-RELOC: R_AARCH64_TLSDESC_ADD_LO12_NC
; CHECK-RELOC: R_AARCH64_TLSDESC_LD64_LO12_NC
; CHECK-RELOC: R_AARCH64_TLSDESC_CALL
@@ -101,7 +101,7 @@ define i32* @test_localdynamic_addr() {
; CHECK: add x0, [[TPIDR]], [[TPREL]]
-; CHECK-RELOC: R_AARCH64_TLSDESC_ADR_PAGE
+; CHECK-RELOC: R_AARCH64_TLSDESC_ADR_PAGE21
; CHECK-RELOC: R_AARCH64_TLSDESC_ADD_LO12_NC
; CHECK-RELOC: R_AARCH64_TLSDESC_LD64_LO12_NC
; CHECK-RELOC: R_AARCH64_TLSDESC_CALL
diff --git a/test/CodeGen/AArch64/arm64-triv-disjoint-mem-access.ll b/test/CodeGen/AArch64/arm64-triv-disjoint-mem-access.ll
index a7f5215..923742d 100644
--- a/test/CodeGen/AArch64/arm64-triv-disjoint-mem-access.ll
+++ b/test/CodeGen/AArch64/arm64-triv-disjoint-mem-access.ll
@@ -22,10 +22,10 @@ attributes #0 = { nounwind "less-precise-fpmad"="false" "no-frame-pointer-elim"=
!llvm.ident = !{!0}
-!0 = metadata !{metadata !"clang version 3.6.0 "}
-!1 = metadata !{metadata !2, metadata !2, i64 0}
-!2 = metadata !{metadata !"any pointer", metadata !3, i64 0}
-!3 = metadata !{metadata !"omnipotent char", metadata !4, i64 0}
-!4 = metadata !{metadata !"Simple C/C++ TBAA"}
-!5 = metadata !{metadata !6, metadata !6, i64 0}
-!6 = metadata !{metadata !"int", metadata !3, i64 0}
+!0 = !{!"clang version 3.6.0 "}
+!1 = !{!2, !2, i64 0}
+!2 = !{!"any pointer", !3, i64 0}
+!3 = !{!"omnipotent char", !4, i64 0}
+!4 = !{!"Simple C/C++ TBAA"}
+!5 = !{!6, !6, i64 0}
+!6 = !{!"int", !3, i64 0}
diff --git a/test/CodeGen/AArch64/arm64-variadic-aapcs.ll b/test/CodeGen/AArch64/arm64-variadic-aapcs.ll
index 36a7bfd..44f2af1 100644
--- a/test/CodeGen/AArch64/arm64-variadic-aapcs.ll
+++ b/test/CodeGen/AArch64/arm64-variadic-aapcs.ll
@@ -12,6 +12,7 @@ define void @test_simple(i32 %n, ...) {
; CHECK: add [[STACK_TOP:x[0-9]+]], sp, #[[STACKSIZE]]
; CHECK: adrp x[[VA_LIST_HI:[0-9]+]], var
+; CHECK: add x[[VA_LIST:[0-9]+]], {{x[0-9]+}}, :lo12:var
; CHECK: stp x1, x2, [sp, #[[GR_BASE:[0-9]+]]]
; ... omit middle ones ...
@@ -21,11 +22,10 @@ define void @test_simple(i32 %n, ...) {
; ... omit middle ones ...
; CHECK: stp q6, q7, [sp, #
-; CHECK: str [[STACK_TOP]], [x[[VA_LIST_HI]], :lo12:var]
+; CHECK: str [[STACK_TOP]], [x[[VA_LIST]]]
; CHECK: add [[GR_TOPTMP:x[0-9]+]], sp, #[[GR_BASE]]
; CHECK: add [[GR_TOP:x[0-9]+]], [[GR_TOPTMP]], #56
-; CHECK: add x[[VA_LIST:[0-9]+]], {{x[0-9]+}}, :lo12:var
; CHECK: str [[GR_TOP]], [x[[VA_LIST]], #8]
; CHECK: mov [[VR_TOPTMP:x[0-9]+]], sp
@@ -50,6 +50,7 @@ define void @test_fewargs(i32 %n, i32 %n1, i32 %n2, float %m, ...) {
; CHECK: add [[STACK_TOP:x[0-9]+]], sp, #[[STACKSIZE]]
; CHECK: adrp x[[VA_LIST_HI:[0-9]+]], var
+; CHECK: add x[[VA_LIST:[0-9]+]], {{x[0-9]+}}, :lo12:var
; CHECK: stp x3, x4, [sp, #[[GR_BASE:[0-9]+]]]
; ... omit middle ones ...
@@ -59,11 +60,10 @@ define void @test_fewargs(i32 %n, i32 %n1, i32 %n2, float %m, ...) {
; ... omit middle ones ...
; CHECK: str q7, [sp, #
-; CHECK: str [[STACK_TOP]], [x[[VA_LIST_HI]], :lo12:var]
+; CHECK: str [[STACK_TOP]], [x[[VA_LIST]]]
; CHECK: add [[GR_TOPTMP:x[0-9]+]], sp, #[[GR_BASE]]
; CHECK: add [[GR_TOP:x[0-9]+]], [[GR_TOPTMP]], #40
-; CHECK: add x[[VA_LIST:[0-9]+]], {{x[0-9]+}}, :lo12:var
; CHECK: str [[GR_TOP]], [x[[VA_LIST]], #8]
; CHECK: mov [[VR_TOPTMP:x[0-9]+]], sp
@@ -89,18 +89,20 @@ define void @test_nospare([8 x i64], [8 x float], ...) {
call void @llvm.va_start(i8* %addr)
; CHECK-NOT: sub sp, sp
; CHECK: mov [[STACK:x[0-9]+]], sp
-; CHECK: str [[STACK]], [{{x[0-9]+}}, :lo12:var]
+; CHECK: add x[[VAR:[0-9]+]], {{x[0-9]+}}, :lo12:var
+; CHECK: str [[STACK]], [x[[VAR]]]
ret void
}
; If there are non-variadic arguments on the stack (here two i64s) then the
; __stack field should point just past them.
-define void @test_offsetstack([10 x i64], [3 x float], ...) {
+define void @test_offsetstack([8 x i64], [2 x i64], [3 x float], ...) {
; CHECK-LABEL: test_offsetstack:
; CHECK: sub sp, sp, #80
; CHECK: add [[STACK_TOP:x[0-9]+]], sp, #96
-; CHECK: str [[STACK_TOP]], [{{x[0-9]+}}, :lo12:var]
+; CHECK: add x[[VAR:[0-9]+]], {{x[0-9]+}}, :lo12:var
+; CHECK: str [[STACK_TOP]], [x[[VAR]]]
%addr = bitcast %va_list* @var to i8*
call void @llvm.va_start(i8* %addr)
diff --git a/test/CodeGen/AArch64/arm64-vshuffle.ll b/test/CodeGen/AArch64/arm64-vshuffle.ll
index 62fd961..75e0d80 100644
--- a/test/CodeGen/AArch64/arm64-vshuffle.ll
+++ b/test/CodeGen/AArch64/arm64-vshuffle.ll
@@ -29,14 +29,14 @@ entry:
}
; CHECK: lCPI1_0:
-; CHECK: .byte 2 ; 0x2
+; CHECK: .byte 0 ; 0x0
; CHECK: .byte 255 ; 0xff
-; CHECK: .byte 6 ; 0x6
+; CHECK: .byte 2 ; 0x2
; CHECK: .byte 255 ; 0xff
; CHECK: .byte 10 ; 0xa
; CHECK: .byte 12 ; 0xc
; CHECK: .byte 14 ; 0xe
-; CHECK: .byte 0 ; 0x0
+; CHECK: .byte 7 ; 0x7
; CHECK: test2
; CHECK: ldr d[[REG0:[0-9]+]], [{{.*}}, lCPI1_0@PAGEOFF]
; CHECK: adrp x[[REG2:[0-9]+]], lCPI1_1@PAGE
@@ -82,22 +82,22 @@ bb:
ret <16 x i1> %Shuff
}
; CHECK: lCPI3_1:
-; CHECK: .byte 2 ; 0x2
-; CHECK: .byte 1 ; 0x1
-; CHECK: .byte 6 ; 0x6
-; CHECK: .byte 18 ; 0x12
-; CHECK: .byte 10 ; 0xa
-; CHECK: .byte 12 ; 0xc
-; CHECK: .byte 14 ; 0xe
; CHECK: .byte 0 ; 0x0
+; CHECK: .byte 1 ; 0x1
; CHECK: .byte 2 ; 0x2
-; CHECK: .byte 31 ; 0x1f
+; CHECK: .byte 18 ; 0x12
+; CHECK: .byte 4 ; 0x4
+; CHECK: .byte 5 ; 0x5
; CHECK: .byte 6 ; 0x6
-; CHECK: .byte 30 ; 0x1e
+; CHECK: .byte 7 ; 0x7
+; CHECK: .byte 8 ; 0x8
+; CHECK: .byte 31 ; 0x1f
; CHECK: .byte 10 ; 0xa
+; CHECK: .byte 30 ; 0x1e
; CHECK: .byte 12 ; 0xc
+; CHECK: .byte 13 ; 0xd
; CHECK: .byte 14 ; 0xe
-; CHECK: .byte 0 ; 0x0
+; CHECK: .byte 15 ; 0xf
; CHECK: _test4:
; CHECK: ldr q[[REG1:[0-9]+]]
; CHECK: movi.2d v[[REG0:[0-9]+]], #0000000000000000
diff --git a/test/CodeGen/AArch64/bitcast-v2i8.ll b/test/CodeGen/AArch64/bitcast-v2i8.ll
new file mode 100644
index 0000000..4bdac64
--- /dev/null
+++ b/test/CodeGen/AArch64/bitcast-v2i8.ll
@@ -0,0 +1,15 @@
+; RUN: llc < %s -mtriple=aarch64-apple-ios | FileCheck %s
+
+; Part of PR21549: going through the stack isn't ideal but is correct.
+
+define i16 @test_bitcast_v2i8_to_i16(<2 x i8> %a) {
+; CHECK-LABEL: test_bitcast_v2i8_to_i16
+; CHECK: mov.s [[WREG_HI:w[0-9]+]], v0[1]
+; CHECK-NEXT: fmov [[WREG_LO:w[0-9]+]], s0
+; CHECK-NEXT: strb [[WREG_HI]], [sp, #15]
+; CHECK-NEXT: strb [[WREG_LO]], [sp, #14]
+; CHECK-NEXT: ldrh w0, [sp, #14]
+
+ %aa = bitcast <2 x i8> %a to i16
+ ret i16 %aa
+}
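+
+; A note on the layout assumed above: on this little-endian target, lane 0 is
+; the low byte at [sp, #14] and lane 1 the high byte at [sp, #15], so the
+; ldrh reassembles the i16 as lane0 | (lane1 << 8).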
diff --git a/test/CodeGen/AArch64/br-to-eh-lpad.ll b/test/CodeGen/AArch64/br-to-eh-lpad.ll
new file mode 100644
index 0000000..20bffd9
--- /dev/null
+++ b/test/CodeGen/AArch64/br-to-eh-lpad.ll
@@ -0,0 +1,78 @@
+; RUN: llc < %s -mtriple=aarch64-apple-ios -verify-machineinstrs
+
+; This function tests that the machine verifier accepts an unconditional
+; branch from an invoke basic block to its EH landing pad basic block.
+; The test is brittle and isn't ideally reduced, because in most cases the
+; branch would be removed (for instance, turned into a fallthrough); in that
+; case the machine verifier, which relies on analyzing branches for this kind
+; of verification, cannot check anything and simply accepts the CFG.
+
+define void @test_branch_to_landingpad() {
+entry:
+ br i1 undef, label %if.end50.thread, label %if.then6
+
+lpad:
+ %0 = landingpad { i8*, i32 } personality i8* bitcast (i32 (...)* @__objc_personality_v0 to i8*)
+ catch %struct._objc_typeinfo.12.129.194.285.350.493.519.532.571.597.623.765* @"OBJC_EHTYPE_$_NSString"
+ catch %struct._objc_typeinfo.12.129.194.285.350.493.519.532.571.597.623.765* @OBJC_EHTYPE_id
+ catch i8* null
+ br i1 undef, label %invoke.cont33, label %catch.fallthrough
+
+catch.fallthrough:
+ %matches31 = icmp eq i32 undef, 0
+ br i1 %matches31, label %invoke.cont41, label %finally.catchall
+
+if.then6:
+ invoke void @objc_exception_throw()
+ to label %invoke.cont7 unwind label %lpad
+
+invoke.cont7:
+ unreachable
+
+if.end50.thread:
+ tail call void (i8*, ...)* @printf(i8* getelementptr inbounds ([17 x i8]* @.str1, i64 0, i64 0), i32 125)
+ tail call void (i8*, ...)* @printf(i8* getelementptr inbounds ([17 x i8]* @.str1, i64 0, i64 0), i32 128)
+ unreachable
+
+invoke.cont33:
+ tail call void (i8*, ...)* @printf(i8* getelementptr inbounds ([17 x i8]* @.str1, i64 0, i64 0), i32 119)
+ unreachable
+
+invoke.cont41:
+ invoke void @objc_exception_rethrow()
+ to label %invoke.cont43 unwind label %lpad40
+
+invoke.cont43:
+ unreachable
+
+lpad40:
+ %1 = landingpad { i8*, i32 } personality i8* bitcast (i32 (...)* @__objc_personality_v0 to i8*)
+ catch i8* null
+ br label %finally.catchall
+
+finally.catchall:
+ tail call void (i8*, ...)* @printf(i8* getelementptr inbounds ([17 x i8]* @.str1, i64 0, i64 0), i32 125)
+ unreachable
+}
+
+%struct._objc_typeinfo.12.129.194.285.350.493.519.532.571.597.623.765 = type { i8**, i8*, %struct._class_t.10.127.192.283.348.491.517.530.569.595.621.764* }
+%struct._class_t.10.127.192.283.348.491.517.530.569.595.621.764 = type { %struct._class_t.10.127.192.283.348.491.517.530.569.595.621.764*, %struct._class_t.10.127.192.283.348.491.517.530.569.595.621.764*, %struct._objc_cache.0.117.182.273.338.481.507.520.559.585.611.754*, i8* (i8*, i8*)**, %struct._class_ro_t.9.126.191.282.347.490.516.529.568.594.620.763* }
+%struct._objc_cache.0.117.182.273.338.481.507.520.559.585.611.754 = type opaque
+%struct._class_ro_t.9.126.191.282.347.490.516.529.568.594.620.763 = type { i32, i32, i32, i8*, i8*, %struct.__method_list_t.2.119.184.275.340.483.509.522.561.587.613.756*, %struct._objc_protocol_list.6.123.188.279.344.487.513.526.565.591.617.760*, %struct._ivar_list_t.8.125.190.281.346.489.515.528.567.593.619.762*, i8*, %struct._prop_list_t.4.121.186.277.342.485.511.524.563.589.615.758* }
+%struct.__method_list_t.2.119.184.275.340.483.509.522.561.587.613.756 = type { i32, i32, [0 x %struct._objc_method.1.118.183.274.339.482.508.521.560.586.612.755] }
+%struct._objc_method.1.118.183.274.339.482.508.521.560.586.612.755 = type { i8*, i8*, i8* }
+%struct._objc_protocol_list.6.123.188.279.344.487.513.526.565.591.617.760 = type { i64, [0 x %struct._protocol_t.5.122.187.278.343.486.512.525.564.590.616.759*] }
+%struct._protocol_t.5.122.187.278.343.486.512.525.564.590.616.759 = type { i8*, i8*, %struct._objc_protocol_list.6.123.188.279.344.487.513.526.565.591.617.760*, %struct.__method_list_t.2.119.184.275.340.483.509.522.561.587.613.756*, %struct.__method_list_t.2.119.184.275.340.483.509.522.561.587.613.756*, %struct.__method_list_t.2.119.184.275.340.483.509.522.561.587.613.756*, %struct.__method_list_t.2.119.184.275.340.483.509.522.561.587.613.756*, %struct._prop_list_t.4.121.186.277.342.485.511.524.563.589.615.758*, i32, i32, i8** }
+%struct._ivar_list_t.8.125.190.281.346.489.515.528.567.593.619.762 = type { i32, i32, [0 x %struct._ivar_t.7.124.189.280.345.488.514.527.566.592.618.761] }
+%struct._ivar_t.7.124.189.280.345.488.514.527.566.592.618.761 = type { i32*, i8*, i8*, i32, i32 }
+%struct._prop_list_t.4.121.186.277.342.485.511.524.563.589.615.758 = type { i32, i32, [0 x %struct._prop_t.3.120.185.276.341.484.510.523.562.588.614.757] }
+%struct._prop_t.3.120.185.276.341.484.510.523.562.588.614.757 = type { i8*, i8* }
+
+@.str1 = external unnamed_addr constant [17 x i8], align 1
+@OBJC_EHTYPE_id = external global %struct._objc_typeinfo.12.129.194.285.350.493.519.532.571.597.623.765
+@"OBJC_EHTYPE_$_NSString" = external global %struct._objc_typeinfo.12.129.194.285.350.493.519.532.571.597.623.765, section "__DATA,__datacoal_nt,coalesced", align 8
+
+declare void @objc_exception_throw()
+declare void @objc_exception_rethrow()
+declare i32 @__objc_personality_v0(...)
+declare void @printf(i8* nocapture readonly, ...)
diff --git a/test/CodeGen/AArch64/combine-comparisons-by-cse.ll b/test/CodeGen/AArch64/combine-comparisons-by-cse.ll
index df8dc87..3686a1f 100644
--- a/test/CodeGen/AArch64/combine-comparisons-by-cse.ll
+++ b/test/CodeGen/AArch64/combine-comparisons-by-cse.ll
@@ -366,7 +366,6 @@ define i32 @fcmpri(i32 %argc, i8** nocapture readonly %argv) {
; CHECK-LABEL-DAG: .LBB9_3
; CHECK: cmp w19, #0
; CHECK: fcmp d8, #0.0
-; CHECK: b.gt .LBB9_5
; CHECK-NOT: cmp w19, #1
; CHECK-NOT: b.ge .LBB9_5
diff --git a/test/CodeGen/AArch64/compiler-ident.ll b/test/CodeGen/AArch64/compiler-ident.ll
index 0350571..217340d 100644
--- a/test/CodeGen/AArch64/compiler-ident.ll
+++ b/test/CodeGen/AArch64/compiler-ident.ll
@@ -8,5 +8,5 @@ target triple = "aarch64--linux-gnu"
!llvm.ident = !{!0}
-!0 = metadata !{metadata !"some LLVM version"}
+!0 = !{!"some LLVM version"}
diff --git a/test/CodeGen/AArch64/cpus.ll b/test/CodeGen/AArch64/cpus.ll
index f0f36bd..1266842 100644
--- a/test/CodeGen/AArch64/cpus.ll
+++ b/test/CodeGen/AArch64/cpus.ll
@@ -4,6 +4,7 @@
; RUN: llc < %s -mtriple=arm64-unknown-unknown -mcpu=generic 2>&1 | FileCheck %s
; RUN: llc < %s -mtriple=arm64-unknown-unknown -mcpu=cortex-a53 2>&1 | FileCheck %s
; RUN: llc < %s -mtriple=arm64-unknown-unknown -mcpu=cortex-a57 2>&1 | FileCheck %s
+; RUN: llc < %s -mtriple=arm64-unknown-unknown -mcpu=cortex-a72 2>&1 | FileCheck %s
; RUN: llc < %s -mtriple=arm64-unknown-unknown -mcpu=invalidcpu 2>&1 | FileCheck %s --check-prefix=INVALID
; CHECK-NOT: {{.*}} is not a recognized processor for this target
diff --git a/test/CodeGen/AArch64/dp-3source.ll b/test/CodeGen/AArch64/dp-3source.ll
index 22bd4a8..bd96ec7 100644
--- a/test/CodeGen/AArch64/dp-3source.ll
+++ b/test/CodeGen/AArch64/dp-3source.ll
@@ -161,3 +161,18 @@ define i64 @test_umnegl(i32 %lhs, i32 %rhs) {
; CHECK: umnegl {{x[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}
ret i64 %res
}
+
+@a = common global i32 0, align 4
+@b = common global i32 0, align 4
+@c = common global i32 0, align 4
+
+define void @test_mneg() {
+; CHECK-LABEL: test_mneg:
+ %1 = load i32* @a, align 4
+ %2 = load i32* @b, align 4
+ %3 = sub i32 0, %1
+ %4 = mul i32 %2, %3
+ store i32 %4, i32* @c, align 4
+; CHECK: mneg {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}
+ ret void
+}
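+
+; A note on the fold: mneg computes -(a * b), which matches the IR's
+; b * (0 - a) above, so the separate sub and mul collapse into one instruction.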
diff --git a/test/CodeGen/AArch64/f16-convert.ll b/test/CodeGen/AArch64/f16-convert.ll
index 12412d4..d1f49a91 100644
--- a/test/CodeGen/AArch64/f16-convert.ll
+++ b/test/CodeGen/AArch64/f16-convert.ll
@@ -133,7 +133,8 @@ define void @store0(i16* nocapture %a, float %val) nounwind {
define void @store1(i16* nocapture %a, double %val) nounwind {
; CHECK-LABEL: store1:
-; CHECK-NEXT: fcvt h0, d0
+; CHECK-NEXT: fcvt s0, d0
+; CHECK-NEXT: fcvt h0, s0
; CHECK-NEXT: str h0, [x0]
; CHECK-NEXT: ret
@@ -158,7 +159,8 @@ define void @store2(i16* nocapture %a, i32 %i, float %val) nounwind {
define void @store3(i16* nocapture %a, i32 %i, double %val) nounwind {
; CHECK-LABEL: store3:
-; CHECK-NEXT: fcvt h0, d0
+; CHECK-NEXT: fcvt s0, d0
+; CHECK-NEXT: fcvt h0, s0
; CHECK-NEXT: str h0, [x0, w1, sxtw #1]
; CHECK-NEXT: ret
@@ -184,7 +186,8 @@ define void @store4(i16* nocapture %a, i64 %i, float %val) nounwind {
define void @store5(i16* nocapture %a, i64 %i, double %val) nounwind {
; CHECK-LABEL: store5:
-; CHECK-NEXT: fcvt h0, d0
+; CHECK-NEXT: fcvt s0, d0
+; CHECK-NEXT: fcvt h0, s0
; CHECK-NEXT: str h0, [x0, x1, lsl #1]
; CHECK-NEXT: ret
@@ -209,7 +212,8 @@ define void @store6(i16* nocapture %a, float %val) nounwind {
define void @store7(i16* nocapture %a, double %val) nounwind {
; CHECK-LABEL: store7:
-; CHECK-NEXT: fcvt h0, d0
+; CHECK-NEXT: fcvt s0, d0
+; CHECK-NEXT: fcvt h0, s0
; CHECK-NEXT: str h0, [x0, #20]
; CHECK-NEXT: ret
@@ -234,7 +238,8 @@ define void @store8(i16* nocapture %a, float %val) nounwind {
define void @store9(i16* nocapture %a, double %val) nounwind {
; CHECK-LABEL: store9:
-; CHECK-NEXT: fcvt h0, d0
+; CHECK-NEXT: fcvt s0, d0
+; CHECK-NEXT: fcvt h0, s0
; CHECK-NEXT: stur h0, [x0, #-20]
; CHECK-NEXT: ret
diff --git a/test/CodeGen/AArch64/fast-isel-branch-cond-split.ll b/test/CodeGen/AArch64/fast-isel-branch-cond-split.ll
new file mode 100644
index 0000000..bc4a210
--- /dev/null
+++ b/test/CodeGen/AArch64/fast-isel-branch-cond-split.ll
@@ -0,0 +1,42 @@
+; RUN: llc -mtriple=aarch64-apple-darwin -fast-isel -fast-isel-abort -verify-machineinstrs < %s | FileCheck %s
+
+; CHECK-LABEL: test_or
+; CHECK: cbnz w0, {{LBB[0-9]+_2}}
+; CHECK: cbz w1, {{LBB[0-9]+_1}}
+define i64 @test_or(i32 %a, i32 %b) {
+bb1:
+ %0 = icmp eq i32 %a, 0
+ %1 = icmp eq i32 %b, 0
+ %or.cond = or i1 %0, %1
+ br i1 %or.cond, label %bb3, label %bb4, !prof !0
+
+bb3:
+ ret i64 0
+
+bb4:
+ %2 = call i64 @bar()
+ ret i64 %2
+}
+
+; CHECK-LABEL: test_and
+; CHECK: cbz w0, {{LBB[0-9]+_2}}
+; CHECK: cbnz w1, {{LBB[0-9]+_3}}
+define i64 @test_and(i32 %a, i32 %b) {
+bb1:
+ %0 = icmp ne i32 %a, 0
+ %1 = icmp ne i32 %b, 0
+ %or.cond = and i1 %0, %1
+ br i1 %or.cond, label %bb4, label %bb3, !prof !1
+
+bb3:
+ ret i64 0
+
+bb4:
+ %2 = call i64 @bar()
+ ret i64 %2
+}
+
+declare i64 @bar()
+
+!0 = !{!"branch_weights", i32 5128, i32 32}
+!1 = !{!"branch_weights", i32 1024, i32 4136}
diff --git a/test/CodeGen/AArch64/fast-isel-branch_weights.ll b/test/CodeGen/AArch64/fast-isel-branch_weights.ll
index 5b22476..70dbdf2 100644
--- a/test/CodeGen/AArch64/fast-isel-branch_weights.ll
+++ b/test/CodeGen/AArch64/fast-isel-branch_weights.ll
@@ -16,4 +16,4 @@ success:
ret i64 0
}
-!0 = metadata !{metadata !"branch_weights", i32 0, i32 2147483647}
+!0 = !{!"branch_weights", i32 0, i32 2147483647}
diff --git a/test/CodeGen/AArch64/fast-isel-memcpy.ll b/test/CodeGen/AArch64/fast-isel-memcpy.ll
new file mode 100644
index 0000000..9161dad
--- /dev/null
+++ b/test/CodeGen/AArch64/fast-isel-memcpy.ll
@@ -0,0 +1,15 @@
+; RUN: llc -mtriple=aarch64-apple-darwin -fast-isel -fast-isel-abort -verify-machineinstrs < %s | FileCheck %s
+
+; Test that we don't segfault.
+; CHECK-LABEL: test
+; CHECK: ldr [[REG1:x[0-9]+]], [x1]
+; CHECK-NEXT: and [[REG2:x[0-9]+]], x0, #0x7fffffffffffffff
+; CHECK-NEXT: str [[REG1]], {{\[}}[[REG2]]{{\]}}
+define void @test(i64 %a, i8* %b) {
+ %1 = and i64 %a, 9223372036854775807
+ %2 = inttoptr i64 %1 to i8*
+ call void @llvm.memcpy.p0i8.p0i8.i64(i8* %2, i8* %b, i64 8, i32 8, i1 false)
+ ret void
+}
+
+declare void @llvm.memcpy.p0i8.p0i8.i64(i8*, i8*, i64, i32, i1)
diff --git a/test/CodeGen/AArch64/fast-isel-tbz.ll b/test/CodeGen/AArch64/fast-isel-tbz.ll
index d7f46b2..a5f02ff 100644
--- a/test/CodeGen/AArch64/fast-isel-tbz.ll
+++ b/test/CodeGen/AArch64/fast-isel-tbz.ll
@@ -1,5 +1,5 @@
-; RUN: llc -aarch64-atomic-cfg-tidy=0 -verify-machineinstrs -mtriple=aarch64-apple-darwin < %s | FileCheck %s
-; RUN: llc -fast-isel -fast-isel-abort -aarch64-atomic-cfg-tidy=0 -verify-machineinstrs -mtriple=aarch64-apple-darwin < %s | FileCheck %s
+; RUN: llc -aarch64-atomic-cfg-tidy=0 -verify-machineinstrs -mtriple=aarch64-apple-darwin < %s | FileCheck --check-prefix=CHECK %s
+; RUN: llc -fast-isel -fast-isel-abort -aarch64-atomic-cfg-tidy=0 -verify-machineinstrs -mtriple=aarch64-apple-darwin < %s | FileCheck --check-prefix=CHECK --check-prefix=FAST %s
define i32 @icmp_eq_i8(i8 zeroext %a) {
; CHECK-LABEL: icmp_eq_i8
@@ -121,6 +121,160 @@ bb2:
ret i32 0
}
+define i32 @icmp_slt_i8(i8 zeroext %a) {
+; FAST-LABEL: icmp_slt_i8
+; FAST: tbnz w0, #7, {{LBB.+_2}}
+ %1 = icmp slt i8 %a, 0
+ br i1 %1, label %bb1, label %bb2, !prof !0
+bb1:
+ ret i32 1
+bb2:
+ ret i32 0
+}
+
+define i32 @icmp_slt_i16(i16 zeroext %a) {
+; FAST-LABEL: icmp_slt_i16
+; FAST: tbnz w0, #15, {{LBB.+_2}}
+ %1 = icmp slt i16 %a, 0
+ br i1 %1, label %bb1, label %bb2, !prof !0
+bb1:
+ ret i32 1
+bb2:
+ ret i32 0
+}
+
+define i32 @icmp_slt_i32(i32 %a) {
+; CHECK-LABEL: icmp_slt_i32
+; CHECK: tbnz w0, #31, {{LBB.+_2}}
+ %1 = icmp slt i32 %a, 0
+ br i1 %1, label %bb1, label %bb2, !prof !0
+bb1:
+ ret i32 1
+bb2:
+ ret i32 0
+}
+
+define i32 @icmp_slt_i64(i64 %a) {
+; CHECK-LABEL: icmp_slt_i64
+; CHECK: tbnz x0, #63, {{LBB.+_2}}
+ %1 = icmp slt i64 %a, 0
+ br i1 %1, label %bb1, label %bb2, !prof !0
+bb1:
+ ret i32 1
+bb2:
+ ret i32 0
+}
+
+define i32 @icmp_sge_i8(i8 zeroext %a) {
+; FAST-LABEL: icmp_sge_i8
+; FAST: tbz w0, #7, {{LBB.+_2}}
+ %1 = icmp sge i8 %a, 0
+ br i1 %1, label %bb1, label %bb2, !prof !0
+bb1:
+ ret i32 1
+bb2:
+ ret i32 0
+}
+
+define i32 @icmp_sge_i16(i16 zeroext %a) {
+; FAST-LABEL: icmp_sge_i16
+; FAST: tbz w0, #15, {{LBB.+_2}}
+ %1 = icmp sge i16 %a, 0
+ br i1 %1, label %bb1, label %bb2, !prof !0
+bb1:
+ ret i32 1
+bb2:
+ ret i32 0
+}
+
+define i32 @icmp_sle_i8(i8 zeroext %a) {
+; FAST-LABEL: icmp_sle_i8
+; FAST: tbnz w0, #7, {{LBB.+_2}}
+ %1 = icmp sle i8 %a, -1
+ br i1 %1, label %bb1, label %bb2, !prof !0
+bb1:
+ ret i32 1
+bb2:
+ ret i32 0
+}
+
+define i32 @icmp_sle_i16(i16 zeroext %a) {
+; FAST-LABEL: icmp_sle_i16
+; FAST: tbnz w0, #15, {{LBB.+_2}}
+ %1 = icmp sle i16 %a, -1
+ br i1 %1, label %bb1, label %bb2, !prof !0
+bb1:
+ ret i32 1
+bb2:
+ ret i32 0
+}
+
+define i32 @icmp_sle_i32(i32 %a) {
+; CHECK-LABEL: icmp_sle_i32
+; CHECK: tbnz w0, #31, {{LBB.+_2}}
+ %1 = icmp sle i32 %a, -1
+ br i1 %1, label %bb1, label %bb2, !prof !0
+bb1:
+ ret i32 1
+bb2:
+ ret i32 0
+}
+
+define i32 @icmp_sle_i64(i64 %a) {
+; CHECK-LABEL: icmp_sle_i64
+; CHECK: tbnz x0, #63, {{LBB.+_2}}
+ %1 = icmp sle i64 %a, -1
+ br i1 %1, label %bb1, label %bb2, !prof !0
+bb1:
+ ret i32 1
+bb2:
+ ret i32 0
+}
+
+define i32 @icmp_sgt_i8(i8 zeroext %a) {
+; FAST-LABEL: icmp_sgt_i8
+; FAST: tbz w0, #7, {{LBB.+_2}}
+ %1 = icmp sgt i8 %a, -1
+ br i1 %1, label %bb1, label %bb2, !prof !0
+bb1:
+ ret i32 1
+bb2:
+ ret i32 0
+}
+
+define i32 @icmp_sgt_i16(i16 zeroext %a) {
+; FAST-LABEL: icmp_sgt_i16
+; FAST: tbz w0, #15, {{LBB.+_2}}
+ %1 = icmp sgt i16 %a, -1
+ br i1 %1, label %bb1, label %bb2, !prof !0
+bb1:
+ ret i32 1
+bb2:
+ ret i32 0
+}
+
+define i32 @icmp_sgt_i32(i32 %a) {
+; CHECK-LABEL: icmp_sgt_i32
+; CHECK: tbz w0, #31, {{LBB.+_2}}
+ %1 = icmp sgt i32 %a, -1
+ br i1 %1, label %bb1, label %bb2, !prof !0
+bb1:
+ ret i32 1
+bb2:
+ ret i32 0
+}
+
+define i32 @icmp_sgt_i64(i64 %a) {
+; FAST-LABEL: icmp_sgt_i64
+; FAST: tbz x0, #63, {{LBB.+_2}}
+ %1 = icmp sgt i64 %a, -1
+ br i1 %1, label %bb1, label %bb2, !prof !0
+bb1:
+ ret i32 1
+bb2:
+ ret i32 0
+}
+
; Test that we don't fold the 'and' instruction into the compare.
define i32 @icmp_eq_and_i32(i32 %a, i1 %c) {
; CHECK-LABEL: icmp_eq_and_i32
@@ -137,5 +291,5 @@ bb2:
ret i32 0
}
-!0 = metadata !{metadata !"branch_weights", i32 0, i32 2147483647}
-!1 = metadata !{metadata !"branch_weights", i32 2147483647, i32 0}
+!0 = !{!"branch_weights", i32 0, i32 2147483647}
+!1 = !{!"branch_weights", i32 2147483647, i32 0}
diff --git a/test/CodeGen/AArch64/fdiv-combine.ll b/test/CodeGen/AArch64/fdiv-combine.ll
new file mode 100644
index 0000000..389eefd
--- /dev/null
+++ b/test/CodeGen/AArch64/fdiv-combine.ll
@@ -0,0 +1,94 @@
+; RUN: llc -march=aarch64 < %s | FileCheck %s
+
+; The following test cases check:
+; a / D; b / D; c / D;
+; =>
+; recip = 1.0 / D; a * recip; b * recip; c * recip;
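+;
+; As a rough sketch (the combine happens during instruction selection, not as
+; an IR rewrite), three_fdiv_float below effectively becomes:
+;   %recip = fdiv float 1.000000e+00, %D
+;   %mul  = fmul float %a, %recip
+;   %mul1 = fmul float %b, %recip
+;   %mul2 = fmul float %c, %recip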
+define void @three_fdiv_float(float %D, float %a, float %b, float %c) #0 {
+; CHECK-LABEL: three_fdiv_float:
+; CHECK: fdiv
+; CHECK-NOT: fdiv
+; CHECK: fmul
+; CHECK: fmul
+; CHECK: fmul
+ %div = fdiv float %a, %D
+ %div1 = fdiv float %b, %D
+ %div2 = fdiv float %c, %D
+ tail call void @foo_3f(float %div, float %div1, float %div2)
+ ret void
+}
+
+define void @three_fdiv_double(double %D, double %a, double %b, double %c) #0 {
+; CHECK-LABEL: three_fdiv_double:
+; CHECK: fdiv
+; CHECK-NOT: fdiv
+; CHECK: fmul
+; CHECK: fmul
+; CHECK: fmul
+ %div = fdiv double %a, %D
+ %div1 = fdiv double %b, %D
+ %div2 = fdiv double %c, %D
+ tail call void @foo_3d(double %div, double %div1, double %div2)
+ ret void
+}
+
+define void @three_fdiv_4xfloat(<4 x float> %D, <4 x float> %a, <4 x float> %b, <4 x float> %c) #0 {
+; CHECK-LABEL: three_fdiv_4xfloat:
+; CHECK: fdiv
+; CHECK-NOT: fdiv
+; CHECK: fmul
+; CHECK: fmul
+; CHECK: fmul
+ %div = fdiv <4 x float> %a, %D
+ %div1 = fdiv <4 x float> %b, %D
+ %div2 = fdiv <4 x float> %c, %D
+ tail call void @foo_3_4xf(<4 x float> %div, <4 x float> %div1, <4 x float> %div2)
+ ret void
+}
+
+define void @three_fdiv_2xdouble(<2 x double> %D, <2 x double> %a, <2 x double> %b, <2 x double> %c) #0 {
+; CHECK-LABEL: three_fdiv_2xdouble:
+; CHECK: fdiv
+; CHECK-NOT: fdiv
+; CHECK: fmul
+; CHECK: fmul
+; CHECK: fmul
+ %div = fdiv <2 x double> %a, %D
+ %div1 = fdiv <2 x double> %b, %D
+ %div2 = fdiv <2 x double> %c, %D
+ tail call void @foo_3_2xd(<2 x double> %div, <2 x double> %div1, <2 x double> %div2)
+ ret void
+}
+
+; The following test cases check that we never combine two FDIVs if neither
+; of them calculates a reciprocal.
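+; (Presumably because one fdiv plus two fmuls would be no cheaper than two
+; fdivs, so materializing the reciprocal doesn't pay off.)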
+define void @two_fdiv_float(float %D, float %a, float %b) #0 {
+; CHECK-LABEL: two_fdiv_float:
+; CHECK: fdiv
+; CHECK: fdiv
+; CHECK-NOT: fmul
+ %div = fdiv float %a, %D
+ %div1 = fdiv float %b, %D
+ tail call void @foo_2f(float %div, float %div1)
+ ret void
+}
+
+define void @two_fdiv_double(double %D, double %a, double %b) #0 {
+; CHECK-LABEL: two_fdiv_double:
+; CHECK: fdiv
+; CHECK: fdiv
+; CHECK-NOT: fmul
+ %div = fdiv double %a, %D
+ %div1 = fdiv double %b, %D
+ tail call void @foo_2d(double %div, double %div1)
+ ret void
+}
+
+declare void @foo_3f(float, float, float)
+declare void @foo_3d(double, double, double)
+declare void @foo_3_4xf(<4 x float>, <4 x float>, <4 x float>)
+declare void @foo_3_2xd(<2 x double>, <2 x double>, <2 x double>)
+declare void @foo_2f(float, float)
+declare void @foo_2d(double, double)
+
+attributes #0 = { "unsafe-fp-math"="true" }
diff --git a/test/CodeGen/AArch64/fp16-v8-instructions.ll b/test/CodeGen/AArch64/fp16-v8-instructions.ll
index 9ee2296..b75f160 100644
--- a/test/CodeGen/AArch64/fp16-v8-instructions.ll
+++ b/test/CodeGen/AArch64/fp16-v8-instructions.ll
@@ -188,10 +188,10 @@ define <8 x half> @s_to_h(<8 x float> %a) {
define <8 x half> @d_to_h(<8 x double> %a) {
; CHECK-LABEL: d_to_h:
-; CHECK-DAG: ins v{{[0-9]+}}.d
-; CHECK-DAG: ins v{{[0-9]+}}.d
-; CHECK-DAG: ins v{{[0-9]+}}.d
-; CHECK-DAG: ins v{{[0-9]+}}.d
+; CHECK-DAG: mov d{{[0-9]+}}, v{{[0-9]+}}.d[1]
+; CHECK-DAG: mov d{{[0-9]+}}, v{{[0-9]+}}.d[1]
+; CHECK-DAG: mov d{{[0-9]+}}, v{{[0-9]+}}.d[1]
+; CHECK-DAG: mov d{{[0-9]+}}, v{{[0-9]+}}.d[1]
; CHECK-DAG: fcvt h
; CHECK-DAG: fcvt h
; CHECK-DAG: fcvt h
diff --git a/test/CodeGen/AArch64/fpimm.ll b/test/CodeGen/AArch64/fpimm.ll
index e59520c..b7db918 100644
--- a/test/CodeGen/AArch64/fpimm.ll
+++ b/test/CodeGen/AArch64/fpimm.ll
@@ -1,4 +1,6 @@
-; RUN: llc -verify-machineinstrs -o - %s -mtriple=aarch64-linux-gnu | FileCheck %s
+; RUN: llc -mtriple=aarch64-linux-gnu -verify-machineinstrs < %s | FileCheck %s
+; RUN: llc -mtriple=aarch64-apple-darwin -code-model=large -verify-machineinstrs < %s | FileCheck %s --check-prefix=LARGE
+; RUN: llc -mtriple=aarch64-apple-darwin -code-model=large -fast-isel -fast-isel-abort -verify-machineinstrs < %s | FileCheck %s --check-prefix=LARGE
@varf32 = global float 0.0
@varf64 = global double 0.0
@@ -34,3 +36,22 @@ define void @check_double() {
; CHECK: ret
ret void
}
+
+; LARGE-LABEL: check_float2
+; LARGE: movz [[REG:w[0-9]+]], #0x4049, lsl #16
+; LARGE-NEXT: movk [[REG]], #0xfdb
+; LARGE-NEXT: fmov s0, [[REG]]
+define float @check_float2() {
+ ret float 3.14159274101257324218750
+}
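+
+; (0x40490fdb is the IEEE-754 single-precision encoding of the pi constant
+; above: movz materializes the high half 0x4049, movk the low half 0xfdb.)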
+
+; LARGE-LABEL: check_double2
+; LARGE: movz [[REG:x[0-9]+]], #0x4009, lsl #48
+; LARGE-NEXT: movk [[REG]], #0x21fb, lsl #32
+; LARGE-NEXT: movk [[REG]], #0x5444, lsl #16
+; LARGE-NEXT: movk [[REG]], #0x2d18
+; LARGE-NEXT: fmov d0, [[REG]]
+define double @check_double2() {
+ ret double 3.1415926535897931159979634685441851615905761718750
+}
+
diff --git a/test/CodeGen/AArch64/func-argpassing.ll b/test/CodeGen/AArch64/func-argpassing.ll
index abb732c..9fc9a5f 100644
--- a/test/CodeGen/AArch64/func-argpassing.ll
+++ b/test/CodeGen/AArch64/func-argpassing.ll
@@ -96,10 +96,8 @@ define [2 x i64] @return_struct() {
%addr = bitcast %myStruct* @varstruct to [2 x i64]*
%val = load [2 x i64]* %addr
ret [2 x i64] %val
-; CHECK-DAG: ldr x0, [{{x[0-9]+}}, {{#?}}:lo12:varstruct]
- ; Odd register regex below disallows x0 which we want to be live now.
-; CHECK-DAG: add {{x[1-9][0-9]*}}, {{x[1-9][0-9]*}}, {{#?}}:lo12:varstruct
-; CHECK: ldr x1, [{{x[1-9][0-9]*}}, #8]
+; CHECK: add x[[VARSTRUCT:[0-9]+]], {{x[0-9]+}}, :lo12:varstruct
+; CHECK: ldp x0, x1, [x[[VARSTRUCT]]]
; Make sure epilogue immediately follows
; CHECK-NEXT: ret
}
@@ -166,8 +164,8 @@ define void @stacked_fpu(float %var0, double %var1, float %var2, float %var3,
define i64 @check_i128_regalign(i32 %val0, i128 %val1, i64 %val2) {
; CHECK-LABEL: check_i128_regalign
store i128 %val1, i128* @var128
-; CHECK-DAG: str x2, [{{x[0-9]+}}, {{#?}}:lo12:var128]
-; CHECK-DAG: str x3, [{{x[0-9]+}}, #8]
+; CHECK: add x[[VAR128:[0-9]+]], {{x[0-9]+}}, :lo12:var128
+; CHECK-DAG: stp x2, x3, [x[[VAR128]]]
ret i64 %val2
; CHECK: mov x0, x4
diff --git a/test/CodeGen/AArch64/func-calls.ll b/test/CodeGen/AArch64/func-calls.ll
index 51979f0..16157f8 100644
--- a/test/CodeGen/AArch64/func-calls.ll
+++ b/test/CodeGen/AArch64/func-calls.ll
@@ -62,8 +62,8 @@ define void @simple_rets() {
%arr = call [2 x i64] @return_smallstruct()
store [2 x i64] %arr, [2 x i64]* @varsmallstruct
; CHECK: bl return_smallstruct
-; CHECK: str x1, [{{x[0-9]+}}, #8]
-; CHECK: str x0, [{{x[0-9]+}}, {{#?}}:lo12:varsmallstruct]
+; CHECK: add x[[VARSMALLSTRUCT:[0-9]+]], {{x[0-9]+}}, :lo12:varsmallstruct
+; CHECK: stp x0, x1, [x[[VARSMALLSTRUCT]]]
call void @return_large_struct(%myStruct* sret @varstruct)
; CHECK: add x8, {{x[0-9]+}}, {{#?}}:lo12:varstruct
@@ -128,12 +128,12 @@ define void @check_i128_align() {
call void @check_i128_stackalign(i32 0, i32 1, i32 2, i32 3,
i32 4, i32 5, i32 6, i32 7,
i32 42, i128 %val)
-; CHECK: ldr [[I128LO:x[0-9]+]], [{{x[0-9]+}}, {{#?}}:lo12:var128]
-; CHECK: ldr [[I128HI:x[0-9]+]], [{{x[0-9]+}}, #8]
+; CHECK: add x[[VAR128:[0-9]+]], {{x[0-9]+}}, :lo12:var128
+; CHECK: ldp [[I128LO:x[0-9]+]], [[I128HI:x[0-9]+]], [x[[VAR128]]]
; CHECK: stp [[I128LO]], [[I128HI]], [sp, #16]
-; CHECK-NONEON: ldr [[I128LO:x[0-9]+]], [{{x[0-9]+}}, :lo12:var128]
-; CHECK-NONEON: ldr [[I128HI:x[0-9]+]], [{{x[0-9]+}}, #8]
+; CHECK-NONEON: add x[[VAR128:[0-9]+]], {{x[0-9]+}}, :lo12:var128
+; CHECK-NONEON: ldp [[I128LO:x[0-9]+]], [[I128HI:x[0-9]+]], [x[[VAR128]]]
; CHECK-NONEON: stp [[I128LO]], [[I128HI]], [sp, #16]
; CHECK: bl check_i128_stackalign
diff --git a/test/CodeGen/AArch64/ghc-cc.ll b/test/CodeGen/AArch64/ghc-cc.ll
new file mode 100644
index 0000000..505bd5f
--- /dev/null
+++ b/test/CodeGen/AArch64/ghc-cc.ll
@@ -0,0 +1,89 @@
+; RUN: llc -mtriple=aarch64-unknown-linux-gnu < %s | FileCheck %s
+
+; Check that the GHC calling convention works on AArch64.
+
+@base = external global i64 ; assigned to register: r19
+@sp = external global i64 ; assigned to register: r20
+@hp = external global i64 ; assigned to register: r21
+@r1 = external global i64 ; assigned to register: r22
+@r2 = external global i64 ; assigned to register: r23
+@r3 = external global i64 ; assigned to register: r24
+@r4 = external global i64 ; assigned to register: r25
+@r5 = external global i64 ; assigned to register: r26
+@r6 = external global i64 ; assigned to register: r27
+@splim = external global i64 ; assigned to register: r28
+
+@f1 = external global float ; assigned to register: s8
+@f2 = external global float ; assigned to register: s9
+@f3 = external global float ; assigned to register: s10
+@f4 = external global float ; assigned to register: s11
+
+@d1 = external global double ; assigned to register: d12
+@d2 = external global double ; assigned to register: d13
+@d3 = external global double ; assigned to register: d14
+@d4 = external global double ; assigned to register: d15
+
+define ghccc i64 @addtwo(i64 %x, i64 %y) nounwind {
+entry:
+ ; CHECK-LABEL: addtwo
+ ; CHECK: add x0, x19, x20
+ ; CHECK-NEXT: ret
+ %0 = add i64 %x, %y
+ ret i64 %0
+}
+
+define void @zap(i64 %a, i64 %b) nounwind {
+entry:
+ ; CHECK-LABEL: zap
+ ; CHECK-NOT: mov {{x[0-9]+}}, sp
+ ; CHECK: bl addtwo
+ ; CHECK-NEXT: bl foo
+ %0 = call ghccc i64 @addtwo(i64 %a, i64 %b)
+ call void @foo() nounwind
+ ret void
+}
+
+define ghccc void @foo_i64 () nounwind {
+entry:
+ ; CHECK-LABEL: foo_i64
+ ; CHECK: adrp {{x[0-9]+}}, base
+ ; CHECK-NEXT: ldr x19, [{{x[0-9]+}}, :lo12:base]
+ ; CHECK-NEXT: bl bar_i64
+ ; CHECK-NEXT: ret
+
+ %0 = load i64* @base
+ tail call ghccc void @bar_i64( i64 %0 ) nounwind
+ ret void
+}
+
+define ghccc void @foo_float () nounwind {
+entry:
+ ; CHECK-LABEL: foo_float
+ ; CHECK: adrp {{x[0-9]+}}, f1
+ ; CHECK-NEXT: ldr s8, [{{x[0-9]+}}, :lo12:f1]
+ ; CHECK-NEXT: bl bar_float
+ ; CHECK-NEXT: ret
+
+ %0 = load float* @f1
+ tail call ghccc void @bar_float( float %0 ) nounwind
+ ret void
+}
+
+define ghccc void @foo_double () nounwind {
+entry:
+ ; CHECK-LABEL: foo_double
+ ; CHECK: adrp {{x[0-9]+}}, d1
+ ; CHECK-NEXT: ldr d12, [{{x[0-9]+}}, :lo12:d1]
+ ; CHECK-NEXT: bl bar_double
+ ; CHECK-NEXT: ret
+
+ %0 = load double* @d1
+ tail call ghccc void @bar_double( double %0 ) nounwind
+ ret void
+}
+
+declare ghccc void @foo ()
+
+declare ghccc void @bar_i64 (i64)
+declare ghccc void @bar_float (float)
+declare ghccc void @bar_double (double)
diff --git a/test/CodeGen/AArch64/global-merge-1.ll b/test/CodeGen/AArch64/global-merge-1.ll
index 68aba5e..7dc8da1 100644
--- a/test/CodeGen/AArch64/global-merge-1.ll
+++ b/test/CodeGen/AArch64/global-merge-1.ll
@@ -11,6 +11,7 @@
@n = internal global i32 0, align 4
define void @f1(i32 %a1, i32 %a2) {
+;CHECK-APPLE-IOS-NOT: adrp
;CHECK-APPLE-IOS: adrp x8, __MergedGlobals@PAGE
;CHECK-APPLE-IOS-NOT: adrp
;CHECK-APPLE-IOS: add x8, x8, __MergedGlobals@PAGEOFF
diff --git a/test/CodeGen/AArch64/global-merge-2.ll b/test/CodeGen/AArch64/global-merge-2.ll
index a773566..70b700c 100644
--- a/test/CodeGen/AArch64/global-merge-2.ll
+++ b/test/CodeGen/AArch64/global-merge-2.ll
@@ -8,6 +8,7 @@
define void @f1(i32 %a1, i32 %a2) {
;CHECK-APPLE-IOS-LABEL: _f1:
+;CHECK-APPLE-IOS-NOT: adrp
;CHECK-APPLE-IOS: adrp x8, __MergedGlobals_x@PAGE
;CHECK-APPLE-IOS: add x8, x8, __MergedGlobals_x@PAGEOFF
;CHECK-APPLE-IOS-NOT: adrp
diff --git a/test/CodeGen/AArch64/implicit-sret.ll b/test/CodeGen/AArch64/implicit-sret.ll
new file mode 100644
index 0000000..264d519
--- /dev/null
+++ b/test/CodeGen/AArch64/implicit-sret.ll
@@ -0,0 +1,13 @@
+; RUN: llc %s -o - -mtriple=arm64-apple-ios7.0 | FileCheck %s
+;
+; Handle implicit sret arguments that are generated on-the-fly during lowering.
+; <rdar://19792160> Null pointer assertion in AArch64TargetLowering
+
+; CHECK-LABEL: big_retval
+; ... str or stp for the first 1024 bits
+; CHECK: strb wzr, [x8, #128]
+; CHECK: ret
+define i1032 @big_retval() {
+entry:
+ ret i1032 0
+}
diff --git a/test/CodeGen/AArch64/large_shift.ll b/test/CodeGen/AArch64/large_shift.ll
new file mode 100644
index 0000000..f72c97d
--- /dev/null
+++ b/test/CodeGen/AArch64/large_shift.ll
@@ -0,0 +1,21 @@
+; RUN: llc -march=aarch64 -o - %s
+target triple = "arm64-unknown-unknown"
+
+; Make sure we don't run into an assert in AArch64 instruction selection when
+; DAGCombine fails.
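+; (The shift amount 422383 is far larger than the 64-bit type, so the lshr is
+; undefined; the only requirement is that codegen doesn't assert on it.)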
+
+declare void @t()
+
+define void @foo() {
+ %c = bitcast i64 270458 to i64
+ %t0 = lshr i64 %c, 422383
+ %t1 = trunc i64 %t0 to i1
+ br i1 %t1, label %BB1, label %BB0
+
+BB0:
+ call void @t()
+ br label %BB1
+
+BB1:
+ ret void
+}
diff --git a/test/CodeGen/AArch64/machine_cse_impdef_killflags.ll b/test/CodeGen/AArch64/machine_cse_impdef_killflags.ll
new file mode 100644
index 0000000..e77824f
--- /dev/null
+++ b/test/CodeGen/AArch64/machine_cse_impdef_killflags.ll
@@ -0,0 +1,26 @@
+; RUN: llc < %s -mtriple=aarch64-apple-ios -fast-isel -verify-machineinstrs | FileCheck %s
+
+; Check that the kill flag is cleared between CSE'd instructions on their
+; imp-def'd registers.
+; The verifier would complain otherwise.
+define i64 @csed-impdef-killflag(i64 %a) {
+; CHECK-LABEL: csed-impdef-killflag
+; CHECK-DAG: mov [[REG0:w[0-9]+]], wzr
+; CHECK-DAG: orr [[REG1:w[0-9]+]], wzr, #0x1
+; CHECK-DAG: orr [[REG2:x[0-9]+]], xzr, #0x2
+; CHECK-DAG: orr [[REG3:x[0-9]+]], xzr, #0x3
+; CHECK: cmp x0, #0
+; CHECK-DAG: csel w[[SELECT_WREG_1:[0-9]+]], [[REG0]], [[REG1]], ne
+; CHECK-DAG: csel [[SELECT_XREG_2:x[0-9]+]], [[REG2]], [[REG3]], ne
+; CHECK: ubfx [[SELECT_XREG_1:x[0-9]+]], x[[SELECT_WREG_1]], #0, #32
+; CHECK-NEXT: add x0, [[SELECT_XREG_2]], [[SELECT_XREG_1]]
+; CHECK-NEXT: ret
+
+ %1 = icmp ne i64 %a, 0
+ %2 = select i1 %1, i32 0, i32 1
+ %3 = icmp ne i64 %a, 0
+ %4 = select i1 %3, i64 2, i64 3
+ %5 = zext i32 %2 to i64
+ %6 = add i64 %4, %5
+ ret i64 %6
+}
diff --git a/test/CodeGen/AArch64/neon-scalar-copy.ll b/test/CodeGen/AArch64/neon-scalar-copy.ll
index 6afac31..3f77060 100644
--- a/test/CodeGen/AArch64/neon-scalar-copy.ll
+++ b/test/CodeGen/AArch64/neon-scalar-copy.ll
@@ -1,101 +1,145 @@
-; RUN: llc -mtriple=aarch64-none-linux-gnu -mattr=+neon < %s | FileCheck %s --check-prefix=CHECK
+; RUN: llc -mtriple=aarch64-none-linux-gnu -mattr=+neon -asm-verbose=false < %s | FileCheck %s
-
-define float @test_dup_sv2S(<2 x float> %v) {
- ; CHECK-LABEL: test_dup_sv2S
- ; CHECK: ins {{v[0-9]+}}.s[0], {{v[0-9]+}}.s[1]
+define float @test_dup_sv2S(<2 x float> %v) #0 {
+ ; CHECK-LABEL: test_dup_sv2S:
+ ; CHECK-NEXT: mov s{{[0-9]+}}, {{v[0-9]+}}.s[1]
+ ; CHECK-NEXT: ret
%tmp1 = extractelement <2 x float> %v, i32 1
ret float %tmp1
}
-define float @test_dup_sv2S_0(<2 x float> %v) {
- ; CHECK-LABEL: test_dup_sv2S_0
+define float @test_dup_sv2S_0(<2 x float> %v) #0 {
+ ; CHECK-LABEL: test_dup_sv2S_0:
; CHECK-NOT: dup {{[vsd][0-9]+}}
; CHECK-NOT: ins {{[vsd][0-9]+}}
- ; CHECK: ret
+ ; CHECK-NEXT: ret
%tmp1 = extractelement <2 x float> %v, i32 0
ret float %tmp1
}
-define float @test_dup_sv4S(<4 x float> %v) {
- ; CHECK-LABEL: test_dup_sv4S
+define float @test_dup_sv4S(<4 x float> %v) #0 {
+ ; CHECK-LABEL: test_dup_sv4S:
+ ; CHECK-NEXT: mov s{{[0-9]+}}, {{v[0-9]+}}.s[1]
+ ; CHECK-NEXT: ret
+ %tmp1 = extractelement <4 x float> %v, i32 1
+ ret float %tmp1
+}
+
+define float @test_dup_sv4S_0(<4 x float> %v) #0 {
+ ; CHECK-LABEL: test_dup_sv4S_0:
; CHECK-NOT: dup {{[vsd][0-9]+}}
; CHECK-NOT: ins {{[vsd][0-9]+}}
- ; CHECK: ret
+ ; CHECK-NEXT: ret
%tmp1 = extractelement <4 x float> %v, i32 0
ret float %tmp1
}
-define double @test_dup_dvD(<1 x double> %v) {
- ; CHECK-LABEL: test_dup_dvD
+define double @test_dup_dvD(<1 x double> %v) #0 {
+ ; CHECK-LABEL: test_dup_dvD:
; CHECK-NOT: dup {{[vsd][0-9]+}}
; CHECK-NOT: ins {{[vsd][0-9]+}}
- ; CHECK: ret
+ ; CHECK-NEXT: ret
%tmp1 = extractelement <1 x double> %v, i32 0
ret double %tmp1
}
-define double @test_dup_dv2D(<2 x double> %v) {
- ; CHECK-LABEL: test_dup_dv2D
- ; CHECK: ins {{v[0-9]+}}.d[0], {{v[0-9]+}}.d[1]
+define double @test_dup_dv2D(<2 x double> %v) #0 {
+ ; CHECK-LABEL: test_dup_dv2D:
+ ; CHECK-NEXT: mov d{{[0-9]+}}, {{v[0-9]+}}.d[1]
+ ; CHECK-NEXT: ret
%tmp1 = extractelement <2 x double> %v, i32 1
ret double %tmp1
}
-define double @test_dup_dv2D_0(<2 x double> %v) {
- ; CHECK-LABEL: test_dup_dv2D_0
- ; CHECK: ins {{v[0-9]+}}.d[0], {{v[0-9]+}}.d[1]
- ; CHECK: ret
- %tmp1 = extractelement <2 x double> %v, i32 1
+define double @test_dup_dv2D_0(<2 x double> %v) #0 {
+ ; CHECK-LABEL: test_dup_dv2D_0:
+ ; CHECK-NOT: dup {{[vsd][0-9]+}}
+ ; CHECK-NOT: ins {{[vsd][0-9]+}}
+ ; CHECK-NEXT: ret
+ %tmp1 = extractelement <2 x double> %v, i32 0
ret double %tmp1
}
-define <1 x i8> @test_vector_dup_bv16B(<16 x i8> %v1) {
- ; CHECK-LABEL: test_vector_dup_bv16B
+define half @test_dup_hv8H(<8 x half> %v) #0 {
+ ; CHECK-LABEL: test_dup_hv8H:
+ ; CHECK-NEXT: mov h{{[0-9]+}}, {{v[0-9]+}}.h[1]
+ ; CHECK-NEXT: ret
+ %tmp1 = extractelement <8 x half> %v, i32 1
+ ret half %tmp1
+}
+
+define half @test_dup_hv8H_0(<8 x half> %v) #0 {
+ ; CHECK-LABEL: test_dup_hv8H_0:
+ ; CHECK-NOT: dup {{[vsdh][0-9]+}}
+ ; CHECK-NOT: ins {{[vsdh][0-9]+}}
+ ; CHECK-NEXT: ret
+ %tmp1 = extractelement <8 x half> %v, i32 0
+ ret half %tmp1
+}
+
+define <1 x i8> @test_vector_dup_bv16B(<16 x i8> %v1) #0 {
+ ; CHECK-LABEL: test_vector_dup_bv16B:
+ ; CHECK-NEXT: umov [[W:w[0-9]+]], v0.b[14]
+ ; CHECK-NEXT: fmov s0, [[W]]
+ ; CHECK-NEXT: ret
%shuffle.i = shufflevector <16 x i8> %v1, <16 x i8> undef, <1 x i32> <i32 14>
ret <1 x i8> %shuffle.i
}
-define <1 x i8> @test_vector_dup_bv8B(<8 x i8> %v1) {
- ; CHECK-LABEL: test_vector_dup_bv8B
+define <1 x i8> @test_vector_dup_bv8B(<8 x i8> %v1) #0 {
+ ; CHECK-LABEL: test_vector_dup_bv8B:
+ ; CHECK-NEXT: dup v0.8b, v0.b[7]
+ ; CHECK-NEXT: ret
%shuffle.i = shufflevector <8 x i8> %v1, <8 x i8> undef, <1 x i32> <i32 7>
ret <1 x i8> %shuffle.i
}
-define <1 x i16> @test_vector_dup_hv8H(<8 x i16> %v1) {
- ; CHECK-LABEL: test_vector_dup_hv8H
+define <1 x i16> @test_vector_dup_hv8H(<8 x i16> %v1) #0 {
+ ; CHECK-LABEL: test_vector_dup_hv8H:
+ ; CHECK-NEXT: umov [[W:w[0-9]+]], v0.h[7]
+ ; CHECK-NEXT: fmov s0, [[W]]
+ ; CHECK-NEXT: ret
%shuffle.i = shufflevector <8 x i16> %v1, <8 x i16> undef, <1 x i32> <i32 7>
ret <1 x i16> %shuffle.i
}
-define <1 x i16> @test_vector_dup_hv4H(<4 x i16> %v1) {
- ; CHECK-LABEL: test_vector_dup_hv4H
+define <1 x i16> @test_vector_dup_hv4H(<4 x i16> %v1) #0 {
+ ; CHECK-LABEL: test_vector_dup_hv4H:
+ ; CHECK-NEXT: dup v0.4h, v0.h[3]
+ ; CHECK-NEXT: ret
%shuffle.i = shufflevector <4 x i16> %v1, <4 x i16> undef, <1 x i32> <i32 3>
ret <1 x i16> %shuffle.i
}
-define <1 x i32> @test_vector_dup_sv4S(<4 x i32> %v1) {
- ; CHECK-LABEL: test_vector_dup_sv4S
+define <1 x i32> @test_vector_dup_sv4S(<4 x i32> %v1) #0 {
+ ; CHECK-LABEL: test_vector_dup_sv4S:
+ ; CHECK-NEXT: mov [[W:w[0-9]+]], v0.s[3]
+ ; CHECK-NEXT: fmov s0, [[W]]
+ ; CHECK-NEXT: ret
%shuffle = shufflevector <4 x i32> %v1, <4 x i32> undef, <1 x i32> <i32 3>
ret <1 x i32> %shuffle
}
-define <1 x i32> @test_vector_dup_sv2S(<2 x i32> %v1) {
- ; CHECK-LABEL: test_vector_dup_sv2S
+define <1 x i32> @test_vector_dup_sv2S(<2 x i32> %v1) #0 {
+ ; CHECK-LABEL: test_vector_dup_sv2S:
+ ; CHECK-NEXT: dup v0.2s, v0.s[1]
+ ; CHECK-NEXT: ret
%shuffle = shufflevector <2 x i32> %v1, <2 x i32> undef, <1 x i32> <i32 1>
ret <1 x i32> %shuffle
}
-define <1 x i64> @test_vector_dup_dv2D(<2 x i64> %v1) {
- ; CHECK-LABEL: test_vector_dup_dv2D
- ; CHECK: ext {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, #8
+define <1 x i64> @test_vector_dup_dv2D(<2 x i64> %v1) #0 {
+ ; CHECK-LABEL: test_vector_dup_dv2D:
+ ; CHECK-NEXT: ext {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, #8
+ ; CHECK-NEXT: ret
%shuffle.i = shufflevector <2 x i64> %v1, <2 x i64> undef, <1 x i32> <i32 1>
ret <1 x i64> %shuffle.i
}
-define <1 x i64> @test_vector_copy_dup_dv2D(<1 x i64> %a, <2 x i64> %c) {
- ; CHECK-LABEL: test_vector_copy_dup_dv2D
- ; CHECK: {{dup|mov}} {{d[0-9]+}}, {{v[0-9]+}}.d[1]
+define <1 x i64> @test_vector_copy_dup_dv2D(<1 x i64> %a, <2 x i64> %c) #0 {
+ ; CHECK-LABEL: test_vector_copy_dup_dv2D:
+ ; CHECK-NEXT: {{dup|mov}} {{d[0-9]+}}, {{v[0-9]+}}.d[1]
+ ; CHECK-NEXT: ret
%vget_lane = extractelement <2 x i64> %c, i32 1
%vset_lane = insertelement <1 x i64> undef, i64 %vget_lane, i32 0
ret <1 x i64> %vset_lane
@@ -118,3 +162,5 @@ define void @test_out_of_range_insert(<4 x i32> %vec, i32 %elt) {
insertelement <4 x i32> %vec, i32 %elt, i32 4
ret void
}
+
+attributes #0 = { nounwind }
diff --git a/test/CodeGen/AArch64/or-combine.ll b/test/CodeGen/AArch64/or-combine.ll
new file mode 100644
index 0000000..c6c343a
--- /dev/null
+++ b/test/CodeGen/AArch64/or-combine.ll
@@ -0,0 +1,44 @@
+; RUN: llc -mtriple=aarch64-linux-gnu -o - %s | FileCheck %s
+
+define i32 @test_consts(i32 %in) {
+; CHECK-LABEL: test_consts:
+; CHECK-NOT: bfxil
+; CHECK-NOT: and
+; CHECK-NOT: orr
+; CHECK: ret
+
+ %lo = and i32 %in, 65535
+ %hi = and i32 %in, -65536
+ %res = or i32 %lo, %hi
+ ret i32 %res
+}
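+
+; (This works because 65535 = 0x0000ffff and -65536 = 0xffff0000 are
+; complementary masks: the OR of the two ANDs reconstructs %in exactly, so no
+; bfxil/and/orr is needed.)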
+
+define i32 @test_generic(i32 %in, i32 %mask1, i32 %mask2) {
+; CHECK-LABEL: test_generic:
+; CHECK: orr [[FULL_MASK:w[0-9]+]], w1, w2
+; CHECK: and w0, w0, [[FULL_MASK]]
+
+ %lo = and i32 %in, %mask1
+ %hi = and i32 %in, %mask2
+ %res = or i32 %lo, %hi
+ ret i32 %res
+}
+
+; In this case the transformation isn't profitable, since %lo and %hi
+; are used more than once.
+define [3 x i32] @test_reuse(i32 %in, i32 %mask1, i32 %mask2) {
+; CHECK-LABEL: test_reuse:
+; CHECK-DAG: and w1, w0, w1
+; CHECK-DAG: and w2, w0, w2
+; CHECK-DAG: orr w0, w1, w2
+
+ %lo = and i32 %in, %mask1
+ %hi = and i32 %in, %mask2
+ %recombine = or i32 %lo, %hi
+
+ %res.tmp0 = insertvalue [3 x i32] undef, i32 %recombine, 0
+ %res.tmp1 = insertvalue [3 x i32] %res.tmp0, i32 %lo, 1
+ %res = insertvalue [3 x i32] %res.tmp1, i32 %hi, 2
+
+ ret [3 x i32] %res
+}
diff --git a/test/CodeGen/AArch64/ragreedy-csr.ll b/test/CodeGen/AArch64/ragreedy-csr.ll
index de29b1b..31ff543 100644
--- a/test/CodeGen/AArch64/ragreedy-csr.ll
+++ b/test/CodeGen/AArch64/ragreedy-csr.ll
@@ -271,27 +271,27 @@ return:
%retval.0 = phi i32 [ 0, %entry ], [ 1, %land.lhs.true52 ], [ 1, %land.lhs.true43 ], [ 0, %if.else123 ], [ 1, %while.cond59.preheader ], [ 1, %while.cond95.preheader ], [ 1, %while.cond130.preheader ], [ 1, %land.lhs.true28 ], [ 1, %if.then83 ], [ 0, %lor.lhs.false74 ], [ 1, %land.rhs ], [ 1, %if.then117 ], [ 0, %while.body104 ], [ 1, %land.rhs99 ], [ 1, %if.then152 ], [ 0, %while.body139 ], [ 1, %land.rhs134 ], [ 0, %while.body ]
ret i32 %retval.0
}
-!181 = metadata !{metadata !"branch_weights", i32 662038, i32 1}
-!988 = metadata !{metadata !"branch_weights", i32 12091450, i32 1916}
-!989 = metadata !{metadata !"branch_weights", i32 7564670, i32 4526781}
-!990 = metadata !{metadata !"branch_weights", i32 7484958, i32 13283499}
-!991 = metadata !{metadata !"branch_weights", i32 8677007, i32 4606493}
-!992 = metadata !{metadata !"branch_weights", i32 -1172426948, i32 145094705}
-!993 = metadata !{metadata !"branch_weights", i32 1468914, i32 5683688}
-!994 = metadata !{metadata !"branch_weights", i32 114025221, i32 -1217548794, i32 -1199521551, i32 87712616}
-!995 = metadata !{metadata !"branch_weights", i32 1853716452, i32 -444717951, i32 932776759}
-!996 = metadata !{metadata !"branch_weights", i32 1004870, i32 20259}
-!997 = metadata !{metadata !"branch_weights", i32 20071, i32 189}
-!998 = metadata !{metadata !"branch_weights", i32 -1020255939, i32 572177766}
-!999 = metadata !{metadata !"branch_weights", i32 2666513, i32 3466431}
-!1000 = metadata !{metadata !"branch_weights", i32 5117635, i32 1859780}
-!1001 = metadata !{metadata !"branch_weights", i32 354902465, i32 -1444604407}
-!1002 = metadata !{metadata !"branch_weights", i32 -1762419279, i32 1592770684}
-!1003 = metadata !{metadata !"branch_weights", i32 1435905930, i32 -1951930624}
-!1004 = metadata !{metadata !"branch_weights", i32 1, i32 504888}
-!1005 = metadata !{metadata !"branch_weights", i32 94662, i32 504888}
-!1006 = metadata !{metadata !"branch_weights", i32 -1897793104, i32 160196332}
-!1007 = metadata !{metadata !"branch_weights", i32 2074643678, i32 -29579071}
-!1008 = metadata !{metadata !"branch_weights", i32 1, i32 226163}
-!1009 = metadata !{metadata !"branch_weights", i32 58357, i32 226163}
-!1010 = metadata !{metadata !"branch_weights", i32 -2072848646, i32 92907517}
+!181 = !{!"branch_weights", i32 662038, i32 1}
+!988 = !{!"branch_weights", i32 12091450, i32 1916}
+!989 = !{!"branch_weights", i32 7564670, i32 4526781}
+!990 = !{!"branch_weights", i32 7484958, i32 13283499}
+!991 = !{!"branch_weights", i32 8677007, i32 4606493}
+!992 = !{!"branch_weights", i32 -1172426948, i32 145094705}
+!993 = !{!"branch_weights", i32 1468914, i32 5683688}
+!994 = !{!"branch_weights", i32 114025221, i32 -1217548794, i32 -1199521551, i32 87712616}
+!995 = !{!"branch_weights", i32 1853716452, i32 -444717951, i32 932776759}
+!996 = !{!"branch_weights", i32 1004870, i32 20259}
+!997 = !{!"branch_weights", i32 20071, i32 189}
+!998 = !{!"branch_weights", i32 -1020255939, i32 572177766}
+!999 = !{!"branch_weights", i32 2666513, i32 3466431}
+!1000 = !{!"branch_weights", i32 5117635, i32 1859780}
+!1001 = !{!"branch_weights", i32 354902465, i32 -1444604407}
+!1002 = !{!"branch_weights", i32 -1762419279, i32 1592770684}
+!1003 = !{!"branch_weights", i32 1435905930, i32 -1951930624}
+!1004 = !{!"branch_weights", i32 1, i32 504888}
+!1005 = !{!"branch_weights", i32 94662, i32 504888}
+!1006 = !{!"branch_weights", i32 -1897793104, i32 160196332}
+!1007 = !{!"branch_weights", i32 2074643678, i32 -29579071}
+!1008 = !{!"branch_weights", i32 1, i32 226163}
+!1009 = !{!"branch_weights", i32 58357, i32 226163}
+!1010 = !{!"branch_weights", i32 -2072848646, i32 92907517}
diff --git a/test/CodeGen/AArch64/remat.ll b/test/CodeGen/AArch64/remat.ll
index 32b3ed2..8b3e6dd 100644
--- a/test/CodeGen/AArch64/remat.ll
+++ b/test/CodeGen/AArch64/remat.ll
@@ -1,5 +1,6 @@
; RUN: llc -mtriple=aarch64-linux-gnuabi -mcpu=cortex-a57 -o - %s | FileCheck %s
; RUN: llc -mtriple=aarch64-linux-gnuabi -mcpu=cortex-a53 -o - %s | FileCheck %s
+; RUN: llc -mtriple=aarch64-linux-gnuabi -mcpu=cortex-a72 -o - %s | FileCheck %s
%X = type { i64, i64, i64 }
declare void @f(%X*)
diff --git a/test/CodeGen/AArch64/setcc-type-mismatch.ll b/test/CodeGen/AArch64/setcc-type-mismatch.ll
new file mode 100644
index 0000000..86817fa
--- /dev/null
+++ b/test/CodeGen/AArch64/setcc-type-mismatch.ll
@@ -0,0 +1,11 @@
+; RUN: llc -mtriple=aarch64-linux-gnu %s -o - | FileCheck %s
+
+define void @test_mismatched_setcc(<4 x i22> %l, <4 x i22> %r, <4 x i1>* %addr) {
+; CHECK-LABEL: test_mismatched_setcc:
+; CHECK: cmeq [[CMP128:v[0-9]+]].4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s
+; CHECK: xtn {{v[0-9]+}}.4h, [[CMP128]].4s
+
+ %tst = icmp eq <4 x i22> %l, %r
+ store <4 x i1> %tst, <4 x i1>* %addr
+ ret void
+}
diff --git a/test/CodeGen/ARM/2007-05-09-tailmerge-2.ll b/test/CodeGen/ARM/2007-05-09-tailmerge-2.ll
index 4894116..37e41ec 100644
--- a/test/CodeGen/ARM/2007-05-09-tailmerge-2.ll
+++ b/test/CodeGen/ARM/2007-05-09-tailmerge-2.ll
@@ -1,6 +1,11 @@
-; RUN: llc < %s -march=arm -enable-tail-merge | grep bl.*baz | count 1
-; RUN: llc < %s -march=arm -enable-tail-merge | grep bl.*quux | count 1
+; RUN: llc < %s -march=arm | FileCheck %s
+
; Check that calls to baz and quux are tail-merged.
+; CHECK: bl _baz
+; CHECK-NOT: bl _baz
+; CHECK: bl _quux
+; CHECK-NOT: bl _quux
+
; PR1628
; ModuleID = 'tail.c'
diff --git a/test/CodeGen/ARM/2007-05-22-tailmerge-3.ll b/test/CodeGen/ARM/2007-05-22-tailmerge-3.ll
index acbab8a..30ae723 100644
--- a/test/CodeGen/ARM/2007-05-22-tailmerge-3.ll
+++ b/test/CodeGen/ARM/2007-05-22-tailmerge-3.ll
@@ -1,10 +1,23 @@
-; RUN: llc < %s -march=arm | grep bl.*baz | count 1
-; RUN: llc < %s -march=arm | grep bl.*quux | count 1
-; RUN: llc < %s -march=arm -enable-tail-merge=0 | grep bl.*baz | count 2
-; RUN: llc < %s -march=arm -enable-tail-merge=0 | grep bl.*quux | count 2
-; Check that tail merging is the default on ARM, and that -enable-tail-merge=0 works.
+; RUN: llc < %s -march=arm | FileCheck %s
+; RUN: llc < %s -march=arm -enable-tail-merge=0 | \
+; RUN: FileCheck --check-prefix=NOMERGE %s
+
+; Check that tail merging is the default on ARM, and that -enable-tail-merge=0
+; works.
; PR1628
+; CHECK: bl _baz
+; CHECK-NOT: bl _baz
+
+; CHECK: bl _quux
+; CHECK-NOT: bl _quux
+
+; NOMERGE: bl _baz
+; NOMERGE: bl _baz
+
+; NOMERGE: bl _quux
+; NOMERGE: bl _quux
+
; ModuleID = 'tail.c'
target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64"
target triple = "i686-apple-darwin8"
diff --git a/test/CodeGen/ARM/2009-10-16-Scope.ll b/test/CodeGen/ARM/2009-10-16-Scope.ll
index b4e758d..de05644 100644
--- a/test/CodeGen/ARM/2009-10-16-Scope.ll
+++ b/test/CodeGen/ARM/2009-10-16-Scope.ll
@@ -9,7 +9,7 @@ entry:
br label %do.body, !dbg !0
do.body: ; preds = %entry
- call void @llvm.dbg.declare(metadata !{i32* %count_}, metadata !4, metadata !{metadata !"0x102"})
+ call void @llvm.dbg.declare(metadata i32* %count_, metadata !4, metadata !{!"0x102"})
%conv = ptrtoint i32* %count_ to i32, !dbg !0 ; <i32> [#uses=1]
%call = call i32 @foo(i32 %conv) ssp, !dbg !0 ; <i32> [#uses=0]
br label %do.end, !dbg !0
@@ -22,13 +22,13 @@ declare void @llvm.dbg.declare(metadata, metadata, metadata) nounwind readnone
declare i32 @foo(i32) ssp
-!0 = metadata !{i32 5, i32 2, metadata !1, null}
-!1 = metadata !{metadata !"0xb\001\001\000", null, metadata !2}; [DW_TAG_lexical_block ]
-!2 = metadata !{metadata !"0x2e\00bar\00bar\00bar\004\000\001\000\006\000\000\000", i32 0, metadata !3, null, null, null, null, null, null}; [DW_TAG_subprogram ]
-!3 = metadata !{metadata !"0x11\0012\00clang 1.1\001\00\000\00\000", metadata !8, null, metadata !9, null, null, null}; [DW_TAG_compile_unit ]
-!4 = metadata !{metadata !"0x100\00count_\005\000", metadata !5, metadata !3, metadata !6}; [ DW_TAG_auto_variable ]
-!5 = metadata !{metadata !"0xb\001\001\000", null, metadata !1}; [DW_TAG_lexical_block ]
-!6 = metadata !{metadata !"0x24\00int\000\0032\0032\000\000\005", null, metadata !3}; [DW_TAG_base_type ]
-!7 = metadata !{i32 6, i32 1, metadata !2, null}
-!8 = metadata !{metadata !"genmodes.i", metadata !"/Users/yash/Downloads"}
-!9 = metadata !{i32 0}
+!0 = !MDLocation(line: 5, column: 2, scope: !1)
+!1 = !{!"0xb\001\001\000", null, !2}; [DW_TAG_lexical_block ]
+!2 = !{!"0x2e\00bar\00bar\00bar\004\000\001\000\006\000\000\000", i32 0, !3, null, null, null, null, null, null}; [DW_TAG_subprogram ]
+!3 = !{!"0x11\0012\00clang 1.1\001\00\000\00\000", !8, null, !9, null, null, null}; [DW_TAG_compile_unit ]
+!4 = !{!"0x100\00count_\005\000", !5, !3, !6}; [ DW_TAG_auto_variable ]
+!5 = !{!"0xb\001\001\000", null, !1}; [DW_TAG_lexical_block ]
+!6 = !{!"0x24\00int\000\0032\0032\000\000\005", null, !3}; [DW_TAG_base_type ]
+!7 = !MDLocation(line: 6, column: 1, scope: !2)
+!8 = !{!"genmodes.i", !"/Users/yash/Downloads"}
+!9 = !{i32 0}
diff --git a/test/CodeGen/ARM/2010-04-15-ScavengerDebugValue.ll b/test/CodeGen/ARM/2010-04-15-ScavengerDebugValue.ll
index bce3120..6f7db93 100644
--- a/test/CodeGen/ARM/2010-04-15-ScavengerDebugValue.ll
+++ b/test/CodeGen/ARM/2010-04-15-ScavengerDebugValue.ll
@@ -5,7 +5,7 @@ target triple = "armv4t-apple-darwin10"
define hidden i32 @__addvsi3(i32 %a, i32 %b) nounwind {
entry:
- tail call void @llvm.dbg.value(metadata !{i32 %b}, i64 0, metadata !0, metadata !{metadata !"0x102"})
+ tail call void @llvm.dbg.value(metadata i32 %b, i64 0, metadata !0, metadata !{!"0x102"})
%0 = add nsw i32 %b, %a, !dbg !9 ; <i32> [#uses=1]
ret i32 %0, !dbg !11
}
@@ -14,19 +14,19 @@ declare void @llvm.dbg.value(metadata, i64, metadata, metadata) nounwind readnon
!llvm.dbg.cu = !{!3}
!llvm.module.flags = !{!15}
-!0 = metadata !{metadata !"0x101\00b\0093\000", metadata !1, metadata !2, metadata !6} ; [ DW_TAG_arg_variable ]
-!1 = metadata !{metadata !"0x2e\00__addvsi3\00__addvsi3\00__addvsi3\0094\000\001\000\006\000\000\000", metadata !12, null, metadata !4, null, null, null, null, null} ; [ DW_TAG_subprogram ]
-!2 = metadata !{metadata !"0x29", metadata !12} ; [ DW_TAG_file_type ]
-!12 = metadata !{metadata !"libgcc2.c", metadata !"/Users/bwilson/local/nightly/test-2010-04-14/build/llvmgcc.roots/llvmgcc~obj/src/gcc"}
-!3 = metadata !{metadata !"0x11\001\004.2.1 (Based on Apple Inc. build 5658) (LLVM build 00)\001\00\000\00\000", metadata !12, metadata !13, metadata !13, metadata !14, null, null} ; [ DW_TAG_compile_unit ]
-!4 = metadata !{metadata !"0x15\00\000\000\000\000\000\000", metadata !12, metadata !2, null, metadata !5, null, null, null} ; [ DW_TAG_subroutine_type ] [line 0, size 0, align 0, offset 0] [from ]
-!5 = metadata !{metadata !6, metadata !6, metadata !6}
-!6 = metadata !{metadata !"0x16\00SItype\00152\000\000\000\000", metadata !12, null, metadata !8} ; [ DW_TAG_typedef ]
-!7 = metadata !{metadata !"0x29", metadata !"libgcc2.h", metadata !"/Users/bwilson/local/nightly/test-2010-04-14/build/llvmgcc.roots/llvmgcc~obj/src/gcc", metadata !3} ; [ DW_TAG_file_type ]
-!8 = metadata !{metadata !"0x24\00int\000\0032\0032\000\000\005", metadata !12, metadata !2} ; [ DW_TAG_base_type ]
-!9 = metadata !{i32 95, i32 0, metadata !10, null}
-!10 = metadata !{metadata !"0xb\0094\000\000", metadata !12, metadata !1} ; [ DW_TAG_lexical_block ]
-!11 = metadata !{i32 100, i32 0, metadata !10, null}
-!13 = metadata !{i32 0}
-!14 = metadata !{metadata !1}
-!15 = metadata !{i32 1, metadata !"Debug Info Version", i32 2}
+!0 = !{!"0x101\00b\0093\000", !1, !2, !6} ; [ DW_TAG_arg_variable ]
+!1 = !{!"0x2e\00__addvsi3\00__addvsi3\00__addvsi3\0094\000\001\000\006\000\000\000", !12, null, !4, null, null, null, null, null} ; [ DW_TAG_subprogram ]
+!2 = !{!"0x29", !12} ; [ DW_TAG_file_type ]
+!12 = !{!"libgcc2.c", !"/Users/bwilson/local/nightly/test-2010-04-14/build/llvmgcc.roots/llvmgcc~obj/src/gcc"}
+!3 = !{!"0x11\001\004.2.1 (Based on Apple Inc. build 5658) (LLVM build 00)\001\00\000\00\000", !12, !13, !13, !14, null, null} ; [ DW_TAG_compile_unit ]
+!4 = !{!"0x15\00\000\000\000\000\000\000", !12, !2, null, !5, null, null, null} ; [ DW_TAG_subroutine_type ] [line 0, size 0, align 0, offset 0] [from ]
+!5 = !{!6, !6, !6}
+!6 = !{!"0x16\00SItype\00152\000\000\000\000", !12, null, !8} ; [ DW_TAG_typedef ]
+!7 = !{!"0x29", !"libgcc2.h", !"/Users/bwilson/local/nightly/test-2010-04-14/build/llvmgcc.roots/llvmgcc~obj/src/gcc", !3} ; [ DW_TAG_file_type ]
+!8 = !{!"0x24\00int\000\0032\0032\000\000\005", !12, !2} ; [ DW_TAG_base_type ]
+!9 = !MDLocation(line: 95, scope: !10)
+!10 = !{!"0xb\0094\000\000", !12, !1} ; [ DW_TAG_lexical_block ]
+!11 = !MDLocation(line: 100, scope: !10)
+!13 = !{i32 0}
+!14 = !{!1}
+!15 = !{i32 1, !"Debug Info Version", i32 2}
diff --git a/test/CodeGen/ARM/2010-06-25-Thumb2ITInvalidIterator.ll b/test/CodeGen/ARM/2010-06-25-Thumb2ITInvalidIterator.ll
index efe1ab5..18b3be0 100644
--- a/test/CodeGen/ARM/2010-06-25-Thumb2ITInvalidIterator.ll
+++ b/test/CodeGen/ARM/2010-06-25-Thumb2ITInvalidIterator.ll
@@ -7,16 +7,16 @@ target triple = "thumbv7-apple-darwin3.0.0-iphoneos"
define void @x0(i8* nocapture %buf, i32 %nbytes) nounwind optsize {
entry:
- tail call void @llvm.dbg.value(metadata !{i8* %buf}, i64 0, metadata !0, metadata !{metadata !"0x102"}), !dbg !15
- tail call void @llvm.dbg.value(metadata !{i32 %nbytes}, i64 0, metadata !8, metadata !{metadata !"0x102"}), !dbg !16
+ tail call void @llvm.dbg.value(metadata i8* %buf, i64 0, metadata !0, metadata !{!"0x102"}), !dbg !15
+ tail call void @llvm.dbg.value(metadata i32 %nbytes, i64 0, metadata !8, metadata !{!"0x102"}), !dbg !16
%tmp = load i32* @length, !dbg !17 ; <i32> [#uses=3]
%cmp = icmp eq i32 %tmp, -1, !dbg !17 ; <i1> [#uses=1]
%cmp.not = xor i1 %cmp, true ; <i1> [#uses=1]
%cmp3 = icmp ult i32 %tmp, %nbytes, !dbg !17 ; <i1> [#uses=1]
%or.cond = and i1 %cmp.not, %cmp3 ; <i1> [#uses=1]
- tail call void @llvm.dbg.value(metadata !{i32 %tmp}, i64 0, metadata !8, metadata !{metadata !"0x102"}), !dbg !17
+ tail call void @llvm.dbg.value(metadata i32 %tmp, i64 0, metadata !8, metadata !{!"0x102"}), !dbg !17
%nbytes.addr.0 = select i1 %or.cond, i32 %tmp, i32 %nbytes ; <i32> [#uses=1]
- tail call void @llvm.dbg.value(metadata !18, i64 0, metadata !10, metadata !{metadata !"0x102"}), !dbg !19
+ tail call void @llvm.dbg.value(metadata i32 0, i64 0, metadata !10, metadata !{!"0x102"}), !dbg !19
br label %while.cond, !dbg !20
while.cond: ; preds = %while.body, %entry
@@ -47,30 +47,30 @@ declare void @llvm.dbg.value(metadata, i64, metadata, metadata) nounwind readnon
!llvm.dbg.lv.fn = !{!0, !8, !10, !12}
!llvm.dbg.gv = !{!14}
-!0 = metadata !{metadata !"0x101\00buf\004\000", metadata !1, metadata !2, metadata !6} ; [ DW_TAG_arg_variable ]
-!1 = metadata !{metadata !"0x2e\00x0\00x0\00x0\005\000\001\000\006\000\000\000", metadata !26, null, metadata !4, null, null, null, null, null} ; [ DW_TAG_subprogram ]
-!2 = metadata !{metadata !"0x29", metadata !26} ; [ DW_TAG_file_type ]
-!3 = metadata !{metadata !"0x11\0012\00clang 2.0\001\00\00\00\00", metadata !26, null, null, null, null, null} ; [ DW_TAG_compile_unit ]
-!4 = metadata !{metadata !"0x15\00\000\000\000\000\000\000", metadata !26, metadata !2, null, metadata !5, null} ; [ DW_TAG_subroutine_type ]
-!5 = metadata !{null}
-!6 = metadata !{metadata !"0xf\00\000\0032\0032\000\000", metadata !26, metadata !2, metadata !7} ; [ DW_TAG_pointer_type ]
-!7 = metadata !{metadata !"0x24\00unsigned char\000\008\008\000\000\008", metadata !26, metadata !2} ; [ DW_TAG_base_type ]
-!8 = metadata !{metadata !"0x101\00nbytes\004\000", metadata !1, metadata !2, metadata !9} ; [ DW_TAG_arg_variable ]
-!9 = metadata !{metadata !"0x24\00unsigned long\000\0032\0032\000\000\007", metadata !26, metadata !2} ; [ DW_TAG_base_type ]
-!10 = metadata !{metadata !"0x100\00nread\006\000", metadata !11, metadata !2, metadata !9} ; [ DW_TAG_auto_variable ]
-!11 = metadata !{metadata !"0xb\005\001\000", metadata !26, metadata !1} ; [ DW_TAG_lexical_block ]
-!12 = metadata !{metadata !"0x100\00c\007\000", metadata !11, metadata !2, metadata !13} ; [ DW_TAG_auto_variable ]
-!13 = metadata !{metadata !"0x24\00int\000\0032\0032\000\000\005", metadata !26, metadata !2} ; [ DW_TAG_base_type ]
-!14 = metadata !{metadata !"0x34\00length\00length\00length\001\000\001", metadata !2, metadata !2, metadata !13, i32* @length} ; [ DW_TAG_variable ]
-!15 = metadata !{i32 4, i32 24, metadata !1, null}
-!16 = metadata !{i32 4, i32 43, metadata !1, null}
-!17 = metadata !{i32 9, i32 2, metadata !11, null}
-!18 = metadata !{i32 0}
-!19 = metadata !{i32 10, i32 2, metadata !11, null}
-!20 = metadata !{i32 11, i32 2, metadata !11, null}
-!21 = metadata !{i32 12, i32 3, metadata !22, null}
-!22 = metadata !{metadata !"0xb\0011\0045\000", metadata !26, metadata !11} ; [ DW_TAG_lexical_block ]
-!23 = metadata !{i32 13, i32 3, metadata !22, null}
-!24 = metadata !{i32 14, i32 2, metadata !22, null}
-!25 = metadata !{i32 15, i32 1, metadata !11, null}
-!26 = metadata !{metadata !"t.c", metadata !"/private/tmp"}
+!0 = !{!"0x101\00buf\004\000", !1, !2, !6} ; [ DW_TAG_arg_variable ]
+!1 = !{!"0x2e\00x0\00x0\00x0\005\000\001\000\006\000\000\000", !26, null, !4, null, null, null, null, null} ; [ DW_TAG_subprogram ]
+!2 = !{!"0x29", !26} ; [ DW_TAG_file_type ]
+!3 = !{!"0x11\0012\00clang 2.0\001\00\00\00\00", !26, null, null, null, null, null} ; [ DW_TAG_compile_unit ]
+!4 = !{!"0x15\00\000\000\000\000\000\000", !26, !2, null, !5, null} ; [ DW_TAG_subroutine_type ]
+!5 = !{null}
+!6 = !{!"0xf\00\000\0032\0032\000\000", !26, !2, !7} ; [ DW_TAG_pointer_type ]
+!7 = !{!"0x24\00unsigned char\000\008\008\000\000\008", !26, !2} ; [ DW_TAG_base_type ]
+!8 = !{!"0x101\00nbytes\004\000", !1, !2, !9} ; [ DW_TAG_arg_variable ]
+!9 = !{!"0x24\00unsigned long\000\0032\0032\000\000\007", !26, !2} ; [ DW_TAG_base_type ]
+!10 = !{!"0x100\00nread\006\000", !11, !2, !9} ; [ DW_TAG_auto_variable ]
+!11 = !{!"0xb\005\001\000", !26, !1} ; [ DW_TAG_lexical_block ]
+!12 = !{!"0x100\00c\007\000", !11, !2, !13} ; [ DW_TAG_auto_variable ]
+!13 = !{!"0x24\00int\000\0032\0032\000\000\005", !26, !2} ; [ DW_TAG_base_type ]
+!14 = !{!"0x34\00length\00length\00length\001\000\001", !2, !2, !13, i32* @length} ; [ DW_TAG_variable ]
+!15 = !MDLocation(line: 4, column: 24, scope: !1)
+!16 = !MDLocation(line: 4, column: 43, scope: !1)
+!17 = !MDLocation(line: 9, column: 2, scope: !11)
+!18 = !{i32 0}
+!19 = !MDLocation(line: 10, column: 2, scope: !11)
+!20 = !MDLocation(line: 11, column: 2, scope: !11)
+!21 = !MDLocation(line: 12, column: 3, scope: !22)
+!22 = !{!"0xb\0011\0045\000", !26, !11} ; [ DW_TAG_lexical_block ]
+!23 = !MDLocation(line: 13, column: 3, scope: !22)
+!24 = !MDLocation(line: 14, column: 2, scope: !22)
+!25 = !MDLocation(line: 15, column: 1, scope: !11)
+!26 = !{!"t.c", !"/private/tmp"}
diff --git a/test/CodeGen/ARM/2010-08-04-StackVariable.ll b/test/CodeGen/ARM/2010-08-04-StackVariable.ll
index f10408c..f71a6c9 100644
--- a/test/CodeGen/ARM/2010-08-04-StackVariable.ll
+++ b/test/CodeGen/ARM/2010-08-04-StackVariable.ll
@@ -6,8 +6,8 @@
define i32 @_Z3fooi4SVal(i32 %i, %struct.SVal* noalias %location) nounwind ssp {
entry:
%"alloca point" = bitcast i32 0 to i32 ; <i32> [#uses=0]
- call void @llvm.dbg.value(metadata !{i32 %i}, i64 0, metadata !23, metadata !{metadata !"0x102"}), !dbg !24
- call void @llvm.dbg.value(metadata !{%struct.SVal* %location}, i64 0, metadata !25, metadata !{metadata !"0x102"}), !dbg !24
+ call void @llvm.dbg.value(metadata i32 %i, i64 0, metadata !23, metadata !{!"0x102"}), !dbg !24
+ call void @llvm.dbg.value(metadata %struct.SVal* %location, i64 0, metadata !25, metadata !{!"0x102"}), !dbg !24
%0 = icmp ne i32 %i, 0, !dbg !27 ; <i1> [#uses=1]
br i1 %0, label %bb, label %bb1, !dbg !27
@@ -34,7 +34,7 @@ return: ; preds = %bb2
define linkonce_odr void @_ZN4SValC1Ev(%struct.SVal* %this) nounwind ssp align 2 {
entry:
%"alloca point" = bitcast i32 0 to i32 ; <i32> [#uses=0]
- call void @llvm.dbg.value(metadata !{%struct.SVal* %this}, i64 0, metadata !31, metadata !{metadata !"0x102"}), !dbg !34
+ call void @llvm.dbg.value(metadata %struct.SVal* %this, i64 0, metadata !31, metadata !{!"0x102"}), !dbg !34
%0 = getelementptr inbounds %struct.SVal* %this, i32 0, i32 0, !dbg !34 ; <i8**> [#uses=1]
store i8* null, i8** %0, align 8, !dbg !34
%1 = getelementptr inbounds %struct.SVal* %this, i32 0, i32 1, !dbg !34 ; <i32*> [#uses=1]
@@ -52,7 +52,7 @@ entry:
%0 = alloca %struct.SVal ; <%struct.SVal*> [#uses=3]
%v = alloca %struct.SVal ; <%struct.SVal*> [#uses=4]
%"alloca point" = bitcast i32 0 to i32 ; <i32> [#uses=0]
- call void @llvm.dbg.declare(metadata !{%struct.SVal* %v}, metadata !38, metadata !{metadata !"0x102"}), !dbg !41
+ call void @llvm.dbg.declare(metadata %struct.SVal* %v, metadata !38, metadata !{!"0x102"}), !dbg !41
call void @_ZN4SValC1Ev(%struct.SVal* %v) nounwind, !dbg !41
%1 = getelementptr inbounds %struct.SVal* %v, i32 0, i32 1, !dbg !42 ; <i32*> [#uses=1]
store i32 1, i32* %1, align 8, !dbg !42
@@ -65,7 +65,7 @@ entry:
%7 = load i32* %6, align 8, !dbg !43 ; <i32> [#uses=1]
store i32 %7, i32* %5, align 8, !dbg !43
%8 = call i32 @_Z3fooi4SVal(i32 2, %struct.SVal* noalias %0) nounwind, !dbg !43 ; <i32> [#uses=0]
- call void @llvm.dbg.value(metadata !{i32 %8}, i64 0, metadata !44, metadata !{metadata !"0x102"}), !dbg !43
+ call void @llvm.dbg.value(metadata i32 %8, i64 0, metadata !44, metadata !{!"0x102"}), !dbg !43
br label %return, !dbg !45
return: ; preds = %entry
@@ -77,53 +77,53 @@ declare void @llvm.dbg.value(metadata, i64, metadata, metadata) nounwind readnon
!llvm.dbg.cu = !{!3}
!llvm.module.flags = !{!49}
-!0 = metadata !{metadata !"0x2e\00SVal\00SVal\00\0011\000\000\000\006\000\000\000", metadata !48, metadata !1, metadata !14, null, null, null, null, null} ; [ DW_TAG_subprogram ]
-!1 = metadata !{metadata !"0x13\00SVal\001\00128\0064\000\000\000", metadata !48, null, null, metadata !4, null, null, null} ; [ DW_TAG_structure_type ] [SVal] [line 1, size 128, align 64, offset 0] [def] [from ]
-!2 = metadata !{metadata !"0x29", metadata !48} ; [ DW_TAG_file_type ]
-!3 = metadata !{metadata !"0x11\004\004.2.1 (Based on Apple Inc. build 5658) (LLVM build)\000\00\000\00\001", metadata !48, metadata !47, metadata !47, metadata !46, metadata !47, metadata !47} ; [ DW_TAG_compile_unit ]
-!4 = metadata !{metadata !5, metadata !7, metadata !0, metadata !9}
-!5 = metadata !{metadata !"0xd\00Data\007\0064\0064\000\000", metadata !48, metadata !1, metadata !6} ; [ DW_TAG_member ]
-!6 = metadata !{metadata !"0xf\00\000\0064\0064\000\000", metadata !48, null, null} ; [ DW_TAG_pointer_type ]
-!7 = metadata !{metadata !"0xd\00Kind\008\0032\0032\0064\000", metadata !48, metadata !1, metadata !8} ; [ DW_TAG_member ]
-!8 = metadata !{metadata !"0x24\00unsigned int\000\0032\0032\000\000\007", metadata !48, null} ; [ DW_TAG_base_type ]
-!9 = metadata !{metadata !"0x2e\00~SVal\00~SVal\00\0012\000\000\000\006\000\000\000", metadata !48, metadata !1, metadata !10, null, null, null, null, null} ; [ DW_TAG_subprogram ]
-!10 = metadata !{metadata !"0x15\00\000\000\000\000\000\000", metadata !48, null, null, metadata !11, null, null, null} ; [ DW_TAG_subroutine_type ] [line 0, size 0, align 0, offset 0] [from ]
-!11 = metadata !{null, metadata !12, metadata !13}
-!12 = metadata !{metadata !"0xf\00\000\0064\0064\000\0064", metadata !48, null, metadata !1} ; [ DW_TAG_pointer_type ]
-!13 = metadata !{metadata !"0x24\00int\000\0032\0032\000\000\005", metadata !48, null} ; [ DW_TAG_base_type ]
-!14 = metadata !{metadata !"0x15\00\000\000\000\000\000\000", metadata !48, null, null, metadata !15, null, null, null} ; [ DW_TAG_subroutine_type ] [line 0, size 0, align 0, offset 0] [from ]
-!15 = metadata !{null, metadata !12}
-!16 = metadata !{metadata !"0x2e\00SVal\00SVal\00_ZN4SValC1Ev\0011\000\001\000\006\000\000\000", metadata !48, metadata !1, metadata !14, null, void (%struct.SVal*)* @_ZN4SValC1Ev, null, null, null} ; [ DW_TAG_subprogram ]
-!17 = metadata !{metadata !"0x2e\00foo\00foo\00_Z3fooi4SVal\0016\000\001\000\006\000\000\000", metadata !48, metadata !2, metadata !18, null, i32 (i32, %struct.SVal*)* @_Z3fooi4SVal, null, null, null} ; [ DW_TAG_subprogram ]
-!18 = metadata !{metadata !"0x15\00\000\000\000\000\000\000", metadata !48, null, null, metadata !19, null, null, null} ; [ DW_TAG_subroutine_type ] [line 0, size 0, align 0, offset 0] [from ]
-!19 = metadata !{metadata !13, metadata !13, metadata !1}
-!20 = metadata !{metadata !"0x2e\00main\00main\00main\0023\000\001\000\006\000\000\000", metadata !48, metadata !2, metadata !21, null, i32 ()* @main, null, null, null} ; [ DW_TAG_subprogram ]
-!21 = metadata !{metadata !"0x15\00\000\000\000\000\000\000", metadata !48, null, null, metadata !22, null, null, null} ; [ DW_TAG_subroutine_type ] [line 0, size 0, align 0, offset 0] [from ]
-!22 = metadata !{metadata !13}
-!23 = metadata !{metadata !"0x101\00i\0016\000", metadata !17, metadata !2, metadata !13} ; [ DW_TAG_arg_variable ]
-!24 = metadata !{i32 16, i32 0, metadata !17, null}
-!25 = metadata !{metadata !"0x101\00location\0016\000", metadata !17, metadata !2, metadata !26} ; [ DW_TAG_arg_variable ]
-!26 = metadata !{metadata !"0x10\00SVal\000\0064\0064\000\000", metadata !48, metadata !2, metadata !1} ; [ DW_TAG_reference_type ]
-!27 = metadata !{i32 17, i32 0, metadata !28, null}
-!28 = metadata !{metadata !"0xb\0016\000\002", metadata !2, metadata !17} ; [ DW_TAG_lexical_block ]
-!29 = metadata !{i32 18, i32 0, metadata !28, null}
-!30 = metadata !{i32 20, i32 0, metadata !28, null}
-!31 = metadata !{metadata !"0x101\00this\0011\000", metadata !16, metadata !2, metadata !32} ; [ DW_TAG_arg_variable ]
-!32 = metadata !{metadata !"0x26\00\000\0064\0064\000\0064", metadata !48, metadata !2, metadata !33} ; [ DW_TAG_const_type ]
-!33 = metadata !{metadata !"0xf\00\000\0064\0064\000\000", metadata !48, metadata !2, metadata !1} ; [ DW_TAG_pointer_type ]
-!34 = metadata !{i32 11, i32 0, metadata !16, null}
-!35 = metadata !{i32 11, i32 0, metadata !36, null}
-!36 = metadata !{metadata !"0xb\0011\000\001", metadata !48, metadata !37} ; [ DW_TAG_lexical_block ]
-!37 = metadata !{metadata !"0xb\0011\000\000", metadata !48, metadata !16} ; [ DW_TAG_lexical_block ]
-!38 = metadata !{metadata !"0x100\00v\0024\000", metadata !39, metadata !2, metadata !1} ; [ DW_TAG_auto_variable ]
-!39 = metadata !{metadata !"0xb\0023\000\004", metadata !48, metadata !40} ; [ DW_TAG_lexical_block ]
-!40 = metadata !{metadata !"0xb\0023\000\003", metadata !48, metadata !20} ; [ DW_TAG_lexical_block ]
-!41 = metadata !{i32 24, i32 0, metadata !39, null}
-!42 = metadata !{i32 25, i32 0, metadata !39, null}
-!43 = metadata !{i32 26, i32 0, metadata !39, null}
-!44 = metadata !{metadata !"0x100\00k\0026\000", metadata !39, metadata !2, metadata !13} ; [ DW_TAG_auto_variable ]
-!45 = metadata !{i32 27, i32 0, metadata !39, null}
-!46 = metadata !{metadata !16, metadata !17, metadata !20}
-!47 = metadata !{}
-!48 = metadata !{metadata !"small.cc", metadata !"/Users/manav/R8248330"}
-!49 = metadata !{i32 1, metadata !"Debug Info Version", i32 2}
+!0 = !{!"0x2e\00SVal\00SVal\00\0011\000\000\000\006\000\000\000", !48, !1, !14, null, null, null, null, null} ; [ DW_TAG_subprogram ]
+!1 = !{!"0x13\00SVal\001\00128\0064\000\000\000", !48, null, null, !4, null, null, null} ; [ DW_TAG_structure_type ] [SVal] [line 1, size 128, align 64, offset 0] [def] [from ]
+!2 = !{!"0x29", !48} ; [ DW_TAG_file_type ]
+!3 = !{!"0x11\004\004.2.1 (Based on Apple Inc. build 5658) (LLVM build)\000\00\000\00\001", !48, !47, !47, !46, !47, !47} ; [ DW_TAG_compile_unit ]
+!4 = !{!5, !7, !0, !9}
+!5 = !{!"0xd\00Data\007\0064\0064\000\000", !48, !1, !6} ; [ DW_TAG_member ]
+!6 = !{!"0xf\00\000\0064\0064\000\000", !48, null, null} ; [ DW_TAG_pointer_type ]
+!7 = !{!"0xd\00Kind\008\0032\0032\0064\000", !48, !1, !8} ; [ DW_TAG_member ]
+!8 = !{!"0x24\00unsigned int\000\0032\0032\000\000\007", !48, null} ; [ DW_TAG_base_type ]
+!9 = !{!"0x2e\00~SVal\00~SVal\00\0012\000\000\000\006\000\000\000", !48, !1, !10, null, null, null, null, null} ; [ DW_TAG_subprogram ]
+!10 = !{!"0x15\00\000\000\000\000\000\000", !48, null, null, !11, null, null, null} ; [ DW_TAG_subroutine_type ] [line 0, size 0, align 0, offset 0] [from ]
+!11 = !{null, !12, !13}
+!12 = !{!"0xf\00\000\0064\0064\000\0064", !48, null, !1} ; [ DW_TAG_pointer_type ]
+!13 = !{!"0x24\00int\000\0032\0032\000\000\005", !48, null} ; [ DW_TAG_base_type ]
+!14 = !{!"0x15\00\000\000\000\000\000\000", !48, null, null, !15, null, null, null} ; [ DW_TAG_subroutine_type ] [line 0, size 0, align 0, offset 0] [from ]
+!15 = !{null, !12}
+!16 = !{!"0x2e\00SVal\00SVal\00_ZN4SValC1Ev\0011\000\001\000\006\000\000\000", !48, !1, !14, null, void (%struct.SVal*)* @_ZN4SValC1Ev, null, null, null} ; [ DW_TAG_subprogram ]
+!17 = !{!"0x2e\00foo\00foo\00_Z3fooi4SVal\0016\000\001\000\006\000\000\000", !48, !2, !18, null, i32 (i32, %struct.SVal*)* @_Z3fooi4SVal, null, null, null} ; [ DW_TAG_subprogram ]
+!18 = !{!"0x15\00\000\000\000\000\000\000", !48, null, null, !19, null, null, null} ; [ DW_TAG_subroutine_type ] [line 0, size 0, align 0, offset 0] [from ]
+!19 = !{!13, !13, !1}
+!20 = !{!"0x2e\00main\00main\00main\0023\000\001\000\006\000\000\000", !48, !2, !21, null, i32 ()* @main, null, null, null} ; [ DW_TAG_subprogram ]
+!21 = !{!"0x15\00\000\000\000\000\000\000", !48, null, null, !22, null, null, null} ; [ DW_TAG_subroutine_type ] [line 0, size 0, align 0, offset 0] [from ]
+!22 = !{!13}
+!23 = !{!"0x101\00i\0016\000", !17, !2, !13} ; [ DW_TAG_arg_variable ]
+!24 = !MDLocation(line: 16, scope: !17)
+!25 = !{!"0x101\00location\0016\000", !17, !2, !26} ; [ DW_TAG_arg_variable ]
+!26 = !{!"0x10\00SVal\000\0064\0064\000\000", !48, !2, !1} ; [ DW_TAG_reference_type ]
+!27 = !MDLocation(line: 17, scope: !28)
+!28 = !{!"0xb\0016\000\002", !2, !17} ; [ DW_TAG_lexical_block ]
+!29 = !MDLocation(line: 18, scope: !28)
+!30 = !MDLocation(line: 20, scope: !28)
+!31 = !{!"0x101\00this\0011\000", !16, !2, !32} ; [ DW_TAG_arg_variable ]
+!32 = !{!"0x26\00\000\0064\0064\000\0064", !48, !2, !33} ; [ DW_TAG_const_type ]
+!33 = !{!"0xf\00\000\0064\0064\000\000", !48, !2, !1} ; [ DW_TAG_pointer_type ]
+!34 = !MDLocation(line: 11, scope: !16)
+!35 = !MDLocation(line: 11, scope: !36)
+!36 = !{!"0xb\0011\000\001", !48, !37} ; [ DW_TAG_lexical_block ]
+!37 = !{!"0xb\0011\000\000", !48, !16} ; [ DW_TAG_lexical_block ]
+!38 = !{!"0x100\00v\0024\000", !39, !2, !1} ; [ DW_TAG_auto_variable ]
+!39 = !{!"0xb\0023\000\004", !48, !40} ; [ DW_TAG_lexical_block ]
+!40 = !{!"0xb\0023\000\003", !48, !20} ; [ DW_TAG_lexical_block ]
+!41 = !MDLocation(line: 24, scope: !39)
+!42 = !MDLocation(line: 25, scope: !39)
+!43 = !MDLocation(line: 26, scope: !39)
+!44 = !{!"0x100\00k\0026\000", !39, !2, !13} ; [ DW_TAG_auto_variable ]
+!45 = !MDLocation(line: 27, scope: !39)
+!46 = !{!16, !17, !20}
+!47 = !{}
+!48 = !{!"small.cc", !"/Users/manav/R8248330"}
+!49 = !{i32 1, !"Debug Info Version", i32 2}
diff --git a/test/CodeGen/ARM/2011-01-19-MergedGlobalDbg.ll b/test/CodeGen/ARM/2011-01-19-MergedGlobalDbg.ll
index 7fbd3ba..67dda67 100644
--- a/test/CodeGen/ARM/2011-01-19-MergedGlobalDbg.ll
+++ b/test/CodeGen/ARM/2011-01-19-MergedGlobalDbg.ll
@@ -30,9 +30,9 @@ target triple = "thumbv7-apple-darwin10"
define zeroext i8 @get1(i8 zeroext %a) nounwind optsize {
entry:
- tail call void @llvm.dbg.value(metadata !{i8 %a}, i64 0, metadata !10, metadata !{metadata !"0x102"}), !dbg !30
+ tail call void @llvm.dbg.value(metadata i8 %a, i64 0, metadata !10, metadata !{!"0x102"}), !dbg !30
%0 = load i8* @x1, align 4, !dbg !30
- tail call void @llvm.dbg.value(metadata !{i8 %0}, i64 0, metadata !11, metadata !{metadata !"0x102"}), !dbg !30
+ tail call void @llvm.dbg.value(metadata i8 %0, i64 0, metadata !11, metadata !{!"0x102"}), !dbg !30
store i8 %a, i8* @x1, align 4, !dbg !30
ret i8 %0, !dbg !31
}
@@ -41,36 +41,36 @@ declare void @llvm.dbg.value(metadata, i64, metadata, metadata) nounwind readnon
define zeroext i8 @get2(i8 zeroext %a) nounwind optsize {
entry:
- tail call void @llvm.dbg.value(metadata !{i8 %a}, i64 0, metadata !18, metadata !{metadata !"0x102"}), !dbg !32
+ tail call void @llvm.dbg.value(metadata i8 %a, i64 0, metadata !18, metadata !{!"0x102"}), !dbg !32
%0 = load i8* @x2, align 4, !dbg !32
- tail call void @llvm.dbg.value(metadata !{i8 %0}, i64 0, metadata !19, metadata !{metadata !"0x102"}), !dbg !32
+ tail call void @llvm.dbg.value(metadata i8 %0, i64 0, metadata !19, metadata !{!"0x102"}), !dbg !32
store i8 %a, i8* @x2, align 4, !dbg !32
ret i8 %0, !dbg !33
}
define zeroext i8 @get3(i8 zeroext %a) nounwind optsize {
entry:
- tail call void @llvm.dbg.value(metadata !{i8 %a}, i64 0, metadata !21, metadata !{metadata !"0x102"}), !dbg !34
+ tail call void @llvm.dbg.value(metadata i8 %a, i64 0, metadata !21, metadata !{!"0x102"}), !dbg !34
%0 = load i8* @x3, align 4, !dbg !34
- tail call void @llvm.dbg.value(metadata !{i8 %0}, i64 0, metadata !22, metadata !{metadata !"0x102"}), !dbg !34
+ tail call void @llvm.dbg.value(metadata i8 %0, i64 0, metadata !22, metadata !{!"0x102"}), !dbg !34
store i8 %a, i8* @x3, align 4, !dbg !34
ret i8 %0, !dbg !35
}
define zeroext i8 @get4(i8 zeroext %a) nounwind optsize {
entry:
- tail call void @llvm.dbg.value(metadata !{i8 %a}, i64 0, metadata !24, metadata !{metadata !"0x102"}), !dbg !36
+ tail call void @llvm.dbg.value(metadata i8 %a, i64 0, metadata !24, metadata !{!"0x102"}), !dbg !36
%0 = load i8* @x4, align 4, !dbg !36
- tail call void @llvm.dbg.value(metadata !{i8 %0}, i64 0, metadata !25, metadata !{metadata !"0x102"}), !dbg !36
+ tail call void @llvm.dbg.value(metadata i8 %0, i64 0, metadata !25, metadata !{!"0x102"}), !dbg !36
store i8 %a, i8* @x4, align 4, !dbg !36
ret i8 %0, !dbg !37
}
define zeroext i8 @get5(i8 zeroext %a) nounwind optsize {
entry:
- tail call void @llvm.dbg.value(metadata !{i8 %a}, i64 0, metadata !27, metadata !{metadata !"0x102"}), !dbg !38
+ tail call void @llvm.dbg.value(metadata i8 %a, i64 0, metadata !27, metadata !{!"0x102"}), !dbg !38
%0 = load i8* @x5, align 4, !dbg !38
- tail call void @llvm.dbg.value(metadata !{i8 %0}, i64 0, metadata !28, metadata !{metadata !"0x102"}), !dbg !38
+ tail call void @llvm.dbg.value(metadata i8 %0, i64 0, metadata !28, metadata !{!"0x102"}), !dbg !38
store i8 %a, i8* @x5, align 4, !dbg !38
ret i8 %0, !dbg !39
}
@@ -78,53 +78,53 @@ entry:
!llvm.dbg.cu = !{!2}
!llvm.module.flags = !{!49}
-!0 = metadata !{metadata !"0x2e\00get1\00get1\00get1\004\000\001\000\006\00256\001\004", metadata !47, metadata !1, metadata !3, null, i8 (i8)* @get1, null, null, metadata !42} ; [ DW_TAG_subprogram ]
-!1 = metadata !{metadata !"0x29", metadata !47} ; [ DW_TAG_file_type ]
-!2 = metadata !{metadata !"0x11\001\004.2.1 (Based on Apple Inc. build 5658) (LLVM build 2369.8)\001\00\000\00\000", metadata !47, metadata !48, metadata !48, metadata !40, metadata !41, metadata !48} ; [ DW_TAG_compile_unit ]
-!3 = metadata !{metadata !"0x15\00\000\000\000\000\000\000", metadata !47, metadata !1, null, metadata !4, null, null, null} ; [ DW_TAG_subroutine_type ] [line 0, size 0, align 0, offset 0] [from ]
-!4 = metadata !{metadata !5, metadata !5}
-!5 = metadata !{metadata !"0x24\00_Bool\000\008\008\000\000\002", metadata !47, metadata !1} ; [ DW_TAG_base_type ]
-!6 = metadata !{metadata !"0x2e\00get2\00get2\00get2\007\000\001\000\006\00256\001\007", metadata !47, metadata !1, metadata !3, null, i8 (i8)* @get2, null, null, metadata !43} ; [ DW_TAG_subprogram ]
-!7 = metadata !{metadata !"0x2e\00get3\00get3\00get3\0010\000\001\000\006\00256\001\0010", metadata !47, metadata !1, metadata !3, null, i8 (i8)* @get3, null, null, metadata !44} ; [ DW_TAG_subprogram ]
-!8 = metadata !{metadata !"0x2e\00get4\00get4\00get4\0013\000\001\000\006\00256\001\0013", metadata !47, metadata !1, metadata !3, null, i8 (i8)* @get4, null, null, metadata !45} ; [ DW_TAG_subprogram ]
-!9 = metadata !{metadata !"0x2e\00get5\00get5\00get5\0016\000\001\000\006\00256\001\0016", metadata !47, metadata !1, metadata !3, null, i8 (i8)* @get5, null, null, metadata !46} ; [ DW_TAG_subprogram ]
-!10 = metadata !{metadata !"0x101\00a\004\000", metadata !0, metadata !1, metadata !5} ; [ DW_TAG_arg_variable ]
-!11 = metadata !{metadata !"0x100\00b\004\000", metadata !12, metadata !1, metadata !5} ; [ DW_TAG_auto_variable ]
-!12 = metadata !{metadata !"0xb\004\000\000", metadata !47, metadata !0} ; [ DW_TAG_lexical_block ]
-!13 = metadata !{metadata !"0x34\00x1\00x1\00\003\001\001", metadata !1, metadata !1, metadata !5, i8* @x1, null} ; [ DW_TAG_variable ]
-!14 = metadata !{metadata !"0x34\00x2\00x2\00\006\001\001", metadata !1, metadata !1, metadata !5, i8* @x2, null} ; [ DW_TAG_variable ]
-!15 = metadata !{metadata !"0x34\00x3\00x3\00\009\001\001", metadata !1, metadata !1, metadata !5, i8* @x3, null} ; [ DW_TAG_variable ]
-!16 = metadata !{metadata !"0x34\00x4\00x4\00\0012\001\001", metadata !1, metadata !1, metadata !5, i8* @x4, null} ; [ DW_TAG_variable ]
-!17 = metadata !{metadata !"0x34\00x5\00x5\00\0015\000\001", metadata !1, metadata !1, metadata !5, i8* @x5, null} ; [ DW_TAG_variable ]
-!18 = metadata !{metadata !"0x101\00a\007\000", metadata !6, metadata !1, metadata !5} ; [ DW_TAG_arg_variable ]
-!19 = metadata !{metadata !"0x100\00b\007\000", metadata !20, metadata !1, metadata !5} ; [ DW_TAG_auto_variable ]
-!20 = metadata !{metadata !"0xb\007\000\001", metadata !47, metadata !6} ; [ DW_TAG_lexical_block ]
-!21 = metadata !{metadata !"0x101\00a\0010\000", metadata !7, metadata !1, metadata !5} ; [ DW_TAG_arg_variable ]
-!22 = metadata !{metadata !"0x100\00b\0010\000", metadata !23, metadata !1, metadata !5} ; [ DW_TAG_auto_variable ]
-!23 = metadata !{metadata !"0xb\0010\000\002", metadata !47, metadata !7} ; [ DW_TAG_lexical_block ]
-!24 = metadata !{metadata !"0x101\00a\0013\000", metadata !8, metadata !1, metadata !5} ; [ DW_TAG_arg_variable ]
-!25 = metadata !{metadata !"0x100\00b\0013\000", metadata !26, metadata !1, metadata !5} ; [ DW_TAG_auto_variable ]
-!26 = metadata !{metadata !"0xb\0013\000\003", metadata !47, metadata !8} ; [ DW_TAG_lexical_block ]
-!27 = metadata !{metadata !"0x101\00a\0016\000", metadata !9, metadata !1, metadata !5} ; [ DW_TAG_arg_variable ]
-!28 = metadata !{metadata !"0x100\00b\0016\000", metadata !29, metadata !1, metadata !5} ; [ DW_TAG_auto_variable ]
-!29 = metadata !{metadata !"0xb\0016\000\004", metadata !47, metadata !9} ; [ DW_TAG_lexical_block ]
-!30 = metadata !{i32 4, i32 0, metadata !0, null}
-!31 = metadata !{i32 4, i32 0, metadata !12, null}
-!32 = metadata !{i32 7, i32 0, metadata !6, null}
-!33 = metadata !{i32 7, i32 0, metadata !20, null}
-!34 = metadata !{i32 10, i32 0, metadata !7, null}
-!35 = metadata !{i32 10, i32 0, metadata !23, null}
-!36 = metadata !{i32 13, i32 0, metadata !8, null}
-!37 = metadata !{i32 13, i32 0, metadata !26, null}
-!38 = metadata !{i32 16, i32 0, metadata !9, null}
-!39 = metadata !{i32 16, i32 0, metadata !29, null}
-!40 = metadata !{metadata !0, metadata !6, metadata !7, metadata !8, metadata !9}
-!41 = metadata !{metadata !13, metadata !14, metadata !15, metadata !16, metadata !17}
-!42 = metadata !{metadata !10, metadata !11}
-!43 = metadata !{metadata !18, metadata !19}
-!44 = metadata !{metadata !21, metadata !22}
-!45 = metadata !{metadata !24, metadata !25}
-!46 = metadata !{metadata !27, metadata !28}
-!47 = metadata !{metadata !"foo.c", metadata !"/tmp/"}
-!48 = metadata !{}
-!49 = metadata !{i32 1, metadata !"Debug Info Version", i32 2}
+!0 = !{!"0x2e\00get1\00get1\00get1\004\000\001\000\006\00256\001\004", !47, !1, !3, null, i8 (i8)* @get1, null, null, !42} ; [ DW_TAG_subprogram ]
+!1 = !{!"0x29", !47} ; [ DW_TAG_file_type ]
+!2 = !{!"0x11\001\004.2.1 (Based on Apple Inc. build 5658) (LLVM build 2369.8)\001\00\000\00\000", !47, !48, !48, !40, !41, !48} ; [ DW_TAG_compile_unit ]
+!3 = !{!"0x15\00\000\000\000\000\000\000", !47, !1, null, !4, null, null, null} ; [ DW_TAG_subroutine_type ] [line 0, size 0, align 0, offset 0] [from ]
+!4 = !{!5, !5}
+!5 = !{!"0x24\00_Bool\000\008\008\000\000\002", !47, !1} ; [ DW_TAG_base_type ]
+!6 = !{!"0x2e\00get2\00get2\00get2\007\000\001\000\006\00256\001\007", !47, !1, !3, null, i8 (i8)* @get2, null, null, !43} ; [ DW_TAG_subprogram ]
+!7 = !{!"0x2e\00get3\00get3\00get3\0010\000\001\000\006\00256\001\0010", !47, !1, !3, null, i8 (i8)* @get3, null, null, !44} ; [ DW_TAG_subprogram ]
+!8 = !{!"0x2e\00get4\00get4\00get4\0013\000\001\000\006\00256\001\0013", !47, !1, !3, null, i8 (i8)* @get4, null, null, !45} ; [ DW_TAG_subprogram ]
+!9 = !{!"0x2e\00get5\00get5\00get5\0016\000\001\000\006\00256\001\0016", !47, !1, !3, null, i8 (i8)* @get5, null, null, !46} ; [ DW_TAG_subprogram ]
+!10 = !{!"0x101\00a\004\000", !0, !1, !5} ; [ DW_TAG_arg_variable ]
+!11 = !{!"0x100\00b\004\000", !12, !1, !5} ; [ DW_TAG_auto_variable ]
+!12 = !{!"0xb\004\000\000", !47, !0} ; [ DW_TAG_lexical_block ]
+!13 = !{!"0x34\00x1\00x1\00\003\001\001", !1, !1, !5, i8* @x1, null} ; [ DW_TAG_variable ]
+!14 = !{!"0x34\00x2\00x2\00\006\001\001", !1, !1, !5, i8* @x2, null} ; [ DW_TAG_variable ]
+!15 = !{!"0x34\00x3\00x3\00\009\001\001", !1, !1, !5, i8* @x3, null} ; [ DW_TAG_variable ]
+!16 = !{!"0x34\00x4\00x4\00\0012\001\001", !1, !1, !5, i8* @x4, null} ; [ DW_TAG_variable ]
+!17 = !{!"0x34\00x5\00x5\00\0015\000\001", !1, !1, !5, i8* @x5, null} ; [ DW_TAG_variable ]
+!18 = !{!"0x101\00a\007\000", !6, !1, !5} ; [ DW_TAG_arg_variable ]
+!19 = !{!"0x100\00b\007\000", !20, !1, !5} ; [ DW_TAG_auto_variable ]
+!20 = !{!"0xb\007\000\001", !47, !6} ; [ DW_TAG_lexical_block ]
+!21 = !{!"0x101\00a\0010\000", !7, !1, !5} ; [ DW_TAG_arg_variable ]
+!22 = !{!"0x100\00b\0010\000", !23, !1, !5} ; [ DW_TAG_auto_variable ]
+!23 = !{!"0xb\0010\000\002", !47, !7} ; [ DW_TAG_lexical_block ]
+!24 = !{!"0x101\00a\0013\000", !8, !1, !5} ; [ DW_TAG_arg_variable ]
+!25 = !{!"0x100\00b\0013\000", !26, !1, !5} ; [ DW_TAG_auto_variable ]
+!26 = !{!"0xb\0013\000\003", !47, !8} ; [ DW_TAG_lexical_block ]
+!27 = !{!"0x101\00a\0016\000", !9, !1, !5} ; [ DW_TAG_arg_variable ]
+!28 = !{!"0x100\00b\0016\000", !29, !1, !5} ; [ DW_TAG_auto_variable ]
+!29 = !{!"0xb\0016\000\004", !47, !9} ; [ DW_TAG_lexical_block ]
+!30 = !MDLocation(line: 4, scope: !0)
+!31 = !MDLocation(line: 4, scope: !12)
+!32 = !MDLocation(line: 7, scope: !6)
+!33 = !MDLocation(line: 7, scope: !20)
+!34 = !MDLocation(line: 10, scope: !7)
+!35 = !MDLocation(line: 10, scope: !23)
+!36 = !MDLocation(line: 13, scope: !8)
+!37 = !MDLocation(line: 13, scope: !26)
+!38 = !MDLocation(line: 16, scope: !9)
+!39 = !MDLocation(line: 16, scope: !29)
+!40 = !{!0, !6, !7, !8, !9}
+!41 = !{!13, !14, !15, !16, !17}
+!42 = !{!10, !11}
+!43 = !{!18, !19}
+!44 = !{!21, !22}
+!45 = !{!24, !25}
+!46 = !{!27, !28}
+!47 = !{!"foo.c", !"/tmp/"}
+!48 = !{}
+!49 = !{i32 1, !"Debug Info Version", i32 2}
diff --git a/test/CodeGen/ARM/2011-04-12-FastRegAlloc.ll b/test/CodeGen/ARM/2011-04-12-FastRegAlloc.ll
index eb23de0..e9a6793 100644
--- a/test/CodeGen/ARM/2011-04-12-FastRegAlloc.ll
+++ b/test/CodeGen/ARM/2011-04-12-FastRegAlloc.ll
@@ -12,4 +12,4 @@ entry:
ret void
}
-!0 = metadata !{i32 109}
+!0 = !{i32 109}
diff --git a/test/CodeGen/ARM/2011-05-04-MultipleLandingPadSuccs.ll b/test/CodeGen/ARM/2011-05-04-MultipleLandingPadSuccs.ll
index d3394b5..2af3e3e 100644
--- a/test/CodeGen/ARM/2011-05-04-MultipleLandingPadSuccs.ll
+++ b/test/CodeGen/ARM/2011-05-04-MultipleLandingPadSuccs.ll
@@ -81,8 +81,8 @@ declare void @_Unwind_SjLj_Resume_or_Rethrow(i8*)
declare void @_ZSt9terminatev()
-!0 = metadata !{metadata !"any pointer", metadata !1}
-!1 = metadata !{metadata !"omnipotent char", metadata !2}
-!2 = metadata !{metadata !"Simple C/C++ TBAA", null}
-!3 = metadata !{metadata !"bool", metadata !1}
-!4 = metadata !{metadata !"int", metadata !1}
+!0 = !{!"any pointer", !1}
+!1 = !{!"omnipotent char", !2}
+!2 = !{!"Simple C/C++ TBAA", null}
+!3 = !{!"bool", !1}
+!4 = !{!"int", !1}
diff --git a/test/CodeGen/ARM/2011-08-02-MergedGlobalDbg.ll b/test/CodeGen/ARM/2011-08-02-MergedGlobalDbg.ll
index ede936c..3edc946 100644
--- a/test/CodeGen/ARM/2011-08-02-MergedGlobalDbg.ll
+++ b/test/CodeGen/ARM/2011-08-02-MergedGlobalDbg.ll
@@ -29,41 +29,41 @@ target triple = "thumbv7-apple-macosx10.7.0"
@x5 = global i32 0, align 4
define i32 @get1(i32 %a) nounwind optsize ssp {
- tail call void @llvm.dbg.value(metadata !{i32 %a}, i64 0, metadata !10, metadata !{metadata !"0x102"}), !dbg !30
+ tail call void @llvm.dbg.value(metadata i32 %a, i64 0, metadata !10, metadata !{!"0x102"}), !dbg !30
%1 = load i32* @x1, align 4, !dbg !31
- tail call void @llvm.dbg.value(metadata !{i32 %1}, i64 0, metadata !11, metadata !{metadata !"0x102"}), !dbg !31
+ tail call void @llvm.dbg.value(metadata i32 %1, i64 0, metadata !11, metadata !{!"0x102"}), !dbg !31
store i32 %a, i32* @x1, align 4, !dbg !31
ret i32 %1, !dbg !31
}
define i32 @get2(i32 %a) nounwind optsize ssp {
- tail call void @llvm.dbg.value(metadata !{i32 %a}, i64 0, metadata !13, metadata !{metadata !"0x102"}), !dbg !32
+ tail call void @llvm.dbg.value(metadata i32 %a, i64 0, metadata !13, metadata !{!"0x102"}), !dbg !32
%1 = load i32* @x2, align 4, !dbg !33
- tail call void @llvm.dbg.value(metadata !{i32 %1}, i64 0, metadata !14, metadata !{metadata !"0x102"}), !dbg !33
+ tail call void @llvm.dbg.value(metadata i32 %1, i64 0, metadata !14, metadata !{!"0x102"}), !dbg !33
store i32 %a, i32* @x2, align 4, !dbg !33
ret i32 %1, !dbg !33
}
define i32 @get3(i32 %a) nounwind optsize ssp {
- tail call void @llvm.dbg.value(metadata !{i32 %a}, i64 0, metadata !16, metadata !{metadata !"0x102"}), !dbg !34
+ tail call void @llvm.dbg.value(metadata i32 %a, i64 0, metadata !16, metadata !{!"0x102"}), !dbg !34
%1 = load i32* @x3, align 4, !dbg !35
- tail call void @llvm.dbg.value(metadata !{i32 %1}, i64 0, metadata !17, metadata !{metadata !"0x102"}), !dbg !35
+ tail call void @llvm.dbg.value(metadata i32 %1, i64 0, metadata !17, metadata !{!"0x102"}), !dbg !35
store i32 %a, i32* @x3, align 4, !dbg !35
ret i32 %1, !dbg !35
}
define i32 @get4(i32 %a) nounwind optsize ssp {
- tail call void @llvm.dbg.value(metadata !{i32 %a}, i64 0, metadata !19, metadata !{metadata !"0x102"}), !dbg !36
+ tail call void @llvm.dbg.value(metadata i32 %a, i64 0, metadata !19, metadata !{!"0x102"}), !dbg !36
%1 = load i32* @x4, align 4, !dbg !37
- tail call void @llvm.dbg.value(metadata !{i32 %1}, i64 0, metadata !20, metadata !{metadata !"0x102"}), !dbg !37
+ tail call void @llvm.dbg.value(metadata i32 %1, i64 0, metadata !20, metadata !{!"0x102"}), !dbg !37
store i32 %a, i32* @x4, align 4, !dbg !37
ret i32 %1, !dbg !37
}
define i32 @get5(i32 %a) nounwind optsize ssp {
- tail call void @llvm.dbg.value(metadata !{i32 %a}, i64 0, metadata !27, metadata !{metadata !"0x102"}), !dbg !38
+ tail call void @llvm.dbg.value(metadata i32 %a, i64 0, metadata !27, metadata !{!"0x102"}), !dbg !38
%1 = load i32* @x5, align 4, !dbg !39
- tail call void @llvm.dbg.value(metadata !{i32 %1}, i64 0, metadata !28, metadata !{metadata !"0x102"}), !dbg !39
+ tail call void @llvm.dbg.value(metadata i32 %1, i64 0, metadata !28, metadata !{!"0x102"}), !dbg !39
store i32 %a, i32* @x5, align 4, !dbg !39
ret i32 %1, !dbg !39
}
@@ -73,50 +73,50 @@ declare void @llvm.dbg.value(metadata, i64, metadata, metadata) nounwind readnon
!llvm.dbg.cu = !{!0}
!llvm.module.flags = !{!49}
-!0 = metadata !{metadata !"0x11\0012\00clang\001\00\000\00\001", metadata !47, metadata !48, metadata !48, metadata !40, metadata !41, metadata !48} ; [ DW_TAG_compile_unit ]
-!1 = metadata !{metadata !"0x2e\00get1\00get1\00\005\000\001\000\006\00256\001\005", metadata !47, metadata !2, metadata !3, null, i32 (i32)* @get1, null, null, metadata !42} ; [ DW_TAG_subprogram ] [line 5] [def] [get1]
-!2 = metadata !{metadata !"0x29", metadata !47} ; [ DW_TAG_file_type ]
-!3 = metadata !{metadata !"0x15\00\000\000\000\000\000\000", metadata !47, metadata !2, null, metadata !4, null, null, null} ; [ DW_TAG_subroutine_type ] [line 0, size 0, align 0, offset 0] [from ]
-!4 = metadata !{metadata !5}
-!5 = metadata !{metadata !"0x24\00int\000\0032\0032\000\000\005", null, metadata !0} ; [ DW_TAG_base_type ]
-!6 = metadata !{metadata !"0x2e\00get2\00get2\00\008\000\001\000\006\00256\001\008", metadata !47, metadata !2, metadata !3, null, i32 (i32)* @get2, null, null, metadata !43} ; [ DW_TAG_subprogram ] [line 8] [def] [get2]
-!7 = metadata !{metadata !"0x2e\00get3\00get3\00\0011\000\001\000\006\00256\001\0011", metadata !47, metadata !2, metadata !3, null, i32 (i32)* @get3, null, null, metadata !44} ; [ DW_TAG_subprogram ] [line 11] [def] [get3]
-!8 = metadata !{metadata !"0x2e\00get4\00get4\00\0014\000\001\000\006\00256\001\0014", metadata !47, metadata !2, metadata !3, null, i32 (i32)* @get4, null, null, metadata !45} ; [ DW_TAG_subprogram ] [line 14] [def] [get4]
-!9 = metadata !{metadata !"0x2e\00get5\00get5\00\0017\000\001\000\006\00256\001\0017", metadata !47, metadata !2, metadata !3, null, i32 (i32)* @get5, null, null, metadata !46} ; [ DW_TAG_subprogram ] [line 17] [def] [get5]
-!10 = metadata !{metadata !"0x101\00a\0016777221\000", metadata !1, metadata !2, metadata !5} ; [ DW_TAG_arg_variable ]
-!11 = metadata !{metadata !"0x100\00b\005\000", metadata !12, metadata !2, metadata !5} ; [ DW_TAG_auto_variable ]
-!12 = metadata !{metadata !"0xb\005\0019\000", metadata !47, metadata !1} ; [ DW_TAG_lexical_block ]
-!13 = metadata !{metadata !"0x101\00a\0016777224\000", metadata !6, metadata !2, metadata !5} ; [ DW_TAG_arg_variable ]
-!14 = metadata !{metadata !"0x100\00b\008\000", metadata !15, metadata !2, metadata !5} ; [ DW_TAG_auto_variable ]
-!15 = metadata !{metadata !"0xb\008\0017\001", metadata !47, metadata !6} ; [ DW_TAG_lexical_block ]
-!16 = metadata !{metadata !"0x101\00a\0016777227\000", metadata !7, metadata !2, metadata !5} ; [ DW_TAG_arg_variable ]
-!17 = metadata !{metadata !"0x100\00b\0011\000", metadata !18, metadata !2, metadata !5} ; [ DW_TAG_auto_variable ]
-!18 = metadata !{metadata !"0xb\0011\0019\002", metadata !47, metadata !7} ; [ DW_TAG_lexical_block ]
-!19 = metadata !{metadata !"0x101\00a\0016777230\000", metadata !8, metadata !2, metadata !5} ; [ DW_TAG_arg_variable ]
-!20 = metadata !{metadata !"0x100\00b\0014\000", metadata !21, metadata !2, metadata !5} ; [ DW_TAG_auto_variable ]
-!21 = metadata !{metadata !"0xb\0014\0019\003", metadata !47, metadata !8} ; [ DW_TAG_lexical_block ]
-!25 = metadata !{metadata !"0x34\00x1\00x1\00\004\001\001", metadata !0, metadata !2, metadata !5, i32* @x1, null} ; [ DW_TAG_variable ]
-!26 = metadata !{metadata !"0x34\00x2\00x2\00\007\001\001", metadata !0, metadata !2, metadata !5, i32* @x2, null} ; [ DW_TAG_variable ]
-!27 = metadata !{metadata !"0x101\00a\0016777233\000", metadata !9, metadata !2, metadata !5} ; [ DW_TAG_arg_variable ]
-!28 = metadata !{metadata !"0x100\00b\0017\000", metadata !29, metadata !2, metadata !5} ; [ DW_TAG_auto_variable ]
-!29 = metadata !{metadata !"0xb\0017\0019\004", metadata !47, metadata !9} ; [ DW_TAG_lexical_block ]
-!30 = metadata !{i32 5, i32 16, metadata !1, null}
-!31 = metadata !{i32 5, i32 32, metadata !12, null}
-!32 = metadata !{i32 8, i32 14, metadata !6, null}
-!33 = metadata !{i32 8, i32 29, metadata !15, null}
-!34 = metadata !{i32 11, i32 16, metadata !7, null}
-!35 = metadata !{i32 11, i32 32, metadata !18, null}
-!36 = metadata !{i32 14, i32 16, metadata !8, null}
-!37 = metadata !{i32 14, i32 32, metadata !21, null}
-!38 = metadata !{i32 17, i32 16, metadata !9, null}
-!39 = metadata !{i32 17, i32 32, metadata !29, null}
-!40 = metadata !{metadata !1, metadata !6, metadata !7, metadata !8, metadata !9}
-!41 = metadata !{metadata !25, metadata !26}
-!42 = metadata !{metadata !10, metadata !11}
-!43 = metadata !{metadata !13, metadata !14}
-!44 = metadata !{metadata !16, metadata !17}
-!45 = metadata !{metadata !19, metadata !20}
-!46 = metadata !{metadata !27, metadata !28}
-!47 = metadata !{metadata !"ss3.c", metadata !"/private/tmp"}
-!48 = metadata !{}
-!49 = metadata !{i32 1, metadata !"Debug Info Version", i32 2}
+!0 = !{!"0x11\0012\00clang\001\00\000\00\001", !47, !48, !48, !40, !41, !48} ; [ DW_TAG_compile_unit ]
+!1 = !{!"0x2e\00get1\00get1\00\005\000\001\000\006\00256\001\005", !47, !2, !3, null, i32 (i32)* @get1, null, null, !42} ; [ DW_TAG_subprogram ] [line 5] [def] [get1]
+!2 = !{!"0x29", !47} ; [ DW_TAG_file_type ]
+!3 = !{!"0x15\00\000\000\000\000\000\000", !47, !2, null, !4, null, null, null} ; [ DW_TAG_subroutine_type ] [line 0, size 0, align 0, offset 0] [from ]
+!4 = !{!5}
+!5 = !{!"0x24\00int\000\0032\0032\000\000\005", null, !0} ; [ DW_TAG_base_type ]
+!6 = !{!"0x2e\00get2\00get2\00\008\000\001\000\006\00256\001\008", !47, !2, !3, null, i32 (i32)* @get2, null, null, !43} ; [ DW_TAG_subprogram ] [line 8] [def] [get2]
+!7 = !{!"0x2e\00get3\00get3\00\0011\000\001\000\006\00256\001\0011", !47, !2, !3, null, i32 (i32)* @get3, null, null, !44} ; [ DW_TAG_subprogram ] [line 11] [def] [get3]
+!8 = !{!"0x2e\00get4\00get4\00\0014\000\001\000\006\00256\001\0014", !47, !2, !3, null, i32 (i32)* @get4, null, null, !45} ; [ DW_TAG_subprogram ] [line 14] [def] [get4]
+!9 = !{!"0x2e\00get5\00get5\00\0017\000\001\000\006\00256\001\0017", !47, !2, !3, null, i32 (i32)* @get5, null, null, !46} ; [ DW_TAG_subprogram ] [line 17] [def] [get5]
+!10 = !{!"0x101\00a\0016777221\000", !1, !2, !5} ; [ DW_TAG_arg_variable ]
+!11 = !{!"0x100\00b\005\000", !12, !2, !5} ; [ DW_TAG_auto_variable ]
+!12 = !{!"0xb\005\0019\000", !47, !1} ; [ DW_TAG_lexical_block ]
+!13 = !{!"0x101\00a\0016777224\000", !6, !2, !5} ; [ DW_TAG_arg_variable ]
+!14 = !{!"0x100\00b\008\000", !15, !2, !5} ; [ DW_TAG_auto_variable ]
+!15 = !{!"0xb\008\0017\001", !47, !6} ; [ DW_TAG_lexical_block ]
+!16 = !{!"0x101\00a\0016777227\000", !7, !2, !5} ; [ DW_TAG_arg_variable ]
+!17 = !{!"0x100\00b\0011\000", !18, !2, !5} ; [ DW_TAG_auto_variable ]
+!18 = !{!"0xb\0011\0019\002", !47, !7} ; [ DW_TAG_lexical_block ]
+!19 = !{!"0x101\00a\0016777230\000", !8, !2, !5} ; [ DW_TAG_arg_variable ]
+!20 = !{!"0x100\00b\0014\000", !21, !2, !5} ; [ DW_TAG_auto_variable ]
+!21 = !{!"0xb\0014\0019\003", !47, !8} ; [ DW_TAG_lexical_block ]
+!25 = !{!"0x34\00x1\00x1\00\004\001\001", !0, !2, !5, i32* @x1, null} ; [ DW_TAG_variable ]
+!26 = !{!"0x34\00x2\00x2\00\007\001\001", !0, !2, !5, i32* @x2, null} ; [ DW_TAG_variable ]
+!27 = !{!"0x101\00a\0016777233\000", !9, !2, !5} ; [ DW_TAG_arg_variable ]
+!28 = !{!"0x100\00b\0017\000", !29, !2, !5} ; [ DW_TAG_auto_variable ]
+!29 = !{!"0xb\0017\0019\004", !47, !9} ; [ DW_TAG_lexical_block ]
+!30 = !MDLocation(line: 5, column: 16, scope: !1)
+!31 = !MDLocation(line: 5, column: 32, scope: !12)
+!32 = !MDLocation(line: 8, column: 14, scope: !6)
+!33 = !MDLocation(line: 8, column: 29, scope: !15)
+!34 = !MDLocation(line: 11, column: 16, scope: !7)
+!35 = !MDLocation(line: 11, column: 32, scope: !18)
+!36 = !MDLocation(line: 14, column: 16, scope: !8)
+!37 = !MDLocation(line: 14, column: 32, scope: !21)
+!38 = !MDLocation(line: 17, column: 16, scope: !9)
+!39 = !MDLocation(line: 17, column: 32, scope: !29)
+!40 = !{!1, !6, !7, !8, !9}
+!41 = !{!25, !26}
+!42 = !{!10, !11}
+!43 = !{!13, !14}
+!44 = !{!16, !17}
+!45 = !{!19, !20}
+!46 = !{!27, !28}
+!47 = !{!"ss3.c", !"/private/tmp"}
+!48 = !{}
+!49 = !{i32 1, !"Debug Info Version", i32 2}
diff --git a/test/CodeGen/ARM/2012-04-24-SplitEHCriticalEdge.ll b/test/CodeGen/ARM/2012-04-24-SplitEHCriticalEdge.ll
index b3a7e34..69d72bd 100644
--- a/test/CodeGen/ARM/2012-04-24-SplitEHCriticalEdge.ll
+++ b/test/CodeGen/ARM/2012-04-24-SplitEHCriticalEdge.ll
@@ -65,7 +65,7 @@ declare i32 @__gxx_personality_sj0(...)
!llvm.module.flags = !{!0, !1, !2, !3}
-!0 = metadata !{i32 1, metadata !"Objective-C Version", i32 2}
-!1 = metadata !{i32 1, metadata !"Objective-C Image Info Version", i32 0}
-!2 = metadata !{i32 1, metadata !"Objective-C Image Info Section", metadata !"__DATA, __objc_imageinfo, regular, no_dead_strip"}
-!3 = metadata !{i32 4, metadata !"Objective-C Garbage Collection", i32 0}
+!0 = !{i32 1, !"Objective-C Version", i32 2}
+!1 = !{i32 1, !"Objective-C Image Info Version", i32 0}
+!2 = !{i32 1, !"Objective-C Image Info Section", !"__DATA, __objc_imageinfo, regular, no_dead_strip"}
+!3 = !{i32 4, !"Objective-C Garbage Collection", i32 0}
diff --git a/test/CodeGen/ARM/2012-08-04-DtripleSpillReload.ll b/test/CodeGen/ARM/2012-08-04-DtripleSpillReload.ll
index adb5c7e..70e3079 100644
--- a/test/CodeGen/ARM/2012-08-04-DtripleSpillReload.ll
+++ b/test/CodeGen/ARM/2012-08-04-DtripleSpillReload.ll
@@ -169,4 +169,4 @@ define arm_aapcs_vfpcc void @foo(float, i1 zeroext, i1 zeroext) nounwind uwtable
declare arm_aapcs_vfpcc void @bar(%0*, float)
-!0 = metadata !{metadata !"branch_weights", i32 64, i32 4}
+!0 = !{!"branch_weights", i32 64, i32 4}
diff --git a/test/CodeGen/ARM/2012-09-25-InlineAsmScalarToVectorConv.ll b/test/CodeGen/ARM/2012-09-25-InlineAsmScalarToVectorConv.ll
index 5235e9c..53860ea 100644
--- a/test/CodeGen/ARM/2012-09-25-InlineAsmScalarToVectorConv.ll
+++ b/test/CodeGen/ARM/2012-09-25-InlineAsmScalarToVectorConv.ll
@@ -8,4 +8,4 @@ define void @f() nounwind ssp {
ret void
}
-!0 = metadata !{i32 318437}
+!0 = !{i32 318437}
diff --git a/test/CodeGen/ARM/2012-09-25-InlineAsmScalarToVectorConv2.ll b/test/CodeGen/ARM/2012-09-25-InlineAsmScalarToVectorConv2.ll
index d389b5c..b47247c 100644
--- a/test/CodeGen/ARM/2012-09-25-InlineAsmScalarToVectorConv2.ll
+++ b/test/CodeGen/ARM/2012-09-25-InlineAsmScalarToVectorConv2.ll
@@ -8,4 +8,4 @@ define hidden void @f(i32* %corr, i32 %order) nounwind ssp {
ret void
}
-!0 = metadata !{i32 257}
+!0 = !{i32 257}
diff --git a/test/CodeGen/ARM/2014-08-04-muls-it.ll b/test/CodeGen/ARM/2014-08-04-muls-it.ll
index 4636bff..5ba1347 100644
--- a/test/CodeGen/ARM/2014-08-04-muls-it.ll
+++ b/test/CodeGen/ARM/2014-08-04-muls-it.ll
@@ -17,9 +17,7 @@ if.end: ; preds = %if.then, %entry
; CHECK-LABEL: function
; CHECK: cmp r0, r1
-; CHECK: bne [[LABEL:[.*]]]
; CHECK-NOT: mulseq r0, r0, r0
-; CHECK: [[LABEL]]
-; CHECK: muls r0, r0, r0
+; CHECK: muleq r0, r0, r0
; CHECK: bx lr
diff --git a/test/CodeGen/ARM/2015-01-21-thumbv4t-ldstr-opt.ll b/test/CodeGen/ARM/2015-01-21-thumbv4t-ldstr-opt.ll
new file mode 100644
index 0000000..de2dead
--- /dev/null
+++ b/test/CodeGen/ARM/2015-01-21-thumbv4t-ldstr-opt.ll
@@ -0,0 +1,48 @@
+; RUN: llc -mtriple=thumbv4t-none--eabi < %s | FileCheck %s --check-prefix=CHECK --check-prefix=CHECK-V4T
+; RUN: llc -mtriple=thumbv6m-none--eabi < %s | FileCheck %s --check-prefix=CHECK --check-prefix=CHECK-V6M
+
+; CHECK-LABEL: test1
+define i32 @test1(i32* %p) {
+
+; Offsets less than 8 can be generated in a single add
+; CHECK: adds [[NEWBASE:r[0-9]]], r0, #4
+ %1 = getelementptr inbounds i32* %p, i32 1
+ %2 = getelementptr inbounds i32* %p, i32 2
+ %3 = getelementptr inbounds i32* %p, i32 3
+ %4 = getelementptr inbounds i32* %p, i32 4
+
+; CHECK-NEXT: ldm [[NEWBASE]],
+ %5 = load i32* %1, align 4
+ %6 = load i32* %2, align 4
+ %7 = load i32* %3, align 4
+ %8 = load i32* %4, align 4
+
+ %9 = add nsw i32 %5, %6
+ %10 = add nsw i32 %9, %7
+ %11 = add nsw i32 %10, %8
+ ret i32 %11
+}
+
+; CHECK-LABEL: test2
+define i32 @test2(i32* %p) {
+
+; Offsets >=8 require a mov and an add
+; CHECK-V4T: movs [[NEWBASE:r[0-9]]], r0
+; CHECK-V6M: mov [[NEWBASE:r[0-9]]], r0
+; CHECK-NEXT: adds [[NEWBASE]], #8
+ %1 = getelementptr inbounds i32* %p, i32 2
+ %2 = getelementptr inbounds i32* %p, i32 3
+ %3 = getelementptr inbounds i32* %p, i32 4
+ %4 = getelementptr inbounds i32* %p, i32 5
+
+; CHECK-NEXT: ldm [[NEWBASE]],
+ %5 = load i32* %1, align 4
+ %6 = load i32* %2, align 4
+ %7 = load i32* %3, align 4
+ %8 = load i32* %4, align 4
+
+ %9 = add nsw i32 %5, %6
+ %10 = add nsw i32 %9, %7
+ %11 = add nsw i32 %10, %8
+ ret i32 %11
+}
diff --git a/test/CodeGen/ARM/Windows/read-only-data.ll b/test/CodeGen/ARM/Windows/read-only-data.ll
index 0ccb5ed..0438d68 100644
--- a/test/CodeGen/ARM/Windows/read-only-data.ll
+++ b/test/CodeGen/ARM/Windows/read-only-data.ll
@@ -10,6 +10,6 @@ entry:
ret void
}
-; CHECK: .section .rdata,"rd"
+; CHECK: .section .rdata,"dr"
; CHECK-NOT: .section ".rodata.str1.1"
diff --git a/test/CodeGen/ARM/Windows/stack-probe-non-default.ll b/test/CodeGen/ARM/Windows/stack-probe-non-default.ll
new file mode 100644
index 0000000..796bcdd
--- /dev/null
+++ b/test/CodeGen/ARM/Windows/stack-probe-non-default.ll
@@ -0,0 +1,27 @@
+; RUN: llc -mtriple thumbv7-windows -mcpu cortex-a9 -o - %s \
+; RUN: | FileCheck %s -check-prefix CHECK-DEFAULT-CODE-MODEL
+
+; RUN: llc -mtriple thumbv7-windows -mcpu cortex-a9 -code-model large -o - %s \
+; RUN: | FileCheck %s -check-prefix CHECK-LARGE-CODE-MODEL
+
+declare dllimport arm_aapcs_vfpcc void @initialise(i8*)
+
+define dllexport arm_aapcs_vfpcc signext i8 @function(i32 %offset) #0 {
+entry:
+ %buffer = alloca [4096 x i8], align 1
+ %0 = getelementptr inbounds [4096 x i8]* %buffer, i32 0, i32 0
+ call arm_aapcs_vfpcc void @initialise(i8* %0)
+ %arrayidx = getelementptr inbounds [4096 x i8]* %buffer, i32 0, i32 %offset
+ %1 = load i8* %arrayidx, align 1
+ ret i8 %1
+}
+
+attributes #0 = { "stack-probe-size"="8096" }
+
+; CHECK-DEFAULT-CODE-MODEL-NOT: __chkstk
+; CHECK-DEFAULT-CODE-MODEL: sub.w sp, sp, #4096
+
+; CHECK-LARGE-CODE-MODEL-NOT: movw r12, :lower16:__chkstk
+; CHECK-LARGE-CODE-MODEL-NOT: movt r12, :upper16:__chkstk
+; CHECK-LARGE-CODE-MODEL: sub.w sp, sp, #4096
+
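A minimal sketch of the threshold behaviour this test exercises, reusing the test's own attribute and a hypothetical callee (assumptions, not part of the test):

    declare arm_aapcs_vfpcc void @use(i8*)

    ; The frame stays below the raised 8096-byte probe size, so no __chkstk
    ; call is expected, only a direct sp adjustment.
    define void @below_threshold() #0 {
      %buf = alloca [4096 x i8], align 1
      %p = getelementptr inbounds [4096 x i8]* %buf, i32 0, i32 0
      call arm_aapcs_vfpcc void @use(i8* %p)
      ret void
    }

    ; Without the attribute, the default 4096-byte probe size applies and a
    ; frame this large would be expected to go through __chkstk.
    define void @at_default_threshold() {
      %buf = alloca [4096 x i8], align 1
      %p = getelementptr inbounds [4096 x i8]* %buf, i32 0, i32 0
      call arm_aapcs_vfpcc void @use(i8* %p)
      ret void
    }

    attributes #0 = { "stack-probe-size"="8096" }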
diff --git a/test/CodeGen/ARM/Windows/structors.ll b/test/CodeGen/ARM/Windows/structors.ll
index a1a9026..874b5bf 100644
--- a/test/CodeGen/ARM/Windows/structors.ll
+++ b/test/CodeGen/ARM/Windows/structors.ll
@@ -7,6 +7,6 @@ entry:
ret void
}
-; CHECK: .section .CRT$XCU,"rd"
+; CHECK: .section .CRT$XCU,"dr"
; CHECK: .long function
diff --git a/test/CodeGen/ARM/aggregate-padding.ll b/test/CodeGen/ARM/aggregate-padding.ll
new file mode 100644
index 0000000..bc46a9c
--- /dev/null
+++ b/test/CodeGen/ARM/aggregate-padding.ll
@@ -0,0 +1,101 @@
+; RUN: llc -mtriple=armv7-linux-gnueabihf %s -o - | FileCheck %s
+
+; [2 x i64] should be contiguous when split (e.g. we shouldn't try to align all
+; i32 components to 64 bits). Also makes sure i64-based types are properly
+; aligned on the stack.
+define i64 @test_i64_contiguous_on_stack([8 x double], float, i32 %in, [2 x i64] %arg) nounwind {
+; CHECK-LABEL: test_i64_contiguous_on_stack:
+; CHECK-DAG: ldr [[LO0:r[0-9]+]], [sp, #8]
+; CHECK-DAG: ldr [[HI0:r[0-9]+]], [sp, #12]
+; CHECK-DAG: ldr [[LO1:r[0-9]+]], [sp, #16]
+; CHECK-DAG: ldr [[HI1:r[0-9]+]], [sp, #20]
+; CHECK: adds r0, [[LO0]], [[LO1]]
+; CHECK: adc r1, [[HI0]], [[HI1]]
+
+ %val1 = extractvalue [2 x i64] %arg, 0
+ %val2 = extractvalue [2 x i64] %arg, 1
+ %sum = add i64 %val1, %val2
+ ret i64 %sum
+}
+
+; [2 x i64] should use 4 regs, not 8 (which might happen if the
+; i64 -> i32, i32 split wasn't handled correctly).
+define i64 @test_2xi64_uses_4_regs([8 x double], float, [2 x i64] %arg) nounwind {
+; CHECK-LABEL: test_2xi64_uses_4_regs:
+; CHECK-DAG: mov r0, r2
+; CHECK-DAG: mov r1, r3
+
+ %val = extractvalue [2 x i64] %arg, 1
+ ret i64 %val
+}
+
+; An aggregate should be able to split between registers and stack if there is
+; nothing else on the stack.
+define i32 @test_aggregates_split([8 x double], i32, [4 x i32] %arg) nounwind {
+; CHECK-LABEL: test_aggregates_split:
+; CHECK: ldr [[VAL3:r[0-9]+]], [sp]
+; CHECK: add r0, r1, [[VAL3]]
+
+ %val0 = extractvalue [4 x i32] %arg, 0
+ %val3 = extractvalue [4 x i32] %arg, 3
+ %sum = add i32 %val0, %val3
+ ret i32 %sum
+}
+
+; If an aggregate has to be moved entirely onto the stack, nothing should be
+; able to use r0-r3 any more. Also checks that [2 x i64] is properly aligned when
+; it uses regs.
+define i32 @test_no_int_backfilling([8 x double], float, i32, [2 x i64], i32 %arg) nounwind {
+; CHECK-LABEL: test_no_int_backfilling:
+; CHECK: ldr r0, [sp, #24]
+ ret i32 %arg
+}
+
+; Even if the argument was successfully allocated as a reg block, there should be
+; no backfilling to r1.
+define i32 @test_no_int_backfilling_regsonly(i32, [1 x i64], i32 %arg) {
+; CHECK-LABEL: test_no_int_backfilling_regsonly:
+; CHECK: ldr r0, [sp]
+ ret i32 %arg
+}
+
+; If an aggregate has to be moved entirely onto the stack, nothing should be
+; able to use r0-r3 any more.
+define float @test_no_float_backfilling([7 x double], [4 x i32], i32, [4 x double], float %arg) nounwind {
+; CHECK-LABEL: test_no_float_backfilling:
+; CHECK: vldr s0, [sp, #40]
+ ret float %arg
+}
+
+; They're a bit pointless, but types like [N x i8] should work as well.
+define i8 @test_i8_in_regs(i32, [3 x i8] %arg) {
+; CHECK-LABEL: test_i8_in_regs:
+; CHECK: add r0, r1, r3
+ %val0 = extractvalue [3 x i8] %arg, 0
+ %val2 = extractvalue [3 x i8] %arg, 2
+ %sum = add i8 %val0, %val2
+ ret i8 %sum
+}
+
+define i16 @test_i16_split(i32, i32, [3 x i16] %arg) {
+; CHECK-LABEL: test_i16_split:
+; CHECK: ldrh [[VAL2:r[0-9]+]], [sp]
+; CHECK: add r0, r2, [[VAL2]]
+ %val0 = extractvalue [3 x i16] %arg, 0
+ %val2 = extractvalue [3 x i16] %arg, 2
+ %sum = add i16 %val0, %val2
+ ret i16 %sum
+}
+
+; Beware: on the stack each i16 still gets a 32-bit slot; the array is not
+; packed.
+define i16 @test_i16_forced_stack([8 x double], double, i32, i32, [3 x i16] %arg) {
+; CHECK-LABEL: test_i16_forced_stack:
+; CHECK-DAG: ldrh [[VAL0:r[0-9]+]], [sp, #8]
+; CHECK-DAG: ldrh [[VAL2:r[0-9]+]], [sp, #16]
+; CHECK: add r0, [[VAL0]], [[VAL2]]
+ %val0 = extractvalue [3 x i16] %arg, 0
+ %val2 = extractvalue [3 x i16] %arg, 2
+ %sum = add i16 %val0, %val2
+ ret i16 %sum
+}
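The allocation rules these tests lean on, restated as one small sketch in the same old-style IR; the register expectations in the comments are assumptions from the AAPCS, not checked output:

    ; An i64 needs an even/odd core register pair; a register skipped for
    ; alignment is never backfilled, and once any part of an argument lands
    ; on the stack, later arguments may not return to r0-r3.
    define i64 @sketch(i32 %a, i64 %b) nounwind {
    ; expected: %a in r0, r1 left unused (no backfill), %b in r2/r3
      ret i64 %b
    }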
diff --git a/test/CodeGen/ARM/alloc-no-stack-realign.ll b/test/CodeGen/ARM/alloc-no-stack-realign.ll
index 6e6311d..5ad8719 100644
--- a/test/CodeGen/ARM/alloc-no-stack-realign.ll
+++ b/test/CodeGen/ARM/alloc-no-stack-realign.ll
@@ -8,21 +8,28 @@
define void @test1(<16 x float>* noalias sret %agg.result) nounwind ssp "no-realign-stack" {
entry:
-; NO-REALIGN: test1
-; NO-REALIGN: add [[R2:r[0-9]+]], [[R1:r[0-9]+]], #48
-; NO-REALIGN: vst1.64
-; NO-REALIGN: add [[R2:r[0-9]+]], [[R1:r[0-9]+]], #32
-; NO-REALIGN: vst1.64
-; NO-REALIGN: add [[R2:r[0-9]+]], [[R1:r[0-9]+]], #16
-; NO-REALIGN: vst1.64
-; NO-REALIGN: vst1.64
-; NO-REALIGN: add [[R2:r[0-9]+]], [[R1:r[0-9]+]], #48
-; NO-REALIGN: vst1.64
-; NO-REALIGN: add [[R2:r[0-9]+]], [[R1:r[0-9]+]], #32
-; NO-REALIGN: vst1.64
-; NO-REALIGN: add [[R2:r[0-9]+]], [[R1:r[0-9]+]], #16
-; NO-REALIGN: vst1.64
-; NO-REALIGN: vst1.64
+; NO-REALIGN-LABEL: test1
+; NO-REALIGN: mov r[[R2:[0-9]+]], r[[R1:[0-9]+]]
+; NO-REALIGN: vld1.32 {{{d[0-9]+, d[0-9]+}}}, [r[[R2]]:128]!
+; NO-REALIGN: vld1.64 {{{d[0-9]+, d[0-9]+}}}, [r[[R2]]:128]
+; NO-REALIGN: add r[[R2:[0-9]+]], r[[R1]], #32
+; NO-REALIGN: vld1.64 {{{d[0-9]+, d[0-9]+}}}, [r[[R2]]:128]
+; NO-REALIGN: add r[[R2:[0-9]+]], r[[R1]], #48
+; NO-REALIGN: vld1.64 {{{d[0-9]+, d[0-9]+}}}, [r[[R2]]:128]
+
+; NO-REALIGN: add r[[R2:[0-9]+]], r[[R1:[0-9]+]], #48
+; NO-REALIGN: vst1.64 {{{d[0-9]+, d[0-9]+}}}, [r[[R2]]:128]
+; NO-REALIGN: add r[[R2:[0-9]+]], r[[R1]], #32
+; NO-REALIGN: vst1.64 {{{d[0-9]+, d[0-9]+}}}, [r[[R2]]:128]
+; NO-REALIGN: vst1.32 {{{d[0-9]+, d[0-9]+}}}, [r[[R1]]:128]!
+; NO-REALIGN: vst1.64 {{{d[0-9]+, d[0-9]+}}}, [r[[R1]]:128]
+
+; NO-REALIGN: add r[[R2:[0-9]+]], r[[R0:0]], #48
+; NO-REALIGN: vst1.64 {{{d[0-9]+, d[0-9]+}}}, [r[[R2]]:128]
+; NO-REALIGN: add r[[R2:[0-9]+]], r[[R0]], #32
+; NO-REALIGN: vst1.64 {{{d[0-9]+, d[0-9]+}}}, [r[[R2]]:128]
+; NO-REALIGN: vst1.32 {{{d[0-9]+, d[0-9]+}}}, [r[[R0]]:128]!
+; NO-REALIGN: vst1.64 {{{d[0-9]+, d[0-9]+}}}, [r[[R0]]:128]
%retval = alloca <16 x float>, align 16
%0 = load <16 x float>* @T3_retval, align 16
store <16 x float> %0, <16 x float>* %retval
@@ -33,22 +40,31 @@ entry:
define void @test2(<16 x float>* noalias sret %agg.result) nounwind ssp {
entry:
-; REALIGN: test2
-; REALIGN: bic sp, sp, #63
-; REALIGN: orr [[R2:r[0-9]+]], [[R1:r[0-9]+]], #48
-; REALIGN: vst1.64
-; REALIGN: orr [[R2:r[0-9]+]], [[R1:r[0-9]+]], #32
-; REALIGN: vst1.64
-; REALIGN: orr [[R2:r[0-9]+]], [[R1:r[0-9]+]], #16
-; REALIGN: vst1.64
-; REALIGN: vst1.64
-; REALIGN: add [[R2:r[0-9]+]], [[R1:r[0-9]+]], #48
-; REALIGN: vst1.64
-; REALIGN: add [[R2:r[0-9]+]], [[R1:r[0-9]+]], #32
-; REALIGN: vst1.64
-; REALIGN: add [[R2:r[0-9]+]], [[R1:r[0-9]+]], #16
-; REALIGN: vst1.64
-; REALIGN: vst1.64
+; REALIGN-LABEL: test2
+; REALIGN: bfc sp, #0, #6
+; REALIGN: mov r[[R2:[0-9]+]], r[[R1:[0-9]+]]
+; REALIGN: vld1.32 {{{d[0-9]+, d[0-9]+}}}, [r[[R2]]:128]!
+; REALIGN: vld1.64 {{{d[0-9]+, d[0-9]+}}}, [r[[R2]]:128]
+; REALIGN: add r[[R2:[0-9]+]], r[[R1]], #32
+; REALIGN: vld1.64 {{{d[0-9]+, d[0-9]+}}}, [r[[R2]]:128]
+; REALIGN: add r[[R2:[0-9]+]], r[[R1]], #48
+; REALIGN: vld1.64 {{{d[0-9]+, d[0-9]+}}}, [r[[R2]]:128]
+
+
+; REALIGN: orr r[[R2:[0-9]+]], r[[R1:[0-9]+]], #48
+; REALIGN: vst1.64 {{{d[0-9]+, d[0-9]+}}}, [r[[R2]]:128]
+; REALIGN: orr r[[R2:[0-9]+]], r[[R1]], #32
+; REALIGN: vst1.64 {{{d[0-9]+, d[0-9]+}}}, [r[[R2]]:128]
+; REALIGN: orr r[[R2:[0-9]+]], r[[R1]], #16
+; REALIGN: vst1.64 {{{d[0-9]+, d[0-9]+}}}, [r[[R2]]:128]
+; REALIGN: vst1.64 {{{d[0-9]+, d[0-9]+}}}, [r[[R1]]:128]
+
+; REALIGN: add r[[R1:[0-9]+]], r[[R0:0]], #48
+; REALIGN: vst1.64 {{{d[0-9]+, d[0-9]+}}}, [r[[R1]]:128]
+; REALIGN: add r[[R1:[0-9]+]], r[[R0]], #32
+; REALIGN: vst1.64 {{{d[0-9]+, d[0-9]+}}}, [r[[R1]]:128]
+; REALIGN: vst1.32 {{{d[0-9]+, d[0-9]+}}}, [r[[R0]]:128]!
+; REALIGN: vst1.64 {{{d[0-9]+, d[0-9]+}}}, [r[[R0]]:128]
%retval = alloca <16 x float>, align 16
%0 = load <16 x float>* @T3_retval, align 16
store <16 x float> %0, <16 x float>* %retval
diff --git a/test/CodeGen/ARM/arm-abi-attr.ll b/test/CodeGen/ARM/arm-abi-attr.ll
index f3923ae..61cb6ce 100644
--- a/test/CodeGen/ARM/arm-abi-attr.ll
+++ b/test/CodeGen/ARM/arm-abi-attr.ll
@@ -1,13 +1,13 @@
-; RUN: llc -mtriple=arm-linux < %s | FileCheck %s --check-prefix=APCS
-; RUN: llc -mtriple=arm-linux -mattr=apcs < %s | \
+; RUN: llc -mtriple=arm-linux-gnu < %s | FileCheck %s --check-prefix=APCS
+; RUN: llc -mtriple=arm-linux-gnu -target-abi=apcs < %s | \
; RUN: FileCheck %s --check-prefix=APCS
-; RUN: llc -mtriple=arm-linux-gnueabi -mattr=apcs < %s | \
+; RUN: llc -mtriple=arm-linux-gnueabi -target-abi=apcs < %s | \
; RUN: FileCheck %s --check-prefix=APCS
; RUN: llc -mtriple=arm-linux-gnueabi < %s | FileCheck %s --check-prefix=AAPCS
-; RUN: llc -mtriple=arm-linux-gnueabi -mattr=aapcs < %s | \
+; RUN: llc -mtriple=arm-linux-gnueabi -target-abi=aapcs < %s | \
; RUN: FileCheck %s --check-prefix=AAPCS
-; RUN: llc -mtriple=arm-linux-gnu -mattr=aapcs < %s | \
+; RUN: llc -mtriple=arm-linux-gnu -target-abi=aapcs < %s | \
; RUN: FileCheck %s --check-prefix=AAPCS
; The stack is 8-byte aligned on AAPCS and 4-byte aligned on APCS, so we should get a BIC
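Concretely (a sketch, not checked output): an 8-byte-aligned object on a 4-byte-aligned APCS stack forces a realignment such as

    bic sp, sp, #7    @ clear the low bits so sp becomes 8-byte aligned

whereas on AAPCS the stack is already 8-byte aligned and no BIC is needed.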
diff --git a/test/CodeGen/ARM/atomic-64bit.ll b/test/CodeGen/ARM/atomic-64bit.ll
index 462c185..0c0769f 100644
--- a/test/CodeGen/ARM/atomic-64bit.ll
+++ b/test/CodeGen/ARM/atomic-64bit.ll
@@ -1,6 +1,6 @@
; RUN: llc < %s -mtriple=armv7-apple-ios | FileCheck %s --check-prefix=CHECK --check-prefix=CHECK-LE
; RUN: llc < %s -mtriple=thumbv7-none-linux-gnueabihf | FileCheck %s --check-prefix=CHECK-THUMB --check-prefix=CHECK-THUMB-LE
-; RUN: llc < %s -mtriple=armebv7 | FileCheck %s --check-prefix=CHECK --check-prefix=CHECK-BE
+; RUN: llc < %s -mtriple=armebv7 -target-abi apcs | FileCheck %s --check-prefix=CHECK --check-prefix=CHECK-BE
; RUN: llc < %s -mtriple=thumbebv7-none-linux-gnueabihf | FileCheck %s --check-prefix=CHECK-THUMB --check-prefix=CHECK-THUMB-BE
define i64 @test1(i64* %ptr, i64 %val) {
diff --git a/test/CodeGen/ARM/atomic-ops-v8.ll b/test/CodeGen/ARM/atomic-ops-v8.ll
index 7072aaa..6ba1352 100644
--- a/test/CodeGen/ARM/atomic-ops-v8.ll
+++ b/test/CodeGen/ARM/atomic-ops-v8.ll
@@ -1296,7 +1296,7 @@ define void @test_atomic_store_monotonic_regoff_i8(i64 %base, i64 %off, i8 %val)
%addr = inttoptr i64 %addr_int to i8*
store atomic i8 %val, i8* %addr monotonic, align 1
-; CHECK-LE: ldrb{{(\.w)?}} [[VAL:r[0-9]+]], [sp]
+; CHECK-LE: ldr{{b?(\.w)?}} [[VAL:r[0-9]+]], [sp]
; CHECK-LE: strb [[VAL]], [r0, r2]
; CHECK-BE: ldrb{{(\.w)?}} [[VAL:r[0-9]+]], [sp, #3]
; CHECK-BE: strb [[VAL]], [r1, r3]
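The relaxed pattern accepts every spelling llc may pick for this reload; read as a regex, ldr{{b?(\.w)?}} matches any of (a sketch of the possibilities):

    ldr       @ word reload, narrow encoding
    ldr.w     @ word reload, wide encoding
    ldrb      @ byte reload, narrow encoding
    ldrb.w    @ byte reload, wide encoding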
diff --git a/test/CodeGen/ARM/big-endian-neon-extend.ll b/test/CodeGen/ARM/big-endian-neon-extend.ll
index 931c6c3..1498356 100644
--- a/test/CodeGen/ARM/big-endian-neon-extend.ll
+++ b/test/CodeGen/ARM/big-endian-neon-extend.ll
@@ -2,10 +2,18 @@
define void @vector_ext_2i8_to_2i64( <2 x i8>* %loadaddr, <2 x i64>* %storeaddr ) {
; CHECK-LABEL: vector_ext_2i8_to_2i64:
-; CHECK: vld1.16 {[[REG:d[0-9]+]]
-; CHECK: vmov.i64 {{q[0-9]+}}, #0xff
-; CHECK: vrev16.8 [[REG]], [[REG]]
-; CHECK: vmovl.u8 {{q[0-9]+}}, [[REG]]
+; CHECK: vld1.16 {[[REG:d[0-9]+]][0]}, [r0:16]
+; CHECK-NEXT: vmov.i64 [[MASK:q[0-9]+]], #0xff
+; CHECK-NEXT: vrev64.32 [[MASK]], [[MASK]]
+; CHECK-NEXT: vrev16.8 [[REG]], [[REG]]
+; CHECK-NEXT: vmovl.u8 [[QREG:q[0-9]+]], [[REG]]
+; CHECK-NEXT: vmovl.u16 [[QREG]], [[REG]]
+; CHECK-NEXT: vmovl.u32 [[QREG]], [[REG]]
+; CHECK-NEXT: vrev64.32 [[QREG]], [[QREG]]
+; CHECK-NEXT: vand [[QREG]], [[QREG]], [[MASK]]
+; CHECK-NEXT: vrev64.32 [[QREG]], [[QREG]]
+; CHECK-NEXT: vst1.64 {[[REG]], {{d[0-9]+}}}, [r1]
+; CHECK-NEXT: bx lr
%1 = load <2 x i8>* %loadaddr
%2 = zext <2 x i8> %1 to <2 x i64>
store <2 x i64> %2, <2 x i64>* %storeaddr
@@ -14,10 +22,17 @@ define void @vector_ext_2i8_to_2i64( <2 x i8>* %loadaddr, <2 x i64>* %storeaddr
define void @vector_ext_2i16_to_2i64( <2 x i16>* %loadaddr, <2 x i64>* %storeaddr ) {
; CHECK-LABEL: vector_ext_2i16_to_2i64:
-; CHECK: vld1.32 {[[REG:d[0-9]+]]
-; CHECK: vmov.i64 {{q[0-9]+}}, #0xffff
-; CHECK: vrev32.16 [[REG]], [[REG]]
-; CHECK: vmovl.u16 {{q[0-9]+}}, [[REG]]
+; CHECK: vld1.32 {[[REG:d[0-9]+]][0]}, [r0:32]
+; CHECK-NEXT: vmov.i64 [[MASK:q[0-9]+]], #0xffff
+; CHECK-NEXT: vrev64.32 [[MASK]], [[MASK]]
+; CHECK-NEXT: vrev32.16 [[REG]], [[REG]]
+; CHECK-NEXT: vmovl.u16 [[QREG:q[0-9]+]], [[REG]]
+; CHECK-NEXT: vmovl.u32 [[QREG]], [[REG]]
+; CHECK-NEXT: vrev64.32 [[QREG]], [[QREG]]
+; CHECK-NEXT: vand [[QREG]], [[QREG]], [[MASK]]
+; CHECK-NEXT: vrev64.32 [[QREG]], [[QREG]]
+; CHECK-NEXT: vst1.64 {[[REG]], {{d[0-9]+}}}, [r1]
+; CHECK-NEXT: bx lr
%1 = load <2 x i16>* %loadaddr
%2 = zext <2 x i16> %1 to <2 x i64>
store <2 x i64> %2, <2 x i64>* %storeaddr
@@ -27,8 +42,13 @@ define void @vector_ext_2i16_to_2i64( <2 x i16>* %loadaddr, <2 x i64>* %storeadd
define void @vector_ext_2i8_to_2i32( <2 x i8>* %loadaddr, <2 x i32>* %storeaddr ) {
; CHECK-LABEL: vector_ext_2i8_to_2i32:
-; CHECK: vld1.16 {[[REG:d[0-9]+]]
-; CHECK: vrev16.8 [[REG]], [[REG]]
+; CHECK: vld1.16 {[[REG:d[0-9]+]][0]}, [r0:16]
+; CHECK-NEXT: vrev16.8 [[REG]], [[REG]]
+; CHECK-NEXT: vmovl.u8 [[QREG:q[0-9]+]], [[REG]]
+; CHECK-NEXT: vmovl.u16 [[QREG]], [[REG]]
+; CHECK-NEXT: vrev64.32 [[REG]], [[REG]]
+; CHECK-NEXT: vstr [[REG]], [r1]
+; CHECK-NEXT: bx lr
%1 = load <2 x i8>* %loadaddr
%2 = zext <2 x i8> %1 to <2 x i32>
store <2 x i32> %2, <2 x i32>* %storeaddr
@@ -37,9 +57,12 @@ define void @vector_ext_2i8_to_2i32( <2 x i8>* %loadaddr, <2 x i32>* %storeaddr
define void @vector_ext_2i16_to_2i32( <2 x i16>* %loadaddr, <2 x i32>* %storeaddr ) {
; CHECK-LABEL: vector_ext_2i16_to_2i32:
-; CHECK: vld1.32 {[[REG:d[0-9]+]]
-; CHECK: vrev32.16 [[REG]], [[REG]]
-; CHECK: vmovl.u16 {{q[0-9]+}}, [[REG]]
+; CHECK: vld1.32 {[[REG:d[0-9]+]][0]}, [r0:32]
+; CHECK-NEXT: vrev32.16 [[REG]], [[REG]]
+; CHECK-NEXT: vmovl.u16 [[QREG:q[0-9]+]], [[REG]]
+; CHECK-NEXT: vrev64.32 [[REG]], [[REG]]
+; CHECK-NEXT: vstr [[REG]], [r1]
+; CHECK-NEXT: bx lr
%1 = load <2 x i16>* %loadaddr
%2 = zext <2 x i16> %1 to <2 x i32>
store <2 x i32> %2, <2 x i32>* %storeaddr
@@ -48,9 +71,15 @@ define void @vector_ext_2i16_to_2i32( <2 x i16>* %loadaddr, <2 x i32>* %storeadd
define void @vector_ext_2i8_to_2i16( <2 x i8>* %loadaddr, <2 x i16>* %storeaddr ) {
; CHECK-LABEL: vector_ext_2i8_to_2i16:
-; CHECK: vld1.16 {[[REG:d[0-9]+]]
-; CHECK: vrev16.8 [[REG]], [[REG]]
-; CHECK: vmovl.u8 {{q[0-9]+}}, [[REG]]
+; CHECK: vld1.16 {[[REG:d[0-9]+]][0]}, [r0:16]
+; CHECK-NEXT: vrev16.8 [[REG]], [[REG]]
+; CHECK-NEXT: vmovl.u8 [[QREG:q[0-9]+]], [[REG]]
+; CHECK-NEXT: vmovl.u16 [[QREG]], [[REG]]
+; CHECK-NEXT: vrev32.16 [[REG]], [[REG]]
+; CHECK-NEXT: vuzp.16 [[REG]], {{d[0-9]+}}
+; CHECK-NEXT: vrev32.16 [[REG]], {{d[0-9]+}}
+; CHECK-NEXT: vst1.32 {[[REG]][0]}, [r1:32]
+; CHECK-NEXT: bx lr
%1 = load <2 x i8>* %loadaddr
%2 = zext <2 x i8> %1 to <2 x i16>
store <2 x i16> %2, <2 x i16>* %storeaddr
@@ -59,9 +88,13 @@ define void @vector_ext_2i8_to_2i16( <2 x i8>* %loadaddr, <2 x i16>* %storeaddr
define void @vector_ext_4i8_to_4i32( <4 x i8>* %loadaddr, <4 x i32>* %storeaddr ) {
; CHECK-LABEL: vector_ext_4i8_to_4i32:
-; CHECK: vld1.32 {[[REG:d[0-9]+]]
-; CHECK: vrev32.8 [[REG]], [[REG]]
-; CHECK: vmovl.u8 {{q[0-9]+}}, [[REG]]
+; CHECK: vld1.32 {[[REG:d[0-9]+]][0]}, [r0:32]
+; CHECK-NEXT: vrev32.8 [[REG]], [[REG]]
+; CHECK-NEXT: vmovl.u8 [[QREG:q[0-9]+]], [[REG]]
+; CHECK-NEXT: vmovl.u16 [[QREG]], [[REG]]
+; CHECK-NEXT: vrev64.32 [[QREG]], [[QREG]]
+; CHECK-NEXT: vst1.64 {[[REG]], {{d[0-9]+}}}, [r1]
+; CHECK-NEXT: bx lr
%1 = load <4 x i8>* %loadaddr
%2 = zext <4 x i8> %1 to <4 x i32>
store <4 x i32> %2, <4 x i32>* %storeaddr
@@ -70,12 +103,14 @@ define void @vector_ext_4i8_to_4i32( <4 x i8>* %loadaddr, <4 x i32>* %storeaddr
define void @vector_ext_4i8_to_4i16( <4 x i8>* %loadaddr, <4 x i16>* %storeaddr ) {
; CHECK-LABEL: vector_ext_4i8_to_4i16:
-; CHECK: vld1.32 {[[REG:d[0-9]+]]
-; CHECK: vrev32.8 [[REG]], [[REG]]
-; CHECK: vmovl.u8 {{q[0-9]+}}, [[REG]]
+; CHECK: vld1.32 {[[REG:d[0-9]+]][0]}, [r0:32]
+; CHECK-NEXT: vrev32.8 [[REG]], [[REG]]
+; CHECK-NEXT: vmovl.u8 [[QREG:q[0-9]+]], [[REG]]
+; CHECK-NEXT: vrev64.16 [[REG]], [[REG]]
+; CHECK-NEXT: vstr [[REG]], [r1]
+; CHECK-NEXT: bx lr
%1 = load <4 x i8>* %loadaddr
%2 = zext <4 x i8> %1 to <4 x i16>
store <4 x i16> %2, <4 x i16>* %storeaddr
ret void
}
-
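The vrev pairs above are the usual big-endian NEON lane bookkeeping; a condensed reading of the checked sequence (an interpretation of the CHECK lines, not independent output):

    vld1.16   {d0[0]}, [r0:16]   @ load the packed element as one lane
    vrev16.8  d0, d0             @ restore byte order within each element
    vmovl.u8  q0, d0             @ widen step by step (u8 -> u16 -> ...)
    vrev64.32 q0, q0             @ put lanes back in memory order before the store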
diff --git a/test/CodeGen/ARM/build-attributes-encoding.s b/test/CodeGen/ARM/build-attributes-encoding.s
index 34a1ad3..29f13f0 100644
--- a/test/CodeGen/ARM/build-attributes-encoding.s
+++ b/test/CodeGen/ARM/build-attributes-encoding.s
@@ -78,7 +78,7 @@
// CHECK-NEXT: EntrySize: 0
// CHECK-NEXT: SectionData (
// CHECK-NEXT: 0000: 41460000 00616561 62690001 3C000000
-// CHECK-NEXT: 0010: 05434F52 5445582D 41380006 0A074108
+// CHECK-NEXT: 0010: 05636F72 7465782D 61380006 0A074108
// CHECK-NEXT: 0020: 0109020A 030C0214 01150117 01180119
// CHECK-NEXT: 0030: 011B001C 0124012A 012C0244 036EA001
// CHECK-NEXT: 0040: 81013100 FA0101
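Decoded, the changed bytes are just the Tag_CPU_name string inside the attributes blob (an ASCII reading of the hex dump above):

    05 43 4F 52 54 45 58 2D 41 38 00    @ tag 5, "CORTEX-A8\0"  (old)
    05 63 6F 72 74 65 78 2D 61 38 00    @ tag 5, "cortex-a8\0"  (new)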
diff --git a/test/CodeGen/ARM/build-attributes.ll b/test/CodeGen/ARM/build-attributes.ll
index 99c2445..37c6a447 100644
--- a/test/CodeGen/ARM/build-attributes.ll
+++ b/test/CodeGen/ARM/build-attributes.ll
@@ -3,39 +3,106 @@
; RUN: llc < %s -mtriple=thumbv5-linux-gnueabi -mcpu=xscale | FileCheck %s --check-prefix=XSCALE
; RUN: llc < %s -mtriple=armv6-linux-gnueabi | FileCheck %s --check-prefix=V6
+; RUN: llc < %s -mtriple=armv6-linux-gnueabi -enable-unsafe-fp-math -disable-fp-elim -enable-no-infs-fp-math -enable-no-nans-fp-math -fp-contract=fast | FileCheck %s --check-prefix=V6-FAST
+; RUN: llc < %s -mtriple=armv6-linux-gnueabi -enable-sign-dependent-rounding-fp-math | FileCheck %s --check-prefix=DYN-ROUNDING
; RUN: llc < %s -mtriple=thumbv6m-linux-gnueabi | FileCheck %s --check-prefix=V6M
+; RUN: llc < %s -mtriple=thumbv6m-linux-gnueabi -enable-unsafe-fp-math -disable-fp-elim -enable-no-infs-fp-math -enable-no-nans-fp-math -fp-contract=fast | FileCheck %s --check-prefix=V6M-FAST
+; RUN: llc < %s -mtriple=thumbv6sm-linux-gnueabi | FileCheck %s --check-prefix=V6M
+; RUN: llc < %s -mtriple=thumbv6sm-linux-gnueabi -enable-unsafe-fp-math -disable-fp-elim -enable-no-infs-fp-math -enable-no-nans-fp-math -fp-contract=fast | FileCheck %s --check-prefix=V6M-FAST
; RUN: llc < %s -mtriple=armv6-linux-gnueabi -mcpu=arm1156t2f-s | FileCheck %s --check-prefix=ARM1156T2F-S
+; RUN: llc < %s -mtriple=armv6-linux-gnueabi -mcpu=arm1156t2f-s -enable-unsafe-fp-math -disable-fp-elim -enable-no-infs-fp-math -enable-no-nans-fp-math -fp-contract=fast | FileCheck %s --check-prefix=ARM1156T2F-S-FAST
+; RUN: llc < %s -mtriple=armv6-linux-gnueabi -mcpu=arm1156t2f-s -enable-sign-dependent-rounding-fp-math | FileCheck %s --check-prefix=DYN-ROUNDING
; RUN: llc < %s -mtriple=thumbv7m-linux-gnueabi | FileCheck %s --check-prefix=V7M
+; RUN: llc < %s -mtriple=thumbv7m-linux-gnueabi -enable-unsafe-fp-math -disable-fp-elim -enable-no-infs-fp-math -enable-no-nans-fp-math -fp-contract=fast | FileCheck %s --check-prefix=V7M-FAST
+; RUN: llc < %s -mtriple=thumbv7m-linux-gnueabi -enable-sign-dependent-rounding-fp-math | FileCheck %s --check-prefix=DYN-ROUNDING
; RUN: llc < %s -mtriple=armv7-linux-gnueabi | FileCheck %s --check-prefix=V7
+; RUN: llc < %s -mtriple=armv7-linux-gnueabi -enable-sign-dependent-rounding-fp-math | FileCheck %s --check-prefix=DYN-ROUNDING
+; RUN: llc < %s -mtriple=armv7-linux-gnueabi -enable-unsafe-fp-math -disable-fp-elim -enable-no-infs-fp-math -enable-no-nans-fp-math -fp-contract=fast | FileCheck %s --check-prefix=V7-FAST
; RUN: llc < %s -mtriple=armv8-linux-gnueabi | FileCheck %s --check-prefix=V8
+; RUN: llc < %s -mtriple=armv8-linux-gnueabi -enable-unsafe-fp-math -disable-fp-elim -enable-no-infs-fp-math -enable-no-nans-fp-math -fp-contract=fast | FileCheck %s --check-prefix=V8-FAST
+; RUN: llc < %s -mtriple=armv8-linux-gnueabi -enable-sign-dependent-rounding-fp-math | FileCheck %s --check-prefix=DYN-ROUNDING
; RUN: llc < %s -mtriple=thumbv8-linux-gnueabi | FileCheck %s --check-prefix=Vt8
+; RUN: llc < %s -mtriple=thumbv8-linux-gnueabi -enable-sign-dependent-rounding-fp-math | FileCheck %s --check-prefix=DYN-ROUNDING
; RUN: llc < %s -mtriple=armv8-linux-gnueabi -mattr=-neon,-crypto | FileCheck %s --check-prefix=V8-FPARMv8
; RUN: llc < %s -mtriple=armv8-linux-gnueabi -mattr=-fp-armv8,-crypto | FileCheck %s --check-prefix=V8-NEON
; RUN: llc < %s -mtriple=armv8-linux-gnueabi -mattr=-crypto | FileCheck %s --check-prefix=V8-FPARMv8-NEON
; RUN: llc < %s -mtriple=armv8-linux-gnueabi | FileCheck %s --check-prefix=V8-FPARMv8-NEON-CRYPTO
; RUN: llc < %s -mtriple=armv7-linux-gnueabi -mcpu=cortex-a5 | FileCheck %s --check-prefix=CORTEX-A5-DEFAULT
+; RUN: llc < %s -mtriple=armv7-linux-gnueabi -mcpu=cortex-a5 -enable-unsafe-fp-math -disable-fp-elim -enable-no-infs-fp-math -enable-no-nans-fp-math -fp-contract=fast | FileCheck %s --check-prefix=CORTEX-A5-DEFAULT-FAST
+; RUN: llc < %s -mtriple=armv7-linux-gnueabi -mcpu=cortex-a5 -enable-sign-dependent-rounding-fp-math | FileCheck %s --check-prefix=DYN-ROUNDING
; RUN: llc < %s -mtriple=armv7-linux-gnueabi -mcpu=cortex-a5 -mattr=-neon,+d16 | FileCheck %s --check-prefix=CORTEX-A5-NONEON
; RUN: llc < %s -mtriple=armv7-linux-gnueabi -mcpu=cortex-a5 -mattr=-vfp2 | FileCheck %s --check-prefix=CORTEX-A5-NOFPU
+; RUN: llc < %s -mtriple=armv7-linux-gnueabi -mcpu=cortex-a5 -mattr=-vfp2 -enable-unsafe-fp-math -disable-fp-elim -enable-no-infs-fp-math -enable-no-nans-fp-math -fp-contract=fast | FileCheck %s --check-prefix=CORTEX-A5-NOFPU-FAST
; RUN: llc < %s -mtriple=armv7-linux-gnueabi -mcpu=cortex-a9 -float-abi=soft | FileCheck %s --check-prefix=CORTEX-A9-SOFT
+; RUN: llc < %s -mtriple=armv7-linux-gnueabi -mcpu=cortex-a9 -float-abi=soft -enable-unsafe-fp-math -disable-fp-elim -enable-no-infs-fp-math -enable-no-nans-fp-math -fp-contract=fast | FileCheck %s --check-prefix=CORTEX-A9-SOFT-FAST
; RUN: llc < %s -mtriple=armv7-linux-gnueabi -mcpu=cortex-a9 -float-abi=hard | FileCheck %s --check-prefix=CORTEX-A9-HARD
+; RUN: llc < %s -mtriple=armv7-linux-gnueabi -mcpu=cortex-a9 -float-abi=hard -enable-unsafe-fp-math -disable-fp-elim -enable-no-infs-fp-math -enable-no-nans-fp-math -fp-contract=fast | FileCheck %s --check-prefix=CORTEX-A9-HARD-FAST
+; RUN: llc < %s -mtriple=armv7-linux-gnueabi -mcpu=cortex-a9 -enable-sign-dependent-rounding-fp-math | FileCheck %s --check-prefix=DYN-ROUNDING
; RUN: llc < %s -mtriple=armv7-linux-gnueabi -mcpu=cortex-a12 | FileCheck %s --check-prefix=CORTEX-A12-DEFAULT
+; RUN: llc < %s -mtriple=armv7-linux-gnueabi -mcpu=cortex-a12 -enable-unsafe-fp-math -disable-fp-elim -enable-no-infs-fp-math -enable-no-nans-fp-math -fp-contract=fast | FileCheck %s --check-prefix=CORTEX-A12-DEFAULT-FAST
; RUN: llc < %s -mtriple=armv7-linux-gnueabi -mcpu=cortex-a12 -mattr=-vfp2 | FileCheck %s --check-prefix=CORTEX-A12-NOFPU
+; RUN: llc < %s -mtriple=armv7-linux-gnueabi -mcpu=cortex-a12 -mattr=-vfp2 -enable-unsafe-fp-math -disable-fp-elim -enable-no-infs-fp-math -enable-no-nans-fp-math -fp-contract=fast | FileCheck %s --check-prefix=CORTEX-A12-NOFPU-FAST
+; RUN: llc < %s -mtriple=armv7-linux-gnueabi -mcpu=cortex-a12 -enable-sign-dependent-rounding-fp-math | FileCheck %s --check-prefix=DYN-ROUNDING
; RUN: llc < %s -mtriple=armv7-linux-gnueabi -mcpu=cortex-a15 | FileCheck %s --check-prefix=CORTEX-A15
+; RUN: llc < %s -mtriple=armv7-linux-gnueabi -mcpu=cortex-a15 -enable-unsafe-fp-math -disable-fp-elim -enable-no-infs-fp-math -enable-no-nans-fp-math -fp-contract=fast | FileCheck %s --check-prefix=CORTEX-A15-FAST
+; RUN: llc < %s -mtriple=armv7-linux-gnueabi -mcpu=cortex-a15 -enable-sign-dependent-rounding-fp-math | FileCheck %s --check-prefix=DYN-ROUNDING
; RUN: llc < %s -mtriple=armv7-linux-gnueabi -mcpu=cortex-a17 | FileCheck %s --check-prefix=CORTEX-A17-DEFAULT
+; RUN: llc < %s -mtriple=armv7-linux-gnueabi -mcpu=cortex-a17 -enable-unsafe-fp-math -disable-fp-elim -enable-no-infs-fp-math -enable-no-nans-fp-math -fp-contract=fast | FileCheck %s --check-prefix=CORTEX-A17-FAST
; RUN: llc < %s -mtriple=armv7-linux-gnueabi -mcpu=cortex-a17 -mattr=-vfp2 | FileCheck %s --check-prefix=CORTEX-A17-NOFPU
+; RUN: llc < %s -mtriple=armv7-linux-gnueabi -mcpu=cortex-a17 -mattr=-vfp2 -enable-unsafe-fp-math -disable-fp-elim -enable-no-infs-fp-math -enable-no-nans-fp-math -fp-contract=fast | FileCheck %s --check-prefix=CORTEX-A17-NOFPU-FAST
+; RUN: llc < %s -mtriple=armv7-linux-gnueabi -mcpu=cortex-a17 -enable-sign-dependent-rounding-fp-math | FileCheck %s --check-prefix=DYN-ROUNDING
; RUN: llc < %s -mtriple=thumbv6m-linux-gnueabi -mcpu=cortex-m0 | FileCheck %s --check-prefix=CORTEX-M0
+; RUN: llc < %s -mtriple=thumbv6m-linux-gnueabi -mcpu=cortex-m0 -enable-unsafe-fp-math -disable-fp-elim -enable-no-infs-fp-math -enable-no-nans-fp-math -fp-contract=fast | FileCheck %s --check-prefix=CORTEX-M0-FAST
+; RUN: llc < %s -mtriple=thumbv6m-linux-gnueabi -mcpu=cortex-m0 -enable-sign-dependent-rounding-fp-math | FileCheck %s --check-prefix=DYN-ROUNDING
+; RUN: llc < %s -mtriple=thumbv6m-linux-gnueabi -mcpu=cortex-m0plus | FileCheck %s --check-prefix=CORTEX-M0PLUS
+; RUN: llc < %s -mtriple=thumbv6m-linux-gnueabi -mcpu=cortex-m0plus -enable-unsafe-fp-math -disable-fp-elim -enable-no-infs-fp-math -enable-no-nans-fp-math -fp-contract=fast | FileCheck %s --check-prefix=CORTEX-M0PLUS-FAST
+; RUN: llc < %s -mtriple=thumbv6m-linux-gnueabi -mcpu=cortex-m0plus -enable-sign-dependent-rounding-fp-math | FileCheck %s --check-prefix=DYN-ROUNDING
+; RUN: llc < %s -mtriple=thumbv6m-linux-gnueabi -mcpu=cortex-m1 | FileCheck %s --check-prefix=CORTEX-M1
+; RUN: llc < %s -mtriple=thumbv6m-linux-gnueabi -mcpu=cortex-m1 -enable-unsafe-fp-math -disable-fp-elim -enable-no-infs-fp-math -enable-no-nans-fp-math -fp-contract=fast | FileCheck %s --check-prefix=CORTEX-M1-FAST
+; RUN: llc < %s -mtriple=thumbv6m-linux-gnueabi -mcpu=cortex-m1 -enable-sign-dependent-rounding-fp-math | FileCheck %s --check-prefix=DYN-ROUNDING
+; RUN: llc < %s -mtriple=thumbv6m-linux-gnueabi -mcpu=sc000 | FileCheck %s --check-prefix=SC000
+; RUN: llc < %s -mtriple=thumbv6m-linux-gnueabi -mcpu=sc000 -enable-unsafe-fp-math -disable-fp-elim -enable-no-infs-fp-math -enable-no-nans-fp-math -fp-contract=fast | FileCheck %s --check-prefix=SC000-FAST
+; RUN: llc < %s -mtriple=thumbv6m-linux-gnueabi -mcpu=sc000 -enable-sign-dependent-rounding-fp-math | FileCheck %s --check-prefix=DYN-ROUNDING
; RUN: llc < %s -mtriple=thumbv7m-linux-gnueabi -mcpu=cortex-m3 | FileCheck %s --check-prefix=CORTEX-M3
+; RUN: llc < %s -mtriple=thumbv7m-linux-gnueabi -mcpu=cortex-m3 -enable-unsafe-fp-math -disable-fp-elim -enable-no-infs-fp-math -enable-no-nans-fp-math -fp-contract=fast | FileCheck %s --check-prefix=CORTEX-M3-FAST
+; RUN: llc < %s -mtriple=thumbv7m-linux-gnueabi -mcpu=cortex-m3 -enable-sign-dependent-rounding-fp-math | FileCheck %s --check-prefix=DYN-ROUNDING
+; RUN: llc < %s -mtriple=thumbv7m-linux-gnueabi -mcpu=sc300 | FileCheck %s --check-prefix=SC300
+; RUN: llc < %s -mtriple=thumbv7m-linux-gnueabi -mcpu=sc300 -enable-unsafe-fp-math -disable-fp-elim -enable-no-infs-fp-math -enable-no-nans-fp-math -fp-contract=fast | FileCheck %s --check-prefix=SC300-FAST
+; RUN: llc < %s -mtriple=thumbv7m-linux-gnueabi -mcpu=sc300 -enable-sign-dependent-rounding-fp-math | FileCheck %s --check-prefix=DYN-ROUNDING
; RUN: llc < %s -mtriple=thumbv7m-linux-gnueabi -mcpu=cortex-m4 -float-abi=soft | FileCheck %s --check-prefix=CORTEX-M4-SOFT
+; RUN: llc < %s -mtriple=thumbv7m-linux-gnueabi -mcpu=cortex-m4 -float-abi=soft -enable-unsafe-fp-math -disable-fp-elim -enable-no-infs-fp-math -enable-no-nans-fp-math -fp-contract=fast | FileCheck %s --check-prefix=CORTEX-M4-SOFT-FAST
; RUN: llc < %s -mtriple=thumbv7m-linux-gnueabi -mcpu=cortex-m4 -float-abi=hard | FileCheck %s --check-prefix=CORTEX-M4-HARD
+; RUN: llc < %s -mtriple=thumbv7m-linux-gnueabi -mcpu=cortex-m4 -float-abi=hard -enable-unsafe-fp-math -disable-fp-elim -enable-no-infs-fp-math -enable-no-nans-fp-math -fp-contract=fast | FileCheck %s --check-prefix=CORTEX-M4-HARD-FAST
+; RUN: llc < %s -mtriple=thumbv7m-linux-gnueabi -mcpu=cortex-m4 -enable-sign-dependent-rounding-fp-math | FileCheck %s --check-prefix=DYN-ROUNDING
; RUN: llc < %s -mtriple=thumbv7em-linux-gnueabi -mcpu=cortex-m7 -mattr=-vfp2 | FileCheck %s --check-prefix=CORTEX-M7 --check-prefix=CORTEX-M7-SOFT
+; RUN: llc < %s -mtriple=thumbv7em-linux-gnueabi -mcpu=cortex-m7 -mattr=-vfp2 -enable-unsafe-fp-math -disable-fp-elim -enable-no-infs-fp-math -enable-no-nans-fp-math -fp-contract=fast | FileCheck %s --check-prefix=CORTEX-M7-NOFPU-FAST
; RUN: llc < %s -mtriple=thumbv7em-linux-gnueabi -mcpu=cortex-m7 -mattr=+fp-only-sp | FileCheck %s --check-prefix=CORTEX-M7 --check-prefix=CORTEX-M7-SINGLE
-; RUN: llc < %s -mtriple=thumbv7em-linux-gnueabi -mcpu=cortex-m7 | FileCheck %s --check-prefix=CORTEX-M7 --check-prefix=CORTEX-M7-DOUBLE
+; RUN: llc < %s -mtriple=thumbv7em-linux-gnueabi -mcpu=cortex-m7 -mattr=+fp-only-sp -enable-unsafe-fp-math -disable-fp-elim -enable-no-infs-fp-math -enable-no-nans-fp-math -fp-contract=fast | FileCheck %s --check-prefix=CORTEX-M7-FAST
+; RUN: llc < %s -mtriple=thumbv7em-linux-gnueabi -mcpu=cortex-m7 | FileCheck %s --check-prefix=CORTEX-M7-DOUBLE
+; RUN: llc < %s -mtriple=thumbv7em-linux-gnueabi -mcpu=cortex-m7 -enable-sign-dependent-rounding-fp-math | FileCheck %s --check-prefix=DYN-ROUNDING
; RUN: llc < %s -mtriple=armv7r-linux-gnueabi -mcpu=cortex-r5 | FileCheck %s --check-prefix=CORTEX-R5
+; RUN: llc < %s -mtriple=armv7r-linux-gnueabi -mcpu=cortex-r5 -enable-unsafe-fp-math -disable-fp-elim -enable-no-infs-fp-math -enable-no-nans-fp-math -fp-contract=fast | FileCheck %s --check-prefix=CORTEX-R5-FAST
+; RUN: llc < %s -mtriple=armv7r-linux-gnueabi -mcpu=cortex-r5 -enable-sign-dependent-rounding-fp-math | FileCheck %s --check-prefix=DYN-ROUNDING
+; RUN: llc < %s -mtriple=armv7r-linux-gnueabi -mcpu=cortex-r7 | FileCheck %s --check-prefix=CORTEX-R7
+; RUN: llc < %s -mtriple=armv7r-linux-gnueabi -mcpu=cortex-r7 -enable-unsafe-fp-math -disable-fp-elim -enable-no-infs-fp-math -enable-no-nans-fp-math -fp-contract=fast | FileCheck %s --check-prefix=CORTEX-R7-FAST
+; RUN: llc < %s -mtriple=armv7r-linux-gnueabi -mcpu=cortex-r7 -enable-sign-dependent-rounding-fp-math | FileCheck %s --check-prefix=DYN-ROUNDING
; RUN: llc < %s -mtriple=armv8-linux-gnueabi -mcpu=cortex-a53 | FileCheck %s --check-prefix=CORTEX-A53
+; RUN: llc < %s -mtriple=armv8-linux-gnueabi -mcpu=cortex-a53 -enable-unsafe-fp-math -disable-fp-elim -enable-no-infs-fp-math -enable-no-nans-fp-math -fp-contract=fast | FileCheck %s --check-prefix=CORTEX-A53-FAST
+; RUN: llc < %s -mtriple=armv8-linux-gnueabi -mcpu=cortex-a53 -enable-sign-dependent-rounding-fp-math | FileCheck %s --check-prefix=DYN-ROUNDING
; RUN: llc < %s -mtriple=armv8-linux-gnueabi -mcpu=cortex-a57 | FileCheck %s --check-prefix=CORTEX-A57
+; RUN: llc < %s -mtriple=armv8-linux-gnueabi -mcpu=cortex-a57 -enable-unsafe-fp-math -disable-fp-elim -enable-no-infs-fp-math -enable-no-nans-fp-math -fp-contract=fast | FileCheck %s --check-prefix=CORTEX-A57-FAST
+; RUN: llc < %s -mtriple=armv8-linux-gnueabi -mcpu=cortex-a57 -enable-sign-dependent-rounding-fp-math | FileCheck %s --check-prefix=DYN-ROUNDING
+; RUN: llc < %s -mtriple=armv8-linux-gnueabi -mcpu=cortex-a72 | FileCheck %s --check-prefix=CORTEX-A72
+; RUN: llc < %s -mtriple=armv8-linux-gnueabi -mcpu=cortex-a72 -enable-unsafe-fp-math -disable-fp-elim -enable-no-infs-fp-math -enable-no-nans-fp-math -fp-contract=fast | FileCheck %s --check-prefix=CORTEX-A72-FAST
+; RUN: llc < %s -mtriple=armv8-linux-gnueabi -mcpu=cortex-a72 -enable-sign-dependent-rounding-fp-math | FileCheck %s --check-prefix=DYN-ROUNDING
; RUN: llc < %s -mtriple=armv7-none-linux-gnueabi -mcpu=cortex-a7 | FileCheck %s --check-prefix=CORTEX-A7-CHECK
+; RUN: llc < %s -mtriple=armv7-none-linux-gnueabi -mcpu=cortex-a7 -enable-unsafe-fp-math -disable-fp-elim -enable-no-infs-fp-math -enable-no-nans-fp-math -fp-contract=fast | FileCheck %s --check-prefix=CORTEX-A7-CHECK-FAST
; RUN: llc < %s -mtriple=armv7-none-linux-gnueabi -mcpu=cortex-a7 -mattr=-vfp2,-vfp3,-vfp4,-neon | FileCheck %s --check-prefix=CORTEX-A7-NOFPU
+; RUN: llc < %s -mtriple=armv7-none-linux-gnueabi -mcpu=cortex-a7 -mattr=-vfp2,-vfp3,-vfp4,-neon -enable-unsafe-fp-math -disable-fp-elim -enable-no-infs-fp-math -enable-no-nans-fp-math -fp-contract=fast | FileCheck %s --check-prefix=CORTEX-A7-NOFPU-FAST
; RUN: llc < %s -mtriple=armv7-none-linux-gnueabi -mcpu=cortex-a7 -mattr=+vfp4,-neon | FileCheck %s --check-prefix=CORTEX-A7-FPUV4
+; RUN: llc < %s -mtriple=armv7-none-linux-gnueabi -mcpu=cortex-a7 -enable-sign-dependent-rounding-fp-math | FileCheck %s --check-prefix=DYN-ROUNDING
+; RUN: llc < %s -mtriple=armv7-none-linux-gnueabi -mcpu=cortex-a7 -mattr=+vfp4,-neon -enable-unsafe-fp-math -disable-fp-elim -enable-no-infs-fp-math -enable-no-nans-fp-math -fp-contract=fast | FileCheck %s --check-prefix=CORTEX-A7-FPUV4-FAST
; RUN: llc < %s -mtriple=armv7-none-linux-gnueabi -mcpu=cortex-a7 -mattr=+vfp4,+d16,-neon | FileCheck %s --check-prefix=CORTEX-A7-FPUV4
; RUN: llc < %s -mtriple=arm-none-linux-gnueabi -relocation-model=pic | FileCheck %s --check-prefix=RELOC-PIC
; RUN: llc < %s -mtriple=arm-none-linux-gnueabi -relocation-model=static | FileCheck %s --check-prefix=RELOC-OTHER
@@ -49,6 +116,9 @@
; RUN: llc < %s -mtriple=armv8-none-linux-gnueabi -mcpu=cortex-a57 -arm-no-strict-align | FileCheck %s --check-prefix=NO-STRICT-ALIGN
; RUN: llc < %s -mtriple=armv8-none-linux-gnueabi -mcpu=cortex-a57 -arm-strict-align | FileCheck %s --check-prefix=STRICT-ALIGN
; RUN: llc < %s -mtriple=armv8-none-linux-gnueabi -mcpu=cortex-a57 | FileCheck %s --check-prefix=NO-STRICT-ALIGN
+; RUN: llc < %s -mtriple=armv8-none-linux-gnueabi -mcpu=cortex-a72 -arm-no-strict-align | FileCheck %s --check-prefix=NO-STRICT-ALIGN
+; RUN: llc < %s -mtriple=armv8-none-linux-gnueabi -mcpu=cortex-a72 -arm-strict-align | FileCheck %s --check-prefix=STRICT-ALIGN
+; RUN: llc < %s -mtriple=armv8-none-linux-gnueabi -mcpu=cortex-a72 | FileCheck %s --check-prefix=NO-STRICT-ALIGN
; ARMv7a
; RUN: llc < %s -mtriple=armv7-none-linux-gnueabi -mcpu=cortex-a7 -arm-no-strict-align | FileCheck %s --check-prefix=NO-STRICT-ALIGN
; RUN: llc < %s -mtriple=armv7-none-linux-gnueabi -mcpu=cortex-a7 -arm-strict-align | FileCheck %s --check-prefix=STRICT-ALIGN
@@ -82,75 +152,185 @@
; XSCALE: .eabi_attribute 8, 1
; XSCALE: .eabi_attribute 9, 1
+; DYN-ROUNDING: .eabi_attribute 19, 1
+
; V6: .eabi_attribute 6, 6
; V6: .eabi_attribute 8, 1
+;; We assume round-to-nearest by default (matches GCC)
+; V6-NOT: .eabi_attribute 19
+;; The default choice made by llc is for a V6 CPU without an FPU.
+;; For such CPUs the default intention is to use software floating-point
+;; support, so the exact choice below is not important.
+; V6: .eabi_attribute 20, 1
+; V6: .eabi_attribute 21, 1
+; V6-NOT: .eabi_attribute 22
+; V6: .eabi_attribute 23, 3
; V6: .eabi_attribute 24, 1
; V6: .eabi_attribute 25, 1
; V6-NOT: .eabi_attribute 27
; V6-NOT: .eabi_attribute 28
; V6-NOT: .eabi_attribute 36
+; V6: .eabi_attribute 38, 1
; V6-NOT: .eabi_attribute 42
+; V6-NOT: .eabi_attribute 44
; V6-NOT: .eabi_attribute 68
+; V6-FAST-NOT: .eabi_attribute 19
+;; Despite the V6 CPU having no FPU by default, we chose to flush to
+;; positive zero here. There's no hardware support for doing this, but the
+;; fast-maths software library might.
+; V6-FAST-NOT: .eabi_attribute 20
+; V6-FAST-NOT: .eabi_attribute 21
+; V6-FAST-NOT: .eabi_attribute 22
+; V6-FAST: .eabi_attribute 23, 1
+
+;; We emit 6, 12 for both v6-M and v6S-M. Technically this is incorrect for
+;; v6-M, but we don't model the OS extension, so this is fine.
; V6M: .eabi_attribute 6, 12
; V6M-NOT: .eabi_attribute 7
; V6M: .eabi_attribute 8, 0
; V6M: .eabi_attribute 9, 1
+; V6M-NOT: .eabi_attribute 19
+;; The default choice made by llc is for a V6M CPU without an FPU.
+;; For such CPUs the default intention is to use software floating-point
+;; support, so the exact choice below is not important.
+; V6M: .eabi_attribute 20, 1
+; V6M: .eabi_attribute 21, 1
+; V6M-NOT: .eabi_attribute 22
+; V6M: .eabi_attribute 23, 3
; V6M: .eabi_attribute 24, 1
; V6M: .eabi_attribute 25, 1
; V6M-NOT: .eabi_attribute 27
; V6M-NOT: .eabi_attribute 28
; V6M-NOT: .eabi_attribute 36
+; V6M: .eabi_attribute 38, 1
; V6M-NOT: .eabi_attribute 42
+; V6M-NOT: .eabi_attribute 44
; V6M-NOT: .eabi_attribute 68
+; V6M-FAST-NOT: .eabi_attribute 19
+;; Despite the V6M CPU having no FPU by default, we chose to flush to
+;; positive zero here. There's no hardware support for doing this, but the
+;; fast-maths software library might.
+; V6M-FAST-NOT: .eabi_attribute 20
+; V6M-FAST-NOT: .eabi_attribute 21
+; V6M-FAST-NOT: .eabi_attribute 22
+; V6M-FAST: .eabi_attribute 23, 1
+
; ARM1156T2F-S: .cpu arm1156t2f-s
; ARM1156T2F-S: .eabi_attribute 6, 8
; ARM1156T2F-S: .eabi_attribute 8, 1
; ARM1156T2F-S: .eabi_attribute 9, 2
; ARM1156T2F-S: .fpu vfpv2
+; ARM1156T2F-S-NOT: .eabi_attribute 19
+;; We default to IEEE 754 compliance
; ARM1156T2F-S: .eabi_attribute 20, 1
; ARM1156T2F-S: .eabi_attribute 21, 1
+; ARM1156T2F-S-NOT: .eabi_attribute 22
; ARM1156T2F-S: .eabi_attribute 23, 3
; ARM1156T2F-S: .eabi_attribute 24, 1
; ARM1156T2F-S: .eabi_attribute 25, 1
; ARM1156T2F-S-NOT: .eabi_attribute 27
; ARM1156T2F-S-NOT: .eabi_attribute 28
; ARM1156T2F-S-NOT: .eabi_attribute 36
+; ARM1156T2F-S: .eabi_attribute 38, 1
; ARM1156T2F-S-NOT: .eabi_attribute 42
+; ARM1156T2F-S-NOT: .eabi_attribute 44
; ARM1156T2F-S-NOT: .eabi_attribute 68
+; ARM1156T2F-S-FAST-NOT: .eabi_attribute 19
+;; V6 cores default to flushing to positive zero (value 0). Value 2 is equally
+;; valid for this core; which of 0 and 2 to select is an implementation-defined
+;; question. LLVM historically picks 0.
+; ARM1156T2F-S-FAST-NOT: .eabi_attribute 20
+; ARM1156T2F-S-FAST-NOT: .eabi_attribute 21
+; ARM1156T2F-S-FAST-NOT: .eabi_attribute 22
+; ARM1156T2F-S-FAST: .eabi_attribute 23, 1
+
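For reference when reading the attribute-20 checks in this file, the Tag_ABI_FP_denormal encodings, summarized consistently with the comments here (the ABI addenda document is authoritative):

    20, 0    denormals may be flushed to positive zero
    20, 1    full IEEE 754 denormal support (the compliant default)
    20, 2    flush to zero, preserving the sign of zero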
; V7M: .eabi_attribute 6, 10
; V7M: .eabi_attribute 7, 77
; V7M: .eabi_attribute 8, 0
; V7M: .eabi_attribute 9, 2
+; V7M-NOT: .eabi_attribute 19
+;; The default choice made by llc is for a V7M CPU without an FPU.
+;; For such CPUs the default intention is to use software floating-point
+;; support, so the exact choice below is not important.
+; V7M: .eabi_attribute 20, 1
+; V7M: .eabi_attribute 21, 1
+; V7M-NOT: .eabi_attribute 22
+; V7M: .eabi_attribute 23, 3
; V7M: .eabi_attribute 24, 1
; V7M: .eabi_attribute 25, 1
; V7M-NOT: .eabi_attribute 27
; V7M-NOT: .eabi_attribute 28
; V7M-NOT: .eabi_attribute 36
+; V7M: .eabi_attribute 38, 1
; V7M-NOT: .eabi_attribute 42
; V7M-NOT: .eabi_attribute 44
; V7M-NOT: .eabi_attribute 68
+; V7M-FAST-NOT: .eabi_attribute 19
+;; Despite the V7M CPU having no FPU by default, we chose to flush
+;; preserving sign. This matches what the hardware would do if an FPU for
+;; this architecture revision were to exist on the current target.
+; V7M-FAST: .eabi_attribute 20, 2
+; V7M-FAST-NOT: .eabi_attribute 21
+; V7M-FAST-NOT: .eabi_attribute 22
+; V7M-FAST: .eabi_attribute 23, 1
+
; V7: .syntax unified
; V7: .eabi_attribute 6, 10
+; V7-NOT: .eabi_attribute 19
+;; In safe-maths mode we default to an IEEE 754 compliant choice.
; V7: .eabi_attribute 20, 1
; V7: .eabi_attribute 21, 1
+; V7-NOT: .eabi_attribute 22
; V7: .eabi_attribute 23, 3
; V7: .eabi_attribute 24, 1
; V7: .eabi_attribute 25, 1
; V7-NOT: .eabi_attribute 27
; V7-NOT: .eabi_attribute 28
; V7-NOT: .eabi_attribute 36
+; V7: .eabi_attribute 38, 1
; V7-NOT: .eabi_attribute 42
+; V7-NOT: .eabi_attribute 44
; V7-NOT: .eabi_attribute 68
+; V7-FAST-NOT: .eabi_attribute 19
+;; The default CPU does have an FPU and it must be VFPv3 or better, so it flushes
+;; denormals to zero preserving the sign.
+; V7-FAST: .eabi_attribute 20, 2
+; V7-FAST-NOT: .eabi_attribute 21
+; V7-FAST-NOT: .eabi_attribute 22
+; V7-FAST: .eabi_attribute 23, 1
+
; V8: .syntax unified
+; V8: .eabi_attribute 67, "2.09"
; V8: .eabi_attribute 6, 14
+; V8-NOT: .eabi_attribute 19
+; V8: .eabi_attribute 20, 1
+; V8: .eabi_attribute 21, 1
+; V8-NOT: .eabi_attribute 22
+; V8: .eabi_attribute 23, 3
+; V8-NOT: .eabi_attribute 44
+
+; V8-FAST-NOT: .eabi_attribute 19
+;; The default does have an FPU, and for V8-A, it flushes preserving sign.
+; V8-FAST: .eabi_attribute 20, 2
+; V8-FAST-NOT: .eabi_attribute 21
+; V8-FAST-NOT: .eabi_attribute 22
+; V8-FAST: .eabi_attribute 23, 1
; Vt8: .syntax unified
; Vt8: .eabi_attribute 6, 14
+; Vt8-NOT: .eabi_attribute 19
+; Vt8: .eabi_attribute 20, 1
+; Vt8: .eabi_attribute 21, 1
+; Vt8-NOT: .eabi_attribute 22
+; Vt8: .eabi_attribute 23, 3
; V8-FPARMv8: .syntax unified
; V8-FPARMv8: .eabi_attribute 6, 14
@@ -175,74 +355,95 @@
; NO-STRICT-ALIGN: .eabi_attribute 34, 1
; STRICT-ALIGN: .eabi_attribute 34, 0
-; Tag_CPU_arch 'ARMv7'
-; CORTEX-A7-CHECK: .eabi_attribute 6, 10
-; CORTEX-A7-NOFPU: .eabi_attribute 6, 10
-; CORTEX-A7-FPUV4: .eabi_attribute 6, 10
+; Tag_CPU_arch 'ARMv7'
+; CORTEX-A7-CHECK: .eabi_attribute 6, 10
+; CORTEX-A7-NOFPU: .eabi_attribute 6, 10
+; CORTEX-A7-FPUV4: .eabi_attribute 6, 10
; Tag_CPU_arch_profile 'A'
-; CORTEX-A7-CHECK: .eabi_attribute 7, 65
-; CORTEX-A7-NOFPU: .eabi_attribute 7, 65
-; CORTEX-A7-FPUV4: .eabi_attribute 7, 65
+; CORTEX-A7-CHECK: .eabi_attribute 7, 65
+; CORTEX-A7-NOFPU: .eabi_attribute 7, 65
+; CORTEX-A7-FPUV4: .eabi_attribute 7, 65
; Tag_ARM_ISA_use
-; CORTEX-A7-CHECK: .eabi_attribute 8, 1
-; CORTEX-A7-NOFPU: .eabi_attribute 8, 1
-; CORTEX-A7-FPUV4: .eabi_attribute 8, 1
+; CORTEX-A7-CHECK: .eabi_attribute 8, 1
+; CORTEX-A7-NOFPU: .eabi_attribute 8, 1
+; CORTEX-A7-FPUV4: .eabi_attribute 8, 1
; Tag_THUMB_ISA_use
-; CORTEX-A7-CHECK: .eabi_attribute 9, 2
-; CORTEX-A7-NOFPU: .eabi_attribute 9, 2
-; CORTEX-A7-FPUV4: .eabi_attribute 9, 2
+; CORTEX-A7-CHECK: .eabi_attribute 9, 2
+; CORTEX-A7-NOFPU: .eabi_attribute 9, 2
+; CORTEX-A7-FPUV4: .eabi_attribute 9, 2
-; CORTEX-A7-CHECK: .fpu neon-vfpv4
+; CORTEX-A7-CHECK: .fpu neon-vfpv4
; CORTEX-A7-NOFPU-NOT: .fpu
-; CORTEX-A7-FPUV4: .fpu vfpv4
+; CORTEX-A7-FPUV4: .fpu vfpv4
+; CORTEX-A7-CHECK-NOT: .eabi_attribute 19
; Tag_ABI_FP_denormal
-; CORTEX-A7-CHECK: .eabi_attribute 20, 1
-; CORTEX-A7-NOFPU: .eabi_attribute 20, 1
-; CORTEX-A7-FPUV4: .eabi_attribute 20, 1
+;; We default to IEEE 754 compliance
+; CORTEX-A7-CHECK: .eabi_attribute 20, 1
+;; The A7 has VFPv4 support by default, so it flushes preserving sign.
+; CORTEX-A7-CHECK-FAST: .eabi_attribute 20, 2
+; CORTEX-A7-NOFPU: .eabi_attribute 20, 1
+;; Despite there being no FPU, we chose to flush to zero preserving
+;; sign. This matches what the hardware would do for this architecture
+;; revision.
+; CORTEX-A7-NOFPU-FAST: .eabi_attribute 20, 2
+; CORTEX-A7-FPUV4: .eabi_attribute 20, 1
+;; The VFPv4 FPU flushes preserving sign.
+; CORTEX-A7-FPUV4-FAST: .eabi_attribute 20, 2
; Tag_ABI_FP_exceptions
-; CORTEX-A7-CHECK: .eabi_attribute 21, 1
-; CORTEX-A7-NOFPU: .eabi_attribute 21, 1
-; CORTEX-A7-FPUV4: .eabi_attribute 21, 1
+; CORTEX-A7-CHECK: .eabi_attribute 21, 1
+; CORTEX-A7-NOFPU: .eabi_attribute 21, 1
+; CORTEX-A7-FPUV4: .eabi_attribute 21, 1
+
+; Tag_ABI_FP_user_exceptions
+; CORTEX-A7-CHECK-NOT: .eabi_attribute 22
+; CORTEX-A7-NOFPU-NOT: .eabi_attribute 22
+; CORTEX-A7-FPUV4-NOT: .eabi_attribute 22
; Tag_ABI_FP_number_model
-; CORTEX-A7-CHECK: .eabi_attribute 23, 3
-; CORTEX-A7-NOFPU: .eabi_attribute 23, 3
-; CORTEX-A7-FPUV4: .eabi_attribute 23, 3
+; CORTEX-A7-CHECK: .eabi_attribute 23, 3
+; CORTEX-A7-NOFPU: .eabi_attribute 23, 3
+; CORTEX-A7-FPUV4: .eabi_attribute 23, 3
; Tag_ABI_align_needed
-; CORTEX-A7-CHECK: .eabi_attribute 24, 1
-; CORTEX-A7-NOFPU: .eabi_attribute 24, 1
-; CORTEX-A7-FPUV4: .eabi_attribute 24, 1
+; CORTEX-A7-CHECK: .eabi_attribute 24, 1
+; CORTEX-A7-NOFPU: .eabi_attribute 24, 1
+; CORTEX-A7-FPUV4: .eabi_attribute 24, 1
; Tag_ABI_align_preserved
-; CORTEX-A7-CHECK: .eabi_attribute 25, 1
-; CORTEX-A7-NOFPU: .eabi_attribute 25, 1
-; CORTEX-A7-FPUV4: .eabi_attribute 25, 1
+; CORTEX-A7-CHECK: .eabi_attribute 25, 1
+; CORTEX-A7-NOFPU: .eabi_attribute 25, 1
+; CORTEX-A7-FPUV4: .eabi_attribute 25, 1
; Tag_FP_HP_extension
-; CORTEX-A7-CHECK: .eabi_attribute 36, 1
-; CORTEX-A7-NOFPU: .eabi_attribute 36, 1
-; CORTEX-A7-FPUV4: .eabi_attribute 36, 1
+; CORTEX-A7-CHECK: .eabi_attribute 36, 1
+; CORTEX-A7-NOFPU: .eabi_attribute 36, 1
+; CORTEX-A7-FPUV4: .eabi_attribute 36, 1
+
+; Tag_FP_16bit_format
+; CORTEX-A7-CHECK: .eabi_attribute 38, 1
+; CORTEX-A7-NOFPU: .eabi_attribute 38, 1
+; CORTEX-A7-FPUV4: .eabi_attribute 38, 1
; Tag_MPextension_use
-; CORTEX-A7-CHECK: .eabi_attribute 42, 1
-; CORTEX-A7-NOFPU: .eabi_attribute 42, 1
-; CORTEX-A7-FPUV4: .eabi_attribute 42, 1
+; CORTEX-A7-CHECK: .eabi_attribute 42, 1
+; CORTEX-A7-NOFPU: .eabi_attribute 42, 1
+; CORTEX-A7-FPUV4: .eabi_attribute 42, 1
; Tag_DIV_use
-; CORTEX-A7-CHECK: .eabi_attribute 44, 2
-; CORTEX-A7-NOFPU: .eabi_attribute 44, 2
-; CORTEX-A7-FPUV4: .eabi_attribute 44, 2
+; CORTEX-A7-CHECK: .eabi_attribute 44, 2
+; CORTEX-A7-NOFPU: .eabi_attribute 44, 2
+; CORTEX-A7-FPUV4: .eabi_attribute 44, 2
; Tag_Virtualization_use
-; CORTEX-A7-CHECK: .eabi_attribute 68, 3
-; CORTEX-A7-NOFPU: .eabi_attribute 68, 3
-; CORTEX-A7-FPUV4: .eabi_attribute 68, 3
+; CORTEX-A7-CHECK: .eabi_attribute 68, 3
+; CORTEX-A7-NOFPU: .eabi_attribute 68, 3
+; CORTEX-A7-FPUV4: .eabi_attribute 68, 3
; CORTEX-A5-DEFAULT: .cpu cortex-a5
; CORTEX-A5-DEFAULT: .eabi_attribute 6, 10
@@ -250,84 +451,146 @@
; CORTEX-A5-DEFAULT: .eabi_attribute 8, 1
; CORTEX-A5-DEFAULT: .eabi_attribute 9, 2
; CORTEX-A5-DEFAULT: .fpu neon-vfpv4
+; CORTEX-A5-DEFAULT-NOT: .eabi_attribute 19
+;; We default to IEEE 754 compliance
; CORTEX-A5-DEFAULT: .eabi_attribute 20, 1
; CORTEX-A5-DEFAULT: .eabi_attribute 21, 1
+; CORTEX-A5-DEFAULT-NOT: .eabi_attribute 22
; CORTEX-A5-DEFAULT: .eabi_attribute 23, 3
; CORTEX-A5-DEFAULT: .eabi_attribute 24, 1
; CORTEX-A5-DEFAULT: .eabi_attribute 25, 1
; CORTEX-A5-DEFAULT: .eabi_attribute 42, 1
+; CORTEX-A5-DEFAULT-NOT: .eabi_attribute 44
; CORTEX-A5-DEFAULT: .eabi_attribute 68, 1
+; CORTEX-A5-DEFAULT-FAST-NOT: .eabi_attribute 19
+;; The A5 defaults to a VFPv4 FPU, so it flushes preserving sign when -ffast-math
+;; is given.
+; CORTEX-A5-DEFAULT-FAST: .eabi_attribute 20, 2
+; CORTEX-A5-DEFAULT-FAST-NOT: .eabi_attribute 21
+; CORTEX-A5-DEFAULT-FAST-NOT: .eabi_attribute 22
+; CORTEX-A5-DEFAULT-FAST: .eabi_attribute 23, 1
+
; CORTEX-A5-NONEON: .cpu cortex-a5
; CORTEX-A5-NONEON: .eabi_attribute 6, 10
; CORTEX-A5-NONEON: .eabi_attribute 7, 65
; CORTEX-A5-NONEON: .eabi_attribute 8, 1
; CORTEX-A5-NONEON: .eabi_attribute 9, 2
; CORTEX-A5-NONEON: .fpu vfpv4-d16
+;; We default to IEEE 754 compliance
; CORTEX-A5-NONEON: .eabi_attribute 20, 1
; CORTEX-A5-NONEON: .eabi_attribute 21, 1
+; CORTEX-A5-NONEON-NOT: .eabi_attribute 22
; CORTEX-A5-NONEON: .eabi_attribute 23, 3
; CORTEX-A5-NONEON: .eabi_attribute 24, 1
; CORTEX-A5-NONEON: .eabi_attribute 25, 1
; CORTEX-A5-NONEON: .eabi_attribute 42, 1
; CORTEX-A5-NONEON: .eabi_attribute 68, 1
+; CORTEX-A5-NONEON-FAST-NOT: .eabi_attribute 19
+;; The A5 defaults to a VFPv4 FPU, so it flushes preserving sign when -ffast-math
+;; is given.
+; CORTEX-A5-NONEON-FAST: .eabi_attribute 20, 2
+; CORTEX-A5-NONEON-FAST-NOT: .eabi_attribute 21
+; CORTEX-A5-NONEON-FAST-NOT: .eabi_attribute 22
+; CORTEX-A5-NONEON-FAST: .eabi_attribute 23, 1
+
; CORTEX-A5-NOFPU: .cpu cortex-a5
; CORTEX-A5-NOFPU: .eabi_attribute 6, 10
; CORTEX-A5-NOFPU: .eabi_attribute 7, 65
; CORTEX-A5-NOFPU: .eabi_attribute 8, 1
; CORTEX-A5-NOFPU: .eabi_attribute 9, 2
; CORTEX-A5-NOFPU-NOT: .fpu
+; CORTEX-A5-NOFPU-NOT: .eabi_attribute 19
+;; We default to IEEE 754 compliance
; CORTEX-A5-NOFPU: .eabi_attribute 20, 1
; CORTEX-A5-NOFPU: .eabi_attribute 21, 1
+; CORTEX-A5-NOFPU-NOT: .eabi_attribute 22
; CORTEX-A5-NOFPU: .eabi_attribute 23, 3
; CORTEX-A5-NOFPU: .eabi_attribute 24, 1
; CORTEX-A5-NOFPU: .eabi_attribute 25, 1
; CORTEX-A5-NOFPU: .eabi_attribute 42, 1
; CORTEX-A5-NOFPU: .eabi_attribute 68, 1
+; CORTEX-A5-NOFPU-FAST-NOT: .eabi_attribute 19
+;; Despite there being no FPU, we chose to flush to zero preserving
+;; sign. This matches what the hardware would do for this architecture
+;; revision.
+; CORTEX-A5-NOFPU-FAST: .eabi_attribute 20, 2
+; CORTEX-A5-NOFPU-FAST-NOT: .eabi_attribute 21
+; CORTEX-A5-NOFPU-FAST-NOT: .eabi_attribute 22
+; CORTEX-A5-NOFPU-FAST: .eabi_attribute 23, 1
+
; CORTEX-A9-SOFT: .cpu cortex-a9
; CORTEX-A9-SOFT: .eabi_attribute 6, 10
; CORTEX-A9-SOFT: .eabi_attribute 7, 65
; CORTEX-A9-SOFT: .eabi_attribute 8, 1
; CORTEX-A9-SOFT: .eabi_attribute 9, 2
; CORTEX-A9-SOFT: .fpu neon
+; CORTEX-A9-SOFT-NOT: .eabi_attribute 19
+;; We default to IEEE 754 compliance
; CORTEX-A9-SOFT: .eabi_attribute 20, 1
; CORTEX-A9-SOFT: .eabi_attribute 21, 1
+; CORTEX-A9-SOFT-NOT: .eabi_attribute 22
; CORTEX-A9-SOFT: .eabi_attribute 23, 3
; CORTEX-A9-SOFT: .eabi_attribute 24, 1
; CORTEX-A9-SOFT: .eabi_attribute 25, 1
; CORTEX-A9-SOFT-NOT: .eabi_attribute 27
; CORTEX-A9-SOFT-NOT: .eabi_attribute 28
; CORTEX-A9-SOFT: .eabi_attribute 36, 1
+; CORTEX-A9-SOFT: .eabi_attribute 38, 1
; CORTEX-A9-SOFT: .eabi_attribute 42, 1
+; CORTEX-A9-SOFT-NOT: .eabi_attribute 44
; CORTEX-A9-SOFT: .eabi_attribute 68, 1
+; CORTEX-A9-SOFT-FAST-NOT: .eabi_attribute 19
+;; The A9 defaults to a VFPv3 FPU, so it flushes preserving sign when
+;; -ffast-math is specified.
+; CORTEX-A9-SOFT-FAST: .eabi_attribute 20, 2
+; CORTEX-A9-SOFT-FAST-NOT: .eabi_attribute 21
+; CORTEX-A9-SOFT-FAST-NOT: .eabi_attribute 22
+; CORTEX-A9-SOFT-FAST: .eabi_attribute 23, 1
+
; CORTEX-A9-HARD: .cpu cortex-a9
; CORTEX-A9-HARD: .eabi_attribute 6, 10
; CORTEX-A9-HARD: .eabi_attribute 7, 65
; CORTEX-A9-HARD: .eabi_attribute 8, 1
; CORTEX-A9-HARD: .eabi_attribute 9, 2
; CORTEX-A9-HARD: .fpu neon
+; CORTEX-A9-HARD-NOT: .eabi_attribute 19
+;; We default to IEEE 754 compliance
; CORTEX-A9-HARD: .eabi_attribute 20, 1
; CORTEX-A9-HARD: .eabi_attribute 21, 1
+; CORTEX-A9-HARD-NOT: .eabi_attribute 22
; CORTEX-A9-HARD: .eabi_attribute 23, 3
; CORTEX-A9-HARD: .eabi_attribute 24, 1
; CORTEX-A9-HARD: .eabi_attribute 25, 1
; CORTEX-A9-HARD-NOT: .eabi_attribute 27
; CORTEX-A9-HARD: .eabi_attribute 28, 1
; CORTEX-A9-HARD: .eabi_attribute 36, 1
+; CORTEX-A9-HARD: .eabi_attribute 38, 1
; CORTEX-A9-HARD: .eabi_attribute 42, 1
; CORTEX-A9-HARD: .eabi_attribute 68, 1
+; CORTEX-A9-HARD-FAST-NOT: .eabi_attribute 19
+;; The A9 defaults to a VFPv3 FPU, so it flushes preserving sign when
+;; -ffast-math is specified.
+; CORTEX-A9-HARD-FAST: .eabi_attribute 20, 2
+; CORTEX-A9-HARD-FAST-NOT: .eabi_attribute 21
+; CORTEX-A9-HARD-FAST-NOT: .eabi_attribute 22
+; CORTEX-A9-HARD-FAST: .eabi_attribute 23, 1
+
; CORTEX-A12-DEFAULT: .cpu cortex-a12
; CORTEX-A12-DEFAULT: .eabi_attribute 6, 10
; CORTEX-A12-DEFAULT: .eabi_attribute 7, 65
; CORTEX-A12-DEFAULT: .eabi_attribute 8, 1
; CORTEX-A12-DEFAULT: .eabi_attribute 9, 2
; CORTEX-A12-DEFAULT: .fpu neon-vfpv4
+; CORTEX-A12-DEFAULT-NOT: .eabi_attribute 19
+;; We default to IEEE 754 compliance
; CORTEX-A12-DEFAULT: .eabi_attribute 20, 1
; CORTEX-A12-DEFAULT: .eabi_attribute 21, 1
+; CORTEX-A12-DEFAULT-NOT: .eabi_attribute 22
; CORTEX-A12-DEFAULT: .eabi_attribute 23, 3
; CORTEX-A12-DEFAULT: .eabi_attribute 24, 1
; CORTEX-A12-DEFAULT: .eabi_attribute 25, 1
@@ -335,14 +598,25 @@
; CORTEX-A12-DEFAULT: .eabi_attribute 44, 2
; CORTEX-A12-DEFAULT: .eabi_attribute 68, 3
+; CORTEX-A12-DEFAULT-FAST-NOT: .eabi_attribute 19
+;; The A12 defaults to a VFPv4 FPU, so it flushes preserving sign when
+;; -ffast-math is specified.
+; CORTEX-A12-DEFAULT-FAST: .eabi_attribute 20, 2
+; CORTEX-A12-DEFAULT-FAST-NOT: .eabi_attribute 21
+; CORTEX-A12-DEFAULT-FAST-NOT: .eabi_attribute 22
+; CORTEX-A12-DEFAULT-FAST: .eabi_attribute 23, 1
+
; CORTEX-A12-NOFPU: .cpu cortex-a12
; CORTEX-A12-NOFPU: .eabi_attribute 6, 10
; CORTEX-A12-NOFPU: .eabi_attribute 7, 65
; CORTEX-A12-NOFPU: .eabi_attribute 8, 1
; CORTEX-A12-NOFPU: .eabi_attribute 9, 2
; CORTEX-A12-NOFPU-NOT: .fpu
+; CORTEX-A12-NOFPU-NOT: .eabi_attribute 19
+;; We default to IEEE 754 compliance
; CORTEX-A12-NOFPU: .eabi_attribute 20, 1
; CORTEX-A12-NOFPU: .eabi_attribute 21, 1
+; CORTEX-A12-NOFPU-NOT: .eabi_attribute 22
; CORTEX-A12-NOFPU: .eabi_attribute 23, 3
; CORTEX-A12-NOFPU: .eabi_attribute 24, 1
; CORTEX-A12-NOFPU: .eabi_attribute 25, 1
@@ -350,32 +624,56 @@
; CORTEX-A12-NOFPU: .eabi_attribute 44, 2
; CORTEX-A12-NOFPU: .eabi_attribute 68, 3
+; CORTEX-A12-NOFPU-FAST-NOT: .eabi_attribute 19
+;; Despite there being no FPU, we chose to flush to zero preserving
+;; sign. This matches what the hardware would do for this architecture
+;; revision.
+; CORTEX-A12-NOFPU-FAST: .eabi_attribute 20, 2
+; CORTEX-A12-NOFPU-FAST-NOT: .eabi_attribute 21
+; CORTEX-A12-NOFPU-FAST-NOT: .eabi_attribute 22
+; CORTEX-A12-NOFPU-FAST: .eabi_attribute 23, 1
+
; CORTEX-A15: .cpu cortex-a15
; CORTEX-A15: .eabi_attribute 6, 10
; CORTEX-A15: .eabi_attribute 7, 65
; CORTEX-A15: .eabi_attribute 8, 1
; CORTEX-A15: .eabi_attribute 9, 2
; CORTEX-A15: .fpu neon-vfpv4
+; CORTEX-A15-NOT: .eabi_attribute 19
+;; We default to IEEE 754 compliance
; CORTEX-A15: .eabi_attribute 20, 1
; CORTEX-A15: .eabi_attribute 21, 1
+; CORTEX-A15-NOT: .eabi_attribute 22
; CORTEX-A15: .eabi_attribute 23, 3
; CORTEX-A15: .eabi_attribute 24, 1
; CORTEX-A15: .eabi_attribute 25, 1
; CORTEX-A15-NOT: .eabi_attribute 27
; CORTEX-A15-NOT: .eabi_attribute 28
; CORTEX-A15: .eabi_attribute 36, 1
+; CORTEX-A15: .eabi_attribute 38, 1
; CORTEX-A15: .eabi_attribute 42, 1
; CORTEX-A15: .eabi_attribute 44, 2
; CORTEX-A15: .eabi_attribute 68, 3
+; CORTEX-A15-FAST-NOT: .eabi_attribute 19
+;; The A15 defaults to a VFPv4 FPU, so it flushes preserving sign when
+;; -ffast-math is specified.
+; CORTEX-A15-FAST: .eabi_attribute 20, 2
+; CORTEX-A15-FAST-NOT: .eabi_attribute 21
+; CORTEX-A15-FAST-NOT: .eabi_attribute 22
+; CORTEX-A15-FAST: .eabi_attribute 23, 1
+
; CORTEX-A17-DEFAULT: .cpu cortex-a17
; CORTEX-A17-DEFAULT: .eabi_attribute 6, 10
; CORTEX-A17-DEFAULT: .eabi_attribute 7, 65
; CORTEX-A17-DEFAULT: .eabi_attribute 8, 1
; CORTEX-A17-DEFAULT: .eabi_attribute 9, 2
; CORTEX-A17-DEFAULT: .fpu neon-vfpv4
+; CORTEX-A17-DEFAULT-NOT: .eabi_attribute 19
+;; We default to IEEE 754 compliance
; CORTEX-A17-DEFAULT: .eabi_attribute 20, 1
; CORTEX-A17-DEFAULT: .eabi_attribute 21, 1
+; CORTEX-A17-DEFAULT-NOT: .eabi_attribute 22
; CORTEX-A17-DEFAULT: .eabi_attribute 23, 3
; CORTEX-A17-DEFAULT: .eabi_attribute 24, 1
; CORTEX-A17-DEFAULT: .eabi_attribute 25, 1
@@ -383,14 +681,25 @@
; CORTEX-A17-DEFAULT: .eabi_attribute 44, 2
; CORTEX-A17-DEFAULT: .eabi_attribute 68, 3
+; CORTEX-A17-FAST-NOT: .eabi_attribute 19
+;; The A17 defaults to a VFPv4 FPU, so it flushes preserving sign when
+;; -ffast-math is specified.
+; CORTEX-A17-FAST: .eabi_attribute 20, 2
+; CORTEX-A17-FAST-NOT: .eabi_attribute 21
+; CORTEX-A17-FAST-NOT: .eabi_attribute 22
+; CORTEX-A17-FAST: .eabi_attribute 23, 1
+
; CORTEX-A17-NOFPU: .cpu cortex-a17
; CORTEX-A17-NOFPU: .eabi_attribute 6, 10
; CORTEX-A17-NOFPU: .eabi_attribute 7, 65
; CORTEX-A17-NOFPU: .eabi_attribute 8, 1
; CORTEX-A17-NOFPU: .eabi_attribute 9, 2
; CORTEX-A17-NOFPU-NOT: .fpu
+; CORTEX-A17-NOFPU-NOT: .eabi_attribute 19
+;; We default to IEEE 754 compliance
; CORTEX-A17-NOFPU: .eabi_attribute 20, 1
; CORTEX-A17-NOFPU: .eabi_attribute 21, 1
+; CORTEX-A17-NOFPU-NOT: .eabi_attribute 22
; CORTEX-A17-NOFPU: .eabi_attribute 23, 3
; CORTEX-A17-NOFPU: .eabi_attribute 24, 1
; CORTEX-A17-NOFPU: .eabi_attribute 25, 1
@@ -398,72 +707,263 @@
; CORTEX-A17-NOFPU: .eabi_attribute 44, 2
; CORTEX-A17-NOFPU: .eabi_attribute 68, 3
+; CORTEX-A17-NOFPU-FAST-NOT: .eabi_attribute 19
+;; Despite there being no FPU, we chose to flush to zero preserving
+;; sign. This matches what the hardware would do for this architecture
+;; revision.
+; CORTEX-A17-NOFPU-FAST: .eabi_attribute 20, 2
+; CORTEX-A17-NOFPU-FAST-NOT: .eabi_attribute 21
+; CORTEX-A17-NOFPU-FAST-NOT: .eabi_attribute 22
+; CORTEX-A17-NOFPU-FAST: .eabi_attribute 23, 1
+
; CORTEX-M0: .cpu cortex-m0
; CORTEX-M0: .eabi_attribute 6, 12
; CORTEX-M0-NOT: .eabi_attribute 7
; CORTEX-M0: .eabi_attribute 8, 0
; CORTEX-M0: .eabi_attribute 9, 1
+; CORTEX-M0-NOT: .eabi_attribute 19
+;; We default to IEEE 754 compliance
+; CORTEX-M0: .eabi_attribute 20, 1
+; CORTEX-M0: .eabi_attribute 21, 1
+; CORTEX-M0-NOT: .eabi_attribute 22
+; CORTEX-M0: .eabi_attribute 23, 3
; CORTEX-M0: .eabi_attribute 24, 1
; CORTEX-M0: .eabi_attribute 25, 1
; CORTEX-M0-NOT: .eabi_attribute 27
; CORTEX-M0-NOT: .eabi_attribute 28
; CORTEX-M0-NOT: .eabi_attribute 36
+; CORTEX-M0: .eabi_attribute 38, 1
; CORTEX-M0-NOT: .eabi_attribute 42
+; CORTEX-M0-NOT: .eabi_attribute 44
; CORTEX-M0-NOT: .eabi_attribute 68
+; CORTEX-M0-FAST-NOT: .eabi_attribute 19
+;; Despite the M0 CPU having no FPU in this scenario, we chose to
+;; flush to positive zero here. There's no hardware support for doing
+;; this, but the fast maths software library might, and such behaviour
+;; would match hardware support on this architecture revision if it
+;; existed.
+; CORTEX-M0-FAST-NOT: .eabi_attribute 20
+; CORTEX-M0-FAST-NOT: .eabi_attribute 21
+; CORTEX-M0-FAST-NOT: .eabi_attribute 22
+; CORTEX-M0-FAST: .eabi_attribute 23, 1
+
+; CORTEX-M0PLUS: .cpu cortex-m0plus
+; CORTEX-M0PLUS: .eabi_attribute 6, 12
+; CORTEX-M0PLUS-NOT: .eabi_attribute 7
+; CORTEX-M0PLUS: .eabi_attribute 8, 0
+; CORTEX-M0PLUS: .eabi_attribute 9, 1
+; CORTEX-M0PLUS-NOT: .eabi_attribute 19
+;; We default to IEEE 754 compliance
+; CORTEX-M0PLUS: .eabi_attribute 20, 1
+; CORTEX-M0PLUS: .eabi_attribute 21, 1
+; CORTEX-M0PLUS-NOT: .eabi_attribute 22
+; CORTEX-M0PLUS: .eabi_attribute 23, 3
+; CORTEX-M0PLUS: .eabi_attribute 24, 1
+; CORTEX-M0PLUS: .eabi_attribute 25, 1
+; CORTEX-M0PLUS-NOT: .eabi_attribute 27
+; CORTEX-M0PLUS-NOT: .eabi_attribute 28
+; CORTEX-M0PLUS-NOT: .eabi_attribute 36
+; CORTEX-M0PLUS: .eabi_attribute 38, 1
+; CORTEX-M0PLUS-NOT: .eabi_attribute 42
+; CORTEX-M0PLUS-NOT: .eabi_attribute 44
+; CORTEX-M0PLUS-NOT: .eabi_attribute 68
+
+; CORTEX-M0PLUS-FAST-NOT: .eabi_attribute 19
+;; Despite the M0+ CPU having no FPU in this scenario, we chose to
+;; flush to positive zero here. There's no hardware support for doing
+;; this, but the fast maths software library might, and such behaviour
+;; would match hardware support on this architecture revision if it
+;; existed.
+; CORTEX-M0PLUS-FAST-NOT: .eabi_attribute 20
+; CORTEX-M0PLUS-FAST-NOT: .eabi_attribute 21
+; CORTEX-M0PLUS-FAST-NOT: .eabi_attribute 22
+; CORTEX-M0PLUS-FAST: .eabi_attribute 23, 1
+
+; CORTEX-M1: .cpu cortex-m1
+; CORTEX-M1: .eabi_attribute 6, 12
+; CORTEX-M1-NOT: .eabi_attribute 7
+; CORTEX-M1: .eabi_attribute 8, 0
+; CORTEX-M1: .eabi_attribute 9, 1
+; CORTEX-M1-NOT: .eabi_attribute 19
+;; We default to IEEE 754 compliance
+; CORTEX-M1: .eabi_attribute 20, 1
+; CORTEX-M1: .eabi_attribute 21, 1
+; CORTEX-M1-NOT: .eabi_attribute 22
+; CORTEX-M1: .eabi_attribute 23, 3
+; CORTEX-M1: .eabi_attribute 24, 1
+; CORTEX-M1: .eabi_attribute 25, 1
+; CORTEX-M1-NOT: .eabi_attribute 27
+; CORTEX-M1-NOT: .eabi_attribute 28
+; CORTEX-M1-NOT: .eabi_attribute 36
+; CORTEX-M1: .eabi_attribute 38, 1
+; CORTEX-M1-NOT: .eabi_attribute 42
+; CORTEX-M1-NOT: .eabi_attribute 44
+; CORTEX-M1-NOT: .eabi_attribute 68
+
+; CORTEX-M1-FAST-NOT: .eabi_attribute 19
+;; Despite the M1 CPU having no FPU in this scenario, we chose to
+;; flush to positive zero here. There's no hardware support for doing
+;; this, but the fast maths software library might, and such behaviour
+;; would match hardware support on this architecture revision if it
+;; existed.
+; CORTEX-M1-FAST-NOT: .eabi_attribute 20
+; CORTEX-M1-FAST-NOT: .eabi_attribute 21
+; CORTEX-M1-FAST-NOT: .eabi_attribute 22
+; CORTEX-M1-FAST: .eabi_attribute 23, 1
+
+; SC000: .cpu sc000
+; SC000: .eabi_attribute 6, 12
+; SC000-NOT: .eabi_attribute 7
+; SC000: .eabi_attribute 8, 0
+; SC000: .eabi_attribute 9, 1
+; SC000-NOT: .eabi_attribute 19
+;; We default to IEEE 754 compliance
+; SC000: .eabi_attribute 20, 1
+; SC000: .eabi_attribute 21, 1
+; SC000-NOT: .eabi_attribute 22
+; SC000: .eabi_attribute 23, 3
+; SC000: .eabi_attribute 24, 1
+; SC000: .eabi_attribute 25, 1
+; SC000-NOT: .eabi_attribute 27
+; SC000-NOT: .eabi_attribute 28
+; SC000-NOT: .eabi_attribute 36
+; SC000: .eabi_attribute 38, 1
+; SC000-NOT: .eabi_attribute 42
+; SC000-NOT: .eabi_attribute 44
+; SC000-NOT: .eabi_attribute 68
+
+; SC000-FAST-NOT: .eabi_attribute 19
+;; Despite the SC000 CPU having no FPU in this scenario, we chose to
+;; flush to positive zero here. There's no hardware support for doing
+;; this, but the fast maths software library might, and such behaviour
+;; would match hardware support on this architecture revision if it
+;; existed.
+; SC000-FAST-NOT: .eabi_attribute 20
+; SC000-FAST-NOT: .eabi_attribute 21
+; SC000-FAST-NOT: .eabi_attribute 22
+; SC000-FAST: .eabi_attribute 23, 1
+
; CORTEX-M3: .cpu cortex-m3
; CORTEX-M3: .eabi_attribute 6, 10
; CORTEX-M3: .eabi_attribute 7, 77
; CORTEX-M3: .eabi_attribute 8, 0
; CORTEX-M3: .eabi_attribute 9, 2
+; CORTEX-M3-NOT: .eabi_attribute 19
+;; We default to IEEE 754 compliance
; CORTEX-M3: .eabi_attribute 20, 1
; CORTEX-M3: .eabi_attribute 21, 1
+; CORTEX-M3-NOT: .eabi_attribute 22
; CORTEX-M3: .eabi_attribute 23, 3
; CORTEX-M3: .eabi_attribute 24, 1
; CORTEX-M3: .eabi_attribute 25, 1
; CORTEX-M3-NOT: .eabi_attribute 27
; CORTEX-M3-NOT: .eabi_attribute 28
; CORTEX-M3-NOT: .eabi_attribute 36
+; CORTEX-M3: .eabi_attribute 38, 1
; CORTEX-M3-NOT: .eabi_attribute 42
; CORTEX-M3-NOT: .eabi_attribute 44
; CORTEX-M3-NOT: .eabi_attribute 68
+; CORTEX-M3-FAST-NOT: .eabi_attribute 19
+;; Despite there being no FPU, we chose to flush to zero preserving
+;; sign. This matches what the hardware would do for this architecture
+;; revision.
+; CORTEX-M3-FAST: .eabi_attribute 20, 2
+; CORTEX-M3-FAST-NOT: .eabi_attribute 21
+; CORTEX-M3-FAST-NOT: .eabi_attribute 22
+; CORTEX-M3-FAST: .eabi_attribute 23, 1
+
+; SC300: .cpu sc300
+; SC300: .eabi_attribute 6, 10
+; SC300: .eabi_attribute 7, 77
+; SC300: .eabi_attribute 8, 0
+; SC300: .eabi_attribute 9, 2
+; SC300-NOT: .eabi_attribute 19
+;; We default to IEEE 754 compliance
+; SC300: .eabi_attribute 20, 1
+; SC300: .eabi_attribute 21, 1
+; SC300-NOT: .eabi_attribute 22
+; SC300: .eabi_attribute 23, 3
+; SC300: .eabi_attribute 24, 1
+; SC300: .eabi_attribute 25, 1
+; SC300-NOT: .eabi_attribute 27
+; SC300-NOT: .eabi_attribute 28
+; SC300-NOT: .eabi_attribute 36
+; SC300: .eabi_attribute 38, 1
+; SC300-NOT: .eabi_attribute 42
+; SC300-NOT: .eabi_attribute 44
+; SC300-NOT: .eabi_attribute 68
+
+; SC300-FAST-NOT: .eabi_attribute 19
+;; Despite there being no FPU, we chose to flush to zero preserving
+;; sign. This matches what the hardware would do for this architecture
+;; revision.
+; SC300-FAST: .eabi_attribute 20, 2
+; SC300-FAST-NOT: .eabi_attribute 21
+; SC300-FAST-NOT: .eabi_attribute 22
+; SC300-FAST: .eabi_attribute 23, 1
+
; CORTEX-M4-SOFT: .cpu cortex-m4
; CORTEX-M4-SOFT: .eabi_attribute 6, 13
; CORTEX-M4-SOFT: .eabi_attribute 7, 77
; CORTEX-M4-SOFT: .eabi_attribute 8, 0
; CORTEX-M4-SOFT: .eabi_attribute 9, 2
; CORTEX-M4-SOFT: .fpu vfpv4-d16
+; CORTEX-M4-SOFT-NOT: .eabi_attribute 19
+;; We default to IEEE 754 compliance
; CORTEX-M4-SOFT: .eabi_attribute 20, 1
; CORTEX-M4-SOFT: .eabi_attribute 21, 1
+; CORTEX-M4-SOFT-NOT: .eabi_attribute 22
; CORTEX-M4-SOFT: .eabi_attribute 23, 3
; CORTEX-M4-SOFT: .eabi_attribute 24, 1
; CORTEX-M4-SOFT: .eabi_attribute 25, 1
; CORTEX-M4-SOFT: .eabi_attribute 27, 1
; CORTEX-M4-SOFT-NOT: .eabi_attribute 28
; CORTEX-M4-SOFT: .eabi_attribute 36, 1
+; CORTEX-M4-SOFT: .eabi_attribute 38, 1
; CORTEX-M4-SOFT-NOT: .eabi_attribute 42
; CORTEX-M4-SOFT-NOT: .eabi_attribute 44
; CORTEX-M4-SOFT-NOT: .eabi_attribute 68
+; CORTEX-M4-SOFT-FAST-NOT: .eabi_attribute 19
+;; The M4 defaults to a VFPv4 FPU, so it flushes preserving sign when
+;; -ffast-math is specified.
+; CORTEX-M4-SOFT-FAST: .eabi_attribute 20, 2
+; CORTEX-M4-SOFT-FAST-NOT: .eabi_attribute 21
+; CORTEX-M4-SOFT-FAST-NOT: .eabi_attribute 22
+; CORTEX-M4-SOFT-FAST: .eabi_attribute 23, 1
+
; CORTEX-M4-HARD: .cpu cortex-m4
; CORTEX-M4-HARD: .eabi_attribute 6, 13
; CORTEX-M4-HARD: .eabi_attribute 7, 77
; CORTEX-M4-HARD: .eabi_attribute 8, 0
; CORTEX-M4-HARD: .eabi_attribute 9, 2
; CORTEX-M4-HARD: .fpu vfpv4-d16
+; CORTEX-M4-HARD-NOT: .eabi_attribute 19
+;; We default to IEEE 754 compliance
; CORTEX-M4-HARD: .eabi_attribute 20, 1
; CORTEX-M4-HARD: .eabi_attribute 21, 1
+; CORTEX-M4-HARD-NOT: .eabi_attribute 22
; CORTEX-M4-HARD: .eabi_attribute 23, 3
; CORTEX-M4-HARD: .eabi_attribute 24, 1
; CORTEX-M4-HARD: .eabi_attribute 25, 1
; CORTEX-M4-HARD: .eabi_attribute 27, 1
; CORTEX-M4-HARD: .eabi_attribute 28, 1
; CORTEX-M4-HARD: .eabi_attribute 36, 1
+; CORTEX-M4-HARD: .eabi_attribute 38, 1
; CORTEX-M4-HARD-NOT: .eabi_attribute 42
; CORTEX-M4-HARD-NOT: .eabi_attribute 44
; CORTEX-M4-HARD-NOT: .eabi_attribute 68
+; CORTEX-M4-HARD-FAST-NOT: .eabi_attribute 19
+;; The M4 defaults to a VFPv4 FPU, so it flushes preserving sign when
+;; -ffast-math is specified.
+; CORTEX-M4-HARD-FAST: .eabi_attribute 20, 2
+; CORTEX-M4-HARD-FAST-NOT: .eabi_attribute 21
+; CORTEX-M4-HARD-FAST-NOT: .eabi_attribute 22
+; CORTEX-M4-HARD-FAST: .eabi_attribute 23, 1
+
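+;; Attribute 28 (Tag_ABI_VFP_args) is what separates the SOFT and HARD
+;; blocks above: it is emitted only when FP arguments are passed in VFP
+;; registers. A sketch of the two driving invocations, assuming this
+;; test's usual conventions, would be:
+;;   llc < %s -mtriple=thumbv7em-linux-gnueabi -mcpu=cortex-m4 \
+;;     -float-abi=soft | FileCheck %s --check-prefix=CORTEX-M4-SOFT
+;;   llc < %s -mtriple=thumbv7em-linux-gnueabi -mcpu=cortex-m4 \
+;;     -float-abi=hard | FileCheck %s --check-prefix=CORTEX-M4-HARD
+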
; CORTEX-M7: .cpu cortex-m7
; CORTEX-M7: .eabi_attribute 6, 13
; CORTEX-M7: .eabi_attribute 7, 77
@@ -473,8 +973,11 @@
; CORTEX-M7-SINGLE: .fpu fpv5-d16
; CORTEX-M7-DOUBLE: .fpu fpv5-d16
; CORTEX-M7: .eabi_attribute 17, 1
+; CORTEX-M7-NOT: .eabi_attribute 19
+;; We default to IEEE 754 compliance
; CORTEX-M7: .eabi_attribute 20, 1
; CORTEX-M7: .eabi_attribute 21, 1
+; CORTEX-M7-NOT: .eabi_attribute 22
; CORTEX-M7: .eabi_attribute 23, 3
; CORTEX-M7: .eabi_attribute 24, 1
; CORTEX-M7: .eabi_attribute 25, 1
@@ -482,26 +985,79 @@
; CORTEX-M7-SINGLE: .eabi_attribute 27, 1
; CORTEX-M7-DOUBLE-NOT: .eabi_attribute 27
; CORTEX-M7: .eabi_attribute 36, 1
+; CORTEX-M7: .eabi_attribute 38, 1
+; CORTEX-M7-NOT: .eabi_attribute 44
; CORTEX-M7: .eabi_attribute 14, 0
+; CORTEX-M7-NOFPU-FAST-NOT: .eabi_attribute 19
+;; The M7 has the ARMv8 FP unit, which always flushes preserving sign.
+; CORTEX-M7-FAST: .eabi_attribute 20, 2
+;; Despite there being no FPU, we chose to flush to zero preserving
+;; sign. This matches what the hardware would do for this architecture
+;; revision.
+; CORTEX-M7-NOFPU-FAST: .eabi_attribute 20, 2
+; CORTEX-M7-NOFPU-FAST-NOT: .eabi_attribute 21
+; CORTEX-M7-NOFPU-FAST-NOT: .eabi_attribute 22
+; CORTEX-M7-NOFPU-FAST: .eabi_attribute 23, 1
+
; CORTEX-R5: .cpu cortex-r5
; CORTEX-R5: .eabi_attribute 6, 10
; CORTEX-R5: .eabi_attribute 7, 82
; CORTEX-R5: .eabi_attribute 8, 1
; CORTEX-R5: .eabi_attribute 9, 2
; CORTEX-R5: .fpu vfpv3-d16
+; CORTEX-R5-NOT: .eabi_attribute 19
+;; We default to IEEE 754 compliance
; CORTEX-R5: .eabi_attribute 20, 1
; CORTEX-R5: .eabi_attribute 21, 1
+; CORTEX-R5-NOT: .eabi_attribute 22
; CORTEX-R5: .eabi_attribute 23, 3
; CORTEX-R5: .eabi_attribute 24, 1
; CORTEX-R5: .eabi_attribute 25, 1
; CORTEX-R5: .eabi_attribute 27, 1
; CORTEX-R5-NOT: .eabi_attribute 28
; CORTEX-R5-NOT: .eabi_attribute 36
+; CORTEX-R5: .eabi_attribute 38, 1
; CORTEX-R5-NOT: .eabi_attribute 42
; CORTEX-R5: .eabi_attribute 44, 2
; CORTEX-R5-NOT: .eabi_attribute 68
+; CORTEX-R5-FAST-NOT: .eabi_attribute 19
+;; The R5 has the VFPv3 FP unit, which always flushes preserving sign.
+; CORTEX-R5-FAST: .eabi_attribute 20, 2
+; CORTEX-R5-FAST-NOT: .eabi_attribute 21
+; CORTEX-R5-FAST-NOT: .eabi_attribute 22
+; CORTEX-R5-FAST: .eabi_attribute 23, 1
+
+; CORTEX-R7: .cpu cortex-r7
+; CORTEX-R7: .eabi_attribute 6, 10
+; CORTEX-R7: .eabi_attribute 7, 82
+; CORTEX-R7: .eabi_attribute 8, 1
+; CORTEX-R7: .eabi_attribute 9, 2
+; CORTEX-R7: .fpu vfpv3-d16
+; CORTEX-R7-NOT: .eabi_attribute 19
+;; We default to IEEE 754 compliance
+; CORTEX-R7: .eabi_attribute 20, 1
+; CORTEX-R7: .eabi_attribute 21, 1
+; CORTEX-R7-NOT: .eabi_attribute 22
+; CORTEX-R7: .eabi_attribute 23, 3
+; CORTEX-R7: .eabi_attribute 24, 1
+; CORTEX-R7: .eabi_attribute 25, 1
+; CORTEX-R7: .eabi_attribute 27, 1
+; CORTEX-R7-NOT: .eabi_attribute 28
+; CORTEX-R7-NOT: .eabi_attribute 36
+; CORTEX-R7: .eabi_attribute 38, 1
+; CORTEX-R7: .eabi_attribute 42, 1
+; CORTEX-R7: .eabi_attribute 44, 2
+; CORTEX-R7-NOT: .eabi_attribute 68
+
+; CORTEX-R7-FAST-NOT: .eabi_attribute 19
+;; The R7 has the VFPv3 FP unit, which always flushes preserving sign.
+; CORTEX-R7-FAST: .eabi_attribute 20, 2
+; CORTEX-R7-FAST-NOT: .eabi_attribute 21
+; CORTEX-R7-FAST-NOT: .eabi_attribute 22
+; CORTEX-R7-FAST: .eabi_attribute 23, 1
+
; CORTEX-A53: .cpu cortex-a53
; CORTEX-A53: .eabi_attribute 6, 14
; CORTEX-A53: .eabi_attribute 7, 65
@@ -509,15 +1065,29 @@
; CORTEX-A53: .eabi_attribute 9, 2
; CORTEX-A53: .fpu crypto-neon-fp-armv8
; CORTEX-A53: .eabi_attribute 12, 3
+; CORTEX-A53-NOT: .eabi_attribute 19
+;; We default to IEEE 754 compliance
+; CORTEX-A53: .eabi_attribute 20, 1
+; CORTEX-A53: .eabi_attribute 21, 1
+; CORTEX-A53-NOT: .eabi_attribute 22
+; CORTEX-A53: .eabi_attribute 23, 3
; CORTEX-A53: .eabi_attribute 24, 1
; CORTEX-A53: .eabi_attribute 25, 1
; CORTEX-A53-NOT: .eabi_attribute 27
; CORTEX-A53-NOT: .eabi_attribute 28
; CORTEX-A53: .eabi_attribute 36, 1
+; CORTEX-A53: .eabi_attribute 38, 1
; CORTEX-A53: .eabi_attribute 42, 1
; CORTEX-A53-NOT: .eabi_attribute 44
; CORTEX-A53: .eabi_attribute 68, 3
+; CORTEX-A53-FAST-NOT: .eabi_attribute 19
+;; The A53 has the ARMv8 FP unit, which always flushes preserving sign.
+; CORTEX-A53-FAST: .eabi_attribute 20, 2
+; CORTEX-A53-FAST-NOT: .eabi_attribute 21
+; CORTEX-A53-FAST-NOT: .eabi_attribute 22
+; CORTEX-A53-FAST: .eabi_attribute 23, 1
+
; CORTEX-A57: .cpu cortex-a57
; CORTEX-A57: .eabi_attribute 6, 14
; CORTEX-A57: .eabi_attribute 7, 65
@@ -525,15 +1095,59 @@
; CORTEX-A57: .eabi_attribute 9, 2
; CORTEX-A57: .fpu crypto-neon-fp-armv8
; CORTEX-A57: .eabi_attribute 12, 3
+; CORTEX-A57-NOT: .eabi_attribute 19
+;; We default to IEEE 754 compliance
+; CORTEX-A57: .eabi_attribute 20, 1
+; CORTEX-A57: .eabi_attribute 21, 1
+; CORTEX-A57-NOT: .eabi_attribute 22
+; CORTEX-A57: .eabi_attribute 23, 3
; CORTEX-A57: .eabi_attribute 24, 1
; CORTEX-A57: .eabi_attribute 25, 1
; CORTEX-A57-NOT: .eabi_attribute 27
; CORTEX-A57-NOT: .eabi_attribute 28
; CORTEX-A57: .eabi_attribute 36, 1
+; CORTEX-A57: .eabi_attribute 38, 1
; CORTEX-A57: .eabi_attribute 42, 1
; CORTEX-A57-NOT: .eabi_attribute 44
; CORTEX-A57: .eabi_attribute 68, 3
+; CORTEX-A57-FAST-NOT: .eabi_attribute 19
+;; The A57 has the ARMv8 FP unit, which always flushes preserving sign.
+; CORTEX-A57-FAST: .eabi_attribute 20, 2
+; CORTEX-A57-FAST-NOT: .eabi_attribute 21
+; CORTEX-A57-FAST-NOT: .eabi_attribute 22
+; CORTEX-A57-FAST: .eabi_attribute 23, 1
+
+; CORTEX-A72: .cpu cortex-a72
+; CORTEX-A72: .eabi_attribute 6, 14
+; CORTEX-A72: .eabi_attribute 7, 65
+; CORTEX-A72: .eabi_attribute 8, 1
+; CORTEX-A72: .eabi_attribute 9, 2
+; CORTEX-A72: .fpu crypto-neon-fp-armv8
+; CORTEX-A72: .eabi_attribute 12, 3
+; CORTEX-A72-NOT: .eabi_attribute 19
+;; We default to IEEE 754 compliance
+; CORTEX-A72: .eabi_attribute 20, 1
+; CORTEX-A72: .eabi_attribute 21, 1
+; CORTEX-A72-NOT: .eabi_attribute 22
+; CORTEX-A72: .eabi_attribute 23, 3
+; CORTEX-A72: .eabi_attribute 24, 1
+; CORTEX-A72: .eabi_attribute 25, 1
+; CORTEX-A72-NOT: .eabi_attribute 27
+; CORTEX-A72-NOT: .eabi_attribute 28
+; CORTEX-A72: .eabi_attribute 36, 1
+; CORTEX-A72: .eabi_attribute 38, 1
+; CORTEX-A72: .eabi_attribute 42, 1
+; CORTEX-A72-NOT: .eabi_attribute 44
+; CORTEX-A72: .eabi_attribute 68, 3
+
+; CORTEX-A72-FAST-NOT: .eabi_attribute 19
+;; The A72 has the ARMv8 FP unit, which always flushes preserving sign.
+; CORTEX-A72-FAST: .eabi_attribute 20, 2
+; CORTEX-A72-FAST-NOT: .eabi_attribute 21
+; CORTEX-A72-FAST-NOT: .eabi_attribute 22
+; CORTEX-A72-FAST: .eabi_attribute 23, 1
+
; RELOC-PIC: .eabi_attribute 15, 1
; RELOC-PIC: .eabi_attribute 16, 1
; RELOC-PIC: .eabi_attribute 17, 2
@@ -543,5 +1157,5 @@
; PCS-R9-RESERVE: .eabi_attribute 14, 3
define i32 @f(i64 %z) {
- ret i32 0
+ ret i32 0
}
diff --git a/test/CodeGen/ARM/coalesce-dbgvalue.ll b/test/CodeGen/ARM/coalesce-dbgvalue.ll
index 47d81a6..4e5fb5e 100644
--- a/test/CodeGen/ARM/coalesce-dbgvalue.ll
+++ b/test/CodeGen/ARM/coalesce-dbgvalue.ll
@@ -27,11 +27,11 @@ for.cond1: ; preds = %for.end9, %for.cond
for.body2: ; preds = %for.cond1
store i32 %storemerge11, i32* @b, align 4, !dbg !26
- tail call void @llvm.dbg.value(metadata !27, i64 0, metadata !11, metadata !{metadata !"0x102"}), !dbg !28
+ tail call void @llvm.dbg.value(metadata i32* null, i64 0, metadata !11, metadata !{!"0x102"}), !dbg !28
%0 = load i64* @a, align 8, !dbg !29
%xor = xor i64 %0, %e.1.ph, !dbg !29
%conv3 = trunc i64 %xor to i32, !dbg !29
- tail call void @llvm.dbg.value(metadata !{i32 %conv3}, i64 0, metadata !10, metadata !{metadata !"0x102"}), !dbg !29
+ tail call void @llvm.dbg.value(metadata i32 %conv3, i64 0, metadata !10, metadata !{!"0x102"}), !dbg !29
%tobool4 = icmp eq i32 %conv3, 0, !dbg !29
br i1 %tobool4, label %land.end, label %land.rhs, !dbg !29
@@ -79,33 +79,33 @@ attributes #3 = { nounwind }
!llvm.dbg.cu = !{!0}
!llvm.module.flags = !{!33}
-!0 = metadata !{metadata !"0x11\0012\00clang version 3.4 (trunk 182024) (llvm/trunk 182023)\001\00\000\00\000", metadata !1, metadata !2, metadata !2, metadata !3, metadata !15, metadata !2} ; [ DW_TAG_compile_unit ] [/d/b/pr16110.c] [DW_LANG_C99]
-!1 = metadata !{metadata !"pr16110.c", metadata !"/d/b"}
-!2 = metadata !{}
-!3 = metadata !{metadata !4}
-!4 = metadata !{metadata !"0x2e\00pr16110\00pr16110\00\007\000\001\000\006\000\001\007", metadata !1, metadata !5, metadata !6, null, i32 ()* @pr16110, null, null, metadata !9} ; [ DW_TAG_subprogram ] [line 7] [def] [pr16110]
-!5 = metadata !{metadata !"0x29", metadata !1} ; [ DW_TAG_file_type ] [/d/b/pr16110.c]
-!6 = metadata !{metadata !"0x15\00\000\000\000\000\000\000", i32 0, null, null, metadata !7, null, null, null} ; [ DW_TAG_subroutine_type ] [line 0, size 0, align 0, offset 0] [from ]
-!7 = metadata !{metadata !8}
-!8 = metadata !{metadata !"0x24\00int\000\0032\0032\000\000\005", null, null} ; [ DW_TAG_base_type ] [int] [line 0, size 32, align 32, offset 0, enc DW_ATE_signed]
-!9 = metadata !{metadata !10, metadata !11}
-!10 = metadata !{metadata !"0x100\00e\008\000", metadata !4, metadata !5, metadata !8} ; [ DW_TAG_auto_variable ] [e] [line 8]
-!11 = metadata !{metadata !"0x100\00f\0013\000", metadata !12, metadata !5, metadata !14} ; [ DW_TAG_auto_variable ] [f] [line 13]
-!12 = metadata !{metadata !"0xb\0012\000\002", metadata !1, metadata !13} ; [ DW_TAG_lexical_block ] [/d/b/pr16110.c]
-!13 = metadata !{metadata !"0xb\0012\000\001", metadata !1, metadata !4} ; [ DW_TAG_lexical_block ] [/d/b/pr16110.c]
-!14 = metadata !{metadata !"0xf\00\000\0032\0032\000\000", null, null, metadata !8} ; [ DW_TAG_pointer_type ] [line 0, size 32, align 32, offset 0] [from int]
-!15 = metadata !{metadata !16, metadata !18, metadata !19, metadata !20}
-!16 = metadata !{metadata !"0x34\00a\00a\00\001\000\001", null, metadata !5, metadata !17, i64* @a, null} ; [ DW_TAG_variable ] [a] [line 1] [def]
-!17 = metadata !{metadata !"0x24\00long long int\000\0064\0032\000\000\005", null, null} ; [ DW_TAG_base_type ] [long long int] [line 0, size 64, align 32, offset 0, enc DW_ATE_signed]
-!18 = metadata !{metadata !"0x34\00b\00b\00\002\000\001", null, metadata !5, metadata !8, i32* @b, null} ; [ DW_TAG_variable ] [b] [line 2] [def]
-!19 = metadata !{metadata !"0x34\00c\00c\00\003\000\001", null, metadata !5, metadata !8, i32* @c, null} ; [ DW_TAG_variable ] [c] [line 3] [def]
-!20 = metadata !{metadata !"0x34\00d\00d\00\004\000\001", null, metadata !5, metadata !8, i32* @d, null} ; [ DW_TAG_variable ] [d] [line 4] [def]
-!21 = metadata !{i32 10, i32 0, metadata !22, null}
-!22 = metadata !{metadata !"0xb\0010\000\000", metadata !1, metadata !4} ; [ DW_TAG_lexical_block ] [/d/b/pr16110.c]
-!26 = metadata !{i32 12, i32 0, metadata !13, null}
-!27 = metadata !{i32* null}
-!28 = metadata !{i32 13, i32 0, metadata !12, null}
-!29 = metadata !{i32 14, i32 0, metadata !12, null}
-!31 = metadata !{i32 16, i32 0, metadata !4, null}
-!32 = metadata !{i32 18, i32 0, metadata !4, null}
-!33 = metadata !{i32 1, metadata !"Debug Info Version", i32 2}
+!0 = !{!"0x11\0012\00clang version 3.4 (trunk 182024) (llvm/trunk 182023)\001\00\000\00\000", !1, !2, !2, !3, !15, !2} ; [ DW_TAG_compile_unit ] [/d/b/pr16110.c] [DW_LANG_C99]
+!1 = !{!"pr16110.c", !"/d/b"}
+!2 = !{}
+!3 = !{!4}
+!4 = !{!"0x2e\00pr16110\00pr16110\00\007\000\001\000\006\000\001\007", !1, !5, !6, null, i32 ()* @pr16110, null, null, !9} ; [ DW_TAG_subprogram ] [line 7] [def] [pr16110]
+!5 = !{!"0x29", !1} ; [ DW_TAG_file_type ] [/d/b/pr16110.c]
+!6 = !{!"0x15\00\000\000\000\000\000\000", i32 0, null, null, !7, null, null, null} ; [ DW_TAG_subroutine_type ] [line 0, size 0, align 0, offset 0] [from ]
+!7 = !{!8}
+!8 = !{!"0x24\00int\000\0032\0032\000\000\005", null, null} ; [ DW_TAG_base_type ] [int] [line 0, size 32, align 32, offset 0, enc DW_ATE_signed]
+!9 = !{!10, !11}
+!10 = !{!"0x100\00e\008\000", !4, !5, !8} ; [ DW_TAG_auto_variable ] [e] [line 8]
+!11 = !{!"0x100\00f\0013\000", !12, !5, !14} ; [ DW_TAG_auto_variable ] [f] [line 13]
+!12 = !{!"0xb\0012\000\002", !1, !13} ; [ DW_TAG_lexical_block ] [/d/b/pr16110.c]
+!13 = !{!"0xb\0012\000\001", !1, !4} ; [ DW_TAG_lexical_block ] [/d/b/pr16110.c]
+!14 = !{!"0xf\00\000\0032\0032\000\000", null, null, !8} ; [ DW_TAG_pointer_type ] [line 0, size 32, align 32, offset 0] [from int]
+!15 = !{!16, !18, !19, !20}
+!16 = !{!"0x34\00a\00a\00\001\000\001", null, !5, !17, i64* @a, null} ; [ DW_TAG_variable ] [a] [line 1] [def]
+!17 = !{!"0x24\00long long int\000\0064\0032\000\000\005", null, null} ; [ DW_TAG_base_type ] [long long int] [line 0, size 64, align 32, offset 0, enc DW_ATE_signed]
+!18 = !{!"0x34\00b\00b\00\002\000\001", null, !5, !8, i32* @b, null} ; [ DW_TAG_variable ] [b] [line 2] [def]
+!19 = !{!"0x34\00c\00c\00\003\000\001", null, !5, !8, i32* @c, null} ; [ DW_TAG_variable ] [c] [line 3] [def]
+!20 = !{!"0x34\00d\00d\00\004\000\001", null, !5, !8, i32* @d, null} ; [ DW_TAG_variable ] [d] [line 4] [def]
+!21 = !MDLocation(line: 10, scope: !22)
+!22 = !{!"0xb\0010\000\000", !1, !4} ; [ DW_TAG_lexical_block ] [/d/b/pr16110.c]
+!26 = !MDLocation(line: 12, scope: !13)
+!27 = !{i32* null}
+!28 = !MDLocation(line: 13, scope: !12)
+!29 = !MDLocation(line: 14, scope: !12)
+!31 = !MDLocation(line: 16, scope: !4)
+!32 = !MDLocation(line: 18, scope: !4)
+!33 = !{i32 1, !"Debug Info Version", i32 2}
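+; (The mechanical change in this hunk: the old `metadata !{...}` node
+; syntax becomes the bare `!{...}` form, and plain line/scope tuples
+; become !MDLocation nodes, e.g.
+;   old: !21 = metadata !{i32 10, i32 0, metadata !22, null}
+;   new: !21 = !MDLocation(line: 10, scope: !22)
+; )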
diff --git a/test/CodeGen/ARM/coalesce-subregs.ll b/test/CodeGen/ARM/coalesce-subregs.ll
index e7bd5f4..e4e3315 100644
--- a/test/CodeGen/ARM/coalesce-subregs.ll
+++ b/test/CodeGen/ARM/coalesce-subregs.ll
@@ -293,7 +293,6 @@ bb:
; CHECK: adjustCopiesBackFrom
; The shuffle in if.else3 must be preserved even though adjustCopiesBackFrom
; is tempted to remove it.
-; CHECK: %if.else3
; CHECK: vorr d
define internal void @adjustCopiesBackFrom(<2 x i64>* noalias nocapture sret %agg.result, <2 x i64> %in) {
entry:
diff --git a/test/CodeGen/ARM/crc32.ll b/test/CodeGen/ARM/crc32.ll
new file mode 100644
index 0000000..cc94330
--- /dev/null
+++ b/test/CodeGen/ARM/crc32.ll
@@ -0,0 +1,58 @@
+; RUN: llc -mtriple=thumbv8 -o - %s | FileCheck %s
+
+define i32 @test_crc32b(i32 %cur, i8 %next) {
+; CHECK-LABEL: test_crc32b:
+; CHECK: crc32b r0, r0, r1
+ %bits = zext i8 %next to i32
+ %val = call i32 @llvm.arm.crc32b(i32 %cur, i32 %bits)
+ ret i32 %val
+}
+
+define i32 @test_crc32h(i32 %cur, i16 %next) {
+; CHECK-LABEL: test_crc32h:
+; CHECK: crc32h r0, r0, r1
+ %bits = zext i16 %next to i32
+ %val = call i32 @llvm.arm.crc32h(i32 %cur, i32 %bits)
+ ret i32 %val
+}
+
+define i32 @test_crc32w(i32 %cur, i32 %next) {
+; CHECK-LABEL: test_crc32w:
+; CHECK: crc32w r0, r0, r1
+ %val = call i32 @llvm.arm.crc32w(i32 %cur, i32 %next)
+ ret i32 %val
+}
+
+define i32 @test_crc32cb(i32 %cur, i8 %next) {
+; CHECK-LABEL: test_crc32cb:
+; CHECK: crc32cb r0, r0, r1
+ %bits = zext i8 %next to i32
+ %val = call i32 @llvm.arm.crc32cb(i32 %cur, i32 %bits)
+ ret i32 %val
+}
+
+define i32 @test_crc32ch(i32 %cur, i16 %next) {
+; CHECK-LABEL: test_crc32ch:
+; CHECK: crc32ch r0, r0, r1
+ %bits = zext i16 %next to i32
+ %val = call i32 @llvm.arm.crc32ch(i32 %cur, i32 %bits)
+ ret i32 %val
+}
+
+define i32 @test_crc32cw(i32 %cur, i32 %next) {
+; CHECK-LABEL: test_crc32cw:
+; CHECK: crc32cw r0, r0, r1
+ %val = call i32 @llvm.arm.crc32cw(i32 %cur, i32 %next)
+ ret i32 %val
+}
+
+
+declare i32 @llvm.arm.crc32b(i32, i32)
+declare i32 @llvm.arm.crc32h(i32, i32)
+declare i32 @llvm.arm.crc32w(i32, i32)
+declare i32 @llvm.arm.crc32x(i32, i64)
+
+declare i32 @llvm.arm.crc32cb(i32, i32)
+declare i32 @llvm.arm.crc32ch(i32, i32)
+declare i32 @llvm.arm.crc32cw(i32, i32)
+declare i32 @llvm.arm.crc32cx(i32, i64)
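+
+; The crc32x/crc32cx declarations above take an i64 data operand. A test
+; for them (hypothetical here; this file only exercises the i32 forms)
+; would presumably see the i64 split into two 32-bit word steps on this
+; target, along the lines of:
+;
+; define i32 @test_crc32x(i32 %cur, i64 %next) {
+;   %val = call i32 @llvm.arm.crc32x(i32 %cur, i64 %next)
+;   ret i32 %val
+; }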
diff --git a/test/CodeGen/ARM/cse-ldrlit.ll b/test/CodeGen/ARM/cse-ldrlit.ll
index ea8c0ca..3f5d4c2 100644
--- a/test/CodeGen/ARM/cse-ldrlit.ll
+++ b/test/CodeGen/ARM/cse-ldrlit.ll
@@ -33,8 +33,8 @@ false:
; CHECK-ARM-PIC-LABEL: foo:
; CHECK-ARM-PIC: ldr [[VAR_OFFSET:r[0-9]+]], LCPI0_0
; CHECK-ARM-PIC: LPC0_0:
-; CHECK-ARM-PIC-NEXT: ldr r0, [pc, [[VAR_OFFSET]]]
-; CHECK-ARM-PIC: ldr {{r[1-9][0-9]?}}, [r0, #4]
+; CHECK-ARM-PIC-NEXT: add r0, pc, [[VAR_OFFSET]]
+; CHECK-ARM-PIC: ldr {{r[0-9]+}}, [r0, #4]
; CHECK-ARM-PIC: LCPI0_0:
; CHECK-ARM-PIC-NEXT: .long _var-(LPC0_0+8)
diff --git a/test/CodeGen/ARM/cse-libcalls.ll b/test/CodeGen/ARM/cse-libcalls.ll
index 62b9e43..4f5b759 100644
--- a/test/CodeGen/ARM/cse-libcalls.ll
+++ b/test/CodeGen/ARM/cse-libcalls.ll
@@ -1,9 +1,13 @@
-; RUN: llc < %s -march=arm | grep "bl.*__ltdf" | count 1
+; RUN: llc < %s -march=arm | FileCheck %s
+
target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:128:128"
target triple = "i386-apple-darwin8"
; Without CSE of libcalls, there are two calls in the output instead of one.
+; CHECK: bl ___ltdf
+; CHECK-NOT: bl ___ltdf
+
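+; (___ltdf2 is the soft-float runtime routine for an ordered double "<"
+; comparison; a reduced sketch of the pattern under test, assuming the
+; body below reaches the same comparison twice:
+;   %c1 = fcmp olt double %x, 1.0   ; lowers to bl ___ltdf2
+;   %c2 = fcmp olt double %x, 1.0   ; CSE should reuse the first call
+; )
+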
define double @u_f_nonbon(double %lambda) nounwind {
entry:
%tmp19.i.i = load double* null, align 4 ; <double> [#uses=2]
diff --git a/test/CodeGen/ARM/dagcombine-concatvector.ll b/test/CodeGen/ARM/dagcombine-concatvector.ll
index 62ed87f..80ef2ab 100644
--- a/test/CodeGen/ARM/dagcombine-concatvector.ll
+++ b/test/CodeGen/ARM/dagcombine-concatvector.ll
@@ -1,5 +1,5 @@
; RUN: llc < %s -mtriple=thumbv7s-apple-ios3.0.0 -mcpu=generic | FileCheck %s -check-prefix=CHECK -check-prefix=CHECK-LE
-; RUN: llc < %s -mtriple=thumbeb -mattr=v7,neon | FileCheck %s -check-prefix=CHECK -check-prefix=CHECK-BE
+; RUN: llc < %s -mtriple=thumbeb -target-abi apcs -mattr=v7,neon | FileCheck %s -check-prefix=CHECK -check-prefix=CHECK-BE
; PR15525
; CHECK-LABEL: test1:
diff --git a/test/CodeGen/ARM/debug-frame-vararg.ll b/test/CodeGen/ARM/debug-frame-vararg.ll
index ffc1a6a..65be2db 100644
--- a/test/CodeGen/ARM/debug-frame-vararg.ll
+++ b/test/CodeGen/ARM/debug-frame-vararg.ll
@@ -25,40 +25,40 @@
!llvm.module.flags = !{!9, !10}
!llvm.ident = !{!11}
-!0 = metadata !{metadata !"0x11\0012\00clang version 3.5 \000\00\000\00\000", metadata !1, metadata !2, metadata !2, metadata !3, metadata !2, metadata !2} ; [ DW_TAG_compile_unit ] [/tmp/var.c] [DW_LANG_C99]
-!1 = metadata !{metadata !"var.c", metadata !"/tmp"}
-!2 = metadata !{}
-!3 = metadata !{metadata !4}
-!4 = metadata !{metadata !"0x2e\00sum\00sum\00\005\000\001\000\006\00256\000\005", metadata !1, metadata !5, metadata !6, null, i32 (i32, ...)* @sum, null, null, metadata !2} ; [ DW_TAG_subprogram ] [line 5] [def] [sum]
-!5 = metadata !{metadata !"0x29", metadata !1} ; [ DW_TAG_file_type ] [/tmp/var.c]
-!6 = metadata !{metadata !"0x15\00\000\000\000\000\000\000", i32 0, null, null, metadata !7, null, null, null} ; [ DW_TAG_subroutine_type ] [line 0, size 0, align 0, offset 0] [from ]
-!7 = metadata !{metadata !8, metadata !8}
-!8 = metadata !{metadata !"0x24\00int\000\0032\0032\000\000\005", null, null} ; [ DW_TAG_base_type ] [int] [line 0, size 32, align 32, offset 0, enc DW_ATE_signed]
-!9 = metadata !{i32 2, metadata !"Dwarf Version", i32 4}
-!10 = metadata !{i32 1, metadata !"Debug Info Version", i32 2}
-!11 = metadata !{metadata !"clang version 3.5 "}
-!12 = metadata !{metadata !"0x101\00count\0016777221\000", metadata !4, metadata !5, metadata !8} ; [ DW_TAG_arg_variable ] [count] [line 5]
-!13 = metadata !{i32 5, i32 0, metadata !4, null}
-!14 = metadata !{metadata !"0x100\00vl\006\000", metadata !4, metadata !5, metadata !15} ; [ DW_TAG_auto_variable ] [vl] [line 6]
-!15 = metadata !{metadata !"0x16\00va_list\0030\000\000\000\000", metadata !16, null, metadata !17} ; [ DW_TAG_typedef ] [va_list] [line 30, size 0, align 0, offset 0] [from __builtin_va_list]
-!16 = metadata !{metadata !"/linux-x86_64-high/gcc_4.7.2/dbg/llvm/bin/../lib/clang/3.5/include/stdarg.h", metadata !"/tmp"}
-!17 = metadata !{metadata !"0x16\00__builtin_va_list\006\000\000\000\000", metadata !1, null, metadata !18} ; [ DW_TAG_typedef ] [__builtin_va_list] [line 6, size 0, align 0, offset 0] [from __va_list]
-!18 = metadata !{metadata !"0x13\00__va_list\006\0032\0032\000\000\000", metadata !1, null, null, metadata !19, null, null, null} ; [ DW_TAG_structure_type ] [__va_list] [line 6, size 32, align 32, offset 0] [def] [from ]
-!19 = metadata !{metadata !20}
-!20 = metadata !{metadata !"0xd\00__ap\006\0032\0032\000\000", metadata !1, metadata !18, metadata !21} ; [ DW_TAG_member ] [__ap] [line 6, size 32, align 32, offset 0] [from ]
-!21 = metadata !{metadata !"0xf\00\000\0032\0032\000\000", null, null, null} ; [ DW_TAG_pointer_type ] [line 0, size 32, align 32, offset 0] [from ]
-!22 = metadata !{i32 6, i32 0, metadata !4, null}
-!23 = metadata !{i32 7, i32 0, metadata !4, null}
-!24 = metadata !{metadata !"0x100\00sum\008\000", metadata !4, metadata !5, metadata !8} ; [ DW_TAG_auto_variable ] [sum] [line 8]
-!25 = metadata !{i32 8, i32 0, metadata !4, null}
-!26 = metadata !{metadata !"0x100\00i\009\000", metadata !27, metadata !5, metadata !8} ; [ DW_TAG_auto_variable ] [i] [line 9]
-!27 = metadata !{metadata !"0xb\009\000\000", metadata !1, metadata !4} ; [ DW_TAG_lexical_block ] [/tmp/var.c]
-!28 = metadata !{i32 9, i32 0, metadata !27, null}
-!29 = metadata !{i32 10, i32 0, metadata !30, null}
-!30 = metadata !{metadata !"0xb\009\000\001", metadata !1, metadata !27} ; [ DW_TAG_lexical_block ] [/tmp/var.c]
-!31 = metadata !{i32 11, i32 0, metadata !30, null}
-!32 = metadata !{i32 12, i32 0, metadata !4, null}
-!33 = metadata !{i32 13, i32 0, metadata !4, null}
+!0 = !{!"0x11\0012\00clang version 3.5 \000\00\000\00\000", !1, !2, !2, !3, !2, !2} ; [ DW_TAG_compile_unit ] [/tmp/var.c] [DW_LANG_C99]
+!1 = !{!"var.c", !"/tmp"}
+!2 = !{}
+!3 = !{!4}
+!4 = !{!"0x2e\00sum\00sum\00\005\000\001\000\006\00256\000\005", !1, !5, !6, null, i32 (i32, ...)* @sum, null, null, !2} ; [ DW_TAG_subprogram ] [line 5] [def] [sum]
+!5 = !{!"0x29", !1} ; [ DW_TAG_file_type ] [/tmp/var.c]
+!6 = !{!"0x15\00\000\000\000\000\000\000", i32 0, null, null, !7, null, null, null} ; [ DW_TAG_subroutine_type ] [line 0, size 0, align 0, offset 0] [from ]
+!7 = !{!8, !8}
+!8 = !{!"0x24\00int\000\0032\0032\000\000\005", null, null} ; [ DW_TAG_base_type ] [int] [line 0, size 32, align 32, offset 0, enc DW_ATE_signed]
+!9 = !{i32 2, !"Dwarf Version", i32 4}
+!10 = !{i32 1, !"Debug Info Version", i32 2}
+!11 = !{!"clang version 3.5 "}
+!12 = !{!"0x101\00count\0016777221\000", !4, !5, !8} ; [ DW_TAG_arg_variable ] [count] [line 5]
+!13 = !MDLocation(line: 5, scope: !4)
+!14 = !{!"0x100\00vl\006\000", !4, !5, !15} ; [ DW_TAG_auto_variable ] [vl] [line 6]
+!15 = !{!"0x16\00va_list\0030\000\000\000\000", !16, null, !17} ; [ DW_TAG_typedef ] [va_list] [line 30, size 0, align 0, offset 0] [from __builtin_va_list]
+!16 = !{!"/linux-x86_64-high/gcc_4.7.2/dbg/llvm/bin/../lib/clang/3.5/include/stdarg.h", !"/tmp"}
+!17 = !{!"0x16\00__builtin_va_list\006\000\000\000\000", !1, null, !18} ; [ DW_TAG_typedef ] [__builtin_va_list] [line 6, size 0, align 0, offset 0] [from __va_list]
+!18 = !{!"0x13\00__va_list\006\0032\0032\000\000\000", !1, null, null, !19, null, null, null} ; [ DW_TAG_structure_type ] [__va_list] [line 6, size 32, align 32, offset 0] [def] [from ]
+!19 = !{!20}
+!20 = !{!"0xd\00__ap\006\0032\0032\000\000", !1, !18, !21} ; [ DW_TAG_member ] [__ap] [line 6, size 32, align 32, offset 0] [from ]
+!21 = !{!"0xf\00\000\0032\0032\000\000", null, null, null} ; [ DW_TAG_pointer_type ] [line 0, size 32, align 32, offset 0] [from ]
+!22 = !MDLocation(line: 6, scope: !4)
+!23 = !MDLocation(line: 7, scope: !4)
+!24 = !{!"0x100\00sum\008\000", !4, !5, !8} ; [ DW_TAG_auto_variable ] [sum] [line 8]
+!25 = !MDLocation(line: 8, scope: !4)
+!26 = !{!"0x100\00i\009\000", !27, !5, !8} ; [ DW_TAG_auto_variable ] [i] [line 9]
+!27 = !{!"0xb\009\000\000", !1, !4} ; [ DW_TAG_lexical_block ] [/tmp/var.c]
+!28 = !MDLocation(line: 9, scope: !27)
+!29 = !MDLocation(line: 10, scope: !30)
+!30 = !{!"0xb\009\000\001", !1, !27} ; [ DW_TAG_lexical_block ] [/tmp/var.c]
+!31 = !MDLocation(line: 11, scope: !30)
+!32 = !MDLocation(line: 12, scope: !4)
+!33 = !MDLocation(line: 13, scope: !4)
; CHECK-FP-LABEL: sum
; CHECK-FP: .cfi_startproc
@@ -88,24 +88,22 @@
; CHECK-THUMB-FP: .cfi_startproc
; CHECK-THUMB-FP: sub sp, #16
; CHECK-THUMB-FP: .cfi_def_cfa_offset 16
-; CHECK-THUMB-FP: push {r4, r5, r7, lr}
-; CHECK-THUMB-FP: .cfi_def_cfa_offset 32
+; CHECK-THUMB-FP: push {r4, lr}
+; CHECK-THUMB-FP: .cfi_def_cfa_offset 24
; CHECK-THUMB-FP: .cfi_offset lr, -20
-; CHECK-THUMB-FP: .cfi_offset r7, -24
-; CHECK-THUMB-FP: .cfi_offset r5, -28
-; CHECK-THUMB-FP: .cfi_offset r4, -32
+; CHECK-THUMB-FP: .cfi_offset r4, -24
; CHECK-THUMB-FP: sub sp, #8
-; CHECK-THUMB-FP: .cfi_def_cfa_offset 40
+; CHECK-THUMB-FP: .cfi_def_cfa_offset 32
; CHECK-THUMB-FP-ELIM-LABEL: sum
; CHECK-THUMB-FP-ELIM: .cfi_startproc
; CHECK-THUMB-FP-ELIM: sub sp, #16
; CHECK-THUMB-FP-ELIM: .cfi_def_cfa_offset 16
-; CHECK-THUMB-FP-ELIM: push {r4, r5, r7, lr}
+; CHECK-THUMB-FP-ELIM: push {r4, r6, r7, lr}
; CHECK-THUMB-FP-ELIM: .cfi_def_cfa_offset 32
; CHECK-THUMB-FP-ELIM: .cfi_offset lr, -20
; CHECK-THUMB-FP-ELIM: .cfi_offset r7, -24
-; CHECK-THUMB-FP-ELIM: .cfi_offset r5, -28
+; CHECK-THUMB-FP-ELIM: .cfi_offset r6, -28
; CHECK-THUMB-FP-ELIM: .cfi_offset r4, -32
; CHECK-THUMB-FP-ELIM: add r7, sp, #8
; CHECK-THUMB-FP-ELIM: .cfi_def_cfa r7, 24
diff --git a/test/CodeGen/ARM/debug-frame.ll b/test/CodeGen/ARM/debug-frame.ll
index c6243ec..16e2c4c 100644
--- a/test/CodeGen/ARM/debug-frame.ll
+++ b/test/CodeGen/ARM/debug-frame.ll
@@ -128,41 +128,41 @@ declare void @_ZSt9terminatev()
!llvm.module.flags = !{!10, !11}
!llvm.ident = !{!12}
-!0 = metadata !{metadata !"0x11\004\00clang version 3.5 \000\00\000\00\000", metadata !1, metadata !2, metadata !2, metadata !3, metadata !2, metadata !2} ; [ DW_TAG_compile_unit ] [/tmp/exp.cpp] [DW_LANG_C_plus_plus]
-!1 = metadata !{metadata !"exp.cpp", metadata !"/tmp"}
-!2 = metadata !{}
-!3 = metadata !{metadata !4}
-!4 = metadata !{metadata !"0x2e\00test\00test\00_Z4testiiiiiddddd\004\000\001\000\006\00256\000\005", metadata !1, metadata !5, metadata !6, null, void (i32, i32, i32, i32, i32, double, double, double, double, double)* @_Z4testiiiiiddddd, null, null, metadata !2} ; [ DW_TAG_subprogram ] [line 4] [def] [scope 5] [test]
-!5 = metadata !{metadata !"0x29", metadata !1} ; [ DW_TAG_file_type ] [/tmp/exp.cpp]
-!6 = metadata !{metadata !"0x15\00\000\000\000\000\000\000", i32 0, null, null, metadata !7, null, null, null} ; [ DW_TAG_subroutine_type ] [line 0, size 0, align 0, offset 0] [from ]
-!7 = metadata !{null, metadata !8, metadata !8, metadata !8, metadata !8, metadata !8, metadata !9, metadata !9, metadata !9, metadata !9, metadata !9}
-!8 = metadata !{metadata !"0x24\00int\000\0032\0032\000\000\005", null, null} ; [ DW_TAG_base_type ] [int] [line 0, size 32, align 32, offset 0, enc DW_ATE_signed]
-!9 = metadata !{metadata !"0x24\00double\000\0064\0064\000\000\004", null, null} ; [ DW_TAG_base_type ] [double] [line 0, size 64, align 64, offset 0, enc DW_ATE_float]
-!10 = metadata !{i32 2, metadata !"Dwarf Version", i32 4}
-!11 = metadata !{i32 1, metadata !"Debug Info Version", i32 2}
-!12 = metadata !{metadata !"clang version 3.5 "}
-!13 = metadata !{metadata !"0x101\00a\0016777220\000", metadata !4, metadata !5, metadata !8} ; [ DW_TAG_arg_variable ] [a] [line 4]
-!14 = metadata !{i32 4, i32 0, metadata !4, null}
-!15 = metadata !{metadata !"0x101\00b\0033554436\000", metadata !4, metadata !5, metadata !8} ; [ DW_TAG_arg_variable ] [b] [line 4]
-!16 = metadata !{metadata !"0x101\00c\0050331652\000", metadata !4, metadata !5, metadata !8} ; [ DW_TAG_arg_variable ] [c] [line 4]
-!17 = metadata !{metadata !"0x101\00d\0067108868\000", metadata !4, metadata !5, metadata !8} ; [ DW_TAG_arg_variable ] [d] [line 4]
-!18 = metadata !{metadata !"0x101\00e\0083886084\000", metadata !4, metadata !5, metadata !8} ; [ DW_TAG_arg_variable ] [e] [line 4]
-!19 = metadata !{metadata !"0x101\00m\00100663301\000", metadata !4, metadata !5, metadata !9} ; [ DW_TAG_arg_variable ] [m] [line 5]
-!20 = metadata !{i32 5, i32 0, metadata !4, null}
-!21 = metadata !{metadata !"0x101\00n\00117440517\000", metadata !4, metadata !5, metadata !9} ; [ DW_TAG_arg_variable ] [n] [line 5]
-!22 = metadata !{metadata !"0x101\00p\00134217733\000", metadata !4, metadata !5, metadata !9} ; [ DW_TAG_arg_variable ] [p] [line 5]
-!23 = metadata !{metadata !"0x101\00q\00150994949\000", metadata !4, metadata !5, metadata !9} ; [ DW_TAG_arg_variable ] [q] [line 5]
-!24 = metadata !{metadata !"0x101\00r\00167772165\000", metadata !4, metadata !5, metadata !9} ; [ DW_TAG_arg_variable ] [r] [line 5]
-!25 = metadata !{i32 7, i32 0, metadata !26, null}
-!26 = metadata !{metadata !"0xb\006\000\000", metadata !1, metadata !4} ; [ DW_TAG_lexical_block ] [/tmp/exp.cpp]
-!27 = metadata !{i32 8, i32 0, metadata !26, null}
-!28 = metadata !{i32 11, i32 0, metadata !26, null}
-!29 = metadata !{i32 9, i32 0, metadata !30, null}
-!30 = metadata !{metadata !"0xb\008\000\001", metadata !1, metadata !4} ; [ DW_TAG_lexical_block ] [/tmp/exp.cpp]
-!31 = metadata !{i32 10, i32 0, metadata !30, null}
-!32 = metadata !{i32 10, i32 0, metadata !4, null}
-!33 = metadata !{i32 11, i32 0, metadata !4, null}
-!34 = metadata !{i32 11, i32 0, metadata !30, null}
+!0 = !{!"0x11\004\00clang version 3.5 \000\00\000\00\000", !1, !2, !2, !3, !2, !2} ; [ DW_TAG_compile_unit ] [/tmp/exp.cpp] [DW_LANG_C_plus_plus]
+!1 = !{!"exp.cpp", !"/tmp"}
+!2 = !{}
+!3 = !{!4}
+!4 = !{!"0x2e\00test\00test\00_Z4testiiiiiddddd\004\000\001\000\006\00256\000\005", !1, !5, !6, null, void (i32, i32, i32, i32, i32, double, double, double, double, double)* @_Z4testiiiiiddddd, null, null, !2} ; [ DW_TAG_subprogram ] [line 4] [def] [scope 5] [test]
+!5 = !{!"0x29", !1} ; [ DW_TAG_file_type ] [/tmp/exp.cpp]
+!6 = !{!"0x15\00\000\000\000\000\000\000", i32 0, null, null, !7, null, null, null} ; [ DW_TAG_subroutine_type ] [line 0, size 0, align 0, offset 0] [from ]
+!7 = !{null, !8, !8, !8, !8, !8, !9, !9, !9, !9, !9}
+!8 = !{!"0x24\00int\000\0032\0032\000\000\005", null, null} ; [ DW_TAG_base_type ] [int] [line 0, size 32, align 32, offset 0, enc DW_ATE_signed]
+!9 = !{!"0x24\00double\000\0064\0064\000\000\004", null, null} ; [ DW_TAG_base_type ] [double] [line 0, size 64, align 64, offset 0, enc DW_ATE_float]
+!10 = !{i32 2, !"Dwarf Version", i32 4}
+!11 = !{i32 1, !"Debug Info Version", i32 2}
+!12 = !{!"clang version 3.5 "}
+!13 = !{!"0x101\00a\0016777220\000", !4, !5, !8} ; [ DW_TAG_arg_variable ] [a] [line 4]
+!14 = !MDLocation(line: 4, scope: !4)
+!15 = !{!"0x101\00b\0033554436\000", !4, !5, !8} ; [ DW_TAG_arg_variable ] [b] [line 4]
+!16 = !{!"0x101\00c\0050331652\000", !4, !5, !8} ; [ DW_TAG_arg_variable ] [c] [line 4]
+!17 = !{!"0x101\00d\0067108868\000", !4, !5, !8} ; [ DW_TAG_arg_variable ] [d] [line 4]
+!18 = !{!"0x101\00e\0083886084\000", !4, !5, !8} ; [ DW_TAG_arg_variable ] [e] [line 4]
+!19 = !{!"0x101\00m\00100663301\000", !4, !5, !9} ; [ DW_TAG_arg_variable ] [m] [line 5]
+!20 = !MDLocation(line: 5, scope: !4)
+!21 = !{!"0x101\00n\00117440517\000", !4, !5, !9} ; [ DW_TAG_arg_variable ] [n] [line 5]
+!22 = !{!"0x101\00p\00134217733\000", !4, !5, !9} ; [ DW_TAG_arg_variable ] [p] [line 5]
+!23 = !{!"0x101\00q\00150994949\000", !4, !5, !9} ; [ DW_TAG_arg_variable ] [q] [line 5]
+!24 = !{!"0x101\00r\00167772165\000", !4, !5, !9} ; [ DW_TAG_arg_variable ] [r] [line 5]
+!25 = !MDLocation(line: 7, scope: !26)
+!26 = !{!"0xb\006\000\000", !1, !4} ; [ DW_TAG_lexical_block ] [/tmp/exp.cpp]
+!27 = !MDLocation(line: 8, scope: !26)
+!28 = !MDLocation(line: 11, scope: !26)
+!29 = !MDLocation(line: 9, scope: !30)
+!30 = !{!"0xb\008\000\001", !1, !4} ; [ DW_TAG_lexical_block ] [/tmp/exp.cpp]
+!31 = !MDLocation(line: 10, scope: !30)
+!32 = !MDLocation(line: 10, scope: !4)
+!33 = !MDLocation(line: 11, scope: !4)
+!34 = !MDLocation(line: 11, scope: !30)
; CHECK-FP-LABEL: _Z4testiiiiiddddd:
; CHECK-FP: .cfi_startproc
diff --git a/test/CodeGen/ARM/debug-info-arg.ll b/test/CodeGen/ARM/debug-info-arg.ll
index 34e9938..8679589 100644
--- a/test/CodeGen/ARM/debug-info-arg.ll
+++ b/test/CodeGen/ARM/debug-info-arg.ll
@@ -7,13 +7,13 @@ target triple = "thumbv7-apple-ios"
%struct.tag_s = type { i32, i32, i32 }
define void @foo(%struct.tag_s* nocapture %this, %struct.tag_s* %c, i64 %x, i64 %y, %struct.tag_s* nocapture %ptr1, %struct.tag_s* nocapture %ptr2) nounwind ssp {
- tail call void @llvm.dbg.value(metadata !{%struct.tag_s* %this}, i64 0, metadata !5, metadata !{metadata !"0x102"}), !dbg !20
- tail call void @llvm.dbg.value(metadata !{%struct.tag_s* %c}, i64 0, metadata !13, metadata !{metadata !"0x102"}), !dbg !21
- tail call void @llvm.dbg.value(metadata !{i64 %x}, i64 0, metadata !14, metadata !{metadata !"0x102"}), !dbg !22
- tail call void @llvm.dbg.value(metadata !{i64 %y}, i64 0, metadata !17, metadata !{metadata !"0x102"}), !dbg !23
+ tail call void @llvm.dbg.value(metadata %struct.tag_s* %this, i64 0, metadata !5, metadata !{!"0x102"}), !dbg !20
+ tail call void @llvm.dbg.value(metadata %struct.tag_s* %c, i64 0, metadata !13, metadata !{!"0x102"}), !dbg !21
+ tail call void @llvm.dbg.value(metadata i64 %x, i64 0, metadata !14, metadata !{!"0x102"}), !dbg !22
+ tail call void @llvm.dbg.value(metadata i64 %y, i64 0, metadata !17, metadata !{!"0x102"}), !dbg !23
;CHECK: @DEBUG_VALUE: foo:y <- [R7+8]
- tail call void @llvm.dbg.value(metadata !{%struct.tag_s* %ptr1}, i64 0, metadata !18, metadata !{metadata !"0x102"}), !dbg !24
- tail call void @llvm.dbg.value(metadata !{%struct.tag_s* %ptr2}, i64 0, metadata !19, metadata !{metadata !"0x102"}), !dbg !25
+ tail call void @llvm.dbg.value(metadata %struct.tag_s* %ptr1, i64 0, metadata !18, metadata !{!"0x102"}), !dbg !24
+ tail call void @llvm.dbg.value(metadata %struct.tag_s* %ptr2, i64 0, metadata !19, metadata !{!"0x102"}), !dbg !25
%1 = icmp eq %struct.tag_s* %c, null, !dbg !26
br i1 %1, label %3, label %2, !dbg !26
@@ -32,37 +32,37 @@ declare void @llvm.dbg.value(metadata, i64, metadata, metadata) nounwind readnon
!llvm.dbg.cu = !{!0}
!llvm.module.flags = !{!33}
-!0 = metadata !{metadata !"0x11\0012\00Apple clang version 3.0 (tags/Apple/clang-211.10.1) (based on LLVM 3.0svn)\001\00\000\00\001", metadata !32, metadata !4, metadata !4, metadata !30, null, null} ; [ DW_TAG_compile_unit ]
-!1 = metadata !{metadata !"0x2e\00foo\00foo\00\0011\000\001\000\006\00256\001\0011", metadata !2, metadata !2, metadata !3, null, void (%struct.tag_s*, %struct.tag_s*, i64, i64, %struct.tag_s*, %struct.tag_s*)* @foo, null, null, metadata !31} ; [ DW_TAG_subprogram ] [line 11] [def] [foo]
-!2 = metadata !{metadata !"0x29", metadata !32} ; [ DW_TAG_file_type ]
-!3 = metadata !{metadata !"0x15\00\000\000\000\000\000\000", metadata !32, metadata !2, null, metadata !4, null, null, null} ; [ DW_TAG_subroutine_type ] [line 0, size 0, align 0, offset 0] [from ]
-!4 = metadata !{null}
-!5 = metadata !{metadata !"0x101\00this\0016777227\000", metadata !1, metadata !2, metadata !6} ; [ DW_TAG_arg_variable ]
-!6 = metadata !{metadata !"0xf\00\000\0032\0032\000\000", null, metadata !0, metadata !7} ; [ DW_TAG_pointer_type ]
-!7 = metadata !{metadata !"0x13\00tag_s\005\0096\0032\000\000\000", metadata !32, metadata !0, null, metadata !8, null, null, null} ; [ DW_TAG_structure_type ] [tag_s] [line 5, size 96, align 32, offset 0] [def] [from ]
-!8 = metadata !{metadata !9, metadata !11, metadata !12}
-!9 = metadata !{metadata !"0xd\00x\006\0032\0032\000\000", metadata !32, metadata !7, metadata !10} ; [ DW_TAG_member ]
-!10 = metadata !{metadata !"0x24\00int\000\0032\0032\000\000\005", null, metadata !0} ; [ DW_TAG_base_type ]
-!11 = metadata !{metadata !"0xd\00y\007\0032\0032\0032\000", metadata !32, metadata !7, metadata !10} ; [ DW_TAG_member ]
-!12 = metadata !{metadata !"0xd\00z\008\0032\0032\0064\000", metadata !32, metadata !7, metadata !10} ; [ DW_TAG_member ]
-!13 = metadata !{metadata !"0x101\00c\0033554443\000", metadata !1, metadata !2, metadata !6} ; [ DW_TAG_arg_variable ]
-!14 = metadata !{metadata !"0x101\00x\0050331659\000", metadata !1, metadata !2, metadata !15} ; [ DW_TAG_arg_variable ]
-!15 = metadata !{metadata !"0x16\00UInt64\001\000\000\000\000", metadata !32, metadata !0, metadata !16} ; [ DW_TAG_typedef ]
-!16 = metadata !{metadata !"0x24\00long long unsigned int\000\0064\0032\000\000\007", null, metadata !0} ; [ DW_TAG_base_type ]
-!17 = metadata !{metadata !"0x101\00y\0067108875\000", metadata !1, metadata !2, metadata !15} ; [ DW_TAG_arg_variable ]
-!18 = metadata !{metadata !"0x101\00ptr1\0083886091\000", metadata !1, metadata !2, metadata !6} ; [ DW_TAG_arg_variable ]
-!19 = metadata !{metadata !"0x101\00ptr2\00100663307\000", metadata !1, metadata !2, metadata !6} ; [ DW_TAG_arg_variable ]
-!20 = metadata !{i32 11, i32 24, metadata !1, null}
-!21 = metadata !{i32 11, i32 44, metadata !1, null}
-!22 = metadata !{i32 11, i32 54, metadata !1, null}
-!23 = metadata !{i32 11, i32 64, metadata !1, null}
-!24 = metadata !{i32 11, i32 81, metadata !1, null}
-!25 = metadata !{i32 11, i32 101, metadata !1, null}
-!26 = metadata !{i32 12, i32 3, metadata !27, null}
-!27 = metadata !{metadata !"0xb\0011\00107\000", metadata !2, metadata !1} ; [ DW_TAG_lexical_block ]
-!28 = metadata !{i32 13, i32 5, metadata !27, null}
-!29 = metadata !{i32 14, i32 1, metadata !27, null}
-!30 = metadata !{metadata !1}
-!31 = metadata !{metadata !5, metadata !13, metadata !14, metadata !17, metadata !18, metadata!19}
-!32 = metadata !{metadata !"one.c", metadata !"/Volumes/Athwagate/R10048772"}
-!33 = metadata !{i32 1, metadata !"Debug Info Version", i32 2}
+!0 = !{!"0x11\0012\00Apple clang version 3.0 (tags/Apple/clang-211.10.1) (based on LLVM 3.0svn)\001\00\000\00\001", !32, !4, !4, !30, null, null} ; [ DW_TAG_compile_unit ]
+!1 = !{!"0x2e\00foo\00foo\00\0011\000\001\000\006\00256\001\0011", !2, !2, !3, null, void (%struct.tag_s*, %struct.tag_s*, i64, i64, %struct.tag_s*, %struct.tag_s*)* @foo, null, null, !31} ; [ DW_TAG_subprogram ] [line 11] [def] [foo]
+!2 = !{!"0x29", !32} ; [ DW_TAG_file_type ]
+!3 = !{!"0x15\00\000\000\000\000\000\000", !32, !2, null, !4, null, null, null} ; [ DW_TAG_subroutine_type ] [line 0, size 0, align 0, offset 0] [from ]
+!4 = !{null}
+!5 = !{!"0x101\00this\0016777227\000", !1, !2, !6} ; [ DW_TAG_arg_variable ]
+!6 = !{!"0xf\00\000\0032\0032\000\000", null, !0, !7} ; [ DW_TAG_pointer_type ]
+!7 = !{!"0x13\00tag_s\005\0096\0032\000\000\000", !32, !0, null, !8, null, null, null} ; [ DW_TAG_structure_type ] [tag_s] [line 5, size 96, align 32, offset 0] [def] [from ]
+!8 = !{!9, !11, !12}
+!9 = !{!"0xd\00x\006\0032\0032\000\000", !32, !7, !10} ; [ DW_TAG_member ]
+!10 = !{!"0x24\00int\000\0032\0032\000\000\005", null, !0} ; [ DW_TAG_base_type ]
+!11 = !{!"0xd\00y\007\0032\0032\0032\000", !32, !7, !10} ; [ DW_TAG_member ]
+!12 = !{!"0xd\00z\008\0032\0032\0064\000", !32, !7, !10} ; [ DW_TAG_member ]
+!13 = !{!"0x101\00c\0033554443\000", !1, !2, !6} ; [ DW_TAG_arg_variable ]
+!14 = !{!"0x101\00x\0050331659\000", !1, !2, !15} ; [ DW_TAG_arg_variable ]
+!15 = !{!"0x16\00UInt64\001\000\000\000\000", !32, !0, !16} ; [ DW_TAG_typedef ]
+!16 = !{!"0x24\00long long unsigned int\000\0064\0032\000\000\007", null, !0} ; [ DW_TAG_base_type ]
+!17 = !{!"0x101\00y\0067108875\000", !1, !2, !15} ; [ DW_TAG_arg_variable ]
+!18 = !{!"0x101\00ptr1\0083886091\000", !1, !2, !6} ; [ DW_TAG_arg_variable ]
+!19 = !{!"0x101\00ptr2\00100663307\000", !1, !2, !6} ; [ DW_TAG_arg_variable ]
+!20 = !MDLocation(line: 11, column: 24, scope: !1)
+!21 = !MDLocation(line: 11, column: 44, scope: !1)
+!22 = !MDLocation(line: 11, column: 54, scope: !1)
+!23 = !MDLocation(line: 11, column: 64, scope: !1)
+!24 = !MDLocation(line: 11, column: 81, scope: !1)
+!25 = !MDLocation(line: 11, column: 101, scope: !1)
+!26 = !MDLocation(line: 12, column: 3, scope: !27)
+!27 = !{!"0xb\0011\00107\000", !2, !1} ; [ DW_TAG_lexical_block ]
+!28 = !MDLocation(line: 13, column: 5, scope: !27)
+!29 = !MDLocation(line: 14, column: 1, scope: !27)
+!30 = !{!1}
+!31 = !{!5, !13, !14, !17, !18, !19}
+!32 = !{!"one.c", !"/Volumes/Athwagate/R10048772"}
+!33 = !{i32 1, !"Debug Info Version", i32 2}
diff --git a/test/CodeGen/ARM/debug-info-blocks.ll b/test/CodeGen/ARM/debug-info-blocks.ll
index 3623927..3bf6ad9 100644
--- a/test/CodeGen/ARM/debug-info-blocks.ll
+++ b/test/CodeGen/ARM/debug-info-blocks.ll
@@ -31,22 +31,22 @@ define hidden void @foobar_func_block_invoke_0(i8* %.block_descriptor, %0* %load
%1 = alloca %0*, align 4
%bounds = alloca %struct.CR, align 4
%data = alloca %struct.CR, align 4
- call void @llvm.dbg.value(metadata !{i8* %.block_descriptor}, i64 0, metadata !27, metadata !{metadata !"0x102"}), !dbg !129
+ call void @llvm.dbg.value(metadata i8* %.block_descriptor, i64 0, metadata !27, metadata !{!"0x102"}), !dbg !129
store %0* %loadedMydata, %0** %1, align 4
- call void @llvm.dbg.declare(metadata !{%0** %1}, metadata !130, metadata !{metadata !"0x102"}), !dbg !131
+ call void @llvm.dbg.declare(metadata %0** %1, metadata !130, metadata !{!"0x102"}), !dbg !131
%2 = bitcast %struct.CR* %bounds to %1*
%3 = getelementptr %1* %2, i32 0, i32 0
store [4 x i32] %bounds.coerce0, [4 x i32]* %3
- call void @llvm.dbg.declare(metadata !{%struct.CR* %bounds}, metadata !132, metadata !{metadata !"0x102"}), !dbg !133
+ call void @llvm.dbg.declare(metadata %struct.CR* %bounds, metadata !132, metadata !{!"0x102"}), !dbg !133
%4 = bitcast %struct.CR* %data to %1*
%5 = getelementptr %1* %4, i32 0, i32 0
store [4 x i32] %data.coerce0, [4 x i32]* %5
- call void @llvm.dbg.declare(metadata !{%struct.CR* %data}, metadata !134, metadata !{metadata !"0x102"}), !dbg !135
+ call void @llvm.dbg.declare(metadata %struct.CR* %data, metadata !134, metadata !{!"0x102"}), !dbg !135
%6 = bitcast i8* %.block_descriptor to %2*
%7 = getelementptr inbounds %2* %6, i32 0, i32 6
- call void @llvm.dbg.declare(metadata !{%2* %6}, metadata !136, metadata !163), !dbg !137
- call void @llvm.dbg.declare(metadata !{%2* %6}, metadata !138, metadata !164), !dbg !137
- call void @llvm.dbg.declare(metadata !{%2* %6}, metadata !139, metadata !165), !dbg !140
+ call void @llvm.dbg.declare(metadata %2* %6, metadata !136, metadata !163), !dbg !137
+ call void @llvm.dbg.declare(metadata %2* %6, metadata !138, metadata !164), !dbg !137
+ call void @llvm.dbg.declare(metadata %2* %6, metadata !139, metadata !165), !dbg !140
%8 = load %0** %1, align 4, !dbg !141
%9 = load i8** @"\01L_OBJC_SELECTOR_REFERENCES_13", !dbg !141
%10 = bitcast %0* %8 to i8*, !dbg !141
@@ -95,169 +95,169 @@ define hidden void @foobar_func_block_invoke_0(i8* %.block_descriptor, %0* %load
!llvm.dbg.cu = !{!0}
!llvm.module.flags = !{!162}
-!0 = metadata !{metadata !"0x11\0016\00Apple clang version 2.1\000\00\002\00\001", metadata !153, metadata !147, metadata !26, metadata !148, null, null} ; [ DW_TAG_compile_unit ]
-!1 = metadata !{metadata !"0x4\00\00248\0032\0032\000\000\000", metadata !160, metadata !0, null, metadata !3, null, null, null} ; [ DW_TAG_enumeration_type ] [line 248, size 32, align 32, offset 0] [def] [from ]
-!2 = metadata !{metadata !"0x29", metadata !160} ; [ DW_TAG_file_type ]
-!3 = metadata !{metadata !4}
-!4 = metadata !{metadata !"0x28\00Ver1\000"} ; [ DW_TAG_enumerator ]
-!5 = metadata !{metadata !"0x4\00Mode\0079\0032\0032\000\000\000", metadata !160, metadata !0, null, metadata !7, null, null, null} ; [ DW_TAG_enumeration_type ] [Mode] [line 79, size 32, align 32, offset 0] [def] [from ]
-!6 = metadata !{metadata !"0x29", metadata !161} ; [ DW_TAG_file_type ]
-!7 = metadata !{metadata !8}
-!8 = metadata !{metadata !"0x28\00One\000"} ; [ DW_TAG_enumerator ]
-!9 = metadata !{metadata !"0x4\00\0015\0032\0032\000\000\000", metadata !149, metadata !0, null, metadata !11, null, null, null} ; [ DW_TAG_enumeration_type ] [line 15, size 32, align 32, offset 0] [def] [from ]
-!10 = metadata !{metadata !"0x29", metadata !149} ; [ DW_TAG_file_type ]
-!11 = metadata !{metadata !12, metadata !13}
-!12 = metadata !{metadata !"0x28\00Unknown\000"} ; [ DW_TAG_enumerator ]
-!13 = metadata !{metadata !"0x28\00Known\001"} ; [ DW_TAG_enumerator ]
-!14 = metadata !{metadata !"0x4\00\0020\0032\0032\000\000\000", metadata !150, metadata !0, null, metadata !16, null, null, null} ; [ DW_TAG_enumeration_type ] [line 20, size 32, align 32, offset 0] [def] [from ]
-!15 = metadata !{metadata !"0x29", metadata !150} ; [ DW_TAG_file_type ]
-!16 = metadata !{metadata !17, metadata !18}
-!17 = metadata !{metadata !"0x28\00Single\000"} ; [ DW_TAG_enumerator ]
-!18 = metadata !{metadata !"0x28\00Double\001"} ; [ DW_TAG_enumerator ]
-!19 = metadata !{metadata !"0x4\00\0014\0032\0032\000\000\000", metadata !151, metadata !0, null, metadata !21, null, null, null} ; [ DW_TAG_enumeration_type ] [line 14, size 32, align 32, offset 0] [def] [from ]
-!20 = metadata !{metadata !"0x29", metadata !151} ; [ DW_TAG_file_type ]
-!21 = metadata !{metadata !22}
-!22 = metadata !{metadata !"0x28\00Eleven\000"} ; [ DW_TAG_enumerator ]
-!23 = metadata !{metadata !"0x2e\00foobar_func_block_invoke_0\00foobar_func_block_invoke_0\00\00609\001\001\000\006\00256\000\00609", metadata !152, metadata !24, metadata !25, null, void (i8*, %0*, [4 x i32], [4 x i32])* @foobar_func_block_invoke_0, null, null, null} ; [ DW_TAG_subprogram ] [line 609] [local] [def] [foobar_func_block_invoke_0]
-!24 = metadata !{metadata !"0x29", metadata !152} ; [ DW_TAG_file_type ]
-!25 = metadata !{metadata !"0x15\00\000\000\000\000\000\000", metadata !152, metadata !24, null, metadata !26, null, null, null} ; [ DW_TAG_subroutine_type ] [line 0, size 0, align 0, offset 0] [from ]
-!26 = metadata !{null}
-!27 = metadata !{metadata !"0x101\00.block_descriptor\0016777825\0064", metadata !23, metadata !24, metadata !28} ; [ DW_TAG_arg_variable ]
-!28 = metadata !{metadata !"0xf\00\000\0032\000\000\000", null, metadata !0, metadata !29} ; [ DW_TAG_pointer_type ]
-!29 = metadata !{metadata !"0x13\00__block_literal_14\00609\00256\0032\000\000\000", metadata !152, metadata !24, null, metadata !30, null, null, null} ; [ DW_TAG_structure_type ] [__block_literal_14] [line 609, size 256, align 32, offset 0] [def] [from ]
-!30 = metadata !{metadata !31, metadata !33, metadata !35, metadata !36, metadata !37, metadata !48, metadata !89, metadata !124}
-!31 = metadata !{metadata !"0xd\00__isa\00609\0032\0032\000\000", metadata !152, metadata !24, metadata !32} ; [ DW_TAG_member ]
-!32 = metadata !{metadata !"0xf\00\000\0032\0032\000\000", null, metadata !0, null} ; [ DW_TAG_pointer_type ]
-!33 = metadata !{metadata !"0xd\00__flags\00609\0032\0032\0032\000", metadata !152, metadata !24, metadata !34} ; [ DW_TAG_member ]
-!34 = metadata !{metadata !"0x24\00int\000\0032\0032\000\000\005", null, metadata !0} ; [ DW_TAG_base_type ]
-!35 = metadata !{metadata !"0xd\00__reserved\00609\0032\0032\0064\000", metadata !152, metadata !24, metadata !34} ; [ DW_TAG_member ]
-!36 = metadata !{metadata !"0xd\00__FuncPtr\00609\0032\0032\0096\000", metadata !152, metadata !24, metadata !32} ; [ DW_TAG_member ]
-!37 = metadata !{metadata !"0xd\00__descriptor\00609\0032\0032\00128\000", metadata !152, metadata !24, metadata !38} ; [ DW_TAG_member ]
-!38 = metadata !{metadata !"0xf\00\000\0032\0032\000\000", null, metadata !0, metadata !39} ; [ DW_TAG_pointer_type ]
-!39 = metadata !{metadata !"0x13\00__block_descriptor_withcopydispose\00307\00128\0032\000\000\000", metadata !153, metadata !0, null, metadata !41, null, null, null} ; [ DW_TAG_structure_type ] [__block_descriptor_withcopydispose] [line 307, size 128, align 32, offset 0] [def] [from ]
-!40 = metadata !{metadata !"0x29", metadata !153} ; [ DW_TAG_file_type ]
-!41 = metadata !{metadata !42, metadata !44, metadata !45, metadata !47}
-!42 = metadata !{metadata !"0xd\00reserved\00307\0032\0032\000\000", metadata !153, metadata !40, metadata !43} ; [ DW_TAG_member ]
-!43 = metadata !{metadata !"0x24\00long unsigned int\000\0032\0032\000\000\007", null, metadata !0} ; [ DW_TAG_base_type ]
-!44 = metadata !{metadata !"0xd\00Size\00307\0032\0032\0032\000", metadata !153, metadata !40, metadata !43} ; [ DW_TAG_member ]
-!45 = metadata !{metadata !"0xd\00CopyFuncPtr\00307\0032\0032\0064\000", metadata !153, metadata !40, metadata !46} ; [ DW_TAG_member ]
-!46 = metadata !{metadata !"0xf\00\000\0032\0032\000\000", null, metadata !0, metadata !32} ; [ DW_TAG_pointer_type ]
-!47 = metadata !{metadata !"0xd\00DestroyFuncPtr\00307\0032\0032\0096\000", metadata !153, metadata !40, metadata !46} ; [ DW_TAG_member ]
-!48 = metadata !{metadata !"0xd\00mydata\00609\0032\0032\00160\000", metadata !152, metadata !24, metadata !49} ; [ DW_TAG_member ]
-!49 = metadata !{metadata !"0xf\00\000\0032\000\000\000", null, metadata !0, metadata !50} ; [ DW_TAG_pointer_type ]
-!50 = metadata !{metadata !"0x13\00\000\00224\000\000\0016\000", metadata !152, metadata !24, null, metadata !51, null, null, null} ; [ DW_TAG_structure_type ] [line 0, size 224, align 0, offset 0] [def] [from ]
-!51 = metadata !{metadata !52, metadata !53, metadata !54, metadata !55, metadata !56, metadata !57, metadata !58}
-!52 = metadata !{metadata !"0xd\00__isa\000\0032\0032\000\000", metadata !152, metadata !24, metadata !32} ; [ DW_TAG_member ]
-!53 = metadata !{metadata !"0xd\00__forwarding\000\0032\0032\0032\000", metadata !152, metadata !24, metadata !32} ; [ DW_TAG_member ]
-!54 = metadata !{metadata !"0xd\00__flags\000\0032\0032\0064\000", metadata !152, metadata !24, metadata !34} ; [ DW_TAG_member ]
-!55 = metadata !{metadata !"0xd\00__size\000\0032\0032\0096\000", metadata !152, metadata !24, metadata !34} ; [ DW_TAG_member ]
-!56 = metadata !{metadata !"0xd\00__copy_helper\000\0032\0032\00128\000", metadata !152, metadata !24, metadata !32} ; [ DW_TAG_member ]
-!57 = metadata !{metadata !"0xd\00__destroy_helper\000\0032\0032\00160\000", metadata !152, metadata !24, metadata !32} ; [ DW_TAG_member ]
-!58 = metadata !{metadata !"0xd\00mydata\000\0032\0032\00192\000", metadata !152, metadata !24, metadata !59} ; [ DW_TAG_member ]
-!59 = metadata !{metadata !"0xf\00\000\0032\0032\000\000", null, metadata !0, metadata !60} ; [ DW_TAG_pointer_type ]
-!60 = metadata !{metadata !"0x13\00UIMydata\0026\00128\0032\000\000\0016", metadata !154, metadata !24, null, metadata !62, null, null, null} ; [ DW_TAG_structure_type ] [UIMydata] [line 26, size 128, align 32, offset 0] [def] [from ]
-!61 = metadata !{metadata !"0x29", metadata !154} ; [ DW_TAG_file_type ]
-!62 = metadata !{metadata !63, metadata !71, metadata !75, metadata !79}
-!63 = metadata !{metadata !"0x1c\00\000\000\000\000\000", metadata !60, null, metadata !64} ; [ DW_TAG_inheritance ]
-!64 = metadata !{metadata !"0x13\00NSO\0066\0032\0032\000\000\0016", metadata !155, metadata !40, null, metadata !66, null, null, null} ; [ DW_TAG_structure_type ] [NSO] [line 66, size 32, align 32, offset 0] [def] [from ]
-!65 = metadata !{metadata !"0x29", metadata !155} ; [ DW_TAG_file_type ]
-!66 = metadata !{metadata !67}
-!67 = metadata !{metadata !"0xd\00isa\0067\0032\0032\000\002", metadata !155, metadata !65, metadata !68, metadata !"", metadata !"", metadata !"", i32 0} ; [ DW_TAG_member ]
-!68 = metadata !{metadata !"0x16\00Class\00197\000\000\000\000", metadata !153, metadata !0, metadata !69} ; [ DW_TAG_typedef ]
-!69 = metadata !{metadata !"0xf\00\000\0032\0032\000\000", null, metadata !0, metadata !70} ; [ DW_TAG_pointer_type ]
-!70 = metadata !{metadata !"0x13\00objc_class\000\000\000\000\004\000", metadata !153, metadata !0, null, null, null, null, null} ; [ DW_TAG_structure_type ] [objc_class] [line 0, size 0, align 0, offset 0] [decl] [from ]
-!71 = metadata !{metadata !"0xd\00_mydataRef\0028\0032\0032\0032\000", metadata !154, metadata !61, metadata !72, metadata !"", metadata !"", metadata !"", i32 0} ; [ DW_TAG_member ]
-!72 = metadata !{metadata !"0x16\00CFTypeRef\00313\000\000\000\000", metadata !152, metadata !0, metadata !73} ; [ DW_TAG_typedef ]
-!73 = metadata !{metadata !"0xf\00\000\0032\0032\000\000", null, metadata !0, metadata !74} ; [ DW_TAG_pointer_type ]
-!74 = metadata !{metadata !"0x26\00\000\000\000\000\000", null, metadata !0, null} ; [ DW_TAG_const_type ]
-!75 = metadata !{metadata !"0xd\00_scale\0029\0032\0032\0064\000", metadata !154, metadata !61, metadata !76, metadata !"", metadata !"", metadata !"", i32 0} ; [ DW_TAG_member ]
-!76 = metadata !{metadata !"0x16\00Float\0089\000\000\000\000", metadata !156, metadata !0, metadata !78} ; [ DW_TAG_typedef ]
-!77 = metadata !{metadata !"0x29", metadata !156} ; [ DW_TAG_file_type ]
-!78 = metadata !{metadata !"0x24\00float\000\0032\0032\000\000\004", null, metadata !0} ; [ DW_TAG_base_type ]
-!79 = metadata !{metadata !"0xd\00_mydataFlags\0037\008\008\0096\000", metadata !154, metadata !61, metadata !80, metadata !"", metadata !"", metadata !"", i32 0} ; [ DW_TAG_member ]
-!80 = metadata !{metadata !"0x13\00\0030\008\008\000\000\000", metadata !154, metadata !0, null, metadata !81, null, null, null} ; [ DW_TAG_structure_type ] [line 30, size 8, align 8, offset 0] [def] [from ]
-!81 = metadata !{metadata !82, metadata !84, metadata !85, metadata !86, metadata !87, metadata !88}
-!82 = metadata !{metadata !"0xd\00named\0031\001\0032\000\000", metadata !154, metadata !61, metadata !83} ; [ DW_TAG_member ]
-!83 = metadata !{metadata !"0x24\00unsigned int\000\0032\0032\000\000\007", null, metadata !0} ; [ DW_TAG_base_type ]
-!84 = metadata !{metadata !"0xd\00mydataO\0032\003\0032\001\000", metadata !154, metadata !61, metadata !83} ; [ DW_TAG_member ]
-!85 = metadata !{metadata !"0xd\00cached\0033\001\0032\004\000", metadata !154, metadata !61, metadata !83} ; [ DW_TAG_member ]
-!86 = metadata !{metadata !"0xd\00hasBeenCached\0034\001\0032\005\000", metadata !154, metadata !61, metadata !83} ; [ DW_TAG_member ]
-!87 = metadata !{metadata !"0xd\00hasPattern\0035\001\0032\006\000", metadata !154, metadata !61, metadata !83} ; [ DW_TAG_member ]
-!88 = metadata !{metadata !"0xd\00isCIMydata\0036\001\0032\007\000", metadata !154, metadata !61, metadata !83} ; [ DW_TAG_member ]
-!89 = metadata !{metadata !"0xd\00self\00609\0032\0032\00192\000", metadata !152, metadata !24, metadata !90} ; [ DW_TAG_member ]
-!90 = metadata !{metadata !"0xf\00\000\0032\0032\000\000", null, metadata !0, metadata !91} ; [ DW_TAG_pointer_type ]
-!91 = metadata !{metadata !"0x13\00MyWork\0036\00384\0032\000\000\0016", metadata !152, metadata !40, null, metadata !92, null, null, null} ; [ DW_TAG_structure_type ] [MyWork] [line 36, size 384, align 32, offset 0] [def] [from ]
-!92 = metadata !{metadata !93, metadata !98, metadata !101, metadata !107, metadata !123}
-!93 = metadata !{metadata !"0x1c\00\000\000\000\000\000", metadata !152, metadata !91, metadata !94} ; [ DW_TAG_inheritance ]
-!94 = metadata !{metadata !"0x13\00twork\0043\0032\0032\000\000\0016", metadata !157, metadata !40, null, metadata !96, null, null, null} ; [ DW_TAG_structure_type ] [twork] [line 43, size 32, align 32, offset 0] [def] [from ]
-!95 = metadata !{metadata !"0x29", metadata !157} ; [ DW_TAG_file_type ]
-!96 = metadata !{metadata !97}
-!97 = metadata !{metadata !"0x1c\00\000\000\000\000\000", metadata !94, null, metadata !64} ; [ DW_TAG_inheritance ]
-!98 = metadata !{metadata !"0xd\00_itemID\0038\0064\0032\0032\001", metadata !152, metadata !24, metadata !99, metadata !"", metadata !"", metadata !"", i32 0} ; [ DW_TAG_member ]
-!99 = metadata !{metadata !"0x16\00uint64_t\0055\000\000\000\000", metadata !153, metadata !0, metadata !100} ; [ DW_TAG_typedef ]
-!100 = metadata !{metadata !"0x24\00long long unsigned int\000\0064\0032\000\000\007", null, metadata !0} ; [ DW_TAG_base_type ]
-!101 = metadata !{metadata !"0xd\00_library\0039\0032\0032\0096\001", metadata !152, metadata !24, metadata !102, metadata !"", metadata !"", metadata !"", i32 0} ; [ DW_TAG_member ]
-!102 = metadata !{metadata !"0xf\00\000\0032\0032\000\000", null, metadata !0, metadata !103} ; [ DW_TAG_pointer_type ]
-!103 = metadata !{metadata !"0x13\00MyLibrary2\0022\0032\0032\000\000\0016", metadata !158, metadata !40, null, metadata !105, null, null, null} ; [ DW_TAG_structure_type ] [MyLibrary2] [line 22, size 32, align 32, offset 0] [def] [from ]
-!104 = metadata !{metadata !"0x29", metadata !158} ; [ DW_TAG_file_type ]
-!105 = metadata !{metadata !106}
-!106 = metadata !{metadata !"0x1c\00\000\000\000\000\000", metadata !103, null, metadata !64} ; [ DW_TAG_inheritance ]
-!107 = metadata !{metadata !"0xd\00_bounds\0040\00128\0032\00128\001", metadata !152, metadata !24, metadata !108, metadata !"", metadata !"", metadata !"", i32 0} ; [ DW_TAG_member ]
-!108 = metadata !{metadata !"0x16\00CR\0033\000\000\000\000", metadata !153, metadata !0, metadata !109} ; [ DW_TAG_typedef ]
-!109 = metadata !{metadata !"0x13\00CR\0029\00128\0032\000\000\000", metadata !156, metadata !0, null, metadata !110, null, null, null} ; [ DW_TAG_structure_type ] [CR] [line 29, size 128, align 32, offset 0] [def] [from ]
-!110 = metadata !{metadata !111, metadata !117}
-!111 = metadata !{metadata !"0xd\00origin\0030\0064\0032\000\000", metadata !156, metadata !77, metadata !112} ; [ DW_TAG_member ]
-!112 = metadata !{metadata !"0x16\00CP\0017\000\000\000\000", metadata !156, metadata !0, metadata !113} ; [ DW_TAG_typedef ]
-!113 = metadata !{metadata !"0x13\00CP\0013\0064\0032\000\000\000", metadata !156, metadata !0, null, metadata !114, null, null, null} ; [ DW_TAG_structure_type ] [CP] [line 13, size 64, align 32, offset 0] [def] [from ]
-!114 = metadata !{metadata !115, metadata !116}
-!115 = metadata !{metadata !"0xd\00x\0014\0032\0032\000\000", metadata !156, metadata !77, metadata !76} ; [ DW_TAG_member ]
-!116 = metadata !{metadata !"0xd\00y\0015\0032\0032\0032\000", metadata !156, metadata !77, metadata !76} ; [ DW_TAG_member ]
-!117 = metadata !{metadata !"0xd\00size\0031\0064\0032\0064\000", metadata !156, metadata !77, metadata !118} ; [ DW_TAG_member ]
-!118 = metadata !{metadata !"0x16\00Size\0025\000\000\000\000", metadata !156, metadata !0, metadata !119} ; [ DW_TAG_typedef ]
-!119 = metadata !{metadata !"0x13\00Size\0021\0064\0032\000\000\000", metadata !156, metadata !0, null, metadata !120, null, null, null} ; [ DW_TAG_structure_type ] [Size] [line 21, size 64, align 32, offset 0] [def] [from ]
-!120 = metadata !{metadata !121, metadata !122}
-!121 = metadata !{metadata !"0xd\00width\0022\0032\0032\000\000", metadata !156, metadata !77, metadata !76} ; [ DW_TAG_member ]
-!122 = metadata !{metadata !"0xd\00height\0023\0032\0032\0032\000", metadata !156, metadata !77, metadata !76} ; [ DW_TAG_member ]
-!123 = metadata !{metadata !"0xd\00_data\0040\00128\0032\00256\001", metadata !152, metadata !24, metadata !108, metadata !"", metadata !"", metadata !"", i32 0} ; [ DW_TAG_member ]
-!124 = metadata !{metadata !"0xd\00semi\00609\0032\0032\00224\000", metadata !152, metadata !24, metadata !125} ; [ DW_TAG_member ]
-!125 = metadata !{metadata !"0x16\00d_t\0035\000\000\000\000", metadata !152, metadata !0, metadata !126} ; [ DW_TAG_typedef ]
-!126 = metadata !{metadata !"0xf\00\000\0032\0032\000\000", null, metadata !0, metadata !127} ; [ DW_TAG_pointer_type ]
-!127 = metadata !{metadata !"0x13\00my_struct\0049\000\000\000\004\000", metadata !159, metadata !0, null, null, null, null, null} ; [ DW_TAG_structure_type ] [my_struct] [line 49, size 0, align 0, offset 0] [decl] [from ]
-!128 = metadata !{metadata !"0x29", metadata !159} ; [ DW_TAG_file_type ]
-!129 = metadata !{i32 609, i32 144, metadata !23, null}
-!130 = metadata !{metadata !"0x101\00loadedMydata\0033555041\000", metadata !23, metadata !24, metadata !59} ; [ DW_TAG_arg_variable ]
-!131 = metadata !{i32 609, i32 155, metadata !23, null}
-!132 = metadata !{metadata !"0x101\00bounds\0050332257\000", metadata !23, metadata !24, metadata !108} ; [ DW_TAG_arg_variable ]
-!133 = metadata !{i32 609, i32 175, metadata !23, null}
-!134 = metadata !{metadata !"0x101\00data\0067109473\000", metadata !23, metadata !24, metadata !108} ; [ DW_TAG_arg_variable ]
-!135 = metadata !{i32 609, i32 190, metadata !23, null}
-!136 = metadata !{metadata !"0x100\00mydata\00604\000", metadata !23, metadata !24, metadata !50} ; [ DW_TAG_auto_variable ]
-!137 = metadata !{i32 604, i32 49, metadata !23, null}
-!138 = metadata !{metadata !"0x100\00self\00604\000", metadata !23, metadata !40, metadata !90} ; [ DW_TAG_auto_variable ]
-!139 = metadata !{metadata !"0x100\00semi\00607\000", metadata !23, metadata !24, metadata !125} ; [ DW_TAG_auto_variable ]
-!140 = metadata !{i32 607, i32 30, metadata !23, null}
-!141 = metadata !{i32 610, i32 17, metadata !142, null}
-!142 = metadata !{metadata !"0xb\00609\00200\0094", metadata !152, metadata !23} ; [ DW_TAG_lexical_block ]
-!143 = metadata !{i32 611, i32 17, metadata !142, null}
-!144 = metadata !{i32 612, i32 17, metadata !142, null}
-!145 = metadata !{i32 613, i32 17, metadata !142, null}
-!146 = metadata !{i32 615, i32 13, metadata !142, null}
-!147 = metadata !{metadata !1, metadata !1, metadata !5, metadata !5, metadata !9, metadata !14, metadata !19, metadata !19, metadata !14, metadata !14, metadata !14, metadata !19, metadata !19, metadata !19}
-!148 = metadata !{metadata !23}
-!149 = metadata !{metadata !"header3.h", metadata !"/Volumes/Sandbox/llvm"}
-!150 = metadata !{metadata !"Private.h", metadata !"/Volumes/Sandbox/llvm"}
-!151 = metadata !{metadata !"header4.h", metadata !"/Volumes/Sandbox/llvm"}
-!152 = metadata !{metadata !"MyLibrary.m", metadata !"/Volumes/Sandbox/llvm"}
-!153 = metadata !{metadata !"MyLibrary.i", metadata !"/Volumes/Sandbox/llvm"}
-!154 = metadata !{metadata !"header11.h", metadata !"/Volumes/Sandbox/llvm"}
-!155 = metadata !{metadata !"NSO.h", metadata !"/Volumes/Sandbox/llvm"}
-!156 = metadata !{metadata !"header12.h", metadata !"/Volumes/Sandbox/llvm"}
-!157 = metadata !{metadata !"header13.h", metadata !"/Volumes/Sandbox/llvm"}
-!158 = metadata !{metadata !"header14.h", metadata !"/Volumes/Sandbox/llvm"}
-!159 = metadata !{metadata !"header15.h", metadata !"/Volumes/Sandbox/llvm"}
-!160 = metadata !{metadata !"header.h", metadata !"/Volumes/Sandbox/llvm"}
-!161 = metadata !{metadata !"header2.h", metadata !"/Volumes/Sandbox/llvm"}
-!162 = metadata !{i32 1, metadata !"Debug Info Version", i32 2}
-!163 = metadata !{metadata !"0x102\0034\0020\006\0034\004\006\0034\0024"} ; [ DW_TAG_expression ] [DW_OP_plus 20 DW_OP_deref DW_OP_plus 4 DW_OP_deref DW_OP_plus 24]
-!164 = metadata !{metadata !"0x102\0034\0024"} ; [ DW_TAG_expression ] [DW_OP_plus 24]
-!165 = metadata !{metadata !"0x102\0034\0028"} ; [ DW_TAG_expression ] [DW_OP_plus 28]
+!0 = !{!"0x11\0016\00Apple clang version 2.1\000\00\002\00\001", !153, !147, !26, !148, null, null} ; [ DW_TAG_compile_unit ]
+!1 = !{!"0x4\00\00248\0032\0032\000\000\000", !160, !0, null, !3, null, null, null} ; [ DW_TAG_enumeration_type ] [line 248, size 32, align 32, offset 0] [def] [from ]
+!2 = !{!"0x29", !160} ; [ DW_TAG_file_type ]
+!3 = !{!4}
+!4 = !{!"0x28\00Ver1\000"} ; [ DW_TAG_enumerator ]
+!5 = !{!"0x4\00Mode\0079\0032\0032\000\000\000", !160, !0, null, !7, null, null, null} ; [ DW_TAG_enumeration_type ] [Mode] [line 79, size 32, align 32, offset 0] [def] [from ]
+!6 = !{!"0x29", !161} ; [ DW_TAG_file_type ]
+!7 = !{!8}
+!8 = !{!"0x28\00One\000"} ; [ DW_TAG_enumerator ]
+!9 = !{!"0x4\00\0015\0032\0032\000\000\000", !149, !0, null, !11, null, null, null} ; [ DW_TAG_enumeration_type ] [line 15, size 32, align 32, offset 0] [def] [from ]
+!10 = !{!"0x29", !149} ; [ DW_TAG_file_type ]
+!11 = !{!12, !13}
+!12 = !{!"0x28\00Unknown\000"} ; [ DW_TAG_enumerator ]
+!13 = !{!"0x28\00Known\001"} ; [ DW_TAG_enumerator ]
+!14 = !{!"0x4\00\0020\0032\0032\000\000\000", !150, !0, null, !16, null, null, null} ; [ DW_TAG_enumeration_type ] [line 20, size 32, align 32, offset 0] [def] [from ]
+!15 = !{!"0x29", !150} ; [ DW_TAG_file_type ]
+!16 = !{!17, !18}
+!17 = !{!"0x28\00Single\000"} ; [ DW_TAG_enumerator ]
+!18 = !{!"0x28\00Double\001"} ; [ DW_TAG_enumerator ]
+!19 = !{!"0x4\00\0014\0032\0032\000\000\000", !151, !0, null, !21, null, null, null} ; [ DW_TAG_enumeration_type ] [line 14, size 32, align 32, offset 0] [def] [from ]
+!20 = !{!"0x29", !151} ; [ DW_TAG_file_type ]
+!21 = !{!22}
+!22 = !{!"0x28\00Eleven\000"} ; [ DW_TAG_enumerator ]
+!23 = !{!"0x2e\00foobar_func_block_invoke_0\00foobar_func_block_invoke_0\00\00609\001\001\000\006\00256\000\00609", !152, !24, !25, null, void (i8*, %0*, [4 x i32], [4 x i32])* @foobar_func_block_invoke_0, null, null, null} ; [ DW_TAG_subprogram ] [line 609] [local] [def] [foobar_func_block_invoke_0]
+!24 = !{!"0x29", !152} ; [ DW_TAG_file_type ]
+!25 = !{!"0x15\00\000\000\000\000\000\000", !152, !24, null, !26, null, null, null} ; [ DW_TAG_subroutine_type ] [line 0, size 0, align 0, offset 0] [from ]
+!26 = !{null}
+!27 = !{!"0x101\00.block_descriptor\0016777825\0064", !23, !24, !28} ; [ DW_TAG_arg_variable ]
+!28 = !{!"0xf\00\000\0032\000\000\000", null, !0, !29} ; [ DW_TAG_pointer_type ]
+!29 = !{!"0x13\00__block_literal_14\00609\00256\0032\000\000\000", !152, !24, null, !30, null, null, null} ; [ DW_TAG_structure_type ] [__block_literal_14] [line 609, size 256, align 32, offset 0] [def] [from ]
+!30 = !{!31, !33, !35, !36, !37, !48, !89, !124}
+!31 = !{!"0xd\00__isa\00609\0032\0032\000\000", !152, !24, !32} ; [ DW_TAG_member ]
+!32 = !{!"0xf\00\000\0032\0032\000\000", null, !0, null} ; [ DW_TAG_pointer_type ]
+!33 = !{!"0xd\00__flags\00609\0032\0032\0032\000", !152, !24, !34} ; [ DW_TAG_member ]
+!34 = !{!"0x24\00int\000\0032\0032\000\000\005", null, !0} ; [ DW_TAG_base_type ]
+!35 = !{!"0xd\00__reserved\00609\0032\0032\0064\000", !152, !24, !34} ; [ DW_TAG_member ]
+!36 = !{!"0xd\00__FuncPtr\00609\0032\0032\0096\000", !152, !24, !32} ; [ DW_TAG_member ]
+!37 = !{!"0xd\00__descriptor\00609\0032\0032\00128\000", !152, !24, !38} ; [ DW_TAG_member ]
+!38 = !{!"0xf\00\000\0032\0032\000\000", null, !0, !39} ; [ DW_TAG_pointer_type ]
+!39 = !{!"0x13\00__block_descriptor_withcopydispose\00307\00128\0032\000\000\000", !153, !0, null, !41, null, null, null} ; [ DW_TAG_structure_type ] [__block_descriptor_withcopydispose] [line 307, size 128, align 32, offset 0] [def] [from ]
+!40 = !{!"0x29", !153} ; [ DW_TAG_file_type ]
+!41 = !{!42, !44, !45, !47}
+!42 = !{!"0xd\00reserved\00307\0032\0032\000\000", !153, !40, !43} ; [ DW_TAG_member ]
+!43 = !{!"0x24\00long unsigned int\000\0032\0032\000\000\007", null, !0} ; [ DW_TAG_base_type ]
+!44 = !{!"0xd\00Size\00307\0032\0032\0032\000", !153, !40, !43} ; [ DW_TAG_member ]
+!45 = !{!"0xd\00CopyFuncPtr\00307\0032\0032\0064\000", !153, !40, !46} ; [ DW_TAG_member ]
+!46 = !{!"0xf\00\000\0032\0032\000\000", null, !0, !32} ; [ DW_TAG_pointer_type ]
+!47 = !{!"0xd\00DestroyFuncPtr\00307\0032\0032\0096\000", !153, !40, !46} ; [ DW_TAG_member ]
+!48 = !{!"0xd\00mydata\00609\0032\0032\00160\000", !152, !24, !49} ; [ DW_TAG_member ]
+!49 = !{!"0xf\00\000\0032\000\000\000", null, !0, !50} ; [ DW_TAG_pointer_type ]
+!50 = !{!"0x13\00\000\00224\000\000\0016\000", !152, !24, null, !51, null, null, null} ; [ DW_TAG_structure_type ] [line 0, size 224, align 0, offset 0] [def] [from ]
+!51 = !{!52, !53, !54, !55, !56, !57, !58}
+!52 = !{!"0xd\00__isa\000\0032\0032\000\000", !152, !24, !32} ; [ DW_TAG_member ]
+!53 = !{!"0xd\00__forwarding\000\0032\0032\0032\000", !152, !24, !32} ; [ DW_TAG_member ]
+!54 = !{!"0xd\00__flags\000\0032\0032\0064\000", !152, !24, !34} ; [ DW_TAG_member ]
+!55 = !{!"0xd\00__size\000\0032\0032\0096\000", !152, !24, !34} ; [ DW_TAG_member ]
+!56 = !{!"0xd\00__copy_helper\000\0032\0032\00128\000", !152, !24, !32} ; [ DW_TAG_member ]
+!57 = !{!"0xd\00__destroy_helper\000\0032\0032\00160\000", !152, !24, !32} ; [ DW_TAG_member ]
+!58 = !{!"0xd\00mydata\000\0032\0032\00192\000", !152, !24, !59} ; [ DW_TAG_member ]
+!59 = !{!"0xf\00\000\0032\0032\000\000", null, !0, !60} ; [ DW_TAG_pointer_type ]
+!60 = !{!"0x13\00UIMydata\0026\00128\0032\000\000\0016", !154, !24, null, !62, null, null, null} ; [ DW_TAG_structure_type ] [UIMydata] [line 26, size 128, align 32, offset 0] [def] [from ]
+!61 = !{!"0x29", !154} ; [ DW_TAG_file_type ]
+!62 = !{!63, !71, !75, !79}
+!63 = !{!"0x1c\00\000\000\000\000\000", !60, null, !64} ; [ DW_TAG_inheritance ]
+!64 = !{!"0x13\00NSO\0066\0032\0032\000\000\0016", !155, !40, null, !66, null, null, null} ; [ DW_TAG_structure_type ] [NSO] [line 66, size 32, align 32, offset 0] [def] [from ]
+!65 = !{!"0x29", !155} ; [ DW_TAG_file_type ]
+!66 = !{!67}
+!67 = !{!"0xd\00isa\0067\0032\0032\000\002", !155, !65, !68, !"", !"", !"", i32 0} ; [ DW_TAG_member ]
+!68 = !{!"0x16\00Class\00197\000\000\000\000", !153, !0, !69} ; [ DW_TAG_typedef ]
+!69 = !{!"0xf\00\000\0032\0032\000\000", null, !0, !70} ; [ DW_TAG_pointer_type ]
+!70 = !{!"0x13\00objc_class\000\000\000\000\004\000", !153, !0, null, null, null, null, null} ; [ DW_TAG_structure_type ] [objc_class] [line 0, size 0, align 0, offset 0] [decl] [from ]
+!71 = !{!"0xd\00_mydataRef\0028\0032\0032\0032\000", !154, !61, !72, !"", !"", !"", i32 0} ; [ DW_TAG_member ]
+!72 = !{!"0x16\00CFTypeRef\00313\000\000\000\000", !152, !0, !73} ; [ DW_TAG_typedef ]
+!73 = !{!"0xf\00\000\0032\0032\000\000", null, !0, !74} ; [ DW_TAG_pointer_type ]
+!74 = !{!"0x26\00\000\000\000\000\000", null, !0, null} ; [ DW_TAG_const_type ]
+!75 = !{!"0xd\00_scale\0029\0032\0032\0064\000", !154, !61, !76, !"", !"", !"", i32 0} ; [ DW_TAG_member ]
+!76 = !{!"0x16\00Float\0089\000\000\000\000", !156, !0, !78} ; [ DW_TAG_typedef ]
+!77 = !{!"0x29", !156} ; [ DW_TAG_file_type ]
+!78 = !{!"0x24\00float\000\0032\0032\000\000\004", null, !0} ; [ DW_TAG_base_type ]
+!79 = !{!"0xd\00_mydataFlags\0037\008\008\0096\000", !154, !61, !80, !"", !"", !"", i32 0} ; [ DW_TAG_member ]
+!80 = !{!"0x13\00\0030\008\008\000\000\000", !154, !0, null, !81, null, null, null} ; [ DW_TAG_structure_type ] [line 30, size 8, align 8, offset 0] [def] [from ]
+!81 = !{!82, !84, !85, !86, !87, !88}
+!82 = !{!"0xd\00named\0031\001\0032\000\000", !154, !61, !83} ; [ DW_TAG_member ]
+!83 = !{!"0x24\00unsigned int\000\0032\0032\000\000\007", null, !0} ; [ DW_TAG_base_type ]
+!84 = !{!"0xd\00mydataO\0032\003\0032\001\000", !154, !61, !83} ; [ DW_TAG_member ]
+!85 = !{!"0xd\00cached\0033\001\0032\004\000", !154, !61, !83} ; [ DW_TAG_member ]
+!86 = !{!"0xd\00hasBeenCached\0034\001\0032\005\000", !154, !61, !83} ; [ DW_TAG_member ]
+!87 = !{!"0xd\00hasPattern\0035\001\0032\006\000", !154, !61, !83} ; [ DW_TAG_member ]
+!88 = !{!"0xd\00isCIMydata\0036\001\0032\007\000", !154, !61, !83} ; [ DW_TAG_member ]
+!89 = !{!"0xd\00self\00609\0032\0032\00192\000", !152, !24, !90} ; [ DW_TAG_member ]
+!90 = !{!"0xf\00\000\0032\0032\000\000", null, !0, !91} ; [ DW_TAG_pointer_type ]
+!91 = !{!"0x13\00MyWork\0036\00384\0032\000\000\0016", !152, !40, null, !92, null, null, null} ; [ DW_TAG_structure_type ] [MyWork] [line 36, size 384, align 32, offset 0] [def] [from ]
+!92 = !{!93, !98, !101, !107, !123}
+!93 = !{!"0x1c\00\000\000\000\000\000", !152, !91, !94} ; [ DW_TAG_inheritance ]
+!94 = !{!"0x13\00twork\0043\0032\0032\000\000\0016", !157, !40, null, !96, null, null, null} ; [ DW_TAG_structure_type ] [twork] [line 43, size 32, align 32, offset 0] [def] [from ]
+!95 = !{!"0x29", !157} ; [ DW_TAG_file_type ]
+!96 = !{!97}
+!97 = !{!"0x1c\00\000\000\000\000\000", !94, null, !64} ; [ DW_TAG_inheritance ]
+!98 = !{!"0xd\00_itemID\0038\0064\0032\0032\001", !152, !24, !99, !"", !"", !"", i32 0} ; [ DW_TAG_member ]
+!99 = !{!"0x16\00uint64_t\0055\000\000\000\000", !153, !0, !100} ; [ DW_TAG_typedef ]
+!100 = !{!"0x24\00long long unsigned int\000\0064\0032\000\000\007", null, !0} ; [ DW_TAG_base_type ]
+!101 = !{!"0xd\00_library\0039\0032\0032\0096\001", !152, !24, !102, !"", !"", !"", i32 0} ; [ DW_TAG_member ]
+!102 = !{!"0xf\00\000\0032\0032\000\000", null, !0, !103} ; [ DW_TAG_pointer_type ]
+!103 = !{!"0x13\00MyLibrary2\0022\0032\0032\000\000\0016", !158, !40, null, !105, null, null, null} ; [ DW_TAG_structure_type ] [MyLibrary2] [line 22, size 32, align 32, offset 0] [def] [from ]
+!104 = !{!"0x29", !158} ; [ DW_TAG_file_type ]
+!105 = !{!106}
+!106 = !{!"0x1c\00\000\000\000\000\000", !103, null, !64} ; [ DW_TAG_inheritance ]
+!107 = !{!"0xd\00_bounds\0040\00128\0032\00128\001", !152, !24, !108, !"", !"", !"", i32 0} ; [ DW_TAG_member ]
+!108 = !{!"0x16\00CR\0033\000\000\000\000", !153, !0, !109} ; [ DW_TAG_typedef ]
+!109 = !{!"0x13\00CR\0029\00128\0032\000\000\000", !156, !0, null, !110, null, null, null} ; [ DW_TAG_structure_type ] [CR] [line 29, size 128, align 32, offset 0] [def] [from ]
+!110 = !{!111, !117}
+!111 = !{!"0xd\00origin\0030\0064\0032\000\000", !156, !77, !112} ; [ DW_TAG_member ]
+!112 = !{!"0x16\00CP\0017\000\000\000\000", !156, !0, !113} ; [ DW_TAG_typedef ]
+!113 = !{!"0x13\00CP\0013\0064\0032\000\000\000", !156, !0, null, !114, null, null, null} ; [ DW_TAG_structure_type ] [CP] [line 13, size 64, align 32, offset 0] [def] [from ]
+!114 = !{!115, !116}
+!115 = !{!"0xd\00x\0014\0032\0032\000\000", !156, !77, !76} ; [ DW_TAG_member ]
+!116 = !{!"0xd\00y\0015\0032\0032\0032\000", !156, !77, !76} ; [ DW_TAG_member ]
+!117 = !{!"0xd\00size\0031\0064\0032\0064\000", !156, !77, !118} ; [ DW_TAG_member ]
+!118 = !{!"0x16\00Size\0025\000\000\000\000", !156, !0, !119} ; [ DW_TAG_typedef ]
+!119 = !{!"0x13\00Size\0021\0064\0032\000\000\000", !156, !0, null, !120, null, null, null} ; [ DW_TAG_structure_type ] [Size] [line 21, size 64, align 32, offset 0] [def] [from ]
+!120 = !{!121, !122}
+!121 = !{!"0xd\00width\0022\0032\0032\000\000", !156, !77, !76} ; [ DW_TAG_member ]
+!122 = !{!"0xd\00height\0023\0032\0032\0032\000", !156, !77, !76} ; [ DW_TAG_member ]
+!123 = !{!"0xd\00_data\0040\00128\0032\00256\001", !152, !24, !108, !"", !"", !"", i32 0} ; [ DW_TAG_member ]
+!124 = !{!"0xd\00semi\00609\0032\0032\00224\000", !152, !24, !125} ; [ DW_TAG_member ]
+!125 = !{!"0x16\00d_t\0035\000\000\000\000", !152, !0, !126} ; [ DW_TAG_typedef ]
+!126 = !{!"0xf\00\000\0032\0032\000\000", null, !0, !127} ; [ DW_TAG_pointer_type ]
+!127 = !{!"0x13\00my_struct\0049\000\000\000\004\000", !159, !0, null, null, null, null, null} ; [ DW_TAG_structure_type ] [my_struct] [line 49, size 0, align 0, offset 0] [decl] [from ]
+!128 = !{!"0x29", !159} ; [ DW_TAG_file_type ]
+!129 = !MDLocation(line: 609, column: 144, scope: !23)
+!130 = !{!"0x101\00loadedMydata\0033555041\000", !23, !24, !59} ; [ DW_TAG_arg_variable ]
+!131 = !MDLocation(line: 609, column: 155, scope: !23)
+!132 = !{!"0x101\00bounds\0050332257\000", !23, !24, !108} ; [ DW_TAG_arg_variable ]
+!133 = !MDLocation(line: 609, column: 175, scope: !23)
+!134 = !{!"0x101\00data\0067109473\000", !23, !24, !108} ; [ DW_TAG_arg_variable ]
+!135 = !MDLocation(line: 609, column: 190, scope: !23)
+!136 = !{!"0x100\00mydata\00604\000", !23, !24, !50} ; [ DW_TAG_auto_variable ]
+!137 = !MDLocation(line: 604, column: 49, scope: !23)
+!138 = !{!"0x100\00self\00604\000", !23, !40, !90} ; [ DW_TAG_auto_variable ]
+!139 = !{!"0x100\00semi\00607\000", !23, !24, !125} ; [ DW_TAG_auto_variable ]
+!140 = !MDLocation(line: 607, column: 30, scope: !23)
+!141 = !MDLocation(line: 610, column: 17, scope: !142)
+!142 = !{!"0xb\00609\00200\0094", !152, !23} ; [ DW_TAG_lexical_block ]
+!143 = !MDLocation(line: 611, column: 17, scope: !142)
+!144 = !MDLocation(line: 612, column: 17, scope: !142)
+!145 = !MDLocation(line: 613, column: 17, scope: !142)
+!146 = !MDLocation(line: 615, column: 13, scope: !142)
+!147 = !{!1, !1, !5, !5, !9, !14, !19, !19, !14, !14, !14, !19, !19, !19}
+!148 = !{!23}
+!149 = !{!"header3.h", !"/Volumes/Sandbox/llvm"}
+!150 = !{!"Private.h", !"/Volumes/Sandbox/llvm"}
+!151 = !{!"header4.h", !"/Volumes/Sandbox/llvm"}
+!152 = !{!"MyLibrary.m", !"/Volumes/Sandbox/llvm"}
+!153 = !{!"MyLibrary.i", !"/Volumes/Sandbox/llvm"}
+!154 = !{!"header11.h", !"/Volumes/Sandbox/llvm"}
+!155 = !{!"NSO.h", !"/Volumes/Sandbox/llvm"}
+!156 = !{!"header12.h", !"/Volumes/Sandbox/llvm"}
+!157 = !{!"header13.h", !"/Volumes/Sandbox/llvm"}
+!158 = !{!"header14.h", !"/Volumes/Sandbox/llvm"}
+!159 = !{!"header15.h", !"/Volumes/Sandbox/llvm"}
+!160 = !{!"header.h", !"/Volumes/Sandbox/llvm"}
+!161 = !{!"header2.h", !"/Volumes/Sandbox/llvm"}
+!162 = !{i32 1, !"Debug Info Version", i32 2}
+!163 = !{!"0x102\0034\0020\006\0034\004\006\0034\0024"} ; [ DW_TAG_expression ] [DW_OP_plus 20 DW_OP_deref DW_OP_plus 4 DW_OP_deref DW_OP_plus 24]
+!164 = !{!"0x102\0034\0024"} ; [ DW_TAG_expression ] [DW_OP_plus 24]
+!165 = !{!"0x102\0034\0028"} ; [ DW_TAG_expression ] [DW_OP_plus 28]
diff --git a/test/CodeGen/ARM/debug-info-branch-folding.ll b/test/CodeGen/ARM/debug-info-branch-folding.ll
index db96b49..9475695 100644
--- a/test/CodeGen/ARM/debug-info-branch-folding.ll
+++ b/test/CodeGen/ARM/debug-info-branch-folding.ll
@@ -20,9 +20,9 @@ entry:
for.body9: ; preds = %for.body9, %entry
%add19 = fadd <4 x float> undef, <float 0.000000e+00, float 0.000000e+00, float 0.000000e+00, float 1.000000e+00>, !dbg !39
- tail call void @llvm.dbg.value(metadata !{<4 x float> %add19}, i64 0, metadata !27, metadata !{metadata !"0x102"}), !dbg !39
+ tail call void @llvm.dbg.value(metadata <4 x float> %add19, i64 0, metadata !27, metadata !{!"0x102"}), !dbg !39
%add20 = fadd <4 x float> undef, <float 0.000000e+00, float 0.000000e+00, float 0.000000e+00, float 1.000000e+00>, !dbg !39
- tail call void @llvm.dbg.value(metadata !{<4 x float> %add20}, i64 0, metadata !28, metadata !{metadata !"0x102"}), !dbg !39
+ tail call void @llvm.dbg.value(metadata <4 x float> %add20, i64 0, metadata !28, metadata !{!"0x102"}), !dbg !39
br i1 %cond, label %for.end54, label %for.body9, !dbg !44
for.end54: ; preds = %for.body9
@@ -42,60 +42,60 @@ declare void @llvm.dbg.value(metadata, i64, metadata, metadata) nounwind readnon
!llvm.module.flags = !{!56}
!llvm.dbg.cu = !{!2}
-!0 = metadata !{metadata !"0x2e\00test0001\00test0001\00\003\000\001\000\006\00256\001\000", metadata !54, null, metadata !3, i32 0, <4 x float> (float)* @test0001, null, null, metadata !51} ; [ DW_TAG_subprogram ]
-!1 = metadata !{metadata !"0x29", metadata !54} ; [ DW_TAG_file_type ]
-!2 = metadata !{metadata !"0x11\0012\00clang version 3.0 (trunk 129915)\001\00\000\00\001", metadata !54, metadata !17, metadata !17, metadata !50, null, null} ; [ DW_TAG_compile_unit ]
-!3 = metadata !{metadata !"0x15\00\000\000\000\000\000\000", metadata !54, metadata !1, i32 0, metadata !4, i32 0} ; [ DW_TAG_subroutine_type ]
-!4 = metadata !{metadata !5}
-!5 = metadata !{metadata !"0x16\00v4f32\0014\000\000\000\000", metadata !54, metadata !2, metadata !6} ; [ DW_TAG_typedef ]
-!6 = metadata !{metadata !"0x1\00\000\00128\00128\000\000", metadata !54, metadata !2, metadata !7, metadata !8, i32 0, null, null, null} ; [ DW_TAG_array_type ] [line 0, size 128, align 128, offset 0] [from float]
-!7 = metadata !{metadata !"0x24\00float\000\0032\0032\000\000\004", null, metadata !2} ; [ DW_TAG_base_type ]
-!8 = metadata !{metadata !9}
-!9 = metadata !{metadata !"0x21\000\004"} ; [ DW_TAG_subrange_type ]
-!10 = metadata !{metadata !"0x2e\00main\00main\00\0059\000\001\000\006\00256\001\000", metadata !54, null, metadata !11, null, i32 (i32, i8**, i1)* @main, null, null, metadata !52} ; [ DW_TAG_subprogram ] [line 59] [def] [scope 0] [main]
-!11 = metadata !{metadata !"0x15\00\000\000\000\000\000\000", metadata !54, metadata !1, null, metadata !12, null, null, null} ; [ DW_TAG_subroutine_type ] [line 0, size 0, align 0, offset 0] [from ]
-!12 = metadata !{metadata !13}
-!13 = metadata !{metadata !"0x24\00int\000\0032\0032\000\000\005", null, metadata !2} ; [ DW_TAG_base_type ]
-!14 = metadata !{metadata !"0x2e\00printFV\00printFV\00\0041\001\001\000\006\00256\001\000", metadata !55, null, metadata !16, null, null, null, null, metadata !53} ; [ DW_TAG_subprogram ] [line 41] [local] [def] [scope 0] [printFV]
-!15 = metadata !{metadata !"0x29", metadata !55} ; [ DW_TAG_file_type ]
-!16 = metadata !{metadata !"0x15\00\000\000\000\000\000\000", metadata !55, metadata !15, null, metadata !17, null, null, null} ; [ DW_TAG_subroutine_type ] [line 0, size 0, align 0, offset 0] [from ]
-!17 = metadata !{null}
-!18 = metadata !{metadata !"0x101\00a\0016777219\000", metadata !0, metadata !1, metadata !7} ; [ DW_TAG_arg_variable ]
-!19 = metadata !{metadata !"0x101\00argc\0016777275\000", metadata !10, metadata !1, metadata !13} ; [ DW_TAG_arg_variable ]
-!20 = metadata !{metadata !"0x101\00argv\0033554491\000", metadata !10, metadata !1, metadata !21} ; [ DW_TAG_arg_variable ]
-!21 = metadata !{metadata !"0xf\00\000\0032\0032\000\000", null, metadata !2, metadata !22} ; [ DW_TAG_pointer_type ]
-!22 = metadata !{metadata !"0xf\00\000\0032\0032\000\000", null, metadata !2, metadata !23} ; [ DW_TAG_pointer_type ]
-!23 = metadata !{metadata !"0x24\00char\000\008\008\000\000\006", null, metadata !2} ; [ DW_TAG_base_type ]
-!24 = metadata !{metadata !"0x100\00i\0060\000", metadata !25, metadata !1, metadata !13} ; [ DW_TAG_auto_variable ]
-!25 = metadata !{metadata !"0xb\0059\0033\0014", metadata !1, metadata !10} ; [ DW_TAG_lexical_block ]
-!26 = metadata !{metadata !"0x100\00j\0060\000", metadata !25, metadata !1, metadata !13} ; [ DW_TAG_auto_variable ]
-!27 = metadata !{metadata !"0x100\00x\0061\000", metadata !25, metadata !1, metadata !5} ; [ DW_TAG_auto_variable ]
-!28 = metadata !{metadata !"0x100\00y\0062\000", metadata !25, metadata !1, metadata !5} ; [ DW_TAG_auto_variable ]
-!29 = metadata !{metadata !"0x100\00z\0063\000", metadata !25, metadata !1, metadata !5} ; [ DW_TAG_auto_variable ]
-!30 = metadata !{metadata !"0x101\00F\0016777257\000", metadata !14, metadata !15, metadata !31} ; [ DW_TAG_arg_variable ]
-!31 = metadata !{metadata !"0xf\00\000\0032\0032\000\000", null, metadata !2, metadata !32} ; [ DW_TAG_pointer_type ]
-!32 = metadata !{metadata !"0x16\00FV\0025\000\000\000\000", metadata !55, metadata !2, metadata !33} ; [ DW_TAG_typedef ]
-!33 = metadata !{metadata !"0x17\00\0022\00128\00128\000\000\000", metadata !55, metadata !2, i32 0, metadata !34, null} ; [ DW_TAG_union_type ]
-!34 = metadata !{metadata !35, metadata !37}
-!35 = metadata !{metadata !"0xd\00V\0023\00128\00128\000\000", metadata !55, metadata !15, metadata !36} ; [ DW_TAG_member ]
-!36 = metadata !{metadata !"0x16\00v4sf\003\000\000\000\000", metadata !55, metadata !2, metadata !6} ; [ DW_TAG_typedef ]
-!37 = metadata !{metadata !"0xd\00A\0024\00128\0032\000\000", metadata !55, metadata !15, metadata !38} ; [ DW_TAG_member ]
-!38 = metadata !{metadata !"0x1\00\000\00128\0032\000\000", null, metadata !2, metadata !7, metadata !8, i32 0, i32 0} ; [ DW_TAG_array_type ]
-!39 = metadata !{i32 79, i32 7, metadata !40, null}
-!40 = metadata !{metadata !"0xb\0075\0035\0018", metadata !1, metadata !41} ; [ DW_TAG_lexical_block ]
-!41 = metadata !{metadata !"0xb\0075\005\0017", metadata !1, metadata !42} ; [ DW_TAG_lexical_block ]
-!42 = metadata !{metadata !"0xb\0071\0032\0016", metadata !1, metadata !43} ; [ DW_TAG_lexical_block ]
-!43 = metadata !{metadata !"0xb\0071\003\0015", metadata !1, metadata !25} ; [ DW_TAG_lexical_block ]
-!44 = metadata !{i32 75, i32 5, metadata !42, null}
-!45 = metadata !{i32 42, i32 2, metadata !46, metadata !48}
-!46 = metadata !{metadata !"0xb\0042\002\0020", metadata !15, metadata !47} ; [ DW_TAG_lexical_block ]
-!47 = metadata !{metadata !"0xb\0041\0028\0019", metadata !15, metadata !14} ; [ DW_TAG_lexical_block ]
-!48 = metadata !{i32 95, i32 3, metadata !25, null}
-!49 = metadata !{i32 99, i32 3, metadata !25, null}
-!50 = metadata !{metadata !0, metadata !10, metadata !14}
-!51 = metadata !{metadata !18}
-!52 = metadata !{metadata !19, metadata !20, metadata !24, metadata !26, metadata !27, metadata !28, metadata !29}
-!53 = metadata !{metadata !30}
-!54 = metadata !{metadata !"build2.c", metadata !"/private/tmp"}
-!55 = metadata !{metadata !"/Volumes/Lalgate/work/llvm/projects/llvm-test/SingleSource/UnitTests/Vector/helpers.h", metadata !"/private/tmp"}
-!56 = metadata !{i32 1, metadata !"Debug Info Version", i32 2}
+!0 = !{!"0x2e\00test0001\00test0001\00\003\000\001\000\006\00256\001\000", !54, null, !3, i32 0, <4 x float> (float)* @test0001, null, null, !51} ; [ DW_TAG_subprogram ]
+!1 = !{!"0x29", !54} ; [ DW_TAG_file_type ]
+!2 = !{!"0x11\0012\00clang version 3.0 (trunk 129915)\001\00\000\00\001", !54, !17, !17, !50, null, null} ; [ DW_TAG_compile_unit ]
+!3 = !{!"0x15\00\000\000\000\000\000\000", !54, !1, i32 0, !4, i32 0} ; [ DW_TAG_subroutine_type ]
+!4 = !{!5}
+!5 = !{!"0x16\00v4f32\0014\000\000\000\000", !54, !2, !6} ; [ DW_TAG_typedef ]
+!6 = !{!"0x1\00\000\00128\00128\000\000", !54, !2, !7, !8, i32 0, null, null, null} ; [ DW_TAG_array_type ] [line 0, size 128, align 128, offset 0] [from float]
+!7 = !{!"0x24\00float\000\0032\0032\000\000\004", null, !2} ; [ DW_TAG_base_type ]
+!8 = !{!9}
+!9 = !{!"0x21\000\004"} ; [ DW_TAG_subrange_type ]
+!10 = !{!"0x2e\00main\00main\00\0059\000\001\000\006\00256\001\000", !54, null, !11, null, i32 (i32, i8**, i1)* @main, null, null, !52} ; [ DW_TAG_subprogram ] [line 59] [def] [scope 0] [main]
+!11 = !{!"0x15\00\000\000\000\000\000\000", !54, !1, null, !12, null, null, null} ; [ DW_TAG_subroutine_type ] [line 0, size 0, align 0, offset 0] [from ]
+!12 = !{!13}
+!13 = !{!"0x24\00int\000\0032\0032\000\000\005", null, !2} ; [ DW_TAG_base_type ]
+!14 = !{!"0x2e\00printFV\00printFV\00\0041\001\001\000\006\00256\001\000", !55, null, !16, null, null, null, null, !53} ; [ DW_TAG_subprogram ] [line 41] [local] [def] [scope 0] [printFV]
+!15 = !{!"0x29", !55} ; [ DW_TAG_file_type ]
+!16 = !{!"0x15\00\000\000\000\000\000\000", !55, !15, null, !17, null, null, null} ; [ DW_TAG_subroutine_type ] [line 0, size 0, align 0, offset 0] [from ]
+!17 = !{null}
+!18 = !{!"0x101\00a\0016777219\000", !0, !1, !7} ; [ DW_TAG_arg_variable ]
+!19 = !{!"0x101\00argc\0016777275\000", !10, !1, !13} ; [ DW_TAG_arg_variable ]
+!20 = !{!"0x101\00argv\0033554491\000", !10, !1, !21} ; [ DW_TAG_arg_variable ]
+!21 = !{!"0xf\00\000\0032\0032\000\000", null, !2, !22} ; [ DW_TAG_pointer_type ]
+!22 = !{!"0xf\00\000\0032\0032\000\000", null, !2, !23} ; [ DW_TAG_pointer_type ]
+!23 = !{!"0x24\00char\000\008\008\000\000\006", null, !2} ; [ DW_TAG_base_type ]
+!24 = !{!"0x100\00i\0060\000", !25, !1, !13} ; [ DW_TAG_auto_variable ]
+!25 = !{!"0xb\0059\0033\0014", !1, !10} ; [ DW_TAG_lexical_block ]
+!26 = !{!"0x100\00j\0060\000", !25, !1, !13} ; [ DW_TAG_auto_variable ]
+!27 = !{!"0x100\00x\0061\000", !25, !1, !5} ; [ DW_TAG_auto_variable ]
+!28 = !{!"0x100\00y\0062\000", !25, !1, !5} ; [ DW_TAG_auto_variable ]
+!29 = !{!"0x100\00z\0063\000", !25, !1, !5} ; [ DW_TAG_auto_variable ]
+!30 = !{!"0x101\00F\0016777257\000", !14, !15, !31} ; [ DW_TAG_arg_variable ]
+!31 = !{!"0xf\00\000\0032\0032\000\000", null, !2, !32} ; [ DW_TAG_pointer_type ]
+!32 = !{!"0x16\00FV\0025\000\000\000\000", !55, !2, !33} ; [ DW_TAG_typedef ]
+!33 = !{!"0x17\00\0022\00128\00128\000\000\000", !55, !2, i32 0, !34, null} ; [ DW_TAG_union_type ]
+!34 = !{!35, !37}
+!35 = !{!"0xd\00V\0023\00128\00128\000\000", !55, !15, !36} ; [ DW_TAG_member ]
+!36 = !{!"0x16\00v4sf\003\000\000\000\000", !55, !2, !6} ; [ DW_TAG_typedef ]
+!37 = !{!"0xd\00A\0024\00128\0032\000\000", !55, !15, !38} ; [ DW_TAG_member ]
+!38 = !{!"0x1\00\000\00128\0032\000\000", null, !2, !7, !8, i32 0, i32 0} ; [ DW_TAG_array_type ]
+!39 = !MDLocation(line: 79, column: 7, scope: !40)
+!40 = !{!"0xb\0075\0035\0018", !1, !41} ; [ DW_TAG_lexical_block ]
+!41 = !{!"0xb\0075\005\0017", !1, !42} ; [ DW_TAG_lexical_block ]
+!42 = !{!"0xb\0071\0032\0016", !1, !43} ; [ DW_TAG_lexical_block ]
+!43 = !{!"0xb\0071\003\0015", !1, !25} ; [ DW_TAG_lexical_block ]
+!44 = !MDLocation(line: 75, column: 5, scope: !42)
+!45 = !MDLocation(line: 42, column: 2, scope: !46, inlinedAt: !48)
+!46 = !{!"0xb\0042\002\0020", !15, !47} ; [ DW_TAG_lexical_block ]
+!47 = !{!"0xb\0041\0028\0019", !15, !14} ; [ DW_TAG_lexical_block ]
+!48 = !MDLocation(line: 95, column: 3, scope: !25)
+!49 = !MDLocation(line: 99, column: 3, scope: !25)
+!50 = !{!0, !10, !14}
+!51 = !{!18}
+!52 = !{!19, !20, !24, !26, !27, !28, !29}
+!53 = !{!30}
+!54 = !{!"build2.c", !"/private/tmp"}
+!55 = !{!"/Volumes/Lalgate/work/llvm/projects/llvm-test/SingleSource/UnitTests/Vector/helpers.h", !"/private/tmp"}
+!56 = !{i32 1, !"Debug Info Version", i32 2}
diff --git a/test/CodeGen/ARM/debug-info-d16-reg.ll b/test/CodeGen/ARM/debug-info-d16-reg.ll
index 9791987..85b510f 100644
--- a/test/CodeGen/ARM/debug-info-d16-reg.ll
+++ b/test/CodeGen/ARM/debug-info-d16-reg.ll
@@ -12,9 +12,9 @@ target triple = "thumbv7-apple-darwin10"
define i32 @inlineprinter(i8* %ptr, double %val, i8 zeroext %c) nounwind optsize {
entry:
- tail call void @llvm.dbg.value(metadata !{i8* %ptr}, i64 0, metadata !19, metadata !{metadata !"0x102"}), !dbg !26
- tail call void @llvm.dbg.value(metadata !{double %val}, i64 0, metadata !20, metadata !{metadata !"0x102"}), !dbg !26
- tail call void @llvm.dbg.value(metadata !{i8 %c}, i64 0, metadata !21, metadata !{metadata !"0x102"}), !dbg !26
+ tail call void @llvm.dbg.value(metadata i8* %ptr, i64 0, metadata !19, metadata !{!"0x102"}), !dbg !26
+ tail call void @llvm.dbg.value(metadata double %val, i64 0, metadata !20, metadata !{!"0x102"}), !dbg !26
+ tail call void @llvm.dbg.value(metadata i8 %c, i64 0, metadata !21, metadata !{!"0x102"}), !dbg !26
%0 = zext i8 %c to i32, !dbg !27
%1 = tail call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([11 x i8]* @.str, i32 0, i32 0), i8* %ptr, double %val, i32 %0) nounwind, !dbg !27
ret i32 0, !dbg !29
@@ -22,9 +22,9 @@ entry:
define i32 @printer(i8* %ptr, double %val, i8 zeroext %c) nounwind optsize noinline {
entry:
- tail call void @llvm.dbg.value(metadata !{i8* %ptr}, i64 0, metadata !16, metadata !{metadata !"0x102"}), !dbg !30
- tail call void @llvm.dbg.value(metadata !{double %val}, i64 0, metadata !17, metadata !{metadata !"0x102"}), !dbg !30
- tail call void @llvm.dbg.value(metadata !{i8 %c}, i64 0, metadata !18, metadata !{metadata !"0x102"}), !dbg !30
+ tail call void @llvm.dbg.value(metadata i8* %ptr, i64 0, metadata !16, metadata !{!"0x102"}), !dbg !30
+ tail call void @llvm.dbg.value(metadata double %val, i64 0, metadata !17, metadata !{!"0x102"}), !dbg !30
+ tail call void @llvm.dbg.value(metadata i8 %c, i64 0, metadata !18, metadata !{!"0x102"}), !dbg !30
%0 = zext i8 %c to i32, !dbg !31
%1 = tail call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([11 x i8]* @.str, i32 0, i32 0), i8* %ptr, double %val, i32 %0) nounwind, !dbg !31
ret i32 0, !dbg !33
@@ -36,18 +36,18 @@ declare void @llvm.dbg.value(metadata, i64, metadata, metadata) nounwind readnon
define i32 @main(i32 %argc, i8** nocapture %argv) nounwind optsize {
entry:
- tail call void @llvm.dbg.value(metadata !{i32 %argc}, i64 0, metadata !22, metadata !{metadata !"0x102"}), !dbg !34
- tail call void @llvm.dbg.value(metadata !{i8** %argv}, i64 0, metadata !23, metadata !{metadata !"0x102"}), !dbg !34
+ tail call void @llvm.dbg.value(metadata i32 %argc, i64 0, metadata !22, metadata !{!"0x102"}), !dbg !34
+ tail call void @llvm.dbg.value(metadata i8** %argv, i64 0, metadata !23, metadata !{!"0x102"}), !dbg !34
%0 = sitofp i32 %argc to double, !dbg !35
%1 = fadd double %0, 5.555552e+05, !dbg !35
- tail call void @llvm.dbg.value(metadata !{double %1}, i64 0, metadata !24, metadata !{metadata !"0x102"}), !dbg !35
+ tail call void @llvm.dbg.value(metadata double %1, i64 0, metadata !24, metadata !{!"0x102"}), !dbg !35
%2 = tail call i32 @puts(i8* getelementptr inbounds ([6 x i8]* @.str1, i32 0, i32 0)) nounwind, !dbg !36
%3 = getelementptr inbounds i8* bitcast (i32 (i32, i8**)* @main to i8*), i32 %argc, !dbg !37
%4 = trunc i32 %argc to i8, !dbg !37
%5 = add i8 %4, 97, !dbg !37
- tail call void @llvm.dbg.value(metadata !{i8* %3}, i64 0, metadata !19, metadata !{metadata !"0x102"}) nounwind, !dbg !38
- tail call void @llvm.dbg.value(metadata !{double %1}, i64 0, metadata !20, metadata !{metadata !"0x102"}) nounwind, !dbg !38
- tail call void @llvm.dbg.value(metadata !{i8 %5}, i64 0, metadata !21, metadata !{metadata !"0x102"}) nounwind, !dbg !38
+ tail call void @llvm.dbg.value(metadata i8* %3, i64 0, metadata !19, metadata !{!"0x102"}) nounwind, !dbg !38
+ tail call void @llvm.dbg.value(metadata double %1, i64 0, metadata !20, metadata !{!"0x102"}) nounwind, !dbg !38
+ tail call void @llvm.dbg.value(metadata i8 %5, i64 0, metadata !21, metadata !{!"0x102"}) nounwind, !dbg !38
%6 = zext i8 %5 to i32, !dbg !39
%7 = tail call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([11 x i8]* @.str, i32 0, i32 0), i8* %3, double %1, i32 %6) nounwind, !dbg !39
%8 = tail call i32 @printer(i8* %3, double %1, i8 zeroext %5) nounwind, !dbg !40
@@ -59,52 +59,52 @@ declare i32 @puts(i8* nocapture) nounwind
!llvm.dbg.cu = !{!2}
!llvm.module.flags = !{!48}
-!0 = metadata !{metadata !"0x2e\00printer\00printer\00printer\0012\000\001\000\006\00256\001\0012", metadata !46, metadata !1, metadata !3, null, i32 (i8*, double, i8)* @printer, null, null, metadata !43} ; [ DW_TAG_subprogram ]
-!1 = metadata !{metadata !"0x29", metadata !46} ; [ DW_TAG_file_type ]
-!2 = metadata !{metadata !"0x11\001\00(LLVM build 00)\001\00\000\00\001", metadata !46, metadata !47, metadata !47, metadata !42, null, null} ; [ DW_TAG_compile_unit ]
-!3 = metadata !{metadata !"0x15\00\000\000\000\000\000\000", metadata !46, metadata !1, null, metadata !4, null, null, null} ; [ DW_TAG_subroutine_type ] [line 0, size 0, align 0, offset 0] [from ]
-!4 = metadata !{metadata !5, metadata !6, metadata !7, metadata !8}
-!5 = metadata !{metadata !"0x24\00int\000\0032\0032\000\000\005", metadata !46, metadata !1} ; [ DW_TAG_base_type ]
-!6 = metadata !{metadata !"0xf\00\000\0032\0032\000\000", metadata !46, metadata !1, null} ; [ DW_TAG_pointer_type ]
-!7 = metadata !{metadata !"0x24\00double\000\0064\0032\000\000\004", metadata !46, metadata !1} ; [ DW_TAG_base_type ]
-!8 = metadata !{metadata !"0x24\00unsigned char\000\008\008\000\000\008", metadata !46, metadata !1} ; [ DW_TAG_base_type ]
-!9 = metadata !{metadata !"0x2e\00inlineprinter\00inlineprinter\00inlineprinter\005\000\001\000\006\00256\001\005", metadata !46, metadata !1, metadata !3, null, i32 (i8*, double, i8)* @inlineprinter, null, null, metadata !44} ; [ DW_TAG_subprogram ]
-!10 = metadata !{metadata !"0x2e\00main\00main\00main\0018\000\001\000\006\00256\001\0018", metadata !46, metadata !1, metadata !11, null, i32 (i32, i8**)* @main, null, null, metadata !45} ; [ DW_TAG_subprogram ]
-!11 = metadata !{metadata !"0x15\00\000\000\000\000\000\000", metadata !46, metadata !1, null, metadata !12, null, null, null} ; [ DW_TAG_subroutine_type ] [line 0, size 0, align 0, offset 0] [from ]
-!12 = metadata !{metadata !5, metadata !5, metadata !13}
-!13 = metadata !{metadata !"0xf\00\000\0032\0032\000\000", metadata !46, metadata !1, metadata !14} ; [ DW_TAG_pointer_type ]
-!14 = metadata !{metadata !"0xf\00\000\0032\0032\000\000", metadata !46, metadata !1, metadata !15} ; [ DW_TAG_pointer_type ]
-!15 = metadata !{metadata !"0x24\00char\000\008\008\000\000\006", metadata !46, metadata !1} ; [ DW_TAG_base_type ]
-!16 = metadata !{metadata !"0x101\00ptr\0011\000", metadata !0, metadata !1, metadata !6} ; [ DW_TAG_arg_variable ]
-!17 = metadata !{metadata !"0x101\00val\0011\000", metadata !0, metadata !1, metadata !7} ; [ DW_TAG_arg_variable ]
-!18 = metadata !{metadata !"0x101\00c\0011\000", metadata !0, metadata !1, metadata !8} ; [ DW_TAG_arg_variable ]
-!19 = metadata !{metadata !"0x101\00ptr\004\000", metadata !9, metadata !1, metadata !6} ; [ DW_TAG_arg_variable ]
-!20 = metadata !{metadata !"0x101\00val\004\000", metadata !9, metadata !1, metadata !7} ; [ DW_TAG_arg_variable ]
-!21 = metadata !{metadata !"0x101\00c\004\000", metadata !9, metadata !1, metadata !8} ; [ DW_TAG_arg_variable ]
-!22 = metadata !{metadata !"0x101\00argc\0017\000", metadata !10, metadata !1, metadata !5} ; [ DW_TAG_arg_variable ]
-!23 = metadata !{metadata !"0x101\00argv\0017\000", metadata !10, metadata !1, metadata !13} ; [ DW_TAG_arg_variable ]
-!24 = metadata !{metadata !"0x100\00dval\0019\000", metadata !25, metadata !1, metadata !7} ; [ DW_TAG_auto_variable ]
-!25 = metadata !{metadata !"0xb\0018\000\002", metadata !46, metadata !10} ; [ DW_TAG_lexical_block ]
-!26 = metadata !{i32 4, i32 0, metadata !9, null}
-!27 = metadata !{i32 6, i32 0, metadata !28, null}
-!28 = metadata !{metadata !"0xb\005\000\001", metadata !46, metadata !9} ; [ DW_TAG_lexical_block ]
-!29 = metadata !{i32 7, i32 0, metadata !28, null}
-!30 = metadata !{i32 11, i32 0, metadata !0, null}
-!31 = metadata !{i32 13, i32 0, metadata !32, null}
-!32 = metadata !{metadata !"0xb\0012\000\000", metadata !46, metadata !0} ; [ DW_TAG_lexical_block ]
-!33 = metadata !{i32 14, i32 0, metadata !32, null}
-!34 = metadata !{i32 17, i32 0, metadata !10, null}
-!35 = metadata !{i32 19, i32 0, metadata !25, null}
-!36 = metadata !{i32 20, i32 0, metadata !25, null}
-!37 = metadata !{i32 21, i32 0, metadata !25, null}
-!38 = metadata !{i32 4, i32 0, metadata !9, metadata !37}
-!39 = metadata !{i32 6, i32 0, metadata !28, metadata !37}
-!40 = metadata !{i32 22, i32 0, metadata !25, null}
-!41 = metadata !{i32 23, i32 0, metadata !25, null}
-!42 = metadata !{metadata !0, metadata !9, metadata !10}
-!43 = metadata !{metadata !16, metadata !17, metadata !18}
-!44 = metadata !{metadata !19, metadata !20, metadata !21}
-!45 = metadata !{metadata !22, metadata !23, metadata !24}
-!46 = metadata !{metadata !"a.c", metadata !"/tmp/"}
-!47 = metadata !{i32 0}
-!48 = metadata !{i32 1, metadata !"Debug Info Version", i32 2}
+!0 = !{!"0x2e\00printer\00printer\00printer\0012\000\001\000\006\00256\001\0012", !46, !1, !3, null, i32 (i8*, double, i8)* @printer, null, null, !43} ; [ DW_TAG_subprogram ]
+!1 = !{!"0x29", !46} ; [ DW_TAG_file_type ]
+!2 = !{!"0x11\001\00(LLVM build 00)\001\00\000\00\001", !46, !47, !47, !42, null, null} ; [ DW_TAG_compile_unit ]
+!3 = !{!"0x15\00\000\000\000\000\000\000", !46, !1, null, !4, null, null, null} ; [ DW_TAG_subroutine_type ] [line 0, size 0, align 0, offset 0] [from ]
+!4 = !{!5, !6, !7, !8}
+!5 = !{!"0x24\00int\000\0032\0032\000\000\005", !46, !1} ; [ DW_TAG_base_type ]
+!6 = !{!"0xf\00\000\0032\0032\000\000", !46, !1, null} ; [ DW_TAG_pointer_type ]
+!7 = !{!"0x24\00double\000\0064\0032\000\000\004", !46, !1} ; [ DW_TAG_base_type ]
+!8 = !{!"0x24\00unsigned char\000\008\008\000\000\008", !46, !1} ; [ DW_TAG_base_type ]
+!9 = !{!"0x2e\00inlineprinter\00inlineprinter\00inlineprinter\005\000\001\000\006\00256\001\005", !46, !1, !3, null, i32 (i8*, double, i8)* @inlineprinter, null, null, !44} ; [ DW_TAG_subprogram ]
+!10 = !{!"0x2e\00main\00main\00main\0018\000\001\000\006\00256\001\0018", !46, !1, !11, null, i32 (i32, i8**)* @main, null, null, !45} ; [ DW_TAG_subprogram ]
+!11 = !{!"0x15\00\000\000\000\000\000\000", !46, !1, null, !12, null, null, null} ; [ DW_TAG_subroutine_type ] [line 0, size 0, align 0, offset 0] [from ]
+!12 = !{!5, !5, !13}
+!13 = !{!"0xf\00\000\0032\0032\000\000", !46, !1, !14} ; [ DW_TAG_pointer_type ]
+!14 = !{!"0xf\00\000\0032\0032\000\000", !46, !1, !15} ; [ DW_TAG_pointer_type ]
+!15 = !{!"0x24\00char\000\008\008\000\000\006", !46, !1} ; [ DW_TAG_base_type ]
+!16 = !{!"0x101\00ptr\0011\000", !0, !1, !6} ; [ DW_TAG_arg_variable ]
+!17 = !{!"0x101\00val\0011\000", !0, !1, !7} ; [ DW_TAG_arg_variable ]
+!18 = !{!"0x101\00c\0011\000", !0, !1, !8} ; [ DW_TAG_arg_variable ]
+!19 = !{!"0x101\00ptr\004\000", !9, !1, !6} ; [ DW_TAG_arg_variable ]
+!20 = !{!"0x101\00val\004\000", !9, !1, !7} ; [ DW_TAG_arg_variable ]
+!21 = !{!"0x101\00c\004\000", !9, !1, !8} ; [ DW_TAG_arg_variable ]
+!22 = !{!"0x101\00argc\0017\000", !10, !1, !5} ; [ DW_TAG_arg_variable ]
+!23 = !{!"0x101\00argv\0017\000", !10, !1, !13} ; [ DW_TAG_arg_variable ]
+!24 = !{!"0x100\00dval\0019\000", !25, !1, !7} ; [ DW_TAG_auto_variable ]
+!25 = !{!"0xb\0018\000\002", !46, !10} ; [ DW_TAG_lexical_block ]
+!26 = !MDLocation(line: 4, scope: !9)
+!27 = !MDLocation(line: 6, scope: !28)
+!28 = !{!"0xb\005\000\001", !46, !9} ; [ DW_TAG_lexical_block ]
+!29 = !MDLocation(line: 7, scope: !28)
+!30 = !MDLocation(line: 11, scope: !0)
+!31 = !MDLocation(line: 13, scope: !32)
+!32 = !{!"0xb\0012\000\000", !46, !0} ; [ DW_TAG_lexical_block ]
+!33 = !MDLocation(line: 14, scope: !32)
+!34 = !MDLocation(line: 17, scope: !10)
+!35 = !MDLocation(line: 19, scope: !25)
+!36 = !MDLocation(line: 20, scope: !25)
+!37 = !MDLocation(line: 21, scope: !25)
+!38 = !MDLocation(line: 4, scope: !9, inlinedAt: !37)
+!39 = !MDLocation(line: 6, scope: !28, inlinedAt: !37)
+!40 = !MDLocation(line: 22, scope: !25)
+!41 = !MDLocation(line: 23, scope: !25)
+!42 = !{!0, !9, !10}
+!43 = !{!16, !17, !18}
+!44 = !{!19, !20, !21}
+!45 = !{!22, !23, !24}
+!46 = !{!"a.c", !"/tmp/"}
+!47 = !{i32 0}
+!48 = !{i32 1, !"Debug Info Version", i32 2}
diff --git a/test/CodeGen/ARM/debug-info-qreg.ll b/test/CodeGen/ARM/debug-info-qreg.ll
index cfcefb8..c05df6a 100644
--- a/test/CodeGen/ARM/debug-info-qreg.ll
+++ b/test/CodeGen/ARM/debug-info-qreg.ll
@@ -2,13 +2,11 @@
target datalayout = "e-p:32:32:32-i1:8:32-i8:8:32-i16:16:32-i32:32:32-i64:32:32-f32:32:32-f64:32:32-v64:32:64-v128:32:128-a0:0:32-n32"
target triple = "thumbv7-apple-macosx10.6.7"
-;CHECK: sub-register
-;CHECK-NEXT: DW_OP_regx
+;CHECK: sub-register DW_OP_regx
;CHECK-NEXT: ascii
;CHECK-NEXT: DW_OP_piece
;CHECK-NEXT: byte 8
-;CHECK-NEXT: sub-register
-;CHECK-NEXT: DW_OP_regx
+;CHECK-NEXT: sub-register DW_OP_regx
;CHECK-NEXT: ascii
;CHECK-NEXT: DW_OP_piece
;CHECK-NEXT: byte 8
@@ -26,7 +24,7 @@ for.body9: ; preds = %for.body9, %entry
br i1 undef, label %for.end54, label %for.body9, !dbg !44
for.end54: ; preds = %for.body9
- tail call void @llvm.dbg.value(metadata !{<4 x float> %add19}, i64 0, metadata !27, metadata !{metadata !"0x102"}), !dbg !39
+ tail call void @llvm.dbg.value(metadata <4 x float> %add19, i64 0, metadata !27, metadata !{!"0x102"}), !dbg !39
%tmp115 = extractelement <4 x float> %add19, i32 1
%conv6.i75 = fpext float %tmp115 to double, !dbg !45
%call.i82 = tail call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([13 x i8]* @.str, i32 0, i32 0), double undef, double %conv6.i75, double undef, double undef) nounwind, !dbg !45
@@ -40,60 +38,60 @@ declare void @llvm.dbg.value(metadata, i64, metadata, metadata) nounwind readnon
!llvm.dbg.cu = !{!2}
!llvm.module.flags = !{!56}
-!0 = metadata !{metadata !"0x2e\00test0001\00test0001\00\003\000\001\000\006\00256\001\003", metadata !54, metadata !1, metadata !3, null, <4 x float> (float)* @test0001, null, null, metadata !51} ; [ DW_TAG_subprogram ] [line 3] [def] [test0001]
-!1 = metadata !{metadata !"0x29", metadata !54} ; [ DW_TAG_file_type ]
-!2 = metadata !{metadata !"0x11\0012\00clang version 3.0 (trunk 129915)\001\00\000\00\001", metadata !54, metadata !17, metadata !17, metadata !50, null, null} ; [ DW_TAG_compile_unit ]
-!3 = metadata !{metadata !"0x15\00\000\000\000\000\000\000", metadata !54, metadata !1, null, metadata !4, null, null, null} ; [ DW_TAG_subroutine_type ] [line 0, size 0, align 0, offset 0] [from ]
-!4 = metadata !{metadata !5}
-!5 = metadata !{metadata !"0x16\00v4f32\0014\000\000\000\000", metadata !54, metadata !2, metadata !6} ; [ DW_TAG_typedef ]
-!6 = metadata !{metadata !"0x1\00\000\00128\00128\000\000", metadata !2, null, metadata !7, metadata !8, i32 0, null, null, null} ; [ DW_TAG_array_type ] [line 0, size 128, align 128, offset 0] [from float]
-!7 = metadata !{metadata !"0x24\00float\000\0032\0032\000\000\004", null, metadata !2} ; [ DW_TAG_base_type ]
-!8 = metadata !{metadata !9}
-!9 = metadata !{metadata !"0x21\000\004"} ; [ DW_TAG_subrange_type ]
-!10 = metadata !{metadata !"0x2e\00main\00main\00\0059\000\001\000\006\00256\001\0059", metadata !54, metadata !1, metadata !11, null, i32 (i32, i8**)* @main, null, null, metadata !52} ; [ DW_TAG_subprogram ] [line 59] [def] [main]
-!11 = metadata !{metadata !"0x15\00\000\000\000\000\000\000", metadata !54, metadata !1, null, metadata !12, null, null, null} ; [ DW_TAG_subroutine_type ] [line 0, size 0, align 0, offset 0] [from ]
-!12 = metadata !{metadata !13}
-!13 = metadata !{metadata !"0x24\00int\000\0032\0032\000\000\005", null, metadata !2} ; [ DW_TAG_base_type ]
-!14 = metadata !{metadata !"0x2e\00printFV\00printFV\00\0041\001\001\000\006\00256\001\0041", metadata !55, metadata !15, metadata !16, null, null, null, null, metadata !53} ; [ DW_TAG_subprogram ] [line 41] [local] [def] [printFV]
-!15 = metadata !{metadata !"0x29", metadata !55} ; [ DW_TAG_file_type ]
-!16 = metadata !{metadata !"0x15\00\000\000\000\000\000\000", metadata !55, metadata !15, null, metadata !17, null, null, null} ; [ DW_TAG_subroutine_type ] [line 0, size 0, align 0, offset 0] [from ]
-!17 = metadata !{null}
-!18 = metadata !{metadata !"0x101\00a\0016777219\000", metadata !0, metadata !1, metadata !7} ; [ DW_TAG_arg_variable ]
-!19 = metadata !{metadata !"0x101\00argc\0016777275\000", metadata !10, metadata !1, metadata !13} ; [ DW_TAG_arg_variable ]
-!20 = metadata !{metadata !"0x101\00argv\0033554491\000", metadata !10, metadata !1, metadata !21} ; [ DW_TAG_arg_variable ]
-!21 = metadata !{metadata !"0xf\00\000\0032\0032\000\000", null, metadata !2, metadata !22} ; [ DW_TAG_pointer_type ]
-!22 = metadata !{metadata !"0xf\00\000\0032\0032\000\000", null, metadata !2, metadata !23} ; [ DW_TAG_pointer_type ]
-!23 = metadata !{metadata !"0x24\00char\000\008\008\000\000\006", null, metadata !2} ; [ DW_TAG_base_type ]
-!24 = metadata !{metadata !"0x100\00i\0060\000", metadata !25, metadata !1, metadata !13} ; [ DW_TAG_auto_variable ]
-!25 = metadata !{metadata !"0xb\0059\0033\0014", metadata !54, metadata !10} ; [ DW_TAG_lexical_block ]
-!26 = metadata !{metadata !"0x100\00j\0060\000", metadata !25, metadata !1, metadata !13} ; [ DW_TAG_auto_variable ]
-!27 = metadata !{metadata !"0x100\00x\0061\000", metadata !25, metadata !1, metadata !5} ; [ DW_TAG_auto_variable ]
-!28 = metadata !{metadata !"0x100\00y\0062\000", metadata !25, metadata !1, metadata !5} ; [ DW_TAG_auto_variable ]
-!29 = metadata !{metadata !"0x100\00z\0063\000", metadata !25, metadata !1, metadata !5} ; [ DW_TAG_auto_variable ]
-!30 = metadata !{metadata !"0x101\00F\0016777257\000", metadata !14, metadata !15, metadata !31} ; [ DW_TAG_arg_variable ]
-!31 = metadata !{metadata !"0xf\00\000\0032\0032\000\000", null, metadata !2, metadata !32} ; [ DW_TAG_pointer_type ]
-!32 = metadata !{metadata !"0x16\00FV\0025\000\000\000\000", metadata !55, metadata !2, metadata !33} ; [ DW_TAG_typedef ]
-!33 = metadata !{metadata !"0x17\00\0022\00128\00128\000\000\000", metadata !55, metadata !2, i32 0, metadata !34, null} ; [ DW_TAG_union_type ]
-!34 = metadata !{metadata !35, metadata !37}
-!35 = metadata !{metadata !"0xd\00V\0023\00128\00128\000\000", metadata !55, metadata !15, metadata !36} ; [ DW_TAG_member ]
-!36 = metadata !{metadata !"0x16\00v4sf\003\000\000\000\000", metadata !55, metadata !2, metadata !6} ; [ DW_TAG_typedef ]
-!37 = metadata !{metadata !"0xd\00A\0024\00128\0032\000\000", metadata !55, metadata !15, metadata !38} ; [ DW_TAG_member ]
-!38 = metadata !{metadata !"0x1\00\000\00128\0032\000\000", null, metadata !2, metadata !7, metadata !8, i32 0, i32 0} ; [ DW_TAG_array_type ]
-!39 = metadata !{i32 79, i32 7, metadata !40, null}
-!40 = metadata !{metadata !"0xb\0075\0035\0018", metadata !54, metadata !41} ; [ DW_TAG_lexical_block ]
-!41 = metadata !{metadata !"0xb\0075\005\0017", metadata !54, metadata !42} ; [ DW_TAG_lexical_block ]
-!42 = metadata !{metadata !"0xb\0071\0032\0016", metadata !54, metadata !43} ; [ DW_TAG_lexical_block ]
-!43 = metadata !{metadata !"0xb\0071\003\0015", metadata !54, metadata !25} ; [ DW_TAG_lexical_block ]
-!44 = metadata !{i32 75, i32 5, metadata !42, null}
-!45 = metadata !{i32 42, i32 2, metadata !46, metadata !48}
-!46 = metadata !{metadata !"0xb\0042\002\0020", metadata !55, metadata !47} ; [ DW_TAG_lexical_block ]
-!47 = metadata !{metadata !"0xb\0041\0028\0019", metadata !55, metadata !14} ; [ DW_TAG_lexical_block ]
-!48 = metadata !{i32 95, i32 3, metadata !25, null}
-!49 = metadata !{i32 99, i32 3, metadata !25, null}
-!50 = metadata !{metadata !0, metadata !10, metadata !14}
-!51 = metadata !{metadata !18}
-!52 = metadata !{metadata !19, metadata !20, metadata !24, metadata !26, metadata !27, metadata !28, metadata !29}
-!53 = metadata !{metadata !30}
-!54 = metadata !{metadata !"build2.c", metadata !"/private/tmp"}
-!55 = metadata !{metadata !"/Volumes/Lalgate/work/llvm/projects/llvm-test/SingleSource/UnitTests/Vector/helpers.h", metadata !"/private/tmp"}
-!56 = metadata !{i32 1, metadata !"Debug Info Version", i32 2}
+!0 = !{!"0x2e\00test0001\00test0001\00\003\000\001\000\006\00256\001\003", !54, !1, !3, null, <4 x float> (float)* @test0001, null, null, !51} ; [ DW_TAG_subprogram ] [line 3] [def] [test0001]
+!1 = !{!"0x29", !54} ; [ DW_TAG_file_type ]
+!2 = !{!"0x11\0012\00clang version 3.0 (trunk 129915)\001\00\000\00\001", !54, !17, !17, !50, null, null} ; [ DW_TAG_compile_unit ]
+!3 = !{!"0x15\00\000\000\000\000\000\000", !54, !1, null, !4, null, null, null} ; [ DW_TAG_subroutine_type ] [line 0, size 0, align 0, offset 0] [from ]
+!4 = !{!5}
+!5 = !{!"0x16\00v4f32\0014\000\000\000\000", !54, !2, !6} ; [ DW_TAG_typedef ]
+!6 = !{!"0x1\00\000\00128\00128\000\000", !2, null, !7, !8, i32 0, null, null, null} ; [ DW_TAG_array_type ] [line 0, size 128, align 128, offset 0] [from float]
+!7 = !{!"0x24\00float\000\0032\0032\000\000\004", null, !2} ; [ DW_TAG_base_type ]
+!8 = !{!9}
+!9 = !{!"0x21\000\004"} ; [ DW_TAG_subrange_type ]
+!10 = !{!"0x2e\00main\00main\00\0059\000\001\000\006\00256\001\0059", !54, !1, !11, null, i32 (i32, i8**)* @main, null, null, !52} ; [ DW_TAG_subprogram ] [line 59] [def] [main]
+!11 = !{!"0x15\00\000\000\000\000\000\000", !54, !1, null, !12, null, null, null} ; [ DW_TAG_subroutine_type ] [line 0, size 0, align 0, offset 0] [from ]
+!12 = !{!13}
+!13 = !{!"0x24\00int\000\0032\0032\000\000\005", null, !2} ; [ DW_TAG_base_type ]
+!14 = !{!"0x2e\00printFV\00printFV\00\0041\001\001\000\006\00256\001\0041", !55, !15, !16, null, null, null, null, !53} ; [ DW_TAG_subprogram ] [line 41] [local] [def] [printFV]
+!15 = !{!"0x29", !55} ; [ DW_TAG_file_type ]
+!16 = !{!"0x15\00\000\000\000\000\000\000", !55, !15, null, !17, null, null, null} ; [ DW_TAG_subroutine_type ] [line 0, size 0, align 0, offset 0] [from ]
+!17 = !{null}
+!18 = !{!"0x101\00a\0016777219\000", !0, !1, !7} ; [ DW_TAG_arg_variable ]
+!19 = !{!"0x101\00argc\0016777275\000", !10, !1, !13} ; [ DW_TAG_arg_variable ]
+!20 = !{!"0x101\00argv\0033554491\000", !10, !1, !21} ; [ DW_TAG_arg_variable ]
+!21 = !{!"0xf\00\000\0032\0032\000\000", null, !2, !22} ; [ DW_TAG_pointer_type ]
+!22 = !{!"0xf\00\000\0032\0032\000\000", null, !2, !23} ; [ DW_TAG_pointer_type ]
+!23 = !{!"0x24\00char\000\008\008\000\000\006", null, !2} ; [ DW_TAG_base_type ]
+!24 = !{!"0x100\00i\0060\000", !25, !1, !13} ; [ DW_TAG_auto_variable ]
+!25 = !{!"0xb\0059\0033\0014", !54, !10} ; [ DW_TAG_lexical_block ]
+!26 = !{!"0x100\00j\0060\000", !25, !1, !13} ; [ DW_TAG_auto_variable ]
+!27 = !{!"0x100\00x\0061\000", !25, !1, !5} ; [ DW_TAG_auto_variable ]
+!28 = !{!"0x100\00y\0062\000", !25, !1, !5} ; [ DW_TAG_auto_variable ]
+!29 = !{!"0x100\00z\0063\000", !25, !1, !5} ; [ DW_TAG_auto_variable ]
+!30 = !{!"0x101\00F\0016777257\000", !14, !15, !31} ; [ DW_TAG_arg_variable ]
+!31 = !{!"0xf\00\000\0032\0032\000\000", null, !2, !32} ; [ DW_TAG_pointer_type ]
+!32 = !{!"0x16\00FV\0025\000\000\000\000", !55, !2, !33} ; [ DW_TAG_typedef ]
+!33 = !{!"0x17\00\0022\00128\00128\000\000\000", !55, !2, i32 0, !34, null} ; [ DW_TAG_union_type ]
+!34 = !{!35, !37}
+!35 = !{!"0xd\00V\0023\00128\00128\000\000", !55, !15, !36} ; [ DW_TAG_member ]
+!36 = !{!"0x16\00v4sf\003\000\000\000\000", !55, !2, !6} ; [ DW_TAG_typedef ]
+!37 = !{!"0xd\00A\0024\00128\0032\000\000", !55, !15, !38} ; [ DW_TAG_member ]
+!38 = !{!"0x1\00\000\00128\0032\000\000", null, !2, !7, !8, i32 0, i32 0} ; [ DW_TAG_array_type ]
+!39 = !MDLocation(line: 79, column: 7, scope: !40)
+!40 = !{!"0xb\0075\0035\0018", !54, !41} ; [ DW_TAG_lexical_block ]
+!41 = !{!"0xb\0075\005\0017", !54, !42} ; [ DW_TAG_lexical_block ]
+!42 = !{!"0xb\0071\0032\0016", !54, !43} ; [ DW_TAG_lexical_block ]
+!43 = !{!"0xb\0071\003\0015", !54, !25} ; [ DW_TAG_lexical_block ]
+!44 = !MDLocation(line: 75, column: 5, scope: !42)
+!45 = !MDLocation(line: 42, column: 2, scope: !46, inlinedAt: !48)
+!46 = !{!"0xb\0042\002\0020", !55, !47} ; [ DW_TAG_lexical_block ]
+!47 = !{!"0xb\0041\0028\0019", !55, !14} ; [ DW_TAG_lexical_block ]
+!48 = !MDLocation(line: 95, column: 3, scope: !25)
+!49 = !MDLocation(line: 99, column: 3, scope: !25)
+!50 = !{!0, !10, !14}
+!51 = !{!18}
+!52 = !{!19, !20, !24, !26, !27, !28, !29}
+!53 = !{!30}
+!54 = !{!"build2.c", !"/private/tmp"}
+!55 = !{!"/Volumes/Lalgate/work/llvm/projects/llvm-test/SingleSource/UnitTests/Vector/helpers.h", !"/private/tmp"}
+!56 = !{i32 1, !"Debug Info Version", i32 2}
diff --git a/test/CodeGen/ARM/debug-info-s16-reg.ll b/test/CodeGen/ARM/debug-info-s16-reg.ll
index 6bd7172..9b303dd 100644
--- a/test/CodeGen/ARM/debug-info-s16-reg.ll
+++ b/test/CodeGen/ARM/debug-info-s16-reg.ll
@@ -1,8 +1,7 @@
; RUN: llc < %s - | FileCheck %s
; Radar 9309221
; Test dwarf reg no for s16
-;CHECK: super-register
-;CHECK-NEXT: DW_OP_regx
+;CHECK: super-register DW_OP_regx
;CHECK-NEXT: ascii
;CHECK-NEXT: DW_OP_piece
;CHECK-NEXT: 4
@@ -15,9 +14,9 @@ target triple = "thumbv7-apple-macosx10.6.7"
define i32 @inlineprinter(i8* %ptr, float %val, i8 zeroext %c) nounwind optsize ssp {
entry:
- tail call void @llvm.dbg.value(metadata !{i8* %ptr}, i64 0, metadata !8, metadata !{metadata !"0x102"}), !dbg !24
- tail call void @llvm.dbg.value(metadata !{float %val}, i64 0, metadata !10, metadata !{metadata !"0x102"}), !dbg !25
- tail call void @llvm.dbg.value(metadata !{i8 %c}, i64 0, metadata !12, metadata !{metadata !"0x102"}), !dbg !26
+ tail call void @llvm.dbg.value(metadata i8* %ptr, i64 0, metadata !8, metadata !{!"0x102"}), !dbg !24
+ tail call void @llvm.dbg.value(metadata float %val, i64 0, metadata !10, metadata !{!"0x102"}), !dbg !25
+ tail call void @llvm.dbg.value(metadata i8 %c, i64 0, metadata !12, metadata !{!"0x102"}), !dbg !26
%conv = fpext float %val to double, !dbg !27
%conv3 = zext i8 %c to i32, !dbg !27
%call = tail call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([11 x i8]* @.str, i32 0, i32 0), i8* %ptr, double %conv, i32 %conv3) nounwind optsize, !dbg !27
@@ -28,9 +27,9 @@ declare i32 @printf(i8* nocapture, ...) nounwind optsize
define i32 @printer(i8* %ptr, float %val, i8 zeroext %c) nounwind optsize noinline ssp {
entry:
- tail call void @llvm.dbg.value(metadata !{i8* %ptr}, i64 0, metadata !14, metadata !{metadata !"0x102"}), !dbg !30
- tail call void @llvm.dbg.value(metadata !{float %val}, i64 0, metadata !15, metadata !{metadata !"0x102"}), !dbg !31
- tail call void @llvm.dbg.value(metadata !{i8 %c}, i64 0, metadata !16, metadata !{metadata !"0x102"}), !dbg !32
+ tail call void @llvm.dbg.value(metadata i8* %ptr, i64 0, metadata !14, metadata !{!"0x102"}), !dbg !30
+ tail call void @llvm.dbg.value(metadata float %val, i64 0, metadata !15, metadata !{!"0x102"}), !dbg !31
+ tail call void @llvm.dbg.value(metadata i8 %c, i64 0, metadata !16, metadata !{!"0x102"}), !dbg !32
%conv = fpext float %val to double, !dbg !33
%conv3 = zext i8 %c to i32, !dbg !33
%call = tail call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([11 x i8]* @.str, i32 0, i32 0), i8* %ptr, double %conv, i32 %conv3) nounwind optsize, !dbg !33
@@ -39,19 +38,19 @@ entry:
define i32 @main(i32 %argc, i8** nocapture %argv) nounwind optsize ssp {
entry:
- tail call void @llvm.dbg.value(metadata !{i32 %argc}, i64 0, metadata !17, metadata !{metadata !"0x102"}), !dbg !36
- tail call void @llvm.dbg.value(metadata !{i8** %argv}, i64 0, metadata !18, metadata !{metadata !"0x102"}), !dbg !37
+ tail call void @llvm.dbg.value(metadata i32 %argc, i64 0, metadata !17, metadata !{!"0x102"}), !dbg !36
+ tail call void @llvm.dbg.value(metadata i8** %argv, i64 0, metadata !18, metadata !{!"0x102"}), !dbg !37
%conv = sitofp i32 %argc to double, !dbg !38
%add = fadd double %conv, 5.555552e+05, !dbg !38
%conv1 = fptrunc double %add to float, !dbg !38
- tail call void @llvm.dbg.value(metadata !{float %conv1}, i64 0, metadata !22, metadata !{metadata !"0x102"}), !dbg !38
+ tail call void @llvm.dbg.value(metadata float %conv1, i64 0, metadata !22, metadata !{!"0x102"}), !dbg !38
%call = tail call i32 @puts(i8* getelementptr inbounds ([6 x i8]* @.str1, i32 0, i32 0)) nounwind optsize, !dbg !39
%add.ptr = getelementptr i8* bitcast (i32 (i32, i8**)* @main to i8*), i32 %argc, !dbg !40
%add5 = add nsw i32 %argc, 97, !dbg !40
%conv6 = trunc i32 %add5 to i8, !dbg !40
- tail call void @llvm.dbg.value(metadata !{i8* %add.ptr}, i64 0, metadata !8, metadata !{metadata !"0x102"}) nounwind, !dbg !41
- tail call void @llvm.dbg.value(metadata !{float %conv1}, i64 0, metadata !10, metadata !{metadata !"0x102"}) nounwind, !dbg !42
- tail call void @llvm.dbg.value(metadata !{i8 %conv6}, i64 0, metadata !12, metadata !{metadata !"0x102"}) nounwind, !dbg !43
+ tail call void @llvm.dbg.value(metadata i8* %add.ptr, i64 0, metadata !8, metadata !{!"0x102"}) nounwind, !dbg !41
+ tail call void @llvm.dbg.value(metadata float %conv1, i64 0, metadata !10, metadata !{!"0x102"}) nounwind, !dbg !42
+ tail call void @llvm.dbg.value(metadata i8 %conv6, i64 0, metadata !12, metadata !{!"0x102"}) nounwind, !dbg !43
%conv.i = fpext float %conv1 to double, !dbg !44
%conv3.i = and i32 %add5, 255, !dbg !44
%call.i = tail call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([11 x i8]* @.str, i32 0, i32 0), i8* %add.ptr, double %conv.i, i32 %conv3.i) nounwind optsize, !dbg !44
@@ -66,57 +65,57 @@ declare void @llvm.dbg.value(metadata, i64, metadata, metadata) nounwind readnon
!llvm.dbg.cu = !{!2}
!llvm.module.flags = !{!53}
-!0 = metadata !{metadata !"0x2e\00inlineprinter\00inlineprinter\00\005\000\001\000\006\00256\001\005", metadata !51, metadata !1, metadata !3, null, i32 (i8*, float, i8)* @inlineprinter, null, null, metadata !48} ; [ DW_TAG_subprogram ] [line 5] [def] [inlineprinter]
-!1 = metadata !{metadata !"0x29", metadata !51} ; [ DW_TAG_file_type ]
-!2 = metadata !{metadata !"0x11\0012\00clang version 3.0 (trunk 129915)\001\00\000\00\001", metadata !51, metadata !52, metadata !52, metadata !47, null, null} ; [ DW_TAG_compile_unit ]
-!3 = metadata !{metadata !"0x15\00\000\000\000\000\000\000", metadata !51, metadata !1, null, metadata !4, null, null, null} ; [ DW_TAG_subroutine_type ] [line 0, size 0, align 0, offset 0] [from ]
-!4 = metadata !{metadata !5}
-!5 = metadata !{metadata !"0x24\00int\000\0032\0032\000\000\005", null, metadata !2} ; [ DW_TAG_base_type ]
-!6 = metadata !{metadata !"0x2e\00printer\00printer\00\0012\000\001\000\006\00256\001\0012", metadata !51, metadata !1, metadata !3, null, i32 (i8*, float, i8)* @printer, null, null, metadata !49} ; [ DW_TAG_subprogram ] [line 12] [def] [printer]
-!7 = metadata !{metadata !"0x2e\00main\00main\00\0018\000\001\000\006\00256\001\0018", metadata !51, metadata !1, metadata !3, null, i32 (i32, i8**)* @main, null, null, metadata !50} ; [ DW_TAG_subprogram ] [line 18] [def] [main]
-!8 = metadata !{metadata !"0x101\00ptr\0016777220\000", metadata !0, metadata !1, metadata !9} ; [ DW_TAG_arg_variable ]
-!9 = metadata !{metadata !"0xf\00\000\0032\0032\000\000", null, metadata !2, null} ; [ DW_TAG_pointer_type ]
-!10 = metadata !{metadata !"0x101\00val\0033554436\000", metadata !0, metadata !1, metadata !11} ; [ DW_TAG_arg_variable ]
-!11 = metadata !{metadata !"0x24\00float\000\0032\0032\000\000\004", null, metadata !2} ; [ DW_TAG_base_type ]
-!12 = metadata !{metadata !"0x101\00c\0050331652\000", metadata !0, metadata !1, metadata !13} ; [ DW_TAG_arg_variable ]
-!13 = metadata !{metadata !"0x24\00unsigned char\000\008\008\000\000\008", null, metadata !2} ; [ DW_TAG_base_type ]
-!14 = metadata !{metadata !"0x101\00ptr\0016777227\000", metadata !6, metadata !1, metadata !9} ; [ DW_TAG_arg_variable ]
-!15 = metadata !{metadata !"0x101\00val\0033554443\000", metadata !6, metadata !1, metadata !11} ; [ DW_TAG_arg_variable ]
-!16 = metadata !{metadata !"0x101\00c\0050331659\000", metadata !6, metadata !1, metadata !13} ; [ DW_TAG_arg_variable ]
-!17 = metadata !{metadata !"0x101\00argc\0016777233\000", metadata !7, metadata !1, metadata !5} ; [ DW_TAG_arg_variable ]
-!18 = metadata !{metadata !"0x101\00argv\0033554449\000", metadata !7, metadata !1, metadata !19} ; [ DW_TAG_arg_variable ]
-!19 = metadata !{metadata !"0xf\00\000\0032\0032\000\000", null, metadata !2, metadata !20} ; [ DW_TAG_pointer_type ]
-!20 = metadata !{metadata !"0xf\00\000\0032\0032\000\000", null, metadata !2, metadata !21} ; [ DW_TAG_pointer_type ]
-!21 = metadata !{metadata !"0x24\00char\000\008\008\000\000\006", null, metadata !2} ; [ DW_TAG_base_type ]
-!22 = metadata !{metadata !"0x100\00dval\0019\000", metadata !23, metadata !1, metadata !11} ; [ DW_TAG_auto_variable ]
-!23 = metadata !{metadata !"0xb\0018\001\002", metadata !51, metadata !7} ; [ DW_TAG_lexical_block ]
-!24 = metadata !{i32 4, i32 22, metadata !0, null}
-!25 = metadata !{i32 4, i32 33, metadata !0, null}
-!26 = metadata !{i32 4, i32 52, metadata !0, null}
-!27 = metadata !{i32 6, i32 3, metadata !28, null}
-!28 = metadata !{metadata !"0xb\005\001\000", metadata !51, metadata !0} ; [ DW_TAG_lexical_block ]
-!29 = metadata !{i32 7, i32 3, metadata !28, null}
-!30 = metadata !{i32 11, i32 42, metadata !6, null}
-!31 = metadata !{i32 11, i32 53, metadata !6, null}
-!32 = metadata !{i32 11, i32 72, metadata !6, null}
-!33 = metadata !{i32 13, i32 3, metadata !34, null}
-!34 = metadata !{metadata !"0xb\0012\001\001", metadata !51, metadata !6} ; [ DW_TAG_lexical_block ]
-!35 = metadata !{i32 14, i32 3, metadata !34, null}
-!36 = metadata !{i32 17, i32 15, metadata !7, null}
-!37 = metadata !{i32 17, i32 28, metadata !7, null}
-!38 = metadata !{i32 19, i32 31, metadata !23, null}
-!39 = metadata !{i32 20, i32 3, metadata !23, null}
-!40 = metadata !{i32 21, i32 3, metadata !23, null}
-!41 = metadata !{i32 4, i32 22, metadata !0, metadata !40}
-!42 = metadata !{i32 4, i32 33, metadata !0, metadata !40}
-!43 = metadata !{i32 4, i32 52, metadata !0, metadata !40}
-!44 = metadata !{i32 6, i32 3, metadata !28, metadata !40}
-!45 = metadata !{i32 22, i32 3, metadata !23, null}
-!46 = metadata !{i32 23, i32 1, metadata !23, null}
-!47 = metadata !{metadata !0, metadata !6, metadata !7}
-!48 = metadata !{metadata !8, metadata !10, metadata !12}
-!49 = metadata !{metadata !14, metadata !15, metadata !16}
-!50 = metadata !{metadata !17, metadata !18, metadata !22}
-!51 = metadata !{metadata !"a.c", metadata !"/private/tmp"}
-!52 = metadata !{i32 0}
-!53 = metadata !{i32 1, metadata !"Debug Info Version", i32 2}
+!0 = !{!"0x2e\00inlineprinter\00inlineprinter\00\005\000\001\000\006\00256\001\005", !51, !1, !3, null, i32 (i8*, float, i8)* @inlineprinter, null, null, !48} ; [ DW_TAG_subprogram ] [line 5] [def] [inlineprinter]
+!1 = !{!"0x29", !51} ; [ DW_TAG_file_type ]
+!2 = !{!"0x11\0012\00clang version 3.0 (trunk 129915)\001\00\000\00\001", !51, !52, !52, !47, null, null} ; [ DW_TAG_compile_unit ]
+!3 = !{!"0x15\00\000\000\000\000\000\000", !51, !1, null, !4, null, null, null} ; [ DW_TAG_subroutine_type ] [line 0, size 0, align 0, offset 0] [from ]
+!4 = !{!5}
+!5 = !{!"0x24\00int\000\0032\0032\000\000\005", null, !2} ; [ DW_TAG_base_type ]
+!6 = !{!"0x2e\00printer\00printer\00\0012\000\001\000\006\00256\001\0012", !51, !1, !3, null, i32 (i8*, float, i8)* @printer, null, null, !49} ; [ DW_TAG_subprogram ] [line 12] [def] [printer]
+!7 = !{!"0x2e\00main\00main\00\0018\000\001\000\006\00256\001\0018", !51, !1, !3, null, i32 (i32, i8**)* @main, null, null, !50} ; [ DW_TAG_subprogram ] [line 18] [def] [main]
+!8 = !{!"0x101\00ptr\0016777220\000", !0, !1, !9} ; [ DW_TAG_arg_variable ]
+!9 = !{!"0xf\00\000\0032\0032\000\000", null, !2, null} ; [ DW_TAG_pointer_type ]
+!10 = !{!"0x101\00val\0033554436\000", !0, !1, !11} ; [ DW_TAG_arg_variable ]
+!11 = !{!"0x24\00float\000\0032\0032\000\000\004", null, !2} ; [ DW_TAG_base_type ]
+!12 = !{!"0x101\00c\0050331652\000", !0, !1, !13} ; [ DW_TAG_arg_variable ]
+!13 = !{!"0x24\00unsigned char\000\008\008\000\000\008", null, !2} ; [ DW_TAG_base_type ]
+!14 = !{!"0x101\00ptr\0016777227\000", !6, !1, !9} ; [ DW_TAG_arg_variable ]
+!15 = !{!"0x101\00val\0033554443\000", !6, !1, !11} ; [ DW_TAG_arg_variable ]
+!16 = !{!"0x101\00c\0050331659\000", !6, !1, !13} ; [ DW_TAG_arg_variable ]
+!17 = !{!"0x101\00argc\0016777233\000", !7, !1, !5} ; [ DW_TAG_arg_variable ]
+!18 = !{!"0x101\00argv\0033554449\000", !7, !1, !19} ; [ DW_TAG_arg_variable ]
+!19 = !{!"0xf\00\000\0032\0032\000\000", null, !2, !20} ; [ DW_TAG_pointer_type ]
+!20 = !{!"0xf\00\000\0032\0032\000\000", null, !2, !21} ; [ DW_TAG_pointer_type ]
+!21 = !{!"0x24\00char\000\008\008\000\000\006", null, !2} ; [ DW_TAG_base_type ]
+!22 = !{!"0x100\00dval\0019\000", !23, !1, !11} ; [ DW_TAG_auto_variable ]
+!23 = !{!"0xb\0018\001\002", !51, !7} ; [ DW_TAG_lexical_block ]
+!24 = !MDLocation(line: 4, column: 22, scope: !0)
+!25 = !MDLocation(line: 4, column: 33, scope: !0)
+!26 = !MDLocation(line: 4, column: 52, scope: !0)
+!27 = !MDLocation(line: 6, column: 3, scope: !28)
+!28 = !{!"0xb\005\001\000", !51, !0} ; [ DW_TAG_lexical_block ]
+!29 = !MDLocation(line: 7, column: 3, scope: !28)
+!30 = !MDLocation(line: 11, column: 42, scope: !6)
+!31 = !MDLocation(line: 11, column: 53, scope: !6)
+!32 = !MDLocation(line: 11, column: 72, scope: !6)
+!33 = !MDLocation(line: 13, column: 3, scope: !34)
+!34 = !{!"0xb\0012\001\001", !51, !6} ; [ DW_TAG_lexical_block ]
+!35 = !MDLocation(line: 14, column: 3, scope: !34)
+!36 = !MDLocation(line: 17, column: 15, scope: !7)
+!37 = !MDLocation(line: 17, column: 28, scope: !7)
+!38 = !MDLocation(line: 19, column: 31, scope: !23)
+!39 = !MDLocation(line: 20, column: 3, scope: !23)
+!40 = !MDLocation(line: 21, column: 3, scope: !23)
+!41 = !MDLocation(line: 4, column: 22, scope: !0, inlinedAt: !40)
+!42 = !MDLocation(line: 4, column: 33, scope: !0, inlinedAt: !40)
+!43 = !MDLocation(line: 4, column: 52, scope: !0, inlinedAt: !40)
+!44 = !MDLocation(line: 6, column: 3, scope: !28, inlinedAt: !40)
+!45 = !MDLocation(line: 22, column: 3, scope: !23)
+!46 = !MDLocation(line: 23, column: 1, scope: !23)
+!47 = !{!0, !6, !7}
+!48 = !{!8, !10, !12}
+!49 = !{!14, !15, !16}
+!50 = !{!17, !18, !22}
+!51 = !{!"a.c", !"/private/tmp"}
+!52 = !{i32 0}
+!53 = !{i32 1, !"Debug Info Version", i32 2}
diff --git a/test/CodeGen/ARM/debug-info-sreg2.ll b/test/CodeGen/ARM/debug-info-sreg2.ll
index 4374b9e..977a6f2 100644
--- a/test/CodeGen/ARM/debug-info-sreg2.ll
+++ b/test/CodeGen/ARM/debug-info-sreg2.ll
@@ -15,7 +15,7 @@ target triple = "thumbv7-apple-macosx10.6.7"
define void @_Z3foov() optsize ssp {
entry:
%call = tail call float @_Z3barv() optsize, !dbg !11
- tail call void @llvm.dbg.value(metadata !{float %call}, i64 0, metadata !5, metadata !{metadata !"0x102"}), !dbg !11
+ tail call void @llvm.dbg.value(metadata float %call, i64 0, metadata !5, metadata !{!"0x102"}), !dbg !11
%call16 = tail call float @_Z2f2v() optsize, !dbg !12
%cmp7 = fcmp olt float %call, %call16, !dbg !12
br i1 %cmp7, label %for.body, label %for.end, !dbg !12
@@ -43,24 +43,24 @@ declare void @llvm.dbg.value(metadata, i64, metadata, metadata) nounwind readnon
!llvm.dbg.cu = !{!0}
!llvm.module.flags = !{!20}
-!0 = metadata !{metadata !"0x11\004\00clang version 3.0 (trunk 130845)\001\00\000\00\001", metadata !18, metadata !19, metadata !19, metadata !16, null, null} ; [ DW_TAG_compile_unit ]
-!1 = metadata !{metadata !"0x2e\00foo\00foo\00_Z3foov\005\000\001\000\006\00256\001\005", metadata !18, metadata !2, metadata !3, null, void ()* @_Z3foov, null, null, metadata !17} ; [ DW_TAG_subprogram ] [line 5] [def] [foo]
-!2 = metadata !{metadata !"0x29", metadata !18} ; [ DW_TAG_file_type ]
-!3 = metadata !{metadata !"0x15\00\000\000\000\000\000\000", metadata !18, metadata !2, null, metadata !4, null, null, null} ; [ DW_TAG_subroutine_type ] [line 0, size 0, align 0, offset 0] [from ]
-!4 = metadata !{null}
-!5 = metadata !{metadata !"0x100\00k\006\000", metadata !6, metadata !2, metadata !7} ; [ DW_TAG_auto_variable ]
-!6 = metadata !{metadata !"0xb\005\0012\000", metadata !18, metadata !1} ; [ DW_TAG_lexical_block ]
-!7 = metadata !{metadata !"0x24\00float\000\0032\0032\000\000\004", null, metadata !0} ; [ DW_TAG_base_type ]
-!8 = metadata !{metadata !"0x100\00y\008\000", metadata !9, metadata !2, metadata !7} ; [ DW_TAG_auto_variable ]
-!9 = metadata !{metadata !"0xb\007\0025\002", metadata !18, metadata !10} ; [ DW_TAG_lexical_block ]
-!10 = metadata !{metadata !"0xb\007\003\001", metadata !18, metadata !6} ; [ DW_TAG_lexical_block ]
-!11 = metadata !{i32 6, i32 18, metadata !6, null}
-!12 = metadata !{i32 7, i32 3, metadata !6, null}
-!13 = metadata !{i32 8, i32 20, metadata !9, null}
-!14 = metadata !{i32 7, i32 20, metadata !10, null}
-!15 = metadata !{i32 10, i32 1, metadata !6, null}
-!16 = metadata !{metadata !1}
-!17 = metadata !{metadata !5, metadata !8}
-!18 = metadata !{metadata !"k.cc", metadata !"/private/tmp"}
-!19 = metadata !{i32 0}
-!20 = metadata !{i32 1, metadata !"Debug Info Version", i32 2}
+!0 = !{!"0x11\004\00clang version 3.0 (trunk 130845)\001\00\000\00\001", !18, !19, !19, !16, null, null} ; [ DW_TAG_compile_unit ]
+!1 = !{!"0x2e\00foo\00foo\00_Z3foov\005\000\001\000\006\00256\001\005", !18, !2, !3, null, void ()* @_Z3foov, null, null, !17} ; [ DW_TAG_subprogram ] [line 5] [def] [foo]
+!2 = !{!"0x29", !18} ; [ DW_TAG_file_type ]
+!3 = !{!"0x15\00\000\000\000\000\000\000", !18, !2, null, !4, null, null, null} ; [ DW_TAG_subroutine_type ] [line 0, size 0, align 0, offset 0] [from ]
+!4 = !{null}
+!5 = !{!"0x100\00k\006\000", !6, !2, !7} ; [ DW_TAG_auto_variable ]
+!6 = !{!"0xb\005\0012\000", !18, !1} ; [ DW_TAG_lexical_block ]
+!7 = !{!"0x24\00float\000\0032\0032\000\000\004", null, !0} ; [ DW_TAG_base_type ]
+!8 = !{!"0x100\00y\008\000", !9, !2, !7} ; [ DW_TAG_auto_variable ]
+!9 = !{!"0xb\007\0025\002", !18, !10} ; [ DW_TAG_lexical_block ]
+!10 = !{!"0xb\007\003\001", !18, !6} ; [ DW_TAG_lexical_block ]
+!11 = !MDLocation(line: 6, column: 18, scope: !6)
+!12 = !MDLocation(line: 7, column: 3, scope: !6)
+!13 = !MDLocation(line: 8, column: 20, scope: !9)
+!14 = !MDLocation(line: 7, column: 20, scope: !10)
+!15 = !MDLocation(line: 10, column: 1, scope: !6)
+!16 = !{!1}
+!17 = !{!5, !8}
+!18 = !{!"k.cc", !"/private/tmp"}
+!19 = !{i32 0}
+!20 = !{i32 1, !"Debug Info Version", i32 2}
diff --git a/test/CodeGen/ARM/debug-segmented-stacks.ll b/test/CodeGen/ARM/debug-segmented-stacks.ll
index 2123fa7..7ea5665 100644
--- a/test/CodeGen/ARM/debug-segmented-stacks.ll
+++ b/test/CodeGen/ARM/debug-segmented-stacks.ll
@@ -39,40 +39,40 @@ define void @test_basic() #0 {
; ARM-linux .cfi_same_value r5
}
-!0 = metadata !{metadata !"0x11\0012\00clang version 3.5 \000\00\000\00\000", metadata !1, metadata !2, metadata !2, metadata !3, metadata !2, metadata !2} ; [ DW_TAG_compile_unit ] [/tmp/var.c] [DW_LANG_C99]
-!1 = metadata !{metadata !"var.c", metadata !"/tmp"}
-!2 = metadata !{}
-!3 = metadata !{metadata !4}
-!4 = metadata !{metadata !"0x2e\00test_basic\00test_basic\00\005\000\001\000\006\00256\000\005", metadata !1, metadata !5, metadata !6, null, void ()* @test_basic, null, null, metadata !2} ; [ DW_TAG_subprogram ] [line 5] [def] [sum]
-!5 = metadata !{metadata !"0x29", metadata !1} ; [ DW_TAG_file_type ] [/tmp/var.c]
-!6 = metadata !{metadata !"0x15\00\000\000\000\000\000\000", i32 0, null, null, metadata !7, null, null, null} ; [ DW_TAG_subroutine_type ] [line 0, size 0, align 0, offset 0] [from ]
-!7 = metadata !{metadata !8, metadata !8}
-!8 = metadata !{metadata !"0x24\00int\000\0032\0032\000\000\005", null, null} ; [ DW_TAG_base_type ] [int] [line 0, size 32, align 32, offset 0, enc DW_ATE_signed]
-!9 = metadata !{i32 2, metadata !"Dwarf Version", i32 4}
-!10 = metadata !{i32 1, metadata !"Debug Info Version", i32 2}
-!11 = metadata !{metadata !"clang version 3.5 "}
-!12 = metadata !{metadata !"0x101\00count\0016777221\000", metadata !4, metadata !5, metadata !8} ; [ DW_TAG_arg_variable ] [count] [line 5]
-!13 = metadata !{i32 5, i32 0, metadata !4, null}
-!14 = metadata !{metadata !"0x100\00vl\006\000", metadata !4, metadata !5, metadata !15} ; [ DW_TAG_auto_variable ] [vl] [line 6]
-!15 = metadata !{metadata !"0x16\00va_list\0030\000\000\000\000", metadata !16, null, metadata !17} ; [ DW_TAG_typedef ] [va_list] [line 30, size 0, align 0, offset 0] [from __builtin_va_list]
-!16 = metadata !{metadata !"/linux-x86_64-high/gcc_4.7.2/dbg/llvm/bin/../lib/clang/3.5/include/stdarg.h", metadata !"/tmp"}
-!17 = metadata !{metadata !"0x16\00__builtin_va_list\006\000\000\000\000", metadata !1, null, metadata !18} ; [ DW_TAG_typedef ] [__builtin_va_list] [line 6, size 0, align 0, offset 0] [from __va_list]
-!18 = metadata !{metadata !"0x13\00__va_list\006\0032\0032\000\000\000", metadata !1, null, null, metadata !19, null, null, null} ; [ DW_TAG_structure_type ] [__va_list] [line 6, size 32, align 32, offset 0] [def] [from ]
-!19 = metadata !{metadata !20}
-!20 = metadata !{metadata !"0xd\00__ap\006\0032\0032\000\000", metadata !1, metadata !18, metadata !21} ; [ DW_TAG_member ] [__ap] [line 6, size 32, align 32, offset 0] [from ]
-!21 = metadata !{metadata !"0xf\00\000\0032\0032\000\000", null, null, null} ; [ DW_TAG_pointer_type ] [line 0, size 32, align 32, offset 0] [from ]
-!22 = metadata !{i32 6, i32 0, metadata !4, null}
-!23 = metadata !{i32 7, i32 0, metadata !4, null}
-!24 = metadata !{metadata !"0x100\00test_basic\008\000", metadata !4, metadata !5, metadata !8} ; [ DW_TAG_auto_variable ] [sum] [line 8]
-!25 = metadata !{i32 8, i32 0, metadata !4, null}
-!26 = metadata !{metadata !"0x100\00i\009\000", metadata !27, metadata !5, metadata !8} ; [ DW_TAG_auto_variable ] [i] [line 9]
-!27 = metadata !{metadata !"0xb\009\000\000", metadata !1, metadata !4} ; [ DW_TAG_lexical_block ] [/tmp/var.c]
-!28 = metadata !{i32 9, i32 0, metadata !27, null}
-!29 = metadata !{i32 10, i32 0, metadata !30, null}
-!30 = metadata !{metadata !"0xb\009\000\001", metadata !1, metadata !27} ; [ DW_TAG_lexical_block ] [/tmp/var.c]
-!31 = metadata !{i32 11, i32 0, metadata !30, null}
-!32 = metadata !{i32 12, i32 0, metadata !4, null}
-!33 = metadata !{i32 13, i32 0, metadata !4, null}
+!0 = !{!"0x11\0012\00clang version 3.5 \000\00\000\00\000", !1, !2, !2, !3, !2, !2} ; [ DW_TAG_compile_unit ] [/tmp/var.c] [DW_LANG_C99]
+!1 = !{!"var.c", !"/tmp"}
+!2 = !{}
+!3 = !{!4}
+!4 = !{!"0x2e\00test_basic\00test_basic\00\005\000\001\000\006\00256\000\005", !1, !5, !6, null, void ()* @test_basic, null, null, !2} ; [ DW_TAG_subprogram ] [line 5] [def] [sum]
+!5 = !{!"0x29", !1} ; [ DW_TAG_file_type ] [/tmp/var.c]
+!6 = !{!"0x15\00\000\000\000\000\000\000", i32 0, null, null, !7, null, null, null} ; [ DW_TAG_subroutine_type ] [line 0, size 0, align 0, offset 0] [from ]
+!7 = !{!8, !8}
+!8 = !{!"0x24\00int\000\0032\0032\000\000\005", null, null} ; [ DW_TAG_base_type ] [int] [line 0, size 32, align 32, offset 0, enc DW_ATE_signed]
+!9 = !{i32 2, !"Dwarf Version", i32 4}
+!10 = !{i32 1, !"Debug Info Version", i32 2}
+!11 = !{!"clang version 3.5 "}
+!12 = !{!"0x101\00count\0016777221\000", !4, !5, !8} ; [ DW_TAG_arg_variable ] [count] [line 5]
+!13 = !MDLocation(line: 5, scope: !4)
+!14 = !{!"0x100\00vl\006\000", !4, !5, !15} ; [ DW_TAG_auto_variable ] [vl] [line 6]
+!15 = !{!"0x16\00va_list\0030\000\000\000\000", !16, null, !17} ; [ DW_TAG_typedef ] [va_list] [line 30, size 0, align 0, offset 0] [from __builtin_va_list]
+!16 = !{!"/linux-x86_64-high/gcc_4.7.2/dbg/llvm/bin/../lib/clang/3.5/include/stdarg.h", !"/tmp"}
+!17 = !{!"0x16\00__builtin_va_list\006\000\000\000\000", !1, null, !18} ; [ DW_TAG_typedef ] [__builtin_va_list] [line 6, size 0, align 0, offset 0] [from __va_list]
+!18 = !{!"0x13\00__va_list\006\0032\0032\000\000\000", !1, null, null, !19, null, null, null} ; [ DW_TAG_structure_type ] [__va_list] [line 6, size 32, align 32, offset 0] [def] [from ]
+!19 = !{!20}
+!20 = !{!"0xd\00__ap\006\0032\0032\000\000", !1, !18, !21} ; [ DW_TAG_member ] [__ap] [line 6, size 32, align 32, offset 0] [from ]
+!21 = !{!"0xf\00\000\0032\0032\000\000", null, null, null} ; [ DW_TAG_pointer_type ] [line 0, size 32, align 32, offset 0] [from ]
+!22 = !MDLocation(line: 6, scope: !4)
+!23 = !MDLocation(line: 7, scope: !4)
+!24 = !{!"0x100\00test_basic\008\000", !4, !5, !8} ; [ DW_TAG_auto_variable ] [sum] [line 8]
+!25 = !MDLocation(line: 8, scope: !4)
+!26 = !{!"0x100\00i\009\000", !27, !5, !8} ; [ DW_TAG_auto_variable ] [i] [line 9]
+!27 = !{!"0xb\009\000\000", !1, !4} ; [ DW_TAG_lexical_block ] [/tmp/var.c]
+!28 = !MDLocation(line: 9, scope: !27)
+!29 = !MDLocation(line: 10, scope: !30)
+!30 = !{!"0xb\009\000\001", !1, !27} ; [ DW_TAG_lexical_block ] [/tmp/var.c]
+!31 = !MDLocation(line: 11, scope: !30)
+!32 = !MDLocation(line: 12, scope: !4)
+!33 = !MDLocation(line: 13, scope: !4)
; Just to prevent the alloca from being optimized away
declare void @dummy_use(i32*, i32)
diff --git a/test/CodeGen/ARM/dyn-stackalloc.ll b/test/CodeGen/ARM/dyn-stackalloc.ll
index 4ac5b8a..05c143d 100644
--- a/test/CodeGen/ARM/dyn-stackalloc.ll
+++ b/test/CodeGen/ARM/dyn-stackalloc.ll
@@ -1,4 +1,4 @@
-; RUN: llc -mtriple=arm-eabi %s -o /dev/null
+; RUN: llc -mcpu=generic -mtriple=arm-eabi -verify-machineinstrs < %s | FileCheck %s
%struct.comment = type { i8**, i32*, i32, i8* }
%struct.info = type { i32, i32, i32, i32, i32, i32, i32, i8* }
@@ -7,6 +7,18 @@
@str215 = external global [2 x i8]
define void @t1(%struct.state* %v) {
+
+; Make sure we generate:
+; sub sp, sp, r1
+; instead of:
+; sub r1, sp, r1
+; mov sp, r1
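+; (the single-instruction form is preferred: it saves the extra mov)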
+
+; CHECK-LABEL: @t1
+; CHECK: bic [[REG1:r[0-9]+]],
+; CHECK-NOT: sub r{{[0-9]+}}, sp, [[REG1]]
+; CHECK: sub sp, sp, [[REG1]]
+
%tmp6 = load i32* null
%tmp8 = alloca float, i32 %tmp6
store i32 1, i32* null
diff --git a/test/CodeGen/ARM/emit-big-cst.ll b/test/CodeGen/ARM/emit-big-cst.ll
index 9a3367d..01d789c 100644
--- a/test/CodeGen/ARM/emit-big-cst.ll
+++ b/test/CodeGen/ARM/emit-big-cst.ll
@@ -1,4 +1,4 @@
-; RUN: llc -mtriple=thumbv7-unknown-unknown < %s | FileCheck %s
+; RUN: llc -mtriple=thumbv7-unknown-unknown -target-abi apcs < %s | FileCheck %s
; Check assembly printing of odd constants.
; CHECK: bigCst:
diff --git a/test/CodeGen/ARM/fold-stack-adjust.ll b/test/CodeGen/ARM/fold-stack-adjust.ll
index 514d4a9..c5ff82e 100644
--- a/test/CodeGen/ARM/fold-stack-adjust.ll
+++ b/test/CodeGen/ARM/fold-stack-adjust.ll
@@ -71,7 +71,7 @@ define void @check_vfp_fold() minsize {
; CHECK-IOS-LABEL: check_vfp_fold:
; CHECK-IOS: push {r0, r1, r2, r3, r4, r7, lr}
; CHECK-IOS: sub.w r4, sp, #16
-; CHECK-IOS: bic r4, r4, #15
+; CHECK-IOS: bfc r4, #0, #4
; CHECK-IOS: mov sp, r4
; CHECK-IOS: vst1.64 {d8, d9}, [r4:128]
; ...
diff --git a/test/CodeGen/ARM/frame-register.ll b/test/CodeGen/ARM/frame-register.ll
index e6a55bd..b04e376 100644
--- a/test/CodeGen/ARM/frame-register.ll
+++ b/test/CodeGen/ARM/frame-register.ll
@@ -30,9 +30,9 @@ entry:
; CHECK-ARM: push {r11, lr}
; CHECK-ARM: mov r11, sp
-; CHECK-THUMB: push {r4, r6, r7, lr}
-; CHECK-THUMB: add r7, sp, #8
+; CHECK-THUMB: push {r7, lr}
+; CHECK-THUMB: add r7, sp, #0
; CHECK-DARWIN-ARM: push {r7, lr}
-; CHECK-DARWIN-THUMB: push {r4, r7, lr}
+; CHECK-DARWIN-THUMB: push {r7, lr}
diff --git a/test/CodeGen/ARM/ghc-tcreturn-lowered.ll b/test/CodeGen/ARM/ghc-tcreturn-lowered.ll
new file mode 100644
index 0000000..623b422
--- /dev/null
+++ b/test/CodeGen/ARM/ghc-tcreturn-lowered.ll
@@ -0,0 +1,21 @@
+; RUN: llc -mtriple=thumbv7-eabi -o - %s | FileCheck %s
+
+declare ghccc void @g()
+
+define ghccc void @test_direct_tail() {
+; CHECK-LABEL: test_direct_tail:
+; CHECK: b g
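+; (ghccc only supports tail calls, so this must lower to a plain branch, not bl)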
+
+ tail call ghccc void @g()
+ ret void
+}
+
+@ind_func = global void()* zeroinitializer
+
+define ghccc void @test_indirect_tail() {
+; CHECK-LABEL: test_indirect_tail:
+; CHECK: bx {{r[0-9]+}}
+ %func = load void()** @ind_func
+ tail call ghccc void()* %func()
+ ret void
+}
diff --git a/test/CodeGen/ARM/global-merge-1.ll b/test/CodeGen/ARM/global-merge-1.ll
index 341597e..e5d4def 100644
--- a/test/CodeGen/ARM/global-merge-1.ll
+++ b/test/CodeGen/ARM/global-merge-1.ll
@@ -78,8 +78,8 @@ attributes #3 = { nounwind }
!llvm.ident = !{!0}
-!0 = metadata !{metadata !"LLVM version 3.4 "}
-!1 = metadata !{metadata !2, metadata !2, i64 0}
-!2 = metadata !{metadata !"int", metadata !3, i64 0}
-!3 = metadata !{metadata !"omnipotent char", metadata !4, i64 0}
-!4 = metadata !{metadata !"Simple C/C++ TBAA"}
+!0 = !{!"LLVM version 3.4 "}
+!1 = !{!2, !2, i64 0}
+!2 = !{!"int", !3, i64 0}
+!3 = !{!"omnipotent char", !4, i64 0}
+!4 = !{!"Simple C/C++ TBAA"}
diff --git a/test/CodeGen/ARM/globals.ll b/test/CodeGen/ARM/globals.ll
index 3101500..2c599bf 100644
--- a/test/CodeGen/ARM/globals.ll
+++ b/test/CodeGen/ARM/globals.ll
@@ -43,6 +43,7 @@ define i32 @test1() {
; DarwinPIC: LPC0_0:
; DarwinPIC: ldr r0, [pc, r0]
; DarwinPIC: ldr r0, [r0]
+; DarwinPIC-NOT: ldr
; DarwinPIC: bx lr
; DarwinPIC: .align 2
diff --git a/test/CodeGen/ARM/ifcvt-branch-weight-bug.ll b/test/CodeGen/ARM/ifcvt-branch-weight-bug.ll
index 5d8e477..f76fd30 100644
--- a/test/CodeGen/ARM/ifcvt-branch-weight-bug.ll
+++ b/test/CodeGen/ARM/ifcvt-branch-weight-bug.ll
@@ -59,5 +59,5 @@ declare %classL* @_ZN1M1JI1LS1_EcvPS1_Ev(%classM2*)
declare void @_ZN1F10handleMoveEb(%classF*, i1 zeroext)
declare void @_Z3fn1v()
-!0 = metadata !{metadata !"clang version 3.5"}
-!1 = metadata !{metadata !"branch_weights", i32 62, i32 62}
+!0 = !{!"clang version 3.5"}
+!1 = !{!"branch_weights", i32 62, i32 62}
diff --git a/test/CodeGen/ARM/ifcvt-branch-weight.ll b/test/CodeGen/ARM/ifcvt-branch-weight.ll
index a994d3d..2d12a89 100644
--- a/test/CodeGen/ARM/ifcvt-branch-weight.ll
+++ b/test/CodeGen/ARM/ifcvt-branch-weight.ll
@@ -38,5 +38,5 @@ return:
ret i8 1
}
-!0 = metadata !{metadata !"branch_weights", i32 4, i32 12}
-!1 = metadata !{metadata !"branch_weights", i32 8, i32 16}
+!0 = !{!"branch_weights", i32 4, i32 12}
+!1 = !{!"branch_weights", i32 8, i32 16}
diff --git a/test/CodeGen/ARM/inline-diagnostics.ll b/test/CodeGen/ARM/inline-diagnostics.ll
index 7b77da2..0276abf 100644
--- a/test/CodeGen/ARM/inline-diagnostics.ll
+++ b/test/CodeGen/ARM/inline-diagnostics.ll
@@ -13,4 +13,4 @@ define float @inline_func(float %f1, float %f2) #0 {
ret float %1
}
-!1 = metadata !{i32 271, i32 305}
+!1 = !{i32 271, i32 305}
diff --git a/test/CodeGen/ARM/interrupt-attr.ll b/test/CodeGen/ARM/interrupt-attr.ll
index 96d1ee2..c6da09d 100644
--- a/test/CodeGen/ARM/interrupt-attr.ll
+++ b/test/CodeGen/ARM/interrupt-attr.ll
@@ -15,7 +15,7 @@ define arm_aapcscc void @irq_fn() alignstack(8) "interrupt"="IRQ" {
; CHECK-A: push {r0, r1, r2, r3, r10, r11, r12, lr}
; CHECK-A: add r11, sp, #20
; CHECK-A-NOT: sub sp, sp, #{{[0-9]+}}
-; CHECK-A: bic sp, sp, #7
+; CHECK-A: bfc sp, #0, #3
; CHECK-A: bl bar
; CHECK-A: sub sp, r11, #20
; CHECK-A: pop {r0, r1, r2, r3, r10, r11, r12, lr}
@@ -25,7 +25,7 @@ define arm_aapcscc void @irq_fn() alignstack(8) "interrupt"="IRQ" {
; CHECK-A-THUMB: push.w {r0, r1, r2, r3, r4, r7, r12, lr}
; CHECK-A-THUMB: add r7, sp, #20
; CHECK-A-THUMB: mov r4, sp
-; CHECK-A-THUMB: bic r4, r4, #7
+; CHECK-A-THUMB: bfc r4, #0, #3
; CHECK-A-THUMB: bl bar
; CHECK-A-THUMB: sub.w r4, r7, #20
; CHECK-A-THUMB: mov sp, r4
@@ -38,7 +38,7 @@ define arm_aapcscc void @irq_fn() alignstack(8) "interrupt"="IRQ" {
; CHECK-M: push.w {r4, r10, r11, lr}
; CHECK-M: add.w r11, sp, #8
; CHECK-M: mov r4, sp
-; CHECK-M: bic r4, r4, #7
+; CHECK-M: bfc r4, #0, #3
; CHECK-M: mov sp, r4
; CHECK-M: bl _bar
; CHECK-M: sub.w r4, r11, #8
@@ -56,7 +56,7 @@ define arm_aapcscc void @fiq_fn() alignstack(8) "interrupt"="FIQ" {
; 32 to get past r0, r1, ..., r7
; CHECK-A: add r11, sp, #32
; CHECK-A: sub sp, sp, #{{[0-9]+}}
-; CHECK-A: bic sp, sp, #7
+; CHECK-A: bfc sp, #0, #3
; [...]
; 32 must match above
; CHECK-A: sub sp, r11, #32
@@ -75,7 +75,7 @@ define arm_aapcscc void @swi_fn() alignstack(8) "interrupt"="SWI" {
; CHECK-A: push {r0, r1, r2, r3, r4, r5, r6, r7, r8, r9, r10, r11, r12, lr}
; CHECK-A: add r11, sp, #44
; CHECK-A: sub sp, sp, #{{[0-9]+}}
-; CHECK-A: bic sp, sp, #7
+; CHECK-A: bfc sp, #0, #3
; [...]
; CHECK-A: sub sp, r11, #44
; CHECK-A: pop {r0, r1, r2, r3, r4, r5, r6, r7, r8, r9, r10, r11, r12, lr}
@@ -91,7 +91,7 @@ define arm_aapcscc void @undef_fn() alignstack(8) "interrupt"="UNDEF" {
; CHECK-A: push {r0, r1, r2, r3, r10, r11, r12, lr}
; CHECK-A: add r11, sp, #20
; CHECK-A-NOT: sub sp, sp, #{{[0-9]+}}
-; CHECK-A: bic sp, sp, #7
+; CHECK-A: bfc sp, #0, #3
; [...]
; CHECK-A: sub sp, r11, #20
; CHECK-A: pop {r0, r1, r2, r3, r10, r11, r12, lr}
@@ -106,7 +106,7 @@ define arm_aapcscc void @abort_fn() alignstack(8) "interrupt"="ABORT" {
; CHECK-A: push {r0, r1, r2, r3, r10, r11, r12, lr}
; CHECK-A: add r11, sp, #20
; CHECK-A-NOT: sub sp, sp, #{{[0-9]+}}
-; CHECK-A: bic sp, sp, #7
+; CHECK-A: bfc sp, #0, #3
; [...]
; CHECK-A: sub sp, r11, #20
; CHECK-A: pop {r0, r1, r2, r3, r10, r11, r12, lr}
diff --git a/test/CodeGen/ARM/isel-v8i32-crash.ll b/test/CodeGen/ARM/isel-v8i32-crash.ll
new file mode 100644
index 0000000..0116fe8
--- /dev/null
+++ b/test/CodeGen/ARM/isel-v8i32-crash.ll
@@ -0,0 +1,26 @@
+; RUN: llc < %s -mtriple=armv7-linux-gnu | FileCheck %s
+
+; Check we don't crash when trying to combine:
+; (d1 = <float 8.000000e+00, float 8.000000e+00, ...>) (power of 2)
+; vmul.f32 d0, d1, d0
+; vcvt.s32.f32 d0, d0
+; into:
+; vcvt.s32.f32 d0, d0, #3
+; when we have a vector length of 8, due to use of v8i32 types.
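+; (8.0 = 2^3, so the multiply can fold into the convert as 3 fractional bits)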
+
+target datalayout = "e-m:e-p:32:32-i64:64-v128:64:128-a:0:32-n32-S64"
+
+; CHECK: func:
+; CHECK: vcvt.s32.f32 q[[R:[0-9]]], q[[R]], #3
+define void @func(i16* nocapture %pb, float* nocapture readonly %pf) #0 {
+entry:
+ %0 = bitcast float* %pf to <8 x float>*
+ %1 = load <8 x float>* %0, align 4
+ %2 = fmul <8 x float> %1, <float 8.000000e+00, float 8.000000e+00, float 8.000000e+00, float 8.000000e+00, float 8.000000e+00, float 8.000000e+00, float 8.000000e+00, float 8.000000e+00>
+ %3 = fptosi <8 x float> %2 to <8 x i16>
+ %4 = bitcast i16* %pb to <8 x i16>*
+ store <8 x i16> %3, <8 x i16>* %4, align 2
+ ret void
+}
+
+attributes #0 = { nounwind "less-precise-fpmad"="false" "no-frame-pointer-elim"="false" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "no-realign-stack" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" }
diff --git a/test/CodeGen/ARM/krait-cpu-div-attribute.ll b/test/CodeGen/ARM/krait-cpu-div-attribute.ll
new file mode 100644
index 0000000..b7a1dcc
--- /dev/null
+++ b/test/CodeGen/ARM/krait-cpu-div-attribute.ll
@@ -0,0 +1,36 @@
+; Tests the generation of the ".arch_extension" attribute for hardware
+; division on krait CPU. For now, krait is recognized as "cortex-a9" + hwdiv.
+; Also tests for the hwdiv instruction on krait CPU.
+
+; check for arch_extension/cpu directive
+; RUN: llc < %s -mtriple=armv7-linux-gnueabi -mcpu=krait | FileCheck %s --check-prefix=DIV_EXTENSION
+; RUN: llc < %s -mtriple=thumbv7-linux-gnueabi -mcpu=krait | FileCheck %s --check-prefix=DIV_EXTENSION
+; RUN: llc < %s -mtriple=armv7-linux-gnueabi -mcpu=cortex-a9 | FileCheck %s --check-prefix=NODIV_KRAIT
+; RUN: llc < %s -mtriple=thumbv7-linux-gnueabi -mcpu=cortex-a9 | FileCheck %s --check-prefix=NODIV_KRAIT
+; RUN: llc < %s -mcpu=krait -mattr=-hwdiv,-hwdiv-arm | FileCheck %s --check-prefix=NODIV_KRAIT
+
+; check if correct instruction is emitted by integrated assembler
+; RUN: llc < %s -mtriple=armv7-linux-gnueabi -mcpu=krait -filetype=obj | llvm-objdump -mcpu=krait -triple armv7-linux-gnueabi -d - | FileCheck %s --check-prefix=HWDIV
+; RUN: llc < %s -mtriple=thumbv7-linux-gnueabi -mcpu=krait -filetype=obj | llvm-objdump -mcpu=krait -triple thumbv7-linux-gnueabi -d - | FileCheck %s --check-prefix=HWDIV
+
+; arch_extension attribute
+; DIV_EXTENSION: .cpu cortex-a9
+; DIV_EXTENSION: .arch_extension idiv
+; NODIV_KRAIT-NOT: .arch_extension idiv
+; HWDIV: sdiv
+
+define i32 @main() #0 {
+entry:
+ %retval = alloca i32, align 4
+ %a = alloca i32, align 4
+ %b = alloca i32, align 4
+ %c = alloca i32, align 4
+ store i32 0, i32* %retval
+ store volatile i32 100, i32* %b, align 4
+ store volatile i32 32, i32* %c, align 4
+ %0 = load volatile i32* %b, align 4
+ %1 = load volatile i32* %c, align 4
+ %div = sdiv i32 %0, %1
+ store volatile i32 %div, i32* %a, align 4
+ ret i32 0
+}
diff --git a/test/CodeGen/ARM/longMAC.ll b/test/CodeGen/ARM/longMAC.ll
index fed6ec0..3f30fd4 100644
--- a/test/CodeGen/ARM/longMAC.ll
+++ b/test/CodeGen/ARM/longMAC.ll
@@ -75,3 +75,44 @@ define i64 @MACLongTest5(i64 %c, i32 %a, i32 %b) {
%add = add i64 %mul, %c
ret i64 %add
}
+
+define i64 @MACLongTest6(i32 %a, i32 %b, i32 %c, i32 %d) {
+;CHECK-LABEL: MACLongTest6:
+;CHECK: smull r12, lr, r1, r0
+;CHECK: smlal r12, lr, r3, r2
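+; Two sign-extended 32x32->64 products summed: the first selects smull, the
+; second folds into smlal.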
+ %conv = sext i32 %a to i64
+ %conv1 = sext i32 %b to i64
+ %mul = mul nsw i64 %conv1, %conv
+ %conv2 = sext i32 %c to i64
+ %conv3 = sext i32 %d to i64
+ %mul4 = mul nsw i64 %conv3, %conv2
+ %add = add nsw i64 %mul4, %mul
+ ret i64 %add
+}
+
+define i64 @MACLongTest7(i64 %acc, i32 %lhs, i32 %rhs) {
+;CHECK-LABEL: MACLongTest7:
+;CHECK-NOT: smlal
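+; The shl/lshr/or below swaps the halves of the 64-bit product, so it must
+; not be matched as a multiply-accumulate.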
+ %conv = sext i32 %lhs to i64
+ %conv1 = sext i32 %rhs to i64
+ %mul = mul nsw i64 %conv1, %conv
+ %shl = shl i64 %mul, 32
+ %shr = lshr i64 %mul, 32
+ %or = or i64 %shl, %shr
+ %add = add i64 %or, %acc
+ ret i64 %add
+}
+
+define i64 @MACLongTest8(i64 %acc, i32 %lhs, i32 %rhs) {
+;CHECK-LABEL: MACLongTest8:
+;CHECK-NOT: smlal
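+; Here the or combines the low half of the product with the low half shifted
+; high, not the plain product, so no smlal.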
+ %conv = zext i32 %lhs to i64
+ %conv1 = zext i32 %rhs to i64
+ %mul = mul nuw i64 %conv1, %conv
+ %and = and i64 %mul, 4294967295
+ %shl = shl i64 %mul, 32
+ %or = or i64 %and, %shl
+ %add = add i64 %or, %acc
+ ret i64 %add
+}
+
diff --git a/test/CodeGen/ARM/memcpy-inline.ll b/test/CodeGen/ARM/memcpy-inline.ll
index 84ce4a7..33ac4e1 100644
--- a/test/CodeGen/ARM/memcpy-inline.ll
+++ b/test/CodeGen/ARM/memcpy-inline.ll
@@ -46,10 +46,8 @@ entry:
; CHECK: movw [[REG2:r[0-9]+]], #16716
; CHECK: movt [[REG2:r[0-9]+]], #72
; CHECK: str [[REG2]], [r0, #32]
-; CHECK: vld1.8 {d{{[0-9]+}}, d{{[0-9]+}}}, [r1]
-; CHECK: vst1.8 {d{{[0-9]+}}, d{{[0-9]+}}}, [r0]
-; CHECK: adds r0, #16
-; CHECK: adds r1, #16
+; CHECK: vld1.8 {d{{[0-9]+}}, d{{[0-9]+}}}, [r1]!
+; CHECK: vst1.8 {d{{[0-9]+}}, d{{[0-9]+}}}, [r0]!
; CHECK: vld1.8 {d{{[0-9]+}}, d{{[0-9]+}}}, [r1]
; CHECK: vst1.8 {d{{[0-9]+}}, d{{[0-9]+}}}, [r0]
tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %C, i8* getelementptr inbounds ([36 x i8]* @.str2, i64 0, i64 0), i64 36, i32 1, i1 false)
@@ -59,10 +57,8 @@ entry:
define void @t3(i8* nocapture %C) nounwind {
entry:
; CHECK-LABEL: t3:
-; CHECK: vld1.8 {d{{[0-9]+}}, d{{[0-9]+}}}, [r1]
-; CHECK: vst1.8 {d{{[0-9]+}}, d{{[0-9]+}}}, [r0]
-; CHECK: adds r0, #16
-; CHECK: adds r1, #16
+; CHECK: vld1.8 {d{{[0-9]+}}, d{{[0-9]+}}}, [r1]!
+; CHECK: vst1.8 {d{{[0-9]+}}, d{{[0-9]+}}}, [r0]!
; CHECK: vld1.8 {d{{[0-9]+}}}, [r1]
; CHECK: vst1.8 {d{{[0-9]+}}}, [r0]
tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %C, i8* getelementptr inbounds ([24 x i8]* @.str3, i64 0, i64 0), i64 24, i32 1, i1 false)
@@ -73,7 +69,8 @@ define void @t4(i8* nocapture %C) nounwind {
entry:
; CHECK-LABEL: t4:
; CHECK: vld1.8 {[[REG3:d[0-9]+]], [[REG4:d[0-9]+]]}, [r1]
-; CHECK: vst1.8 {[[REG3]], [[REG4]]}, [r0]
+; CHECK: vst1.8 {[[REG3]], [[REG4]]}, [r0]!
+; CHECK: strh [[REG5:r[0-9]+]], [r0]
tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %C, i8* getelementptr inbounds ([18 x i8]* @.str4, i64 0, i64 0), i64 18, i32 1, i1 false)
ret void
}
diff --git a/test/CodeGen/ARM/metadata-default.ll b/test/CodeGen/ARM/metadata-default.ll
index f6a3fe2..f8e40b4 100644
--- a/test/CodeGen/ARM/metadata-default.ll
+++ b/test/CodeGen/ARM/metadata-default.ll
@@ -9,8 +9,8 @@ define i32 @f(i64 %z) {
!llvm.module.flags = !{!0, !1}
-!0 = metadata !{i32 1, metadata !"wchar_size", i32 4}
-!1 = metadata !{i32 1, metadata !"min_enum_size", i32 4}
+!0 = !{i32 1, !"wchar_size", i32 4}
+!1 = !{i32 1, !"min_enum_size", i32 4}
; CHECK: .eabi_attribute 18, 4 @ Tag_ABI_PCS_wchar_t
; CHECK: .eabi_attribute 26, 2 @ Tag_ABI_enum_size
diff --git a/test/CodeGen/ARM/metadata-short-enums.ll b/test/CodeGen/ARM/metadata-short-enums.ll
index bccd332..2f1586d 100644
--- a/test/CodeGen/ARM/metadata-short-enums.ll
+++ b/test/CodeGen/ARM/metadata-short-enums.ll
@@ -9,8 +9,8 @@ define i32 @f(i64 %z) {
!llvm.module.flags = !{!0, !1}
-!0 = metadata !{i32 1, metadata !"wchar_size", i32 4}
-!1 = metadata !{i32 1, metadata !"min_enum_size", i32 1}
+!0 = !{i32 1, !"wchar_size", i32 4}
+!1 = !{i32 1, !"min_enum_size", i32 1}
; CHECK: .eabi_attribute 18, 4 @ Tag_ABI_PCS_wchar_t
; CHECK: .eabi_attribute 26, 1 @ Tag_ABI_enum_size
diff --git a/test/CodeGen/ARM/metadata-short-wchar.ll b/test/CodeGen/ARM/metadata-short-wchar.ll
index 6de9bf1..b7f5833 100644
--- a/test/CodeGen/ARM/metadata-short-wchar.ll
+++ b/test/CodeGen/ARM/metadata-short-wchar.ll
@@ -9,8 +9,8 @@ define i32 @f(i64 %z) {
!llvm.module.flags = !{!0, !1}
-!0 = metadata !{i32 1, metadata !"wchar_size", i32 2}
-!1 = metadata !{i32 1, metadata !"min_enum_size", i32 4}
+!0 = !{i32 1, !"wchar_size", i32 2}
+!1 = !{i32 1, !"min_enum_size", i32 4}
; CHECK: .eabi_attribute 18, 2 @ Tag_ABI_PCS_wchar_t
; CHECK: .eabi_attribute 26, 2 @ Tag_ABI_enum_size
diff --git a/test/CodeGen/ARM/named-reg-alloc.ll b/test/CodeGen/ARM/named-reg-alloc.ll
index 3c27d22..380cf39 100644
--- a/test/CodeGen/ARM/named-reg-alloc.ll
+++ b/test/CodeGen/ARM/named-reg-alloc.ll
@@ -11,4 +11,4 @@ entry:
declare i32 @llvm.read_register.i32(metadata) nounwind
-!0 = metadata !{metadata !"r5\00"}
+!0 = !{!"r5\00"}
diff --git a/test/CodeGen/ARM/named-reg-notareg.ll b/test/CodeGen/ARM/named-reg-notareg.ll
index af38b60..3ac03f4 100644
--- a/test/CodeGen/ARM/named-reg-notareg.ll
+++ b/test/CodeGen/ARM/named-reg-notareg.ll
@@ -10,4 +10,4 @@ entry:
declare i32 @llvm.read_register.i32(metadata) nounwind
-!0 = metadata !{metadata !"notareg\00"}
+!0 = !{!"notareg\00"}
diff --git a/test/CodeGen/ARM/none-macho-v4t.ll b/test/CodeGen/ARM/none-macho-v4t.ll
index 4c6e68e..b6018de 100644
--- a/test/CodeGen/ARM/none-macho-v4t.ll
+++ b/test/CodeGen/ARM/none-macho-v4t.ll
@@ -11,11 +11,15 @@ define void @test_call() {
; CHECK: [[PC_LABEL:LPC[0-9]+_[0-9]+]]:
; CHECK-NEXT: add r[[CALLEE_STUB]], pc
; CHECK: ldr [[CALLEE:r[0-9]+]], [r[[CALLEE_STUB]]]
-; CHECK: mov lr, pc
-; CHECK: bx [[CALLEE]]
+; CHECK-NOT: mov lr, pc
+; CHECK: bl [[INDIRECT_PAD:Ltmp[0-9]+]]
; CHECK: [[LITPOOL]]:
; CHECK-NEXT: .long L_callee$non_lazy_ptr-([[PC_LABEL]]+4)
+
+; CHECK: [[INDIRECT_PAD]]:
+; CHECK: bx [[CALLEE]]
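+; (v4t has no blx: calling through a bl-reached pad keeps lr's return state correct)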
+
call void @callee()
ret void
}
diff --git a/test/CodeGen/ARM/null-streamer.ll b/test/CodeGen/ARM/null-streamer.ll
index 350c45e..19ad22a 100644
--- a/test/CodeGen/ARM/null-streamer.ll
+++ b/test/CodeGen/ARM/null-streamer.ll
@@ -5,3 +5,5 @@ define i32 @main() {
entry:
ret i32 0
}
+
+module asm ".fnstart"
diff --git a/test/CodeGen/ARM/odr_comdat.ll b/test/CodeGen/ARM/odr_comdat.ll
deleted file mode 100644
index e28b578..0000000
--- a/test/CodeGen/ARM/odr_comdat.ll
+++ /dev/null
@@ -1,16 +0,0 @@
-; RUN: llc < %s -mtriple=arm-linux-gnueabi | FileCheck %s -check-prefix=ARMGNUEABI
-
-; Checking that a comdat group gets generated correctly for a static member
-; of instantiated C++ templates.
-; see http://sourcery.mentor.com/public/cxx-abi/abi.html#vague-itemplate
-; section 5.2.6 Instantiated templates
-; "Any static member data object is emitted in a COMDAT identified by its mangled
-; name, in any object file with a reference to its name symbol."
-
-; Case 1: variable is not explicitly initialized, and ends up in a .bss section
-; ARMGNUEABI: .section .bss._ZN1CIiE1iE,"aGw",%nobits,_ZN1CIiE1iE,comdat
-@_ZN1CIiE1iE = weak_odr global i32 0, align 4
-
-; Case 2: variable is explicitly initialized, and ends up in a .data section
-; ARMGNUEABI: .section .data._ZN1CIiE1jE,"aGw",%progbits,_ZN1CIiE1jE,comdat
-@_ZN1CIiE1jE = weak_odr global i32 12, align 4
diff --git a/test/CodeGen/ARM/out-of-registers.ll b/test/CodeGen/ARM/out-of-registers.ll
index 790e416..a83923d 100644
--- a/test/CodeGen/ARM/out-of-registers.ll
+++ b/test/CodeGen/ARM/out-of-registers.ll
@@ -38,5 +38,5 @@ attributes #2 = { nounwind readonly }
!llvm.ident = !{!0}
-!0 = metadata !{metadata !"Snapdragon LLVM ARM Compiler 3.4"}
-!1 = metadata !{metadata !1}
+!0 = !{!"Snapdragon LLVM ARM Compiler 3.4"}
+!1 = !{!1}
diff --git a/test/CodeGen/ARM/section-name.ll b/test/CodeGen/ARM/section-name.ll
index a0aad47..a4c6054 100644
--- a/test/CodeGen/ARM/section-name.ll
+++ b/test/CodeGen/ARM/section-name.ll
@@ -16,7 +16,7 @@ entry:
ret void
}
-; CHECK: .section .text.test3,"axG",%progbits,test3,comdat
+; CHECK: .text
; CHECK: .weak test3
; CHECK: .type test3,%function
define linkonce_odr void @test3() {
diff --git a/test/CodeGen/ARM/setcc-type-mismatch.ll b/test/CodeGen/ARM/setcc-type-mismatch.ll
new file mode 100644
index 0000000..2cfdba1
--- /dev/null
+++ b/test/CodeGen/ARM/setcc-type-mismatch.ll
@@ -0,0 +1,11 @@
+; RUN: llc -mtriple=armv7-linux-gnueabihf %s -o - | FileCheck %s
+
+define void @test_mismatched_setcc(<4 x i22> %l, <4 x i22> %r, <4 x i1>* %addr) {
+; CHECK-LABEL: test_mismatched_setcc:
+; CHECK: vceq.i32 [[CMP128:q[0-9]+]], {{q[0-9]+}}, {{q[0-9]+}}
+; CHECK: vmovn.i32 {{d[0-9]+}}, [[CMP128]]
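+; The illegal <4 x i22> operands are promoted to <4 x i32>, hence the 32-bit
+; compare narrowed back with vmovn.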
+
+ %tst = icmp eq <4 x i22> %l, %r
+ store <4 x i1> %tst, <4 x i1>* %addr
+ ret void
+}
diff --git a/test/CodeGen/ARM/sjlj-prepare-critical-edge.ll b/test/CodeGen/ARM/sjlj-prepare-critical-edge.ll
index d8241d0..a7bc22f 100644
--- a/test/CodeGen/ARM/sjlj-prepare-critical-edge.ll
+++ b/test/CodeGen/ARM/sjlj-prepare-critical-edge.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -O1 -mtriple thumbv7-apple-ios6
+; RUN: llc < %s -O1 -mtriple thumbv7-apple-ios6 | FileCheck %s
; Just make sure no one tries to make the assumption that the normal edge of an
; invoke is never a critical edge. Previously, this code would assert.
@@ -65,3 +65,129 @@ declare i32 @__gxx_personality_sj0(...)
declare void @release(i8*)
declare void @terminatev()
+
+; Make sure that the instruction that DemoteRegToStack inserts to reload
+; %call.i.i.i14.i.i follows the instruction that saves the value to the stack in
+; basic block %entry.do.body.i.i.i_crit_edge.
+; Previously, DemoteRegToStack would insert a load instruction into the entry
+; block to reload %call.i.i.i14.i.i before the phi instruction (%0) in block
+; %do.body.i.i.i.
+
+; CHECK-LABEL: __Z4foo1c:
+; CHECK: blx __Znwm
+; CHECK: {{.*}}@ %entry.do.body.i.i.i_crit_edge
+; CHECK: str r0, [sp, [[OFFSET:#[0-9]+]]]
+; CHECK: ldr [[R0:r[0-9]+]], [sp, [[OFFSET]]]
+; CHECK: {{.*}}@ %do.body.i.i.i
+; CHECK: cmp [[R0]], #0
+
+%"class.std::__1::basic_string" = type { %"class.std::__1::__compressed_pair" }
+%"class.std::__1::__compressed_pair" = type { %"class.std::__1::__libcpp_compressed_pair_imp" }
+%"class.std::__1::__libcpp_compressed_pair_imp" = type { %"struct.std::__1::basic_string<char, std::__1::char_traits<char>, std::__1::allocator<char> >::__rep" }
+%"struct.std::__1::basic_string<char, std::__1::char_traits<char>, std::__1::allocator<char> >::__rep" = type { %union.anon }
+%union.anon = type { %"struct.std::__1::basic_string<char, std::__1::char_traits<char>, std::__1::allocator<char> >::__long" }
+%"struct.std::__1::basic_string<char, std::__1::char_traits<char>, std::__1::allocator<char> >::__long" = type { i32, i32, i8* }
+
+@.str = private unnamed_addr constant [12 x i8] c"some_string\00", align 1
+
+define void @_Z4foo1c(i8 signext %a) {
+entry:
+ %s1 = alloca %"class.std::__1::basic_string", align 4
+ call void @_ZNSt3__112basic_stringIcNS_11char_traitsIcEENS_9allocatorIcEEE6__initEPKcm(%"class.std::__1::basic_string"* %s1, i8* getelementptr inbounds ([12 x i8]* @.str, i32 0, i32 0), i32 11)
+ %call.i.i.i14.i.i = invoke noalias i8* @_Znwm(i32 1024)
+ to label %do.body.i.i.i unwind label %lpad.body
+
+do.body.i.i.i: ; preds = %entry, %_ZNSt3__116allocator_traitsINS_9allocatorIcEEE9constructIccEEvRS2_PT_RKT0_.exit.i.i.i
+ %lsr.iv = phi i32 [ %lsr.iv.next, %_ZNSt3__116allocator_traitsINS_9allocatorIcEEE9constructIccEEvRS2_PT_RKT0_.exit.i.i.i ], [ -1024, %entry ]
+ %0 = phi i8* [ %incdec.ptr.i.i.i, %_ZNSt3__116allocator_traitsINS_9allocatorIcEEE9constructIccEEvRS2_PT_RKT0_.exit.i.i.i ], [ %call.i.i.i14.i.i, %entry ]
+ %new.isnull.i.i.i.i = icmp eq i8* %0, null
+ br i1 %new.isnull.i.i.i.i, label %_ZNSt3__116allocator_traitsINS_9allocatorIcEEE9constructIccEEvRS2_PT_RKT0_.exit.i.i.i, label %new.notnull.i.i.i.i
+
+new.notnull.i.i.i.i: ; preds = %do.body.i.i.i
+ store i8 %a, i8* %0, align 1
+ br label %_ZNSt3__116allocator_traitsINS_9allocatorIcEEE9constructIccEEvRS2_PT_RKT0_.exit.i.i.i
+
+_ZNSt3__116allocator_traitsINS_9allocatorIcEEE9constructIccEEvRS2_PT_RKT0_.exit.i.i.i: ; preds = %new.notnull.i.i.i.i, %do.body.i.i.i
+ %1 = phi i8* [ null, %do.body.i.i.i ], [ %0, %new.notnull.i.i.i.i ]
+ %incdec.ptr.i.i.i = getelementptr inbounds i8* %1, i32 1
+ %lsr.iv.next = add i32 %lsr.iv, 1
+ %cmp.i16.i.i = icmp eq i32 %lsr.iv.next, 0
+ br i1 %cmp.i16.i.i, label %invoke.cont, label %do.body.i.i.i
+
+invoke.cont: ; preds = %_ZNSt3__116allocator_traitsINS_9allocatorIcEEE9constructIccEEvRS2_PT_RKT0_.exit.i.i.i
+ invoke void @_Z4foo2Pci(i8* %call.i.i.i14.i.i, i32 1024)
+ to label %invoke.cont5 unwind label %lpad2
+
+invoke.cont5: ; preds = %invoke.cont
+ %cmp.i.i.i15 = icmp eq i8* %call.i.i.i14.i.i, null
+ br i1 %cmp.i.i.i15, label %invoke.cont6, label %_ZNSt3__113__vector_baseIcNS_9allocatorIcEEE5clearEv.exit.i.i.i19
+
+_ZNSt3__113__vector_baseIcNS_9allocatorIcEEE5clearEv.exit.i.i.i19: ; preds = %invoke.cont5
+ call void @_ZdlPv(i8* %call.i.i.i14.i.i)
+ br label %invoke.cont6
+
+invoke.cont6: ; preds = %_ZNSt3__113__vector_baseIcNS_9allocatorIcEEE5clearEv.exit.i.i.i19, %invoke.cont5
+ %call10 = call %"class.std::__1::basic_string"* @_ZNSt3__112basic_stringIcNS_11char_traitsIcEENS_9allocatorIcEEED1Ev(%"class.std::__1::basic_string"* %s1)
+ ret void
+
+lpad.body: ; preds = %entry
+ %2 = landingpad { i8*, i32 } personality i8* bitcast (i32 (...)* @__gxx_personality_sj0 to i8*)
+ cleanup
+ %3 = extractvalue { i8*, i32 } %2, 0
+ %4 = extractvalue { i8*, i32 } %2, 1
+ br label %ehcleanup
+
+lpad2: ; preds = %invoke.cont
+ %5 = landingpad { i8*, i32 } personality i8* bitcast (i32 (...)* @__gxx_personality_sj0 to i8*)
+ cleanup
+ %6 = extractvalue { i8*, i32 } %5, 0
+ %7 = extractvalue { i8*, i32 } %5, 1
+ %cmp.i.i.i21 = icmp eq i8* %call.i.i.i14.i.i, null
+ br i1 %cmp.i.i.i21, label %ehcleanup, label %_ZNSt3__113__vector_baseIcNS_9allocatorIcEEE5clearEv.exit.i.i.i26
+
+_ZNSt3__113__vector_baseIcNS_9allocatorIcEEE5clearEv.exit.i.i.i26: ; preds = %lpad2
+ call void @_ZdlPv(i8* %call.i.i.i14.i.i)
+ br label %ehcleanup
+
+ehcleanup: ; preds = %_ZNSt3__113__vector_baseIcNS_9allocatorIcEEE5clearEv.exit.i.i.i26, %lpad2, %lpad.body
+ %exn.slot.0 = phi i8* [ %3, %lpad.body ], [ %6, %lpad2 ], [ %6, %_ZNSt3__113__vector_baseIcNS_9allocatorIcEEE5clearEv.exit.i.i.i26 ]
+ %ehselector.slot.0 = phi i32 [ %4, %lpad.body ], [ %7, %lpad2 ], [ %7, %_ZNSt3__113__vector_baseIcNS_9allocatorIcEEE5clearEv.exit.i.i.i26 ]
+ %call12 = invoke %"class.std::__1::basic_string"* @_ZNSt3__112basic_stringIcNS_11char_traitsIcEENS_9allocatorIcEEED1Ev(%"class.std::__1::basic_string"* %s1)
+ to label %eh.resume unwind label %terminate.lpad
+
+eh.resume: ; preds = %ehcleanup
+ %lpad.val = insertvalue { i8*, i32 } undef, i8* %exn.slot.0, 0
+ %lpad.val13 = insertvalue { i8*, i32 } %lpad.val, i32 %ehselector.slot.0, 1
+ resume { i8*, i32 } %lpad.val13
+
+terminate.lpad: ; preds = %ehcleanup
+ %8 = landingpad { i8*, i32 } personality i8* bitcast (i32 (...)* @__gxx_personality_sj0 to i8*)
+ catch i8* null
+ %9 = extractvalue { i8*, i32 } %8, 0
+ call void @__clang_call_terminate(i8* %9)
+ unreachable
+}
+
+declare void @_Z4foo2Pci(i8*, i32)
+
+define linkonce_odr hidden void @__clang_call_terminate(i8*) {
+ %2 = tail call i8* @__cxa_begin_catch(i8* %0)
+ tail call void @_ZSt9terminatev()
+ unreachable
+}
+
+declare i8* @__cxa_begin_catch(i8*)
+declare void @_ZSt9terminatev()
+declare %"class.std::__1::basic_string"* @_ZNSt3__112basic_stringIcNS_11char_traitsIcEENS_9allocatorIcEEED1Ev(%"class.std::__1::basic_string"* returned)
+declare void @_ZdlPv(i8*) #3
+declare noalias i8* @_Znwm(i32)
+declare void @_ZNSt3__112basic_stringIcNS_11char_traitsIcEENS_9allocatorIcEEE6__initEPKcm(%"class.std::__1::basic_string"*, i8*, i32)
+declare void @_Unwind_SjLj_Register({ i8*, i32, [4 x i32], i8*, i8*, [5 x i8*] }*)
+declare void @_Unwind_SjLj_Unregister({ i8*, i32, [4 x i32], i8*, i8*, [5 x i8*] }*)
+declare i8* @llvm.frameaddress(i32)
+declare i8* @llvm.stacksave()
+declare void @llvm.stackrestore(i8*)
+declare i32 @llvm.eh.sjlj.setjmp(i8*)
+declare i8* @llvm.eh.sjlj.lsda()
+declare void @llvm.eh.sjlj.callsite(i32)
+declare void @llvm.eh.sjlj.functioncontext(i8*)
diff --git a/test/CodeGen/ARM/spill-q.ll b/test/CodeGen/ARM/spill-q.ll
index 4fa97ea..425fc12 100644
--- a/test/CodeGen/ARM/spill-q.ll
+++ b/test/CodeGen/ARM/spill-q.ll
@@ -11,7 +11,7 @@ declare <4 x float> @llvm.arm.neon.vld1.v4f32(i8*, i32) nounwind readonly
define void @aaa(%quuz* %this, i8* %block) {
; CHECK-LABEL: aaa:
-; CHECK: bic {{.*}}, #15
+; CHECK: bfc {{.*}}, #0, #4
; CHECK: vst1.64 {{.*}}sp:128
; CHECK: vld1.64 {{.*}}sp:128
entry:
diff --git a/test/CodeGen/ARM/stack-alignment.ll b/test/CodeGen/ARM/stack-alignment.ll
new file mode 100644
index 0000000..153f92e
--- /dev/null
+++ b/test/CodeGen/ARM/stack-alignment.ll
@@ -0,0 +1,167 @@
+; RUN: llc -verify-machineinstrs < %s -mtriple=armv4t | FileCheck %s -check-prefix=CHECK -check-prefix=CHECK-v4A32
+; RUN: llc -verify-machineinstrs < %s -mtriple=armv7a | FileCheck %s -check-prefix=CHECK -check-prefix=CHECK-v7A32
+; RUN: llc -verify-machineinstrs < %s -mtriple=thumbv7a | FileCheck %s -check-prefix=CHECK -check-prefix=CHECK-THUMB2
+; FIXME: There are no tests for Thumb1, since dynamic stack alignment is not
+; supported on Thumb1.
+
+define i32 @f_bic_can_be_used_align() nounwind {
+entry:
+; CHECK-LABEL: f_bic_can_be_used_align:
+; CHECK-v7A32: bfc sp, #0, #8
+; CHECK-v4A32: bic sp, sp, #255
+; CHECK-THUMB2: mov r4, sp
+; CHECK-THUMB2-NEXT: bfc r4, #0, #8
+; CHECK-THUMB2-NEXT: mov sp, r4
+ %x = alloca i32, align 256
+ store volatile i32 0, i32* %x, align 256
+ ret i32 0
+}
+
+define i32 @f_too_large_for_bic_align() nounwind {
+entry:
+; CHECK-LABEL: f_too_large_for_bic_align:
+; CHECK-v7A32: bfc sp, #0, #9
+; CHECK-v4A32: lsr sp, sp, #9
+; CHECK-v4A32: lsl sp, sp, #9
+; CHECK-THUMB2: mov r4, sp
+; CHECK-THUMB2-NEXT: bfc r4, #0, #9
+; CHECK-THUMB2-NEXT: mov sp, r4
+ %x = alloca i32, align 512
+ store volatile i32 0, i32* %x, align 512
+ ret i32 0
+}
+
+define i8* @f_alignedDPRCS2Spills(double* %d) #0 {
+entry:
+; CHECK-LABEL: f_alignedDPRCS2Spills:
+; CHECK-v7A32: bfc sp, #0, #12
+; CHECK-v4A32: lsr sp, sp, #12
+; CHECK-v4A32: lsl sp, sp, #12
+; CHECK-THUMB2: bfc r4, #0, #12
+; CHECK-THUMB2-NEXT: mov sp, r4
+ %a = alloca i8, align 4096
+ %0 = load double* %d, align 4
+ %arrayidx1 = getelementptr inbounds double* %d, i32 1
+ %1 = load double* %arrayidx1, align 4
+ %arrayidx2 = getelementptr inbounds double* %d, i32 2
+ %2 = load double* %arrayidx2, align 4
+ %arrayidx3 = getelementptr inbounds double* %d, i32 3
+ %3 = load double* %arrayidx3, align 4
+ %arrayidx4 = getelementptr inbounds double* %d, i32 4
+ %4 = load double* %arrayidx4, align 4
+ %arrayidx5 = getelementptr inbounds double* %d, i32 5
+ %5 = load double* %arrayidx5, align 4
+ %arrayidx6 = getelementptr inbounds double* %d, i32 6
+ %6 = load double* %arrayidx6, align 4
+ %arrayidx7 = getelementptr inbounds double* %d, i32 7
+ %7 = load double* %arrayidx7, align 4
+ %arrayidx8 = getelementptr inbounds double* %d, i32 8
+ %8 = load double* %arrayidx8, align 4
+ %arrayidx9 = getelementptr inbounds double* %d, i32 9
+ %9 = load double* %arrayidx9, align 4
+ %arrayidx10 = getelementptr inbounds double* %d, i32 10
+ %10 = load double* %arrayidx10, align 4
+ %arrayidx11 = getelementptr inbounds double* %d, i32 11
+ %11 = load double* %arrayidx11, align 4
+ %arrayidx12 = getelementptr inbounds double* %d, i32 12
+ %12 = load double* %arrayidx12, align 4
+ %arrayidx13 = getelementptr inbounds double* %d, i32 13
+ %13 = load double* %arrayidx13, align 4
+ %arrayidx14 = getelementptr inbounds double* %d, i32 14
+ %14 = load double* %arrayidx14, align 4
+ %arrayidx15 = getelementptr inbounds double* %d, i32 15
+ %15 = load double* %arrayidx15, align 4
+ %arrayidx16 = getelementptr inbounds double* %d, i32 16
+ %16 = load double* %arrayidx16, align 4
+ %arrayidx17 = getelementptr inbounds double* %d, i32 17
+ %17 = load double* %arrayidx17, align 4
+ %arrayidx18 = getelementptr inbounds double* %d, i32 18
+ %18 = load double* %arrayidx18, align 4
+ %arrayidx19 = getelementptr inbounds double* %d, i32 19
+ %19 = load double* %arrayidx19, align 4
+ %arrayidx20 = getelementptr inbounds double* %d, i32 20
+ %20 = load double* %arrayidx20, align 4
+ %arrayidx21 = getelementptr inbounds double* %d, i32 21
+ %21 = load double* %arrayidx21, align 4
+ %arrayidx22 = getelementptr inbounds double* %d, i32 22
+ %22 = load double* %arrayidx22, align 4
+ %arrayidx23 = getelementptr inbounds double* %d, i32 23
+ %23 = load double* %arrayidx23, align 4
+ %arrayidx24 = getelementptr inbounds double* %d, i32 24
+ %24 = load double* %arrayidx24, align 4
+ %arrayidx25 = getelementptr inbounds double* %d, i32 25
+ %25 = load double* %arrayidx25, align 4
+ %arrayidx26 = getelementptr inbounds double* %d, i32 26
+ %26 = load double* %arrayidx26, align 4
+ %arrayidx27 = getelementptr inbounds double* %d, i32 27
+ %27 = load double* %arrayidx27, align 4
+ %arrayidx28 = getelementptr inbounds double* %d, i32 28
+ %28 = load double* %arrayidx28, align 4
+ %arrayidx29 = getelementptr inbounds double* %d, i32 29
+ %29 = load double* %arrayidx29, align 4
+ %div = fdiv double %29, %28
+ %div30 = fdiv double %div, %27
+ %div31 = fdiv double %div30, %26
+ %div32 = fdiv double %div31, %25
+ %div33 = fdiv double %div32, %24
+ %div34 = fdiv double %div33, %23
+ %div35 = fdiv double %div34, %22
+ %div36 = fdiv double %div35, %21
+ %div37 = fdiv double %div36, %20
+ %div38 = fdiv double %div37, %19
+ %div39 = fdiv double %div38, %18
+ %div40 = fdiv double %div39, %17
+ %div41 = fdiv double %div40, %16
+ %div42 = fdiv double %div41, %15
+ %div43 = fdiv double %div42, %14
+ %div44 = fdiv double %div43, %13
+ %div45 = fdiv double %div44, %12
+ %div46 = fdiv double %div45, %11
+ %div47 = fdiv double %div46, %10
+ %div48 = fdiv double %div47, %9
+ %div49 = fdiv double %div48, %8
+ %div50 = fdiv double %div49, %7
+ %div51 = fdiv double %div50, %6
+ %div52 = fdiv double %div51, %5
+ %div53 = fdiv double %div52, %4
+ %div54 = fdiv double %div53, %3
+ %div55 = fdiv double %div54, %2
+ %div56 = fdiv double %div55, %1
+ %div57 = fdiv double %div56, %0
+ %div58 = fdiv double %0, %1
+ %div59 = fdiv double %div58, %2
+ %div60 = fdiv double %div59, %3
+ %div61 = fdiv double %div60, %4
+ %div62 = fdiv double %div61, %5
+ %div63 = fdiv double %div62, %6
+ %div64 = fdiv double %div63, %7
+ %div65 = fdiv double %div64, %8
+ %div66 = fdiv double %div65, %9
+ %div67 = fdiv double %div66, %10
+ %div68 = fdiv double %div67, %11
+ %div69 = fdiv double %div68, %12
+ %div70 = fdiv double %div69, %13
+ %div71 = fdiv double %div70, %14
+ %div72 = fdiv double %div71, %15
+ %div73 = fdiv double %div72, %16
+ %div74 = fdiv double %div73, %17
+ %div75 = fdiv double %div74, %18
+ %div76 = fdiv double %div75, %19
+ %div77 = fdiv double %div76, %20
+ %div78 = fdiv double %div77, %21
+ %div79 = fdiv double %div78, %22
+ %div80 = fdiv double %div79, %23
+ %div81 = fdiv double %div80, %24
+ %div82 = fdiv double %div81, %25
+ %div83 = fdiv double %div82, %26
+ %div84 = fdiv double %div83, %27
+ %div85 = fdiv double %div84, %28
+ %div86 = fdiv double %div85, %29
+ %mul = fmul double %div57, %div86
+ %conv = fptosi double %mul to i32
+ %add.ptr = getelementptr inbounds i8* %a, i32 %conv
+ ret i8* %add.ptr
+}
diff --git a/test/CodeGen/ARM/stack_guard_remat.ll b/test/CodeGen/ARM/stack_guard_remat.ll
index b11ea92..7c89b99 100644
--- a/test/CodeGen/ARM/stack_guard_remat.ll
+++ b/test/CodeGen/ARM/stack_guard_remat.ll
@@ -8,7 +8,7 @@
;PIC: foo2
;PIC: ldr [[R0:r[0-9]+]], [[LABEL0:LCPI[0-9_]+]]
;PIC: [[LABEL1:LPC0_1]]:
-;PIC: ldr [[R1:r[0-9]+]], [pc, [[R0]]]
+;PIC: add [[R1:r[0-9]+]], pc, [[R0]]
;PIC: ldr [[R2:r[0-9]+]], {{\[}}[[R1]]{{\]}}
;PIC: ldr {{r[0-9]+}}, {{\[}}[[R2]]{{\]}}
diff --git a/test/CodeGen/ARM/stackpointer.ll b/test/CodeGen/ARM/stackpointer.ll
index 420a916..320f0d9 100644
--- a/test/CodeGen/ARM/stackpointer.ll
+++ b/test/CodeGen/ARM/stackpointer.ll
@@ -22,4 +22,4 @@ declare void @llvm.write_register.i32(metadata, i32) nounwind
; register unsigned long current_stack_pointer asm("sp");
; CHECK-NOT: .asciz "sp"
-!0 = metadata !{metadata !"sp\00"}
+!0 = !{!"sp\00"}
diff --git a/test/CodeGen/ARM/sub-cmp-peephole.ll b/test/CodeGen/ARM/sub-cmp-peephole.ll
index 19727da..f7328dc 100644
--- a/test/CodeGen/ARM/sub-cmp-peephole.ll
+++ b/test/CodeGen/ARM/sub-cmp-peephole.ll
@@ -88,6 +88,22 @@ if.end11:                                         ; preds = %num2long.exit
ret i32 23
}
+; When considering the producer of the cmp's source as the subsuming
+; instruction, only do so when the comparison is against 0.
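+; (Rationale, as a sketch: a flag-setting sub sets Z from its result, so it can
+; subsume an eq/ne compare of that result against 0, but it says nothing about
+; a compare against any other constant, such as the 17 below.)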
+define i32 @cmp_src_nonzero(i32 %a, i32 %b, i32 %x, i32 %y) {
+entry:
+; CHECK-LABEL: cmp_src_nonzero:
+; CHECK: sub
+; CHECK: cmp
+ %sub = sub i32 %a, %b
+ %cmp = icmp eq i32 %sub, 17
+ %ret = select i1 %cmp, i32 %x, i32 %y
+ ret i32 %ret
+}
+
define float @float_sel(i32 %a, i32 %b, float %x, float %y) {
entry:
; CHECK-LABEL: float_sel:
@@ -144,3 +157,50 @@ entry:
store i32 %sub, i32* @t
ret double %ret
}
+
+declare void @abort()
+declare void @exit(i32)
+
+; If the comparison uses the V bit (signed overflow/underflow), we can't
+; omit the comparison.
+define i32 @cmp_slt0(i32 %a, i32 %b, i32 %x, i32 %y) {
+entry:
+; CHECK-LABEL: cmp_slt0
+; CHECK: sub
+; CHECK: cmp
+; CHECK: bge
+ %load = load i32* @t, align 4
+ %sub = sub i32 %load, 17
+ %cmp = icmp slt i32 %sub, 0
+ br i1 %cmp, label %if.then, label %if.else
+
+if.then:
+ call void @abort()
+ unreachable
+
+if.else:
+ call void @exit(i32 0)
+ unreachable
+}
+
+; Same for the C bit. (Note that "ult X, 0" is trivially false, so the DAG
+; combiner may or may not optimize it away.)
+define i32 @cmp_ult0(i32 %a, i32 %b, i32 %x, i32 %y) {
+entry:
+; CHECK-LABEL: cmp_ult0
+; CHECK: sub
+; CHECK: cmp
+; CHECK: bhs
+ %load = load i32* @t, align 4
+ %sub = sub i32 %load, 17
+ %cmp = icmp ult i32 %sub, 0
+ br i1 %cmp, label %if.then, label %if.else
+
+if.then:
+ call void @abort()
+ unreachable
+
+if.else:
+ call void @exit(i32 0)
+ unreachable
+}
diff --git a/test/CodeGen/ARM/tail-call-weak.ll b/test/CodeGen/ARM/tail-call-weak.ll
new file mode 100644
index 0000000..466c33d
--- /dev/null
+++ b/test/CodeGen/ARM/tail-call-weak.ll
@@ -0,0 +1,19 @@
+; RUN: llc -mtriple thumbv7-windows-coff -filetype asm -o - %s | FileCheck %s -check-prefix CHECK-COFF
+; RUN: llc -mtriple thumbv7-elf -filetype asm -o - %s | FileCheck %s -check-prefix CHECK-ELF
+; RUN: llc -mtriple thumbv7-macho -filetype asm -o - %s | FileCheck %s -check-prefix CHECK-MACHO
+
+declare i8* @f()
+declare extern_weak i8* @g(i8*)
+
+; Weak symbol resolution occurs statically in PE/COFF; ensure that we permit
+; tail calls on weak externals when targeting a COFF environment.
+define void @test() {
+ %call = tail call i8* @f()
+ %call1 = tail call i8* @g(i8* %call)
+ ret void
+}
+
+; CHECK-COFF: b g
+; CHECK-ELF: bl g
+; CHECK-MACHO: blx _g
+
diff --git a/test/CodeGen/ARM/tail-call.ll b/test/CodeGen/ARM/tail-call.ll
index c3e7965..ca19b05 100644
--- a/test/CodeGen/ARM/tail-call.ll
+++ b/test/CodeGen/ARM/tail-call.ll
@@ -1,5 +1,6 @@
-; RUN: llc -mtriple armv7 -O0 -o - < %s | FileCheck %s -check-prefix CHECK-TAIL
-; RUN: llc -mtriple armv7 -O0 -disable-tail-calls -o - < %s \
+; RUN: llc -mtriple armv7 -target-abi apcs -O0 -o - < %s \
+; RUN: | FileCheck %s -check-prefix CHECK-TAIL
+; RUN: llc -mtriple armv7 -target-abi apcs -O0 -disable-tail-calls -o - < %s \
; RUN: | FileCheck %s -check-prefix CHECK-NO-TAIL
declare i32 @callee(i32 %i)
diff --git a/test/CodeGen/ARM/tail-merge-branch-weight.ll b/test/CodeGen/ARM/tail-merge-branch-weight.ll
index 9b5d566..95b0a20 100644
--- a/test/CodeGen/ARM/tail-merge-branch-weight.ll
+++ b/test/CodeGen/ARM/tail-merge-branch-weight.ll
@@ -39,6 +39,6 @@ L3: ; preds = %L0, %L1, %L2
ret i32 %retval.0
}
-!0 = metadata !{metadata !"branch_weights", i32 200, i32 800}
-!1 = metadata !{metadata !"branch_weights", i32 600, i32 400}
-!2 = metadata !{metadata !"branch_weights", i32 300, i32 700}
+!0 = !{!"branch_weights", i32 200, i32 800}
+!1 = !{!"branch_weights", i32 600, i32 400}
+!2 = !{!"branch_weights", i32 300, i32 700}
diff --git a/test/CodeGen/ARM/taildup-branch-weight.ll b/test/CodeGen/ARM/taildup-branch-weight.ll
index 0a16071..64e0f4b 100644
--- a/test/CodeGen/ARM/taildup-branch-weight.ll
+++ b/test/CodeGen/ARM/taildup-branch-weight.ll
@@ -27,7 +27,7 @@ B4:
ret void
}
-!0 = metadata !{metadata !"branch_weights", i32 4, i32 124}
+!0 = !{!"branch_weights", i32 4, i32 124}
; CHECK: Machine code for function test1:
; CHECK: Successors according to CFG: BB#1(8) BB#2(248)
@@ -51,4 +51,4 @@ B3:
ret void
}
-!1 = metadata !{metadata !"branch_weights", i32 248, i32 8}
+!1 = !{!"branch_weights", i32 248, i32 8}
diff --git a/test/CodeGen/ARM/thumb1-varalloc.ll b/test/CodeGen/ARM/thumb1-varalloc.ll
index 8d5888d..82c4ad5 100644
--- a/test/CodeGen/ARM/thumb1-varalloc.ll
+++ b/test/CodeGen/ARM/thumb1-varalloc.ll
@@ -43,26 +43,6 @@ bb3:
declare noalias i8* @strdup(i8* nocapture) nounwind
declare i32 @_called_func(i8*, i32*) nounwind
-; Variable ending up at unaligned offset from sp (i.e. not a multiple of 4)
-define void @test_local_var_addr() {
-; CHECK-LABEL: test_local_var_addr:
-
- %addr1 = alloca i8
- %addr2 = alloca i8
-
-; CHECK: mov r0, sp
-; CHECK: adds r0, #{{[0-9]+}}
-; CHECK: blx
- call void @take_ptr(i8* %addr1)
-
-; CHECK: mov r0, sp
-; CHECK: adds r0, #{{[0-9]+}}
-; CHECK: blx
- call void @take_ptr(i8* %addr2)
-
- ret void
-}
-
; Simple variable ending up *at* sp.
define void @test_simple_var() {
; CHECK-LABEL: test_simple_var:
@@ -126,14 +106,16 @@ define void @test_local_var_offset_1020() {
ret void
}
-; Max range addressable with tADDrSPi + tADDi8
-define void @test_local_var_offset_1275() {
-; CHECK-LABEL: test_local_var_offset_1275
+; The max range addressable with tADDrSPi + tADDi8 is 1275; however, the
+; automatic 4-byte alignment of objects on the stack, combined with 8-byte
+; stack alignment, means that 1268 is the max offset we can use.
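+; (Sketch of the arithmetic: tADDrSPi reaches sp + 255*4 = 1020 and tADDi8 can
+; add up to 255 more, for 1275 in total; rounding down for the alignment
+; constraints above leaves 1020 + 248 = 1268, matching the CHECKs below.)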
+define void @test_local_var_offset_1268() {
+; CHECK-LABEL: test_local_var_offset_1268
%addr1 = alloca i8, i32 1
- %addr2 = alloca i8, i32 1275
+ %addr2 = alloca i8, i32 1268
; CHECK: add r0, sp, #1020
-; CHECK: adds r0, #255
+; CHECK: adds r0, #248
; CHECK-NEXT: blx
call void @take_ptr(i8* %addr1)
diff --git a/test/CodeGen/ARM/thumb1_return_sequence.ll b/test/CodeGen/ARM/thumb1_return_sequence.ll
index 318e6e4..c831260 100644
--- a/test/CodeGen/ARM/thumb1_return_sequence.ll
+++ b/test/CodeGen/ARM/thumb1_return_sequence.ll
@@ -3,7 +3,7 @@
; CHECK-V4T-LABEL: clobberframe
; CHECK-V5T-LABEL: clobberframe
-define <4 x i32> @clobberframe() #0 {
+define <4 x i32> @clobberframe(<6 x i32>* %p) #0 {
entry:
; Prologue
; --------
@@ -11,9 +11,10 @@ entry:
; CHECK-V4T: sub sp,
; CHECK-V5T: push {[[SAVED:(r[4567](, )?)+]], lr}
- %b = alloca <4 x i32>, align 16
+ %b = alloca <6 x i32>, align 16
%a = alloca <4 x i32>, align 16
- store <4 x i32> <i32 42, i32 42, i32 42, i32 42>, <4 x i32>* %b, align 16
+ %stuff = load <6 x i32>* %p, align 16
+ store <6 x i32> %stuff, <6 x i32>* %b, align 16
store <4 x i32> <i32 0, i32 1, i32 2, i32 3>, <4 x i32>* %a, align 16
%0 = load <4 x i32>* %a, align 16
ret <4 x i32> %0
@@ -70,40 +71,25 @@ entry:
; CHECK-V4T-LABEL: simpleframe
; CHECK-V5T-LABEL: simpleframe
-define i32 @simpleframe() #0 {
+define i32 @simpleframe(<6 x i32>* %p) #0 {
entry:
; Prologue
; --------
; CHECK-V4T: push {[[SAVED:(r[4567](, )?)+]], lr}
; CHECK-V5T: push {[[SAVED:(r[4567](, )?)+]], lr}
- %a = alloca i32, align 4
- %b = alloca i32, align 4
- %c = alloca i32, align 4
- %d = alloca i32, align 4
- store i32 1, i32* %a, align 4
- store i32 2, i32* %b, align 4
- store i32 3, i32* %c, align 4
- store i32 4, i32* %d, align 4
- %0 = load i32* %a, align 4
- %inc = add nsw i32 %0, 1
- store i32 %inc, i32* %a, align 4
- %1 = load i32* %b, align 4
- %inc1 = add nsw i32 %1, 1
- store i32 %inc1, i32* %b, align 4
- %2 = load i32* %c, align 4
- %inc2 = add nsw i32 %2, 1
- store i32 %inc2, i32* %c, align 4
- %3 = load i32* %d, align 4
- %inc3 = add nsw i32 %3, 1
- store i32 %inc3, i32* %d, align 4
- %4 = load i32* %a, align 4
- %5 = load i32* %b, align 4
- %add = add nsw i32 %4, %5
- %6 = load i32* %c, align 4
- %add4 = add nsw i32 %add, %6
- %7 = load i32* %d, align 4
- %add5 = add nsw i32 %add4, %7
+ %0 = load <6 x i32>* %p, align 16
+ %1 = extractelement <6 x i32> %0, i32 0
+ %2 = extractelement <6 x i32> %0, i32 1
+ %3 = extractelement <6 x i32> %0, i32 2
+ %4 = extractelement <6 x i32> %0, i32 3
+ %5 = extractelement <6 x i32> %0, i32 4
+ %6 = extractelement <6 x i32> %0, i32 5
+ %add1 = add nsw i32 %1, %2
+ %add2 = add nsw i32 %add1, %3
+ %add3 = add nsw i32 %add2, %4
+ %add4 = add nsw i32 %add3, %5
+ %add5 = add nsw i32 %add4, %6
ret i32 %add5
; Epilogue
diff --git a/test/CodeGen/ARM/thumb_indirect_calls.ll b/test/CodeGen/ARM/thumb_indirect_calls.ll
new file mode 100644
index 0000000..16a55a8
--- /dev/null
+++ b/test/CodeGen/ARM/thumb_indirect_calls.ll
@@ -0,0 +1,40 @@
+; RUN: llc -mtriple=thumbv4t-eabi %s -o - | FileCheck -check-prefix=CHECK -check-prefix=CHECK-V4T %s
+; RUN: llc -mtriple=thumbv5t-eabi %s -o - | FileCheck -check-prefix=CHECK -check-prefix=CHECK-V5T %s
+
+@f = common global void (i32)* null, align 4
+
+; CHECK-LABEL: foo:
+define void @foo(i32 %x) {
+entry:
+ %0 = load void (i32)** @f, align 4
+ tail call void %0(i32 %x)
+ ret void
+
+; CHECK: ldr [[TMP:r[0-3]]], [[F:\.[A-Z0-9_]+]]
+; CHECK: ldr [[CALLEE:r[0-3]]], {{\[}}[[TMP]]{{\]}}
+
+; CHECK-V4T-NOT: blx
+; CHECK-V4T: bl [[INDIRECT_PAD:\.Ltmp[0-9]+]]
+; CHECK-V4T: [[F]]:
+; CHECK-V4T: [[INDIRECT_PAD]]:
+; CHECK-V4T-NEXT: bx [[CALLEE]]
+; CHECK-V5T: blx [[CALLEE]]
+}
+
+; CHECK-LABEL: bar:
+define void @bar(void (i32)* nocapture %g, i32 %x, void (i32)* nocapture %h) {
+entry:
+ tail call void %g(i32 %x)
+ tail call void %h(i32 %x)
+ ret void
+
+; CHECK-V4T: bl [[INDIRECT_PAD1:\.Ltmp[0-9]+]]
+; CHECK-V4T: bl [[INDIRECT_PAD2:\.Ltmp[0-9]+]]
+; CHECK-V4T: [[INDIRECT_PAD1]]:
+; CHECK-V4T-NEXT: bx
+; CHECK-V4T: [[INDIRECT_PAD2]]:
+; CHECK-V4T-NEXT: bx
+; CHECK-V5T: blx
+; CHECK-V5T: blx
+}
+
diff --git a/test/CodeGen/ARM/tls1.ll b/test/CodeGen/ARM/tls1.ll
index a1ca0b7..b03f76b 100644
--- a/test/CodeGen/ARM/tls1.ll
+++ b/test/CodeGen/ARM/tls1.ll
@@ -1,11 +1,13 @@
-; RUN: llc < %s -march=arm -mtriple=arm-linux-gnueabi | \
-; RUN: grep "i(TPOFF)"
-; RUN: llc < %s -march=arm -mtriple=arm-linux-gnueabi | \
-; RUN: grep "__aeabi_read_tp"
-; RUN: llc < %s -march=arm -mtriple=arm-linux-gnueabi \
-; RUN: -relocation-model=pic | grep "__tls_get_addr"
+; RUN: llc < %s -march=arm -mtriple=arm-linux-gnueabi | FileCheck %s
+; RUN: llc < %s -march=arm -mtriple=arm-linux-gnueabi -relocation-model=pic | \
+; RUN: FileCheck %s --check-prefix=PIC
+; CHECK: i(TPOFF)
+; CHECK: __aeabi_read_tp
+
+; PIC: __tls_get_addr
+
@i = thread_local global i32 15 ; <i32*> [#uses=2]
define i32 @f() {
diff --git a/test/CodeGen/ARM/vdup.ll b/test/CodeGen/ARM/vdup.ll
index 89f355c..6f8b3dd 100644
--- a/test/CodeGen/ARM/vdup.ll
+++ b/test/CodeGen/ARM/vdup.ll
@@ -347,17 +347,17 @@ define <2 x float> @check_spr_splat2(<2 x float> %p, i16 %q) {
define <4 x float> @check_spr_splat4(<4 x float> %p, i16 %q) {
;CHECK-LABEL: check_spr_splat4:
-;CHECK: vdup.32 q
+;CHECK: vld1.16
%conv = sitofp i16 %q to float
%splat.splatinsert = insertelement <4 x float> undef, float %conv, i32 0
%splat.splat = shufflevector <4 x float> %splat.splatinsert, <4 x float> undef, <4 x i32> zeroinitializer
%sub = fsub <4 x float> %splat.splat, %p
ret <4 x float> %sub
}
-
+; Same codegen as above test; scalar is splatted using vld1, so shuffle index is irrelevant.
define <4 x float> @check_spr_splat4_lane1(<4 x float> %p, i16 %q) {
;CHECK-LABEL: check_spr_splat4_lane1:
-;CHECK: vdup.32 q{{.*}}, d{{.*}}[1]
+;CHECK: vld1.16
%conv = sitofp i16 %q to float
%splat.splatinsert = insertelement <4 x float> undef, float %conv, i32 1
%splat.splat = shufflevector <4 x float> %splat.splatinsert, <4 x float> undef, <4 x i32> <i32 1, i32 1, i32 1, i32 1>
diff --git a/test/CodeGen/ARM/vector-DAGCombine.ll b/test/CodeGen/ARM/vector-DAGCombine.ll
index 759da22..566e955 100644
--- a/test/CodeGen/ARM/vector-DAGCombine.ll
+++ b/test/CodeGen/ARM/vector-DAGCombine.ll
@@ -27,6 +27,14 @@ entry:
ret void
}
+; PR22678
+; Check CONCAT_VECTORS DAG combiner pass doesn't introduce illegal types.
+define void @test_pr22678() {
+ %1 = fptoui <16 x float> undef to <16 x i8>
+ store <16 x i8> %1, <16 x i8>* undef
+ ret void
+}
+
; Radar 8407927: Make sure that VMOVRRD gets optimized away when the result is
; converted back to be used as a vector type.
; CHECK-LABEL: test_vmovrrd_combine:
diff --git a/test/CodeGen/ARM/vector-load.ll b/test/CodeGen/ARM/vector-load.ll
new file mode 100644
index 0000000..c177a55
--- /dev/null
+++ b/test/CodeGen/ARM/vector-load.ll
@@ -0,0 +1,257 @@
+; RUN: llc < %s | FileCheck %s
+
+target datalayout = "e-m:o-p:32:32-i1:8:32-i8:8:32-i16:16:32-f64:32:64-v64:32:64-v128:32:128-a:0:32-n32-S32"
+target triple = "thumbv7s-apple-ios8.0.0"
+
+define <8 x i8> @load_v8i8(<8 x i8>** %ptr) {
+;CHECK-LABEL: load_v8i8:
+;CHECK: vld1.8 {{{d[0-9]+}}}, [{{r[0-9]+}}]
+ %A = load <8 x i8>** %ptr
+ %lA = load <8 x i8>* %A, align 1
+ ret <8 x i8> %lA
+}
+
+define <8 x i8> @load_v8i8_update(<8 x i8>** %ptr) {
+;CHECK-LABEL: load_v8i8_update:
+;CHECK: vld1.8 {{{d[0-9]+}}}, [{{r[0-9]+}}]!
+ %A = load <8 x i8>** %ptr
+ %lA = load <8 x i8>* %A, align 1
+	%inc = getelementptr <8 x i8>* %A, i32 1
+ store <8 x i8>* %inc, <8 x i8>** %ptr
+ ret <8 x i8> %lA
+}
+
+define <4 x i16> @load_v4i16(<4 x i16>** %ptr) {
+;CHECK-LABEL: load_v4i16:
+;CHECK: vld1.8 {{{d[0-9]+}}}, [{{r[0-9]+}}]
+ %A = load <4 x i16>** %ptr
+ %lA = load <4 x i16>* %A, align 1
+ ret <4 x i16> %lA
+}
+
+define <4 x i16> @load_v4i16_update(<4 x i16>** %ptr) {
+;CHECK-LABEL: load_v4i16_update:
+;CHECK: vld1.8 {{{d[0-9]+}}}, [{{r[0-9]+}}]!
+ %A = load <4 x i16>** %ptr
+ %lA = load <4 x i16>* %A, align 1
+	%inc = getelementptr <4 x i16>* %A, i32 1
+ store <4 x i16>* %inc, <4 x i16>** %ptr
+ ret <4 x i16> %lA
+}
+
+define <2 x i32> @load_v2i32(<2 x i32>** %ptr) {
+;CHECK-LABEL: load_v2i32:
+;CHECK: vld1.8 {{{d[0-9]+}}}, [{{r[0-9]+}}]
+ %A = load <2 x i32>** %ptr
+ %lA = load <2 x i32>* %A, align 1
+ ret <2 x i32> %lA
+}
+
+define <2 x i32> @load_v2i32_update(<2 x i32>** %ptr) {
+;CHECK-LABEL: load_v2i32_update:
+;CHECK: vld1.8 {{{d[0-9]+}}}, [{{r[0-9]+}}]!
+ %A = load <2 x i32>** %ptr
+ %lA = load <2 x i32>* %A, align 1
+ %inc = getelementptr <2 x i32>* %A, i32 1
+ store <2 x i32>* %inc, <2 x i32>** %ptr
+ ret <2 x i32> %lA
+}
+
+define <2 x float> @load_v2f32(<2 x float>** %ptr) {
+;CHECK-LABEL: load_v2f32:
+;CHECK: vld1.8 {{{d[0-9]+}}}, [{{r[0-9]+}}]
+ %A = load <2 x float>** %ptr
+ %lA = load <2 x float>* %A, align 1
+ ret <2 x float> %lA
+}
+
+define <2 x float> @load_v2f32_update(<2 x float>** %ptr) {
+;CHECK-LABEL: load_v2f32_update:
+;CHECK: vld1.8 {{{d[0-9]+}}}, [{{r[0-9]+}}]!
+ %A = load <2 x float>** %ptr
+ %lA = load <2 x float>* %A, align 1
+ %inc = getelementptr <2 x float>* %A, i32 1
+ store <2 x float>* %inc, <2 x float>** %ptr
+ ret <2 x float> %lA
+}
+
+define <1 x i64> @load_v1i64(<1 x i64>** %ptr) {
+;CHECK-LABEL: load_v1i64:
+;CHECK: vld1.8 {{{d[0-9]+}}}, [{{r[0-9]+}}]
+ %A = load <1 x i64>** %ptr
+ %lA = load <1 x i64>* %A, align 1
+ ret <1 x i64> %lA
+}
+
+define <1 x i64> @load_v1i64_update(<1 x i64>** %ptr) {
+;CHECK-LABEL: load_v1i64_update:
+;CHECK: vld1.8 {{{d[0-9]+}}}, [{{r[0-9]+}}]!
+ %A = load <1 x i64>** %ptr
+ %lA = load <1 x i64>* %A, align 1
+	%inc = getelementptr <1 x i64>* %A, i32 1
+ store <1 x i64>* %inc, <1 x i64>** %ptr
+ ret <1 x i64> %lA
+}
+
+define <16 x i8> @load_v16i8(<16 x i8>** %ptr) {
+;CHECK-LABEL: load_v16i8:
+;CHECK: vld1.8 {{{d[0-9]+, d[0-9]+}}}, [{{r[0-9]+}}]
+ %A = load <16 x i8>** %ptr
+ %lA = load <16 x i8>* %A, align 1
+ ret <16 x i8> %lA
+}
+
+define <16 x i8> @load_v16i8_update(<16 x i8>** %ptr) {
+;CHECK-LABEL: load_v16i8_update:
+;CHECK: vld1.8 {{{d[0-9]+, d[0-9]+}}}, [{{r[0-9]+}}]!
+ %A = load <16 x i8>** %ptr
+ %lA = load <16 x i8>* %A, align 1
+	%inc = getelementptr <16 x i8>* %A, i32 1
+ store <16 x i8>* %inc, <16 x i8>** %ptr
+ ret <16 x i8> %lA
+}
+
+define <8 x i16> @load_v8i16(<8 x i16>** %ptr) {
+;CHECK-LABEL: load_v8i16:
+;CHECK: vld1.8 {{{d[0-9]+, d[0-9]+}}}, [{{r[0-9]+}}]
+ %A = load <8 x i16>** %ptr
+ %lA = load <8 x i16>* %A, align 1
+ ret <8 x i16> %lA
+}
+
+define <8 x i16> @load_v8i16_update(<8 x i16>** %ptr) {
+;CHECK-LABEL: load_v8i16_update:
+;CHECK: vld1.8 {{{d[0-9]+, d[0-9]+}}}, [{{r[0-9]+}}]!
+ %A = load <8 x i16>** %ptr
+ %lA = load <8 x i16>* %A, align 1
+	%inc = getelementptr <8 x i16>* %A, i32 1
+ store <8 x i16>* %inc, <8 x i16>** %ptr
+ ret <8 x i16> %lA
+}
+
+define <4 x i32> @load_v4i32(<4 x i32>** %ptr) {
+;CHECK-LABEL: load_v4i32:
+;CHECK: vld1.8 {{{d[0-9]+, d[0-9]+}}}, [{{r[0-9]+}}]
+ %A = load <4 x i32>** %ptr
+ %lA = load <4 x i32>* %A, align 1
+ ret <4 x i32> %lA
+}
+
+define <4 x i32> @load_v4i32_update(<4 x i32>** %ptr) {
+;CHECK-LABEL: load_v4i32_update:
+;CHECK: vld1.8 {{{d[0-9]+, d[0-9]+}}}, [{{r[0-9]+}}]!
+ %A = load <4 x i32>** %ptr
+ %lA = load <4 x i32>* %A, align 1
+	%inc = getelementptr <4 x i32>* %A, i32 1
+ store <4 x i32>* %inc, <4 x i32>** %ptr
+ ret <4 x i32> %lA
+}
+
+define <4 x float> @load_v4f32(<4 x float>** %ptr) {
+;CHECK-LABEL: load_v4f32:
+;CHECK: vld1.8 {{{d[0-9]+, d[0-9]+}}}, [{{r[0-9]+}}]
+ %A = load <4 x float>** %ptr
+ %lA = load <4 x float>* %A, align 1
+ ret <4 x float> %lA
+}
+
+define <4 x float> @load_v4f32_update(<4 x float>** %ptr) {
+;CHECK-LABEL: load_v4f32_update:
+;CHECK: vld1.8 {{{d[0-9]+, d[0-9]+}}}, [{{r[0-9]+}}]!
+ %A = load <4 x float>** %ptr
+ %lA = load <4 x float>* %A, align 1
+	%inc = getelementptr <4 x float>* %A, i32 1
+ store <4 x float>* %inc, <4 x float>** %ptr
+ ret <4 x float> %lA
+}
+
+define <2 x i64> @load_v2i64(<2 x i64>** %ptr) {
+;CHECK-LABEL: load_v2i64:
+;CHECK: vld1.8 {{{d[0-9]+, d[0-9]+}}}, [{{r[0-9]+}}]
+ %A = load <2 x i64>** %ptr
+ %lA = load <2 x i64>* %A, align 1
+ ret <2 x i64> %lA
+}
+
+define <2 x i64> @load_v2i64_update(<2 x i64>** %ptr) {
+;CHECK-LABEL: load_v2i64_update:
+;CHECK: vld1.8 {{{d[0-9]+, d[0-9]+}}}, [{{r[0-9]+}}]!
+ %A = load <2 x i64>** %ptr
+ %lA = load <2 x i64>* %A, align 1
+ %inc = getelementptr <2 x i64>* %A, i32 1
+ store <2 x i64>* %inc, <2 x i64>** %ptr
+ ret <2 x i64> %lA
+}
+
+; Make sure we change the type to match alignment if necessary.
+define <2 x i64> @load_v2i64_update_aligned2(<2 x i64>** %ptr) {
+;CHECK-LABEL: load_v2i64_update_aligned2:
+;CHECK: vld1.16 {{{d[0-9]+, d[0-9]+}}}, [{{r[0-9]+}}]!
+ %A = load <2 x i64>** %ptr
+ %lA = load <2 x i64>* %A, align 2
+ %inc = getelementptr <2 x i64>* %A, i32 1
+ store <2 x i64>* %inc, <2 x i64>** %ptr
+ ret <2 x i64> %lA
+}
+
+define <2 x i64> @load_v2i64_update_aligned4(<2 x i64>** %ptr) {
+;CHECK-LABEL: load_v2i64_update_aligned4:
+;CHECK: vld1.32 {{{d[0-9]+, d[0-9]+}}}, [{{r[0-9]+}}]!
+ %A = load <2 x i64>** %ptr
+ %lA = load <2 x i64>* %A, align 4
+ %inc = getelementptr <2 x i64>* %A, i32 1
+ store <2 x i64>* %inc, <2 x i64>** %ptr
+ ret <2 x i64> %lA
+}
+
+define <2 x i64> @load_v2i64_update_aligned8(<2 x i64>** %ptr) {
+;CHECK-LABEL: load_v2i64_update_aligned8:
+;CHECK: vld1.64 {{{d[0-9]+, d[0-9]+}}}, [{{r[0-9]+}}]!
+ %A = load <2 x i64>** %ptr
+ %lA = load <2 x i64>* %A, align 8
+ %inc = getelementptr <2 x i64>* %A, i32 1
+ store <2 x i64>* %inc, <2 x i64>** %ptr
+ ret <2 x i64> %lA
+}
+
+define <2 x i64> @load_v2i64_update_aligned16(<2 x i64>** %ptr) {
+;CHECK-LABEL: load_v2i64_update_aligned16:
+;CHECK: vld1.64 {{{d[0-9]+, d[0-9]+}}}, [{{r[0-9]+}}:128]!
+ %A = load <2 x i64>** %ptr
+ %lA = load <2 x i64>* %A, align 16
+ %inc = getelementptr <2 x i64>* %A, i32 1
+ store <2 x i64>* %inc, <2 x i64>** %ptr
+ ret <2 x i64> %lA
+}
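+
+; To summarize the four tests above: align 2 selects vld1.16, align 4 selects
+; vld1.32, and align 8 or 16 selects vld1.64; only the align-16 case also gets
+; the :128 address-alignment hint.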
+
+; Make sure we don't break smaller-than-dreg extloads.
+define <4 x i32> @zextload_v8i8tov8i32(<4 x i8>** %ptr) {
+;CHECK-LABEL: zextload_v8i8tov8i32:
+;CHECK: vld1.32 {{{d[0-9]+}}[0]}, [{{r[0-9]+}}:32]
+;CHECK: vmovl.u8 {{q[0-9]+}}, {{d[0-9]+}}
+;CHECK: vmovl.u16 {{q[0-9]+}}, {{d[0-9]+}}
+ %A = load <4 x i8>** %ptr
+ %lA = load <4 x i8>* %A, align 4
+ %zlA = zext <4 x i8> %lA to <4 x i32>
+ ret <4 x i32> %zlA
+}
+
+define <4 x i32> @zextload_v8i8tov8i32_fake_update(<4 x i8>** %ptr) {
+;CHECK-LABEL: zextload_v8i8tov8i32_fake_update:
+;CHECK: ldr.w r[[PTRREG:[0-9]+]], [r0]
+;CHECK: vld1.32 {{{d[0-9]+}}[0]}, [r[[PTRREG]]:32]
+;CHECK: add.w r[[INCREG:[0-9]+]], r[[PTRREG]], #16
+;CHECK: str.w r[[INCREG]], [r0]
+;CHECK: vmovl.u8 {{q[0-9]+}}, {{d[0-9]+}}
+;CHECK: vmovl.u16 {{q[0-9]+}}, {{d[0-9]+}}
+ %A = load <4 x i8>** %ptr
+ %lA = load <4 x i8>* %A, align 4
+	%inc = getelementptr <4 x i8>* %A, i32 4
+ store <4 x i8>* %inc, <4 x i8>** %ptr
+ %zlA = zext <4 x i8> %lA to <4 x i32>
+ ret <4 x i32> %zlA
+}
diff --git a/test/CodeGen/ARM/vector-store.ll b/test/CodeGen/ARM/vector-store.ll
new file mode 100644
index 0000000..55cb8f2
--- /dev/null
+++ b/test/CodeGen/ARM/vector-store.ll
@@ -0,0 +1,258 @@
+; RUN: llc < %s | FileCheck %s
+
+target datalayout = "e-m:o-p:32:32-i1:8:32-i8:8:32-i16:16:32-f64:32:64-v64:32:64-v128:32:128-a:0:32-n32-S32"
+target triple = "thumbv7s-apple-ios8.0.0"
+
+define void @store_v8i8(<8 x i8>** %ptr, <8 x i8> %val) {
+;CHECK-LABEL: store_v8i8:
+;CHECK: str r1, [r0]
+ %A = load <8 x i8>** %ptr
+ store <8 x i8> %val, <8 x i8>* %A, align 1
+ ret void
+}
+
+define void @store_v8i8_update(<8 x i8>** %ptr, <8 x i8> %val) {
+;CHECK-LABEL: store_v8i8_update:
+;CHECK: vst1.8 {{{d[0-9]+}}}, [{{r[0-9]+}}]!
+ %A = load <8 x i8>** %ptr
+ store <8 x i8> %val, <8 x i8>* %A, align 1
+	%inc = getelementptr <8 x i8>* %A, i32 1
+ store <8 x i8>* %inc, <8 x i8>** %ptr
+ ret void
+}
+
+define void @store_v4i16(<4 x i16>** %ptr, <4 x i16> %val) {
+;CHECK-LABEL: store_v4i16:
+;CHECK: str r1, [r0]
+ %A = load <4 x i16>** %ptr
+ store <4 x i16> %val, <4 x i16>* %A, align 1
+ ret void
+}
+
+define void @store_v4i16_update(<4 x i16>** %ptr, <4 x i16> %val) {
+;CHECK-LABEL: store_v4i16_update:
+;CHECK: vst1.8 {{{d[0-9]+}}}, [{{r[0-9]+}}]!
+ %A = load <4 x i16>** %ptr
+ store <4 x i16> %val, <4 x i16>* %A, align 1
+	%inc = getelementptr <4 x i16>* %A, i32 1
+ store <4 x i16>* %inc, <4 x i16>** %ptr
+ ret void
+}
+
+define void @store_v2i32(<2 x i32>** %ptr, <2 x i32> %val) {
+;CHECK-LABEL: store_v2i32:
+;CHECK: str r1, [r0]
+ %A = load <2 x i32>** %ptr
+ store <2 x i32> %val, <2 x i32>* %A, align 1
+ ret void
+}
+
+define void @store_v2i32_update(<2 x i32>** %ptr, <2 x i32> %val) {
+;CHECK-LABEL: store_v2i32_update:
+;CHECK: vst1.8 {{{d[0-9]+}}}, [{{r[0-9]+}}]!
+ %A = load <2 x i32>** %ptr
+ store <2 x i32> %val, <2 x i32>* %A, align 1
+ %inc = getelementptr <2 x i32>* %A, i32 1
+ store <2 x i32>* %inc, <2 x i32>** %ptr
+ ret void
+}
+
+define void @store_v2f32(<2 x float>** %ptr, <2 x float> %val) {
+;CHECK-LABEL: store_v2f32:
+;CHECK: str r1, [r0]
+ %A = load <2 x float>** %ptr
+ store <2 x float> %val, <2 x float>* %A, align 1
+ ret void
+}
+
+define void @store_v2f32_update(<2 x float>** %ptr, <2 x float> %val) {
+;CHECK-LABEL: store_v2f32_update:
+;CHECK: vst1.8 {{{d[0-9]+}}}, [{{r[0-9]+}}]!
+ %A = load <2 x float>** %ptr
+ store <2 x float> %val, <2 x float>* %A, align 1
+ %inc = getelementptr <2 x float>* %A, i32 1
+ store <2 x float>* %inc, <2 x float>** %ptr
+ ret void
+}
+
+define void @store_v1i64(<1 x i64>** %ptr, <1 x i64> %val) {
+;CHECK-LABEL: store_v1i64:
+;CHECK: str r1, [r0]
+ %A = load <1 x i64>** %ptr
+ store <1 x i64> %val, <1 x i64>* %A, align 1
+ ret void
+}
+
+define void @store_v1i64_update(<1 x i64>** %ptr, <1 x i64> %val) {
+;CHECK-LABEL: store_v1i64_update:
+;CHECK: vst1.8 {{{d[0-9]+}}}, [{{r[0-9]+}}]!
+ %A = load <1 x i64>** %ptr
+ store <1 x i64> %val, <1 x i64>* %A, align 1
+	%inc = getelementptr <1 x i64>* %A, i32 1
+ store <1 x i64>* %inc, <1 x i64>** %ptr
+ ret void
+}
+
+define void @store_v16i8(<16 x i8>** %ptr, <16 x i8> %val) {
+;CHECK-LABEL: store_v16i8:
+;CHECK: vst1.8 {{{d[0-9]+, d[0-9]+}}}, [{{r[0-9]+}}]
+ %A = load <16 x i8>** %ptr
+ store <16 x i8> %val, <16 x i8>* %A, align 1
+ ret void
+}
+
+define void @store_v16i8_update(<16 x i8>** %ptr, <16 x i8> %val) {
+;CHECK-LABEL: store_v16i8_update:
+;CHECK: vst1.8 {{{d[0-9]+, d[0-9]+}}}, [{{r[0-9]+}}]!
+ %A = load <16 x i8>** %ptr
+ store <16 x i8> %val, <16 x i8>* %A, align 1
+	%inc = getelementptr <16 x i8>* %A, i32 1
+ store <16 x i8>* %inc, <16 x i8>** %ptr
+ ret void
+}
+
+define void @store_v8i16(<8 x i16>** %ptr, <8 x i16> %val) {
+;CHECK-LABEL: store_v8i16:
+;CHECK: vst1.8 {{{d[0-9]+, d[0-9]+}}}, [{{r[0-9]+}}]
+ %A = load <8 x i16>** %ptr
+ store <8 x i16> %val, <8 x i16>* %A, align 1
+ ret void
+}
+
+define void @store_v8i16_update(<8 x i16>** %ptr, <8 x i16> %val) {
+;CHECK-LABEL: store_v8i16_update:
+;CHECK: vst1.8 {{{d[0-9]+, d[0-9]+}}}, [{{r[0-9]+}}]!
+ %A = load <8 x i16>** %ptr
+ store <8 x i16> %val, <8 x i16>* %A, align 1
+	%inc = getelementptr <8 x i16>* %A, i32 1
+ store <8 x i16>* %inc, <8 x i16>** %ptr
+ ret void
+}
+
+define void @store_v4i32(<4 x i32>** %ptr, <4 x i32> %val) {
+;CHECK-LABEL: store_v4i32:
+;CHECK: vst1.8 {{{d[0-9]+, d[0-9]+}}}, [{{r[0-9]+}}]
+ %A = load <4 x i32>** %ptr
+ store <4 x i32> %val, <4 x i32>* %A, align 1
+ ret void
+}
+
+define void @store_v4i32_update(<4 x i32>** %ptr, <4 x i32> %val) {
+;CHECK-LABEL: store_v4i32_update:
+;CHECK: vst1.8 {{{d[0-9]+, d[0-9]+}}}, [{{r[0-9]+}}]!
+ %A = load <4 x i32>** %ptr
+ store <4 x i32> %val, <4 x i32>* %A, align 1
+	%inc = getelementptr <4 x i32>* %A, i32 1
+ store <4 x i32>* %inc, <4 x i32>** %ptr
+ ret void
+}
+
+define void @store_v4f32(<4 x float>** %ptr, <4 x float> %val) {
+;CHECK-LABEL: store_v4f32:
+;CHECK: vst1.8 {{{d[0-9]+, d[0-9]+}}}, [{{r[0-9]+}}]
+ %A = load <4 x float>** %ptr
+ store <4 x float> %val, <4 x float>* %A, align 1
+ ret void
+}
+
+define void @store_v4f32_update(<4 x float>** %ptr, <4 x float> %val) {
+;CHECK-LABEL: store_v4f32_update:
+;CHECK: vst1.8 {{{d[0-9]+, d[0-9]+}}}, [{{r[0-9]+}}]!
+ %A = load <4 x float>** %ptr
+ store <4 x float> %val, <4 x float>* %A, align 1
+	%inc = getelementptr <4 x float>* %A, i32 1
+ store <4 x float>* %inc, <4 x float>** %ptr
+ ret void
+}
+
+define void @store_v2i64(<2 x i64>** %ptr, <2 x i64> %val) {
+;CHECK-LABEL: store_v2i64:
+;CHECK: vst1.8 {{{d[0-9]+, d[0-9]+}}}, [{{r[0-9]+}}]
+ %A = load <2 x i64>** %ptr
+ store <2 x i64> %val, <2 x i64>* %A, align 1
+ ret void
+}
+
+define void @store_v2i64_update(<2 x i64>** %ptr, <2 x i64> %val) {
+;CHECK-LABEL: store_v2i64_update:
+;CHECK: vst1.8 {{{d[0-9]+, d[0-9]+}}}, [{{r[0-9]+}}]!
+ %A = load <2 x i64>** %ptr
+ store <2 x i64> %val, <2 x i64>* %A, align 1
+ %inc = getelementptr <2 x i64>* %A, i32 1
+ store <2 x i64>* %inc, <2 x i64>** %ptr
+ ret void
+}
+
+define void @store_v2i64_update_aligned2(<2 x i64>** %ptr, <2 x i64> %val) {
+;CHECK-LABEL: store_v2i64_update_aligned2:
+;CHECK: vst1.16 {{{d[0-9]+, d[0-9]+}}}, [{{r[0-9]+}}]!
+ %A = load <2 x i64>** %ptr
+ store <2 x i64> %val, <2 x i64>* %A, align 2
+ %inc = getelementptr <2 x i64>* %A, i32 1
+ store <2 x i64>* %inc, <2 x i64>** %ptr
+ ret void
+}
+
+define void @store_v2i64_update_aligned4(<2 x i64>** %ptr, <2 x i64> %val) {
+;CHECK-LABEL: store_v2i64_update_aligned4:
+;CHECK: vst1.32 {{{d[0-9]+, d[0-9]+}}}, [{{r[0-9]+}}]!
+ %A = load <2 x i64>** %ptr
+ store <2 x i64> %val, <2 x i64>* %A, align 4
+ %inc = getelementptr <2 x i64>* %A, i32 1
+ store <2 x i64>* %inc, <2 x i64>** %ptr
+ ret void
+}
+
+define void @store_v2i64_update_aligned8(<2 x i64>** %ptr, <2 x i64> %val) {
+;CHECK-LABEL: store_v2i64_update_aligned8:
+;CHECK: vst1.64 {{{d[0-9]+, d[0-9]+}}}, [{{r[0-9]+}}]!
+ %A = load <2 x i64>** %ptr
+ store <2 x i64> %val, <2 x i64>* %A, align 8
+ %inc = getelementptr <2 x i64>* %A, i32 1
+ store <2 x i64>* %inc, <2 x i64>** %ptr
+ ret void
+}
+
+define void @store_v2i64_update_aligned16(<2 x i64>** %ptr, <2 x i64> %val) {
+;CHECK-LABEL: store_v2i64_update_aligned16:
+;CHECK: vst1.64 {{{d[0-9]+, d[0-9]+}}}, [{{r[0-9]+}}:128]!
+ %A = load <2 x i64>** %ptr
+ store <2 x i64> %val, <2 x i64>* %A, align 16
+ %inc = getelementptr <2 x i64>* %A, i32 1
+ store <2 x i64>* %inc, <2 x i64>** %ptr
+ ret void
+}
+
+define void @truncstore_v4i32tov4i8(<4 x i8>** %ptr, <4 x i32> %val) {
+;CHECK-LABEL: truncstore_v4i32tov4i8:
+;CHECK: ldr.w r9, [sp]
+;CHECK: vmov {{d[0-9]+}}, r3, r9
+;CHECK: vmov {{d[0-9]+}}, r1, r2
+;CHECK: vmovn.i32 [[VECLO:d[0-9]+]], {{q[0-9]+}}
+;CHECK: vuzp.8 [[VECLO]], {{d[0-9]+}}
+;CHECK: ldr r[[PTRREG:[0-9]+]], [r0]
+;CHECK: vst1.32 {[[VECLO]][0]}, [r[[PTRREG]]:32]
+ %A = load <4 x i8>** %ptr
+ %trunc = trunc <4 x i32> %val to <4 x i8>
+ store <4 x i8> %trunc, <4 x i8>* %A, align 4
+ ret void
+}
+
+define void @truncstore_v4i32tov4i8_fake_update(<4 x i8>** %ptr, <4 x i32> %val) {
+;CHECK-LABEL: truncstore_v4i32tov4i8_fake_update:
+;CHECK: ldr.w r9, [sp]
+;CHECK: vmov {{d[0-9]+}}, r3, r9
+;CHECK: vmov {{d[0-9]+}}, r1, r2
+;CHECK: movs [[IMM16:r[0-9]+]], #16
+;CHECK: vmovn.i32 [[VECLO:d[0-9]+]], {{q[0-9]+}}
+;CHECK: vuzp.8 [[VECLO]], {{d[0-9]+}}
+;CHECK: ldr r[[PTRREG:[0-9]+]], [r0]
+;CHECK: vst1.32 {[[VECLO]][0]}, [r[[PTRREG]]:32], [[IMM16]]
+;CHECK: str r[[PTRREG]], [r0]
+ %A = load <4 x i8>** %ptr
+ %trunc = trunc <4 x i32> %val to <4 x i8>
+ store <4 x i8> %trunc, <4 x i8>* %A, align 4
+	%inc = getelementptr <4 x i8>* %A, i32 4
+ store <4 x i8>* %inc, <4 x i8>** %ptr
+ ret void
+}
diff --git a/test/CodeGen/ARM/vfp-regs-dwarf.ll b/test/CodeGen/ARM/vfp-regs-dwarf.ll
index f83adf9..b67f770 100644
--- a/test/CodeGen/ARM/vfp-regs-dwarf.ll
+++ b/test/CodeGen/ARM/vfp-regs-dwarf.ll
@@ -31,14 +31,14 @@ define void @stack_offsets() {
!llvm.dbg.cu = !{!0}
!llvm.module.flags = !{!8, !9}
-!0 = metadata !{metadata !"0x11\0012\00clang version 3.5.0 \000\00\000\00\001", metadata !1, metadata !2, metadata !2, metadata !3, metadata !2, metadata !2} ; [ DW_TAG_compile_unit ] [/Users/tim/llvm/build/tmp.c] [DW_LANG_C99]
-!1 = metadata !{metadata !"tmp.c", metadata !"/Users/tim/llvm/build"}
-!2 = metadata !{}
-!3 = metadata !{metadata !4}
-!4 = metadata !{metadata !"0x2e\00bar\00bar\00\001\000\001\000\006\000\000\001", metadata !1, metadata !5, metadata !6, null, void ()* @stack_offsets, null, null, metadata !2} ; [ DW_TAG_subprogram ] [line 1] [def] [bar]
-!5 = metadata !{metadata !"0x29", metadata !1} ; [ DW_TAG_file_type ] [/Users/tim/llvm/build/tmp.c]
-!6 = metadata !{metadata !"0x15\00\000\000\000\000\000\000", i32 0, null, null, metadata !7, null, null, null} ; [ DW_TAG_subroutine_type ] [line 0, size 0, align 0, offset 0] [from ]
-!7 = metadata !{null}
-!8 = metadata !{i32 2, metadata !"Dwarf Version", i32 4}
-!9 = metadata !{i32 1, metadata !"Debug Info Version", i32 2}
+!0 = !{!"0x11\0012\00clang version 3.5.0 \000\00\000\00\001", !1, !2, !2, !3, !2, !2} ; [ DW_TAG_compile_unit ] [/Users/tim/llvm/build/tmp.c] [DW_LANG_C99]
+!1 = !{!"tmp.c", !"/Users/tim/llvm/build"}
+!2 = !{}
+!3 = !{!4}
+!4 = !{!"0x2e\00bar\00bar\00\001\000\001\000\006\000\000\001", !1, !5, !6, null, void ()* @stack_offsets, null, null, !2} ; [ DW_TAG_subprogram ] [line 1] [def] [bar]
+!5 = !{!"0x29", !1} ; [ DW_TAG_file_type ] [/Users/tim/llvm/build/tmp.c]
+!6 = !{!"0x15\00\000\000\000\000\000\000", i32 0, null, null, !7, null, null, null} ; [ DW_TAG_subroutine_type ] [line 0, size 0, align 0, offset 0] [from ]
+!7 = !{null}
+!8 = !{i32 2, !"Dwarf Version", i32 4}
+!9 = !{i32 1, !"Debug Info Version", i32 2}
diff --git a/test/CodeGen/ARM/vld1.ll b/test/CodeGen/ARM/vld1.ll
index caeeada..db640f5 100644
--- a/test/CodeGen/ARM/vld1.ll
+++ b/test/CodeGen/ARM/vld1.ll
@@ -119,6 +119,14 @@ define <2 x i64> @vld1Qi64(i64* %A) nounwind {
ret <2 x i64> %tmp1
}
+define <2 x double> @vld1Qf64(double* %A) nounwind {
+;CHECK-LABEL: vld1Qf64:
+;CHECK: vld1.64
+ %tmp0 = bitcast double* %A to i8*
+ %tmp1 = call <2 x double> @llvm.arm.neon.vld1.v2f64(i8* %tmp0, i32 1)
+ ret <2 x double> %tmp1
+}
+
declare <8 x i8> @llvm.arm.neon.vld1.v8i8(i8*, i32) nounwind readonly
declare <4 x i16> @llvm.arm.neon.vld1.v4i16(i8*, i32) nounwind readonly
declare <2 x i32> @llvm.arm.neon.vld1.v2i32(i8*, i32) nounwind readonly
@@ -130,6 +138,7 @@ declare <8 x i16> @llvm.arm.neon.vld1.v8i16(i8*, i32) nounwind readonly
declare <4 x i32> @llvm.arm.neon.vld1.v4i32(i8*, i32) nounwind readonly
declare <4 x float> @llvm.arm.neon.vld1.v4f32(i8*, i32) nounwind readonly
declare <2 x i64> @llvm.arm.neon.vld1.v2i64(i8*, i32) nounwind readonly
+declare <2 x double> @llvm.arm.neon.vld1.v2f64(i8*, i32) nounwind readonly
; Radar 8355607
; Do not crash if the vld1 result is not used.
diff --git a/test/CodeGen/ARM/vst1.ll b/test/CodeGen/ARM/vst1.ll
index 14f3ff0..a6bcf7d 100644
--- a/test/CodeGen/ARM/vst1.ll
+++ b/test/CodeGen/ARM/vst1.ll
@@ -117,6 +117,15 @@ define void @vst1Qi64(i64* %A, <2 x i64>* %B) nounwind {
ret void
}
+define void @vst1Qf64(double* %A, <2 x double>* %B) nounwind {
+;CHECK-LABEL: vst1Qf64:
+;CHECK: vst1.64
+ %tmp0 = bitcast double* %A to i8*
+ %tmp1 = load <2 x double>* %B
+ call void @llvm.arm.neon.vst1.v2f64(i8* %tmp0, <2 x double> %tmp1, i32 1)
+ ret void
+}
+
declare void @llvm.arm.neon.vst1.v8i8(i8*, <8 x i8>, i32) nounwind
declare void @llvm.arm.neon.vst1.v4i16(i8*, <4 x i16>, i32) nounwind
declare void @llvm.arm.neon.vst1.v2i32(i8*, <2 x i32>, i32) nounwind
@@ -128,3 +137,4 @@ declare void @llvm.arm.neon.vst1.v8i16(i8*, <8 x i16>, i32) nounwind
declare void @llvm.arm.neon.vst1.v4i32(i8*, <4 x i32>, i32) nounwind
declare void @llvm.arm.neon.vst1.v4f32(i8*, <4 x float>, i32) nounwind
declare void @llvm.arm.neon.vst1.v2i64(i8*, <2 x i64>, i32) nounwind
+declare void @llvm.arm.neon.vst1.v2f64(i8*, <2 x double>, i32) nounwind
diff --git a/test/CodeGen/BPF/alu8.ll b/test/CodeGen/BPF/alu8.ll
new file mode 100644
index 0000000..0233225
--- /dev/null
+++ b/test/CodeGen/BPF/alu8.ll
@@ -0,0 +1,46 @@
+; RUN: llc -march=bpf -show-mc-encoding < %s | FileCheck %s
+; test little endian only for now
+
+define i8 @mov(i8 %a, i8 %b) nounwind {
+; CHECK-LABEL: mov:
+; CHECK: mov r0, r2 # encoding: [0xbf,0x20,0x00,0x00,0x00,0x00,0x00,0x00]
+; CHECK: ret # encoding: [0x95,0x00,0x00,0x00,0x00,0x00,0x00,0x00]
+ ret i8 %b
+}
+
+define i8 @add(i8 %a, i8 %b) nounwind {
+; CHECK-LABEL: add:
+; CHECK: add r1, r2 # encoding: [0x0f,0x21,0x00,0x00,0x00,0x00,0x00,0x00]
+; CHECK: mov r0, r1 # encoding: [0xbf,0x10,0x00,0x00,0x00,0x00,0x00,0x00]
+ %1 = add i8 %a, %b
+ ret i8 %1
+}
+
+define i8 @and(i8 %a, i8 %b) nounwind {
+; CHECK-LABEL: and:
+; CHECK: and r1, r2 # encoding: [0x5f,0x21,0x00,0x00,0x00,0x00,0x00,0x00]
+ %1 = and i8 %a, %b
+ ret i8 %1
+}
+
+define i8 @bis(i8 %a, i8 %b) nounwind {
+; CHECK-LABEL: bis:
+; CHECK: or r1, r2 # encoding: [0x4f,0x21,0x00,0x00,0x00,0x00,0x00,0x00]
+ %1 = or i8 %a, %b
+ ret i8 %1
+}
+
+define i8 @xorand(i8 %a, i8 %b) nounwind {
+; CHECK-LABEL: xorand:
+; CHECK: xori r2, -1 # encoding: [0xa7,0x02,0x00,0x00,0xff,0xff,0xff,0xff]
+ %1 = xor i8 %b, -1
+ %2 = and i8 %a, %1
+ ret i8 %2
+}
+
+define i8 @xor(i8 %a, i8 %b) nounwind {
+; CHECK-LABEL: xor:
+; CHECK: xor r1, r2 # encoding: [0xaf,0x21,0x00,0x00,0x00,0x00,0x00,0x00]
+ %1 = xor i8 %a, %b
+ ret i8 %1
+}
diff --git a/test/CodeGen/BPF/atomics.ll b/test/CodeGen/BPF/atomics.ll
new file mode 100644
index 0000000..2f9730d
--- /dev/null
+++ b/test/CodeGen/BPF/atomics.ll
@@ -0,0 +1,20 @@
+; RUN: llc < %s -march=bpf -verify-machineinstrs -show-mc-encoding | FileCheck %s
+; test little endian only for now
+
+; CHECK-LABEL: test_load_add_32
+; CHECK: xadd32
+; CHECK: encoding: [0xc3
+define void @test_load_add_32(i32* %p, i32 zeroext %v) {
+entry:
+ atomicrmw add i32* %p, i32 %v seq_cst
+ ret void
+}
+
+; CHECK-LABEL: test_load_add_64
+; CHECK: xadd64
+; CHECK: encoding: [0xdb
+define void @test_load_add_64(i64* %p, i64 zeroext %v) {
+entry:
+ atomicrmw add i64* %p, i64 %v seq_cst
+ ret void
+}
diff --git a/test/CodeGen/BPF/basictest.ll b/test/CodeGen/BPF/basictest.ll
new file mode 100644
index 0000000..0cbfff8
--- /dev/null
+++ b/test/CodeGen/BPF/basictest.ll
@@ -0,0 +1,28 @@
+; RUN: llc < %s -march=bpf | FileCheck %s
+
+define i32 @test0(i32 %X) {
+ %tmp.1 = add i32 %X, 1
+ ret i32 %tmp.1
+; CHECK-LABEL: test0:
+; CHECK: addi r1, 1
+}
+
+; CHECK-LABEL: store_imm:
+; CHECK: stw 0(r1), r0
+; CHECK: stw 4(r2), r0
+define i32 @store_imm(i32* %a, i32* %b) {
+entry:
+ store i32 0, i32* %a, align 4
+ %0 = getelementptr inbounds i32* %b, i32 1
+ store i32 0, i32* %0, align 4
+ ret i32 0
+}
+
+@G = external global i8
+define zeroext i8 @loadG() {
+ %tmp = load i8* @G
+ ret i8 %tmp
+; CHECK-LABEL: loadG:
+; CHECK: ld_64 r1
+; CHECK: ldb r0, 0(r1)
+}
diff --git a/test/CodeGen/BPF/byval.ll b/test/CodeGen/BPF/byval.ll
new file mode 100644
index 0000000..065604b
--- /dev/null
+++ b/test/CodeGen/BPF/byval.ll
@@ -0,0 +1,27 @@
+; RUN: not llc -march=bpf < %s 2> %t1
+; RUN: FileCheck %s < %t1
+; CHECK: by value not supported
+
+%struct.S = type { [10 x i32] }
+
+; Function Attrs: nounwind uwtable
+define void @bar(i32 %a) #0 {
+entry:
+ %.compoundliteral = alloca %struct.S, align 8
+ %arrayinit.begin = getelementptr inbounds %struct.S* %.compoundliteral, i64 0, i32 0, i64 0
+ store i32 1, i32* %arrayinit.begin, align 8
+ %arrayinit.element = getelementptr inbounds %struct.S* %.compoundliteral, i64 0, i32 0, i64 1
+ store i32 2, i32* %arrayinit.element, align 4
+ %arrayinit.element2 = getelementptr inbounds %struct.S* %.compoundliteral, i64 0, i32 0, i64 2
+ store i32 3, i32* %arrayinit.element2, align 8
+ %arrayinit.start = getelementptr inbounds %struct.S* %.compoundliteral, i64 0, i32 0, i64 3
+ %scevgep4 = bitcast i32* %arrayinit.start to i8*
+ call void @llvm.memset.p0i8.i64(i8* %scevgep4, i8 0, i64 28, i32 4, i1 false)
+ call void @foo(i32 %a, %struct.S* byval align 8 %.compoundliteral) #3
+ ret void
+}
+
+declare void @foo(i32, %struct.S* byval align 8) #1
+
+; Function Attrs: nounwind
+declare void @llvm.memset.p0i8.i64(i8* nocapture, i8, i64, i32, i1) #3
diff --git a/test/CodeGen/BPF/cc_args.ll b/test/CodeGen/BPF/cc_args.ll
new file mode 100644
index 0000000..5085fe5
--- /dev/null
+++ b/test/CodeGen/BPF/cc_args.ll
@@ -0,0 +1,96 @@
+; RUN: llc < %s -march=bpf -show-mc-encoding | FileCheck %s
+; test little endian only for now
+
+define void @test() #0 {
+entry:
+; CHECK: test:
+
+; CHECK: mov r1, 123 # encoding: [0xb7,0x01,0x00,0x00,0x7b,0x00,0x00,0x00]
+; CHECK: call f_i16
+ call void @f_i16(i16 123)
+
+; CHECK: mov r1, 12345678 # encoding: [0xb7,0x01,0x00,0x00,0x4e,0x61,0xbc,0x00]
+; CHECK: call f_i32
+ call void @f_i32(i32 12345678)
+
+; CHECK: ld_64 r1, 72623859790382856 # encoding: [0x18,0x01,0x00,0x00,0x08,0x07,0x06,0x05,0x00,0x00,0x00,0x00,0x04,0x03,0x02,0x01]
+; CHECK: call f_i64
+ call void @f_i64(i64 72623859790382856)
+
+; CHECK: mov r1, 1234
+; CHECK: mov r2, 5678
+; CHECK: call f_i32_i32
+ call void @f_i32_i32(i32 1234, i32 5678)
+
+; CHECK: mov r1, 2
+; CHECK: mov r2, 3
+; CHECK: mov r3, 4
+; CHECK: call f_i16_i32_i16
+ call void @f_i16_i32_i16(i16 2, i32 3, i16 4)
+
+; CHECK: mov r1, 5
+; CHECK: ld_64 r2, 7262385979038285
+; CHECK: mov r3, 6
+; CHECK: call f_i16_i64_i16
+ call void @f_i16_i64_i16(i16 5, i64 7262385979038285, i16 6)
+
+ ret void
+}
+
+@g_i16 = common global i16 0, align 2
+@g_i32 = common global i32 0, align 2
+@g_i64 = common global i64 0, align 4
+
+define void @f_i16(i16 %a) #0 {
+; CHECK: f_i16:
+; CHECK: sth 0(r2), r1 # encoding: [0x6b,0x12,0x00,0x00,0x00,0x00,0x00,0x00]
+ store volatile i16 %a, i16* @g_i16, align 2
+ ret void
+}
+
+define void @f_i32(i32 %a) #0 {
+; CHECK: f_i32:
+; CHECK: sth 0(r2), r1 # encoding: [0x6b,0x12,0x00,0x00,0x00,0x00,0x00,0x00]
+; CHECK: sth 2(r2), r1 # encoding: [0x6b,0x12,0x02,0x00,0x00,0x00,0x00,0x00]
+ store volatile i32 %a, i32* @g_i32, align 2
+ ret void
+}
+
+define void @f_i64(i64 %a) #0 {
+; CHECK: f_i64:
+; CHECK: stw 0(r2), r1
+; CHECK: stw 4(r2), r1 # encoding: [0x63,0x12,0x04,0x00,0x00,0x00,0x00,0x00]
+ store volatile i64 %a, i64* @g_i64, align 2
+ ret void
+}
+
+define void @f_i32_i32(i32 %a, i32 %b) #0 {
+; CHECK: f_i32_i32:
+; CHECK: stw 0(r3), r1
+ store volatile i32 %a, i32* @g_i32, align 4
+; CHECK: stw 0(r3), r2
+ store volatile i32 %b, i32* @g_i32, align 4
+ ret void
+}
+
+define void @f_i16_i32_i16(i16 %a, i32 %b, i16 %c) #0 {
+; CHECK: f_i16_i32_i16:
+; CHECK: sth 0(r4), r1
+ store volatile i16 %a, i16* @g_i16, align 2
+; CHECK: stw 0(r1), r2
+ store volatile i32 %b, i32* @g_i32, align 4
+; CHECK: sth 0(r4), r3
+ store volatile i16 %c, i16* @g_i16, align 2
+ ret void
+}
+
+define void @f_i16_i64_i16(i16 %a, i64 %b, i16 %c) #0 {
+; CHECK: f_i16_i64_i16:
+; CHECK: sth 0(r4), r1
+ store volatile i16 %a, i16* @g_i16, align 2
+; CHECK: std 0(r1), r2 # encoding: [0x7b,0x21,0x00,0x00,0x00,0x00,0x00,0x00]
+ store volatile i64 %b, i64* @g_i64, align 8
+; CHECK: sth 0(r4), r3
+ store volatile i16 %c, i16* @g_i16, align 2
+ ret void
+}
diff --git a/test/CodeGen/BPF/cc_ret.ll b/test/CodeGen/BPF/cc_ret.ll
new file mode 100644
index 0000000..e32b17b
--- /dev/null
+++ b/test/CodeGen/BPF/cc_ret.ll
@@ -0,0 +1,48 @@
+; RUN: llc < %s -march=bpf | FileCheck %s
+
+define void @test() #0 {
+entry:
+; CHECK: test:
+
+; CHECK: call f_i16
+; CHECK: sth 0(r1), r0
+ %0 = call i16 @f_i16()
+ store volatile i16 %0, i16* @g_i16
+
+; CHECK: call f_i32
+; CHECK: stw 0(r1), r0
+ %1 = call i32 @f_i32()
+ store volatile i32 %1, i32* @g_i32
+
+; CHECK: call f_i64
+; CHECK: std 0(r1), r0
+ %2 = call i64 @f_i64()
+ store volatile i64 %2, i64* @g_i64
+
+ ret void
+}
+
+@g_i16 = common global i16 0, align 2
+@g_i32 = common global i32 0, align 2
+@g_i64 = common global i64 0, align 2
+
+define i16 @f_i16() #0 {
+; CHECK: f_i16:
+; CHECK: mov r0, 1
+; CHECK: ret
+ ret i16 1
+}
+
+define i32 @f_i32() #0 {
+; CHECK: f_i32:
+; CHECK: mov r0, 16909060
+; CHECK: ret
+ ret i32 16909060
+}
+
+define i64 @f_i64() #0 {
+; CHECK: f_i64:
+; CHECK: ld_64 r0, 72623859790382856
+; CHECK: ret
+ ret i64 72623859790382856
+}
diff --git a/test/CodeGen/BPF/cmp.ll b/test/CodeGen/BPF/cmp.ll
new file mode 100644
index 0000000..b353f90
--- /dev/null
+++ b/test/CodeGen/BPF/cmp.ll
@@ -0,0 +1,119 @@
+; RUN: llc < %s -march=bpf | FileCheck %s
+
+; Function Attrs: nounwind readnone uwtable
+define signext i8 @foo_cmp1(i8 signext %a, i8 signext %b) #0 {
+ %1 = icmp sgt i8 %a, %b
+ br i1 %1, label %2, label %4
+
+; <label>:2 ; preds = %0
+ %3 = mul i8 %b, %a
+ br label %6
+
+; <label>:4 ; preds = %0
+ %5 = shl i8 %b, 3
+ br label %6
+
+; <label>:6 ; preds = %4, %2
+ %.0 = phi i8 [ %3, %2 ], [ %5, %4 ]
+ ret i8 %.0
+; CHECK-LABEL: foo_cmp1:
+; CHECK: jsge r2, r1
+}
+
+; Function Attrs: nounwind readnone uwtable
+define signext i8 @foo_cmp2(i8 signext %a, i8 signext %b) #0 {
+ %1 = icmp slt i8 %a, %b
+ br i1 %1, label %4, label %2
+
+; <label>:2 ; preds = %0
+ %3 = mul i8 %b, %a
+ br label %6
+
+; <label>:4 ; preds = %0
+ %5 = shl i8 %b, 3
+ br label %6
+
+; <label>:6 ; preds = %4, %2
+ %.0 = phi i8 [ %3, %2 ], [ %5, %4 ]
+ ret i8 %.0
+; CHECK-LABEL: foo_cmp2:
+; CHECK: jsgt r2, r1
+}
+
+; Function Attrs: nounwind readnone uwtable
+define signext i8 @foo_cmp3(i8 signext %a, i8 signext %b) #0 {
+ %1 = icmp slt i8 %a, %b
+ br i1 %1, label %2, label %4
+
+; <label>:2 ; preds = %0
+ %3 = mul i8 %b, %a
+ br label %6
+
+; <label>:4 ; preds = %0
+ %5 = shl i8 %b, 3
+ br label %6
+
+; <label>:6 ; preds = %4, %2
+ %.0 = phi i8 [ %3, %2 ], [ %5, %4 ]
+ ret i8 %.0
+; CHECK-LABEL: foo_cmp3:
+; CHECK: jsge r1, r2
+}
+
+; Function Attrs: nounwind readnone uwtable
+define signext i8 @foo_cmp4(i8 signext %a, i8 signext %b) #0 {
+ %1 = icmp sgt i8 %a, %b
+ br i1 %1, label %4, label %2
+
+; <label>:2 ; preds = %0
+ %3 = mul i8 %b, %a
+ br label %6
+
+; <label>:4 ; preds = %0
+ %5 = shl i8 %b, 3
+ br label %6
+
+; <label>:6 ; preds = %4, %2
+ %.0 = phi i8 [ %3, %2 ], [ %5, %4 ]
+ ret i8 %.0
+; CHECK-LABEL: foo_cmp4:
+; CHECK: jsgt r1, r2
+}
+
+; Function Attrs: nounwind readnone uwtable
+define signext i8 @min(i8 signext %a, i8 signext %b) #0 {
+ %1 = icmp slt i8 %a, %b
+ %a.b = select i1 %1, i8 %a, i8 %b
+ ret i8 %a.b
+; CHECK-LABEL: min:
+; CHECK: jsgt r2, r1
+; CHECK: mov r1, r2
+; CHECK: mov r0, r1
+}
+
+; Function Attrs: nounwind readnone uwtable
+define zeroext i8 @minu(i8 zeroext %a, i8 zeroext %b) #0 {
+ %1 = icmp ult i8 %a, 100
+ %a.b = select i1 %1, i8 %a, i8 %b
+ ret i8 %a.b
+; CHECK-LABEL: minu:
+; CHECK: jgt r3, r1
+}
+
+; Function Attrs: nounwind readnone uwtable
+define signext i8 @max(i8 signext %a, i8 signext %b) #0 {
+ %1 = icmp sgt i8 %a, %b
+ %a.b = select i1 %1, i8 %a, i8 %b
+ ret i8 %a.b
+; CHECK-LABEL: max:
+; CHECK: jsgt r1, r2
+}
+
+; Function Attrs: nounwind readnone uwtable
+define signext i8 @meq(i8 signext %a, i8 signext %b, i8 signext %c) #0 {
+ %1 = icmp eq i8 %a, %b
+ %c.a = select i1 %1, i8 %c, i8 %a
+ ret i8 %c.a
+; CHECK-LABEL: meq:
+; CHECK: jeq r1, r2
+}
diff --git a/test/CodeGen/BPF/ex1.ll b/test/CodeGen/BPF/ex1.ll
new file mode 100644
index 0000000..5fc1200
--- /dev/null
+++ b/test/CodeGen/BPF/ex1.ll
@@ -0,0 +1,46 @@
+; RUN: llc < %s -march=bpf | FileCheck %s
+
+%struct.bpf_context = type { i64, i64, i64, i64, i64, i64, i64 }
+%struct.sk_buff = type { i64, i64, i64, i64, i64, i64, i64 }
+%struct.net_device = type { i64, i64, i64, i64, i64, i64, i64 }
+
+@bpf_prog1.devname = private unnamed_addr constant [3 x i8] c"lo\00", align 1
+@bpf_prog1.fmt = private unnamed_addr constant [15 x i8] c"skb %x dev %x\0A\00", align 1
+
+; Function Attrs: nounwind uwtable
+define i32 @bpf_prog1(%struct.bpf_context* nocapture %ctx) #0 section "events/net/netif_receive_skb" {
+ %devname = alloca [3 x i8], align 1
+ %fmt = alloca [15 x i8], align 1
+ %1 = getelementptr inbounds [3 x i8]* %devname, i64 0, i64 0
+ call void @llvm.memcpy.p0i8.p0i8.i64(i8* %1, i8* getelementptr inbounds ([3 x i8]* @bpf_prog1.devname, i64 0, i64 0), i64 3, i32 1, i1 false)
+ %2 = getelementptr inbounds %struct.bpf_context* %ctx, i64 0, i32 0
+ %3 = load i64* %2, align 8
+ %4 = inttoptr i64 %3 to %struct.sk_buff*
+ %5 = getelementptr inbounds %struct.sk_buff* %4, i64 0, i32 2
+ %6 = bitcast i64* %5 to i8*
+ %7 = call i8* inttoptr (i64 4 to i8* (i8*)*)(i8* %6) #1
+ %8 = call i32 inttoptr (i64 9 to i32 (i8*, i8*, i32)*)(i8* %7, i8* %1, i32 2) #1
+ %9 = icmp eq i32 %8, 0
+ br i1 %9, label %10, label %13
+
+; <label>:10 ; preds = %0
+ %11 = getelementptr inbounds [15 x i8]* %fmt, i64 0, i64 0
+ call void @llvm.memcpy.p0i8.p0i8.i64(i8* %11, i8* getelementptr inbounds ([15 x i8]* @bpf_prog1.fmt, i64 0, i64 0), i64 15, i32 1, i1 false)
+ %12 = call i32 (i8*, i32, ...)* inttoptr (i64 11 to i32 (i8*, i32, ...)*)(i8* %11, i32 15, %struct.sk_buff* %4, i8* %7) #1
+; CHECK-LABEL: bpf_prog1:
+; CHECK: call 4
+; CHECK: call 9
+; CHECK: jnei r0, 0
+; CHECK: mov r1, 622884453
+; CHECK: ld_64 r1, 7214898703899978611
+; CHECK: call 11
+; CHECK: mov r0, 0
+; CHECK: ret
+ br label %13
+
+; <label>:13 ; preds = %10, %0
+ ret i32 0
+}
+
+; Function Attrs: nounwind
+declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture, i8* nocapture, i64, i32, i1) #1
diff --git a/test/CodeGen/BPF/intrinsics.ll b/test/CodeGen/BPF/intrinsics.ll
new file mode 100644
index 0000000..9a078fb
--- /dev/null
+++ b/test/CodeGen/BPF/intrinsics.ll
@@ -0,0 +1,50 @@
+; RUN: llc < %s -march=bpf | FileCheck %s
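+; The llvm.bpf.load.{byte,half,word} intrinsics lower to the packet-access
+; loads: ldabs_* when the offset is a constant and ldind_* when the offset
+; comes from a register.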
+
+; Function Attrs: nounwind uwtable
+define i32 @ld_b(i64 %foo, i64* nocapture %bar, i8* %ctx, i8* %ctx2) #0 {
+ %1 = tail call i64 @llvm.bpf.load.byte(i8* %ctx, i64 123) #2
+ %2 = add i64 %1, %foo
+ %3 = load volatile i64* %bar, align 8
+ %4 = add i64 %2, %3
+ %5 = tail call i64 @llvm.bpf.load.byte(i8* %ctx2, i64 %foo) #2
+ %6 = add i64 %4, %5
+ %7 = load volatile i64* %bar, align 8
+ %8 = add i64 %6, %7
+ %9 = trunc i64 %8 to i32
+ ret i32 %9
+; CHECK-LABEL: ld_b:
+; CHECK: ldabs_b r0, r6.data + 123
+; CHECK: ldind_b r0, r6.data
+}
+
+declare i64 @llvm.bpf.load.byte(i8*, i64) #1
+
+; Function Attrs: nounwind uwtable
+define i32 @ld_h(i8* %ctx, i8* %ctx2, i32 %foo) #0 {
+ %1 = tail call i64 @llvm.bpf.load.half(i8* %ctx, i64 123) #2
+ %2 = sext i32 %foo to i64
+ %3 = tail call i64 @llvm.bpf.load.half(i8* %ctx2, i64 %2) #2
+ %4 = add i64 %3, %1
+ %5 = trunc i64 %4 to i32
+ ret i32 %5
+; CHECK-LABEL: ld_h:
+; CHECK: ldind_h r0, r6.data
+; CHECK: ldabs_h r0, r6.data + 123
+}
+
+declare i64 @llvm.bpf.load.half(i8*, i64) #1
+
+; Function Attrs: nounwind uwtable
+define i32 @ld_w(i8* %ctx, i8* %ctx2, i32 %foo) #0 {
+ %1 = tail call i64 @llvm.bpf.load.word(i8* %ctx, i64 123) #2
+ %2 = sext i32 %foo to i64
+ %3 = tail call i64 @llvm.bpf.load.word(i8* %ctx2, i64 %2) #2
+ %4 = add i64 %3, %1
+ %5 = trunc i64 %4 to i32
+ ret i32 %5
+; CHECK-LABEL: ld_w:
+; CHECK: ldind_w r0, r6.data
+; CHECK: ldabs_w r0, r6.data + 123
+}
+
+declare i64 @llvm.bpf.load.word(i8*, i64) #1
diff --git a/test/CodeGen/BPF/lit.local.cfg b/test/CodeGen/BPF/lit.local.cfg
new file mode 100644
index 0000000..a4ab262
--- /dev/null
+++ b/test/CodeGen/BPF/lit.local.cfg
@@ -0,0 +1,2 @@
+if 'BPF' not in config.root.targets:
+ config.unsupported = True
diff --git a/test/CodeGen/BPF/load.ll b/test/CodeGen/BPF/load.ll
new file mode 100644
index 0000000..b097435
--- /dev/null
+++ b/test/CodeGen/BPF/load.ll
@@ -0,0 +1,43 @@
+; RUN: llc < %s -march=bpf | FileCheck %s
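+; Cover the load addressing modes: plain pointer, global, absolute address,
+; pointer plus immediate offset, and struct field. Each folds into the
+; immediate-offset form of ldh.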
+
+define i16 @am1(i16* %a) nounwind {
+ %1 = load i16* %a
+ ret i16 %1
+}
+; CHECK-LABEL: am1:
+; CHECK: ldh r0, 0(r1)
+
+@foo = external global i16
+
+define i16 @am2() nounwind {
+ %1 = load i16* @foo
+ ret i16 %1
+}
+; CHECK-LABEL: am2:
+; CHECK: ldh r0, 0(r1)
+
+define i16 @am4() nounwind {
+ %1 = load volatile i16* inttoptr(i16 32 to i16*)
+ ret i16 %1
+}
+; CHECK-LABEL: am4:
+; CHECK: mov r1, 32
+; CHECK: ldh r0, 0(r1)
+
+define i16 @am5(i16* %a) nounwind {
+ %1 = getelementptr i16* %a, i16 2
+ %2 = load i16* %1
+ ret i16 %2
+}
+; CHECK-LABEL: am5:
+; CHECK: ldh r0, 4(r1)
+
+%S = type { i16, i16 }
+@baz = common global %S zeroinitializer, align 1
+
+define i16 @am6() nounwind {
+ %1 = load i16* getelementptr (%S* @baz, i32 0, i32 1)
+ ret i16 %1
+}
+; CHECK-LABEL: am6:
+; CHECK: ldh r0, 2(r1)
diff --git a/test/CodeGen/BPF/loops.ll b/test/CodeGen/BPF/loops.ll
new file mode 100644
index 0000000..40bf449
--- /dev/null
+++ b/test/CodeGen/BPF/loops.ll
@@ -0,0 +1,111 @@
+; RUN: llc < %s -march=bpf | FileCheck %s
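+; Each function runs a simple reduction loop; the CHECK lines only verify
+; that the expected ALU opcode (add/sub/or/xor/and) is selected for the
+; reduction in the loop body.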
+
+define zeroext i16 @add(i16* nocapture %a, i16 zeroext %n) nounwind readonly {
+entry:
+ %cmp8 = icmp eq i16 %n, 0 ; <i1> [#uses=1]
+ br i1 %cmp8, label %for.end, label %for.body
+
+for.body: ; preds = %for.body, %entry
+ %i.010 = phi i16 [ 0, %entry ], [ %inc, %for.body ] ; <i16> [#uses=2]
+ %sum.09 = phi i16 [ 0, %entry ], [ %add, %for.body ] ; <i16> [#uses=1]
+ %arrayidx = getelementptr i16* %a, i16 %i.010 ; <i16*> [#uses=1]
+; CHECK-LABEL: add:
+; CHECK: add r{{[0-9]+}}, r{{[0-9]+}}
+ %tmp4 = load i16* %arrayidx ; <i16> [#uses=1]
+ %add = add i16 %tmp4, %sum.09 ; <i16> [#uses=2]
+ %inc = add i16 %i.010, 1 ; <i16> [#uses=2]
+ %exitcond = icmp eq i16 %inc, %n ; <i1> [#uses=1]
+ br i1 %exitcond, label %for.end, label %for.body
+
+for.end: ; preds = %for.body, %entry
+ %sum.0.lcssa = phi i16 [ 0, %entry ], [ %add, %for.body ] ; <i16> [#uses=1]
+ ret i16 %sum.0.lcssa
+}
+
+define zeroext i16 @sub(i16* nocapture %a, i16 zeroext %n) nounwind readonly {
+entry:
+ %cmp8 = icmp eq i16 %n, 0 ; <i1> [#uses=1]
+ br i1 %cmp8, label %for.end, label %for.body
+
+for.body: ; preds = %for.body, %entry
+ %i.010 = phi i16 [ 0, %entry ], [ %inc, %for.body ] ; <i16> [#uses=2]
+ %sum.09 = phi i16 [ 0, %entry ], [ %add, %for.body ] ; <i16> [#uses=1]
+ %arrayidx = getelementptr i16* %a, i16 %i.010 ; <i16*> [#uses=1]
+; CHECK-LABEL: sub:
+; CHECK: sub r{{[0-9]+}}, r{{[0-9]+}}
+ %tmp4 = load i16* %arrayidx ; <i16> [#uses=1]
+ %add = sub i16 %tmp4, %sum.09 ; <i16> [#uses=2]
+ %inc = add i16 %i.010, 1 ; <i16> [#uses=2]
+ %exitcond = icmp eq i16 %inc, %n ; <i1> [#uses=1]
+ br i1 %exitcond, label %for.end, label %for.body
+
+for.end: ; preds = %for.body, %entry
+ %sum.0.lcssa = phi i16 [ 0, %entry ], [ %add, %for.body ] ; <i16> [#uses=1]
+ ret i16 %sum.0.lcssa
+}
+
+define zeroext i16 @or(i16* nocapture %a, i16 zeroext %n) nounwind readonly {
+entry:
+ %cmp8 = icmp eq i16 %n, 0 ; <i1> [#uses=1]
+ br i1 %cmp8, label %for.end, label %for.body
+
+for.body: ; preds = %for.body, %entry
+ %i.010 = phi i16 [ 0, %entry ], [ %inc, %for.body ] ; <i16> [#uses=2]
+ %sum.09 = phi i16 [ 0, %entry ], [ %add, %for.body ] ; <i16> [#uses=1]
+ %arrayidx = getelementptr i16* %a, i16 %i.010 ; <i16*> [#uses=1]
+; CHECK-LABEL: or:
+; CHECK: or r{{[0-9]+}}, r{{[0-9]+}}
+ %tmp4 = load i16* %arrayidx ; <i16> [#uses=1]
+ %add = or i16 %tmp4, %sum.09 ; <i16> [#uses=2]
+ %inc = add i16 %i.010, 1 ; <i16> [#uses=2]
+ %exitcond = icmp eq i16 %inc, %n ; <i1> [#uses=1]
+ br i1 %exitcond, label %for.end, label %for.body
+
+for.end: ; preds = %for.body, %entry
+ %sum.0.lcssa = phi i16 [ 0, %entry ], [ %add, %for.body ] ; <i16> [#uses=1]
+ ret i16 %sum.0.lcssa
+}
+
+define zeroext i16 @xor(i16* nocapture %a, i16 zeroext %n) nounwind readonly {
+entry:
+ %cmp8 = icmp eq i16 %n, 0 ; <i1> [#uses=1]
+ br i1 %cmp8, label %for.end, label %for.body
+
+for.body: ; preds = %for.body, %entry
+ %i.010 = phi i16 [ 0, %entry ], [ %inc, %for.body ] ; <i16> [#uses=2]
+ %sum.09 = phi i16 [ 0, %entry ], [ %add, %for.body ] ; <i16> [#uses=1]
+ %arrayidx = getelementptr i16* %a, i16 %i.010 ; <i16*> [#uses=1]
+; CHECK-LABEL: xor:
+; CHECK: xor r{{[0-9]+}}, r{{[0-9]+}}
+ %tmp4 = load i16* %arrayidx ; <i16> [#uses=1]
+ %add = xor i16 %tmp4, %sum.09 ; <i16> [#uses=2]
+ %inc = add i16 %i.010, 1 ; <i16> [#uses=2]
+ %exitcond = icmp eq i16 %inc, %n ; <i1> [#uses=1]
+ br i1 %exitcond, label %for.end, label %for.body
+
+for.end: ; preds = %for.body, %entry
+ %sum.0.lcssa = phi i16 [ 0, %entry ], [ %add, %for.body ] ; <i16> [#uses=1]
+ ret i16 %sum.0.lcssa
+}
+
+define zeroext i16 @and(i16* nocapture %a, i16 zeroext %n) nounwind readonly {
+entry:
+ %cmp8 = icmp eq i16 %n, 0 ; <i1> [#uses=1]
+ br i1 %cmp8, label %for.end, label %for.body
+
+for.body: ; preds = %for.body, %entry
+ %i.010 = phi i16 [ 0, %entry ], [ %inc, %for.body ] ; <i16> [#uses=2]
+ %sum.09 = phi i16 [ 0, %entry ], [ %add, %for.body ] ; <i16> [#uses=1]
+ %arrayidx = getelementptr i16* %a, i16 %i.010 ; <i16*> [#uses=1]
+; CHECK-LABEL: and:
+; CHECK: and r{{[0-9]+}}, r{{[0-9]+}}
+ %tmp4 = load i16* %arrayidx ; <i16> [#uses=1]
+ %add = and i16 %tmp4, %sum.09 ; <i16> [#uses=2]
+ %inc = add i16 %i.010, 1 ; <i16> [#uses=2]
+ %exitcond = icmp eq i16 %inc, %n ; <i1> [#uses=1]
+ br i1 %exitcond, label %for.end, label %for.body
+
+for.end: ; preds = %for.body, %entry
+ %sum.0.lcssa = phi i16 [ 0, %entry ], [ %add, %for.body ] ; <i16> [#uses=1]
+ ret i16 %sum.0.lcssa
+}
diff --git a/test/CodeGen/BPF/many_args1.ll b/test/CodeGen/BPF/many_args1.ll
new file mode 100644
index 0000000..08218f4
--- /dev/null
+++ b/test/CodeGen/BPF/many_args1.ll
@@ -0,0 +1,12 @@
+; RUN: not llc -march=bpf < %s 2> %t1
+; RUN: FileCheck %s < %t1
+; CHECK: too many args
+
+; Function Attrs: nounwind uwtable
+define i32 @foo(i32 %a, i32 %b, i32 %c) #0 {
+entry:
+ %call = tail call i32 @bar(i32 %a, i32 %b, i32 %c, i32 1, i32 2, i32 3) #3
+ ret i32 %call
+}
+
+declare i32 @bar(i32, i32, i32, i32, i32, i32) #1
diff --git a/test/CodeGen/BPF/many_args2.ll b/test/CodeGen/BPF/many_args2.ll
new file mode 100644
index 0000000..a69886c
--- /dev/null
+++ b/test/CodeGen/BPF/many_args2.ll
@@ -0,0 +1,15 @@
+; RUN: not llc -march=bpf < %s 2> %t1
+; RUN: FileCheck %s < %t1
+; CHECK: too many args
+
+; Function Attrs: nounwind readnone uwtable
+define i32 @bar(i32 %a, i32 %b, i32 %c, i32 %d, i32 %e, i32 %f) #0 {
+entry:
+ ret i32 1
+}
+
+; Function Attrs: nounwind readnone uwtable
+define i32 @foo(i32 %a, i32 %b, i32 %c) #0 {
+entry:
+ ret i32 1
+}
diff --git a/test/CodeGen/BPF/sanity.ll b/test/CodeGen/BPF/sanity.ll
new file mode 100644
index 0000000..db63c07
--- /dev/null
+++ b/test/CodeGen/BPF/sanity.ll
@@ -0,0 +1,117 @@
+; RUN: llc < %s -march=bpf | FileCheck %s
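+; Basic lowering smoke tests: arithmetic at several widths, argument
+; marshalling for calls, compare-and-select, and a format string
+; materialized as a 64-bit immediate (ld_64) for the printf call.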
+
+@foo_printf.fmt = private unnamed_addr constant [9 x i8] c"hello \0A\00", align 1
+
+; Function Attrs: nounwind readnone uwtable
+define i32 @foo_int(i32 %a, i32 %b) #0 {
+ %1 = add nsw i32 %b, %a
+ ret i32 %1
+; CHECK-LABEL: foo_int:
+; CHECK: add r2, r1
+}
+
+; Function Attrs: nounwind readnone uwtable
+define signext i8 @foo_char(i8 signext %a, i8 signext %b) #0 {
+ %1 = add i8 %b, %a
+ ret i8 %1
+; CHECK-LABEL: foo_char:
+; CHECK: add r2, r1
+; CHECK: slli r2, 56
+; CHECK: srai r2, 56
+}
+
+; Function Attrs: nounwind readnone uwtable
+define i64 @foo_ll(i64 %a, i64 %b, i64 %c) #0 {
+ %1 = add nsw i64 %b, %a
+ %2 = sub i64 %1, %c
+ ret i64 %2
+; CHECK-LABEL: foo_ll:
+; CHECK: add r2, r1
+; CHECK: sub r2, r3
+; CHECK: mov r0, r2
+}
+
+; Function Attrs: nounwind uwtable
+define void @foo_call2(i32 %a, i32 %b) #1 {
+ %1 = trunc i32 %b to i8
+ tail call void @foo_2arg(i8 signext %1, i32 %a) #3
+ ret void
+; CHECK-LABEL: foo_call2:
+; CHECK: slli r2, 56
+; CHECK: srai r2, 56
+; CHECK: mov r1, r2
+}
+
+declare void @foo_2arg(i8 signext, i32) #2
+
+; Function Attrs: nounwind uwtable
+define i32 @foo_call5(i8 signext %a, i16 signext %b, i32 %c, i64 %d) #1 {
+ %1 = tail call i32 @bar(i8 signext %a, i16 signext %b, i32 %c, i64 %d) #3
+ ret i32 0
+; CHECK-LABEL: foo_call5:
+; CHECK: call bar
+}
+
+declare i32 @bar(i8 signext, i16 signext, i32, i64) #2
+
+; Function Attrs: nounwind readnone uwtable
+define signext i8 @foo_cmp(i8 signext %a, i8 signext %b) #0 {
+ %1 = icmp slt i8 %a, %b
+ %a.b = select i1 %1, i8 %a, i8 %b
+ ret i8 %a.b
+; CHECK-LABEL: foo_cmp:
+; CHECK: jsgt r2, r1
+}
+
+; Function Attrs: nounwind readnone uwtable
+define i32 @foo_muldiv(i8 signext %a, i16 signext %b, i32 %c, i64 %d) #0 {
+ %1 = icmp eq i8 %a, 0
+ br i1 %1, label %5, label %2
+
+; <label>:2 ; preds = %0
+ %3 = sext i16 %b to i32
+ %4 = mul nsw i32 %3, %c
+ br label %8
+
+; <label>:5 ; preds = %0
+ %6 = trunc i64 %d to i32
+ %7 = udiv i32 %6, %c
+ br label %8
+
+; <label>:8 ; preds = %5, %2
+ %.0 = phi i32 [ %4, %2 ], [ %7, %5 ]
+ ret i32 %.0
+; CHECK-LABEL: foo_muldiv:
+; CHECK: mul r2, r3
+}
+
+; Function Attrs: nounwind uwtable
+define i32 @foo_optimized() #1 {
+ %1 = tail call i32 @manyarg(i32 1, i32 2, i32 3, i32 4, i32 5) #3
+ ret i32 %1
+; CHECK-LABEL: foo_optimized:
+; CHECK: mov r1, 1
+; CHECK: mov r2, 2
+; CHECK: mov r3, 3
+; CHECK: mov r4, 4
+; CHECK: mov r5, 5
+}
+
+declare i32 @manyarg(i32, i32, i32, i32, i32) #2
+
+; Function Attrs: nounwind uwtable
+define void @foo_printf() #1 {
+ %fmt = alloca [9 x i8], align 1
+ %1 = getelementptr inbounds [9 x i8]* %fmt, i64 0, i64 0
+ call void @llvm.memcpy.p0i8.p0i8.i64(i8* %1, i8* getelementptr inbounds ([9 x i8]* @foo_printf.fmt, i64 0, i64 0), i64 9, i32 1, i1 false)
+; CHECK-LABEL: foo_printf:
+; CHECK: ld_64 r1, 729618802566522216
+ %2 = call i32 (i8*, ...)* @printf(i8* %1) #3
+ ret void
+}
+
+; Function Attrs: nounwind
+declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture, i8* nocapture, i64, i32, i1) #3
+
+; Function Attrs: nounwind
+declare i32 @printf(i8* nocapture, ...) #4
diff --git a/test/CodeGen/BPF/setcc.ll b/test/CodeGen/BPF/setcc.ll
new file mode 100644
index 0000000..eabb6c9
--- /dev/null
+++ b/test/CodeGen/BPF/setcc.ll
@@ -0,0 +1,99 @@
+; RUN: llc -march=bpf < %s | FileCheck %s
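+; BPF has no lt/le jump forms, so the lt/le comparisons below are expected
+; to be emitted as the reversed gt/ge compare with the operands swapped.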
+
+define i16 @sccweqand(i16 %a, i16 %b) nounwind {
+ %t1 = and i16 %a, %b
+ %t2 = icmp eq i16 %t1, 0
+ %t3 = zext i1 %t2 to i16
+ ret i16 %t3
+}
+; CHECK-LABEL: sccweqand:
+; CHECK: jeq r1, r2
+
+define i16 @sccwneand(i16 %a, i16 %b) nounwind {
+ %t1 = and i16 %a, %b
+ %t2 = icmp ne i16 %t1, 0
+ %t3 = zext i1 %t2 to i16
+ ret i16 %t3
+}
+; CHECK-LABEL: sccwneand:
+; CHECK: jne r1, r2
+
+define i16 @sccwne(i16 %a, i16 %b) nounwind {
+ %t1 = icmp ne i16 %a, %b
+ %t2 = zext i1 %t1 to i16
+ ret i16 %t2
+}
+; CHECK-LABEL: sccwne:
+; CHECK: jne r1, r2
+
+define i16 @sccweq(i16 %a, i16 %b) nounwind {
+ %t1 = icmp eq i16 %a, %b
+ %t2 = zext i1 %t1 to i16
+ ret i16 %t2
+}
+; CHECK-LABEL: sccweq:
+; CHECK: jeq r1, r2
+
+define i16 @sccwugt(i16 %a, i16 %b) nounwind {
+ %t1 = icmp ugt i16 %a, %b
+ %t2 = zext i1 %t1 to i16
+ ret i16 %t2
+}
+; CHECK-LABEL: sccwugt:
+; CHECK: jgt r1, r2
+
+define i16 @sccwuge(i16 %a, i16 %b) nounwind {
+ %t1 = icmp uge i16 %a, %b
+ %t2 = zext i1 %t1 to i16
+ ret i16 %t2
+}
+; CHECK-LABEL: sccwuge:
+; CHECK: jge r1, r2
+
+define i16 @sccwult(i16 %a, i16 %b) nounwind {
+ %t1 = icmp ult i16 %a, %b
+ %t2 = zext i1 %t1 to i16
+ ret i16 %t2
+}
+; CHECK-LABEL: sccwult:
+; CHECK: jgt r2, r1
+
+define i16 @sccwule(i16 %a, i16 %b) nounwind {
+ %t1 = icmp ule i16 %a, %b
+ %t2 = zext i1 %t1 to i16
+ ret i16 %t2
+}
+; CHECK-LABEL: sccwule:
+; CHECK: jge r2, r1
+
+define i16 @sccwsgt(i16 %a, i16 %b) nounwind {
+ %t1 = icmp sgt i16 %a, %b
+ %t2 = zext i1 %t1 to i16
+ ret i16 %t2
+}
+; CHECK-LABEL: sccwsgt:
+; CHECK: jsgt r1, r2
+
+define i16 @sccwsge(i16 %a, i16 %b) nounwind {
+ %t1 = icmp sge i16 %a, %b
+ %t2 = zext i1 %t1 to i16
+ ret i16 %t2
+}
+; CHECK-LABEL: sccwsge:
+; CHECK: jsge r1, r2
+
+define i16 @sccwslt(i16 %a, i16 %b) nounwind {
+ %t1 = icmp slt i16 %a, %b
+ %t2 = zext i1 %t1 to i16
+ ret i16 %t2
+}
+; CHECK-LABEL: sccwslt:
+; CHECK: jsgt r2, r1
+
+define i16 @sccwsle(i16 %a, i16 %b) nounwind {
+ %t1 = icmp sle i16 %a, %b
+ %t2 = zext i1 %t1 to i16
+ ret i16 %t2
+}
+; CHECK-LABEL: sccwsle:
+; CHECK: jsge r2, r1
diff --git a/test/CodeGen/BPF/shifts.ll b/test/CodeGen/BPF/shifts.ll
new file mode 100644
index 0000000..898ae2d
--- /dev/null
+++ b/test/CodeGen/BPF/shifts.ll
@@ -0,0 +1,101 @@
+; RUN: llc < %s -march=bpf -show-mc-encoding | FileCheck %s
+; Test little-endian only for now.
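+; All integer widths reuse the same 64-bit shift instructions; the CHECK
+; lines also pin down the instruction encodings via -show-mc-encoding.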
+
+define zeroext i8 @lshr8(i8 zeroext %a, i8 zeroext %cnt) nounwind readnone {
+entry:
+; CHECK-LABEL: lshr8:
+; CHECK: srl r1, r2 # encoding: [0x7f,0x21,0x00,0x00,0x00,0x00,0x00,0x00]
+ %shr = lshr i8 %a, %cnt
+ ret i8 %shr
+}
+
+define signext i8 @ashr8(i8 signext %a, i8 zeroext %cnt) nounwind readnone {
+entry:
+; CHECK-LABEL: ashr8:
+; CHECK: sra r1, r2 # encoding: [0xcf,0x21,0x00,0x00,0x00,0x00,0x00,0x00]
+ %shr = ashr i8 %a, %cnt
+ ret i8 %shr
+}
+
+define zeroext i8 @shl8(i8 zeroext %a, i8 zeroext %cnt) nounwind readnone {
+entry:
+; CHECK-LABEL: shl8:
+; CHECK: sll r1, r2 # encoding: [0x6f,0x21,0x00,0x00,0x00,0x00,0x00,0x00]
+ %shl = shl i8 %a, %cnt
+ ret i8 %shl
+}
+
+define zeroext i16 @lshr16(i16 zeroext %a, i16 zeroext %cnt) nounwind readnone {
+entry:
+; CHECK-LABEL: lshr16:
+; CHECK: srl r1, r2 # encoding: [0x7f,0x21,0x00,0x00,0x00,0x00,0x00,0x00]
+ %shr = lshr i16 %a, %cnt
+ ret i16 %shr
+}
+
+define signext i16 @ashr16(i16 signext %a, i16 zeroext %cnt) nounwind readnone {
+entry:
+; CHECK-LABEL: ashr16:
+; CHECK: sra r1, r2 # encoding: [0xcf,0x21,0x00,0x00,0x00,0x00,0x00,0x00]
+ %shr = ashr i16 %a, %cnt
+ ret i16 %shr
+}
+
+define zeroext i16 @shl16(i16 zeroext %a, i16 zeroext %cnt) nounwind readnone {
+entry:
+; CHECK-LABEL: shl16:
+; CHECK: sll r1, r2 # encoding: [0x6f,0x21,0x00,0x00,0x00,0x00,0x00,0x00]
+ %shl = shl i16 %a, %cnt
+ ret i16 %shl
+}
+
+define zeroext i32 @lshr32(i32 zeroext %a, i32 zeroext %cnt) nounwind readnone {
+entry:
+; CHECK-LABEL: lshr32:
+; CHECK: srl r1, r2 # encoding: [0x7f,0x21,0x00,0x00,0x00,0x00,0x00,0x00]
+; CHECK: slli r1, 32 # encoding: [0x67,0x01,0x00,0x00,0x20,0x00,0x00,0x00]
+ %shr = lshr i32 %a, %cnt
+ ret i32 %shr
+}
+
+define signext i32 @ashr32(i32 signext %a, i32 zeroext %cnt) nounwind readnone {
+entry:
+; CHECK-LABEL: ashr32:
+; CHECK: sra r1, r2 # encoding: [0xcf,0x21,0x00,0x00,0x00,0x00,0x00,0x00]
+ %shr = ashr i32 %a, %cnt
+ ret i32 %shr
+}
+
+define zeroext i32 @shl32(i32 zeroext %a, i32 zeroext %cnt) nounwind readnone {
+entry:
+; CHECK-LABEL: shl32:
+; CHECK: sll r1, r2 # encoding: [0x6f,0x21,0x00,0x00,0x00,0x00,0x00,0x00]
+ %shl = shl i32 %a, %cnt
+ ret i32 %shl
+}
+
+define zeroext i64 @lshr64(i64 zeroext %a, i64 zeroext %cnt) nounwind readnone {
+entry:
+; CHECK-LABEL: lshr64:
+; CHECK: srl r1, r2 # encoding: [0x7f,0x21,0x00,0x00,0x00,0x00,0x00,0x00]
+ %shr = lshr i64 %a, %cnt
+ ret i64 %shr
+}
+
+define signext i64 @ashr64(i64 signext %a, i64 zeroext %cnt) nounwind readnone {
+entry:
+; CHECK-LABEL: ashr64:
+; CHECK: sra r1, r2 # encoding: [0xcf,0x21,0x00,0x00,0x00,0x00,0x00,0x00]
+ %shr = ashr i64 %a, %cnt
+ ret i64 %shr
+}
+
+define zeroext i64 @shl64(i64 zeroext %a, i64 zeroext %cnt) nounwind readnone {
+entry:
+; CHECK-LABEL: shl64:
+; CHECK: sll r1, r2 # encoding: [0x6f,0x21,0x00,0x00,0x00,0x00,0x00,0x00]
+; CHECK: mov r0, r1 # encoding: [0xbf,0x10,0x00,0x00,0x00,0x00,0x00,0x00]
+; CHECK: ret # encoding: [0x95,0x00,0x00,0x00,0x00,0x00,0x00,0x00]
+ %shl = shl i64 %a, %cnt
+ ret i64 %shl
+}
diff --git a/test/CodeGen/BPF/sockex2.ll b/test/CodeGen/BPF/sockex2.ll
new file mode 100644
index 0000000..6ae5e1c
--- /dev/null
+++ b/test/CodeGen/BPF/sockex2.ll
@@ -0,0 +1,326 @@
+; RUN: llc < %s -march=bpf -show-mc-encoding | FileCheck %s
+; Test little-endian only for now.
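+; A flow-dissector style program: it parses Ethernet/VLAN/IPv4/IPv6/GRE
+; headers with the packet-load intrinsics, then keys a hash-map lookup or
+; update on the extracted value (helper calls 1 and 2 below).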
+
+%struct.bpf_map_def = type { i32, i32, i32, i32 }
+%struct.sk_buff = type opaque
+
+@hash_map = global %struct.bpf_map_def { i32 1, i32 4, i32 8, i32 1024 }, section "maps", align 4
+
+; Function Attrs: nounwind uwtable
+define i32 @bpf_prog2(%struct.sk_buff* %skb) #0 section "socket2" {
+ %key = alloca i32, align 4
+ %val = alloca i64, align 8
+ %1 = bitcast %struct.sk_buff* %skb to i8*
+ %2 = call i64 @llvm.bpf.load.half(i8* %1, i64 12) #2
+ %3 = icmp eq i64 %2, 34984
+ br i1 %3, label %4, label %6
+
+; <label>:4 ; preds = %0
+ %5 = call i64 @llvm.bpf.load.half(i8* %1, i64 16) #2
+ br label %6
+
+; <label>:6 ; preds = %4, %0
+ %proto.0.i = phi i64 [ %5, %4 ], [ %2, %0 ]
+ %nhoff.0.i = phi i64 [ 18, %4 ], [ 14, %0 ]
+ %7 = icmp eq i64 %proto.0.i, 33024
+ br i1 %7, label %8, label %12
+
+; <label>:8 ; preds = %6
+ %9 = add i64 %nhoff.0.i, 2
+ %10 = call i64 @llvm.bpf.load.half(i8* %1, i64 %9) #2
+ %11 = add i64 %nhoff.0.i, 4
+ br label %12
+
+; <label>:12 ; preds = %8, %6
+ %proto.1.i = phi i64 [ %10, %8 ], [ %proto.0.i, %6 ]
+ %nhoff.1.i = phi i64 [ %11, %8 ], [ %nhoff.0.i, %6 ]
+ switch i64 %proto.1.i, label %flow_dissector.exit.thread [
+ i64 2048, label %13
+ i64 34525, label %39
+ ]
+
+; <label>:13 ; preds = %12
+ %14 = add i64 %nhoff.1.i, 6
+ %15 = call i64 @llvm.bpf.load.half(i8* %1, i64 %14) #2
+ %16 = and i64 %15, 16383
+ %17 = icmp eq i64 %16, 0
+ br i1 %17, label %18, label %.thread.i.i
+
+; <label>:18 ; preds = %13
+ %19 = add i64 %nhoff.1.i, 9
+ %20 = call i64 @llvm.bpf.load.byte(i8* %1, i64 %19) #2
+ %21 = icmp eq i64 %20, 47
+ br i1 %21, label %28, label %.thread.i.i
+
+.thread.i.i: ; preds = %18, %13
+ %22 = phi i64 [ %20, %18 ], [ 0, %13 ]
+ %23 = add i64 %nhoff.1.i, 12
+ %24 = call i64 @llvm.bpf.load.word(i8* %1, i64 %23) #2
+ %25 = add i64 %nhoff.1.i, 16
+ %26 = call i64 @llvm.bpf.load.word(i8* %1, i64 %25) #2
+ %27 = trunc i64 %26 to i32
+ br label %28
+
+; <label>:28 ; preds = %.thread.i.i, %18
+ %29 = phi i32 [ %27, %.thread.i.i ], [ undef, %18 ]
+ %30 = phi i64 [ %22, %.thread.i.i ], [ 47, %18 ]
+ %31 = call i64 @llvm.bpf.load.byte(i8* %1, i64 %nhoff.1.i) #2
+ %32 = icmp eq i64 %31, 69
+ br i1 %32, label %33, label %35
+
+; <label>:33 ; preds = %28
+ %34 = add i64 %nhoff.1.i, 20
+ br label %parse_ip.exit.i
+
+; <label>:35 ; preds = %28
+ %36 = shl i64 %31, 2
+ %37 = and i64 %36, 60
+ %38 = add i64 %37, %nhoff.1.i
+ br label %parse_ip.exit.i
+
+; <label>:39 ; preds = %12
+ %40 = add i64 %nhoff.1.i, 6
+ %41 = call i64 @llvm.bpf.load.byte(i8* %1, i64 %40) #2
+ %42 = add i64 %nhoff.1.i, 8
+ %43 = call i64 @llvm.bpf.load.word(i8* %1, i64 %42) #2
+ %44 = add i64 %nhoff.1.i, 12
+ %45 = call i64 @llvm.bpf.load.word(i8* %1, i64 %44) #2
+ %46 = add i64 %nhoff.1.i, 16
+ %47 = call i64 @llvm.bpf.load.word(i8* %1, i64 %46) #2
+ %48 = add i64 %nhoff.1.i, 20
+ %49 = call i64 @llvm.bpf.load.word(i8* %1, i64 %48) #2
+ %50 = add i64 %nhoff.1.i, 24
+ %51 = call i64 @llvm.bpf.load.word(i8* %1, i64 %50) #2
+ %52 = add i64 %nhoff.1.i, 28
+ %53 = call i64 @llvm.bpf.load.word(i8* %1, i64 %52) #2
+ %54 = add i64 %nhoff.1.i, 32
+ %55 = call i64 @llvm.bpf.load.word(i8* %1, i64 %54) #2
+ %56 = add i64 %nhoff.1.i, 36
+ %57 = call i64 @llvm.bpf.load.word(i8* %1, i64 %56) #2
+ %58 = xor i64 %53, %51
+ %59 = xor i64 %58, %55
+ %60 = xor i64 %59, %57
+ %61 = trunc i64 %60 to i32
+ %62 = add i64 %nhoff.1.i, 40
+ br label %parse_ip.exit.i
+
+parse_ip.exit.i: ; preds = %39, %35, %33
+ %63 = phi i32 [ %61, %39 ], [ %29, %33 ], [ %29, %35 ]
+ %64 = phi i64 [ %41, %39 ], [ %30, %33 ], [ %30, %35 ]
+ %nhoff.2.i = phi i64 [ %62, %39 ], [ %34, %33 ], [ %38, %35 ]
+ switch i64 %64, label %187 [
+ i64 47, label %65
+ i64 4, label %137
+ i64 41, label %163
+ ]
+
+; <label>:65 ; preds = %parse_ip.exit.i
+ %66 = call i64 @llvm.bpf.load.half(i8* %1, i64 %nhoff.2.i) #2
+ %67 = add i64 %nhoff.2.i, 2
+ %68 = call i64 @llvm.bpf.load.half(i8* %1, i64 %67) #2
+ %69 = and i64 %66, 1856
+ %70 = icmp eq i64 %69, 0
+ br i1 %70, label %71, label %187
+
+; <label>:71 ; preds = %65
+ %72 = lshr i64 %66, 5
+ %73 = and i64 %72, 4
+ %74 = add i64 %nhoff.2.i, 4
+ %..i = add i64 %74, %73
+ %75 = and i64 %66, 32
+ %76 = icmp eq i64 %75, 0
+ %77 = add i64 %..i, 4
+ %nhoff.4.i = select i1 %76, i64 %..i, i64 %77
+ %78 = and i64 %66, 16
+ %79 = icmp eq i64 %78, 0
+ %80 = add i64 %nhoff.4.i, 4
+ %nhoff.4..i = select i1 %79, i64 %nhoff.4.i, i64 %80
+ %81 = icmp eq i64 %68, 33024
+ br i1 %81, label %82, label %86
+
+; <label>:82 ; preds = %71
+ %83 = add i64 %nhoff.4..i, 2
+ %84 = call i64 @llvm.bpf.load.half(i8* %1, i64 %83) #2
+ %85 = add i64 %nhoff.4..i, 4
+ br label %86
+
+; <label>:86 ; preds = %82, %71
+ %proto.2.i = phi i64 [ %84, %82 ], [ %68, %71 ]
+ %nhoff.6.i = phi i64 [ %85, %82 ], [ %nhoff.4..i, %71 ]
+ switch i64 %proto.2.i, label %flow_dissector.exit.thread [
+ i64 2048, label %87
+ i64 34525, label %113
+ ]
+
+; <label>:87 ; preds = %86
+ %88 = add i64 %nhoff.6.i, 6
+ %89 = call i64 @llvm.bpf.load.half(i8* %1, i64 %88) #2
+ %90 = and i64 %89, 16383
+ %91 = icmp eq i64 %90, 0
+ br i1 %91, label %92, label %.thread.i4.i
+
+; <label>:92 ; preds = %87
+ %93 = add i64 %nhoff.6.i, 9
+ %94 = call i64 @llvm.bpf.load.byte(i8* %1, i64 %93) #2
+ %95 = icmp eq i64 %94, 47
+ br i1 %95, label %102, label %.thread.i4.i
+
+.thread.i4.i: ; preds = %92, %87
+ %96 = phi i64 [ %94, %92 ], [ 0, %87 ]
+ %97 = add i64 %nhoff.6.i, 12
+ %98 = call i64 @llvm.bpf.load.word(i8* %1, i64 %97) #2
+ %99 = add i64 %nhoff.6.i, 16
+ %100 = call i64 @llvm.bpf.load.word(i8* %1, i64 %99) #2
+ %101 = trunc i64 %100 to i32
+ br label %102
+
+; <label>:102 ; preds = %.thread.i4.i, %92
+ %103 = phi i32 [ %101, %.thread.i4.i ], [ %63, %92 ]
+ %104 = phi i64 [ %96, %.thread.i4.i ], [ 47, %92 ]
+ %105 = call i64 @llvm.bpf.load.byte(i8* %1, i64 %nhoff.6.i) #2
+ %106 = icmp eq i64 %105, 69
+ br i1 %106, label %107, label %109
+
+; <label>:107 ; preds = %102
+ %108 = add i64 %nhoff.6.i, 20
+ br label %187
+
+; <label>:109 ; preds = %102
+ %110 = shl i64 %105, 2
+ %111 = and i64 %110, 60
+ %112 = add i64 %111, %nhoff.6.i
+ br label %187
+
+; <label>:113 ; preds = %86
+ %114 = add i64 %nhoff.6.i, 6
+ %115 = call i64 @llvm.bpf.load.byte(i8* %1, i64 %114) #2
+ %116 = add i64 %nhoff.6.i, 8
+ %117 = call i64 @llvm.bpf.load.word(i8* %1, i64 %116) #2
+ %118 = add i64 %nhoff.6.i, 12
+ %119 = call i64 @llvm.bpf.load.word(i8* %1, i64 %118) #2
+ %120 = add i64 %nhoff.6.i, 16
+ %121 = call i64 @llvm.bpf.load.word(i8* %1, i64 %120) #2
+ %122 = add i64 %nhoff.6.i, 20
+ %123 = call i64 @llvm.bpf.load.word(i8* %1, i64 %122) #2
+ %124 = add i64 %nhoff.6.i, 24
+ %125 = call i64 @llvm.bpf.load.word(i8* %1, i64 %124) #2
+ %126 = add i64 %nhoff.6.i, 28
+ %127 = call i64 @llvm.bpf.load.word(i8* %1, i64 %126) #2
+ %128 = add i64 %nhoff.6.i, 32
+ %129 = call i64 @llvm.bpf.load.word(i8* %1, i64 %128) #2
+ %130 = add i64 %nhoff.6.i, 36
+ %131 = call i64 @llvm.bpf.load.word(i8* %1, i64 %130) #2
+ %132 = xor i64 %127, %125
+ %133 = xor i64 %132, %129
+ %134 = xor i64 %133, %131
+ %135 = trunc i64 %134 to i32
+ %136 = add i64 %nhoff.6.i, 40
+ br label %187
+
+; <label>:137 ; preds = %parse_ip.exit.i
+ %138 = add i64 %nhoff.2.i, 6
+ %139 = call i64 @llvm.bpf.load.half(i8* %1, i64 %138) #2
+ %140 = and i64 %139, 16383
+ %141 = icmp eq i64 %140, 0
+ br i1 %141, label %142, label %.thread.i1.i
+
+; <label>:142 ; preds = %137
+ %143 = add i64 %nhoff.2.i, 9
+ %144 = call i64 @llvm.bpf.load.byte(i8* %1, i64 %143) #2
+ %145 = icmp eq i64 %144, 47
+ br i1 %145, label %152, label %.thread.i1.i
+
+.thread.i1.i: ; preds = %142, %137
+ %146 = phi i64 [ %144, %142 ], [ 0, %137 ]
+ %147 = add i64 %nhoff.2.i, 12
+ %148 = call i64 @llvm.bpf.load.word(i8* %1, i64 %147) #2
+ %149 = add i64 %nhoff.2.i, 16
+ %150 = call i64 @llvm.bpf.load.word(i8* %1, i64 %149) #2
+ %151 = trunc i64 %150 to i32
+ br label %152
+
+; <label>:152 ; preds = %.thread.i1.i, %142
+ %153 = phi i32 [ %151, %.thread.i1.i ], [ %63, %142 ]
+ %154 = phi i64 [ %146, %.thread.i1.i ], [ 47, %142 ]
+ %155 = call i64 @llvm.bpf.load.byte(i8* %1, i64 %nhoff.2.i) #2
+ %156 = icmp eq i64 %155, 69
+ br i1 %156, label %157, label %159
+
+; <label>:157 ; preds = %152
+ %158 = add i64 %nhoff.2.i, 20
+ br label %187
+
+; <label>:159 ; preds = %152
+ %160 = shl i64 %155, 2
+ %161 = and i64 %160, 60
+ %162 = add i64 %161, %nhoff.2.i
+ br label %187
+
+; <label>:163 ; preds = %parse_ip.exit.i
+ %164 = add i64 %nhoff.2.i, 6
+ %165 = call i64 @llvm.bpf.load.byte(i8* %1, i64 %164) #2
+ %166 = add i64 %nhoff.2.i, 8
+ %167 = call i64 @llvm.bpf.load.word(i8* %1, i64 %166) #2
+ %168 = add i64 %nhoff.2.i, 12
+ %169 = call i64 @llvm.bpf.load.word(i8* %1, i64 %168) #2
+ %170 = add i64 %nhoff.2.i, 16
+ %171 = call i64 @llvm.bpf.load.word(i8* %1, i64 %170) #2
+ %172 = add i64 %nhoff.2.i, 20
+ %173 = call i64 @llvm.bpf.load.word(i8* %1, i64 %172) #2
+ %174 = add i64 %nhoff.2.i, 24
+ %175 = call i64 @llvm.bpf.load.word(i8* %1, i64 %174) #2
+ %176 = add i64 %nhoff.2.i, 28
+ %177 = call i64 @llvm.bpf.load.word(i8* %1, i64 %176) #2
+ %178 = add i64 %nhoff.2.i, 32
+ %179 = call i64 @llvm.bpf.load.word(i8* %1, i64 %178) #2
+ %180 = add i64 %nhoff.2.i, 36
+ %181 = call i64 @llvm.bpf.load.word(i8* %1, i64 %180) #2
+ %182 = xor i64 %177, %175
+ %183 = xor i64 %182, %179
+ %184 = xor i64 %183, %181
+ %185 = trunc i64 %184 to i32
+ %186 = add i64 %nhoff.2.i, 40
+ br label %187
+
+; <label>:187 ; preds = %163, %159, %157, %113, %109, %107, %65, %parse_ip.exit.i
+ %188 = phi i32 [ %63, %parse_ip.exit.i ], [ %185, %163 ], [ %63, %65 ], [ %135, %113 ], [ %103, %107 ], [ %103, %109 ], [ %153, %157 ], [ %153, %159 ]
+ %189 = phi i64 [ %64, %parse_ip.exit.i ], [ %165, %163 ], [ 47, %65 ], [ %115, %113 ], [ %104, %107 ], [ %104, %109 ], [ %154, %157 ], [ %154, %159 ]
+ %nhoff.7.i = phi i64 [ %nhoff.2.i, %parse_ip.exit.i ], [ %186, %163 ], [ %nhoff.2.i, %65 ], [ %136, %113 ], [ %108, %107 ], [ %112, %109 ], [ %158, %157 ], [ %162, %159 ]
+ %cond.i.i = icmp eq i64 %189, 51
+ %190 = select i1 %cond.i.i, i64 4, i64 0
+ %191 = add i64 %190, %nhoff.7.i
+ %192 = call i64 @llvm.bpf.load.word(i8* %1, i64 %191) #2
+ store i32 %188, i32* %key, align 4
+ %193 = bitcast i32* %key to i8*
+ %194 = call i8* inttoptr (i64 1 to i8* (i8*, i8*)*)(i8* bitcast (%struct.bpf_map_def* @hash_map to i8*), i8* %193) #2
+ %195 = icmp eq i8* %194, null
+ br i1 %195, label %199, label %196
+
+; <label>:196 ; preds = %187
+ %197 = bitcast i8* %194 to i64*
+ %198 = atomicrmw add i64* %197, i64 1 seq_cst
+ br label %flow_dissector.exit.thread
+
+; <label>:199 ; preds = %187
+ store i64 1, i64* %val, align 8
+ %200 = bitcast i64* %val to i8*
+ %201 = call i32 inttoptr (i64 2 to i32 (i8*, i8*, i8*, i64)*)(i8* bitcast (%struct.bpf_map_def* @hash_map to i8*), i8* %193, i8* %200, i64 0) #2
+ br label %flow_dissector.exit.thread
+
+flow_dissector.exit.thread: ; preds = %86, %12, %196, %199
+ ret i32 0
+; CHECK-LABEL: bpf_prog2:
+; CHECK: ldabs_h r0, r6.data + 12 # encoding: [0x28,0x00,0x00,0x00,0x0c,0x00,0x00,0x00]
+; CHECK: ldabs_h r0, r6.data + 16 # encoding: [0x28,0x00,0x00,0x00,0x10,0x00,0x00,0x00]
+; CHECK-NOT: implicit
+; CHECK: ld_64 r1
+; CHECK-NOT: ori
+; CHECK: call 1 # encoding: [0x85,0x00,0x00,0x00,0x01,0x00,0x00,0x00]
+; CHECK: call 2 # encoding: [0x85,0x00,0x00,0x00,0x02,0x00,0x00,0x00]
+}
+
+declare i64 @llvm.bpf.load.half(i8*, i64) #1
+
+declare i64 @llvm.bpf.load.word(i8*, i64) #1
+
+declare i64 @llvm.bpf.load.byte(i8*, i64) #1
diff --git a/test/CodeGen/BPF/struct_ret1.ll b/test/CodeGen/BPF/struct_ret1.ll
new file mode 100644
index 0000000..1477c56
--- /dev/null
+++ b/test/CodeGen/BPF/struct_ret1.ll
@@ -0,0 +1,17 @@
+; RUN: not llc -march=bpf < %s 2> %t1
+; RUN: FileCheck %s < %t1
+; CHECK: only integer returns
+
+%struct.S = type { i32, i32, i32 }
+
+@s = common global %struct.S zeroinitializer, align 4
+
+; Function Attrs: nounwind readonly uwtable
+define { i64, i32 } @bar(i32 %a, i32 %b, i32 %c, i32 %d, i32 %e) #0 {
+entry:
+ %retval.sroa.0.0.copyload = load i64* bitcast (%struct.S* @s to i64*), align 4
+ %retval.sroa.2.0.copyload = load i32* getelementptr inbounds (%struct.S* @s, i64 0, i32 2), align 4
+ %.fca.0.insert = insertvalue { i64, i32 } undef, i64 %retval.sroa.0.0.copyload, 0
+ %.fca.1.insert = insertvalue { i64, i32 } %.fca.0.insert, i32 %retval.sroa.2.0.copyload, 1
+ ret { i64, i32 } %.fca.1.insert
+}
diff --git a/test/CodeGen/BPF/struct_ret2.ll b/test/CodeGen/BPF/struct_ret2.ll
new file mode 100644
index 0000000..9046120
--- /dev/null
+++ b/test/CodeGen/BPF/struct_ret2.ll
@@ -0,0 +1,12 @@
+; RUN: not llc -march=bpf < %s 2> %t1
+; RUN: FileCheck %s < %t1
+; CHECK: only small returns
+
+; Function Attrs: nounwind uwtable
+define { i64, i32 } @foo(i32 %a, i32 %b, i32 %c) #0 {
+entry:
+ %call = tail call { i64, i32 } @bar(i32 %a, i32 %b, i32 %c, i32 1, i32 2) #3
+ ret { i64, i32 } %call
+}
+
+declare { i64, i32 } @bar(i32, i32, i32, i32, i32) #1
diff --git a/test/CodeGen/BPF/vararg1.ll b/test/CodeGen/BPF/vararg1.ll
new file mode 100644
index 0000000..4a22db6
--- /dev/null
+++ b/test/CodeGen/BPF/vararg1.ll
@@ -0,0 +1,9 @@
+; RUN: not llc -march=bpf < %s 2> %t1
+; RUN: FileCheck %s < %t1
+; CHECK: with VarArgs
+
+; Function Attrs: nounwind readnone uwtable
+define void @foo(i32 %a, ...) #0 {
+entry:
+ ret void
+}
diff --git a/test/CodeGen/Generic/MachineBranchProb.ll b/test/CodeGen/Generic/MachineBranchProb.ll
index 0e98280..83277c9 100644
--- a/test/CodeGen/Generic/MachineBranchProb.ll
+++ b/test/CodeGen/Generic/MachineBranchProb.ll
@@ -32,4 +32,4 @@ return:
ret i32 %retval.0
}
-!0 = metadata !{metadata !"branch_weights", i32 7, i32 6, i32 4, i32 4, i32 64}
+!0 = !{!"branch_weights", i32 7, i32 6, i32 4, i32 4, i32 64}
diff --git a/test/CodeGen/Generic/dbg_value.ll b/test/CodeGen/Generic/dbg_value.ll
index 73e41c7..ed7bdba 100644
--- a/test/CodeGen/Generic/dbg_value.ll
+++ b/test/CodeGen/Generic/dbg_value.ll
@@ -4,11 +4,11 @@
%0 = type { i32, i32 }
define void @t(%0*, i32, i32, i32, i32) nounwind {
- tail call void @llvm.dbg.value(metadata !{%0* %0}, i64 0, metadata !0, metadata !{metadata !"0x102"})
+ tail call void @llvm.dbg.value(metadata %0* %0, i64 0, metadata !0, metadata !{!"0x102"})
unreachable
}
declare void @llvm.dbg.value(metadata, i64, metadata, metadata) nounwind readnone
; !0 should conform to the format of DIVariable.
-!0 = metadata !{metadata !"0x101\00a\000\000", null, null, null} ; [ DW_TAG_arg_variable ]
+!0 = !{!"0x101\00a\000\000", null, null, null} ; [ DW_TAG_arg_variable ]
diff --git a/test/CodeGen/Generic/empty-phi.ll b/test/CodeGen/Generic/empty-phi.ll
new file mode 100644
index 0000000..8d5f3b9
--- /dev/null
+++ b/test/CodeGen/Generic/empty-phi.ll
@@ -0,0 +1,19 @@
+; RUN: llc < %s
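+; Make sure instruction selection does not crash on a phi (and load/store)
+; of an empty aggregate type, [0 x { i8*, i64, i64 }].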
+
+define void @f() {
+entry:
+ br label %bb1
+
+bb1:
+ %0 = phi [0 x { i8*, i64, i64 }] [ %load, %bb2 ], [ undef, %entry ]
+ store [0 x { i8*, i64, i64 }] %0, [0 x { i8*, i64, i64 }]* undef, align 8
+ %1 = icmp eq i64 undef, 0
+ br i1 %1, label %bb2, label %bb3
+
+bb2:
+ %load = load [0 x { i8*, i64, i64 }]* undef, align 8
+ br label %bb1
+
+bb3:
+ ret void
+}
diff --git a/test/CodeGen/Generic/overloaded-intrinsic-name.ll b/test/CodeGen/Generic/overloaded-intrinsic-name.ll
new file mode 100644
index 0000000..aa6a031
--- /dev/null
+++ b/test/CodeGen/Generic/overloaded-intrinsic-name.ll
@@ -0,0 +1,57 @@
+; RUN: opt -verify -S < %s
+
+; Tests the name mangling performed by the codepath following
+; getMangledTypeStr(). It only checks that code using the various manglings
+; runs fine; it does not actually verify the mangling against the types of
+; the arguments. Meant to serve as an example document of how users should
+; mangle overloaded intrinsic names.
+
+; Exercise the most general case, llvm_anyptr_type, using gc.relocate
+; and gc.statepoint. Note that it has nothing to do with gc.*
+; functions specifically: any function that accepts llvm_anyptr_type
+; will serve the purpose.
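+; The suffix after the intrinsic name encodes the overloaded pointer type:
+; .p0i32 for i32*, .p0f32 for float*, .p0a3i32 for [3 x i32]*, .p0v3i32 for
+; <3 x i32>*, and .p0struct.test for %struct.test*.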
+
+; function and integer
+define i32* @test_iAny(i32* %v) {
+ %tok = call i32 (i1 ()*, i32, i32, ...)* @llvm.experimental.gc.statepoint.p0f_i1f(i1 ()* @return_i1, i32 0, i32 0, i32 0, i32* %v)
+ %v-new = call i32* @llvm.experimental.gc.relocate.p0i32(i32 %tok, i32 4, i32 4)
+ ret i32* %v-new
+}
+
+; float
+define float* @test_fAny(float* %v) {
+ %tok = call i32 (i1 ()*, i32, i32, ...)* @llvm.experimental.gc.statepoint.p0f_i1f(i1 ()* @return_i1, i32 0, i32 0, i32 0, float* %v)
+ %v-new = call float* @llvm.experimental.gc.relocate.p0f32(i32 %tok, i32 4, i32 4)
+ ret float* %v-new
+}
+
+; array of integers
+define [3 x i32]* @test_aAny([3 x i32]* %v) {
+ %tok = call i32 (i1 ()*, i32, i32, ...)* @llvm.experimental.gc.statepoint.p0f_i1f(i1 ()* @return_i1, i32 0, i32 0, i32 0, [3 x i32]* %v)
+ %v-new = call [3 x i32]* @llvm.experimental.gc.relocate.p0a3i32(i32 %tok, i32 4, i32 4)
+ ret [3 x i32]* %v-new
+}
+
+; vector of integers
+define <3 x i32>* @test_vAny(<3 x i32>* %v) {
+ %tok = call i32 (i1 ()*, i32, i32, ...)* @llvm.experimental.gc.statepoint.p0f_i1f(i1 ()* @return_i1, i32 0, i32 0, i32 0, <3 x i32>* %v)
+ %v-new = call <3 x i32>* @llvm.experimental.gc.relocate.p0v3i32(i32 %tok, i32 4, i32 4)
+ ret <3 x i32>* %v-new
+}
+
+%struct.test = type { i32, i1 }
+
+; struct
+define %struct.test* @test_struct(%struct.test* %v) {
+ %tok = call i32 (i1 ()*, i32, i32, ...)* @llvm.experimental.gc.statepoint.p0f_i1f(i1 ()* @return_i1, i32 0, i32 0, i32 0, %struct.test* %v)
+ %v-new = call %struct.test* @llvm.experimental.gc.relocate.p0struct.test(i32 %tok, i32 4, i32 4)
+ ret %struct.test* %v-new
+}
+
+declare zeroext i1 @return_i1()
+declare i32 @llvm.experimental.gc.statepoint.p0f_i1f(i1 ()*, i32, i32, ...)
+declare i32* @llvm.experimental.gc.relocate.p0i32(i32, i32, i32)
+declare float* @llvm.experimental.gc.relocate.p0f32(i32, i32, i32)
+declare [3 x i32]* @llvm.experimental.gc.relocate.p0a3i32(i32, i32, i32)
+declare <3 x i32>* @llvm.experimental.gc.relocate.p0v3i32(i32, i32, i32)
+declare %struct.test* @llvm.experimental.gc.relocate.p0struct.test(i32, i32, i32)
diff --git a/test/CodeGen/Generic/print-machineinstrs.ll b/test/CodeGen/Generic/print-machineinstrs.ll
index 75dceb5..26bccaa 100644
--- a/test/CodeGen/Generic/print-machineinstrs.ll
+++ b/test/CodeGen/Generic/print-machineinstrs.ll
@@ -3,7 +3,7 @@
; RUN: llc < %s -O3 -debug-pass=Structure -print-machineinstrs= -o /dev/null 2>&1 | FileCheck %s
define i64 @foo(i64 %a, i64 %b) nounwind {
-; CHECK: -branch-folder -print-machineinstrs
+; CHECK: -branch-folder -machineinstr-printer
; CHECK: Control Flow Optimizer
; CHECK-NEXT: MachineFunction Printer
; CHECK: Machine code for function foo:
diff --git a/test/CodeGen/Hexagon/BranchPredict.ll b/test/CodeGen/Hexagon/BranchPredict.ll
index 4ab1966..5d56449 100644
--- a/test/CodeGen/Hexagon/BranchPredict.ll
+++ b/test/CodeGen/Hexagon/BranchPredict.ll
@@ -72,5 +72,5 @@ return: ; preds = %if.else, %if.then
ret i32 %retval.0
}
-!0 = metadata !{metadata !"branch_weights", i32 64, i32 4}
-!1 = metadata !{metadata !"branch_weights", i32 4, i32 64}
+!0 = !{!"branch_weights", i32 64, i32 4}
+!1 = !{!"branch_weights", i32 4, i32 64}
diff --git a/test/CodeGen/Hexagon/always-ext.ll b/test/CodeGen/Hexagon/always-ext.ll
index 9c8d708..93f4240 100644
--- a/test/CodeGen/Hexagon/always-ext.ll
+++ b/test/CodeGen/Hexagon/always-ext.ll
@@ -1,3 +1,4 @@
+; XFAIL: *
; RUN: llc -march=hexagon -mcpu=hexagonv4 < %s | FileCheck %s
; Check that we don't generate an invalid packet with too many instructions
@@ -7,7 +8,7 @@
; CHECK: {
; CHECK-NOT: call abort
; CHECK: memw(##0)
-; CHECK: memw(r{{[0-9+]}}<<#2+##4)
+; CHECK: memw(r{{[0-9+]}}<<#2 + ##4)
; CHECK: }
%struct.CuTest.1.28.31.37.40.43.52.55.67.85.111 = type { i8*, void (%struct.CuTest.1.28.31.37.40.43.52.55.67.85.111*)*, i32, i32, i8*, [23 x i32]* }
diff --git a/test/CodeGen/Hexagon/block-addr.ll b/test/CodeGen/Hexagon/block-addr.ll
index 54a12bf..dc0d6e6 100644
--- a/test/CodeGen/Hexagon/block-addr.ll
+++ b/test/CodeGen/Hexagon/block-addr.ll
@@ -1,7 +1,7 @@
; RUN: llc -march=hexagon < %s | FileCheck %s
; CHECK: r{{[0-9]+}} = CONST32(#.LJTI{{[0-9]+_[0-9]+}})
-; CHECK: r{{[0-9]+}} = memw(r{{[0-9]+}}+r{{[0-9]+<<#[0-9]+}})
+; CHECK: r{{[0-9]+}} = memw(r{{[0-9]+}} + r{{[0-9]+<<#[0-9]+}})
; CHECK: jumpr r{{[0-9]+}}
define void @main() #0 {
diff --git a/test/CodeGen/Hexagon/cext-check.ll b/test/CodeGen/Hexagon/cext-check.ll
index 7c4b19e..b7181d8 100644
--- a/test/CodeGen/Hexagon/cext-check.ll
+++ b/test/CodeGen/Hexagon/cext-check.ll
@@ -2,9 +2,9 @@
; Check that we constant-extend instructions only when necessary.
define i32 @cext_test1(i32* %a) nounwind {
-; CHECK: r{{[0-9]+}}{{ *}}={{ *}}memw(r{{[0-9]+}}+##8000)
+; CHECK: r{{[0-9]+}}{{ *}}={{ *}}memw(r{{[0-9]+}}{{ *}}+{{ *}}##8000)
; CHECK: r{{[0-9]+}}{{ *}}={{ *}}add(r{{[0-9]+}}{{ *}},{{ *}}##300000)
-; CHECK-NOT: r{{[0-9]+}}{{ *}}={{ *}}memw(r{{[0-9]+}}+##4092)
+; CHECK-NOT: r{{[0-9]+}}{{ *}}={{ *}}memw(r{{[0-9]+}}{{ *}}+{{ *}}##4092)
; CHECK-NOT: r{{[0-9]+}}{{ *}}={{ *}}add(r{{[0-9]+}}{{ *}},{{ *}}##300)
entry:
%0 = load i32* %a, align 4
@@ -29,9 +29,9 @@ return:
}
define i32 @cext_test2(i8* %a) nounwind {
-; CHECK-NOT: r{{[0-9]+}}{{ *}}={{ *}}memub(r{{[0-9]+}}+##1023)
+; CHECK-NOT: r{{[0-9]+}}{{ *}}={{ *}}memub(r{{[0-9]+}}+{{ *}}##1023)
; CHECK: r{{[0-9]+}}{{ *}}={{ *}}add(r{{[0-9]+}}{{ *}},{{ *}}##300000)
-; CHECK: r{{[0-9]+}}{{ *}}={{ *}}memub(r{{[0-9]+}}+##1024)
+; CHECK: r{{[0-9]+}}{{ *}}={{ *}}memub(r{{[0-9]+}}{{ *}}+{{ *}}##1024)
; CHECK-NOT: r{{[0-9]+}}{{ *}}={{ *}}add(r{{[0-9]+}}{{ *}},{{ *}}##6000)
entry:
%tobool = icmp ne i8* %a, null
diff --git a/test/CodeGen/Hexagon/cmp-not.ll b/test/CodeGen/Hexagon/cmp-not.ll
deleted file mode 100644
index abcddc38..0000000
--- a/test/CodeGen/Hexagon/cmp-not.ll
+++ /dev/null
@@ -1,50 +0,0 @@
-; RUN: llc -march=hexagon -mcpu=hexagonv4 < %s | FileCheck %s
-; Check that we generate matching compare insn.
-
-; Function Attrs: nounwind
-define i32 @neqi(i32 %argc) #0 {
-entry:
- %p = alloca i8, align 1
- %0 = tail call i1 @llvm.hexagon.C4.cmpneqi(i32 %argc, i32 512)
- %conv = zext i1 %0 to i8
- store volatile i8 %conv, i8* %p, align 1
- %p.0.p.0. = load volatile i8* %p, align 1
- %conv1 = zext i8 %p.0.p.0. to i32
- ret i32 %conv1
-}
-; CHECK: p{{[0-3]}}{{ *}} = !cmp.eq(r{{[0-9]+}}, ##512)
-
-; Function Attrs: nounwind readnone
-declare i1 @llvm.hexagon.C4.cmpneqi(i32, i32) #1
-
-; Function Attrs: nounwind
-define i32 @ngti(i32 %argc) #0 {
-entry:
- %p = alloca i8, align 1
- %0 = tail call i1 @llvm.hexagon.C4.cmpltei(i32 %argc, i32 4)
- %conv = zext i1 %0 to i8
- store volatile i8 %conv, i8* %p, align 1
- %p.0.p.0. = load volatile i8* %p, align 1
- %conv1 = zext i8 %p.0.p.0. to i32
- ret i32 %conv1
-}
-; CHECK: p{{[0-3]}}{{ *}} = !cmp.gt(r{{[0-9]+}}, #4)
-
-; Function Attrs: nounwind readnone
-declare i1 @llvm.hexagon.C4.cmpltei(i32, i32) #1
-
-; Function Attrs: nounwind
-define i32 @ngtui(i32 %argc) #0 {
-entry:
- %p = alloca i8, align 1
- %0 = tail call i1 @llvm.hexagon.C4.cmplteui(i32 %argc, i32 4)
- %conv = zext i1 %0 to i8
- store volatile i8 %conv, i8* %p, align 1
- %p.0.p.0. = load volatile i8* %p, align 1
- %conv1 = zext i8 %p.0.p.0. to i32
- ret i32 %conv1
-}
-; CHECK: p{{[0-3]}}{{ *}} = !cmp.gtu(r{{[0-9]+}}, #4)
-
-; Function Attrs: nounwind readnone
-declare i1 @llvm.hexagon.C4.cmplteui(i32, i32) #1
diff --git a/test/CodeGen/Hexagon/cmp-to-predreg.ll b/test/CodeGen/Hexagon/cmp-to-predreg.ll
index d430b90..2b65343 100644
--- a/test/CodeGen/Hexagon/cmp-to-predreg.ll
+++ b/test/CodeGen/Hexagon/cmp-to-predreg.ll
@@ -2,7 +2,7 @@
; Check that we generate compare to predicate register.
define i32 @compare1(i32 %a, i32 %b) nounwind {
-; CHECK: p{{[0-3]}}{{ *}}={{ *}}!cmp.eq(r{{[0-9]+}},{{ *}}r{{[0-9]+}})
+; CHECK: p{{[0-3]}}{{ *}}={{ *[!]?}}cmp.eq(r{{[0-9]+}},{{ *}}r{{[0-9]+}})
entry:
%cmp = icmp ne i32 %a, %b
%add = add nsw i32 %a, %b
@@ -12,7 +12,7 @@ entry:
}
define i32 @compare2(i32 %a) nounwind {
-; CHECK: p{{[0-3]}}{{ *}}={{ *}}!cmp.eq(r{{[0-9]+}},{{ *}}#10)
+; CHECK: p{{[0-3]}}{{ *}}={{ *[!]?}}cmp.eq(r{{[0-9]+}},{{ *}}#10)
entry:
%cmp = icmp ne i32 %a, 10
%add = add nsw i32 %a, 10
diff --git a/test/CodeGen/Hexagon/dadd.ll b/test/CodeGen/Hexagon/dadd.ll
index 602978a..a86a90c 100644
--- a/test/CodeGen/Hexagon/dadd.ll
+++ b/test/CodeGen/Hexagon/dadd.ll
@@ -1,7 +1,7 @@
; RUN: llc -march=hexagon -mcpu=hexagonv5 < %s | FileCheck %s
; Check that we generate double precision floating point add in V5.
-; CHECK: r{{[0-9]+}}:{{[0-9]+}} = dfadd(r{{[0-9]+}}:{{[0-9]+}}, r{{[0-9]+}}:{{[0-9]+}})
+; CHECK: call __hexagon_adddf3
define i32 @main() nounwind {
diff --git a/test/CodeGen/Hexagon/dmul.ll b/test/CodeGen/Hexagon/dmul.ll
index d743773..cbe0d7f 100644
--- a/test/CodeGen/Hexagon/dmul.ll
+++ b/test/CodeGen/Hexagon/dmul.ll
@@ -1,7 +1,7 @@
; RUN: llc -march=hexagon -mcpu=hexagonv5 < %s | FileCheck %s
; Check that we generate double precision floating point multiply in V5.
-; CHECK: r{{[0-9]+}}:{{[0-9]+}} = dfmpy(r{{[0-9]+}}:{{[0-9]+}}, r{{[0-9]+}}:{{[0-9]+}})
+; CHECK: call __hexagon_muldf3
define i32 @main() nounwind {
entry:
diff --git a/test/CodeGen/Hexagon/dsub.ll b/test/CodeGen/Hexagon/dsub.ll
index 4f9d39e..f271492 100644
--- a/test/CodeGen/Hexagon/dsub.ll
+++ b/test/CodeGen/Hexagon/dsub.ll
@@ -1,7 +1,7 @@
; RUN: llc -march=hexagon -mcpu=hexagonv5 < %s | FileCheck %s
; Check that we generate double precision floating point subtract in V5.
-; CHECK: r{{[0-9]+}}:{{[0-9]+}} = dfsub(r{{[0-9]+}}:{{[0-9]+}}, r{{[0-9]+}}:{{[0-9]+}})
+; CHECK: call __hexagon_subdf3
define i32 @main() nounwind {
entry:
diff --git a/test/CodeGen/Hexagon/dualstore.ll b/test/CodeGen/Hexagon/dualstore.ll
index f7d7e8b..33d9ce9 100644
--- a/test/CodeGen/Hexagon/dualstore.ll
+++ b/test/CodeGen/Hexagon/dualstore.ll
@@ -1,17 +1,12 @@
-; RUN: llc -march=hexagon -mcpu=hexagonv4 -disable-hexagon-misched < %s | FileCheck %s
+; RUN: llc -march=hexagon -disable-hexagon-misched < %s | FileCheck %s
; Check that we generate dual stores in one packet in V4
-; CHECK: memw(r{{[0-9]+}}{{ *}}+{{ *}}#{{[0-9]+}}){{ *}}={{ *}}##500000
-; CHECK-NEXT: memw(r{{[0-9]+}}{{ *}}+{{ *}}#{{[0-9]+}}){{ *}}={{ *}}##100000
-; CHECK-NEXT: }
+; CHECK: memw(r{{[0-9]+}}{{ *}}+{{ *}}#{{[0-9]+}}){{ *}}=
+; CHECK-NEXT: memw(r{{[0-9]+}}{{ *}}+{{ *}}#{{[0-9]+}}){{ *}}=
-@Reg = global i32 0, align 4
-define i32 @main() nounwind {
+define i32 @main(i32 %v, i32* %p1, i32* %p2) nounwind {
entry:
- %number= alloca i32, align 4
- store i32 500000, i32* %number, align 4
- %number1= alloca i32, align 4
- store i32 100000, i32* %number1, align 4
+ store i32 %v, i32* %p1, align 4
+ store i32 %v, i32* %p2, align 4
ret i32 0
}
-
diff --git a/test/CodeGen/Hexagon/hwloop-dbg.ll b/test/CodeGen/Hexagon/hwloop-dbg.ll
index f093dae..3c05884 100644
--- a/test/CodeGen/Hexagon/hwloop-dbg.ll
+++ b/test/CodeGen/Hexagon/hwloop-dbg.ll
@@ -5,9 +5,9 @@ target triple = "hexagon"
define void @foo(i32* nocapture %a, i32* nocapture %b) nounwind {
entry:
- tail call void @llvm.dbg.value(metadata !{i32* %a}, i64 0, metadata !13, metadata !{metadata !"0x102"}), !dbg !17
- tail call void @llvm.dbg.value(metadata !{i32* %b}, i64 0, metadata !14, metadata !{metadata !"0x102"}), !dbg !18
- tail call void @llvm.dbg.value(metadata !30, i64 0, metadata !15, metadata !{metadata !"0x102"}), !dbg !19
+ tail call void @llvm.dbg.value(metadata i32* %a, i64 0, metadata !13, metadata !{!"0x102"}), !dbg !17
+ tail call void @llvm.dbg.value(metadata i32* %b, i64 0, metadata !14, metadata !{!"0x102"}), !dbg !18
+ tail call void @llvm.dbg.value(metadata i32 0, i64 0, metadata !15, metadata !{!"0x102"}), !dbg !19
br label %for.body, !dbg !19
for.body: ; preds = %for.body, %entry
@@ -18,11 +18,11 @@ for.body: ; preds = %for.body, %entry
%i.02 = phi i32 [ 0, %entry ], [ %inc, %for.body ]
%b.addr.01 = phi i32* [ %b, %entry ], [ %incdec.ptr, %for.body ]
%incdec.ptr = getelementptr inbounds i32* %b.addr.01, i32 1, !dbg !21
- tail call void @llvm.dbg.value(metadata !{i32* %incdec.ptr}, i64 0, metadata !14, metadata !{metadata !"0x102"}), !dbg !21
+ tail call void @llvm.dbg.value(metadata i32* %incdec.ptr, i64 0, metadata !14, metadata !{!"0x102"}), !dbg !21
%0 = load i32* %b.addr.01, align 4, !dbg !21
store i32 %0, i32* %arrayidx.phi, align 4, !dbg !21
%inc = add nsw i32 %i.02, 1, !dbg !26
- tail call void @llvm.dbg.value(metadata !{i32 %inc}, i64 0, metadata !15, metadata !{metadata !"0x102"}), !dbg !26
+ tail call void @llvm.dbg.value(metadata i32 %inc, i64 0, metadata !15, metadata !{!"0x102"}), !dbg !26
%exitcond = icmp eq i32 %inc, 10, !dbg !19
%arrayidx.inc = getelementptr i32* %arrayidx.phi, i32 1
br i1 %exitcond, label %for.end, label %for.body, !dbg !19
@@ -37,28 +37,28 @@ declare void @llvm.dbg.value(metadata, i64, metadata, metadata) nounwind readnon
!llvm.dbg.cu = !{!0}
!llvm.module.flags = !{!29}
-!0 = metadata !{metadata !"0x11\0012\00QuIC LLVM Hexagon Clang version 6.1-pre-unknown, (git://git-hexagon-aus.quicinc.com/llvm/clang-mainline.git e9382867661454cdf44addb39430741578e9765c) (llvm/llvm-mainline.git 36412bb1fcf03ed426d4437b41198bae066675ac)\001\00\000\00\001", metadata !28, metadata !2, metadata !2, metadata !3, metadata !2, null} ; [ DW_TAG_compile_unit ] [/usr2/kparzysz/s.hex/t/hwloop-dbg.c] [DW_LANG_C99]
-!2 = metadata !{}
-!3 = metadata !{metadata !5}
-!5 = metadata !{metadata !"0x2e\00foo\00foo\00\001\000\001\000\006\00256\001\001", metadata !28, null, metadata !7, null, void (i32*, i32*)* @foo, null, null, metadata !11} ; [ DW_TAG_subprogram ] [line 1] [def] [foo]
-!6 = metadata !{metadata !"0x29", metadata !28} ; [ DW_TAG_file_type ]
-!7 = metadata !{metadata !"0x15\00\000\000\000\000\000\000", i32 0, null, null, metadata !8, null, null, null} ; [ DW_TAG_subroutine_type ] [line 0, size 0, align 0, offset 0] [from ]
-!8 = metadata !{null, metadata !9, metadata !9}
-!9 = metadata !{metadata !"0xf\00\000\0032\0032\000\000", null, null, metadata !10} ; [ DW_TAG_pointer_type ] [line 0, size 32, align 32, offset 0] [from int]
-!10 = metadata !{metadata !"0x24\00int\000\0032\0032\000\000\005", null, null} ; [ DW_TAG_base_type ] [int] [line 0, size 32, align 32, offset 0, enc DW_ATE_signed]
-!11 = metadata !{metadata !13, metadata !14, metadata !15}
-!13 = metadata !{metadata !"0x101\00a\0016777217\000", metadata !5, metadata !6, metadata !9} ; [ DW_TAG_arg_variable ] [a] [line 1]
-!14 = metadata !{metadata !"0x101\00b\0033554433\000", metadata !5, metadata !6, metadata !9} ; [ DW_TAG_arg_variable ] [b] [line 1]
-!15 = metadata !{metadata !"0x100\00i\002\000", metadata !16, metadata !6, metadata !10} ; [ DW_TAG_auto_variable ] [i] [line 2]
-!16 = metadata !{metadata !"0xb\001\0026\000", metadata !28, metadata !5} ; [ DW_TAG_lexical_block ] [/usr2/kparzysz/s.hex/t/hwloop-dbg.c]
-!17 = metadata !{i32 1, i32 15, metadata !5, null}
-!18 = metadata !{i32 1, i32 23, metadata !5, null}
-!19 = metadata !{i32 3, i32 8, metadata !20, null}
-!20 = metadata !{metadata !"0xb\003\003\001", metadata !28, metadata !16} ; [ DW_TAG_lexical_block ] [/usr2/kparzysz/s.hex/t/hwloop-dbg.c]
-!21 = metadata !{i32 4, i32 5, metadata !22, null}
-!22 = metadata !{metadata !"0xb\003\0028\002", metadata !28, metadata !20} ; [ DW_TAG_lexical_block ] [/usr2/kparzysz/s.hex/t/hwloop-dbg.c]
-!26 = metadata !{i32 3, i32 23, metadata !20, null}
-!27 = metadata !{i32 6, i32 1, metadata !16, null}
-!28 = metadata !{metadata !"hwloop-dbg.c", metadata !"/usr2/kparzysz/s.hex/t"}
-!29 = metadata !{i32 1, metadata !"Debug Info Version", i32 2}
-!30 = metadata !{i32 0}
+!0 = !{!"0x11\0012\00QuIC LLVM Hexagon Clang version 6.1-pre-unknown, (git://git-hexagon-aus.quicinc.com/llvm/clang-mainline.git e9382867661454cdf44addb39430741578e9765c) (llvm/llvm-mainline.git 36412bb1fcf03ed426d4437b41198bae066675ac)\001\00\000\00\001", !28, !2, !2, !3, !2, null} ; [ DW_TAG_compile_unit ] [/usr2/kparzysz/s.hex/t/hwloop-dbg.c] [DW_LANG_C99]
+!2 = !{}
+!3 = !{!5}
+!5 = !{!"0x2e\00foo\00foo\00\001\000\001\000\006\00256\001\001", !28, null, !7, null, void (i32*, i32*)* @foo, null, null, !11} ; [ DW_TAG_subprogram ] [line 1] [def] [foo]
+!6 = !{!"0x29", !28} ; [ DW_TAG_file_type ]
+!7 = !{!"0x15\00\000\000\000\000\000\000", i32 0, null, null, !8, null, null, null} ; [ DW_TAG_subroutine_type ] [line 0, size 0, align 0, offset 0] [from ]
+!8 = !{null, !9, !9}
+!9 = !{!"0xf\00\000\0032\0032\000\000", null, null, !10} ; [ DW_TAG_pointer_type ] [line 0, size 32, align 32, offset 0] [from int]
+!10 = !{!"0x24\00int\000\0032\0032\000\000\005", null, null} ; [ DW_TAG_base_type ] [int] [line 0, size 32, align 32, offset 0, enc DW_ATE_signed]
+!11 = !{!13, !14, !15}
+!13 = !{!"0x101\00a\0016777217\000", !5, !6, !9} ; [ DW_TAG_arg_variable ] [a] [line 1]
+!14 = !{!"0x101\00b\0033554433\000", !5, !6, !9} ; [ DW_TAG_arg_variable ] [b] [line 1]
+!15 = !{!"0x100\00i\002\000", !16, !6, !10} ; [ DW_TAG_auto_variable ] [i] [line 2]
+!16 = !{!"0xb\001\0026\000", !28, !5} ; [ DW_TAG_lexical_block ] [/usr2/kparzysz/s.hex/t/hwloop-dbg.c]
+!17 = !MDLocation(line: 1, column: 15, scope: !5)
+!18 = !MDLocation(line: 1, column: 23, scope: !5)
+!19 = !MDLocation(line: 3, column: 8, scope: !20)
+!20 = !{!"0xb\003\003\001", !28, !16} ; [ DW_TAG_lexical_block ] [/usr2/kparzysz/s.hex/t/hwloop-dbg.c]
+!21 = !MDLocation(line: 4, column: 5, scope: !22)
+!22 = !{!"0xb\003\0028\002", !28, !20} ; [ DW_TAG_lexical_block ] [/usr2/kparzysz/s.hex/t/hwloop-dbg.c]
+!26 = !MDLocation(line: 3, column: 23, scope: !20)
+!27 = !MDLocation(line: 6, column: 1, scope: !16)
+!28 = !{!"hwloop-dbg.c", !"/usr2/kparzysz/s.hex/t"}
+!29 = !{i32 1, !"Debug Info Version", i32 2}
+!30 = !{i32 0}
diff --git a/test/CodeGen/Hexagon/idxload-with-zero-offset.ll b/test/CodeGen/Hexagon/idxload-with-zero-offset.ll
index ca6df88..fbf1a3a 100644
--- a/test/CodeGen/Hexagon/idxload-with-zero-offset.ll
+++ b/test/CodeGen/Hexagon/idxload-with-zero-offset.ll
@@ -1,12 +1,12 @@
-; RUN: llc -march=hexagon -mcpu=hexagonv4 < %s | FileCheck %s
-; Check that we generate load instruction with (base + register offset << 0)
+; RUN: llc -march=hexagon < %s | FileCheck %s
+; Check that we generate load instructions with (base + register offset << x)
; load word
-define i32 @load_w(i32* nocapture %a, i32 %n) nounwind {
-; CHECK: r{{[0-9]+}}{{ *}}={{ *}}memw(r{{[0-9]+}}+r{{[0-9]+}}<<#0)
+define i32 @load_w(i32* nocapture %a, i32 %n, i32 %m) nounwind {
+; CHECK: r{{[0-9]+}}{{ *}}={{ *}}memw(r{{[0-9]+}}{{ *}}+{{ *}}r{{[0-9]+}}{{ *}}<<{{ *}}#2)
entry:
- %tmp = shl i32 %n, 4
+ %tmp = add i32 %n, %m
%scevgep9 = getelementptr i32* %a, i32 %tmp
%val = load i32* %scevgep9, align 4
ret i32 %val
@@ -14,10 +14,10 @@ entry:
; load unsigned half word
-define i16 @load_uh(i16* nocapture %a, i32 %n) nounwind {
-; CHECK: r{{[0-9]+}}{{ *}}={{ *}}memuh(r{{[0-9]+}}+r{{[0-9]+}}<<#0)
+define i16 @load_uh(i16* nocapture %a, i32 %n, i32 %m) nounwind {
+; CHECK: r{{[0-9]+}}{{ *}}={{ *}}memuh(r{{[0-9]+}}{{ *}}+{{ *}}r{{[0-9]+}}{{ *}}<<#1)
entry:
- %tmp = shl i32 %n, 4
+ %tmp = add i32 %n, %m
%scevgep9 = getelementptr i16* %a, i32 %tmp
%val = load i16* %scevgep9, align 2
ret i16 %val
@@ -25,10 +25,10 @@ entry:
; load signed half word
-define i32 @load_h(i16* nocapture %a, i32 %n) nounwind {
-; CHECK: r{{[0-9]+}}{{ *}}={{ *}}memh(r{{[0-9]+}}+r{{[0-9]+}}<<#0)
+define i32 @load_h(i16* nocapture %a, i32 %n, i32 %m) nounwind {
+; CHECK: r{{[0-9]+}}{{ *}}={{ *}}memh(r{{[0-9]+}}{{ *}}+{{ *}}r{{[0-9]+}}{{ *}}<<#1)
entry:
- %tmp = shl i32 %n, 4
+ %tmp = add i32 %n, %m
%scevgep9 = getelementptr i16* %a, i32 %tmp
%val = load i16* %scevgep9, align 2
%conv = sext i16 %val to i32
@@ -37,10 +37,10 @@ entry:
; load unsigned byte
-define i8 @load_ub(i8* nocapture %a, i32 %n) nounwind {
-; CHECK: r{{[0-9]+}}{{ *}}={{ *}}memub(r{{[0-9]+}}+r{{[0-9]+}}<<#0)
+define i8 @load_ub(i8* nocapture %a, i32 %n, i32 %m) nounwind {
+; CHECK: r{{[0-9]+}}{{ *}}={{ *}}memub(r{{[0-9]+}}{{ *}}+{{ *}}r{{[0-9]+}}{{ *}}<<#0)
entry:
- %tmp = shl i32 %n, 4
+ %tmp = add i32 %n, %m
%scevgep9 = getelementptr i8* %a, i32 %tmp
%val = load i8* %scevgep9, align 1
ret i8 %val
@@ -48,10 +48,10 @@ entry:
; load signed byte
-define i32 @foo_2(i8* nocapture %a, i32 %n) nounwind {
-; CHECK: r{{[0-9]+}}{{ *}}={{ *}}memb(r{{[0-9]+}}+r{{[0-9]+}}<<#0)
+define i32 @foo_2(i8* nocapture %a, i32 %n, i32 %m) nounwind {
+; CHECK: r{{[0-9]+}}{{ *}}={{ *}}memb(r{{[0-9]+}}{{ *}}+{{ *}}r{{[0-9]+}}{{ *}}<<{{ *}}#0)
entry:
- %tmp = shl i32 %n, 4
+ %tmp = add i32 %n, %m
%scevgep9 = getelementptr i8* %a, i32 %tmp
%val = load i8* %scevgep9, align 1
%conv = sext i8 %val to i32
@@ -60,10 +60,10 @@ entry:
; load doubleword
-define i64 @load_d(i64* nocapture %a, i32 %n) nounwind {
-; CHECK: r{{[0-9]+}}:{{[0-9]+}}{{ *}}={{ *}}memd(r{{[0-9]+}}+r{{[0-9]+}}<<#0)
+define i64 @load_d(i64* nocapture %a, i32 %n, i32 %m) nounwind {
+; CHECK: r{{[0-9]+}}:{{[0-9]+}}{{ *}}={{ *}}memd(r{{[0-9]+}}{{ *}}+{{ *}}r{{[0-9]+}}{{ *}}<<{{ *}}#3)
entry:
- %tmp = shl i32 %n, 4
+ %tmp = add i32 %n, %m
%scevgep9 = getelementptr i64* %a, i32 %tmp
%val = load i64* %scevgep9, align 8
ret i64 %val
diff --git a/test/CodeGen/Hexagon/intrinsics/alu32_alu.ll b/test/CodeGen/Hexagon/intrinsics/alu32_alu.ll
new file mode 100644
index 0000000..37f9f40
--- /dev/null
+++ b/test/CodeGen/Hexagon/intrinsics/alu32_alu.ll
@@ -0,0 +1,202 @@
+; RUN: llc -march=hexagon -O0 < %s | FileCheck %s
+; Hexagon Programmer's Reference Manual 11.1.1 ALU32/ALU
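+; Each intrinsic is called with trivial operands; the CHECK line pins down
+; the exact assembly syntax from the manual section above.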
+
+; Add
+declare i32 @llvm.hexagon.A2.addi(i32, i32)
+define i32 @A2_addi(i32 %a) {
+ %z = call i32 @llvm.hexagon.A2.addi(i32 %a, i32 0)
+ ret i32 %z
+}
+; CHECK: r0 = add(r0, #0)
+
+declare i32 @llvm.hexagon.A2.add(i32, i32)
+define i32 @A2_add(i32 %a, i32 %b) {
+ %z = call i32 @llvm.hexagon.A2.add(i32 %a, i32 %b)
+ ret i32 %z
+}
+; CHECK: r0 = add(r0, r1)
+
+declare i32 @llvm.hexagon.A2.addsat(i32, i32)
+define i32 @A2_addsat(i32 %a, i32 %b) {
+ %z = call i32 @llvm.hexagon.A2.addsat(i32 %a, i32 %b)
+ ret i32 %z
+}
+; CHECK: r0 = add(r0, r1):sat
+
+; Logical operations
+declare i32 @llvm.hexagon.A2.and(i32, i32)
+define i32 @A2_and(i32 %a, i32 %b) {
+ %z = call i32 @llvm.hexagon.A2.and(i32 %a, i32 %b)
+ ret i32 %z
+}
+; CHECK: r0 = and(r0, r1)
+
+declare i32 @llvm.hexagon.A2.or(i32, i32)
+define i32 @A2_or(i32 %a, i32 %b) {
+ %z = call i32 @llvm.hexagon.A2.or(i32 %a, i32 %b)
+ ret i32 %z
+}
+; CHECK: r0 = or(r0, r1)
+
+declare i32 @llvm.hexagon.A2.xor(i32, i32)
+define i32 @A2_xor(i32 %a, i32 %b) {
+ %z = call i32 @llvm.hexagon.A2.xor(i32 %a, i32 %b)
+ ret i32 %z
+}
+; CHECK: r0 = xor(r0, r1)
+
+declare i32 @llvm.hexagon.A4.andn(i32, i32)
+define i32 @A4_andn(i32 %a, i32 %b) {
+ %z = call i32 @llvm.hexagon.A4.andn(i32 %a, i32 %b)
+ ret i32 %z
+}
+; CHECK: r0 = and(r0, ~r1)
+
+declare i32 @llvm.hexagon.A4.orn(i32, i32)
+define i32 @A4_orn(i32 %a, i32 %b) {
+ %z = call i32 @llvm.hexagon.A4.orn(i32 %a, i32 %b)
+ ret i32 %z
+}
+; CHECK: r0 = or(r0, ~r1)
+
+; Nop
+declare void @llvm.hexagon.A2.nop()
+define void @A2_nop(i32 %a, i32 %b) {
+ call void @llvm.hexagon.A2.nop()
+ ret void
+}
+; CHECK: nop
+
+; Subtract
+declare i32 @llvm.hexagon.A2.sub(i32, i32)
+define i32 @A2_sub(i32 %a, i32 %b) {
+ %z = call i32 @llvm.hexagon.A2.sub(i32 %a, i32 %b)
+ ret i32 %z
+}
+; CHECK: r0 = sub(r0, r1)
+
+declare i32 @llvm.hexagon.A2.subsat(i32, i32)
+define i32 @A2_subsat(i32 %a, i32 %b) {
+ %z = call i32 @llvm.hexagon.A2.subsat(i32 %a, i32 %b)
+ ret i32 %z
+}
+; CHECK: r0 = sub(r0, r1):sat
+
+; Sign extend
+declare i32 @llvm.hexagon.A2.sxtb(i32)
+define i32 @A2_sxtb(i32 %a) {
+ %z = call i32 @llvm.hexagon.A2.sxtb(i32 %a)
+ ret i32 %z
+}
+; CHECK: r0 = sxtb(r0)
+
+declare i32 @llvm.hexagon.A2.sxth(i32)
+define i32 @A2_sxth(i32 %a) {
+ %z = call i32 @llvm.hexagon.A2.sxth(i32 %a)
+ ret i32 %z
+}
+; CHECK: r0 = sxth(r0)
+
+; Transfer immediate
+declare i32 @llvm.hexagon.A2.tfril(i32, i32)
+define i32 @A2_tfril(i32 %a) {
+ %z = call i32 @llvm.hexagon.A2.tfril(i32 %a, i32 0)
+ ret i32 %z
+}
+; CHECK: r0.l = #0
+
+declare i32 @llvm.hexagon.A2.tfrih(i32, i32)
+define i32 @A2_tfrih(i32 %a) {
+ %z = call i32 @llvm.hexagon.A2.tfrih(i32 %a, i32 0)
+ ret i32 %z
+}
+; CHECK: r0.h = #0
+
+declare i32 @llvm.hexagon.A2.tfrsi(i32)
+define i32 @A2_tfrsi() {
+ %z = call i32 @llvm.hexagon.A2.tfrsi(i32 0)
+ ret i32 %z
+}
+; CHECK: r0 = #0
+
+; Transfer register
+declare i32 @llvm.hexagon.A2.tfr(i32)
+define i32 @A2_tfr(i32 %a) {
+ %z = call i32 @llvm.hexagon.A2.tfr(i32 %a)
+ ret i32 %z
+}
+; CHECK: r0 = r0
+
+; Vector add halfwords
+declare i32 @llvm.hexagon.A2.svaddh(i32, i32)
+define i32 @A2_svaddh(i32 %a, i32 %b) {
+ %z = call i32 @llvm.hexagon.A2.svaddh(i32 %a, i32 %b)
+ ret i32 %z
+}
+; CHECK: r0 = vaddh(r0, r1)
+
+declare i32 @llvm.hexagon.A2.svaddhs(i32, i32)
+define i32 @A2_svaddhs(i32 %a, i32 %b) {
+ %z = call i32 @llvm.hexagon.A2.svaddhs(i32 %a, i32 %b)
+ ret i32 %z
+}
+; CHECK: r0 = vaddh(r0, r1):sat
+
+declare i32 @llvm.hexagon.A2.svadduhs(i32, i32)
+define i32 @A2_svadduhs(i32 %a, i32 %b) {
+ %z = call i32 @llvm.hexagon.A2.svadduhs(i32 %a, i32 %b)
+ ret i32 %z
+}
+; CHECK: r0 = vadduh(r0, r1):sat
+
+; Vector average halfwords
+declare i32 @llvm.hexagon.A2.svavgh(i32, i32)
+define i32 @A2_svavgh(i32 %a, i32 %b) {
+ %z = call i32 @llvm.hexagon.A2.svavgh(i32 %a, i32 %b)
+ ret i32 %z
+}
+; CHECK: r0 = vavgh(r0, r1)
+
+declare i32 @llvm.hexagon.A2.svavghs(i32, i32)
+define i32 @A2_svavghs(i32 %a, i32 %b) {
+ %z = call i32 @llvm.hexagon.A2.svavghs(i32 %a, i32 %b)
+ ret i32 %z
+}
+; CHECK: r0 = vavgh(r0, r1):rnd
+
+declare i32 @llvm.hexagon.A2.svnavgh(i32, i32)
+define i32 @A2_svnavgh(i32 %a, i32 %b) {
+ %z = call i32 @llvm.hexagon.A2.svnavgh(i32 %a, i32 %b)
+ ret i32 %z
+}
+; CHECK: r0 = vnavgh(r0, r1)
+
+; Vector subtract halfwords
+declare i32 @llvm.hexagon.A2.svsubh(i32, i32)
+define i32 @A2_svsubh(i32 %a, i32 %b) {
+ %z = call i32 @llvm.hexagon.A2.svsubh(i32 %a, i32 %b)
+ ret i32 %z
+}
+; CHECK: r0 = vsubh(r0, r1)
+
+declare i32 @llvm.hexagon.A2.svsubhs(i32, i32)
+define i32 @A2_svsubhs(i32 %a, i32 %b) {
+ %z = call i32 @llvm.hexagon.A2.svsubhs(i32 %a, i32 %b)
+ ret i32 %z
+}
+; CHECK: r0 = vsubh(r0, r1):sat
+
+declare i32 @llvm.hexagon.A2.svsubuhs(i32, i32)
+define i32 @A2_svsubuhs(i32 %a, i32 %b) {
+ %z = call i32 @llvm.hexagon.A2.svsubuhs(i32 %a, i32 %b)
+ ret i32 %z
+}
+; CHECK: r0 = vsubuh(r0, r1):sat
+
+; Zero extend
+declare i32 @llvm.hexagon.A2.zxth(i32)
+define i32 @A2_zxth(i32 %a) {
+ %z = call i32 @llvm.hexagon.A2.zxth(i32 %a)
+ ret i32 %z
+}
+; CHECK: r0 = zxth(r0)
diff --git a/test/CodeGen/Hexagon/intrinsics/alu32_perm.ll b/test/CodeGen/Hexagon/intrinsics/alu32_perm.ll
new file mode 100644
index 0000000..a9cc01c
--- /dev/null
+++ b/test/CodeGen/Hexagon/intrinsics/alu32_perm.ll
@@ -0,0 +1,104 @@
+; RUN: llc -march=hexagon -O0 < %s | FileCheck %s
+; Hexagon Programmer's Reference Manual 11.1.2 ALU32/PERM
+
+; Combine words into doubleword
+declare i64 @llvm.hexagon.A4.combineri(i32, i32)
+define i64 @A4_combineri(i32 %a) {
+ %z = call i64 @llvm.hexagon.A4.combineri(i32 %a, i32 0)
+ ret i64 %z
+}
+; CHECK: = combine(r0, #0)
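+; (Only the source operands are matched above; the destination pair is left
+; unpinned so the test does not depend on which register pair is allocated.)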
+
+declare i64 @llvm.hexagon.A4.combineir(i32, i32)
+define i64 @A4_combineir(i32 %a) {
+ %z = call i64 @llvm.hexagon.A4.combineir(i32 0, i32 %a)
+ ret i64 %z
+}
+; CHECK: = combine(#0, r0)
+
+declare i64 @llvm.hexagon.A2.combineii(i32, i32)
+define i64 @A2_combineii() {
+ %z = call i64 @llvm.hexagon.A2.combineii(i32 0, i32 0)
+ ret i64 %z
+}
+; CHECK: r1:0 = combine(#0, #0)
+
+declare i32 @llvm.hexagon.A2.combine.hh(i32, i32)
+define i32 @A2_combine_hh(i32 %a, i32 %b) {
+ %z = call i32 @llvm.hexagon.A2.combine.hh(i32 %a, i32 %b)
+ ret i32 %z
+}
+; CHECK: r0 = combine(r0.h, r1.h)
+
+declare i32 @llvm.hexagon.A2.combine.hl(i32, i32)
+define i32 @A2_combine_hl(i32 %a, i32 %b) {
+ %z = call i32 @llvm.hexagon.A2.combine.hl(i32 %a, i32 %b)
+ ret i32 %z
+}
+; CHECK: r0 = combine(r0.h, r1.l)
+
+declare i32 @llvm.hexagon.A2.combine.lh(i32, i32)
+define i32 @A2_combine_lh(i32 %a, i32 %b) {
+ %z = call i32 @llvm.hexagon.A2.combine.lh(i32 %a, i32 %b)
+ ret i32 %z
+}
+; CHECK: r0 = combine(r0.l, r1.h)
+
+declare i32 @llvm.hexagon.A2.combine.ll(i32, i32)
+define i32 @A2_combine_ll(i32 %a, i32 %b) {
+ %z = call i32 @llvm.hexagon.A2.combine.ll(i32 %a, i32 %b)
+ ret i32 %z
+}
+; CHECK: r0 = combine(r0.l, r1.l)
+
+declare i64 @llvm.hexagon.A2.combinew(i32, i32)
+define i64 @A2_combinew(i32 %a, i32 %b) {
+ %z = call i64 @llvm.hexagon.A2.combinew(i32 %a, i32 %b)
+ ret i64 %z
+}
+; CHECK: r1:0 = combine(r0, r1)
+
+; Mux
+declare i32 @llvm.hexagon.C2.muxri(i32, i32, i32)
+define i32 @C2_muxri(i32 %a, i32 %b) {
+ %z = call i32 @llvm.hexagon.C2.muxri(i32 %a, i32 0, i32 %b)
+ ret i32 %z
+}
+; CHECK: r0 = mux(p0, #0, r1)
+
+declare i32 @llvm.hexagon.C2.muxir(i32, i32, i32)
+define i32 @C2_muxir(i32 %a, i32 %b) {
+ %z = call i32 @llvm.hexagon.C2.muxir(i32 %a, i32 %b, i32 0)
+ ret i32 %z
+}
+; CHECK: r0 = mux(p0, r1, #0)
+
+declare i32 @llvm.hexagon.C2.mux(i32, i32, i32)
+define i32 @C2_mux(i32 %a, i32 %b, i32 %c) {
+ %z = call i32 @llvm.hexagon.C2.mux(i32 %a, i32 %b, i32 %c)
+ ret i32 %z
+}
+; CHECK: r0 = mux(p0, r1, r2)
+
+; Shift word by 16
+declare i32 @llvm.hexagon.A2.aslh(i32)
+define i32 @A2_aslh(i32 %a) {
+ %z = call i32 @llvm.hexagon.A2.aslh(i32 %a)
+ ret i32 %z
+}
+; CHECK: r0 = aslh(r0)
+
+declare i32 @llvm.hexagon.A2.asrh(i32)
+define i32 @A2_asrh(i32 %a) {
+ %z = call i32 @llvm.hexagon.A2.asrh(i32 %a)
+ ret i32 %z
+}
+; CHECK: r0 = asrh(r0)
+
+; Pack high and low halfwords
+declare i64 @llvm.hexagon.S2.packhl(i32, i32)
+define i64 @S2_packhl(i32 %a, i32 %b) {
+ %z = call i64 @llvm.hexagon.S2.packhl(i32 %a, i32 %b)
+ ret i64 %z
+}
+; CHECK: r1:0 = packhl(r0, r1)
diff --git a/test/CodeGen/Hexagon/intrinsics/cr.ll b/test/CodeGen/Hexagon/intrinsics/cr.ll
new file mode 100644
index 0000000..9bdcb25
--- /dev/null
+++ b/test/CodeGen/Hexagon/intrinsics/cr.ll
@@ -0,0 +1,132 @@
+; RUN: llc -march=hexagon < %s | FileCheck %s
+; Hexagon Programmer's Reference Manual 11.2 CR
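+;
+; The CR intrinsics model predicate operands as i32 values; llc transfers
+; them into predicate registers, so the CHECK lines match p0/p1/p2 rather
+; than the incoming r registers.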
+
+; Corner detection acceleration
+declare i32 @llvm.hexagon.C4.fastcorner9(i32, i32)
+define i32 @C4_fastcorner9(i32 %a, i32 %b) {
+  %z = call i32 @llvm.hexagon.C4.fastcorner9(i32 %a, i32 %b)
+ ret i32 %z
+}
+; CHECK: p0 = fastcorner9(p0, p1)
+
+declare i32 @llvm.hexagon.C4.fastcorner9.not(i32, i32)
+define i32 @C4_fastcorner9_not(i32 %a, i32 %b) {
+  %z = call i32 @llvm.hexagon.C4.fastcorner9.not(i32 %a, i32 %b)
+ ret i32 %z
+}
+; CHECK: p0 = !fastcorner9(p0, p1)
+
+; Logical reductions on predicates
+declare i32 @llvm.hexagon.C2.any8(i32)
+define i32 @C2_any8(i32 %a) {
+  %z = call i32 @llvm.hexagon.C2.any8(i32 %a)
+ ret i32 %z
+}
+; CHECK: p0 = any8(p0)
+
+declare i32 @llvm.hexagon.C2.all8(i32)
+define i32 @C2_all8(i32 %a) {
+  %z = call i32 @llvm.hexagon.C2.all8(i32 %a)
+ ret i32 %z
+}
+; CHECK: p0 = all8(p0)
+
+; Logical operations on predicates
+declare i32 @llvm.hexagon.C2.and(i32, i32)
+define i32 @C2_and(i32 %a, i32 %b) {
+  %z = call i32 @llvm.hexagon.C2.and(i32 %a, i32 %b)
+ ret i32 %z
+}
+; CHECK: p0 = and(p0, p1)
+
+declare i32 @llvm.hexagon.C4.and.and(i32, i32, i32)
+define i32 @C4_and_and(i32 %a, i32 %b, i32 %c) {
+  %z = call i32 @llvm.hexagon.C4.and.and(i32 %a, i32 %b, i32 %c)
+ ret i32 %z
+}
+; CHECK: p0 = and(p0, and(p1, p2))
+
+declare i32 @llvm.hexagon.C2.or(i32, i32)
+define i32 @C2_or(i32 %a, i32 %b) {
+  %z = call i32 @llvm.hexagon.C2.or(i32 %a, i32 %b)
+ ret i32 %z
+}
+; CHECK: p0 = or(p0, p1)
+
+declare i32 @llvm.hexagon.C4.and.or(i32, i32, i32)
+define i32 @C4_and_or(i32 %a, i32 %b, i32 %c) {
+  %z = call i32 @llvm.hexagon.C4.and.or(i32 %a, i32 %b, i32 %c)
+ ret i32 %z
+}
+; CHECK: p0 = and(p0, or(p1, p2))
+
+declare i32 @llvm.hexagon.C2.xor(i32, i32)
+define i32 @C2_xor(i32 %a, i32 %b) {
+  %z = call i32 @llvm.hexagon.C2.xor(i32 %a, i32 %b)
+ ret i32 %z
+}
+; CHECK: p0 = xor(p0, p1)
+
+declare i32 @llvm.hexagon.C4.or.and(i32, i32, i32)
+define i32 @C4_or_and(i32 %a, i32 %b, i32 %c) {
+  %z = call i32 @llvm.hexagon.C4.or.and(i32 %a, i32 %b, i32 %c)
+ ret i32 %z
+}
+; CHECK: p0 = or(p0, and(p1, p2))
+
+declare i32 @llvm.hexagon.C2.andn(i32, i32)
+define i32 @C2_andn(i32 %a, i32 %b) {
+  %z = call i32 @llvm.hexagon.C2.andn(i32 %a, i32 %b)
+ ret i32 %z
+}
+; CHECK: p0 = and(p0, !p1)
+
+declare i32 @llvm.hexagon.C4.or.or(i32, i32, i32)
+define i32 @C4_or_or(i32 %a, i32 %b, i32 %c) {
+  %z = call i32 @llvm.hexagon.C4.or.or(i32 %a, i32 %b, i32 %c)
+ ret i32 %z
+}
+; CHECK: p0 = or(p0, or(p1, p2))
+
+declare i32 @llvm.hexagon.C4.and.andn(i32, i32, i32)
+define i32 @C4_and_andn(i32 %a, i32 %b, i32 %c) {
+  %z = call i32 @llvm.hexagon.C4.and.andn(i32 %a, i32 %b, i32 %c)
+ ret i32 %z
+}
+; CHECK: p0 = and(p0, and(p1, !p2))
+
+declare i32 @llvm.hexagon.C4.and.orn(i32, i32, i32)
+define i32 @C4_and_orn(i32 %a, i32 %b, i32 %c) {
+  %z = call i32 @llvm.hexagon.C4.and.orn(i32 %a, i32 %b, i32 %c)
+ ret i32 %z
+}
+; CHECK: p0 = and(p0, or(p1, !p2))
+
+declare i32 @llvm.hexagon.C2.not(i32)
+define i32 @C2_not(i32 %a) {
+  %z = call i32 @llvm.hexagon.C2.not(i32 %a)
+ ret i32 %z
+}
+; CHECK: p0 = not(p0)
+
+declare i32 @llvm.hexagon.C4.or.andn(i32, i32, i32)
+define i32 @C4_or_andn(i32 %a, i32 %b, i32 %c) {
+  %z = call i32 @llvm.hexagon.C4.or.andn(i32 %a, i32 %b, i32 %c)
+ ret i32 %z
+}
+; CHECK: p0 = or(p0, and(p1, !p2))
+
+declare i32 @llvm.hexagon.C2.orn(i32, i32)
+define i32 @C2_orn(i32 %a, i32 %b) {
+  %z = call i32 @llvm.hexagon.C2.orn(i32 %a, i32 %b)
+ ret i32 %z
+}
+; CHECK: p0 = or(p0, !p1)
+
+declare i32 @llvm.hexagon.C4.or.orn(i32, i32, i32)
+define i32 @C4_or_orn(i32 %a, i32 %b, i32 %c) {
+  %z = call i32 @llvm.hexagon.C4.or.orn(i32 %a, i32 %b, i32 %c)
+ ret i32 %z
+}
+; CHECK: p0 = or(p0, or(p1, !p2))
diff --git a/test/CodeGen/Hexagon/intrinsics/xtype_alu.ll b/test/CodeGen/Hexagon/intrinsics/xtype_alu.ll
new file mode 100644
index 0000000..4a11112
--- /dev/null
+++ b/test/CodeGen/Hexagon/intrinsics/xtype_alu.ll
@@ -0,0 +1,1020 @@
+; RUN: llc -march=hexagon -mcpu=hexagonv5 -O0 < %s | FileCheck %s
+; Hexagon Programmer's Reference Manual 11.10.1 XTYPE/ALU
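+;
+; -mcpu=hexagonv5 is used because this section exercises V5 operations such
+; as A5_vaddhubs and A2_roundsat.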
+
+; Absolute value doubleword
+declare i64 @llvm.hexagon.A2.absp(i64)
+define i64 @A2_absp(i64 %a) {
+ %z = call i64 @llvm.hexagon.A2.absp(i64 %a)
+ ret i64 %z
+}
+; CHECK: r1:0 = abs(r1:0)
+
+; Absolute value word
+declare i32 @llvm.hexagon.A2.abs(i32)
+define i32 @A2_abs(i32 %a) {
+ %z = call i32 @llvm.hexagon.A2.abs(i32 %a)
+ ret i32 %z
+}
+; CHECK: r0 = abs(r0)
+
+declare i32 @llvm.hexagon.A2.abssat(i32)
+define i32 @A2_abssat(i32 %a) {
+ %z = call i32 @llvm.hexagon.A2.abssat(i32 %a)
+ ret i32 %z
+}
+; CHECK: r0 = abs(r0):sat
+
+; Add and accumulate
+declare i32 @llvm.hexagon.S4.addaddi(i32, i32, i32)
+define i32 @S4_addaddi(i32 %a, i32 %b) {
+ %z = call i32 @llvm.hexagon.S4.addaddi(i32 %a, i32 %b, i32 0)
+ ret i32 %z
+}
+; CHECK: r0 = add(r0, add(r1, #0))
+
+declare i32 @llvm.hexagon.S4.subaddi(i32, i32, i32)
+define i32 @S4_subaddi(i32 %a, i32 %b) {
+ %z = call i32 @llvm.hexagon.S4.subaddi(i32 %a, i32 0, i32 %b)
+ ret i32 %z
+}
+; CHECK: r0 = add(r0, sub(#0, r1))
+
+declare i32 @llvm.hexagon.M2.accii(i32, i32, i32)
+define i32 @M2_accii(i32 %a, i32 %b) {
+ %z = call i32 @llvm.hexagon.M2.accii(i32 %a, i32 %b, i32 0)
+ ret i32 %z
+}
+; CHECK: r0 += add(r1, #0)
+
+declare i32 @llvm.hexagon.M2.naccii(i32, i32, i32)
+define i32 @M2_naccii(i32 %a, i32 %b) {
+ %z = call i32 @llvm.hexagon.M2.naccii(i32 %a, i32 %b, i32 0)
+ ret i32 %z
+}
+; CHECK: r0 -= add(r1, #0)
+
+declare i32 @llvm.hexagon.M2.acci(i32, i32, i32)
+define i32 @M2_acci(i32 %a, i32 %b, i32 %c) {
+ %z = call i32 @llvm.hexagon.M2.acci(i32 %a, i32 %b, i32 %c)
+ ret i32 %z
+}
+; CHECK: r0 += add(r1, r2)
+
+declare i32 @llvm.hexagon.M2.nacci(i32, i32, i32)
+define i32 @M2_nacci(i32 %a, i32 %b, i32 %c) {
+ %z = call i32 @llvm.hexagon.M2.nacci(i32 %a, i32 %b, i32 %c)
+ ret i32 %z
+}
+; CHECK: r0 -= add(r1, r2)
+
+; Add doublewords
+declare i64 @llvm.hexagon.A2.addp(i64, i64)
+define i64 @A2_addp(i64 %a, i64 %b) {
+ %z = call i64 @llvm.hexagon.A2.addp(i64 %a, i64 %b)
+ ret i64 %z
+}
+; CHECK: r1:0 = add(r1:0, r3:2)
+
+declare i64 @llvm.hexagon.A2.addpsat(i64, i64)
+define i64 @A2_addpsat(i64 %a, i64 %b) {
+ %z = call i64 @llvm.hexagon.A2.addpsat(i64 %a, i64 %b)
+ ret i64 %z
+}
+; CHECK: r1:0 = add(r1:0, r3:2):sat
+
+; Add halfword
+declare i32 @llvm.hexagon.A2.addh.l16.ll(i32, i32)
+define i32 @A2_addh_l16_ll(i32 %a, i32 %b) {
+ %z = call i32 @llvm.hexagon.A2.addh.l16.ll(i32 %a, i32 %b)
+ ret i32 %z
+}
+; CHECK: r0 = add(r0.l, r1.l)
+
+declare i32 @llvm.hexagon.A2.addh.l16.hl(i32, i32)
+define i32 @A2_addh_l16_hl(i32 %a, i32 %b) {
+ %z = call i32 @llvm.hexagon.A2.addh.l16.hl(i32 %a, i32 %b)
+ ret i32 %z
+}
+; CHECK: r0 = add(r0.l, r1.h)
+
+declare i32 @llvm.hexagon.A2.addh.l16.sat.ll(i32, i32)
+define i32 @A2_addh_l16_sat_ll(i32 %a, i32 %b) {
+ %z = call i32 @llvm.hexagon.A2.addh.l16.sat.ll(i32 %a, i32 %b)
+ ret i32 %z
+}
+; CHECK: r0 = add(r0.l, r1.l):sat
+
+declare i32 @llvm.hexagon.A2.addh.l16.sat.hl(i32, i32)
+define i32 @A2_addh_l16_sat_hl(i32 %a, i32 %b) {
+ %z = call i32 @llvm.hexagon.A2.addh.l16.sat.hl(i32 %a, i32 %b)
+ ret i32 %z
+}
+; CHECK: r0 = add(r0.l, r1.h):sat
+
+declare i32 @llvm.hexagon.A2.addh.h16.ll(i32, i32)
+define i32 @A2_addh_h16_ll(i32 %a, i32 %b) {
+ %z = call i32 @llvm.hexagon.A2.addh.h16.ll(i32 %a, i32 %b)
+ ret i32 %z
+}
+; CHECK: r0 = add(r0.l, r1.l):<<16
+
+declare i32 @llvm.hexagon.A2.addh.h16.lh(i32, i32)
+define i32 @A2_addh_h16_lh(i32 %a, i32 %b) {
+ %z = call i32 @llvm.hexagon.A2.addh.h16.lh(i32 %a, i32 %b)
+ ret i32 %z
+}
+; CHECK: r0 = add(r0.l, r1.h):<<16
+
+declare i32 @llvm.hexagon.A2.addh.h16.hl(i32, i32)
+define i32 @A2_addh_h16_hl(i32 %a, i32 %b) {
+ %z = call i32 @llvm.hexagon.A2.addh.h16.hl(i32 %a, i32 %b)
+ ret i32 %z
+}
+; CHECK: r0 = add(r0.h, r1.l):<<16
+
+declare i32 @llvm.hexagon.A2.addh.h16.hh(i32, i32)
+define i32 @A2_addh_h16_hh(i32 %a, i32 %b) {
+ %z = call i32 @llvm.hexagon.A2.addh.h16.hh(i32 %a, i32 %b)
+ ret i32 %z
+}
+; CHECK: r0 = add(r0.h, r1.h):<<16
+
+declare i32 @llvm.hexagon.A2.addh.h16.sat.ll(i32, i32)
+define i32 @A2_addh_h16_sat_ll(i32 %a, i32 %b) {
+ %z = call i32 @llvm.hexagon.A2.addh.h16.sat.ll(i32 %a, i32 %b)
+ ret i32 %z
+}
+; CHECK: r0 = add(r0.l, r1.l):sat:<<16
+
+declare i32 @llvm.hexagon.A2.addh.h16.sat.lh(i32, i32)
+define i32 @A2_addh_h16_sat_lh(i32 %a, i32 %b) {
+ %z = call i32 @llvm.hexagon.A2.addh.h16.sat.lh(i32 %a, i32 %b)
+ ret i32 %z
+}
+; CHECK: r0 = add(r0.l, r1.h):sat:<<16
+
+declare i32 @llvm.hexagon.A2.addh.h16.sat.hl(i32, i32)
+define i32 @A2_addh_h16_sat_hl(i32 %a, i32 %b) {
+ %z = call i32 @llvm.hexagon.A2.addh.h16.sat.hl(i32 %a, i32 %b)
+ ret i32 %z
+}
+; CHECK: r0 = add(r0.h, r1.l):sat:<<16
+
+declare i32 @llvm.hexagon.A2.addh.h16.sat.hh(i32, i32)
+define i32 @A2_addh_h16_sat_hh(i32 %a, i32 %b) {
+ %z = call i32 @llvm.hexagon.A2.addh.h16.sat.hh(i32 %a, i32 %b)
+ ret i32 %z
+}
+; CHECK: r0 = add(r0.h, r1.h):sat:<<16
+
+; Logical doublewords
+declare i64 @llvm.hexagon.A2.notp(i64)
+define i64 @A2_notp(i64 %a) {
+ %z = call i64 @llvm.hexagon.A2.notp(i64 %a)
+ ret i64 %z
+}
+; CHECK: r1:0 = not(r1:0)
+
+declare i64 @llvm.hexagon.A2.andp(i64, i64)
+define i64 @A2_andp(i64 %a, i64 %b) {
+ %z = call i64 @llvm.hexagon.A2.andp(i64 %a, i64 %b)
+ ret i64 %z
+}
+; CHECK: r1:0 = and(r1:0, r3:2)
+
+declare i64 @llvm.hexagon.A4.andnp(i64, i64)
+define i64 @A4_andnp(i64 %a, i64 %b) {
+ %z = call i64 @llvm.hexagon.A4.andnp(i64 %a, i64 %b)
+ ret i64 %z
+}
+; CHECK: r1:0 = and(r1:0, ~r3:2)
+
+declare i64 @llvm.hexagon.A2.orp(i64, i64)
+define i64 @A2_orp(i64 %a, i64 %b) {
+ %z = call i64 @llvm.hexagon.A2.orp(i64 %a, i64 %b)
+ ret i64 %z
+}
+; CHECK: r1:0 = or(r1:0, r3:2)
+
+declare i64 @llvm.hexagon.A4.ornp(i64, i64)
+define i64 @A4_ornp(i64 %a, i64 %b) {
+ %z = call i64 @llvm.hexagon.A4.ornp(i64 %a, i64 %b)
+ ret i64 %z
+}
+; CHECK: r1:0 = or(r1:0, ~r3:2)
+
+declare i64 @llvm.hexagon.A2.xorp(i64, i64)
+define i64 @A2_xorp(i64 %a, i64 %b) {
+ %z = call i64 @llvm.hexagon.A2.xorp(i64 %a, i64 %b)
+ ret i64 %z
+}
+; CHECK: r1:0 = xor(r1:0, r3:2)
+
+; Logical-logical doublewords
+declare i64 @llvm.hexagon.M4.xor.xacc(i64, i64, i64)
+define i64 @M4_xor_xacc(i64 %a, i64 %b, i64 %c) {
+ %z = call i64 @llvm.hexagon.M4.xor.xacc(i64 %a, i64 %b, i64 %c)
+ ret i64 %z
+}
+; CHECK: r1:0 ^= xor(r3:2, r5:4)
+
+; Logical-logical words
+declare i32 @llvm.hexagon.S4.or.andi(i32, i32, i32)
+define i32 @S4_or_andi(i32 %a, i32 %b) {
+ %z = call i32 @llvm.hexagon.S4.or.andi(i32 %a, i32 %b, i32 0)
+ ret i32 %z
+}
+; CHECK: r0 |= and(r1, #0)
+
+declare i32 @llvm.hexagon.S4.or.andix(i32, i32, i32)
+define i32 @S4_or_andix(i32 %a, i32 %b) {
+ %z = call i32 @llvm.hexagon.S4.or.andix(i32 %a, i32 %b, i32 0)
+ ret i32 %z
+}
+; CHECK: r1 = or(r0, and(r1, #0))
+
+declare i32 @llvm.hexagon.M4.or.andn(i32, i32, i32)
+define i32 @M4_or_andn(i32 %a, i32 %b, i32 %c) {
+ %z = call i32 @llvm.hexagon.M4.or.andn(i32 %a, i32 %b, i32 %c)
+ ret i32 %z
+}
+; CHECK: r0 |= and(r1, ~r2)
+
+declare i32 @llvm.hexagon.M4.and.andn(i32, i32, i32)
+define i32 @M4_and_andn(i32 %a, i32 %b, i32 %c) {
+ %z = call i32 @llvm.hexagon.M4.and.andn(i32 %a, i32 %b, i32 %c)
+ ret i32 %z
+}
+; CHECK: r0 &= and(r1, ~r2)
+
+declare i32 @llvm.hexagon.M4.xor.andn(i32, i32, i32)
+define i32 @M4_xor_andn(i32 %a, i32 %b, i32 %c) {
+ %z = call i32 @llvm.hexagon.M4.xor.andn(i32 %a, i32 %b, i32 %c)
+ ret i32 %z
+}
+; CHECK: r0 ^= and(r1, ~r2)
+
+declare i32 @llvm.hexagon.M4.and.and(i32, i32, i32)
+define i32 @M4_and_and(i32 %a, i32 %b, i32 %c) {
+ %z = call i32 @llvm.hexagon.M4.and.and(i32 %a, i32 %b, i32 %c)
+ ret i32 %z
+}
+; CHECK: r0 &= and(r1, r2)
+
+declare i32 @llvm.hexagon.M4.and.or(i32, i32, i32)
+define i32 @M4_and_or(i32 %a, i32 %b, i32 %c) {
+ %z = call i32 @llvm.hexagon.M4.and.or(i32 %a, i32 %b, i32 %c)
+ ret i32 %z
+}
+; CHECK: r0 &= or(r1, r2)
+
+declare i32 @llvm.hexagon.M4.and.xor(i32, i32, i32)
+define i32 @M4_and_xor(i32 %a, i32 %b, i32 %c) {
+ %z = call i32 @llvm.hexagon.M4.and.xor(i32 %a, i32 %b, i32 %c)
+ ret i32 %z
+}
+; CHECK: r0 &= xor(r1, r2)
+
+declare i32 @llvm.hexagon.M4.or.and(i32, i32, i32)
+define i32 @M4_or_and(i32 %a, i32 %b, i32 %c) {
+ %z = call i32 @llvm.hexagon.M4.or.and(i32 %a, i32 %b, i32 %c)
+ ret i32 %z
+}
+; CHECK: r0 |= and(r1, r2)
+
+declare i32 @llvm.hexagon.M4.or.or(i32, i32, i32)
+define i32 @M4_or_or(i32 %a, i32 %b, i32 %c) {
+ %z = call i32 @llvm.hexagon.M4.or.or(i32 %a, i32 %b, i32 %c)
+ ret i32 %z
+}
+; CHECK: r0 |= or(r1, r2)
+
+declare i32 @llvm.hexagon.M4.or.xor(i32, i32, i32)
+define i32 @M4_or_xor(i32 %a, i32 %b, i32 %c) {
+ %z = call i32 @llvm.hexagon.M4.or.xor(i32 %a, i32 %b, i32 %c)
+ ret i32 %z
+}
+; CHECK: r0 |= xor(r1, r2)
+
+declare i32 @llvm.hexagon.M4.xor.and(i32, i32, i32)
+define i32 @M4_xor_and(i32 %a, i32 %b, i32 %c) {
+ %z = call i32 @llvm.hexagon.M4.xor.and(i32 %a, i32 %b, i32 %c)
+ ret i32 %z
+}
+; CHECK: r0 ^= and(r1, r2)
+
+declare i32 @llvm.hexagon.M4.xor.or(i32, i32, i32)
+define i32 @M4_xor_or(i32 %a, i32 %b, i32 %c) {
+ %z = call i32 @llvm.hexagon.M4.xor.or(i32 %a, i32 %b, i32 %c)
+ ret i32 %z
+}
+; CHECK: r0 ^= or(r1, r2)
+
+; Maximum words
+declare i32 @llvm.hexagon.A2.max(i32, i32)
+define i32 @A2_max(i32 %a, i32 %b) {
+ %z = call i32 @llvm.hexagon.A2.max(i32 %a, i32 %b)
+ ret i32 %z
+}
+; CHECK: r0 = max(r0, r1)
+
+declare i32 @llvm.hexagon.A2.maxu(i32, i32)
+define i32 @A2_maxu(i32 %a, i32 %b) {
+ %z = call i32 @llvm.hexagon.A2.maxu(i32 %a, i32 %b)
+ ret i32 %z
+}
+; CHECK: r0 = maxu(r0, r1)
+
+; Maximum doublewords
+declare i64 @llvm.hexagon.A2.maxp(i64, i64)
+define i64 @A2_maxp(i64 %a, i64 %b) {
+ %z = call i64 @llvm.hexagon.A2.maxp(i64 %a, i64 %b)
+ ret i64 %z
+}
+; CHECK: r1:0 = max(r1:0, r3:2)
+
+declare i64 @llvm.hexagon.A2.maxup(i64, i64)
+define i64 @A2_maxup(i64 %a, i64 %b) {
+ %z = call i64 @llvm.hexagon.A2.maxup(i64 %a, i64 %b)
+ ret i64 %z
+}
+; CHECK: r1:0 = maxu(r1:0, r3:2)
+
+; Minimum words
+declare i32 @llvm.hexagon.A2.min(i32, i32)
+define i32 @A2_min(i32 %a, i32 %b) {
+ %z = call i32 @llvm.hexagon.A2.min(i32 %a, i32 %b)
+ ret i32 %z
+}
+; CHECK: r0 = min(r0, r1)
+
+declare i32 @llvm.hexagon.A2.minu(i32, i32)
+define i32 @A2_minu(i32 %a, i32 %b) {
+ %z = call i32 @llvm.hexagon.A2.minu(i32 %a, i32 %b)
+ ret i32 %z
+}
+; CHECK: r0 = minu(r0, r1)
+
+; Minimum doublewords
+declare i64 @llvm.hexagon.A2.minp(i64, i64)
+define i64 @A2_minp(i64 %a, i64 %b) {
+ %z = call i64 @llvm.hexagon.A2.minp(i64 %a, i64 %b)
+ ret i64 %z
+}
+; CHECK: r1:0 = min(r1:0, r3:2)
+
+declare i64 @llvm.hexagon.A2.minup(i64, i64)
+define i64 @A2_minup(i64 %a, i64 %b) {
+ %z = call i64 @llvm.hexagon.A2.minup(i64 %a, i64 %b)
+ ret i64 %z
+}
+; CHECK: r1:0 = minu(r1:0, r3:2)
+
+; Modulo wrap
+declare i32 @llvm.hexagon.A4.modwrapu(i32, i32)
+define i32 @A4_modwrapu(i32 %a, i32 %b) {
+ %z = call i32 @llvm.hexagon.A4.modwrapu(i32 %a, i32 %b)
+ ret i32 %z
+}
+; CHECK: r0 = modwrap(r0, r1)
+
+; Negate
+declare i64 @llvm.hexagon.A2.negp(i64)
+define i64 @A2_negp(i64 %a) {
+ %z = call i64 @llvm.hexagon.A2.negp(i64 %a)
+ ret i64 %z
+}
+; CHECK: r1:0 = neg(r1:0)
+
+declare i32 @llvm.hexagon.A2.negsat(i32)
+define i32 @A2_negsat(i32 %a) {
+ %z = call i32 @llvm.hexagon.A2.negsat(i32 %a)
+ ret i32 %z
+}
+; CHECK: r0 = neg(r0):sat
+
+; Round
+declare i32 @llvm.hexagon.A2.roundsat(i64)
+define i32 @A2_roundsat(i64 %a) {
+ %z = call i32 @llvm.hexagon.A2.roundsat(i64 %a)
+ ret i32 %z
+}
+; CHECK: r0 = round(r1:0):sat
+
+declare i32 @llvm.hexagon.A4.cround.ri(i32, i32)
+define i32 @A4_cround_ri(i32 %a) {
+ %z = call i32 @llvm.hexagon.A4.cround.ri(i32 %a, i32 0)
+ ret i32 %z
+}
+; CHECK: r0 = cround(r0, #0)
+
+declare i32 @llvm.hexagon.A4.round.ri(i32, i32)
+define i32 @A4_round_ri(i32 %a) {
+ %z = call i32 @llvm.hexagon.A4.round.ri(i32 %a, i32 0)
+ ret i32 %z
+}
+; CHECK: r0 = round(r0, #0)
+
+declare i32 @llvm.hexagon.A4.round.ri.sat(i32, i32)
+define i32 @A4_round_ri_sat(i32 %a) {
+ %z = call i32 @llvm.hexagon.A4.round.ri.sat(i32 %a, i32 0)
+ ret i32 %z
+}
+; CHECK: r0 = round(r0, #0):sat
+
+declare i32 @llvm.hexagon.A4.cround.rr(i32, i32)
+define i32 @A4_cround_rr(i32 %a, i32 %b) {
+ %z = call i32 @llvm.hexagon.A4.cround.rr(i32 %a, i32 %b)
+ ret i32 %z
+}
+; CHECK: r0 = cround(r0, r1)
+
+declare i32 @llvm.hexagon.A4.round.rr(i32, i32)
+define i32 @A4_round_rr(i32 %a, i32 %b) {
+ %z = call i32 @llvm.hexagon.A4.round.rr(i32 %a, i32 %b)
+ ret i32 %z
+}
+; CHECK: r0 = round(r0, r1)
+
+declare i32 @llvm.hexagon.A4.round.rr.sat(i32, i32)
+define i32 @A4_round_rr_sat(i32 %a, i32 %b) {
+ %z = call i32 @llvm.hexagon.A4.round.rr.sat(i32 %a, i32 %b)
+ ret i32 %z
+}
+; CHECK: r0 = round(r0, r1):sat
+
+; Subtract doublewords
+declare i64 @llvm.hexagon.A2.subp(i64, i64)
+define i64 @A2_subp(i64 %a, i64 %b) {
+ %z = call i64 @llvm.hexagon.A2.subp(i64 %a, i64 %b)
+ ret i64 %z
+}
+; CHECK: r1:0 = sub(r1:0, r3:2)
+
+; Subtract and accumulate
+declare i32 @llvm.hexagon.M2.subacc(i32, i32, i32)
+define i32 @M2_subacc(i32 %a, i32 %b, i32 %c) {
+ %z = call i32 @llvm.hexagon.M2.subacc(i32 %a, i32 %b, i32 %c)
+ ret i32 %z
+}
+; CHECK: r0 += sub(r1, r2)
+
+; Subtract halfwords
+declare i32 @llvm.hexagon.A2.subh.l16.ll(i32, i32)
+define i32 @A2_subh_l16_ll(i32 %a, i32 %b) {
+ %z = call i32 @llvm.hexagon.A2.subh.l16.ll(i32 %a, i32 %b)
+ ret i32 %z
+}
+; CHECK: r0 = sub(r0.l, r1.l)
+
+declare i32 @llvm.hexagon.A2.subh.l16.hl(i32, i32)
+define i32 @A2_subh_l16_hl(i32 %a, i32 %b) {
+ %z = call i32 @llvm.hexagon.A2.subh.l16.hl(i32 %a, i32 %b)
+ ret i32 %z
+}
+; CHECK: r0 = sub(r0.l, r1.h)
+
+declare i32 @llvm.hexagon.A2.subh.l16.sat.ll(i32, i32)
+define i32 @A2_subh_l16_sat_ll(i32 %a, i32 %b) {
+ %z = call i32 @llvm.hexagon.A2.subh.l16.sat.ll(i32 %a, i32 %b)
+ ret i32 %z
+}
+; CHECK: r0 = sub(r0.l, r1.l):sat
+
+declare i32 @llvm.hexagon.A2.subh.l16.sat.hl(i32, i32)
+define i32 @A2_subh_l16_sat_hl(i32 %a, i32 %b) {
+ %z = call i32 @llvm.hexagon.A2.subh.l16.sat.hl(i32 %a, i32 %b)
+ ret i32 %z
+}
+; CHECK: r0 = sub(r0.l, r1.h):sat
+
+declare i32 @llvm.hexagon.A2.subh.h16.ll(i32, i32)
+define i32 @A2_subh_h16_ll(i32 %a, i32 %b) {
+ %z = call i32 @llvm.hexagon.A2.subh.h16.ll(i32 %a, i32 %b)
+ ret i32 %z
+}
+; CHECK: r0 = sub(r0.l, r1.l):<<16
+
+declare i32 @llvm.hexagon.A2.subh.h16.lh(i32, i32)
+define i32 @A2_subh_h16_lh(i32 %a, i32 %b) {
+ %z = call i32 @llvm.hexagon.A2.subh.h16.lh(i32 %a, i32 %b)
+ ret i32 %z
+}
+; CHECK: r0 = sub(r0.l, r1.h):<<16
+
+declare i32 @llvm.hexagon.A2.subh.h16.hl(i32, i32)
+define i32 @A2_subh_h16_hl(i32 %a, i32 %b) {
+ %z = call i32 @llvm.hexagon.A2.subh.h16.hl(i32 %a, i32 %b)
+ ret i32 %z
+}
+; CHECK: r0 = sub(r0.h, r1.l):<<16
+
+declare i32 @llvm.hexagon.A2.subh.h16.hh(i32, i32)
+define i32 @A2_subh_h16_hh(i32 %a, i32 %b) {
+ %z = call i32 @llvm.hexagon.A2.subh.h16.hh(i32 %a, i32 %b)
+ ret i32 %z
+}
+; CHECK: r0 = sub(r0.h, r1.h):<<16
+
+declare i32 @llvm.hexagon.A2.subh.h16.sat.ll(i32, i32)
+define i32 @A2_subh_h16_sat_ll(i32 %a, i32 %b) {
+ %z = call i32 @llvm.hexagon.A2.subh.h16.sat.ll(i32 %a, i32 %b)
+ ret i32 %z
+}
+; CHECK: r0 = sub(r0.l, r1.l):sat:<<16
+
+declare i32 @llvm.hexagon.A2.subh.h16.sat.lh(i32, i32)
+define i32 @A2_subh_h16_sat_lh(i32 %a, i32 %b) {
+ %z = call i32 @llvm.hexagon.A2.subh.h16.sat.lh(i32 %a, i32 %b)
+ ret i32 %z
+}
+; CHECK: r0 = sub(r0.l, r1.h):sat:<<16
+
+declare i32 @llvm.hexagon.A2.subh.h16.sat.hl(i32, i32)
+define i32 @A2_subh_h16_sat_hl(i32 %a, i32 %b) {
+ %z = call i32 @llvm.hexagon.A2.subh.h16.sat.hl(i32 %a, i32 %b)
+ ret i32 %z
+}
+; CHECK: r0 = sub(r0.h, r1.l):sat:<<16
+
+declare i32 @llvm.hexagon.A2.subh.h16.sat.hh(i32, i32)
+define i32 @A2_subh_h16_sat_hh(i32 %a, i32 %b) {
+ %z = call i32 @llvm.hexagon.A2.subh.h16.sat.hh(i32 %a, i32 %b)
+ ret i32 %z
+}
+; CHECK: r0 = sub(r0.h, r1.h):sat:<<16
+
+; Sign extend word to doubleword
+declare i64 @llvm.hexagon.A2.sxtw(i32)
+define i64 @A2_sxtw(i32 %a) {
+ %z = call i64 @llvm.hexagon.A2.sxtw(i32 %a)
+ ret i64 %z
+}
+; CHECK: = sxtw(r0)
+
+; Vector absolute value halfwords
+declare i64 @llvm.hexagon.A2.vabsh(i64)
+define i64 @A2_vabsh(i64 %a) {
+ %z = call i64 @llvm.hexagon.A2.vabsh(i64 %a)
+ ret i64 %z
+}
+; CHECK: r1:0 = vabsh(r1:0)
+
+declare i64 @llvm.hexagon.A2.vabshsat(i64)
+define i64 @A2_vabshsat(i64 %a) {
+ %z = call i64 @llvm.hexagon.A2.vabshsat(i64 %a)
+ ret i64 %z
+}
+; CHECK: r1:0 = vabsh(r1:0):sat
+
+; Vector absolute value words
+declare i64 @llvm.hexagon.A2.vabsw(i64)
+define i64 @A2_vabsw(i64 %a) {
+ %z = call i64 @llvm.hexagon.A2.vabsw(i64 %a)
+ ret i64 %z
+}
+; CHECK: r1:0 = vabsw(r1:0)
+
+declare i64 @llvm.hexagon.A2.vabswsat(i64)
+define i64 @A2_vabswsat(i64 %a) {
+ %z = call i64 @llvm.hexagon.A2.vabswsat(i64 %a)
+ ret i64 %z
+}
+; CHECK: r1:0 = vabsw(r1:0):sat
+
+; Vector absolute difference halfwords
+declare i64 @llvm.hexagon.M2.vabsdiffh(i64, i64)
+define i64 @M2_vabsdiffh(i64 %a, i64 %b) {
+ %z = call i64 @llvm.hexagon.M2.vabsdiffh(i64 %a, i64 %b)
+ ret i64 %z
+}
+; CHECK: r1:0 = vabsdiffh(r1:0, r3:2)
+
+; Vector absolute difference words
+declare i64 @llvm.hexagon.M2.vabsdiffw(i64, i64)
+define i64 @M2_vabsdiffw(i64 %a, i64 %b) {
+ %z = call i64 @llvm.hexagon.M2.vabsdiffw(i64 %a, i64 %b)
+ ret i64 %z
+}
+; CHECK: r1:0 = vabsdiffw(r1:0, r3:2)
+
+; Vector add halfwords
+declare i64 @llvm.hexagon.A2.vaddh(i64, i64)
+define i64 @A2_vaddh(i64 %a, i64 %b) {
+ %z = call i64 @llvm.hexagon.A2.vaddh(i64 %a, i64 %b)
+ ret i64 %z
+}
+; CHECK: r1:0 = vaddh(r1:0, r3:2)
+
+declare i64 @llvm.hexagon.A2.vaddhs(i64, i64)
+define i64 @A2_vaddhs(i64 %a, i64 %b) {
+ %z = call i64 @llvm.hexagon.A2.vaddhs(i64 %a, i64 %b)
+ ret i64 %z
+}
+; CHECK: r1:0 = vaddh(r1:0, r3:2):sat
+
+declare i64 @llvm.hexagon.A2.vadduhs(i64, i64)
+define i64 @A2_vadduhs(i64 %a, i64 %b) {
+ %z = call i64 @llvm.hexagon.A2.vadduhs(i64 %a, i64 %b)
+ ret i64 %z
+}
+; CHECK: r1:0 = vadduh(r1:0, r3:2):sat
+
+; Vector add halfwords with saturate and pack to unsigned bytes
+declare i32 @llvm.hexagon.A5.vaddhubs(i64, i64)
+define i32 @A5_vaddhubs(i64 %a, i64 %b) {
+ %z = call i32 @llvm.hexagon.A5.vaddhubs(i64 %a, i64 %b)
+ ret i32 %z
+}
+; CHECK: r0 = vaddhub(r1:0, r3:2):sat
+
+; Vector reduce add unsigned bytes
+declare i64 @llvm.hexagon.A2.vraddub(i64, i64)
+define i64 @A2_vraddub(i64 %a, i64 %b) {
+ %z = call i64 @llvm.hexagon.A2.vraddub(i64 %a, i64 %b)
+ ret i64 %z
+}
+; CHECK: r1:0 = vraddub(r1:0, r3:2)
+
+declare i64 @llvm.hexagon.A2.vraddub.acc(i64, i64, i64)
+define i64 @A2_vraddub_acc(i64 %a, i64 %b, i64 %c) {
+ %z = call i64 @llvm.hexagon.A2.vraddub.acc(i64 %a, i64 %b, i64 %c)
+ ret i64 %z
+}
+; CHECK: r1:0 += vraddub(r3:2, r5:4)
+
+; Vector reduce add halfwords
+declare i32 @llvm.hexagon.M2.vradduh(i64, i64)
+define i32 @M2_vradduh(i64 %a, i64 %b) {
+ %z = call i32 @llvm.hexagon.M2.vradduh(i64 %a, i64 %b)
+ ret i32 %z
+}
+; CHECK: r0 = vradduh(r1:0, r3:2)
+
+declare i32 @llvm.hexagon.M2.vraddh(i64, i64)
+define i32 @M2_vraddh(i64 %a, i64 %b) {
+ %z = call i32 @llvm.hexagon.M2.vraddh(i64 %a, i64 %b)
+ ret i32 %z
+}
+; CHECK: r0 = vraddh(r1:0, r3:2)
+
+; Vector add bytes
+declare i64 @llvm.hexagon.A2.vaddub(i64, i64)
+define i64 @A2_vaddub(i64 %a, i64 %b) {
+ %z = call i64 @llvm.hexagon.A2.vaddub(i64 %a, i64 %b)
+ ret i64 %z
+}
+; CHECK: r1:0 = vaddub(r1:0, r3:2)
+
+declare i64 @llvm.hexagon.A2.vaddubs(i64, i64)
+define i64 @A2_vaddubs(i64 %a, i64 %b) {
+ %z = call i64 @llvm.hexagon.A2.vaddubs(i64 %a, i64 %b)
+ ret i64 %z
+}
+; CHECK: r1:0 = vaddub(r1:0, r3:2):sat
+
+; Vector add words
+declare i64 @llvm.hexagon.A2.vaddw(i64, i64)
+define i64 @A2_vaddw(i64 %a, i64 %b) {
+ %z = call i64 @llvm.hexagon.A2.vaddw(i64 %a, i64 %b)
+ ret i64 %z
+}
+; CHECK: r1:0 = vaddw(r1:0, r3:2)
+
+declare i64 @llvm.hexagon.A2.vaddws(i64, i64)
+define i64 @A2_vaddws(i64 %a, i64 %b) {
+ %z = call i64 @llvm.hexagon.A2.vaddws(i64 %a, i64 %b)
+ ret i64 %z
+}
+; CHECK: r1:0 = vaddw(r1:0, r3:2):sat
+
+; Vector average halfwords
+declare i64 @llvm.hexagon.A2.vavgh(i64, i64)
+define i64 @A2_vavgh(i64 %a, i64 %b) {
+ %z = call i64 @llvm.hexagon.A2.vavgh(i64 %a, i64 %b)
+ ret i64 %z
+}
+; CHECK: r1:0 = vavgh(r1:0, r3:2)
+
+declare i64 @llvm.hexagon.A2.vavghr(i64, i64)
+define i64 @A2_vavghr(i64 %a, i64 %b) {
+ %z = call i64 @llvm.hexagon.A2.vavghr(i64 %a, i64 %b)
+ ret i64 %z
+}
+; CHECK: r1:0 = vavgh(r1:0, r3:2):rnd
+
+declare i64 @llvm.hexagon.A2.vavghcr(i64, i64)
+define i64 @A2_vavghcr(i64 %a, i64 %b) {
+ %z = call i64 @llvm.hexagon.A2.vavghcr(i64 %a, i64 %b)
+ ret i64 %z
+}
+; CHECK: r1:0 = vavgh(r1:0, r3:2):crnd
+
+declare i64 @llvm.hexagon.A2.vavguh(i64, i64)
+define i64 @A2_vavguh(i64 %a, i64 %b) {
+ %z = call i64 @llvm.hexagon.A2.vavguh(i64 %a, i64 %b)
+ ret i64 %z
+}
+; CHECK: r1:0 = vavguh(r1:0, r3:2)
+
+declare i64 @llvm.hexagon.A2.vavguhr(i64, i64)
+define i64 @A2_vavguhr(i64 %a, i64 %b) {
+ %z = call i64 @llvm.hexagon.A2.vavguhr(i64 %a, i64 %b)
+ ret i64 %z
+}
+; CHECK: r1:0 = vavguh(r1:0, r3:2):rnd
+
+declare i64 @llvm.hexagon.A2.vnavgh(i64, i64)
+define i64 @A2_vnavgh(i64 %a, i64 %b) {
+ %z = call i64 @llvm.hexagon.A2.vnavgh(i64 %a, i64 %b)
+ ret i64 %z
+}
+; CHECK: r1:0 = vnavgh(r1:0, r3:2)
+
+declare i64 @llvm.hexagon.A2.vnavghr(i64, i64)
+define i64 @A2_vnavghr(i64 %a, i64 %b) {
+ %z = call i64 @llvm.hexagon.A2.vnavghr(i64 %a, i64 %b)
+ ret i64 %z
+}
+; CHECK: r1:0 = vnavgh(r1:0, r3:2):rnd
+
+declare i64 @llvm.hexagon.A2.vnavghcr(i64, i64)
+define i64 @A2_vnavghcr(i64 %a, i64 %b) {
+ %z = call i64 @llvm.hexagon.A2.vnavghcr(i64 %a, i64 %b)
+ ret i64 %z
+}
+; CHECK: r1:0 = vnavgh(r1:0, r3:2):crnd
+
+; Vector average unsigned bytes
+declare i64 @llvm.hexagon.A2.vavgub(i64, i64)
+define i64 @A2_vavgub(i64 %a, i64 %b) {
+ %z = call i64 @llvm.hexagon.A2.vavgub(i64 %a, i64 %b)
+ ret i64 %z
+}
+; CHECK: r1:0 = vavgub(r1:0, r3:2)
+
+declare i64 @llvm.hexagon.A2.vavgubr(i64, i64)
+define i64 @A2_vavgubr(i64 %a, i64 %b) {
+ %z = call i64 @llvm.hexagon.A2.vavgubr(i64 %a, i64 %b)
+ ret i64 %z
+}
+; CHECK: r1:0 = vavgub(r1:0, r3:2):rnd
+
+; Vector average words
+declare i64 @llvm.hexagon.A2.vavgw(i64, i64)
+define i64 @A2_vavgw(i64 %a, i64 %b) {
+ %z = call i64 @llvm.hexagon.A2.vavgw(i64 %a, i64 %b)
+ ret i64 %z
+}
+; CHECK: r1:0 = vavgw(r1:0, r3:2)
+
+declare i64 @llvm.hexagon.A2.vavgwr(i64, i64)
+define i64 @A2_vavgwr(i64 %a, i64 %b) {
+ %z = call i64 @llvm.hexagon.A2.vavgwr(i64 %a, i64 %b)
+ ret i64 %z
+}
+; CHECK: r1:0 = vavgw(r1:0, r3:2):rnd
+
+declare i64 @llvm.hexagon.A2.vavgwcr(i64, i64)
+define i64 @A2_vavgwcr(i64 %a, i64 %b) {
+ %z = call i64 @llvm.hexagon.A2.vavgwcr(i64 %a, i64 %b)
+ ret i64 %z
+}
+; CHECK: r1:0 = vavgw(r1:0, r3:2):crnd
+
+declare i64 @llvm.hexagon.A2.vavguw(i64, i64)
+define i64 @A2_vavguw(i64 %a, i64 %b) {
+ %z = call i64 @llvm.hexagon.A2.vavguw(i64 %a, i64 %b)
+ ret i64 %z
+}
+; CHECK: r1:0 = vavguw(r1:0, r3:2)
+
+declare i64 @llvm.hexagon.A2.vavguwr(i64, i64)
+define i64 @A2_vavguwr(i64 %a, i64 %b) {
+ %z = call i64 @llvm.hexagon.A2.vavguwr(i64 %a, i64 %b)
+ ret i64 %z
+}
+; CHECK: r1:0 = vavguw(r1:0, r3:2):rnd
+
+declare i64 @llvm.hexagon.A2.vnavgw(i64, i64)
+define i64 @A2_vnavgw(i64 %a, i64 %b) {
+ %z = call i64 @llvm.hexagon.A2.vnavgw(i64 %a, i64 %b)
+ ret i64 %z
+}
+; CHECK: r1:0 = vnavgw(r1:0, r3:2)
+
+declare i64 @llvm.hexagon.A2.vnavgwr(i64, i64)
+define i64 @A2_vnavgwr(i64 %a, i64 %b) {
+ %z = call i64 @llvm.hexagon.A2.vnavgwr(i64 %a, i64 %b)
+ ret i64 %z
+}
+; CHECK: r1:0 = vnavgw(r1:0, r3:2):rnd
+
+declare i64 @llvm.hexagon.A2.vnavgwcr(i64, i64)
+define i64 @A2_vnavgwcr(i64 %a, i64 %b) {
+ %z = call i64 @llvm.hexagon.A2.vnavgwcr(i64 %a, i64 %b)
+ ret i64 %z
+}
+; CHECK: r1:0 = vnavgw(r1:0, r3:2):crnd
+
+; Vector conditional negate
+declare i64 @llvm.hexagon.S2.vcnegh(i64, i32)
+define i64 @S2_vcnegh(i64 %a, i32 %b) {
+ %z = call i64 @llvm.hexagon.S2.vcnegh(i64 %a, i32 %b)
+ ret i64 %z
+}
+; CHECK: r1:0 = vcnegh(r1:0, r2)
+
+declare i64 @llvm.hexagon.S2.vrcnegh(i64, i64, i32)
+define i64 @S2_vrcnegh(i64 %a, i64 %b, i32 %c) {
+ %z = call i64 @llvm.hexagon.S2.vrcnegh(i64 %a, i64 %b, i32 %c)
+ ret i64 %z
+}
+; CHECK: r1:0 += vrcnegh(r3:2, r4)
+
+; Vector maximum bytes
+declare i64 @llvm.hexagon.A2.vmaxub(i64, i64)
+define i64 @A2_vmaxub(i64 %a, i64 %b) {
+ %z = call i64 @llvm.hexagon.A2.vmaxub(i64 %a, i64 %b)
+ ret i64 %z
+}
+; CHECK: r1:0 = vmaxub(r1:0, r3:2)
+
+declare i64 @llvm.hexagon.A2.vmaxb(i64, i64)
+define i64 @A2_vmaxb(i64 %a, i64 %b) {
+ %z = call i64 @llvm.hexagon.A2.vmaxb(i64 %a, i64 %b)
+ ret i64 %z
+}
+; CHECK: r1:0 = vmaxb(r1:0, r3:2)
+
+; Vector maximum halfwords
+declare i64 @llvm.hexagon.A2.vmaxh(i64, i64)
+define i64 @A2_vmaxh(i64 %a, i64 %b) {
+ %z = call i64 @llvm.hexagon.A2.vmaxh(i64 %a, i64 %b)
+ ret i64 %z
+}
+; CHECK: r1:0 = vmaxh(r1:0, r3:2)
+
+declare i64 @llvm.hexagon.A2.vmaxuh(i64, i64)
+define i64 @A2_vmaxuh(i64 %a, i64 %b) {
+ %z = call i64 @llvm.hexagon.A2.vmaxuh(i64 %a, i64 %b)
+ ret i64 %z
+}
+; CHECK: r1:0 = vmaxuh(r1:0, r3:2)
+
+; Vector reduce maximum halfwords
+declare i64 @llvm.hexagon.A4.vrmaxh(i64, i64, i32)
+define i64 @A4_vrmaxh(i64 %a, i64 %b, i32 %c) {
+ %z = call i64 @llvm.hexagon.A4.vrmaxh(i64 %a, i64 %b, i32 %c)
+ ret i64 %z
+}
+; CHECK: r1:0 = vrmaxh(r3:2, r4)
+
+declare i64 @llvm.hexagon.A4.vrmaxuh(i64, i64, i32)
+define i64 @A4_vrmaxuh(i64 %a, i64 %b, i32 %c) {
+ %z = call i64 @llvm.hexagon.A4.vrmaxuh(i64 %a, i64 %b, i32 %c)
+ ret i64 %z
+}
+; CHECK: r1:0 = vrmaxuh(r3:2, r4)
+
+; Vector reduce maximum words
+declare i64 @llvm.hexagon.A4.vrmaxw(i64, i64, i32)
+define i64 @A4_vrmaxw(i64 %a, i64 %b, i32 %c) {
+ %z = call i64 @llvm.hexagon.A4.vrmaxw(i64 %a, i64 %b, i32 %c)
+ ret i64 %z
+}
+; CHECK: r1:0 = vrmaxw(r3:2, r4)
+
+declare i64 @llvm.hexagon.A4.vrmaxuw(i64, i64, i32)
+define i64 @A4_vrmaxuw(i64 %a, i64 %b, i32 %c) {
+ %z = call i64 @llvm.hexagon.A4.vrmaxuw(i64 %a, i64 %b, i32 %c)
+ ret i64 %z
+}
+; CHECK: r1:0 = vrmaxuw(r3:2, r4)
+
+; Vector minimum bytes
+declare i64 @llvm.hexagon.A2.vminub(i64, i64)
+define i64 @A2_vminub(i64 %a, i64 %b) {
+ %z = call i64 @llvm.hexagon.A2.vminub(i64 %a, i64 %b)
+ ret i64 %z
+}
+; CHECK: r1:0 = vminub(r1:0, r3:2)
+
+declare i64 @llvm.hexagon.A2.vminb(i64, i64)
+define i64 @A2_vminb(i64 %a, i64 %b) {
+ %z = call i64 @llvm.hexagon.A2.vminb(i64 %a, i64 %b)
+ ret i64 %z
+}
+; CHECK: r1:0 = vminb(r1:0, r3:2)
+
+; Vector minimum halfwords
+declare i64 @llvm.hexagon.A2.vminh(i64, i64)
+define i64 @A2_vminh(i64 %a, i64 %b) {
+ %z = call i64 @llvm.hexagon.A2.vminh(i64 %a, i64 %b)
+ ret i64 %z
+}
+; CHECK: r1:0 = vminh(r1:0, r3:2)
+
+declare i64 @llvm.hexagon.A2.vminuh(i64, i64)
+define i64 @A2_vminuh(i64 %a, i64 %b) {
+ %z = call i64 @llvm.hexagon.A2.vminuh(i64 %a, i64 %b)
+ ret i64 %z
+}
+; CHECK: r1:0 = vminuh(r1:0, r3:2)
+
+; Vector reduce minimum halfwords
+declare i64 @llvm.hexagon.A4.vrminh(i64, i64, i32)
+define i64 @A4_vrminh(i64 %a, i64 %b, i32 %c) {
+ %z = call i64 @llvm.hexagon.A4.vrminh(i64 %a, i64 %b, i32 %c)
+ ret i64 %z
+}
+; CHECK: r1:0 = vrminh(r3:2, r4)
+
+declare i64 @llvm.hexagon.A4.vrminuh(i64, i64, i32)
+define i64 @A4_vrminuh(i64 %a, i64 %b, i32 %c) {
+ %z = call i64 @llvm.hexagon.A4.vrminuh(i64 %a, i64 %b, i32 %c)
+ ret i64 %z
+}
+; CHECK: r1:0 = vrminuh(r3:2, r4)
+
+; Vector reduce minimum words
+declare i64 @llvm.hexagon.A4.vrminw(i64, i64, i32)
+define i64 @A4_vrminw(i64 %a, i64 %b, i32 %c) {
+ %z = call i64 @llvm.hexagon.A4.vrminw(i64 %a, i64 %b, i32 %c)
+ ret i64 %z
+}
+; CHECK: r1:0 = vrminw(r3:2, r4)
+
+declare i64 @llvm.hexagon.A4.vrminuw(i64, i64, i32)
+define i64 @A4_vrminuw(i64 %a, i64 %b, i32 %c) {
+ %z = call i64 @llvm.hexagon.A4.vrminuw(i64 %a, i64 %b, i32 %c)
+ ret i64 %z
+}
+; CHECK: r1:0 = vrminuw(r3:2, r4)
+
+; Vector sum of absolute differences unsigned bytes
+declare i64 @llvm.hexagon.A2.vrsadub(i64, i64)
+define i64 @A2_vrsadub(i64 %a, i64 %b) {
+ %z = call i64 @llvm.hexagon.A2.vrsadub(i64 %a, i64 %b)
+ ret i64 %z
+}
+; CHECK: r1:0 = vrsadub(r1:0, r3:2)
+
+declare i64 @llvm.hexagon.A2.vrsadub.acc(i64, i64, i64)
+define i64 @A2_vrsadub_acc(i64 %a, i64 %b, i64 %c) {
+ %z = call i64 @llvm.hexagon.A2.vrsadub.acc(i64 %a, i64 %b, i64 %c)
+ ret i64 %z
+}
+; CHECK: r1:0 += vrsadub(r3:2, r5:4)
+
+; Vector subtract halfwords
+declare i64 @llvm.hexagon.A2.vsubh(i64, i64)
+define i64 @A2_vsubh(i64 %a, i64 %b) {
+ %z = call i64 @llvm.hexagon.A2.vsubh(i64 %a, i64 %b)
+ ret i64 %z
+}
+; CHECK: r1:0 = vsubh(r1:0, r3:2)
+
+declare i64 @llvm.hexagon.A2.vsubhs(i64, i64)
+define i64 @A2_vsubhs(i64 %a, i64 %b) {
+ %z = call i64 @llvm.hexagon.A2.vsubhs(i64 %a, i64 %b)
+ ret i64 %z
+}
+; CHECK: r1:0 = vsubh(r1:0, r3:2):sat
+
+declare i64 @llvm.hexagon.A2.vsubuhs(i64, i64)
+define i64 @A2_vsubuhs(i64 %a, i64 %b) {
+ %z = call i64 @llvm.hexagon.A2.vsubuhs(i64 %a, i64 %b)
+ ret i64 %z
+}
+; CHECK: r1:0 = vsubuh(r1:0, r3:2):sat
+
+; Vector subtract bytes
+declare i64 @llvm.hexagon.A2.vsubub(i64, i64)
+define i64 @A2_vsubub(i64 %a, i64 %b) {
+ %z = call i64 @llvm.hexagon.A2.vsubub(i64 %a, i64 %b)
+ ret i64 %z
+}
+; CHECK: r1:0 = vsubub(r1:0, r3:2)
+
+declare i64 @llvm.hexagon.A2.vsububs(i64, i64)
+define i64 @A2_vsububs(i64 %a, i64 %b) {
+ %z = call i64 @llvm.hexagon.A2.vsububs(i64 %a, i64 %b)
+ ret i64 %z
+}
+; CHECK: r1:0 = vsubub(r1:0, r3:2):sat
+
+; Vector subtract words
+declare i64 @llvm.hexagon.A2.vsubw(i64, i64)
+define i64 @A2_vsubw(i64 %a, i64 %b) {
+ %z = call i64 @llvm.hexagon.A2.vsubw(i64 %a, i64 %b)
+ ret i64 %z
+}
+; CHECK: r1:0 = vsubw(r1:0, r3:2)
+
+declare i64 @llvm.hexagon.A2.vsubws(i64, i64)
+define i64 @A2_vsubws(i64 %a, i64 %b) {
+ %z = call i64 @llvm.hexagon.A2.vsubws(i64 %a, i64 %b)
+ ret i64 %z
+}
+; CHECK: r1:0 = vsubw(r1:0, r3:2):sat
diff --git a/test/CodeGen/Hexagon/intrinsics/xtype_bit.ll b/test/CodeGen/Hexagon/intrinsics/xtype_bit.ll
new file mode 100644
index 0000000..8531b2f
--- /dev/null
+++ b/test/CodeGen/Hexagon/intrinsics/xtype_bit.ll
@@ -0,0 +1,329 @@
+; RUN: llc -march=hexagon -O0 < %s | FileCheck %s
+; Hexagon Programmer's Reference Manual 11.10.2 XTYPE/BIT
+
+; Count leading
+declare i32 @llvm.hexagon.S2.clbp(i64)
+define i32 @S2_clbp(i64 %a) {
+ %z = call i32 @llvm.hexagon.S2.clbp(i64 %a)
+ ret i32 %z
+}
+; CHECK: r0 = clb(r1:0)
+
+declare i32 @llvm.hexagon.S2.cl0p(i64)
+define i32 @S2_cl0p(i64 %a) {
+ %z = call i32 @llvm.hexagon.S2.cl0p(i64 %a)
+ ret i32 %z
+}
+; CHECK: r0 = cl0(r1:0)
+
+declare i32 @llvm.hexagon.S2.cl1p(i64)
+define i32 @S2_cl1p(i64 %a) {
+ %z = call i32 @llvm.hexagon.S2.cl1p(i64 %a)
+ ret i32 %z
+}
+; CHECK: r0 = cl1(r1:0)
+
+declare i32 @llvm.hexagon.S4.clbpnorm(i64)
+define i32 @S4_clbpnorm(i64 %a) {
+ %z = call i32 @llvm.hexagon.S4.clbpnorm(i64 %a)
+ ret i32 %z
+}
+; CHECK: r0 = normamt(r1:0)
+
+declare i32 @llvm.hexagon.S4.clbpaddi(i64, i32)
+define i32 @S4_clbpaddi(i64 %a) {
+ %z = call i32 @llvm.hexagon.S4.clbpaddi(i64 %a, i32 0)
+ ret i32 %z
+}
+; CHECK: r0 = add(clb(r1:0), #0)
+
+declare i32 @llvm.hexagon.S4.clbaddi(i32, i32)
+define i32 @S4_clbaddi(i32 %a) {
+ %z = call i32 @llvm.hexagon.S4.clbaddi(i32 %a, i32 0)
+ ret i32 %z
+}
+; CHECK: r0 = add(clb(r0), #0)
+
+declare i32 @llvm.hexagon.S2.cl0(i32)
+define i32 @S2_cl0(i32 %a) {
+ %z = call i32 @llvm.hexagon.S2.cl0(i32 %a)
+ ret i32 %z
+}
+; CHECK: r0 = cl0(r0)
+
+declare i32 @llvm.hexagon.S2.cl1(i32)
+define i32 @S2_cl1(i32 %a) {
+ %z = call i32 @llvm.hexagon.S2.cl1(i32 %a)
+ ret i32 %z
+}
+; CHECK: r0 = cl1(r0)
+
+declare i32 @llvm.hexagon.S2.clbnorm(i32)
+define i32 @S2_clbnorm(i32 %a) {
+ %z = call i32 @llvm.hexagon.S2.clbnorm(i32 %a)
+ ret i32 %z
+}
+; CHECK: r0 = normamt(r0)
+
+; Count population
+declare i32 @llvm.hexagon.S5.popcountp(i64)
+define i32 @S5_popcountp(i64 %a) {
+ %z = call i32 @llvm.hexagon.S5.popcountp(i64 %a)
+ ret i32 %z
+}
+; CHECK: r0 = popcount(r1:0)
+
+; Count trailing
+declare i32 @llvm.hexagon.S2.ct0p(i64)
+define i32 @S2_ct0p(i64 %a) {
+ %z = call i32 @llvm.hexagon.S2.ct0p(i64 %a)
+ ret i32 %z
+}
+; CHECK: r0 = ct0(r1:0)
+
+declare i32 @llvm.hexagon.S2.ct1p(i64)
+define i32 @S2_ct1p(i64 %a) {
+ %z = call i32 @llvm.hexagon.S2.ct1p(i64 %a)
+ ret i32 %z
+}
+; CHECK: r0 = ct1(r1:0)
+
+declare i32 @llvm.hexagon.S2.ct0(i32)
+define i32 @S2_ct0(i32 %a) {
+ %z = call i32 @llvm.hexagon.S2.ct0(i32 %a)
+ ret i32 %z
+}
+; CHECK: r0 = ct0(r0)
+
+declare i32 @llvm.hexagon.S2.ct1(i32)
+define i32 @S2_ct1(i32 %a) {
+ %z = call i32 @llvm.hexagon.S2.ct1(i32 %a)
+ ret i32 %z
+}
+; CHECK: r0 = ct1(r0)
+
+; Extract bitfield
+declare i64 @llvm.hexagon.S2.extractup(i64, i32, i32)
+define i64 @S2_extractup(i64 %a) {
+ %z = call i64 @llvm.hexagon.S2.extractup(i64 %a, i32 0, i32 0)
+ ret i64 %z
+}
+; CHECK: r1:0 = extractu(r1:0, #0, #0)
+
+declare i64 @llvm.hexagon.S4.extractp(i64, i32, i32)
+define i64 @S4_extractp(i64 %a) {
+ %z = call i64 @llvm.hexagon.S4.extractp(i64 %a, i32 0, i32 0)
+ ret i64 %z
+}
+; CHECK: r1:0 = extract(r1:0, #0, #0)
+
+declare i32 @llvm.hexagon.S2.extractu(i32, i32, i32)
+define i32 @S2_extractu(i32 %a) {
+ %z = call i32 @llvm.hexagon.S2.extractu(i32 %a, i32 0, i32 0)
+ ret i32 %z
+}
+; CHECK: r0 = extractu(r0, #0, #0)
+
+declare i32 @llvm.hexagon.S4.extract(i32, i32, i32)
+define i32 @S4_extract(i32 %a) {
+ %z = call i32 @llvm.hexagon.S4.extract(i32 %a, i32 0, i32 0)
+ ret i32 %z
+}
+; CHECK: r0 = extract(r0, #0, #0)
+
+declare i64 @llvm.hexagon.S2.extractup.rp(i64, i64)
+define i64 @S2_extractup_rp(i64 %a, i64 %b) {
+ %z = call i64 @llvm.hexagon.S2.extractup.rp(i64 %a, i64 %b)
+ ret i64 %z
+}
+; CHECK: r1:0 = extractu(r1:0, r3:2)
+
+declare i64 @llvm.hexagon.S4.extractp.rp(i64, i64)
+define i64 @S4_extractp_rp(i64 %a, i64 %b) {
+ %z = call i64 @llvm.hexagon.S4.extractp.rp(i64 %a, i64 %b)
+ ret i64 %z
+}
+; CHECK: r1:0 = extract(r1:0, r3:2)
+
+declare i32 @llvm.hexagon.S2.extractu.rp(i32, i64)
+define i32 @S2_extractu_rp(i32 %a, i64 %b) {
+ %z = call i32 @llvm.hexagon.S2.extractu.rp(i32 %a, i64 %b)
+ ret i32 %z
+}
+; CHECK: r0 = extractu(r0, r3:2)
+
+declare i32 @llvm.hexagon.S4.extract.rp(i32, i64)
+define i32 @S4_extract_rp(i32 %a, i64 %b) {
+ %z = call i32 @llvm.hexagon.S4.extract.rp(i32 %a, i64 %b)
+ ret i32 %z
+}
+; CHECK: r0 = extract(r0, r3:2)
+
+; Insert bitfield
+declare i64 @llvm.hexagon.S2.insertp(i64, i64, i32, i32)
+define i64 @S2_insertp(i64 %a, i64 %b) {
+ %z = call i64 @llvm.hexagon.S2.insertp(i64 %a, i64 %b, i32 0, i32 0)
+ ret i64 %z
+}
+; CHECK: r1:0 = insert(r3:2, #0, #0)
+
+declare i32 @llvm.hexagon.S2.insert(i32, i32, i32, i32)
+define i32 @S2_insert(i32 %a, i32 %b) {
+ %z = call i32 @llvm.hexagon.S2.insert(i32 %a, i32 %b, i32 0, i32 0)
+ ret i32 %z
+}
+; CHECK: r0 = insert(r1, #0, #0)
+
+declare i32 @llvm.hexagon.S2.insert.rp(i32, i32, i64)
+define i32 @S2_insert_rp(i32 %a, i32 %b, i64 %c) {
+ %z = call i32 @llvm.hexagon.S2.insert.rp(i32 %a, i32 %b, i64 %c)
+ ret i32 %z
+}
+; CHECK: r0 = insert(r1, r3:2)
+
+declare i64 @llvm.hexagon.S2.insertp.rp(i64, i64, i64)
+define i64 @S2_insertp_rp(i64 %a, i64 %b, i64 %c) {
+ %z = call i64 @llvm.hexagon.S2.insertp.rp(i64 %a, i64 %b, i64 %c)
+ ret i64 %z
+}
+; CHECK: r1:0 = insert(r3:2, r5:4)
+
+; Interleave/deinterleave
+declare i64 @llvm.hexagon.S2.deinterleave(i64)
+define i64 @S2_deinterleave(i64 %a) {
+ %z = call i64 @llvm.hexagon.S2.deinterleave(i64 %a)
+ ret i64 %z
+}
+; CHECK: r1:0 = deinterleave(r1:0)
+
+declare i64 @llvm.hexagon.S2.interleave(i64)
+define i64 @S2_interleave(i64 %a) {
+ %z = call i64 @llvm.hexagon.S2.interleave(i64 %a)
+ ret i64 %z
+}
+; CHECK: r1:0 = interleave(r1:0)
+
+; Linear feedback-shift operation
+declare i64 @llvm.hexagon.S2.lfsp(i64, i64)
+define i64 @S2_lfsp(i64 %a, i64 %b) {
+ %z = call i64 @llvm.hexagon.S2.lfsp(i64 %a, i64 %b)
+ ret i64 %z
+}
+; CHECK: r1:0 = lfs(r1:0, r3:2)
+
+; Masked parity
+declare i32 @llvm.hexagon.S2.parityp(i64, i64)
+define i32 @S2_parityp(i64 %a, i64 %b) {
+ %z = call i32 @llvm.hexagon.S2.parityp(i64 %a, i64 %b)
+ ret i32 %z
+}
+; CHECK: r0 = parity(r1:0, r3:2)
+
+declare i32 @llvm.hexagon.S4.parity(i32, i32)
+define i32 @S4_parity(i32 %a, i32 %b) {
+ %z = call i32 @llvm.hexagon.S4.parity(i32 %a, i32 %b)
+ ret i32 %z
+}
+; CHECK: r0 = parity(r0, r1)
+
+; Bit reverse
+declare i64 @llvm.hexagon.S2.brevp(i64)
+define i64 @S2_brevp(i64 %a) {
+ %z = call i64 @llvm.hexagon.S2.brevp(i64 %a)
+ ret i64 %z
+}
+; CHECK: r1:0 = brev(r1:0)
+
+declare i32 @llvm.hexagon.S2.brev(i32)
+define i32 @S2_brev(i32 %a) {
+ %z = call i32 @llvm.hexagon.S2.brev(i32 %a)
+ ret i32 %z
+}
+; CHECK: r0 = brev(r0)
+
+; Set/clear/toggle bit
+declare i32 @llvm.hexagon.S2.setbit.i(i32, i32)
+define i32 @S2_setbit_i(i32 %a) {
+ %z = call i32 @llvm.hexagon.S2.setbit.i(i32 %a, i32 0)
+ ret i32 %z
+}
+; CHECK: r0 = setbit(r0, #0)
+
+declare i32 @llvm.hexagon.S2.clrbit.i(i32, i32)
+define i32 @S2_clrbit_i(i32 %a) {
+ %z = call i32 @llvm.hexagon.S2.clrbit.i(i32 %a, i32 0)
+ ret i32 %z
+}
+; CHECK: r0 = clrbit(r0, #0)
+
+declare i32 @llvm.hexagon.S2.togglebit.i(i32, i32)
+define i32 @S2_togglebit_i(i32 %a) {
+ %z = call i32 @llvm.hexagon.S2.togglebit.i(i32 %a, i32 0)
+ ret i32 %z
+}
+; CHECK: r0 = togglebit(r0, #0)
+
+declare i32 @llvm.hexagon.S2.setbit.r(i32, i32)
+define i32 @S2_setbit_r(i32 %a, i32 %b) {
+ %z = call i32 @llvm.hexagon.S2.setbit.r(i32 %a, i32 %b)
+ ret i32 %z
+}
+; CHECK: r0 = setbit(r0, r1)
+
+declare i32 @llvm.hexagon.S2.clrbit.r(i32, i32)
+define i32 @S2_clrbit_r(i32 %a, i32 %b) {
+ %z = call i32 @llvm.hexagon.S2.clrbit.r(i32 %a, i32 %b)
+ ret i32 %z
+}
+; CHECK: r0 = clrbit(r0, r1)
+
+declare i32 @llvm.hexagon.S2.togglebit.r(i32, i32)
+define i32 @S2_togglebit_r(i32 %a, i32 %b) {
+ %z = call i32 @llvm.hexagon.S2.togglebit.r(i32 %a, i32 %b)
+ ret i32 %z
+}
+; CHECK: r0 = togglebit(r0, r1)
+
+; Split bitfield
+declare i64 @llvm.hexagon.A4.bitspliti(i32, i32)
+define i64 @A4_bitspliti(i32 %a) {
+ %z = call i64 @llvm.hexagon.A4.bitspliti(i32 %a, i32 0)
+ ret i64 %z
+}
+; CHECK: = bitsplit(r0, #0)
+
+declare i64 @llvm.hexagon.A4.bitsplit(i32, i32)
+define i64 @A4_bitsplit(i32 %a, i32 %b) {
+ %z = call i64 @llvm.hexagon.A4.bitsplit(i32 %a, i32 %b)
+ ret i64 %z
+}
+; CHECK: r1:0 = bitsplit(r0, r1)
+
+; Table index
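+; In the .goodsyntax forms the printed shift is biased by log2 of the element
+; size, so the #0 passed below prints as #0, #-1, #-2 and #-3 for bytes,
+; halfwords, words and doublewords respectively.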
+declare i32 @llvm.hexagon.S2.tableidxb.goodsyntax(i32, i32, i32, i32)
+define i32 @S2_tableidxb_goodsyntax(i32 %a, i32 %b) {
+ %z = call i32 @llvm.hexagon.S2.tableidxb.goodsyntax(i32 %a, i32 %b, i32 0, i32 0)
+ ret i32 %z
+}
+; CHECK: r0 = tableidxb(r1, #0, #0)
+
+declare i32 @llvm.hexagon.S2.tableidxh.goodsyntax(i32, i32, i32, i32)
+define i32 @S2_tableidxh_goodsyntax(i32 %a, i32 %b) {
+ %z = call i32 @llvm.hexagon.S2.tableidxh.goodsyntax(i32 %a, i32 %b, i32 0, i32 0)
+ ret i32 %z
+}
+; CHECK: r0 = tableidxh(r1, #0, #-1)
+
+declare i32 @llvm.hexagon.S2.tableidxw.goodsyntax(i32, i32, i32, i32)
+define i32 @S2_tableidxw_goodsyntax(i32 %a, i32 %b) {
+ %z = call i32 @llvm.hexagon.S2.tableidxw.goodsyntax(i32 %a, i32 %b, i32 0, i32 0)
+ ret i32 %z
+}
+; CHECK: r0 = tableidxw(r1, #0, #-2)
+
+declare i32 @llvm.hexagon.S2.tableidxd.goodsyntax(i32, i32, i32, i32)
+define i32 @S2_tableidxd_goodsyntax(i32 %a, i32 %b) {
+ %z = call i32 @llvm.hexagon.S2.tableidxd.goodsyntax(i32 %a, i32 %b, i32 0, i32 0)
+ ret i32 %z
+}
+; CHECK: r0 = tableidxd(r1, #0, #-3)
diff --git a/test/CodeGen/Hexagon/intrinsics/xtype_complex.ll b/test/CodeGen/Hexagon/intrinsics/xtype_complex.ll
new file mode 100644
index 0000000..57b0c5b
--- /dev/null
+++ b/test/CodeGen/Hexagon/intrinsics/xtype_complex.ll
@@ -0,0 +1,349 @@
+; RUN: llc -march=hexagon -O0 < %s | FileCheck %s
+; Hexagon Programmer's Reference Manual 11.10.3 XTYPE/COMPLEX
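+;
+; Suffixes in the CHECK lines follow the PRM notation: <<1 is the
+; shift-by-one form, a trailing * marks a conjugated operand, and rnd/sat
+; request rounding and saturation.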
+
+; Complex add/sub halfwords
+declare i64 @llvm.hexagon.S4.vxaddsubh(i64, i64)
+define i64 @S4_vxaddsubh(i64 %a, i64 %b) {
+ %z = call i64 @llvm.hexagon.S4.vxaddsubh(i64 %a, i64 %b)
+ ret i64 %z
+}
+; CHECK: r1:0 = vxaddsubh(r1:0, r3:2):sat
+
+declare i64 @llvm.hexagon.S4.vxsubaddh(i64, i64)
+define i64 @S4_vxsubaddh(i64 %a, i64 %b) {
+ %z = call i64 @llvm.hexagon.S4.vxsubaddh(i64 %a, i64 %b)
+ ret i64 %z
+}
+; CHECK: r1:0 = vxsubaddh(r1:0, r3:2):sat
+
+declare i64 @llvm.hexagon.S4.vxaddsubhr(i64, i64)
+define i64 @S4_vxaddsubhr(i64 %a, i64 %b) {
+ %z = call i64 @llvm.hexagon.S4.vxaddsubhr(i64 %a, i64 %b)
+ ret i64 %z
+}
+; CHECK: r1:0 = vxaddsubh(r1:0, r3:2):rnd:>>1:sat
+
+declare i64 @llvm.hexagon.S4.vxsubaddhr(i64, i64)
+define i64 @S4_vxsubaddhr(i64 %a, i64 %b) {
+ %z = call i64 @llvm.hexagon.S4.vxsubaddhr(i64 %a, i64 %b)
+ ret i64 %z
+}
+; CHECK: r1:0 = vxsubaddh(r1:0, r3:2):rnd:>>1:sat
+
+; Complex add/sub words
+declare i64 @llvm.hexagon.S4.vxaddsubw(i64, i64)
+define i64 @S4_vxaddsubw(i64 %a, i64 %b) {
+ %z = call i64 @llvm.hexagon.S4.vxaddsubw(i64 %a, i64 %b)
+ ret i64 %z
+}
+; CHECK: r1:0 = vxaddsubw(r1:0, r3:2):sat
+
+declare i64 @llvm.hexagon.S4.vxsubaddw(i64, i64)
+define i64 @S4_vxsubaddw(i64 %a, i64 %b) {
+ %z = call i64 @llvm.hexagon.S4.vxsubaddw(i64 %a, i64 %b)
+ ret i64 %z
+}
+; CHECK: r1:0 = vxsubaddw(r1:0, r3:2):sat
+
+; Complex multiply
+declare i64 @llvm.hexagon.M2.cmpys.s0(i32, i32)
+define i64 @M2_cmpys_s0(i32 %a, i32 %b) {
+ %z = call i64 @llvm.hexagon.M2.cmpys.s0(i32 %a, i32 %b)
+ ret i64 %z
+}
+; CHECK: r1:0 = cmpy(r0, r1):sat
+
+declare i64 @llvm.hexagon.M2.cmpys.s1(i32, i32)
+define i64 @M2_cmpys_s1(i32 %a, i32 %b) {
+ %z = call i64 @llvm.hexagon.M2.cmpys.s1(i32 %a, i32 %b)
+ ret i64 %z
+}
+; CHECK: r1:0 = cmpy(r0, r1):<<1:sat
+
+declare i64 @llvm.hexagon.M2.cmpysc.s0(i32, i32)
+define i64 @M2_cmpysc_s0(i32 %a, i32 %b) {
+ %z = call i64 @llvm.hexagon.M2.cmpysc.s0(i32 %a, i32 %b)
+ ret i64 %z
+}
+; CHECK: r1:0 = cmpy(r0, r1*):sat
+
+declare i64 @llvm.hexagon.M2.cmpysc.s1(i32, i32)
+define i64 @M2_cmpysc_s1(i32 %a, i32 %b) {
+ %z = call i64 @llvm.hexagon.M2.cmpysc.s1(i32 %a, i32 %b)
+ ret i64 %z
+}
+; CHECK: r1:0 = cmpy(r0, r1*):<<1:sat
+
+declare i64 @llvm.hexagon.M2.cmacs.s0(i64, i32, i32)
+define i64 @M2_cmacs_s0(i64 %a, i32 %b, i32 %c) {
+ %z = call i64 @llvm.hexagon.M2.cmacs.s0(i64 %a, i32 %b, i32 %c)
+ ret i64 %z
+}
+; CHECK: r1:0 += cmpy(r2, r3):sat
+
+declare i64 @llvm.hexagon.M2.cmacs.s1(i64, i32, i32)
+define i64 @M2_cmacs_s1(i64 %a, i32 %b, i32 %c) {
+ %z = call i64 @llvm.hexagon.M2.cmacs.s1(i64 %a, i32 %b, i32 %c)
+ ret i64 %z
+}
+; CHECK: r1:0 += cmpy(r2, r3):<<1:sat
+
+declare i64 @llvm.hexagon.M2.cnacs.s0(i64, i32, i32)
+define i64 @M2_cnacs_s0(i64 %a, i32 %b, i32 %c) {
+ %z = call i64 @llvm.hexagon.M2.cnacs.s0(i64 %a, i32 %b, i32 %c)
+ ret i64 %z
+}
+; CHECK: r1:0 -= cmpy(r2, r3):sat
+
+declare i64 @llvm.hexagon.M2.cnacs.s1(i64, i32, i32)
+define i64 @M2_cnacs_s1(i64 %a, i32 %b, i32 %c) {
+ %z = call i64 @llvm.hexagon.M2.cnacs.s1(i64 %a, i32 %b, i32 %c)
+ ret i64 %z
+}
+; CHECK: r1:0 -= cmpy(r2, r3):<<1:sat
+
+declare i64 @llvm.hexagon.M2.cmacsc.s0(i64, i32, i32)
+define i64 @M2_cmacsc_s0(i64 %a, i32 %b, i32 %c) {
+ %z = call i64 @llvm.hexagon.M2.cmacsc.s0(i64 %a, i32 %b, i32 %c)
+ ret i64 %z
+}
+; CHECK: r1:0 += cmpy(r2, r3*):sat
+
+declare i64 @llvm.hexagon.M2.cmacsc.s1(i64, i32, i32)
+define i64 @M2_cmacsc_s1(i64 %a, i32 %b, i32 %c) {
+ %z = call i64 @llvm.hexagon.M2.cmacsc.s1(i64 %a, i32 %b, i32 %c)
+ ret i64 %z
+}
+; CHECK: r1:0 += cmpy(r2, r3*):<<1:sat
+
+declare i64 @llvm.hexagon.M2.cnacsc.s0(i64, i32, i32)
+define i64 @M2_cnacsc_s0(i64 %a, i32 %b, i32 %c) {
+ %z = call i64 @llvm.hexagon.M2.cnacsc.s0(i64 %a, i32 %b, i32 %c)
+ ret i64 %z
+}
+; CHECK: r1:0 -= cmpy(r2, r3*):sat
+
+declare i64 @llvm.hexagon.M2.cnacsc.s1(i64, i32, i32)
+define i64 @M2_cnacsc_s1(i64 %a, i32 %b, i32 %c) {
+ %z = call i64 @llvm.hexagon.M2.cnacsc.s1(i64 %a, i32 %b, i32 %c)
+ ret i64 %z
+}
+; CHECK: r1:0 -= cmpy(r2, r3*):<<1:sat
+
+; Complex multiply real or imaginary
+declare i64 @llvm.hexagon.M2.cmpyi.s0(i32, i32)
+define i64 @M2_cmpyi_s0(i32 %a, i32 %b) {
+ %z = call i64 @llvm.hexagon.M2.cmpyi.s0(i32 %a, i32 %b)
+ ret i64 %z
+}
+; CHECK: r1:0 = cmpyi(r0, r1)
+
+declare i64 @llvm.hexagon.M2.cmpyr.s0(i32, i32)
+define i64 @M2_cmpyr_s0(i32 %a, i32 %b) {
+ %z = call i64 @llvm.hexagon.M2.cmpyr.s0(i32 %a, i32 %b)
+ ret i64 %z
+}
+; CHECK: r1:0 = cmpyr(r0, r1)
+
+declare i64 @llvm.hexagon.M2.cmaci.s0(i64, i32, i32)
+define i64 @M2_cmaci_s0(i64 %a, i32 %b, i32 %c) {
+ %z = call i64 @llvm.hexagon.M2.cmaci.s0(i64 %a, i32 %b, i32 %c)
+ ret i64 %z
+}
+; CHECK: r1:0 += cmpyi(r2, r3)
+
+declare i64 @llvm.hexagon.M2.cmacr.s0(i64, i32, i32)
+define i64 @M2_cmacr_s0(i64 %a, i32 %b, i32 %c) {
+ %z = call i64 @llvm.hexagon.M2.cmacr.s0(i64 %a, i32 %b, i32 %c)
+ ret i64 %z
+}
+; CHECK: r1:0 += cmpyr(r2, r3)
+
+; Complex multiply with round and pack
+declare i32 @llvm.hexagon.M2.cmpyrs.s0(i32, i32)
+define i32 @M2_cmpyrs_s0(i32 %a, i32 %b) {
+ %z = call i32 @llvm.hexagon.M2.cmpyrs.s0(i32 %a, i32 %b)
+ ret i32 %z
+}
+; CHECK: r0 = cmpy(r0, r1):rnd:sat
+
+declare i32 @llvm.hexagon.M2.cmpyrs.s1(i32, i32)
+define i32 @M2_cmpyrs_s1(i32 %a, i32 %b) {
+ %z = call i32 @llvm.hexagon.M2.cmpyrs.s1(i32 %a, i32 %b)
+ ret i32 %z
+}
+; CHECK: r0 = cmpy(r0, r1):<<1:rnd:sat
+
+declare i32 @llvm.hexagon.M2.cmpyrsc.s0(i32, i32)
+define i32 @M2_cmpyrsc_s0(i32 %a, i32 %b) {
+ %z = call i32 @llvm.hexagon.M2.cmpyrsc.s0(i32 %a, i32 %b)
+ ret i32 %z
+}
+; CHECK: r0 = cmpy(r0, r1*):rnd:sat
+
+declare i32 @llvm.hexagon.M2.cmpyrsc.s1(i32, i32)
+define i32 @M2_cmpyrsc_s1(i32 %a, i32 %b) {
+ %z = call i32 @llvm.hexagon.M2.cmpyrsc.s1(i32 %a, i32 %b)
+ ret i32 %z
+}
+; CHECK: r0 = cmpy(r0, r1*):<<1:rnd:sat
+
+; Complex multiply 32x16
+declare i32 @llvm.hexagon.M4.cmpyi.wh(i64, i32)
+define i32 @M4_cmpyi_wh(i64 %a, i32 %b) {
+ %z = call i32 @llvm.hexagon.M4.cmpyi.wh(i64 %a, i32 %b)
+ ret i32 %z
+}
+; CHECK: r0 = cmpyiwh(r1:0, r2):<<1:rnd:sat
+
+declare i32 @llvm.hexagon.M4.cmpyi.whc(i64, i32)
+define i32 @M4_cmpyi_whc(i64 %a, i32 %b) {
+ %z = call i32 @llvm.hexagon.M4.cmpyi.whc(i64 %a, i32 %b)
+ ret i32 %z
+}
+; CHECK: r0 = cmpyiwh(r1:0, r2*):<<1:rnd:sat
+
+declare i32 @llvm.hexagon.M4.cmpyr.wh(i64, i32)
+define i32 @M4_cmpyr_wh(i64 %a, i32 %b) {
+ %z = call i32 @llvm.hexagon.M4.cmpyr.wh(i64 %a, i32 %b)
+ ret i32 %z
+}
+; CHECK: r0 = cmpyrwh(r1:0, r2):<<1:rnd:sat
+
+declare i32 @llvm.hexagon.M4.cmpyr.whc(i64, i32)
+define i32 @M4_cmpyr_whc(i64 %a, i32 %b) {
+ %z = call i32 @llvm.hexagon.M4.cmpyr.whc(i64 %a, i32 %b)
+ ret i32 %z
+}
+; CHECK: r0 = cmpyrwh(r1:0, r2*):<<1:rnd:sat
+
+; Vector complex multiply real or imaginary
+declare i64 @llvm.hexagon.M2.vcmpy.s0.sat.r(i64, i64)
+define i64 @M2_vcmpy_s0_sat_r(i64 %a, i64 %b) {
+ %z = call i64 @llvm.hexagon.M2.vcmpy.s0.sat.r(i64 %a, i64 %b)
+ ret i64 %z
+}
+; CHECK: r1:0 = vcmpyr(r1:0, r3:2):sat
+
+declare i64 @llvm.hexagon.M2.vcmpy.s1.sat.r(i64, i64)
+define i64 @M2_vcmpy_s1_sat_r(i64 %a, i64 %b) {
+ %z = call i64 @llvm.hexagon.M2.vcmpy.s1.sat.r(i64 %a, i64 %b)
+ ret i64 %z
+}
+; CHECK: r1:0 = vcmpyr(r1:0, r3:2):<<1:sat
+
+declare i64 @llvm.hexagon.M2.vcmpy.s0.sat.i(i64, i64)
+define i64 @M2_vcmpy_s0_sat_i(i64 %a, i64 %b) {
+ %z = call i64 @llvm.hexagon.M2.vcmpy.s0.sat.i(i64 %a, i64 %b)
+ ret i64 %z
+}
+; CHECK: r1:0 = vcmpyi(r1:0, r3:2):sat
+
+declare i64 @llvm.hexagon.M2.vcmpy.s1.sat.i(i64, i64)
+define i64 @M2_vcmpy_s1_sat_i(i64 %a, i64 %b) {
+ %z = call i64 @llvm.hexagon.M2.vcmpy.s1.sat.i(i64 %a, i64 %b)
+ ret i64 %z
+}
+; CHECK: r1:0 = vcmpyi(r1:0, r3:2):<<1:sat
+
+declare i64 @llvm.hexagon.M2.vcmac.s0.sat.r(i64, i64, i64)
+define i64 @M2_vcmac_s0_sat_r(i64 %a, i64 %b, i64 %c) {
+ %z = call i64 @llvm.hexagon.M2.vcmac.s0.sat.r(i64 %a, i64 %b, i64 %c)
+ ret i64 %z
+}
+; CHECK: r1:0 += vcmpyr(r3:2, r5:4):sat
+
+declare i64 @llvm.hexagon.M2.vcmac.s0.sat.i(i64, i64, i64)
+define i64 @M2_vcmac_s0_sat_i(i64 %a, i64 %b, i64 %c) {
+ %z = call i64 @llvm.hexagon.M2.vcmac.s0.sat.i(i64 %a, i64 %b, i64 %c)
+ ret i64 %z
+}
+; CHECK: r1:0 += vcmpyi(r3:2, r5:4):sat
+
+; Vector complex conjugate
+declare i64 @llvm.hexagon.A2.vconj(i64)
+define i64 @A2_vconj(i64 %a) {
+ %z = call i64 @llvm.hexagon.A2.vconj(i64 %a)
+ ret i64 %z
+}
+; CHECK: r1:0 = vconj(r1:0):sat
+
+; Vector complex rotate
+declare i64 @llvm.hexagon.S2.vcrotate(i64, i32)
+define i64 @S2_vcrotate(i64 %a, i32 %b) {
+ %z = call i64 @llvm.hexagon.S2.vcrotate(i64 %a, i32 %b)
+ ret i64 %z
+}
+; CHECK: r1:0 = vcrotate(r1:0, r2)
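+
+; Note (informative only): per the PRM, vcrotate rotates each packed
+; complex value in the source pair by a multiple of 90 degrees selected by
+; control bits in r2; the test only checks the printed mnemonic.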
+
+; Vector reduce complex multiply real or imaginary
+declare i64 @llvm.hexagon.M2.vrcmpyi.s0(i64, i64)
+define i64 @M2_vrcmpyi_s0(i64 %a, i64 %b) {
+ %z = call i64 @llvm.hexagon.M2.vrcmpyi.s0(i64 %a, i64 %b)
+ ret i64 %z
+}
+; CHECK: r1:0 = vrcmpyi(r1:0, r3:2)
+
+declare i64 @llvm.hexagon.M2.vrcmpyr.s0(i64, i64)
+define i64 @M2_vrcmpyr_s0(i64 %a, i64 %b) {
+ %z = call i64 @llvm.hexagon.M2.vrcmpyr.s0(i64 %a, i64 %b)
+ ret i64 %z
+}
+; CHECK: r1:0 = vrcmpyr(r1:0, r3:2)
+
+declare i64 @llvm.hexagon.M2.vrcmpyi.s0c(i64, i64)
+define i64 @M2_vrcmpyi_s0c(i64 %a, i64 %b) {
+ %z = call i64 @llvm.hexagon.M2.vrcmpyi.s0c(i64 %a, i64 %b)
+ ret i64 %z
+}
+; CHECK: r1:0 = vrcmpyi(r1:0, r3:2*)
+
+declare i64 @llvm.hexagon.M2.vrcmpyr.s0c(i64, i64)
+define i64 @M2_vrcmpyr_s0c(i64 %a, i64 %b) {
+ %z = call i64 @llvm.hexagon.M2.vrcmpyr.s0c(i64 %a, i64 %b)
+ ret i64 %z
+}
+; CHECK: r1:0 = vrcmpyr(r1:0, r3:2*)
+
+declare i64 @llvm.hexagon.M2.vrcmaci.s0(i64, i64, i64)
+define i64 @M2_vrcmaci_s0(i64 %a, i64 %b, i64 %c) {
+ %z = call i64 @llvm.hexagon.M2.vrcmaci.s0(i64 %a, i64 %b, i64 %c)
+ ret i64 %z
+}
+; CHECK: r1:0 += vrcmpyi(r3:2, r5:4)
+
+declare i64 @llvm.hexagon.M2.vrcmacr.s0(i64, i64, i64)
+define i64 @M2_vrcmacr_s0(i64 %a, i64 %b, i64 %c) {
+ %z = call i64 @llvm.hexagon.M2.vrcmacr.s0(i64 %a, i64 %b, i64 %c)
+ ret i64 %z
+}
+; CHECK: r1:0 += vrcmpyr(r3:2, r5:4)
+
+declare i64 @llvm.hexagon.M2.vrcmaci.s0c(i64, i64, i64)
+define i64 @M2_vrcmaci_s0c(i64 %a, i64 %b, i64 %c) {
+ %z = call i64 @llvm.hexagon.M2.vrcmaci.s0c(i64 %a, i64 %b, i64 %c)
+ ret i64 %z
+}
+; CHECK: r1:0 += vrcmpyi(r3:2, r5:4*)
+
+declare i64 @llvm.hexagon.M2.vrcmacr.s0c(i64, i64, i64)
+define i64 @M2_vrcmacr_s0c(i64 %a, i64 %b, i64 %c) {
+ %z = call i64 @llvm.hexagon.M2.vrcmacr.s0c(i64 %a, i64 %b, i64 %c)
+ ret i64 %z
+}
+; CHECK: r1:0 += vrcmpyr(r3:2, r5:4*)
+
+; Vector reduce complex rotate
+declare i64 @llvm.hexagon.S4.vrcrotate(i64, i32, i32)
+define i64 @S4_vrcrotate(i64 %a, i32 %b) {
+ %z = call i64 @llvm.hexagon.S4.vrcrotate(i64 %a, i32 %b, i32 0)
+ ret i64 %z
+}
+; CHECK: r1:0 = vrcrotate(r1:0, r2, #0)
+
+declare i64 @llvm.hexagon.S4.vrcrotate.acc(i64, i64, i32, i32)
+define i64 @S4_vrcrotate_acc(i64 %a, i64 %b, i32 %c) {
+ %z = call i64 @llvm.hexagon.S4.vrcrotate.acc(i64 %a, i64 %b, i32 %c, i32 0)
+ ret i64 %z
+}
+; CHECK: r1:0 += vrcrotate(r3:2, r4, #0)
diff --git a/test/CodeGen/Hexagon/intrinsics/xtype_fp.ll b/test/CodeGen/Hexagon/intrinsics/xtype_fp.ll
new file mode 100644
index 0000000..aef8127
--- /dev/null
+++ b/test/CodeGen/Hexagon/intrinsics/xtype_fp.ll
@@ -0,0 +1,388 @@
+; RUN: llc -march=hexagon -mcpu=hexagonv5 -O0 < %s | FileCheck %s
+; Hexagon Programmer's Reference Manual 11.10.4 XTYPE/FP
+
+; Floating point addition
+declare float @llvm.hexagon.F2.sfadd(float, float)
+define float @F2_sfadd(float %a, float %b) {
+ %z = call float @llvm.hexagon.F2.sfadd(float %a, float %b)
+ ret float %z
+}
+; CHECK: r0 = sfadd(r0, r1)
+
+; Classify floating-point value
+declare i32 @llvm.hexagon.F2.sfclass(float, i32)
+define i32 @F2_sfclass(float %a) {
+ %z = call i32 @llvm.hexagon.F2.sfclass(float %a, i32 0)
+ ret i32 %z
+}
+; CHECK: p0 = sfclass(r0, #0)
+
+declare i32 @llvm.hexagon.F2.dfclass(double, i32)
+define i32 @F2_dfclass(double %a) {
+ %z = call i32 @llvm.hexagon.F2.dfclass(double %a, i32 0)
+ ret i32 %z
+}
+; CHECK: p0 = dfclass(r1:0, #0)
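+
+; Note (informative only): the #u5 immediate of sfclass/dfclass is a bit
+; mask of IEEE value classes (zero, subnormal, infinity, NaN, ...); the
+; predicate is set if the input falls in any selected class. The #0 used
+; here keeps the test simple and always yields false.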
+
+; Compare floating-point value
+declare i32 @llvm.hexagon.F2.sfcmpge(float, float)
+define i32 @F2_sfcmpge(float %a, float %b) {
+ %z = call i32 @llvm.hexagon.F2.sfcmpge(float %a, float %b)
+ ret i32 %z
+}
+; CHECK: p0 = sfcmp.ge(r0, r1)
+
+declare i32 @llvm.hexagon.F2.sfcmpuo(float, float)
+define i32 @F2_sfcmpuo(float %a, float %b) {
+ %z = call i32 @llvm.hexagon.F2.sfcmpuo(float %a, float %b)
+ ret i32 %z
+}
+; CHECK: p0 = sfcmp.uo(r0, r1)
+
+declare i32 @llvm.hexagon.F2.sfcmpeq(float, float)
+define i32 @F2_sfcmpeq(float %a, float %b) {
+ %z = call i32 @llvm.hexagon.F2.sfcmpeq(float %a, float %b)
+ ret i32 %z
+}
+; CHECK: p0 = sfcmp.eq(r0, r1)
+
+declare i32 @llvm.hexagon.F2.sfcmpgt(float, float)
+define i32 @F2_sfcmpgt(float %a, float %b) {
+ %z = call i32 @llvm.hexagon.F2.sfcmpgt(float %a, float %b)
+ ret i32 %z
+}
+; CHECK: p0 = sfcmp.gt(r0, r1)
+
+declare i32 @llvm.hexagon.F2.dfcmpge(double, double)
+define i32 @F2_dfcmpge(double %a, double %b) {
+ %z = call i32 @llvm.hexagon.F2.dfcmpge(double %a, double %b)
+ ret i32 %z
+}
+; CHECK: p0 = dfcmp.ge(r1:0, r3:2)
+
+declare i32 @llvm.hexagon.F2.dfcmpuo(double, double)
+define i32 @F2_dfcmpuo(double %a, double %b) {
+ %z = call i32 @llvm.hexagon.F2.dfcmpuo(double %a, double %b)
+ ret i32 %z
+}
+; CHECK: p0 = dfcmp.uo(r1:0, r3:2)
+
+declare i32 @llvm.hexagon.F2.dfcmpeq(double, double)
+define i32 @F2_dfcmpeq(double %a, double %b) {
+ %z = call i32 @llvm.hexagon.F2.dfcmpeq(double %a, double %b)
+ ret i32 %z
+}
+; CHECK: p0 = dfcmp.eq(r1:0, r3:2)
+
+declare i32 @llvm.hexagon.F2.dfcmpgt(double, double)
+define i32 @F2_dfcmpgt(double %a, double %b) {
+ %z = call i32 @llvm.hexagon.F2.dfcmpgt(double %a, double %b)
+ ret i32 %z
+}
+; CHECK: p0 = dfcmp.gt(r1:0, r3:2)
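+
+; Note (informative only): sfcmp.uo/dfcmp.uo test for "unordered", i.e. the
+; predicate is set when at least one operand is a NaN; the eq/ge/gt forms
+; are the ordered IEEE comparisons.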
+
+; Convert floating-point value to other format
+declare double @llvm.hexagon.F2.conv.sf2df(float)
+define double @F2_conv_sf2df(float %a) {
+ %z = call double @llvm.hexagon.F2.conv.sf2df(float %a)
+ ret double %z
+}
+; CHECK: = convert_sf2df(r0)
+
+declare float @llvm.hexagon.F2.conv.df2sf(double)
+define float @F2_conv_df2sf(double %a) {
+ %z = call float @llvm.hexagon.F2.conv.df2sf(double %a)
+ ret float %z
+}
+; CHECK: r0 = convert_df2sf(r1:0)
+
+; Convert integer to floating-point value
+declare double @llvm.hexagon.F2.conv.ud2df(i64)
+define double @F2_conv_ud2df(i64 %a) {
+ %z = call double @llvm.hexagon.F2.conv.ud2df(i64 %a)
+ ret double %z
+}
+; CHECK: r1:0 = convert_ud2df(r1:0)
+
+declare double @llvm.hexagon.F2.conv.d2df(i64)
+define double @F2_conv_d2df(i64 %a) {
+ %z = call double @llvm.hexagon.F2.conv.d2df(i64 %a)
+ ret double %z
+}
+; CHECK: r1:0 = convert_d2df(r1:0)
+
+declare double @llvm.hexagon.F2.conv.uw2df(i32)
+define double @F2_conv_uw2df(i32 %a) {
+ %z = call double @llvm.hexagon.F2.conv.uw2df(i32 %a)
+ ret double %z
+}
+; CHECK: = convert_uw2df(r0)
+
+declare double @llvm.hexagon.F2.conv.w2df(i32)
+define double @F2_conv_w2df(i32 %a) {
+ %z = call double @llvm.hexagon.F2.conv.w2df(i32 %a)
+ ret double %z
+}
+; CHECK: = convert_w2df(r0)
+
+declare float @llvm.hexagon.F2.conv.ud2sf(i64)
+define float @F2_conv_ud2sf(i64 %a) {
+ %z = call float @llvm.hexagon.F2.conv.ud2sf(i64 %a)
+ ret float %z
+}
+; CHECK: r0 = convert_ud2sf(r1:0)
+
+declare float @llvm.hexagon.F2.conv.d2sf(i64)
+define float @F2_conv_d2sf(i64 %a) {
+ %z = call float @llvm.hexagon.F2.conv.d2sf(i64 %a)
+ ret float %z
+}
+; CHECK: r0 = convert_d2sf(r1:0)
+
+declare float @llvm.hexagon.F2.conv.uw2sf(i32)
+define float @F2_conv_uw2sf(i32 %a) {
+ %z = call float @llvm.hexagon.F2.conv.uw2sf(i32 %a)
+ ret float %z
+}
+; CHECK: r0 = convert_uw2sf(r0)
+
+declare float @llvm.hexagon.F2.conv.w2sf(i32)
+define float @F2_conv_w2sf(i32 %a) {
+ %z = call float @llvm.hexagon.F2.conv.w2sf(i32 %a)
+ ret float %z
+}
+; CHECK: r0 = convert_w2sf(r0)
+
+; Convert floating-point value to integer
+declare i64 @llvm.hexagon.F2.conv.df2d(double)
+define i64 @F2_conv_df2d(double %a) {
+ %z = call i64 @llvm.hexagon.F2.conv.df2d(double %a)
+ ret i64 %z
+}
+; CHECK: r1:0 = convert_df2d(r1:0)
+
+declare i64 @llvm.hexagon.F2.conv.df2ud(double)
+define i64 @F2_conv_df2ud(double %a) {
+ %z = call i64 @llvm.hexagon.F2.conv.df2ud(double %a)
+ ret i64 %z
+}
+; CHECK: r1:0 = convert_df2ud(r1:0)
+
+declare i64 @llvm.hexagon.F2.conv.df2d.chop(double)
+define i64 @F2_conv_df2d_chop(double %a) {
+ %z = call i64 @llvm.hexagon.F2.conv.df2d.chop(double %a)
+ ret i64 %z
+}
+; CHECK: r1:0 = convert_df2d(r1:0):chop
+
+declare i64 @llvm.hexagon.F2.conv.df2ud.chop(double)
+define i64 @F2_conv_df2ud_chop(double %a) {
+ %z = call i64 @llvm.hexagon.F2.conv.df2ud.chop(double %a)
+ ret i64 %z
+}
+; CHECK: r1:0 = convert_df2ud(r1:0):chop
+
+declare i64 @llvm.hexagon.F2.conv.sf2ud(float)
+define i64 @F2_conv_sf2ud(float %a) {
+ %z = call i64 @llvm.hexagon.F2.conv.sf2ud(float %a)
+ ret i64 %z
+}
+; CHECK: = convert_sf2ud(r0)
+
+declare i64 @llvm.hexagon.F2.conv.sf2d(float)
+define i64 @F2_conv_sf2d(float %a) {
+ %z = call i64 @llvm.hexagon.F2.conv.sf2d(float %a)
+ ret i64 %z
+}
+; CHECK: = convert_sf2d(r0)
+
+declare i64 @llvm.hexagon.F2.conv.sf2d.chop(float)
+define i64 @F2_conv_sf2d_chop(float %a) {
+ %z = call i64 @llvm.hexagon.F2.conv.sf2d.chop(float %a)
+ ret i64 %z
+}
+; CHECK: = convert_sf2d(r0):chop
+
+declare i64 @llvm.hexagon.F2.conv.sf2ud.chop(float)
+define i64 @F2_conv_sf2ud_chop(float %a) {
+ %z = call i64 @llvm.hexagon.F2.conv.sf2ud.chop(float %a)
+ ret i64 %z
+}
+; CHECK: = convert_sf2ud(r0):chop
+
+declare i32 @llvm.hexagon.F2.conv.df2uw(double)
+define i32 @F2_conv_df2uw(double %a) {
+ %z = call i32 @llvm.hexagon.F2.conv.df2uw(double %a)
+ ret i32 %z
+}
+; CHECK: r0 = convert_df2uw(r1:0)
+
+declare i32 @llvm.hexagon.F2.conv.df2w(double)
+define i32 @F2_conv_df2w(double %a) {
+ %z = call i32 @llvm.hexagon.F2.conv.df2w(double %a)
+ ret i32 %z
+}
+; CHECK: r0 = convert_df2w(r1:0)
+
+declare i32 @llvm.hexagon.F2.conv.df2w.chop(double)
+define i32 @F2_conv_df2w_chop(double %a) {
+ %z = call i32 @llvm.hexagon.F2.conv.df2w.chop(double %a)
+ ret i32 %z
+}
+; CHECK: r0 = convert_df2w(r1:0):chop
+
+declare i32 @llvm.hexagon.F2.conv.df2uw.chop(double)
+define i32 @F2_conv_df2uw_chop(double %a) {
+ %z = call i32 @llvm.hexagon.F2.conv.df2uw.chop(double %a)
+ ret i32 %z
+}
+; CHECK: r0 = convert_df2uw(r1:0):chop
+
+declare i32 @llvm.hexagon.F2.conv.sf2uw(float)
+define i32 @F2_conv_sf2uw(float %a) {
+ %z = call i32 @llvm.hexagon.F2.conv.sf2uw(float %a)
+ ret i32 %z
+}
+; CHECK: r0 = convert_sf2uw(r0)
+
+declare i32 @llvm.hexagon.F2.conv.sf2uw.chop(float)
+define i32 @F2_conv_sf2uw_chop(float %a) {
+ %z = call i32 @llvm.hexagon.F2.conv.sf2uw.chop(float %a)
+ ret i32 %z
+}
+; CHECK: r0 = convert_sf2uw(r0):chop
+
+declare i32 @llvm.hexagon.F2.conv.sf2w(float)
+define i32 @F2_conv_sf2w(float %a) {
+ %z = call i32 @llvm.hexagon.F2.conv.sf2w(float %a)
+ ret i32 %z
+}
+; CHECK: r0 = convert_sf2w(r0)
+
+declare i32 @llvm.hexagon.F2.conv.sf2w.chop(float)
+define i32 @F2_conv_sf2w_chop(float %a) {
+ %z = call i32 @llvm.hexagon.F2.conv.sf2w.chop(float %a)
+ ret i32 %z
+}
+; CHECK: r0 = convert_sf2w(r0):chop
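+
+; Note (informative only): ":chop" selects round-toward-zero (truncation)
+; for the float-to-integer conversions, while the unsuffixed forms use the
+; rounding mode in effect (round-to-nearest-even by default). E.g. 1.7
+; converts to 1 with :chop and to 2 without it.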
+
+; Floating point extreme value assistance
+declare float @llvm.hexagon.F2.sffixupr(float)
+define float @F2_sffixupr(float %a) {
+ %z = call float @llvm.hexagon.F2.sffixupr(float %a)
+ ret float %z
+}
+; CHECK: r0 = sffixupr(r0)
+
+declare float @llvm.hexagon.F2.sffixupn(float, float)
+define float @F2_sffixupn(float %a, float %b) {
+ %z = call float @llvm.hexagon.F2.sffixupn(float %a, float %b)
+ ret float %z
+}
+; CHECK: r0 = sffixupn(r0, r1)
+
+declare float @llvm.hexagon.F2.sffixupd(float, float)
+define float @F2_sffixupd(float %a, float %b) {
+ %z = call float @llvm.hexagon.F2.sffixupd(float %a, float %b)
+ ret float %z
+}
+; CHECK: r0 = sffixupd(r0, r1)
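+
+; Note (informative only): the sffixup* instructions are helpers for
+; software divide/square-root sequences; per the PRM they adjust the
+; numerator, denominator, or reciprocal-square-root input so that the
+; subsequent Newton-Raphson refinement handles denormal and extreme values.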
+
+; Floating point fused multiply-add
+declare float @llvm.hexagon.F2.sffma(float, float, float)
+define float @F2_sffma(float %a, float %b, float %c) {
+ %z = call float @llvm.hexagon.F2.sffma(float %a, float %b, float %c)
+ ret float %z
+}
+; CHECK: r0 += sfmpy(r1, r2)
+
+declare float @llvm.hexagon.F2.sffms(float, float, float)
+define float @F2_sffms(float %a, float %b, float %c) {
+ %z = call float @llvm.hexagon.F2.sffms(float %a, float %b, float %c)
+ ret float %z
+}
+; CHECK: r0 -= sfmpy(r1, r2)
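+
+; Note (informative only): sffma/sffms are fused, i.e. r1*r2 is computed
+; without an intermediate rounding before being added to or subtracted from
+; r0. With r0 = 10.0, r1 = 2.0, r2 = 3.0, the += form yields 16.0 and the
+; -= form yields 4.0.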
+
+; Floating point fused multiply-add with scaling
+declare float @llvm.hexagon.F2.sffma.sc(float, float, float, i32)
+define float @F2_sffma_sc(float %a, float %b, float %c, i32 %d) {
+ %z = call float @llvm.hexagon.F2.sffma.sc(float %a, float %b, float %c, i32 %d)
+ ret float %z
+}
+; CHECK: r0 += sfmpy(r1, r2, p0):scale
+
+; Floating point fused multiply-add for library routines
+declare float @llvm.hexagon.F2.sffma.lib(float, float, float)
+define float @F2_sffma_lib(float %a, float %b, float %c) {
+ %z = call float @llvm.hexagon.F2.sffma.lib(float %a, float %b, float %c)
+ ret float %z
+}
+; CHECK: r0 += sfmpy(r1, r2):lib
+
+declare float @llvm.hexagon.F2.sffms.lib(float, float, float)
+define float @F2_sffms_lib(float %a, float %b, float %c) {
+ %z = call float @llvm.hexagon.F2.sffms.lib(float %a, float %b, float %c)
+ ret float %z
+}
+; CHECK: r0 -= sfmpy(r1, r2):lib
+
+; Create floating-point constant
+declare float @llvm.hexagon.F2.sfimm.p(i32)
+define float @F2_sfimm_p() {
+ %z = call float @llvm.hexagon.F2.sfimm.p(i32 0)
+ ret float %z
+}
+; CHECK: r0 = sfmake(#0):pos
+
+declare float @llvm.hexagon.F2.sfimm.n(i32)
+define float @F2_sfimm_n() {
+ %z = call float @llvm.hexagon.F2.sfimm.n(i32 0)
+ ret float %z
+}
+; CHECK: r0 = sfmake(#0):neg
+
+declare double @llvm.hexagon.F2.dfimm.p(i32)
+define double @F2_dfimm_p() {
+ %z = call double @llvm.hexagon.F2.dfimm.p(i32 0)
+ ret double %z
+}
+; CHECK: r1:0 = dfmake(#0):pos
+
+declare double @llvm.hexagon.F2.dfimm.n(i32)
+define double @F2_dfimm_n() {
+ %z = call double @llvm.hexagon.F2.dfimm.n(i32 0)
+ ret double %z
+}
+; CHECK: r1:0 = dfmake(#0):neg
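+
+; Note (informative only): sfmake/dfmake materialize a limited set of
+; floating-point constants directly from a 10-bit immediate, with ":pos"
+; and ":neg" selecting the sign; the exact encoding of the immediate is
+; described in the PRM.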
+
+; Floating point maximum
+declare float @llvm.hexagon.F2.sfmax(float, float)
+define float @F2_sfmax(float %a, float %b) {
+ %z = call float @llvm.hexagon.F2.sfmax(float %a, float %b)
+ ret float %z
+}
+; CHECK: r0 = sfmax(r0, r1)
+
+; Floating point minimum
+declare float @llvm.hexagon.F2.sfmin(float, float)
+define float @F2_sfmin(float %a, float %b) {
+ %z = call float @llvm.hexagon.F2.sfmin(float %a, float %b)
+ ret float %z
+}
+; CHECK: r0 = sfmin(r0, r1)
+
+; Floating point multiply
+declare float @llvm.hexagon.F2.sfmpy(float, float)
+define float @F2_sfmpy(float %a, float %b) {
+ %z = call float @llvm.hexagon.F2.sfmpy(float %a, float %b)
+ ret float %z
+}
+; CHECK: r0 = sfmpy(r0, r1)
+
+; Floating point subtraction
+declare float @llvm.hexagon.F2.sfsub(float, float)
+define float @F2_sfsub(float %a, float %b) {
+ %z = call float @llvm.hexagon.F2.sfsub(float %a, float %b)
+ ret float %z
+}
+; CHECK: r0 = sfsub(r0, r1)
diff --git a/test/CodeGen/Hexagon/intrinsics/xtype_mpy.ll b/test/CodeGen/Hexagon/intrinsics/xtype_mpy.ll
new file mode 100644
index 0000000..6409e4e
--- /dev/null
+++ b/test/CodeGen/Hexagon/intrinsics/xtype_mpy.ll
@@ -0,0 +1,1525 @@
+; RUN: llc -march=hexagon -mcpu=hexagonv5 -O0 < %s | FileCheck %s
+; Hexagon Programmer's Reference Manual 11.10.5 XTYPE/MPY
+
+; Multiply and use lower result
+declare i32 @llvm.hexagon.M4.mpyrr.addi(i32, i32, i32)
+define i32 @M4_mpyrr_addi(i32 %a, i32 %b) {
+ %z = call i32 @llvm.hexagon.M4.mpyrr.addi(i32 0, i32 %a, i32 %b)
+ ret i32 %z
+}
+; CHECK: r0 = add(#0, mpyi(r0, r1))
+
+declare i32 @llvm.hexagon.M4.mpyri.addi(i32, i32, i32)
+define i32 @M4_mpyri_addi(i32 %a) {
+ %z = call i32 @llvm.hexagon.M4.mpyri.addi(i32 0, i32 %a, i32 0)
+ ret i32 %z
+}
+; CHECK: r0 = add(#0, mpyi(r0, #0))
+
+declare i32 @llvm.hexagon.M4.mpyri.addr.u2(i32, i32, i32)
+define i32 @M4_mpyri_addr_u2(i32 %a, i32 %b) {
+ %z = call i32 @llvm.hexagon.M4.mpyri.addr.u2(i32 %a, i32 0, i32 %b)
+ ret i32 %z
+}
+; CHECK: r0 = add(r0, mpyi(#0, r1))
+
+declare i32 @llvm.hexagon.M4.mpyri.addr(i32, i32, i32)
+define i32 @M4_mpyri_addr(i32 %a, i32 %b) {
+ %z = call i32 @llvm.hexagon.M4.mpyri.addr(i32 %a, i32 %b, i32 0)
+ ret i32 %z
+}
+; CHECK: r0 = add(r0, mpyi(r1, #0))
+
+declare i32 @llvm.hexagon.M4.mpyrr.addr(i32, i32, i32)
+define i32 @M4_mpyrr_addr(i32 %a, i32 %b, i32 %c) {
+ %z = call i32 @llvm.hexagon.M4.mpyrr.addr(i32 %a, i32 %b, i32 %c)
+ ret i32 %z
+}
+; CHECK: r1 = add(r0, mpyi(r1, r2))
+
+; Vector multiply word by signed half (32x16)
+declare i64 @llvm.hexagon.M2.mmpyl.s0(i64, i64)
+define i64 @M2_mmpyl_s0(i64 %a, i64 %b) {
+ %z = call i64 @llvm.hexagon.M2.mmpyl.s0(i64 %a, i64 %b)
+ ret i64 %z
+}
+; CHECK: r1:0 = vmpyweh(r1:0, r3:2):sat
+
+declare i64 @llvm.hexagon.M2.mmpyl.s1(i64, i64)
+define i64 @M2_mmpyl_s1(i64 %a, i64 %b) {
+ %z = call i64 @llvm.hexagon.M2.mmpyl.s1(i64 %a, i64 %b)
+ ret i64 %z
+}
+; CHECK: r1:0 = vmpyweh(r1:0, r3:2):<<1:sat
+
+declare i64 @llvm.hexagon.M2.mmpyh.s0(i64, i64)
+define i64 @M2_mmpyh_s0(i64 %a, i64 %b) {
+ %z = call i64 @llvm.hexagon.M2.mmpyh.s0(i64 %a, i64 %b)
+ ret i64 %z
+}
+; CHECK: r1:0 = vmpywoh(r1:0, r3:2):sat
+
+declare i64 @llvm.hexagon.M2.mmpyh.s1(i64, i64)
+define i64 @M2_mmpyh_s1(i64 %a, i64 %b) {
+ %z = call i64 @llvm.hexagon.M2.mmpyh.s1(i64 %a, i64 %b)
+ ret i64 %z
+}
+; CHECK: r1:0 = vmpywoh(r1:0, r3:2):<<1:sat
+
+declare i64 @llvm.hexagon.M2.mmpyl.rs0(i64, i64)
+define i64 @M2_mmpyl_rs0(i64 %a, i64 %b) {
+ %z = call i64 @llvm.hexagon.M2.mmpyl.rs0(i64 %a, i64 %b)
+ ret i64 %z
+}
+; CHECK: r1:0 = vmpyweh(r1:0, r3:2):rnd:sat
+
+declare i64 @llvm.hexagon.M2.mmpyl.rs1(i64, i64)
+define i64 @M2_mmpyl_rs1(i64 %a, i64 %b) {
+ %z = call i64 @llvm.hexagon.M2.mmpyl.rs1(i64 %a, i64 %b)
+ ret i64 %z
+}
+; CHECK: r1:0 = vmpyweh(r1:0, r3:2):<<1:rnd:sat
+
+declare i64 @llvm.hexagon.M2.mmpyh.rs0(i64, i64)
+define i64 @M2_mmpyh_rs0(i64 %a, i64 %b) {
+ %z = call i64 @llvm.hexagon.M2.mmpyh.rs0(i64 %a, i64 %b)
+ ret i64 %z
+}
+; CHECK: r1:0 = vmpywoh(r1:0, r3:2):rnd:sat
+
+declare i64 @llvm.hexagon.M2.mmpyh.rs1(i64, i64)
+define i64 @M2_mmpyh_rs1(i64 %a, i64 %b) {
+ %z = call i64 @llvm.hexagon.M2.mmpyh.rs1(i64 %a, i64 %b)
+ ret i64 %z
+}
+; CHECK: r1:0 = vmpywoh(r1:0, r3:2):<<1:rnd:sat
+
+; Vector multiply word by unsigned half (32x16)
+declare i64 @llvm.hexagon.M2.mmpyul.s0(i64, i64)
+define i64 @M2_mmpyul_s0(i64 %a, i64 %b) {
+ %z = call i64 @llvm.hexagon.M2.mmpyul.s0(i64 %a, i64 %b)
+ ret i64 %z
+}
+; CHECK: r1:0 = vmpyweuh(r1:0, r3:2):sat
+
+declare i64 @llvm.hexagon.M2.mmpyul.s1(i64, i64)
+define i64 @M2_mmpyul_s1(i64 %a, i64 %b) {
+ %z = call i64 @llvm.hexagon.M2.mmpyul.s1(i64 %a, i64 %b)
+ ret i64 %z
+}
+; CHECK: r1:0 = vmpyweuh(r1:0, r3:2):<<1:sat
+
+declare i64 @llvm.hexagon.M2.mmpyuh.s0(i64, i64)
+define i64 @M2_mmpyuh_s0(i64 %a, i64 %b) {
+ %z = call i64 @llvm.hexagon.M2.mmpyuh.s0(i64 %a, i64 %b)
+ ret i64 %z
+}
+; CHECK: r1:0 = vmpywouh(r1:0, r3:2):sat
+
+declare i64 @llvm.hexagon.M2.mmpyuh.s1(i64, i64)
+define i64 @M2_mmpyuh_s1(i64 %a, i64 %b) {
+ %z = call i64 @llvm.hexagon.M2.mmpyuh.s1(i64 %a, i64 %b)
+ ret i64 %z
+}
+; CHECK: r1:0 = vmpywouh(r1:0, r3:2):<<1:sat
+
+declare i64 @llvm.hexagon.M2.mmpyul.rs0(i64, i64)
+define i64 @M2_mmpyul_rs0(i64 %a, i64 %b) {
+ %z = call i64 @llvm.hexagon.M2.mmpyul.rs0(i64 %a, i64 %b)
+ ret i64 %z
+}
+; CHECK: r1:0 = vmpyweuh(r1:0, r3:2):rnd:sat
+
+declare i64 @llvm.hexagon.M2.mmpyul.rs1(i64, i64)
+define i64 @M2_mmpyul_rs1(i64 %a, i64 %b) {
+ %z = call i64 @llvm.hexagon.M2.mmpyul.rs1(i64 %a, i64 %b)
+ ret i64 %z
+}
+; CHECK: r1:0 = vmpyweuh(r1:0, r3:2):<<1:rnd:sat
+
+declare i64 @llvm.hexagon.M2.mmpyuh.rs0(i64, i64)
+define i64 @M2_mmpyuh_rs0(i64 %a, i64 %b) {
+ %z = call i64 @llvm.hexagon.M2.mmpyuh.rs0(i64 %a, i64 %b)
+ ret i64 %z
+}
+; CHECK: r1:0 = vmpywouh(r1:0, r3:2):rnd:sat
+
+declare i64 @llvm.hexagon.M2.mmpyuh.rs1(i64, i64)
+define i64 @M2_mmpyuh_rs1(i64 %a, i64 %b) {
+ %z = call i64 @llvm.hexagon.M2.mmpyuh.rs1(i64 %a, i64 %b)
+ ret i64 %z
+}
+; CHECK: r1:0 = vmpywouh(r1:0, r3:2):<<1:rnd:sat
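+
+; Note (informative only): in vmpyweh/vmpywoh (and the *uh forms above),
+; "e"/"o" select the even (low) or odd (high) halfword of each word of the
+; second vector, which is multiplied against the full 32-bit word of the
+; first; ":rnd" rounds and ":sat" saturates each word lane.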
+
+; Multiply signed halfwords
+declare i64 @llvm.hexagon.M2.mpyd.ll.s0(i32, i32)
+define i64 @M2_mpyd_ll_s0(i32 %a, i32 %b) {
+ %z = call i64 @llvm.hexagon.M2.mpyd.ll.s0(i32 %a, i32 %b)
+ ret i64 %z
+}
+; CHECK: r1:0 = mpy(r0.l, r1.l)
+
+declare i64 @llvm.hexagon.M2.mpyd.ll.s1(i32, i32)
+define i64 @M2_mpyd_ll_s1(i32 %a, i32 %b) {
+ %z = call i64 @llvm.hexagon.M2.mpyd.ll.s1(i32 %a, i32 %b)
+ ret i64 %z
+}
+; CHECK: r1:0 = mpy(r0.l, r1.l):<<1
+
+declare i64 @llvm.hexagon.M2.mpyd.lh.s0(i32, i32)
+define i64 @M2_mpyd_lh_s0(i32 %a, i32 %b) {
+ %z = call i64 @llvm.hexagon.M2.mpyd.lh.s0(i32 %a, i32 %b)
+ ret i64 %z
+}
+; CHECK: r1:0 = mpy(r0.l, r1.h)
+
+declare i64 @llvm.hexagon.M2.mpyd.lh.s1(i32, i32)
+define i64 @M2_mpyd_lh_s1(i32 %a, i32 %b) {
+ %z = call i64 @llvm.hexagon.M2.mpyd.lh.s1(i32 %a, i32 %b)
+ ret i64 %z
+}
+; CHECK: r1:0 = mpy(r0.l, r1.h):<<1
+
+declare i64 @llvm.hexagon.M2.mpyd.hl.s0(i32, i32)
+define i64 @M2_mpyd_hl_s0(i32 %a, i32 %b) {
+ %z = call i64 @llvm.hexagon.M2.mpyd.hl.s0(i32 %a, i32 %b)
+ ret i64 %z
+}
+; CHECK: r1:0 = mpy(r0.h, r1.l)
+
+declare i64 @llvm.hexagon.M2.mpyd.hl.s1(i32, i32)
+define i64 @M2_mpyd_hl_s1(i32 %a, i32 %b) {
+ %z = call i64 @llvm.hexagon.M2.mpyd.hl.s1(i32 %a, i32 %b)
+ ret i64 %z
+}
+; CHECK: r1:0 = mpy(r0.h, r1.l):<<1
+
+declare i64 @llvm.hexagon.M2.mpyd.hh.s0(i32, i32)
+define i64 @M2_mpyd_hh_s0(i32 %a, i32 %b) {
+ %z = call i64 @llvm.hexagon.M2.mpyd.hh.s0(i32 %a, i32 %b)
+ ret i64 %z
+}
+; CHECK: r1:0 = mpy(r0.h, r1.h)
+
+declare i64 @llvm.hexagon.M2.mpyd.hh.s1(i32, i32)
+define i64 @M2_mpyd_hh_s1(i32 %a, i32 %b) {
+ %z = call i64 @llvm.hexagon.M2.mpyd.hh.s1(i32 %a, i32 %b)
+ ret i64 %z
+}
+; CHECK: r1:0 = mpy(r0.h, r1.h):<<1
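+
+; Note (informative only): ".l"/".h" select the low or high signed halfword
+; of a register, and ":<<1" doubles the product. For example, with
+; r0 = 0x00030000 and r1 = 0x00000005, mpy(r0.h, r1.l) is 3*5 = 15 and
+; mpy(r0.h, r1.l):<<1 is 30.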
+
+declare i64 @llvm.hexagon.M2.mpyd.rnd.ll.s0(i32, i32)
+define i64 @M2_mpyd_rnd_ll_s0(i32 %a, i32 %b) {
+ %z = call i64 @llvm.hexagon.M2.mpyd.rnd.ll.s0(i32 %a, i32 %b)
+ ret i64 %z
+}
+; CHECK: r1:0 = mpy(r0.l, r1.l):rnd
+
+declare i64 @llvm.hexagon.M2.mpyd.rnd.ll.s1(i32, i32)
+define i64 @M2_mpyd_rnd_ll_s1(i32 %a, i32 %b) {
+ %z = call i64 @llvm.hexagon.M2.mpyd.rnd.ll.s1(i32 %a, i32 %b)
+ ret i64 %z
+}
+; CHECK: r1:0 = mpy(r0.l, r1.l):<<1:rnd
+
+declare i64 @llvm.hexagon.M2.mpyd.rnd.lh.s0(i32, i32)
+define i64 @M2_mpyd_rnd_lh_s0(i32 %a, i32 %b) {
+ %z = call i64 @llvm.hexagon.M2.mpyd.rnd.lh.s0(i32 %a, i32 %b)
+ ret i64 %z
+}
+; CHECK: r1:0 = mpy(r0.l, r1.h):rnd
+
+declare i64 @llvm.hexagon.M2.mpyd.rnd.lh.s1(i32, i32)
+define i64 @M2_mpyd_rnd_lh_s1(i32 %a, i32 %b) {
+ %z = call i64 @llvm.hexagon.M2.mpyd.rnd.lh.s1(i32 %a, i32 %b)
+ ret i64 %z
+}
+; CHECK: r1:0 = mpy(r0.l, r1.h):<<1:rnd
+
+declare i64 @llvm.hexagon.M2.mpyd.rnd.hl.s0(i32, i32)
+define i64 @M2_mpyd_rnd_hl_s0(i32 %a, i32 %b) {
+ %z = call i64 @llvm.hexagon.M2.mpyd.rnd.hl.s0(i32 %a, i32 %b)
+ ret i64 %z
+}
+; CHECK: r1:0 = mpy(r0.h, r1.l):rnd
+
+declare i64 @llvm.hexagon.M2.mpyd.rnd.hl.s1(i32, i32)
+define i64 @M2_mpyd_rnd_hl_s1(i32 %a, i32 %b) {
+ %z = call i64 @llvm.hexagon.M2.mpyd.rnd.hl.s1(i32 %a, i32 %b)
+ ret i64 %z
+}
+; CHECK: r1:0 = mpy(r0.h, r1.l):<<1:rnd
+
+declare i64 @llvm.hexagon.M2.mpyd.rnd.hh.s0(i32, i32)
+define i64 @M2_mpyd_rnd_hh_s0(i32 %a, i32 %b) {
+ %z = call i64 @llvm.hexagon.M2.mpyd.rnd.hh.s0(i32 %a, i32 %b)
+ ret i64 %z
+}
+; CHECK: r1:0 = mpy(r0.h, r1.h):rnd
+
+declare i64 @llvm.hexagon.M2.mpyd.rnd.hh.s1(i32, i32)
+define i64 @M2_mpyd_rnd_hh_s1(i32 %a, i32 %b) {
+ %z = call i64 @llvm.hexagon.M2.mpyd.rnd.hh.s1(i32 %a, i32 %b)
+ ret i64 %z
+}
+; CHECK: r1:0 = mpy(r0.h, r1.h):<<1:rnd
+
+declare i64 @llvm.hexagon.M2.mpyd.acc.ll.s0(i64, i32, i32)
+define i64 @M2_mpyd_acc_ll_s0(i64 %a, i32 %b, i32 %c) {
+ %z = call i64 @llvm.hexagon.M2.mpyd.acc.ll.s0(i64 %a, i32 %b, i32 %c)
+ ret i64 %z
+}
+; CHECK: r1:0 += mpy(r2.l, r3.l)
+
+declare i64 @llvm.hexagon.M2.mpyd.acc.ll.s1(i64, i32, i32)
+define i64 @M2_mpyd_acc_ll_s1(i64 %a, i32 %b, i32 %c) {
+ %z = call i64 @llvm.hexagon.M2.mpyd.acc.ll.s1(i64 %a, i32 %b, i32 %c)
+ ret i64 %z
+}
+; CHECK: r1:0 += mpy(r2.l, r3.l):<<1
+
+declare i64 @llvm.hexagon.M2.mpyd.acc.lh.s0(i64, i32, i32)
+define i64 @M2_mpyd_acc_lh_s0(i64 %a, i32 %b, i32 %c) {
+ %z = call i64 @llvm.hexagon.M2.mpyd.acc.lh.s0(i64 %a, i32 %b, i32 %c)
+ ret i64 %z
+}
+; CHECK: r1:0 += mpy(r2.l, r3.h)
+
+declare i64 @llvm.hexagon.M2.mpyd.acc.lh.s1(i64, i32, i32)
+define i64 @M2_mpyd_acc_lh_s1(i64 %a, i32 %b, i32 %c) {
+ %z = call i64 @llvm.hexagon.M2.mpyd.acc.lh.s1(i64 %a, i32 %b, i32 %c)
+ ret i64 %z
+}
+; CHECK: r1:0 += mpy(r2.l, r3.h):<<1
+
+declare i64 @llvm.hexagon.M2.mpyd.acc.hl.s0(i64, i32, i32)
+define i64 @M2_mpyd_acc_hl_s0(i64 %a, i32 %b, i32 %c) {
+ %z = call i64 @llvm.hexagon.M2.mpyd.acc.hl.s0(i64 %a, i32 %b, i32 %c)
+ ret i64 %z
+}
+; CHECK: r1:0 += mpy(r2.h, r3.l)
+
+declare i64 @llvm.hexagon.M2.mpyd.acc.hl.s1(i64, i32, i32)
+define i64 @M2_mpyd_acc_hl_s1(i64 %a, i32 %b, i32 %c) {
+ %z = call i64 @llvm.hexagon.M2.mpyd.acc.hl.s1(i64 %a, i32 %b, i32 %c)
+ ret i64 %z
+}
+; CHECK: r1:0 += mpy(r2.h, r3.l):<<1
+
+declare i64 @llvm.hexagon.M2.mpyd.acc.hh.s0(i64, i32, i32)
+define i64 @M2_mpyd_acc_hh_s0(i64 %a, i32 %b, i32 %c) {
+ %z = call i64 @llvm.hexagon.M2.mpyd.acc.hh.s0(i64 %a, i32 %b, i32 %c)
+ ret i64 %z
+}
+; CHECK: r1:0 += mpy(r2.h, r3.h)
+
+declare i64 @llvm.hexagon.M2.mpyd.acc.hh.s1(i64, i32, i32)
+define i64 @M2_mpyd_acc_hh_s1(i64 %a, i32 %b, i32 %c) {
+ %z = call i64 @llvm.hexagon.M2.mpyd.acc.hh.s1(i64 %a, i32 %b, i32 %c)
+ ret i64 %z
+}
+; CHECK: r1:0 += mpy(r2.h, r3.h):<<1
+
+declare i64 @llvm.hexagon.M2.mpyd.nac.ll.s0(i64, i32, i32)
+define i64 @M2_mpyd_nac_ll_s0(i64 %a, i32 %b, i32 %c) {
+ %z = call i64 @llvm.hexagon.M2.mpyd.nac.ll.s0(i64 %a, i32 %b, i32 %c)
+ ret i64 %z
+}
+; CHECK: r1:0 -= mpy(r2.l, r3.l)
+
+declare i64 @llvm.hexagon.M2.mpyd.nac.ll.s1(i64, i32, i32)
+define i64 @M2_mpyd_nac_ll_s1(i64 %a, i32 %b, i32 %c) {
+ %z = call i64 @llvm.hexagon.M2.mpyd.nac.ll.s1(i64 %a, i32 %b, i32 %c)
+ ret i64 %z
+}
+; CHECK: r1:0 -= mpy(r2.l, r3.l):<<1
+
+declare i64 @llvm.hexagon.M2.mpyd.nac.lh.s0(i64, i32, i32)
+define i64 @M2_mpyd_nac_lh_s0(i64 %a, i32 %b, i32 %c) {
+ %z = call i64 @llvm.hexagon.M2.mpyd.nac.lh.s0(i64 %a, i32 %b, i32 %c)
+ ret i64 %z
+}
+; CHECK: r1:0 -= mpy(r2.l, r3.h)
+
+declare i64 @llvm.hexagon.M2.mpyd.nac.lh.s1(i64, i32, i32)
+define i64 @M2_mpyd_nac_lh_s1(i64 %a, i32 %b, i32 %c) {
+ %z = call i64 @llvm.hexagon.M2.mpyd.nac.lh.s1(i64 %a, i32 %b, i32 %c)
+ ret i64 %z
+}
+; CHECK: r1:0 -= mpy(r2.l, r3.h):<<1
+
+declare i64 @llvm.hexagon.M2.mpyd.nac.hl.s0(i64, i32, i32)
+define i64 @M2_mpyd_nac_hl_s0(i64 %a, i32 %b, i32 %c) {
+ %z = call i64 @llvm.hexagon.M2.mpyd.nac.hl.s0(i64 %a, i32 %b, i32 %c)
+ ret i64 %z
+}
+; CHECK: r1:0 -= mpy(r2.h, r3.l)
+
+declare i64 @llvm.hexagon.M2.mpyd.nac.hl.s1(i64, i32, i32)
+define i64 @M2_mpyd_nac_hl_s1(i64 %a, i32 %b, i32 %c) {
+ %z = call i64 @llvm.hexagon.M2.mpyd.nac.hl.s1(i64 %a, i32 %b, i32 %c)
+ ret i64 %z
+}
+; CHECK: r1:0 -= mpy(r2.h, r3.l):<<1
+
+declare i64 @llvm.hexagon.M2.mpyd.nac.hh.s0(i64, i32, i32)
+define i64 @M2_mpyd_nac_hh_s0(i64 %a, i32 %b, i32 %c) {
+ %z = call i64 @llvm.hexagon.M2.mpyd.nac.hh.s0(i64 %a, i32 %b, i32 %c)
+ ret i64 %z
+}
+; CHECK: r1:0 -= mpy(r2.h, r3.h)
+
+declare i64 @llvm.hexagon.M2.mpyd.nac.hh.s1(i64, i32, i32)
+define i64 @M2_mpyd_nac_hh_s1(i64 %a, i32 %b, i32 %c) {
+ %z = call i64 @llvm.hexagon.M2.mpyd.nac.hh.s1(i64 %a, i32 %b, i32 %c)
+ ret i64 %z
+}
+; CHECK: r1:0 -= mpy(r2.h, r3.h):<<1
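+
+; Note (informative only): "acc" intrinsics print as "+=" and add the
+; product to the accumulator passed in the first argument, while "nac"
+; prints as "-=" and subtracts it, so e.g. M2_mpyd_nac_ll_s0(%a, %b, %c)
+; computes %a - sext(low half of %b) * sext(low half of %c).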
+
+declare i32 @llvm.hexagon.M2.mpy.ll.s0(i32, i32)
+define i32 @M2_mpy_ll_s0(i32 %a, i32 %b) {
+ %z = call i32 @llvm.hexagon.M2.mpy.ll.s0(i32 %a, i32 %b)
+ ret i32 %z
+}
+; CHECK: r0 = mpy(r0.l, r1.l)
+
+declare i32 @llvm.hexagon.M2.mpy.ll.s1(i32, i32)
+define i32 @M2_mpy_ll_s1(i32 %a, i32 %b) {
+ %z = call i32 @llvm.hexagon.M2.mpy.ll.s1(i32 %a, i32 %b)
+ ret i32 %z
+}
+; CHECK: r0 = mpy(r0.l, r1.l):<<1
+
+declare i32 @llvm.hexagon.M2.mpy.lh.s0(i32, i32)
+define i32 @M2_mpy_lh_s0(i32 %a, i32 %b) {
+ %z = call i32 @llvm.hexagon.M2.mpy.lh.s0(i32 %a, i32 %b)
+ ret i32 %z
+}
+; CHECK: r0 = mpy(r0.l, r1.h)
+
+declare i32 @llvm.hexagon.M2.mpy.lh.s1(i32, i32)
+define i32 @M2_mpy_lh_s1(i32 %a, i32 %b) {
+ %z = call i32 @llvm.hexagon.M2.mpy.lh.s1(i32 %a, i32 %b)
+ ret i32 %z
+}
+; CHECK: r0 = mpy(r0.l, r1.h):<<1
+
+declare i32 @llvm.hexagon.M2.mpy.hl.s0(i32, i32)
+define i32 @M2_mpy_hl_s0(i32 %a, i32 %b) {
+ %z = call i32 @llvm.hexagon.M2.mpy.hl.s0(i32 %a, i32 %b)
+ ret i32 %z
+}
+; CHECK: r0 = mpy(r0.h, r1.l)
+
+declare i32 @llvm.hexagon.M2.mpy.hl.s1(i32, i32)
+define i32 @M2_mpy_hl_s1(i32 %a, i32 %b) {
+ %z = call i32 @llvm.hexagon.M2.mpy.hl.s1(i32 %a, i32 %b)
+ ret i32 %z
+}
+; CHECK: r0 = mpy(r0.h, r1.l):<<1
+
+declare i32 @llvm.hexagon.M2.mpy.hh.s0(i32, i32)
+define i32 @M2_mpy_hh_s0(i32 %a, i32 %b) {
+ %z = call i32 @llvm.hexagon.M2.mpy.hh.s0(i32 %a, i32 %b)
+ ret i32 %z
+}
+; CHECK: r0 = mpy(r0.h, r1.h)
+
+declare i32 @llvm.hexagon.M2.mpy.hh.s1(i32, i32)
+define i32 @M2_mpy_hh_s1(i32 %a, i32 %b) {
+ %z = call i32 @llvm.hexagon.M2.mpy.hh.s1(i32 %a, i32 %b)
+ ret i32 %z
+}
+; CHECK: r0 = mpy(r0.h, r1.h):<<1
+
+declare i32 @llvm.hexagon.M2.mpy.sat.ll.s0(i32, i32)
+define i32 @M2_mpy_sat_ll_s0(i32 %a, i32 %b) {
+ %z = call i32 @llvm.hexagon.M2.mpy.sat.ll.s0(i32 %a, i32 %b)
+ ret i32 %z
+}
+; CHECK: r0 = mpy(r0.l, r1.l):sat
+
+declare i32 @llvm.hexagon.M2.mpy.sat.ll.s1(i32, i32)
+define i32 @M2_mpy_sat_ll_s1(i32 %a, i32 %b) {
+ %z = call i32 @llvm.hexagon.M2.mpy.sat.ll.s1(i32 %a, i32 %b)
+ ret i32 %z
+}
+; CHECK: r0 = mpy(r0.l, r1.l):<<1:sat
+
+declare i32 @llvm.hexagon.M2.mpy.sat.lh.s0(i32, i32)
+define i32 @M2_mpy_sat_lh_s0(i32 %a, i32 %b) {
+ %z = call i32 @llvm.hexagon.M2.mpy.sat.lh.s0(i32 %a, i32 %b)
+ ret i32 %z
+}
+; CHECK: r0 = mpy(r0.l, r1.h):sat
+
+declare i32 @llvm.hexagon.M2.mpy.sat.lh.s1(i32, i32)
+define i32 @M2_mpy_sat_lh_s1(i32 %a, i32 %b) {
+ %z = call i32 @llvm.hexagon.M2.mpy.sat.lh.s1(i32 %a, i32 %b)
+ ret i32 %z
+}
+; CHECK: r0 = mpy(r0.l, r1.h):<<1:sat
+
+declare i32 @llvm.hexagon.M2.mpy.sat.hl.s0(i32, i32)
+define i32 @M2_mpy_sat_hl_s0(i32 %a, i32 %b) {
+ %z = call i32 @llvm.hexagon.M2.mpy.sat.hl.s0(i32 %a, i32 %b)
+ ret i32 %z
+}
+; CHECK: r0 = mpy(r0.h, r1.l):sat
+
+declare i32 @llvm.hexagon.M2.mpy.sat.hl.s1(i32, i32)
+define i32 @M2_mpy_sat_hl_s1(i32 %a, i32 %b) {
+ %z = call i32 @llvm.hexagon.M2.mpy.sat.hl.s1(i32 %a, i32 %b)
+ ret i32 %z
+}
+; CHECK: r0 = mpy(r0.h, r1.l):<<1:sat
+
+declare i32 @llvm.hexagon.M2.mpy.sat.hh.s0(i32, i32)
+define i32 @M2_mpy_sat_hh_s0(i32 %a, i32 %b) {
+ %z = call i32 @llvm.hexagon.M2.mpy.sat.hh.s0(i32 %a, i32 %b)
+ ret i32 %z
+}
+; CHECK: r0 = mpy(r0.h, r1.h):sat
+
+declare i32 @llvm.hexagon.M2.mpy.sat.hh.s1(i32, i32)
+define i32 @M2_mpy_sat_hh_s1(i32 %a, i32 %b) {
+ %z = call i32 @llvm.hexagon.M2.mpy.sat.hh.s1(i32 %a, i32 %b)
+ ret i32 %z
+}
+; CHECK: r0 = mpy(r0.h, r1.h):<<1:sat
+
+declare i32 @llvm.hexagon.M2.mpy.sat.rnd.ll.s0(i32, i32)
+define i32 @M2_mpy_sat_rnd_ll_s0(i32 %a, i32 %b) {
+ %z = call i32 @llvm.hexagon.M2.mpy.sat.rnd.ll.s0(i32 %a, i32 %b)
+ ret i32 %z
+}
+; CHECK: r0 = mpy(r0.l, r1.l):rnd:sat
+
+declare i32 @llvm.hexagon.M2.mpy.sat.rnd.ll.s1(i32, i32)
+define i32 @M2_mpy_sat_rnd_ll_s1(i32 %a, i32 %b) {
+ %z = call i32 @llvm.hexagon.M2.mpy.sat.rnd.ll.s1(i32 %a, i32 %b)
+ ret i32 %z
+}
+; CHECK: r0 = mpy(r0.l, r1.l):<<1:rnd:sat
+
+declare i32 @llvm.hexagon.M2.mpy.sat.rnd.lh.s0(i32, i32)
+define i32 @M2_mpy_sat_rnd_lh_s0(i32 %a, i32 %b) {
+ %z = call i32 @llvm.hexagon.M2.mpy.sat.rnd.lh.s0(i32 %a, i32 %b)
+ ret i32 %z
+}
+; CHECK: r0 = mpy(r0.l, r1.h):rnd:sat
+
+declare i32 @llvm.hexagon.M2.mpy.sat.rnd.lh.s1(i32, i32)
+define i32 @M2_mpy_sat_rnd_lh_s1(i32 %a, i32 %b) {
+ %z = call i32 @llvm.hexagon.M2.mpy.sat.rnd.lh.s1(i32 %a, i32 %b)
+ ret i32 %z
+}
+; CHECK: r0 = mpy(r0.l, r1.h):<<1:rnd:sat
+
+declare i32 @llvm.hexagon.M2.mpy.sat.rnd.hl.s0(i32, i32)
+define i32 @M2_mpy_sat_rnd_hl_s0(i32 %a, i32 %b) {
+ %z = call i32 @llvm.hexagon.M2.mpy.sat.rnd.hl.s0(i32 %a, i32 %b)
+ ret i32 %z
+}
+; CHECK: r0 = mpy(r0.h, r1.l):rnd:sat
+
+declare i32 @llvm.hexagon.M2.mpy.sat.rnd.hl.s1(i32, i32)
+define i32 @M2_mpy_sat_rnd_hl_s1(i32 %a, i32 %b) {
+ %z = call i32 @llvm.hexagon.M2.mpy.sat.rnd.hl.s1(i32 %a, i32 %b)
+ ret i32 %z
+}
+; CHECK: r0 = mpy(r0.h, r1.l):<<1:rnd:sat
+
+declare i32 @llvm.hexagon.M2.mpy.sat.rnd.hh.s0(i32, i32)
+define i32 @M2_mpy_sat_rnd_hh_s0(i32 %a, i32 %b) {
+ %z = call i32 @llvm.hexagon.M2.mpy.sat.rnd.hh.s0(i32 %a, i32 %b)
+ ret i32 %z
+}
+; CHECK: r0 = mpy(r0.h, r1.h):rnd:sat
+
+declare i32 @llvm.hexagon.M2.mpy.sat.rnd.hh.s1(i32, i32)
+define i32 @M2_mpy_sat_rnd_hh_s1(i32 %a, i32 %b) {
+ %z = call i32 @llvm.hexagon.M2.mpy.sat.rnd.hh.s1(i32 %a, i32 %b)
+ ret i32 %z
+}
+; CHECK: r0 = mpy(r0.h, r1.h):<<1:rnd:sat
+
+declare i32 @llvm.hexagon.M2.mpy.acc.ll.s0(i32, i32, i32)
+define i32 @M2_mpy_acc_ll_s0(i32 %a, i32 %b, i32 %c) {
+ %z = call i32 @llvm.hexagon.M2.mpy.acc.ll.s0(i32 %a, i32 %b, i32 %c)
+ ret i32 %z
+}
+; CHECK: r0 += mpy(r1.l, r2.l)
+
+declare i32 @llvm.hexagon.M2.mpy.acc.ll.s1(i32, i32, i32)
+define i32 @M2_mpy_acc_ll_s1(i32 %a, i32 %b, i32 %c) {
+ %z = call i32 @llvm.hexagon.M2.mpy.acc.ll.s1(i32 %a, i32 %b, i32 %c)
+ ret i32 %z
+}
+; CHECK: r0 += mpy(r1.l, r2.l):<<1
+
+declare i32 @llvm.hexagon.M2.mpy.acc.lh.s0(i32, i32, i32)
+define i32 @M2_mpy_acc_lh_s0(i32 %a, i32 %b, i32 %c) {
+ %z = call i32 @llvm.hexagon.M2.mpy.acc.lh.s0(i32 %a, i32 %b, i32 %c)
+ ret i32 %z
+}
+; CHECK: r0 += mpy(r1.l, r2.h)
+
+declare i32 @llvm.hexagon.M2.mpy.acc.lh.s1(i32, i32, i32)
+define i32 @M2_mpy_acc_lh_s1(i32 %a, i32 %b, i32 %c) {
+ %z = call i32 @llvm.hexagon.M2.mpy.acc.lh.s1(i32 %a, i32 %b, i32 %c)
+ ret i32 %z
+}
+; CHECK: r0 += mpy(r1.l, r2.h):<<1
+
+declare i32 @llvm.hexagon.M2.mpy.acc.hl.s0(i32, i32, i32)
+define i32 @M2_mpy_acc_hl_s0(i32 %a, i32 %b, i32 %c) {
+ %z = call i32 @llvm.hexagon.M2.mpy.acc.hl.s0(i32 %a, i32 %b, i32 %c)
+ ret i32 %z
+}
+; CHECK: r0 += mpy(r1.h, r2.l)
+
+declare i32 @llvm.hexagon.M2.mpy.acc.hl.s1(i32, i32, i32)
+define i32 @M2_mpy_acc_hl_s1(i32 %a, i32 %b, i32 %c) {
+ %z = call i32 @llvm.hexagon.M2.mpy.acc.hl.s1(i32 %a, i32 %b, i32 %c)
+ ret i32 %z
+}
+; CHECK: r0 += mpy(r1.h, r2.l):<<1
+
+declare i32 @llvm.hexagon.M2.mpy.acc.hh.s0(i32, i32, i32)
+define i32 @M2_mpy_acc_hh_s0(i32 %a, i32 %b, i32 %c) {
+ %z = call i32 @llvm.hexagon.M2.mpy.acc.hh.s0(i32 %a, i32 %b, i32 %c)
+ ret i32 %z
+}
+; CHECK: r0 += mpy(r1.h, r2.h)
+
+declare i32 @llvm.hexagon.M2.mpy.acc.hh.s1(i32, i32, i32)
+define i32 @M2_mpy_acc_hh_s1(i32 %a, i32 %b, i32 %c) {
+ %z = call i32 @llvm.hexagon.M2.mpy.acc.hh.s1(i32 %a, i32 %b, i32 %c)
+ ret i32 %z
+}
+; CHECK: r0 += mpy(r1.h, r2.h):<<1
+
+declare i32 @llvm.hexagon.M2.mpy.acc.sat.ll.s0(i32, i32, i32)
+define i32 @M2_mpy_acc_sat_ll_s0(i32 %a, i32 %b, i32 %c) {
+ %z = call i32 @llvm.hexagon.M2.mpy.acc.sat.ll.s0(i32 %a, i32 %b, i32 %c)
+ ret i32 %z
+}
+; CHECK: r0 += mpy(r1.l, r2.l):sat
+
+declare i32 @llvm.hexagon.M2.mpy.acc.sat.ll.s1(i32, i32, i32)
+define i32 @M2_mpy_acc_sat_ll_s1(i32 %a, i32 %b, i32 %c) {
+ %z = call i32 @llvm.hexagon.M2.mpy.acc.sat.ll.s1(i32 %a, i32 %b, i32 %c)
+ ret i32 %z
+}
+; CHECK: r0 += mpy(r1.l, r2.l):<<1:sat
+
+declare i32 @llvm.hexagon.M2.mpy.acc.sat.lh.s0(i32, i32, i32)
+define i32 @M2_mpy_acc_sat_lh_s0(i32 %a, i32 %b, i32 %c) {
+ %z = call i32 @llvm.hexagon.M2.mpy.acc.sat.lh.s0(i32 %a, i32 %b, i32 %c)
+ ret i32 %z
+}
+; CHECK: r0 += mpy(r1.l, r2.h):sat
+
+declare i32 @llvm.hexagon.M2.mpy.acc.sat.lh.s1(i32, i32, i32)
+define i32 @M2_mpy_acc_sat_lh_s1(i32 %a, i32 %b, i32 %c) {
+ %z = call i32 @llvm.hexagon.M2.mpy.acc.sat.lh.s1(i32 %a, i32 %b, i32 %c)
+ ret i32 %z
+}
+; CHECK: r0 += mpy(r1.l, r2.h):<<1:sat
+
+declare i32 @llvm.hexagon.M2.mpy.acc.sat.hl.s0(i32, i32, i32)
+define i32 @M2_mpy_acc_sat_hl_s0(i32 %a, i32 %b, i32 %c) {
+ %z = call i32 @llvm.hexagon.M2.mpy.acc.sat.hl.s0(i32 %a, i32 %b, i32 %c)
+ ret i32 %z
+}
+; CHECK: r0 += mpy(r1.h, r2.l):sat
+
+declare i32 @llvm.hexagon.M2.mpy.acc.sat.hl.s1(i32, i32, i32)
+define i32 @M2_mpy_acc_sat_hl_s1(i32 %a, i32 %b, i32 %c) {
+ %z = call i32 @llvm.hexagon.M2.mpy.acc.sat.hl.s1(i32 %a, i32 %b, i32 %c)
+ ret i32 %z
+}
+; CHECK: r0 += mpy(r1.h, r2.l):<<1:sat
+
+declare i32 @llvm.hexagon.M2.mpy.acc.sat.hh.s0(i32, i32, i32)
+define i32 @M2_mpy_acc_sat_hh_s0(i32 %a, i32 %b, i32 %c) {
+ %z = call i32 @llvm.hexagon.M2.mpy.acc.sat.hh.s0(i32 %a, i32 %b, i32 %c)
+ ret i32 %z
+}
+; CHECK: r0 += mpy(r1.h, r2.h):sat
+
+declare i32 @llvm.hexagon.M2.mpy.acc.sat.hh.s1(i32, i32, i32)
+define i32 @M2_mpy_acc_sat_hh_s1(i32 %a, i32 %b, i32 %c) {
+ %z = call i32 @llvm.hexagon.M2.mpy.acc.sat.hh.s1(i32 %a, i32 %b, i32 %c)
+ ret i32 %z
+}
+; CHECK: r0 += mpy(r1.h, r2.h):<<1:sat
+
+declare i32 @llvm.hexagon.M2.mpy.nac.ll.s0(i32, i32, i32)
+define i32 @M2_mpy_nac_ll_s0(i32 %a, i32 %b, i32 %c) {
+ %z = call i32 @llvm.hexagon.M2.mpy.nac.ll.s0(i32 %a, i32 %b, i32 %c)
+ ret i32 %z
+}
+; CHECK: r0 -= mpy(r1.l, r2.l)
+
+declare i32 @llvm.hexagon.M2.mpy.nac.ll.s1(i32, i32, i32)
+define i32 @M2_mpy_nac_ll_s1(i32 %a, i32 %b, i32 %c) {
+ %z = call i32 @llvm.hexagon.M2.mpy.nac.ll.s1(i32 %a, i32 %b, i32 %c)
+ ret i32 %z
+}
+; CHECK: r0 -= mpy(r1.l, r2.l):<<1
+
+declare i32 @llvm.hexagon.M2.mpy.nac.lh.s0(i32, i32, i32)
+define i32 @M2_mpy_nac_lh_s0(i32 %a, i32 %b, i32 %c) {
+ %z = call i32 @llvm.hexagon.M2.mpy.nac.lh.s0(i32 %a, i32 %b, i32 %c)
+ ret i32 %z
+}
+; CHECK: r0 -= mpy(r1.l, r2.h)
+
+declare i32 @llvm.hexagon.M2.mpy.nac.lh.s1(i32, i32, i32)
+define i32 @M2_mpy_nac_lh_s1(i32 %a, i32 %b, i32 %c) {
+ %z = call i32 @llvm.hexagon.M2.mpy.nac.lh.s1(i32 %a, i32 %b, i32 %c)
+ ret i32 %z
+}
+; CHECK: r0 -= mpy(r1.l, r2.h):<<1
+
+declare i32 @llvm.hexagon.M2.mpy.nac.hl.s0(i32, i32, i32)
+define i32 @M2_mpy_nac_hl_s0(i32 %a, i32 %b, i32 %c) {
+ %z = call i32 @llvm.hexagon.M2.mpy.nac.hl.s0(i32 %a, i32 %b, i32 %c)
+ ret i32 %z
+}
+; CHECK: r0 -= mpy(r1.h, r2.l)
+
+declare i32 @llvm.hexagon.M2.mpy.nac.hl.s1(i32, i32, i32)
+define i32 @M2_mpy_nac_hl_s1(i32 %a, i32 %b, i32 %c) {
+ %z = call i32 @llvm.hexagon.M2.mpy.nac.hl.s1(i32 %a, i32 %b, i32 %c)
+ ret i32 %z
+}
+; CHECK: r0 -= mpy(r1.h, r2.l):<<1
+
+declare i32 @llvm.hexagon.M2.mpy.nac.hh.s0(i32, i32, i32)
+define i32 @M2_mpy_nac_hh_s0(i32 %a, i32 %b, i32 %c) {
+ %z = call i32 @llvm.hexagon.M2.mpy.nac.hh.s0(i32 %a, i32 %b, i32 %c)
+ ret i32 %z
+}
+; CHECK: r0 -= mpy(r1.h, r2.h)
+
+declare i32 @llvm.hexagon.M2.mpy.nac.hh.s1(i32, i32, i32)
+define i32 @M2_mpy_nac_hh_s1(i32 %a, i32 %b, i32 %c) {
+ %z = call i32 @llvm.hexagon.M2.mpy.nac.hh.s1(i32 %a, i32 %b, i32 %c)
+ ret i32 %z
+}
+; CHECK: r0 -= mpy(r1.h, r2.h):<<1
+
+declare i32 @llvm.hexagon.M2.mpy.nac.sat.ll.s0(i32, i32, i32)
+define i32 @M2_mpy_nac_sat_ll_s0(i32 %a, i32 %b, i32 %c) {
+ %z = call i32 @llvm.hexagon.M2.mpy.nac.sat.ll.s0(i32 %a, i32 %b, i32 %c)
+ ret i32 %z
+}
+; CHECK: r0 -= mpy(r1.l, r2.l):sat
+
+declare i32 @llvm.hexagon.M2.mpy.nac.sat.ll.s1(i32, i32, i32)
+define i32 @M2_mpy_nac_sat_ll_s1(i32 %a, i32 %b, i32 %c) {
+ %z = call i32 @llvm.hexagon.M2.mpy.nac.sat.ll.s1(i32 %a, i32 %b, i32 %c)
+ ret i32 %z
+}
+; CHECK: r0 -= mpy(r1.l, r2.l):<<1:sat
+
+declare i32 @llvm.hexagon.M2.mpy.nac.sat.lh.s0(i32, i32, i32)
+define i32 @M2_mpy_nac_sat_lh_s0(i32 %a, i32 %b, i32 %c) {
+ %z = call i32 @llvm.hexagon.M2.mpy.nac.sat.lh.s0(i32 %a, i32 %b, i32 %c)
+ ret i32 %z
+}
+; CHECK: r0 -= mpy(r1.l, r2.h):sat
+
+declare i32 @llvm.hexagon.M2.mpy.nac.sat.lh.s1(i32, i32, i32)
+define i32 @M2_mpy_nac_sat_lh_s1(i32 %a, i32 %b, i32 %c) {
+ %z = call i32 @llvm.hexagon.M2.mpy.nac.sat.lh.s1(i32 %a, i32 %b, i32 %c)
+ ret i32 %z
+}
+; CHECK: r0 -= mpy(r1.l, r2.h):<<1:sat
+
+declare i32 @llvm.hexagon.M2.mpy.nac.sat.hl.s0(i32, i32, i32)
+define i32 @M2_mpy_nac_sat_hl_s0(i32 %a, i32 %b, i32 %c) {
+ %z = call i32 @llvm.hexagon.M2.mpy.nac.sat.hl.s0(i32 %a, i32 %b, i32 %c)
+ ret i32 %z
+}
+; CHECK: r0 -= mpy(r1.h, r2.l):sat
+
+declare i32 @llvm.hexagon.M2.mpy.nac.sat.hl.s1(i32, i32, i32)
+define i32 @M2_mpy_nac_sat_hl_s1(i32 %a, i32 %b, i32 %c) {
+ %z = call i32 @llvm.hexagon.M2.mpy.nac.sat.hl.s1(i32 %a, i32 %b, i32 %c)
+ ret i32 %z
+}
+; CHECK: r0 -= mpy(r1.h, r2.l):<<1:sat
+
+declare i32 @llvm.hexagon.M2.mpy.nac.sat.hh.s0(i32, i32, i32)
+define i32 @M2_mpy_nac_sat_hh_s0(i32 %a, i32 %b, i32 %c) {
+ %z = call i32 @llvm.hexagon.M2.mpy.nac.sat.hh.s0(i32 %a, i32 %b, i32 %c)
+ ret i32 %z
+}
+; CHECK: r0 -= mpy(r1.h, r2.h):sat
+
+declare i32 @llvm.hexagon.M2.mpy.nac.sat.hh.s1(i32, i32, i32)
+define i32 @M2_mpy_nac_sat_hh_s1(i32 %a, i32 %b, i32 %c) {
+ %z = call i32 @llvm.hexagon.M2.mpy.nac.sat.hh.s1(i32 %a, i32 %b, i32 %c)
+ ret i32 %z
+}
+; CHECK: r0 -= mpy(r1.h, r2.h):<<1:sat
+
+; Multiply unsigned halfwords
+declare i64 @llvm.hexagon.M2.mpyud.ll.s0(i32, i32)
+define i64 @M2_mpyud_ll_s0(i32 %a, i32 %b) {
+ %z = call i64 @llvm.hexagon.M2.mpyud.ll.s0(i32 %a, i32 %b)
+ ret i64 %z
+}
+; CHECK: r1:0 = mpyu(r0.l, r1.l)
+
+declare i64 @llvm.hexagon.M2.mpyud.ll.s1(i32, i32)
+define i64 @M2_mpyud_ll_s1(i32 %a, i32 %b) {
+ %z = call i64 @llvm.hexagon.M2.mpyud.ll.s1(i32 %a, i32 %b)
+ ret i64 %z
+}
+; CHECK: r1:0 = mpyu(r0.l, r1.l):<<1
+
+declare i64 @llvm.hexagon.M2.mpyud.lh.s0(i32, i32)
+define i64 @M2_mpyud_lh_s0(i32 %a, i32 %b) {
+ %z = call i64 @llvm.hexagon.M2.mpyud.lh.s0(i32 %a, i32 %b)
+ ret i64 %z
+}
+; CHECK: r1:0 = mpyu(r0.l, r1.h)
+
+declare i64 @llvm.hexagon.M2.mpyud.lh.s1(i32, i32)
+define i64 @M2_mpyud_lh_s1(i32 %a, i32 %b) {
+ %z = call i64 @llvm.hexagon.M2.mpyud.lh.s1(i32 %a, i32 %b)
+ ret i64 %z
+}
+; CHECK: r1:0 = mpyu(r0.l, r1.h):<<1
+
+declare i64 @llvm.hexagon.M2.mpyud.hl.s0(i32, i32)
+define i64 @M2_mpyud_hl_s0(i32 %a, i32 %b) {
+ %z = call i64 @llvm.hexagon.M2.mpyud.hl.s0(i32 %a, i32 %b)
+ ret i64 %z
+}
+; CHECK: r1:0 = mpyu(r0.h, r1.l)
+
+declare i64 @llvm.hexagon.M2.mpyud.hl.s1(i32, i32)
+define i64 @M2_mpyud_hl_s1(i32 %a, i32 %b) {
+ %z = call i64 @llvm.hexagon.M2.mpyud.hl.s1(i32 %a, i32 %b)
+ ret i64 %z
+}
+; CHECK: r1:0 = mpyu(r0.h, r1.l):<<1
+
+declare i64 @llvm.hexagon.M2.mpyud.hh.s0(i32, i32)
+define i64 @M2_mpyud_hh_s0(i32 %a, i32 %b) {
+ %z = call i64 @llvm.hexagon.M2.mpyud.hh.s0(i32 %a, i32 %b)
+ ret i64 %z
+}
+; CHECK: r1:0 = mpyu(r0.h, r1.h)
+
+declare i64 @llvm.hexagon.M2.mpyud.hh.s1(i32, i32)
+define i64 @M2_mpyud_hh_s1(i32 %a, i32 %b) {
+ %z = call i64 @llvm.hexagon.M2.mpyud.hh.s1(i32 %a, i32 %b)
+ ret i64 %z
+}
+; CHECK: r1:0 = mpyu(r0.h, r1.h):<<1
+
+declare i64 @llvm.hexagon.M2.mpyud.acc.ll.s0(i64, i32, i32)
+define i64 @M2_mpyud_acc_ll_s0(i64 %a, i32 %b, i32 %c) {
+ %z = call i64 @llvm.hexagon.M2.mpyud.acc.ll.s0(i64 %a, i32 %b, i32 %c)
+ ret i64 %z
+}
+; CHECK: r1:0 += mpyu(r2.l, r3.l)
+
+declare i64 @llvm.hexagon.M2.mpyud.acc.ll.s1(i64, i32, i32)
+define i64 @M2_mpyud_acc_ll_s1(i64 %a, i32 %b, i32 %c) {
+ %z = call i64 @llvm.hexagon.M2.mpyud.acc.ll.s1(i64 %a, i32 %b, i32 %c)
+ ret i64 %z
+}
+; CHECK: r1:0 += mpyu(r2.l, r3.l):<<1
+
+declare i64 @llvm.hexagon.M2.mpyud.acc.lh.s0(i64, i32, i32)
+define i64 @M2_mpyud_acc_lh_s0(i64 %a, i32 %b, i32 %c) {
+ %z = call i64 @llvm.hexagon.M2.mpyud.acc.lh.s0(i64 %a, i32 %b, i32 %c)
+ ret i64 %z
+}
+; CHECK: r1:0 += mpyu(r2.l, r3.h)
+
+declare i64 @llvm.hexagon.M2.mpyud.acc.lh.s1(i64, i32, i32)
+define i64 @M2_mpyud_acc_lh_s1(i64 %a, i32 %b, i32 %c) {
+ %z = call i64 @llvm.hexagon.M2.mpyud.acc.lh.s1(i64 %a, i32 %b, i32 %c)
+ ret i64 %z
+}
+; CHECK: r1:0 += mpyu(r2.l, r3.h):<<1
+
+declare i64 @llvm.hexagon.M2.mpyud.acc.hl.s0(i64, i32, i32)
+define i64 @M2_mpyud_acc_hl_s0(i64 %a, i32 %b, i32 %c) {
+ %z = call i64 @llvm.hexagon.M2.mpyud.acc.hl.s0(i64 %a, i32 %b, i32 %c)
+ ret i64 %z
+}
+; CHECK: r1:0 += mpyu(r2.h, r3.l)
+
+declare i64 @llvm.hexagon.M2.mpyud.acc.hl.s1(i64, i32, i32)
+define i64 @M2_mpyud_acc_hl_s1(i64 %a, i32 %b, i32 %c) {
+ %z = call i64 @llvm.hexagon.M2.mpyud.acc.hl.s1(i64 %a, i32 %b, i32 %c)
+ ret i64 %z
+}
+; CHECK: r1:0 += mpyu(r2.h, r3.l):<<1
+
+declare i64 @llvm.hexagon.M2.mpyud.acc.hh.s0(i64, i32, i32)
+define i64 @M2_mpyud_acc_hh_s0(i64 %a, i32 %b, i32 %c) {
+ %z = call i64 @llvm.hexagon.M2.mpyud.acc.hh.s0(i64 %a, i32 %b, i32 %c)
+ ret i64 %z
+}
+; CHECK: r1:0 += mpyu(r2.h, r3.h)
+
+declare i64 @llvm.hexagon.M2.mpyud.acc.hh.s1(i64, i32, i32)
+define i64 @M2_mpyud_acc_hh_s1(i64 %a, i32 %b, i32 %c) {
+ %z = call i64 @llvm.hexagon.M2.mpyud.acc.hh.s1(i64 %a, i32 %b, i32 %c)
+ ret i64 %z
+}
+; CHECK: r1:0 += mpyu(r2.h, r3.h):<<1
+
+declare i64 @llvm.hexagon.M2.mpyud.nac.ll.s0(i64, i32, i32)
+define i64 @M2_mpyud_nac_ll_s0(i64 %a, i32 %b, i32 %c) {
+ %z = call i64 @llvm.hexagon.M2.mpyud.nac.ll.s0(i64 %a, i32 %b, i32 %c)
+ ret i64 %z
+}
+; CHECK: r1:0 -= mpyu(r2.l, r3.l)
+
+declare i64 @llvm.hexagon.M2.mpyud.nac.ll.s1(i64, i32, i32)
+define i64 @M2_mpyud_nac_ll_s1(i64 %a, i32 %b, i32 %c) {
+ %z = call i64 @llvm.hexagon.M2.mpyud.nac.ll.s1(i64 %a, i32 %b, i32 %c)
+ ret i64 %z
+}
+; CHECK: r1:0 -= mpyu(r2.l, r3.l):<<1
+
+declare i64 @llvm.hexagon.M2.mpyud.nac.lh.s0(i64, i32, i32)
+define i64 @M2_mpyud_nac_lh_s0(i64 %a, i32 %b, i32 %c) {
+ %z = call i64 @llvm.hexagon.M2.mpyud.nac.lh.s0(i64 %a, i32 %b, i32 %c)
+ ret i64 %z
+}
+; CHECK: r1:0 -= mpyu(r2.l, r3.h)
+
+declare i64 @llvm.hexagon.M2.mpyud.nac.lh.s1(i64, i32, i32)
+define i64 @M2_mpyud_nac_lh_s1(i64 %a, i32 %b, i32 %c) {
+ %z = call i64 @llvm.hexagon.M2.mpyud.nac.lh.s1(i64 %a, i32 %b, i32 %c)
+ ret i64 %z
+}
+; CHECK: r1:0 -= mpyu(r2.l, r3.h):<<1
+
+declare i64 @llvm.hexagon.M2.mpyud.nac.hl.s0(i64, i32, i32)
+define i64 @M2_mpyud_nac_hl_s0(i64 %a, i32 %b, i32 %c) {
+ %z = call i64 @llvm.hexagon.M2.mpyud.nac.hl.s0(i64 %a, i32 %b, i32 %c)
+ ret i64 %z
+}
+; CHECK: r1:0 -= mpyu(r2.h, r3.l)
+
+declare i64 @llvm.hexagon.M2.mpyud.nac.hl.s1(i64, i32, i32)
+define i64 @M2_mpyud_nac_hl_s1(i64 %a, i32 %b, i32 %c) {
+ %z = call i64 @llvm.hexagon.M2.mpyud.nac.hl.s1(i64 %a, i32 %b, i32 %c)
+ ret i64 %z
+}
+; CHECK: r1:0 -= mpyu(r2.h, r3.l):<<1
+
+declare i64 @llvm.hexagon.M2.mpyud.nac.hh.s0(i64, i32, i32)
+define i64 @M2_mpyud_nac_hh_s0(i64 %a, i32 %b, i32 %c) {
+ %z = call i64 @llvm.hexagon.M2.mpyud.nac.hh.s0(i64 %a, i32 %b, i32 %c)
+ ret i64 %z
+}
+; CHECK: r1:0 -= mpyu(r2.h, r3.h)
+
+declare i64 @llvm.hexagon.M2.mpyud.nac.hh.s1(i64, i32, i32)
+define i64 @M2_mpyud_nac_hh_s1(i64 %a, i32 %b, i32 %c) {
+ %z = call i64 @llvm.hexagon.M2.mpyud.nac.hh.s1(i64 %a, i32 %b, i32 %c)
+ ret i64 %z
+}
+; CHECK: r1:0 -= mpyu(r2.h, r3.h):<<1
+
+declare i32 @llvm.hexagon.M2.mpyu.ll.s0(i32, i32)
+define i32 @M2_mpyu_ll_s0(i32 %a, i32 %b) {
+ %z = call i32 @llvm.hexagon.M2.mpyu.ll.s0(i32 %a, i32 %b)
+ ret i32 %z
+}
+; CHECK: r0 = mpyu(r0.l, r1.l)
+
+declare i32 @llvm.hexagon.M2.mpyu.ll.s1(i32, i32)
+define i32 @M2_mpyu_ll_s1(i32 %a, i32 %b) {
+ %z = call i32 @llvm.hexagon.M2.mpyu.ll.s1(i32 %a, i32 %b)
+ ret i32 %z
+}
+; CHECK: r0 = mpyu(r0.l, r1.l):<<1
+
+declare i32 @llvm.hexagon.M2.mpyu.lh.s0(i32, i32)
+define i32 @M2_mpyu_lh_s0(i32 %a, i32 %b) {
+ %z = call i32 @llvm.hexagon.M2.mpyu.lh.s0(i32 %a, i32 %b)
+ ret i32 %z
+}
+; CHECK: r0 = mpyu(r0.l, r1.h)
+
+declare i32 @llvm.hexagon.M2.mpyu.lh.s1(i32, i32)
+define i32 @M2_mpyu_lh_s1(i32 %a, i32 %b) {
+ %z = call i32 @llvm.hexagon.M2.mpyu.lh.s1(i32 %a, i32 %b)
+ ret i32 %z
+}
+; CHECK: r0 = mpyu(r0.l, r1.h):<<1
+
+declare i32 @llvm.hexagon.M2.mpyu.hl.s0(i32, i32)
+define i32 @M2_mpyu_hl_s0(i32 %a, i32 %b) {
+ %z = call i32 @llvm.hexagon.M2.mpyu.hl.s0(i32 %a, i32 %b)
+ ret i32 %z
+}
+; CHECK: r0 = mpyu(r0.h, r1.l)
+
+declare i32 @llvm.hexagon.M2.mpyu.hl.s1(i32, i32)
+define i32 @M2_mpyu_hl_s1(i32 %a, i32 %b) {
+ %z = call i32 @llvm.hexagon.M2.mpyu.hl.s1(i32 %a, i32 %b)
+ ret i32 %z
+}
+; CHECK: r0 = mpyu(r0.h, r1.l):<<1
+
+declare i32 @llvm.hexagon.M2.mpyu.hh.s0(i32, i32)
+define i32 @M2_mpyu_hh_s0(i32 %a, i32 %b) {
+ %z = call i32 @llvm.hexagon.M2.mpyu.hh.s0(i32 %a, i32 %b)
+ ret i32 %z
+}
+; CHECK: r0 = mpyu(r0.h, r1.h)
+
+declare i32 @llvm.hexagon.M2.mpyu.hh.s1(i32, i32)
+define i32 @M2_mpyu_hh_s1(i32 %a, i32 %b) {
+ %z = call i32 @llvm.hexagon.M2.mpyu.hh.s1(i32 %a, i32 %b)
+ ret i32 %z
+}
+; CHECK: r0 = mpyu(r0.h, r1.h):<<1
+
+declare i32 @llvm.hexagon.M2.mpyu.acc.ll.s0(i32, i32, i32)
+define i32 @M2_mpyu_acc_ll_s0(i32 %a, i32 %b, i32 %c) {
+ %z = call i32 @llvm.hexagon.M2.mpyu.acc.ll.s0(i32 %a, i32 %b, i32 %c)
+ ret i32 %z
+}
+; CHECK: r0 += mpyu(r1.l, r2.l)
+
+declare i32 @llvm.hexagon.M2.mpyu.acc.ll.s1(i32, i32, i32)
+define i32 @M2_mpyu_acc_ll_s1(i32 %a, i32 %b, i32 %c) {
+ %z = call i32 @llvm.hexagon.M2.mpyu.acc.ll.s1(i32 %a, i32 %b, i32 %c)
+ ret i32 %z
+}
+; CHECK: r0 += mpyu(r1.l, r2.l):<<1
+
+declare i32 @llvm.hexagon.M2.mpyu.acc.lh.s0(i32, i32, i32)
+define i32 @M2_mpyu_acc_lh_s0(i32 %a, i32 %b, i32 %c) {
+ %z = call i32 @llvm.hexagon.M2.mpyu.acc.lh.s0(i32 %a, i32 %b, i32 %c)
+ ret i32 %z
+}
+; CHECK: r0 += mpyu(r1.l, r2.h)
+
+declare i32 @llvm.hexagon.M2.mpyu.acc.lh.s1(i32, i32, i32)
+define i32 @M2_mpyu_acc_lh_s1(i32 %a, i32 %b, i32 %c) {
+ %z = call i32 @llvm.hexagon.M2.mpyu.acc.lh.s1(i32 %a, i32 %b, i32 %c)
+ ret i32 %z
+}
+; CHECK: r0 += mpyu(r1.l, r2.h):<<1
+
+declare i32 @llvm.hexagon.M2.mpyu.acc.hl.s0(i32, i32, i32)
+define i32 @M2_mpyu_acc_hl_s0(i32 %a, i32 %b, i32 %c) {
+ %z = call i32 @llvm.hexagon.M2.mpyu.acc.hl.s0(i32 %a, i32 %b, i32 %c)
+ ret i32 %z
+}
+; CHECK: r0 += mpyu(r1.h, r2.l)
+
+declare i32 @llvm.hexagon.M2.mpyu.acc.hl.s1(i32, i32, i32)
+define i32 @M2_mpyu_acc_hl_s1(i32 %a, i32 %b, i32 %c) {
+ %z = call i32 @llvm.hexagon.M2.mpyu.acc.hl.s1(i32 %a, i32 %b, i32 %c)
+ ret i32 %z
+}
+; CHECK: r0 += mpyu(r1.h, r2.l):<<1
+
+declare i32 @llvm.hexagon.M2.mpyu.acc.hh.s0(i32, i32, i32)
+define i32 @M2_mpyu_acc_hh_s0(i32 %a, i32 %b, i32 %c) {
+ %z = call i32 @llvm.hexagon.M2.mpyu.acc.hh.s0(i32 %a, i32 %b, i32 %c)
+ ret i32 %z
+}
+; CHECK: r0 += mpyu(r1.h, r2.h)
+
+declare i32 @llvm.hexagon.M2.mpyu.acc.hh.s1(i32, i32, i32)
+define i32 @M2_mpyu_acc_hh_s1(i32 %a, i32 %b, i32 %c) {
+ %z = call i32 @llvm.hexagon.M2.mpyu.acc.hh.s1(i32 %a, i32 %b, i32 %c)
+ ret i32 %z
+}
+; CHECK: r0 += mpyu(r1.h, r2.h):<<1
+
+declare i32 @llvm.hexagon.M2.mpyu.nac.ll.s0(i32, i32, i32)
+define i32 @M2_mpyu_nac_ll_s0(i32 %a, i32 %b, i32 %c) {
+ %z = call i32 @llvm.hexagon.M2.mpyu.nac.ll.s0(i32 %a, i32 %b, i32 %c)
+ ret i32 %z
+}
+; CHECK: r0 -= mpyu(r1.l, r2.l)
+
+declare i32 @llvm.hexagon.M2.mpyu.nac.ll.s1(i32, i32, i32)
+define i32 @M2_mpyu_nac_ll_s1(i32 %a, i32 %b, i32 %c) {
+ %z = call i32 @llvm.hexagon.M2.mpyu.nac.ll.s1(i32 %a, i32 %b, i32 %c)
+ ret i32 %z
+}
+; CHECK: r0 -= mpyu(r1.l, r2.l):<<1
+
+declare i32 @llvm.hexagon.M2.mpyu.nac.lh.s0(i32, i32, i32)
+define i32 @M2_mpyu_nac_lh_s0(i32 %a, i32 %b, i32 %c) {
+ %z = call i32 @llvm.hexagon.M2.mpyu.nac.lh.s0(i32 %a, i32 %b, i32 %c)
+ ret i32 %z
+}
+; CHECK: r0 -= mpyu(r1.l, r2.h)
+
+declare i32 @llvm.hexagon.M2.mpyu.nac.lh.s1(i32, i32, i32)
+define i32 @M2_mpyu_nac_lh_s1(i32 %a, i32 %b, i32 %c) {
+ %z = call i32 @llvm.hexagon.M2.mpyu.nac.lh.s1(i32 %a, i32 %b, i32 %c)
+ ret i32 %z
+}
+; CHECK: r0 -= mpyu(r1.l, r2.h):<<1
+
+declare i32 @llvm.hexagon.M2.mpyu.nac.hl.s0(i32, i32, i32)
+define i32 @M2_mpyu_nac_hl_s0(i32 %a, i32 %b, i32 %c) {
+ %z = call i32 @llvm.hexagon.M2.mpyu.nac.hl.s0(i32 %a, i32 %b, i32 %c)
+ ret i32 %z
+}
+; CHECK: r0 -= mpyu(r1.h, r2.l)
+
+declare i32 @llvm.hexagon.M2.mpyu.nac.hl.s1(i32, i32, i32)
+define i32 @M2_mpyu_nac_hl_s1(i32 %a, i32 %b, i32 %c) {
+ %z = call i32 @llvm.hexagon.M2.mpyu.nac.hl.s1(i32 %a, i32 %b, i32 %c)
+ ret i32 %z
+}
+; CHECK: r0 -= mpyu(r1.h, r2.l):<<1
+
+declare i32 @llvm.hexagon.M2.mpyu.nac.hh.s0(i32, i32, i32)
+define i32 @M2_mpyu_nac_hh_s0(i32 %a, i32 %b, i32 %c) {
+ %z = call i32 @llvm.hexagon.M2.mpyu.nac.hh.s0(i32 %a, i32 %b, i32 %c)
+ ret i32 %z
+}
+; CHECK: r0 -= mpyu(r1.h, r2.h)
+
+declare i32 @llvm.hexagon.M2.mpyu.nac.hh.s1(i32, i32, i32)
+define i32 @M2_mpyu_nac_hh_s1(i32 %a, i32 %b, i32 %c) {
+ %z = call i32 @llvm.hexagon.M2.mpyu.nac.hh.s1(i32 %a, i32 %b, i32 %c)
+ ret i32 %z
+}
+; CHECK: r0 -= mpyu(r1.h, r2.h):<<1
+
+; Polynomial multiply words
+declare i64 @llvm.hexagon.M4.pmpyw(i32, i32)
+define i64 @M4_pmpyw(i32 %a, i32 %b) {
+ %z = call i64 @llvm.hexagon.M4.pmpyw(i32 %a, i32 %b)
+ ret i64 %z
+}
+; CHECK: r1:0 = pmpyw(r0, r1)
+
+declare i64 @llvm.hexagon.M4.pmpyw.acc(i64, i32, i32)
+define i64 @M4_pmpyw_acc(i64 %a, i32 %b, i32 %c) {
+ %z = call i64 @llvm.hexagon.M4.pmpyw.acc(i64 %a, i32 %b, i32 %c)
+ ret i64 %z
+}
+; CHECK: r1:0 ^= pmpyw(r2, r3)
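+
+; Note (informative only): pmpyw is a carry-less (GF(2) polynomial)
+; multiply, and the accumulating form XORs into the destination, hence
+; "^=". For example, carry-less 0b11 * 0b11 = 0b11 ^ 0b110 = 0b101 (5),
+; whereas the integer product would be 9.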
+
+; Vector reduce multiply word by signed half
+declare i64 @llvm.hexagon.M4.vrmpyoh.s0(i64, i64)
+define i64 @M4_vrmpyoh_s0(i64 %a, i64 %b) {
+ %z = call i64 @llvm.hexagon.M4.vrmpyoh.s0(i64 %a, i64 %b)
+ ret i64 %z
+}
+; CHECK: r1:0 = vrmpywoh(r1:0, r3:2)
+
+declare i64 @llvm.hexagon.M4.vrmpyoh.s1(i64, i64)
+define i64 @M4_vrmpyoh_s1(i64 %a, i64 %b) {
+ %z = call i64 @llvm.hexagon.M4.vrmpyoh.s1(i64 %a, i64 %b)
+ ret i64 %z
+}
+; CHECK: r1:0 = vrmpywoh(r1:0, r3:2):<<1
+
+declare i64 @llvm.hexagon.M4.vrmpyeh.s0(i64, i64)
+define i64 @M4_vrmpyeh_s0(i64 %a, i64 %b) {
+ %z = call i64 @llvm.hexagon.M4.vrmpyeh.s0(i64 %a, i64 %b)
+ ret i64 %z
+}
+; CHECK: r1:0 = vrmpyweh(r1:0, r3:2)
+
+declare i64 @llvm.hexagon.M4.vrmpyeh.s1(i64, i64)
+define i64 @M4_vrmpyeh_s1(i64 %a, i64 %b) {
+ %z = call i64 @llvm.hexagon.M4.vrmpyeh.s1(i64 %a, i64 %b)
+ ret i64 %z
+}
+; CHECK: r1:0 = vrmpyweh(r1:0, r3:2):<<1
+
+declare i64 @llvm.hexagon.M4.vrmpyoh.acc.s0(i64, i64, i64)
+define i64 @M4_vrmpyoh_acc_s0(i64 %a, i64 %b, i64 %c) {
+ %z = call i64 @llvm.hexagon.M4.vrmpyoh.acc.s0(i64 %a, i64 %b, i64 %c)
+ ret i64 %z
+}
+; CHECK: r1:0 += vrmpywoh(r3:2, r5:4)
+
+declare i64 @llvm.hexagon.M4.vrmpyoh.acc.s1(i64, i64, i64)
+define i64 @M4_vrmpyoh_acc_s1(i64 %a, i64 %b, i64 %c) {
+ %z = call i64 @llvm.hexagon.M4.vrmpyoh.acc.s1(i64 %a, i64 %b, i64 %c)
+ ret i64 %z
+}
+; CHECK: r1:0 += vrmpywoh(r3:2, r5:4):<<1
+
+declare i64 @llvm.hexagon.M4.vrmpyeh.acc.s0(i64, i64, i64)
+define i64 @M4_vrmpyeh_acc_s0(i64 %a, i64 %b, i64 %c) {
+ %z = call i64 @llvm.hexagon.M4.vrmpyeh.acc.s0(i64 %a, i64 %b, i64 %c)
+ ret i64 %z
+}
+; CHECK: r1:0 += vrmpyweh(r3:2, r5:4)
+
+declare i64 @llvm.hexagon.M4.vrmpyeh.acc.s1(i64, i64, i64)
+define i64 @M4_vrmpyeh_acc_s1(i64 %a, i64 %b, i64 %c) {
+ %z = call i64 @llvm.hexagon.M4.vrmpyeh.acc.s1(i64 %a, i64 %b, i64 %c)
+ ret i64 %z
+}
+; CHECK: r1:0 += vrmpyweh(r3:2, r5:4):<<1
+
+; Multiply and use upper result
+declare i32 @llvm.hexagon.M2.dpmpyss.rnd.s0(i32, i32)
+define i32 @M2_dpmpyss_rnd_s0(i32 %a, i32 %b) {
+ %z = call i32 @llvm.hexagon.M2.dpmpyss.rnd.s0(i32 %a, i32 %b)
+ ret i32 %z
+}
+; CHECK: r0 = mpy(r0, r1):rnd
+
+declare i32 @llvm.hexagon.M2.mpyu.up(i32, i32)
+define i32 @M2_mpyu_up(i32 %a, i32 %b) {
+ %z = call i32 @llvm.hexagon.M2.mpyu.up(i32 %a, i32 %b)
+ ret i32 %z
+}
+; CHECK: r0 = mpyu(r0, r1)
+
+declare i32 @llvm.hexagon.M2.mpysu.up(i32, i32)
+define i32 @M2_mpysu_up(i32 %a, i32 %b) {
+ %z = call i32 @llvm.hexagon.M2.mpysu.up(i32 %a, i32 %b)
+ ret i32 %z
+}
+; CHECK: r0 = mpysu(r0, r1)
+
+declare i32 @llvm.hexagon.M2.hmmpyh.s1(i32, i32)
+define i32 @M2_hmmpyh_s1(i32 %a, i32 %b) {
+ %z = call i32 @llvm.hexagon.M2.hmmpyh.s1(i32 %a, i32 %b)
+ ret i32 %z
+}
+; CHECK: r0 = mpy(r0, r1.h):<<1:sat
+
+declare i32 @llvm.hexagon.M2.hmmpyl.s1(i32, i32)
+define i32 @M2_hmmpyl_s1(i32 %a, i32 %b) {
+ %z = call i32 @llvm.hexagon.M2.hmmpyl.s1(i32 %a, i32 %b)
+ ret i32 %z
+}
+; CHECK: r0 = mpy(r0, r1.l):<<1:sat
+
+declare i32 @llvm.hexagon.M2.hmmpyh.rs1(i32, i32)
+define i32 @M2_hmmpyh_rs1(i32 %a, i32 %b) {
+ %z = call i32 @llvm.hexagon.M2.hmmpyh.rs1(i32 %a, i32 %b)
+ ret i32 %z
+}
+; CHECK: r0 = mpy(r0, r1.h):<<1:rnd:sat
+
+declare i32 @llvm.hexagon.M2.mpy.up.s1.sat(i32, i32)
+define i32 @M2_mpy_up_s1_sat(i32 %a, i32 %b) {
+ %z = call i32 @llvm.hexagon.M2.mpy.up.s1.sat(i32 %a, i32 %b)
+ ret i32 %z
+}
+; CHECK: r0 = mpy(r0, r1):<<1:sat
+
+declare i32 @llvm.hexagon.M2.hmmpyl.rs1(i32, i32)
+define i32 @M2_hmmpyl_rs1(i32 %a, i32 %b) {
+ %z = call i32 @llvm.hexagon.M2.hmmpyl.rs1(i32 %a, i32 %b)
+ ret i32 %z
+}
+; CHECK: r0 = mpy(r0, r1.l):<<1:rnd:sat
+
+declare i32 @llvm.hexagon.M2.mpy.up(i32, i32)
+define i32 @M2_mpy_up(i32 %a, i32 %b) {
+ %z = call i32 @llvm.hexagon.M2.mpy.up(i32 %a, i32 %b)
+ ret i32 %z
+}
+; CHECK: r0 = mpy(r0, r1)
+
+declare i32 @llvm.hexagon.M2.mpy.up.s1(i32, i32)
+define i32 @M2_mpy_up_s1(i32 %a, i32 %b) {
+ %z = call i32 @llvm.hexagon.M2.mpy.up.s1(i32 %a, i32 %b)
+ ret i32 %z
+}
+; CHECK: r0 = mpy(r0, r1):<<1
+
+declare i32 @llvm.hexagon.M4.mac.up.s1.sat(i32, i32, i32)
+define i32 @M4_mac_up_s1_sat(i32 %a, i32 %b, i32 %c) {
+ %z = call i32 @llvm.hexagon.M4.mac.up.s1.sat(i32 %a, i32 %b, i32 %c)
+ ret i32 %z
+}
+; CHECK: r0 += mpy(r1, r2):<<1:sat
+
+declare i32 @llvm.hexagon.M4.nac.up.s1.sat(i32, i32, i32)
+define i32 @M4_nac_up_s1_sat(i32 %a, i32 %b, i32 %c) {
+ %z = call i32 @llvm.hexagon.M4.nac.up.s1.sat(i32 %a, i32 %b, i32 %c)
+ ret i32 %z
+}
+; CHECK: r0 -= mpy(r1, r2):<<1:sat
+
+; Multiply and use full result
+declare i64 @llvm.hexagon.M2.dpmpyss.s0(i32, i32)
+define i64 @M2_dpmpyss_s0(i32 %a, i32 %b) {
+ %z = call i64 @llvm.hexagon.M2.dpmpyss.s0(i32 %a, i32 %b)
+ ret i64 %z
+}
+; CHECK: r1:0 = mpy(r0, r1)
+
+declare i64 @llvm.hexagon.M2.dpmpyuu.s0(i32, i32)
+define i64 @M2_dpmpyuu_s0(i32 %a, i32 %b) {
+ %z = call i64 @llvm.hexagon.M2.dpmpyuu.s0(i32 %a, i32 %b)
+ ret i64 %z
+}
+; CHECK: r1:0 = mpyu(r0, r1)
+
+declare i64 @llvm.hexagon.M2.dpmpyss.acc.s0(i64, i32, i32)
+define i64 @M2_dpmpyss_acc_s0(i64 %a, i32 %b, i32 %c) {
+ %z = call i64 @llvm.hexagon.M2.dpmpyss.acc.s0(i64 %a, i32 %b, i32 %c)
+ ret i64 %z
+}
+; CHECK: r1:0 += mpy(r2, r3)
+
+declare i64 @llvm.hexagon.M2.dpmpyss.nac.s0(i64, i32, i32)
+define i64 @M2_dpmpyss_nac_s0(i64 %a, i32 %b, i32 %c) {
+ %z = call i64 @llvm.hexagon.M2.dpmpyss.nac.s0(i64 %a, i32 %b, i32 %c)
+ ret i64 %z
+}
+; CHECK: r1:0 -= mpy(r2, r3)
+
+declare i64 @llvm.hexagon.M2.dpmpyuu.acc.s0(i64, i32, i32)
+define i64 @M2_dpmpyuu_acc_s0(i64 %a, i32 %b, i32 %c) {
+ %z = call i64 @llvm.hexagon.M2.dpmpyuu.acc.s0(i64 %a, i32 %b, i32 %c)
+ ret i64 %z
+}
+; CHECK: r1:0 += mpyu(r2, r3)
+
+declare i64 @llvm.hexagon.M2.dpmpyuu.nac.s0(i64, i32, i32)
+define i64 @M2_dpmpyuu_nac_s0(i64 %a, i32 %b, i32 %c) {
+ %z = call i64 @llvm.hexagon.M2.dpmpyuu.nac.s0(i64 %a, i32 %b, i32 %c)
+ ret i64 %z
+}
+; CHECK: r1:0 -= mpyu(r2, r3)
+
+; Vector dual multiply
+declare i64 @llvm.hexagon.M2.vdmpys.s0(i64, i64)
+define i64 @M2_vdmpys_s0(i64 %a, i64 %b) {
+ %z = call i64 @llvm.hexagon.M2.vdmpys.s0(i64 %a, i64 %b)
+ ret i64 %z
+}
+; CHECK: r1:0 = vdmpy(r1:0, r3:2):sat
+
+declare i64 @llvm.hexagon.M2.vdmpys.s1(i64, i64)
+define i64 @M2_vdmpys_s1(i64 %a, i64 %b) {
+ %z = call i64 @llvm.hexagon.M2.vdmpys.s1(i64 %a, i64 %b)
+ ret i64 %z
+}
+; CHECK: r1:0 = vdmpy(r1:0, r3:2):<<1:sat
+
+; Vector reduce multiply bytes
+declare i64 @llvm.hexagon.M5.vrmpybuu(i64, i64)
+define i64 @M5_vrmpybuu(i64 %a, i64 %b) {
+ %z = call i64 @llvm.hexagon.M5.vrmpybuu(i64 %a, i64 %b)
+ ret i64 %z
+}
+; CHECK: r1:0 = vrmpybu(r1:0, r3:2)
+
+declare i64 @llvm.hexagon.M5.vrmpybsu(i64, i64)
+define i64 @M5_vrmpybsu(i64 %a, i64 %b) {
+ %z = call i64 @llvm.hexagon.M5.vrmpybsu(i64 %a, i64 %b)
+ ret i64 %z
+}
+; CHECK: r1:0 = vrmpybsu(r1:0, r3:2)
+
+declare i64 @llvm.hexagon.M5.vrmacbuu(i64, i64, i64)
+define i64 @M5_vrmacbuu(i64 %a, i64 %b, i64 %c) {
+ %z = call i64 @llvm.hexagon.M5.vrmacbuu(i64 %a, i64 %b, i64 %c)
+ ret i64 %z
+}
+; CHECK: r1:0 += vrmpybu(r3:2, r5:4)
+
+declare i64 @llvm.hexagon.M5.vrmacbsu(i64, i64, i64)
+define i64 @M5_vrmacbsu(i64 %a, i64 %b, i64 %c) {
+ %z = call i64 @llvm.hexagon.M5.vrmacbsu(i64 %a, i64 %b, i64 %c)
+ ret i64 %z
+}
+; CHECK: r1:0 += vrmpybsu(r3:2, r5:4)
+
+; Vector dual multiply signed by unsigned bytes
+declare i64 @llvm.hexagon.M5.vdmpybsu(i64, i64)
+define i64 @M5_vdmpybsu(i64 %a, i64 %b) {
+ %z = call i64 @llvm.hexagon.M5.vdmpybsu(i64 %a, i64 %b)
+ ret i64 %z
+}
+; CHECK: r1:0 = vdmpybsu(r1:0, r3:2):sat
+
+declare i64 @llvm.hexagon.M5.vdmacbsu(i64, i64, i64)
+define i64 @M5_vdmacbsu(i64 %a, i64 %b, i64 %c) {
+ %z = call i64 @llvm.hexagon.M5.vdmacbsu(i64 %a, i64 %b, i64 %c)
+ ret i64 %z
+}
+; CHECK: r1:0 += vdmpybsu(r3:2, r5:4):sat
+
+; Vector multiply even halfwords
+declare i64 @llvm.hexagon.M2.vmpy2es.s0(i64, i64)
+define i64 @M2_vmpy2es_s0(i64 %a, i64 %b) {
+ %z = call i64 @llvm.hexagon.M2.vmpy2es.s0(i64 %a, i64 %b)
+ ret i64 %z
+}
+; CHECK: r1:0 = vmpyeh(r1:0, r3:2):sat
+
+declare i64 @llvm.hexagon.M2.vmpy2es.s1(i64, i64)
+define i64 @M2_vmpy2es_s1(i64 %a, i64 %b) {
+ %z = call i64 @llvm.hexagon.M2.vmpy2es.s1(i64 %a, i64 %b)
+ ret i64 %z
+}
+; CHECK: r1:0 = vmpyeh(r1:0, r3:2):<<1:sat
+
+declare i64 @llvm.hexagon.M2.vmac2es(i64, i64, i64)
+define i64 @M2_vmac2es(i64 %a, i64 %b, i64 %c) {
+ %z = call i64 @llvm.hexagon.M2.vmac2es(i64 %a, i64 %b, i64 %c)
+ ret i64 %z
+}
+; CHECK: r1:0 += vmpyeh(r3:2, r5:4)
+
+declare i64 @llvm.hexagon.M2.vmac2es.s0(i64, i64, i64)
+define i64 @M2_vmac2es_s0(i64 %a, i64 %b, i64 %c) {
+ %z = call i64 @llvm.hexagon.M2.vmac2es.s0(i64 %a, i64 %b, i64 %c)
+ ret i64 %z
+}
+; CHECK: r1:0 += vmpyeh(r3:2, r5:4):sat
+
+declare i64 @llvm.hexagon.M2.vmac2es.s1(i64, i64, i64)
+define i64 @M2_vmac2es_s1(i64 %a, i64 %b, i64 %c) {
+ %z = call i64 @llvm.hexagon.M2.vmac2es.s1(i64 %a, i64 %b, i64 %c)
+ ret i64 %z
+}
+; CHECK: r1:0 += vmpyeh(r3:2, r5:4):<<1:sat
+
+; Vector multiply halfwords
+declare i64 @llvm.hexagon.M2.vmpy2s.s0(i32, i32)
+define i64 @M2_vmpy2s_s0(i32 %a, i32 %b) {
+ %z = call i64 @llvm.hexagon.M2.vmpy2s.s0(i32 %a, i32 %b)
+ ret i64 %z
+}
+; CHECK: r1:0 = vmpyh(r0, r1):sat
+
+declare i64 @llvm.hexagon.M2.vmpy2s.s1(i32, i32)
+define i64 @M2_vmpy2s_s1(i32 %a, i32 %b) {
+ %z = call i64 @llvm.hexagon.M2.vmpy2s.s1(i32 %a, i32 %b)
+ ret i64 %z
+}
+; CHECK: r1:0 = vmpyh(r0, r1):<<1:sat
+
+declare i64 @llvm.hexagon.M2.vmac2(i64, i32, i32)
+define i64 @M2_vmac2(i64 %a, i32 %b, i32 %c) {
+ %z = call i64 @llvm.hexagon.M2.vmac2(i64 %a, i32 %b, i32 %c)
+ ret i64 %z
+}
+; CHECK: r1:0 += vmpyh(r2, r3)
+
+declare i64 @llvm.hexagon.M2.vmac2s.s0(i64, i32, i32)
+define i64 @M2_vmac2s_s0(i64 %a, i32 %b, i32 %c) {
+ %z = call i64 @llvm.hexagon.M2.vmac2s.s0(i64 %a, i32 %b, i32 %c)
+ ret i64 %z
+}
+; CHECK: r1:0 += vmpyh(r2, r3):sat
+
+declare i64 @llvm.hexagon.M2.vmac2s.s1(i64, i32, i32)
+define i64 @M2_vmac2s_s1(i64 %a, i32 %b, i32 %c) {
+ %z = call i64 @llvm.hexagon.M2.vmac2s.s1(i64 %a, i32 %b, i32 %c)
+ ret i64 %z
+}
+; CHECK: r1:0 += vmpyh(r2, r3):<<1:sat
+
+; Vector multiply halfwords signed by unsigned
+declare i64 @llvm.hexagon.M2.vmpy2su.s0(i32, i32)
+define i64 @M2_vmpy2su_s0(i32 %a, i32 %b) {
+ %z = call i64 @llvm.hexagon.M2.vmpy2su.s0(i32 %a, i32 %b)
+ ret i64 %z
+}
+; CHECK: r1:0 = vmpyhsu(r0, r1):sat
+
+declare i64 @llvm.hexagon.M2.vmpy2su.s1(i32, i32)
+define i64 @M2_vmpy2su_s1(i32 %a, i32 %b) {
+ %z = call i64 @llvm.hexagon.M2.vmpy2su.s1(i32 %a, i32 %b)
+ ret i64 %z
+}
+; CHECK: r1:0 = vmpyhsu(r0, r1):<<1:sat
+
+declare i64 @llvm.hexagon.M2.vmac2su.s0(i64, i32, i32)
+define i64 @M2_vmac2su_s0(i64 %a, i32 %b, i32 %c) {
+ %z = call i64 @llvm.hexagon.M2.vmac2su.s0(i64 %a, i32 %b, i32 %c)
+ ret i64 %z
+}
+; CHECK: r1:0 += vmpyhsu(r2, r3):sat
+
+declare i64 @llvm.hexagon.M2.vmac2su.s1(i64, i32, i32)
+define i64 @M2_vmac2su_s1(i64 %a, i32 %b, i32 %c) {
+ %z = call i64 @llvm.hexagon.M2.vmac2su.s1(i64 %a, i32 %b, i32 %c)
+ ret i64 %z
+}
+; CHECK: r1:0 += vmpyhsu(r2, r3):<<1:sat
+
+; Vector reduce multiply halfwords
+declare i64 @llvm.hexagon.M2.vrmpy.s0(i64, i64)
+define i64 @M2_vrmpy_s0(i64 %a, i64 %b) {
+ %z = call i64 @llvm.hexagon.M2.vrmpy.s0(i64 %a, i64 %b)
+ ret i64 %z
+}
+; CHECK: r1:0 = vrmpyh(r1:0, r3:2)
+
+declare i64 @llvm.hexagon.M2.vrmac.s0(i64, i64, i64)
+define i64 @M2_vrmac_s0(i64 %a, i64 %b, i64 %c) {
+ %z = call i64 @llvm.hexagon.M2.vrmac.s0(i64 %a, i64 %b, i64 %c)
+ ret i64 %z
+}
+; CHECK: r1:0 += vrmpyh(r3:2, r5:4)
+
+; Vector multiply bytes
+declare i64 @llvm.hexagon.M5.vmpybsu(i32, i32)
+define i64 @M5_vmpybsu(i32 %a, i32 %b) {
+ %z = call i64 @llvm.hexagon.M5.vmpybsu(i32 %a, i32 %b)
+ ret i64 %z
+}
+; CHECK: r1:0 = vmpybsu(r0, r1)
+
+declare i64 @llvm.hexagon.M5.vmpybuu(i32, i32)
+define i64 @M5_vmpybuu(i32 %a, i32 %b) {
+ %z = call i64 @llvm.hexagon.M5.vmpybuu(i32 %a, i32 %b)
+ ret i64 %z
+}
+; CHECK: r1:0 = vmpybu(r0, r1)
+
+declare i64 @llvm.hexagon.M5.vmacbuu(i64, i32, i32)
+define i64 @M5_vmacbuu(i64 %a, i32 %b, i32 %c) {
+ %z = call i64 @llvm.hexagon.M5.vmacbuu(i64 %a, i32 %b, i32 %c)
+ ret i64 %z
+}
+; CHECK: r1:0 += vmpybu(r2, r3)
+
+declare i64 @llvm.hexagon.M5.vmacbsu(i64, i32, i32)
+define i64 @M5_vmacbsu(i64 %a, i32 %b, i32 %c) {
+ %z = call i64 @llvm.hexagon.M5.vmacbsu(i64 %a, i32 %b, i32 %c)
+ ret i64 %z
+}
+; CHECK: r1:0 += vmpybsu(r2, r3)
+
+; Vector polynomial multiply halfwords
+declare i64 @llvm.hexagon.M4.vpmpyh(i32, i32)
+define i64 @M4_vpmpyh(i32 %a, i32 %b) {
+ %z = call i64 @llvm.hexagon.M4.vpmpyh(i32 %a, i32 %b)
+ ret i64 %z
+}
+; CHECK: r1:0 = vpmpyh(r0, r1)
+
+declare i64 @llvm.hexagon.M4.vpmpyh.acc(i64, i32, i32)
+define i64 @M4_vpmpyh_acc(i64 %a, i32 %b, i32 %c) {
+ %z = call i64 @llvm.hexagon.M4.vpmpyh.acc(i64 %a, i32 %b, i32 %c)
+ ret i64 %z
+}
+; CHECK: r1:0 ^= vpmpyh(r2, r3)
diff --git a/test/CodeGen/Hexagon/intrinsics/xtype_perm.ll b/test/CodeGen/Hexagon/intrinsics/xtype_perm.ll
new file mode 100644
index 0000000..0b76132
--- /dev/null
+++ b/test/CodeGen/Hexagon/intrinsics/xtype_perm.ll
@@ -0,0 +1,252 @@
+; RUN: llc -march=hexagon -O0 < %s | FileCheck %s
+; Hexagon Programmer's Reference Manual 11.10.6 XTYPE/PERM
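+;
+; Each test below wraps a single intrinsic in a minimal function so that
+; FileCheck can verify the instruction llc selects for it at -O0.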
+
+; Saturate
+declare i32 @llvm.hexagon.A2.sat(i64)
+define i32 @A2_sat(i64 %a) {
+ %z = call i32 @llvm.hexagon.A2.sat(i64 %a)
+ ret i32 %z
+}
+; CHECK: r0 = sat(r1:0)
+
+declare i32 @llvm.hexagon.A2.sath(i32)
+define i32 @A2_sath(i32 %a) {
+ %z = call i32 @llvm.hexagon.A2.sath(i32 %a)
+ ret i32 %z
+}
+; CHECK: r0 = sath(r0)
+
+declare i32 @llvm.hexagon.A2.satuh(i32)
+define i32 @A2_satuh(i32 %a) {
+ %z = call i32 @llvm.hexagon.A2.satuh(i32 %a)
+ ret i32 %z
+}
+; CHECK: r0 = satuh(r0)
+
+declare i32 @llvm.hexagon.A2.satub(i32)
+define i32 @A2_satub(i32 %a) {
+ %z = call i32 @llvm.hexagon.A2.satub(i32 %a)
+ ret i32 %z
+}
+; CHECK: r0 = satub(r0)
+
+declare i32 @llvm.hexagon.A2.satb(i32)
+define i32 @A2_satb(i32 %a) {
+ %z = call i32 @llvm.hexagon.A2.satb(i32 %a)
+ ret i32 %z
+}
+; CHECK: r0 = satb(r0)
+
+; Swizzle bytes
+declare i32 @llvm.hexagon.A2.swiz(i32)
+define i32 @A2_swiz(i32 %a) {
+ %z = call i32 @llvm.hexagon.A2.swiz(i32 %a)
+ ret i32 %z
+}
+; CHECK: r0 = swiz(r0)
+
+; Vector round and pack
+declare i32 @llvm.hexagon.S2.vrndpackwh(i64)
+define i32 @S2_vrndpackwh(i64 %a) {
+ %z = call i32 @llvm.hexagon.S2.vrndpackwh(i64 %a)
+ ret i32 %z
+}
+; CHECK: r0 = vrndwh(r1:0)
+
+declare i32 @llvm.hexagon.S2.vrndpackwhs(i64)
+define i32 @S2_vrndpackwhs(i64 %a) {
+ %z = call i32 @llvm.hexagon.S2.vrndpackwhs(i64 %a)
+ ret i32 %z
+}
+; CHECK: r0 = vrndwh(r1:0):sat
+
+; Vector saturate and pack
+declare i32 @llvm.hexagon.S2.vsathub(i64)
+define i32 @S2_vsathub(i64 %a) {
+ %z = call i32 @llvm.hexagon.S2.vsathub(i64 %a)
+ ret i32 %z
+}
+; CHECK: r0 = vsathub(r1:0)
+
+declare i32 @llvm.hexagon.S2.vsatwh(i64)
+define i32 @S2_vsatwh(i64 %a) {
+ %z = call i32 @llvm.hexagon.S2.vsatwh(i64 %a)
+ ret i32 %z
+}
+; CHECK: r0 = vsatwh(r1:0)
+
+declare i32 @llvm.hexagon.S2.vsatwuh(i64)
+define i32 @S2_vsatwuh(i64 %a) {
+ %z = call i32 @llvm.hexagon.S2.vsatwuh(i64 %a)
+ ret i32 %z
+}
+; CHECK: r0 = vsatwuh(r1:0)
+
+declare i32 @llvm.hexagon.S2.vsathb(i64)
+define i32 @S2_vsathb(i64 %a) {
+ %z = call i32 @llvm.hexagon.S2.vsathb(i64 %a)
+ ret i32 %z
+}
+; CHECK: r0 = vsathb(r1:0)
+
+declare i32 @llvm.hexagon.S2.svsathb(i32)
+define i32 @S2_svsathb(i32 %a) {
+ %z = call i32 @llvm.hexagon.S2.svsathb(i32 %a)
+ ret i32 %z
+}
+; CHECK: r0 = vsathb(r0)
+
+declare i32 @llvm.hexagon.S2.svsathub(i32)
+define i32 @S2_svsathub(i32 %a) {
+ %z = call i32 @llvm.hexagon.S2.svsathub(i32 %a)
+ ret i32 %z
+}
+; CHECK: r0 = vsathub(r0)
+
+; Vector saturate without pack
+declare i64 @llvm.hexagon.S2.vsathub.nopack(i64)
+define i64 @S2_vsathub_nopack(i64 %a) {
+ %z = call i64 @llvm.hexagon.S2.vsathub.nopack(i64 %a)
+ ret i64 %z
+}
+; CHECK: r1:0 = vsathub(r1:0)
+
+declare i64 @llvm.hexagon.S2.vsatwuh.nopack(i64)
+define i64 @S2_vsatwuh_nopack(i64 %a) {
+ %z = call i64 @llvm.hexagon.S2.vsatwuh.nopack(i64 %a)
+ ret i64 %z
+}
+; CHECK: r1:0 = vsatwuh(r1:0)
+
+declare i64 @llvm.hexagon.S2.vsatwh.nopack(i64)
+define i64 @S2_vsatwh_nopack(i64 %a) {
+ %z = call i64 @llvm.hexagon.S2.vsatwh.nopack(i64 %a)
+ ret i64 %z
+}
+; CHECK: r1:0 = vsatwh(r1:0)
+
+declare i64 @llvm.hexagon.S2.vsathb.nopack(i64)
+define i64 @S2_vsathb_nopack(i64 %a) {
+ %z = call i64 @llvm.hexagon.S2.vsathb.nopack(i64 %a)
+ ret i64 %z
+}
+; CHECK: r1:0 = vsathb(r1:0)
+
+; Vector shuffle
+declare i64 @llvm.hexagon.S2.shuffeb(i64, i64)
+define i64 @S2_shuffeb(i64 %a, i64 %b) {
+ %z = call i64 @llvm.hexagon.S2.shuffeb(i64 %a, i64 %b)
+ ret i64 %z
+}
+; CHECK: r1:0 = shuffeb(r1:0, r3:2)
+
+declare i64 @llvm.hexagon.S2.shuffob(i64, i64)
+define i64 @S2_shuffob(i64 %a, i64 %b) {
+ %z = call i64 @llvm.hexagon.S2.shuffob(i64 %a, i64 %b)
+ ret i64 %z
+}
+; CHECK: r1:0 = shuffob(r1:0, r3:2)
+
+declare i64 @llvm.hexagon.S2.shuffeh(i64, i64)
+define i64 @S2_shuffeh(i64 %a, i64 %b) {
+ %z = call i64 @llvm.hexagon.S2.shuffeh(i64 %a, i64 %b)
+ ret i64 %z
+}
+; CHECK: r1:0 = shuffeh(r1:0, r3:2)
+
+declare i64 @llvm.hexagon.S2.shuffoh(i64, i64)
+define i64 @S2_shuffoh(i64 %a, i64 %b) {
+ %z = call i64 @llvm.hexagon.S2.shuffoh(i64 %a, i64 %b)
+ ret i64 %z
+}
+; CHECK: r1:0 = shuffoh(r1:0, r3:2)
+
+; Vector splat bytes
+declare i32 @llvm.hexagon.S2.vsplatrb(i32)
+define i32 @S2_vsplatrb(i32 %a) {
+ %z = call i32 @llvm.hexagon.S2.vsplatrb(i32 %a)
+ ret i32 %z
+}
+; CHECK: r0 = vsplatb(r0)
+
+; Vector splat halfwords
+declare i64 @llvm.hexagon.S2.vsplatrh(i32)
+define i64 @S2_vsplatrh(i32 %a) {
+ %z = call i64 @llvm.hexagon.S2.vsplatrh(i32 %a)
+ ret i64 %z
+}
+; CHECK: = vsplath(r0)
+
+; Vector splice
+declare i64 @llvm.hexagon.S2.vspliceib(i64, i64, i32)
+define i64 @S2_vspliceib(i64 %a, i64 %b) {
+ %z = call i64 @llvm.hexagon.S2.vspliceib(i64 %a, i64 %b, i32 0)
+ ret i64 %z
+}
+; CHECK: r1:0 = vspliceb(r1:0, r3:2, #0)
+
+declare i64 @llvm.hexagon.S2.vsplicerb(i64, i64, i32)
+define i64 @S2_vsplicerb(i64 %a, i64 %b, i32 %c) {
+ %z = call i64 @llvm.hexagon.S2.vsplicerb(i64 %a, i64 %b, i32 %c)
+ ret i64 %z
+}
+; CHECK: r1:0 = vspliceb(r1:0, r3:2, p0)
+
+; Vector sign extend
+declare i64 @llvm.hexagon.S2.vsxtbh(i32)
+define i64 @S2_vsxtbh(i32 %a) {
+ %z = call i64 @llvm.hexagon.S2.vsxtbh(i32 %a)
+ ret i64 %z
+}
+; CHECK: = vsxtbh(r0)
+
+declare i64 @llvm.hexagon.S2.vsxthw(i32)
+define i64 @S2_vsxthw(i32 %a) {
+ %z = call i64 @llvm.hexagon.S2.vsxthw(i32 %a)
+ ret i64 %z
+}
+; CHECK: = vsxthw(r0)
+
+; Vector truncate
+declare i32 @llvm.hexagon.S2.vtrunohb(i64)
+define i32 @S2_vtrunohb(i64 %a) {
+ %z = call i32 @llvm.hexagon.S2.vtrunohb(i64 %a)
+ ret i32 %z
+}
+; CHECK: r0 = vtrunohb(r1:0)
+
+declare i32 @llvm.hexagon.S2.vtrunehb(i64)
+define i32 @S2_vtrunehb(i64 %a) {
+ %z = call i32 @llvm.hexagon.S2.vtrunehb(i64 %a)
+ ret i32 %z
+}
+; CHECK: r0 = vtrunehb(r1:0)
+
+declare i64 @llvm.hexagon.S2.vtrunowh(i64, i64)
+define i64 @S2_vtrunowh(i64 %a, i64 %b) {
+ %z = call i64 @llvm.hexagon.S2.vtrunowh(i64 %a, i64 %b)
+ ret i64 %z
+}
+; CHECK: r1:0 = vtrunowh(r1:0, r3:2)
+
+declare i64 @llvm.hexagon.S2.vtrunewh(i64, i64)
+define i64 @S2_vtrunewh(i64 %a, i64 %b) {
+ %z = call i64 @llvm.hexagon.S2.vtrunewh(i64 %a, i64 %b)
+ ret i64 %z
+}
+; CHECK: r1:0 = vtrunewh(r1:0, r3:2)
+
+; Vector zero extend
+declare i64 @llvm.hexagon.S2.vzxtbh(i32)
+define i64 @S2_vzxtbh(i32 %a) {
+ %z = call i64 @llvm.hexagon.S2.vzxtbh(i32 %a)
+ ret i64 %z
+}
+; CHECK: = vzxtbh(r0)
+
+declare i64 @llvm.hexagon.S2.vzxthw(i32)
+define i64 @S2_vzxthw(i32 %a) {
+ %z = call i64 @llvm.hexagon.S2.vzxthw(i32 %a)
+ ret i64 %z
+}
+; CHECK: = vzxthw(r0)
diff --git a/test/CodeGen/Hexagon/intrinsics/xtype_pred.ll b/test/CodeGen/Hexagon/intrinsics/xtype_pred.ll
new file mode 100644
index 0000000..96e63d8
--- /dev/null
+++ b/test/CodeGen/Hexagon/intrinsics/xtype_pred.ll
@@ -0,0 +1,351 @@
+; RUN: llc -march=hexagon -O0 < %s | FileCheck %s
+; Hexagon Programmer's Reference Manual 11.10.7 XTYPE/PRED
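+;
+; Each comparison intrinsic returns its result through a general register,
+; but the CHECK lines match only the compare that writes the predicate (p0).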
+
+; Compare byte
+declare i32 @llvm.hexagon.A4.cmpbgt(i32, i32)
+define i32 @A4_cmpbgt(i32 %a, i32 %b) {
+ %z = call i32 @llvm.hexagon.A4.cmpbgt(i32 %a, i32 %b)
+ ret i32 %z
+}
+; CHECK: p0 = cmpb.gt(r0, r1)
+
+declare i32 @llvm.hexagon.A4.cmpbeq(i32, i32)
+define i32 @A4_cmpbeq(i32 %a, i32 %b) {
+ %z = call i32 @llvm.hexagon.A4.cmpbeq(i32 %a, i32 %b)
+ ret i32 %z
+}
+; CHECK: p0 = cmpb.eq(r0, r1)
+
+declare i32 @llvm.hexagon.A4.cmpbgtu(i32, i32)
+define i32 @A4_cmpbgtu(i32 %a, i32 %b) {
+ %z = call i32 @llvm.hexagon.A4.cmpbgtu(i32 %a, i32 %b)
+ ret i32 %z
+}
+; CHECK: p0 = cmpb.gtu(r0, r1)
+
+declare i32 @llvm.hexagon.A4.cmpbgti(i32, i32)
+define i32 @A4_cmpbgti(i32 %a) {
+ %z = call i32 @llvm.hexagon.A4.cmpbgti(i32 %a, i32 0)
+ ret i32 %z
+}
+; CHECK: p0 = cmpb.gt(r0, #0)
+
+declare i32 @llvm.hexagon.A4.cmpbeqi(i32, i32)
+define i32 @A4_cmpbeqi(i32 %a) {
+ %z = call i32 @llvm.hexagon.A4.cmpbeqi(i32 %a, i32 0)
+ ret i32 %z
+}
+; CHECK: p0 = cmpb.eq(r0, #0)
+
+declare i32 @llvm.hexagon.A4.cmpbgtui(i32, i32)
+define i32 @A4_cmpbgtui(i32 %a) {
+ %z = call i32 @llvm.hexagon.A4.cmpbgtui(i32 %a, i32 0)
+ ret i32 %z
+}
+; CHECK: p0 = cmpb.gtu(r0, #0)
+
+; Compare half
+declare i32 @llvm.hexagon.A4.cmphgt(i32, i32)
+define i32 @A4_cmphgt(i32 %a, i32 %b) {
+ %z = call i32 @llvm.hexagon.A4.cmphgt(i32 %a, i32 %b)
+ ret i32 %z
+}
+; CHECK: p0 = cmph.gt(r0, r1)
+
+declare i32 @llvm.hexagon.A4.cmpheq(i32, i32)
+define i32 @A4_cmpheq(i32 %a, i32 %b) {
+ %z = call i32 @llvm.hexagon.A4.cmpheq(i32 %a, i32 %b)
+ ret i32 %z
+}
+; CHECK: p0 = cmph.eq(r0, r1)
+
+declare i32 @llvm.hexagon.A4.cmphgtu(i32, i32)
+define i32 @A4_cmphgtu(i32 %a, i32 %b) {
+ %z = call i32 @llvm.hexagon.A4.cmphgtu(i32 %a, i32 %b)
+ ret i32 %z
+}
+; CHECK: p0 = cmph.gtu(r0, r1)
+
+declare i32 @llvm.hexagon.A4.cmphgti(i32, i32)
+define i32 @A4_cmphgti(i32 %a) {
+ %z = call i32 @llvm.hexagon.A4.cmphgti(i32 %a, i32 0)
+ ret i32 %z
+}
+; CHECK: p0 = cmph.gt(r0, #0)
+
+declare i32 @llvm.hexagon.A4.cmpheqi(i32, i32)
+define i32 @A4_cmpheqi(i32 %a) {
+ %z = call i32 @llvm.hexagon.A4.cmpheqi(i32 %a, i32 0)
+ ret i32 %z
+}
+; CHECK: p0 = cmph.eq(r0, #0)
+
+declare i32 @llvm.hexagon.A4.cmphgtui(i32, i32)
+define i32 @A4_cmphgtui(i32 %a) {
+ %z = call i32 @llvm.hexagon.A4.cmphgtui(i32 %a, i32 0)
+ ret i32 %z
+}
+; CHECK: p0 = cmph.gtu(r0, #0)
+
+; Compare doublewords
+declare i32 @llvm.hexagon.C2.cmpgtp(i64, i64)
+define i32 @C2_cmpgtp(i64 %a, i64 %b) {
+ %z = call i32 @llvm.hexagon.C2.cmpgtp(i64 %a, i64 %b)
+ ret i32 %z
+}
+; CHECK: p0 = cmp.gt(r1:0, r3:2)
+
+declare i32 @llvm.hexagon.C2.cmpeqp(i64, i64)
+define i32 @C2_cmpeqp(i64 %a, i64 %b) {
+ %z = call i32 @llvm.hexagon.C2.cmpeqp(i64 %a, i64 %b)
+ ret i32 %z
+}
+; CHECK: p0 = cmp.eq(r1:0, r3:2)
+
+declare i32 @llvm.hexagon.C2.cmpgtup(i64, i64)
+define i32 @C2_cmpgtup(i64 %a, i64 %b) {
+ %z = call i32 @llvm.hexagon.C2.cmpgtup(i64 %a, i64 %b)
+ ret i32 %z
+}
+; CHECK: p0 = cmp.gtu(r1:0, r3:2)
+
+; Compare bitmask
+declare i32 @llvm.hexagon.C2.bitsclri(i32, i32)
+define i32 @C2_bitsclri(i32 %a) {
+ %z = call i32 @llvm.hexagon.C2.bitsclri(i32 %a, i32 0)
+ ret i32 %z
+}
+; CHECK: p0 = bitsclr(r0, #0)
+
+declare i32 @llvm.hexagon.C4.nbitsclri(i32, i32)
+define i32 @C4_nbitsclri(i32 %a) {
+ %z = call i32 @llvm.hexagon.C4.nbitsclri(i32 %a, i32 0)
+ ret i32 %z
+}
+; CHECK: p0 = !bitsclr(r0, #0)
+
+declare i32 @llvm.hexagon.C2.bitsset(i32, i32)
+define i32 @C2_bitsset(i32 %a, i32 %b) {
+ %z = call i32 @llvm.hexagon.C2.bitsset(i32 %a, i32 %b)
+ ret i32 %z
+}
+; CHECK: p0 = bitsset(r0, r1)
+
+declare i32 @llvm.hexagon.C4.nbitsset(i32, i32)
+define i32 @C4_nbitsset(i32 %a, i32 %b) {
+ %z = call i32 @llvm.hexagon.C4.nbitsset(i32 %a, i32 %b)
+ ret i32 %z
+}
+; CHECK: p0 = !bitsset(r0, r1)
+
+declare i32 @llvm.hexagon.C2.bitsclr(i32, i32)
+define i32 @C2_bitsclr(i32 %a, i32 %b) {
+ %z = call i32 @llvm.hexagon.C2.bitsclr(i32 %a, i32 %b)
+ ret i32 %z
+}
+; CHECK: p0 = bitsclr(r0, r1)
+
+declare i32 @llvm.hexagon.C4.nbitsclr(i32, i32)
+define i32 @C4_nbitsclr(i32 %a, i32 %b) {
+ %z = call i32 @llvm.hexagon.C4.nbitsclr(i32 %a, i32 %b)
+ ret i32 %z
+}
+; CHECK: p0 = !bitsclr(r0, r1)
+
+; Mask generate from predicate
+declare i64 @llvm.hexagon.C2.mask(i32)
+define i64 @C2_mask(i32 %a) {
+ %z = call i64 @llvm.hexagon.C2.mask(i32 %a)
+ ret i64 %z
+}
+; CHECK: = mask(p0)
+
+; Check for TLB match
+declare i32 @llvm.hexagon.A4.tlbmatch(i64, i32)
+define i32 @A4_tlbmatch(i64 %a, i32 %b) {
+ %z = call i32 @llvm.hexagon.A4.tlbmatch(i64 %a, i32 %b)
+ ret i32 %z
+}
+; CHECK: p0 = tlbmatch(r1:0, r2)
+
+; Test bit
+declare i32 @llvm.hexagon.S2.tstbit.i(i32, i32)
+define i32 @S2_tstbit_i(i32 %a) {
+ %z = call i32 @llvm.hexagon.S2.tstbit.i(i32 %a, i32 0)
+ ret i32 %z
+}
+; CHECK: p0 = tstbit(r0, #0)
+
+declare i32 @llvm.hexagon.S4.ntstbit.i(i32, i32)
+define i32 @S4_ntstbit_i(i32 %a) {
+ %z = call i32 @llvm.hexagon.S4.ntstbit.i(i32 %a, i32 0)
+ ret i32 %z
+}
+; CHECK: p0 = !tstbit(r0, #0)
+
+declare i32 @llvm.hexagon.S2.tstbit.r(i32, i32)
+define i32 @S2_tstbit_r(i32 %a, i32 %b) {
+ %z = call i32 @llvm.hexagon.S2.tstbit.r(i32 %a, i32 %b)
+ ret i32 %z
+}
+; CHECK: p0 = tstbit(r0, r1)
+
+declare i32 @llvm.hexagon.S4.ntstbit.r(i32, i32)
+define i32 @S4_ntstbit_r(i32 %a, i32 %b) {
+ %z = call i32 @llvm.hexagon.S4.ntstbit.r(i32 %a, i32 %b)
+ ret i32 %z
+}
+; CHECK: p0 = !tstbit(r0, r1)
+
+; Vector compare halfwords
+declare i32 @llvm.hexagon.A2.vcmpheq(i64, i64)
+define i32 @A2_vcmpheq(i64 %a, i64 %b) {
+ %z = call i32 @llvm.hexagon.A2.vcmpheq(i64 %a, i64 %b)
+ ret i32 %z
+}
+; CHECK: p0 = vcmph.eq(r1:0, r3:2)
+
+declare i32 @llvm.hexagon.A2.vcmphgt(i64, i64)
+define i32 @A2_vcmphgt(i64 %a, i64 %b) {
+ %z = call i32 @llvm.hexagon.A2.vcmphgt(i64 %a, i64 %b)
+ ret i32 %z
+}
+; CHECK: p0 = vcmph.gt(r1:0, r3:2)
+
+declare i32 @llvm.hexagon.A2.vcmphgtu(i64, i64)
+define i32 @A2_vcmphgtu(i64 %a, i64 %b) {
+ %z = call i32 @llvm.hexagon.A2.vcmphgtu(i64 %a, i64 %b)
+ ret i32 %z
+}
+; CHECK: p0 = vcmph.gtu(r1:0, r3:2)
+
+declare i32 @llvm.hexagon.A4.vcmpheqi(i64, i32)
+define i32 @A4_vcmpheqi(i64 %a) {
+ %z = call i32 @llvm.hexagon.A4.vcmpheqi(i64 %a, i32 0)
+ ret i32 %z
+}
+; CHECK: p0 = vcmph.eq(r1:0, #0)
+
+declare i32 @llvm.hexagon.A4.vcmphgti(i64, i32)
+define i32 @A4_vcmphgti(i64 %a) {
+ %z = call i32 @llvm.hexagon.A4.vcmphgti(i64 %a, i32 0)
+ ret i32 %z
+}
+; CHECK: p0 = vcmph.gt(r1:0, #0)
+
+declare i32 @llvm.hexagon.A4.vcmphgtui(i64, i32)
+define i32 @A4_vcmphgtui(i64 %a) {
+ %z = call i32 @llvm.hexagon.A4.vcmphgtui(i64 %a, i32 0)
+ ret i32 %z
+}
+; CHECK: p0 = vcmph.gtu(r1:0, #0)
+
+; Vector compare bytes for any match
+declare i32 @llvm.hexagon.A4.vcmpbeq.any(i64, i64)
+define i32 @A4_vcmpbeq_any(i64 %a, i64 %b) {
+ %z = call i32 @llvm.hexagon.A4.vcmpbeq.any(i64 %a, i64 %b)
+ ret i32 %z
+}
+; CHECK: p0 = any8(vcmpb.eq(r1:0, r3:2))
+
+; Vector compare bytes
+declare i32 @llvm.hexagon.A2.vcmpbeq(i64, i64)
+define i32 @A2_vcmpbeq(i64 %a, i64 %b) {
+ %z = call i32 @llvm.hexagon.A2.vcmpbeq(i64 %a, i64 %b)
+ ret i32 %z
+}
+; CHECK: p0 = vcmpb.eq(r1:0, r3:2)
+
+declare i32 @llvm.hexagon.A2.vcmpbgtu(i64, i64)
+define i32 @A2_vcmpbgtu(i64 %a, i64 %b) {
+ %z = call i32 @llvm.hexagon.A2.vcmpbgtu(i64 %a, i64 %b)
+ ret i32 %z
+}
+; CHECK: p0 = vcmpb.gtu(r1:0, r3:2)
+
+declare i32 @llvm.hexagon.A4.vcmpbgt(i64, i64)
+define i32 @A4_vcmpbgt(i64 %a, i64 %b) {
+ %z = call i32 @llvm.hexagon.A4.vcmpbgt(i64 %a, i64 %b)
+ ret i32 %z
+}
+; CHECK: p0 = vcmpb.gt(r1:0, r3:2)
+
+declare i32 @llvm.hexagon.A4.vcmpbeqi(i64, i32)
+define i32 @A4_vcmpbeqi(i64 %a) {
+ %z = call i32 @llvm.hexagon.A4.vcmpbeqi(i64 %a, i32 0)
+ ret i32 %z
+}
+; CHECK: p0 = vcmpb.eq(r1:0, #0)
+
+declare i32 @llvm.hexagon.A4.vcmpbgti(i64, i32)
+define i32 @A4_vcmpbgti(i64 %a) {
+ %z = call i32 @llvm.hexagon.A4.vcmpbgti(i64 %a, i32 0)
+ ret i32 %z
+}
+; CHECK: p0 = vcmpb.gt(r1:0, #0)
+
+declare i32 @llvm.hexagon.A4.vcmpbgtui(i64, i32)
+define i32 @A4_vcmpbgtui(i64 %a) {
+ %z = call i32 @llvm.hexagon.A4.vcmpbgtui(i64 %a, i32 0)
+ ret i32 %z
+}
+; CHECK: p0 = vcmpb.gtu(r1:0, #0)
+
+; Vector compare words
+declare i32 @llvm.hexagon.A2.vcmpweq(i64, i64)
+define i32 @A2_vcmpweq(i64 %a, i64 %b) {
+ %z = call i32 @llvm.hexagon.A2.vcmpweq(i64 %a, i64 %b)
+ ret i32 %z
+}
+; CHECK: p0 = vcmpw.eq(r1:0, r3:2)
+
+declare i32 @llvm.hexagon.A2.vcmpwgt(i64, i64)
+define i32 @A2_vcmpwgt(i64 %a, i64 %b) {
+ %z = call i32 @llvm.hexagon.A2.vcmpwgt(i64 %a, i64 %b)
+ ret i32 %z
+}
+; CHECK: p0 = vcmpw.gt(r1:0, r3:2)
+
+declare i32 @llvm.hexagon.A2.vcmpwgtu(i64, i64)
+define i32 @A2_vcmpwgtu(i64 %a, i64 %b) {
+ %z = call i32 @llvm.hexagon.A2.vcmpwgtu(i64 %a, i64 %b)
+ ret i32 %z
+}
+; CHECK: p0 = vcmpw.gtu(r1:0, r3:2)
+
+declare i32 @llvm.hexagon.A4.vcmpweqi(i64, i32)
+define i32 @A4_vcmpweqi(i64 %a) {
+ %z = call i32 @llvm.hexagon.A4.vcmpweqi(i64 %a, i32 0)
+ ret i32 %z
+}
+; CHECK: p0 = vcmpw.eq(r1:0, #0)
+
+declare i32 @llvm.hexagon.A4.vcmpwgti(i64, i32)
+define i32 @A4_vcmpwgti(i64 %a) {
+ %z = call i32 @llvm.hexagon.A4.vcmpwgti(i64 %a, i32 0)
+ ret i32 %z
+}
+; CHECK: p0 = vcmpw.gt(r1:0, #0)
+
+declare i32 @llvm.hexagon.A4.vcmpwgtui(i64, i32)
+define i32 @A4_vcmpwgtui(i64 %a) {
+ %z = call i32 @llvm.hexagon.A4.vcmpwgtui(i64 %a, i32 0)
+ ret i32 %z
+}
+; CHECK: p0 = vcmpw.gtu(r1:0, #0)
+
+; Viterbi pack even and odd predicate bits
+declare i32 @llvm.hexagon.C2.vitpack(i32, i32)
+define i32 @C2_vitpack(i32 %a, i32 %b) {
+ %z = call i32 @llvm.hexagon.C2.vitpack(i32 %a, i32 %b)
+ ret i32 %z
+}
+; CHECK: r0 = vitpack(p1, p0)
+
+; Vector mux
+declare i64 @llvm.hexagon.C2.vmux(i32, i64, i64)
+define i64 @C2_vmux(i32 %a, i64 %b, i64 %c) {
+ %z = call i64 @llvm.hexagon.C2.vmux(i32 %a, i64 %b, i64 %c)
+ ret i64 %z
+}
+; CHECK: = vmux(p0, r3:2, r5:4)
diff --git a/test/CodeGen/Hexagon/intrinsics/xtype_shift.ll b/test/CodeGen/Hexagon/intrinsics/xtype_shift.ll
new file mode 100644
index 0000000..c84999b
--- /dev/null
+++ b/test/CodeGen/Hexagon/intrinsics/xtype_shift.ll
@@ -0,0 +1,723 @@
+; RUN: llc -march=hexagon -O0 < %s | FileCheck %s
+; Hexagon Programmer's Reference Manual 11.10.8 XTYPE/SHIFT
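+;
+; Immediate shift amounts are all passed as constant zero, so every
+; immediate form below can be matched with the same #0 operand.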
+
+; Shift by immediate
+declare i64 @llvm.hexagon.S2.asr.i.p(i64, i32)
+define i64 @S2_asr_i_p(i64 %a) {
+ %z = call i64 @llvm.hexagon.S2.asr.i.p(i64 %a, i32 0)
+ ret i64 %z
+}
+; CHECK: r1:0 = asr(r1:0, #0)
+
+declare i64 @llvm.hexagon.S2.lsr.i.p(i64, i32)
+define i64 @S2_lsr_i_p(i64 %a) {
+ %z = call i64 @llvm.hexagon.S2.lsr.i.p(i64 %a, i32 0)
+ ret i64 %z
+}
+; CHECK: r1:0 = lsr(r1:0, #0)
+
+declare i64 @llvm.hexagon.S2.asl.i.p(i64, i32)
+define i64 @S2_asl_i_p(i64 %a) {
+ %z = call i64 @llvm.hexagon.S2.asl.i.p(i64 %a, i32 0)
+ ret i64 %z
+}
+; CHECK: r1:0 = asl(r1:0, #0)
+
+declare i32 @llvm.hexagon.S2.asr.i.r(i32, i32)
+define i32 @S2_asr_i_r(i32 %a) {
+ %z = call i32 @llvm.hexagon.S2.asr.i.r(i32 %a, i32 0)
+ ret i32 %z
+}
+; CHECK: r0 = asr(r0, #0)
+
+declare i32 @llvm.hexagon.S2.lsr.i.r(i32, i32)
+define i32 @S2_lsr_i_r(i32 %a) {
+ %z = call i32 @llvm.hexagon.S2.lsr.i.r(i32 %a, i32 0)
+ ret i32 %z
+}
+; CHECK: r0 = lsr(r0, #0)
+
+declare i32 @llvm.hexagon.S2.asl.i.r(i32, i32)
+define i32 @S2_asl_i_r(i32 %a) {
+ %z = call i32 @llvm.hexagon.S2.asl.i.r(i32 %a, i32 0)
+ ret i32 %z
+}
+; CHECK: r0 = asl(r0, #0)
+
+; Shift by immediate and accumulate
+declare i64 @llvm.hexagon.S2.asr.i.p.nac(i64, i64, i32)
+define i64 @S2_asr_i_p_nac(i64 %a, i64 %b) {
+ %z = call i64 @llvm.hexagon.S2.asr.i.p.nac(i64 %a, i64 %b, i32 0)
+ ret i64 %z
+}
+; CHECK: r1:0 -= asr(r3:2, #0)
+
+declare i64 @llvm.hexagon.S2.lsr.i.p.nac(i64, i64, i32)
+define i64 @S2_lsr_i_p_nac(i64 %a, i64 %b) {
+ %z = call i64 @llvm.hexagon.S2.lsr.i.p.nac(i64 %a, i64 %b, i32 0)
+ ret i64 %z
+}
+; CHECK: r1:0 -= lsr(r3:2, #0)
+
+declare i64 @llvm.hexagon.S2.asl.i.p.nac(i64, i64, i32)
+define i64 @S2_asl_i_p_nac(i64 %a, i64 %b) {
+ %z = call i64 @llvm.hexagon.S2.asl.i.p.nac(i64 %a, i64 %b, i32 0)
+ ret i64 %z
+}
+; CHECK: r1:0 -= asl(r3:2, #0)
+
+declare i64 @llvm.hexagon.S2.asr.i.p.acc(i64, i64, i32)
+define i64 @S2_asr_i_p_acc(i64 %a, i64 %b) {
+ %z = call i64 @llvm.hexagon.S2.asr.i.p.acc(i64 %a, i64 %b, i32 0)
+ ret i64 %z
+}
+; CHECK: r1:0 += asr(r3:2, #0)
+
+declare i64 @llvm.hexagon.S2.lsr.i.p.acc(i64, i64, i32)
+define i64 @S2_lsr_i_p_acc(i64 %a, i64 %b) {
+ %z = call i64 @llvm.hexagon.S2.lsr.i.p.acc(i64 %a, i64 %b, i32 0)
+ ret i64 %z
+}
+; CHECK: r1:0 += lsr(r3:2, #0)
+
+declare i64 @llvm.hexagon.S2.asl.i.p.acc(i64, i64, i32)
+define i64 @S2_asl_i_p_acc(i64 %a, i64 %b) {
+ %z = call i64 @llvm.hexagon.S2.asl.i.p.acc(i64 %a, i64 %b, i32 0)
+ ret i64 %z
+}
+; CHECK: r1:0 += asl(r3:2, #0)
+
+declare i32 @llvm.hexagon.S2.asr.i.r.nac(i32, i32, i32)
+define i32 @S2_asr_i_r_nac(i32 %a, i32 %b) {
+ %z = call i32 @llvm.hexagon.S2.asr.i.r.nac(i32 %a, i32 %b, i32 0)
+ ret i32 %z
+}
+; CHECK: r0 -= asr(r1, #0)
+
+declare i32 @llvm.hexagon.S2.lsr.i.r.nac(i32, i32, i32)
+define i32 @S2_lsr_i_r_nac(i32 %a, i32 %b) {
+ %z = call i32 @llvm.hexagon.S2.lsr.i.r.nac(i32 %a, i32 %b, i32 0)
+ ret i32 %z
+}
+; CHECK: r0 -= lsr(r1, #0)
+
+declare i32 @llvm.hexagon.S2.asl.i.r.nac(i32, i32, i32)
+define i32 @S2_asl_i_r_nac(i32 %a, i32 %b) {
+ %z = call i32 @llvm.hexagon.S2.asl.i.r.nac(i32 %a, i32 %b, i32 0)
+ ret i32 %z
+}
+; CHECK: r0 -= asl(r1, #0)
+
+declare i32 @llvm.hexagon.S2.asr.i.r.acc(i32, i32, i32)
+define i32 @S2_asr_i_r_acc(i32 %a, i32 %b) {
+ %z = call i32 @llvm.hexagon.S2.asr.i.r.acc(i32 %a, i32 %b, i32 0)
+ ret i32 %z
+}
+; CHECK: r0 += asr(r1, #0)
+
+declare i32 @llvm.hexagon.S2.lsr.i.r.acc(i32, i32, i32)
+define i32 @S2_lsr_i_r_acc(i32 %a, i32 %b) {
+ %z = call i32 @llvm.hexagon.S2.lsr.i.r.acc(i32 %a, i32 %b, i32 0)
+ ret i32 %z
+}
+; CHECK: r0 += lsr(r1, #0)
+
+declare i32 @llvm.hexagon.S2.asl.i.r.acc(i32, i32, i32)
+define i32 @S2_asl_i_r_acc(i32 %a, i32 %b) {
+ %z = call i32 @llvm.hexagon.S2.asl.i.r.acc(i32 %a, i32 %b, i32 0)
+ ret i32 %z
+}
+; CHECK: r0 += asl(r1, #0)
+
+; Shift by immediate and add
+declare i32 @llvm.hexagon.S4.addi.asl.ri(i32, i32, i32)
+define i32 @S4_addi_asl_ri(i32 %a) {
+ %z = call i32 @llvm.hexagon.S4.addi.asl.ri(i32 0, i32 %a, i32 0)
+ ret i32 %z
+}
+; CHECK: r0 = add(#0, asl(r0, #0))
+
+declare i32 @llvm.hexagon.S4.subi.asl.ri(i32, i32, i32)
+define i32 @S4_subi_asl_ri(i32 %a) {
+ %z = call i32 @llvm.hexagon.S4.subi.asl.ri(i32 0, i32 %a, i32 0)
+ ret i32 %z
+}
+; CHECK: r0 = sub(#0, asl(r0, #0))
+
+declare i32 @llvm.hexagon.S4.addi.lsr.ri(i32, i32, i32)
+define i32 @S4_addi_lsr_ri(i32 %a) {
+ %z = call i32 @llvm.hexagon.S4.addi.lsr.ri(i32 0, i32 %a, i32 0)
+ ret i32 %z
+}
+; CHECK: r0 = add(#0, lsr(r0, #0))
+
+declare i32 @llvm.hexagon.S4.subi.lsr.ri(i32, i32, i32)
+define i32 @S4_subi_lsr_ri(i32 %a) {
+ %z = call i32 @llvm.hexagon.S4.subi.lsr.ri(i32 0, i32 %a, i32 0)
+ ret i32 %z
+}
+; CHECK: r0 = sub(#0, lsr(r0, #0))
+
+declare i32 @llvm.hexagon.S2.addasl.rrri(i32, i32, i32)
+define i32 @S2_addasl_rrri(i32 %a, i32 %b) {
+ %z = call i32 @llvm.hexagon.S2.addasl.rrri(i32 %a, i32 %b, i32 0)
+ ret i32 %z
+}
+; CHECK: r0 = addasl(r0, r1, #0)
+
+; Shift by immediate and logical
+declare i64 @llvm.hexagon.S2.asr.i.p.and(i64, i64, i32)
+define i64 @S2_asr_i_p_and(i64 %a, i64 %b) {
+ %z = call i64 @llvm.hexagon.S2.asr.i.p.and(i64 %a, i64 %b, i32 0)
+ ret i64 %z
+}
+; CHECK: r1:0 &= asr(r3:2, #0)
+
+declare i64 @llvm.hexagon.S2.lsr.i.p.and(i64, i64, i32)
+define i64 @S2_lsr_i_p_and(i64 %a, i64 %b) {
+ %z = call i64 @llvm.hexagon.S2.lsr.i.p.and(i64 %a, i64 %b, i32 0)
+ ret i64 %z
+}
+; CHECK: r1:0 &= lsr(r3:2, #0)
+
+declare i64 @llvm.hexagon.S2.asl.i.p.and(i64, i64, i32)
+define i64 @S2_asl_i_p_and(i64 %a, i64 %b) {
+ %z = call i64 @llvm.hexagon.S2.asl.i.p.and(i64 %a, i64 %b, i32 0)
+ ret i64 %z
+}
+; CHECK: r1:0 &= asl(r3:2, #0)
+
+declare i64 @llvm.hexagon.S2.asr.i.p.or(i64, i64, i32)
+define i64 @S2_asr_i_p_or(i64 %a, i64 %b) {
+ %z = call i64 @llvm.hexagon.S2.asr.i.p.or(i64 %a, i64 %b, i32 0)
+ ret i64 %z
+}
+; CHECK: r1:0 |= asr(r3:2, #0)
+
+declare i64 @llvm.hexagon.S2.lsr.i.p.or(i64, i64, i32)
+define i64 @S2_lsr_i_p_or(i64 %a, i64 %b) {
+ %z = call i64 @llvm.hexagon.S2.lsr.i.p.or(i64 %a, i64 %b, i32 0)
+ ret i64 %z
+}
+; CHECK: r1:0 |= lsr(r3:2, #0)
+
+declare i64 @llvm.hexagon.S2.asl.i.p.or(i64, i64, i32)
+define i64 @S2_asl_i_p_or(i64 %a, i64 %b) {
+ %z = call i64 @llvm.hexagon.S2.asl.i.p.or(i64 %a, i64 %b, i32 0)
+ ret i64 %z
+}
+; CHECK: r1:0 |= asl(r3:2, #0)
+
+declare i64 @llvm.hexagon.S2.lsr.i.p.xacc(i64, i64, i32)
+define i64 @S2_lsr_i_p_xacc(i64 %a, i64 %b) {
+ %z = call i64 @llvm.hexagon.S2.lsr.i.p.xacc(i64 %a, i64 %b, i32 0)
+ ret i64 %z
+}
+; CHECK: r1:0 ^= lsr(r3:2, #0)
+
+declare i64 @llvm.hexagon.S2.asl.i.p.xacc(i64, i64, i32)
+define i64 @S2_asl_i_p_xacc(i64 %a, i64 %b) {
+ %z = call i64 @llvm.hexagon.S2.asl.i.p.xacc(i64 %a, i64 %b, i32 0)
+ ret i64 %z
+}
+; CHECK: r1:0 ^= asl(r3:2, #0)
+
+declare i32 @llvm.hexagon.S2.asr.i.r.and(i32, i32, i32)
+define i32 @S2_asr_i_r_and(i32 %a, i32 %b) {
+ %z = call i32 @llvm.hexagon.S2.asr.i.r.and(i32 %a, i32 %b, i32 0)
+ ret i32 %z
+}
+; CHECK: r0 &= asr(r1, #0)
+
+declare i32 @llvm.hexagon.S2.lsr.i.r.and(i32, i32, i32)
+define i32 @S2_lsr_i_r_and(i32 %a, i32 %b) {
+ %z = call i32 @llvm.hexagon.S2.lsr.i.r.and(i32 %a, i32 %b, i32 0)
+ ret i32 %z
+}
+; CHECK: r0 &= lsr(r1, #0)
+
+declare i32 @llvm.hexagon.S2.asl.i.r.and(i32, i32, i32)
+define i32 @S2_asl_i_r_and(i32 %a, i32 %b) {
+ %z = call i32 @llvm.hexagon.S2.asl.i.r.and(i32 %a, i32 %b, i32 0)
+ ret i32 %z
+}
+; CHECK: r0 &= asl(r1, #0)
+
+declare i32 @llvm.hexagon.S2.asr.i.r.or(i32, i32, i32)
+define i32 @S2_asr_i_r_or(i32 %a, i32 %b) {
+ %z = call i32 @llvm.hexagon.S2.asr.i.r.or(i32 %a, i32 %b, i32 0)
+ ret i32 %z
+}
+; CHECK: r0 |= asr(r1, #0)
+
+declare i32 @llvm.hexagon.S2.lsr.i.r.or(i32, i32, i32)
+define i32 @S2_lsr_i_r_or(i32 %a, i32 %b) {
+ %z = call i32 @llvm.hexagon.S2.lsr.i.r.or(i32 %a, i32 %b, i32 0)
+ ret i32 %z
+}
+; CHECK: r0 |= lsr(r1, #0)
+
+declare i32 @llvm.hexagon.S2.asl.i.r.or(i32, i32, i32)
+define i32 @S2_asl_i_r_or(i32 %a, i32 %b) {
+ %z = call i32 @llvm.hexagon.S2.asl.i.r.or(i32 %a, i32 %b, i32 0)
+ ret i32 %z
+}
+; CHECK: r0 |= asl(r1, #0)
+
+declare i32 @llvm.hexagon.S2.lsr.i.r.xacc(i32, i32, i32)
+define i32 @S2_lsr_i_r_xacc(i32 %a, i32 %b) {
+ %z = call i32 @llvm.hexagon.S2.lsr.i.r.xacc(i32 %a, i32 %b, i32 0)
+ ret i32 %z
+}
+; CHECK: r0 ^= lsr(r1, #0)
+
+declare i32 @llvm.hexagon.S2.asl.i.r.xacc(i32, i32, i32)
+define i32 @S2_asl_i_r_xacc(i32 %a, i32 %b) {
+ %z = call i32 @llvm.hexagon.S2.asl.i.r.xacc(i32 %a, i32 %b, i32 0)
+ ret i32 %z
+}
+; CHECK: r0 ^= asl(r1, #0)
+
+declare i32 @llvm.hexagon.S4.andi.asl.ri(i32, i32, i32)
+define i32 @S4_andi_asl_ri(i32 %a) {
+ %z = call i32 @llvm.hexagon.S4.andi.asl.ri(i32 0, i32 %a, i32 0)
+ ret i32 %z
+}
+; CHECK: r0 = and(#0, asl(r0, #0))
+
+declare i32 @llvm.hexagon.S4.ori.asl.ri(i32, i32, i32)
+define i32 @S4_ori_asl_ri(i32 %a) {
+ %z = call i32 @llvm.hexagon.S4.ori.asl.ri(i32 0, i32 %a, i32 0)
+ ret i32 %z
+}
+; CHECK: r0 = or(#0, asl(r0, #0))
+
+declare i32 @llvm.hexagon.S4.andi.lsr.ri(i32, i32, i32)
+define i32 @S4_andi_lsr_ri(i32 %a) {
+ %z = call i32 @llvm.hexagon.S4.andi.lsr.ri(i32 0, i32 %a, i32 0)
+ ret i32 %z
+}
+; CHECK: r0 = and(#0, lsr(r0, #0))
+
+declare i32 @llvm.hexagon.S4.ori.lsr.ri(i32, i32, i32)
+define i32 @S4_ori_lsr_ri(i32 %a) {
+ %z = call i32 @llvm.hexagon.S4.ori.lsr.ri(i32 0, i32 %a, i32 0)
+ ret i32 %z
+}
+; CHECK: r0 = or(#0, lsr(r0, #0))
+
+; Shift right by immediate with rounding
+declare i64 @llvm.hexagon.S2.asr.i.p.rnd(i64, i32)
+define i64 @S2_asr_i_p_rnd(i64 %a) {
+ %z = call i64 @llvm.hexagon.S2.asr.i.p.rnd(i64 %a, i32 0)
+ ret i64 %z
+}
+; CHECK: r1:0 = asr(r1:0, #0):rnd
+
+declare i32 @llvm.hexagon.S2.asr.i.r.rnd(i32, i32)
+define i32 @S2_asr_i_r_rnd(i32 %a) {
+ %z = call i32 @llvm.hexagon.S2.asr.i.r.rnd(i32 %a, i32 0)
+ ret i32 %z
+}
+; CHECK: r0 = asr(r0, #0):rnd
+
+; Shift left by immediate with saturation
+declare i32 @llvm.hexagon.S2.asl.i.r.sat(i32, i32)
+define i32 @S2_asl_i_r_sat(i32 %a) {
+ %z = call i32 @llvm.hexagon.S2.asl.i.r.sat(i32 %a, i32 0)
+ ret i32 %z
+}
+; CHECK: r0 = asl(r0, #0):sat
+
+; Shift by register
+declare i64 @llvm.hexagon.S2.asr.r.p(i64, i32)
+define i64 @S2_asr_r_p(i64 %a, i32 %b) {
+ %z = call i64 @llvm.hexagon.S2.asr.r.p(i64 %a, i32 %b)
+ ret i64 %z
+}
+; CHECK: r1:0 = asr(r1:0, r2)
+
+declare i64 @llvm.hexagon.S2.lsr.r.p(i64, i32)
+define i64 @S2_lsr_r_p(i64 %a, i32 %b) {
+ %z = call i64 @llvm.hexagon.S2.lsr.r.p(i64 %a, i32 %b)
+ ret i64 %z
+}
+; CHECK: r1:0 = lsr(r1:0, r2)
+
+declare i64 @llvm.hexagon.S2.asl.r.p(i64, i32)
+define i64 @S2_asl_r_p(i64 %a, i32 %b) {
+ %z = call i64 @llvm.hexagon.S2.asl.r.p(i64 %a, i32 %b)
+ ret i64 %z
+}
+; CHECK: r1:0 = asl(r1:0, r2)
+
+declare i64 @llvm.hexagon.S2.lsl.r.p(i64, i32)
+define i64 @S2_lsl_r_p(i64 %a, i32 %b) {
+ %z = call i64 @llvm.hexagon.S2.lsl.r.p(i64 %a, i32 %b)
+ ret i64 %z
+}
+; CHECK: r1:0 = lsl(r1:0, r2)
+
+declare i32 @llvm.hexagon.S2.asr.r.r(i32, i32)
+define i32 @S2_asr_r_r(i32 %a, i32 %b) {
+ %z = call i32 @llvm.hexagon.S2.asr.r.r(i32 %a, i32 %b)
+ ret i32 %z
+}
+; CHECK: r0 = asr(r0, r1)
+
+declare i32 @llvm.hexagon.S2.lsr.r.r(i32, i32)
+define i32 @S2_lsr_r_r(i32 %a, i32 %b) {
+ %z = call i32 @llvm.hexagon.S2.lsr.r.r(i32 %a, i32 %b)
+ ret i32 %z
+}
+; CHECK: r0 = lsr(r0, r1)
+
+declare i32 @llvm.hexagon.S2.asl.r.r(i32, i32)
+define i32 @S2_asl_r_r(i32 %a, i32 %b) {
+ %z = call i32 @llvm.hexagon.S2.asl.r.r(i32 %a, i32 %b)
+ ret i32 %z
+}
+; CHECK: r0 = asl(r0, r1)
+
+declare i32 @llvm.hexagon.S2.lsl.r.r(i32, i32)
+define i32 @S2_lsl_r_r(i32 %a, i32 %b) {
+ %z = call i32 @llvm.hexagon.S2.lsl.r.r(i32 %a, i32 %b)
+ ret i32 %z
+}
+; CHECK: r0 = lsl(r0, r1)
+
+declare i32 @llvm.hexagon.S4.lsli(i32, i32)
+define i32 @S4_lsli(i32 %a) {
+ %z = call i32 @llvm.hexagon.S4.lsli(i32 0, i32 %a)
+ ret i32 %z
+}
+; CHECK: r0 = lsl(#0, r0)
+
+; Shift by register and accumulate
+declare i64 @llvm.hexagon.S2.asr.r.p.nac(i64, i64, i32)
+define i64 @S2_asr_r_p_nac(i64 %a, i64 %b, i32 %c) {
+ %z = call i64 @llvm.hexagon.S2.asr.r.p.nac(i64 %a, i64 %b, i32 %c)
+ ret i64 %z
+}
+; CHECK: r1:0 -= asr(r3:2, r4)
+
+declare i64 @llvm.hexagon.S2.lsr.r.p.nac(i64, i64, i32)
+define i64 @S2_lsr_r_p_nac(i64 %a, i64 %b, i32 %c) {
+ %z = call i64 @llvm.hexagon.S2.lsr.r.p.nac(i64 %a, i64 %b, i32 %c)
+ ret i64 %z
+}
+; CHECK: r1:0 -= lsr(r3:2, r4)
+
+declare i64 @llvm.hexagon.S2.asl.r.p.nac(i64, i64, i32)
+define i64 @S2_asl_r_p_nac(i64 %a, i64 %b, i32 %c) {
+ %z = call i64 @llvm.hexagon.S2.asl.r.p.nac(i64 %a, i64 %b, i32 %c)
+ ret i64 %z
+}
+; CHECK: r1:0 -= asl(r3:2, r4)
+
+declare i64 @llvm.hexagon.S2.lsl.r.p.nac(i64, i64, i32)
+define i64 @S2_lsl_r_p_nac(i64 %a, i64 %b, i32 %c) {
+ %z = call i64 @llvm.hexagon.S2.lsl.r.p.nac(i64 %a, i64 %b, i32 %c)
+ ret i64 %z
+}
+; CHECK: r1:0 -= lsl(r3:2, r4)
+
+declare i64 @llvm.hexagon.S2.asr.r.p.acc(i64, i64, i32)
+define i64 @S2_asr_r_p_acc(i64 %a, i64 %b, i32 %c) {
+ %z = call i64 @llvm.hexagon.S2.asr.r.p.acc(i64 %a, i64 %b, i32 %c)
+ ret i64 %z
+}
+; CHECK: r1:0 += asr(r3:2, r4)
+
+declare i64 @llvm.hexagon.S2.lsr.r.p.acc(i64, i64, i32)
+define i64 @S2_lsr_r_p_acc(i64 %a, i64 %b, i32 %c) {
+ %z = call i64 @llvm.hexagon.S2.lsr.r.p.acc(i64 %a, i64 %b, i32 %c)
+ ret i64 %z
+}
+; CHECK: r1:0 += lsr(r3:2, r4)
+
+declare i64 @llvm.hexagon.S2.asl.r.p.acc(i64, i64, i32)
+define i64 @S2_asl_r_p_acc(i64 %a, i64 %b, i32 %c) {
+ %z = call i64 @llvm.hexagon.S2.asl.r.p.acc(i64 %a, i64 %b, i32 %c)
+ ret i64 %z
+}
+; CHECK: r1:0 += asl(r3:2, r4)
+
+declare i64 @llvm.hexagon.S2.lsl.r.p.acc(i64, i64, i32)
+define i64 @S2_lsl_r_p_acc(i64 %a, i64 %b, i32 %c) {
+ %z = call i64 @llvm.hexagon.S2.lsl.r.p.acc(i64 %a, i64 %b, i32 %c)
+ ret i64 %z
+}
+; CHECK: r1:0 += lsl(r3:2, r4)
+
+declare i32 @llvm.hexagon.S2.asr.r.r.nac(i32, i32, i32)
+define i32 @S2_asr_r_r_nac(i32 %a, i32 %b, i32 %c) {
+ %z = call i32 @llvm.hexagon.S2.asr.r.r.nac(i32 %a, i32 %b, i32 %c)
+ ret i32 %z
+}
+; CHECK: r0 -= asr(r1, r2)
+
+declare i32 @llvm.hexagon.S2.lsr.r.r.nac(i32, i32, i32)
+define i32 @S2_lsr_r_r_nac(i32 %a, i32 %b, i32 %c) {
+ %z = call i32 @llvm.hexagon.S2.lsr.r.r.nac(i32 %a, i32 %b, i32 %c)
+ ret i32 %z
+}
+; CHECK: r0 -= lsr(r1, r2)
+
+declare i32 @llvm.hexagon.S2.asl.r.r.nac(i32, i32, i32)
+define i32 @S2_asl_r_r_nac(i32 %a, i32 %b, i32 %c) {
+ %z = call i32 @llvm.hexagon.S2.asl.r.r.nac(i32 %a, i32 %b, i32 %c)
+ ret i32 %z
+}
+; CHECK: r0 -= asl(r1, r2)
+
+declare i32 @llvm.hexagon.S2.lsl.r.r.nac(i32, i32, i32)
+define i32 @S2_lsl_r_r_nac(i32 %a, i32 %b, i32 %c) {
+ %z = call i32 @llvm.hexagon.S2.lsl.r.r.nac(i32 %a, i32 %b, i32 %c)
+ ret i32 %z
+}
+; CHECK: r0 -= lsl(r1, r2)
+
+declare i32 @llvm.hexagon.S2.asr.r.r.acc(i32, i32, i32)
+define i32 @S2_asr_r_r_acc(i32 %a, i32 %b, i32 %c) {
+ %z = call i32 @llvm.hexagon.S2.asr.r.r.acc(i32 %a, i32 %b, i32 %c)
+ ret i32 %z
+}
+; CHECK: r0 += asr(r1, r2)
+
+declare i32 @llvm.hexagon.S2.lsr.r.r.acc(i32, i32, i32)
+define i32 @S2_lsr_r_r_acc(i32 %a, i32 %b, i32 %c) {
+ %z = call i32 @llvm.hexagon.S2.lsr.r.r.acc(i32 %a, i32 %b, i32 %c)
+ ret i32 %z
+}
+; CHECK: r0 += lsr(r1, r2)
+
+declare i32 @llvm.hexagon.S2.asl.r.r.acc(i32, i32, i32)
+define i32 @S2_asl_r_r_acc(i32 %a, i32 %b, i32 %c) {
+ %z = call i32 @llvm.hexagon.S2.asl.r.r.acc(i32 %a, i32 %b, i32 %c)
+ ret i32 %z
+}
+; CHECK: r0 += asl(r1, r2)
+
+declare i32 @llvm.hexagon.S2.lsl.r.r.acc(i32, i32, i32)
+define i32 @S2_lsl_r_r_acc(i32 %a, i32 %b, i32 %c) {
+ %z = call i32 @llvm.hexagon.S2.lsl.r.r.acc(i32 %a, i32 %b, i32 %c)
+ ret i32 %z
+}
+; CHECK: r0 += lsl(r1, r2)
+
+; Shift by register and logical
+declare i64 @llvm.hexagon.S2.asr.r.p.or(i64, i64, i32)
+define i64 @S2_asr_r_p_or(i64 %a, i64 %b, i32 %c) {
+ %z = call i64 @llvm.hexagon.S2.asr.r.p.or(i64 %a, i64 %b, i32 %c)
+ ret i64 %z
+}
+; CHECK: r1:0 |= asr(r3:2, r4)
+
+declare i64 @llvm.hexagon.S2.lsr.r.p.or(i64, i64, i32)
+define i64 @S2_lsr_r_p_or(i64 %a, i64 %b, i32 %c) {
+ %z = call i64 @llvm.hexagon.S2.lsr.r.p.or(i64 %a, i64 %b, i32 %c)
+ ret i64 %z
+}
+; CHECK: r1:0 |= lsr(r3:2, r4)
+
+declare i64 @llvm.hexagon.S2.asl.r.p.or(i64, i64, i32)
+define i64 @S2_asl_r_p_or(i64 %a, i64 %b, i32 %c) {
+ %z = call i64 @llvm.hexagon.S2.asl.r.p.or(i64 %a, i64 %b, i32 %c)
+ ret i64 %z
+}
+; CHECK: r1:0 |= asl(r3:2, r4)
+
+declare i64 @llvm.hexagon.S2.lsl.r.p.or(i64, i64, i32)
+define i64 @S2_lsl_r_p_or(i64 %a, i64 %b, i32 %c) {
+ %z = call i64 @llvm.hexagon.S2.lsl.r.p.or(i64 %a, i64 %b, i32 %c)
+ ret i64 %z
+}
+; CHECK: r1:0 |= lsl(r3:2, r4)
+
+declare i64 @llvm.hexagon.S2.asr.r.p.and(i64, i64, i32)
+define i64 @S2_asr_r_p_and(i64 %a, i64 %b, i32 %c) {
+ %z = call i64 @llvm.hexagon.S2.asr.r.p.and(i64 %a, i64 %b, i32 %c)
+ ret i64 %z
+}
+; CHECK: r1:0 &= asr(r3:2, r4)
+
+declare i64 @llvm.hexagon.S2.lsr.r.p.and(i64, i64, i32)
+define i64 @S2_lsr_r_p_and(i64 %a, i64 %b, i32 %c) {
+ %z = call i64 @llvm.hexagon.S2.lsr.r.p.and(i64 %a, i64 %b, i32 %c)
+ ret i64 %z
+}
+; CHECK: r1:0 &= lsr(r3:2, r4)
+
+declare i64 @llvm.hexagon.S2.asl.r.p.and(i64, i64, i32)
+define i64 @S2_asl_r_p_and(i64 %a, i64 %b, i32 %c) {
+ %z = call i64 @llvm.hexagon.S2.asl.r.p.and(i64 %a, i64 %b, i32 %c)
+ ret i64 %z
+}
+; CHECK: r1:0 &= asl(r3:2, r4)
+
+declare i64 @llvm.hexagon.S2.lsl.r.p.and(i64, i64, i32)
+define i64 @S2_lsl_r_p_and(i64 %a, i64 %b, i32 %c) {
+ %z = call i64 @llvm.hexagon.S2.lsl.r.p.and(i64 %a, i64 %b, i32 %c)
+ ret i64 %z
+}
+; CHECK: r1:0 &= lsl(r3:2, r4)
+
+declare i32 @llvm.hexagon.S2.asr.r.r.or(i32, i32, i32)
+define i32 @S2_asr_r_r_or(i32 %a, i32 %b, i32 %c) {
+ %z = call i32 @llvm.hexagon.S2.asr.r.r.or(i32 %a, i32 %b, i32 %c)
+ ret i32 %z
+}
+; CHECK: r0 |= asr(r1, r2)
+
+declare i32 @llvm.hexagon.S2.lsr.r.r.or(i32, i32, i32)
+define i32 @S2_lsr_r_r_or(i32 %a, i32 %b, i32 %c) {
+ %z = call i32 @llvm.hexagon.S2.lsr.r.r.or(i32 %a, i32 %b, i32 %c)
+ ret i32 %z
+}
+; CHECK: r0 |= lsr(r1, r2)
+
+declare i32 @llvm.hexagon.S2.asl.r.r.or(i32, i32, i32)
+define i32 @S2_asl_r_r_or(i32 %a, i32 %b, i32 %c) {
+ %z = call i32 @llvm.hexagon.S2.asl.r.r.or(i32 %a, i32 %b, i32 %c)
+ ret i32 %z
+}
+; CHECK: r0 |= asl(r1, r2)
+
+declare i32 @llvm.hexagon.S2.lsl.r.r.or(i32, i32, i32)
+define i32 @S2_lsl_r_r_or(i32 %a, i32 %b, i32 %c) {
+ %z = call i32 @llvm.hexagon.S2.lsl.r.r.or(i32 %a, i32 %b, i32 %c)
+ ret i32 %z
+}
+; CHECK: r0 |= lsl(r1, r2)
+
+declare i32 @llvm.hexagon.S2.asr.r.r.and(i32, i32, i32)
+define i32 @S2_asr_r_r_and(i32 %a, i32 %b, i32 %c) {
+ %z = call i32 @llvm.hexagon.S2.asr.r.r.and(i32 %a, i32 %b, i32 %c)
+ ret i32 %z
+}
+; CHECK: r0 &= asr(r1, r2)
+
+declare i32 @llvm.hexagon.S2.lsr.r.r.and(i32, i32, i32)
+define i32 @S2_lsr_r_r_and(i32 %a, i32 %b, i32 %c) {
+ %z = call i32 @llvm.hexagon.S2.lsr.r.r.and(i32 %a, i32 %b, i32 %c)
+ ret i32 %z
+}
+; CHECK: r0 &= lsr(r1, r2)
+
+declare i32 @llvm.hexagon.S2.asl.r.r.and(i32, i32, i32)
+define i32 @S2_asl_r_r_and(i32 %a, i32 %b, i32 %c) {
+ %z = call i32 @llvm.hexagon.S2.asl.r.r.and(i32 %a, i32 %b, i32 %c)
+ ret i32 %z
+}
+; CHECK: r0 &= asl(r1, r2)
+
+declare i32 @llvm.hexagon.S2.lsl.r.r.and(i32, i32, i32)
+define i32 @S2_lsl_r_r_and(i32 %a, i32 %b, i32 %c) {
+ %z = call i32 @llvm.hexagon.S2.lsl.r.r.and(i32 %a, i32 %b, i32 %c)
+ ret i32 %z
+}
+; CHECK: r0 &= lsl(r1, r2)
+
+; Shift by register with saturation
+declare i32 @llvm.hexagon.S2.asr.r.r.sat(i32, i32)
+define i32 @S2_asr_r_r_sat(i32 %a, i32 %b) {
+ %z = call i32 @llvm.hexagon.S2.asr.r.r.sat(i32 %a, i32 %b)
+ ret i32 %z
+}
+; CHECK: r0 = asr(r0, r1):sat
+
+declare i32 @llvm.hexagon.S2.asl.r.r.sat(i32, i32)
+define i32 @S2_asl_r_r_sat(i32 %a, i32 %b) {
+ %z = call i32 @llvm.hexagon.S2.asl.r.r.sat(i32 %a, i32 %b)
+ ret i32 %z
+}
+; CHECK: r0 = asl(r0, r1):sat
+
+; Vector shift halfwords by immediate
+declare i64 @llvm.hexagon.S2.asr.i.vh(i64, i32)
+define i64 @S2_asr_i_vh(i64 %a) {
+ %z = call i64 @llvm.hexagon.S2.asr.i.vh(i64 %a, i32 0)
+ ret i64 %z
+}
+; CHECK: r1:0 = vasrh(r1:0, #0)
+
+declare i64 @llvm.hexagon.S2.lsr.i.vh(i64, i32)
+define i64 @S2_lsr_i_vh(i64 %a) {
+ %z = call i64 @llvm.hexagon.S2.lsr.i.vh(i64 %a, i32 0)
+ ret i64 %z
+}
+; CHECK: r1:0 = vlsrh(r1:0, #0)
+
+declare i64 @llvm.hexagon.S2.asl.i.vh(i64, i32)
+define i64 @S2_asl_i_vh(i64 %a) {
+ %z = call i64 @llvm.hexagon.S2.asl.i.vh(i64 %a, i32 0)
+ ret i64 %z
+}
+; CHECK: r1:0 = vaslh(r1:0, #0)
+
+; Vector shift halfwords by register
+declare i64 @llvm.hexagon.S2.asr.r.vh(i64, i32)
+define i64 @S2_asr_r_vh(i64 %a, i32 %b) {
+ %z = call i64 @llvm.hexagon.S2.asr.r.vh(i64 %a, i32 %b)
+ ret i64 %z
+}
+; CHECK: r1:0 = vasrh(r1:0, r2)
+
+declare i64 @llvm.hexagon.S2.lsr.r.vh(i64, i32)
+define i64 @S2_lsr_r_vh(i64 %a, i32 %b) {
+ %z = call i64 @llvm.hexagon.S2.lsr.r.vh(i64 %a, i32 %b)
+ ret i64 %z
+}
+; CHECK: r1:0 = vlsrh(r1:0, r2)
+
+declare i64 @llvm.hexagon.S2.asl.r.vh(i64, i32)
+define i64 @S2_asl_r_vh(i64 %a, i32 %b) {
+ %z = call i64 @llvm.hexagon.S2.asl.r.vh(i64 %a, i32 %b)
+ ret i64 %z
+}
+; CHECK: r1:0 = vaslh(r1:0, r2)
+
+declare i64 @llvm.hexagon.S2.lsl.r.vh(i64, i32)
+define i64 @S2_lsl_r_vh(i64 %a, i32 %b) {
+ %z = call i64 @llvm.hexagon.S2.lsl.r.vh(i64 %a, i32 %b)
+ ret i64 %z
+}
+; CHECK: r1:0 = vlslh(r1:0, r2)
+
+; Vector shift words by immediate
+declare i64 @llvm.hexagon.S2.asr.i.vw(i64, i32)
+define i64 @S2_asr_i_vw(i64 %a) {
+ %z = call i64 @llvm.hexagon.S2.asr.i.vw(i64 %a, i32 0)
+ ret i64 %z
+}
+; CHECK: r1:0 = vasrw(r1:0, #0)
+
+declare i64 @llvm.hexagon.S2.lsr.i.vw(i64, i32)
+define i64 @S2_lsr_i_vw(i64 %a) {
+ %z = call i64 @llvm.hexagon.S2.lsr.i.vw(i64 %a, i32 0)
+ ret i64 %z
+}
+; CHECK: r1:0 = vlsrw(r1:0, #0)
+
+declare i64 @llvm.hexagon.S2.asl.i.vw(i64, i32)
+define i64 @S2_asl_i_vw(i64 %a) {
+ %z = call i64 @llvm.hexagon.S2.asl.i.vw(i64 %a, i32 0)
+ ret i64 %z
+}
+; CHECK: r1:0 = vaslw(r1:0, #0)
+
+; Vector shift words with truncate and pack
+declare i32 @llvm.hexagon.S2.asr.i.svw.trun(i64, i32)
+define i32 @S2_asr_i_svw_trun(i64 %a) {
+ %z = call i32 @llvm.hexagon.S2.asr.i.svw.trun(i64 %a, i32 0)
+ ret i32 %z
+}
+; CHECK: r0 = vasrw(r1:0, #0)
+
+declare i32 @llvm.hexagon.S2.asr.r.svw.trun(i64, i32)
+define i32 @S2_asr_r_svw_trun(i64 %a, i32 %b) {
+ %z = call i32 @llvm.hexagon.S2.asr.r.svw.trun(i64 %a, i32 %b)
+ ret i32 %z
+}
+; CHECK: r0 = vasrw(r1:0, r2)
diff --git a/test/CodeGen/Hexagon/newvaluestore.ll b/test/CodeGen/Hexagon/newvaluestore.ll
index 186e393..93cf347 100644
--- a/test/CodeGen/Hexagon/newvaluestore.ll
+++ b/test/CodeGen/Hexagon/newvaluestore.ll
@@ -7,7 +7,7 @@
define i32 @main() nounwind {
entry:
-; CHECK: memw(r{{[0-9]+}} + #{{[0-9]+}}) = r{{[0-9]+}}.new
+; CHECK: memw(r{{[0-9]+}}+#{{[0-9]+}}) = r{{[0-9]+}}.new
%number1 = alloca i32, align 4
%number2 = alloca i32, align 4
%number3 = alloca i32, align 4
diff --git a/test/CodeGen/Hexagon/pred-absolute-store.ll b/test/CodeGen/Hexagon/pred-absolute-store.ll
index b1b09f4..64635b1 100644
--- a/test/CodeGen/Hexagon/pred-absolute-store.ll
+++ b/test/CodeGen/Hexagon/pred-absolute-store.ll
@@ -2,7 +2,7 @@
; Check that we are able to predicate instructions with absolute
; addressing mode.
-; CHECK: if{{ *}}(p{{[0-3]+}}){{ *}}memw(##gvar){{ *}}={{ *}}r{{[0-9]+}}
+; CHECK: if{{ *}}(p{{[0-3]+}}.new){{ *}}memw(##gvar){{ *}}={{ *}}r{{[0-9]+}}
@gvar = external global i32
define i32 @test2(i32 %a, i32 %b) nounwind {
diff --git a/test/CodeGen/Hexagon/struct_args_large.ll b/test/CodeGen/Hexagon/struct_args_large.ll
index f09fd10..db87d9e 100644
--- a/test/CodeGen/Hexagon/struct_args_large.ll
+++ b/test/CodeGen/Hexagon/struct_args_large.ll
@@ -1,6 +1,6 @@
; RUN: llc -march=hexagon -mcpu=hexagonv4 < %s | FileCheck %s
; CHECK: r[[T0:[0-9]+]] = CONST32(#s2)
-; CHECK: memw(r29 + #0) = r{{.}}
+; CHECK: memw(r29+#0) = r{{.}}
; CHECK: memw(r29+#8) = r{{.}}
%struct.large = type { i64, i64 }
diff --git a/test/CodeGen/Inputs/DbgValueOtherTargets.ll b/test/CodeGen/Inputs/DbgValueOtherTargets.ll
index 2d05b45..d21a4ee 100644
--- a/test/CodeGen/Inputs/DbgValueOtherTargets.ll
+++ b/test/CodeGen/Inputs/DbgValueOtherTargets.ll
@@ -3,7 +3,7 @@
define i32 @main() nounwind ssp {
entry:
; CHECK: DEBUG_VALUE
- call void @llvm.dbg.value(metadata !6, i64 0, metadata !7, metadata !{metadata !"0x102"}), !dbg !9
+ call void @llvm.dbg.value(metadata i32 0, i64 0, metadata !7, metadata !{!"0x102"}), !dbg !9
ret i32 0, !dbg !10
}
@@ -14,17 +14,17 @@ declare void @llvm.dbg.value(metadata, i64, metadata, metadata) nounwind readnon
!llvm.dbg.cu = !{!2}
!llvm.module.flags = !{!13}
-!0 = metadata !{metadata !"0x2e\00main\00main\00\002\000\001\000\006\000\000\000", metadata !12, metadata !1, metadata !3, null, i32 ()* @main, null, null, null} ; [ DW_TAG_subprogram ]
-!1 = metadata !{metadata !"0x29", metadata !12} ; [ DW_TAG_file_type ]
-!2 = metadata !{metadata !"0x11\0012\00clang version 2.9 (trunk 120996)\000\00\000\00\000", metadata !12, metadata !6, metadata !6, metadata !11, null, null} ; [ DW_TAG_compile_unit ]
-!3 = metadata !{metadata !"0x15\00\000\000\000\000\000\000", metadata !12, metadata !1, null, metadata !4, null, null, null} ; [ DW_TAG_subroutine_type ] [line 0, size 0, align 0, offset 0] [from ]
-!4 = metadata !{metadata !5}
-!5 = metadata !{metadata !"0x24\00int\000\0032\0032\000\000\005", metadata !12, metadata !2} ; [ DW_TAG_base_type ]
-!6 = metadata !{i32 0}
-!7 = metadata !{metadata !"0x100\00i\003\000", metadata !8, metadata !1, metadata !5} ; [ DW_TAG_auto_variable ]
-!8 = metadata !{metadata !"0xb\002\0012\000", metadata !12, metadata !0} ; [ DW_TAG_lexical_block ]
-!9 = metadata !{i32 3, i32 11, metadata !8, null}
-!10 = metadata !{i32 4, i32 2, metadata !8, null}
-!11 = metadata !{metadata !0}
-!12 = metadata !{metadata !"/tmp/x.c", metadata !"/Users/manav"}
-!13 = metadata !{i32 1, metadata !"Debug Info Version", i32 2}
+!0 = !{!"0x2e\00main\00main\00\002\000\001\000\006\000\000\000", !12, !1, !3, null, i32 ()* @main, null, null, null} ; [ DW_TAG_subprogram ]
+!1 = !{!"0x29", !12} ; [ DW_TAG_file_type ]
+!2 = !{!"0x11\0012\00clang version 2.9 (trunk 120996)\000\00\000\00\000", !12, !6, !6, !11, null, null} ; [ DW_TAG_compile_unit ]
+!3 = !{!"0x15\00\000\000\000\000\000\000", !12, !1, null, !4, null, null, null} ; [ DW_TAG_subroutine_type ] [line 0, size 0, align 0, offset 0] [from ]
+!4 = !{!5}
+!5 = !{!"0x24\00int\000\0032\0032\000\000\005", !12, !2} ; [ DW_TAG_base_type ]
+!6 = !{i32 0}
+!7 = !{!"0x100\00i\003\000", !8, !1, !5} ; [ DW_TAG_auto_variable ]
+!8 = !{!"0xb\002\0012\000", !12, !0} ; [ DW_TAG_lexical_block ]
+!9 = !MDLocation(line: 3, column: 11, scope: !8)
+!10 = !MDLocation(line: 4, column: 2, scope: !8)
+!11 = !{!0}
+!12 = !{!"/tmp/x.c", !"/Users/manav"}
+!13 = !{i32 1, !"Debug Info Version", i32 2}
diff --git a/test/CodeGen/Mips/2008-08-01-AsmInline.ll b/test/CodeGen/Mips/2008-08-01-AsmInline.ll
index 3c1bb39..ae06ffe 100644
--- a/test/CodeGen/Mips/2008-08-01-AsmInline.ll
+++ b/test/CodeGen/Mips/2008-08-01-AsmInline.ll
@@ -1,5 +1,5 @@
; RUN: llc -march=mips -mcpu=mips32 < %s | FileCheck %s
-; RUN: llc -march=mips64el -mcpu=mips64r2 -mattr=n64 < %s | FileCheck %s
+; RUN: llc -march=mips64el -mcpu=mips64r2 -target-abi=n64 < %s | FileCheck %s
%struct.DWstruct = type { i32, i32 }
diff --git a/test/CodeGen/Mips/2009-11-16-CstPoolLoad.ll b/test/CodeGen/Mips/2009-11-16-CstPoolLoad.ll
index c3791df..f736ddd 100644
--- a/test/CodeGen/Mips/2009-11-16-CstPoolLoad.ll
+++ b/test/CodeGen/Mips/2009-11-16-CstPoolLoad.ll
@@ -1,9 +1,9 @@
; RUN: llc -march=mipsel -relocation-model=pic < %s | FileCheck %s -check-prefix=PIC-O32
; RUN: llc -march=mipsel -relocation-model=static < %s | FileCheck %s -check-prefix=STATIC-O32
-; RUN: llc -march=mips64el -mcpu=mips64r2 -mattr=-n64,n32 -relocation-model=pic < %s | FileCheck %s -check-prefix=PIC-N32
-; RUN: llc -march=mips64el -mcpu=mips64r2 -mattr=-n64,n32 -relocation-model=static < %s | FileCheck %s -check-prefix=STATIC-N32
-; RUN: llc -march=mips64el -mcpu=mips64r2 -mattr=-n64,n64 -relocation-model=pic < %s | FileCheck %s -check-prefix=PIC-N64
-; RUN: llc -march=mips64el -mcpu=mips64r2 -mattr=-n64,n64 -relocation-model=static < %s | FileCheck %s -check-prefix=STATIC-N64
+; RUN: llc -march=mips64el -mcpu=mips64r2 -target-abi n32 -relocation-model=pic < %s | FileCheck %s -check-prefix=PIC-N32
+; RUN: llc -march=mips64el -mcpu=mips64r2 -target-abi n32 -relocation-model=static < %s | FileCheck %s -check-prefix=STATIC-N32
+; RUN: llc -march=mips64el -mcpu=mips64r2 -target-abi n64 -relocation-model=pic < %s | FileCheck %s -check-prefix=PIC-N64
+; RUN: llc -march=mips64el -mcpu=mips64r2 -target-abi n64 -relocation-model=static < %s | FileCheck %s -check-prefix=STATIC-N64
define float @h() nounwind readnone {
entry:
diff --git a/test/CodeGen/Mips/Fast-ISel/callabi.ll b/test/CodeGen/Mips/Fast-ISel/callabi.ll
index 44b94bb..e76d7a7 100644
--- a/test/CodeGen/Mips/Fast-ISel/callabi.ll
+++ b/test/CodeGen/Mips/Fast-ISel/callabi.ll
@@ -474,4 +474,4 @@ attributes #1 = { "less-precise-fpmad"="false" "no-frame-pointer-elim"="true" "n
!llvm.ident = !{!0}
-!0 = metadata !{metadata !"clang version 3.6.0 (gitosis@dmz-portal.mips.com:clang 43992fe7b17de5553ac06d323cb80cc6723a9ae3) (gitosis@dmz-portal.mips.com:llvm.git 0834e6839eb170197c81bb02e916258d1527e312)"}
+!0 = !{!"clang version 3.6.0 (gitosis@dmz-portal.mips.com:clang 43992fe7b17de5553ac06d323cb80cc6723a9ae3) (gitosis@dmz-portal.mips.com:llvm.git 0834e6839eb170197c81bb02e916258d1527e312)"}
diff --git a/test/CodeGen/Mips/Fast-ISel/overflt.ll b/test/CodeGen/Mips/Fast-ISel/overflt.ll
new file mode 100644
index 0000000..94abd2d
--- /dev/null
+++ b/test/CodeGen/Mips/Fast-ISel/overflt.ll
@@ -0,0 +1,64 @@
+; RUN: llc -march=mipsel -relocation-model=pic -O0 -mips-fast-isel -fast-isel-abort -mcpu=mips32r2 \
+; RUN: < %s | FileCheck %s
+; RUN: llc -march=mipsel -relocation-model=pic -O0 -mips-fast-isel -fast-isel-abort -mcpu=mips32 \
+; RUN: < %s | FileCheck %s
+
+@x = common global [128000 x float] zeroinitializer, align 4
+@y = global float* getelementptr inbounds ([128000 x float]* @x, i32 0, i32 0), align 4
+@result = common global float 0.000000e+00, align 4
+@.str = private unnamed_addr constant [5 x i8] c"%f \0A\00", align 1
+
+; Function Attrs: nounwind
+define void @foo() {
+entry:
+; CHECK-LABEL: .ent foo
+ %0 = load float** @y, align 4
+ %arrayidx = getelementptr inbounds float* %0, i32 64000
+ store float 5.500000e+00, float* %arrayidx, align 4
+; CHECK: lui $[[REG_FPCONST_INT:[0-9]+]], 16560
+; CHECK: mtc1 $[[REG_FPCONST_INT]], $f[[REG_FPCONST:[0-9]+]]
+; CHECK: lw $[[REG_Y_GOT:[0-9]+]], %got(y)(${{[0-9]+}})
+; CHECK: lw $[[REG_Y:[0-9]+]], 0($[[REG_Y_GOT]])
+; CHECK: lui $[[REG_IDX_UPPER:[0-9]+]], 3
+; CHECK: ori $[[REG_IDX:[0-9]+]], $[[REG_IDX_UPPER]], 59392
+; CHECK: addu $[[REG_Y_IDX:[0-9]+]], $[[REG_IDX]], $[[REG_Y]]
+; CHECK: swc1 $f[[REG_FPCONST]], 0($[[REG_Y_IDX]])
+ ret void
+; CHECK-LABEL: .end foo
+}
+
+; Function Attrs: nounwind
+define void @goo() {
+entry:
+; CHECK-LABEL: .ent goo
+ %0 = load float** @y, align 4
+ %arrayidx = getelementptr inbounds float* %0, i32 64000
+ %1 = load float* %arrayidx, align 4
+ store float %1, float* @result, align 4
+; CHECK-DAG: lw $[[REG_RESULT:[0-9]+]], %got(result)(${{[0-9]+}})
+; CHECK-DAG: lw $[[REG_Y_GOT:[0-9]+]], %got(y)(${{[0-9]+}})
+; CHECK-DAG: lw $[[REG_Y:[0-9]+]], 0($[[REG_Y_GOT]])
+; CHECK-DAG: lui $[[REG_IDX_UPPER:[0-9]+]], 3
+; CHECK-DAG: ori $[[REG_IDX:[0-9]+]], $[[REG_IDX_UPPER]], 59392
+; CHECK-DAG: addu $[[REG_Y_IDX:[0-9]+]], $[[REG_IDX]], $[[REG_Y]]
+; CHECK-DAG: lwc1 $f[[Y_IDX:[0-9]+]], 0($[[REG_Y_IDX]])
+; CHECK-DAG: swc1 $f[[Y_IDX]], 0($[[REG_RESULT]])
+; CHECK-LABEL: .end goo
+ ret void
+}
+
+;
+; Original C code for test:
+;
+; float x[128000];
+; float *y = x;
+; float result;
+;
+; void foo() {
+;   y[64000] = 5.5;
+; }
+;
+; void goo() {
+;   result = y[64000];
+; }
diff --git a/test/CodeGen/Mips/Fast-ISel/retabi.ll b/test/CodeGen/Mips/Fast-ISel/retabi.ll
new file mode 100644
index 0000000..d271aef
--- /dev/null
+++ b/test/CodeGen/Mips/Fast-ISel/retabi.ll
@@ -0,0 +1,80 @@
+; RUN: llc -march=mipsel -relocation-model=pic -O0 -mips-fast-isel -fast-isel-abort -mcpu=mips32r2 \
+; RUN: < %s | FileCheck %s
+
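+; All of these functions are compiled as o32 PIC, so each prologue rebuilds
+; $gp from _gp_disp and the function address in $25 before loading the
+; global's address from the GOT. Sub-word return values are additionally
+; sign-extended into $2 with seh/seb (available from mips32r2).
+;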
+@i = global i32 75, align 4
+@s = global i16 -345, align 2
+@c = global i8 118, align 1
+@f = global float 0x40BE623360000000, align 4
+@d = global double 1.298330e+03, align 8
+
+; Function Attrs: nounwind
+define i32 @reti() {
+entry:
+; CHECK-LABEL: reti:
+ %0 = load i32* @i, align 4
+ ret i32 %0
+; CHECK: lui $[[REG_GPa:[0-9]+]], %hi(_gp_disp)
+; CHECK: addiu $[[REG_GPb:[0-9]+]], $[[REG_GPa]], %lo(_gp_disp)
+; CHECK: addu $[[REG_GP:[0-9]+]], $[[REG_GPb]], $25
+; CHECK: lw $[[REG_I_ADDR:[0-9]+]], %got(i)($[[REG_GP]])
+; CHECK: lw $2, 0($[[REG_I_ADDR]])
+; CHECK: jr $ra
+}
+
+; Function Attrs: nounwind
+define signext i16 @rets() {
+entry:
+; CHECK-LABEL: rets:
+ %0 = load i16* @s, align 2
+ ret i16 %0
+; CHECK: lui $[[REG_GPa:[0-9]+]], %hi(_gp_disp)
+; CHECK: addiu $[[REG_GPb:[0-9]+]], $[[REG_GPa]], %lo(_gp_disp)
+; CHECK: addu $[[REG_GP:[0-9]+]], $[[REG_GPb]], $25
+; CHECK: lw $[[REG_S_ADDR:[0-9]+]], %got(s)($[[REG_GP]])
+; CHECK: lhu $[[REG_S:[0-9]+]], 0($[[REG_S_ADDR]])
+; CHECK: seh $2, $[[REG_S]]
+; CHECK: jr $ra
+}
+
+; Function Attrs: nounwind
+define signext i8 @retc() {
+entry:
+; CHECK-LABEL: retc:
+ %0 = load i8* @c, align 1
+ ret i8 %0
+; CHECK: lui $[[REG_GPa:[0-9]+]], %hi(_gp_disp)
+; CHECK: addiu $[[REG_GPb:[0-9]+]], $[[REG_GPa]], %lo(_gp_disp)
+; CHECK: addu $[[REG_GP:[0-9]+]], $[[REG_GPb]], $25
+; CHECK: lw $[[REG_C_ADDR:[0-9]+]], %got(c)($[[REG_GP]])
+; CHECK: lbu $[[REG_C:[0-9]+]], 0($[[REG_C_ADDR]])
+; CHECK: seb $2, $[[REG_C]]
+; CHECK: jr $ra
+}
+
+; Function Attrs: nounwind
+define float @retf() {
+entry:
+; CHECK-LABEL: retf:
+ %0 = load float* @f, align 4
+ ret float %0
+; CHECK: lui $[[REG_GPa:[0-9]+]], %hi(_gp_disp)
+; CHECK: addiu $[[REG_GPb:[0-9]+]], $[[REG_GPa]], %lo(_gp_disp)
+; CHECK: addu $[[REG_GP:[0-9]+]], $[[REG_GPb]], $25
+; CHECK: lw $[[REG_F_ADDR:[0-9]+]], %got(f)($[[REG_GP]])
+; CHECK: lwc1 $f0, 0($[[REG_F_ADDR]])
+; CHECK: jr $ra
+}
+
+; Function Attrs: nounwind
+define double @retd() {
+entry:
+; CHECK-LABEL: retd:
+ %0 = load double* @d, align 8
+ ret double %0
+; CHECK: lui $[[REG_GPa:[0-9]+]], %hi(_gp_disp)
+; CHECK: addiu $[[REG_GPb:[0-9]+]], $[[REG_GPa]], %lo(_gp_disp)
+; CHECK: addu $[[REG_GP:[0-9]+]], $[[REG_GPb]], $25
+; CHECK: lw $[[REG_D_ADDR:[0-9]+]], %got(d)($[[REG_GP]])
+; CHECK: ldc1 $f0, 0($[[REG_D_ADDR]])
+; CHECK: jr $ra
+}
diff --git a/test/CodeGen/Mips/abiflags32.ll b/test/CodeGen/Mips/abiflags32.ll
index e32d4a5..39e2a90 100644
--- a/test/CodeGen/Mips/abiflags32.ll
+++ b/test/CodeGen/Mips/abiflags32.ll
@@ -1,6 +1,6 @@
; RUN: llc -filetype=asm -mtriple mipsel-unknown-linux -mcpu=mips32 %s -o - | FileCheck %s
; RUN: llc -filetype=asm -mtriple mipsel-unknown-linux -mcpu=mips32 -mattr=fp64 %s -o - | FileCheck -check-prefix=CHECK-64 %s
-; RUN: llc -filetype=asm -mtriple mipsel-unknown-linux -mcpu=mips64 -mattr=-n64,n32 %s -o - | FileCheck -check-prefix=CHECK-64n %s
+; RUN: llc -filetype=asm -mtriple mipsel-unknown-linux -mcpu=mips64 -target-abi n32 %s -o - | FileCheck -check-prefix=CHECK-64n %s
; CHECK: .nan legacy
; We don't emit '.module fp=32' for compatibility with binutils 2.24 which
diff --git a/test/CodeGen/Mips/atomic.ll b/test/CodeGen/Mips/atomic.ll
index 78fd829..ccfeb00 100644
--- a/test/CodeGen/Mips/atomic.ll
+++ b/test/CodeGen/Mips/atomic.ll
@@ -1,14 +1,15 @@
-; RUN: llc -march=mipsel --disable-machine-licm -mcpu=mips32 < %s | FileCheck %s -check-prefix=ALL -check-prefix=MIPS32-ANY -check-prefix=NO-SEB-SEH -check-prefix=CHECK-EL
-; RUN: llc -march=mipsel --disable-machine-licm -mcpu=mips32r2 < %s | FileCheck %s -check-prefix=ALL -check-prefix=MIPS32-ANY -check-prefix=HAS-SEB-SEH -check-prefix=CHECK-EL
-; RUN: llc -march=mipsel --disable-machine-licm -mcpu=mips32r6 < %s | FileCheck %s -check-prefix=ALL -check-prefix=MIPS32-ANY -check-prefix=HAS-SEB-SEH -check-prefix=CHECK-EL
-; RUN: llc -march=mips64el --disable-machine-licm -mcpu=mips4 < %s | FileCheck %s -check-prefix=ALL -check-prefix=MIPS64-ANY -check-prefix=NO-SEB-SEH -check-prefix=CHECK-EL
-; RUN: llc -march=mips64el --disable-machine-licm -mcpu=mips64 < %s | FileCheck %s -check-prefix=ALL -check-prefix=MIPS64-ANY -check-prefix=NO-SEB-SEH -check-prefix=CHECK-EL
-; RUN: llc -march=mips64el --disable-machine-licm -mcpu=mips64r2 < %s | FileCheck %s -check-prefix=ALL -check-prefix=MIPS64-ANY -check-prefix=HAS-SEB-SEH -check-prefix=CHECK-EL
-; RUN: llc -march=mips64el --disable-machine-licm -mcpu=mips64r6 < %s | FileCheck %s -check-prefix=ALL -check-prefix=MIPS64-ANY -check-prefix=HAS-SEB-SEH -check-prefix=CHECK-EL
+; RUN: llc -march=mipsel --disable-machine-licm -mcpu=mips32 < %s | FileCheck %s -check-prefix=ALL -check-prefix=MIPS32-ANY -check-prefix=NO-SEB-SEH -check-prefix=CHECK-EL -check-prefix=NOT-MICROMIPS
+; RUN: llc -march=mipsel --disable-machine-licm -mcpu=mips32r2 < %s | FileCheck %s -check-prefix=ALL -check-prefix=MIPS32-ANY -check-prefix=HAS-SEB-SEH -check-prefix=CHECK-EL -check-prefix=NOT-MICROMIPS
+; RUN: llc -march=mipsel --disable-machine-licm -mcpu=mips32r6 < %s | FileCheck %s -check-prefix=ALL -check-prefix=MIPS32-ANY -check-prefix=HAS-SEB-SEH -check-prefix=CHECK-EL -check-prefix=NOT-MICROMIPS
+; RUN: llc -march=mips64el --disable-machine-licm -mcpu=mips4 < %s | FileCheck %s -check-prefix=ALL -check-prefix=MIPS64-ANY -check-prefix=NO-SEB-SEH -check-prefix=CHECK-EL -check-prefix=NOT-MICROMIPS
+; RUN: llc -march=mips64el --disable-machine-licm -mcpu=mips64 < %s | FileCheck %s -check-prefix=ALL -check-prefix=MIPS64-ANY -check-prefix=NO-SEB-SEH -check-prefix=CHECK-EL -check-prefix=NOT-MICROMIPS
+; RUN: llc -march=mips64el --disable-machine-licm -mcpu=mips64r2 < %s | FileCheck %s -check-prefix=ALL -check-prefix=MIPS64-ANY -check-prefix=HAS-SEB-SEH -check-prefix=CHECK-EL -check-prefix=NOT-MICROMIPS
+; RUN: llc -march=mips64el --disable-machine-licm -mcpu=mips64r6 < %s | FileCheck %s -check-prefix=ALL -check-prefix=MIPS64-ANY -check-prefix=HAS-SEB-SEH -check-prefix=CHECK-EL -check-prefix=NOT-MICROMIPS
+; RUN: llc -march=mipsel --disable-machine-licm -mcpu=mips32r2 -mattr=micromips < %s | FileCheck %s -check-prefix=ALL -check-prefix=MIPS32-ANY -check-prefix=HAS-SEB-SEH -check-prefix=CHECK-EL -check-prefix=MICROMIPS
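+; microMIPS replaces the beqz that closes each ll/sc retry loop with the
+; compact branch beqzc, so the branch is checked under separate
+; NOT-MICROMIPS and MICROMIPS prefixes below.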
; Keep one big-endian check so that we don't reduce testing, but don't add more
; since endianness doesn't affect the body of the atomic operations.
-; RUN: llc -march=mips --disable-machine-licm -mcpu=mips32 < %s | FileCheck %s -check-prefix=ALL -check-prefix=MIPS32-ANY -check-prefix=NO-SEB-SEH -check-prefix=CHECK-EB
+; RUN: llc -march=mips --disable-machine-licm -mcpu=mips32 < %s | FileCheck %s -check-prefix=ALL -check-prefix=MIPS32-ANY -check-prefix=NO-SEB-SEH -check-prefix=CHECK-EB -check-prefix=NOT-MICROMIPS
@x = common global i32 0, align 4
@@ -26,7 +27,8 @@ entry:
; ALL: ll $[[R1:[0-9]+]], 0($[[R0]])
; ALL: addu $[[R2:[0-9]+]], $[[R1]], $4
; ALL: sc $[[R2]], 0($[[R0]])
-; ALL: beqz $[[R2]], $[[BB0]]
+; NOT-MICROMIPS: beqz $[[R2]], $[[BB0]]
+; MICROMIPS: beqzc $[[R2]], $[[BB0]]
}
define i32 @AtomicLoadNand32(i32 signext %incr) nounwind {
@@ -44,7 +46,8 @@ entry:
; ALL: and $[[R3:[0-9]+]], $[[R1]], $4
; ALL: nor $[[R2:[0-9]+]], $zero, $[[R3]]
; ALL: sc $[[R2]], 0($[[R0]])
-; ALL: beqz $[[R2]], $[[BB0]]
+; NOT-MICROMIPS: beqz $[[R2]], $[[BB0]]
+; MICROMIPS: beqzc $[[R2]], $[[BB0]]
}
define i32 @AtomicSwap32(i32 signext %newval) nounwind {
@@ -63,7 +66,8 @@ entry:
; ALL: $[[BB0:[A-Z_0-9]+]]:
; ALL: ll ${{[0-9]+}}, 0($[[R0]])
; ALL: sc $[[R2:[0-9]+]], 0($[[R0]])
-; ALL: beqz $[[R2]], $[[BB0]]
+; NOT-MICROMIPS: beqz $[[R2]], $[[BB0]]
+; MICROMIPS: beqzc $[[R2]], $[[BB0]]
}
define i32 @AtomicCmpSwap32(i32 signext %oldval, i32 signext %newval) nounwind {
@@ -84,7 +88,8 @@ entry:
; ALL: ll $2, 0($[[R0]])
; ALL: bne $2, $4, $[[BB1:[A-Z_0-9]+]]
; ALL: sc $[[R2:[0-9]+]], 0($[[R0]])
-; ALL: beqz $[[R2]], $[[BB0]]
+; NOT-MICROMIPS: beqz $[[R2]], $[[BB0]]
+; MICROMIPS: beqzc $[[R2]], $[[BB0]]
; ALL: $[[BB1]]:
}
@@ -120,7 +125,8 @@ entry:
; ALL: and $[[R13:[0-9]+]], $[[R10]], $[[R8]]
; ALL: or $[[R14:[0-9]+]], $[[R13]], $[[R12]]
; ALL: sc $[[R14]], 0($[[R2]])
-; ALL: beqz $[[R14]], $[[BB0]]
+; NOT-MICROMIPS: beqz $[[R14]], $[[BB0]]
+; MICROMIPS: beqzc $[[R14]], $[[BB0]]
; ALL: and $[[R15:[0-9]+]], $[[R10]], $[[R7]]
; ALL: srlv $[[R16:[0-9]+]], $[[R15]], $[[R5]]
@@ -159,7 +165,8 @@ entry:
; ALL: and $[[R13:[0-9]+]], $[[R10]], $[[R8]]
; ALL: or $[[R14:[0-9]+]], $[[R13]], $[[R12]]
; ALL: sc $[[R14]], 0($[[R2]])
-; ALL: beqz $[[R14]], $[[BB0]]
+; NOT-MICROMIPS: beqz $[[R14]], $[[BB0]]
+; MICROMIPS: beqzc $[[R14]], $[[BB0]]
; ALL: and $[[R15:[0-9]+]], $[[R10]], $[[R7]]
; ALL: srlv $[[R16:[0-9]+]], $[[R15]], $[[R5]]
@@ -199,7 +206,8 @@ entry:
; ALL: and $[[R13:[0-9]+]], $[[R10]], $[[R8]]
; ALL: or $[[R14:[0-9]+]], $[[R13]], $[[R12]]
; ALL: sc $[[R14]], 0($[[R2]])
-; ALL: beqz $[[R14]], $[[BB0]]
+; NOT-MICROMIPS: beqz $[[R14]], $[[BB0]]
+; MICROMIPS: beqzc $[[R14]], $[[BB0]]
; ALL: and $[[R15:[0-9]+]], $[[R10]], $[[R7]]
; ALL: srlv $[[R16:[0-9]+]], $[[R15]], $[[R5]]
@@ -237,7 +245,8 @@ entry:
; ALL: and $[[R13:[0-9]+]], $[[R10]], $[[R8]]
; ALL: or $[[R14:[0-9]+]], $[[R13]], $[[R18]]
; ALL: sc $[[R14]], 0($[[R2]])
-; ALL: beqz $[[R14]], $[[BB0]]
+; NOT-MICROMIPS: beqz $[[R14]], $[[BB0]]
+; MICROMIPS: beqzc $[[R14]], $[[BB0]]
; ALL: and $[[R15:[0-9]+]], $[[R10]], $[[R7]]
; ALL: srlv $[[R16:[0-9]+]], $[[R15]], $[[R5]]
@@ -282,7 +291,8 @@ entry:
; ALL: and $[[R15:[0-9]+]], $[[R13]], $[[R8]]
; ALL: or $[[R16:[0-9]+]], $[[R15]], $[[R12]]
; ALL: sc $[[R16]], 0($[[R2]])
-; ALL: beqz $[[R16]], $[[BB0]]
+; NOT-MICROMIPS: beqz $[[R16]], $[[BB0]]
+; MICROMIPS: beqzc $[[R16]], $[[BB0]]
; ALL: $[[BB1]]:
; ALL: srlv $[[R17:[0-9]+]], $[[R14]], $[[R5]]
@@ -322,7 +332,8 @@ entry:
; ALL: and $[[R15:[0-9]+]], $[[R13]], $[[R8]]
; ALL: or $[[R16:[0-9]+]], $[[R15]], $[[R12]]
; ALL: sc $[[R16]], 0($[[R2]])
-; ALL: beqz $[[R16]], $[[BB0]]
+; NOT-MICROMIPS: beqz $[[R16]], $[[BB0]]
+; MICROMIPS: beqzc $[[R16]], $[[BB0]]
; ALL: $[[BB1]]:
; ALL: srlv $[[R17:[0-9]+]], $[[R14]], $[[R5]]
@@ -367,7 +378,8 @@ entry:
; ALL: and $[[R13:[0-9]+]], $[[R10]], $[[R8]]
; ALL: or $[[R14:[0-9]+]], $[[R13]], $[[R12]]
; ALL: sc $[[R14]], 0($[[R2]])
-; ALL: beqz $[[R14]], $[[BB0]]
+; NOT-MICROMIPS: beqz $[[R14]], $[[BB0]]
+; MICROMIPS: beqzc $[[R14]], $[[BB0]]
; ALL: and $[[R15:[0-9]+]], $[[R10]], $[[R7]]
; ALL: srlv $[[R16:[0-9]+]], $[[R15]], $[[R5]]
@@ -430,5 +442,6 @@ entry:
; ALL: ll $[[R1:[0-9]+]], 0($[[PTR]])
; ALL: addu $[[R2:[0-9]+]], $[[R1]], $4
; ALL: sc $[[R2]], 0($[[PTR]])
-; ALL: beqz $[[R2]], $[[BB0]]
+; NOT-MICROMIPS: beqz $[[R2]], $[[BB0]]
+; MICROMIPS: beqzc $[[R2]], $[[BB0]]
}
diff --git a/test/CodeGen/Mips/blockaddr.ll b/test/CodeGen/Mips/blockaddr.ll
index d6dc7e7..f743637 100644
--- a/test/CodeGen/Mips/blockaddr.ll
+++ b/test/CodeGen/Mips/blockaddr.ll
@@ -1,9 +1,9 @@
; RUN: llc -march=mipsel -relocation-model=pic < %s | FileCheck %s -check-prefix=PIC-O32
; RUN: llc -march=mipsel -relocation-model=static < %s | FileCheck %s -check-prefix=STATIC-O32
-; RUN: llc -march=mips64el -mcpu=mips64r2 -mattr=-n64,n32 -relocation-model=pic < %s | FileCheck %s -check-prefix=PIC-N32
-; RUN: llc -march=mips64el -mcpu=mips64r2 -mattr=-n64,n32 -relocation-model=static < %s | FileCheck %s -check-prefix=STATIC-N32
-; RUN: llc -march=mips64el -mcpu=mips64r2 -mattr=-n64,n64 -relocation-model=pic < %s | FileCheck %s -check-prefix=PIC-N64
-; RUN: llc -march=mips64el -mcpu=mips64r2 -mattr=-n64,n64 -relocation-model=static < %s | FileCheck %s -check-prefix=STATIC-N64
+; RUN: llc -march=mips64el -mcpu=mips64r2 -target-abi n32 -relocation-model=pic < %s | FileCheck %s -check-prefix=PIC-N32
+; RUN: llc -march=mips64el -mcpu=mips64r2 -target-abi n32 -relocation-model=static < %s | FileCheck %s -check-prefix=STATIC-N32
+; RUN: llc -march=mips64el -mcpu=mips64r2 -target-abi n64 -relocation-model=pic < %s | FileCheck %s -check-prefix=PIC-N64
+; RUN: llc -march=mips64el -mcpu=mips64r2 -target-abi n64 -relocation-model=static < %s | FileCheck %s -check-prefix=STATIC-N64
; RUN: llc -mtriple=mipsel-linux-gnu -march=mipsel -mcpu=mips32 -mattr=+mips16 -relocation-model=static < %s | FileCheck %s -check-prefix=STATIC-MIPS16-1
; RUN: llc -mtriple=mipsel-linux-gnu -march=mipsel -mcpu=mips32 -mattr=+mips16 -relocation-model=static < %s | FileCheck %s -check-prefix=STATIC-MIPS16-2
diff --git a/test/CodeGen/Mips/brsize3.ll b/test/CodeGen/Mips/brsize3.ll
index 7b1f440..3620868 100644
--- a/test/CodeGen/Mips/brsize3.ll
+++ b/test/CodeGen/Mips/brsize3.ll
@@ -30,4 +30,4 @@ x: ; preds = %x, %entry
attributes #0 = { noreturn nounwind optsize "less-precise-fpmad"="false" "no-frame-pointer-elim"="false" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="true" }
attributes #1 = { nounwind }
-!1 = metadata !{i32 45}
+!1 = !{i32 45}
diff --git a/test/CodeGen/Mips/brsize3a.ll b/test/CodeGen/Mips/brsize3a.ll
index 6382fa2..f05e211 100644
--- a/test/CodeGen/Mips/brsize3a.ll
+++ b/test/CodeGen/Mips/brsize3a.ll
@@ -23,4 +23,4 @@ x: ; preds = %x, %entry
attributes #0 = { noreturn nounwind optsize "less-precise-fpmad"="false" "no-frame-pointer-elim"="false" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="true" }
attributes #1 = { nounwind }
-!1 = metadata !{i32 45}
+!1 = !{i32 45}
diff --git a/test/CodeGen/Mips/cconv/arguments-float.ll b/test/CodeGen/Mips/cconv/arguments-float.ll
index 14a3baa..ee40d7f 100644
--- a/test/CodeGen/Mips/cconv/arguments-float.ll
+++ b/test/CodeGen/Mips/cconv/arguments-float.ll
@@ -1,14 +1,14 @@
; RUN: llc -march=mips -relocation-model=static -soft-float < %s | FileCheck --check-prefix=ALL --check-prefix=SYM32 --check-prefix=O32 --check-prefix=O32BE %s
; RUN: llc -march=mipsel -relocation-model=static -soft-float < %s | FileCheck --check-prefix=ALL --check-prefix=SYM32 --check-prefix=O32 --check-prefix=O32LE %s
-; RUN-TODO: llc -march=mips64 -relocation-model=static -soft-float -mattr=-n64,+o32 < %s | FileCheck --check-prefix=ALL --check-prefix=SYM32 --check-prefix=O32 %s
-; RUN-TODO: llc -march=mips64el -relocation-model=static -soft-float -mattr=-n64,+o32 < %s | FileCheck --check-prefix=ALL --check-prefix=SYM32 --check-prefix=O32 %s
+; RUN-TODO: llc -march=mips64 -relocation-model=static -soft-float -target-abi o32 < %s | FileCheck --check-prefix=ALL --check-prefix=SYM32 --check-prefix=O32 %s
+; RUN-TODO: llc -march=mips64el -relocation-model=static -soft-float -target-abi o32 < %s | FileCheck --check-prefix=ALL --check-prefix=SYM32 --check-prefix=O32 %s
-; RUN: llc -march=mips64 -relocation-model=static -soft-float -mattr=-n64,+n32 < %s | FileCheck --check-prefix=ALL --check-prefix=SYM32 --check-prefix=NEW %s
-; RUN: llc -march=mips64el -relocation-model=static -soft-float -mattr=-n64,+n32 < %s | FileCheck --check-prefix=ALL --check-prefix=SYM32 --check-prefix=NEW %s
+; RUN: llc -march=mips64 -relocation-model=static -soft-float -target-abi n32 < %s | FileCheck --check-prefix=ALL --check-prefix=SYM32 --check-prefix=NEW %s
+; RUN: llc -march=mips64el -relocation-model=static -soft-float -target-abi n32 < %s | FileCheck --check-prefix=ALL --check-prefix=SYM32 --check-prefix=NEW %s
-; RUN: llc -march=mips64 -relocation-model=static -soft-float -mattr=-n64,+n64 < %s | FileCheck --check-prefix=ALL --check-prefix=SYM64 --check-prefix=NEW %s
-; RUN: llc -march=mips64el -relocation-model=static -soft-float -mattr=-n64,+n64 < %s | FileCheck --check-prefix=ALL --check-prefix=SYM64 --check-prefix=NEW %s
+; RUN: llc -march=mips64 -relocation-model=static -soft-float -target-abi n64 < %s | FileCheck --check-prefix=ALL --check-prefix=SYM64 --check-prefix=NEW %s
+; RUN: llc -march=mips64el -relocation-model=static -soft-float -target-abi n64 < %s | FileCheck --check-prefix=ALL --check-prefix=SYM64 --check-prefix=NEW %s
; Test the floating-point arguments for all ABIs and byte orders as specified
; by section 5 of MD00305 (MIPS ABIs Described).
diff --git a/test/CodeGen/Mips/cconv/arguments-fp128.ll b/test/CodeGen/Mips/cconv/arguments-fp128.ll
index c8cd8fd..1666974 100644
--- a/test/CodeGen/Mips/cconv/arguments-fp128.ll
+++ b/test/CodeGen/Mips/cconv/arguments-fp128.ll
@@ -1,8 +1,8 @@
-; RUN: llc -march=mips64 -relocation-model=static -soft-float -mattr=-n64,+n32 < %s | FileCheck --check-prefix=ALL --check-prefix=SYM32 %s
-; RUN: llc -march=mips64el -relocation-model=static -soft-float -mattr=-n64,+n32 < %s | FileCheck --check-prefix=ALL --check-prefix=SYM32 %s
+; RUN: llc -march=mips64 -relocation-model=static -soft-float -target-abi n32 < %s | FileCheck --check-prefix=ALL --check-prefix=SYM32 %s
+; RUN: llc -march=mips64el -relocation-model=static -soft-float -target-abi n32 < %s | FileCheck --check-prefix=ALL --check-prefix=SYM32 %s
-; RUN: llc -march=mips64 -relocation-model=static -soft-float -mattr=-n64,+n64 < %s | FileCheck --check-prefix=ALL --check-prefix=SYM64 %s
-; RUN: llc -march=mips64el -relocation-model=static -soft-float -mattr=-n64,+n64 < %s | FileCheck --check-prefix=ALL --check-prefix=SYM64 %s
+; RUN: llc -march=mips64 -relocation-model=static -soft-float -target-abi n64 < %s | FileCheck --check-prefix=ALL --check-prefix=SYM64 %s
+; RUN: llc -march=mips64el -relocation-model=static -soft-float -target-abi n64 < %s | FileCheck --check-prefix=ALL --check-prefix=SYM64 %s
; Test the fp128 arguments for all ABIs and byte orders as specified
; by section 2 of the MIPSpro N32 Handbook.
diff --git a/test/CodeGen/Mips/cconv/arguments-hard-float-varargs.ll b/test/CodeGen/Mips/cconv/arguments-hard-float-varargs.ll
index 70ccf14..380bd5c 100644
--- a/test/CodeGen/Mips/cconv/arguments-hard-float-varargs.ll
+++ b/test/CodeGen/Mips/cconv/arguments-hard-float-varargs.ll
@@ -1,14 +1,14 @@
; RUN: llc -march=mips -relocation-model=static < %s | FileCheck --check-prefix=ALL --check-prefix=SYM32 --check-prefix=O32 --check-prefix=O32BE %s
; RUN: llc -march=mipsel -relocation-model=static < %s | FileCheck --check-prefix=ALL --check-prefix=SYM32 --check-prefix=O32 --check-prefix=O32LE %s
-; RUN-TODO: llc -march=mips64 -relocation-model=static -mattr=-n64,+o32 < %s | FileCheck --check-prefix=ALL --check-prefix=SYM32 --check-prefix=O32 %s
-; RUN-TODO: llc -march=mips64el -relocation-model=static -mattr=-n64,+o32 < %s | FileCheck --check-prefix=ALL --check-prefix=SYM32 --check-prefix=O32 %s
+; RUN-TODO: llc -march=mips64 -relocation-model=static -target-abi o32 < %s | FileCheck --check-prefix=ALL --check-prefix=SYM32 --check-prefix=O32 %s
+; RUN-TODO: llc -march=mips64el -relocation-model=static -target-abi o32 < %s | FileCheck --check-prefix=ALL --check-prefix=SYM32 --check-prefix=O32 %s
-; RUN: llc -march=mips64 -relocation-model=static -mattr=-n64,+n32 < %s | FileCheck --check-prefix=ALL --check-prefix=SYM32 --check-prefix=N32 --check-prefix=NEW --check-prefix=NEWBE %s
-; RUN: llc -march=mips64el -relocation-model=static -mattr=-n64,+n32 < %s | FileCheck --check-prefix=ALL --check-prefix=SYM32 --check-prefix=N32 --check-prefix=NEW --check-prefix=NEWLE %s
+; RUN: llc -march=mips64 -relocation-model=static -target-abi n32 < %s | FileCheck --check-prefix=ALL --check-prefix=SYM32 --check-prefix=N32 --check-prefix=NEW --check-prefix=NEWBE %s
+; RUN: llc -march=mips64el -relocation-model=static -target-abi n32 < %s | FileCheck --check-prefix=ALL --check-prefix=SYM32 --check-prefix=N32 --check-prefix=NEW --check-prefix=NEWLE %s
-; RUN: llc -march=mips64 -relocation-model=static -mattr=-n64,+n64 < %s | FileCheck --check-prefix=ALL --check-prefix=SYM64 --check-prefix=N64 --check-prefix=NEW --check-prefix=NEWBE %s
-; RUN: llc -march=mips64el -relocation-model=static -mattr=-n64,+n64 < %s | FileCheck --check-prefix=ALL --check-prefix=SYM64 --check-prefix=N64 --check-prefix=NEW --check-prefix=NEWLE %s
+; RUN: llc -march=mips64 -relocation-model=static -target-abi n64 < %s | FileCheck --check-prefix=ALL --check-prefix=SYM64 --check-prefix=N64 --check-prefix=NEW --check-prefix=NEWBE %s
+; RUN: llc -march=mips64el -relocation-model=static -target-abi n64 < %s | FileCheck --check-prefix=ALL --check-prefix=SYM64 --check-prefix=N64 --check-prefix=NEW --check-prefix=NEWLE %s
; Test the effect of varargs on floating point types in the non-variable part
; of the argument list as specified by section 2 of the MIPSpro N32 Handbook.
diff --git a/test/CodeGen/Mips/cconv/arguments-hard-float.ll b/test/CodeGen/Mips/cconv/arguments-hard-float.ll
index 9837f7e..3221e23 100644
--- a/test/CodeGen/Mips/cconv/arguments-hard-float.ll
+++ b/test/CodeGen/Mips/cconv/arguments-hard-float.ll
@@ -1,14 +1,14 @@
; RUN: llc -march=mips -relocation-model=static < %s | FileCheck --check-prefix=ALL --check-prefix=SYM32 --check-prefix=O32 --check-prefix=O32BE %s
; RUN: llc -march=mipsel -relocation-model=static < %s | FileCheck --check-prefix=ALL --check-prefix=SYM32 --check-prefix=O32 --check-prefix=O32LE %s
-; RUN-TODO: llc -march=mips64 -relocation-model=static -mattr=-n64,+o32 < %s | FileCheck --check-prefix=ALL --check-prefix=SYM32 --check-prefix=O32 %s
-; RUN-TODO: llc -march=mips64el -relocation-model=static -mattr=-n64,+o32 < %s | FileCheck --check-prefix=ALL --check-prefix=SYM32 --check-prefix=O32 %s
+; RUN-TODO: llc -march=mips64 -relocation-model=static -target-abi o32 < %s | FileCheck --check-prefix=ALL --check-prefix=SYM32 --check-prefix=O32 %s
+; RUN-TODO: llc -march=mips64el -relocation-model=static -target-abi o32 < %s | FileCheck --check-prefix=ALL --check-prefix=SYM32 --check-prefix=O32 %s
-; RUN: llc -march=mips64 -relocation-model=static -mattr=-n64,+n32 < %s | FileCheck --check-prefix=ALL --check-prefix=SYM32 --check-prefix=NEW %s
-; RUN: llc -march=mips64el -relocation-model=static -mattr=-n64,+n32 < %s | FileCheck --check-prefix=ALL --check-prefix=SYM32 --check-prefix=NEW %s
+; RUN: llc -march=mips64 -relocation-model=static -target-abi n32 < %s | FileCheck --check-prefix=ALL --check-prefix=SYM32 --check-prefix=NEW %s
+; RUN: llc -march=mips64el -relocation-model=static -target-abi n32 < %s | FileCheck --check-prefix=ALL --check-prefix=SYM32 --check-prefix=NEW %s
-; RUN: llc -march=mips64 -relocation-model=static -mattr=-n64,+n64 < %s | FileCheck --check-prefix=ALL --check-prefix=SYM64 --check-prefix=NEW %s
-; RUN: llc -march=mips64el -relocation-model=static -mattr=-n64,+n64 < %s | FileCheck --check-prefix=ALL --check-prefix=SYM64 --check-prefix=NEW %s
+; RUN: llc -march=mips64 -relocation-model=static -target-abi n64 < %s | FileCheck --check-prefix=ALL --check-prefix=SYM64 --check-prefix=NEW %s
+; RUN: llc -march=mips64el -relocation-model=static -target-abi n64 < %s | FileCheck --check-prefix=ALL --check-prefix=SYM64 --check-prefix=NEW %s
; Test the floating-point arguments for all ABIs and byte orders as specified
; by section 5 of MD00305 (MIPS ABIs Described).
diff --git a/test/CodeGen/Mips/cconv/arguments-hard-fp128.ll b/test/CodeGen/Mips/cconv/arguments-hard-fp128.ll
index 5e3f403..583759a 100644
--- a/test/CodeGen/Mips/cconv/arguments-hard-fp128.ll
+++ b/test/CodeGen/Mips/cconv/arguments-hard-fp128.ll
@@ -1,8 +1,8 @@
-; RUN: llc -march=mips64 -relocation-model=static -mattr=-n64,+n32 < %s | FileCheck --check-prefix=ALL --check-prefix=SYM32 %s
-; RUN: llc -march=mips64el -relocation-model=static -mattr=-n64,+n32 < %s | FileCheck --check-prefix=ALL --check-prefix=SYM32 %s
+; RUN: llc -march=mips64 -relocation-model=static -target-abi n32 < %s | FileCheck --check-prefix=ALL --check-prefix=SYM32 %s
+; RUN: llc -march=mips64el -relocation-model=static -target-abi n32 < %s | FileCheck --check-prefix=ALL --check-prefix=SYM32 %s
-; RUN: llc -march=mips64 -relocation-model=static -mattr=-n64,+n64 < %s | FileCheck --check-prefix=ALL --check-prefix=SYM64 %s
-; RUN: llc -march=mips64el -relocation-model=static -mattr=-n64,+n64 < %s | FileCheck --check-prefix=ALL --check-prefix=SYM64 %s
+; RUN: llc -march=mips64 -relocation-model=static -target-abi n64 < %s | FileCheck --check-prefix=ALL --check-prefix=SYM64 %s
+; RUN: llc -march=mips64el -relocation-model=static -target-abi n64 < %s | FileCheck --check-prefix=ALL --check-prefix=SYM64 %s
; Test the fp128 arguments for all ABIs and byte orders as specified
; by section 2 of the MIPSpro N32 Handbook.
diff --git a/test/CodeGen/Mips/cconv/arguments-struct.ll b/test/CodeGen/Mips/cconv/arguments-struct.ll
new file mode 100644
index 0000000..7ff894f
--- /dev/null
+++ b/test/CodeGen/Mips/cconv/arguments-struct.ll
@@ -0,0 +1,41 @@
+; RUN: llc -mtriple=mips-unknown-linux-gnu -relocation-model=static < %s | FileCheck --check-prefix=ALL --check-prefix=SYM32 --check-prefix=O32-BE %s
+; RUN: llc -mtriple=mipsel-unknown-linux-gnu -relocation-model=static < %s | FileCheck --check-prefix=ALL --check-prefix=SYM32 --check-prefix=O32-LE %s
+
+; RUN-TODO: llc -mtriple=mips64-unknown-linux-gnu -relocation-model=static -target-abi o32 < %s | FileCheck --check-prefix=ALL --check-prefix=SYM32 --check-prefix=O32-BE %s
+; RUN-TODO: llc -mtriple=mips64el-unknown-linux-gnu -relocation-model=static -target-abi o32 < %s | FileCheck --check-prefix=ALL --check-prefix=SYM32 --check-prefix=O32-LE %s
+
+; RUN: llc -mtriple=mips64-unknown-linux-gnu -relocation-model=static -target-abi n32 < %s | FileCheck --check-prefix=ALL --check-prefix=SYM32 --check-prefix=NEW-BE %s
+; RUN: llc -mtriple=mips64el-unknown-linux-gnu -relocation-model=static -target-abi n32 < %s | FileCheck --check-prefix=ALL --check-prefix=SYM32 --check-prefix=NEW-LE %s
+
+; RUN: llc -mtriple=mips64-unknown-linux-gnu -relocation-model=static -target-abi n64 < %s | FileCheck --check-prefix=ALL --check-prefix=SYM64 --check-prefix=NEW-BE %s
+; RUN: llc -mtriple=mips64el-unknown-linux-gnu -relocation-model=static -target-abi n64 < %s | FileCheck --check-prefix=ALL --check-prefix=SYM64 --check-prefix=NEW-LE %s
+
+; Test small structures for all ABIs and byte orders.
+;
+; N32/N64 are identical in this area, so their checks have been combined into
+; the 'NEW' prefix (the N stands for New).
+
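+; On big-endian targets the byte argument arrives at the most-significant
+; end of its register (bits 31-24 under O32, bits 63-56 under N32/N64) and
+; must be shifted down with srl/dsrl before the store; little-endian
+; targets can store $4 directly.
+;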
+@bytes = global [2 x i8] zeroinitializer
+
+define void @s_i8(i8 inreg %a) nounwind {
+entry:
+ store i8 %a, i8* getelementptr inbounds ([2 x i8]* @bytes, i32 0, i32 1)
+ ret void
+}
+
+; ALL-LABEL: s_i8:
+
+; SYM32-DAG: lui [[PTR_HI:\$[0-9]+]], %hi(bytes)
+; SYM32-DAG: addiu [[PTR:\$[0-9]+]], [[PTR_HI]], %lo(bytes)
+
+; SYM64-DAG: ld [[PTR:\$[0-9]+]], %got_disp(bytes)(
+
+; O32-BE-DAG: srl [[ARG:\$[0-9]+]], $4, 24
+; O32-BE-DAG: sb [[ARG]], 1([[PTR]])
+
+; O32-LE-DAG: sb $4, 1([[PTR]])
+
+; NEW-BE-DAG: dsrl [[ARG:\$[0-9]+]], $4, 56
+; NEW-BE-DAG: sb [[ARG]], 1([[PTR]])
+
+; NEW-LE-DAG: sb $4, 1([[PTR]])
diff --git a/test/CodeGen/Mips/cconv/arguments-varargs-small-structs-byte.ll b/test/CodeGen/Mips/cconv/arguments-varargs-small-structs-byte.ll
new file mode 100644
index 0000000..458b124
--- /dev/null
+++ b/test/CodeGen/Mips/cconv/arguments-varargs-small-structs-byte.ll
@@ -0,0 +1,282 @@
+; RUN: llc --march=mips64 -mcpu=mips64r2 < %s | FileCheck %s
+
+; Generated from the C program:
+;
+; #include <stdio.h>
+; #include <string.h>
+;
+; struct SmallStruct_1b {
+; char x1;
+; };
+;
+; struct SmallStruct_2b {
+; char x1;
+; char x2;
+; };
+;
+; struct SmallStruct_3b {
+; char x1;
+; char x2;
+; char x3;
+; };
+;
+; struct SmallStruct_4b {
+; char x1;
+; char x2;
+; char x3;
+; char x4;
+; };
+;
+; struct SmallStruct_5b {
+; char x1;
+; char x2;
+; char x3;
+; char x4;
+; char x5;
+; };
+;
+; struct SmallStruct_6b {
+; char x1;
+; char x2;
+; char x3;
+; char x4;
+; char x5;
+; char x6;
+; };
+;
+; struct SmallStruct_7b {
+; char x1;
+; char x2;
+; char x3;
+; char x4;
+; char x5;
+; char x6;
+; char x7;
+; };
+;
+; struct SmallStruct_8b {
+; char x1;
+; char x2;
+; char x3;
+; char x4;
+; char x5;
+; char x6;
+; char x7;
+; char x8;
+; };
+;
+; struct SmallStruct_9b {
+; char x1;
+; char x2;
+; char x3;
+; char x4;
+; char x5;
+; char x6;
+; char x7;
+; char x8;
+; char x9;
+; };
+;
+; void varArgF_SmallStruct(char* c, ...);
+;
+; void smallStruct_1b(struct SmallStruct_1b* ss) {
+; varArgF_SmallStruct("", *ss);
+; }
+;
+; void smallStruct_2b(struct SmallStruct_2b* ss) {
+; varArgF_SmallStruct("", *ss);
+; }
+;
+; void smallStruct_3b(struct SmallStruct_3b* ss)
+; {
+; varArgF_SmallStruct("", *ss);
+; }
+;
+; void smallStruct_4b(struct SmallStruct_4b* ss)
+; {
+; varArgF_SmallStruct("", *ss);
+; }
+;
+; void smallStruct_5b(struct SmallStruct_5b* ss)
+; {
+; varArgF_SmallStruct("", *ss);
+; }
+;
+; void smallStruct_6b(struct SmallStruct_6b* ss)
+; {
+; varArgF_SmallStruct("", *ss);
+; }
+;
+; void smallStruct_7b(struct SmallStruct_7b* ss)
+; {
+; varArgF_SmallStruct("", *ss);
+; }
+;
+; void smallStruct_8b(struct SmallStruct_8b* ss)
+; {
+; varArgF_SmallStruct("", *ss);
+; }
+;
+; void smallStruct_9b(struct SmallStruct_9b* ss)
+; {
+; varArgF_SmallStruct("", *ss);
+; }
+
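+; Each struct is coerced to a single integer of its size in bits and passed
+; in a 64-bit register. Since these are big-endian runs, the value must sit
+; at the most-significant end of the register, so an N-bit argument is
+; shifted left by 64 - N bits: dsll 56 for a 1-byte struct, 48 for 2 bytes,
+; down to 8 for 7 bytes, with no shift once the struct fills all 8 bytes.
+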
+%struct.SmallStruct_1b = type { i8 }
+%struct.SmallStruct_2b = type { i8, i8 }
+%struct.SmallStruct_3b = type { i8, i8, i8 }
+%struct.SmallStruct_4b = type { i8, i8, i8, i8 }
+%struct.SmallStruct_5b = type { i8, i8, i8, i8, i8 }
+%struct.SmallStruct_6b = type { i8, i8, i8, i8, i8, i8 }
+%struct.SmallStruct_7b = type { i8, i8, i8, i8, i8, i8, i8 }
+%struct.SmallStruct_8b = type { i8, i8, i8, i8, i8, i8, i8, i8 }
+%struct.SmallStruct_9b = type { i8, i8, i8, i8, i8, i8, i8, i8, i8 }
+
+@.str = private unnamed_addr constant [3 x i8] c"01\00", align 1
+
+declare void @varArgF_SmallStruct(i8* %c, ...)
+
+define void @smallStruct_1b(%struct.SmallStruct_1b* %ss) #0 {
+entry:
+ %ss.addr = alloca %struct.SmallStruct_1b*, align 8
+ store %struct.SmallStruct_1b* %ss, %struct.SmallStruct_1b** %ss.addr, align 8
+ %0 = load %struct.SmallStruct_1b** %ss.addr, align 8
+ %1 = bitcast %struct.SmallStruct_1b* %0 to { i8 }*
+ %2 = getelementptr { i8 }* %1, i32 0, i32 0
+ %3 = load i8* %2, align 1
+ call void (i8*, ...)* @varArgF_SmallStruct(i8* getelementptr inbounds ([3 x i8]* @.str, i32 0, i32 0), i8 inreg %3)
+ ret void
+ ; CHECK-LABEL: smallStruct_1b:
+ ; CHECK: dsll $[[R1:[0-9]+]], $[[R2:[0-9]+]], 56
+}
+
+define void @smallStruct_2b(%struct.SmallStruct_2b* %ss) #0 {
+entry:
+ %ss.addr = alloca %struct.SmallStruct_2b*, align 8
+ store %struct.SmallStruct_2b* %ss, %struct.SmallStruct_2b** %ss.addr, align 8
+ %0 = load %struct.SmallStruct_2b** %ss.addr, align 8
+ %1 = bitcast %struct.SmallStruct_2b* %0 to { i16 }*
+ %2 = getelementptr { i16 }* %1, i32 0, i32 0
+ %3 = load i16* %2, align 1
+ call void (i8*, ...)* @varArgF_SmallStruct(i8* getelementptr inbounds ([3 x i8]* @.str, i32 0, i32 0), i16 inreg %3)
+ ret void
+ ; CHECK-LABEL: smallStruct_2b:
+ ; CHECK: dsll $[[R1:[0-9]+]], $[[R2:[0-9]+]], 48
+}
+
+define void @smallStruct_3b(%struct.SmallStruct_3b* %ss) #0 {
+entry:
+ %ss.addr = alloca %struct.SmallStruct_3b*, align 8
+ %.coerce = alloca { i24 }
+ store %struct.SmallStruct_3b* %ss, %struct.SmallStruct_3b** %ss.addr, align 8
+ %0 = load %struct.SmallStruct_3b** %ss.addr, align 8
+ %1 = bitcast { i24 }* %.coerce to i8*
+ %2 = bitcast %struct.SmallStruct_3b* %0 to i8*
+ call void @llvm.memcpy.p0i8.p0i8.i64(i8* %1, i8* %2, i64 3, i32 0, i1 false)
+ %3 = getelementptr { i24 }* %.coerce, i32 0, i32 0
+ %4 = load i24* %3, align 1
+ call void (i8*, ...)* @varArgF_SmallStruct(i8* getelementptr inbounds ([3 x i8]* @.str, i32 0, i32 0), i24 inreg %4)
+ ret void
+ ; CHECK-LABEL: smallStruct_3b:
+ ; CHECK: dsll $[[R1:[0-9]+]], $[[R2:[0-9]+]], 40
+}
+
+declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture, i8* nocapture readonly, i64, i32, i1) #1
+
+define void @smallStruct_4b(%struct.SmallStruct_4b* %ss) #0 {
+entry:
+ %ss.addr = alloca %struct.SmallStruct_4b*, align 8
+ store %struct.SmallStruct_4b* %ss, %struct.SmallStruct_4b** %ss.addr, align 8
+ %0 = load %struct.SmallStruct_4b** %ss.addr, align 8
+ %1 = bitcast %struct.SmallStruct_4b* %0 to { i32 }*
+ %2 = getelementptr { i32 }* %1, i32 0, i32 0
+ %3 = load i32* %2, align 1
+ call void (i8*, ...)* @varArgF_SmallStruct(i8* getelementptr inbounds ([3 x i8]* @.str, i32 0, i32 0), i32 inreg %3)
+ ret void
+ ; CHECK-LABEL: smallStruct_4b:
+ ; CHECK: dsll $[[R1:[0-9]+]], $[[R2:[0-9]+]], 32
+}
+
+define void @smallStruct_5b(%struct.SmallStruct_5b* %ss) #0 {
+entry:
+ %ss.addr = alloca %struct.SmallStruct_5b*, align 8
+ %.coerce = alloca { i40 }
+ store %struct.SmallStruct_5b* %ss, %struct.SmallStruct_5b** %ss.addr, align 8
+ %0 = load %struct.SmallStruct_5b** %ss.addr, align 8
+ %1 = bitcast { i40 }* %.coerce to i8*
+ %2 = bitcast %struct.SmallStruct_5b* %0 to i8*
+ call void @llvm.memcpy.p0i8.p0i8.i64(i8* %1, i8* %2, i64 5, i32 0, i1 false)
+ %3 = getelementptr { i40 }* %.coerce, i32 0, i32 0
+ %4 = load i40* %3, align 1
+ call void (i8*, ...)* @varArgF_SmallStruct(i8* getelementptr inbounds ([3 x i8]* @.str, i32 0, i32 0), i40 inreg %4)
+ ret void
+ ; CHECK-LABEL: smallStruct_5b:
+ ; CHECK: dsll $[[R1:[0-9]+]], $[[R2:[0-9]+]], 24
+}
+
+define void @smallStruct_6b(%struct.SmallStruct_6b* %ss) #0 {
+entry:
+ %ss.addr = alloca %struct.SmallStruct_6b*, align 8
+ %.coerce = alloca { i48 }
+ store %struct.SmallStruct_6b* %ss, %struct.SmallStruct_6b** %ss.addr, align 8
+ %0 = load %struct.SmallStruct_6b** %ss.addr, align 8
+ %1 = bitcast { i48 }* %.coerce to i8*
+ %2 = bitcast %struct.SmallStruct_6b* %0 to i8*
+ call void @llvm.memcpy.p0i8.p0i8.i64(i8* %1, i8* %2, i64 6, i32 0, i1 false)
+ %3 = getelementptr { i48 }* %.coerce, i32 0, i32 0
+ %4 = load i48* %3, align 1
+ call void (i8*, ...)* @varArgF_SmallStruct(i8* getelementptr inbounds ([3 x i8]* @.str, i32 0, i32 0), i48 inreg %4)
+ ret void
+ ; CHECK-LABEL: smallStruct_6b:
+ ; CHECK: dsll $[[R1:[0-9]+]], $[[R2:[0-9]+]], 16
+}
+
+define void @smallStruct_7b(%struct.SmallStruct_7b* %ss) #0 {
+entry:
+ %ss.addr = alloca %struct.SmallStruct_7b*, align 8
+ %.coerce = alloca { i56 }
+ store %struct.SmallStruct_7b* %ss, %struct.SmallStruct_7b** %ss.addr, align 8
+ %0 = load %struct.SmallStruct_7b** %ss.addr, align 8
+ %1 = bitcast { i56 }* %.coerce to i8*
+ %2 = bitcast %struct.SmallStruct_7b* %0 to i8*
+ call void @llvm.memcpy.p0i8.p0i8.i64(i8* %1, i8* %2, i64 7, i32 0, i1 false)
+ %3 = getelementptr { i56 }* %.coerce, i32 0, i32 0
+ %4 = load i56* %3, align 1
+ call void (i8*, ...)* @varArgF_SmallStruct(i8* getelementptr inbounds ([3 x i8]* @.str, i32 0, i32 0), i56 inreg %4)
+ ret void
+ ; CHECK-LABEL: smallStruct_7b:
+ ; CHECK: dsll $[[R1:[0-9]+]], $[[R2:[0-9]+]], 8
+}
+
+define void @smallStruct_8b(%struct.SmallStruct_8b* %ss) #0 {
+entry:
+ %ss.addr = alloca %struct.SmallStruct_8b*, align 8
+ store %struct.SmallStruct_8b* %ss, %struct.SmallStruct_8b** %ss.addr, align 8
+ %0 = load %struct.SmallStruct_8b** %ss.addr, align 8
+ %1 = bitcast %struct.SmallStruct_8b* %0 to { i64 }*
+ %2 = getelementptr { i64 }* %1, i32 0, i32 0
+ %3 = load i64* %2, align 1
+ call void (i8*, ...)* @varArgF_SmallStruct(i8* getelementptr inbounds ([3 x i8]* @.str, i32 0, i32 0), i64 inreg %3)
+ ret void
+ ; CHECK-LABEL: smallStruct_8b:
+ ; CHECK-NOT: dsll
+}
+
+define void @smallStruct_9b(%struct.SmallStruct_9b* %ss) #0 {
+entry:
+ %ss.addr = alloca %struct.SmallStruct_9b*, align 8
+ %.coerce = alloca { i64, i8 }
+ store %struct.SmallStruct_9b* %ss, %struct.SmallStruct_9b** %ss.addr, align 8
+ %0 = load %struct.SmallStruct_9b** %ss.addr, align 8
+ %1 = bitcast { i64, i8 }* %.coerce to i8*
+ %2 = bitcast %struct.SmallStruct_9b* %0 to i8*
+ call void @llvm.memcpy.p0i8.p0i8.i64(i8* %1, i8* %2, i64 9, i32 0, i1 false)
+ %3 = getelementptr { i64, i8 }* %.coerce, i32 0, i32 0
+ %4 = load i64* %3, align 1
+ %5 = getelementptr { i64, i8 }* %.coerce, i32 0, i32 1
+ %6 = load i8* %5, align 1
+ call void (i8*, ...)* @varArgF_SmallStruct(i8* getelementptr inbounds ([3 x i8]* @.str, i32 0, i32 0), i64 inreg %4, i8 inreg %6)
+ ret void
+ ; CHECK-LABEL: smallStruct_9b:
+ ; CHECK: dsll $[[R1:[0-9]+]], $[[R2:[0-9]+]], 56
+}
diff --git a/test/CodeGen/Mips/cconv/arguments-varargs-small-structs-combinations.ll b/test/CodeGen/Mips/cconv/arguments-varargs-small-structs-combinations.ll
new file mode 100644
index 0000000..899a3e8
--- /dev/null
+++ b/test/CodeGen/Mips/cconv/arguments-varargs-small-structs-combinations.ll
@@ -0,0 +1,149 @@
+; RUN: llc --march=mips64 -mcpu=mips64r2 < %s | FileCheck %s
+
+; Generated from the C program:
+;
+; #include <stdio.h>
+; #include <string.h>
+;
+; struct SmallStruct_1b1s {
+; char x1;
+; short x2;
+; };
+;
+; struct SmallStruct_1b1i {
+; char x1;
+; int x2;
+; };
+;
+; struct SmallStruct_1b1s1b {
+; char x1;
+; short x2;
+; char x3;
+; };
+;
+; struct SmallStruct_1s1i {
+; short x1;
+; int x2;
+; };
+;
+; struct SmallStruct_3b1s {
+; char x1;
+; char x2;
+; char x3;
+; short x4;
+; };
+;
+; void varArgF_SmallStruct(char* c, ...);
+;
+; void smallStruct_1b1s(struct SmallStruct_1b1s* ss)
+; {
+; varArgF_SmallStruct("", *ss);
+; }
+;
+; void smallStruct_1b1i(struct SmallStruct_1b1i* ss)
+; {
+; varArgF_SmallStruct("", *ss);
+; }
+;
+; void smallStruct_1b1s1b(struct SmallStruct_1b1s1b* ss)
+; {
+; varArgF_SmallStruct("", *ss);
+; }
+;
+; void smallStruct_1s1i(struct SmallStruct_1s1i* ss)
+; {
+; varArgF_SmallStruct("", *ss);
+; }
+;
+; void smallStruct_3b1s(struct SmallStruct_3b1s* ss)
+; {
+; varArgF_SmallStruct("", *ss);
+; }
+
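+; As in the single-byte cases, each struct is coerced to an integer of its
+; padded size and shifted left by 64 minus that width on this big-endian
+; run: the 4-byte structs get dsll 32, the 6-byte ones dsll 16, and the
+; 8-byte ones need no shift.
+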
+%struct.SmallStruct_1b1s = type { i8, i16 }
+%struct.SmallStruct_1b1i = type { i8, i32 }
+%struct.SmallStruct_1b1s1b = type { i8, i16, i8 }
+%struct.SmallStruct_1s1i = type { i16, i32 }
+%struct.SmallStruct_3b1s = type { i8, i8, i8, i16 }
+
+@.str = private unnamed_addr constant [3 x i8] c"01\00", align 1
+
+declare void @varArgF_SmallStruct(i8* %c, ...)
+
+define void @smallStruct_1b1s(%struct.SmallStruct_1b1s* %ss) #0 {
+entry:
+ %ss.addr = alloca %struct.SmallStruct_1b1s*, align 8
+ store %struct.SmallStruct_1b1s* %ss, %struct.SmallStruct_1b1s** %ss.addr, align 8
+ %0 = load %struct.SmallStruct_1b1s** %ss.addr, align 8
+ %1 = bitcast %struct.SmallStruct_1b1s* %0 to { i32 }*
+ %2 = getelementptr { i32 }* %1, i32 0, i32 0
+ %3 = load i32* %2, align 1
+ call void (i8*, ...)* @varArgF_SmallStruct(i8* getelementptr inbounds ([3 x i8]* @.str, i32 0, i32 0), i32 inreg %3)
+ ret void
+ ; CHECK-LABEL: smallStruct_1b1s:
+ ; CHECK: dsll $[[R1:[0-9]+]], $[[R2:[0-9]+]], 32
+}
+
+define void @smallStruct_1b1i(%struct.SmallStruct_1b1i* %ss) #0 {
+entry:
+ %ss.addr = alloca %struct.SmallStruct_1b1i*, align 8
+ store %struct.SmallStruct_1b1i* %ss, %struct.SmallStruct_1b1i** %ss.addr, align 8
+ %0 = load %struct.SmallStruct_1b1i** %ss.addr, align 8
+ %1 = bitcast %struct.SmallStruct_1b1i* %0 to { i64 }*
+ %2 = getelementptr { i64 }* %1, i32 0, i32 0
+ %3 = load i64* %2, align 1
+ call void (i8*, ...)* @varArgF_SmallStruct(i8* getelementptr inbounds ([3 x i8]* @.str, i32 0, i32 0), i64 inreg %3)
+ ret void
+ ; CHECK-LABEL: smallStruct_1b1i:
+ ; CHECK-NOT: dsll
+}
+
+define void @smallStruct_1b1s1b(%struct.SmallStruct_1b1s1b* %ss) #0 {
+entry:
+ %ss.addr = alloca %struct.SmallStruct_1b1s1b*, align 8
+ %.coerce = alloca { i48 }
+ store %struct.SmallStruct_1b1s1b* %ss, %struct.SmallStruct_1b1s1b** %ss.addr, align 8
+ %0 = load %struct.SmallStruct_1b1s1b** %ss.addr, align 8
+ %1 = bitcast { i48 }* %.coerce to i8*
+ %2 = bitcast %struct.SmallStruct_1b1s1b* %0 to i8*
+ call void @llvm.memcpy.p0i8.p0i8.i64(i8* %1, i8* %2, i64 6, i32 0, i1 false)
+ %3 = getelementptr { i48 }* %.coerce, i32 0, i32 0
+ %4 = load i48* %3, align 1
+ call void (i8*, ...)* @varArgF_SmallStruct(i8* getelementptr inbounds ([3 x i8]* @.str, i32 0, i32 0), i48 inreg %4)
+ ret void
+ ; CHECK-LABEL: smallStruct_1b1s1b:
+ ; CHECK: dsll $[[R1:[0-9]+]], $[[R2:[0-9]+]], 16
+}
+
+declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture, i8* nocapture readonly, i64, i32, i1) #1
+
+define void @smallStruct_1s1i(%struct.SmallStruct_1s1i* %ss) #0 {
+entry:
+ %ss.addr = alloca %struct.SmallStruct_1s1i*, align 8
+ store %struct.SmallStruct_1s1i* %ss, %struct.SmallStruct_1s1i** %ss.addr, align 8
+ %0 = load %struct.SmallStruct_1s1i** %ss.addr, align 8
+ %1 = bitcast %struct.SmallStruct_1s1i* %0 to { i64 }*
+ %2 = getelementptr { i64 }* %1, i32 0, i32 0
+ %3 = load i64* %2, align 1
+ call void (i8*, ...)* @varArgF_SmallStruct(i8* getelementptr inbounds ([3 x i8]* @.str, i32 0, i32 0), i64 inreg %3)
+ ret void
+ ; CHECK-LABEL: smallStruct_1s1i:
+ ; CHECK-NOT: dsll
+}
+
+define void @smallStruct_3b1s(%struct.SmallStruct_3b1s* %ss) #0 {
+entry:
+ %ss.addr = alloca %struct.SmallStruct_3b1s*, align 8
+ %.coerce = alloca { i48 }
+ store %struct.SmallStruct_3b1s* %ss, %struct.SmallStruct_3b1s** %ss.addr, align 8
+ %0 = load %struct.SmallStruct_3b1s** %ss.addr, align 8
+ %1 = bitcast { i48 }* %.coerce to i8*
+ %2 = bitcast %struct.SmallStruct_3b1s* %0 to i8*
+ call void @llvm.memcpy.p0i8.p0i8.i64(i8* %1, i8* %2, i64 6, i32 0, i1 false)
+ %3 = getelementptr { i48 }* %.coerce, i32 0, i32 0
+ %4 = load i48* %3, align 1
+ call void (i8*, ...)* @varArgF_SmallStruct(i8* getelementptr inbounds ([3 x i8]* @.str, i32 0, i32 0), i48 inreg %4)
+ ret void
+ ; CHECK-LABEL: smallStruct_3b1s:
+ ; CHECK: dsll $[[R1:[0-9]+]], $[[R2:[0-9]+]], 16
+}
diff --git a/test/CodeGen/Mips/cconv/arguments-varargs-small-structs-multiple-args.ll b/test/CodeGen/Mips/cconv/arguments-varargs-small-structs-multiple-args.ll
new file mode 100644
index 0000000..1f73625
--- /dev/null
+++ b/test/CodeGen/Mips/cconv/arguments-varargs-small-structs-multiple-args.ll
@@ -0,0 +1,161 @@
+; RUN: llc --march=mips64 -mcpu=mips64r2 < %s | FileCheck %s
+
+; Generated from the C program:
+;
+; #include <stdio.h>
+; #include <string.h>
+;
+; struct SmallStruct_1b {
+; char x1;
+; };
+;
+; struct SmallStruct_2b {
+; char x1;
+; char x2;
+; };
+;
+; struct SmallStruct_3b {
+; char x1;
+; char x2;
+; char x3;
+; };
+;
+; struct SmallStruct_4b {
+; char x1;
+; char x2;
+; char x3;
+; char x4;
+; };
+;
+; struct SmallStruct_5b {
+; char x1;
+; char x2;
+; char x3;
+; char x4;
+; char x5;
+; };
+;
+; struct SmallStruct_6b {
+; char x1;
+; char x2;
+; char x3;
+; char x4;
+; char x5;
+; char x6;
+; };
+;
+; struct SmallStruct_7b {
+; char x1;
+; char x2;
+; char x3;
+; char x4;
+; char x5;
+; char x6;
+; char x7;
+; };
+;
+; struct SmallStruct_8b {
+; char x1;
+; char x2;
+; char x3;
+; char x4;
+; char x5;
+; char x6;
+; char x7;
+; char x8;
+; };
+;
+; struct SmallStruct_9b {
+; char x1;
+; char x2;
+; char x3;
+; char x4;
+; char x5;
+; char x6;
+; char x7;
+; char x8;
+; char x9;
+; };
+;
+; void varArgF_SmallStruct(char* c, ...);
+;
+; void smallStruct_1b_x9(struct SmallStruct_1b* ss1, struct SmallStruct_1b* ss2, struct SmallStruct_1b* ss3, struct SmallStruct_1b* ss4, struct SmallStruct_1b* ss5, struct SmallStruct_1b* ss6, struct SmallStruct_1b* ss7, struct SmallStruct_1b* ss8, struct SmallStruct_1b* ss9)
+; {
+; varArgF_SmallStruct("", *ss1, *ss2, *ss3, *ss4, *ss5, *ss6, *ss7, *ss8, *ss9);
+; }
+
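+; Nine one-byte structs each occupy their own 64-bit argument slot
+; (register or stack), so the big-endian adjustment (dsll by 56) is
+; expected once per argument.
+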
+%struct.SmallStruct_1b = type { i8 }
+
+@.str = private unnamed_addr constant [3 x i8] c"01\00", align 1
+
+declare void @varArgF_SmallStruct(i8* %c, ...)
+
+define void @smallStruct_1b_x9(%struct.SmallStruct_1b* %ss1, %struct.SmallStruct_1b* %ss2, %struct.SmallStruct_1b* %ss3, %struct.SmallStruct_1b* %ss4, %struct.SmallStruct_1b* %ss5, %struct.SmallStruct_1b* %ss6, %struct.SmallStruct_1b* %ss7, %struct.SmallStruct_1b* %ss8, %struct.SmallStruct_1b* %ss9) #0 {
+entry:
+ %ss1.addr = alloca %struct.SmallStruct_1b*, align 8
+ %ss2.addr = alloca %struct.SmallStruct_1b*, align 8
+ %ss3.addr = alloca %struct.SmallStruct_1b*, align 8
+ %ss4.addr = alloca %struct.SmallStruct_1b*, align 8
+ %ss5.addr = alloca %struct.SmallStruct_1b*, align 8
+ %ss6.addr = alloca %struct.SmallStruct_1b*, align 8
+ %ss7.addr = alloca %struct.SmallStruct_1b*, align 8
+ %ss8.addr = alloca %struct.SmallStruct_1b*, align 8
+ %ss9.addr = alloca %struct.SmallStruct_1b*, align 8
+ store %struct.SmallStruct_1b* %ss1, %struct.SmallStruct_1b** %ss1.addr, align 8
+ store %struct.SmallStruct_1b* %ss2, %struct.SmallStruct_1b** %ss2.addr, align 8
+ store %struct.SmallStruct_1b* %ss3, %struct.SmallStruct_1b** %ss3.addr, align 8
+ store %struct.SmallStruct_1b* %ss4, %struct.SmallStruct_1b** %ss4.addr, align 8
+ store %struct.SmallStruct_1b* %ss5, %struct.SmallStruct_1b** %ss5.addr, align 8
+ store %struct.SmallStruct_1b* %ss6, %struct.SmallStruct_1b** %ss6.addr, align 8
+ store %struct.SmallStruct_1b* %ss7, %struct.SmallStruct_1b** %ss7.addr, align 8
+ store %struct.SmallStruct_1b* %ss8, %struct.SmallStruct_1b** %ss8.addr, align 8
+ store %struct.SmallStruct_1b* %ss9, %struct.SmallStruct_1b** %ss9.addr, align 8
+ %0 = load %struct.SmallStruct_1b** %ss1.addr, align 8
+ %1 = load %struct.SmallStruct_1b** %ss2.addr, align 8
+ %2 = load %struct.SmallStruct_1b** %ss3.addr, align 8
+ %3 = load %struct.SmallStruct_1b** %ss4.addr, align 8
+ %4 = load %struct.SmallStruct_1b** %ss5.addr, align 8
+ %5 = load %struct.SmallStruct_1b** %ss6.addr, align 8
+ %6 = load %struct.SmallStruct_1b** %ss7.addr, align 8
+ %7 = load %struct.SmallStruct_1b** %ss8.addr, align 8
+ %8 = load %struct.SmallStruct_1b** %ss9.addr, align 8
+ %9 = bitcast %struct.SmallStruct_1b* %0 to { i8 }*
+ %10 = getelementptr { i8 }* %9, i32 0, i32 0
+ %11 = load i8* %10, align 1
+ %12 = bitcast %struct.SmallStruct_1b* %1 to { i8 }*
+ %13 = getelementptr { i8 }* %12, i32 0, i32 0
+ %14 = load i8* %13, align 1
+ %15 = bitcast %struct.SmallStruct_1b* %2 to { i8 }*
+ %16 = getelementptr { i8 }* %15, i32 0, i32 0
+ %17 = load i8* %16, align 1
+ %18 = bitcast %struct.SmallStruct_1b* %3 to { i8 }*
+ %19 = getelementptr { i8 }* %18, i32 0, i32 0
+ %20 = load i8* %19, align 1
+ %21 = bitcast %struct.SmallStruct_1b* %4 to { i8 }*
+ %22 = getelementptr { i8 }* %21, i32 0, i32 0
+ %23 = load i8* %22, align 1
+ %24 = bitcast %struct.SmallStruct_1b* %5 to { i8 }*
+ %25 = getelementptr { i8 }* %24, i32 0, i32 0
+ %26 = load i8* %25, align 1
+ %27 = bitcast %struct.SmallStruct_1b* %6 to { i8 }*
+ %28 = getelementptr { i8 }* %27, i32 0, i32 0
+ %29 = load i8* %28, align 1
+ %30 = bitcast %struct.SmallStruct_1b* %7 to { i8 }*
+ %31 = getelementptr { i8 }* %30, i32 0, i32 0
+ %32 = load i8* %31, align 1
+ %33 = bitcast %struct.SmallStruct_1b* %8 to { i8 }*
+ %34 = getelementptr { i8 }* %33, i32 0, i32 0
+ %35 = load i8* %34, align 1
+ call void (i8*, ...)* @varArgF_SmallStruct(i8* getelementptr inbounds ([3 x i8]* @.str, i32 0, i32 0), i8 inreg %11, i8 inreg %14, i8 inreg %17, i8 inreg %20, i8 inreg %23, i8 inreg %26, i8 inreg %29, i8 inreg %32, i8 inreg %35)
+ ret void
+ ; CHECK-LABEL: smallStruct_1b_x9:
+ ; CHECK: dsll $[[R1:[0-9]+]], $[[R2:[0-9]+]], 56
+ ; CHECK: dsll $[[R1:[0-9]+]], $[[R2:[0-9]+]], 56
+ ; CHECK: dsll $[[R1:[0-9]+]], $[[R2:[0-9]+]], 56
+ ; CHECK: dsll $[[R1:[0-9]+]], $[[R2:[0-9]+]], 56
+ ; CHECK: dsll $[[R1:[0-9]+]], $[[R2:[0-9]+]], 56
+ ; CHECK: dsll $[[R1:[0-9]+]], $[[R2:[0-9]+]], 56
+ ; CHECK: dsll $[[R1:[0-9]+]], $[[R2:[0-9]+]], 56
+ ; CHECK: dsll $[[R1:[0-9]+]], $[[R2:[0-9]+]], 56
+ ; CHECK: dsll $[[R1:[0-9]+]], $[[R2:[0-9]+]], 56
+}
diff --git a/test/CodeGen/Mips/cconv/arguments-varargs.ll b/test/CodeGen/Mips/cconv/arguments-varargs.ll
index adacda5..6e6f48b 100644
--- a/test/CodeGen/Mips/cconv/arguments-varargs.ll
+++ b/test/CodeGen/Mips/cconv/arguments-varargs.ll
@@ -1,14 +1,14 @@
; RUN: llc -mtriple=mips-linux -relocation-model=static < %s | FileCheck --check-prefix=ALL --check-prefix=O32 --check-prefix=O32-BE %s
; RUN: llc -mtriple=mipsel-linux -relocation-model=static < %s | FileCheck --check-prefix=ALL --check-prefix=O32 --check-prefix=O32-LE %s
-; RUN-TODO: llc -march=mips64 -relocation-model=static -mattr=-n64,+o32 < %s | FileCheck --check-prefix=ALL --check-prefix=O32 %s
-; RUN-TODO: llc -march=mips64el -relocation-model=static -mattr=-n64,+o32 < %s | FileCheck --check-prefix=ALL --check-prefix=O32 %s
+; RUN-TODO: llc -march=mips64 -relocation-model=static -target-abi o32 < %s | FileCheck --check-prefix=ALL --check-prefix=O32 %s
+; RUN-TODO: llc -march=mips64el -relocation-model=static -target-abi o32 < %s | FileCheck --check-prefix=ALL --check-prefix=O32 %s
-; RUN: llc -mtriple=mips64-linux -relocation-model=static -mattr=-n64,+n32 < %s | FileCheck --check-prefix=ALL --check-prefix=NEW --check-prefix=N32 --check-prefix=NEW-BE %s
-; RUN: llc -mtriple=mips64el-linux -relocation-model=static -mattr=-n64,+n32 < %s | FileCheck --check-prefix=ALL --check-prefix=NEW --check-prefix=N32 --check-prefix=NEW-LE %s
+; RUN: llc -mtriple=mips64-linux -relocation-model=static -target-abi n32 < %s | FileCheck --check-prefix=ALL --check-prefix=NEW --check-prefix=N32 --check-prefix=NEW-BE %s
+; RUN: llc -mtriple=mips64el-linux -relocation-model=static -target-abi n32 < %s | FileCheck --check-prefix=ALL --check-prefix=NEW --check-prefix=N32 --check-prefix=NEW-LE %s
-; RUN: llc -march=mips64 -relocation-model=static -mattr=-n64,+n64 < %s | FileCheck --check-prefix=ALL --check-prefix=NEW --check-prefix=N64 --check-prefix=NEW-BE %s
-; RUN: llc -march=mips64el -relocation-model=static -mattr=-n64,+n64 < %s | FileCheck --check-prefix=ALL --check-prefix=NEW --check-prefix=N64 --check-prefix=NEW-LE %s
+; RUN: llc -march=mips64 -relocation-model=static -target-abi n64 < %s | FileCheck --check-prefix=ALL --check-prefix=NEW --check-prefix=N64 --check-prefix=NEW-BE %s
+; RUN: llc -march=mips64el -relocation-model=static -target-abi n64 < %s | FileCheck --check-prefix=ALL --check-prefix=NEW --check-prefix=N64 --check-prefix=NEW-LE %s
@hwords = global [3 x i16] zeroinitializer, align 1
@words = global [3 x i32] zeroinitializer, align 1
diff --git a/test/CodeGen/Mips/cconv/arguments.ll b/test/CodeGen/Mips/cconv/arguments.ll
index 43da604..98671aa 100644
--- a/test/CodeGen/Mips/cconv/arguments.ll
+++ b/test/CodeGen/Mips/cconv/arguments.ll
@@ -1,14 +1,14 @@
; RUN: llc -march=mips -relocation-model=static < %s | FileCheck --check-prefix=ALL --check-prefix=SYM32 --check-prefix=O32 %s
; RUN: llc -march=mipsel -relocation-model=static < %s | FileCheck --check-prefix=ALL --check-prefix=SYM32 --check-prefix=O32 %s
-; RUN-TODO: llc -march=mips64 -relocation-model=static -mattr=-n64,+o32 < %s | FileCheck --check-prefix=ALL --check-prefix=SYM32 --check-prefix=O32 %s
-; RUN-TODO: llc -march=mips64el -relocation-model=static -mattr=-n64,+o32 < %s | FileCheck --check-prefix=ALL --check-prefix=SYM32 --check-prefix=O32 %s
+; RUN-TODO: llc -march=mips64 -relocation-model=static -target-abi o32 < %s | FileCheck --check-prefix=ALL --check-prefix=SYM32 --check-prefix=O32 %s
+; RUN-TODO: llc -march=mips64el -relocation-model=static -target-abi o32 < %s | FileCheck --check-prefix=ALL --check-prefix=SYM32 --check-prefix=O32 %s
-; RUN: llc -march=mips64 -relocation-model=static -mattr=-n64,+n32 < %s | FileCheck --check-prefix=ALL --check-prefix=SYM32 --check-prefix=NEW %s
-; RUN: llc -march=mips64el -relocation-model=static -mattr=-n64,+n32 < %s | FileCheck --check-prefix=ALL --check-prefix=SYM32 --check-prefix=NEW %s
+; RUN: llc -march=mips64 -relocation-model=static -target-abi n32 < %s | FileCheck --check-prefix=ALL --check-prefix=SYM32 --check-prefix=NEW %s
+; RUN: llc -march=mips64el -relocation-model=static -target-abi n32 < %s | FileCheck --check-prefix=ALL --check-prefix=SYM32 --check-prefix=NEW %s
-; RUN: llc -march=mips64 -relocation-model=static -mattr=-n64,+n64 < %s | FileCheck --check-prefix=ALL --check-prefix=SYM64 --check-prefix=NEW %s
-; RUN: llc -march=mips64el -relocation-model=static -mattr=-n64,+n64 < %s | FileCheck --check-prefix=ALL --check-prefix=SYM64 --check-prefix=NEW %s
+; RUN: llc -march=mips64 -relocation-model=static -target-abi n64 < %s | FileCheck --check-prefix=ALL --check-prefix=SYM64 --check-prefix=NEW %s
+; RUN: llc -march=mips64el -relocation-model=static -target-abi n64 < %s | FileCheck --check-prefix=ALL --check-prefix=SYM64 --check-prefix=NEW %s
; Test the integer arguments for all ABIs and byte orders as specified by
; section 5 of MD00305 (MIPS ABIs Described).
diff --git a/test/CodeGen/Mips/cconv/callee-saved-float.ll b/test/CodeGen/Mips/cconv/callee-saved-float.ll
index de4d917..c84f0f4 100644
--- a/test/CodeGen/Mips/cconv/callee-saved-float.ll
+++ b/test/CodeGen/Mips/cconv/callee-saved-float.ll
@@ -3,20 +3,20 @@
; RUN: llc -march=mips < %s | FileCheck --check-prefix=ALL --check-prefix=O32-INV %s
; RUN: llc -march=mipsel < %s | FileCheck --check-prefix=ALL --check-prefix=O32-INV %s
-; RUN-TODO: llc -march=mips64 -mattr=-n64,+o32 < %s | FileCheck --check-prefix=ALL --check-prefix=O32 %s
-; RUN-TODO: llc -march=mips64el -mattr=-n64,+o32 < %s | FileCheck --check-prefix=ALL --check-prefix=O32 %s
-; RUN-TODO: llc -march=mips64 -mattr=-n64,+o32 < %s | FileCheck --check-prefix=ALL --check-prefix=ALL-INV --check-prefix=O32-INV %s
-; RUN-TODO: llc -march=mips64el -mattr=-n64,+o32 < %s | FileCheck --check-prefix=ALL --check-prefix=ALL-INV --check-prefix=O32-INV %s
+; RUN-TODO: llc -march=mips64 -target-abi o32 < %s | FileCheck --check-prefix=ALL --check-prefix=O32 %s
+; RUN-TODO: llc -march=mips64el -target-abi o32 < %s | FileCheck --check-prefix=ALL --check-prefix=O32 %s
+; RUN-TODO: llc -march=mips64 -target-abi o32 < %s | FileCheck --check-prefix=ALL --check-prefix=ALL-INV --check-prefix=O32-INV %s
+; RUN-TODO: llc -march=mips64el -target-abi o32 < %s | FileCheck --check-prefix=ALL --check-prefix=ALL-INV --check-prefix=O32-INV %s
-; RUN: llc -march=mips64 -mattr=-n64,+n32 < %s | FileCheck --check-prefix=ALL --check-prefix=N32 %s
-; RUN: llc -march=mips64el -mattr=-n64,+n32 < %s | FileCheck --check-prefix=ALL --check-prefix=N32 %s
-; RUN: llc -march=mips64 -mattr=-n64,+n32 < %s | FileCheck --check-prefix=ALL --check-prefix=ALL-INV --check-prefix=N32-INV %s
-; RUN: llc -march=mips64el -mattr=-n64,+n32 < %s | FileCheck --check-prefix=ALL --check-prefix=ALL-INV --check-prefix=N32-INV %s
+; RUN: llc -march=mips64 -target-abi n32 < %s | FileCheck --check-prefix=ALL --check-prefix=N32 %s
+; RUN: llc -march=mips64el -target-abi n32 < %s | FileCheck --check-prefix=ALL --check-prefix=N32 %s
+; RUN: llc -march=mips64 -target-abi n32 < %s | FileCheck --check-prefix=ALL --check-prefix=ALL-INV --check-prefix=N32-INV %s
+; RUN: llc -march=mips64el -target-abi n32 < %s | FileCheck --check-prefix=ALL --check-prefix=ALL-INV --check-prefix=N32-INV %s
-; RUN: llc -march=mips64 -mattr=-n64,+n64 < %s | FileCheck --check-prefix=ALL --check-prefix=N64 %s
-; RUN: llc -march=mips64el -mattr=-n64,+n64 < %s | FileCheck --check-prefix=ALL --check-prefix=N64 %s
-; RUN: llc -march=mips64 -mattr=-n64,+n64 < %s | FileCheck --check-prefix=ALL --check-prefix=ALL-INV --check-prefix=N64-INV %s
-; RUN: llc -march=mips64el -mattr=-n64,+n64 < %s | FileCheck --check-prefix=ALL --check-prefix=ALL-INV --check-prefix=N64-INV %s
+; RUN: llc -march=mips64 -target-abi n64 < %s | FileCheck --check-prefix=ALL --check-prefix=N64 %s
+; RUN: llc -march=mips64el -target-abi n64 < %s | FileCheck --check-prefix=ALL --check-prefix=N64 %s
+; RUN: llc -march=mips64 -target-abi n64 < %s | FileCheck --check-prefix=ALL --check-prefix=ALL-INV --check-prefix=N64-INV %s
+; RUN: llc -march=mips64el -target-abi n64 < %s | FileCheck --check-prefix=ALL --check-prefix=ALL-INV --check-prefix=N64-INV %s
; Test that the callee-saved registers are callee-saved as specified by section
; 2 of the MIPSpro N32 Handbook and section 3 of the SYSV ABI spec.
diff --git a/test/CodeGen/Mips/cconv/callee-saved.ll b/test/CodeGen/Mips/cconv/callee-saved.ll
index 293e99f..d0b1e64 100644
--- a/test/CodeGen/Mips/cconv/callee-saved.ll
+++ b/test/CodeGen/Mips/cconv/callee-saved.ll
@@ -3,20 +3,20 @@
; RUN: llc -march=mips < %s | FileCheck --check-prefix=ALL --check-prefix=O32-INV %s
; RUN: llc -march=mipsel < %s | FileCheck --check-prefix=ALL --check-prefix=O32-INV %s
-; RUN-TODO: llc -march=mips64 -mattr=-n64,+o32 < %s | FileCheck --check-prefix=ALL --check-prefix=O32 %s
-; RUN-TODO: llc -march=mips64el -mattr=-n64,+o32 < %s | FileCheck --check-prefix=ALL --check-prefix=O32 %s
-; RUN-TODO: llc -march=mips64 -mattr=-n64,+o32 < %s | FileCheck --check-prefix=ALL --check-prefix=O32-INV %s
-; RUN-TODO: llc -march=mips64el -mattr=-n64,+o32 < %s | FileCheck --check-prefix=ALL --check-prefix=O32-INV %s
+; RUN-TODO: llc -march=mips64 -target-abi o32 < %s | FileCheck --check-prefix=ALL --check-prefix=O32 %s
+; RUN-TODO: llc -march=mips64el -target-abi o32 < %s | FileCheck --check-prefix=ALL --check-prefix=O32 %s
+; RUN-TODO: llc -march=mips64 -target-abi o32 < %s | FileCheck --check-prefix=ALL --check-prefix=O32-INV %s
+; RUN-TODO: llc -march=mips64el -target-abi o32 < %s | FileCheck --check-prefix=ALL --check-prefix=O32-INV %s
-; RUN: llc -march=mips64 -mattr=-n64,+n32 < %s | FileCheck --check-prefix=ALL --check-prefix=N32 %s
-; RUN: llc -march=mips64el -mattr=-n64,+n32 < %s | FileCheck --check-prefix=ALL --check-prefix=N32 %s
-; RUN: llc -march=mips64 -mattr=-n64,+n32 < %s | FileCheck --check-prefix=ALL --check-prefix=N32-INV %s
-; RUN: llc -march=mips64el -mattr=-n64,+n32 < %s | FileCheck --check-prefix=ALL --check-prefix=N32-INV %s
+; RUN: llc -march=mips64 -target-abi n32 < %s | FileCheck --check-prefix=ALL --check-prefix=N32 %s
+; RUN: llc -march=mips64el -target-abi n32 < %s | FileCheck --check-prefix=ALL --check-prefix=N32 %s
+; RUN: llc -march=mips64 -target-abi n32 < %s | FileCheck --check-prefix=ALL --check-prefix=N32-INV %s
+; RUN: llc -march=mips64el -target-abi n32 < %s | FileCheck --check-prefix=ALL --check-prefix=N32-INV %s
-; RUN: llc -march=mips64 -mattr=-n64,+n64 < %s | FileCheck --check-prefix=ALL --check-prefix=N64 %s
-; RUN: llc -march=mips64el -mattr=-n64,+n64 < %s | FileCheck --check-prefix=ALL --check-prefix=N64 %s
-; RUN: llc -march=mips64 -mattr=-n64,+n64 < %s | FileCheck --check-prefix=ALL --check-prefix=N64-INV %s
-; RUN: llc -march=mips64el -mattr=-n64,+n64 < %s | FileCheck --check-prefix=ALL --check-prefix=N64-INV %s
+; RUN: llc -march=mips64 -target-abi n64 < %s | FileCheck --check-prefix=ALL --check-prefix=N64 %s
+; RUN: llc -march=mips64el -target-abi n64 < %s | FileCheck --check-prefix=ALL --check-prefix=N64 %s
+; RUN: llc -march=mips64 -target-abi n64 < %s | FileCheck --check-prefix=ALL --check-prefix=N64-INV %s
+; RUN: llc -march=mips64el -target-abi n64 < %s | FileCheck --check-prefix=ALL --check-prefix=N64-INV %s
; Test that the callee-saved registers are callee-saved as specified by section
; 2 of the MIPSpro N32 Handbook and section 3 of the SYSV ABI spec.
diff --git a/test/CodeGen/Mips/cconv/memory-layout.ll b/test/CodeGen/Mips/cconv/memory-layout.ll
index 0c3cc9e..33a68da 100644
--- a/test/CodeGen/Mips/cconv/memory-layout.ll
+++ b/test/CodeGen/Mips/cconv/memory-layout.ll
@@ -1,14 +1,14 @@
; RUN: llc -march=mips < %s | FileCheck --check-prefix=ALL --check-prefix=O32 %s
; RUN: llc -march=mipsel < %s | FileCheck --check-prefix=ALL --check-prefix=O32 %s
-; RUN-TODO: llc -march=mips64 -mattr=-n64,+o32 < %s | FileCheck --check-prefix=ALL --check-prefix=O32 %s
-; RUN-TODO: llc -march=mips64el -mattr=-n64,+o32 < %s | FileCheck --check-prefix=ALL --check-prefix=O32 %s
+; RUN-TODO: llc -march=mips64 -target-abi o32 < %s | FileCheck --check-prefix=ALL --check-prefix=O32 %s
+; RUN-TODO: llc -march=mips64el -target-abi o32 < %s | FileCheck --check-prefix=ALL --check-prefix=O32 %s
-; RUN: llc -march=mips64 -mattr=-n64,+n32 < %s | FileCheck --check-prefix=ALL --check-prefix=N32 %s
-; RUN: llc -march=mips64el -mattr=-n64,+n32 < %s | FileCheck --check-prefix=ALL --check-prefix=N32 %s
+; RUN: llc -march=mips64 -target-abi n32 < %s | FileCheck --check-prefix=ALL --check-prefix=N32 %s
+; RUN: llc -march=mips64el -target-abi n32 < %s | FileCheck --check-prefix=ALL --check-prefix=N32 %s
-; RUN: llc -march=mips64 -mattr=-n64,+n64 < %s | FileCheck --check-prefix=ALL --check-prefix=N64 %s
-; RUN: llc -march=mips64el -mattr=-n64,+n64 < %s | FileCheck --check-prefix=ALL --check-prefix=N64 %s
+; RUN: llc -march=mips64 -target-abi n64 < %s | FileCheck --check-prefix=ALL --check-prefix=N64 %s
+; RUN: llc -march=mips64el -target-abi n64 < %s | FileCheck --check-prefix=ALL --check-prefix=N64 %s
; Test the memory layout for all ABIs and byte orders as specified by section
; 4 of MD00305 (MIPS ABIs Described).
diff --git a/test/CodeGen/Mips/cconv/reserved-space.ll b/test/CodeGen/Mips/cconv/reserved-space.ll
index b36f89e..23190c2 100644
--- a/test/CodeGen/Mips/cconv/reserved-space.ll
+++ b/test/CodeGen/Mips/cconv/reserved-space.ll
@@ -1,14 +1,14 @@
; RUN: llc -march=mips < %s | FileCheck --check-prefix=ALL --check-prefix=O32 %s
; RUN: llc -march=mipsel < %s | FileCheck --check-prefix=ALL --check-prefix=O32 %s
-; RUN-TODO: llc -march=mips64 -mattr=-n64,+o32 < %s | FileCheck --check-prefix=ALL --check-prefix=O32 %s
-; RUN-TODO: llc -march=mips64el -mattr=-n64,+o32 < %s | FileCheck --check-prefix=ALL --check-prefix=O32 %s
+; RUN-TODO: llc -march=mips64 -target-abi o32 < %s | FileCheck --check-prefix=ALL --check-prefix=O32 %s
+; RUN-TODO: llc -march=mips64el -target-abi o32 < %s | FileCheck --check-prefix=ALL --check-prefix=O32 %s
-; RUN: llc -march=mips64 -mattr=-n64,+n32 < %s | FileCheck --check-prefix=ALL --check-prefix=N32 %s
-; RUN: llc -march=mips64el -mattr=-n64,+n32 < %s | FileCheck --check-prefix=ALL --check-prefix=N32 %s
+; RUN: llc -march=mips64 -target-abi n32 < %s | FileCheck --check-prefix=ALL --check-prefix=N32 %s
+; RUN: llc -march=mips64el -target-abi n32 < %s | FileCheck --check-prefix=ALL --check-prefix=N32 %s
-; RUN: llc -march=mips64 -mattr=-n64,+n64 < %s | FileCheck --check-prefix=ALL --check-prefix=N64 %s
-; RUN: llc -march=mips64el -mattr=-n64,+n64 < %s | FileCheck --check-prefix=ALL --check-prefix=N64 %s
+; RUN: llc -march=mips64 -target-abi n64 < %s | FileCheck --check-prefix=ALL --check-prefix=N64 %s
+; RUN: llc -march=mips64el -target-abi n64 < %s | FileCheck --check-prefix=ALL --check-prefix=N64 %s
; Test that O32 correctly reserves space for the four arguments, even when
; there aren't any, as per section 5 of MD00305 (MIPS ABIs Described).
diff --git a/test/CodeGen/Mips/cconv/return-float.ll b/test/CodeGen/Mips/cconv/return-float.ll
index d1a5e4f..8c4c31c 100644
--- a/test/CodeGen/Mips/cconv/return-float.ll
+++ b/test/CodeGen/Mips/cconv/return-float.ll
@@ -1,14 +1,14 @@
; RUN: llc -mtriple=mips-linux-gnu -soft-float -relocation-model=static < %s | FileCheck --check-prefix=ALL --check-prefix=O32 %s
; RUN: llc -mtriple=mipsel-linux-gnu -soft-float -relocation-model=static < %s | FileCheck --check-prefix=ALL --check-prefix=O32 %s
-; RUN-TODO: llc -mtriple=mips64-linux-gnu -soft-float -relocation-model=static -mattr=-n64,+o32 < %s | FileCheck --check-prefix=ALL --check-prefix=O32 %s
-; RUN-TODO: llc -mtriple=mips64el-linux-gnu -soft-float -relocation-model=static -mattr=-n64,+o32 < %s | FileCheck --check-prefix=ALL --check-prefix=O32 %s
+; RUN-TODO: llc -mtriple=mips64-linux-gnu -soft-float -relocation-model=static -target-abi o32 < %s | FileCheck --check-prefix=ALL --check-prefix=O32 %s
+; RUN-TODO: llc -mtriple=mips64el-linux-gnu -soft-float -relocation-model=static -target-abi o32 < %s | FileCheck --check-prefix=ALL --check-prefix=O32 %s
-; RUN: llc -mtriple=mips64-linux-gnu -soft-float -relocation-model=static -mattr=-n64,+n32 < %s | FileCheck --check-prefix=ALL --check-prefix=N32 %s
-; RUN: llc -mtriple=mips64el-linux-gnu -soft-float -relocation-model=static -mattr=-n64,+n32 < %s | FileCheck --check-prefix=ALL --check-prefix=N32 %s
+; RUN: llc -mtriple=mips64-linux-gnu -soft-float -relocation-model=static -target-abi n32 < %s | FileCheck --check-prefix=ALL --check-prefix=N32 %s
+; RUN: llc -mtriple=mips64el-linux-gnu -soft-float -relocation-model=static -target-abi n32 < %s | FileCheck --check-prefix=ALL --check-prefix=N32 %s
-; RUN: llc -mtriple=mips64-linux-gnu -soft-float -relocation-model=static -mattr=-n64,+n64 < %s | FileCheck --check-prefix=ALL --check-prefix=N64 %s
-; RUN: llc -mtriple=mips64el-linux-gnu -soft-float -relocation-model=static -mattr=-n64,+n64 < %s | FileCheck --check-prefix=ALL --check-prefix=N64 %s
+; RUN: llc -mtriple=mips64-linux-gnu -soft-float -relocation-model=static -target-abi n64 < %s | FileCheck --check-prefix=ALL --check-prefix=N64 %s
+; RUN: llc -mtriple=mips64el-linux-gnu -soft-float -relocation-model=static -target-abi n64 < %s | FileCheck --check-prefix=ALL --check-prefix=N64 %s
; Test the float returns for all ABIs and byte orders as specified by
; section 5 of MD00305 (MIPS ABIs Described).
diff --git a/test/CodeGen/Mips/cconv/return-hard-float.ll b/test/CodeGen/Mips/cconv/return-hard-float.ll
index 123b499..f0aeb12 100644
--- a/test/CodeGen/Mips/cconv/return-hard-float.ll
+++ b/test/CodeGen/Mips/cconv/return-hard-float.ll
@@ -1,14 +1,14 @@
; RUN: llc -mtriple=mips-linux-gnu -relocation-model=static < %s | FileCheck --check-prefix=ALL --check-prefix=O32 %s
; RUN: llc -mtriple=mipsel-linux-gnu -relocation-model=static < %s | FileCheck --check-prefix=ALL --check-prefix=O32 %s
-; RUN-TODO: llc -mtriple=mips64-linux-gnu -relocation-model=static -mattr=-n64,+o32 < %s | FileCheck --check-prefix=ALL --check-prefix=O32 %s
-; RUN-TODO: llc -mtriple=mips64el-linux-gnu -relocation-model=static -mattr=-n64,+o32 < %s | FileCheck --check-prefix=ALL --check-prefix=O32 %s
+; RUN-TODO: llc -mtriple=mips64-linux-gnu -relocation-model=static -target-abi o32 < %s | FileCheck --check-prefix=ALL --check-prefix=O32 %s
+; RUN-TODO: llc -mtriple=mips64el-linux-gnu -relocation-model=static -target-abi o32 < %s | FileCheck --check-prefix=ALL --check-prefix=O32 %s
-; RUN: llc -mtriple=mips64-linux-gnu -relocation-model=static -mattr=-n64,+n32 < %s | FileCheck --check-prefix=ALL --check-prefix=N32 %s
-; RUN: llc -mtriple=mips64el-linux-gnu -relocation-model=static -mattr=-n64,+n32 < %s | FileCheck --check-prefix=ALL --check-prefix=N32 %s
+; RUN: llc -mtriple=mips64-linux-gnu -relocation-model=static -target-abi n32 < %s | FileCheck --check-prefix=ALL --check-prefix=N32 %s
+; RUN: llc -mtriple=mips64el-linux-gnu -relocation-model=static -target-abi n32 < %s | FileCheck --check-prefix=ALL --check-prefix=N32 %s
-; RUN: llc -mtriple=mips64-linux-gnu -relocation-model=static -mattr=-n64,+n64 < %s | FileCheck --check-prefix=ALL --check-prefix=N64 %s
-; RUN: llc -mtriple=mips64el-linux-gnu -relocation-model=static -mattr=-n64,+n64 < %s | FileCheck --check-prefix=ALL --check-prefix=N64 %s
+; RUN: llc -mtriple=mips64-linux-gnu -relocation-model=static -target-abi n64 < %s | FileCheck --check-prefix=ALL --check-prefix=N64 %s
+; RUN: llc -mtriple=mips64el-linux-gnu -relocation-model=static -target-abi n64 < %s | FileCheck --check-prefix=ALL --check-prefix=N64 %s
; RUN: llc -mtriple=mips-linux-gnu -relocation-model=static -mattr=+o32,+fp64 < %s | FileCheck --check-prefix=ALL --check-prefix=032FP64 %s
; RUN: llc -mtriple=mipsel-linux-gnu -relocation-model=static -mattr=+o32,+fp64 < %s | FileCheck --check-prefix=ALL --check-prefix=032FP64 %s
diff --git a/test/CodeGen/Mips/cconv/return-hard-fp128.ll b/test/CodeGen/Mips/cconv/return-hard-fp128.ll
index 0da59ef..05dacfe 100644
--- a/test/CodeGen/Mips/cconv/return-hard-fp128.ll
+++ b/test/CodeGen/Mips/cconv/return-hard-fp128.ll
@@ -1,8 +1,8 @@
-; RUN: llc -march=mips64 -relocation-model=static -mattr=-n64,+n32 < %s | FileCheck --check-prefix=ALL --check-prefix=N32 %s
-; RUN: llc -march=mips64el -relocation-model=static -mattr=-n64,+n32 < %s | FileCheck --check-prefix=ALL --check-prefix=N32 %s
+; RUN: llc -march=mips64 -relocation-model=static -target-abi n32 < %s | FileCheck --check-prefix=ALL --check-prefix=N32 %s
+; RUN: llc -march=mips64el -relocation-model=static -target-abi n32 < %s | FileCheck --check-prefix=ALL --check-prefix=N32 %s
-; RUN: llc -march=mips64 -relocation-model=static -mattr=-n64,+n64 < %s | FileCheck --check-prefix=ALL --check-prefix=N64 %s
-; RUN: llc -march=mips64el -relocation-model=static -mattr=-n64,+n64 < %s | FileCheck --check-prefix=ALL --check-prefix=N64 %s
+; RUN: llc -march=mips64 -relocation-model=static -target-abi n64 < %s | FileCheck --check-prefix=ALL --check-prefix=N64 %s
+; RUN: llc -march=mips64el -relocation-model=static -target-abi n64 < %s | FileCheck --check-prefix=ALL --check-prefix=N64 %s
; Test the fp128 returns for N32/N64 and all byte orders as specified by
; section 5 of MD00305 (MIPS ABIs Described).
diff --git a/test/CodeGen/Mips/cconv/return-hard-struct-f128.ll b/test/CodeGen/Mips/cconv/return-hard-struct-f128.ll
index 2e84477..4ce26b1 100644
--- a/test/CodeGen/Mips/cconv/return-hard-struct-f128.ll
+++ b/test/CodeGen/Mips/cconv/return-hard-struct-f128.ll
@@ -1,8 +1,8 @@
-; RUN: llc -mtriple=mips64-linux-gnu -relocation-model=static -mattr=-n64,+n32 < %s | FileCheck --check-prefix=ALL --check-prefix=N32 %s
-; RUN: llc -mtriple=mips64el-linux-gnu -relocation-model=static -mattr=-n64,+n32 < %s | FileCheck --check-prefix=ALL --check-prefix=N32 %s
+; RUN: llc -mtriple=mips64-linux-gnu -relocation-model=static -target-abi n32 < %s | FileCheck --check-prefix=ALL --check-prefix=N32 %s
+; RUN: llc -mtriple=mips64el-linux-gnu -relocation-model=static -target-abi n32 < %s | FileCheck --check-prefix=ALL --check-prefix=N32 %s
-; RUN: llc -mtriple=mips64-linux-gnu -relocation-model=static -mattr=-n64,+n64 < %s | FileCheck --check-prefix=ALL --check-prefix=N64 %s
-; RUN: llc -mtriple=mips64el-linux-gnu -relocation-model=static -mattr=-n64,+n64 < %s | FileCheck --check-prefix=ALL --check-prefix=N64 %s
+; RUN: llc -mtriple=mips64-linux-gnu -relocation-model=static -target-abi n64 < %s | FileCheck --check-prefix=ALL --check-prefix=N64 %s
+; RUN: llc -mtriple=mips64el-linux-gnu -relocation-model=static -target-abi n64 < %s | FileCheck --check-prefix=ALL --check-prefix=N64 %s
; Test that returning {fp128} agrees with the de-facto N32/N64 ABI.
diff --git a/test/CodeGen/Mips/cconv/return-struct.ll b/test/CodeGen/Mips/cconv/return-struct.ll
index 11a8cf0..3d591df 100644
--- a/test/CodeGen/Mips/cconv/return-struct.ll
+++ b/test/CodeGen/Mips/cconv/return-struct.ll
@@ -1,14 +1,14 @@
; RUN: llc -mtriple=mips-linux-gnu -relocation-model=static < %s | FileCheck --check-prefix=ALL --check-prefix=O32 --check-prefix=O32-BE %s
; RUN: llc -mtriple=mipsel-linux-gnu -relocation-model=static < %s | FileCheck --check-prefix=ALL --check-prefix=O32 --check-prefix=O32-LE %s
-; RUN-TODO: llc -mtriple=mips64-linux-gnu -relocation-model=static -mattr=-n64,+o32 < %s | FileCheck --check-prefix=ALL --check-prefix=O32 %s
-; RUN-TODO: llc -mtriple=mips64el-linux-gnu -relocation-model=static -mattr=-n64,+o32 < %s | FileCheck --check-prefix=ALL --check-prefix=O32 %s
+; RUN-TODO: llc -mtriple=mips64-linux-gnu -relocation-model=static -target-abi o32 < %s | FileCheck --check-prefix=ALL --check-prefix=O32 %s
+; RUN-TODO: llc -mtriple=mips64el-linux-gnu -relocation-model=static -target-abi o32 < %s | FileCheck --check-prefix=ALL --check-prefix=O32 %s
-; RUN: llc -mtriple=mips64-linux-gnu -relocation-model=static -mattr=-n64,+n32 < %s | FileCheck --check-prefix=ALL --check-prefix=N32 --check-prefix=N32-BE %s
-; RUN: llc -mtriple=mips64el-linux-gnu -relocation-model=static -mattr=-n64,+n32 < %s | FileCheck --check-prefix=ALL --check-prefix=N32 --check-prefix=N32-LE %s
+; RUN: llc -mtriple=mips64-linux-gnu -relocation-model=static -target-abi n32 < %s | FileCheck --check-prefix=ALL --check-prefix=N32 --check-prefix=N32-BE %s
+; RUN: llc -mtriple=mips64el-linux-gnu -relocation-model=static -target-abi n32 < %s | FileCheck --check-prefix=ALL --check-prefix=N32 --check-prefix=N32-LE %s
-; RUN: llc -mtriple=mips64-linux-gnu -relocation-model=static -mattr=-n64,+n64 < %s | FileCheck --check-prefix=ALL --check-prefix=N64 --check-prefix=N64-BE %s
-; RUN: llc -mtriple=mips64el-linux-gnu -relocation-model=static -mattr=-n64,+n64 < %s | FileCheck --check-prefix=ALL --check-prefix=N64 --check-prefix=N64-LE %s
+; RUN: llc -mtriple=mips64-linux-gnu -relocation-model=static -target-abi n64 < %s | FileCheck --check-prefix=ALL --check-prefix=N64 --check-prefix=N64-BE %s
+; RUN: llc -mtriple=mips64el-linux-gnu -relocation-model=static -target-abi n64 < %s | FileCheck --check-prefix=ALL --check-prefix=N64 --check-prefix=N64-LE %s
; Test struct returns for all ABIs and byte orders.
diff --git a/test/CodeGen/Mips/cconv/return.ll b/test/CodeGen/Mips/cconv/return.ll
index 63f9b5f..516026d 100644
--- a/test/CodeGen/Mips/cconv/return.ll
+++ b/test/CodeGen/Mips/cconv/return.ll
@@ -1,14 +1,14 @@
; RUN: llc -mtriple=mips-linux-gnu -relocation-model=static < %s | FileCheck --check-prefix=ALL --check-prefix=O32 %s
; RUN: llc -mtriple=mipsel-linux-gnu -relocation-model=static < %s | FileCheck --check-prefix=ALL --check-prefix=O32 %s
-; RUN-TODO: llc -mtriple=mips64-linux-gnu -relocation-model=static -mattr=-n64,+o32 < %s | FileCheck --check-prefix=ALL --check-prefix=O32 %s
-; RUN-TODO: llc -mtriple=mips64el-linux-gnu -relocation-model=static -mattr=-n64,+o32 < %s | FileCheck --check-prefix=ALL --check-prefix=O32 %s
+; RUN-TODO: llc -mtriple=mips64-linux-gnu -relocation-model=static -target-abi o32 < %s | FileCheck --check-prefix=ALL --check-prefix=O32 %s
+; RUN-TODO: llc -mtriple=mips64el-linux-gnu -relocation-model=static -target-abi o32 < %s | FileCheck --check-prefix=ALL --check-prefix=O32 %s
-; RUN: llc -mtriple=mips64-linux-gnu -relocation-model=static -mattr=-n64,+n32 < %s | FileCheck --check-prefix=ALL --check-prefix=N32 %s
-; RUN: llc -mtriple=mips64el-linux-gnu -relocation-model=static -mattr=-n64,+n32 < %s | FileCheck --check-prefix=ALL --check-prefix=N32 %s
+; RUN: llc -mtriple=mips64-linux-gnu -relocation-model=static -target-abi n32 < %s | FileCheck --check-prefix=ALL --check-prefix=N32 %s
+; RUN: llc -mtriple=mips64el-linux-gnu -relocation-model=static -target-abi n32 < %s | FileCheck --check-prefix=ALL --check-prefix=N32 %s
-; RUN: llc -mtriple=mips64-linux-gnu -relocation-model=static -mattr=-n64,+n64 < %s | FileCheck --check-prefix=ALL --check-prefix=N64 %s
-; RUN: llc -mtriple=mips64el-linux-gnu -relocation-model=static -mattr=-n64,+n64 < %s | FileCheck --check-prefix=ALL --check-prefix=N64 %s
+; RUN: llc -mtriple=mips64-linux-gnu -relocation-model=static -target-abi n64 < %s | FileCheck --check-prefix=ALL --check-prefix=N64 %s
+; RUN: llc -mtriple=mips64el-linux-gnu -relocation-model=static -target-abi n64 < %s | FileCheck --check-prefix=ALL --check-prefix=N64 %s
; Test the integer returns for all ABIs and byte orders as specified by
; section 5 of MD00305 (MIPS ABIs Described).
diff --git a/test/CodeGen/Mips/cconv/stack-alignment.ll b/test/CodeGen/Mips/cconv/stack-alignment.ll
index 834033b..f21bc30 100644
--- a/test/CodeGen/Mips/cconv/stack-alignment.ll
+++ b/test/CodeGen/Mips/cconv/stack-alignment.ll
@@ -1,14 +1,14 @@
; RUN: llc -march=mips < %s | FileCheck --check-prefix=ALL --check-prefix=O32 %s
; RUN: llc -march=mipsel < %s | FileCheck --check-prefix=ALL --check-prefix=O32 %s
-; RUN-TODO: llc -march=mips64 -mattr=-n64,+o32 < %s | FileCheck --check-prefix=ALL --check-prefix=O32 %s
-; RUN-TODO: llc -march=mips64el -mattr=-n64,+o32 < %s | FileCheck --check-prefix=ALL --check-prefix=O32 %s
+; RUN-TODO: llc -march=mips64 -target-abi o32 < %s | FileCheck --check-prefix=ALL --check-prefix=O32 %s
+; RUN-TODO: llc -march=mips64el -target-abi o32 < %s | FileCheck --check-prefix=ALL --check-prefix=O32 %s
-; RUN: llc -march=mips64 -mattr=-n64,+n32 < %s | FileCheck --check-prefix=ALL --check-prefix=N32 %s
-; RUN: llc -march=mips64el -mattr=-n64,+n32 < %s | FileCheck --check-prefix=ALL --check-prefix=N32 %s
+; RUN: llc -march=mips64 -target-abi n32 < %s | FileCheck --check-prefix=ALL --check-prefix=N32 %s
+; RUN: llc -march=mips64el -target-abi n32 < %s | FileCheck --check-prefix=ALL --check-prefix=N32 %s
-; RUN: llc -march=mips64 -mattr=-n64,+n64 < %s | FileCheck --check-prefix=ALL --check-prefix=N64 %s
-; RUN: llc -march=mips64el -mattr=-n64,+n64 < %s | FileCheck --check-prefix=ALL --check-prefix=N64 %s
+; RUN: llc -march=mips64 -target-abi n64 < %s | FileCheck --check-prefix=ALL --check-prefix=N64 %s
+; RUN: llc -march=mips64el -target-abi n64 < %s | FileCheck --check-prefix=ALL --check-prefix=N64 %s
; Test the stack alignment for all ABIs and byte orders as specified by
; section 5 of MD00305 (MIPS ABIs Described).
diff --git a/test/CodeGen/Mips/ci2.ll b/test/CodeGen/Mips/ci2.ll
index 7187f0c..e2068fd 100644
--- a/test/CodeGen/Mips/ci2.ll
+++ b/test/CodeGen/Mips/ci2.ll
@@ -36,4 +36,4 @@ if.end: ; preds = %if.else, %if.then
attributes #0 = { nounwind "less-precise-fpmad"="false" "no-frame-pointer-elim"="true" "no-frame-pointer-elim-non-leaf" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" }
attributes #1 = { nounwind }
-!1 = metadata !{i32 103}
+!1 = !{i32 103}
diff --git a/test/CodeGen/Mips/const1.ll b/test/CodeGen/Mips/const1.ll
index cb2baca..f32ce24 100644
--- a/test/CodeGen/Mips/const1.ll
+++ b/test/CodeGen/Mips/const1.ll
@@ -32,4 +32,4 @@ attributes #0 = { nounwind "less-precise-fpmad"="false" "no-frame-pointer-elim"=
!llvm.ident = !{!0}
-!0 = metadata !{metadata !"clang version 3.4 (gitosis@dmz-portal.mips.com:clang.git b754974ec32ab712ea7d8b52cd8037b24e7d6ed3) (gitosis@dmz-portal.mips.com:llvm.git 8e211187b501bc73edb938fde0019c9a20bcffd5)"}
+!0 = !{!"clang version 3.4 (gitosis@dmz-portal.mips.com:clang.git b754974ec32ab712ea7d8b52cd8037b24e7d6ed3) (gitosis@dmz-portal.mips.com:llvm.git 8e211187b501bc73edb938fde0019c9a20bcffd5)"}
diff --git a/test/CodeGen/Mips/const4a.ll b/test/CodeGen/Mips/const4a.ll
index b4c509f..ac6795b 100644
--- a/test/CodeGen/Mips/const4a.ll
+++ b/test/CodeGen/Mips/const4a.ll
@@ -177,4 +177,4 @@ attributes #1 = { "less-precise-fpmad"="false" "no-frame-pointer-elim"="true" "n
!llvm.ident = !{!0}
-!0 = metadata !{metadata !"clang version 3.4 (gitosis@dmz-portal.mips.com:clang.git b310439121c875937d78cc49cc969bc1197fc025) (gitosis@dmz-portal.mips.com:llvm.git 7fc0ca9656ebec8dad61f72f5a5ddfb232c070fd)"}
+!0 = !{!"clang version 3.4 (gitosis@dmz-portal.mips.com:clang.git b310439121c875937d78cc49cc969bc1197fc025) (gitosis@dmz-portal.mips.com:llvm.git 7fc0ca9656ebec8dad61f72f5a5ddfb232c070fd)"}
diff --git a/test/CodeGen/Mips/const6.ll b/test/CodeGen/Mips/const6.ll
index 3f02ab9..c26e02f 100644
--- a/test/CodeGen/Mips/const6.ll
+++ b/test/CodeGen/Mips/const6.ll
@@ -159,6 +159,6 @@ attributes #1 = { "less-precise-fpmad"="false" "no-frame-pointer-elim"="true" "n
!llvm.ident = !{!0}
-!0 = metadata !{metadata !"clang version 3.4 (gitosis@dmz-portal.mips.com:clang.git b310439121c875937d78cc49cc969bc1197fc025) (gitosis@dmz-portal.mips.com:llvm.git 7fc0ca9656ebec8dad61f72f5a5ddfb232c070fd)"}
+!0 = !{!"clang version 3.4 (gitosis@dmz-portal.mips.com:clang.git b310439121c875937d78cc49cc969bc1197fc025) (gitosis@dmz-portal.mips.com:llvm.git 7fc0ca9656ebec8dad61f72f5a5ddfb232c070fd)"}
diff --git a/test/CodeGen/Mips/const6a.ll b/test/CodeGen/Mips/const6a.ll
index d342390..aff1357 100644
--- a/test/CodeGen/Mips/const6a.ll
+++ b/test/CodeGen/Mips/const6a.ll
@@ -26,4 +26,4 @@ entry:
attributes #0 = { nounwind "less-precise-fpmad"="false" "no-frame-pointer-elim"="true" "no-frame-pointer-elim-non-leaf" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="true" }
attributes #1 = { nounwind }
-!1 = metadata !{i32 121}
+!1 = !{i32 121}
diff --git a/test/CodeGen/Mips/fcmp.ll b/test/CodeGen/Mips/fcmp.ll
index b775983..8e83b00 100644
--- a/test/CodeGen/Mips/fcmp.ll
+++ b/test/CodeGen/Mips/fcmp.ll
@@ -781,3 +781,93 @@ define i32 @true_f64(double %a, double %b) nounwind {
%2 = zext i1 %1 to i32
ret i32 %2
}
+
+; The optimizers sometimes produce setlt instead of setolt/setult.
+define float @bug1_f32(float %angle, float %at) #0 {
+entry:
+; ALL-LABEL: bug1_f32:
+
+; 32-C-DAG: add.s $[[T0:f[0-9]+]], $f14, $f12
+; 32-C-DAG: lwc1 $[[T1:f[0-9]+]], %lo($CPI32_0)(
+; 32-C-DAG: c.ole.s $[[T0]], $[[T1]]
+; 32-C-DAG: bc1t
+
+; 32-CMP-DAG: add.s $[[T0:f[0-9]+]], $f14, $f12
+; 32-CMP-DAG: lwc1 $[[T1:f[0-9]+]], %lo($CPI32_0)(
+; 32-CMP-DAG: cmp.le.s $[[T2:f[0-9]+]], $[[T0]], $[[T1]]
+; 32-CMP-DAG: mfc1 $[[T3:[0-9]+]], $[[T2]]
+; FIXME: This instruction is redundant.
+; 32-CMP-DAG: andi $[[T4:[0-9]+]], $[[T3]], 1
+; 32-CMP-DAG: bnez $[[T4]],
+
+; 64-C-DAG: add.s $[[T0:f[0-9]+]], $f13, $f12
+; 64-C-DAG: lwc1 $[[T1:f[0-9]+]], %got_ofst($CPI32_0)(
+; 64-C-DAG: c.ole.s $[[T0]], $[[T1]]
+; 64-C-DAG: bc1t
+
+; 64-CMP-DAG: add.s $[[T0:f[0-9]+]], $f13, $f12
+; 64-CMP-DAG: lwc1 $[[T1:f[0-9]+]], %got_ofst($CPI32_0)(
+; 64-CMP-DAG: cmp.le.s $[[T2:f[0-9]+]], $[[T0]], $[[T1]]
+; 64-CMP-DAG: mfc1 $[[T3:[0-9]+]], $[[T2]]
+; FIXME: This instruction is redundant.
+; 64-CMP-DAG: andi $[[T4:[0-9]+]], $[[T3]], 1
+; 64-CMP-DAG: bnez $[[T4]],
+
+ %add = fadd fast float %at, %angle
+ %cmp = fcmp ogt float %add, 1.000000e+00
+ br i1 %cmp, label %if.then, label %if.end
+
+if.then:
+ %sub = fadd fast float %add, -1.000000e+00
+ br label %if.end
+
+if.end:
+ %theta.0 = phi float [ %sub, %if.then ], [ %add, %entry ]
+ ret float %theta.0
+}
+
+; The optimizers sometimes produce setlt instead of setolt/setult.
+define double @bug1_f64(double %angle, double %at) #0 {
+entry:
+; ALL-LABEL: bug1_f64:
+
+; 32-C-DAG: add.d $[[T0:f[0-9]+]], $f14, $f12
+; 32-C-DAG: ldc1 $[[T1:f[0-9]+]], %lo($CPI33_0)(
+; 32-C-DAG: c.ole.d $[[T0]], $[[T1]]
+; 32-C-DAG: bc1t
+
+; 32-CMP-DAG: add.d $[[T0:f[0-9]+]], $f14, $f12
+; 32-CMP-DAG: ldc1 $[[T1:f[0-9]+]], %lo($CPI33_0)(
+; 32-CMP-DAG: cmp.le.d $[[T2:f[0-9]+]], $[[T0]], $[[T1]]
+; 32-CMP-DAG: mfc1 $[[T3:[0-9]+]], $[[T2]]
+; FIXME: This instruction is redundant.
+; 32-CMP-DAG: andi $[[T4:[0-9]+]], $[[T3]], 1
+; 32-CMP-DAG: bnez $[[T4]],
+
+; 64-C-DAG: add.d $[[T0:f[0-9]+]], $f13, $f12
+; 64-C-DAG: ldc1 $[[T1:f[0-9]+]], %got_ofst($CPI33_0)(
+; 64-C-DAG: c.ole.d $[[T0]], $[[T1]]
+; 64-C-DAG: bc1t
+
+; 64-CMP-DAG: add.d $[[T0:f[0-9]+]], $f13, $f12
+; 64-CMP-DAG: ldc1 $[[T1:f[0-9]+]], %got_ofst($CPI33_0)(
+; 64-CMP-DAG: cmp.le.d $[[T2:f[0-9]+]], $[[T0]], $[[T1]]
+; 64-CMP-DAG: mfc1 $[[T3:[0-9]+]], $[[T2]]
+; FIXME: This instruction is redundant.
+; 64-CMP-DAG: andi $[[T4:[0-9]+]], $[[T3]], 1
+; 64-CMP-DAG: bnez $[[T4]],
+
+ %add = fadd fast double %at, %angle
+ %cmp = fcmp ogt double %add, 1.000000e+00
+ br i1 %cmp, label %if.then, label %if.end
+
+if.then:
+ %sub = fadd fast double %add, -1.000000e+00
+ br label %if.end
+
+if.end:
+ %theta.0 = phi double [ %sub, %if.then ], [ %add, %entry ]
+ ret double %theta.0
+}
+
+attributes #0 = { nounwind readnone "no-nans-fp-math"="true" }
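The two regression tests above pin down the same shape for float and double. The essential pattern, as a minimal IR sketch (the names and the constant are illustrative):

    %add = fadd fast float %at, %angle
    %cmp = fcmp ogt float %add, 1.000000e+00
    br i1 %cmp, label %if.then, label %if.end

With "no-nans-fp-math"="true" the ordered ogt may be relaxed to a bare setgt/setlt, so the backend branches on the inverse condition instead (c.ole.s or cmp.le.s followed by a branch to the not-taken successor), which is what the CHECK lines match.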
diff --git a/test/CodeGen/Mips/fcopysign-f32-f64.ll b/test/CodeGen/Mips/fcopysign-f32-f64.ll
index 148a780..860bc79 100644
--- a/test/CodeGen/Mips/fcopysign-f32-f64.ll
+++ b/test/CodeGen/Mips/fcopysign-f32-f64.ll
@@ -1,6 +1,6 @@
-; RUN: llc < %s -march=mips64el -mcpu=mips4 -mattr=n64 | FileCheck %s -check-prefix=64
-; RUN: llc < %s -march=mips64el -mcpu=mips64 -mattr=n64 | FileCheck %s -check-prefix=64
-; RUN: llc < %s -march=mips64el -mcpu=mips64r2 -mattr=n64 | FileCheck %s -check-prefix=64R2
+; RUN: llc < %s -march=mips64el -mcpu=mips4 -target-abi=n64 | FileCheck %s -check-prefix=64
+; RUN: llc < %s -march=mips64el -mcpu=mips64 -target-abi=n64 | FileCheck %s -check-prefix=64
+; RUN: llc < %s -march=mips64el -mcpu=mips64r2 -target-abi=n64 | FileCheck %s -check-prefix=64R2
declare double @copysign(double, double) nounwind readnone
diff --git a/test/CodeGen/Mips/fcopysign.ll b/test/CodeGen/Mips/fcopysign.ll
index 3a9d9c7..6928f2f 100644
--- a/test/CodeGen/Mips/fcopysign.ll
+++ b/test/CodeGen/Mips/fcopysign.ll
@@ -1,8 +1,8 @@
; RUN: llc < %s -march=mipsel -mcpu=mips32 | FileCheck %s -check-prefix=32
; RUN: llc < %s -march=mipsel -mcpu=mips32r2 | FileCheck %s -check-prefix=32R2
-; RUN: llc < %s -march=mips64el -mcpu=mips4 -mattr=n64 | FileCheck %s -check-prefix=64
-; RUN: llc < %s -march=mips64el -mcpu=mips64 -mattr=n64 | FileCheck %s -check-prefix=64
-; RUN: llc < %s -march=mips64el -mcpu=mips64r2 -mattr=n64 | FileCheck %s -check-prefix=64R2
+; RUN: llc < %s -march=mips64el -mcpu=mips4 -target-abi=n64 | FileCheck %s -check-prefix=64
+; RUN: llc < %s -march=mips64el -mcpu=mips64 -target-abi=n64 | FileCheck %s -check-prefix=64
+; RUN: llc < %s -march=mips64el -mcpu=mips64r2 -target-abi=n64 | FileCheck %s -check-prefix=64R2
define double @func0(double %d0, double %d1) nounwind readnone {
entry:
diff --git a/test/CodeGen/Mips/fmadd1.ll b/test/CodeGen/Mips/fmadd1.ll
index 271631e..99d99fa 100644
--- a/test/CodeGen/Mips/fmadd1.ll
+++ b/test/CodeGen/Mips/fmadd1.ll
@@ -8,15 +8,15 @@
; RUN: llc < %s -march=mipsel -mcpu=mips32 -enable-no-nans-fp-math | FileCheck %s -check-prefix=ALL -check-prefix=32 -check-prefix=32-NONAN
; RUN: llc < %s -march=mipsel -mcpu=mips32r2 -enable-no-nans-fp-math | FileCheck %s -check-prefix=ALL -check-prefix=32R2 -check-prefix=32R2-NONAN
; RUN: llc < %s -march=mipsel -mcpu=mips32r6 -enable-no-nans-fp-math | FileCheck %s -check-prefix=ALL -check-prefix=32R6 -check-prefix=32R6-NONAN
-; RUN: llc < %s -march=mips64el -mcpu=mips64 -mattr=n64 -enable-no-nans-fp-math | FileCheck %s -check-prefix=ALL -check-prefix=64 -check-prefix=64-NONAN
-; RUN: llc < %s -march=mips64el -mcpu=mips64r2 -mattr=n64 -enable-no-nans-fp-math | FileCheck %s -check-prefix=ALL -check-prefix=64R2 -check-prefix=64R2-NONAN
-; RUN: llc < %s -march=mips64el -mcpu=mips64r6 -mattr=n64 -enable-no-nans-fp-math | FileCheck %s -check-prefix=ALL -check-prefix=64R6 -check-prefix=64R6-NONAN
+; RUN: llc < %s -march=mips64el -mcpu=mips64 -target-abi=n64 -enable-no-nans-fp-math | FileCheck %s -check-prefix=ALL -check-prefix=64 -check-prefix=64-NONAN
+; RUN: llc < %s -march=mips64el -mcpu=mips64r2 -target-abi=n64 -enable-no-nans-fp-math | FileCheck %s -check-prefix=ALL -check-prefix=64R2 -check-prefix=64R2-NONAN
+; RUN: llc < %s -march=mips64el -mcpu=mips64r6 -target-abi=n64 -enable-no-nans-fp-math | FileCheck %s -check-prefix=ALL -check-prefix=64R6 -check-prefix=64R6-NONAN
; RUN: llc < %s -march=mipsel -mcpu=mips32 | FileCheck %s -check-prefix=ALL -check-prefix=32 -check-prefix=32-NAN
; RUN: llc < %s -march=mipsel -mcpu=mips32r2 | FileCheck %s -check-prefix=ALL -check-prefix=32R2 -check-prefix=32R2-NAN
; RUN: llc < %s -march=mipsel -mcpu=mips32r6 | FileCheck %s -check-prefix=ALL -check-prefix=32R6 -check-prefix=32R6-NAN
-; RUN: llc < %s -march=mips64el -mcpu=mips64 -mattr=n64 | FileCheck %s -check-prefix=ALL -check-prefix=64 -check-prefix=64-NAN
-; RUN: llc < %s -march=mips64el -mcpu=mips64r2 -mattr=n64 | FileCheck %s -check-prefix=ALL -check-prefix=64R2 -check-prefix=64R2-NAN
-; RUN: llc < %s -march=mips64el -mcpu=mips64r6 -mattr=n64 | FileCheck %s -check-prefix=ALL -check-prefix=64R6 -check-prefix=64R6-NAN
+; RUN: llc < %s -march=mips64el -mcpu=mips64 -target-abi=n64 | FileCheck %s -check-prefix=ALL -check-prefix=64 -check-prefix=64-NAN
+; RUN: llc < %s -march=mips64el -mcpu=mips64r2 -target-abi=n64 | FileCheck %s -check-prefix=ALL -check-prefix=64R2 -check-prefix=64R2-NAN
+; RUN: llc < %s -march=mips64el -mcpu=mips64r6 -target-abi=n64 | FileCheck %s -check-prefix=ALL -check-prefix=64R6 -check-prefix=64R6-NAN
define float @FOO0float(float %a, float %b, float %c) nounwind readnone {
entry:
@@ -39,10 +39,9 @@ entry:
; 32R6-DAG: mtc1 $zero, $[[T2:f[0-9]+]]
; 32R6-DAG: add.s $f0, $[[T1]], $[[T2]]
-; 64-DAG: mul.s $[[T1:f[0-9]+]], $f12, $f13
-; 64-DAG: add.s $[[T2:f[0-9]+]], $[[T1]], $f14
-; 64-DAG: mtc1 $zero, $[[T2:f[0-9]+]]
-; 64-DAG: add.s $f0, $[[T1]], $[[T2]]
+; 64-DAG: madd.s $[[T0:f[0-9]+]], $f14, $f12, $f13
+; 64-DAG: mtc1 $zero, $[[T1:f[0-9]+]]
+; 64-DAG: add.s $f0, $[[T0]], $[[T1]]
; 64R2: madd.s $[[T0:f[0-9]+]], $f14, $f12, $f13
; 64R2: mtc1 $zero, $[[T1:f[0-9]+]]
@@ -80,10 +79,9 @@ entry:
; 32R6-DAG: mtc1 $zero, $[[T2:f[0-9]+]]
; 32R6-DAG: add.s $f0, $[[T1]], $[[T2]]
-; 64-DAG: mul.s $[[T1:f[0-9]+]], $f12, $f13
-; 64-DAG: sub.s $[[T2:f[0-9]+]], $[[T1]], $f14
-; 64-DAG: mtc1 $zero, $[[T2:f[0-9]+]]
-; 64-DAG: add.s $f0, $[[T1]], $[[T2]]
+; 64-DAG: msub.s $[[T0:f[0-9]+]], $f14, $f12, $f13
+; 64-DAG: mtc1 $zero, $[[T1:f[0-9]+]]
+; 64-DAG: add.s $f0, $[[T0]], $[[T1]]
; 64R2: msub.s $[[T0:f[0-9]+]], $f14, $f12, $f13
; 64R2: mtc1 $zero, $[[T1:f[0-9]+]]
@@ -124,10 +122,11 @@ entry:
; 32R6-DAG: mtc1 $zero, $[[T2:f[0-9]+]]
; 32R6-DAG: sub.s $f0, $[[T2]], $[[T1]]
-; 64-DAG: mul.s $[[T1:f[0-9]+]], $f12, $f13
-; 64-DAG: add.s $[[T2:f[0-9]+]], $[[T1]], $f14
-; 64-DAG: mtc1 $zero, $[[T2:f[0-9]+]]
-; 64-DAG: sub.s $f0, $[[T2]], $[[T1]]
+; 64-NONAN: nmadd.s $f0, $f14, $f12, $f13
+
+; 64-NAN: madd.s $[[T0:f[0-9]+]], $f14, $f12, $f13
+; 64-NAN: mtc1 $zero, $[[T1:f[0-9]+]]
+; 64-NAN: sub.s $f0, $[[T1]], $[[T0]]
; 64R2-NONAN: nmadd.s $f0, $f14, $f12, $f13
@@ -164,10 +163,11 @@ entry:
; 32R2-NAN: mtc1 $zero, $[[T2:f[0-9]+]]
; 32R2-NAN: sub.s $f0, $[[T2]], $[[T1]]
-; 64-DAG: mul.s $[[T1:f[0-9]+]], $f12, $f13
-; 64-DAG: sub.s $[[T2:f[0-9]+]], $[[T1]], $f14
-; 64-DAG: mtc1 $zero, $[[T2:f[0-9]+]]
-; 64-DAG: sub.s $f0, $[[T2]], $[[T1]]
+; 64-NAN: msub.s $[[T0:f[0-9]+]], $f14, $f12, $f13
+; 64-NAN: mtc1 $zero, $[[T1:f[0-9]+]]
+; 64-NAN: sub.s $f0, $[[T1]], $[[T0]]
+
+; 64-NONAN: nmsub.s $f0, $f14, $f12, $f13
; 64R2-NAN: msub.s $[[T0:f[0-9]+]], $f14, $f12, $f13
; 64R2-NAN: mtc1 $zero, $[[T1:f[0-9]+]]
@@ -206,10 +206,9 @@ entry:
; 32R6-DAG: mtc1 $zero, $[[T2:f[0-9]+]]
; 32R6-DAG: add.d $f0, $[[T1]], $[[T2]]
-; 64-DAG: mul.d $[[T1:f[0-9]+]], $f12, $f13
-; 64-DAG: add.d $[[T2:f[0-9]+]], $[[T1]], $f14
-; 64-DAG: dmtc1 $zero, $[[T2:f[0-9]+]]
-; 64-DAG: add.d $f0, $[[T1]], $[[T2]]
+; 64-DAG: madd.d $[[T0:f[0-9]+]], $f14, $f12, $f13
+; 64-DAG: mtc1 $zero, $[[T1:f[0-9]+]]
+; 64-DAG: add.d $f0, $[[T0]], $[[T1]]
; 64R2: madd.d $[[T0:f[0-9]+]], $f14, $f12, $f13
; 64R2: mtc1 $zero, $[[T1:f[0-9]+]]
@@ -248,10 +247,9 @@ entry:
; 32R6-DAG: mtc1 $zero, $[[T2:f[0-9]+]]
; 32R6-DAG: add.d $f0, $[[T1]], $[[T2]]
-; 64-DAG: mul.d $[[T1:f[0-9]+]], $f12, $f13
-; 64-DAG: sub.d $[[T2:f[0-9]+]], $[[T1]], $f14
-; 64-DAG: dmtc1 $zero, $[[T2:f[0-9]+]]
-; 64-DAG: add.d $f0, $[[T1]], $[[T2]]
+; 64-DAG: msub.d $[[T0:f[0-9]+]], $f14, $f12, $f13
+; 64-DAG: mtc1 $zero, $[[T1:f[0-9]+]]
+; 64-DAG: add.d $f0, $[[T0]], $[[T1]]
; 64R2: msub.d $[[T0:f[0-9]+]], $f14, $f12, $f13
; 64R2: mtc1 $zero, $[[T1:f[0-9]+]]
@@ -293,10 +291,11 @@ entry:
; 32R6-DAG: mtc1 $zero, $[[T2:f[0-9]+]]
; 32R6-DAG: sub.d $f0, $[[T2]], $[[T1]]
-; 64-DAG: mul.d $[[T1:f[0-9]+]], $f12, $f13
-; 64-DAG: add.d $[[T2:f[0-9]+]], $[[T1]], $f14
-; 64-DAG: dmtc1 $zero, $[[T2:f[0-9]+]]
-; 64-DAG: sub.d $f0, $[[T2]], $[[T1]]
+; 64-NONAN: nmadd.d $f0, $f14, $f12, $f13
+
+; 64-NAN: madd.d $[[T0:f[0-9]+]], $f14, $f12, $f13
+; 64-NAN: mtc1 $zero, $[[T1:f[0-9]+]]
+; 64-NAN: sub.d $f0, $[[T1]], $[[T0]]
; 64R2-NONAN: nmadd.d $f0, $f14, $f12, $f13
@@ -340,10 +339,11 @@ entry:
; 32R6-DAG: mtc1 $zero, $[[T2:f[0-9]+]]
; 32R6-DAG: sub.d $f0, $[[T2]], $[[T1]]
-; 64-DAG: mul.d $[[T1:f[0-9]+]], $f12, $f13
-; 64-DAG: sub.d $[[T2:f[0-9]+]], $[[T1]], $f14
-; 64-DAG: dmtc1 $zero, $[[T2:f[0-9]+]]
-; 64-DAG: sub.d $f0, $[[T2]], $[[T1]]
+; 64-NONAN: nmsub.d $f0, $f14, $f12, $f13
+
+; 64-NAN: msub.d $[[T0:f[0-9]+]], $f14, $f12, $f13
+; 64-NAN: mtc1 $zero, $[[T1:f[0-9]+]]
+; 64-NAN: sub.d $f0, $[[T1]], $[[T0]]
; 64R2-NONAN: nmsub.d $f0, $f14, $f12, $f13
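The check updates in this file expect plain -mcpu=mips64 to select the multiply-add instructions that were previously only checked for on mips64r2: a mul.s/add.s pair becomes a single madd.s (likewise msub.s), and with -enable-no-nans-fp-math the negated results fold further into nmadd/nmsub. Schematically, with register allocation abbreviated:

    before: mul.s  $f0, $f12, $f13
            add.s  $f0, $f0, $f14
    after:  madd.s $f0, $f14, $f12, $f13   ; $f12*$f13 + $f14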
diff --git a/test/CodeGen/Mips/fp-indexed-ls.ll b/test/CodeGen/Mips/fp-indexed-ls.ll
index 787e131..ea337de 100644
--- a/test/CodeGen/Mips/fp-indexed-ls.ll
+++ b/test/CodeGen/Mips/fp-indexed-ls.ll
@@ -1,10 +1,10 @@
; RUN: llc -march=mipsel -mcpu=mips32 < %s | FileCheck %s -check-prefix=ALL -check-prefix=MIPS32R1
; RUN: llc -march=mipsel -mcpu=mips32r2 < %s | FileCheck %s -check-prefix=ALL -check-prefix=MIPS32R2
; RUN: llc -march=mipsel -mcpu=mips32r6 < %s | FileCheck %s -check-prefix=ALL -check-prefix=MIPS32R6
-; RUN: llc -march=mips64el -mcpu=mips4 -mattr=n64 < %s | FileCheck %s -check-prefix=ALL -check-prefix=MIPS4
-; RUN: llc -march=mips64el -mcpu=mips64 -mattr=n64 < %s | FileCheck %s -check-prefix=ALL -check-prefix=MIPS4
-; RUN: llc -march=mips64el -mcpu=mips64r2 -mattr=n64 < %s | FileCheck %s -check-prefix=ALL -check-prefix=MIPS4
-; RUN: llc -march=mips64el -mcpu=mips64r6 -mattr=n64 < %s | FileCheck %s -check-prefix=ALL -check-prefix=MIPS64R6
+; RUN: llc -march=mips64el -mcpu=mips4 -target-abi=n64 < %s | FileCheck %s -check-prefix=ALL -check-prefix=MIPS4
+; RUN: llc -march=mips64el -mcpu=mips64 -target-abi=n64 < %s | FileCheck %s -check-prefix=ALL -check-prefix=MIPS4
+; RUN: llc -march=mips64el -mcpu=mips64r2 -target-abi=n64 < %s | FileCheck %s -check-prefix=ALL -check-prefix=MIPS4
+; RUN: llc -march=mips64el -mcpu=mips64r6 -target-abi=n64 < %s | FileCheck %s -check-prefix=ALL -check-prefix=MIPS64R6
; Check that [ls][dwu]xc1 are not emitted for nacl.
; RUN: llc -mtriple=mipsel-none-nacl-gnu -mcpu=mips32r2 < %s | FileCheck %s -check-prefix=CHECK-NACL
diff --git a/test/CodeGen/Mips/fptr2.ll b/test/CodeGen/Mips/fptr2.ll
deleted file mode 100644
index c8b5e0d..0000000
--- a/test/CodeGen/Mips/fptr2.ll
+++ /dev/null
@@ -1,20 +0,0 @@
-; RUN: llc -mtriple=mipsel-linux-gnu -march=mipsel -mcpu=mips16 -relocation-model=static < %s | FileCheck %s -check-prefix=static16
-
-; Function Attrs: nounwind
-define double @my_mul(double %a, double %b) #0 {
-entry:
- %a.addr = alloca double, align 8
- %b.addr = alloca double, align 8
- store double %a, double* %a.addr, align 8
- store double %b, double* %b.addr, align 8
- %0 = load double* %a.addr, align 8
- %1 = load double* %b.addr, align 8
- %mul = fmul double %0, %1
- ret double %mul
-}
-
-; static16: .ent __fn_stub_my_mul
-; static16: .set reorder
-; static16-NEXT: #NO_APP
-; static16: .end __fn_stub_my_mul
-attributes #0 = { nounwind "less-precise-fpmad"="false" "no-frame-pointer-elim"="true" "no-frame-pointer-elim-non-leaf" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="true" }
diff --git a/test/CodeGen/Mips/fpxx.ll b/test/CodeGen/Mips/fpxx.ll
index 7e2ed22..5b42ece 100644
--- a/test/CodeGen/Mips/fpxx.ll
+++ b/test/CodeGen/Mips/fpxx.ll
@@ -10,11 +10,11 @@
; RUN: llc -march=mips64 -mcpu=mips64 < %s | FileCheck %s -check-prefix=ALL -check-prefix=64-NOFPXX
; RUN: not llc -march=mips64 -mcpu=mips64 -mattr=fpxx < %s 2>&1 | FileCheck %s -check-prefix=64-FPXX
-; RUN-TODO: llc -march=mips64 -mcpu=mips4 -mattr=-n64,+o32 < %s | FileCheck %s -check-prefix=ALL -check-prefix=4-O32-NOFPXX
-; RUN-TODO: llc -march=mips64 -mcpu=mips4 -mattr=-n64,+o32 -mattr=fpxx < %s | FileCheck %s -check-prefix=ALL -check-prefix=4-O32-FPXX
+; RUN-TODO: llc -march=mips64 -mcpu=mips4 -target-abi o32 < %s | FileCheck %s -check-prefix=ALL -check-prefix=4-O32-NOFPXX
+; RUN-TODO: llc -march=mips64 -mcpu=mips4 -target-abi o32 -mattr=fpxx < %s | FileCheck %s -check-prefix=ALL -check-prefix=4-O32-FPXX
-; RUN-TODO: llc -march=mips64 -mcpu=mips64 -mattr=-n64,+o32 < %s | FileCheck %s -check-prefix=ALL -check-prefix=64-O32-NOFPXX
-; RUN-TODO: llc -march=mips64 -mcpu=mips64 -mattr=-n64,+o32 -mattr=fpxx < %s | FileCheck %s -check-prefix=ALL -check-prefix=64-O32-FPXX
+; RUN-TODO: llc -march=mips64 -mcpu=mips64 -target-abi o32 < %s | FileCheck %s -check-prefix=ALL -check-prefix=64-O32-NOFPXX
+; RUN-TODO: llc -march=mips64 -mcpu=mips64 -target-abi o32 -mattr=fpxx < %s | FileCheck %s -check-prefix=ALL -check-prefix=64-O32-FPXX
declare double @dbl();
diff --git a/test/CodeGen/Mips/global-address.ll b/test/CodeGen/Mips/global-address.ll
index 0785cfc..ae6afeb 100644
--- a/test/CodeGen/Mips/global-address.ll
+++ b/test/CodeGen/Mips/global-address.ll
@@ -1,9 +1,9 @@
; RUN: llc -march=mipsel -relocation-model=pic < %s | FileCheck %s -check-prefix=PIC-O32
; RUN: llc -march=mipsel -relocation-model=static -mtriple=mipsel-linux-gnu < %s | FileCheck %s -check-prefix=STATIC-O32
-; RUN: llc -march=mips64el -mcpu=mips64r2 -mattr=-n64,n32 -relocation-model=pic < %s | FileCheck %s -check-prefix=PIC-N32
-; RUN: llc -march=mips64el -mcpu=mips64r2 -mattr=-n64,n32 -relocation-model=static -mtriple=mipsel-linux-gnu < %s | FileCheck %s -check-prefix=STATIC-N32
-; RUN: llc -march=mips64el -mcpu=mips64r2 -mattr=-n64,n64 -relocation-model=pic < %s | FileCheck %s -check-prefix=PIC-N64
-; RUN: llc -march=mips64el -mcpu=mips64r2 -mattr=-n64,n64 -relocation-model=static < %s | FileCheck %s -check-prefix=STATIC-N64
+; RUN: llc -march=mips64el -mcpu=mips64r2 -target-abi n32 -relocation-model=pic < %s | FileCheck %s -check-prefix=PIC-N32
+; RUN: llc -march=mips64el -mcpu=mips64r2 -target-abi n32 -relocation-model=static -mtriple=mipsel-linux-gnu < %s | FileCheck %s -check-prefix=STATIC-N32
+; RUN: llc -march=mips64el -mcpu=mips64r2 -target-abi n64 -relocation-model=pic < %s | FileCheck %s -check-prefix=PIC-N64
+; RUN: llc -march=mips64el -mcpu=mips64r2 -target-abi n64 -relocation-model=static < %s | FileCheck %s -check-prefix=STATIC-N64
@s1 = internal unnamed_addr global i32 8, align 4
@g1 = external global i32
diff --git a/test/CodeGen/Mips/inlineasm-assembler-directives.ll b/test/CodeGen/Mips/inlineasm-assembler-directives.ll
new file mode 100644
index 0000000..e4a6d1e
--- /dev/null
+++ b/test/CodeGen/Mips/inlineasm-assembler-directives.ll
@@ -0,0 +1,23 @@
+; RUN: llc -march=mips < %s | FileCheck %s
+
+; Check for the emission of appropriate assembler directives before and
+; after the inline assembly code.
+define void @f() nounwind {
+entry:
+; CHECK: #APP
+; CHECK-NEXT: .set push
+; CHECK-NEXT: .set at
+; CHECK-NEXT: .set macro
+; CHECK-NEXT: .set reorder
+; CHECK: addi $9, ${{[2-9][0-9]?}}, 8
+; CHECK: subi ${{[2-9][0-9]?}}, $9, 6
+; CHECK: .set pop
+; CHECK-NEXT: #NO_APP
+ %a = alloca i32, align 4
+ %b = alloca i32, align 4
+ store i32 20, i32* %a, align 4
+ %0 = load i32* %a, align 4
+ %1 = call i32 asm sideeffect "addi $$9, $1, 8\0A\09subi $0, $$9, 6", "=r,r,~{$1}"(i32 %0)
+ store i32 %1, i32* %b, align 4
+ ret void
+}
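For context on the directives being checked: .set push saves the assembler's current mode flags and .set pop restores them, so the at/macro/reorder settings apply only inside the inline block. Per the CHECK lines, the emitted assembly should look roughly like this (register numbers are illustrative):

    #APP
    .set push
    .set at
    .set macro
    .set reorder
    addi $9, $6, 8
    subi $7, $9, 6
    .set pop
    #NO_APP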
diff --git a/test/CodeGen/Mips/inlineasm-cnstrnt-reg.ll b/test/CodeGen/Mips/inlineasm-cnstrnt-reg.ll
index a67ddce..41991d0 100644
--- a/test/CodeGen/Mips/inlineasm-cnstrnt-reg.ll
+++ b/test/CodeGen/Mips/inlineasm-cnstrnt-reg.ll
@@ -32,10 +32,10 @@ entry:
; Now the 'l' constraint with 1024: make sure register lo is picked. We do this
; by checking that the instruction after the inline expression is a mflo, which
; pulls the value out of lo.
-; CHECK: #APP
-; CHECK-NEXT: mtlo ${{[0-9]+}}
+; CHECK: #APP
+; CHECK: mtlo ${{[0-9]+}}
; CHECK-NEXT: madd ${{[0-9]+}},${{[0-9]+}}
-; CHECK-NEXT: #NO_APP
+; CHECK: #NO_APP
; CHECK-NEXT: mflo ${{[0-9]+}}
%bosco = alloca i32, align 4
call i32 asm sideeffect "\09mtlo $3 \0A\09\09madd $1,$2 ", "=l,r,r,r"(i32 7, i32 6, i32 44) nounwind
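A minimal sketch of the constraint mapping exercised here: "=l" ties the asm output to the LO register, which is only reachable through mflo/mtlo, so the compiler has to insert an mflo to move the result into a general-purpose register. Simplified from the call above, with illustrative register names:

    %res = call i32 asm sideeffect "mtlo $3\0A\09madd $1,$2", "=l,r,r,r"(i32 7, i32 6, i32 44)

    mtlo  $t0            ; third input moved into LO
    madd  $t1, $t2       ; accumulates into HI/LO
    mflo  $v0            ; compiler-inserted read of the "=l" output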
diff --git a/test/CodeGen/Mips/inlineasm-cnstrnt-reg64.ll b/test/CodeGen/Mips/inlineasm-cnstrnt-reg64.ll
index a7ba762..acce632 100644
--- a/test/CodeGen/Mips/inlineasm-cnstrnt-reg64.ll
+++ b/test/CodeGen/Mips/inlineasm-cnstrnt-reg64.ll
@@ -3,7 +3,7 @@
; The target is 64 bit.
;
;
-; RUN: llc -march=mips64el -mcpu=mips64r2 -mattr=n64 < %s | FileCheck %s
+; RUN: llc -march=mips64el -mcpu=mips64r2 -target-abi=n64 < %s | FileCheck %s
define i32 @main() nounwind {
diff --git a/test/CodeGen/Mips/inlineasm64.ll b/test/CodeGen/Mips/inlineasm64.ll
index dbce3c3..a8e949b 100644
--- a/test/CodeGen/Mips/inlineasm64.ll
+++ b/test/CodeGen/Mips/inlineasm64.ll
@@ -1,4 +1,4 @@
-; RUN: llc -march=mips64el -mcpu=mips64r2 -mattr=n64 < %s | FileCheck %s
+; RUN: llc -march=mips64el -mcpu=mips64r2 -target-abi=n64 < %s | FileCheck %s
@gl2 = external global i64
@gl1 = external global i64
diff --git a/test/CodeGen/Mips/inlineasmmemop.ll b/test/CodeGen/Mips/inlineasmmemop.ll
index a08a024..5518520 100644
--- a/test/CodeGen/Mips/inlineasmmemop.ll
+++ b/test/CodeGen/Mips/inlineasmmemop.ll
@@ -5,6 +5,7 @@
define i32 @f1(i32 %x) nounwind {
entry:
+; CHECK-LABEL: f1:
; CHECK: addiu $[[T0:[0-9]+]], $sp
; CHECK: #APP
; CHECK: sw $4, 0($[[T0]])
@@ -22,42 +23,26 @@ entry:
ret i32 %0
}
-; "D": Second word of double word. This works for any memory element
+; CHECK-LABEL: main:
+; "D": Second word of a double word. This works for any memory element,
; double or single.
; CHECK: #APP
-; CHECK-NEXT: lw ${{[0-9]+}},4(${{[0-9]+}});
-; CHECK-NEXT: #NO_APP
+; CHECK: lw ${{[0-9]+}},4(${{[0-9]+}});
+; CHECK: #NO_APP
-; No "D": First word of double word. This works for any memory element
+; No "D": First word of a double word. This works for any memory element,
; double or single.
; CHECK: #APP
-; CHECK-NEXT: lw ${{[0-9]+}},0(${{[0-9]+}});
-; CHECK-NEXT: #NO_APP
-
-;int b[8] = {0,1,2,3,4,5,6,7};
-;int main()
-;{
-; int i;
-;
-; // The first word. Notice, no 'D'
-; { asm (
-; "lw %0,%1;\n"
-; : "=r" (i) : "m" (*(b+4)));}
-;
-; // The second word
-; { asm (
-; "lw %0,%D1;\n"
-; : "=r" (i) "m" (*(b+4)));}
-;}
+; CHECK: lw ${{[0-9]+}},0(${{[0-9]+}});
+; CHECK: #NO_APP
@b = common global [20 x i32] zeroinitializer, align 4
define void @main() {
entry:
+; Second word:
tail call void asm sideeffect " lw $0,${1:D};", "r,*m,~{$11}"(i32 undef, i32* getelementptr inbounds ([20 x i32]* @b, i32 0, i32 3))
+; First word. Notice, no 'D':
tail call void asm sideeffect " lw $0,${1};", "r,*m,~{$11}"(i32 undef, i32* getelementptr inbounds ([20 x i32]* @b, i32 0, i32 3))
ret void
}
-
-attributes #0 = { nounwind }
-
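The operand modifier under test: for a memory operand, ${1:D} prints the address of the second word of a double word, while plain ${1} prints the first. Per the CHECK lines above, the two calls are expected to lower to (base register illustrative):

    lw $t, 4($base)   ; from " lw $0,${1:D};"
    lw $t, 0($base)   ; from " lw $0,${1};"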
diff --git a/test/CodeGen/Mips/largeimmprinting.ll b/test/CodeGen/Mips/largeimmprinting.ll
index 0e9c91f..918dfee 100644
--- a/test/CodeGen/Mips/largeimmprinting.ll
+++ b/test/CodeGen/Mips/largeimmprinting.ll
@@ -1,7 +1,7 @@
; RUN: llc -march=mipsel < %s | FileCheck %s -check-prefix=32
-; RUN: llc -march=mips64el -mcpu=mips4 -mattr=n64 < %s | \
+; RUN: llc -march=mips64el -mcpu=mips4 -target-abi=n64 < %s | \
; RUN: FileCheck %s -check-prefix=64
-; RUN: llc -march=mips64el -mcpu=mips64 -mattr=n64 < %s | \
+; RUN: llc -march=mips64el -mcpu=mips64 -target-abi=n64 < %s | \
; RUN: FileCheck %s -check-prefix=64
%struct.S1 = type { [65536 x i8] }
diff --git a/test/CodeGen/Mips/lcb2.ll b/test/CodeGen/Mips/lcb2.ll
index 715584b..59b96e6 100644
--- a/test/CodeGen/Mips/lcb2.ll
+++ b/test/CodeGen/Mips/lcb2.ll
@@ -120,14 +120,14 @@ attributes #1 = { nounwind }
!llvm.ident = !{!0}
-!0 = metadata !{metadata !"clang version 3.5 (gitosis@dmz-portal.mips.com:clang.git ed197d08c90d82e1119774e10920e6f7a841c8ec) (gitosis@dmz-portal.mips.com:llvm.git b9235a363fa2dddb26ac01cbaed58efbc9eff392)"}
-!1 = metadata !{metadata !2, metadata !2, i64 0}
-!2 = metadata !{metadata !"int", metadata !3, i64 0}
-!3 = metadata !{metadata !"omnipotent char", metadata !4, i64 0}
-!4 = metadata !{metadata !"Simple C/C++ TBAA"}
-!5 = metadata !{i32 59}
-!6 = metadata !{i32 156}
-!7 = metadata !{i32 210}
-!8 = metadata !{i32 299}
-!9 = metadata !{i32 340}
-!10 = metadata !{i32 412}
+!0 = !{!"clang version 3.5 (gitosis@dmz-portal.mips.com:clang.git ed197d08c90d82e1119774e10920e6f7a841c8ec) (gitosis@dmz-portal.mips.com:llvm.git b9235a363fa2dddb26ac01cbaed58efbc9eff392)"}
+!1 = !{!2, !2, i64 0}
+!2 = !{!"int", !3, i64 0}
+!3 = !{!"omnipotent char", !4, i64 0}
+!4 = !{!"Simple C/C++ TBAA"}
+!5 = !{i32 59}
+!6 = !{i32 156}
+!7 = !{i32 210}
+!8 = !{i32 299}
+!9 = !{i32 340}
+!10 = !{i32 412}
diff --git a/test/CodeGen/Mips/lcb3c.ll b/test/CodeGen/Mips/lcb3c.ll
index 72a0b8c..eb83291 100644
--- a/test/CodeGen/Mips/lcb3c.ll
+++ b/test/CodeGen/Mips/lcb3c.ll
@@ -55,5 +55,5 @@ attributes #0 = { nounwind "less-precise-fpmad"="false" "no-frame-pointer-elim"=
attributes #1 = { nounwind }
-!1 = metadata !{i32 65}
-!2 = metadata !{i32 167}
+!1 = !{i32 65}
+!2 = !{i32 167}
diff --git a/test/CodeGen/Mips/lcb4a.ll b/test/CodeGen/Mips/lcb4a.ll
index e37feca..fbcadd2 100644
--- a/test/CodeGen/Mips/lcb4a.ll
+++ b/test/CodeGen/Mips/lcb4a.ll
@@ -59,11 +59,11 @@ attributes #0 = { nounwind optsize "less-precise-fpmad"="false" "no-frame-pointe
attributes #1 = { nounwind }
-!1 = metadata !{metadata !2, metadata !2, i64 0}
-!2 = metadata !{metadata !"int", metadata !3, i64 0}
-!3 = metadata !{metadata !"omnipotent char", metadata !4, i64 0}
-!4 = metadata !{metadata !"Simple C/C++ TBAA"}
-!5 = metadata !{i32 58}
-!6 = metadata !{i32 108}
-!7 = metadata !{i32 190}
-!8 = metadata !{i32 243}
+!1 = !{!2, !2, i64 0}
+!2 = !{!"int", !3, i64 0}
+!3 = !{!"omnipotent char", !4, i64 0}
+!4 = !{!"Simple C/C++ TBAA"}
+!5 = !{i32 58}
+!6 = !{i32 108}
+!7 = !{i32 190}
+!8 = !{i32 243}
diff --git a/test/CodeGen/Mips/lcb5.ll b/test/CodeGen/Mips/lcb5.ll
index 0a89c80..b2a8d1d 100644
--- a/test/CodeGen/Mips/lcb5.ll
+++ b/test/CodeGen/Mips/lcb5.ll
@@ -220,21 +220,21 @@ attributes #0 = { nounwind optsize "less-precise-fpmad"="false" "no-frame-pointe
attributes #1 = { nounwind }
-!1 = metadata !{metadata !2, metadata !2, i64 0}
-!2 = metadata !{metadata !"int", metadata !3, i64 0}
-!3 = metadata !{metadata !"omnipotent char", metadata !4, i64 0}
-!4 = metadata !{metadata !"Simple C/C++ TBAA"}
-!5 = metadata !{i32 57}
-!6 = metadata !{i32 107}
-!7 = metadata !{i32 188}
-!8 = metadata !{i32 241}
-!9 = metadata !{i32 338}
-!10 = metadata !{i32 391}
-!11 = metadata !{i32 477}
-!12 = metadata !{i32 533}
-!13 = metadata !{i32 621}
-!14 = metadata !{i32 663}
-!15 = metadata !{i32 747}
-!16 = metadata !{i32 792}
-!17 = metadata !{i32 867}
-!18 = metadata !{i32 953}
+!1 = !{!2, !2, i64 0}
+!2 = !{!"int", !3, i64 0}
+!3 = !{!"omnipotent char", !4, i64 0}
+!4 = !{!"Simple C/C++ TBAA"}
+!5 = !{i32 57}
+!6 = !{i32 107}
+!7 = !{i32 188}
+!8 = !{i32 241}
+!9 = !{i32 338}
+!10 = !{i32 391}
+!11 = !{i32 477}
+!12 = !{i32 533}
+!13 = !{i32 621}
+!14 = !{i32 663}
+!15 = !{i32 747}
+!16 = !{i32 792}
+!17 = !{i32 867}
+!18 = !{i32 953}
diff --git a/test/CodeGen/Mips/llvm-ir/add.ll b/test/CodeGen/Mips/llvm-ir/add.ll
new file mode 100644
index 0000000..6cccc7d
--- /dev/null
+++ b/test/CodeGen/Mips/llvm-ir/add.ll
@@ -0,0 +1,123 @@
+; RUN: llc < %s -march=mips -mcpu=mips2 | FileCheck %s \
+; RUN: -check-prefix=ALL -check-prefix=NOT-R2-R6 -check-prefix=GP32
+; RUN: llc < %s -march=mips -mcpu=mips32 | FileCheck %s \
+; RUN: -check-prefix=ALL -check-prefix=NOT-R2-R6 -check-prefix=GP32
+; RUN: llc < %s -march=mips -mcpu=mips32r2 | FileCheck %s \
+; RUN: -check-prefix=ALL -check-prefix=R2-R6 -check-prefix=GP32
+; RUN: llc < %s -march=mips -mcpu=mips32r3 | FileCheck %s \
+; RUN: -check-prefix=ALL -check-prefix=R2-R6 -check-prefix=GP32
+; RUN: llc < %s -march=mips -mcpu=mips32r5 | FileCheck %s \
+; RUN: -check-prefix=ALL -check-prefix=R2-R6 -check-prefix=GP32
+; RUN: llc < %s -march=mips -mcpu=mips32r6 | FileCheck %s \
+; RUN: -check-prefix=ALL -check-prefix=R2-R6 -check-prefix=GP32
+; RUN: llc < %s -march=mips64 -mcpu=mips3 | FileCheck %s \
+; RUN: -check-prefix=ALL -check-prefix=NOT-R2-R6 -check-prefix=GP64
+; RUN: llc < %s -march=mips64 -mcpu=mips4 | FileCheck %s \
+; RUN: -check-prefix=ALL -check-prefix=NOT-R2-R6 -check-prefix=GP64
+; RUN: llc < %s -march=mips64 -mcpu=mips64 | FileCheck %s \
+; RUN: -check-prefix=ALL -check-prefix=NOT-R2-R6 -check-prefix=GP64
+; RUN: llc < %s -march=mips64 -mcpu=mips64r2 | FileCheck %s \
+; RUN: -check-prefix=ALL -check-prefix=R2-R6 -check-prefix=GP64
+; RUN: llc < %s -march=mips64 -mcpu=mips64r3 | FileCheck %s \
+; RUN: -check-prefix=ALL -check-prefix=R2-R6 -check-prefix=GP64
+; RUN: llc < %s -march=mips64 -mcpu=mips64r5 | FileCheck %s \
+; RUN: -check-prefix=ALL -check-prefix=R2-R6 -check-prefix=GP64
+; RUN: llc < %s -march=mips64 -mcpu=mips64r6 | FileCheck %s \
+; RUN: -check-prefix=ALL -check-prefix=R2-R6 -check-prefix=GP64
+
+define signext i1 @add_i1(i1 signext %a, i1 signext %b) {
+entry:
+; ALL-LABEL: add_i1:
+
+ ; ALL: addu $[[T0:[0-9]+]], $4, $5
+ ; ALL: sll $[[T0]], $[[T0]], 31
+ ; ALL: sra $2, $[[T0]], 31
+
+ %r = add i1 %a, %b
+ ret i1 %r
+}
+
+define signext i8 @add_i8(i8 signext %a, i8 signext %b) {
+entry:
+; ALL-LABEL: add_i8:
+
+ ; NOT-R2-R6: addu $[[T0:[0-9]+]], $4, $5
+ ; NOT-R2-R6: sll $[[T0]], $[[T0]], 24
+ ; NOT-R2-R6: sra $2, $[[T0]], 24
+
+ ; R2-R6: addu $[[T0:[0-9]+]], $4, $5
+ ; R2-R6: seb $2, $[[T0:[0-9]+]]
+
+ %r = add i8 %a, %b
+ ret i8 %r
+}
+
+define signext i16 @add_i16(i16 signext %a, i16 signext %b) {
+entry:
+; ALL-LABEL: add_i16:
+
+ ; NOT-R2-R6: addu $[[T0:[0-9]+]], $4, $5
+ ; NOT-R2-R6: sll $[[T0]], $[[T0]], 16
+ ; NOT-R2-R6: sra $2, $[[T0]], 16
+
+ ; R2-R6: addu $[[T0:[0-9]+]], $4, $5
+ ; R2-R6: seh $2, $[[T0:[0-9]+]]
+
+ %r = add i16 %a, %b
+ ret i16 %r
+}
+
+define signext i32 @add_i32(i32 signext %a, i32 signext %b) {
+entry:
+; ALL-LABEL: add_i32:
+
+ ; ALL: addu $2, $4, $5
+
+ %r = add i32 %a, %b
+ ret i32 %r
+}
+
+define signext i64 @add_i64(i64 signext %a, i64 signext %b) {
+entry:
+; ALL-LABEL: add_i64:
+
+ ; GP32: addu $3, $5, $7
+ ; GP32: sltu $[[T0:[0-9]+]], $3, $7
+ ; GP32: addu $[[T1:[0-9]+]], $[[T0]], $6
+ ; GP32: addu $2, $4, $[[T1]]
+
+ ; GP64: daddu $2, $4, $5
+
+ %r = add i64 %a, %b
+ ret i64 %r
+}
+
+define signext i128 @add_i128(i128 signext %a, i128 signext %b) {
+entry:
+; ALL-LABEL: add_i128:
+
+ ; GP32: lw $[[T0:[0-9]+]], 28($sp)
+ ; GP32: addu $[[T1:[0-9]+]], $7, $[[T0]]
+ ; GP32: sltu $[[T2:[0-9]+]], $[[T1]], $[[T0]]
+ ; GP32: lw $[[T3:[0-9]+]], 24($sp)
+ ; GP32: addu $[[T4:[0-9]+]], $[[T2]], $[[T3]]
+ ; GP32: addu $[[T5:[0-9]+]], $6, $[[T4]]
+ ; GP32: sltu $[[T6:[0-9]+]], $[[T5]], $[[T3]]
+ ; GP32: lw $[[T7:[0-9]+]], 20($sp)
+ ; GP32: addu $[[T8:[0-9]+]], $[[T6]], $[[T7]]
+ ; GP32: lw $[[T9:[0-9]+]], 16($sp)
+ ; GP32: addu $3, $5, $[[T8]]
+ ; GP32: sltu $[[T10:[0-9]+]], $3, $[[T7]]
+ ; GP32: addu $[[T11:[0-9]+]], $[[T10]], $[[T9]]
+ ; GP32: addu $2, $4, $[[T11]]
+ ; GP32: move $4, $[[T5]]
+ ; GP32: move $5, $[[T1]]
+
+ ; GP64: daddu $3, $5, $7
+ ; GP64: sltu $[[T0:[0-9]+]], $3, $7
+ ; GP64: daddu $[[T1:[0-9]+]], $[[T0]], $6
+ ; GP64: daddu $2, $4, $[[T1]]
+
+ %r = add i128 %a, %b
+ ret i128 %r
+}
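The GP32 sequences above open-code multi-word addition: each sltu computes the carry out of a word as sum <u addend. A minimal IR sketch of the i64 case the checks correspond to (names are illustrative):

    %lo    = add i32 %a.lo, %b.lo
    %carry = icmp ult i32 %lo, %b.lo   ; unsigned wrap means a carry out
    %c32   = zext i1 %carry to i32
    %t     = add i32 %c32, %b.hi
    %hi    = add i32 %a.hi, %t

The i128 variant chains the same step across four words, which is why the GP32 checks interleave lw/addu/sltu for each successive word.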
diff --git a/test/CodeGen/Mips/llvm-ir/and.ll b/test/CodeGen/Mips/llvm-ir/and.ll
new file mode 100644
index 0000000..8ebcfe4
--- /dev/null
+++ b/test/CodeGen/Mips/llvm-ir/and.ll
@@ -0,0 +1,99 @@
+; RUN: llc < %s -march=mips -mcpu=mips2 | FileCheck %s \
+; RUN: -check-prefix=ALL -check-prefix=GP32
+; RUN: llc < %s -march=mips -mcpu=mips32 | FileCheck %s \
+; RUN: -check-prefix=ALL -check-prefix=GP32
+; RUN: llc < %s -march=mips -mcpu=mips32r2 | FileCheck %s \
+; RUN: -check-prefix=ALL -check-prefix=GP32
+; RUN: llc < %s -march=mips -mcpu=mips32r3 | FileCheck %s \
+; RUN: -check-prefix=ALL -check-prefix=GP32
+; RUN: llc < %s -march=mips -mcpu=mips32r5 | FileCheck %s \
+; RUN: -check-prefix=ALL -check-prefix=GP32
+; RUN: llc < %s -march=mips -mcpu=mips32r6 | FileCheck %s \
+; RUN: -check-prefix=ALL -check-prefix=GP32
+; RUN: llc < %s -march=mips64 -mcpu=mips3 | FileCheck %s \
+; RUN: -check-prefix=ALL -check-prefix=GP64
+; RUN: llc < %s -march=mips64 -mcpu=mips4 | FileCheck %s \
+; RUN: -check-prefix=ALL -check-prefix=GP64
+; RUN: llc < %s -march=mips64 -mcpu=mips64 | FileCheck %s \
+; RUN: -check-prefix=ALL -check-prefix=GP64
+; RUN: llc < %s -march=mips64 -mcpu=mips64r2 | FileCheck %s \
+; RUN: -check-prefix=ALL -check-prefix=GP64
+; RUN: llc < %s -march=mips64 -mcpu=mips64r3 | FileCheck %s \
+; RUN: -check-prefix=ALL -check-prefix=GP64
+; RUN: llc < %s -march=mips64 -mcpu=mips64r5 | FileCheck %s \
+; RUN: -check-prefix=ALL -check-prefix=GP64
+; RUN: llc < %s -march=mips64 -mcpu=mips64r6 | FileCheck %s \
+; RUN: -check-prefix=ALL -check-prefix=GP64
+
+define signext i1 @and_i1(i1 signext %a, i1 signext %b) {
+entry:
+; ALL-LABEL: and_i1:
+
+ ; ALL: and $2, $4, $5
+
+ %r = and i1 %a, %b
+ ret i1 %r
+}
+
+define signext i8 @and_i8(i8 signext %a, i8 signext %b) {
+entry:
+; ALL-LABEL: and_i8:
+
+ ; ALL: and $2, $4, $5
+
+ %r = and i8 %a, %b
+ ret i8 %r
+}
+
+define signext i16 @and_i16(i16 signext %a, i16 signext %b) {
+entry:
+; ALL-LABEL: and_i16:
+
+ ; ALL: and $2, $4, $5
+
+ %r = and i16 %a, %b
+ ret i16 %r
+}
+
+define signext i32 @and_i32(i32 signext %a, i32 signext %b) {
+entry:
+; ALL-LABEL: and_i32:
+
+ ; ALL: and $2, $4, $5
+
+ %r = and i32 %a, %b
+ ret i32 %r
+}
+
+define signext i64 @and_i64(i64 signext %a, i64 signext %b) {
+entry:
+; ALL-LABEL: and_i64:
+
+ ; GP32: and $2, $4, $6
+ ; GP32: and $3, $5, $7
+
+ ; GP64: and $2, $4, $5
+
+ %r = and i64 %a, %b
+ ret i64 %r
+}
+
+define signext i128 @and_i128(i128 signext %a, i128 signext %b) {
+entry:
+; ALL-LABEL: and_i128:
+
+ ; GP32: lw $[[T0:[0-9]+]], 24($sp)
+ ; GP32: lw $[[T1:[0-9]+]], 20($sp)
+ ; GP32: lw $[[T2:[0-9]+]], 16($sp)
+ ; GP32: and $2, $4, $[[T2]]
+ ; GP32: and $3, $5, $[[T1]]
+ ; GP32: and $4, $6, $[[T0]]
+ ; GP32: lw $[[T3:[0-9]+]], 28($sp)
+ ; GP32: and $5, $7, $[[T3]]
+
+ ; GP64: and $2, $4, $6
+ ; GP64: and $3, $5, $7
+
+ %r = and i128 %a, %b
+ ret i128 %r
+}
diff --git a/test/CodeGen/Mips/llvm-ir/ashr.ll b/test/CodeGen/Mips/llvm-ir/ashr.ll
new file mode 100644
index 0000000..7e1587c
--- /dev/null
+++ b/test/CodeGen/Mips/llvm-ir/ashr.ll
@@ -0,0 +1,200 @@
+; RUN: llc < %s -march=mips -mcpu=mips2 | FileCheck %s \
+; RUN: -check-prefix=ALL -check-prefix=GP32 \
+; RUN: -check-prefix=M2
+; RUN: llc < %s -march=mips -mcpu=mips32 | FileCheck %s \
+; RUN: -check-prefix=ALL -check-prefix=GP32 \
+; RUN: -check-prefix=32R1-R5
+; RUN: llc < %s -march=mips -mcpu=mips32r2 | FileCheck %s \
+; RUN: -check-prefix=ALL -check-prefix=GP32 \
+; RUN: -check-prefix=32R1-R5
+; RUN: llc < %s -march=mips -mcpu=mips32r3 | FileCheck %s \
+; RUN: -check-prefix=ALL -check-prefix=GP32 \
+; RUN: -check-prefix=32R1-R5
+; RUN: llc < %s -march=mips -mcpu=mips32r5 | FileCheck %s \
+; RUN: -check-prefix=ALL -check-prefix=GP32 \
+; RUN: -check-prefix=32R1-R5
+; RUN: llc < %s -march=mips -mcpu=mips32r6 | FileCheck %s \
+; RUN: -check-prefix=ALL -check-prefix=GP32 \
+; RUN: -check-prefix=32R6
+; RUN: llc < %s -march=mips64 -mcpu=mips3 | FileCheck %s \
+; RUN: -check-prefix=ALL -check-prefix=GP64 \
+; RUN: -check-prefix=M3
+; RUN: llc < %s -march=mips64 -mcpu=mips4 | FileCheck %s \
+; RUN: -check-prefix=ALL -check-prefix=GP64 \
+; RUN: -check-prefix=GP64-NOT-R6
+; RUN: llc < %s -march=mips64 -mcpu=mips64 | FileCheck %s \
+; RUN: -check-prefix=ALL -check-prefix=GP64 \
+; RUN: -check-prefix=GP64-NOT-R6
+; RUN: llc < %s -march=mips64 -mcpu=mips64r2 | FileCheck %s \
+; RUN: -check-prefix=ALL -check-prefix=GP64 \
+; RUN: -check-prefix=GP64-NOT-R6
+; RUN: llc < %s -march=mips64 -mcpu=mips64r3 | FileCheck %s \
+; RUN: -check-prefix=ALL -check-prefix=GP64 \
+; RUN: -check-prefix=GP64-NOT-R6
+; RUN: llc < %s -march=mips64 -mcpu=mips64r5 | FileCheck %s \
+; RUN: -check-prefix=ALL -check-prefix=GP64 \
+; RUN: -check-prefix=GP64-NOT-R6
+; RUN: llc < %s -march=mips64 -mcpu=mips64r6 | FileCheck %s \
+; RUN: -check-prefix=ALL -check-prefix=GP64 \
+; RUN: -check-prefix=64R6
+
+define signext i1 @ashr_i1(i1 signext %a, i1 signext %b) {
+entry:
+; ALL-LABEL: ashr_i1:
+
+ ; ALL: move $2, $4
+
+ %r = ashr i1 %a, %b
+ ret i1 %r
+}
+
+define signext i8 @ashr_i8(i8 signext %a, i8 signext %b) {
+entry:
+; ALL-LABEL: ashr_i8:
+
+ ; FIXME: The andi instruction is redundant.
+ ; ALL: andi $[[T0:[0-9]+]], $5, 255
+ ; ALL: srav $2, $4, $[[T0]]
+
+ %r = ashr i8 %a, %b
+ ret i8 %r
+}
+
+define signext i16 @ashr_i16(i16 signext %a, i16 signext %b) {
+entry:
+; ALL-LABEL: ashr_i16:
+
+ ; FIXME: The andi instruction is redundant.
+ ; ALL: andi $[[T0:[0-9]+]], $5, 65535
+ ; ALL: srav $2, $4, $[[T0]]
+
+ %r = ashr i16 %a, %b
+ ret i16 %r
+}
+
+define signext i32 @ashr_i32(i32 signext %a, i32 signext %b) {
+entry:
+; ALL-LABEL: ashr_i32:
+
+ ; ALL: srav $2, $4, $5
+
+ %r = ashr i32 %a, %b
+ ret i32 %r
+}
+
+define signext i64 @ashr_i64(i64 signext %a, i64 signext %b) {
+entry:
+; ALL-LABEL: ashr_i64:
+
+ ; M2: srav $[[T0:[0-9]+]], $4, $7
+ ; M2: andi $[[T1:[0-9]+]], $7, 32
+ ; M2: bnez $[[T1]], $[[BB0:BB[0-9_]+]]
+ ; M2: move $3, $[[T0]]
+ ; M2: srlv $[[T2:[0-9]+]], $5, $7
+ ; M2: not $[[T3:[0-9]+]], $7
+ ; M2: sll $[[T4:[0-9]+]], $4, 1
+ ; M2: sllv $[[T5:[0-9]+]], $[[T4]], $[[T3]]
+ ; M2: or $3, $[[T5]], $[[T2]]
+ ; M2: $[[BB0]]:
+ ; M2: beqz $[[T1]], $[[BB1:BB[0-9_]+]]
+ ; M2: nop
+ ; M2: sra $2, $4, 31
+ ; M2: $[[BB1]]:
+ ; M2: jr $ra
+ ; M2: nop
+
+ ; 32R1-R5: srlv $[[T0:[0-9]+]], $5, $7
+ ; 32R1-R5: not $[[T1:[0-9]+]], $7
+ ; 32R1-R5: sll $[[T2:[0-9]+]], $4, 1
+ ; 32R1-R5: sllv $[[T3:[0-9]+]], $[[T2]], $[[T1]]
+ ; 32R1-R5: or $3, $[[T3]], $[[T0]]
+ ; 32R1-R5: srav $[[T4:[0-9]+]], $4, $7
+ ; 32R1-R5: andi $[[T5:[0-9]+]], $7, 32
+ ; 32R1-R5: movn $3, $[[T4]], $[[T5]]
+ ; 32R1-R5: sra $4, $4, 31
+ ; 32R1-R5: jr $ra
+ ; 32R1-R5: movn $2, $4, $[[T5]]
+
+ ; 32R6: srav $[[T0:[0-9]+]], $4, $7
+ ; 32R6: andi $[[T1:[0-9]+]], $7, 32
+ ; 32R6: seleqz $[[T2:[0-9]+]], $[[T0]], $[[T1]]
+ ; 32R6: sra $[[T3:[0-9]+]], $4, 31
+ ; 32R6: selnez $[[T4:[0-9]+]], $[[T3]], $[[T1]]
+ ; 32R6: or $[[T5:[0-9]+]], $[[T4]], $[[T2]]
+ ; 32R6: srlv $[[T6:[0-9]+]], $5, $7
+ ; 32R6: not $[[T7:[0-9]+]], $7
+ ; 32R6: sll $[[T8:[0-9]+]], $4, 1
+ ; 32R6: sllv $[[T9:[0-9]+]], $[[T8]], $[[T7]]
+ ; 32R6: or $[[T10:[0-9]+]], $[[T9]], $[[T6]]
+ ; 32R6: seleqz $[[T11:[0-9]+]], $[[T10]], $[[T1]]
+ ; 32R6: selnez $[[T12:[0-9]+]], $[[T0]], $[[T1]]
+ ; 32R6: jr $ra
+ ; 32R6: or $3, $[[T12]], $[[T11]]
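+ ; R6 dropped the movn/movz conditional moves; selects are built from
+ ; seleqz/selnez plus or (illustrative sketch only):
+ ;   seleqz t, a, c        # t = (c == 0) ? a : 0
+ ;   selnez u, b, c        # u = (c != 0) ? b : 0
+ ;   or     r, u, t        # r = c ? b : a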
+
+ ; FIXME: The sll instruction below is redundant.
+ ; GP64: sll $[[T0:[0-9]+]], $5, 0
+ ; GP64: dsrav $2, $4, $[[T0]]
+
+ %r = ashr i64 %a, %b
+ ret i64 %r
+}
+
+define signext i128 @ashr_i128(i128 signext %a, i128 signext %b) {
+entry:
+; ALL-LABEL: ashr_i128:
+
+ ; GP32: lw $25, %call16(__ashrti3)($gp)
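+ ; 128-bit shifts are a libcall on 32-bit targets: %call16 loads the
+ ; helper's address from the GOT, and o32 PIC requires indirect calls
+ ; to go through $25 ($t9).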
+
+ ; M3: sll $[[T0:[0-9]+]], $7, 0
+ ; M3: dsrav $[[T1:[0-9]+]], $4, $[[T0]]
+ ; M3: andi $[[T2:[0-9]+]], $[[T0]], 32
+ ; M3: bnez $[[T3:[0-9]+]], $[[BB0:BB[0-9_]+]]
+ ; M3: move $3, $[[T1]]
+ ; M3: dsrlv $[[T4:[0-9]+]], $5, $[[T0]]
+ ; M3: dsll $[[T5:[0-9]+]], $4, 1
+ ; M3: not $[[T6:[0-9]+]], $[[T0]]
+ ; M3: dsllv $[[T7:[0-9]+]], $[[T5]], $[[T6]]
+ ; M3: or $3, $[[T7]], $[[T4]]
+ ; M3: $[[BB0]]:
+ ; M3: beqz $[[T3]], $[[BB1:BB[0-9_]+]]
+ ; M3: nop
+ ; M3: dsra $2, $4, 31
+ ; M3: $[[BB1]]:
+ ; M3: jr $ra
+ ; M3: nop
+
+ ; GP64-NOT-R6: sll $[[T0:[0-9]+]], $7, 0
+ ; GP64-NOT-R6: dsrlv $[[T1:[0-9]+]], $5, $[[T0]]
+ ; GP64-NOT-R6: dsll $[[T2:[0-9]+]], $4, 1
+ ; GP64-NOT-R6: not $[[T3:[0-9]+]], $[[T0]]
+ ; GP64-NOT-R6: dsllv $[[T4:[0-9]+]], $[[T2]], $[[T3]]
+ ; GP64-NOT-R6: or $3, $[[T4]], $[[T1]]
+ ; GP64-NOT-R6: dsrav $2, $4, $[[T0]]
+ ; GP64-NOT-R6: andi $[[T5:[0-9]+]], $[[T0]], 32
+
+ ; GP64-NOT-R6: movn $3, $2, $[[T5]]
+ ; GP64-NOT-R6: dsra $[[T6:[0-9]+]], $4, 31
+ ; GP64-NOT-R6: jr $ra
+ ; GP64-NOT-R6: movn $2, $[[T6]], $[[T5]]
+
+ ; 64R6: sll $[[T0:[0-9]+]], $7, 0
+ ; 64R6: dsrav $[[T1:[0-9]+]], $4, $[[T0]]
+ ; 64R6: andi $[[T2:[0-9]+]], $[[T0]], 32
+ ; 64R6: sll $[[T3:[0-9]+]], $[[T2]], 0
+ ; 64R6: seleqz $[[T4:[0-9]+]], $[[T1]], $[[T3]]
+ ; 64R6: dsra $[[T5:[0-9]+]], $4, 31
+ ; 64R6: selnez $[[T6:[0-9]+]], $[[T5]], $[[T3]]
+ ; 64R6: or $2, $[[T6]], $[[T4]]
+ ; 64R6: dsrlv $[[T7:[0-9]+]], $5, $[[T0]]
+ ; 64R6: dsll $[[T8:[0-9]+]], $4, 1
+ ; 64R6: not $[[T9:[0-9]+]], $[[T0]]
+ ; 64R6: dsllv $[[T10:[0-9]+]], $[[T8]], $[[T9]]
+ ; 64R6: or $[[T11:[0-9]+]], $[[T10]], $[[T7]]
+ ; 64R6: seleqz $[[T12:[0-9]+]], $[[T11]], $[[T3]]
+ ; 64R6: selnez $[[T13:[0-9]+]], $[[T1]], $[[T3]]
+ ; 64R6: jr $ra
+ ; 64R6: or $3, $[[T13]], $[[T12]]
+
+ %r = ashr i128 %a, %b
+ ret i128 %r
+}
diff --git a/test/CodeGen/Mips/llvm-ir/call.ll b/test/CodeGen/Mips/llvm-ir/call.ll
index 4cbf43c..112ab8e 100644
--- a/test/CodeGen/Mips/llvm-ir/call.ll
+++ b/test/CodeGen/Mips/llvm-ir/call.ll
@@ -3,10 +3,14 @@
; FIXME: We should remove the need for -enable-mips-tail-calls
; RUN: llc -march=mips -mcpu=mips32 -enable-mips-tail-calls < %s | FileCheck %s -check-prefix=ALL -check-prefix=O32
; RUN: llc -march=mips -mcpu=mips32r2 -enable-mips-tail-calls < %s | FileCheck %s -check-prefix=ALL -check-prefix=O32
+; RUN: llc -march=mips -mcpu=mips32r3 -enable-mips-tail-calls < %s | FileCheck %s -check-prefix=ALL -check-prefix=O32
+; RUN: llc -march=mips -mcpu=mips32r5 -enable-mips-tail-calls < %s | FileCheck %s -check-prefix=ALL -check-prefix=O32
; RUN: llc -march=mips -mcpu=mips32r6 -enable-mips-tail-calls < %s | FileCheck %s -check-prefix=ALL -check-prefix=O32
; RUN: llc -march=mips64 -mcpu=mips4 -enable-mips-tail-calls < %s | FileCheck %s -check-prefix=ALL -check-prefix=N64
; RUN: llc -march=mips64 -mcpu=mips64 -enable-mips-tail-calls < %s | FileCheck %s -check-prefix=ALL -check-prefix=N64
; RUN: llc -march=mips64 -mcpu=mips64r2 -enable-mips-tail-calls < %s | FileCheck %s -check-prefix=ALL -check-prefix=N64
+; RUN: llc -march=mips64 -mcpu=mips64r3 -enable-mips-tail-calls < %s | FileCheck %s -check-prefix=ALL -check-prefix=N64
+; RUN: llc -march=mips64 -mcpu=mips64r5 -enable-mips-tail-calls < %s | FileCheck %s -check-prefix=ALL -check-prefix=N64
; RUN: llc -march=mips64 -mcpu=mips64r6 -enable-mips-tail-calls < %s | FileCheck %s -check-prefix=ALL -check-prefix=N64
declare void @extern_void_void()
diff --git a/test/CodeGen/Mips/llvm-ir/indirectbr.ll b/test/CodeGen/Mips/llvm-ir/indirectbr.ll
index d8fd787..debfeb3 100644
--- a/test/CodeGen/Mips/llvm-ir/indirectbr.ll
+++ b/test/CodeGen/Mips/llvm-ir/indirectbr.ll
@@ -2,10 +2,14 @@
; RUN: llc -march=mips -mcpu=mips32 -asm-show-inst < %s | FileCheck %s -check-prefix=ALL -check-prefix=NOT-R6
; RUN: llc -march=mips -mcpu=mips32r2 -asm-show-inst < %s | FileCheck %s -check-prefix=ALL -check-prefix=NOT-R6
+; RUN: llc -march=mips -mcpu=mips32r3 -asm-show-inst < %s | FileCheck %s -check-prefix=ALL -check-prefix=NOT-R6
+; RUN: llc -march=mips -mcpu=mips32r5 -asm-show-inst < %s | FileCheck %s -check-prefix=ALL -check-prefix=NOT-R6
; RUN: llc -march=mips -mcpu=mips32r6 -asm-show-inst < %s | FileCheck %s -check-prefix=ALL -check-prefix=R6
; RUN: llc -march=mips64 -mcpu=mips4 -asm-show-inst < %s | FileCheck %s -check-prefix=ALL -check-prefix=NOT-R6
; RUN: llc -march=mips64 -mcpu=mips64 -asm-show-inst < %s | FileCheck %s -check-prefix=ALL -check-prefix=NOT-R6
; RUN: llc -march=mips64 -mcpu=mips64r2 -asm-show-inst < %s | FileCheck %s -check-prefix=ALL -check-prefix=NOT-R6
+; RUN: llc -march=mips64 -mcpu=mips64r3 -asm-show-inst < %s | FileCheck %s -check-prefix=ALL -check-prefix=NOT-R6
+; RUN: llc -march=mips64 -mcpu=mips64r5 -asm-show-inst < %s | FileCheck %s -check-prefix=ALL -check-prefix=NOT-R6
; RUN: llc -march=mips64 -mcpu=mips64r6 -asm-show-inst < %s | FileCheck %s -check-prefix=ALL -check-prefix=R6
define i32 @br(i8 *%addr) {
diff --git a/test/CodeGen/Mips/llvm-ir/lshr.ll b/test/CodeGen/Mips/llvm-ir/lshr.ll
new file mode 100644
index 0000000..7344d95
--- /dev/null
+++ b/test/CodeGen/Mips/llvm-ir/lshr.ll
@@ -0,0 +1,188 @@
+; RUN: llc < %s -march=mips -mcpu=mips2 | FileCheck %s \
+; RUN: -check-prefix=ALL -check-prefix=GP32 \
+; RUN: -check-prefix=M2
+; RUN: llc < %s -march=mips -mcpu=mips32 | FileCheck %s \
+; RUN: -check-prefix=ALL -check-prefix=GP32 \
+; RUN: -check-prefix=32R1-R5
+; RUN: llc < %s -march=mips -mcpu=mips32r2 | FileCheck %s \
+; RUN: -check-prefix=ALL -check-prefix=GP32 \
+; RUN: -check-prefix=32R1-R5
+; RUN: llc < %s -march=mips -mcpu=mips32r3 | FileCheck %s \
+; RUN: -check-prefix=ALL -check-prefix=GP32 \
+; RUN: -check-prefix=32R1-R5
+; RUN: llc < %s -march=mips -mcpu=mips32r5 | FileCheck %s \
+; RUN: -check-prefix=ALL -check-prefix=GP32 \
+; RUN: -check-prefix=32R1-R5
+; RUN: llc < %s -march=mips -mcpu=mips32r6 | FileCheck %s \
+; RUN: -check-prefix=ALL -check-prefix=GP32 \
+; RUN: -check-prefix=32R6
+; RUN: llc < %s -march=mips64 -mcpu=mips3 | FileCheck %s \
+; RUN: -check-prefix=ALL -check-prefix=GP64 \
+; RUN: -check-prefix=M3
+; RUN: llc < %s -march=mips64 -mcpu=mips4 | FileCheck %s \
+; RUN: -check-prefix=ALL -check-prefix=GP64 \
+; RUN: -check-prefix=GP64-NOT-R6
+; RUN: llc < %s -march=mips64 -mcpu=mips64 | FileCheck %s \
+; RUN: -check-prefix=ALL -check-prefix=GP64 \
+; RUN: -check-prefix=GP64-NOT-R6
+; RUN: llc < %s -march=mips64 -mcpu=mips64r2 | FileCheck %s \
+; RUN: -check-prefix=ALL -check-prefix=GP64 \
+; RUN: -check-prefix=GP64-NOT-R6
+; RUN: llc < %s -march=mips64 -mcpu=mips64r3 | FileCheck %s \
+; RUN: -check-prefix=ALL -check-prefix=GP64 \
+; RUN: -check-prefix=GP64-NOT-R6
+; RUN: llc < %s -march=mips64 -mcpu=mips64r5 | FileCheck %s \
+; RUN: -check-prefix=ALL -check-prefix=GP64 \
+; RUN: -check-prefix=GP64-NOT-R6
+; RUN: llc < %s -march=mips64 -mcpu=mips64r6 | FileCheck %s \
+; RUN: -check-prefix=ALL -check-prefix=GP64 \
+; RUN: -check-prefix=64R6
+
+define signext i1 @lshr_i1(i1 signext %a, i1 signext %b) {
+entry:
+; ALL-LABEL: lshr_i1:
+
+ ; ALL: move $2, $4
+
+ %r = lshr i1 %a, %b
+ ret i1 %r
+}
+
+define zeroext i8 @lshr_i8(i8 zeroext %a, i8 zeroext %b) {
+entry:
+; ALL-LABEL: lshr_i8:
+
+ ; ALL: srlv $[[T0:[0-9]+]], $4, $5
+ ; ALL: andi $2, $[[T0]], 255
+
+ %r = lshr i8 %a, %b
+ ret i8 %r
+}
+
+define zeroext i16 @lshr_i16(i16 zeroext %a, i16 zeroext %b) {
+entry:
+; ALL-LABEL: lshr_i16:
+
+ ; ALL: srlv $[[T0:[0-9]+]], $4, $5
+ ; ALL: andi $2, $[[T0]], 65535
+
+ %r = lshr i16 %a, %b
+ ret i16 %r
+}
+
+define signext i32 @lshr_i32(i32 signext %a, i32 signext %b) {
+entry:
+; ALL-LABEL: lshr_i32:
+
+ ; ALL: srlv $2, $4, $5
+
+ %r = lshr i32 %a, %b
+ ret i32 %r
+}
+
+define signext i64 @lshr_i64(i64 signext %a, i64 signext %b) {
+entry:
+; ALL-LABEL: lshr_i64:
+
+ ; M2: srlv $[[T0:[0-9]+]], $4, $7
+ ; M2: andi $[[T1:[0-9]+]], $7, 32
+ ; M2: bnez $[[T1]], $[[BB0:BB[0-9_]+]]
+ ; M2: move $3, $[[T0]]
+ ; M2: srlv $[[T2:[0-9]+]], $5, $7
+ ; M2: not $[[T3:[0-9]+]], $7
+ ; M2: sll $[[T4:[0-9]+]], $4, 1
+ ; M2: sllv $[[T5:[0-9]+]], $[[T4]], $[[T3]]
+ ; M2: or $3, $[[T5]], $[[T2]]
+ ; M2: $[[BB0]]:
+ ; M2: bnez $[[T1]], $[[BB1:BB[0-9_]+]]
+ ; M2: addiu $2, $zero, 0
+ ; M2: move $2, $[[T0]]
+ ; M2: $[[BB1]]:
+ ; M2: jr $ra
+ ; M2: nop
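+ ; mips2 has no conditional moves, so the expansion branches instead;
+ ; note that the instruction immediately after each bnez/jr executes in
+ ; the branch delay slot whether or not the branch is taken.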
+
+ ; 32R1-R5: srlv $[[T0:[0-9]+]], $5, $7
+ ; 32R1-R5: not $[[T1:[0-9]+]], $7
+ ; 32R1-R5: sll $[[T2:[0-9]+]], $4, 1
+ ; 32R1-R5: sllv $[[T3:[0-9]+]], $[[T2]], $[[T1]]
+ ; 32R1-R5: or $3, $[[T3]], $[[T0]]
+ ; 32R1-R5: srlv $[[T4:[0-9]+]], $4, $7
+ ; 32R1-R5: andi $[[T5:[0-9]+]], $7, 32
+ ; 32R1-R5: movn $3, $[[T4]], $[[T5]]
+ ; 32R1-R5: jr $ra
+ ; 32R1-R5: movn $2, $zero, $[[T5]]
+
+ ; 32R6: srlv $[[T0:[0-9]+]], $5, $7
+ ; 32R6: not $[[T1:[0-9]+]], $7
+ ; 32R6: sll $[[T2:[0-9]+]], $4, 1
+ ; 32R6: sllv $[[T3:[0-9]+]], $[[T2]], $[[T1]]
+ ; 32R6: or $[[T4:[0-9]+]], $[[T3]], $[[T0]]
+ ; 32R6: andi $[[T5:[0-9]+]], $7, 32
+ ; 32R6: seleqz $[[T6:[0-9]+]], $[[T4]], $[[T5]]
+ ; 32R6: srlv $[[T7:[0-9]+]], $4, $7
+ ; 32R6: selnez $[[T8:[0-9]+]], $[[T7]], $[[T5]]
+ ; 32R6: or $3, $[[T8]], $[[T6]]
+ ; 32R6: jr $ra
+ ; 32R6: seleqz $2, $[[T7]], $[[T5]]
+
+ ; GP64: sll $[[T0:[0-9]+]], $5, 0
+ ; GP64: dsrlv $2, $4, $[[T0]]
+
+ %r = lshr i64 %a, %b
+ ret i64 %r
+}
+
+define signext i128 @lshr_i128(i128 signext %a, i128 signext %b) {
+entry:
+; ALL-LABEL: lshr_i128:
+
+ ; GP32: lw $25, %call16(__lshrti3)($gp)
+
+ ; M3: sll $[[T0:[0-9]+]], $7, 0
+ ; M3: dsrlv $[[T1:[0-9]+]], $4, $[[T0]]
+ ; M3: andi $[[T2:[0-9]+]], $[[T0]], 32
+ ; M3: bnez $[[T3:[0-9]+]], $[[BB0:BB[0-9_]+]]
+ ; M3: move $3, $[[T1]]
+ ; M3: dsrlv $[[T4:[0-9]+]], $5, $[[T0]]
+ ; M3: dsll $[[T5:[0-9]+]], $4, 1
+ ; M3: not $[[T6:[0-9]+]], $[[T0]]
+ ; M3: dsllv $[[T7:[0-9]+]], $[[T5]], $[[T6]]
+ ; M3: or $3, $[[T7]], $[[T4]]
+ ; M3: $[[BB0]]:
+ ; M3: bnez $[[T3]], $[[BB1:BB[0-9_]+]]
+ ; M3: daddiu $2, $zero, 0
+ ; M3: move $2, $[[T1]]
+ ; M3: $[[BB1]]:
+ ; M3: jr $ra
+ ; M3: nop
+
+ ; GP64-NOT-R6: sll $[[T0:[0-9]+]], $7, 0
+ ; GP64-NOT-R6: dsrlv $[[T1:[0-9]+]], $5, $[[T0]]
+ ; GP64-NOT-R6: dsll $[[T2:[0-9]+]], $4, 1
+ ; GP64-NOT-R6: not $[[T3:[0-9]+]], $[[T0]]
+ ; GP64-NOT-R6: dsllv $[[T4:[0-9]+]], $[[T2]], $[[T3]]
+ ; GP64-NOT-R6: or $3, $[[T4]], $[[T1]]
+ ; GP64-NOT-R6: dsrlv $2, $4, $[[T0]]
+ ; GP64-NOT-R6: andi $[[T5:[0-9]+]], $[[T0]], 32
+ ; GP64-NOT-R6: movn $3, $2, $[[T5]]
+ ; GP64-NOT-R6: jr $ra
+ ; GP64-NOT-R6: movn $2, $zero, $[[T5]]
+
+ ; 64R6: sll $[[T0:[0-9]+]], $7, 0
+ ; 64R6: dsrlv $[[T1:[0-9]+]], $5, $[[T0]]
+ ; 64R6: dsll $[[T2:[0-9]+]], $4, 1
+ ; 64R6: not $[[T3:[0-9]+]], $[[T0]]
+ ; 64R6: dsllv $[[T4:[0-9]+]], $[[T2]], $[[T3]]
+ ; 64R6: or $[[T5:[0-9]+]], $[[T4]], $[[T1]]
+ ; 64R6: andi $[[T6:[0-9]+]], $[[T0]], 32
+ ; 64R6: sll $[[T7:[0-9]+]], $[[T6]], 0
+ ; 64R6: seleqz $[[T8:[0-9]+]], $[[T5]], $[[T7]]
+ ; 64R6: dsrlv $[[T9:[0-9]+]], $4, $[[T0]]
+ ; 64R6: selnez $[[T10:[0-9]+]], $[[T9]], $[[T7]]
+ ; 64R6: or $3, $[[T10]], $[[T8]]
+ ; 64R6: jr $ra
+ ; 64R6: seleqz $2, $[[T9]], $[[T7]]
+
+ %r = lshr i128 %a, %b
+ ret i128 %r
+}
diff --git a/test/CodeGen/Mips/llvm-ir/mul.ll b/test/CodeGen/Mips/llvm-ir/mul.ll
index 1674124..a758280 100644
--- a/test/CodeGen/Mips/llvm-ir/mul.ll
+++ b/test/CodeGen/Mips/llvm-ir/mul.ll
@@ -1,19 +1,27 @@
-; RUN: llc < %s -march=mips -mcpu=mips2 | FileCheck %s \
-; RUN: -check-prefix=ALL -check-prefix=M2
-; RUN: llc < %s -march=mips -mcpu=mips32 | FileCheck %s \
-; RUN: -check-prefix=ALL -check-prefix=32R1-R2 -check-prefix=32R1
-; RUN: llc < %s -march=mips -mcpu=mips32r2 | FileCheck %s \
-; RUN: -check-prefix=ALL -check-prefix=32R1-R2 -check-prefix=32R2
-; RUN: llc < %s -march=mips -mcpu=mips32r6 | FileCheck %s \
-; RUN: -check-prefix=ALL -check-prefix=32R6
-; RUN: llc < %s -march=mips64 -mcpu=mips4 | FileCheck %s \
-; RUN: -check-prefix=ALL -check-prefix=M4
-; RUN: llc < %s -march=mips64 -mcpu=mips64 | FileCheck %s \
-; RUN: -check-prefix=ALL -check-prefix=64R1-R2
-; RUN: llc < %s -march=mips64 -mcpu=mips64r2 | FileCheck %s \
-; RUN: -check-prefix=ALL -check-prefix=64R1-R2
-; RUN: llc < %s -march=mips64 -mcpu=mips64r6 | FileCheck %s \
-; RUN: -check-prefix=ALL -check-prefix=64R6
+; RUN: llc < %s -march=mips -mcpu=mips2 | FileCheck %s -check-prefix=ALL \
+; RUN: -check-prefix=M2 -check-prefix=GP32
+; RUN: llc < %s -march=mips -mcpu=mips32 | FileCheck %s -check-prefix=ALL \
+; RUN: -check-prefix=32R1-R5 -check-prefix=GP32
+; RUN: llc < %s -march=mips -mcpu=mips32r2 | FileCheck %s -check-prefix=ALL \
+; RUN: -check-prefix=32R1-R5 -check-prefix=32R2-R5 -check-prefix=GP32
+; RUN: llc < %s -march=mips -mcpu=mips32r3 | FileCheck %s -check-prefix=ALL \
+; RUN: -check-prefix=32R1-R5 -check-prefix=32R2-R5 -check-prefix=GP32
+; RUN: llc < %s -march=mips -mcpu=mips32r5 | FileCheck %s -check-prefix=ALL \
+; RUN: -check-prefix=32R1-R5 -check-prefix=32R2-R5 -check-prefix=GP32
+; RUN: llc < %s -march=mips -mcpu=mips32r6 | FileCheck %s -check-prefix=ALL \
+; RUN: -check-prefix=32R6 -check-prefix=GP32
+; RUN: llc < %s -march=mips64 -mcpu=mips4 | FileCheck %s -check-prefix=ALL \
+; RUN: -check-prefix=M4 -check-prefix=GP64-NOT-R6
+; RUN: llc < %s -march=mips64 -mcpu=mips64 | FileCheck %s -check-prefix=ALL \
+; RUN: -check-prefix=64R1-R5 -check-prefix=GP64-NOT-R6
+; RUN: llc < %s -march=mips64 -mcpu=mips64r2 | FileCheck %s -check-prefix=ALL \
+; RUN: -check-prefix=64R1-R5 -check-prefix=GP64 -check-prefix=GP64-NOT-R6
+; RUN: llc < %s -march=mips64 -mcpu=mips64r3 | FileCheck %s -check-prefix=ALL \
+; RUN: -check-prefix=64R1-R5 -check-prefix=GP64 -check-prefix=GP64-NOT-R6
+; RUN: llc < %s -march=mips64 -mcpu=mips64r5 | FileCheck %s -check-prefix=ALL \
+; RUN: -check-prefix=64R1-R5 -check-prefix=GP64 -check-prefix=GP64-NOT-R6
+; RUN: llc < %s -march=mips64 -mcpu=mips64r6 | FileCheck %s -check-prefix=ALL \
+; RUN: -check-prefix=64R6
define signext i1 @mul_i1(i1 signext %a, i1 signext %b) {
entry:
@@ -24,9 +32,9 @@ entry:
; M2: sll $[[T0]], $[[T0]], 31
; M2: sra $2, $[[T0]], 31
- ; 32R1-R2: mul $[[T0:[0-9]+]], $4, $5
- ; 32R1-R2: sll $[[T0]], $[[T0]], 31
- ; 32R1-R2: sra $2, $[[T0]], 31
+ ; 32R1-R5: mul $[[T0:[0-9]+]], $4, $5
+ ; 32R1-R5: sll $[[T0]], $[[T0]], 31
+ ; 32R1-R5: sra $2, $[[T0]], 31
; 32R6: mul $[[T0:[0-9]+]], $4, $5
; 32R6: sll $[[T0]], $[[T0]], 31
@@ -37,9 +45,9 @@ entry:
; M4: sll $[[T0]], $[[T0]], 31
; M4: sra $2, $[[T0]], 31
- ; 64R1-R2: mul $[[T0:[0-9]+]], $4, $5
- ; 64R1-R2: sll $[[T0]], $[[T0]], 31
- ; 64R1-R2: sra $2, $[[T0]], 31
+ ; 64R1-R5: mul $[[T0:[0-9]+]], $4, $5
+ ; 64R1-R5: sll $[[T0]], $[[T0]], 31
+ ; 64R1-R5: sra $2, $[[T0]], 31
; 64R6: mul $[[T0:[0-9]+]], $4, $5
; 64R6: sll $[[T0]], $[[T0]], 31
@@ -62,8 +70,8 @@ entry:
; 32R1: sll $[[T0]], $[[T0]], 24
; 32R1: sra $2, $[[T0]], 24
- ; 32R2: mul $[[T0:[0-9]+]], $4, $5
- ; 32R2: seb $2, $[[T0]]
+ ; 32R2-R5: mul $[[T0:[0-9]+]], $4, $5
+ ; 32R2-R5: seb $2, $[[T0]]
; 32R6: mul $[[T0:[0-9]+]], $4, $5
; 32R6: seb $2, $[[T0]]
@@ -99,8 +107,8 @@ entry:
; 32R1: sll $[[T0]], $[[T0]], 16
; 32R1: sra $2, $[[T0]], 16
- ; 32R2: mul $[[T0:[0-9]+]], $4, $5
- ; 32R2: seh $2, $[[T0]]
+ ; 32R2-R5: mul $[[T0:[0-9]+]], $4, $5
+ ; 32R2-R5: seh $2, $[[T0]]
; 32R6: mul $[[T0:[0-9]+]], $4, $5
; 32R6: seh $2, $[[T0]]
@@ -130,10 +138,10 @@ entry:
; M2: mult $4, $5
; M2: mflo $2
- ; 32R1-R2: mul $2, $4, $5
+ ; 32R1-R5: mul $2, $4, $5
; 32R6: mul $2, $4, $5
- ; 64R1-R2: mul $2, $4, $5
+ ; 64R1-R5: mul $2, $4, $5
; 64R6: mul $2, $4, $5
%r = mul i32 %a, %b
ret i32 %r
@@ -153,13 +161,13 @@ entry:
; M2: addu $[[T2:[0-9]+]], $4, $[[T1]]
; M2: addu $2, $[[T2]], $[[T0]]
- ; 32R1-R2: multu $5, $7
- ; 32R1-R2: mflo $3
- ; 32R1-R2: mfhi $[[T0:[0-9]+]]
- ; 32R1-R2: mul $[[T1:[0-9]+]], $4, $7
- ; 32R1-R2: mul $[[T2:[0-9]+]], $5, $6
- ; 32R1-R2: addu $[[T0]], $[[T0]], $[[T2:[0-9]+]]
- ; 32R1-R2: addu $2, $[[T0]], $[[T1]]
+ ; 32R1-R5: multu $5, $7
+ ; 32R1-R5: mflo $3
+ ; 32R1-R5: mfhi $[[T0:[0-9]+]]
+ ; 32R1-R5: mul $[[T1:[0-9]+]], $4, $7
+ ; 32R1-R5: mul $[[T2:[0-9]+]], $5, $6
+ ; 32R1-R5: addu $[[T0]], $[[T0]], $[[T2]]
+ ; 32R1-R5: addu $2, $[[T0]], $[[T1]]
; 32R6: mul $[[T0:[0-9]+]], $5, $6
; 32R6: muhu $[[T1:[0-9]+]], $5, $7
@@ -171,11 +179,38 @@ entry:
; M4: dmult $4, $5
; M4: mflo $2
- ; 64R1-R2: dmult $4, $5
- ; 64R1-R2: mflo $2
+ ; 64R1-R5: dmult $4, $5
+ ; 64R1-R5: mflo $2
; 64R6: dmul $2, $4, $5
%r = mul i64 %a, %b
ret i64 %r
}
+
+define signext i128 @mul_i128(i128 signext %a, i128 signext %b) {
+entry:
+; ALL-LABEL: mul_i128:
+
+ ; GP32: lw $25, %call16(__multi3)($gp)
+
+ ; GP64-NOT-R6: dmult $4, $7
+ ; GP64-NOT-R6: mflo $[[T0:[0-9]+]]
+ ; GP64-NOT-R6: dmult $5, $6
+ ; GP64-NOT-R6: mflo $[[T1:[0-9]+]]
+ ; GP64-NOT-R6: dmultu $5, $7
+ ; GP64-NOT-R6: mflo $3
+ ; GP64-NOT-R6: mfhi $[[T2:[0-9]+]]
+ ; GP64-NOT-R6: daddu $[[T3:[0-9]+]], $[[T2]], $[[T1]]
+ ; GP64-NOT-R6: daddu $2, $[[T3]], $[[T0]]
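+ ; This is the schoolbook decomposition of a 128-bit product from
+ ; 64-bit halves, with a = a1:a0 in $4:$5 and b = b1:b0 in $6:$7:
+ ;   lo = low64(a0 * b0)
+ ;   hi = a1*b0 + a0*b1 + high64(a0 * b0)
+ ; dmult/mflo supply the two cross products and dmultu/mfhi the
+ ; unsigned high half of a0 * b0.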
+
+ ; 64R6: dmul $[[T0:[0-9]+]], $5, $6
+ ; 64R6: dmuhu $[[T1:[0-9]+]], $5, $7
+ ; 64R6: daddu $[[T2:[0-9]+]], $[[T1]], $[[T0]]
+ ; 64R6: dmul $[[T3:[0-9]+]], $4, $7
+ ; 64R6: daddu $2, $[[T2]], $[[T3]]
+ ; 64R6: dmul $3, $5, $7
+
+ %r = mul i128 %a, %b
+ ret i128 %r
+}
diff --git a/test/CodeGen/Mips/llvm-ir/or.ll b/test/CodeGen/Mips/llvm-ir/or.ll
new file mode 100644
index 0000000..6215e40
--- /dev/null
+++ b/test/CodeGen/Mips/llvm-ir/or.ll
@@ -0,0 +1,99 @@
+; RUN: llc < %s -march=mips -mcpu=mips2 | FileCheck %s \
+; RUN: -check-prefix=ALL -check-prefix=GP32
+; RUN: llc < %s -march=mips -mcpu=mips32 | FileCheck %s \
+; RUN: -check-prefix=ALL -check-prefix=GP32
+; RUN: llc < %s -march=mips -mcpu=mips32r2 | FileCheck %s \
+; RUN: -check-prefix=ALL -check-prefix=GP32
+; RUN: llc < %s -march=mips -mcpu=mips32r3 | FileCheck %s \
+; RUN: -check-prefix=ALL -check-prefix=GP32
+; RUN: llc < %s -march=mips -mcpu=mips32r5 | FileCheck %s \
+; RUN: -check-prefix=ALL -check-prefix=GP32
+; RUN: llc < %s -march=mips -mcpu=mips32r6 | FileCheck %s \
+; RUN: -check-prefix=ALL -check-prefix=GP32
+; RUN: llc < %s -march=mips64 -mcpu=mips3 | FileCheck %s \
+; RUN: -check-prefix=ALL -check-prefix=GP64
+; RUN: llc < %s -march=mips64 -mcpu=mips4 | FileCheck %s \
+; RUN: -check-prefix=ALL -check-prefix=GP64
+; RUN: llc < %s -march=mips64 -mcpu=mips64 | FileCheck %s \
+; RUN: -check-prefix=ALL -check-prefix=GP64
+; RUN: llc < %s -march=mips64 -mcpu=mips64r2 | FileCheck %s \
+; RUN: -check-prefix=ALL -check-prefix=GP64
+; RUN: llc < %s -march=mips64 -mcpu=mips64r3 | FileCheck %s \
+; RUN: -check-prefix=ALL -check-prefix=GP64
+; RUN: llc < %s -march=mips64 -mcpu=mips64r5 | FileCheck %s \
+; RUN: -check-prefix=ALL -check-prefix=GP64
+; RUN: llc < %s -march=mips64 -mcpu=mips64r6 | FileCheck %s \
+; RUN: -check-prefix=ALL -check-prefix=GP64
+
+define signext i1 @or_i1(i1 signext %a, i1 signext %b) {
+entry:
+; ALL-LABEL: or_i1:
+
+ ; ALL: or $2, $4, $5
+
+ %r = or i1 %a, %b
+ ret i1 %r
+}
+
+define signext i8 @or_i8(i8 signext %a, i8 signext %b) {
+entry:
+; ALL-LABEL: or_i8:
+
+ ; ALL: or $2, $4, $5
+
+ %r = or i8 %a, %b
+ ret i8 %r
+}
+
+define signext i16 @or_i16(i16 signext %a, i16 signext %b) {
+entry:
+; ALL-LABEL: or_i16:
+
+ ; ALL: or $2, $4, $5
+
+ %r = or i16 %a, %b
+ ret i16 %r
+}
+
+define signext i32 @or_i32(i32 signext %a, i32 signext %b) {
+entry:
+; ALL-LABEL: or_i32:
+
+ ; ALL: or $2, $4, $5
+
+ %r = or i32 %a, %b
+ ret i32 %r
+}
+
+define signext i64 @or_i64(i64 signext %a, i64 signext %b) {
+entry:
+; ALL-LABEL: or_i64:
+
+ ; GP32: or $2, $4, $6
+ ; GP32: or $3, $5, $7
+
+ ; GP64: or $2, $4, $5
+
+ %r = or i64 %a, %b
+ ret i64 %r
+}
+
+define signext i128 @or_i128(i128 signext %a, i128 signext %b) {
+entry:
+; ALL-LABEL: or_i128:
+
+ ; GP32: lw $[[T0:[0-9]+]], 24($sp)
+ ; GP32: lw $[[T1:[0-9]+]], 20($sp)
+ ; GP32: lw $[[T2:[0-9]+]], 16($sp)
+ ; GP32: or $2, $4, $[[T2]]
+ ; GP32: or $3, $5, $[[T1]]
+ ; GP32: or $4, $6, $[[T0]]
+ ; GP32: lw $[[T3:[0-9]+]], 28($sp)
+ ; GP32: or $5, $7, $[[T3]]
+
+ ; GP64: or $2, $4, $6
+ ; GP64: or $3, $5, $7
+
+ %r = or i128 %a, %b
+ ret i128 %r
+}
diff --git a/test/CodeGen/Mips/llvm-ir/ret.ll b/test/CodeGen/Mips/llvm-ir/ret.ll
index 8f5b115..0561c24 100644
--- a/test/CodeGen/Mips/llvm-ir/ret.ll
+++ b/test/CodeGen/Mips/llvm-ir/ret.ll
@@ -9,10 +9,14 @@
; RUN: llc -march=mips -mcpu=mips32 -asm-show-inst < %s | FileCheck %s -check-prefix=ALL -check-prefix=GPR32 -check-prefix=NO-MTHC1 -check-prefix=NOT-R6
; RUN: llc -march=mips -mcpu=mips32r2 -asm-show-inst < %s | FileCheck %s -check-prefix=ALL -check-prefix=GPR32 -check-prefix=MTHC1 -check-prefix=NOT-R6
+; RUN: llc -march=mips -mcpu=mips32r3 -asm-show-inst < %s | FileCheck %s -check-prefix=ALL -check-prefix=GPR32 -check-prefix=MTHC1 -check-prefix=NOT-R6
+; RUN: llc -march=mips -mcpu=mips32r5 -asm-show-inst < %s | FileCheck %s -check-prefix=ALL -check-prefix=GPR32 -check-prefix=MTHC1 -check-prefix=NOT-R6
; RUN: llc -march=mips -mcpu=mips32r6 -asm-show-inst < %s | FileCheck %s -check-prefix=ALL -check-prefix=GPR32 -check-prefix=MTHC1 -check-prefix=R6
; RUN: llc -march=mips64 -mcpu=mips4 -asm-show-inst < %s | FileCheck %s -check-prefix=ALL -check-prefix=GPR64 -check-prefix=DMTC1 -check-prefix=NOT-R6
; RUN: llc -march=mips64 -mcpu=mips64 -asm-show-inst < %s | FileCheck %s -check-prefix=ALL -check-prefix=GPR64 -check-prefix=DMTC1 -check-prefix=NOT-R6
; RUN: llc -march=mips64 -mcpu=mips64r2 -asm-show-inst < %s | FileCheck %s -check-prefix=ALL -check-prefix=GPR64 -check-prefix=DMTC1 -check-prefix=NOT-R6
+; RUN: llc -march=mips64 -mcpu=mips64r3 -asm-show-inst < %s | FileCheck %s -check-prefix=ALL -check-prefix=GPR64 -check-prefix=DMTC1 -check-prefix=NOT-R6
+; RUN: llc -march=mips64 -mcpu=mips64r5 -asm-show-inst < %s | FileCheck %s -check-prefix=ALL -check-prefix=GPR64 -check-prefix=DMTC1 -check-prefix=NOT-R6
; RUN: llc -march=mips64 -mcpu=mips64r6 -asm-show-inst < %s | FileCheck %s -check-prefix=ALL -check-prefix=GPR64 -check-prefix=DMTC1 -check-prefix=R6
define void @ret_void() {
diff --git a/test/CodeGen/Mips/llvm-ir/sdiv.ll b/test/CodeGen/Mips/llvm-ir/sdiv.ll
new file mode 100644
index 0000000..929ee88
--- /dev/null
+++ b/test/CodeGen/Mips/llvm-ir/sdiv.ll
@@ -0,0 +1,144 @@
+; RUN: llc < %s -march=mips -mcpu=mips2 | FileCheck %s \
+; RUN:    -check-prefix=ALL -check-prefix=NOT-R6 -check-prefix=NOT-R2-R6 -check-prefix=GP32
+; RUN: llc < %s -march=mips -mcpu=mips32 | FileCheck %s \
+; RUN:    -check-prefix=ALL -check-prefix=NOT-R6 -check-prefix=NOT-R2-R6 -check-prefix=GP32
+; RUN: llc < %s -march=mips -mcpu=mips32r2 | FileCheck %s \
+; RUN:    -check-prefix=ALL -check-prefix=NOT-R6 -check-prefix=R2-R5 -check-prefix=GP32
+; RUN: llc < %s -march=mips -mcpu=mips32r3 | FileCheck %s \
+; RUN:    -check-prefix=ALL -check-prefix=NOT-R6 -check-prefix=R2-R5 -check-prefix=GP32
+; RUN: llc < %s -march=mips -mcpu=mips32r5 | FileCheck %s \
+; RUN:    -check-prefix=ALL -check-prefix=NOT-R6 -check-prefix=R2-R5 -check-prefix=GP32
+; RUN: llc < %s -march=mips -mcpu=mips32r6 | FileCheck %s \
+; RUN:    -check-prefix=ALL -check-prefix=R6 -check-prefix=GP32
+; RUN: llc < %s -march=mips64 -mcpu=mips3 | FileCheck %s \
+; RUN:    -check-prefix=ALL -check-prefix=NOT-R6 -check-prefix=NOT-R2-R6 -check-prefix=GP64-NOT-R6
+; RUN: llc < %s -march=mips64 -mcpu=mips4 | FileCheck %s \
+; RUN:    -check-prefix=ALL -check-prefix=NOT-R6 -check-prefix=NOT-R2-R6 -check-prefix=GP64-NOT-R6
+; RUN: llc < %s -march=mips64 -mcpu=mips64 | FileCheck %s \
+; RUN:    -check-prefix=ALL -check-prefix=NOT-R6 -check-prefix=NOT-R2-R6 -check-prefix=GP64-NOT-R6
+; RUN: llc < %s -march=mips64 -mcpu=mips64r2 | FileCheck %s \
+; RUN:    -check-prefix=ALL -check-prefix=NOT-R6 -check-prefix=R2-R5 -check-prefix=GP64-NOT-R6
+; RUN: llc < %s -march=mips64 -mcpu=mips64r3 | FileCheck %s \
+; RUN:    -check-prefix=ALL -check-prefix=NOT-R6 -check-prefix=R2-R5 -check-prefix=GP64-NOT-R6
+; RUN: llc < %s -march=mips64 -mcpu=mips64r5 | FileCheck %s \
+; RUN:    -check-prefix=ALL -check-prefix=NOT-R6 -check-prefix=R2-R5 -check-prefix=GP64-NOT-R6
+; RUN: llc < %s -march=mips64 -mcpu=mips64r6 | FileCheck %s \
+; RUN:    -check-prefix=ALL -check-prefix=R6 -check-prefix=64R6
+
+define signext i1 @sdiv_i1(i1 signext %a, i1 signext %b) {
+entry:
+; ALL-LABEL: sdiv_i1:
+
+ ; NOT-R6: div $zero, $4, $5
+ ; NOT-R6: teq $5, $zero, 7
+ ; NOT-R6: mflo $[[T0:[0-9]+]]
+ ; FIXME: The sll/sra instructions are redundant since div is signed.
+ ; NOT-R6: sll $[[T1:[0-9]+]], $[[T0]], 31
+ ; NOT-R6: sra $2, $[[T1]], 31
+
+ ; R6: div $[[T0:[0-9]+]], $4, $5
+ ; R6: teq $5, $zero, 7
+ ; FIXME: The sll/sra instructions are redundant since div is signed.
+ ; R6: sll $[[T1:[0-9]+]], $[[T0]], 31
+ ; R6: sra $2, $[[T1]], 31
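+ ; The sll 31 / sra 31 pair sign-extends from bit 0 by moving the low
+ ; bit to the sign position and replicating it back down; as the FIXMEs
+ ; note, the inputs are already sign-extended, so it is unnecessary.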
+
+ %r = sdiv i1 %a, %b
+ ret i1 %r
+}
+
+define signext i8 @sdiv_i8(i8 signext %a, i8 signext %b) {
+entry:
+; ALL-LABEL: sdiv_i8:
+
+ ; NOT-R2-R6: div $zero, $4, $5
+ ; NOT-R2-R6: teq $5, $zero, 7
+ ; NOT-R2-R6: mflo $[[T0:[0-9]+]]
+ ; FIXME: The sll/sra instructions are redundant since div is signed.
+ ; NOT-R2-R6: sll $[[T1:[0-9]+]], $[[T0]], 24
+ ; NOT-R2-R6: sra $2, $[[T1]], 24
+
+ ; R2-R5: div $zero, $4, $5
+ ; R2-R5: teq $5, $zero, 7
+ ; R2-R5: mflo $[[T0:[0-9]+]]
+ ; FIXME: This instruction is redundant.
+ ; R2-R5: seb $2, $[[T0]]
+
+ ; R6: div $[[T0:[0-9]+]], $4, $5
+ ; R6: teq $5, $zero, 7
+ ; FIXME: This instruction is redundant.
+ ; R6: seb $2, $[[T0]]
+
+ %r = sdiv i8 %a, %b
+ ret i8 %r
+}
+
+define signext i16 @sdiv_i16(i16 signext %a, i16 signext %b) {
+entry:
+; ALL-LABEL: sdiv_i16:
+
+ ; NOT-R2-R6: div $zero, $4, $5
+ ; NOT-R2-R6: teq $5, $zero, 7
+ ; NOT-R2-R6: mflo $[[T0:[0-9]+]]
+ ; FIXME: The sll/sra instructions are redundant since div is signed.
+ ; NOT-R2-R6: sll $[[T1:[0-9]+]], $[[T0]], 16
+ ; NOT-R2-R6: sra $2, $[[T1]], 16
+
+ ; R2-R5: div $zero, $4, $5
+ ; R2-R5: teq $5, $zero, 7
+ ; R2-R5: mflo $[[T0:[0-9]+]]
+ ; FIXME: This instruction is redundant since div is signed.
+ ; R2-R5: seh $2, $[[T0]]
+
+ ; R6: div $[[T0:[0-9]+]], $4, $5
+ ; R6: teq $5, $zero, 7
+ ; FIXME: This instruction is redundant since div is signed.
+ ; R6: seh $2, $[[T0]]
+
+ %r = sdiv i16 %a, %b
+ ret i16 %r
+}
+
+define signext i32 @sdiv_i32(i32 signext %a, i32 signext %b) {
+entry:
+; ALL-LABEL: sdiv_i32:
+
+ ; NOT-R6: div $zero, $4, $5
+ ; NOT-R6: teq $5, $zero, 7
+ ; NOT-R6: mflo $2
+
+ ; R6: div $2, $4, $5
+ ; R6: teq $5, $zero, 7
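+ ; teq $5, $zero, 7 traps iff the divisor is zero (7 is the
+ ; conventional divide-by-zero break code); the guard is kept even on
+ ; R6, where div writes the quotient straight to a GPR.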
+
+ %r = sdiv i32 %a, %b
+ ret i32 %r
+}
+
+define signext i64 @sdiv_i64(i64 signext %a, i64 signext %b) {
+entry:
+; ALL-LABEL: sdiv_i64:
+
+ ; GP32: lw $25, %call16(__divdi3)($gp)
+
+ ; GP64-NOT-R6: ddiv $zero, $4, $5
+ ; GP64-NOT-R6: teq $5, $zero, 7
+ ; GP64-NOT-R6: mflo $2
+
+ ; 64R6: ddiv $2, $4, $5
+ ; 64R6: teq $5, $zero, 7
+
+ %r = sdiv i64 %a, %b
+ ret i64 %r
+}
+
+define signext i128 @sdiv_i128(i128 signext %a, i128 signext %b) {
+entry:
+; ALL-LABEL: sdiv_i128:
+
+ ; GP32: lw $25, %call16(__divti3)($gp)
+
+ ; GP64-NOT-R6: ld $25, %call16(__divti3)($gp)
+ ; 64R6: ld $25, %call16(__divti3)($gp)
+
+ %r = sdiv i128 %a, %b
+ ret i128 %r
+}
diff --git a/test/CodeGen/Mips/llvm-ir/select.ll b/test/CodeGen/Mips/llvm-ir/select.ll
new file mode 100644
index 0000000..f17670a
--- /dev/null
+++ b/test/CodeGen/Mips/llvm-ir/select.ll
@@ -0,0 +1,712 @@
+; RUN: llc < %s -march=mips -mcpu=mips2 | FileCheck %s \
+; RUN: -check-prefix=ALL -check-prefix=M2 -check-prefix=M2-M3
+; RUN: llc < %s -march=mips -mcpu=mips32 | FileCheck %s \
+; RUN: -check-prefix=ALL -check-prefix=CMOV \
+; RUN: -check-prefix=CMOV-32 -check-prefix=CMOV-32R1
+; RUN: llc < %s -march=mips -mcpu=mips32r2 | FileCheck %s \
+; RUN: -check-prefix=ALL -check-prefix=CMOV \
+; RUN: -check-prefix=CMOV-32 -check-prefix=CMOV-32R2-R5
+; RUN: llc < %s -march=mips -mcpu=mips32r3 | FileCheck %s \
+; RUN: -check-prefix=ALL -check-prefix=CMOV \
+; RUN: -check-prefix=CMOV-32 -check-prefix=CMOV-32R2-R5
+; RUN: llc < %s -march=mips -mcpu=mips32r5 | FileCheck %s \
+; RUN: -check-prefix=ALL -check-prefix=CMOV \
+; RUN: -check-prefix=CMOV-32 -check-prefix=CMOV-32R2-R5
+; RUN: llc < %s -march=mips -mcpu=mips32r6 | FileCheck %s \
+; RUN: -check-prefix=ALL -check-prefix=SEL -check-prefix=SEL-32
+; RUN: llc < %s -march=mips64 -mcpu=mips3 | FileCheck %s \
+; RUN: -check-prefix=ALL -check-prefix=M3 -check-prefix=M2-M3
+; RUN: llc < %s -march=mips64 -mcpu=mips4 | FileCheck %s \
+; RUN: -check-prefix=ALL -check-prefix=CMOV -check-prefix=CMOV-64
+; RUN: llc < %s -march=mips64 -mcpu=mips64 | FileCheck %s \
+; RUN: -check-prefix=ALL -check-prefix=CMOV -check-prefix=CMOV-64
+; RUN: llc < %s -march=mips64 -mcpu=mips64r2 | FileCheck %s \
+; RUN: -check-prefix=ALL -check-prefix=CMOV -check-prefix=CMOV-64
+; RUN: llc < %s -march=mips64 -mcpu=mips64r3 | FileCheck %s \
+; RUN: -check-prefix=ALL -check-prefix=CMOV -check-prefix=CMOV-64
+; RUN: llc < %s -march=mips64 -mcpu=mips64r5 | FileCheck %s \
+; RUN: -check-prefix=ALL -check-prefix=CMOV -check-prefix=CMOV-64
+; RUN: llc < %s -march=mips64 -mcpu=mips64r6 | FileCheck %s \
+; RUN: -check-prefix=ALL -check-prefix=SEL -check-prefix=SEL-64
+
+define signext i1 @tst_select_i1_i1(i1 signext %s,
+ i1 signext %x, i1 signext %y) {
+entry:
+ ; ALL-LABEL: tst_select_i1_i1:
+
+ ; M2-M3: andi $[[T0:[0-9]+]], $4, 1
+ ; M2-M3: bnez $[[T0]], $[[BB0:BB[0-9_]+]]
+ ; M2-M3: nop
+ ; M2-M3: move $5, $6
+ ; M2-M3: $[[BB0]]:
+ ; M2-M3: jr $ra
+ ; M2-M3: move $2, $5
+
+ ; CMOV: andi $[[T0:[0-9]+]], $4, 1
+ ; CMOV: movn $6, $5, $[[T0]]
+ ; CMOV: move $2, $6
+
+ ; SEL: andi $[[T0:[0-9]+]], $4, 1
+ ; SEL: seleqz $[[T1:[0-9]+]], $6, $[[T0]]
+ ; SEL: selnez $[[T2:[0-9]+]], $5, $[[T0]]
+ ; SEL: or $2, $[[T2]], $[[T1]]
+ %r = select i1 %s, i1 %x, i1 %y
+ ret i1 %r
+}
+
+define signext i8 @tst_select_i1_i8(i1 signext %s,
+ i8 signext %x, i8 signext %y) {
+entry:
+ ; ALL-LABEL: tst_select_i1_i8:
+
+ ; M2-M3: andi $[[T0:[0-9]+]], $4, 1
+ ; M2-M3: bnez $[[T0]], $[[BB0:BB[0-9_]+]]
+ ; M2-M3: nop
+ ; M2-M3: move $5, $6
+ ; M2-M3: $[[BB0]]:
+ ; M2-M3: jr $ra
+ ; M2-M3: move $2, $5
+
+ ; CMOV: andi $[[T0:[0-9]+]], $4, 1
+ ; CMOV: movn $6, $5, $[[T0]]
+ ; CMOV: move $2, $6
+
+ ; SEL: andi $[[T0:[0-9]+]], $4, 1
+ ; SEL: seleqz $[[T1:[0-9]+]], $6, $[[T0]]
+ ; SEL: selnez $[[T2:[0-9]+]], $5, $[[T0]]
+ ; SEL: or $2, $[[T2]], $[[T1]]
+ %r = select i1 %s, i8 %x, i8 %y
+ ret i8 %r
+}
+
+define signext i32 @tst_select_i1_i32(i1 signext %s,
+ i32 signext %x, i32 signext %y) {
+entry:
+ ; ALL-LABEL: tst_select_i1_i32:
+
+ ; M2-M3: andi $[[T0:[0-9]+]], $4, 1
+ ; M2-M3: bnez $[[T0]], $[[BB0:BB[0-9_]+]]
+ ; M2-M3: nop
+ ; M2-M3: move $5, $6
+ ; M2-M3: $[[BB0]]:
+ ; M2-M3: jr $ra
+ ; M2-M3: move $2, $5
+
+ ; CMOV: andi $[[T0:[0-9]+]], $4, 1
+ ; CMOV: movn $6, $5, $[[T0]]
+ ; CMOV: move $2, $6
+
+ ; SEL: andi $[[T0:[0-9]+]], $4, 1
+ ; SEL: seleqz $[[T1:[0-9]+]], $6, $[[T0]]
+ ; SEL: selnez $[[T2:[0-9]+]], $5, $[[T0]]
+ ; SEL: or $2, $[[T2]], $[[T1]]
+ %r = select i1 %s, i32 %x, i32 %y
+ ret i32 %r
+}
+
+define signext i64 @tst_select_i1_i64(i1 signext %s,
+ i64 signext %x, i64 signext %y) {
+entry:
+ ; ALL-LABEL: tst_select_i1_i64:
+
+ ; M2: andi $[[T0:[0-9]+]], $4, 1
+ ; M2: bnez $[[T0]], $[[BB0:BB[0-9_]+]]
+ ; M2: nop
+ ; M2: lw $[[T1:[0-9]+]], 16($sp)
+ ; M2: $[[BB0]]:
+ ; FIXME: This branch is redundant
+ ; M2: bnez $[[T0]], $[[BB1:BB[0-9_]+]]
+ ; M2: nop
+ ; M2: lw $[[T2:[0-9]+]], 20($sp)
+ ; M2: $[[BB1]]:
+ ; M2: move $2, $[[T1]]
+ ; M2: jr $ra
+ ; M2: move $3, $[[T2]]
+
+ ; CMOV-32: andi $[[T0:[0-9]+]], $4, 1
+ ; CMOV-32: lw $2, 16($sp)
+ ; CMOV-32: movn $2, $6, $[[T0]]
+ ; CMOV-32: lw $3, 20($sp)
+ ; CMOV-32: movn $3, $7, $[[T0]]
+
+ ; SEL-32: andi $[[T0:[0-9]+]], $4, 1
+ ; SEL-32: selnez $[[T1:[0-9]+]], $6, $[[T0]]
+ ; SEL-32: lw $[[T2:[0-9]+]], 16($sp)
+ ; SEL-32: seleqz $[[T3:[0-9]+]], $[[T2]], $[[T0]]
+ ; SEL-32: or $2, $[[T1]], $[[T3]]
+ ; SEL-32: selnez $[[T4:[0-9]+]], $7, $[[T0]]
+ ; SEL-32: lw $[[T5:[0-9]+]], 20($sp)
+ ; SEL-32: seleqz $[[T6:[0-9]+]], $[[T5]], $[[T0]]
+ ; SEL-32: or $3, $[[T4]], $[[T6]]
+
+ ; M3: andi $[[T0:[0-9]+]], $4, 1
+ ; M3: bnez $[[T0]], $[[BB0:BB[0-9_]+]]
+ ; M3: nop
+ ; M3: move $5, $6
+ ; M3: $[[BB0]]:
+ ; M3: jr $ra
+ ; M3: move $2, $5
+
+ ; CMOV-64: andi $[[T0:[0-9]+]], $4, 1
+ ; CMOV-64: movn $6, $5, $[[T0]]
+ ; CMOV-64: move $2, $6
+
+ ; SEL-64: andi $[[T0:[0-9]+]], $4, 1
+ ; FIXME: This shift is redundant
+ ; SEL-64: sll $[[T0]], $[[T0]], 0
+ ; SEL-64: seleqz $[[T1:[0-9]+]], $6, $[[T0]]
+ ; SEL-64: selnez $[[T0]], $5, $[[T0]]
+ ; SEL-64: or $2, $[[T0]], $[[T1]]
+ %r = select i1 %s, i64 %x, i64 %y
+ ret i64 %r
+}
+
+define float @tst_select_i1_float(i1 signext %s, float %x, float %y) {
+entry:
+ ; ALL-LABEL: tst_select_i1_float:
+
+ ; M2-M3: andi $[[T0:[0-9]+]], $4, 1
+ ; M2-M3: bnez $[[T0]], $[[BB0:BB[0-9_]+]]
+ ; M2-M3: nop
+ ; M2: jr $ra
+ ; M2: mtc1 $6, $f0
+ ; M3: mov.s $f13, $f14
+ ; M2-M3: $[[BB0]]:
+ ; M2-M3: jr $ra
+ ; M2: mtc1 $5, $f0
+ ; M3: mov.s $f0, $f13
+
+ ; CMOV-32: mtc1 $6, $f0
+ ; CMOV-32: mtc1 $5, $f1
+ ; CMOV-32: andi $[[T0:[0-9]+]], $4, 1
+ ; CMOV-32: movn.s $f0, $f1, $[[T0]]
+
+ ; SEL-32: mtc1 $5, $[[F0:f[0-9]+]]
+ ; SEL-32: mtc1 $6, $[[F1:f[0-9]+]]
+ ; SEL-32: mtc1 $4, $f0
+ ; SEL-32: sel.s $f0, $[[F1]], $[[F0]]
+
+ ; CMOV-64: andi $[[T0:[0-9]+]], $4, 1
+ ; CMOV-64: movn.s $f14, $f13, $[[T0]]
+ ; CMOV-64: mov.s $f0, $f14
+
+ ; SEL-64: mtc1 $4, $f0
+ ; SEL-64: sel.s $f0, $f14, $f13
+ %r = select i1 %s, float %x, float %y
+ ret float %r
+}
+
+define float @tst_select_i1_float_reordered(float %x, float %y,
+ i1 signext %s) {
+entry:
+ ; ALL-LABEL: tst_select_i1_float_reordered:
+
+ ; M2-M3: andi $[[T0:[0-9]+]], $6, 1
+ ; M2-M3: bnez $[[T0]], $[[BB0:BB[0-9_]+]]
+ ; M2-M3: nop
+ ; M2: mov.s $f12, $f14
+ ; M3: mov.s $f12, $f13
+ ; M2-M3: $[[BB0]]:
+ ; M2-M3: jr $ra
+ ; M2-M3: mov.s $f0, $f12
+
+ ; CMOV-32: andi $[[T0:[0-9]+]], $6, 1
+ ; CMOV-32: movn.s $f14, $f12, $[[T0]]
+ ; CMOV-32: mov.s $f0, $f14
+
+ ; SEL-32: mtc1 $6, $f0
+ ; SEL-32: sel.s $f0, $f14, $f12
+
+ ; CMOV-64: andi $[[T0:[0-9]+]], $6, 1
+ ; CMOV-64: movn.s $f13, $f12, $[[T0]]
+ ; CMOV-64: mov.s $f0, $f13
+
+ ; SEL-64: mtc1 $6, $f0
+ ; SEL-64: sel.s $f0, $f13, $f12
+ %r = select i1 %s, float %x, float %y
+ ret float %r
+}
+
+define double @tst_select_i1_double(i1 signext %s, double %x, double %y) {
+entry:
+ ; ALL-LABEL: tst_select_i1_double:
+
+ ; M2: andi $[[T0:[0-9]+]], $4, 1
+ ; M2: bnez $[[T0]], $[[BB0:BB[0-9_]+]]
+ ; M2: nop
+ ; M2: ldc1 $f0, 16($sp)
+ ; M2: jr $ra
+ ; M2: nop
+ ; M2: $[[BB0]]:
+ ; M2: mtc1 $7, $f0
+ ; M2: jr $ra
+ ; M2: mtc1 $6, $f1
+
+ ; CMOV-32: mtc1 $7, $[[F0:f[0-9]+]]
+ ; CMOV-32R1: mtc1 $6, $f{{[0-9]+}}
+ ; CMOV-32R2-R5: mthc1 $6, $[[F0]]
+ ; CMOV-32: andi $[[T0:[0-9]+]], $4, 1
+ ; CMOV-32: ldc1 $f0, 16($sp)
+ ; CMOV-32: movn.d $f0, $[[F0]], $[[T0]]
+
+ ; SEL-32: mtc1 $7, $[[F0:f[0-9]+]]
+ ; SEL-32: mthc1 $6, $[[F0]]
+ ; SEL-32: ldc1 $[[F1:f[0-9]+]], 16($sp)
+ ; SEL-32: mtc1 $4, $f0
+ ; SEL-32: sel.d $f0, $[[F1]], $[[F0]]
+
+ ; M3: andi $[[T0:[0-9]+]], $4, 1
+ ; M3: bnez $[[T0]], $[[BB0:BB[0-9_]+]]
+ ; M3: nop
+ ; M3: mov.d $f13, $f14
+ ; M3: $[[BB0]]:
+ ; M3: jr $ra
+ ; M3: mov.d $f0, $f13
+
+ ; CMOV-64: andi $[[T0:[0-9]+]], $4, 1
+ ; CMOV-64: movn.d $f14, $f13, $[[T0]]
+ ; CMOV-64: mov.d $f0, $f14
+
+ ; SEL-64: mtc1 $4, $f0
+ ; SEL-64: sel.d $f0, $f14, $f13
+ %r = select i1 %s, double %x, double %y
+ ret double %r
+}
+
+define double @tst_select_i1_double_reordered(double %x, double %y,
+ i1 signext %s) {
+entry:
+ ; ALL-LABEL: tst_select_i1_double_reordered:
+
+ ; M2: lw $[[T0:[0-9]+]], 16($sp)
+ ; M2: andi $[[T1:[0-9]+]], $[[T0]], 1
+ ; M2: bnez $[[T1]], $[[BB0:BB[0-9_]+]]
+ ; M2: nop
+ ; M2: mov.d $f12, $f14
+ ; M2: $[[BB0]]:
+ ; M2: jr $ra
+ ; M2: mov.d $f0, $f12
+
+ ; CMOV-32: lw $[[T0:[0-9]+]], 16($sp)
+ ; CMOV-32: andi $[[T1:[0-9]+]], $[[T0]], 1
+ ; CMOV-32: movn.d $f14, $f12, $[[T1]]
+ ; CMOV-32: mov.d $f0, $f14
+
+ ; SEL-32: lw $[[T0:[0-9]+]], 16($sp)
+ ; SEL-32: mtc1 $[[T0]], $f0
+ ; SEL-32: sel.d $f0, $f14, $f12
+
+ ; M3: andi $[[T0:[0-9]+]], $6, 1
+ ; M3: bnez $[[T0]], $[[BB0:BB[0-9_]+]]
+ ; M3: nop
+ ; M3: mov.d $f12, $f13
+ ; M3: $[[BB0]]:
+ ; M3: jr $ra
+ ; M3: mov.d $f0, $f12
+
+ ; CMOV-64: andi $[[T0:[0-9]+]], $6, 1
+ ; CMOV-64: movn.d $f13, $f12, $[[T0]]
+ ; CMOV-64: mov.d $f0, $f13
+
+ ; SEL-64: mtc1 $6, $f0
+ ; SEL-64: sel.d $f0, $f13, $f12
+ %r = select i1 %s, double %x, double %y
+ ret double %r
+}
+
+define float @tst_select_fcmp_olt_float(float %x, float %y) {
+entry:
+ ; ALL-LABEL: tst_select_fcmp_olt_float:
+
+ ; M2: c.olt.s $f12, $f14
+ ; M3: c.olt.s $f12, $f13
+ ; M2-M3: bc1t $[[BB0:BB[0-9_]+]]
+ ; M2-M3: nop
+ ; M2: mov.s $f12, $f14
+ ; M3: mov.s $f12, $f13
+ ; M2-M3: $[[BB0]]:
+ ; M2-M3: jr $ra
+ ; M2-M3: mov.s $f0, $f12
+
+ ; CMOV-32: c.olt.s $f12, $f14
+ ; CMOV-32: movt.s $f14, $f12, $fcc0
+ ; CMOV-32: mov.s $f0, $f14
+
+ ; SEL-32: cmp.lt.s $f0, $f12, $f14
+ ; SEL-32: sel.s $f0, $f14, $f12
+
+ ; CMOV-64: c.olt.s $f12, $f13
+ ; CMOV-64: movt.s $f13, $f12, $fcc0
+ ; CMOV-64: mov.s $f0, $f13
+
+ ; SEL-64: cmp.lt.s $f0, $f12, $f13
+ ; SEL-64: sel.s $f0, $f13, $f12
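+ ; Pre-R6, c.olt.s sets the $fcc0 flag that movt.s consumes. R6 removed
+ ; the FCC flags: cmp.lt.s writes an all-ones/all-zeros mask into an
+ ; FPR and sel.s picks one of its inputs based on that mask.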
+ %s = fcmp olt float %x, %y
+ %r = select i1 %s, float %x, float %y
+ ret float %r
+}
+
+define float @tst_select_fcmp_ole_float(float %x, float %y) {
+entry:
+ ; ALL-LABEL: tst_select_fcmp_ole_float:
+
+ ; M2: c.ole.s $f12, $f14
+ ; M3: c.ole.s $f12, $f13
+ ; M2-M3: bc1t $[[BB0:BB[0-9_]+]]
+ ; M2-M3: nop
+ ; M2: mov.s $f12, $f14
+ ; M3: mov.s $f12, $f13
+ ; M2-M3: $[[BB0]]:
+ ; M2-M3: jr $ra
+ ; M2-M3: mov.s $f0, $f12
+
+ ; CMOV-32: c.ole.s $f12, $f14
+ ; CMOV-32: movt.s $f14, $f12, $fcc0
+ ; CMOV-32: mov.s $f0, $f14
+
+ ; SEL-32: cmp.le.s $f0, $f12, $f14
+ ; SEL-32: sel.s $f0, $f14, $f12
+
+ ; CMOV-64: c.ole.s $f12, $f13
+ ; CMOV-64: movt.s $f13, $f12, $fcc0
+ ; CMOV-64: mov.s $f0, $f13
+
+ ; SEL-64: cmp.le.s $f0, $f12, $f13
+ ; SEL-64: sel.s $f0, $f13, $f12
+ %s = fcmp ole float %x, %y
+ %r = select i1 %s, float %x, float %y
+ ret float %r
+}
+
+define float @tst_select_fcmp_ogt_float(float %x, float %y) {
+entry:
+ ; ALL-LABEL: tst_select_fcmp_ogt_float:
+
+ ; M2: c.ule.s $f12, $f14
+ ; M3: c.ule.s $f12, $f13
+ ; M2-M3: bc1f $[[BB0:BB[0-9_]+]]
+ ; M2-M3: nop
+ ; M2: mov.s $f12, $f14
+ ; M3: mov.s $f12, $f13
+ ; M2-M3: $[[BB0]]:
+ ; M2-M3: jr $ra
+ ; M2-M3: mov.s $f0, $f12
+
+ ; CMOV-32: c.ule.s $f12, $f14
+ ; CMOV-32: movf.s $f14, $f12, $fcc0
+ ; CMOV-32: mov.s $f0, $f14
+
+ ; SEL-32: cmp.lt.s $f0, $f14, $f12
+ ; SEL-32: sel.s $f0, $f14, $f12
+
+ ; CMOV-64: c.ule.s $f12, $f13
+ ; CMOV-64: movf.s $f13, $f12, $fcc0
+ ; CMOV-64: mov.s $f0, $f13
+
+ ; SEL-64: cmp.lt.s $f0, $f13, $f12
+ ; SEL-64: sel.s $f0, $f13, $f12
+ %s = fcmp ogt float %x, %y
+ %r = select i1 %s, float %x, float %y
+ ret float %r
+}
+
+define float @tst_select_fcmp_oge_float(float %x, float %y) {
+entry:
+ ; ALL-LABEL: tst_select_fcmp_oge_float:
+
+ ; M2: c.ult.s $f12, $f14
+ ; M3: c.ult.s $f12, $f13
+ ; M2-M3: bc1f $[[BB0:BB[0-9_]+]]
+ ; M2-M3: nop
+ ; M2: mov.s $f12, $f14
+ ; M3: mov.s $f12, $f13
+ ; M2-M3: $[[BB0]]:
+ ; M2-M3: jr $ra
+ ; M2-M3: mov.s $f0, $f12
+
+ ; CMOV-32: c.ult.s $f12, $f14
+ ; CMOV-32: movf.s $f14, $f12, $fcc0
+ ; CMOV-32: mov.s $f0, $f14
+
+ ; SEL-32: cmp.le.s $f0, $f14, $f12
+ ; SEL-32: sel.s $f0, $f14, $f12
+
+ ; CMOV-64: c.ult.s $f12, $f13
+ ; CMOV-64: movf.s $f13, $f12, $fcc0
+ ; CMOV-64: mov.s $f0, $f13
+
+ ; SEL-64: cmp.le.s $f0, $f13, $f12
+ ; SEL-64: sel.s $f0, $f13, $f12
+ %s = fcmp oge float %x, %y
+ %r = select i1 %s, float %x, float %y
+ ret float %r
+}
+
+define float @tst_select_fcmp_oeq_float(float %x, float %y) {
+entry:
+ ; ALL-LABEL: tst_select_fcmp_oeq_float:
+
+ ; M2: c.eq.s $f12, $f14
+ ; M3: c.eq.s $f12, $f13
+ ; M2-M3: bc1t $[[BB0:BB[0-9_]+]]
+ ; M2-M3: nop
+ ; M2: mov.s $f12, $f14
+ ; M3: mov.s $f12, $f13
+ ; M2-M3: $[[BB0]]:
+ ; M2-M3: jr $ra
+ ; M2-M3: mov.s $f0, $f12
+
+ ; CMOV-32: c.eq.s $f12, $f14
+ ; CMOV-32: movt.s $f14, $f12, $fcc0
+ ; CMOV-32: mov.s $f0, $f14
+
+ ; SEL-32: cmp.eq.s $f0, $f12, $f14
+ ; SEL-32: sel.s $f0, $f14, $f12
+
+ ; CMOV-64: c.eq.s $f12, $f13
+ ; CMOV-64: movt.s $f13, $f12, $fcc0
+ ; CMOV-64: mov.s $f0, $f13
+
+ ; SEL-64: cmp.eq.s $f0, $f12, $f13
+ ; SEL-64: sel.s $f0, $f13, $f12
+ %s = fcmp oeq float %x, %y
+ %r = select i1 %s, float %x, float %y
+ ret float %r
+}
+
+define float @tst_select_fcmp_one_float(float %x, float %y) {
+entry:
+ ; ALL-LABEL: tst_select_fcmp_one_float:
+
+ ; M2: c.ueq.s $f12, $f14
+ ; M3: c.ueq.s $f12, $f13
+ ; M2-M3: bc1f $[[BB0:BB[0-9_]+]]
+ ; M2-M3: nop
+ ; M2: mov.s $f12, $f14
+ ; M3: mov.s $f12, $f13
+ ; M2-M3: $[[BB0]]:
+ ; M2-M3: jr $ra
+ ; M2-M3: mov.s $f0, $f12
+
+ ; CMOV-32: c.ueq.s $f12, $f14
+ ; CMOV-32: movf.s $f14, $f12, $fcc0
+ ; CMOV-32: mov.s $f0, $f14
+
+ ; SEL-32: cmp.ueq.s $f0, $f12, $f14
+ ; SEL-32: mfc1 $[[T0:[0-9]+]], $f0
+ ; SEL-32: not $[[T0]], $[[T0]]
+ ; SEL-32: mtc1 $[[T0:[0-9]+]], $f0
+ ; SEL-32: sel.s $f0, $f14, $f12
+
+ ; CMOV-64: c.ueq.s $f12, $f13
+ ; CMOV-64: movf.s $f13, $f12, $fcc0
+ ; CMOV-64: mov.s $f0, $f13
+
+ ; SEL-64: cmp.ueq.s $f0, $f12, $f13
+ ; SEL-64: mfc1 $[[T0:[0-9]+]], $f0
+ ; SEL-64: not $[[T0]], $[[T0]]
+ ; SEL-64: mtc1 $[[T0:[0-9]+]], $f0
+ ; SEL-64: sel.s $f0, $f13, $f12
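+ ; There is no cmp.one.s, so ONE is computed as the negation of UEQ
+ ; (unordered-or-equal): the mask is moved to a GPR with mfc1, inverted
+ ; with not, and moved back with mtc1 before sel.s.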
+
+ %s = fcmp one float %x, %y
+ %r = select i1 %s, float %x, float %y
+ ret float %r
+}
+
+define double @tst_select_fcmp_olt_double(double %x, double %y) {
+entry:
+ ; ALL-LABEL: tst_select_fcmp_olt_double:
+
+ ; M2: c.olt.d $f12, $f14
+ ; M3: c.olt.d $f12, $f13
+ ; M2-M3: bc1t $[[BB0:BB[0-9_]+]]
+ ; M2-M3: nop
+ ; M2: mov.d $f12, $f14
+ ; M3: mov.d $f12, $f13
+ ; M2-M3: $[[BB0]]:
+ ; M2-M3: jr $ra
+ ; M2-M3: mov.d $f0, $f12
+
+ ; CMOV-32: c.olt.d $f12, $f14
+ ; CMOV-32: movt.d $f14, $f12, $fcc0
+ ; CMOV-32: mov.d $f0, $f14
+
+ ; SEL-32: cmp.lt.d $f0, $f12, $f14
+ ; SEL-32: sel.d $f0, $f14, $f12
+
+ ; CMOV-64: c.olt.d $f12, $f13
+ ; CMOV-64: movt.d $f13, $f12, $fcc0
+ ; CMOV-64: mov.d $f0, $f13
+
+ ; SEL-64: cmp.lt.d $f0, $f12, $f13
+ ; SEL-64: sel.d $f0, $f13, $f12
+ %s = fcmp olt double %x, %y
+ %r = select i1 %s, double %x, double %y
+ ret double %r
+}
+
+define double @tst_select_fcmp_ole_double(double %x, double %y) {
+entry:
+ ; ALL-LABEL: tst_select_fcmp_ole_double:
+
+ ; M2: c.ole.d $f12, $f14
+ ; M3: c.ole.d $f12, $f13
+ ; M2-M3: bc1t $[[BB0:BB[0-9_]+]]
+ ; M2-M3: nop
+ ; M2: mov.d $f12, $f14
+ ; M3: mov.d $f12, $f13
+ ; M2-M3: $[[BB0]]:
+ ; M2-M3: jr $ra
+ ; M2-M3: mov.d $f0, $f12
+
+ ; CMOV-32: c.ole.d $f12, $f14
+ ; CMOV-32: movt.d $f14, $f12, $fcc0
+ ; CMOV-32: mov.d $f0, $f14
+
+ ; SEL-32: cmp.le.d $f0, $f12, $f14
+ ; SEL-32: sel.d $f0, $f14, $f12
+
+ ; CMOV-64: c.ole.d $f12, $f13
+ ; CMOV-64: movt.d $f13, $f12, $fcc0
+ ; CMOV-64: mov.d $f0, $f13
+
+ ; SEL-64: cmp.le.d $f0, $f12, $f13
+ ; SEL-64: sel.d $f0, $f13, $f12
+ %s = fcmp ole double %x, %y
+ %r = select i1 %s, double %x, double %y
+ ret double %r
+}
+
+define double @tst_select_fcmp_ogt_double(double %x, double %y) {
+entry:
+ ; ALL-LABEL: tst_select_fcmp_ogt_double:
+
+ ; M2: c.ule.d $f12, $f14
+ ; M3: c.ule.d $f12, $f13
+ ; M2-M3: bc1f $[[BB0:BB[0-9_]+]]
+ ; M2-M3: nop
+ ; M2: mov.d $f12, $f14
+ ; M3: mov.d $f12, $f13
+ ; M2-M3: $[[BB0]]:
+ ; M2-M3: jr $ra
+ ; M2-M3: mov.d $f0, $f12
+
+ ; CMOV-32: c.ule.d $f12, $f14
+ ; CMOV-32: movf.d $f14, $f12, $fcc0
+ ; CMOV-32: mov.d $f0, $f14
+
+ ; SEL-32: cmp.lt.d $f0, $f14, $f12
+ ; SEL-32: sel.d $f0, $f14, $f12
+
+ ; CMOV-64: c.ule.d $f12, $f13
+ ; CMOV-64: movf.d $f13, $f12, $fcc0
+ ; CMOV-64: mov.d $f0, $f13
+
+ ; SEL-64: cmp.lt.d $f0, $f13, $f12
+ ; SEL-64: sel.d $f0, $f13, $f12
+ %s = fcmp ogt double %x, %y
+ %r = select i1 %s, double %x, double %y
+ ret double %r
+}
+
+define double @tst_select_fcmp_oge_double(double %x, double %y) {
+entry:
+ ; ALL-LABEL: tst_select_fcmp_oge_double:
+
+ ; M2: c.ult.d $f12, $f14
+ ; M3: c.ult.d $f12, $f13
+ ; M2-M3: bc1f $[[BB0:BB[0-9_]+]]
+ ; M2-M3: nop
+ ; M2: mov.d $f12, $f14
+ ; M3: mov.d $f12, $f13
+ ; M2-M3: $[[BB0]]:
+ ; M2-M3: jr $ra
+ ; M2-M3: mov.d $f0, $f12
+
+ ; CMOV-32: c.ult.d $f12, $f14
+ ; CMOV-32: movf.d $f14, $f12, $fcc0
+ ; CMOV-32: mov.d $f0, $f14
+
+ ; SEL-32: cmp.le.d $f0, $f14, $f12
+ ; SEL-32: sel.d $f0, $f14, $f12
+
+ ; CMOV-64: c.ult.d $f12, $f13
+ ; CMOV-64: movf.d $f13, $f12, $fcc0
+ ; CMOV-64: mov.d $f0, $f13
+
+ ; SEL-64: cmp.le.d $f0, $f13, $f12
+ ; SEL-64: sel.d $f0, $f13, $f12
+ %s = fcmp oge double %x, %y
+ %r = select i1 %s, double %x, double %y
+ ret double %r
+}
+
+define double @tst_select_fcmp_oeq_double(double %x, double %y) {
+entry:
+ ; ALL-LABEL: tst_select_fcmp_oeq_double:
+
+ ; M2: c.eq.d $f12, $f14
+ ; M3: c.eq.d $f12, $f13
+ ; M2-M3: bc1t $[[BB0:BB[0-9_]+]]
+ ; M2-M3: nop
+ ; M2: mov.d $f12, $f14
+ ; M3: mov.d $f12, $f13
+ ; M2-M3: $[[BB0]]:
+ ; M2-M3: jr $ra
+ ; M2-M3: mov.d $f0, $f12
+
+ ; CMOV-32: c.eq.d $f12, $f14
+ ; CMOV-32: movt.d $f14, $f12, $fcc0
+ ; CMOV-32: mov.d $f0, $f14
+
+ ; SEL-32: cmp.eq.d $f0, $f12, $f14
+ ; SEL-32: sel.d $f0, $f14, $f12
+
+ ; CMOV-64: c.eq.d $f12, $f13
+ ; CMOV-64: movt.d $f13, $f12, $fcc0
+ ; CMOV-64: mov.d $f0, $f13
+
+ ; SEL-64: cmp.eq.d $f0, $f12, $f13
+ ; SEL-64: sel.d $f0, $f13, $f12
+ %s = fcmp oeq double %x, %y
+ %r = select i1 %s, double %x, double %y
+ ret double %r
+}
+
+define double @tst_select_fcmp_one_double(double %x, double %y) {
+entry:
+ ; ALL-LABEL: tst_select_fcmp_one_double:
+
+ ; M2: c.ueq.d $f12, $f14
+ ; M3: c.ueq.d $f12, $f13
+ ; M2-M3: bc1f $[[BB0:BB[0-9_]+]]
+ ; M2-M3: nop
+ ; M2: mov.d $f12, $f14
+ ; M3: mov.d $f12, $f13
+ ; M2-M3: $[[BB0]]:
+ ; M2-M3: jr $ra
+ ; M2-M3: mov.d $f0, $f12
+
+ ; CMOV-32: c.ueq.d $f12, $f14
+ ; CMOV-32: movf.d $f14, $f12, $fcc0
+ ; CMOV-32: mov.d $f0, $f14
+
+ ; SEL-32: cmp.ueq.d $f0, $f12, $f14
+ ; SEL-32: mfc1 $[[T0:[0-9]+]], $f0
+ ; SEL-32: not $[[T0]], $[[T0]]
+ ; SEL-32: mtc1 $[[T0:[0-9]+]], $f0
+ ; SEL-32: sel.d $f0, $f14, $f12
+
+ ; CMOV-64: c.ueq.d $f12, $f13
+ ; CMOV-64: movf.d $f13, $f12, $fcc0
+ ; CMOV-64: mov.d $f0, $f13
+
+ ; SEL-64: cmp.ueq.d $f0, $f12, $f13
+ ; SEL-64: mfc1 $[[T0:[0-9]+]], $f0
+ ; SEL-64: not $[[T0]], $[[T0]]
+ ; SEL-64: mtc1 $[[T0:[0-9]+]], $f0
+ ; SEL-64: sel.d $f0, $f13, $f12
+ %s = fcmp one double %x, %y
+ %r = select i1 %s, double %x, double %y
+ ret double %r
+}
diff --git a/test/CodeGen/Mips/llvm-ir/shl.ll b/test/CodeGen/Mips/llvm-ir/shl.ll
new file mode 100644
index 0000000..6640320
--- /dev/null
+++ b/test/CodeGen/Mips/llvm-ir/shl.ll
@@ -0,0 +1,200 @@
+; RUN: llc < %s -march=mips -mcpu=mips2 | FileCheck %s \
+; RUN: -check-prefix=ALL -check-prefix=GP32 \
+; RUN: -check-prefix=M2 -check-prefix=NOT-R2-R6
+; RUN: llc < %s -march=mips -mcpu=mips32 | FileCheck %s \
+; RUN: -check-prefix=ALL -check-prefix=GP32 -check-prefix=NOT-R2-R6 \
+; RUN: -check-prefix=32R1-R5
+; RUN: llc < %s -march=mips -mcpu=mips32r2 | FileCheck %s \
+; RUN: -check-prefix=ALL -check-prefix=GP32 \
+; RUN: -check-prefix=32R1-R5 -check-prefix=R2-R6
+; RUN: llc < %s -march=mips -mcpu=mips32r3 | FileCheck %s \
+; RUN: -check-prefix=ALL -check-prefix=GP32 \
+; RUN: -check-prefix=32R1-R5 -check-prefix=R2-R6
+; RUN: llc < %s -march=mips -mcpu=mips32r5 | FileCheck %s \
+; RUN: -check-prefix=ALL -check-prefix=GP32 \
+; RUN: -check-prefix=32R1-R5 -check-prefix=R2-R6
+; RUN: llc < %s -march=mips -mcpu=mips32r6 | FileCheck %s \
+; RUN: -check-prefix=ALL -check-prefix=GP32 \
+; RUN: -check-prefix=32R6 -check-prefix=R2-R6
+; RUN: llc < %s -march=mips64 -mcpu=mips3 | FileCheck %s \
+; RUN: -check-prefix=ALL -check-prefix=GP64 \
+; RUN: -check-prefix=M3 -check-prefix=NOT-R2-R6
+; RUN: llc < %s -march=mips64 -mcpu=mips4 | FileCheck %s \
+; RUN: -check-prefix=ALL -check-prefix=GP64 \
+; RUN: -check-prefix=GP64-NOT-R6 -check-prefix=NOT-R2-R6
+; RUN: llc < %s -march=mips64 -mcpu=mips64 | FileCheck %s \
+; RUN: -check-prefix=ALL -check-prefix=GP64 \
+; RUN: -check-prefix=GP64-NOT-R6 -check-prefix=NOT-R2-R6
+; RUN: llc < %s -march=mips64 -mcpu=mips64r2 | FileCheck %s \
+; RUN: -check-prefix=ALL -check-prefix=GP64 \
+; RUN:    -check-prefix=GP64-NOT-R6 -check-prefix=R2-R6
+; RUN: llc < %s -march=mips64 -mcpu=mips64r3 | FileCheck %s \
+; RUN: -check-prefix=ALL -check-prefix=GP64 \
+; RUN:    -check-prefix=GP64-NOT-R6 -check-prefix=R2-R6
+; RUN: llc < %s -march=mips64 -mcpu=mips64r5 | FileCheck %s \
+; RUN: -check-prefix=ALL -check-prefix=GP64 \
+; RUN:    -check-prefix=GP64-NOT-R6 -check-prefix=R2-R6
+; RUN: llc < %s -march=mips64 -mcpu=mips64r6 | FileCheck %s \
+; RUN: -check-prefix=ALL -check-prefix=GP64 \
+; RUN: -check-prefix=64R6 -check-prefix=R2-R6
+
+define signext i1 @shl_i1(i1 signext %a, i1 signext %b) {
+entry:
+; ALL-LABEL: shl_i1:
+
+ ; ALL: move $2, $4
+
+ %r = shl i1 %a, %b
+ ret i1 %r
+}
+
+define signext i8 @shl_i8(i8 signext %a, i8 signext %b) {
+entry:
+; ALL-LABEL: shl_i8:
+
+ ; NOT-R2-R6: andi $[[T0:[0-9]+]], $5, 255
+ ; NOT-R2-R6: sllv $[[T1:[0-9]+]], $4, $[[T0]]
+ ; NOT-R2-R6: sll $[[T2:[0-9]+]], $[[T1]], 24
+ ; NOT-R2-R6: sra $2, $[[T2]], 24
+
+ ; R2-R6: andi $[[T0:[0-9]+]], $5, 255
+ ; R2-R6: sllv $[[T1:[0-9]+]], $4, $[[T0]]
+ ; R2-R6: seb $2, $[[T1]]
+
+ %r = shl i8 %a, %b
+ ret i8 %r
+}
+
+define signext i16 @shl_i16(i16 signext %a, i16 signext %b) {
+entry:
+; ALL-LABEL: shl_i16:
+
+ ; NOT-R2-R6: andi $[[T0:[0-9]+]], $5, 65535
+ ; NOT-R2-R6: sllv $[[T1:[0-9]+]], $4, $[[T0]]
+ ; NOT-R2-R6: sll $[[T2:[0-9]+]], $[[T1]], 16
+ ; NOT-R2-R6: sra $2, $[[T2]], 16
+
+ ; R2-R6: andi $[[T0:[0-9]+]], $5, 65535
+ ; R2-R6: sllv $[[T1:[0-9]+]], $4, $[[T0]]
+ ; R2-R6: seh $2, $[[T1]]
+
+ %r = shl i16 %a, %b
+ ret i16 %r
+}
+
+define signext i32 @shl_i32(i32 signext %a, i32 signext %b) {
+entry:
+; ALL-LABEL: shl_i32:
+
+ ; ALL: sllv $2, $4, $5
+
+ %r = shl i32 %a, %b
+ ret i32 %r
+}
+
+define signext i64 @shl_i64(i64 signext %a, i64 signext %b) {
+entry:
+; ALL-LABEL: shl_i64:
+
+ ; M2: sllv $[[T0:[0-9]+]], $5, $7
+ ; M2: andi $[[T1:[0-9]+]], $7, 32
+ ; M2: bnez $[[T1]], $[[BB0:BB[0-9_]+]]
+ ; M2: move $2, $[[T0]]
+ ; M2: sllv $[[T2:[0-9]+]], $4, $7
+ ; M2: not $[[T3:[0-9]+]], $7
+ ; M2: srl $[[T4:[0-9]+]], $5, 1
+ ; M2: srlv $[[T5:[0-9]+]], $[[T4]], $[[T3]]
+ ; M2: or $2, $[[T2]], $[[T5]]
+ ; M2: $[[BB0]]:
+ ; M2: bnez $[[T1]], $[[BB1:BB[0-9_]+]]
+ ; M2: addiu $3, $zero, 0
+ ; M2: move $3, $[[T0]]
+ ; M2: $[[BB1]]:
+ ; M2: jr $ra
+ ; M2: nop
+
+ ; 32R1-R5: sllv $[[T0:[0-9]+]], $4, $7
+ ; 32R1-R5: not $[[T1:[0-9]+]], $7
+ ; 32R1-R5: srl $[[T2:[0-9]+]], $5, 1
+ ; 32R1-R5: srlv $[[T3:[0-9]+]], $[[T2]], $[[T1]]
+ ; 32R1-R5: or $2, $[[T0]], $[[T3]]
+ ; 32R1-R5: sllv $[[T4:[0-9]+]], $5, $7
+ ; 32R1-R5: andi $[[T5:[0-9]+]], $7, 32
+ ; 32R1-R5: movn $2, $[[T4]], $[[T5]]
+ ; 32R1-R5: jr $ra
+ ; 32R1-R5: movn $3, $zero, $[[T5]]
+
+ ; 32R6: sllv $[[T0:[0-9]+]], $4, $7
+ ; 32R6: not $[[T1:[0-9]+]], $7
+ ; 32R6: srl $[[T2:[0-9]+]], $5, 1
+ ; 32R6: srlv $[[T3:[0-9]+]], $[[T2]], $[[T1]]
+ ; 32R6: or $[[T4:[0-9]+]], $[[T0]], $[[T3]]
+ ; 32R6: andi $[[T5:[0-9]+]], $7, 32
+ ; 32R6: seleqz $[[T6:[0-9]+]], $[[T4]], $[[T5]]
+ ; 32R6: sllv $[[T7:[0-9]+]], $5, $7
+ ; 32R6: selnez $[[T8:[0-9]+]], $[[T7]], $[[T5]]
+ ; 32R6: or $2, $[[T8]], $[[T6]]
+ ; 32R6: jr $ra
+ ; 32R6: seleqz $3, $[[T7]], $[[T5]]
+
+ ; GP64: sll $[[T0:[0-9]+]], $5, 0
+ ; GP64: dsllv $2, $4, $[[T0]]
+
+ %r = shl i64 %a, %b
+ ret i64 %r
+}
+
+define signext i128 @shl_i128(i128 signext %a, i128 signext %b) {
+entry:
+; ALL-LABEL: shl_i128:
+
+ ; GP32: lw $25, %call16(__ashlti3)($gp)
+
+ ; M3: sll $[[T0:[0-9]+]], $7, 0
+ ; M3: dsllv $[[T1:[0-9]+]], $5, $[[T0]]
+ ; M3: andi $[[T2:[0-9]+]], $[[T0]], 32
+ ; M3: bnez $[[T3:[0-9]+]], $[[BB0:BB[0-9_]+]]
+ ; M3: move $2, $[[T1]]
+ ; M3: dsllv $[[T4:[0-9]+]], $4, $[[T0]]
+ ; M3: dsrl $[[T5:[0-9]+]], $5, 1
+ ; M3: not $[[T6:[0-9]+]], $[[T0]]
+ ; M3: dsrlv $[[T7:[0-9]+]], $[[T5]], $[[T6]]
+ ; M3: or $2, $[[T4]], $[[T7]]
+ ; M3: $[[BB0]]:
+ ; M3: bnez $[[T3]], $[[BB1:BB[0-9_]+]]
+ ; M3: daddiu $3, $zero, 0
+ ; M3: move $3, $[[T1]]
+ ; M3: $[[BB1]]:
+ ; M3: jr $ra
+ ; M3: nop
+
+ ; GP64-NOT-R6: sll $[[T0:[0-9]+]], $7, 0
+ ; GP64-NOT-R6: dsllv $[[T1:[0-9]+]], $4, $[[T0]]
+ ; GP64-NOT-R6: dsrl $[[T2:[0-9]+]], $5, 1
+ ; GP64-NOT-R6: not $[[T3:[0-9]+]], $[[T0]]
+ ; GP64-NOT-R6: dsrlv $[[T4:[0-9]+]], $[[T2]], $[[T3]]
+ ; GP64-NOT-R6: or $2, $[[T1]], $[[T4]]
+ ; GP64-NOT-R6: dsllv $3, $5, $[[T0]]
+ ; GP64-NOT-R6: andi $[[T5:[0-9]+]], $[[T0]], 32
+ ; GP64-NOT-R6: movn $2, $3, $[[T5]]
+ ; GP64-NOT-R6: jr $ra
+ ; GP64-NOT-R6: movn $3, $zero, $[[T5]]
+
+ ; 64R6: sll $[[T0:[0-9]+]], $7, 0
+ ; 64R6: dsllv $[[T1:[0-9]+]], $4, $[[T0]]
+ ; 64R6: dsrl $[[T2:[0-9]+]], $5, 1
+ ; 64R6: not $[[T3:[0-9]+]], $[[T0]]
+ ; 64R6: dsrlv $[[T4:[0-9]+]], $[[T2]], $[[T3]]
+ ; 64R6: or $[[T5:[0-9]+]], $[[T1]], $[[T4]]
+ ; 64R6: andi $[[T6:[0-9]+]], $[[T0]], 32
+ ; 64R6: sll $[[T7:[0-9]+]], $[[T6]], 0
+ ; 64R6: seleqz $[[T8:[0-9]+]], $[[T5]], $[[T7]]
+ ; 64R6: dsllv $[[T9:[0-9]+]], $5, $[[T0]]
+ ; 64R6: selnez $[[T10:[0-9]+]], $[[T9]], $[[T7]]
+ ; 64R6: or $2, $[[T10]], $[[T8]]
+ ; 64R6: jr $ra
+ ; 64R6: seleqz $3, $[[T9]], $[[T7]]
+
+ %r = shl i128 %a, %b
+ ret i128 %r
+}
diff --git a/test/CodeGen/Mips/llvm-ir/srem.ll b/test/CodeGen/Mips/llvm-ir/srem.ll
new file mode 100644
index 0000000..ceb53ee
--- /dev/null
+++ b/test/CodeGen/Mips/llvm-ir/srem.ll
@@ -0,0 +1,139 @@
+; RUN: llc < %s -march=mips -mcpu=mips2 | FileCheck %s \
+; RUN: -check-prefix=GP32 -check-prefix=NOT-R6 -check-prefix=NOT-R2-R6
+; RUN: llc < %s -march=mips -mcpu=mips32 | FileCheck %s \
+; RUN: -check-prefix=GP32 -check-prefix=NOT-R6 -check-prefix=NOT-R2-R6
+; RUN: llc < %s -march=mips -mcpu=mips32r2 | FileCheck %s -check-prefix=GP32 \
+; RUN: -check-prefix=R2-R5 -check-prefix=R2-R6 -check-prefix=NOT-R6
+; RUN: llc < %s -march=mips -mcpu=mips32r3 | FileCheck %s -check-prefix=GP32 \
+; RUN: -check-prefix=R2-R5 -check-prefix=R2-R6 -check-prefix=NOT-R6
+; RUN: llc < %s -march=mips -mcpu=mips32r5 | FileCheck %s -check-prefix=GP32 \
+; RUN: -check-prefix=R2-R5 -check-prefix=R2-R6 -check-prefix=NOT-R6
+; RUN: llc < %s -march=mips -mcpu=mips32r6 | FileCheck %s \
+; RUN: -check-prefix=GP32 -check-prefix=R6 -check-prefix=R2-R6
+; RUN: llc < %s -march=mips64 -mcpu=mips3 | FileCheck %s \
+; RUN: -check-prefix=GP64-NOT-R6 -check-prefix=NOT-R6 -check-prefix=NOT-R2-R6
+; RUN: llc < %s -march=mips64 -mcpu=mips4 | FileCheck %s \
+; RUN: -check-prefix=GP64-NOT-R6 -check-prefix=NOT-R6 -check-prefix=NOT-R2-R6
+; RUN: llc < %s -march=mips64 -mcpu=mips64 | FileCheck %s \
+; RUN: -check-prefix=GP64-NOT-R6 -check-prefix=NOT-R6 -check-prefix=NOT-R2-R6
+; RUN: llc < %s -march=mips64 -mcpu=mips64r2 | FileCheck %s \
+; RUN: -check-prefix=R2-R5 -check-prefix=R2-R6 \
+; RUN: -check-prefix=GP64-NOT-R6 -check-prefix=NOT-R6
+; RUN: llc < %s -march=mips64 -mcpu=mips64r3 | FileCheck %s \
+; RUN: -check-prefix=R2-R5 -check-prefix=R2-R6 \
+; RUN: -check-prefix=GP64-NOT-R6 -check-prefix=NOT-R6
+; RUN: llc < %s -march=mips64 -mcpu=mips64r5 | FileCheck %s \
+; RUN: -check-prefix=R2-R5 -check-prefix=R2-R6 \
+; RUN: -check-prefix=GP64-NOT-R6 -check-prefix=NOT-R6
+; RUN: llc < %s -march=mips64 -mcpu=mips64r6 | FileCheck %s \
+; RUN: -check-prefix=64R6 -check-prefix=R6 -check-prefix=R2-R6
+
+define signext i1 @srem_i1(i1 signext %a, i1 signext %b) {
+entry:
+; ALL-LABEL: srem_i1:
+
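+ ; The result is returned signext, so the single remainder bit is sign-extended
+ ; with an sll/sra pair by 31.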
+ ; NOT-R6: div $zero, $4, $5
+ ; NOT-R6: teq $5, $zero, 7
+ ; NOT-R6: mfhi $[[T0:[0-9]+]]
+ ; NOT-R6: sll $[[T1:[0-9]+]], $[[T0]], 31
+ ; NOT-R6: sra $2, $[[T1]], 31
+
+ ; R6: mod $[[T0:[0-9]+]], $4, $5
+ ; R6: teq $5, $zero, 7
+ ; R6: sll $[[T3:[0-9]+]], $[[T0]], 31
+ ; R6: sra $2, $[[T3]], 31
+
+ %r = srem i1 %a, %b
+ ret i1 %r
+}
+
+define signext i8 @srem_i8(i8 signext %a, i8 signext %b) {
+entry:
+; ALL-LABEL: srem_i8:
+
+ ; NOT-R2-R6: div $zero, $4, $5
+ ; NOT-R2-R6: teq $5, $zero, 7
+ ; NOT-R2-R6: mfhi $[[T0:[0-9]+]]
+ ; NOT-R2-R6: sll $[[T1:[0-9]+]], $[[T0]], 24
+ ; NOT-R2-R6: sra $2, $[[T1]], 24
+
+ ; R2-R5: div $zero, $4, $5
+ ; R2-R5: teq $5, $zero, 7
+ ; R2-R5: mfhi $[[T0:[0-9]+]]
+ ; R2-R5: seb $2, $[[T0]]
+
+ ; R6: mod $[[T0:[0-9]+]], $4, $5
+ ; R6: teq $5, $zero, 7
+ ; R6: seb $2, $[[T0]]
+
+ %r = srem i8 %a, %b
+ ret i8 %r
+}
+
+define signext i16 @srem_i16(i16 signext %a, i16 signext %b) {
+entry:
+; ALL-LABEL: srem_i16:
+
+ ; NOT-R2-R6: div $zero, $4, $5
+ ; NOT-R2-R6: teq $5, $zero, 7
+ ; NOT-R2-R6: mfhi $[[T0:[0-9]+]]
+ ; NOT-R2-R6: sll $[[T1:[0-9]+]], $[[T0]], 16
+ ; NOT-R2-R6: sra $2, $[[T1]], 16
+
+ ; R2-R5: div $zero, $4, $5
+ ; R2-R5: teq $5, $zero, 7
+ ; R2-R5: mfhi $[[T0:[0-9]+]]
+ ; R2-R5: seh $2, $[[T0]]
+
+ ; R6: mod $[[T0:[0-9]+]], $4, $5
+ ; R6: teq $5, $zero, 7
+ ; R6: seh $2, $[[T0]]
+
+ %r = srem i16 %a, %b
+ ret i16 %r
+}
+
+define signext i32 @srem_i32(i32 signext %a, i32 signext %b) {
+entry:
+; ALL-LABEL: srem_i32:
+
+ ; NOT-R6: div $zero, $4, $5
+ ; NOT-R6: teq $5, $zero, 7
+ ; NOT-R6: mfhi $2
+
+ ; R6: mod $2, $4, $5
+ ; R6: teq $5, $zero, 7
+
+ %r = srem i32 %a, %b
+ ret i32 %r
+}
+
+define signext i64 @srem_i64(i64 signext %a, i64 signext %b) {
+entry:
+; ALL-LABEL: srem_i64:
+
+ ; GP32: lw $25, %call16(__moddi3)($gp)
+
+ ; GP64-NOT-R6: ddiv $zero, $4, $5
+ ; GP64-NOT-R6: teq $5, $zero, 7
+ ; GP64-NOT-R6: mfhi $2
+
+ ; 64R6: dmod $2, $4, $5
+ ; 64R6: teq $5, $zero, 7
+
+ %r = srem i64 %a, %b
+ ret i64 %r
+}
+
+define signext i128 @srem_i128(i128 signext %a, i128 signext %b) {
+entry:
+; ALL-LABEL: srem_i128:
+
+ ; GP32: lw $25, %call16(__modti3)($gp)
+
+ ; GP64-NOT-R6: ld $25, %call16(__modti3)($gp)
+ ; 64R6: ld $25, %call16(__modti3)($gp)
+
+ %r = srem i128 %a, %b
+ ret i128 %r
+}
diff --git a/test/CodeGen/Mips/llvm-ir/sub.ll b/test/CodeGen/Mips/llvm-ir/sub.ll
new file mode 100644
index 0000000..1649758
--- /dev/null
+++ b/test/CodeGen/Mips/llvm-ir/sub.ll
@@ -0,0 +1,122 @@
+; RUN: llc < %s -march=mips -mcpu=mips2 | FileCheck %s \
+; RUN: -check-prefix=ALL -check-prefix=NOT-R2-R6 -check-prefix=GP32
+; RUN: llc < %s -march=mips -mcpu=mips32 | FileCheck %s \
+; RUN: -check-prefix=ALL -check-prefix=NOT-R2-R6 -check-prefix=GP32
+; RUN: llc < %s -march=mips -mcpu=mips32r2 | FileCheck %s \
+; RUN: -check-prefix=ALL -check-prefix=R2-R6 -check-prefix=GP32
+; RUN: llc < %s -march=mips -mcpu=mips32r3 | FileCheck %s \
+; RUN: -check-prefix=ALL -check-prefix=R2-R6 -check-prefix=GP32
+; RUN: llc < %s -march=mips -mcpu=mips32r5 | FileCheck %s \
+; RUN: -check-prefix=ALL -check-prefix=R2-R6 -check-prefix=GP32
+; RUN: llc < %s -march=mips -mcpu=mips32r6 | FileCheck %s \
+; RUN: -check-prefix=ALL -check-prefix=R2-R6 -check-prefix=GP32
+; RUN: llc < %s -march=mips64 -mcpu=mips3 | FileCheck %s \
+; RUN: -check-prefix=ALL -check-prefix=NOT-R2-R6 -check-prefix=GP64
+; RUN: llc < %s -march=mips64 -mcpu=mips4 | FileCheck %s \
+; RUN: -check-prefix=ALL -check-prefix=NOT-R2-R6 -check-prefix=GP64
+; RUN: llc < %s -march=mips64 -mcpu=mips64 | FileCheck %s \
+; RUN: -check-prefix=ALL -check-prefix=NOT-R2-R6 -check-prefix=GP64
+; RUN: llc < %s -march=mips64 -mcpu=mips64r2 | FileCheck %s \
+; RUN: -check-prefix=ALL -check-prefix=R2-R6 -check-prefix=GP64
+; RUN: llc < %s -march=mips64 -mcpu=mips64r3 | FileCheck %s \
+; RUN: -check-prefix=ALL -check-prefix=R2-R6 -check-prefix=GP64
+; RUN: llc < %s -march=mips64 -mcpu=mips64r5 | FileCheck %s \
+; RUN: -check-prefix=ALL -check-prefix=R2-R6 -check-prefix=GP64
+; RUN: llc < %s -march=mips64 -mcpu=mips64r6 | FileCheck %s \
+; RUN: -check-prefix=ALL -check-prefix=R2-R6 -check-prefix=GP64
+
+define signext i1 @sub_i1(i1 signext %a, i1 signext %b) {
+entry:
+; ALL-LABEL: sub_i1:
+
+ ; ALL: subu $[[T0:[0-9]+]], $4, $5
+ ; ALL: sll $[[T0]], $[[T0]], 31
+ ; ALL: sra $2, $[[T0]], 31
+
+ %r = sub i1 %a, %b
+ ret i1 %r
+}
+
+define signext i8 @sub_i8(i8 signext %a, i8 signext %b) {
+entry:
+; ALL-LABEL: sub_i8:
+
+ ; NOT-R2-R6: subu $[[T0:[0-9]+]], $4, $5
+ ; NOT-R2-R6: sll $[[T0]], $[[T0]], 24
+ ; NOT-R2-R6: sra $2, $[[T0]], 24
+
+ ; R2-R6: subu $[[T0:[0-9]+]], $4, $5
+ ; R2-R6: seb $2, $[[T0]]
+
+ %r = sub i8 %a, %b
+ ret i8 %r
+}
+
+define signext i16 @sub_i16(i16 signext %a, i16 signext %b) {
+entry:
+; ALL-LABEL: sub_i16:
+
+ ; NOT-R2-R6: subu $[[T0:[0-9]+]], $4, $5
+ ; NOT-R2-R6: sll $[[T0]], $[[T0]], 16
+ ; NOT-R2-R6: sra $2, $[[T0]], 16
+
+ ; R2-R6: subu $[[T0:[0-9]+]], $4, $5
+ ; R2-R6: seh $2, $[[T0]]
+
+ %r = sub i16 %a, %b
+ ret i16 %r
+}
+
+define signext i32 @sub_i32(i32 signext %a, i32 signext %b) {
+entry:
+; ALL-LABEL: sub_i32:
+
+ ; ALL: subu $2, $4, $5
+
+ %r = sub i32 %a, %b
+ ret i32 %r
+}
+
+define signext i64 @sub_i64(i64 signext %a, i64 signext %b) {
+entry:
+; ALL-LABEL: sub_i64:
+
+ ; GP32: subu $3, $5, $7
+ ; GP32: sltu $[[T0:[0-9]+]], $5, $7
+ ; GP32: addu $[[T1:[0-9]+]], $[[T0]], $6
+ ; GP32: subu $2, $4, $[[T1]]
+
+ ; GP64: dsubu $2, $4, $5
+
+ %r = sub i64 %a, %b
+ ret i64 %r
+}
+
+define signext i128 @sub_i128(i128 signext %a, i128 signext %b) {
+entry:
+; ALL-LABEL: sub_i128:
+
+ ; GP32: lw $[[T0:[0-9]+]], 20($sp)
+ ; GP32: sltu $[[T1:[0-9]+]], $5, $[[T0]]
+ ; GP32: lw $[[T2:[0-9]+]], 16($sp)
+ ; GP32: addu $[[T3:[0-9]+]], $[[T1]], $[[T2]]
+ ; GP32: lw $[[T4:[0-9]+]], 24($sp)
+ ; GP32: lw $[[T5:[0-9]+]], 28($sp)
+ ; GP32: subu $[[T6:[0-9]+]], $7, $[[T5]]
+ ; GP32: subu $2, $4, $[[T3]]
+ ; GP32: sltu $[[T8:[0-9]+]], $6, $[[T4]]
+ ; GP32: addu $[[T9:[0-9]+]], $[[T8]], $[[T0]]
+ ; GP32: subu $3, $5, $[[T9]]
+ ; GP32: sltu $[[T10:[0-9]+]], $7, $[[T5]]
+ ; GP32: addu $[[T11:[0-9]+]], $[[T10]], $[[T4]]
+ ; GP32: subu $4, $6, $[[T11]]
+ ; GP32: move $5, $[[T6]]
+
+ ; GP64: dsubu $3, $5, $7
+ ; GP64: sltu $[[T0:[0-9]+]], $5, $7
+ ; GP64: daddu $[[T1:[0-9]+]], $[[T0]], $6
+ ; GP64: dsubu $2, $4, $[[T1]]
+
+ %r = sub i128 %a, %b
+ ret i128 %r
+}
diff --git a/test/CodeGen/Mips/llvm-ir/udiv.ll b/test/CodeGen/Mips/llvm-ir/udiv.ll
new file mode 100644
index 0000000..a7cafe5
--- /dev/null
+++ b/test/CodeGen/Mips/llvm-ir/udiv.ll
@@ -0,0 +1,116 @@
+; RUN: llc < %s -march=mips -mcpu=mips2 | FileCheck %s \
+; RUN: -check-prefix=NOT-R6 -check-prefix=GP32
+; RUN: llc < %s -march=mips -mcpu=mips32 | FileCheck %s \
+; RUN: -check-prefix=NOT-R6 -check-prefix=GP32
+; RUN: llc < %s -march=mips -mcpu=mips32r2 | FileCheck %s \
+; RUN: -check-prefix=NOT-R6 -check-prefix=GP32
+; RUN: llc < %s -march=mips -mcpu=mips32r3 | FileCheck %s \
+; RUN: -check-prefix=NOT-R6 -check-prefix=GP32
+; RUN: llc < %s -march=mips -mcpu=mips32r5 | FileCheck %s \
+; RUN: -check-prefix=NOT-R6 -check-prefix=GP32
+; RUN: llc < %s -march=mips -mcpu=mips32r6 | FileCheck %s \
+; RUN: -check-prefix=R6 -check-prefix=GP32
+; RUN: llc < %s -march=mips64 -mcpu=mips3 | FileCheck %s \
+; RUN: -check-prefix=NOT-R6 -check-prefix=GP64-NOT-R6
+; RUN: llc < %s -march=mips64 -mcpu=mips4 | FileCheck %s \
+; RUN: -check-prefix=NOT-R6 -check-prefix=GP64-NOT-R6
+; RUN: llc < %s -march=mips64 -mcpu=mips64 | FileCheck %s \
+; RUN: -check-prefix=NOT-R6 -check-prefix=GP64-NOT-R6
+; RUN: llc < %s -march=mips64 -mcpu=mips64r2 | FileCheck %s \
+; RUN: -check-prefix=NOT-R6 -check-prefix=GP64-NOT-R6
+; RUN: llc < %s -march=mips64 -mcpu=mips64r3 | FileCheck %s \
+; RUN: -check-prefix=NOT-R6 -check-prefix=GP64-NOT-R6
+; RUN: llc < %s -march=mips64 -mcpu=mips64r5 | FileCheck %s \
+; RUN: -check-prefix=NOT-R6 -check-prefix=GP64-NOT-R6
+; RUN: llc < %s -march=mips64 -mcpu=mips64r6 | FileCheck %s \
+; RUN: -check-prefix=R6 -check-prefix=64R6
+
+define zeroext i1 @udiv_i1(i1 zeroext %a, i1 zeroext %b) {
+entry:
+; ALL-LABEL: udiv_i1:
+
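+ ; The operands are zeroext here, so no masking is needed before the divide.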
+ ; NOT-R6: divu $zero, $4, $5
+ ; NOT-R6: teq $5, $zero, 7
+ ; NOT-R6: mflo $2
+
+ ; R6: divu $2, $4, $5
+ ; R6: teq $5, $zero, 7
+
+ %r = udiv i1 %a, %b
+ ret i1 %r
+}
+
+define zeroext i8 @udiv_i8(i8 zeroext %a, i8 zeroext %b) {
+entry:
+; ALL-LABEL: udiv_i8:
+
+ ; NOT-R6: divu $zero, $4, $5
+ ; NOT-R6: teq $5, $zero, 7
+ ; NOT-R6: mflo $2
+
+ ; R6: divu $2, $4, $5
+ ; R6: teq $5, $zero, 7
+
+ %r = udiv i8 %a, %b
+ ret i8 %r
+}
+
+define zeroext i16 @udiv_i16(i16 zeroext %a, i16 zeroext %b) {
+entry:
+; ALL-LABEL: udiv_i16:
+
+ ; NOT-R6: divu $zero, $4, $5
+ ; NOT-R6: teq $5, $zero, 7
+ ; NOT-R6: mflo $2
+
+ ; R6: divu $2, $4, $5
+ ; R6: teq $5, $zero, 7
+
+ %r = udiv i16 %a, %b
+ ret i16 %r
+}
+
+define signext i32 @udiv_i32(i32 signext %a, i32 signext %b) {
+entry:
+; ALL-LABEL: udiv_i32:
+
+ ; NOT-R6: divu $zero, $4, $5
+ ; NOT-R6: teq $5, $zero, 7
+ ; NOT-R6: mflo $2
+
+ ; R6: divu $2, $4, $5
+ ; R6: teq $5, $zero, 7
+
+ %r = udiv i32 %a, %b
+ ret i32 %r
+}
+
+define signext i64 @udiv_i64(i64 signext %a, i64 signext %b) {
+entry:
+; ALL-LABEL: udiv_i64:
+
+ ; GP32: lw $25, %call16(__udivdi3)($gp)
+
+ ; GP64-NOT-R6: ddivu $zero, $4, $5
+ ; GP64-NOT-R6: teq $5, $zero, 7
+ ; GP64-NOT-R6: mflo $2
+
+ ; 64R6: ddivu $2, $4, $5
+ ; 64R6: teq $5, $zero, 7
+
+ %r = udiv i64 %a, %b
+ ret i64 %r
+}
+
+define signext i128 @udiv_i128(i128 signext %a, i128 signext %b) {
+entry:
+; ALL-LABEL: udiv_i128:
+
+ ; GP32: lw $25, %call16(__udivti3)($gp)
+
+ ; GP64-NOT-R6: ld $25, %call16(__udivti3)($gp)
+ ; 64R6: ld $25, %call16(__udivti3)($gp)
+
+ %r = udiv i128 %a, %b
+ ret i128 %r
+}
diff --git a/test/CodeGen/Mips/llvm-ir/urem.ll b/test/CodeGen/Mips/llvm-ir/urem.ll
new file mode 100644
index 0000000..d5a231c
--- /dev/null
+++ b/test/CodeGen/Mips/llvm-ir/urem.ll
@@ -0,0 +1,155 @@
+; RUN: llc < %s -march=mips -mcpu=mips2 | FileCheck %s \
+; RUN: -check-prefix=GP32 -check-prefix=NOT-R6 -check-prefix=NOT-R2-R6
+; RUN: llc < %s -march=mips -mcpu=mips32 | FileCheck %s \
+; RUN: -check-prefix=GP32 -check-prefix=NOT-R6 -check-prefix=NOT-R2-R6
+; RUN: llc < %s -march=mips -mcpu=mips32r2 | FileCheck %s -check-prefix=GP32 \
+; RUN: -check-prefix=R2-R5 -check-prefix=R2-R6 -check-prefix=NOT-R6
+; RUN: llc < %s -march=mips -mcpu=mips32r3 | FileCheck %s -check-prefix=GP32 \
+; RUN: -check-prefix=R2-R5 -check-prefix=R2-R6 -check-prefix=NOT-R6
+; RUN: llc < %s -march=mips -mcpu=mips32r5 | FileCheck %s -check-prefix=GP32 \
+; RUN: -check-prefix=R2-R5 -check-prefix=R2-R6 -check-prefix=NOT-R6
+; RUN: llc < %s -march=mips -mcpu=mips32r6 | FileCheck %s \
+; RUN: -check-prefix=GP32 -check-prefix=R6 -check-prefix=R2-R6
+; RUN: llc < %s -march=mips64 -mcpu=mips3 | FileCheck %s \
+; RUN: -check-prefix=GP64-NOT-R6 -check-prefix=NOT-R6 -check-prefix=NOT-R2-R6
+; RUN: llc < %s -march=mips64 -mcpu=mips4 | FileCheck %s \
+; RUN: -check-prefix=GP64-NOT-R6 -check-prefix=NOT-R6 -check-prefix=NOT-R2-R6
+; RUN: llc < %s -march=mips64 -mcpu=mips64 | FileCheck %s \
+; RUN: -check-prefix=GP64-NOT-R6 -check-prefix=NOT-R6 -check-prefix=NOT-R2-R6
+; RUN: llc < %s -march=mips64 -mcpu=mips64r2 | FileCheck %s \
+; RUN: -check-prefix=R2-R5 -check-prefix=R2-R6 \
+; RUN: -check-prefix=GP64-NOT-R6 -check-prefix=NOT-R6
+; RUN: llc < %s -march=mips64 -mcpu=mips64r3 | FileCheck %s \
+; RUN: -check-prefix=R2-R5 -check-prefix=R2-R6 \
+; RUN: -check-prefix=GP64-NOT-R6 -check-prefix=NOT-R6
+; RUN: llc < %s -march=mips64 -mcpu=mips64r5 | FileCheck %s \
+; RUN: -check-prefix=R2-R5 -check-prefix=R2-R6 \
+; RUN: -check-prefix=GP64-NOT-R6 -check-prefix=NOT-R6
+; RUN: llc < %s -march=mips64 -mcpu=mips64r6 | FileCheck %s \
+; RUN: -check-prefix=64R6 -check-prefix=R6 -check-prefix=R2-R6
+
+define signext i1 @urem_i1(i1 signext %a, i1 signext %b) {
+entry:
+; ALL-LABEL: urem_i1:
+
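+ ; The operands arrive sign-extended (an i1 is 0 or -1), so they are masked
+ ; with andi 1 to recover the unsigned values before the divide.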
+ ; NOT-R6: andi $[[T0:[0-9]+]], $5, 1
+ ; NOT-R6: andi $[[T1:[0-9]+]], $4, 1
+ ; NOT-R6: divu $zero, $[[T1]], $[[T0]]
+ ; NOT-R6: teq $[[T0]], $zero, 7
+ ; NOT-R6: mfhi $[[T2:[0-9]+]]
+ ; NOT-R6: sll $[[T3:[0-9]+]], $[[T2]], 31
+ ; NOT-R6: sra $2, $[[T3]], 31
+
+ ; R6: andi $[[T0:[0-9]+]], $5, 1
+ ; R6: andi $[[T1:[0-9]+]], $4, 1
+ ; R6: modu $[[T2:[0-9]+]], $[[T1]], $[[T0]]
+ ; R6: teq $[[T0]], $zero, 7
+ ; R6: sll $[[T3:[0-9]+]], $[[T2]], 31
+ ; R6: sra $2, $[[T3]], 31
+
+ %r = urem i1 %a, %b
+ ret i1 %r
+}
+
+define signext i8 @urem_i8(i8 signext %a, i8 signext %b) {
+entry:
+; ALL-LABEL: urem_i8:
+
+ ; NOT-R2-R6: andi $[[T0:[0-9]+]], $5, 255
+ ; NOT-R2-R6: andi $[[T1:[0-9]+]], $4, 255
+ ; NOT-R2-R6: divu $zero, $[[T1]], $[[T0]]
+ ; NOT-R2-R6: teq $[[T0]], $zero, 7
+ ; NOT-R2-R6: mfhi $[[T2:[0-9]+]]
+ ; NOT-R2-R6: sll $[[T3:[0-9]+]], $[[T2]], 24
+ ; NOT-R2-R6: sra $2, $[[T3]], 24
+
+ ; R2-R5: andi $[[T0:[0-9]+]], $5, 255
+ ; R2-R5: andi $[[T1:[0-9]+]], $4, 255
+ ; R2-R5: divu $zero, $[[T1]], $[[T0]]
+ ; R2-R5: teq $[[T0]], $zero, 7
+ ; R2-R5: mfhi $[[T2:[0-9]+]]
+ ; R2-R5: seb $2, $[[T2]]
+
+ ; R6: andi $[[T0:[0-9]+]], $5, 255
+ ; R6: andi $[[T1:[0-9]+]], $4, 255
+ ; R6: modu $[[T2:[0-9]+]], $[[T1]], $[[T0]]
+ ; R6: teq $[[T0]], $zero, 7
+ ; R6: seb $2, $[[T2]]
+
+ %r = urem i8 %a, %b
+ ret i8 %r
+}
+
+define signext i16 @urem_i16(i16 signext %a, i16 signext %b) {
+entry:
+; ALL-LABEL: urem_i16:
+
+ ; NOT-R2-R6: andi $[[T0:[0-9]+]], $5, 65535
+ ; NOT-R2-R6: andi $[[T1:[0-9]+]], $4, 65535
+ ; NOT-R2-R6: divu $zero, $[[T1]], $[[T0]]
+ ; NOT-R2-R6: teq $[[T0]], $zero, 7
+ ; NOT-R2-R6: mfhi $[[T2:[0-9]+]]
+ ; NOT-R2-R6: sll $[[T3:[0-9]+]], $[[T2]], 16
+ ; NOT-R2-R6: sra $2, $[[T3]], 16
+
+ ; R2-R5: andi $[[T0:[0-9]+]], $5, 65535
+ ; R2-R5: andi $[[T1:[0-9]+]], $4, 65535
+ ; R2-R5: divu $zero, $[[T1]], $[[T0]]
+ ; R2-R5: teq $[[T0]], $zero, 7
+ ; R2-R5: mfhi $[[T2:[0-9]+]]
+ ; R2-R5: seh $2, $[[T2]]
+
+ ; R6: andi $[[T0:[0-9]+]], $5, 65535
+ ; R6: andi $[[T1:[0-9]+]], $4, 65535
+ ; R6: modu $[[T2:[0-9]+]], $[[T1]], $[[T0]]
+ ; R6: teq $[[T0]], $zero, 7
+ ; R6: seh $2, $[[T2]]
+
+ %r = urem i16 %a, %b
+ ret i16 %r
+}
+
+define signext i32 @urem_i32(i32 signext %a, i32 signext %b) {
+entry:
+; ALL-LABEL: urem_i32:
+
+ ; NOT-R6: divu $zero, $4, $5
+ ; NOT-R6: teq $5, $zero, 7
+ ; NOT-R6: mfhi $2
+
+ ; R6: modu $2, $4, $5
+ ; R6: teq $5, $zero, 7
+
+ %r = urem i32 %a, %b
+ ret i32 %r
+}
+
+define signext i64 @urem_i64(i64 signext %a, i64 signext %b) {
+entry:
+; ALL-LABEL: urem_i64:
+
+ ; GP32: lw $25, %call16(__umoddi3)($gp)
+
+ ; GP64-NOT-R6: ddivu $zero, $4, $5
+ ; GP64-NOT-R6: teq $5, $zero, 7
+ ; GP64-NOT-R6: mfhi $2
+
+ ; 64R6: dmodu $2, $4, $5
+ ; 64R6: teq $5, $zero, 7
+
+ %r = urem i64 %a, %b
+ ret i64 %r
+}
+
+define signext i128 @urem_i128(i128 signext %a, i128 signext %b) {
+entry:
+; ALL-LABEL: urem_i128:
+
+ ; GP32: lw $25, %call16(__umodti3)($gp)
+
+ ; GP64-NOT-R6: ld $25, %call16(__umodti3)($gp)
+ ; 64R6: ld $25, %call16(__umodti3)($gp)
+
+ %r = urem i128 %a, %b
+ ret i128 %r
+}
diff --git a/test/CodeGen/Mips/llvm-ir/xor.ll b/test/CodeGen/Mips/llvm-ir/xor.ll
new file mode 100644
index 0000000..89af9998
--- /dev/null
+++ b/test/CodeGen/Mips/llvm-ir/xor.ll
@@ -0,0 +1,99 @@
+; RUN: llc < %s -march=mips -mcpu=mips2 | FileCheck %s \
+; RUN: -check-prefix=ALL -check-prefix=GP32
+; RUN: llc < %s -march=mips -mcpu=mips32 | FileCheck %s \
+; RUN: -check-prefix=ALL -check-prefix=GP32
+; RUN: llc < %s -march=mips -mcpu=mips32r2 | FileCheck %s \
+; RUN: -check-prefix=ALL -check-prefix=GP32
+; RUN: llc < %s -march=mips -mcpu=mips32r3 | FileCheck %s \
+; RUN: -check-prefix=ALL -check-prefix=GP32
+; RUN: llc < %s -march=mips -mcpu=mips32r5 | FileCheck %s \
+; RUN: -check-prefix=ALL -check-prefix=GP32
+; RUN: llc < %s -march=mips -mcpu=mips32r6 | FileCheck %s \
+; RUN: -check-prefix=ALL -check-prefix=GP32
+; RUN: llc < %s -march=mips64 -mcpu=mips3 | FileCheck %s \
+; RUN: -check-prefix=ALL -check-prefix=GP64
+; RUN: llc < %s -march=mips64 -mcpu=mips4 | FileCheck %s \
+; RUN: -check-prefix=ALL -check-prefix=GP64
+; RUN: llc < %s -march=mips64 -mcpu=mips64 | FileCheck %s \
+; RUN: -check-prefix=ALL -check-prefix=GP64
+; RUN: llc < %s -march=mips64 -mcpu=mips64r2 | FileCheck %s \
+; RUN: -check-prefix=ALL -check-prefix=GP64
+; RUN: llc < %s -march=mips64 -mcpu=mips64r3 | FileCheck %s \
+; RUN: -check-prefix=ALL -check-prefix=GP64
+; RUN: llc < %s -march=mips64 -mcpu=mips64r5 | FileCheck %s \
+; RUN: -check-prefix=ALL -check-prefix=GP64
+; RUN: llc < %s -march=mips64 -mcpu=mips64r6 | FileCheck %s \
+; RUN: -check-prefix=ALL -check-prefix=GP64
+
+define signext i1 @xor_i1(i1 signext %a, i1 signext %b) {
+entry:
+; ALL-LABEL: xor_i1:
+
+ ; ALL: xor $2, $4, $5
+
+ %r = xor i1 %a, %b
+ ret i1 %r
+}
+
+define signext i8 @xor_i8(i8 signext %a, i8 signext %b) {
+entry:
+; ALL-LABEL: xor_i8:
+
+ ; ALL: xor $2, $4, $5
+
+ %r = xor i8 %a, %b
+ ret i8 %r
+}
+
+define signext i16 @xor_i16(i16 signext %a, i16 signext %b) {
+entry:
+; ALL-LABEL: xor_i16:
+
+ ; ALL: xor $2, $4, $5
+
+ %r = xor i16 %a, %b
+ ret i16 %r
+}
+
+define signext i32 @xor_i32(i32 signext %a, i32 signext %b) {
+entry:
+; ALL-LABEL: xor_i32:
+
+ ; ALL: xor $2, $4, $5
+
+ %r = xor i32 %a, %b
+ ret i32 %r
+}
+
+define signext i64 @xor_i64(i64 signext %a, i64 signext %b) {
+entry:
+; ALL-LABEL: xor_i64:
+
+ ; GP32: xor $2, $4, $6
+ ; GP32: xor $3, $5, $7
+
+ ; GP64: xor $2, $4, $5
+
+ %r = xor i64 %a, %b
+ ret i64 %r
+}
+
+define signext i128 @xor_i128(i128 signext %a, i128 signext %b) {
+entry:
+; ALL-LABEL: xor_i128:
+
+ ; GP32: lw $[[T0:[0-9]+]], 24($sp)
+ ; GP32: lw $[[T1:[0-9]+]], 20($sp)
+ ; GP32: lw $[[T2:[0-9]+]], 16($sp)
+ ; GP32: xor $2, $4, $[[T2]]
+ ; GP32: xor $3, $5, $[[T1]]
+ ; GP32: xor $4, $6, $[[T0]]
+ ; GP32: lw $[[T3:[0-9]+]], 28($sp)
+ ; GP32: xor $5, $7, $[[T3]]
+
+ ; GP64: xor $2, $4, $6
+ ; GP64: xor $3, $5, $7
+
+ %r = xor i128 %a, %b
+ ret i128 %r
+}
diff --git a/test/CodeGen/Mips/load-store-left-right.ll b/test/CodeGen/Mips/load-store-left-right.ll
index f6d0e8d..b8e6e83 100644
--- a/test/CodeGen/Mips/load-store-left-right.ll
+++ b/test/CodeGen/Mips/load-store-left-right.ll
@@ -4,14 +4,14 @@
; RUN: llc -march=mips -mcpu=mips32r2 < %s | FileCheck -check-prefix=ALL -check-prefix=MIPS32 -check-prefix=MIPS32-EB %s
; RUN: llc -march=mipsel -mcpu=mips32r6 < %s | FileCheck -check-prefix=ALL -check-prefix=MIPS32R6 -check-prefix=MIPS32R6-EL %s
; RUN: llc -march=mips -mcpu=mips32r6 < %s | FileCheck -check-prefix=ALL -check-prefix=MIPS32R6 -check-prefix=MIPS32R6-EB %s
-; RUN: llc -march=mips64el -mcpu=mips4 -mattr=n64 < %s | FileCheck -check-prefix=ALL -check-prefix=MIPS64 -check-prefix=MIPS64-EL %s
-; RUN: llc -march=mips64 -mcpu=mips4 -mattr=n64 < %s | FileCheck -check-prefix=ALL -check-prefix=MIPS64 -check-prefix=MIPS64-EB %s
-; RUN: llc -march=mips64el -mcpu=mips64 -mattr=n64 < %s | FileCheck -check-prefix=ALL -check-prefix=MIPS64 -check-prefix=MIPS64-EL %s
-; RUN: llc -march=mips64 -mcpu=mips64 -mattr=n64 < %s | FileCheck -check-prefix=ALL -check-prefix=MIPS64 -check-prefix=MIPS64-EB %s
-; RUN: llc -march=mips64el -mcpu=mips64r2 -mattr=n64 < %s | FileCheck -check-prefix=ALL -check-prefix=MIPS64 -check-prefix=MIPS64-EL %s
-; RUN: llc -march=mips64 -mcpu=mips64r2 -mattr=n64 < %s | FileCheck -check-prefix=ALL -check-prefix=MIPS64 -check-prefix=MIPS64-EB %s
-; RUN: llc -march=mips64el -mcpu=mips64r6 -mattr=n64 < %s | FileCheck -check-prefix=ALL -check-prefix=MIPS64R6 -check-prefix=MIPS64R6-EL %s
-; RUN: llc -march=mips64 -mcpu=mips64r6 -mattr=n64 < %s | FileCheck -check-prefix=ALL -check-prefix=MIPS64R6 -check-prefix=MIPS64R6-EB %s
+; RUN: llc -march=mips64el -mcpu=mips4 -target-abi=n64 < %s | FileCheck -check-prefix=ALL -check-prefix=MIPS64 -check-prefix=MIPS64-EL %s
+; RUN: llc -march=mips64 -mcpu=mips4 -target-abi=n64 < %s | FileCheck -check-prefix=ALL -check-prefix=MIPS64 -check-prefix=MIPS64-EB %s
+; RUN: llc -march=mips64el -mcpu=mips64 -target-abi=n64 < %s | FileCheck -check-prefix=ALL -check-prefix=MIPS64 -check-prefix=MIPS64-EL %s
+; RUN: llc -march=mips64 -mcpu=mips64 -target-abi=n64 < %s | FileCheck -check-prefix=ALL -check-prefix=MIPS64 -check-prefix=MIPS64-EB %s
+; RUN: llc -march=mips64el -mcpu=mips64r2 -target-abi=n64 < %s | FileCheck -check-prefix=ALL -check-prefix=MIPS64 -check-prefix=MIPS64-EL %s
+; RUN: llc -march=mips64 -mcpu=mips64r2 -target-abi=n64 < %s | FileCheck -check-prefix=ALL -check-prefix=MIPS64 -check-prefix=MIPS64-EB %s
+; RUN: llc -march=mips64el -mcpu=mips64r6 -target-abi=n64 < %s | FileCheck -check-prefix=ALL -check-prefix=MIPS64R6 -check-prefix=MIPS64R6-EL %s
+; RUN: llc -march=mips64 -mcpu=mips64r6 -target-abi=n64 < %s | FileCheck -check-prefix=ALL -check-prefix=MIPS64R6 -check-prefix=MIPS64R6-EB %s
%struct.SLL = type { i64 }
%struct.SI = type { i32 }
diff --git a/test/CodeGen/Mips/longbranch.ll b/test/CodeGen/Mips/longbranch.ll
index b9b52be..9f5b741 100644
--- a/test/CodeGen/Mips/longbranch.ll
+++ b/test/CodeGen/Mips/longbranch.ll
@@ -1,9 +1,9 @@
; RUN: llc -march=mipsel < %s | FileCheck %s
; RUN: llc -march=mipsel -force-mips-long-branch -O3 < %s \
; RUN: | FileCheck %s -check-prefix=O32
-; RUN: llc -march=mips64el -mcpu=mips4 -mattr=n64 -force-mips-long-branch -O3 \
+; RUN: llc -march=mips64el -mcpu=mips4 -target-abi=n64 -force-mips-long-branch -O3 \
; RUN: < %s | FileCheck %s -check-prefix=N64
-; RUN: llc -march=mips64el -mcpu=mips64 -mattr=n64 -force-mips-long-branch -O3 \
+; RUN: llc -march=mips64el -mcpu=mips64 -target-abi=n64 -force-mips-long-branch -O3 \
; RUN: < %s | FileCheck %s -check-prefix=N64
; RUN: llc -march=mipsel -mcpu=mips32r2 -mattr=micromips \
; RUN: -force-mips-long-branch -O3 < %s | FileCheck %s -check-prefix=MICROMIPS
@@ -123,11 +123,10 @@ end:
; MICROMIPS: $[[BB0]]:
; MICROMIPS: lw $[[R1:[0-9]+]], %got(x)($[[GP]])
-; MICROMIPS: addiu $[[R2:[0-9]+]], $zero, 1
-; MICROMIPS: sw $[[R2]], 0($[[R1]])
+; MICROMIPS: li16 $[[R2:[0-9]+]], 1
+; MICROMIPS: sw16 $[[R2]], 0($[[R1]])
; MICROMIPS: $[[BB2]]:
-; MICROMIPS: jr $ra
-; MICROMIPS: nop
+; MICROMIPS: jrc $ra
; Check the NaCl version. Check that sp change is not in the branch delay slot
diff --git a/test/CodeGen/Mips/mbrsize4a.ll b/test/CodeGen/Mips/mbrsize4a.ll
index c802991..15e1f47 100644
--- a/test/CodeGen/Mips/mbrsize4a.ll
+++ b/test/CodeGen/Mips/mbrsize4a.ll
@@ -34,4 +34,4 @@ attributes #0 = { nounwind "less-precise-fpmad"="false" "no-frame-pointer-elim"=
attributes #1 = { "less-precise-fpmad"="false" "no-frame-pointer-elim"="true" "no-frame-pointer-elim-non-leaf" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" }
attributes #2 = { nounwind }
-!1 = metadata !{i32 68}
+!1 = !{i32 68}
diff --git a/test/CodeGen/Mips/micromips-and16.ll b/test/CodeGen/Mips/micromips-and16.ll
new file mode 100644
index 0000000..4eacf18
--- /dev/null
+++ b/test/CodeGen/Mips/micromips-and16.ll
@@ -0,0 +1,18 @@
+; RUN: llc -march=mipsel -mcpu=mips32r2 -mattr=+micromips \
+; RUN: -relocation-model=pic -O3 < %s | FileCheck %s
+
+define i32 @main() {
+entry:
+ %retval = alloca i32, align 4
+ %a = alloca i32, align 4
+ %b = alloca i32, align 4
+ %c = alloca i32, align 4
+ store i32 0, i32* %retval
+ %0 = load i32* %b, align 4
+ %1 = load i32* %c, align 4
+ %and = and i32 %0, %1
+ store i32 %and, i32* %a, align 4
+ ret i32 0
+}
+
+; CHECK: and16
diff --git a/test/CodeGen/Mips/micromips-atomic.ll b/test/CodeGen/Mips/micromips-atomic.ll
index a50e0b7..82eee4b 100644
--- a/test/CodeGen/Mips/micromips-atomic.ll
+++ b/test/CodeGen/Mips/micromips-atomic.ll
@@ -14,5 +14,5 @@ entry:
; CHECK: ll $[[R1:[0-9]+]], 0($[[R0]])
; CHECK: addu $[[R2:[0-9]+]], $[[R1]], $4
; CHECK: sc $[[R2]], 0($[[R0]])
-; CHECK: beqz $[[R2]], $[[BB0]]
+; CHECK: beqzc $[[R2]], $[[BB0]]
}
diff --git a/test/CodeGen/Mips/micromips-atomic1.ll b/test/CodeGen/Mips/micromips-atomic1.ll
new file mode 100644
index 0000000..37c3d76
--- /dev/null
+++ b/test/CodeGen/Mips/micromips-atomic1.ll
@@ -0,0 +1,29 @@
+; RUN: llc -march=mipsel -filetype=obj --disable-machine-licm -mattr=micromips < %s -o - \
+; RUN: | llvm-objdump -no-show-raw-insn -arch mipsel -mcpu=mips32r2 -mattr=micromips -d - \
+; RUN: | FileCheck %s -check-prefix=MICROMIPS
+
+; Use llvm-objdump to check whether the encodings of microMIPS atomic instructions are correct.
+; When emitting assembly files directly in microMIPS mode, it is possible to emit a mips32r2
+; instruction instead of a microMIPS instruction, and since many mips32r2 and microMIPS
+; instructions have identical assembly formats, such an invalid instruction cannot be detected.
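+; llvm-objdump decodes the raw bytes as microMIPS here, so an instruction that
+; was emitted with a mips32r2 encoding would fail to disassemble as the
+; expected ll/sc pair.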
+
+@y = common global i8 0, align 1
+
+define signext i8 @AtomicLoadAdd8(i8 signext %incr) nounwind {
+entry:
+ %0 = atomicrmw add i8* @y, i8 %incr monotonic
+ ret i8 %0
+
+; MICROMIPS: ll ${{[0-9]+}}, 0(${{[0-9]+}})
+; MICROMIPS: sc ${{[0-9]+}}, 0(${{[0-9]+}})
+}
+
+define signext i8 @AtomicCmpSwap8(i8 signext %oldval, i8 signext %newval) nounwind {
+entry:
+ %pair0 = cmpxchg i8* @y, i8 %oldval, i8 %newval monotonic monotonic
+ %0 = extractvalue { i8, i1 } %pair0, 0
+ ret i8 %0
+
+; MICROMIPS: ll ${{[0-9]+}}, 0(${{[0-9]+}})
+; MICROMIPS: sc ${{[0-9]+}}, 0(${{[0-9]+}})
+}
diff --git a/test/CodeGen/Mips/micromips-compact-branches.ll b/test/CodeGen/Mips/micromips-compact-branches.ll
new file mode 100644
index 0000000..670f9a0
--- /dev/null
+++ b/test/CodeGen/Mips/micromips-compact-branches.ll
@@ -0,0 +1,19 @@
+; RUN: llc %s -march=mipsel -mattr=micromips -filetype=asm -O3 \
+; RUN: -disable-mips-delay-filler -relocation-model=pic -o - | FileCheck %s
+
+define void @main() nounwind uwtable {
+entry:
+ %x = alloca i32, align 4
+ %0 = load i32* %x, align 4
+ %cmp = icmp eq i32 %0, 0
+ br i1 %cmp, label %if.then, label %if.end
+
+if.then:
+ store i32 10, i32* %x, align 4
+ br label %if.end
+
+if.end:
+ ret void
+}
+
+; CHECK: bnezc
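+; bnezc is the compact branch form, which has no delay slot.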
diff --git a/test/CodeGen/Mips/micromips-compact-jump.ll b/test/CodeGen/Mips/micromips-compact-jump.ll
new file mode 100644
index 0000000..70cff84
--- /dev/null
+++ b/test/CodeGen/Mips/micromips-compact-jump.ll
@@ -0,0 +1,11 @@
+; RUN: llc -march=mipsel -mcpu=mips32r2 -mattr=+micromips \
+; RUN: -disable-mips-delay-filler -O3 < %s | FileCheck %s
+
+define i32 @foo(i32 signext %a) #0 {
+entry:
+ ret i32 0
+}
+
+declare i32 @bar(i32 signext) #1
+
+; CHECK: jrc
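+; With the delay slot filler disabled, the compact jrc form avoids a jr plus nop pair.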
diff --git a/test/CodeGen/Mips/micromips-delay-slot-jr.ll b/test/CodeGen/Mips/micromips-delay-slot-jr.ll
new file mode 100644
index 0000000..09a98c2
--- /dev/null
+++ b/test/CodeGen/Mips/micromips-delay-slot-jr.ll
@@ -0,0 +1,46 @@
+; RUN: llc -march=mipsel -mcpu=mips32r2 -mattr=+micromips \
+; RUN: -relocation-model=static -O2 < %s | FileCheck %s
+
+@main.L = internal unnamed_addr constant [3 x i8*] [i8* blockaddress(@main, %L1), i8* blockaddress(@main, %L2), i8* null], align 4
+@str = private unnamed_addr constant [2 x i8] c"A\00"
+@str2 = private unnamed_addr constant [2 x i8] c"B\00"
+
+define i32 @main() #0 {
+entry:
+ br label %L1
+
+L1: ; preds = %entry, %L1
+ %i.0 = phi i32 [ 0, %entry ], [ %inc, %L1 ]
+ %puts = tail call i32 @puts(i8* getelementptr inbounds ([2 x i8]* @str, i32 0, i32 0))
+ %inc = add i32 %i.0, 1
+ %arrayidx = getelementptr inbounds [3 x i8*]* @main.L, i32 0, i32 %i.0
+ %0 = load i8** %arrayidx, align 4, !tbaa !1
+ indirectbr i8* %0, [label %L1, label %L2]
+
+L2: ; preds = %L1
+ %puts2 = tail call i32 @puts(i8* getelementptr inbounds ([2 x i8]* @str2, i32 0, i32 0))
+ ret i32 0
+}
+
+declare i32 @puts(i8* nocapture readonly) #1
+
+!1 = !{!2, !2, i64 0}
+!2 = !{!"any pointer", !3, i64 0}
+!3 = !{!"omnipotent char", !4, i64 0}
+!4 = !{!"Simple C/C++ TBAA"}
+
+; CHECK: jrc
+
+%struct.foostruct = type { [3 x float] }
+%struct.barstruct = type { %struct.foostruct, float }
+@bar_ary = common global [4 x %struct.barstruct] zeroinitializer, align 4
+define float* @spooky(i32 signext %i) #0 {
+
+ %safe = getelementptr inbounds [4 x %struct.barstruct]* @bar_ary, i32 0, i32 %i, i32 1
+ store float 1.420000e+02, float* %safe, align 4, !tbaa !1
+ ret float* %safe
+}
+
+; CHECK: spooky:
+; CHECK: jrc $ra
+
diff --git a/test/CodeGen/Mips/micromips-delay-slot.ll b/test/CodeGen/Mips/micromips-delay-slot.ll
index 4bab97a..b5f6c56 100644
--- a/test/CodeGen/Mips/micromips-delay-slot.ll
+++ b/test/CodeGen/Mips/micromips-delay-slot.ll
@@ -1,18 +1,18 @@
; RUN: llc -march=mipsel -mcpu=mips32r2 -mattr=+micromips \
-; RUN: -relocation-model=pic -O3 < %s | FileCheck %s
+; RUN: -relocation-model=static -O2 < %s | FileCheck %s
-; Function Attrs: nounwind uwtable
-define i32 @foo(i32 %a) #0 {
+; Function Attrs: nounwind
+define i32 @foo(i32 signext %a) #0 {
entry:
%a.addr = alloca i32, align 4
store i32 %a, i32* %a.addr, align 4
%0 = load i32* %a.addr, align 4
%shl = shl i32 %0, 2
- %call = call i32 @bar(i32 %shl)
+ %call = call i32 @bar(i32 signext %shl)
ret i32 %call
}
-declare i32 @bar(i32) #1
-
-; CHECK: nop
+declare i32 @bar(i32 signext) #1
+; CHECK: jals
+; CHECK-NEXT: sll16
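+; jals has a 16-bit delay slot, so the filler can place the 16-bit sll16 in it
+; instead of padding with a nop.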
diff --git a/test/CodeGen/Mips/micromips-li.ll b/test/CodeGen/Mips/micromips-li.ll
new file mode 100644
index 0000000..ac315f9
--- /dev/null
+++ b/test/CodeGen/Mips/micromips-li.ll
@@ -0,0 +1,18 @@
+; RUN: llc -march=mipsel -mcpu=mips32r2 -mattr=+micromips \
+; RUN: -relocation-model=pic -O3 < %s | FileCheck %s
+
+@x = external global i32
+@y = external global i32
+@z = external global i32
+
+define i32 @main() nounwind {
+entry:
+ store i32 1, i32* @x, align 4
+ store i32 2148, i32* @y, align 4
+ store i32 33332, i32* @z, align 4
+ ret i32 0
+}
+
+; CHECK: li16 ${{[2-7]|16|17}}, 1
+; CHECK: addiu ${{[0-9]+}}, $zero, 2148
+; CHECK: ori ${{[0-9]+}}, $zero, 33332
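+; li16 reaches only a small register set and immediate range, so 1 can use
+; li16 while 2148 needs addiu and 33332 (too large for a signed 16-bit
+; immediate) falls back to ori.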
diff --git a/test/CodeGen/Mips/micromips-or16.ll b/test/CodeGen/Mips/micromips-or16.ll
new file mode 100644
index 0000000..ab7e79a
--- /dev/null
+++ b/test/CodeGen/Mips/micromips-or16.ll
@@ -0,0 +1,18 @@
+; RUN: llc -march=mipsel -mcpu=mips32r2 -mattr=+micromips \
+; RUN: -relocation-model=pic -O3 < %s | FileCheck %s
+
+define i32 @main() {
+entry:
+ %retval = alloca i32, align 4
+ %a = alloca i32, align 4
+ %b = alloca i32, align 4
+ %c = alloca i32, align 4
+ store i32 0, i32* %retval
+ %0 = load i32* %b, align 4
+ %1 = load i32* %c, align 4
+ %or = or i32 %0, %1
+ store i32 %or, i32* %a, align 4
+ ret i32 0
+}
+
+; CHECK: or16
diff --git a/test/CodeGen/Mips/micromips-sw-lw-16.ll b/test/CodeGen/Mips/micromips-sw-lw-16.ll
new file mode 100644
index 0000000..bc09554
--- /dev/null
+++ b/test/CodeGen/Mips/micromips-sw-lw-16.ll
@@ -0,0 +1,27 @@
+; RUN: llc %s -march=mipsel -mattr=micromips -filetype=asm \
+; RUN: -relocation-model=pic -O3 -o - | FileCheck %s
+
+; Function Attrs: noinline nounwind
+define void @bar(i32* %p) #0 {
+entry:
+ %p.addr = alloca i32*, align 4
+ store i32* %p, i32** %p.addr, align 4
+ %0 = load i32** %p.addr, align 4
+ %1 = load i32* %0, align 4
+ %add = add nsw i32 7, %1
+ %2 = load i32** %p.addr, align 4
+ store i32 %add, i32* %2, align 4
+ %3 = load i32** %p.addr, align 4
+ %add.ptr = getelementptr inbounds i32* %3, i32 1
+ %4 = load i32* %add.ptr, align 4
+ %add1 = add nsw i32 7, %4
+ %5 = load i32** %p.addr, align 4
+ %add.ptr2 = getelementptr inbounds i32* %5, i32 1
+ store i32 %add1, i32* %add.ptr2, align 4
+ ret void
+}
+
+; CHECK: lw16 ${{[0-9]+}}, 0($4)
+; CHECK: sw16 ${{[0-9]+}}, 0($4)
+; CHECK: lw16 ${{[0-9]+}}, 4(${{[0-9]+}})
+; CHECK: sw16 ${{[0-9]+}}, 4(${{[0-9]+}})
diff --git a/test/CodeGen/Mips/micromips-xor16.ll b/test/CodeGen/Mips/micromips-xor16.ll
new file mode 100644
index 0000000..9915112
--- /dev/null
+++ b/test/CodeGen/Mips/micromips-xor16.ll
@@ -0,0 +1,18 @@
+; RUN: llc -march=mipsel -mcpu=mips32r2 -mattr=+micromips \
+; RUN: -relocation-model=pic -O3 < %s | FileCheck %s
+
+define i32 @main() {
+entry:
+ %retval = alloca i32, align 4
+ %a = alloca i32, align 4
+ %b = alloca i32, align 4
+ %c = alloca i32, align 4
+ store i32 0, i32* %retval
+ %0 = load i32* %b, align 4
+ %1 = load i32* %c, align 4
+ %xor = xor i32 %0, %1
+ store i32 %xor, i32* %a, align 4
+ ret i32 0
+}
+
+; CHECK: xor16
diff --git a/test/CodeGen/Mips/mips64-sret.ll b/test/CodeGen/Mips/mips64-sret.ll
index ed494e9..0559747 100644
--- a/test/CodeGen/Mips/mips64-sret.ll
+++ b/test/CodeGen/Mips/mips64-sret.ll
@@ -1,4 +1,4 @@
-; RUN: llc -march=mips64el -mcpu=mips64r2 -mattr=n64 < %s | FileCheck %s
+; RUN: llc -march=mips64el -mcpu=mips64r2 -target-abi=n64 < %s | FileCheck %s
define void @foo(i32* noalias sret %agg.result) nounwind {
entry:
diff --git a/test/CodeGen/Mips/mips64directive.ll b/test/CodeGen/Mips/mips64directive.ll
index 3d95f51..c4ba534 100644
--- a/test/CodeGen/Mips/mips64directive.ll
+++ b/test/CodeGen/Mips/mips64directive.ll
@@ -1,5 +1,5 @@
-; RUN: llc < %s -march=mips64el -mcpu=mips4 -mattr=n64 | FileCheck %s
-; RUN: llc < %s -march=mips64el -mcpu=mips64 -mattr=n64 | FileCheck %s
+; RUN: llc < %s -march=mips64el -mcpu=mips4 -target-abi=n64 | FileCheck %s
+; RUN: llc < %s -march=mips64el -mcpu=mips64 -target-abi=n64 | FileCheck %s
@gl = global i64 1250999896321, align 8
diff --git a/test/CodeGen/Mips/mips64ext.ll b/test/CodeGen/Mips/mips64ext.ll
index 22ea0eb..9c1243b 100644
--- a/test/CodeGen/Mips/mips64ext.ll
+++ b/test/CodeGen/Mips/mips64ext.ll
@@ -1,5 +1,5 @@
-; RUN: llc < %s -march=mips64el -mcpu=mips4 -mattr=n64 | FileCheck %s
-; RUN: llc < %s -march=mips64el -mcpu=mips64 -mattr=n64 | FileCheck %s
+; RUN: llc < %s -march=mips64el -mcpu=mips4 -target-abi=n64 | FileCheck %s
+; RUN: llc < %s -march=mips64el -mcpu=mips64 -target-abi=n64 | FileCheck %s
define i64 @zext64_32(i32 %a) nounwind readnone {
entry:
diff --git a/test/CodeGen/Mips/mips64extins.ll b/test/CodeGen/Mips/mips64extins.ll
index 14f92ca..211cd5f 100644
--- a/test/CodeGen/Mips/mips64extins.ll
+++ b/test/CodeGen/Mips/mips64extins.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=mips64el -mcpu=mips64r2 -mattr=n64 | FileCheck %s
+; RUN: llc < %s -march=mips64el -mcpu=mips64r2 -target-abi=n64 | FileCheck %s
define i64 @dext(i64 %i) nounwind readnone {
entry:
diff --git a/test/CodeGen/Mips/mips64fpimm0.ll b/test/CodeGen/Mips/mips64fpimm0.ll
index 19e076d..0296cb5 100644
--- a/test/CodeGen/Mips/mips64fpimm0.ll
+++ b/test/CodeGen/Mips/mips64fpimm0.ll
@@ -1,5 +1,5 @@
-; RUN: llc < %s -march=mips64el -mcpu=mips4 -mattr=n64 | FileCheck %s
-; RUN: llc < %s -march=mips64el -mcpu=mips64 -mattr=n64 | FileCheck %s
+; RUN: llc < %s -march=mips64el -mcpu=mips4 -target-abi=n64 | FileCheck %s
+; RUN: llc < %s -march=mips64el -mcpu=mips64 -target-abi=n64 | FileCheck %s
define double @foo1() nounwind readnone {
entry:
diff --git a/test/CodeGen/Mips/mips64fpldst.ll b/test/CodeGen/Mips/mips64fpldst.ll
index 2f42270..5d62156 100644
--- a/test/CodeGen/Mips/mips64fpldst.ll
+++ b/test/CodeGen/Mips/mips64fpldst.ll
@@ -1,7 +1,7 @@
-; RUN: llc < %s -march=mips64el -mcpu=mips4 -mattr=-n64,n64 | FileCheck %s -check-prefix=CHECK-N64
-; RUN: llc < %s -march=mips64el -mcpu=mips4 -mattr=-n64,n32 | FileCheck %s -check-prefix=CHECK-N32
-; RUN: llc < %s -march=mips64el -mcpu=mips64 -mattr=-n64,n64 | FileCheck %s -check-prefix=CHECK-N64
-; RUN: llc < %s -march=mips64el -mcpu=mips64 -mattr=-n64,n32 | FileCheck %s -check-prefix=CHECK-N32
+; RUN: llc < %s -march=mips64el -mcpu=mips4 -target-abi n64 | FileCheck %s -check-prefix=CHECK-N64
+; RUN: llc < %s -march=mips64el -mcpu=mips4 -target-abi n32 | FileCheck %s -check-prefix=CHECK-N32
+; RUN: llc < %s -march=mips64el -mcpu=mips64 -target-abi n64 | FileCheck %s -check-prefix=CHECK-N64
+; RUN: llc < %s -march=mips64el -mcpu=mips64 -target-abi n32 | FileCheck %s -check-prefix=CHECK-N32
@f0 = common global float 0.000000e+00, align 4
@d0 = common global double 0.000000e+00, align 8
diff --git a/test/CodeGen/Mips/mips64intldst.ll b/test/CodeGen/Mips/mips64intldst.ll
index c3607ba..1ceafc1 100644
--- a/test/CodeGen/Mips/mips64intldst.ll
+++ b/test/CodeGen/Mips/mips64intldst.ll
@@ -1,7 +1,7 @@
-; RUN: llc < %s -march=mips64el -mcpu=mips4 -mattr=-n64,n64 | FileCheck %s -check-prefix=CHECK-N64
-; RUN: llc < %s -march=mips64el -mcpu=mips4 -mattr=-n64,n32 | FileCheck %s -check-prefix=CHECK-N32
-; RUN: llc < %s -march=mips64el -mcpu=mips64 -mattr=-n64,n64 | FileCheck %s -check-prefix=CHECK-N64
-; RUN: llc < %s -march=mips64el -mcpu=mips64 -mattr=-n64,n32 | FileCheck %s -check-prefix=CHECK-N32
+; RUN: llc < %s -march=mips64el -mcpu=mips4 -target-abi n64 | FileCheck %s -check-prefix=CHECK-N64
+; RUN: llc < %s -march=mips64el -mcpu=mips4 -target-abi n32 | FileCheck %s -check-prefix=CHECK-N32
+; RUN: llc < %s -march=mips64el -mcpu=mips64 -target-abi n64 | FileCheck %s -check-prefix=CHECK-N64
+; RUN: llc < %s -march=mips64el -mcpu=mips64 -target-abi n32 | FileCheck %s -check-prefix=CHECK-N32
@c = common global i8 0, align 4
@s = common global i16 0, align 4
diff --git a/test/CodeGen/Mips/mips64sinttofpsf.ll b/test/CodeGen/Mips/mips64sinttofpsf.ll
new file mode 100644
index 0000000..d3d4603
--- /dev/null
+++ b/test/CodeGen/Mips/mips64sinttofpsf.ll
@@ -0,0 +1,15 @@
+; RUN: llc -march=mips64 -mcpu=mips64r2 -soft-float -O0 < %s | FileCheck %s
+
+
+define double @foo() #0 {
+entry:
+ %x = alloca i32, align 4
+ store volatile i32 -32, i32* %x, align 4
+ %0 = load volatile i32* %x, align 4
+ %conv = sitofp i32 %0 to double
+ ret double %conv
+
+; CHECK-NOT: dsll
+; CHECK-NOT: dsrl
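+; The i32 operand of the soft-float conversion must not be shuffled through a
+; dsll/dsrl round-trip.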
+
+}
diff --git a/test/CodeGen/Mips/named-register-n32.ll b/test/CodeGen/Mips/named-register-n32.ll
new file mode 100644
index 0000000..b15e928
--- /dev/null
+++ b/test/CodeGen/Mips/named-register-n32.ll
@@ -0,0 +1,18 @@
+; RUN: llc -march=mips64 -relocation-model=static -mattr=+noabicalls -target-abi n32 < %s | FileCheck %s
+
+define i32* @get_gp() {
+entry:
+ %0 = call i64 @llvm.read_register.i64(metadata !0)
+ %1 = trunc i64 %0 to i32
+ %2 = inttoptr i32 %1 to i32*
+ ret i32* %2
+}
+
+; CHECK-LABEL: get_gp:
+; CHECK: sll $2, $gp, 0
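+; On N32 the 64-bit $gp is truncated to i32; MIPS64 expresses that 32-bit
+; sign-extension as a shift-left-by-zero rather than a plain move.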
+
+declare i64 @llvm.read_register.i64(metadata)
+
+!llvm.named.register.$28 = !{!0}
+
+!0 = !{!"$28"}
diff --git a/test/CodeGen/Mips/named-register-n64.ll b/test/CodeGen/Mips/named-register-n64.ll
new file mode 100644
index 0000000..3198772
--- /dev/null
+++ b/test/CodeGen/Mips/named-register-n64.ll
@@ -0,0 +1,17 @@
+; RUN: llc -march=mips64 -relocation-model=static -mattr=+noabicalls < %s | FileCheck %s
+
+define i32* @get_gp() {
+entry:
+ %0 = call i64 @llvm.read_register.i64(metadata !0)
+ %1 = inttoptr i64 %0 to i32*
+ ret i32* %1
+}
+
+; CHECK-LABEL: get_gp:
+; CHECK: move $2, $gp
+
+declare i64 @llvm.read_register.i64(metadata)
+
+!llvm.named.register.$28 = !{!0}
+
+!0 = !{!"$28"}
diff --git a/test/CodeGen/Mips/named-register-o32.ll b/test/CodeGen/Mips/named-register-o32.ll
new file mode 100644
index 0000000..0890c66
--- /dev/null
+++ b/test/CodeGen/Mips/named-register-o32.ll
@@ -0,0 +1,17 @@
+; RUN: llc -march=mips -relocation-model=static -mattr=+noabicalls < %s | FileCheck %s
+
+define i32* @get_gp() {
+entry:
+ %0 = call i32 @llvm.read_register.i32(metadata !0)
+ %1 = inttoptr i32 %0 to i32*
+ ret i32* %1
+}
+
+; CHECK-LABEL: get_gp:
+; CHECK: move $2, $gp
+
+declare i32 @llvm.read_register.i32(metadata)
+
+!llvm.named.register.$28 = !{!0}
+
+!0 = !{!"$28"}
diff --git a/test/CodeGen/Mips/no-odd-spreg-msa.ll b/test/CodeGen/Mips/no-odd-spreg-msa.ll
new file mode 100644
index 0000000..30dd1ff
--- /dev/null
+++ b/test/CodeGen/Mips/no-odd-spreg-msa.ll
@@ -0,0 +1,131 @@
+; RUN: llc -march=mipsel -mcpu=mips32 -mattr=+fp64,+msa,-nooddspreg < %s | FileCheck %s -check-prefix=ALL -check-prefix=ODDSPREG
+; RUN: llc -march=mipsel -mcpu=mips32 -mattr=+fp64,+msa,+nooddspreg < %s | FileCheck %s -check-prefix=ALL -check-prefix=NOODDSPREG
+
+@v4f32 = global <4 x float> zeroinitializer
+
+define void @msa_insert_0(float %a) {
+entry:
+ ; Force the float into an odd-numbered register using named registers and
+ ; load the vector.
+ %b = call float asm sideeffect "mov.s $0, $1", "={$f13},{$f12}" (float %a)
+ %0 = load volatile <4 x float>* @v4f32
+
+ ; Clobber all except $f12/$w12 and $f13
+ ;
+ ; The intention is that if odd single precision registers are permitted, the
+ ; allocator will choose $f12/$w12 for the vector and $f13 for the float to
+ ; avoid the spill/reload.
+ ;
+ ; On the other hand, if odd single precision registers are not permitted, it
+ ; must copy $f13 to an even-numbered register before inserting into the
+ ; vector.
+ call void asm sideeffect "# Clobber", "~{$f0},~{$f1},~{$f2},~{$f3},~{$f4},~{$f5},~{$f6},~{$f7},~{$f8},~{$f9},~{$f10},~{$f11},~{$f14},~{$f15},~{$f16},~{$f17},~{$f18},~{$f19},~{$f20},~{$f21},~{$f22},~{$f23},~{$f24},~{$f25},~{$f26},~{$f27},~{$f28},~{$f29},~{$f30},~{$f31}"()
+ %1 = insertelement <4 x float> %0, float %b, i32 0
+ store <4 x float> %1, <4 x float>* @v4f32
+ ret void
+}
+
+; ALL-LABEL: msa_insert_0:
+; ALL: mov.s $f13, $f12
+; ALL: lw $[[R0:[0-9]+]], %got(v4f32)(
+; ALL: ld.w $w[[W0:[0-9]+]], 0($[[R0]])
+; NOODDSPREG: mov.s $f[[F0:[0-9]+]], $f13
+; NOODDSPREG: insve.w $w[[W0]][0], $w[[F0]][0]
+; ODDSPREG: insve.w $w[[W0]][0], $w13[0]
+; ALL: # Clobber
+; ALL-NOT: sdc1
+; ALL-NOT: ldc1
+; ALL: st.w $w[[W0]], 0($[[R0]])
+
+define void @msa_insert_1(float %a) {
+entry:
+ ; Force the float into an odd-numbered register using named registers and
+ ; load the vector.
+ %b = call float asm sideeffect "mov.s $0, $1", "={$f13},{$f12}" (float %a)
+ %0 = load volatile <4 x float>* @v4f32
+
+ ; Clobber all except $f12/$w12 and $f13
+ ;
+ ; The intention is that if odd single precision registers are permitted, the
+ ; allocator will choose $f12/$w12 for the vector and $f13 for the float to
+ ; avoid the spill/reload.
+ ;
+ ; On the other hand, if odd single precision registers are not permitted, it
+ ; must copy $f13 to an even-numbered register before inserting into the
+ ; vector.
+ call void asm sideeffect "# Clobber", "~{$f0},~{$f1},~{$f2},~{$f3},~{$f4},~{$f5},~{$f6},~{$f7},~{$f8},~{$f9},~{$f10},~{$f11},~{$f14},~{$f15},~{$f16},~{$f17},~{$f18},~{$f19},~{$f20},~{$f21},~{$f22},~{$f23},~{$f24},~{$f25},~{$f26},~{$f27},~{$f28},~{$f29},~{$f30},~{$f31}"()
+ %1 = insertelement <4 x float> %0, float %b, i32 1
+ store <4 x float> %1, <4 x float>* @v4f32
+ ret void
+}
+
+; ALL-LABEL: msa_insert_1:
+; ALL: mov.s $f13, $f12
+; ALL: lw $[[R0:[0-9]+]], %got(v4f32)(
+; ALL: ld.w $w[[W0:[0-9]+]], 0($[[R0]])
+; NOODDSPREG: mov.s $f[[F0:[0-9]+]], $f13
+; NOODDSPREG: insve.w $w[[W0]][1], $w[[F0]][0]
+; ODDSPREG: insve.w $w[[W0]][1], $w13[0]
+; ALL: # Clobber
+; ALL-NOT: sdc1
+; ALL-NOT: ldc1
+; ALL: st.w $w[[W0]], 0($[[R0]])
+
+define float @msa_extract_0() {
+entry:
+ %0 = load volatile <4 x float>* @v4f32
+ %1 = call <4 x float> asm sideeffect "move.v $0, $1", "={$w13},{$w12}" (<4 x float> %0)
+
+ ; Clobber all except $f12, and $f13
+ ;
+ ; The intention is that if odd single precision registers are permitted, the
+ ; allocator will choose $f13/$w13 for the vector since that saves on moves.
+ ;
+ ; On the other hand, if odd single precision registers are not permitted, it
+ ; must move it to $f12/$w12.
+ call void asm sideeffect "# Clobber", "~{$f0},~{$f1},~{$f2},~{$f3},~{$f4},~{$f5},~{$f6},~{$f7},~{$f8},~{$f9},~{$f10},~{$f11},~{$f14},~{$f15},~{$f16},~{$f17},~{$f18},~{$f19},~{$f20},~{$f21},~{$f22},~{$f23},~{$f24},~{$f25},~{$f26},~{$f27},~{$f28},~{$f29},~{$f30},~{$f31}"()
+
+ %2 = extractelement <4 x float> %1, i32 0
+ ret float %2
+}
+
+; ALL-LABEL: msa_extract_0:
+; ALL: lw $[[R0:[0-9]+]], %got(v4f32)(
+; ALL: ld.w $w12, 0($[[R0]])
+; ALL: move.v $w[[W0:13]], $w12
+; NOODDSPREG: move.v $w[[W0:12]], $w13
+; ALL: # Clobber
+; ALL-NOT: st.w
+; ALL-NOT: ld.w
+; ALL: mov.s $f0, $f[[W0]]
+
+define float @msa_extract_1() {
+entry:
+ %0 = load volatile <4 x float>* @v4f32
+ %1 = call <4 x float> asm sideeffect "move.v $0, $1", "={$w13},{$w12}" (<4 x float> %0)
+
+ ; Clobber all except $f13
+ ;
+ ; The intention is that if odd single precision registers are permitted, the
+ ; allocator will choose $f13/$w13 for the vector since that saves on moves.
+ ;
+ ; On the other hand, if odd single precision registers are not permitted, it
+ ; must be spilled.
+ call void asm sideeffect "# Clobber", "~{$f0},~{$f1},~{$f2},~{$f3},~{$f4},~{$f5},~{$f6},~{$f7},~{$f8},~{$f9},~{$f10},~{$f11},~{$f12},~{$f14},~{$f15},~{$f16},~{$f17},~{$f18},~{$f19},~{$f20},~{$f21},~{$f22},~{$f23},~{$f24},~{$f25},~{$f26},~{$f27},~{$f28},~{$f29},~{$f30},~{$f31}"()
+
+ %2 = extractelement <4 x float> %1, i32 1
+ ret float %2
+}
+
+; ALL-LABEL: msa_extract_1:
+; ALL: lw $[[R0:[0-9]+]], %got(v4f32)(
+; ALL: ld.w $w12, 0($[[R0]])
+; ALL: splati.w $w[[W0:[0-9]+]], $w13[1]
+; NOODDSPREG: st.w $w[[W0]], 0($sp)
+; ODDSPREG-NOT: st.w
+; ODDSPREG-NOT: ld.w
+; ALL: # Clobber
+; ODDSPREG-NOT: st.w
+; ODDSPREG-NOT: ld.w
+; NOODDSPREG: ld.w $w0, 0($sp)
+; ODDSPREG: mov.s $f0, $f[[W0]]
diff --git a/test/CodeGen/Mips/octeon.ll b/test/CodeGen/Mips/octeon.ll
index d5ff9bd..97e12e7 100644
--- a/test/CodeGen/Mips/octeon.ll
+++ b/test/CodeGen/Mips/octeon.ll
@@ -1,15 +1,14 @@
-; RUN: llc -O1 < %s -march=mips64 -mcpu=octeon | FileCheck %s -check-prefix=OCTEON
-; RUN: llc -O1 < %s -march=mips64 -mcpu=mips64 | FileCheck %s -check-prefix=MIPS64
+; RUN: llc -O1 < %s -march=mips64 -mcpu=octeon | FileCheck %s -check-prefix=ALL -check-prefix=OCTEON
+; RUN: llc -O1 < %s -march=mips64 -mcpu=mips64 | FileCheck %s -check-prefix=ALL -check-prefix=MIPS64
define i64 @addi64(i64 %a, i64 %b) nounwind {
entry:
-; OCTEON-LABEL: addi64:
+; ALL-LABEL: addi64:
; OCTEON: jr $ra
; OCTEON: baddu $2, $4, $5
-; MIPS64-LABEL: addi64:
-; MIPS64: daddu
-; MIPS64: jr
-; MIPS64: andi
+; MIPS64: daddu $[[T0:[0-9]+]], $4, $5
+; MIPS64: jr $ra
+; MIPS64: andi $2, $[[T0]], 255
%add = add i64 %a, %b
%and = and i64 %add, 255
ret i64 %and
@@ -17,13 +16,142 @@ entry:
define i64 @mul(i64 %a, i64 %b) nounwind {
entry:
-; OCTEON-LABEL: mul:
+; ALL-LABEL: mul:
; OCTEON: jr $ra
; OCTEON: dmul $2, $4, $5
-; MIPS64-LABEL: mul:
-; MIPS64: dmult
-; MIPS64: jr
-; MIPS64: mflo
+; MIPS64: dmult $4, $5
+; MIPS64: jr $ra
+; MIPS64: mflo $2
%res = mul i64 %a, %b
ret i64 %res
}
+
+define i64 @cmpeq(i64 %a, i64 %b) nounwind {
+entry:
+; ALL-LABEL: cmpeq:
+; OCTEON: jr $ra
+; OCTEON: seq $2, $4, $5
+; MIPS64: xor $[[T0:[0-9]+]], $4, $5
+; MIPS64: sltiu $[[T1:[0-9]+]], $[[T0]], 1
+; MIPS64: dsll $[[T2:[0-9]+]], $[[T1]], 32
+; MIPS64: jr $ra
+; MIPS64: dsrl $2, $[[T2]], 32
+ %res = icmp eq i64 %a, %b
+ %res2 = zext i1 %res to i64
+ ret i64 %res2
+}
+
+define i64 @cmpeqi(i64 %a) nounwind {
+entry:
+; ALL-LABEL: cmpeqi:
+; OCTEON: jr $ra
+; OCTEON: seqi $2, $4, 42
+; MIPS64: daddiu $[[T0:[0-9]+]], $zero, 42
+; MIPS64: xor $[[T1:[0-9]+]], $4, $[[T0]]
+; MIPS64: sltiu $[[T2:[0-9]+]], $[[T1]], 1
+; MIPS64: dsll $[[T3:[0-9]+]], $[[T2]], 32
+; MIPS64: jr $ra
+; MIPS64: dsrl $2, $[[T3]], 32
+ %res = icmp eq i64 %a, 42
+ %res2 = zext i1 %res to i64
+ ret i64 %res2
+}
+
+define i64 @cmpne(i64 %a, i64 %b) nounwind {
+entry:
+; ALL-LABEL: cmpne:
+; OCTEON: jr $ra
+; OCTEON: sne $2, $4, $5
+; MIPS64: xor $[[T0:[0-9]+]], $4, $5
+; MIPS64: sltu $[[T1:[0-9]+]], $zero, $[[T0]]
+; MIPS64: dsll $[[T2:[0-9]+]], $[[T1]], 32
+; MIPS64: jr $ra
+; MIPS64: dsrl $2, $[[T2]], 32
+ %res = icmp ne i64 %a, %b
+ %res2 = zext i1 %res to i64
+ ret i64 %res2
+}
+
+define i64 @cmpnei(i64 %a) nounwind {
+entry:
+; ALL-LABEL: cmpnei:
+; OCTEON: jr $ra
+; OCTEON: snei $2, $4, 42
+; MIPS64: daddiu $[[T0:[0-9]+]], $zero, 42
+; MIPS64: xor $[[T1:[0-9]+]], $4, $[[T0]]
+; MIPS64: sltu $[[T2:[0-9]+]], $zero, $[[T1]]
+; MIPS64: dsll $[[T3:[0-9]+]], $[[T2]], 32
+; MIPS64: jr $ra
+; MIPS64: dsrl $2, $[[T3]], 32
+ %res = icmp ne i64 %a, 42
+ %res2 = zext i1 %res to i64
+ ret i64 %res2
+}
+
+define i64 @bbit0(i64 %a) nounwind {
+entry:
+; ALL-LABEL: bbit0:
+; OCTEON: bbit0 $4, 3, $[[BB0:BB[0-9_]+]]
+; MIPS64: andi $[[T0:[0-9]+]], $4, 8
+; MIPS64: beqz $[[T0]], $[[BB0:BB[0-9_]+]]
+ %bit = and i64 %a, 8
+ %res = icmp eq i64 %bit, 0
+ br i1 %res, label %endif, label %if
+if:
+ ret i64 48
+
+endif:
+ ret i64 12
+}
+
+define i64 @bbit032(i64 %a) nounwind {
+entry:
+; ALL-LABEL: bbit032:
+; OCTEON: bbit032 $4, 3, $[[BB0:BB[0-9_]+]]
+; MIPS64: daddiu $[[T0:[0-9]+]], $zero, 1
+; MIPS64: dsll $[[T1:[0-9]+]], $[[T0]], 35
+; MIPS64: and $[[T2:[0-9]+]], $4, $[[T1]]
+; MIPS64: beqz $[[T2]], $[[BB0:BB[0-9_]+]]
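+; The mask 34359738368 is 1 << 35; bbit032 tests bit 3 + 32 = 35 directly,
+; while plain MIPS64 must materialize the mask and use and/beqz.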
+ %bit = and i64 %a, 34359738368
+ %res = icmp eq i64 %bit, 0
+ br i1 %res, label %endif, label %if
+if:
+ ret i64 48
+
+endif:
+ ret i64 12
+}
+
+define i64 @bbit1(i64 %a) nounwind {
+entry:
+; ALL-LABEL: bbit1:
+; OCTEON: bbit1 $4, 3, $[[BB0:BB[0-9_]+]]
+; MIPS64: andi $[[T0:[0-9]+]], $4, 8
+; MIPS64: beqz $[[T0]], $[[BB0:BB[0-9_]+]]
+ %bit = and i64 %a, 8
+ %res = icmp ne i64 %bit, 0
+ br i1 %res, label %endif, label %if
+if:
+ ret i64 48
+
+endif:
+ ret i64 12
+}
+
+define i64 @bbit132(i64 %a) nounwind {
+entry:
+; ALL-LABEL: bbit132:
+; OCTEON: bbit132 $4, 3, $[[BB0:BB[0-9_]+]]
+; MIPS64: daddiu $[[T0:[0-9]+]], $zero, 1
+; MIPS64: dsll $[[T1:[0-9]+]], $[[T0]], 35
+; MIPS64: and $[[T2:[0-9]+]], $4, $[[T1]]
+; MIPS64: beqz $[[T2]], $[[BB0:BB[0-9_]+]]
+ %bit = and i64 %a, 34359738368
+ %res = icmp ne i64 %bit, 0
+ br i1 %res, label %endif, label %if
+if:
+ ret i64 48
+
+endif:
+ ret i64 12
+}
diff --git a/test/CodeGen/Mips/powif64_16.ll b/test/CodeGen/Mips/powif64_16.ll
index 4875727..33ec8c4 100644
--- a/test/CodeGen/Mips/powif64_16.ll
+++ b/test/CodeGen/Mips/powif64_16.ll
@@ -20,7 +20,7 @@ define double @foo_pow_f64(double %y, i32 %p) {
attributes #0 = { nounwind optsize "less-precise-fpmad"="false" "no-frame-pointer-elim"="true" "no-frame-pointer-elim-non-leaf"="true" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="true" }
attributes #1 = { nounwind readonly }
-!0 = metadata !{metadata !"double", metadata !1}
-!1 = metadata !{metadata !"omnipotent char", metadata !2}
-!2 = metadata !{metadata !"Simple C/C++ TBAA"}
-!3 = metadata !{metadata !"int", metadata !1}
+!0 = !{!"double", !1}
+!1 = !{!"omnipotent char", !2}
+!2 = !{!"Simple C/C++ TBAA"}
+!3 = !{!"int", !1}
diff --git a/test/CodeGen/Mips/remat-immed-load.ll b/test/CodeGen/Mips/remat-immed-load.ll
index b53b156..3d37b43 100644
--- a/test/CodeGen/Mips/remat-immed-load.ll
+++ b/test/CodeGen/Mips/remat-immed-load.ll
@@ -1,6 +1,6 @@
; RUN: llc -march=mipsel < %s | FileCheck %s -check-prefix=32
-; RUN: llc -march=mips64el -mcpu=mips4 -mattr=n64 < %s | FileCheck %s -check-prefix=64
-; RUN: llc -march=mips64el -mcpu=mips64 -mattr=n64 < %s | FileCheck %s -check-prefix=64
+; RUN: llc -march=mips64el -mcpu=mips4 -target-abi=n64 < %s | FileCheck %s -check-prefix=64
+; RUN: llc -march=mips64el -mcpu=mips64 -target-abi=n64 < %s | FileCheck %s -check-prefix=64
define void @f0() nounwind {
entry:
diff --git a/test/CodeGen/Mips/start-asm-file.ll b/test/CodeGen/Mips/start-asm-file.ll
index 9dc501c..60c047a 100644
--- a/test/CodeGen/Mips/start-asm-file.ll
+++ b/test/CodeGen/Mips/start-asm-file.ll
@@ -19,36 +19,36 @@
; ### N32 ABI ###
; RUN: llc -filetype=asm -mtriple mips64-unknown-linux -mcpu=mips64 \
-; RUN: -relocation-model=static -mattr=-n64,+n32 %s -o - | \
+; RUN: -relocation-model=static -target-abi n32 %s -o - | \
; RUN: FileCheck -check-prefix=CHECK-STATIC-N32 -check-prefix=CHECK-STATIC-N32-NLEGACY %s
; RUN: llc -filetype=asm -mtriple mips64-unknown-linux -mcpu=mips64 \
-; RUN: -relocation-model=pic -mattr=-n64,+n32 %s -o - | \
+; RUN: -relocation-model=pic -target-abi n32 %s -o - | \
; RUN: FileCheck -check-prefix=CHECK-PIC-N32 -check-prefix=CHECK-PIC-N32-NLEGACY %s
; RUN: llc -filetype=asm -mtriple mips64-unknown-linux -mcpu=mips64 \
-; RUN: -relocation-model=static -mattr=-n64,+n32,+nan2008 %s -o - | \
+; RUN: -relocation-model=static -target-abi n32 -mattr=+nan2008 %s -o - | \
; RUN: FileCheck -check-prefix=CHECK-STATIC-N32 -check-prefix=CHECK-STATIC-N32-N2008 %s
; RUN: llc -filetype=asm -mtriple mips64-unknown-linux -mcpu=mips64 \
-; RUN: -relocation-model=pic -mattr=-n64,+n32,+nan2008 %s -o - | \
+; RUN: -relocation-model=pic -target-abi n32 -mattr=+nan2008 %s -o - | \
; RUN: FileCheck -check-prefix=CHECK-PIC-N32 -check-prefix=CHECK-PIC-N32-N2008 %s
; ### N64 ABI ###
; RUN: llc -filetype=asm -mtriple mips64-unknown-linux -mcpu=mips64 \
-; RUN: -relocation-model=static -mattr=+n64 %s -o - | \
+; RUN: -relocation-model=static -target-abi n64 %s -o - | \
; RUN: FileCheck -check-prefix=CHECK-STATIC-N64 -check-prefix=CHECK-STATIC-N64-NLEGACY %s
; RUN: llc -filetype=asm -mtriple mips64-unknown-linux -mcpu=mips64 \
-; RUN: -relocation-model=pic -mattr=+n64 %s -o - | \
+; RUN: -relocation-model=pic -target-abi n64 %s -o - | \
; RUN: FileCheck -check-prefix=CHECK-PIC-N64 -check-prefix=CHECK-PIC-N64-NLEGACY %s
; RUN: llc -filetype=asm -mtriple mips64-unknown-linux -mcpu=mips64 \
-; RUN: -relocation-model=static -mattr=+n64,+nan2008 %s -o - | \
+; RUN: -relocation-model=static -target-abi n64 -mattr=+nan2008 %s -o - | \
; RUN: FileCheck -check-prefix=CHECK-STATIC-N64 -check-prefix=CHECK-STATIC-N64-N2008 %s
; RUN: llc -filetype=asm -mtriple mips64-unknown-linux -mcpu=mips64 \
-; RUN: -relocation-model=pic -mattr=+n64,+nan2008 %s -o - | \
+; RUN: -relocation-model=pic -target-abi n64 -mattr=+nan2008 %s -o - | \
; RUN: FileCheck -check-prefix=CHECK-PIC-N64 -check-prefix=CHECK-PIC-N64-N2008 %s
; CHECK-STATIC-O32: .abicalls
diff --git a/test/CodeGen/NVPTX/annotations.ll b/test/CodeGen/NVPTX/annotations.ll
index 39d52d3..2341377 100644
--- a/test/CodeGen/NVPTX/annotations.ll
+++ b/test/CodeGen/NVPTX/annotations.ll
@@ -33,21 +33,14 @@ define void @kernel_func_minctasm(float* %a) {
!nvvm.annotations = !{!1, !2, !3, !4, !5, !6, !7, !8}
-!1 = metadata !{void (float*)* @kernel_func_maxntid, metadata !"kernel", i32 1}
-!2 = metadata !{void (float*)* @kernel_func_maxntid,
- metadata !"maxntidx", i32 10,
- metadata !"maxntidy", i32 20,
- metadata !"maxntidz", i32 30}
-
-!3 = metadata !{void (float*)* @kernel_func_reqntid, metadata !"kernel", i32 1}
-!4 = metadata !{void (float*)* @kernel_func_reqntid,
- metadata !"reqntidx", i32 11,
- metadata !"reqntidy", i32 22,
- metadata !"reqntidz", i32 33}
-
-!5 = metadata !{void (float*)* @kernel_func_minctasm, metadata !"kernel", i32 1}
-!6 = metadata !{void (float*)* @kernel_func_minctasm,
- metadata !"minctasm", i32 42}
-
-!7 = metadata !{i64 addrspace(1)* @texture, metadata !"texture", i32 1}
-!8 = metadata !{i64 addrspace(1)* @surface, metadata !"surface", i32 1}
+!1 = !{void (float*)* @kernel_func_maxntid, !"kernel", i32 1}
+!2 = !{void (float*)* @kernel_func_maxntid, !"maxntidx", i32 10, !"maxntidy", i32 20, !"maxntidz", i32 30}
+
+!3 = !{void (float*)* @kernel_func_reqntid, !"kernel", i32 1}
+!4 = !{void (float*)* @kernel_func_reqntid, !"reqntidx", i32 11, !"reqntidy", i32 22, !"reqntidz", i32 33}
+
+!5 = !{void (float*)* @kernel_func_minctasm, !"kernel", i32 1}
+!6 = !{void (float*)* @kernel_func_minctasm, !"minctasm", i32 42}
+
+!7 = !{i64 addrspace(1)* @texture, !"texture", i32 1}
+!8 = !{i64 addrspace(1)* @surface, !"surface", i32 1}
diff --git a/test/CodeGen/NVPTX/bug21465.ll b/test/CodeGen/NVPTX/bug21465.ll
index 157b28c..cacffce 100644
--- a/test/CodeGen/NVPTX/bug21465.ll
+++ b/test/CodeGen/NVPTX/bug21465.ll
@@ -21,4 +21,4 @@ attributes #0 = { nounwind "less-precise-fpmad"="false" "no-frame-pointer-elim"=
!nvvm.annotations = !{!0}
-!0 = metadata !{void (%struct.S*, i32*)* @_Z11TakesStruct1SPi, metadata !"kernel", i32 1}
+!0 = !{void (%struct.S*, i32*)* @_Z11TakesStruct1SPi, !"kernel", i32 1}
diff --git a/test/CodeGen/NVPTX/bug22246.ll b/test/CodeGen/NVPTX/bug22246.ll
new file mode 100644
index 0000000..70e7e12
--- /dev/null
+++ b/test/CodeGen/NVPTX/bug22246.ll
@@ -0,0 +1,14 @@
+; RUN: llc < %s -march=nvptx -mcpu=sm_20 | FileCheck %s
+
+target datalayout = "e-i64:64-v16:16-v32:32-n16:32:64"
+target triple = "nvptx64-nvidia-cuda"
+
+; CHECK-LABEL: _Z3foobbbPb
+define void @_Z3foobbbPb(i1 zeroext %p1, i1 zeroext %p2, i1 zeroext %p3, i8* nocapture %output) {
+entry:
+; CHECK: selp.b32 %r{{[0-9]+}}, %r{{[0-9]+}}, %r{{[0-9]+}}, %p{{[0-9]+}}
+ %.sink.v = select i1 %p1, i1 %p2, i1 %p3
+ %frombool5 = zext i1 %.sink.v to i8
+ store i8 %frombool5, i8* %output, align 1
+ ret void
+}
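Note: PTX has no select that produces a predicate directly, so the i1 select above is promoted to 32 bits and lowered to selp.b32, which is what the CHECK line encodes. Its semantics, with illustrative register names:

    selp.b32 %r3, %r1, %r2, %p1;  // %r3 = %p1 ? %r1 : %r2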
diff --git a/test/CodeGen/NVPTX/bug22322.ll b/test/CodeGen/NVPTX/bug22322.ll
new file mode 100644
index 0000000..19ee694
--- /dev/null
+++ b/test/CodeGen/NVPTX/bug22322.ll
@@ -0,0 +1,62 @@
+; RUN: llc < %s -march=nvptx -mcpu=sm_20 | FileCheck %s
+
+target datalayout = "e-i64:64-v16:16-v32:32-n16:32:64"
+target triple = "nvptx64-nvidia-cuda"
+
+%class.float3 = type { float, float, float }
+
+; Function Attrs: nounwind
+; CHECK-LABEL: some_kernel
+define void @some_kernel(%class.float3* nocapture %dst) #0 {
+_ZL11compute_vecRK6float3jb.exit:
+ %ret_vec.sroa.8.i = alloca float, align 4
+ %0 = tail call i32 @llvm.ptx.read.ctaid.x()
+ %1 = tail call i32 @llvm.ptx.read.ntid.x()
+ %2 = mul nsw i32 %1, %0
+ %3 = tail call i32 @llvm.ptx.read.tid.x()
+ %4 = add nsw i32 %2, %3
+ %5 = zext i32 %4 to i64
+ %6 = bitcast float* %ret_vec.sroa.8.i to i8*
+ call void @llvm.lifetime.start(i64 4, i8* %6)
+ %7 = and i32 %4, 15
+ %8 = icmp eq i32 %7, 0
+ %9 = select i1 %8, float 0.000000e+00, float -1.000000e+00
+ store float %9, float* %ret_vec.sroa.8.i, align 4
+; CHECK: setp.lt.f32 %p{{[0-9]+}}, %f{{[0-9]+}}, 0f00000000
+ %10 = fcmp olt float %9, 0.000000e+00
+ %ret_vec.sroa.8.i.val = load float* %ret_vec.sroa.8.i, align 4
+ %11 = select i1 %10, float 0.000000e+00, float %ret_vec.sroa.8.i.val
+ call void @llvm.lifetime.end(i64 4, i8* %6)
+ %12 = getelementptr inbounds %class.float3* %dst, i64 %5, i32 0
+ store float 0.000000e+00, float* %12, align 4
+ %13 = getelementptr inbounds %class.float3* %dst, i64 %5, i32 1
+ store float %11, float* %13, align 4
+ %14 = getelementptr inbounds %class.float3* %dst, i64 %5, i32 2
+ store float 0.000000e+00, float* %14, align 4
+ ret void
+}
+
+; Function Attrs: nounwind readnone
+declare i32 @llvm.ptx.read.ctaid.x() #1
+
+; Function Attrs: nounwind readnone
+declare i32 @llvm.ptx.read.ntid.x() #1
+
+; Function Attrs: nounwind readnone
+declare i32 @llvm.ptx.read.tid.x() #1
+
+; Function Attrs: nounwind
+declare void @llvm.lifetime.start(i64, i8* nocapture) #2
+
+; Function Attrs: nounwind
+declare void @llvm.lifetime.end(i64, i8* nocapture) #2
+
+attributes #0 = { nounwind "less-precise-fpmad"="false" "no-frame-pointer-elim"="true" "no-frame-pointer-elim-non-leaf" "no-infs-fp-math"="true" "no-nans-fp-math"="true" "stack-protector-buffer-size"="8" "unsafe-fp-math"="true" "use-soft-float"="false" }
+attributes #1 = { nounwind readnone }
+attributes #2 = { nounwind }
+
+!nvvm.annotations = !{!0}
+!llvm.ident = !{!1}
+
+!0 = !{void (%class.float3*)* @some_kernel, !"kernel", i32 1}
+!1 = !{!"clang version 3.5.1 (tags/RELEASE_351/final)"}
diff --git a/test/CodeGen/NVPTX/call-with-alloca-buffer.ll b/test/CodeGen/NVPTX/call-with-alloca-buffer.ll
index 83d4916..8483112 100644
--- a/test/CodeGen/NVPTX/call-with-alloca-buffer.ll
+++ b/test/CodeGen/NVPTX/call-with-alloca-buffer.ll
@@ -63,4 +63,4 @@ declare void @callee(float*, i8*)
!nvvm.annotations = !{!0}
-!0 = metadata !{void (float*)* @kernel_func, metadata !"kernel", i32 1}
+!0 = !{void (float*)* @kernel_func, !"kernel", i32 1}
diff --git a/test/CodeGen/NVPTX/calling-conv.ll b/test/CodeGen/NVPTX/calling-conv.ll
index 190a146..3b03442 100644
--- a/test/CodeGen/NVPTX/calling-conv.ll
+++ b/test/CodeGen/NVPTX/calling-conv.ll
@@ -27,4 +27,4 @@ define void @metadata_kernel(float* %a) {
!nvvm.annotations = !{!1}
-!1 = metadata !{void (float*)* @metadata_kernel, metadata !"kernel", i32 1}
+!1 = !{void (float*)* @metadata_kernel, !"kernel", i32 1}
diff --git a/test/CodeGen/NVPTX/fma-assoc.ll b/test/CodeGen/NVPTX/fma-assoc.ll
new file mode 100644
index 0000000..fc04c61
--- /dev/null
+++ b/test/CodeGen/NVPTX/fma-assoc.ll
@@ -0,0 +1,25 @@
+; RUN: llc < %s -march=nvptx -mcpu=sm_20 -fp-contract=fast | FileCheck %s
+
+define ptx_device float @t1_f32(float %x, float %y, float %z,
+ float %u, float %v) {
+; CHECK: fma.rn.f32 %f{{[0-9]+}}, %f{{[0-9]+}}, %f{{[0-9]+}}, %f{{[0-9]+}};
+; CHECK: fma.rn.f32 %f{{[0-9]+}}, %f{{[0-9]+}}, %f{{[0-9]+}}, %f{{[0-9]+}};
+; CHECK: ret;
+ %a = fmul float %x, %y
+ %b = fmul float %u, %v
+ %c = fadd float %a, %b
+ %d = fadd float %c, %z
+ ret float %d
+}
+
+define ptx_device double @t1_f64(double %x, double %y, double %z,
+ double %u, double %v) {
+; CHECK: fma.rn.f64 %fd{{[0-9]+}}, %fd{{[0-9]+}}, %fd{{[0-9]+}}, %fd{{[0-9]+}};
+; CHECK: fma.rn.f64 %fd{{[0-9]+}}, %fd{{[0-9]+}}, %fd{{[0-9]+}}, %fd{{[0-9]+}};
+; CHECK: ret;
+ %a = fmul double %x, %y
+ %b = fmul double %u, %v
+ %c = fadd double %a, %b
+ %d = fadd double %c, %z
+ ret double %d
+}
diff --git a/test/CodeGen/NVPTX/fma.ll b/test/CodeGen/NVPTX/fma.ll
index 14b5c45..6785a01 100644
--- a/test/CodeGen/NVPTX/fma.ll
+++ b/test/CodeGen/NVPTX/fma.ll
@@ -1,5 +1,8 @@
; RUN: llc < %s -march=nvptx -mcpu=sm_20 -fp-contract=fast | FileCheck %s
+declare float @dummy_f32(float, float) #0
+declare double @dummy_f64(double, double) #0
+
define ptx_device float @t1_f32(float %x, float %y, float %z) {
; CHECK: fma.rn.f32 %f{{[0-9]+}}, %f{{[0-9]+}}, %f{{[0-9]+}}, %f{{[0-9]+}};
; CHECK: ret;
@@ -8,6 +11,17 @@ define ptx_device float @t1_f32(float %x, float %y, float %z) {
ret float %b
}
+define ptx_device float @t2_f32(float %x, float %y, float %z, float %w) {
+; CHECK: fma.rn.f32 %f{{[0-9]+}}, %f{{[0-9]+}}, %f{{[0-9]+}}, %f{{[0-9]+}};
+; CHECK: fma.rn.f32 %f{{[0-9]+}}, %f{{[0-9]+}}, %f{{[0-9]+}}, %f{{[0-9]+}};
+; CHECK: ret;
+ %a = fmul float %x, %y
+ %b = fadd float %a, %z
+ %c = fadd float %a, %w
+ %d = call float @dummy_f32(float %b, float %c)
+ ret float %d
+}
+
define ptx_device double @t1_f64(double %x, double %y, double %z) {
; CHECK: fma.rn.f64 %fd{{[0-9]+}}, %fd{{[0-9]+}}, %fd{{[0-9]+}}, %fd{{[0-9]+}};
; CHECK: ret;
@@ -15,3 +29,14 @@ define ptx_device double @t1_f64(double %x, double %y, double %z) {
%b = fadd double %a, %z
ret double %b
}
+
+define ptx_device double @t2_f64(double %x, double %y, double %z, double %w) {
+; CHECK: fma.rn.f64 %fd{{[0-9]+}}, %fd{{[0-9]+}}, %fd{{[0-9]+}}, %fd{{[0-9]+}};
+; CHECK: fma.rn.f64 %fd{{[0-9]+}}, %fd{{[0-9]+}}, %fd{{[0-9]+}}, %fd{{[0-9]+}};
+; CHECK: ret;
+ %a = fmul double %x, %y
+ %b = fadd double %a, %z
+ %c = fadd double %a, %w
+ %d = call double @dummy_f64(double %b, double %c)
+ ret double %d
+}
diff --git a/test/CodeGen/NVPTX/generic-to-nvvm.ll b/test/CodeGen/NVPTX/generic-to-nvvm.ll
index 2a52798..fb63d6e 100644
--- a/test/CodeGen/NVPTX/generic-to-nvvm.ll
+++ b/test/CodeGen/NVPTX/generic-to-nvvm.ll
@@ -23,4 +23,4 @@ define void @foo(i32* %a, i32* %b) {
!nvvm.annotations = !{!0}
-!0 = metadata !{void (i32*, i32*)* @foo, metadata !"kernel", i32 1}
+!0 = !{void (i32*, i32*)* @foo, !"kernel", i32 1}
diff --git a/test/CodeGen/NVPTX/i1-global.ll b/test/CodeGen/NVPTX/i1-global.ll
index 1dd8ae4..e3fe08e 100644
--- a/test/CodeGen/NVPTX/i1-global.ll
+++ b/test/CodeGen/NVPTX/i1-global.ll
@@ -16,4 +16,4 @@ define void @foo(i1 %p, i32* %out) {
!nvvm.annotations = !{!0}
-!0 = metadata !{void (i1, i32*)* @foo, metadata !"kernel", i32 1}
+!0 = !{void (i1, i32*)* @foo, !"kernel", i32 1}
diff --git a/test/CodeGen/NVPTX/i1-param.ll b/test/CodeGen/NVPTX/i1-param.ll
index f4df874..aac7196 100644
--- a/test/CodeGen/NVPTX/i1-param.ll
+++ b/test/CodeGen/NVPTX/i1-param.ll
@@ -16,4 +16,4 @@ define void @foo(i1 %p, i32* %out) {
!nvvm.annotations = !{!0}
-!0 = metadata !{void (i1, i32*)* @foo, metadata !"kernel", i32 1}
+!0 = !{void (i1, i32*)* @foo, !"kernel", i32 1}
diff --git a/test/CodeGen/NVPTX/managed.ll b/test/CodeGen/NVPTX/managed.ll
index 4d7e781..d3f1604 100644
--- a/test/CodeGen/NVPTX/managed.ll
+++ b/test/CodeGen/NVPTX/managed.ll
@@ -8,4 +8,4 @@
!nvvm.annotations = !{!0}
-!0 = metadata !{i32 addrspace(1)* @managed_g, metadata !"managed", i32 1}
+!0 = !{i32 addrspace(1)* @managed_g, !"managed", i32 1}
diff --git a/test/CodeGen/NVPTX/noduplicate-syncthreads.ll b/test/CodeGen/NVPTX/noduplicate-syncthreads.ll
index 64745fc..841bbc3 100644
--- a/test/CodeGen/NVPTX/noduplicate-syncthreads.ll
+++ b/test/CodeGen/NVPTX/noduplicate-syncthreads.ll
@@ -70,5 +70,5 @@ if.end17: ; preds = %if.else13, %if.then
; Function Attrs: noduplicate nounwind
declare void @llvm.cuda.syncthreads() #2
-!0 = metadata !{void (float*)* @foo, metadata !"kernel", i32 1}
-!1 = metadata !{null, metadata !"align", i32 8}
+!0 = !{void (float*)* @foo, !"kernel", i32 1}
+!1 = !{null, !"align", i32 8}
diff --git a/test/CodeGen/NVPTX/nounroll.ll b/test/CodeGen/NVPTX/nounroll.ll
new file mode 100644
index 0000000..db96d2a
--- /dev/null
+++ b/test/CodeGen/NVPTX/nounroll.ll
@@ -0,0 +1,37 @@
+; RUN: llc < %s -march=nvptx64 -mcpu=sm_20 | FileCheck %s
+
+target datalayout = "e-i64:64-v16:16-v32:32-n16:32:64"
+target triple = "nvptx64-unknown-unknown"
+
+; Compiled from the following CUDA code:
+;
+; #pragma nounroll
+; for (int i = 0; i < 2; ++i)
+; output[i] = input[i];
+define void @nounroll(float* %input, float* %output) {
+; CHECK-LABEL: .visible .func nounroll(
+entry:
+ br label %for.body
+
+for.body:
+; CHECK: .pragma "nounroll"
+ %i.06 = phi i32 [ 0, %entry ], [ %inc, %for.body ]
+ %idxprom = sext i32 %i.06 to i64
+ %arrayidx = getelementptr inbounds float* %input, i64 %idxprom
+ %0 = load float* %arrayidx, align 4
+; CHECK: ld.f32
+ %arrayidx2 = getelementptr inbounds float* %output, i64 %idxprom
+ store float %0, float* %arrayidx2, align 4
+; CHECK: st.f32
+ %inc = add nuw nsw i32 %i.06, 1
+ %exitcond = icmp eq i32 %inc, 2
+ br i1 %exitcond, label %for.end, label %for.body, !llvm.loop !0
+; CHECK-NOT: ld.f32
+; CHECK-NOT: st.f32
+
+for.end:
+ ret void
+}
+
+!0 = distinct !{!0, !1}
+!1 = !{!"llvm.loop.unroll.disable"}
diff --git a/test/CodeGen/NVPTX/nvcl-param-align.ll b/test/CodeGen/NVPTX/nvcl-param-align.ll
new file mode 100644
index 0000000..c1a489f
--- /dev/null
+++ b/test/CodeGen/NVPTX/nvcl-param-align.ll
@@ -0,0 +1,16 @@
+; RUN: llc < %s -march=nvptx -mcpu=sm_20 | FileCheck %s
+
+target triple = "nvptx-unknown-nvcl"
+
+; CHECK-LABEL: .entry foo(
+define void @foo(i64 %img, i64 %sampler, <5 x float>* %v) {
+; The parameter alignment should be the next power of 2 greater than or
+; equal to 5 * sizeof(float) = 20 bytes, i.e. 32.
+; CHECK: .param .u32 .ptr .align 32 foo_param_2
+ ret void
+}
+
+!nvvm.annotations = !{!1, !2, !3}
+!1 = !{void (i64, i64, <5 x float>*)* @foo, !"kernel", i32 1}
+!2 = !{void (i64, i64, <5 x float>*)* @foo, !"rdoimage", i32 0}
+!3 = !{void (i64, i64, <5 x float>*)* @foo, !"sampler", i32 1}
diff --git a/test/CodeGen/NVPTX/refl1.ll b/test/CodeGen/NVPTX/refl1.ll
index 4aeff09..e8782ea 100644
--- a/test/CodeGen/NVPTX/refl1.ll
+++ b/test/CodeGen/NVPTX/refl1.ll
@@ -36,4 +36,4 @@ attributes #2 = { alwaysinline inlinehint nounwind readnone }
!nvvm.annotations = !{!0}
-!0 = metadata !{void (float*)* @foo, metadata !"kernel", i32 1}
+!0 = !{void (float*)* @foo, !"kernel", i32 1}
diff --git a/test/CodeGen/NVPTX/simple-call.ll b/test/CodeGen/NVPTX/simple-call.ll
index ab6f423..1b41361 100644
--- a/test/CodeGen/NVPTX/simple-call.ll
+++ b/test/CodeGen/NVPTX/simple-call.ll
@@ -23,4 +23,4 @@ define void @kernel_func(float* %a) {
!nvvm.annotations = !{!1}
-!1 = metadata !{void (float*)* @kernel_func, metadata !"kernel", i32 1}
+!1 = !{void (float*)* @kernel_func, !"kernel", i32 1}
diff --git a/test/CodeGen/NVPTX/surf-read-cuda.ll b/test/CodeGen/NVPTX/surf-read-cuda.ll
index 10a1ecc..ed02134 100644
--- a/test/CodeGen/NVPTX/surf-read-cuda.ll
+++ b/test/CodeGen/NVPTX/surf-read-cuda.ll
@@ -47,7 +47,7 @@ define void @bar(float* %red, i32 %idx) {
!nvvm.annotations = !{!1, !2, !3}
-!1 = metadata !{void (i64, float*, i32)* @foo, metadata !"kernel", i32 1}
-!2 = metadata !{void (float*, i32)* @bar, metadata !"kernel", i32 1}
-!3 = metadata !{i64 addrspace(1)* @surf0, metadata !"surface", i32 1}
+!1 = !{void (i64, float*, i32)* @foo, !"kernel", i32 1}
+!2 = !{void (float*, i32)* @bar, !"kernel", i32 1}
+!3 = !{i64 addrspace(1)* @surf0, !"surface", i32 1}
diff --git a/test/CodeGen/NVPTX/surf-read.ll b/test/CodeGen/NVPTX/surf-read.ll
index a69d03e..7383722 100644
--- a/test/CodeGen/NVPTX/surf-read.ll
+++ b/test/CodeGen/NVPTX/surf-read.ll
@@ -16,5 +16,5 @@ define void @foo(i64 %img, float* %red, i32 %idx) {
}
!nvvm.annotations = !{!1, !2}
-!1 = metadata !{void (i64, float*, i32)* @foo, metadata !"kernel", i32 1}
-!2 = metadata !{void (i64, float*, i32)* @foo, metadata !"rdwrimage", i32 0}
+!1 = !{void (i64, float*, i32)* @foo, !"kernel", i32 1}
+!2 = !{void (i64, float*, i32)* @foo, !"rdwrimage", i32 0}
diff --git a/test/CodeGen/NVPTX/surf-write-cuda.ll b/test/CodeGen/NVPTX/surf-write-cuda.ll
index 654c47f..da55a24 100644
--- a/test/CodeGen/NVPTX/surf-write-cuda.ll
+++ b/test/CodeGen/NVPTX/surf-write-cuda.ll
@@ -36,7 +36,7 @@ define void @bar(i32 %val, i32 %idx) {
!nvvm.annotations = !{!1, !2, !3}
-!1 = metadata !{void (i64, i32, i32)* @foo, metadata !"kernel", i32 1}
-!2 = metadata !{void (i32, i32)* @bar, metadata !"kernel", i32 1}
-!3 = metadata !{i64 addrspace(1)* @surf0, metadata !"surface", i32 1}
+!1 = !{void (i64, i32, i32)* @foo, !"kernel", i32 1}
+!2 = !{void (i32, i32)* @bar, !"kernel", i32 1}
+!3 = !{i64 addrspace(1)* @surf0, !"surface", i32 1}
diff --git a/test/CodeGen/NVPTX/surf-write.ll b/test/CodeGen/NVPTX/surf-write.ll
index 880231f..5098d2a 100644
--- a/test/CodeGen/NVPTX/surf-write.ll
+++ b/test/CodeGen/NVPTX/surf-write.ll
@@ -12,5 +12,5 @@ define void @foo(i64 %img, i32 %val, i32 %idx) {
}
!nvvm.annotations = !{!1, !2}
-!1 = metadata !{void (i64, i32, i32)* @foo, metadata !"kernel", i32 1}
-!2 = metadata !{void (i64, i32, i32)* @foo, metadata !"wroimage", i32 0}
+!1 = !{void (i64, i32, i32)* @foo, !"kernel", i32 1}
+!2 = !{void (i64, i32, i32)* @foo, !"wroimage", i32 0}
diff --git a/test/CodeGen/NVPTX/tex-read-cuda.ll b/test/CodeGen/NVPTX/tex-read-cuda.ll
index ee0cefa..c5b5600 100644
--- a/test/CodeGen/NVPTX/tex-read-cuda.ll
+++ b/test/CodeGen/NVPTX/tex-read-cuda.ll
@@ -41,6 +41,6 @@ define void @bar(float* %red, i32 %idx) {
}
!nvvm.annotations = !{!1, !2, !3}
-!1 = metadata !{void (i64, float*, i32)* @foo, metadata !"kernel", i32 1}
-!2 = metadata !{void (float*, i32)* @bar, metadata !"kernel", i32 1}
-!3 = metadata !{i64 addrspace(1)* @tex0, metadata !"texture", i32 1}
+!1 = !{void (i64, float*, i32)* @foo, !"kernel", i32 1}
+!2 = !{void (float*, i32)* @bar, !"kernel", i32 1}
+!3 = !{i64 addrspace(1)* @tex0, !"texture", i32 1}
diff --git a/test/CodeGen/NVPTX/tex-read.ll b/test/CodeGen/NVPTX/tex-read.ll
index 55e4bfc..6e0fda6 100644
--- a/test/CodeGen/NVPTX/tex-read.ll
+++ b/test/CodeGen/NVPTX/tex-read.ll
@@ -15,6 +15,6 @@ define void @foo(i64 %img, i64 %sampler, float* %red, i32 %idx) {
}
!nvvm.annotations = !{!1, !2, !3}
-!1 = metadata !{void (i64, i64, float*, i32)* @foo, metadata !"kernel", i32 1}
-!2 = metadata !{void (i64, i64, float*, i32)* @foo, metadata !"rdoimage", i32 0}
-!3 = metadata !{void (i64, i64, float*, i32)* @foo, metadata !"sampler", i32 1}
+!1 = !{void (i64, i64, float*, i32)* @foo, !"kernel", i32 1}
+!2 = !{void (i64, i64, float*, i32)* @foo, !"rdoimage", i32 0}
+!3 = !{void (i64, i64, float*, i32)* @foo, !"sampler", i32 1}
diff --git a/test/CodeGen/NVPTX/texsurf-queries.ll b/test/CodeGen/NVPTX/texsurf-queries.ll
index c7637cc..e56eb5d 100644
--- a/test/CodeGen/NVPTX/texsurf-queries.ll
+++ b/test/CodeGen/NVPTX/texsurf-queries.ll
@@ -99,5 +99,5 @@ define i32 @s3() {
!nvvm.annotations = !{!1, !2}
-!1 = metadata !{i64 addrspace(1)* @tex0, metadata !"texture", i32 1}
-!2 = metadata !{i64 addrspace(1)* @surf0, metadata !"surface", i32 1}
+!1 = !{i64 addrspace(1)* @tex0, !"texture", i32 1}
+!2 = !{i64 addrspace(1)* @surf0, !"surface", i32 1}
diff --git a/test/CodeGen/NVPTX/vector-global.ll b/test/CodeGen/NVPTX/vector-global.ll
new file mode 100644
index 0000000..a463bee
--- /dev/null
+++ b/test/CodeGen/NVPTX/vector-global.ll
@@ -0,0 +1,9 @@
+; RUN: llc < %s -march=nvptx64 -mcpu=sm_20 | FileCheck %s
+
+target datalayout = "e-i64:64-v16:16-v32:32-n16:32:64"
+target triple = "nvptx64-nvidia-cuda"
+
+@g1 = external global <4 x i32> ; external global variable
+; CHECK: .extern .global .align 16 .b8 g1[16];
+@g2 = global <4 x i32> zeroinitializer ; module-level global variable
+; CHECK: .visible .global .align 16 .b8 g2[16];
diff --git a/test/CodeGen/NVPTX/weak-linkage.ll b/test/CodeGen/NVPTX/weak-linkage.ll
index 7a13357..5df57b2 100644
--- a/test/CodeGen/NVPTX/weak-linkage.ll
+++ b/test/CodeGen/NVPTX/weak-linkage.ll
@@ -1,11 +1,17 @@
; RUN: llc < %s -march=nvptx -mcpu=sm_20 | FileCheck %s
-
+; CHECK: // .weak foo
; CHECK: .weak .func foo
define weak void @foo() {
ret void
}
+; CHECK: // .weak baz
+; CHECK: .weak .func baz
+define weak_odr void @baz() {
+ ret void
+}
+
; CHECK: .visible .func bar
define void @bar() {
ret void
diff --git a/test/CodeGen/PowerPC/2007-03-24-cntlzd.ll b/test/CodeGen/PowerPC/2007-03-24-cntlzd.ll
index 3620b0e..3624b51 100644
--- a/test/CodeGen/PowerPC/2007-03-24-cntlzd.ll
+++ b/test/CodeGen/PowerPC/2007-03-24-cntlzd.ll
@@ -1,4 +1,6 @@
-; RUN: llc < %s -march=ppc64 -mcpu=g5 | grep cntlzd
+; RUN: llc -mcpu=g5 < %s | FileCheck %s
+target datalayout = "E-m:e-i64:64-n32:64"
+target triple = "powerpc64-unknown-linux-gnu"
define i32 @_ZNK4llvm5APInt17countLeadingZerosEv(i64 *%t) nounwind {
%tmp19 = load i64* %t
@@ -7,6 +9,12 @@ define i32 @_ZNK4llvm5APInt17countLeadingZerosEv(i64 *%t) nounwind {
%tmp89 = add i32 %tmp23, -64 ; <i32> [#uses=1]
%tmp90 = add i32 %tmp89, 0 ; <i32> [#uses=1]
ret i32 %tmp90
+
+; CHECK-LABEL: @_ZNK4llvm5APInt17countLeadingZerosEv
+; CHECK: ld [[REG1:[0-9]+]], 0(3)
+; CHECK-NEXT: cntlzd [[REG2:[0-9]+]], [[REG1]]
+; CHECK-NEXT: addi 3, [[REG2]], -64
+; CHECK-NEXT: blr
}
declare i64 @llvm.ctlz.i64(i64, i1)
diff --git a/test/CodeGen/PowerPC/2011-12-05-NoSpillDupCR.ll b/test/CodeGen/PowerPC/2011-12-05-NoSpillDupCR.ll
index 3acd01d..e7bc5bf 100644
--- a/test/CodeGen/PowerPC/2011-12-05-NoSpillDupCR.ll
+++ b/test/CodeGen/PowerPC/2011-12-05-NoSpillDupCR.ll
@@ -183,4 +183,4 @@ declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture, i8* nocapture, i64, i32,
declare i32 @puts(i8* nocapture) nounwind
-!3 = metadata !{metadata !"branch_weights", i32 64, i32 4}
+!3 = !{!"branch_weights", i32 64, i32 4}
diff --git a/test/CodeGen/PowerPC/2011-12-06-SpillAndRestoreCR.ll b/test/CodeGen/PowerPC/2011-12-06-SpillAndRestoreCR.ll
index 4a1a512..a6223d4 100644
--- a/test/CodeGen/PowerPC/2011-12-06-SpillAndRestoreCR.ll
+++ b/test/CodeGen/PowerPC/2011-12-06-SpillAndRestoreCR.ll
@@ -217,4 +217,4 @@ declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture, i8* nocapture, i64, i32,
declare i32 @puts(i8* nocapture) nounwind
-!3 = metadata !{metadata !"branch_weights", i32 64, i32 4}
+!3 = !{!"branch_weights", i32 64, i32 4}
diff --git a/test/CodeGen/PowerPC/Frames-large.ll b/test/CodeGen/PowerPC/Frames-large.ll
index 0ccea42..5b8aef4 100644
--- a/test/CodeGen/PowerPC/Frames-large.ll
+++ b/test/CodeGen/PowerPC/Frames-large.ll
@@ -1,9 +1,8 @@
-; RUN: llvm-as < %s > %t.bc
-; RUN: llc < %t.bc -march=ppc32 | FileCheck %s -check-prefix=PPC32-NOFP
-; RUN: llc < %t.bc -march=ppc32 -disable-fp-elim | FileCheck %s -check-prefix=PPC32-FP
+; RUN: llc < %s -march=ppc32 | FileCheck %s -check-prefix=PPC32-NOFP
+; RUN: llc < %s -march=ppc32 -disable-fp-elim | FileCheck %s -check-prefix=PPC32-FP
-; RUN: llc < %t.bc -march=ppc64 | FileCheck %s -check-prefix=PPC64-NOFP
-; RUN: llc < %t.bc -march=ppc64 -disable-fp-elim | FileCheck %s -check-prefix=PPC64-FP
+; RUN: llc < %s -march=ppc64 | FileCheck %s -check-prefix=PPC64-NOFP
+; RUN: llc < %s -march=ppc64 -disable-fp-elim | FileCheck %s -check-prefix=PPC64-FP
target triple = "powerpc-apple-darwin8"
diff --git a/test/CodeGen/PowerPC/aa-tbaa.ll b/test/CodeGen/PowerPC/aa-tbaa.ll
index 1939841..0e7ff3d 100644
--- a/test/CodeGen/PowerPC/aa-tbaa.ll
+++ b/test/CodeGen/PowerPC/aa-tbaa.ll
@@ -35,7 +35,7 @@ next:
; CHECK: blr
}
-!0 = metadata !{ metadata !"root" }
-!1 = metadata !{ metadata !"set1", metadata !0 }
-!2 = metadata !{ metadata !"set2", metadata !0 }
+!0 = !{ !"root" }
+!1 = !{ !"set1", !0 }
+!2 = !{ !"set2", !0 }
diff --git a/test/CodeGen/PowerPC/add-fi.ll b/test/CodeGen/PowerPC/add-fi.ll
new file mode 100644
index 0000000..18892c8
--- /dev/null
+++ b/test/CodeGen/PowerPC/add-fi.ll
@@ -0,0 +1,24 @@
+; RUN: llc -mcpu=ppc64 < %s | FileCheck %s
+target datalayout = "E-m:e-i64:64-n32:64"
+target triple = "powerpc64-unknown-linux-gnu"
+
+define i32* @test1() {
+ %X = alloca { i32, i32 }
+ %Y = getelementptr {i32,i32}* %X, i32 0, i32 1
+ ret i32* %Y
+
+; CHECK-LABEL: @test1
+; CHECK: addi 3, 1, -4
+; CHECK: blr
+}
+
+define i32* @test2() {
+ %X = alloca { i32, i32, i32, i32 }
+ %Y = getelementptr {i32,i32,i32,i32}* %X, i32 0, i32 3
+ ret i32* %Y
+
+; CHECK-LABEL: @test2
+; CHECK: addi 3, 1, -4
+; CHECK: blr
+}
+
diff --git a/test/CodeGen/PowerPC/addi-licm.ll b/test/CodeGen/PowerPC/addi-licm.ll
new file mode 100644
index 0000000..070d86f
--- /dev/null
+++ b/test/CodeGen/PowerPC/addi-licm.ll
@@ -0,0 +1,64 @@
+; RUN: llc -mcpu=pwr7 -disable-ppc-preinc-prep < %s | FileCheck %s
+; RUN: llc -mcpu=pwr7 < %s | FileCheck %s -check-prefix=PIP
+target datalayout = "E-m:e-i64:64-n32:64"
+target triple = "powerpc64-unknown-linux-gnu"
+
+; Function Attrs: nounwind
+define double @foo() #1 {
+entry:
+ %x = alloca [2048 x float], align 4
+ %y = alloca [2048 x float], align 4
+ %0 = bitcast [2048 x float]* %x to i8*
+ call void @llvm.lifetime.start(i64 8192, i8* %0) #2
+ %1 = bitcast [2048 x float]* %y to i8*
+ call void @llvm.lifetime.start(i64 8192, i8* %1) #2
+ br label %for.body.i
+
+; CHECK-LABEL: @foo
+; CHECK: addi [[REG1:[0-9]+]], 1,
+; CHECK: addi [[REG2:[0-9]+]], 1,
+; CHECK: %for.body.i
+; CHECK-DAG: lfsx {{[0-9]+}}, [[REG1]],
+; CHECK-DAG: lfsx {{[0-9]+}}, [[REG2]],
+; CHECK: blr
+
+; PIP-LABEL: @foo
+; PIP: addi [[REG1:[0-9]+]], 1,
+; PIP: addi [[REG2:[0-9]+]], 1,
+; PIP: %for.body.i
+; PIP-DAG: lfsu {{[0-9]+}}, 4([[REG1]])
+; PIP-DAG: lfsu {{[0-9]+}}, 4([[REG2]])
+; PIP: blr
+
+for.body.i: ; preds = %for.body.i.preheader, %for.body.i
+ %accumulator.09.i = phi double [ %add.i, %for.body.i ], [ 0.000000e+00, %entry ]
+ %i.08.i = phi i64 [ %inc.i, %for.body.i ], [ 0, %entry ]
+ %arrayidx.i = getelementptr inbounds [2048 x float]* %x, i64 0, i64 %i.08.i
+ %v14 = load float* %arrayidx.i, align 4
+ %conv.i = fpext float %v14 to double
+ %arrayidx1.i = getelementptr inbounds [2048 x float]* %y, i64 0, i64 %i.08.i
+ %v15 = load float* %arrayidx1.i, align 4
+ %conv2.i = fpext float %v15 to double
+ %mul.i = fmul double %conv.i, %conv2.i
+ %add.i = fadd double %accumulator.09.i, %mul.i
+ %inc.i = add nuw nsw i64 %i.08.i, 1
+ %exitcond.i = icmp eq i64 %i.08.i, 2047
+ br i1 %exitcond.i, label %loop.exit, label %for.body.i
+
+loop.exit: ; preds = %for.body.i
+ ret double %accumulator.09.i
+}
+
+; Function Attrs: nounwind
+declare void @llvm.lifetime.start(i64, i8* nocapture) #2
+
+declare void @bar(float*, float*)
+
+; Function Attrs: nounwind
+declare void @llvm.lifetime.end(i64, i8* nocapture) #2
+
+attributes #0 = { nounwind readonly }
+attributes #1 = { nounwind }
+attributes #2 = { nounwind }
+
+
diff --git a/test/CodeGen/PowerPC/arr-fp-arg-no-copy.ll b/test/CodeGen/PowerPC/arr-fp-arg-no-copy.ll
new file mode 100644
index 0000000..fd430a6
--- /dev/null
+++ b/test/CodeGen/PowerPC/arr-fp-arg-no-copy.ll
@@ -0,0 +1,23 @@
+; RUN: llc -mcpu=ppc64 < %s | FileCheck %s
+target datalayout = "E-m:e-i64:64-n32:64"
+target triple = "powerpc64-unknown-linux-gnu"
+
+; Function Attrs: nounwind
+define void @bar() #0 {
+entry:
+ tail call void @xxx([2 x i64] [i64 4607182418800017408, i64 4611686018427387904]) #0
+ ret void
+
+; CHECK-LABEL: @bar
+; CHECK-DAG: li [[REG1:[0-9]+]], 1023
+; CHECK-DAG: li [[REG2:[0-9]+]], {{1$}}
+; CHECK-DAG: sldi 3, [[REG1]], 52
+; CHECK-DAG: sldi 4, [[REG2]], 62
+; CHECK: bl xxx
+; CHECK: blr
+}
+
+declare void @xxx([2 x i64])
+
+attributes #0 = { nounwind }
+
diff --git a/test/CodeGen/PowerPC/asm-Zy.ll b/test/CodeGen/PowerPC/asm-Zy.ll
index 691165f..6d1ab0e 100644
--- a/test/CodeGen/PowerPC/asm-Zy.ll
+++ b/test/CodeGen/PowerPC/asm-Zy.ll
@@ -10,5 +10,5 @@ entry:
; CHECK: lwbrx 3, 0,
}
-!0 = metadata !{i32 101688}
+!0 = !{i32 101688}
diff --git a/test/CodeGen/PowerPC/asm-constraints.ll b/test/CodeGen/PowerPC/asm-constraints.ll
index 998b618..9bf8b75 100644
--- a/test/CodeGen/PowerPC/asm-constraints.ll
+++ b/test/CodeGen/PowerPC/asm-constraints.ll
@@ -30,15 +30,16 @@ entry:
}
; CHECK-LABEL: @foo
-; CHECK: ld [[REG:[0-9]+]],0(4)
-; CHECK-NEXT: cmpw [[REG]],[[REG]]
-; CHECK-NEXT: bne- 1f
-; CHECK-NEXT: 1: isync
+; CHECK: ld [[REG:[0-9]+]], 0(4)
+; CHECK: cmpw 0, [[REG]], [[REG]]
+; CHECK: bne- 0, .Ltmp[[TMP:[0-9]+]]
+; CHECK: .Ltmp[[TMP]]:
+; CHECK: isync
attributes #0 = { nounwind "less-precise-fpmad"="false" "no-frame-pointer-elim"="true" "no-frame-pointer-elim-non-leaf" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" }
attributes #1 = { nounwind }
!llvm.ident = !{!0}
-!0 = metadata !{metadata !"clang version 3.6.0 (trunk 217557)"}
-!1 = metadata !{i32 67, i32 91, i32 110, i32 126}
+!0 = !{!"clang version 3.6.0 (trunk 217557)"}
+!1 = !{i32 67, i32 91, i32 110, i32 126}
diff --git a/test/CodeGen/PowerPC/bperm.ll b/test/CodeGen/PowerPC/bperm.ll
new file mode 100644
index 0000000..c489c1f
--- /dev/null
+++ b/test/CodeGen/PowerPC/bperm.ll
@@ -0,0 +1,279 @@
+; RUN: llc -mcpu=pwr7 < %s | FileCheck %s
+target datalayout = "E-m:e-i64:64-n32:64"
+target triple = "powerpc64-unknown-linux-gnu"
+
+; Function Attrs: nounwind readnone
+define zeroext i32 @bs4(i32 zeroext %a) #0 {
+entry:
+ %0 = tail call i32 @llvm.bswap.i32(i32 %a)
+ ret i32 %0
+
+; CHECK-LABEL: @bs4
+; CHECK: rlwinm [[REG1:[0-9]+]], 3, 8, 0, 31
+; CHECK: rlwimi [[REG1]], 3, 24, 16, 23
+; CHECK: rlwimi [[REG1]], 3, 24, 0, 7
+; CHECK: mr 3, [[REG1]]
+; CHECK: blr
+}
+
+define i64 @bs8(i64 %x) #0 {
+entry:
+ %0 = tail call i64 @llvm.bswap.i64(i64 %x)
+ ret i64 %0
+
+; CHECK-LABEL: @bs8
+; CHECK-DAG: rldicl [[REG1:[0-9]+]], 3, 16, 0
+; CHECK-DAG: rldicl [[REG2:[0-9]+]], 3, 8, 0
+; CHECK-DAG: rldicl [[REG3:[0-9]+]], 3, 24, 0
+; CHECK-DAG: rldimi [[REG2]], [[REG1]], 8, 48
+; CHECK-DAG: rldicl [[REG4:[0-9]+]], 3, 32, 0
+; CHECK-DAG: rldimi [[REG2]], [[REG3]], 16, 40
+; CHECK-DAG: rldicl [[REG5:[0-9]+]], 3, 48, 0
+; CHECK-DAG: rldimi [[REG2]], [[REG4]], 24, 32
+; CHECK-DAG: rldicl [[REG6:[0-9]+]], 3, 56, 0
+; CHECK-DAG: rldimi [[REG2]], [[REG5]], 40, 16
+; CHECK-DAG: rldimi [[REG2]], [[REG6]], 48, 8
+; CHECK-DAG: rldimi [[REG2]], 3, 56, 0
+; CHECK: mr 3, [[REG2]]
+; CHECK: blr
+}
+
+define i64 @test1(i64 %i0, i64 %i1) #0 {
+entry:
+ %0 = lshr i64 %i1, 8
+ %and = and i64 %0, 5963776000
+ ret i64 %and
+
+; CHECK-LABEL: @test1
+; CHECK-DAG: li [[REG1:[0-9]+]], 11375
+; CHECK-DAG: rldicl [[REG3:[0-9]+]], 4, 56, 0
+; CHECK-DAG: sldi [[REG2:[0-9]+]], [[REG1]], 19
+; CHECK: and 3, [[REG3]], [[REG2]]
+; CHECK: blr
+}
+
+define i64 @test2(i64 %i0, i64 %i1) #0 {
+entry:
+ %0 = lshr i64 %i1, 6
+ %and = and i64 %0, 133434808670355456
+ ret i64 %and
+
+; CHECK-LABEL: @test2
+; CHECK-DAG: lis [[REG1:[0-9]+]], 474
+; CHECK-DAG: rldicl [[REG5:[0-9]+]], 4, 58, 0
+; CHECK-DAG: ori [[REG2:[0-9]+]], [[REG1]], 3648
+; CHECK-DAG: sldi [[REG3:[0-9]+]], [[REG2]], 32
+; CHECK-DAG: oris [[REG4:[0-9]+]], [[REG3]], 25464
+; CHECK: and 3, [[REG5]], [[REG4]]
+; CHECK: blr
+}
+
+define i64 @test3(i64 %i0, i64 %i1) #0 {
+entry:
+ %0 = shl i64 %i0, 34
+ %and = and i64 %0, 191795733152661504
+ ret i64 %and
+
+; CHECK-LABEL: @test3
+; CHECK-DAG: lis [[REG1:[0-9]+]], 170
+; CHECK-DAG: rldicl [[REG4:[0-9]+]], 3, 34, 0
+; CHECK-DAG: ori [[REG2:[0-9]+]], [[REG1]], 22861
+; CHECK-DAG: sldi [[REG3:[0-9]+]], [[REG2]], 34
+; CHECK: and 3, [[REG4]], [[REG3]]
+; CHECK: blr
+}
+
+define i64 @test4(i64 %i0, i64 %i1) #0 {
+entry:
+ %0 = lshr i64 %i1, 15
+ %and = and i64 %0, 58195968
+ ret i64 %and
+
+; CHECK-LABEL: @test4
+; CHECK: rldicl [[REG1:[0-9]+]], 4, 49, 0
+; CHECK: andis. 3, [[REG1]], 888
+; CHECK: blr
+}
+
+define i64 @test5(i64 %i0, i64 %i1) #0 {
+entry:
+ %0 = shl i64 %i1, 12
+ %and = and i64 %0, 127252959854592
+ ret i64 %and
+
+; CHECK-LABEL: @test5
+; CHECK-DAG: lis [[REG1:[0-9]+]], 3703
+; CHECK-DAG: rldicl [[REG4:[0-9]+]], 4, 12, 0
+; CHECK-DAG: ori [[REG2:[0-9]+]], [[REG1]], 35951
+; CHECK-DAG: sldi [[REG3:[0-9]+]], [[REG2]], 19
+; CHECK: and 3, [[REG4]], [[REG3]]
+; CHECK: blr
+}
+
+; Function Attrs: nounwind readnone
+define zeroext i32 @test6(i32 zeroext %x) #0 {
+entry:
+ %and = lshr i32 %x, 16
+ %shr = and i32 %and, 255
+ %and1 = shl i32 %x, 16
+ %shl = and i32 %and1, 16711680
+ %or = or i32 %shr, %shl
+ ret i32 %or
+
+; CHECK-LABEL: @test6
+; CHECK: rlwinm [[REG1:[0-9]+]], 3, 16, 24, 31
+; CHECK: rlwimi [[REG1]], 3, 16, 8, 15
+; CHECK: mr 3, [[REG1]]
+; CHECK: blr
+}
+
+define i64 @test7(i64 %i0, i64 %i1) #0 {
+entry:
+ %0 = lshr i64 %i0, 5
+ %and = and i64 %0, 58195968
+ ret i64 %and
+
+; CHECK-LABEL: @test7
+; CHECK: rlwinm [[REG1:[0-9]+]], 3, 27, 9, 12
+; CHECK: rlwimi [[REG1]], 3, 27, 6, 7
+; CHECK: mr 3, [[REG1]]
+; CHECK: blr
+}
+
+define i64 @test8(i64 %i0, i64 %i1) #0 {
+entry:
+ %0 = lshr i64 %i0, 1
+ %and = and i64 %0, 169172533248
+ ret i64 %and
+
+; CHECK-LABEL: @test8
+; CHECK-DAG: lis [[REG1:[0-9]+]], 4
+; CHECK-DAG: rldicl [[REG4:[0-9]+]], 3, 63, 0
+; CHECK-DAG: ori [[REG2:[0-9]+]], [[REG1]], 60527
+; CHECK-DAG: sldi [[REG3:[0-9]+]], [[REG2]], 19
+; CHECK: and 3, [[REG4]], [[REG3]]
+; CHECK: blr
+}
+
+define i64 @test9(i64 %i0, i64 %i1) #0 {
+entry:
+ %0 = lshr i64 %i1, 14
+ %and = and i64 %0, 18848677888
+ %1 = shl i64 %i1, 51
+ %and3 = and i64 %1, 405323966463344640
+ %or4 = or i64 %and, %and3
+ ret i64 %or4
+
+; CHECK-LABEL: @test9
+; CHECK-DAG: lis [[REG1:[0-9]+]], 1440
+; CHECK-DAG: rldicl [[REG5:[0-9]+]], 4, 62, 0
+; CHECK-DAG: rldicl [[REG6:[0-9]+]], 4, 50, 0
+; CHECK-DAG: ori [[REG2:[0-9]+]], [[REG1]], 4
+; CHECK-DAG: rldimi [[REG6]], [[REG5]], 53, 0
+; CHECK-DAG: sldi [[REG3:[0-9]+]], [[REG2]], 32
+; CHECK-DAG: oris [[REG4:[0-9]+]], [[REG3]], 25464
+; CHECK: and 3, [[REG6]], [[REG4]]
+; CHECK: blr
+}
+
+define i64 @test10(i64 %i0, i64 %i1) #0 {
+entry:
+ %0 = shl i64 %i0, 37
+ %and = and i64 %0, 15881483390550016
+ %1 = shl i64 %i0, 25
+ %and3 = and i64 %1, 2473599172608
+ %or4 = or i64 %and, %and3
+ ret i64 %or4
+
+; CHECK-LABEL: @test10
+; CHECK-DAG: lis [[REG1:[0-9]+]], 1
+; CHECK-DAG: rldicl [[REG6:[0-9]+]], 3, 25, 0
+; CHECK-DAG: rldicl [[REG7:[0-9]+]], 3, 37, 0
+; CHECK-DAG: ori [[REG2:[0-9]+]], [[REG1]], 8183
+; CHECK-DAG: ori [[REG3:[0-9]+]], [[REG1]], 50017
+; CHECK-DAG: sldi [[REG4:[0-9]+]], [[REG2]], 25
+; CHECK-DAG: sldi [[REG5:[0-9]+]], [[REG3]], 37
+; CHECK-DAG: and [[REG8:[0-9]+]], [[REG6]], [[REG4]]
+; CHECK-DAG: and [[REG9:[0-9]+]], [[REG7]], [[REG5]]
+; CHECK: or 3, [[REG9]], [[REG8]]
+; CHECK: blr
+}
+
+define i64 @test11(i64 %x) #0 {
+entry:
+ %and = and i64 %x, 4294967295
+ %shl = shl i64 %x, 32
+ %or = or i64 %and, %shl
+ ret i64 %or
+
+; CHECK-LABEL: @test11
+; CHECK: rlwinm 3, 3, 0, 1, 0
+; CHECK: blr
+}
+
+define i64 @test12(i64 %x) #0 {
+entry:
+ %and = and i64 %x, 4294905855
+ %shl = shl i64 %x, 32
+ %or = or i64 %and, %shl
+ ret i64 %or
+
+; CHECK-LABEL: @test12
+; CHECK: rlwinm 3, 3, 0, 20, 15
+; CHECK: blr
+}
+
+define i64 @test13(i64 %x) #0 {
+entry:
+ %shl = shl i64 %x, 4
+ %and = and i64 %shl, 240
+ %shr = lshr i64 %x, 28
+ %and1 = and i64 %shr, 15
+ %or = or i64 %and, %and1
+ ret i64 %or
+
+; CHECK-LABEL: @test13
+; CHECK: rlwinm 3, 3, 4, 24, 31
+; CHECK: blr
+}
+
+define i64 @test14(i64 %x) #0 {
+entry:
+ %shl = shl i64 %x, 4
+ %and = and i64 %shl, 240
+ %shr = lshr i64 %x, 28
+ %and1 = and i64 %shr, 15
+ %and2 = and i64 %x, -4294967296
+ %or = or i64 %and1, %and2
+ %or3 = or i64 %or, %and
+ ret i64 %or3
+
+; CHECK-LABEL: @test14
+; CHECK: rldicr [[REG1:[0-9]+]], 3, 0, 31
+; CHECK: rlwimi [[REG1]], 3, 4, 24, 31
+; CHECK: mr 3, [[REG1]]
+; CHECK: blr
+}
+
+define i64 @test15(i64 %x) #0 {
+entry:
+ %shl = shl i64 %x, 4
+ %and = and i64 %shl, 240
+ %shr = lshr i64 %x, 28
+ %and1 = and i64 %shr, 15
+ %and2 = and i64 %x, -256
+ %or = or i64 %and1, %and2
+ %or3 = or i64 %or, %and
+ ret i64 %or3
+
+; CHECK-LABEL: @test15
+; CHECK: rlwimi 3, 3, 4, 24, 31
+; CHECK: blr
+}
+
+; Function Attrs: nounwind readnone
+declare i32 @llvm.bswap.i32(i32) #0
+declare i64 @llvm.bswap.i64(i64) #0
+
+attributes #0 = { nounwind readnone }
+
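Note: these bperm tests pin rotate-and-mask instruction selection. rldicl RA,RS,SH,MB rotates RS left by SH bits and clears all bits to the left of bit position MB; with MB=0 nothing is cleared, so it is a pure rotate. Worked numbers for @test4 above: rldicl ..., 4, 49, 0 rotates %i1 left by 49, i.e. right by 15, and the following andis. with 888 masks against 888 << 16 = 58195968, discarding the wrapped-around bits; together the pair implements the lshr-by-15-and-mask from the IR.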
diff --git a/test/CodeGen/PowerPC/cc.ll b/test/CodeGen/PowerPC/cc.ll
index f92121b..c23ee7c 100644
--- a/test/CodeGen/PowerPC/cc.ll
+++ b/test/CodeGen/PowerPC/cc.ll
@@ -41,7 +41,7 @@ entry:
br label %foo
foo:
- call { i64, i64 } asm sideeffect "sc", "={r0},={r3},{r0},~{cc}" (i64 %a)
+ call { i64, i64 } asm sideeffect "sc", "={r0},={r3},{r0},~{cc},~{cr1},~{cr2},~{cr3},~{cr4},~{cr5},~{cr6},~{cr7}" (i64 %a)
br i1 %c, label %bar, label %end
bar:
diff --git a/test/CodeGen/PowerPC/cmpb-ppc32.ll b/test/CodeGen/PowerPC/cmpb-ppc32.ll
new file mode 100644
index 0000000..639ed88
--- /dev/null
+++ b/test/CodeGen/PowerPC/cmpb-ppc32.ll
@@ -0,0 +1,50 @@
+; RUN: llc -mcpu=pwr7 < %s | FileCheck %s
+target datalayout = "E-m:e-p:32:32-i64:64-n32"
+target triple = "powerpc-unknown-linux-gnu"
+
+; Function Attrs: nounwind readnone
+define zeroext i16 @test16(i16 zeroext %x, i16 zeroext %y) #0 {
+entry:
+ %0 = xor i16 %y, %x
+ %1 = and i16 %0, 255
+ %cmp = icmp eq i16 %1, 0
+ %cmp20 = icmp ult i16 %0, 256
+ %conv25 = select i1 %cmp, i32 255, i32 0
+ %conv27 = select i1 %cmp20, i32 65280, i32 0
+ %or = or i32 %conv25, %conv27
+ %conv29 = trunc i32 %or to i16
+ ret i16 %conv29
+
+; CHECK-LABEL: @test16
+; CHECK: cmpb [[REG1:[0-9]+]], 4, 3
+; CHECK: rlwinm 3, [[REG1]], 0, 16, 31
+; CHECK: blr
+}
+
+define i32 @test32(i32 %x, i32 %y) #0 {
+entry:
+ %0 = xor i32 %y, %x
+ %1 = and i32 %0, 255
+ %cmp = icmp eq i32 %1, 0
+ %2 = and i32 %0, 65280
+ %cmp28 = icmp eq i32 %2, 0
+ %3 = and i32 %0, 16711680
+ %cmp34 = icmp eq i32 %3, 0
+ %cmp40 = icmp ult i32 %0, 16777216
+ %conv44 = select i1 %cmp, i32 255, i32 0
+ %conv45 = select i1 %cmp28, i32 65280, i32 0
+ %conv47 = select i1 %cmp34, i32 16711680, i32 0
+ %conv50 = select i1 %cmp40, i32 -16777216, i32 0
+ %or = or i32 %conv45, %conv50
+ %or49 = or i32 %or, %conv44
+ %or52 = or i32 %or49, %conv47
+ ret i32 %or52
+
+; CHECK-LABEL: @test32
+; CHECK: cmpb 3, 4, 3
+; CHECK-NOT: rlwinm
+; CHECK: blr
+}
+
+attributes #0 = { nounwind readnone }
+
diff --git a/test/CodeGen/PowerPC/cmpb.ll b/test/CodeGen/PowerPC/cmpb.ll
new file mode 100644
index 0000000..7d0c0ab
--- /dev/null
+++ b/test/CodeGen/PowerPC/cmpb.ll
@@ -0,0 +1,204 @@
+; RUN: llc -mcpu pwr7 < %s | FileCheck %s
+target datalayout = "E-m:e-i64:64-n32:64"
+target triple = "powerpc64-unknown-linux-gnu"
+
+; Function Attrs: nounwind readnone
+define zeroext i16 @test16(i16 zeroext %x, i16 zeroext %y) #0 {
+entry:
+ %0 = xor i16 %y, %x
+ %1 = and i16 %0, 255
+ %cmp = icmp eq i16 %1, 0
+ %cmp20 = icmp ult i16 %0, 256
+ %conv25 = select i1 %cmp, i32 255, i32 0
+ %conv27 = select i1 %cmp20, i32 65280, i32 0
+ %or = or i32 %conv25, %conv27
+ %conv29 = trunc i32 %or to i16
+ ret i16 %conv29
+
+; CHECK-LABEL: @test16
+; CHECK: cmpb [[REG1:[0-9]+]], 4, 3
+; CHECK: rldicl 3, [[REG1]], 0, 48
+; CHECK: blr
+}
+
+define zeroext i16 @test16p1(i16 zeroext %x, i16 zeroext %y) #0 {
+entry:
+ %0 = xor i16 %y, %x
+ %1 = and i16 %0, 255
+ %cmp = icmp eq i16 %1, 0
+ %cmp20 = icmp ult i16 %0, 256
+ %conv28 = select i1 %cmp, i32 5, i32 0
+ %conv30 = select i1 %cmp20, i32 65280, i32 0
+ %or = or i32 %conv28, %conv30
+ %conv32 = trunc i32 %or to i16
+ ret i16 %conv32
+
+; CHECK-LABEL: @test16p1
+; CHECK: cmpb [[REG1:[0-9]+]], 4, 3
+; CHECK: andi. 3, [[REG1]], 65285
+; CHECK: blr
+}
+
+; Function Attrs: nounwind readnone
+define zeroext i16 @test16p2(i16 zeroext %x, i16 zeroext %y) #0 {
+entry:
+ %0 = xor i16 %y, %x
+ %1 = and i16 %0, 255
+ %cmp = icmp eq i16 %1, 0
+ %cmp20 = icmp ult i16 %0, 256
+ %conv28 = select i1 %cmp, i32 255, i32 0
+ %conv30 = select i1 %cmp20, i32 1280, i32 0
+ %or = or i32 %conv28, %conv30
+ %conv32 = trunc i32 %or to i16
+ ret i16 %conv32
+
+; CHECK-LABEL: @test16p2
+; CHECK: cmpb [[REG1:[0-9]+]], 4, 3
+; CHECK: andi. 3, [[REG1]], 1535
+; CHECK: blr
+}
+
+; Function Attrs: nounwind readnone
+define zeroext i16 @test16p3(i16 zeroext %x, i16 zeroext %y) #0 {
+entry:
+ %0 = xor i16 %y, %x
+ %1 = and i16 %0, 255
+ %cmp = icmp eq i16 %1, 0
+ %cmp20 = icmp ult i16 %0, 256
+ %conv27 = select i1 %cmp, i32 255, i32 0
+ %conv29 = select i1 %cmp20, i32 1024, i32 1280
+ %or = or i32 %conv27, %conv29
+ %conv31 = trunc i32 %or to i16
+ ret i16 %conv31
+
+; CHECK-LABEL: @test16p3
+; CHECK: cmpb [[REG1:[0-9]+]], 4, 3
+; CHECK: rldicl [[REG2:[0-9]+]], [[REG1]], 0, 55
+; CHECK: xori 3, [[REG2]], 1280
+; CHECK: blr
+}
+
+define zeroext i32 @test32(i32 zeroext %x, i32 zeroext %y) #0 {
+entry:
+ %0 = xor i32 %y, %x
+ %1 = and i32 %0, 255
+ %cmp = icmp eq i32 %1, 0
+ %2 = and i32 %0, 65280
+ %cmp28 = icmp eq i32 %2, 0
+ %3 = and i32 %0, 16711680
+ %cmp34 = icmp eq i32 %3, 0
+ %cmp40 = icmp ult i32 %0, 16777216
+ %conv44 = select i1 %cmp, i32 255, i32 0
+ %conv45 = select i1 %cmp28, i32 65280, i32 0
+ %conv47 = select i1 %cmp34, i32 16711680, i32 0
+ %conv50 = select i1 %cmp40, i32 -16777216, i32 0
+ %or = or i32 %conv45, %conv50
+ %or49 = or i32 %or, %conv44
+ %or52 = or i32 %or49, %conv47
+ ret i32 %or52
+
+; CHECK-LABEL: @test32
+; CHECK: cmpb [[REG1:[0-9]+]], 4, 3
+; CHECK: rldicl 3, [[REG1]], 0, 32
+; CHECK: blr
+}
+
+define zeroext i32 @test32p1(i32 zeroext %x, i32 zeroext %y) #0 {
+entry:
+ %0 = xor i32 %y, %x
+ %1 = and i32 %0, 255
+ %cmp = icmp eq i32 %1, 0
+ %2 = and i32 %0, 65280
+ %cmp28 = icmp eq i32 %2, 0
+ %3 = and i32 %0, 16711680
+ %cmp34 = icmp eq i32 %3, 0
+ %cmp40 = icmp ult i32 %0, 16777216
+ %conv47 = select i1 %cmp, i32 255, i32 0
+ %conv48 = select i1 %cmp28, i32 65280, i32 0
+ %conv50 = select i1 %cmp34, i32 458752, i32 0
+ %conv53 = select i1 %cmp40, i32 -16777216, i32 0
+ %or = or i32 %conv48, %conv53
+ %or52 = or i32 %or, %conv47
+ %or55 = or i32 %or52, %conv50
+ ret i32 %or55
+
+; CHECK-LABEL: @test32p1
+; CHECK: li [[REG1:[0-9]+]], 0
+; CHECK: cmpb [[REG4:[0-9]+]], 4, 3
+; CHECK: oris [[REG2:[0-9]+]], [[REG1]], 65287
+; CHECK: ori [[REG3:[0-9]+]], [[REG2]], 65535
+; CHECK: and 3, [[REG4]], [[REG3]]
+; CHECK: blr
+}
+
+define zeroext i32 @test32p2(i32 zeroext %x, i32 zeroext %y) #0 {
+entry:
+ %0 = xor i32 %y, %x
+ %1 = and i32 %0, 255
+ %cmp = icmp eq i32 %1, 0
+ %2 = and i32 %0, 65280
+ %cmp22 = icmp eq i32 %2, 0
+ %cmp28 = icmp ult i32 %0, 16777216
+ %conv32 = select i1 %cmp, i32 255, i32 0
+ %conv33 = select i1 %cmp22, i32 65280, i32 0
+ %conv35 = select i1 %cmp28, i32 -16777216, i32 0
+ %or = or i32 %conv33, %conv35
+ %or37 = or i32 %or, %conv32
+ ret i32 %or37
+
+; CHECK-LABEL: @test32p2
+; CHECK: li [[REG1:[0-9]+]], 0
+; CHECK: cmpb [[REG4:[0-9]+]], 4, 3
+; CHECK: oris [[REG2:[0-9]+]], [[REG1]], 65280
+; CHECK: ori [[REG3:[0-9]+]], [[REG2]], 65535
+; CHECK: and 3, [[REG4]], [[REG3]]
+; CHECK: blr
+}
+
+define i64 @test64(i64 %x, i64 %y) #0 {
+entry:
+ %shr19 = lshr i64 %x, 56
+ %conv21 = trunc i64 %shr19 to i32
+ %shr43 = lshr i64 %y, 56
+ %conv45 = trunc i64 %shr43 to i32
+ %0 = xor i64 %y, %x
+ %1 = and i64 %0, 255
+ %cmp = icmp eq i64 %1, 0
+ %2 = and i64 %0, 65280
+ %cmp52 = icmp eq i64 %2, 0
+ %3 = and i64 %0, 16711680
+ %cmp58 = icmp eq i64 %3, 0
+ %4 = and i64 %0, 4278190080
+ %cmp64 = icmp eq i64 %4, 0
+ %5 = and i64 %0, 1095216660480
+ %cmp70 = icmp eq i64 %5, 0
+ %6 = and i64 %0, 280375465082880
+ %cmp76 = icmp eq i64 %6, 0
+ %7 = and i64 %0, 71776119061217280
+ %cmp82 = icmp eq i64 %7, 0
+ %cmp88 = icmp eq i32 %conv21, %conv45
+ %conv92 = select i1 %cmp, i64 255, i64 0
+ %conv93 = select i1 %cmp52, i64 65280, i64 0
+ %or = or i64 %conv92, %conv93
+ %conv95 = select i1 %cmp58, i64 16711680, i64 0
+ %or97 = or i64 %or, %conv95
+ %conv98 = select i1 %cmp64, i64 4278190080, i64 0
+ %or100 = or i64 %or97, %conv98
+ %conv101 = select i1 %cmp70, i64 1095216660480, i64 0
+ %or103 = or i64 %or100, %conv101
+ %conv104 = select i1 %cmp76, i64 280375465082880, i64 0
+ %or106 = or i64 %or103, %conv104
+ %conv107 = select i1 %cmp82, i64 71776119061217280, i64 0
+ %or109 = or i64 %or106, %conv107
+ %conv110 = select i1 %cmp88, i64 -72057594037927936, i64 0
+ %or112 = or i64 %or109, %conv110
+ ret i64 %or112
+
+; CHECK-LABEL: @test64
+; CHECK: cmpb 3, 3, 4
+; CHECK-NOT: rldicl
+; CHECK: blr
+}
+
+attributes #0 = { nounwind readnone }
+
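Note: cmpb (Power ISA v2.05 and later) compares its two source registers byte by byte and writes 0xFF into each result byte whose inputs match and 0x00 otherwise, which is exactly the per-byte icmp/select/or chains these tests build up. A worked example on illustrative 32-bit values:

    cmpb r3, r4, r5  with r4 = 0x12345678, r5 = 0x12AA5678  ->  r3 = 0xFF00FFFF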
diff --git a/test/CodeGen/PowerPC/code-align.ll b/test/CodeGen/PowerPC/code-align.ll
new file mode 100644
index 0000000..c6ec37f
--- /dev/null
+++ b/test/CodeGen/PowerPC/code-align.ll
@@ -0,0 +1,110 @@
+; RUN: llc -mcpu=ppc64 < %s | FileCheck %s -check-prefix=GENERIC
+; RUN: llc -mcpu=970 < %s | FileCheck %s -check-prefix=PWR
+; RUN: llc -mcpu=a2 < %s | FileCheck %s -check-prefix=BASIC
+; RUN: llc -mcpu=e500mc < %s | FileCheck %s -check-prefix=BASIC
+; RUN: llc -mcpu=e5500 < %s | FileCheck %s -check-prefix=BASIC
+; RUN: llc -mcpu=pwr4 < %s | FileCheck %s -check-prefix=PWR
+; RUN: llc -mcpu=pwr5 < %s | FileCheck %s -check-prefix=PWR
+; RUN: llc -mcpu=pwr5x < %s | FileCheck %s -check-prefix=PWR
+; RUN: llc -mcpu=pwr6 < %s | FileCheck %s -check-prefix=PWR
+; RUN: llc -mcpu=pwr6x < %s | FileCheck %s -check-prefix=PWR
+; RUN: llc -mcpu=pwr7 < %s | FileCheck %s -check-prefix=PWR
+; RUN: llc -mcpu=pwr8 < %s | FileCheck %s -check-prefix=PWR
+target datalayout = "E-m:e-i64:64-n32:64"
+target triple = "powerpc64-unknown-linux-gnu"
+
+; Function Attrs: nounwind readnone
+define signext i32 @foo(i32 signext %x) #0 {
+entry:
+ %mul = shl nsw i32 %x, 1
+ ret i32 %mul
+
+; GENERIC-LABEL: .globl foo
+; BASIC-LABEL: .globl foo
+; PWR-LABEL: .globl foo
+; GENERIC: .align 2
+; BASIC: .align 4
+; PWR: .align 4
+; GENERIC: @foo
+; BASIC: @foo
+; PWR: @foo
+}
+
+; Function Attrs: nounwind
+define void @loop(i32 signext %x, i32* nocapture %a) #1 {
+entry:
+ br label %vector.body
+
+; GENERIC-LABEL: @loop
+; BASIC-LABEL: @loop
+; PWR-LABEL: @loop
+; GENERIC: mtctr
+; BASIC: mtctr
+; PWR: mtctr
+; GENERIC-NOT: .align
+; BASIC: .align 4
+; PWR: .align 4
+; GENERIC: lwzu
+; BASIC: lwzu
+; PWR: lwzu
+; GENERIC: bdnz
+; BASIC: bdnz
+; PWR: bdnz
+
+vector.body: ; preds = %vector.body, %entry
+ %index = phi i64 [ 0, %entry ], [ %index.next, %vector.body ]
+ %induction45 = or i64 %index, 1
+ %0 = getelementptr inbounds i32* %a, i64 %index
+ %1 = getelementptr inbounds i32* %a, i64 %induction45
+ %2 = load i32* %0, align 4
+ %3 = load i32* %1, align 4
+ %4 = add nsw i32 %2, 4
+ %5 = add nsw i32 %3, 4
+ %6 = mul nsw i32 %4, 3
+ %7 = mul nsw i32 %5, 3
+ store i32 %6, i32* %0, align 4
+ store i32 %7, i32* %1, align 4
+ %index.next = add i64 %index, 2
+ %8 = icmp eq i64 %index.next, 2048
+ br i1 %8, label %for.end, label %vector.body
+
+for.end: ; preds = %vector.body
+ ret void
+}
+
+; Function Attrs: nounwind
+define void @sloop(i32 signext %x, i32* nocapture %a) #1 {
+entry:
+ br label %for.body
+
+; GENERIC-LABEL: @sloop
+; BASIC-LABEL: @sloop
+; PWR-LABEL: @sloop
+; GENERIC: mtctr
+; BASIC: mtctr
+; PWR: mtctr
+; GENERIC-NOT: .align
+; BASIC: .align 4
+; PWR: .align 5
+; GENERIC: bdnz
+; BASIC: bdnz
+; PWR: bdnz
+
+for.body: ; preds = %for.body, %entry
+ %indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
+ %arrayidx = getelementptr inbounds i32* %a, i64 %indvars.iv
+ %0 = load i32* %arrayidx, align 4
+ %add = add nsw i32 %0, 4
+ %mul = mul nsw i32 %add, 3
+ store i32 %mul, i32* %arrayidx, align 4
+ %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
+ %exitcond = icmp eq i64 %indvars.iv.next, 2048
+ br i1 %exitcond, label %for.end, label %for.body
+
+for.end: ; preds = %for.body
+ ret void
+}
+
+attributes #0 = { nounwind readnone }
+attributes #1 = { nounwind }
+
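Note: on PowerPC the .align directive takes a power-of-two exponent, so the expectations above request 4-byte (.align 2), 16-byte (.align 4), and 32-byte (.align 5) boundaries; the point of the test is that generic, embedded, and server CPUs get different function and loop alignments.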
diff --git a/test/CodeGen/PowerPC/constants-i64.ll b/test/CodeGen/PowerPC/constants-i64.ll
new file mode 100644
index 0000000..5f2815e
--- /dev/null
+++ b/test/CodeGen/PowerPC/constants-i64.ll
@@ -0,0 +1,84 @@
+; RUN: llc -mcpu=ppc64 < %s | FileCheck %s
+target datalayout = "E-m:e-i64:64-n32:64"
+target triple = "powerpc64-unknown-linux-gnu"
+
+; Function Attrs: nounwind readnone
+define i64 @cn1() #0 {
+entry:
+ ret i64 281474976710655
+
+; CHECK-LABEL: @cn1
+; CHECK: lis [[REG1:[0-9]+]], -1
+; CHECK: rldicr 3, [[REG1]], 48, 63
+; CHECK: blr
+}
+
+; Function Attrs: nounwind readnone
+define i64 @cnb() #0 {
+entry:
+ ret i64 281474976710575
+
+; CHECK-LABEL: @cnb
+; CHECK: lis [[REG1:[0-9]+]], -81
+; CHECK: rldicr 3, [[REG1]], 48, 63
+; CHECK: blr
+}
+
+; Function Attrs: nounwind readnone
+define i64 @f2(i64 %x) #0 {
+entry:
+ ret i64 -68719476736
+
+; CHECK-LABEL: @f2
+; CHECK: li [[REG1:[0-9]+]], -1
+; CHECK: sldi 3, [[REG1]], 36
+; CHECK: blr
+}
+
+; Function Attrs: nounwind readnone
+define i64 @f2a(i64 %x) #0 {
+entry:
+ ret i64 -361850994688
+
+; CHECK-LABEL: @f2a
+; CHECK: li [[REG1:[0-9]+]], -337
+; CHECK: sldi 3, [[REG1]], 30
+; CHECK: blr
+}
+
+; Function Attrs: nounwind readnone
+define i64 @f2n(i64 %x) #0 {
+entry:
+ ret i64 68719476735
+
+; CHECK-LABEL: @f2n
+; CHECK: lis [[REG1:[0-9]+]], -4096
+; CHECK: rldicr 3, [[REG1]], 36, 63
+; CHECK: blr
+}
+
+; Function Attrs: nounwind readnone
+define i64 @f3(i64 %x) #0 {
+entry:
+ ret i64 8589934591
+
+; CHECK-LABEL: @f3
+; CHECK: lis [[REG1:[0-9]+]], -32768
+; CHECK: rldicr 3, [[REG1]], 33, 63
+; CHECK: blr
+}
+
+; Function Attrs: nounwind readnone
+define i64 @cn2n() #0 {
+entry:
+ ret i64 -1407374887747585
+
+; CHECK-LABEL: @cn2n
+; CHECK: lis [[REG1:[0-9]+]], -5121
+; CHECK: ori [[REG2:[0-9]+]], [[REG1]], 65534
+; CHECK: rldicr 3, [[REG2]], 22, 63
+; CHECK: blr
+}
+
+attributes #0 = { nounwind readnone }
+
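Note: a worked trace for @cn1, whose constant is 0x0000FFFFFFFFFFFF: lis REG, -1 materializes 0xFFFFFFFFFFFF0000 (the immediate is shifted left 16 and sign-extended), and rldicr 3, REG, 48, 63 rotates that left by 48 bits, i.e. right by 16, under a full mask, producing 0x0000FFFFFFFFFFFF. Two instructions replace the usual lis/ori/sldi/oris/ori sequence, which is what these new tests lock in.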
diff --git a/test/CodeGen/PowerPC/crsave.ll b/test/CodeGen/PowerPC/crsave.ll
index a9b4b36..602ba94 100644
--- a/test/CodeGen/PowerPC/crsave.ll
+++ b/test/CodeGen/PowerPC/crsave.ll
@@ -6,7 +6,7 @@ declare void @foo()
define i32 @test_cr2() nounwind uwtable {
entry:
%ret = alloca i32, align 4
- %0 = call i32 asm sideeffect "\0A\09mtcr $4\0A\09cmp 2,$2,$1\0A\09mfcr $0", "=r,r,r,r,r,~{cr2}"(i32 1, i32 2, i32 3, i32 0) nounwind
+ %0 = call i32 asm sideeffect "\0A\09mtcr $4\0A\09cmpw 2,$2,$1\0A\09mfcr $0", "=r,r,r,r,r,~{cr2}"(i32 1, i32 2, i32 3, i32 0) nounwind
store i32 %0, i32* %ret, align 4
call void @foo()
%1 = load i32* %ret, align 4
@@ -35,7 +35,7 @@ entry:
define i32 @test_cr234() nounwind {
entry:
%ret = alloca i32, align 4
- %0 = call i32 asm sideeffect "\0A\09mtcr $4\0A\09cmp 2,$2,$1\0A\09cmp 3,$2,$2\0A\09cmp 4,$2,$3\0A\09mfcr $0", "=r,r,r,r,r,~{cr2},~{cr3},~{cr4}"(i32 1, i32 2, i32 3, i32 0) nounwind
+ %0 = call i32 asm sideeffect "\0A\09mtcr $4\0A\09cmpw 2,$2,$1\0A\09cmpw 3,$2,$2\0A\09cmpw 4,$2,$3\0A\09mfcr $0", "=r,r,r,r,r,~{cr2},~{cr3},~{cr4}"(i32 1, i32 2, i32 3, i32 0) nounwind
store i32 %0, i32* %ret, align 4
call void @foo()
%1 = load i32* %ret, align 4
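Note: the inline-asm change swaps the four-operand extended mnemonic cmp (cmp crD,L,rA,rB) for cmpw, which hard-codes L=0 (word compare) and takes three operands; the old three-operand `cmp 2,$2,$1` spelling is presumably not a form the integrated assembler accepts, so cmpw keeps the asm blocks assembling while preserving their meaning.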
diff --git a/test/CodeGen/PowerPC/ctrloops.ll b/test/CodeGen/PowerPC/ctrloops.ll
index ca00f68..ccab7cb 100644
--- a/test/CodeGen/PowerPC/ctrloops.ll
+++ b/test/CodeGen/PowerPC/ctrloops.ll
@@ -1,6 +1,6 @@
target datalayout = "E-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v128:128:128-n32:64"
target triple = "powerpc64-unknown-freebsd10.0"
-; RUN: llc < %s -march=ppc64 | FileCheck %s
+; RUN: llc < %s -march=ppc64 -relocation-model=pic | FileCheck %s
@a = common global i32 0, align 4
@@ -73,3 +73,26 @@ for.end: ; preds = %for.body, %entry
; CHECK-NOT: cmplwi
; CHECK: bdnz
}
+
+@tls_var = external thread_local global i8
+
+define i32 @test4() {
+entry:
+ br label %for.body
+
+for.body: ; preds = %for.body, %entry
+ %phi = phi i32 [ %dec, %for.body ], [ undef, %entry ]
+ %load = ptrtoint i8* @tls_var to i32
+ %dec = add i32 %phi, -1
+ %cmp = icmp sgt i32 %phi, 1
+ br i1 %cmp, label %for.body, label %return
+
+return: ; preds = %for.body
+ ret i32 %load
+; CHECK-LABEL: @test4
+; CHECK-NOT: mtctr
+; CHECK: addi {{[0-9]+}}
+; CHECK: cmpwi
+; CHECK-NOT: bdnz
+; CHECK: bgt
+}
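Note: @test4 covers an interaction between CTR loops and PIC TLS: with -relocation-model=pic the thread-local access is expected to lower to a call (e.g. __tls_get_addr in the general-dynamic model), a call clobbers CTR, and so the CTR-loops pass must fall back to the plain addi/cmpwi/bgt induction that the CHECK lines demand.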
diff --git a/test/CodeGen/PowerPC/dbg.ll b/test/CodeGen/PowerPC/dbg.ll
index 04338a6..bd15367 100644
--- a/test/CodeGen/PowerPC/dbg.ll
+++ b/test/CodeGen/PowerPC/dbg.ll
@@ -6,8 +6,8 @@ target triple = "powerpc64-unknown-linux-gnu"
define i32 @main(i32 %argc, i8** nocapture %argv) nounwind readnone {
entry:
- tail call void @llvm.dbg.value(metadata !{i32 %argc}, i64 0, metadata !15, metadata !{metadata !"0x102"}), !dbg !17
- tail call void @llvm.dbg.value(metadata !{i8** %argv}, i64 0, metadata !16, metadata !{metadata !"0x102"}), !dbg !18
+ tail call void @llvm.dbg.value(metadata i32 %argc, i64 0, metadata !15, metadata !{!"0x102"}), !dbg !17
+ tail call void @llvm.dbg.value(metadata i8** %argv, i64 0, metadata !16, metadata !{!"0x102"}), !dbg !18
%add = add nsw i32 %argc, 1, !dbg !19
ret i32 %add, !dbg !19
}
@@ -17,23 +17,23 @@ declare void @llvm.dbg.value(metadata, i64, metadata, metadata) nounwind readnon
!llvm.dbg.cu = !{!0}
!llvm.module.flags = !{!22}
-!0 = metadata !{metadata !"0x11\0012\00clang version 3.1\001\00\000\00\000", metadata !21, metadata !1, metadata !1, metadata !3, metadata !1, metadata !""} ; [ DW_TAG_compile_unit ]
-!1 = metadata !{}
-!3 = metadata !{metadata !5}
-!5 = metadata !{metadata !"0x2e\00main\00main\00\001\000\001\000\006\00256\001\000", metadata !21, null, metadata !7, null, i32 (i32, i8**)* @main, null, null, metadata !13} ; [ DW_TAG_subprogram ]
-!6 = metadata !{metadata !"0x29", metadata !21} ; [ DW_TAG_file_type ]
-!7 = metadata !{metadata !"0x15\00\000\000\000\000\000\000", i32 0, null, null, metadata !8, null, null, null} ; [ DW_TAG_subroutine_type ] [line 0, size 0, align 0, offset 0] [from ]
-!8 = metadata !{metadata !9, metadata !9, metadata !10}
-!9 = metadata !{metadata !"0x24\00int\000\0032\0032\000\000\005", null, null} ; [ DW_TAG_base_type ]
-!10 = metadata !{metadata !"0xf\00\000\0064\0064\000\000", null, null, metadata !11} ; [ DW_TAG_pointer_type ]
-!11 = metadata !{metadata !"0xf\00\000\0064\0064\000\000", null, null, metadata !12} ; [ DW_TAG_pointer_type ]
-!12 = metadata !{metadata !"0x24\00char\000\008\008\000\000\008", null, null} ; [ DW_TAG_base_type ]
-!13 = metadata !{metadata !15, metadata !16}
-!15 = metadata !{metadata !"0x101\00argc\0016777217\000", metadata !5, metadata !6, metadata !9} ; [ DW_TAG_arg_variable ]
-!16 = metadata !{metadata !"0x101\00argv\0033554433\000", metadata !5, metadata !6, metadata !10} ; [ DW_TAG_arg_variable ]
-!17 = metadata !{i32 1, i32 14, metadata !5, null}
-!18 = metadata !{i32 1, i32 26, metadata !5, null}
-!19 = metadata !{i32 2, i32 3, metadata !20, null}
-!20 = metadata !{metadata !"0xb\001\0034\000", metadata !21, metadata !5} ; [ DW_TAG_lexical_block ]
-!21 = metadata !{metadata !"dbg.c", metadata !"/src"}
-!22 = metadata !{i32 1, metadata !"Debug Info Version", i32 2}
+!0 = !{!"0x11\0012\00clang version 3.1\001\00\000\00\000", !21, !1, !1, !3, !1, !""} ; [ DW_TAG_compile_unit ]
+!1 = !{}
+!3 = !{!5}
+!5 = !{!"0x2e\00main\00main\00\001\000\001\000\006\00256\001\000", !21, null, !7, null, i32 (i32, i8**)* @main, null, null, !13} ; [ DW_TAG_subprogram ]
+!6 = !{!"0x29", !21} ; [ DW_TAG_file_type ]
+!7 = !{!"0x15\00\000\000\000\000\000\000", i32 0, null, null, !8, null, null, null} ; [ DW_TAG_subroutine_type ] [line 0, size 0, align 0, offset 0] [from ]
+!8 = !{!9, !9, !10}
+!9 = !{!"0x24\00int\000\0032\0032\000\000\005", null, null} ; [ DW_TAG_base_type ]
+!10 = !{!"0xf\00\000\0064\0064\000\000", null, null, !11} ; [ DW_TAG_pointer_type ]
+!11 = !{!"0xf\00\000\0064\0064\000\000", null, null, !12} ; [ DW_TAG_pointer_type ]
+!12 = !{!"0x24\00char\000\008\008\000\000\008", null, null} ; [ DW_TAG_base_type ]
+!13 = !{!15, !16}
+!15 = !{!"0x101\00argc\0016777217\000", !5, !6, !9} ; [ DW_TAG_arg_variable ]
+!16 = !{!"0x101\00argv\0033554433\000", !5, !6, !10} ; [ DW_TAG_arg_variable ]
+!17 = !MDLocation(line: 1, column: 14, scope: !5)
+!18 = !MDLocation(line: 1, column: 26, scope: !5)
+!19 = !MDLocation(line: 2, column: 3, scope: !20)
+!20 = !{!"0xb\001\0034\000", !21, !5} ; [ DW_TAG_lexical_block ]
+!21 = !{!"dbg.c", !"/src"}
+!22 = !{i32 1, !"Debug Info Version", i32 2}
diff --git a/test/CodeGen/PowerPC/early-ret2.ll b/test/CodeGen/PowerPC/early-ret2.ll
index 1784777..f9758d3 100644
--- a/test/CodeGen/PowerPC/early-ret2.ll
+++ b/test/CodeGen/PowerPC/early-ret2.ll
@@ -25,5 +25,5 @@ while.end: ; preds = %while.body, %while.
attributes #0 = { noinline nounwind }
-!0 = metadata !{}
+!0 = !{}
diff --git a/test/CodeGen/PowerPC/fast-isel-const.ll b/test/CodeGen/PowerPC/fast-isel-const.ll
new file mode 100644
index 0000000..1057d0a
--- /dev/null
+++ b/test/CodeGen/PowerPC/fast-isel-const.ll
@@ -0,0 +1,27 @@
+; RUN: llc < %s -O0 -verify-machineinstrs -fast-isel-abort -mtriple=powerpc64-unknown-linux-gnu -mcpu=pwr7 -mattr=-vsx | FileCheck %s --check-prefix=ELF64
+
+define zeroext i1 @testi1(i8 %in) nounwind uwtable ssp {
+entry:
+ %c = icmp eq i8 %in, 5
+ br i1 %c, label %true, label %false
+
+; ELF64-LABEL: @testi1
+
+true:
+ br label %end
+
+; ELF64-NOT: li {{[0-9]+}}, -1
+; ELF64: li {{[0-9]+}}, 1
+
+false:
+ br label %end
+
+; ELF64: li {{[0-9]+}}, 0
+
+end:
+ %r = phi i1 [ 0, %false], [ 1, %true ]
+ ret i1 %r
+
+; ELF64: blr
+}
+
diff --git a/test/CodeGen/PowerPC/fdiv-combine.ll b/test/CodeGen/PowerPC/fdiv-combine.ll
new file mode 100644
index 0000000..d3dc3fe
--- /dev/null
+++ b/test/CodeGen/PowerPC/fdiv-combine.ll
@@ -0,0 +1,39 @@
+; RUN: llc -mcpu=ppc64 < %s | FileCheck %s
+target datalayout = "E-m:e-i64:64-n32:64"
+target triple = "powerpc64-unknown-linux-gnu"
+
+; The following test cases check that:
+; a / D; b / D; c / D;
+; =>
+; recip = 1.0 / D; a * recip; b * recip; c * recip;
+
+define void @three_fdiv_double(double %D, double %a, double %b, double %c) #0 {
+; CHECK-LABEL: three_fdiv_double:
+; CHECK: fdiv
+; CHECK-NOT: fdiv
+; CHECK: fmul
+; CHECK: fmul
+; CHECK: fmul
+ %div = fdiv double %a, %D
+ %div1 = fdiv double %b, %D
+ %div2 = fdiv double %c, %D
+ tail call void @foo_3d(double %div, double %div1, double %div2)
+ ret void
+}
+
+define void @two_fdiv_double(double %D, double %a, double %b) #0 {
+; CHECK-LABEL: two_fdiv_double:
+; CHECK: fdiv
+; CHECK: fdiv
+; CHECK-NOT: fmul
+ %div = fdiv double %a, %D
+ %div1 = fdiv double %b, %D
+ tail call void @foo_2d(double %div, double %div1)
+ ret void
+}
+
+declare void @foo_3d(double, double, double)
+declare void @foo_3_2xd(<2 x double>, <2 x double>, <2 x double>)
+declare void @foo_2d(double, double)
+
+attributes #0 = { "unsafe-fp-math"="true" }
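Note: the rewrite these checks pin down, sketched as the IR the combine is expected to produce for three_fdiv_double (value names hypothetical; it requires the "unsafe-fp-math"="true" attribute above):

    %recip = fdiv double 1.000000e+00, %D
    %m0 = fmul double %a, %recip
    %m1 = fmul double %b, %recip
    %m2 = fmul double %c, %recip

two_fdiv_double is deliberately left alone: the target heuristic only considers the rewrite profitable when at least three values share the divisor, so its two fdivs survive.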
diff --git a/test/CodeGen/PowerPC/flt-preinc.ll b/test/CodeGen/PowerPC/flt-preinc.ll
new file mode 100644
index 0000000..dd17031
--- /dev/null
+++ b/test/CodeGen/PowerPC/flt-preinc.ll
@@ -0,0 +1,40 @@
+; RUN: llc -mcpu=ppc64 < %s | FileCheck %s
+target datalayout = "E-m:e-i64:64-n32:64"
+target triple = "powerpc64-unknown-linux-gnu"
+
+; Function Attrs: nounwind readonly
+define float @tf(float* nocapture readonly %i, i32 signext %o) #0 {
+entry:
+ %idx.ext = sext i32 %o to i64
+ %add.ptr = getelementptr inbounds float* %i, i64 %idx.ext
+ %0 = load float* %add.ptr, align 4
+ %add.ptr.sum = add nsw i64 %idx.ext, 1
+ %add.ptr3 = getelementptr inbounds float* %i, i64 %add.ptr.sum
+ %1 = load float* %add.ptr3, align 4
+ %add = fadd float %0, %1
+ ret float %add
+
+; CHECK-LABEL: @tf
+; CHECK: lfsux
+; CHECK: blr
+}
+
+; Function Attrs: nounwind readonly
+define double @td(double* nocapture readonly %i, i32 signext %o) #0 {
+entry:
+ %idx.ext = sext i32 %o to i64
+ %add.ptr = getelementptr inbounds double* %i, i64 %idx.ext
+ %0 = load double* %add.ptr, align 8
+ %add.ptr.sum = add nsw i64 %idx.ext, 1
+ %add.ptr3 = getelementptr inbounds double* %i, i64 %add.ptr.sum
+ %1 = load double* %add.ptr3, align 8
+ %add = fadd double %0, %1
+ ret double %add
+
+; CHECK-LABEL: @td
+; CHECK: lfdux
+; CHECK: blr
+}
+
+attributes #0 = { nounwind readonly }
+
diff --git a/test/CodeGen/PowerPC/fma-assoc.ll b/test/CodeGen/PowerPC/fma-assoc.ll
new file mode 100644
index 0000000..dc1316e
--- /dev/null
+++ b/test/CodeGen/PowerPC/fma-assoc.ll
@@ -0,0 +1,79 @@
+; RUN: llc < %s -march=ppc32 -fp-contract=fast -mattr=-vsx | FileCheck %s
+; RUN: llc < %s -mtriple=powerpc64-unknown-linux-gnu -fp-contract=fast -mattr=+vsx -mcpu=pwr7 | FileCheck -check-prefix=CHECK-VSX %s
+
+define double @test_FMADD_ASSOC1(double %A, double %B, double %C,
+ double %D, double %E) {
+ %F = fmul double %A, %B ; <double> [#uses=1]
+ %G = fmul double %C, %D ; <double> [#uses=1]
+ %H = fadd double %F, %G ; <double> [#uses=1]
+ %I = fadd double %H, %E ; <double> [#uses=1]
+ ret double %I
+; CHECK-LABEL: test_FMADD_ASSOC1:
+; CHECK: fmadd
+; CHECK-NEXT: fmadd
+; CHECK-NEXT: blr
+
+; CHECK-VSX-LABEL: test_FMADD_ASSOC1:
+; CHECK-VSX: xsmaddmdp
+; CHECK-VSX-NEXT: xsmaddadp
+; CHECK-VSX-NEXT: fmr
+; CHECK-VSX-NEXT: blr
+}
+
+define double @test_FMADD_ASSOC2(double %A, double %B, double %C,
+ double %D, double %E) {
+ %F = fmul double %A, %B ; <double> [#uses=1]
+ %G = fmul double %C, %D ; <double> [#uses=1]
+ %H = fadd double %F, %G ; <double> [#uses=1]
+ %I = fadd double %E, %H ; <double> [#uses=1]
+ ret double %I
+; CHECK-LABEL: test_FMADD_ASSOC2:
+; CHECK: fmadd
+; CHECK-NEXT: fmadd
+; CHECK-NEXT: blr
+
+; CHECK-VSX-LABEL: test_FMADD_ASSOC2:
+; CHECK-VSX: xsmaddmdp
+; CHECK-VSX-NEXT: xsmaddadp
+; CHECK-VSX-NEXT: fmr
+; CHECK-VSX-NEXT: blr
+}
+
+define double @test_FMSUB_ASSOC1(double %A, double %B, double %C,
+ double %D, double %E) {
+ %F = fmul double %A, %B ; <double> [#uses=1]
+ %G = fmul double %C, %D ; <double> [#uses=1]
+ %H = fadd double %F, %G ; <double> [#uses=1]
+ %I = fsub double %H, %E ; <double> [#uses=1]
+ ret double %I
+; CHECK-LABEL: test_FMSUB_ASSOC1:
+; CHECK: fmsub
+; CHECK-NEXT: fmadd
+; CHECK-NEXT: blr
+
+; CHECK-VSX-LABEL: test_FMSUB_ASSOC1:
+; CHECK-VSX: xsmsubmdp
+; CHECK-VSX-NEXT: xsmaddadp
+; CHECK-VSX-NEXT: fmr
+; CHECK-VSX-NEXT: blr
+}
+
+define double @test_FMSUB_ASSOC2(double %A, double %B, double %C,
+ double %D, double %E) {
+ %F = fmul double %A, %B ; <double> [#uses=1]
+ %G = fmul double %C, %D ; <double> [#uses=1]
+ %H = fadd double %F, %G ; <double> [#uses=1]
+ %I = fsub double %E, %H ; <double> [#uses=1]
+ ret double %I
+; CHECK-LABEL: test_FMSUB_ASSOC2:
+; CHECK: fnmsub
+; CHECK-NEXT: fnmsub
+; CHECK-NEXT: blr
+
+; CHECK-VSX-LABEL: test_FMSUB_ASSOC2:
+; CHECK-VSX: xsnmsubmdp
+; CHECK-VSX-NEXT: xsnmsubadp
+; CHECK-VSX-NEXT: fmr
+; CHECK-VSX-NEXT: blr
+}
+
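
The reassociation behind these checks is a single identity: with -fp-contract=fast, (A*B + C*D) + E rebrackets to A*B + (C*D + E), i.e. two chained fused multiply-adds. A hypothetical fused form of @test_FMADD_ASSOC1, written with the fma intrinsic:

declare double @llvm.fma.f64(double, double, double)

define double @test_FMADD_ASSOC1_fused(double %A, double %B, double %C,
                                       double %D, double %E) {
  %inner = call double @llvm.fma.f64(double %C, double %D, double %E)
  %outer = call double @llvm.fma.f64(double %A, double %B, double %inner)
  ret double %outer
}

The FMSUB cases follow by negation: since fnmsub(a, b, c) computes -(a*b - c), the E - A*B - C*D of test_FMSUB_ASSOC2 chains as fnmsub(C, D, fnmsub(A, B, E)).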
diff --git a/test/CodeGen/PowerPC/fma-ext.ll b/test/CodeGen/PowerPC/fma-ext.ll
new file mode 100644
index 0000000..56825ce
--- /dev/null
+++ b/test/CodeGen/PowerPC/fma-ext.ll
@@ -0,0 +1,93 @@
+; RUN: llc < %s -march=ppc32 -fp-contract=fast -mattr=-vsx | FileCheck %s
+; RUN: llc < %s -mtriple=powerpc64-unknown-linux-gnu -fp-contract=fast -mattr=+vsx -mcpu=pwr7 | FileCheck -check-prefix=CHECK-VSX %s
+
+define double @test_FMADD_EXT1(float %A, float %B, double %C) {
+ %D = fmul float %A, %B ; <float> [#uses=1]
+ %E = fpext float %D to double ; <double> [#uses=1]
+ %F = fadd double %E, %C ; <double> [#uses=1]
+ ret double %F
+; CHECK-LABEL: test_FMADD_EXT1:
+; CHECK: fmadd
+; CHECK-NEXT: blr
+
+; CHECK-VSX-LABEL: test_FMADD_EXT1:
+; CHECK-VSX: xsmaddmdp
+; CHECK-VSX-NEXT: blr
+}
+
+define double @test_FMADD_EXT2(float %A, float %B, double %C) {
+ %D = fmul float %A, %B ; <float> [#uses=1]
+ %E = fpext float %D to double ; <double> [#uses=1]
+ %F = fadd double %C, %E ; <double> [#uses=1]
+ ret double %F
+; CHECK-LABEL: test_FMADD_EXT2:
+; CHECK: fmadd
+; CHECK-NEXT: blr
+
+; CHECK-VSX-LABEL: test_FMADD_EXT2:
+; CHECK-VSX: xsmaddmdp
+; CHECK-VSX-NEXT: blr
+}
+
+define double @test_FMSUB_EXT1(float %A, float %B, double %C) {
+ %D = fmul float %A, %B ; <float> [#uses=1]
+ %E = fpext float %D to double ; <double> [#uses=1]
+ %F = fsub double %E, %C ; <double> [#uses=1]
+ ret double %F
+; CHECK-LABEL: test_FMSUB_EXT1:
+; CHECK: fmsub
+; CHECK-NEXT: blr
+
+; CHECK-VSX-LABEL: test_FMSUB_EXT1:
+; CHECK-VSX: xsmsubmdp
+; CHECK-VSX-NEXT: blr
+}
+
+define double @test_FMSUB_EXT2(float %A, float %B, double %C) {
+ %D = fmul float %A, %B ; <float> [#uses=1]
+ %E = fpext float %D to double ; <double> [#uses=1]
+ %F = fsub double %C, %E ; <double> [#uses=1]
+ ret double %F
+; CHECK-LABEL: test_FMSUB_EXT2:
+; CHECK: fnmsub
+; CHECK-NEXT: blr
+
+; CHECK-VSX-LABEL: test_FMSUB_EXT2:
+; CHECK-VSX: xsnmsubmdp
+; CHECK-VSX-NEXT: fmr
+; CHECK-VSX-NEXT: blr
+}
+
+define double @test_FMSUB_EXT3(float %A, float %B, double %C) {
+ %D = fmul float %A, %B ; <float> [#uses=1]
+ %E = fsub float -0.000000e+00, %D ; <float> [#uses=1]
+ %F = fpext float %E to double ; <double> [#uses=1]
+ %G = fsub double %F, %C ; <double> [#uses=1]
+ ret double %G
+; CHECK-LABEL: test_FMSUB_EXT3:
+; CHECK: fneg
+; CHECK-NEXT: fmsub
+; CHECK-NEXT: blr
+
+; CHECK-VSX-LABEL: test_FMSUB_EXT3:
+; CHECK-VSX: xsnegdp
+; CHECK-VSX-NEXT: xsmsubmdp
+; CHECK-VSX-NEXT: blr
+}
+
+define double @test_FMSUB_EXT4(float %A, float %B, double %C) {
+ %D = fmul float %A, %B ; <float> [#uses=1]
+ %E = fpext float %D to double ; <double> [#uses=1]
+ %F = fsub double -0.000000e+00, %E ; <double> [#uses=1]
+ %G = fsub double %F, %C ; <double> [#uses=1]
+ ret double %G
+; CHECK-LABEL: test_FMSUB_EXT4:
+; CHECK: fneg
+; CHECK-NEXT: fmsub
+; CHECK-NEXT: blr
+
+; CHECK-VSX-LABEL: test_FMSUB_EXT4:
+; CHECK-VSX: xsnegdp
+; CHECK-VSX-NEXT: xsmsubmdp
+; CHECK-VSX-NEXT: blr
+}
\ No newline at end of file
diff --git a/test/CodeGen/PowerPC/fp-to-int-ext.ll b/test/CodeGen/PowerPC/fp-to-int-ext.ll
new file mode 100644
index 0000000..bfacd89
--- /dev/null
+++ b/test/CodeGen/PowerPC/fp-to-int-ext.ll
@@ -0,0 +1,69 @@
+; RUN: llc -mcpu=a2 < %s | FileCheck %s
+target datalayout = "E-m:e-i64:64-n32:64"
+target triple = "powerpc64-unknown-linux-gnu"
+
+; Function Attrs: nounwind
+define double @foo1(i32* %x) #0 {
+entry:
+ %0 = load i32* %x, align 4
+ %conv = sext i32 %0 to i64
+ %conv1 = sitofp i64 %conv to double
+ ret double %conv1
+
+; CHECK-LABEL: @foo1
+; CHECK: lfiwax [[REG1:[0-9]+]], 0, 3
+; CHECK: fcfid 1, [[REG1]]
+; CHECK: blr
+}
+
+define double @foo2(i32* %x) #0 {
+entry:
+ %0 = load i32* %x, align 4
+ %conv = zext i32 %0 to i64
+ %conv1 = sitofp i64 %conv to double
+ ret double %conv1
+
+; CHECK-LABEL: @foo2
+; CHECK: lfiwzx [[REG1:[0-9]+]], 0, 3
+; CHECK: fcfid 1, [[REG1]]
+; CHECK: blr
+}
+
+define double @foo3(i32* %x) #0 {
+entry:
+ %0 = load i32* %x, align 4
+ %1 = add i32 %0, 8
+ %conv = zext i32 %1 to i64
+ %conv1 = sitofp i64 %conv to double
+ ret double %conv1
+
+; CHECK-LABEL: @foo3
+; CHECK-DAG: lwz [[REG1:[0-9]+]], 0(3)
+; CHECK-DAG: addi [[REG3:[0-9]+]], 1,
+; CHECK-DAG: addi [[REG2:[0-9]+]], [[REG1]], 8
+; CHECK-DAG: stw [[REG2]],
+; CHECK: lfiwzx [[REG4:[0-9]+]], 0, [[REG3]]
+; CHECK: fcfid 1, [[REG4]]
+; CHECK: blr
+}
+
+define double @foo4(i32* %x) #0 {
+entry:
+ %0 = load i32* %x, align 4
+ %1 = add i32 %0, 8
+ %conv = sext i32 %1 to i64
+ %conv1 = sitofp i64 %conv to double
+ ret double %conv1
+
+; CHECK-LABEL: @foo4
+; CHECK-DAG: lwz [[REG1:[0-9]+]], 0(3)
+; CHECK-DAG: addi [[REG3:[0-9]+]], 1,
+; CHECK-DAG: addi [[REG2:[0-9]+]], [[REG1]], 8
+; CHECK-DAG: stw [[REG2]],
+; CHECK: lfiwax [[REG4:[0-9]+]], 0, [[REG3]]
+; CHECK: fcfid 1, [[REG4]]
+; CHECK: blr
+}
+
+attributes #0 = { nounwind }
+
diff --git a/test/CodeGen/PowerPC/fp-to-int-to-fp.ll b/test/CodeGen/PowerPC/fp-to-int-to-fp.ll
new file mode 100644
index 0000000..f56b9b3
--- /dev/null
+++ b/test/CodeGen/PowerPC/fp-to-int-to-fp.ll
@@ -0,0 +1,70 @@
+; RUN: llc -mcpu=a2 < %s | FileCheck %s -check-prefix=FPCVT
+; RUN: llc -mcpu=ppc64 < %s | FileCheck %s -check-prefix=PPC64
+target datalayout = "E-m:e-i64:64-n32:64"
+target triple = "powerpc64-unknown-linux-gnu"
+
+; Function Attrs: nounwind readnone
+define float @fool(float %X) #0 {
+entry:
+ %conv = fptosi float %X to i64
+ %conv1 = sitofp i64 %conv to float
+ ret float %conv1
+
+; FPCVT-LABEL: @fool
+; FPCVT: fctidz [[REG1:[0-9]+]], 1
+; FPCVT: fcfids 1, [[REG1]]
+; FPCVT: blr
+
+; PPC64-LABEL: @fool
+; PPC64: fctidz [[REG1:[0-9]+]], 1
+; PPC64: fcfid [[REG2:[0-9]+]], [[REG1]]
+; PPC64: frsp 1, [[REG2]]
+; PPC64: blr
+}
+
+; Function Attrs: nounwind readnone
+define double @foodl(double %X) #0 {
+entry:
+ %conv = fptosi double %X to i64
+ %conv1 = sitofp i64 %conv to double
+ ret double %conv1
+
+; FPCVT-LABEL: @foodl
+; FPCVT: fctidz [[REG1:[0-9]+]], 1
+; FPCVT: fcfid 1, [[REG1]]
+; FPCVT: blr
+
+; PPC64-LABEL: @foodl
+; PPC64: fctidz [[REG1:[0-9]+]], 1
+; PPC64: fcfid 1, [[REG1]]
+; PPC64: blr
+}
+
+; Function Attrs: nounwind readnone
+define float @fooul(float %X) #0 {
+entry:
+ %conv = fptoui float %X to i64
+ %conv1 = uitofp i64 %conv to float
+ ret float %conv1
+
+; FPCVT-LABEL: @fooul
+; FPCVT: fctiduz [[REG1:[0-9]+]], 1
+; FPCVT: fcfidus 1, [[REG1]]
+; FPCVT: blr
+}
+
+; Function Attrs: nounwind readnone
+define double @fooudl(double %X) #0 {
+entry:
+ %conv = fptoui double %X to i64
+ %conv1 = uitofp i64 %conv to double
+ ret double %conv1
+
+; FPCVT-LABEL: @fooudl
+; FPCVT: fctiduz [[REG1:[0-9]+]], 1
+; FPCVT: fcfidu 1, [[REG1]]
+; FPCVT: blr
+}
+
+attributes #0 = { nounwind readnone }
+
diff --git a/test/CodeGen/PowerPC/glob-comp-aa-crash.ll b/test/CodeGen/PowerPC/glob-comp-aa-crash.ll
index f97d0ff..2ea036f 100644
--- a/test/CodeGen/PowerPC/glob-comp-aa-crash.ll
+++ b/test/CodeGen/PowerPC/glob-comp-aa-crash.ll
@@ -130,10 +130,10 @@ attributes #4 = { optsize }
attributes #5 = { nounwind optsize }
attributes #6 = { noreturn optsize }
-!0 = metadata !{metadata !"any pointer", metadata !1}
-!1 = metadata !{metadata !"omnipotent char", metadata !2}
-!2 = metadata !{metadata !"Simple C/C++ TBAA"}
-!3 = metadata !{metadata !"bool", metadata !1}
-!4 = metadata !{i8 0, i8 2}
-!5 = metadata !{metadata !0, metadata !0, i64 0}
-!6 = metadata !{metadata !3, metadata !3, i64 0}
+!0 = !{!"any pointer", !1}
+!1 = !{!"omnipotent char", !2}
+!2 = !{!"Simple C/C++ TBAA"}
+!3 = !{!"bool", !1}
+!4 = !{i8 0, i8 2}
+!5 = !{!0, !0, i64 0}
+!6 = !{!3, !3, i64 0}
diff --git a/test/CodeGen/PowerPC/i1-ext-fold.ll b/test/CodeGen/PowerPC/i1-ext-fold.ll
new file mode 100644
index 0000000..19bd8ff
--- /dev/null
+++ b/test/CodeGen/PowerPC/i1-ext-fold.ll
@@ -0,0 +1,54 @@
+; RUN: llc -mcpu=pwr7 < %s | FileCheck %s
+target datalayout = "E-m:e-i64:64-n32:64"
+target triple = "powerpc64-unknown-linux-gnu"
+
+; Function Attrs: nounwind readnone
+define signext i32 @foo(i32 signext %a, i32 signext %b) #0 {
+entry:
+ %cmp = icmp slt i32 %a, %b
+ %conv = zext i1 %cmp to i32
+ %shl = shl nuw nsw i32 %conv, 4
+ ret i32 %shl
+
+; CHECK-LABEL: @foo
+; CHECK-DAG: cmpw
+; CHECK-DAG: li [[REG1:[0-9]+]], 0
+; CHECK-DAG: li [[REG2:[0-9]+]], 16
+; CHECK: isel 3, [[REG2]], [[REG1]],
+; CHECK: blr
+}
+
+; Function Attrs: nounwind readnone
+define signext i32 @foo2(i32 signext %a, i32 signext %b) #0 {
+entry:
+ %cmp = icmp slt i32 %a, %b
+ %conv = zext i1 %cmp to i32
+ %shl = shl nuw nsw i32 %conv, 4
+ %add1 = or i32 %shl, 5
+ ret i32 %add1
+
+; CHECK-LABEL: @foo2
+; CHECK-DAG: cmpw
+; CHECK-DAG: li [[REG1:[0-9]+]], 5
+; CHECK-DAG: li [[REG2:[0-9]+]], 21
+; CHECK: isel 3, [[REG2]], [[REG1]],
+; CHECK: blr
+}
+
+; Function Attrs: nounwind readnone
+define signext i32 @foo3(i32 signext %a, i32 signext %b) #0 {
+entry:
+ %cmp = icmp sle i32 %a, %b
+ %conv = zext i1 %cmp to i32
+ %shl = shl nuw nsw i32 %conv, 4
+ ret i32 %shl
+
+; CHECK-LABEL: @foo3
+; CHECK-DAG: cmpw
+; CHECK-DAG: li [[REG1:[0-9]+]], 16
+; CHECK: isel 3, 0, [[REG1]],
+; CHECK: blr
+}
+
+attributes #0 = { nounwind readnone }
+
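
The isel operands here are just the two possible results of the expression, folded to constants: zext(a < b) << 4 is 16 when the compare is true and 0 otherwise, and or'ing in 5 makes the pair 21 and 5. A hypothetical select form of @foo2 that the isel implements:

define signext i32 @foo2_select(i32 signext %a, i32 signext %b) {
entry:
  %cmp = icmp slt i32 %a, %b
  %sel = select i1 %cmp, i32 21, i32 5
  ret i32 %sel
}

In @foo3 one arm is 0, which isel can encode directly (an RA field of 0 reads as the constant zero rather than r0), so only one li is needed.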
diff --git a/test/CodeGen/PowerPC/ia-mem-r0.ll b/test/CodeGen/PowerPC/ia-mem-r0.ll
new file mode 100644
index 0000000..4ab17ed
--- /dev/null
+++ b/test/CodeGen/PowerPC/ia-mem-r0.ll
@@ -0,0 +1,94 @@
+; RUN: llc -mcpu=pwr7 < %s | FileCheck %s
+target datalayout = "e-m:e-i64:64-n32:64"
+target triple = "powerpc64le-unknown-linux-gnu"
+
+; Make sure that we don't generate a std r, 0(0) -- the memory address cannot
+; be stored in r0.
+; CHECK-LABEL: @test1
+; CHECK-NOT: std {{[0-9]+}}, 0(0)
+; CHECK: blr
+
+define void @test1({ i8*, void (i8*, i8*)* } %fn_arg) {
+ %fn = alloca { i8*, void (i8*, i8*)* }
+ %sp = alloca i8*, align 8
+ %regs = alloca [18 x i64], align 8
+ store { i8*, void (i8*, i8*)* } %fn_arg, { i8*, void (i8*, i8*)* }* %fn
+ %1 = bitcast [18 x i64]* %regs to i64*
+ call void asm sideeffect "std 14, $0", "=*m"(i64* %1)
+ %2 = bitcast [18 x i64]* %regs to i8*
+ %3 = getelementptr i8* %2, i32 8
+ %4 = bitcast i8* %3 to i64*
+ call void asm sideeffect "std 15, $0", "=*m"(i64* %4)
+ %5 = bitcast [18 x i64]* %regs to i8*
+ %6 = getelementptr i8* %5, i32 16
+ %7 = bitcast i8* %6 to i64*
+ call void asm sideeffect "std 16, $0", "=*m"(i64* %7)
+ %8 = bitcast [18 x i64]* %regs to i8*
+ %9 = getelementptr i8* %8, i32 24
+ %10 = bitcast i8* %9 to i64*
+ call void asm sideeffect "std 17, $0", "=*m"(i64* %10)
+ %11 = bitcast [18 x i64]* %regs to i8*
+ %12 = getelementptr i8* %11, i32 32
+ %13 = bitcast i8* %12 to i64*
+ call void asm sideeffect "std 18, $0", "=*m"(i64* %13)
+ %14 = bitcast [18 x i64]* %regs to i8*
+ %15 = getelementptr i8* %14, i32 40
+ %16 = bitcast i8* %15 to i64*
+ call void asm sideeffect "std 19, $0", "=*m"(i64* %16)
+ %17 = bitcast [18 x i64]* %regs to i8*
+ %18 = getelementptr i8* %17, i32 48
+ %19 = bitcast i8* %18 to i64*
+ call void asm sideeffect "std 20, $0", "=*m"(i64* %19)
+ %20 = bitcast [18 x i64]* %regs to i8*
+ %21 = getelementptr i8* %20, i32 56
+ %22 = bitcast i8* %21 to i64*
+ call void asm sideeffect "std 21, $0", "=*m"(i64* %22)
+ %23 = bitcast [18 x i64]* %regs to i8*
+ %24 = getelementptr i8* %23, i32 64
+ %25 = bitcast i8* %24 to i64*
+ call void asm sideeffect "std 22, $0", "=*m"(i64* %25)
+ %26 = bitcast [18 x i64]* %regs to i8*
+ %27 = getelementptr i8* %26, i32 72
+ %28 = bitcast i8* %27 to i64*
+ call void asm sideeffect "std 23, $0", "=*m"(i64* %28)
+ %29 = bitcast [18 x i64]* %regs to i8*
+ %30 = getelementptr i8* %29, i32 80
+ %31 = bitcast i8* %30 to i64*
+ call void asm sideeffect "std 24, $0", "=*m"(i64* %31)
+ %32 = bitcast [18 x i64]* %regs to i8*
+ %33 = getelementptr i8* %32, i32 88
+ %34 = bitcast i8* %33 to i64*
+ call void asm sideeffect "std 25, $0", "=*m"(i64* %34)
+ %35 = bitcast [18 x i64]* %regs to i8*
+ %36 = getelementptr i8* %35, i32 96
+ %37 = bitcast i8* %36 to i64*
+ call void asm sideeffect "std 26, $0", "=*m"(i64* %37)
+ %38 = bitcast [18 x i64]* %regs to i8*
+ %39 = getelementptr i8* %38, i32 104
+ %40 = bitcast i8* %39 to i64*
+ call void asm sideeffect "std 27, $0", "=*m"(i64* %40)
+ %41 = bitcast [18 x i64]* %regs to i8*
+ %42 = getelementptr i8* %41, i32 112
+ %43 = bitcast i8* %42 to i64*
+ call void asm sideeffect "std 28, $0", "=*m"(i64* %43)
+ %44 = bitcast [18 x i64]* %regs to i8*
+ %45 = getelementptr i8* %44, i32 120
+ %46 = bitcast i8* %45 to i64*
+ call void asm sideeffect "std 29, $0", "=*m"(i64* %46)
+ %47 = bitcast [18 x i64]* %regs to i8*
+ %48 = getelementptr i8* %47, i32 128
+ %49 = bitcast i8* %48 to i64*
+ call void asm sideeffect "std 30, $0", "=*m"(i64* %49)
+ %50 = bitcast [18 x i64]* %regs to i8*
+ %51 = getelementptr i8* %50, i32 136
+ %52 = bitcast i8* %51 to i64*
+ call void asm sideeffect "std 31, $0", "=*m"(i64* %52)
+ %53 = getelementptr { i8*, void (i8*, i8*)* }* %fn, i32 0, i32 1
+ %.funcptr = load void (i8*, i8*)** %53
+ %54 = getelementptr { i8*, void (i8*, i8*)* }* %fn, i32 0, i32 0
+ %.ptr = load i8** %54
+ %55 = load i8** %sp
+ call void %.funcptr(i8* %.ptr, i8* %55)
+ ret void
+}
+
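
For context: in PPC D-form and X-form addressing, an RA field of 0 denotes the constant zero rather than r0, so if the allocator placed one of these asm memory operands in r0, the emitted "std 14, 0(0)" would store to address 0. A minimal sketch of the hazard, using the same inline-asm idiom as the test:

define void @spill_r14_sketch() {
  %slot = alloca i64, align 8
  ; The base register holding %slot's address must never be r0:
  ; in "std 14, 0(0)" the 0 in the base position means literal zero.
  call void asm sideeffect "std 14, $0", "=*m"(i64* %slot)
  ret void
}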
diff --git a/test/CodeGen/PowerPC/ia-neg-const.ll b/test/CodeGen/PowerPC/ia-neg-const.ll
new file mode 100644
index 0000000..556ab80
--- /dev/null
+++ b/test/CodeGen/PowerPC/ia-neg-const.ll
@@ -0,0 +1,25 @@
+; RUN: llc -mcpu=pwr7 < %s | FileCheck %s
+target datalayout = "E-m:e-i64:64-n32:64"
+target triple = "powerpc64-unknown-linux-gnu"
+
+@.str = private unnamed_addr constant [5 x i8] c"%ld\0A\00", align 1
+
+; Function Attrs: nounwind
+define i64 @main() #0 {
+entry:
+ %x = alloca i64, align 8
+ store i64 0, i64* %x, align 8
+ %0 = call i64 asm sideeffect "ld $0,$1\0A\09add${2:I} $0,$0,$2", "=&r,*m,Ir"(i64* %x, i64 -1) #0
+ ret i64 %0
+}
+
+; CHECK: ld
+; CHECK-NOT: addi 3, 3, 4294967295
+; CHECK: addi 3, 3, -1
+; CHECK: blr
+
+; Function Attrs: nounwind
+declare signext i32 @printf(i8* nocapture readonly, ...) #0
+
+attributes #0 = { nounwind }
+
diff --git a/test/CodeGen/PowerPC/in-asm-f64-reg.ll b/test/CodeGen/PowerPC/in-asm-f64-reg.ll
index 1321dfc..08b1a2c 100644
--- a/test/CodeGen/PowerPC/in-asm-f64-reg.ll
+++ b/test/CodeGen/PowerPC/in-asm-f64-reg.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -mtriple=powerpc64-unknown-linux-gnu | FileCheck %s
+; RUN: llc < %s -mtriple=powerpc64-unknown-linux-gnu -no-integrated-as | FileCheck %s
define void @f() {
; CHECK: @f
diff --git a/test/CodeGen/PowerPC/inlineasm-i64-reg.ll b/test/CodeGen/PowerPC/inlineasm-i64-reg.ll
index 5e31cd5..4d8e704 100644
--- a/test/CodeGen/PowerPC/inlineasm-i64-reg.ll
+++ b/test/CodeGen/PowerPC/inlineasm-i64-reg.ll
@@ -105,4 +105,4 @@ if.end40:
attributes #0 = { alwaysinline inlinehint nounwind }
attributes #1 = { nounwind }
-!0 = metadata !{i32 -2146895770}
+!0 = !{i32 -2146895770}
diff --git a/test/CodeGen/PowerPC/lbz-from-ld-shift.ll b/test/CodeGen/PowerPC/lbz-from-ld-shift.ll
new file mode 100644
index 0000000..3eacd6a
--- /dev/null
+++ b/test/CodeGen/PowerPC/lbz-from-ld-shift.ll
@@ -0,0 +1,18 @@
+; RUN: llc -mcpu=ppc64 < %s | FileCheck %s
+target datalayout = "E-m:e-i64:64-n32:64"
+target triple = "powerpc64-unknown-linux-gnu"
+
+; Function Attrs: nounwind readonly
+define signext i32 @test(i32* nocapture readonly %P) #0 {
+entry:
+ %0 = load i32* %P, align 4
+ %shr = lshr i32 %0, 24
+ ret i32 %shr
+
+; CHECK-LABEL: @test
+; CHECK: lbz 3, 0(3)
+; CHECK: blr
+}
+
+attributes #0 = { nounwind readonly }
+
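
The fold depends on the big-endian layout ("E-..." in the datalayout): bits 31..24 of a big-endian i32 are the byte at offset 0, so shifting the loaded word right by 24 equals a zero-extending byte load from the original address. An equivalent, hypothetical post-fold form:

define signext i32 @test_folded(i32* nocapture readonly %P) {
entry:
  %p8 = bitcast i32* %P to i8*
  %byte = load i8* %p8, align 4
  %shr = zext i8 %byte to i32
  ret i32 %shr
}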
diff --git a/test/CodeGen/PowerPC/ld-st-upd.ll b/test/CodeGen/PowerPC/ld-st-upd.ll
new file mode 100644
index 0000000..24f31ac
--- /dev/null
+++ b/test/CodeGen/PowerPC/ld-st-upd.ll
@@ -0,0 +1,19 @@
+; RUN: llc -mcpu=pwr7 < %s | FileCheck %s
+target datalayout = "E-m:e-i64:64-n32:64"
+target triple = "powerpc-unknown-linux-gnu"
+
+; Function Attrs: nounwind
+define i32* @test4(i32* readonly %X, i32* nocapture %dest) #0 {
+ %Y = getelementptr i32* %X, i64 4
+ %A = load i32* %Y, align 4
+ store i32 %A, i32* %dest, align 4
+ ret i32* %Y
+
+; CHECK-LABEL: @test4
+; CHECK: lwzu [[REG1:[0-9]+]], 16(3)
+; CHECK: stw [[REG1]], 0(4)
+; CHECK: blr
+}
+
+attributes #0 = { nounwind }
+
diff --git a/test/CodeGen/PowerPC/ldtoc-inv.ll b/test/CodeGen/PowerPC/ldtoc-inv.ll
new file mode 100644
index 0000000..550747c
--- /dev/null
+++ b/test/CodeGen/PowerPC/ldtoc-inv.ll
@@ -0,0 +1,39 @@
+; RUN: llc -mcpu=pwr7 < %s | FileCheck %s
+target datalayout = "E-m:e-i64:64-n32:64"
+target triple = "powerpc64-unknown-linux-gnu"
+
+@phasor = external constant [4096 x i32]
+
+; Function Attrs: nounwind
+define void @test(i32* nocapture %out, i32 zeroext %step_size) #0 {
+entry:
+ %shl = shl i32 %step_size, 2
+ %idxprom = zext i32 %shl to i64
+ br label %for.body
+
+; Make sure that the TOC load has been hoisted out of the loop.
+; CHECK-LABEL: @test
+; CHECK: ld {{[0-9]+}}, .LC{{[0-9]+}}@toc@l
+; CHECK: %for.body
+; CHECK: blr
+
+for.body: ; preds = %entry, %for.body
+ %indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
+ %0 = trunc i64 %indvars.iv to i32
+ %shl1 = shl i32 %0, %step_size
+ %idxprom2 = sext i32 %shl1 to i64
+ %arrayidx.sum = add nsw i64 %idxprom2, %idxprom
+ %arrayidx3 = getelementptr inbounds [4096 x i32]* @phasor, i64 0, i64 %arrayidx.sum
+ %1 = load i32* %arrayidx3, align 4
+ %arrayidx5 = getelementptr inbounds i32* %out, i64 %indvars.iv
+ store i32 %1, i32* %arrayidx5, align 4
+ %indvars.iv.next = add nuw nsw i64 %indvars.iv, 4
+ %cmp = icmp slt i64 %indvars.iv.next, 1020
+ br i1 %cmp, label %for.body, label %for.end
+
+for.end: ; preds = %for.body
+ ret void
+}
+
+attributes #0 = { nounwind }
+
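
The address of @phasor is fetched through a TOC entry, and that load depends only on the invariant TOC pointer in r2, so it can be hoisted; the CHECK ordering enforces that the 'ld ...@toc@l' appears before the %for.body label. Expected shape, with hypothetical register numbers:

  addis 4, 2, .LC0@toc@ha
  ld 4, .LC0@toc@l(4)      # address of @phasor, loaded once
.LBB0_1:                   # %for.body: no TOC loads inside the loop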
diff --git a/test/CodeGen/PowerPC/loop-data-prefetch.ll b/test/CodeGen/PowerPC/loop-data-prefetch.ll
new file mode 100644
index 0000000..8871481
--- /dev/null
+++ b/test/CodeGen/PowerPC/loop-data-prefetch.ll
@@ -0,0 +1,29 @@
+; RUN: llc -mcpu=a2 < %s | FileCheck %s
+target datalayout = "E-m:e-i64:64-n32:64"
+target triple = "powerpc64-bgq-linux"
+
+; Function Attrs: nounwind
+define void @foo(double* nocapture %a, double* nocapture readonly %b) #0 {
+entry:
+ br label %for.body
+
+for.body: ; preds = %for.body, %entry
+ %indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
+ %arrayidx = getelementptr inbounds double* %b, i64 %indvars.iv
+ %0 = load double* %arrayidx, align 8
+ %add = fadd double %0, 1.000000e+00
+ %arrayidx2 = getelementptr inbounds double* %a, i64 %indvars.iv
+ store double %add, double* %arrayidx2, align 8
+ %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
+ %exitcond = icmp eq i64 %indvars.iv.next, 1600
+ br i1 %exitcond, label %for.end, label %for.body
+
+for.end: ; preds = %for.body
+ ret void
+
+; CHECK-LABEL: @foo
+; CHECK: dcbt
+}
+
+attributes #0 = { nounwind }
+
diff --git a/test/CodeGen/PowerPC/mult-alt-generic-powerpc.ll b/test/CodeGen/PowerPC/mult-alt-generic-powerpc.ll
index 659cdf7..743cc62 100644
--- a/test/CodeGen/PowerPC/mult-alt-generic-powerpc.ll
+++ b/test/CodeGen/PowerPC/mult-alt-generic-powerpc.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=ppc32
+; RUN: llc < %s -march=ppc32 -no-integrated-as
; ModuleID = 'mult-alt-generic.c'
target datalayout = "E-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v128:128:128-n32"
target triple = "powerpc"
diff --git a/test/CodeGen/PowerPC/mult-alt-generic-powerpc64.ll b/test/CodeGen/PowerPC/mult-alt-generic-powerpc64.ll
index 3da06f6..29a5786 100644
--- a/test/CodeGen/PowerPC/mult-alt-generic-powerpc64.ll
+++ b/test/CodeGen/PowerPC/mult-alt-generic-powerpc64.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=ppc64
+; RUN: llc < %s -march=ppc64 -no-integrated-as
; ModuleID = 'mult-alt-generic.c'
target datalayout = "E-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v128:128:128-n32:64"
target triple = "powerpc64"
diff --git a/test/CodeGen/PowerPC/named-reg-alloc-r0.ll b/test/CodeGen/PowerPC/named-reg-alloc-r0.ll
index e683f99..b669c35 100644
--- a/test/CodeGen/PowerPC/named-reg-alloc-r0.ll
+++ b/test/CodeGen/PowerPC/named-reg-alloc-r0.ll
@@ -12,4 +12,4 @@ entry:
declare i32 @llvm.read_register.i32(metadata) nounwind
-!0 = metadata !{metadata !"r0\00"}
+!0 = !{!"r0\00"}
diff --git a/test/CodeGen/PowerPC/named-reg-alloc-r1-64.ll b/test/CodeGen/PowerPC/named-reg-alloc-r1-64.ll
index b047f9f..419e12c 100644
--- a/test/CodeGen/PowerPC/named-reg-alloc-r1-64.ll
+++ b/test/CodeGen/PowerPC/named-reg-alloc-r1-64.ll
@@ -15,4 +15,4 @@ entry:
declare i64 @llvm.read_register.i64(metadata) nounwind
-!0 = metadata !{metadata !"r1\00"}
+!0 = !{!"r1\00"}
diff --git a/test/CodeGen/PowerPC/named-reg-alloc-r1.ll b/test/CodeGen/PowerPC/named-reg-alloc-r1.ll
index 9d0eb34..3ccab8c 100644
--- a/test/CodeGen/PowerPC/named-reg-alloc-r1.ll
+++ b/test/CodeGen/PowerPC/named-reg-alloc-r1.ll
@@ -17,4 +17,4 @@ entry:
declare i32 @llvm.read_register.i32(metadata) nounwind
-!0 = metadata !{metadata !"r1\00"}
+!0 = !{!"r1\00"}
diff --git a/test/CodeGen/PowerPC/named-reg-alloc-r13-64.ll b/test/CodeGen/PowerPC/named-reg-alloc-r13-64.ll
index df5085b..74e31fdd 100644
--- a/test/CodeGen/PowerPC/named-reg-alloc-r13-64.ll
+++ b/test/CodeGen/PowerPC/named-reg-alloc-r13-64.ll
@@ -15,4 +15,4 @@ entry:
declare i64 @llvm.read_register.i64(metadata) nounwind
-!0 = metadata !{metadata !"r13\00"}
+!0 = !{!"r13\00"}
diff --git a/test/CodeGen/PowerPC/named-reg-alloc-r13.ll b/test/CodeGen/PowerPC/named-reg-alloc-r13.ll
index 900ebb2..314f5d5 100644
--- a/test/CodeGen/PowerPC/named-reg-alloc-r13.ll
+++ b/test/CodeGen/PowerPC/named-reg-alloc-r13.ll
@@ -15,4 +15,4 @@ entry:
declare i32 @llvm.read_register.i32(metadata) nounwind
-!0 = metadata !{metadata !"r13\00"}
+!0 = !{!"r13\00"}
diff --git a/test/CodeGen/PowerPC/named-reg-alloc-r2-64.ll b/test/CodeGen/PowerPC/named-reg-alloc-r2-64.ll
index 0da33fa..834df8b 100644
--- a/test/CodeGen/PowerPC/named-reg-alloc-r2-64.ll
+++ b/test/CodeGen/PowerPC/named-reg-alloc-r2-64.ll
@@ -1,17 +1,14 @@
-; RUN: not llc < %s -mtriple=powerpc64-apple-darwin 2>&1 | FileCheck %s --check-prefix=CHECK-DARWIN
-; RUN: llc < %s -mtriple=powerpc64-unknown-linux-gnu 2>&1 | FileCheck %s
+; RUN: not llc < %s -mtriple=powerpc64-apple-darwin 2>&1 | FileCheck %s
+; RUN: not llc < %s -mtriple=powerpc64-unknown-linux-gnu 2>&1 | FileCheck %s
define i64 @get_reg() nounwind {
entry:
; FIXME: Include an allocatable-specific error message
-; CHECK-DARWIN: Invalid register name global variable
+; CHECK: Invalid register name global variable
%reg = call i64 @llvm.read_register.i64(metadata !0)
ret i64 %reg
-
-; CHECK-LABEL: @get_reg
-; CHECK: mr 3, 2
}
declare i64 @llvm.read_register.i64(metadata) nounwind
-!0 = metadata !{metadata !"r2\00"}
+!0 = !{!"r2\00"}
diff --git a/test/CodeGen/PowerPC/named-reg-alloc-r2.ll b/test/CodeGen/PowerPC/named-reg-alloc-r2.ll
index 51e7e3e..45d9816 100644
--- a/test/CodeGen/PowerPC/named-reg-alloc-r2.ll
+++ b/test/CodeGen/PowerPC/named-reg-alloc-r2.ll
@@ -1,11 +1,11 @@
-; RUN: not llc < %s -mtriple=powerpc-apple-darwin 2>&1 | FileCheck %s --check-prefix=CHECK-DARWIN
+; RUN: not llc < %s -mtriple=powerpc-apple-darwin 2>&1 | FileCheck %s --check-prefix=CHECK-NOTPPC32
; RUN: llc < %s -mtriple=powerpc-unknown-linux-gnu 2>&1 | FileCheck %s
-; RUN: llc < %s -mtriple=powerpc64-unknown-linux-gnu 2>&1 | FileCheck %s
+; RUN: not llc < %s -mtriple=powerpc64-unknown-linux-gnu 2>&1 | FileCheck %s --check-prefix=CHECK-NOTPPC32
define i32 @get_reg() nounwind {
entry:
; FIXME: Include an allocatable-specific error message
-; CHECK-DARWIN: Invalid register name global variable
+; CHECK-NOTPPC32: Invalid register name global variable
%reg = call i32 @llvm.read_register.i32(metadata !0)
ret i32 %reg
@@ -15,4 +15,4 @@ entry:
declare i32 @llvm.read_register.i32(metadata) nounwind
-!0 = metadata !{metadata !"r2\00"}
+!0 = !{!"r2\00"}
diff --git a/test/CodeGen/PowerPC/no-extra-fp-conv-ldst.ll b/test/CodeGen/PowerPC/no-extra-fp-conv-ldst.ll
new file mode 100644
index 0000000..6beee25
--- /dev/null
+++ b/test/CodeGen/PowerPC/no-extra-fp-conv-ldst.ll
@@ -0,0 +1,96 @@
+; RUN: llc -mcpu=a2 < %s | FileCheck %s
+target datalayout = "E-m:e-i64:64-n32:64"
+target triple = "powerpc64-unknown-linux-gnu"
+
+; Function Attrs: nounwind readonly
+define double @test1(i64* nocapture readonly %x) #0 {
+entry:
+ %0 = load i64* %x, align 8
+ %conv = sitofp i64 %0 to double
+ ret double %conv
+
+; CHECK-LABEL: @test1
+; CHECK: lfd [[REG1:[0-9]+]], 0(3)
+; CHECK: fcfid 1, [[REG1]]
+; CHECK: blr
+}
+
+; Function Attrs: nounwind readonly
+define double @test2(i32* nocapture readonly %x) #0 {
+entry:
+ %0 = load i32* %x, align 4
+ %conv = sitofp i32 %0 to double
+ ret double %conv
+
+; CHECK-LABEL: @test2
+; CHECK: lfiwax [[REG1:[0-9]+]], 0, 3
+; CHECK: fcfid 1, [[REG1]]
+; CHECK: blr
+}
+
+; Function Attrs: nounwind readnone
+define float @foo(float %X) #0 {
+entry:
+ %conv = fptosi float %X to i32
+ %conv1 = sitofp i32 %conv to float
+ ret float %conv1
+
+; CHECK-LABEL: @foo
+; CHECK-DAG: fctiwz [[REG2:[0-9]+]], 1
+; CHECK-DAG: addi [[REG1:[0-9]+]], 1,
+; CHECK: stfiwx [[REG2]], 0, [[REG1]]
+; CHECK: lfiwax [[REG3:[0-9]+]], 0, [[REG1]]
+; CHECK: fcfids 1, [[REG3]]
+; CHECK: blr
+}
+
+; Function Attrs: nounwind readnone
+define double @food(double %X) #0 {
+entry:
+ %conv = fptosi double %X to i32
+ %conv1 = sitofp i32 %conv to double
+ ret double %conv1
+
+; CHECK-LABEL: @food
+; CHECK-DAG: fctiwz [[REG2:[0-9]+]], 1
+; CHECK-DAG: addi [[REG1:[0-9]+]], 1,
+; CHECK: stfiwx [[REG2]], 0, [[REG1]]
+; CHECK: lfiwax [[REG3:[0-9]+]], 0, [[REG1]]
+; CHECK: fcfid 1, [[REG3]]
+; CHECK: blr
+}
+
+; Function Attrs: nounwind readnone
+define float @foou(float %X) #0 {
+entry:
+ %conv = fptoui float %X to i32
+ %conv1 = uitofp i32 %conv to float
+ ret float %conv1
+
+; CHECK-LABEL: @foou
+; CHECK-DAG: fctiwuz [[REG2:[0-9]+]], 1
+; CHECK-DAG: addi [[REG1:[0-9]+]], 1,
+; CHECK: stfiwx [[REG2]], 0, [[REG1]]
+; CHECK: lfiwzx [[REG3:[0-9]+]], 0, [[REG1]]
+; CHECK: fcfidus 1, [[REG3]]
+; CHECK: blr
+}
+
+; Function Attrs: nounwind readnone
+define double @fooud(double %X) #0 {
+entry:
+ %conv = fptoui double %X to i32
+ %conv1 = uitofp i32 %conv to double
+ ret double %conv1
+
+; CHECK-LABEL: @fooud
+; CHECK-DAG: fctiwuz [[REG2:[0-9]+]], 1
+; CHECK-DAG: addi [[REG1:[0-9]+]], 1,
+; CHECK: stfiwx [[REG2]], 0, [[REG1]]
+; CHECK: lfiwzx [[REG3:[0-9]+]], 0, [[REG1]]
+; CHECK: fcfidu 1, [[REG3]]
+; CHECK: blr
+}
+
+attributes #0 = { nounwind readonly }
+
diff --git a/test/CodeGen/PowerPC/no-pref-jumps.ll b/test/CodeGen/PowerPC/no-pref-jumps.ll
new file mode 100644
index 0000000..d9490f1
--- /dev/null
+++ b/test/CodeGen/PowerPC/no-pref-jumps.ll
@@ -0,0 +1,36 @@
+; RUN: llc -mcpu=pwr7 < %s | FileCheck %s
+target datalayout = "E-m:e-i64:64-n32:64"
+target triple = "powerpc64-unknown-linux-gnu"
+
+; Function Attrs: nounwind
+define void @foo(i32 signext %a, i32 signext %b) #0 {
+entry:
+ %cmp = icmp sgt i32 %a, 5
+ %cmp1 = icmp slt i32 %b, 3
+ %or.cond = or i1 %cmp, %cmp1
+ br i1 %or.cond, label %if.then, label %if.else
+
+; CHECK-LABEL: @foo
+; CHECK: cmpwi
+; CHECK: cmpwi
+; CHECK: cror
+; CHECK: blr
+
+if.then: ; preds = %entry
+ tail call void bitcast (void (...)* @bar to void ()*)() #0
+ br label %if.end
+
+if.else: ; preds = %entry
+ tail call void bitcast (void (...)* @car to void ()*)() #0
+ br label %if.end
+
+if.end: ; preds = %if.else, %if.then
+ ret void
+}
+
+declare void @bar(...)
+
+declare void @car(...)
+
+attributes #0 = { nounwind }
+
diff --git a/test/CodeGen/PowerPC/p8-isel-sched.ll b/test/CodeGen/PowerPC/p8-isel-sched.ll
new file mode 100644
index 0000000..034fe3c
--- /dev/null
+++ b/test/CodeGen/PowerPC/p8-isel-sched.ll
@@ -0,0 +1,33 @@
+; RUN: llc -mcpu=pwr8 < %s | FileCheck %s
+target datalayout = "E-m:e-i64:64-n32:64"
+target triple = "powerpc64-unknown-linux-gnu"
+
+; Function Attrs: nounwind
+define void @foo(i32* nocapture %r1, i32* nocapture %r2, i32* nocapture %r3, i32* nocapture %r4, i32 signext %a, i32 signext %b, i32 signext %c, i32 signext %d) #0 {
+entry:
+ %tobool = icmp ne i32 %a, 0
+ %cond = select i1 %tobool, i32 %b, i32 %c
+ store i32 %cond, i32* %r1, align 4
+ %cond5 = select i1 %tobool, i32 %b, i32 %d
+ store i32 %cond5, i32* %r2, align 4
+ %add = add nsw i32 %b, 1
+ %sub = add nsw i32 %d, -2
+ %cond10 = select i1 %tobool, i32 %add, i32 %sub
+ store i32 %cond10, i32* %r3, align 4
+ %add13 = add nsw i32 %b, 3
+ %sub15 = add nsw i32 %d, -5
+ %cond17 = select i1 %tobool, i32 %add13, i32 %sub15
+ store i32 %cond17, i32* %r4, align 4
+ ret void
+}
+
+; Make sure that we don't schedule all of the isels together; they should be
+; intermixed with the adds because each isel starts a new dispatch group.
+; CHECK-LABEL: @foo
+; CHECK: isel
+; CHECK: addi
+; CHECK: isel
+; CHECK: blr
+
+attributes #0 = { nounwind }
+
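
Because each isel opens a new dispatch group on POWER8, four back-to-back isels would leave most of each group's issue slots empty; interleaving them with the independent addis keeps the groups filled. The CHECK lines accept any schedule of roughly this shape (register numbers hypothetical):

  isel 9, 6, 7, 2      # %cond
  addi 10, 6, 1        # %add, independent work in the same group
  isel 11, 10, 12, 2   # next select opens the following group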
diff --git a/test/CodeGen/PowerPC/post-ra-ec.ll b/test/CodeGen/PowerPC/post-ra-ec.ll
new file mode 100644
index 0000000..9c61677
--- /dev/null
+++ b/test/CodeGen/PowerPC/post-ra-ec.ll
@@ -0,0 +1,47 @@
+; RUN: llc -mcpu=pwr7 < %s | FileCheck %s
+target datalayout = "E-m:e-i64:64-n32:64"
+target triple = "powerpc64-unknown-linux-gnu"
+
+%struct.inode.0.12.120 = type { i8* }
+%struct.kstat2.1.13.121 = type { i32 }
+%struct.task_struct.4.16.124 = type { i8*, %struct.atomic_t.2.14.122, %struct.signal_struct.3.15.123* }
+%struct.atomic_t.2.14.122 = type { i32 }
+%struct.signal_struct.3.15.123 = type { i64 }
+%struct.pid.5.17.125 = type { i8* }
+
+; Function Attrs: nounwind
+define signext i32 @proc_task_getattr(%struct.inode.0.12.120* nocapture readonly %inode, %struct.kstat2.1.13.121* nocapture %stat) #0 {
+entry:
+ %call1.i = tail call %struct.task_struct.4.16.124* @get_pid_task(%struct.pid.5.17.125* undef, i32 zeroext 0) #0
+ br i1 undef, label %if.end, label %if.then
+
+if.then: ; preds = %entry
+ %0 = load i64* undef, align 8
+ %conv.i = trunc i64 %0 to i32
+ %1 = load i32* null, align 4
+ %add = add i32 %1, %conv.i
+ store i32 %add, i32* null, align 4
+ %counter.i.i = getelementptr inbounds %struct.task_struct.4.16.124* %call1.i, i64 0, i32 1, i32 0
+ %2 = tail call i32 asm sideeffect "\09lwsync\0A1:\09lwarx\09$0,0,$1\09\09# atomic_dec_return\0A\09addic\09$0,$0,-1\0A\09stwcx.\09$0,0,$1\0A\09bne-\091b\0A\09sync\0A", "=&r,r,~{cr0},~{xer},~{memory}"(i32* %counter.i.i) #0
+ %cmp.i = icmp eq i32 %2, 0
+ br i1 %cmp.i, label %if.then.i, label %if.end
+
+; CHECK-LABEL: @proc_task_getattr
+; CHECK-NOT: stwcx. [[REG:[0-9]+]],0,[[REG]]
+; CHECK: blr
+
+if.then.i: ; preds = %if.then
+ %3 = bitcast %struct.task_struct.4.16.124* %call1.i to i8*
+ tail call void @foo(i8* %3) #0
+ unreachable
+
+if.end: ; preds = %if.then, %entry
+ ret i32 0
+}
+
+declare void @foo(i8*)
+
+declare %struct.task_struct.4.16.124* @get_pid_task(%struct.pid.5.17.125*, i32 zeroext)
+
+attributes #0 = { nounwind }
+
diff --git a/test/CodeGen/PowerPC/ppc32-cyclecounter.ll b/test/CodeGen/PowerPC/ppc32-cyclecounter.ll
new file mode 100644
index 0000000..9e2cd0b
--- /dev/null
+++ b/test/CodeGen/PowerPC/ppc32-cyclecounter.ll
@@ -0,0 +1,20 @@
+target datalayout = "E-m:e-p:32:32-i64:64-n32"
+target triple = "powerpc"
+; RUN: llc -mcpu=ppc < %s | FileCheck %s
+; RUN: llc -mcpu=pwr7 < %s | FileCheck %s
+
+define i64 @test1() nounwind {
+entry:
+ %r = call i64 @llvm.readcyclecounter()
+ ret i64 %r
+}
+
+; CHECK: @test1
+; CHECK: mfspr 3, 269
+; CHECK: mfspr 4, 268
+; CHECK: mfspr [[REG:[0-9]+]], 269
+; CHECK: cmpw [[CR:[0-9]+]], 3, [[REG]]
+; CHECK: bne [[CR]], .LBB
+
+declare i64 @llvm.readcyclecounter()
+
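
On 32-bit PowerPC the 64-bit time base cannot be read in one instruction, so readcyclecounter expands to the classic retry loop the CHECK lines trace: read the upper half, the lower half, then the upper half again, and retry if the two upper reads differ (the low word carried into the high word mid-sequence). With hypothetical labels and registers:

.LBB0_1:
  mfspr 3, 269       # time base, upper 32 bits
  mfspr 4, 268       # time base, lower 32 bits
  mfspr 5, 269       # upper half again
  cmpw 0, 3, 5
  bne 0, .LBB0_1     # high word changed under us; retry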
diff --git a/test/CodeGen/PowerPC/ppc32-lshrti3.ll b/test/CodeGen/PowerPC/ppc32-lshrti3.ll
index 6e76fea..f773cce 100644
--- a/test/CodeGen/PowerPC/ppc32-lshrti3.ll
+++ b/test/CodeGen/PowerPC/ppc32-lshrti3.ll
@@ -36,4 +36,4 @@ attributes #0 = { nounwind uwtable "less-precise-fpmad"="false" "no-frame-pointe
!llvm.ident = !{!0}
-!0 = metadata !{metadata !"clang version 3.5.0 (213754)"}
+!0 = !{!"clang version 3.5.0 (213754)"}
diff --git a/test/CodeGen/PowerPC/ppc32-pic-large.ll b/test/CodeGen/PowerPC/ppc32-pic-large.ll
index ecc4f10..bb906ec 100644
--- a/test/CodeGen/PowerPC/ppc32-pic-large.ll
+++ b/test/CodeGen/PowerPC/ppc32-pic-large.ll
@@ -1,23 +1,29 @@
; RUN: llc < %s -mtriple=powerpc-unknown-linux-gnu -relocation-model=pic | FileCheck -check-prefix=LARGE-BSS %s
@bar = common global i32 0, align 4
+declare i32 @call_foo(i32, ...)
+
define i32 @foo() {
entry:
%0 = load i32* @bar, align 4
+ %call = call i32 (i32, ...)* @call_foo(i32 %0, i32 0, i32 1, i32 2, i32 4, i32 8, i32 16, i32 32, i32 64)
ret i32 %0
}
!llvm.module.flags = !{!0}
-!0 = metadata !{i32 1, metadata !"PIC Level", i32 2}
+!0 = !{i32 1, !"PIC Level", i32 2}
; LARGE-BSS: [[POFF:\.L[0-9]+\$poff]]:
; LARGE-BSS-NEXT: .long .LTOC-[[PB:\.L[0-9]+\$pb]]
; LARGE-BSS-NEXT: foo:
+; LARGE-BSS: stw 30, -8(1)
; LARGE-BSS: bl [[PB]]
; LARGE-BSS-NEXT: [[PB]]:
; LARGE-BSS: mflr 30
; LARGE-BSS: lwz [[REG:[0-9]+]], [[POFF]]-[[PB]](30)
; LARGE-BSS-NEXT: add 30, [[REG]], 30
-; LARGE-BSS: lwz [[VREG:[0-9]+]], [[VREF:\.LC[0-9]+]]-.LTOC(30)
-; LARGE-BSS: lwz {{[0-9]+}}, 0([[VREG]])
+; LARGE-BSS-DAG: lwz [[VREG:[0-9]+]], [[VREF:\.LC[0-9]+]]-.LTOC(30)
+; LARGE-BSS-DAG: lwz {{[0-9]+}}, 0([[VREG]])
+; LARGE-BSS-DAG: stw {{[0-9]+}}, 8(1)
+; LARGE-BSS: lwz 30, -8(1)
; LARGE-BSS: [[VREF]]:
; LARGE-BSS-NEXT: .long bar
diff --git a/test/CodeGen/PowerPC/ppc32-pic.ll b/test/CodeGen/PowerPC/ppc32-pic.ll
index f9c3467..abc1367 100644
--- a/test/CodeGen/PowerPC/ppc32-pic.ll
+++ b/test/CodeGen/PowerPC/ppc32-pic.ll
@@ -1,16 +1,24 @@
; RUN: llc < %s -mtriple=powerpc-unknown-linux-gnu -relocation-model=pic | FileCheck -check-prefix=SMALL-BSS %s
@bar = common global i32 0, align 4
+declare i32 @call_foo(i32, ...)
+
define i32 @foo() {
entry:
%0 = load i32* @bar, align 4
- ret i32 %0
+ %call = call i32 (i32, ...)* @call_foo(i32 %0, i32 0, i32 1, i32 2, i32 4, i32 8, i32 16, i32 32, i32 64)
+ ret i32 0
}
!llvm.module.flags = !{!0}
-!0 = metadata !{i32 1, metadata !"PIC Level", i32 1}
+!0 = !{i32 1, !"PIC Level", i32 1}
; SMALL-BSS-LABEL:foo:
+; SMALL-BSS: stw 30, -8(1)
+; SMALL-BSS: stwu 1, -32(1)
; SMALL-BSS: bl _GLOBAL_OFFSET_TABLE_@local-4
; SMALL-BSS: mflr 30
-; SMALL-BSS: lwz [[VREG:[0-9]+]], bar@GOT(30)
-; SMALL-BSS: lwz {{[0-9]+}}, 0([[VREG]])
+; SMALL-BSS-DAG: stw {{[0-9]+}}, 8(1)
+; SMALL-BSS-DAG: lwz [[VREG:[0-9]+]], bar@GOT(30)
+; SMALL-BSS-DAG: lwz {{[0-9]+}}, 0([[VREG]])
+; SMALL-BSS: bl call_foo@PLT
+; SMALL-BSS: lwz 30, -8(1)
diff --git a/test/CodeGen/PowerPC/ppc64-anyregcc-crash.ll b/test/CodeGen/PowerPC/ppc64-anyregcc-crash.ll
new file mode 100644
index 0000000..479c7a7
--- /dev/null
+++ b/test/CodeGen/PowerPC/ppc64-anyregcc-crash.ll
@@ -0,0 +1,19 @@
+; RUN: not llc < %s -mtriple=powerpc64-unknown-linux-gnu 2>&1 | FileCheck %s
+;
+; Check that misuse of anyregcc results in a compile-time error.
+
+; CHECK: LLVM ERROR: ran out of registers during register allocation
+define i64 @anyreglimit(i64 %v1, i64 %v2, i64 %v3, i64 %v4, i64 %v5, i64 %v6, i64 %v7, i64 %v8,
+ i64 %v9, i64 %v10, i64 %v11, i64 %v12, i64 %v13, i64 %v14, i64 %v15, i64 %v16,
+ i64 %v17, i64 %v18, i64 %v19, i64 %v20, i64 %v21, i64 %v22, i64 %v23, i64 %v24,
+ i64 %v25, i64 %v26, i64 %v27, i64 %v28, i64 %v29, i64 %v30, i64 %v31, i64 %v32) {
+entry:
+ %result = tail call anyregcc i64 (i64, i32, i8*, i32, ...)* @llvm.experimental.patchpoint.i64(i64 12, i32 15, i8* inttoptr (i64 0 to i8*), i32 32,
+ i64 %v1, i64 %v2, i64 %v3, i64 %v4, i64 %v5, i64 %v6, i64 %v7, i64 %v8,
+ i64 %v9, i64 %v10, i64 %v11, i64 %v12, i64 %v13, i64 %v14, i64 %v15, i64 %v16,
+ i64 %v17, i64 %v18, i64 %v19, i64 %v20, i64 %v21, i64 %v22, i64 %v23, i64 %v24,
+ i64 %v25, i64 %v26, i64 %v27, i64 %v28, i64 %v29, i64 %v30, i64 %v31, i64 %v32)
+ ret i64 %result
+}
+
+declare i64 @llvm.experimental.patchpoint.i64(i64, i32, i8*, i32, ...)
diff --git a/test/CodeGen/PowerPC/ppc64-anyregcc.ll b/test/CodeGen/PowerPC/ppc64-anyregcc.ll
new file mode 100644
index 0000000..8b4cec5
--- /dev/null
+++ b/test/CodeGen/PowerPC/ppc64-anyregcc.ll
@@ -0,0 +1,367 @@
+; RUN: llc < %s | FileCheck %s
+target datalayout = "E-m:e-i64:64-n32:64"
+target triple = "powerpc64-unknown-linux-gnu"
+
+; Stackmap Header: no constants - 6 callsites
+; CHECK-LABEL: .section .llvm_stackmaps
+; CHECK-NEXT: __LLVM_StackMaps:
+; Header
+; CHECK-NEXT: .byte 1
+; CHECK-NEXT: .byte 0
+; CHECK-NEXT: .short 0
+; Num Functions
+; CHECK-NEXT: .long 8
+; Num LargeConstants
+; CHECK-NEXT: .long 0
+; Num Callsites
+; CHECK-NEXT: .long 8
+
+; Functions and stack size
+; CHECK-NEXT: .quad test
+; CHECK-NEXT: .quad 128
+; CHECK-NEXT: .quad property_access1
+; CHECK-NEXT: .quad 128
+; CHECK-NEXT: .quad property_access2
+; CHECK-NEXT: .quad 128
+; CHECK-NEXT: .quad property_access3
+; CHECK-NEXT: .quad 128
+; CHECK-NEXT: .quad anyreg_test1
+; CHECK-NEXT: .quad 144
+; CHECK-NEXT: .quad anyreg_test2
+; CHECK-NEXT: .quad 144
+; CHECK-NEXT: .quad patchpoint_spilldef
+; CHECK-NEXT: .quad 256
+; CHECK-NEXT: .quad patchpoint_spillargs
+; CHECK-NEXT: .quad 288
+
+
+; test
+; CHECK-LABEL: .long .L{{.*}}-.L.test
+; CHECK-NEXT: .short 0
+; 3 locations
+; CHECK-NEXT: .short 3
+; Loc 0: Register
+; CHECK-NEXT: .byte 1
+; CHECK-NEXT: .byte 4
+; CHECK-NEXT: .short {{[0-9]+}}
+; CHECK-NEXT: .long 0
+; Loc 1: Register
+; CHECK-NEXT: .byte 1
+; CHECK-NEXT: .byte 4
+; CHECK-NEXT: .short {{[0-9]+}}
+; CHECK-NEXT: .long 0
+; Loc 2: Constant 3
+; CHECK-NEXT: .byte 4
+; CHECK-NEXT: .byte 8
+; CHECK-NEXT: .short 0
+; CHECK-NEXT: .long 3
+define i64 @test() nounwind ssp uwtable {
+entry:
+ call anyregcc void (i64, i32, i8*, i32, ...)* @llvm.experimental.patchpoint.void(i64 0, i32 24, i8* null, i32 2, i32 1, i32 2, i64 3)
+ ret i64 0
+}
+
+; property access 1 - %obj is an anyreg call argument and should therefore be in a register
+; CHECK-LABEL: .long .L{{.*}}-.L.property_access1
+; CHECK-NEXT: .short 0
+; 2 locations
+; CHECK-NEXT: .short 2
+; Loc 0: Register <-- this is the return register
+; CHECK-NEXT: .byte 1
+; CHECK-NEXT: .byte 8
+; CHECK-NEXT: .short {{[0-9]+}}
+; CHECK-NEXT: .long 0
+; Loc 1: Register
+; CHECK-NEXT: .byte 1
+; CHECK-NEXT: .byte 8
+; CHECK-NEXT: .short {{[0-9]+}}
+; CHECK-NEXT: .long 0
+define i64 @property_access1(i8* %obj) nounwind ssp uwtable {
+entry:
+ %f = inttoptr i64 281474417671919 to i8*
+ %ret = call anyregcc i64 (i64, i32, i8*, i32, ...)* @llvm.experimental.patchpoint.i64(i64 1, i32 24, i8* %f, i32 1, i8* %obj)
+ ret i64 %ret
+}
+
+; property access 2 - %obj is an anyreg call argument and should therefore be in a register
+; CHECK-LABEL: .long .L{{.*}}-.L.property_access2
+; CHECK-NEXT: .short 0
+; 2 locations
+; CHECK-NEXT: .short 2
+; Loc 0: Register <-- this is the return register
+; CHECK-NEXT: .byte 1
+; CHECK-NEXT: .byte 8
+; CHECK-NEXT: .short {{[0-9]+}}
+; CHECK-NEXT: .long 0
+; Loc 1: Register
+; CHECK-NEXT: .byte 1
+; CHECK-NEXT: .byte 8
+; CHECK-NEXT: .short {{[0-9]+}}
+; CHECK-NEXT: .long 0
+define i64 @property_access2() nounwind ssp uwtable {
+entry:
+ %obj = alloca i64, align 8
+ %f = inttoptr i64 281474417671919 to i8*
+ %ret = call anyregcc i64 (i64, i32, i8*, i32, ...)* @llvm.experimental.patchpoint.i64(i64 2, i32 24, i8* %f, i32 1, i64* %obj)
+ ret i64 %ret
+}
+
+; property access 3 - %obj is a frame index
+; CHECK-LABEL: .long .L{{.*}}-.L.property_access3
+; CHECK-NEXT: .short 0
+; 2 locations
+; CHECK-NEXT: .short 2
+; Loc 0: Register <-- this is the return register
+; CHECK-NEXT: .byte 1
+; CHECK-NEXT: .byte 8
+; CHECK-NEXT: .short {{[0-9]+}}
+; CHECK-NEXT: .long 0
+; Loc 1: Direct FP - 8
+; CHECK-NEXT: .byte 2
+; CHECK-NEXT: .byte 8
+; CHECK-NEXT: .short 31
+; CHECK-NEXT: .long 112
+define i64 @property_access3() nounwind ssp uwtable {
+entry:
+ %obj = alloca i64, align 8
+ %f = inttoptr i64 281474417671919 to i8*
+ %ret = call anyregcc i64 (i64, i32, i8*, i32, ...)* @llvm.experimental.patchpoint.i64(i64 3, i32 24, i8* %f, i32 0, i64* %obj)
+ ret i64 %ret
+}
+
+; anyreg_test1
+; CHECK-LABEL: .long .L{{.*}}-.L.anyreg_test1
+; CHECK-NEXT: .short 0
+; 14 locations
+; CHECK-NEXT: .short 14
+; Loc 0: Register <-- this is the return register
+; CHECK-NEXT: .byte 1
+; CHECK-NEXT: .byte 8
+; CHECK-NEXT: .short {{[0-9]+}}
+; CHECK-NEXT: .long 0
+; Loc 1: Register
+; CHECK-NEXT: .byte 1
+; CHECK-NEXT: .byte 8
+; CHECK-NEXT: .short {{[0-9]+}}
+; CHECK-NEXT: .long 0
+; Loc 2: Register
+; CHECK-NEXT: .byte 1
+; CHECK-NEXT: .byte 8
+; CHECK-NEXT: .short {{[0-9]+}}
+; CHECK-NEXT: .long 0
+; Loc 3: Register
+; CHECK-NEXT: .byte 1
+; CHECK-NEXT: .byte 8
+; CHECK-NEXT: .short {{[0-9]+}}
+; CHECK-NEXT: .long 0
+; Loc 4: Register
+; CHECK-NEXT: .byte 1
+; CHECK-NEXT: .byte 8
+; CHECK-NEXT: .short {{[0-9]+}}
+; CHECK-NEXT: .long 0
+; Loc 5: Register
+; CHECK-NEXT: .byte 1
+; CHECK-NEXT: .byte 8
+; CHECK-NEXT: .short {{[0-9]+}}
+; CHECK-NEXT: .long 0
+; Loc 6: Register
+; CHECK-NEXT: .byte 1
+; CHECK-NEXT: .byte 8
+; CHECK-NEXT: .short {{[0-9]+}}
+; CHECK-NEXT: .long 0
+; Loc 7: Register
+; CHECK-NEXT: .byte 1
+; CHECK-NEXT: .byte 8
+; CHECK-NEXT: .short {{[0-9]+}}
+; CHECK-NEXT: .long 0
+; Loc 8: Register
+; CHECK-NEXT: .byte 1
+; CHECK-NEXT: .byte 8
+; CHECK-NEXT: .short {{[0-9]+}}
+; CHECK-NEXT: .long 0
+; Loc 9: Register
+; CHECK-NEXT: .byte 1
+; CHECK-NEXT: .byte 8
+; CHECK-NEXT: .short {{[0-9]+}}
+; CHECK-NEXT: .long 0
+; Loc 10: Register
+; CHECK-NEXT: .byte 1
+; CHECK-NEXT: .byte 8
+; CHECK-NEXT: .short {{[0-9]+}}
+; CHECK-NEXT: .long 0
+; Loc 11: Register
+; CHECK-NEXT: .byte 1
+; CHECK-NEXT: .byte 8
+; CHECK-NEXT: .short {{[0-9]+}}
+; CHECK-NEXT: .long 0
+; Loc 12: Register
+; CHECK-NEXT: .byte 1
+; CHECK-NEXT: .byte 8
+; CHECK-NEXT: .short {{[0-9]+}}
+; CHECK-NEXT: .long 0
+; Loc 13: Register
+; CHECK-NEXT: .byte 1
+; CHECK-NEXT: .byte 8
+; CHECK-NEXT: .short {{[0-9]+}}
+; CHECK-NEXT: .long 0
+define i64 @anyreg_test1(i8* %a1, i8* %a2, i8* %a3, i8* %a4, i8* %a5, i8* %a6, i8* %a7, i8* %a8, i8* %a9, i8* %a10, i8* %a11, i8* %a12, i8* %a13) nounwind ssp uwtable {
+entry:
+ %f = inttoptr i64 281474417671919 to i8*
+ %ret = call anyregcc i64 (i64, i32, i8*, i32, ...)* @llvm.experimental.patchpoint.i64(i64 4, i32 24, i8* %f, i32 13, i8* %a1, i8* %a2, i8* %a3, i8* %a4, i8* %a5, i8* %a6, i8* %a7, i8* %a8, i8* %a9, i8* %a10, i8* %a11, i8* %a12, i8* %a13)
+ ret i64 %ret
+}
+
+; anyreg_test2
+; CHECK-LABEL: .long .L{{.*}}-.L.anyreg_test2
+; CHECK-NEXT: .short 0
+; 14 locations
+; CHECK-NEXT: .short 14
+; Loc 0: Register <-- this is the return register
+; CHECK-NEXT: .byte 1
+; CHECK-NEXT: .byte 8
+; CHECK-NEXT: .short {{[0-9]+}}
+; CHECK-NEXT: .long 0
+; Loc 1: Register
+; CHECK-NEXT: .byte 1
+; CHECK-NEXT: .byte 8
+; CHECK-NEXT: .short {{[0-9]+}}
+; CHECK-NEXT: .long 0
+; Loc 2: Register
+; CHECK-NEXT: .byte 1
+; CHECK-NEXT: .byte 8
+; CHECK-NEXT: .short {{[0-9]+}}
+; CHECK-NEXT: .long 0
+; Loc 3: Register
+; CHECK-NEXT: .byte 1
+; CHECK-NEXT: .byte 8
+; CHECK-NEXT: .short {{[0-9]+}}
+; CHECK-NEXT: .long 0
+; Loc 4: Register
+; CHECK-NEXT: .byte 1
+; CHECK-NEXT: .byte 8
+; CHECK-NEXT: .short {{[0-9]+}}
+; CHECK-NEXT: .long 0
+; Loc 5: Register
+; CHECK-NEXT: .byte 1
+; CHECK-NEXT: .byte 8
+; CHECK-NEXT: .short {{[0-9]+}}
+; CHECK-NEXT: .long 0
+; Loc 6: Register
+; CHECK-NEXT: .byte 1
+; CHECK-NEXT: .byte 8
+; CHECK-NEXT: .short {{[0-9]+}}
+; CHECK-NEXT: .long 0
+; Loc 7: Register
+; CHECK-NEXT: .byte 1
+; CHECK-NEXT: .byte 8
+; CHECK-NEXT: .short {{[0-9]+}}
+; CHECK-NEXT: .long 0
+; Loc 8: Register
+; CHECK-NEXT: .byte 1
+; CHECK-NEXT: .byte 8
+; CHECK-NEXT: .short {{[0-9]+}}
+; CHECK-NEXT: .long 0
+; Loc 9: Register
+; CHECK-NEXT: .byte 1
+; CHECK-NEXT: .byte 8
+; CHECK-NEXT: .short {{[0-9]+}}
+; CHECK-NEXT: .long 0
+; Loc 10: Register
+; CHECK-NEXT: .byte 1
+; CHECK-NEXT: .byte 8
+; CHECK-NEXT: .short {{[0-9]+}}
+; CHECK-NEXT: .long 0
+; Loc 11: Register
+; CHECK-NEXT: .byte 1
+; CHECK-NEXT: .byte 8
+; CHECK-NEXT: .short {{[0-9]+}}
+; CHECK-NEXT: .long 0
+; Loc 12: Register
+; CHECK-NEXT: .byte 1
+; CHECK-NEXT: .byte 8
+; CHECK-NEXT: .short {{[0-9]+}}
+; CHECK-NEXT: .long 0
+; Loc 13: Register
+; CHECK-NEXT: .byte 1
+; CHECK-NEXT: .byte 8
+; CHECK-NEXT: .short {{[0-9]+}}
+; CHECK-NEXT: .long 0
+define i64 @anyreg_test2(i8* %a1, i8* %a2, i8* %a3, i8* %a4, i8* %a5, i8* %a6, i8* %a7, i8* %a8, i8* %a9, i8* %a10, i8* %a11, i8* %a12, i8* %a13) nounwind ssp uwtable {
+entry:
+ %f = inttoptr i64 281474417671919 to i8*
+ %ret = call anyregcc i64 (i64, i32, i8*, i32, ...)* @llvm.experimental.patchpoint.i64(i64 5, i32 24, i8* %f, i32 8, i8* %a1, i8* %a2, i8* %a3, i8* %a4, i8* %a5, i8* %a6, i8* %a7, i8* %a8, i8* %a9, i8* %a10, i8* %a11, i8* %a12, i8* %a13)
+ ret i64 %ret
+}
+
+; Test spilling the return value of an anyregcc call.
+;
+; <rdar://problem/15432754> [JS] Assertion: "Folded a def to a non-store!"
+;
+; CHECK-LABEL: .long .L{{.*}}-.L.patchpoint_spilldef
+; CHECK-NEXT: .short 0
+; CHECK-NEXT: .short 3
+; Loc 0: Register (some register that will be spilled to the stack)
+; CHECK-NEXT: .byte 1
+; CHECK-NEXT: .byte 8
+; CHECK-NEXT: .short {{[0-9]+}}
+; CHECK-NEXT: .long 0
+; Loc 1: Register
+; CHECK-NEXT: .byte 1
+; CHECK-NEXT: .byte 8
+; CHECK-NEXT: .short {{[0-9]+}}
+; CHECK-NEXT: .long 0
+; Loc 2: Register
+; CHECK-NEXT: .byte 1
+; CHECK-NEXT: .byte 8
+; CHECK-NEXT: .short {{[0-9]+}}
+; CHECK-NEXT: .long 0
+define i64 @patchpoint_spilldef(i64 %p1, i64 %p2, i64 %p3, i64 %p4) {
+entry:
+ %result = tail call anyregcc i64 (i64, i32, i8*, i32, ...)* @llvm.experimental.patchpoint.i64(i64 12, i32 24, i8* inttoptr (i64 0 to i8*), i32 2, i64 %p1, i64 %p2)
+ tail call void asm sideeffect "nop", "~{r0},~{r3},~{r4},~{r5},~{r6},~{r7},~{r8},~{r9},~{r10},~{r11},~{r12},~{r14},~{r15},~{r16},~{r17
+},~{r18},~{r19},~{r20},~{r21},~{r22},~{r23},~{r24},~{r25},~{r26},~{r27},~{r28},~{r29},~{r30},~{r31}"() nounwind
+ ret i64 %result
+}
+
+; Test spilling the arguments of an anyregcc call.
+;
+; <rdar://problem/15487687> [JS] AnyRegCC argument ends up being spilled
+;
+; CHECK-LABEL: .long .L{{.*}}-.L.patchpoint_spillargs
+; CHECK-NEXT: .short 0
+; CHECK-NEXT: .short 5
+; Loc 0: Return value in a register
+; CHECK-NEXT: .byte 1
+; CHECK-NEXT: .byte 8
+; CHECK-NEXT: .short {{[0-9]+}}
+; CHECK-NEXT: .long 0
+; Loc 1: Arg0 in a Register
+; CHECK-NEXT: .byte 1
+; CHECK-NEXT: .byte 8
+; CHECK-NEXT: .short {{[0-9]+}}
+; CHECK-NEXT: .long 0
+; Loc 2: Arg1 in a Register
+; CHECK-NEXT: .byte 1
+; CHECK-NEXT: .byte 8
+; CHECK-NEXT: .short {{[0-9]+}}
+; CHECK-NEXT: .long 0
+; Loc 3: Arg2 spilled to FP - 96
+; CHECK-NEXT: .byte 3
+; CHECK-NEXT: .byte 8
+; CHECK-NEXT: .short 31
+; CHECK-NEXT: .long 128
+; Loc 4: Arg3 spilled to FP - 88
+; CHECK-NEXT: .byte 3
+; CHECK-NEXT: .byte 8
+; CHECK-NEXT: .short 31
+; CHECK-NEXT: .long 136
+define i64 @patchpoint_spillargs(i64 %p1, i64 %p2, i64 %p3, i64 %p4) {
+entry:
+ tail call void asm sideeffect "nop", "~{r0},~{r3},~{r4},~{r5},~{r6},~{r7},~{r8},~{r9},~{r10},~{r11},~{r12},~{r14},~{r15},~{r16},~{r17
+},~{r18},~{r19},~{r20},~{r21},~{r22},~{r23},~{r24},~{r25},~{r26},~{r27},~{r28},~{r29},~{r30},~{r31}"() nounwind
+ %result = tail call anyregcc i64 (i64, i32, i8*, i32, ...)* @llvm.experimental.patchpoint.i64(i64 13, i32 24, i8* inttoptr (i64 0 to i8*), i32 2, i64 %p1, i64 %p2, i64 %p3, i64 %p4)
+ ret i64 %result
+}
+
+declare void @llvm.experimental.patchpoint.void(i64, i32, i8*, i32, ...)
+declare i64 @llvm.experimental.patchpoint.i64(i64, i32, i8*, i32, ...)
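
Each .byte/.byte/.short/.long quartet checked above is one stackmap location record; in the version-1 format (the header's '.byte 1') it has the following layout, field names informal:

%stackmap.location = type { i8,    ; type: 1=register, 2=direct, 3=indirect, 4=constant
                            i8,    ; size of the location in bytes
                            i16,   ; DWARF register number
                            i32 }  ; offset, or the small constant itself

So the spilled locations appear as type 3 with register 31 (the frame pointer) plus a frame offset, while plain register locations are type 1 with offset 0.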
diff --git a/test/CodeGen/PowerPC/ppc64-calls.ll b/test/CodeGen/PowerPC/ppc64-calls.ll
index 31794be..707ba95 100644
--- a/test/CodeGen/PowerPC/ppc64-calls.ll
+++ b/test/CodeGen/PowerPC/ppc64-calls.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=ppc64 | FileCheck %s
+; RUN: llc < %s -march=ppc64 -mcpu=pwr7 | FileCheck %s
target datalayout = "E-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v128:128:128-n32:64"
target triple = "powerpc64-unknown-linux-gnu"
@@ -67,3 +67,20 @@ define double @test_external(double %x) nounwind {
; CHECK-NEXT: nop
ret double %call
}
+
+; The 'ld 2, 40(1)' really must always come directly after the bctrl to make
+; the unwinding code in libgcc happy.
+@g = external global void ()*
+declare void @h(i64)
+define void @test_indir_toc_reload(i64 %x) {
+ %1 = load void ()** @g
+ call void %1()
+ call void @h(i64 %x)
+ ret void
+
+; CHECK-LABEL: @test_indir_toc_reload
+; CHECK: bctrl
+; CHECK-NEXT: ld 2, 40(1)
+; CHECK: blr
+}
+
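
Sketched with hypothetical registers, the sequence the test pins down: the caller's TOC pointer lives in the ELFv1 save slot at 40(1) across the call, and the reload must be the very next instruction after the call returns:

  mtctr 11         # callee address for the indirect call
  bctrl            # call through CTR; callee may change r2
  ld 2, 40(1)      # restore our TOC pointer immediately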
diff --git a/test/CodeGen/PowerPC/ppc64-elf-abi.ll b/test/CodeGen/PowerPC/ppc64-elf-abi.ll
index d82122d..5344337 100644
--- a/test/CodeGen/PowerPC/ppc64-elf-abi.ll
+++ b/test/CodeGen/PowerPC/ppc64-elf-abi.ll
@@ -1,9 +1,9 @@
; RUN: llc -mtriple=powerpc64-unknown-linux-gnu < %s | FileCheck %s -check-prefix=CHECK-ELFv1
-; RUN: llc -mtriple=powerpc64-unknown-linux-gnu -mattr=+elfv1 < %s | FileCheck %s -check-prefix=CHECK-ELFv1
-; RUN: llc -mtriple=powerpc64-unknown-linux-gnu -mattr=+elfv2 < %s | FileCheck %s -check-prefix=CHECK-ELFv2
+; RUN: llc -mtriple=powerpc64-unknown-linux-gnu -target-abi elfv1 < %s | FileCheck %s -check-prefix=CHECK-ELFv1
+; RUN: llc -mtriple=powerpc64-unknown-linux-gnu -target-abi elfv2 < %s | FileCheck %s -check-prefix=CHECK-ELFv2
; RUN: llc -mtriple=powerpc64le-unknown-linux-gnu < %s | FileCheck %s -check-prefix=CHECK-ELFv2
-; RUN: llc -mtriple=powerpc64le-unknown-linux-gnu -mattr=+elfv1 < %s | FileCheck %s -check-prefix=CHECK-ELFv1
-; RUN: llc -mtriple=powerpc64le-unknown-linux-gnu -mattr=+elfv2 < %s | FileCheck %s -check-prefix=CHECK-ELFv2
+; RUN: llc -mtriple=powerpc64le-unknown-linux-gnu -target-abi elfv1 < %s | FileCheck %s -check-prefix=CHECK-ELFv1
+; RUN: llc -mtriple=powerpc64le-unknown-linux-gnu -target-abi elfv2 < %s | FileCheck %s -check-prefix=CHECK-ELFv2
; CHECK-ELFv2: .abiversion 2
; CHECK-ELFv1-NOT: .abiversion 2
diff --git a/test/CodeGen/PowerPC/ppc64-fastcc-fast-isel.ll b/test/CodeGen/PowerPC/ppc64-fastcc-fast-isel.ll
new file mode 100644
index 0000000..941513f
--- /dev/null
+++ b/test/CodeGen/PowerPC/ppc64-fastcc-fast-isel.ll
@@ -0,0 +1,56 @@
+; RUN: llc -mcpu=pwr7 -mattr=-vsx -fast-isel -fast-isel-abort < %s | FileCheck %s
+target datalayout = "E-m:e-i64:64-n32:64"
+target triple = "powerpc64-unknown-linux-gnu"
+
+define fastcc i64 @g1(i64 %g1, double %f1, i64 %g2, double %f2, i64 %g3, double %f3, i64 %g4, double %f4) #0 {
+ ret i64 %g1
+
+; CHECK-LABEL: @g1
+; CHECK-NOT: mr 3,
+; CHECK: blr
+}
+
+define fastcc i64 @g2(i64 %g1, double %f1, i64 %g2, double %f2, i64 %g3, double %f3, i64 %g4, double %f4) #0 {
+ ret i64 %g2
+
+; CHECK-LABEL: @g2
+; CHECK: mr 3, 4
+; CHECK-NEXT: blr
+}
+
+define fastcc i64 @g3(i64 %g1, double %f1, i64 %g2, double %f2, i64 %g3, double %f3, i64 %g4, double %f4) #0 {
+ ret i64 %g3
+
+; CHECK-LABEL: @g3
+; CHECK: mr 3, 5
+; CHECK-NEXT: blr
+}
+
+define fastcc double @f2(i64 %g1, double %f1, i64 %g2, double %f2, i64 %g3, double %f3, i64 %g4, double %f4) #0 {
+ ret double %f2
+
+; CHECK-LABEL: @f2
+; CHECK: fmr 1, 2
+; CHECK-NEXT: blr
+}
+
+define void @cg2(i64 %v) #0 {
+ tail call fastcc i64 @g1(i64 0, double 0.0, i64 %v, double 0.0, i64 0, double 0.0, i64 0, double 0.0)
+ ret void
+
+; CHECK-LABEL: @cg2
+; CHECK: mr 4, 3
+; CHECK: blr
+}
+
+define void @cf2(double %v) #0 {
+ tail call fastcc i64 @g1(i64 0, double 0.0, i64 0, double %v, i64 0, double 0.0, i64 0, double 0.0)
+ ret void
+
+; CHECK-LABEL: @cf2
+; CHECK: mr 2, 1
+; CHECK: blr
+}
+
+attributes #0 = { nounwind }
+
diff --git a/test/CodeGen/PowerPC/ppc64-fastcc.ll b/test/CodeGen/PowerPC/ppc64-fastcc.ll
new file mode 100644
index 0000000..bb1365a
--- /dev/null
+++ b/test/CodeGen/PowerPC/ppc64-fastcc.ll
@@ -0,0 +1,540 @@
+; RUN: llc -mcpu=pwr7 -mattr=-vsx < %s | FileCheck %s
+target datalayout = "E-m:e-i64:64-n32:64"
+target triple = "powerpc64-unknown-linux-gnu"
+
+define fastcc i64 @g1(i64 %g1, double %f1, <4 x i32> %v1, i64 %g2, double %f2, <4 x i32> %v2, i64 %g3, double %f3, <4 x i32> %v3, i64 %g4, double %f4, <4 x i32> %v4, i64 %g5, double %f5, <4 x i32> %v5, i64 %g6, double %f6, <4 x i32> %v6, i64 %g7, double %f7, <4 x i32> %v7, i64 %g8, double %f8, <4 x i32> %v8, i64 %g9, double %f9, <4 x i32> %v9, i64 %g10, double %f10, <4 x i32> %v10, i64 %g11, double %f11, <4 x i32> %v11, i64 %g12, double %f12, <4 x i32> %v12, i64 %g13, double %f13, <4 x i32> %v13, i64 %g14, double %f14, <4 x i32> %v14, i64 %g15, double %f15, <4 x i32> %v15, i64 %g16, double %f16, <4 x i32> %v16) #0 {
+ ret i64 %g1
+
+; CHECK-LABEL: @g1
+; CHECK-NOT: mr 3,
+; CHECK: blr
+}
+
+define fastcc i64 @g2(i64 %g1, double %f1, <4 x i32> %v1, i64 %g2, double %f2, <4 x i32> %v2, i64 %g3, double %f3, <4 x i32> %v3, i64 %g4, double %f4, <4 x i32> %v4, i64 %g5, double %f5, <4 x i32> %v5, i64 %g6, double %f6, <4 x i32> %v6, i64 %g7, double %f7, <4 x i32> %v7, i64 %g8, double %f8, <4 x i32> %v8, i64 %g9, double %f9, <4 x i32> %v9, i64 %g10, double %f10, <4 x i32> %v10, i64 %g11, double %f11, <4 x i32> %v11, i64 %g12, double %f12, <4 x i32> %v12, i64 %g13, double %f13, <4 x i32> %v13, i64 %g14, double %f14, <4 x i32> %v14, i64 %g15, double %f15, <4 x i32> %v15, i64 %g16, double %f16, <4 x i32> %v16) #0 {
+ ret i64 %g2
+
+; CHECK-LABEL: @g2
+; CHECK: mr 3, 4
+; CHECK-NEXT: blr
+}
+
+define fastcc i64 @g3(i64 %g1, double %f1, <4 x i32> %v1, i64 %g2, double %f2, <4 x i32> %v2, i64 %g3, double %f3, <4 x i32> %v3, i64 %g4, double %f4, <4 x i32> %v4, i64 %g5, double %f5, <4 x i32> %v5, i64 %g6, double %f6, <4 x i32> %v6, i64 %g7, double %f7, <4 x i32> %v7, i64 %g8, double %f8, <4 x i32> %v8, i64 %g9, double %f9, <4 x i32> %v9, i64 %g10, double %f10, <4 x i32> %v10, i64 %g11, double %f11, <4 x i32> %v11, i64 %g12, double %f12, <4 x i32> %v12, i64 %g13, double %f13, <4 x i32> %v13, i64 %g14, double %f14, <4 x i32> %v14, i64 %g15, double %f15, <4 x i32> %v15, i64 %g16, double %f16, <4 x i32> %v16) #0 {
+ ret i64 %g3
+
+; CHECK-LABEL: @g3
+; CHECK: mr 3, 5
+; CHECK-NEXT: blr
+}
+
+define fastcc i64 @g4(i64 %g1, double %f1, <4 x i32> %v1, i64 %g2, double %f2, <4 x i32> %v2, i64 %g3, double %f3, <4 x i32> %v3, i64 %g4, double %f4, <4 x i32> %v4, i64 %g5, double %f5, <4 x i32> %v5, i64 %g6, double %f6, <4 x i32> %v6, i64 %g7, double %f7, <4 x i32> %v7, i64 %g8, double %f8, <4 x i32> %v8, i64 %g9, double %f9, <4 x i32> %v9, i64 %g10, double %f10, <4 x i32> %v10, i64 %g11, double %f11, <4 x i32> %v11, i64 %g12, double %f12, <4 x i32> %v12, i64 %g13, double %f13, <4 x i32> %v13, i64 %g14, double %f14, <4 x i32> %v14, i64 %g15, double %f15, <4 x i32> %v15, i64 %g16, double %f16, <4 x i32> %v16) #0 {
+ ret i64 %g4
+
+; CHECK-LABEL: @g4
+; CHECK: mr 3, 6
+; CHECK-NEXT: blr
+}
+
+define fastcc i64 @g5(i64 %g1, double %f1, <4 x i32> %v1, i64 %g2, double %f2, <4 x i32> %v2, i64 %g3, double %f3, <4 x i32> %v3, i64 %g4, double %f4, <4 x i32> %v4, i64 %g5, double %f5, <4 x i32> %v5, i64 %g6, double %f6, <4 x i32> %v6, i64 %g7, double %f7, <4 x i32> %v7, i64 %g8, double %f8, <4 x i32> %v8, i64 %g9, double %f9, <4 x i32> %v9, i64 %g10, double %f10, <4 x i32> %v10, i64 %g11, double %f11, <4 x i32> %v11, i64 %g12, double %f12, <4 x i32> %v12, i64 %g13, double %f13, <4 x i32> %v13, i64 %g14, double %f14, <4 x i32> %v14, i64 %g15, double %f15, <4 x i32> %v15, i64 %g16, double %f16, <4 x i32> %v16) #0 {
+ ret i64 %g5
+
+; CHECK-LABEL: @g5
+; CHECK: mr 3, 7
+; CHECK-NEXT: blr
+}
+
+define fastcc i64 @g6(i64 %g1, double %f1, <4 x i32> %v1, i64 %g2, double %f2, <4 x i32> %v2, i64 %g3, double %f3, <4 x i32> %v3, i64 %g4, double %f4, <4 x i32> %v4, i64 %g5, double %f5, <4 x i32> %v5, i64 %g6, double %f6, <4 x i32> %v6, i64 %g7, double %f7, <4 x i32> %v7, i64 %g8, double %f8, <4 x i32> %v8, i64 %g9, double %f9, <4 x i32> %v9, i64 %g10, double %f10, <4 x i32> %v10, i64 %g11, double %f11, <4 x i32> %v11, i64 %g12, double %f12, <4 x i32> %v12, i64 %g13, double %f13, <4 x i32> %v13, i64 %g14, double %f14, <4 x i32> %v14, i64 %g15, double %f15, <4 x i32> %v15, i64 %g16, double %f16, <4 x i32> %v16) #0 {
+ ret i64 %g6
+
+; CHECK-LABEL: @g6
+; CHECK: mr 3, 8
+; CHECK-NEXT: blr
+}
+
+define fastcc i64 @g7(i64 %g1, double %f1, <4 x i32> %v1, i64 %g2, double %f2, <4 x i32> %v2, i64 %g3, double %f3, <4 x i32> %v3, i64 %g4, double %f4, <4 x i32> %v4, i64 %g5, double %f5, <4 x i32> %v5, i64 %g6, double %f6, <4 x i32> %v6, i64 %g7, double %f7, <4 x i32> %v7, i64 %g8, double %f8, <4 x i32> %v8, i64 %g9, double %f9, <4 x i32> %v9, i64 %g10, double %f10, <4 x i32> %v10, i64 %g11, double %f11, <4 x i32> %v11, i64 %g12, double %f12, <4 x i32> %v12, i64 %g13, double %f13, <4 x i32> %v13, i64 %g14, double %f14, <4 x i32> %v14, i64 %g15, double %f15, <4 x i32> %v15, i64 %g16, double %f16, <4 x i32> %v16) #0 {
+ ret i64 %g7
+
+; CHECK-LABEL: @g7
+; CHECK: mr 3, 9
+; CHECK-NEXT: blr
+}
+
+define fastcc i64 @g8(i64 %g1, double %f1, <4 x i32> %v1, i64 %g2, double %f2, <4 x i32> %v2, i64 %g3, double %f3, <4 x i32> %v3, i64 %g4, double %f4, <4 x i32> %v4, i64 %g5, double %f5, <4 x i32> %v5, i64 %g6, double %f6, <4 x i32> %v6, i64 %g7, double %f7, <4 x i32> %v7, i64 %g8, double %f8, <4 x i32> %v8, i64 %g9, double %f9, <4 x i32> %v9, i64 %g10, double %f10, <4 x i32> %v10, i64 %g11, double %f11, <4 x i32> %v11, i64 %g12, double %f12, <4 x i32> %v12, i64 %g13, double %f13, <4 x i32> %v13, i64 %g14, double %f14, <4 x i32> %v14, i64 %g15, double %f15, <4 x i32> %v15, i64 %g16, double %f16, <4 x i32> %v16) #0 {
+ ret i64 %g8
+
+; CHECK-LABEL: @g8
+; CHECK: mr 3, 10
+; CHECK-NEXT: blr
+}
+
+define fastcc i64 @g9(i64 %g1, double %f1, <4 x i32> %v1, i64 %g2, double %f2, <4 x i32> %v2, i64 %g3, double %f3, <4 x i32> %v3, i64 %g4, double %f4, <4 x i32> %v4, i64 %g5, double %f5, <4 x i32> %v5, i64 %g6, double %f6, <4 x i32> %v6, i64 %g7, double %f7, <4 x i32> %v7, i64 %g8, double %f8, <4 x i32> %v8, i64 %g9, double %f9, <4 x i32> %v9, i64 %g10, double %f10, <4 x i32> %v10, i64 %g11, double %f11, <4 x i32> %v11, i64 %g12, double %f12, <4 x i32> %v12, i64 %g13, double %f13, <4 x i32> %v13, i64 %g14, double %f14, <4 x i32> %v14, i64 %g15, double %f15, <4 x i32> %v15, i64 %g16, double %f16, <4 x i32> %v16) #0 {
+ ret i64 %g9
+
+; CHECK-LABEL: @g9
+; CHECK: ld 3, 48(1)
+; CHECK-NEXT: blr
+}
+
+define fastcc i64 @g10(i64 %g1, double %f1, <4 x i32> %v1, i64 %g2, double %f2, <4 x i32> %v2, i64 %g3, double %f3, <4 x i32> %v3, i64 %g4, double %f4, <4 x i32> %v4, i64 %g5, double %f5, <4 x i32> %v5, i64 %g6, double %f6, <4 x i32> %v6, i64 %g7, double %f7, <4 x i32> %v7, i64 %g8, double %f8, <4 x i32> %v8, i64 %g9, double %f9, <4 x i32> %v9, i64 %g10, double %f10, <4 x i32> %v10, i64 %g11, double %f11, <4 x i32> %v11, i64 %g12, double %f12, <4 x i32> %v12, i64 %g13, double %f13, <4 x i32> %v13, i64 %g14, double %f14, <4 x i32> %v14, i64 %g15, double %f15, <4 x i32> %v15, i64 %g16, double %f16, <4 x i32> %v16) #0 {
+ ret i64 %g10
+
+; CHECK-LABEL: @g10
+; CHECK: ld 3, 56(1)
+; CHECK-NEXT: blr
+}
+
+define fastcc i64 @g11(i64 %g1, double %f1, <4 x i32> %v1, i64 %g2, double %f2, <4 x i32> %v2, i64 %g3, double %f3, <4 x i32> %v3, i64 %g4, double %f4, <4 x i32> %v4, i64 %g5, double %f5, <4 x i32> %v5, i64 %g6, double %f6, <4 x i32> %v6, i64 %g7, double %f7, <4 x i32> %v7, i64 %g8, double %f8, <4 x i32> %v8, i64 %g9, double %f9, <4 x i32> %v9, i64 %g10, double %f10, <4 x i32> %v10, i64 %g11, double %f11, <4 x i32> %v11, i64 %g12, double %f12, <4 x i32> %v12, i64 %g13, double %f13, <4 x i32> %v13, i64 %g14, double %f14, <4 x i32> %v14, i64 %g15, double %f15, <4 x i32> %v15, i64 %g16, double %f16, <4 x i32> %v16) #0 {
+ ret i64 %g11
+
+; CHECK-LABEL: @g11
+; CHECK: ld 3, 64(1)
+; CHECK-NEXT: blr
+}
+
+define fastcc double @f1(i64 %g1, double %f1, <4 x i32> %v1, i64 %g2, double %f2, <4 x i32> %v2, i64 %g3, double %f3, <4 x i32> %v3, i64 %g4, double %f4, <4 x i32> %v4, i64 %g5, double %f5, <4 x i32> %v5, i64 %g6, double %f6, <4 x i32> %v6, i64 %g7, double %f7, <4 x i32> %v7, i64 %g8, double %f8, <4 x i32> %v8, i64 %g9, double %f9, <4 x i32> %v9, i64 %g10, double %f10, <4 x i32> %v10, i64 %g11, double %f11, <4 x i32> %v11, i64 %g12, double %f12, <4 x i32> %v12, i64 %g13, double %f13, <4 x i32> %v13, i64 %g14, double %f14, <4 x i32> %v14, i64 %g15, double %f15, <4 x i32> %v15, i64 %g16, double %f16, <4 x i32> %v16) #0 {
+ ret double %f1
+
+; CHECK-LABEL: @f1
+; CHECK-NOT: fmr 1,
+; CHECK: blr
+}
+
+define fastcc double @f2(i64 %g1, double %f1, <4 x i32> %v1, i64 %g2, double %f2, <4 x i32> %v2, i64 %g3, double %f3, <4 x i32> %v3, i64 %g4, double %f4, <4 x i32> %v4, i64 %g5, double %f5, <4 x i32> %v5, i64 %g6, double %f6, <4 x i32> %v6, i64 %g7, double %f7, <4 x i32> %v7, i64 %g8, double %f8, <4 x i32> %v8, i64 %g9, double %f9, <4 x i32> %v9, i64 %g10, double %f10, <4 x i32> %v10, i64 %g11, double %f11, <4 x i32> %v11, i64 %g12, double %f12, <4 x i32> %v12, i64 %g13, double %f13, <4 x i32> %v13, i64 %g14, double %f14, <4 x i32> %v14, i64 %g15, double %f15, <4 x i32> %v15, i64 %g16, double %f16, <4 x i32> %v16) #0 {
+ ret double %f2
+
+; CHECK-LABEL: @f2
+; CHECK: fmr 1, 2
+; CHECK-NEXT: blr
+}
+
+define fastcc double @f3(i64 %g1, double %f1, <4 x i32> %v1, i64 %g2, double %f2, <4 x i32> %v2, i64 %g3, double %f3, <4 x i32> %v3, i64 %g4, double %f4, <4 x i32> %v4, i64 %g5, double %f5, <4 x i32> %v5, i64 %g6, double %f6, <4 x i32> %v6, i64 %g7, double %f7, <4 x i32> %v7, i64 %g8, double %f8, <4 x i32> %v8, i64 %g9, double %f9, <4 x i32> %v9, i64 %g10, double %f10, <4 x i32> %v10, i64 %g11, double %f11, <4 x i32> %v11, i64 %g12, double %f12, <4 x i32> %v12, i64 %g13, double %f13, <4 x i32> %v13, i64 %g14, double %f14, <4 x i32> %v14, i64 %g15, double %f15, <4 x i32> %v15, i64 %g16, double %f16, <4 x i32> %v16) #0 {
+ ret double %f3
+
+; CHECK-LABEL: @f3
+; CHECK: fmr 1, 3
+; CHECK-NEXT: blr
+}
+
+define fastcc double @f4(i64 %g1, double %f1, <4 x i32> %v1, i64 %g2, double %f2, <4 x i32> %v2, i64 %g3, double %f3, <4 x i32> %v3, i64 %g4, double %f4, <4 x i32> %v4, i64 %g5, double %f5, <4 x i32> %v5, i64 %g6, double %f6, <4 x i32> %v6, i64 %g7, double %f7, <4 x i32> %v7, i64 %g8, double %f8, <4 x i32> %v8, i64 %g9, double %f9, <4 x i32> %v9, i64 %g10, double %f10, <4 x i32> %v10, i64 %g11, double %f11, <4 x i32> %v11, i64 %g12, double %f12, <4 x i32> %v12, i64 %g13, double %f13, <4 x i32> %v13, i64 %g14, double %f14, <4 x i32> %v14, i64 %g15, double %f15, <4 x i32> %v15, i64 %g16, double %f16, <4 x i32> %v16) #0 {
+ ret double %f4
+
+; CHECK-LABEL: @f4
+; CHECK: fmr 1, 4
+; CHECK-NEXT: blr
+}
+
+define fastcc double @f5(i64 %g1, double %f1, <4 x i32> %v1, i64 %g2, double %f2, <4 x i32> %v2, i64 %g3, double %f3, <4 x i32> %v3, i64 %g4, double %f4, <4 x i32> %v4, i64 %g5, double %f5, <4 x i32> %v5, i64 %g6, double %f6, <4 x i32> %v6, i64 %g7, double %f7, <4 x i32> %v7, i64 %g8, double %f8, <4 x i32> %v8, i64 %g9, double %f9, <4 x i32> %v9, i64 %g10, double %f10, <4 x i32> %v10, i64 %g11, double %f11, <4 x i32> %v11, i64 %g12, double %f12, <4 x i32> %v12, i64 %g13, double %f13, <4 x i32> %v13, i64 %g14, double %f14, <4 x i32> %v14, i64 %g15, double %f15, <4 x i32> %v15, i64 %g16, double %f16, <4 x i32> %v16) #0 {
+ ret double %f5
+
+; CHECK-LABEL: @f5
+; CHECK: fmr 1, 5
+; CHECK-NEXT: blr
+}
+
+define fastcc double @f6(i64 %g1, double %f1, <4 x i32> %v1, i64 %g2, double %f2, <4 x i32> %v2, i64 %g3, double %f3, <4 x i32> %v3, i64 %g4, double %f4, <4 x i32> %v4, i64 %g5, double %f5, <4 x i32> %v5, i64 %g6, double %f6, <4 x i32> %v6, i64 %g7, double %f7, <4 x i32> %v7, i64 %g8, double %f8, <4 x i32> %v8, i64 %g9, double %f9, <4 x i32> %v9, i64 %g10, double %f10, <4 x i32> %v10, i64 %g11, double %f11, <4 x i32> %v11, i64 %g12, double %f12, <4 x i32> %v12, i64 %g13, double %f13, <4 x i32> %v13, i64 %g14, double %f14, <4 x i32> %v14, i64 %g15, double %f15, <4 x i32> %v15, i64 %g16, double %f16, <4 x i32> %v16) #0 {
+ ret double %f6
+
+; CHECK-LABEL: @f6
+; CHECK: fmr 1, 6
+; CHECK-NEXT: blr
+}
+
+define fastcc double @f7(i64 %g1, double %f1, <4 x i32> %v1, i64 %g2, double %f2, <4 x i32> %v2, i64 %g3, double %f3, <4 x i32> %v3, i64 %g4, double %f4, <4 x i32> %v4, i64 %g5, double %f5, <4 x i32> %v5, i64 %g6, double %f6, <4 x i32> %v6, i64 %g7, double %f7, <4 x i32> %v7, i64 %g8, double %f8, <4 x i32> %v8, i64 %g9, double %f9, <4 x i32> %v9, i64 %g10, double %f10, <4 x i32> %v10, i64 %g11, double %f11, <4 x i32> %v11, i64 %g12, double %f12, <4 x i32> %v12, i64 %g13, double %f13, <4 x i32> %v13, i64 %g14, double %f14, <4 x i32> %v14, i64 %g15, double %f15, <4 x i32> %v15, i64 %g16, double %f16, <4 x i32> %v16) #0 {
+ ret double %f7
+
+; CHECK-LABEL: @f7
+; CHECK: fmr 1, 7
+; CHECK-NEXT: blr
+}
+
+define fastcc double @f8(i64 %g1, double %f1, <4 x i32> %v1, i64 %g2, double %f2, <4 x i32> %v2, i64 %g3, double %f3, <4 x i32> %v3, i64 %g4, double %f4, <4 x i32> %v4, i64 %g5, double %f5, <4 x i32> %v5, i64 %g6, double %f6, <4 x i32> %v6, i64 %g7, double %f7, <4 x i32> %v7, i64 %g8, double %f8, <4 x i32> %v8, i64 %g9, double %f9, <4 x i32> %v9, i64 %g10, double %f10, <4 x i32> %v10, i64 %g11, double %f11, <4 x i32> %v11, i64 %g12, double %f12, <4 x i32> %v12, i64 %g13, double %f13, <4 x i32> %v13, i64 %g14, double %f14, <4 x i32> %v14, i64 %g15, double %f15, <4 x i32> %v15, i64 %g16, double %f16, <4 x i32> %v16) #0 {
+ ret double %f8
+
+; CHECK-LABEL: @f8
+; CHECK: fmr 1, 8
+; CHECK-NEXT: blr
+}
+
+define fastcc double @f9(i64 %g1, double %f1, <4 x i32> %v1, i64 %g2, double %f2, <4 x i32> %v2, i64 %g3, double %f3, <4 x i32> %v3, i64 %g4, double %f4, <4 x i32> %v4, i64 %g5, double %f5, <4 x i32> %v5, i64 %g6, double %f6, <4 x i32> %v6, i64 %g7, double %f7, <4 x i32> %v7, i64 %g8, double %f8, <4 x i32> %v8, i64 %g9, double %f9, <4 x i32> %v9, i64 %g10, double %f10, <4 x i32> %v10, i64 %g11, double %f11, <4 x i32> %v11, i64 %g12, double %f12, <4 x i32> %v12, i64 %g13, double %f13, <4 x i32> %v13, i64 %g14, double %f14, <4 x i32> %v14, i64 %g15, double %f15, <4 x i32> %v15, i64 %g16, double %f16, <4 x i32> %v16) #0 {
+ ret double %f9
+
+; CHECK-LABEL: @f9
+; CHECK: fmr 1, 9
+; CHECK-NEXT: blr
+}
+
+define fastcc double @f10(i64 %g1, double %f1, <4 x i32> %v1, i64 %g2, double %f2, <4 x i32> %v2, i64 %g3, double %f3, <4 x i32> %v3, i64 %g4, double %f4, <4 x i32> %v4, i64 %g5, double %f5, <4 x i32> %v5, i64 %g6, double %f6, <4 x i32> %v6, i64 %g7, double %f7, <4 x i32> %v7, i64 %g8, double %f8, <4 x i32> %v8, i64 %g9, double %f9, <4 x i32> %v9, i64 %g10, double %f10, <4 x i32> %v10, i64 %g11, double %f11, <4 x i32> %v11, i64 %g12, double %f12, <4 x i32> %v12, i64 %g13, double %f13, <4 x i32> %v13, i64 %g14, double %f14, <4 x i32> %v14, i64 %g15, double %f15, <4 x i32> %v15, i64 %g16, double %f16, <4 x i32> %v16) #0 {
+ ret double %f10
+
+; CHECK-LABEL: @f10
+; CHECK: fmr 1, 10
+; CHECK-NEXT: blr
+}
+
+define fastcc double @f11(i64 %g1, double %f1, <4 x i32> %v1, i64 %g2, double %f2, <4 x i32> %v2, i64 %g3, double %f3, <4 x i32> %v3, i64 %g4, double %f4, <4 x i32> %v4, i64 %g5, double %f5, <4 x i32> %v5, i64 %g6, double %f6, <4 x i32> %v6, i64 %g7, double %f7, <4 x i32> %v7, i64 %g8, double %f8, <4 x i32> %v8, i64 %g9, double %f9, <4 x i32> %v9, i64 %g10, double %f10, <4 x i32> %v10, i64 %g11, double %f11, <4 x i32> %v11, i64 %g12, double %f12, <4 x i32> %v12, i64 %g13, double %f13, <4 x i32> %v13, i64 %g14, double %f14, <4 x i32> %v14, i64 %g15, double %f15, <4 x i32> %v15, i64 %g16, double %f16, <4 x i32> %v16) #0 {
+ ret double %f11
+
+; CHECK-LABEL: @f11
+; CHECK: fmr 1, 11
+; CHECK-NEXT: blr
+}
+
+define fastcc double @f12(i64 %g1, double %f1, <4 x i32> %v1, i64 %g2, double %f2, <4 x i32> %v2, i64 %g3, double %f3, <4 x i32> %v3, i64 %g4, double %f4, <4 x i32> %v4, i64 %g5, double %f5, <4 x i32> %v5, i64 %g6, double %f6, <4 x i32> %v6, i64 %g7, double %f7, <4 x i32> %v7, i64 %g8, double %f8, <4 x i32> %v8, i64 %g9, double %f9, <4 x i32> %v9, i64 %g10, double %f10, <4 x i32> %v10, i64 %g11, double %f11, <4 x i32> %v11, i64 %g12, double %f12, <4 x i32> %v12, i64 %g13, double %f13, <4 x i32> %v13, i64 %g14, double %f14, <4 x i32> %v14, i64 %g15, double %f15, <4 x i32> %v15, i64 %g16, double %f16, <4 x i32> %v16) #0 {
+ ret double %f12
+
+; CHECK-LABEL: @f12
+; CHECK: fmr 1, 12
+; CHECK-NEXT: blr
+}
+
+define fastcc double @f13(i64 %g1, double %f1, <4 x i32> %v1, i64 %g2, double %f2, <4 x i32> %v2, i64 %g3, double %f3, <4 x i32> %v3, i64 %g4, double %f4, <4 x i32> %v4, i64 %g5, double %f5, <4 x i32> %v5, i64 %g6, double %f6, <4 x i32> %v6, i64 %g7, double %f7, <4 x i32> %v7, i64 %g8, double %f8, <4 x i32> %v8, i64 %g9, double %f9, <4 x i32> %v9, i64 %g10, double %f10, <4 x i32> %v10, i64 %g11, double %f11, <4 x i32> %v11, i64 %g12, double %f12, <4 x i32> %v12, i64 %g13, double %f13, <4 x i32> %v13, i64 %g14, double %f14, <4 x i32> %v14, i64 %g15, double %f15, <4 x i32> %v15, i64 %g16, double %f16, <4 x i32> %v16) #0 {
+ ret double %f13
+
+; CHECK-LABEL: @f13
+; CHECK: fmr 1, 13
+; CHECK-NEXT: blr
+}
+
+define fastcc double @f14(i64 %g1, double %f1, <4 x i32> %v1, i64 %g2, double %f2, <4 x i32> %v2, i64 %g3, double %f3, <4 x i32> %v3, i64 %g4, double %f4, <4 x i32> %v4, i64 %g5, double %f5, <4 x i32> %v5, i64 %g6, double %f6, <4 x i32> %v6, i64 %g7, double %f7, <4 x i32> %v7, i64 %g8, double %f8, <4 x i32> %v8, i64 %g9, double %f9, <4 x i32> %v9, i64 %g10, double %f10, <4 x i32> %v10, i64 %g11, double %f11, <4 x i32> %v11, i64 %g12, double %f12, <4 x i32> %v12, i64 %g13, double %f13, <4 x i32> %v13, i64 %g14, double %f14, <4 x i32> %v14, i64 %g15, double %f15, <4 x i32> %v15, i64 %g16, double %f16, <4 x i32> %v16) #0 {
+ ret double %f14
+
+; CHECK-LABEL: @f14
+; CHECK: lfd 1, 120(1)
+; CHECK-NEXT: blr
+}
+
+define fastcc double @f15(i64 %g1, double %f1, <4 x i32> %v1, i64 %g2, double %f2, <4 x i32> %v2, i64 %g3, double %f3, <4 x i32> %v3, i64 %g4, double %f4, <4 x i32> %v4, i64 %g5, double %f5, <4 x i32> %v5, i64 %g6, double %f6, <4 x i32> %v6, i64 %g7, double %f7, <4 x i32> %v7, i64 %g8, double %f8, <4 x i32> %v8, i64 %g9, double %f9, <4 x i32> %v9, i64 %g10, double %f10, <4 x i32> %v10, i64 %g11, double %f11, <4 x i32> %v11, i64 %g12, double %f12, <4 x i32> %v12, i64 %g13, double %f13, <4 x i32> %v13, i64 %g14, double %f14, <4 x i32> %v14, i64 %g15, double %f15, <4 x i32> %v15, i64 %g16, double %f16, <4 x i32> %v16) #0 {
+ ret double %f15
+
+; CHECK-LABEL: @f15
+; CHECK: lfd 1, 152(1)
+; CHECK-NEXT: blr
+}
+
+define fastcc <4 x i32> @v1(i64 %g1, double %f1, <4 x i32> %v1, i64 %g2, double %f2, <4 x i32> %v2, i64 %g3, double %f3, <4 x i32> %v3, i64 %g4, double %f4, <4 x i32> %v4, i64 %g5, double %f5, <4 x i32> %v5, i64 %g6, double %f6, <4 x i32> %v6, i64 %g7, double %f7, <4 x i32> %v7, i64 %g8, double %f8, <4 x i32> %v8, i64 %g9, double %f9, <4 x i32> %v9, i64 %g10, double %f10, <4 x i32> %v10, i64 %g11, double %f11, <4 x i32> %v11, i64 %g12, double %f12, <4 x i32> %v12, i64 %g13, double %f13, <4 x i32> %v13, i64 %g14, double %f14, <4 x i32> %v14, i64 %g15, double %f15, <4 x i32> %v15, i64 %g16, double %f16, <4 x i32> %v16) #0 {
+ ret <4 x i32> %v1
+
+; CHECK-LABEL: @v1
+; CHECK-NOT: vor 2,
+; CHECK: blr
+}
+
+define fastcc <4 x i32> @v2(i64 %g1, double %f1, <4 x i32> %v1, i64 %g2, double %f2, <4 x i32> %v2, i64 %g3, double %f3, <4 x i32> %v3, i64 %g4, double %f4, <4 x i32> %v4, i64 %g5, double %f5, <4 x i32> %v5, i64 %g6, double %f6, <4 x i32> %v6, i64 %g7, double %f7, <4 x i32> %v7, i64 %g8, double %f8, <4 x i32> %v8, i64 %g9, double %f9, <4 x i32> %v9, i64 %g10, double %f10, <4 x i32> %v10, i64 %g11, double %f11, <4 x i32> %v11, i64 %g12, double %f12, <4 x i32> %v12, i64 %g13, double %f13, <4 x i32> %v13, i64 %g14, double %f14, <4 x i32> %v14, i64 %g15, double %f15, <4 x i32> %v15, i64 %g16, double %f16, <4 x i32> %v16) #0 {
+ ret <4 x i32> %v2
+
+; CHECK-LABEL: @v2
+; CHECK: vor 2, 3, 3
+; CHECK-NEXT: blr
+}
+
+define fastcc <4 x i32> @v3(i64 %g1, double %f1, <4 x i32> %v1, i64 %g2, double %f2, <4 x i32> %v2, i64 %g3, double %f3, <4 x i32> %v3, i64 %g4, double %f4, <4 x i32> %v4, i64 %g5, double %f5, <4 x i32> %v5, i64 %g6, double %f6, <4 x i32> %v6, i64 %g7, double %f7, <4 x i32> %v7, i64 %g8, double %f8, <4 x i32> %v8, i64 %g9, double %f9, <4 x i32> %v9, i64 %g10, double %f10, <4 x i32> %v10, i64 %g11, double %f11, <4 x i32> %v11, i64 %g12, double %f12, <4 x i32> %v12, i64 %g13, double %f13, <4 x i32> %v13, i64 %g14, double %f14, <4 x i32> %v14, i64 %g15, double %f15, <4 x i32> %v15, i64 %g16, double %f16, <4 x i32> %v16) #0 {
+ ret <4 x i32> %v3
+
+; CHECK-LABEL: @v3
+; CHECK: vor 2, 4, 4
+; CHECK-NEXT: blr
+}
+
+define fastcc <4 x i32> @v4(i64 %g1, double %f1, <4 x i32> %v1, i64 %g2, double %f2, <4 x i32> %v2, i64 %g3, double %f3, <4 x i32> %v3, i64 %g4, double %f4, <4 x i32> %v4, i64 %g5, double %f5, <4 x i32> %v5, i64 %g6, double %f6, <4 x i32> %v6, i64 %g7, double %f7, <4 x i32> %v7, i64 %g8, double %f8, <4 x i32> %v8, i64 %g9, double %f9, <4 x i32> %v9, i64 %g10, double %f10, <4 x i32> %v10, i64 %g11, double %f11, <4 x i32> %v11, i64 %g12, double %f12, <4 x i32> %v12, i64 %g13, double %f13, <4 x i32> %v13, i64 %g14, double %f14, <4 x i32> %v14, i64 %g15, double %f15, <4 x i32> %v15, i64 %g16, double %f16, <4 x i32> %v16) #0 {
+ ret <4 x i32> %v4
+
+; CHECK-LABEL: @v4
+; CHECK: vor 2, 5, 5
+; CHECK-NEXT: blr
+}
+
+define fastcc <4 x i32> @v5(i64 %g1, double %f1, <4 x i32> %v1, i64 %g2, double %f2, <4 x i32> %v2, i64 %g3, double %f3, <4 x i32> %v3, i64 %g4, double %f4, <4 x i32> %v4, i64 %g5, double %f5, <4 x i32> %v5, i64 %g6, double %f6, <4 x i32> %v6, i64 %g7, double %f7, <4 x i32> %v7, i64 %g8, double %f8, <4 x i32> %v8, i64 %g9, double %f9, <4 x i32> %v9, i64 %g10, double %f10, <4 x i32> %v10, i64 %g11, double %f11, <4 x i32> %v11, i64 %g12, double %f12, <4 x i32> %v12, i64 %g13, double %f13, <4 x i32> %v13, i64 %g14, double %f14, <4 x i32> %v14, i64 %g15, double %f15, <4 x i32> %v15, i64 %g16, double %f16, <4 x i32> %v16) #0 {
+ ret <4 x i32> %v5
+
+; CHECK-LABEL: @v5
+; CHECK: vor 2, 6, 6
+; CHECK-NEXT: blr
+}
+
+define fastcc <4 x i32> @v6(i64 %g1, double %f1, <4 x i32> %v1, i64 %g2, double %f2, <4 x i32> %v2, i64 %g3, double %f3, <4 x i32> %v3, i64 %g4, double %f4, <4 x i32> %v4, i64 %g5, double %f5, <4 x i32> %v5, i64 %g6, double %f6, <4 x i32> %v6, i64 %g7, double %f7, <4 x i32> %v7, i64 %g8, double %f8, <4 x i32> %v8, i64 %g9, double %f9, <4 x i32> %v9, i64 %g10, double %f10, <4 x i32> %v10, i64 %g11, double %f11, <4 x i32> %v11, i64 %g12, double %f12, <4 x i32> %v12, i64 %g13, double %f13, <4 x i32> %v13, i64 %g14, double %f14, <4 x i32> %v14, i64 %g15, double %f15, <4 x i32> %v15, i64 %g16, double %f16, <4 x i32> %v16) #0 {
+ ret <4 x i32> %v6
+
+; CHECK-LABEL: @v6
+; CHECK: vor 2, 7, 7
+; CHECK-NEXT: blr
+}
+
+define fastcc <4 x i32> @v7(i64 %g1, double %f1, <4 x i32> %v1, i64 %g2, double %f2, <4 x i32> %v2, i64 %g3, double %f3, <4 x i32> %v3, i64 %g4, double %f4, <4 x i32> %v4, i64 %g5, double %f5, <4 x i32> %v5, i64 %g6, double %f6, <4 x i32> %v6, i64 %g7, double %f7, <4 x i32> %v7, i64 %g8, double %f8, <4 x i32> %v8, i64 %g9, double %f9, <4 x i32> %v9, i64 %g10, double %f10, <4 x i32> %v10, i64 %g11, double %f11, <4 x i32> %v11, i64 %g12, double %f12, <4 x i32> %v12, i64 %g13, double %f13, <4 x i32> %v13, i64 %g14, double %f14, <4 x i32> %v14, i64 %g15, double %f15, <4 x i32> %v15, i64 %g16, double %f16, <4 x i32> %v16) #0 {
+ ret <4 x i32> %v7
+
+; CHECK-LABEL: @v7
+; CHECK: vor 2, 8, 8
+; CHECK-NEXT: blr
+}
+
+define fastcc <4 x i32> @v8(i64 %g1, double %f1, <4 x i32> %v1, i64 %g2, double %f2, <4 x i32> %v2, i64 %g3, double %f3, <4 x i32> %v3, i64 %g4, double %f4, <4 x i32> %v4, i64 %g5, double %f5, <4 x i32> %v5, i64 %g6, double %f6, <4 x i32> %v6, i64 %g7, double %f7, <4 x i32> %v7, i64 %g8, double %f8, <4 x i32> %v8, i64 %g9, double %f9, <4 x i32> %v9, i64 %g10, double %f10, <4 x i32> %v10, i64 %g11, double %f11, <4 x i32> %v11, i64 %g12, double %f12, <4 x i32> %v12, i64 %g13, double %f13, <4 x i32> %v13, i64 %g14, double %f14, <4 x i32> %v14, i64 %g15, double %f15, <4 x i32> %v15, i64 %g16, double %f16, <4 x i32> %v16) #0 {
+ ret <4 x i32> %v8
+
+; CHECK-LABEL: @v8
+; CHECK: vor 2, 9, 9
+; CHECK-NEXT: blr
+}
+
+define fastcc <4 x i32> @v9(i64 %g1, double %f1, <4 x i32> %v1, i64 %g2, double %f2, <4 x i32> %v2, i64 %g3, double %f3, <4 x i32> %v3, i64 %g4, double %f4, <4 x i32> %v4, i64 %g5, double %f5, <4 x i32> %v5, i64 %g6, double %f6, <4 x i32> %v6, i64 %g7, double %f7, <4 x i32> %v7, i64 %g8, double %f8, <4 x i32> %v8, i64 %g9, double %f9, <4 x i32> %v9, i64 %g10, double %f10, <4 x i32> %v10, i64 %g11, double %f11, <4 x i32> %v11, i64 %g12, double %f12, <4 x i32> %v12, i64 %g13, double %f13, <4 x i32> %v13, i64 %g14, double %f14, <4 x i32> %v14, i64 %g15, double %f15, <4 x i32> %v15, i64 %g16, double %f16, <4 x i32> %v16) #0 {
+ ret <4 x i32> %v9
+
+; CHECK-LABEL: @v9
+; CHECK: vor 2, 10, 10
+; CHECK-NEXT: blr
+}
+
+define fastcc <4 x i32> @v10(i64 %g1, double %f1, <4 x i32> %v1, i64 %g2, double %f2, <4 x i32> %v2, i64 %g3, double %f3, <4 x i32> %v3, i64 %g4, double %f4, <4 x i32> %v4, i64 %g5, double %f5, <4 x i32> %v5, i64 %g6, double %f6, <4 x i32> %v6, i64 %g7, double %f7, <4 x i32> %v7, i64 %g8, double %f8, <4 x i32> %v8, i64 %g9, double %f9, <4 x i32> %v9, i64 %g10, double %f10, <4 x i32> %v10, i64 %g11, double %f11, <4 x i32> %v11, i64 %g12, double %f12, <4 x i32> %v12, i64 %g13, double %f13, <4 x i32> %v13, i64 %g14, double %f14, <4 x i32> %v14, i64 %g15, double %f15, <4 x i32> %v15, i64 %g16, double %f16, <4 x i32> %v16) #0 {
+ ret <4 x i32> %v10
+
+; CHECK-LABEL: @v10
+; CHECK: vor 2, 11, 11
+; CHECK-NEXT: blr
+}
+
+define fastcc <4 x i32> @v11(i64 %g1, double %f1, <4 x i32> %v1, i64 %g2, double %f2, <4 x i32> %v2, i64 %g3, double %f3, <4 x i32> %v3, i64 %g4, double %f4, <4 x i32> %v4, i64 %g5, double %f5, <4 x i32> %v5, i64 %g6, double %f6, <4 x i32> %v6, i64 %g7, double %f7, <4 x i32> %v7, i64 %g8, double %f8, <4 x i32> %v8, i64 %g9, double %f9, <4 x i32> %v9, i64 %g10, double %f10, <4 x i32> %v10, i64 %g11, double %f11, <4 x i32> %v11, i64 %g12, double %f12, <4 x i32> %v12, i64 %g13, double %f13, <4 x i32> %v13, i64 %g14, double %f14, <4 x i32> %v14, i64 %g15, double %f15, <4 x i32> %v15, i64 %g16, double %f16, <4 x i32> %v16) #0 {
+ ret <4 x i32> %v11
+
+; CHECK-LABEL: @v11
+; CHECK: vor 2, 12, 12
+; CHECK-NEXT: blr
+}
+
+define fastcc <4 x i32> @v12(i64 %g1, double %f1, <4 x i32> %v1, i64 %g2, double %f2, <4 x i32> %v2, i64 %g3, double %f3, <4 x i32> %v3, i64 %g4, double %f4, <4 x i32> %v4, i64 %g5, double %f5, <4 x i32> %v5, i64 %g6, double %f6, <4 x i32> %v6, i64 %g7, double %f7, <4 x i32> %v7, i64 %g8, double %f8, <4 x i32> %v8, i64 %g9, double %f9, <4 x i32> %v9, i64 %g10, double %f10, <4 x i32> %v10, i64 %g11, double %f11, <4 x i32> %v11, i64 %g12, double %f12, <4 x i32> %v12, i64 %g13, double %f13, <4 x i32> %v13, i64 %g14, double %f14, <4 x i32> %v14, i64 %g15, double %f15, <4 x i32> %v15, i64 %g16, double %f16, <4 x i32> %v16) #0 {
+ ret <4 x i32> %v12
+
+; CHECK-LABEL: @v12
+; CHECK: vor 2, 13, 13
+; CHECK-NEXT: blr
+}
+
+define fastcc <4 x i32> @v13(i64 %g1, double %f1, <4 x i32> %v1, i64 %g2, double %f2, <4 x i32> %v2, i64 %g3, double %f3, <4 x i32> %v3, i64 %g4, double %f4, <4 x i32> %v4, i64 %g5, double %f5, <4 x i32> %v5, i64 %g6, double %f6, <4 x i32> %v6, i64 %g7, double %f7, <4 x i32> %v7, i64 %g8, double %f8, <4 x i32> %v8, i64 %g9, double %f9, <4 x i32> %v9, i64 %g10, double %f10, <4 x i32> %v10, i64 %g11, double %f11, <4 x i32> %v11, i64 %g12, double %f12, <4 x i32> %v12, i64 %g13, double %f13, <4 x i32> %v13, i64 %g14, double %f14, <4 x i32> %v14, i64 %g15, double %f15, <4 x i32> %v15, i64 %g16, double %f16, <4 x i32> %v16) #0 {
+ ret <4 x i32> %v13
+
+; CHECK-LABEL: @v13
+; CHECK: addi [[REG1:[0-9]+]], 1, 96
+; CHECK-NEXT: lvx 2, 0, [[REG1]]
+; CHECK-NEXT: blr
+}
+
+define fastcc <4 x i32> @v14(i64 %g1, double %f1, <4 x i32> %v1, i64 %g2, double %f2, <4 x i32> %v2, i64 %g3, double %f3, <4 x i32> %v3, i64 %g4, double %f4, <4 x i32> %v4, i64 %g5, double %f5, <4 x i32> %v5, i64 %g6, double %f6, <4 x i32> %v6, i64 %g7, double %f7, <4 x i32> %v7, i64 %g8, double %f8, <4 x i32> %v8, i64 %g9, double %f9, <4 x i32> %v9, i64 %g10, double %f10, <4 x i32> %v10, i64 %g11, double %f11, <4 x i32> %v11, i64 %g12, double %f12, <4 x i32> %v12, i64 %g13, double %f13, <4 x i32> %v13, i64 %g14, double %f14, <4 x i32> %v14, i64 %g15, double %f15, <4 x i32> %v15, i64 %g16, double %f16, <4 x i32> %v16) #0 {
+ ret <4 x i32> %v14
+
+; CHECK-LABEL: @v14
+; CHECK: addi [[REG1:[0-9]+]], 1, 128
+; CHECK-NEXT: lvx 2, 0, [[REG1]]
+; CHECK-NEXT: blr
+}
+
+define fastcc <4 x i32> @v15(i64 %g1, double %f1, <4 x i32> %v1, i64 %g2, double %f2, <4 x i32> %v2, i64 %g3, double %f3, <4 x i32> %v3, i64 %g4, double %f4, <4 x i32> %v4, i64 %g5, double %f5, <4 x i32> %v5, i64 %g6, double %f6, <4 x i32> %v6, i64 %g7, double %f7, <4 x i32> %v7, i64 %g8, double %f8, <4 x i32> %v8, i64 %g9, double %f9, <4 x i32> %v9, i64 %g10, double %f10, <4 x i32> %v10, i64 %g11, double %f11, <4 x i32> %v11, i64 %g12, double %f12, <4 x i32> %v12, i64 %g13, double %f13, <4 x i32> %v13, i64 %g14, double %f14, <4 x i32> %v14, i64 %g15, double %f15, <4 x i32> %v15, i64 %g16, double %f16, <4 x i32> %v16) #0 {
+ ret <4 x i32> %v15
+
+; CHECK-LABEL: @v15
+; CHECK: addi [[REG1:[0-9]+]], 1, 160
+; CHECK-NEXT: lvx 2, 0, [[REG1]]
+; CHECK-NEXT: blr
+}
+
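+; The @cg*, @cf*, and @cv* functions exercise the caller side of the same
+; convention: each one passes a single live argument in one fastcc slot, and
+; the CHECK lines expect it to be moved into the matching register or stored
+; to the matching stack offset.
+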
+define void @cg1(i64 %v) #0 {
+ tail call fastcc i64 @g1(i64 %v, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>)
+ ret void
+
+; CHECK-LABEL: @cg1
+; CHECK-NOT: {{^[ \t]*}}mr 3,
+; CHECK: blr
+}
+
+define void @cg2(i64 %v) #0 {
+ tail call fastcc i64 @g1(i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 %v, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>)
+ ret void
+
+; CHECK-LABEL: @cg2
+; CHECK: mr 4, 3
+; CHECK: blr
+}
+
+define void @cg3(i64 %v) #0 {
+ tail call fastcc i64 @g1(i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 %v, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>)
+ ret void
+
+; CHECK-LABEL: @cg3
+; CHECK: mr 5, 3
+; CHECK: blr
+}
+
+define void @cg4(i64 %v) #0 {
+ tail call fastcc i64 @g1(i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 %v, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>)
+ ret void
+
+; CHECK-LABEL: @cg4
+; CHECK: mr 6, 3
+; CHECK: blr
+}
+
+define void @cg5(i64 %v) #0 {
+ tail call fastcc i64 @g1(i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 %v, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>)
+ ret void
+
+; CHECK-LABEL: @cg5
+; CHECK: mr 7, 3
+; CHECK: blr
+}
+
+define void @cg6(i64 %v) #0 {
+ tail call fastcc i64 @g1(i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 %v, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>)
+ ret void
+
+; CHECK-LABEL: @cg6
+; CHECK: mr 8, 3
+; CHECK: blr
+}
+
+define void @cg7(i64 %v) #0 {
+ tail call fastcc i64 @g1(i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 %v, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>)
+ ret void
+
+; CHECK-LABEL: @cg7
+; CHECK: mr 9, 3
+; CHECK: blr
+}
+
+define void @cg8(i64 %v) #0 {
+ tail call fastcc i64 @g1(i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 %v, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>)
+ ret void
+
+; CHECK-LABEL: @cg8
+; CHECK: mr 10, 3
+; CHECK: blr
+}
+
+define void @cg9(i64 %v) #0 {
+ tail call fastcc i64 @g1(i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 %v, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>)
+ ret void
+
+; CHECK-LABEL: @cg9
+; CHECK: mr [[REG1:[0-9]+]], 3
+; CHECK: std [[REG1]], 48(1)
+; CHECK: blr
+}
+
+define void @cg10(i64 %v) #0 {
+ tail call fastcc i64 @g1(i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 %v, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>)
+ ret void
+
+; CHECK-LABEL: @cg10
+; CHECK: mr [[REG1:[0-9]+]], 3
+; CHECK: std [[REG1]], 56(1)
+; CHECK: blr
+}
+
+define void @cg11(i64 %v) #0 {
+ tail call fastcc i64 @g1(i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 %v, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>)
+ ret void
+
+; CHECK-LABEL: @cg11
+; CHECK: mr [[REG1:[0-9]+]], 3
+; CHECK: std [[REG1]], 64(1)
+; CHECK: blr
+}
+
+define void @cf1(double %v) #0 {
+ tail call fastcc i64 @g1(i64 0, double %v, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>)
+ ret void
+
+; CHECK-LABEL: @cf1
+; CHECK-NOT: fmr 1,
+; CHECK: blr
+}
+
+define void @cf2(double %v) #0 {
+ tail call fastcc i64 @g1(i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double %v, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>)
+ ret void
+
+; CHECK-LABEL: @cf2
+; CHECK: fmr 2, 1
+; CHECK: blr
+}
+
+define void @cf3(double %v) #0 {
+ tail call fastcc i64 @g1(i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double %v, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>)
+ ret void
+
+; CHECK-LABEL: @cf3
+; CHECK: fmr 3, 1
+; CHECK: blr
+}
+
+define void @cf4(double %v) #0 {
+ tail call fastcc i64 @g1(i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double %v, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>)
+ ret void
+
+; CHECK-LABEL: @cf4
+; CHECK: fmr 4, 1
+; CHECK: blr
+}
+
+define void @cf5(double %v) #0 {
+ tail call fastcc i64 @g1(i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double %v, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>)
+ ret void
+
+; CHECK-LABEL: @cf5
+; CHECK: fmr 5, 1
+; CHECK: blr
+}
+
+define void @cf14(double %v) #0 {
+ tail call fastcc i64 @g1(i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double %v, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>)
+ ret void
+
+; CHECK-LABEL: @cf14
+; CHECK: stfd 1, 120(1)
+; CHECK: blr
+}
+
+define void @cf15(double %v) #0 {
+ tail call fastcc i64 @g1(i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double %v, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>)
+ ret void
+
+; CHECK-LABEL: @cf15
+; CHECK: stfd 1, 152(1)
+; CHECK: blr
+}
+
+define void @cv2(<4 x i32> %v) #0 {
+ tail call fastcc i64 @g1(i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> %v, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>)
+ ret void
+
+; CHECK-LABEL: @cv2
+; CHECK: vor 3, 2, 2
+; CHECK: blr
+}
+
+define void @cv3(<4 x i32> %v) #0 {
+ tail call fastcc i64 @g1(i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> %v, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>)
+ ret void
+
+; CHECK-LABEL: @cv3
+; CHECK: vor 4, 2, 2
+; CHECK: blr
+}
+
+define void @cv13(<4 x i32> %v) #0 {
+ tail call fastcc i64 @g1(i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> %v, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>)
+ ret void
+
+; CHECK-LABEL: @cv13
+; CHECK: li [[REG1:[0-9]+]], 96
+; CHECK: stvx 2, 1, [[REG1]]
+; CHECK: blr
+}
+
+define void @cv14(<4 x i32> %v) #0 {
+ tail call fastcc i64 @g1(i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> %v, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i64 0, double 0.0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>)
+ ret void
+
+; CHECK-LABEL: @cv14
+; CHECK: li [[REG1:[0-9]+]], 128
+; CHECK: stvx 2, 1, [[REG1]]
+; CHECK: blr
+}
+
+attributes #0 = { nounwind }
+
diff --git a/test/CodeGen/PowerPC/ppc64-func-desc-hoist.ll b/test/CodeGen/PowerPC/ppc64-func-desc-hoist.ll
new file mode 100644
index 0000000..57577f9
--- /dev/null
+++ b/test/CodeGen/PowerPC/ppc64-func-desc-hoist.ll
@@ -0,0 +1,47 @@
+; RUN: llc -mcpu=a2 < %s | FileCheck %s -check-prefix=INVFUNCDESC
+; RUN: llc -mcpu=a2 -mattr=-invariant-function-descriptors < %s | FileCheck %s -check-prefix=NONINVFUNCDESC
+target datalayout = "E-m:e-i64:64-n32:64"
+target triple = "powerpc64-unknown-linux-gnu"
+
+; Function Attrs: nounwind
+define void @bar(void (...)* nocapture %x) #0 {
+entry:
+ %callee.knr.cast = bitcast void (...)* %x to void ()*
+ br label %for.body
+
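+; Under the 64-bit ELFv1 ABI, an indirect call goes through a function
+; descriptor: the entry point is loaded from offset 0, the TOC pointer from
+; offset 8, and the environment pointer from offset 16. With invariant
+; function descriptors (the default in the first RUN line), those three loads
+; can be hoisted out of the loop; with -invariant-function-descriptors they
+; are redone on every iteration inside %for.body.
+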
+; INVFUNCDESC-LABEL: @bar
+; INVFUNCDESC-DAG: ld [[REG1:[0-9]+]], 8(3)
+; INVFUNCDESC-DAG: ld [[REG2:[0-9]+]], 16(3)
+; INVFUNCDESC-DAG: ld [[REG3:[0-9]+]], 0(3)
+
+; INVFUNCDESC: %for.body
+; INVFUNCDESC: std 2, 40(1)
+; INVFUNCDESC-DAG: mtctr [[REG3]]
+; INVFUNCDESC-DAG: mr 11, [[REG2]]
+; INVFUNCDESC-DAG: mr 2, [[REG1]]
+; INVFUNCDESC: bctrl
+; INVFUNCDESC-NEXT: ld 2, 40(1)
+
+; NONINVFUNCDESC-LABEL: @bar
+; NONINVFUNCDESC: %for.body
+; NONINVFUNCDESC: std 2, 40(1)
+; NONINVFUNCDESC-DAG: ld 3, 0(30)
+; NONINVFUNCDESC-DAG: ld 11, 16(30)
+; NONINVFUNCDESC-DAG: ld 2, 8(30)
+; NONINVFUNCDESC: mtctr 3
+; NONINVFUNCDESC: bctrl
+; NONINVFUNCDESC-NEXT: ld 2, 40(1)
+
+for.body: ; preds = %for.body, %entry
+ %i.02 = phi i32 [ 0, %entry ], [ %inc, %for.body ]
+ tail call void %callee.knr.cast() #0
+ %inc = add nuw nsw i32 %i.02, 1
+ %exitcond = icmp eq i32 %inc, 1600000000
+ br i1 %exitcond, label %for.end, label %for.body
+
+for.end: ; preds = %for.body
+ ret void
+}
+
+attributes #0 = { nounwind }
+
diff --git a/test/CodeGen/PowerPC/ppc64-gep-opt.ll b/test/CodeGen/PowerPC/ppc64-gep-opt.ll
new file mode 100644
index 0000000..14cf9a7
--- /dev/null
+++ b/test/CodeGen/PowerPC/ppc64-gep-opt.ll
@@ -0,0 +1,157 @@
+; RUN: llc -O3 -mcpu=pwr7 < %s | FileCheck %s
+; RUN: llc -O3 -print-after=codegenprepare -mcpu=ppc64 < %s >%t 2>&1 && FileCheck --check-prefix=CHECK-NoAA <%t %s
+; RUN: llc -O3 -print-after=codegenprepare -mcpu=pwr7 < %s >%t 2>&1 && FileCheck --check-prefix=CHECK-UseAA <%t %s
+target datalayout = "E-m:e-i64:64-n32:64"
+target triple = "powerpc64-unknown-linux-gnu"
+
+; The following test cases test enabling the SeparateConstOffsetFromGEP pass
+; in the PPC backend. If useAA() returns true, the pass lowers a GEP with
+; multiple indices into GEPs with a single index each; otherwise it lowers it
+; into a "ptrtoint + arithmetic + inttoptr" form.
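+;
+; As an illustrative sketch (not matched by FileCheck), a two-index GEP into
+; the %struct type defined below, such as
+;   %p = getelementptr %struct* %s, i64 %i, i32 3
+; is lowered without AA roughly to
+;   %0 = ptrtoint %struct* %s to i64
+;   %1 = mul i64 %i, 96
+;   %2 = add i64 %0, %1
+;   %3 = add i64 %2, 12
+;   %p = inttoptr i64 %3 to i32*
+; and with useAA() to byte-addressed GEPs instead:
+;   %0 = bitcast %struct* %s to i8*
+;   %1 = mul i64 %i, 96
+;   %2 = getelementptr i8* %0, i64 %1
+;   %3 = getelementptr i8* %2, i64 12
+;   %p = bitcast i8* %3 to i32*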
+
+%struct = type { i32, i32, i32, i32, [20 x i32] }
+
+; Check that when two complex GEPs are used in two basic blocks, LLVM can
+; eliminate the common subexpression for the second use.
+define void @test_GEP_CSE([240 x %struct]* %string, i32* %adj, i32 %lib, i64 %idxprom) {
+ %liberties = getelementptr [240 x %struct]* %string, i64 1, i64 %idxprom, i32 3
+ %1 = load i32* %liberties, align 4
+ %cmp = icmp eq i32 %1, %lib
+ br i1 %cmp, label %if.then, label %if.end
+
+if.then: ; preds = %entry
+ %origin = getelementptr [240 x %struct]* %string, i64 1, i64 %idxprom, i32 2
+ %2 = load i32* %origin, align 4
+ store i32 %2, i32* %adj, align 4
+ br label %if.end
+
+if.end: ; preds = %if.then, %entry
+ ret void
+}
+
+; CHECK-NoAA-LABEL: @test_GEP_CSE(
+; CHECK-NoAA: [[PTR0:%[a-zA-Z0-9]+]] = ptrtoint [240 x %struct]* %string to i64
+; CHECK-NoAA: [[PTR1:%[a-zA-Z0-9]+]] = mul i64 %idxprom, 96
+; CHECK-NoAA: [[PTR2:%[a-zA-Z0-9]+]] = add i64 [[PTR0]], [[PTR1]]
+; CHECK-NoAA: add i64 [[PTR2]], 23052
+; CHECK-NoAA: inttoptr
+; CHECK-NoAA: if.then:
+; CHECK-NoAA-NOT: ptrtoint
+; CHECK-NoAA-NOT: mul
+; CHECK-NoAA: add i64 [[PTR2]], 23048
+; CHECK-NoAA: inttoptr
+
+; CHECK-UseAA-LABEL: @test_GEP_CSE(
+; CHECK-UseAA: [[PTR0:%[a-zA-Z0-9]+]] = bitcast [240 x %struct]* %string to i8*
+; CHECK-UseAA: [[IDX:%[a-zA-Z0-9]+]] = mul i64 %idxprom, 96
+; CHECK-UseAA: [[PTR1:%[a-zA-Z0-9]+]] = getelementptr i8* [[PTR0]], i64 [[IDX]]
+; CHECK-UseAA: getelementptr i8* [[PTR1]], i64 23052
+; CHECK-UseAA: bitcast
+; CHECK-UseAA: if.then:
+; CHECK-UseAA: getelementptr i8* [[PTR1]], i64 23048
+; CHECK-UseAA: bitcast
+
+%class.my = type { i32, [128 x i32], i32, [256 x %struct.pt]}
+%struct.pt = type { %struct.point*, i32, i32 }
+%struct.point = type { i32, i32 }
+
+; Check that when a GEP is used across two basic blocks, LLVM can sink the
+; address calculation, and codegen can generate a better addressing mode for
+; the second use.
+define void @test_GEP_across_BB(%class.my* %this, i64 %idx) {
+ %1 = getelementptr %class.my* %this, i64 0, i32 3, i64 %idx, i32 1
+ %2 = load i32* %1, align 4
+ %3 = getelementptr %class.my* %this, i64 0, i32 3, i64 %idx, i32 2
+ %4 = load i32* %3, align 4
+ %5 = icmp eq i32 %2, %4
+ br i1 %5, label %if.true, label %exit
+
+if.true:
+ %6 = shl i32 %4, 1
+ store i32 %6, i32* %3, align 4
+ br label %exit
+
+exit:
+ %7 = add nsw i32 %4, 1
+ store i32 %7, i32* %1, align 4
+ ret void
+}
+; CHECK-LABEL: test_GEP_across_BB:
+; CHECK-NOT: lwzu
+; CHECK: blr
+
+; CHECK-NoAA-LABEL: test_GEP_across_BB(
+; CHECK-NoAA: add i64 [[TMP:%[a-zA-Z0-9]+]], 528
+; CHECK-NoAA: add i64 [[TMP]], 532
+; CHECK-NoAA: if.true:
+; CHECK-NoAA: {{%sunk[a-zA-Z0-9]+}} = add i64 [[TMP]], 532
+; CHECK-NoAA: exit:
+; CHECK-NoAA: {{%sunk[a-zA-Z0-9]+}} = add i64 [[TMP]], 528
+
+; CHECK-UseAA-LABEL: test_GEP_across_BB(
+; CHECK-UseAA: [[PTR0:%[a-zA-Z0-9]+]] = getelementptr
+; CHECK-UseAA: getelementptr i8* [[PTR0]], i64 528
+; CHECK-UseAA: getelementptr i8* [[PTR0]], i64 532
+; CHECK-UseAA: if.true:
+; CHECK-UseAA: {{%sunk[a-zA-Z0-9]+}} = getelementptr i8* [[PTR0]], i64 532
+; CHECK-UseAA: exit:
+; CHECK-UseAA: {{%sunk[a-zA-Z0-9]+}} = getelementptr i8* [[PTR0]], i64 528
+
+%struct.S = type { float, double }
+@struct_array = global [1024 x %struct.S] zeroinitializer, align 16
+
+; The following two test cases check that we can extract a constant from the
+; indices of a struct-typed GEP.
+; The constant offsets come from the indices "i64 %idxprom" and "i32 1". As
+; the alloc size of %struct.S is 16, and "i32 1" is the 2nd element, whose
+; field offset is 8, the total constant offset is (5 * 16 + 8) = 88.
+define double* @test-struct_1(i32 %i) {
+entry:
+ %add = add nsw i32 %i, 5
+ %idxprom = sext i32 %add to i64
+ %p = getelementptr [1024 x %struct.S]* @struct_array, i64 0, i64 %idxprom, i32 1
+ ret double* %p
+}
+; CHECK-NoAA-LABEL: @test-struct_1(
+; CHECK-NoAA-NOT: getelementptr
+; CHECK-NoAA: add i64 %{{[a-zA-Z0-9]+}}, 88
+
+; CHECK-UseAA-LABEL: @test-struct_1(
+; CHECK-UseAA: getelementptr i8* %{{[a-zA-Z0-9]+}}, i64 88
+
+%struct3 = type { i64, i32 }
+%struct2 = type { %struct3, i32 }
+%struct1 = type { i64, %struct2 }
+%struct0 = type { i32, i32, i64*, [100 x %struct1] }
+
+; The constant offsets are from indices "i32 3", "i64 %arrayidx" and "i32 1".
+; "i32 3" is the 4th element whose field offset is 16. The alloca size of
+; %struct1 is 32. "i32 1" is the 2rd element whose field offset is 8. So the
+; total constant offset is 16 + (-2 * 32) + 8 = -40
+define %struct2* @test-struct_2(%struct0* %ptr, i64 %idx) {
+entry:
+ %arrayidx = add nsw i64 %idx, -2
+ %ptr2 = getelementptr %struct0* %ptr, i64 0, i32 3, i64 %arrayidx, i32 1
+ ret %struct2* %ptr2
+}
+; CHECK-NoAA-LABEL: @test-struct_2(
+; CHECK-NoAA-NOT: = getelementptr
+; CHECK-NoAA: add i64 %{{[a-zA-Z0-9]+}}, -40
+
+; CHECK-UseAA-LABEL: @test-struct_2(
+; CHECK-UseAA: getelementptr i8* %{{[a-zA-Z0-9]+}}, i64 -40
+
+; Test that when an index is the sum of two constants, the
+; SeparateConstOffsetFromGEP pass does not generate an incorrect result.
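+; %inc folds to 3, so the GEP addresses element [3][2]:
+; 3 * 12 + 2 * 4 = 44 bytes, which is the stw displacement checked below.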
+define void @test_const_add([3 x i32]* %in) {
+ %inc = add nsw i32 2, 1
+ %idxprom = sext i32 %inc to i64
+ %arrayidx = getelementptr [3 x i32]* %in, i64 %idxprom, i64 2
+ store i32 0, i32* %arrayidx, align 4
+ ret void
+}
+; CHECK-LABEL: test_const_add:
+; CHECK: li [[REG:[0-9]+]], 0
+; CHECK: stw [[REG]], 44(3)
+; CHECK: blr
+
diff --git a/test/CodeGen/PowerPC/ppc64-icbt-pwr7.ll b/test/CodeGen/PowerPC/ppc64-icbt-pwr7.ll
new file mode 100644
index 0000000..e8617cc
--- /dev/null
+++ b/test/CodeGen/PowerPC/ppc64-icbt-pwr7.ll
@@ -0,0 +1,19 @@
+; Test that the ICBT instruction is not emitted on POWER7
+; Based on the ppc64-prefetch.ll test
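+; The trailing "i32 0" argument asks for an instruction-cache prefetch, which
+; lowers to ICBT; that instruction is not available on POWER7, so instruction
+; selection fails.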
+; RUN: not llc -mtriple=powerpc64-unknown-linux-gnu -mcpu=pwr7 < %s 2>&1 | FileCheck %s
+
+declare void @llvm.prefetch(i8*, i32, i32, i32)
+
+define void @test(i8* %a, ...) nounwind {
+entry:
+ call void @llvm.prefetch(i8* %a, i32 0, i32 3, i32 0)
+ ret void
+
+; FIXME: Crashing is not really the correct behavior here; we should just emit nothing
+; CHECK: Cannot select: 0x{{[0-9,a-f]+}}: ch = Prefetch
+; CHECK: 0x{{[0-9,a-f]+}}: i32 = Constant<0>
+; CHECK-NEXT: 0x{{[0-9,a-f]+}}: i32 = Constant<3>
+; CHECK-NEXT: 0x{{[0-9,a-f]+}}: i32 = Constant<0>
+
+}
+
diff --git a/test/CodeGen/PowerPC/ppc64-icbt-pwr8.ll b/test/CodeGen/PowerPC/ppc64-icbt-pwr8.ll
new file mode 100644
index 0000000..a0f084a
--- /dev/null
+++ b/test/CodeGen/PowerPC/ppc64-icbt-pwr8.ll
@@ -0,0 +1,16 @@
+; Test that the ICBT instruction is emitted on POWER8
+; Copied from the ppc64-prefetch.ll test
+; RUN: llc -mtriple=powerpc64-unknown-linux-gnu -mcpu=pwr8 < %s | FileCheck %s
+
+declare void @llvm.prefetch(i8*, i32, i32, i32)
+
+define void @test(i8* %a, ...) nounwind {
+entry:
+ call void @llvm.prefetch(i8* %a, i32 0, i32 3, i32 0)
+ ret void
+
+; CHECK-LABEL: @test
+; CHECK: icbt
+}
+
+
diff --git a/test/CodeGen/PowerPC/ppc64-nonfunc-calls.ll b/test/CodeGen/PowerPC/ppc64-nonfunc-calls.ll
new file mode 100644
index 0000000..b1d3f39
--- /dev/null
+++ b/test/CodeGen/PowerPC/ppc64-nonfunc-calls.ll
@@ -0,0 +1,69 @@
+; RUN: llc -mcpu=pwr7 < %s | FileCheck %s
+target datalayout = "E-m:e-i64:64-n32:64"
+target triple = "powerpc64-unknown-linux-gnu"
+
+%struct.cd = type { i64, i64, i64 }
+
+@something = global [33 x i8] c"this is not really code, but...\0A\00", align 1
+@tls_something = thread_local global %struct.cd zeroinitializer, align 8
+@extern_something = external global %struct.cd
+
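+; On big-endian ELFv1 an indirect call goes through a function descriptor:
+; entry point at offset 0, TOC pointer at offset 8 and environment pointer at
+; offset 16. The three loads checked in each function below match that layout.
+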
+; Function Attrs: nounwind
+define void @foo() #0 {
+entry:
+ tail call void bitcast ([33 x i8]* @something to void ()*)() #0
+ ret void
+
+; CHECK-LABEL: @foo
+; CHECK-DAG: addis [[REG1:[0-9]+]], 2, something@toc@ha
+; CHECK-DAG: std 2, 40(1)
+; CHECK-DAG: addi [[REG3:[0-9]+]], [[REG1]], something@toc@l
+; CHECK-DAG: ld [[REG2:[0-9]+]], 0([[REG3]])
+; CHECK-DAG: ld 11, 16([[REG3]])
+; CHECK-DAG: ld 2, 8([[REG3]])
+; CHECK-DAG: mtctr [[REG2]]
+; CHECK: bctrl
+; CHECK: ld 2, 40(1)
+; CHECK: blr
+}
+
+; Function Attrs: nounwind
+define void @bar() #0 {
+entry:
+ tail call void bitcast (%struct.cd* @tls_something to void ()*)() #0
+ ret void
+
+; CHECK-LABEL: @bar
+; CHECK-DAG: addis [[REG1:[0-9]+]], 13, tls_something@tprel@ha
+; CHECK-DAG: std 2, 40(1)
+; CHECK-DAG: addi [[REG3:[0-9]+]], [[REG1]], tls_something@tprel@l
+; CHECK-DAG: ld [[REG2:[0-9]+]], 0([[REG3]])
+; CHECK-DAG: ld 11, 16([[REG3]])
+; CHECK-DAG: ld 2, 8([[REG3]])
+; CHECK-DAG: mtctr [[REG2]]
+; CHECK: bctrl
+; CHECK: ld 2, 40(1)
+; CHECK: blr
+}
+
+; Function Attrs: nounwind
+define void @ext() #0 {
+entry:
+ tail call void bitcast (%struct.cd* @extern_something to void ()*)() #0
+ ret void
+
+; CHECK-LABEL: @ext
+; CHECK-DAG: addis [[REG1:[0-9]+]], 2, [[NAME:[._A-Za-z0-9]+]]@toc@ha
+; CHECK-DAG: std 2, 40(1)
+; CHECK-DAG: ld [[REG3:[0-9]+]], [[NAME]]@toc@l(3)
+; CHECK-DAG: ld [[REG2:[0-9]+]], 0([[REG3]])
+; CHECK-DAG: ld 11, 16([[REG3]])
+; CHECK-DAG: ld 2, 8([[REG3]])
+; CHECK-DAG: mtctr [[REG2]]
+; CHECK: bctrl
+; CHECK: ld 2, 40(1)
+; CHECK: blr
+}
+
+attributes #0 = { nounwind }
+
diff --git a/test/CodeGen/PowerPC/ppc64-patchpoint.ll b/test/CodeGen/PowerPC/ppc64-patchpoint.ll
new file mode 100644
index 0000000..6580eff
--- /dev/null
+++ b/test/CodeGen/PowerPC/ppc64-patchpoint.ll
@@ -0,0 +1,97 @@
+; RUN: llc < %s | FileCheck %s -check-prefix=CHECK -check-prefix=CHECK-BE
+; RUN: llc -fast-isel -fast-isel-abort < %s | FileCheck %s -check-prefix=CHECK -check-prefix=CHECK-BE
+; RUN: llc -mtriple=powerpc64le-unknown-linux-gnu < %s | FileCheck %s -check-prefix=CHECK -check-prefix=CHECK-LE
+; RUN: llc -mtriple=powerpc64le-unknown-linux-gnu -fast-isel -fast-isel-abort < %s | FileCheck %s -check-prefix=CHECK -check-prefix=CHECK-LE
+
+target triple = "powerpc64-unknown-linux-gnu"
+
+; Trivial patchpoint codegen
+;
+define i64 @trivial_patchpoint_codegen(i64 %p1, i64 %p2, i64 %p3, i64 %p4) {
+entry:
+; CHECK-LABEL: trivial_patchpoint_codegen:
+
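+; 244837814094590 is 0xDEADBEEFCAFE (and 244837814094591 is 0xDEADBEEFCAFF);
+; each target is materialized into r12 and called through the count register.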
+; CHECK: li 12, -8531
+; CHECK-NEXT: rldic 12, 12, 32, 16
+; CHECK-NEXT: oris 12, 12, 48879
+; CHECK-NEXT: ori 12, 12, 51966
+; CHECK-NEXT: mtctr 12
+; CHECK-NEXT: bctrl
+
+; CHECK: li 12, -8531
+; CHECK-NEXT: rldic 12, 12, 32, 16
+; CHECK-NEXT: oris 12, 12, 48879
+; CHECK-NEXT: ori 12, 12, 51967
+; CHECK-NEXT: mtctr 12
+; CHECK-NEXT: bctrl
+
+; CHECK: blr
+
+ %resolveCall2 = inttoptr i64 244837814094590 to i8*
+ %result = tail call i64 (i64, i32, i8*, i32, ...)* @llvm.experimental.patchpoint.i64(i64 2, i32 24, i8* %resolveCall2, i32 4, i64 %p1, i64 %p2, i64 %p3, i64 %p4)
+ %resolveCall3 = inttoptr i64 244837814094591 to i8*
+ tail call void (i64, i32, i8*, i32, ...)* @llvm.experimental.patchpoint.void(i64 3, i32 24, i8* %resolveCall3, i32 2, i64 %p1, i64 %result)
+ ret i64 %result
+}
+
+; Caller frame metadata with stackmaps. This should not be optimized
+; as a leaf function.
+;
+; CHECK-LABEL: caller_meta_leaf
+; CHECK-BE: stdu 1, -80(1)
+; CHECK-LE: stdu 1, -64(1)
+; CHECK: Ltmp
+; CHECK-BE: addi 1, 1, 80
+; CHECK-LE: addi 1, 1, 64
+; CHECK: blr
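+; The BE/LE frame sizes presumably differ because the ELFv1 linkage area on
+; big-endian is larger; either way the 24-byte %metadata alloca is included
+; and the frame is rounded up to 16-byte alignment.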
+
+define void @caller_meta_leaf() {
+entry:
+ %metadata = alloca i64, i32 3, align 8
+ store i64 11, i64* %metadata
+ store i64 12, i64* %metadata
+ store i64 13, i64* %metadata
+ call void (i64, i32, ...)* @llvm.experimental.stackmap(i64 4, i32 0, i64* %metadata)
+ ret void
+}
+
+; Test patchpoints reusing the same TargetConstant.
+; <rdar:15390785> Assertion failed: (CI.getNumArgOperands() >= NumArgs + 4)
+; There is no way to verify this, since it depends on memory allocation.
+; But I think it's useful to include as a working example.
+define i64 @testLowerConstant(i64 %arg, i64 %tmp2, i64 %tmp10, i64* %tmp33, i64 %tmp79) {
+entry:
+ %tmp80 = add i64 %tmp79, -16
+ %tmp81 = inttoptr i64 %tmp80 to i64*
+ %tmp82 = load i64* %tmp81, align 8
+ tail call void (i64, i32, ...)* @llvm.experimental.stackmap(i64 14, i32 8, i64 %arg, i64 %tmp2, i64 %tmp10, i64 %tmp82)
+ tail call void (i64, i32, i8*, i32, ...)* @llvm.experimental.patchpoint.void(i64 15, i32 32, i8* null, i32 3, i64 %arg, i64 %tmp10, i64 %tmp82)
+ %tmp83 = load i64* %tmp33, align 8
+ %tmp84 = add i64 %tmp83, -24
+ %tmp85 = inttoptr i64 %tmp84 to i64*
+ %tmp86 = load i64* %tmp85, align 8
+ tail call void (i64, i32, ...)* @llvm.experimental.stackmap(i64 17, i32 8, i64 %arg, i64 %tmp10, i64 %tmp86)
+ tail call void (i64, i32, i8*, i32, ...)* @llvm.experimental.patchpoint.void(i64 18, i32 32, i8* null, i32 3, i64 %arg, i64 %tmp10, i64 %tmp86)
+ ret i64 10
+}
+
+; Test small patchpoints that don't emit calls.
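+; A 20-byte patchpoint with a null target is filled entirely with nops:
+; 20 / 4 = five instructions, as checked below.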
+define void @small_patchpoint_codegen(i64 %p1, i64 %p2, i64 %p3, i64 %p4) {
+entry:
+; CHECK-LABEL: small_patchpoint_codegen:
+; CHECK: Ltmp
+; CHECK: nop
+; CHECK-NEXT: nop
+; CHECK-NEXT: nop
+; CHECK-NEXT: nop
+; CHECK-NEXT: nop
+; CHECK-NOT: nop
+; CHECK: blr
+ %result = tail call i64 (i64, i32, i8*, i32, ...)* @llvm.experimental.patchpoint.i64(i64 5, i32 20, i8* null, i32 2, i64 %p1, i64 %p2)
+ ret void
+}
+
+declare void @llvm.experimental.stackmap(i64, i32, ...)
+declare void @llvm.experimental.patchpoint.void(i64, i32, i8*, i32, ...)
+declare i64 @llvm.experimental.patchpoint.i64(i64, i32, i8*, i32, ...)
+
diff --git a/test/CodeGen/PowerPC/ppc64-r2-alloc.ll b/test/CodeGen/PowerPC/ppc64-r2-alloc.ll
new file mode 100644
index 0000000..87292d8
--- /dev/null
+++ b/test/CodeGen/PowerPC/ppc64-r2-alloc.ll
@@ -0,0 +1,81 @@
+; RUN: llc -mcpu=pwr7 < %s | FileCheck %s
+target datalayout = "E-m:e-i64:64-n32:64"
+target triple = "powerpc64-unknown-linux-gnu"
+
+define signext i32 @foo(i32 signext %a, i32 signext %d) #0 {
+entry:
+ %div = sdiv i32 %a, %d
+ %div1 = sdiv i32 %div, %d
+ %div2 = sdiv i32 %div1, %d
+ %div3 = sdiv i32 %div2, %d
+ %div4 = sdiv i32 %div3, %d
+ %div5 = sdiv i32 %div4, %d
+ %div6 = sdiv i32 %div5, %d
+ %div7 = sdiv i32 %div6, %d
+ %div8 = sdiv i32 %div7, %d
+ %div9 = sdiv i32 %div8, %d
+ %div10 = sdiv i32 %div9, %d
+ %div11 = sdiv i32 %div10, %d
+ %div12 = sdiv i32 %div11, %d
+ %div13 = sdiv i32 %div12, %d
+ %div14 = sdiv i32 %div13, %d
+ %div15 = sdiv i32 %div14, %d
+ %div16 = sdiv i32 %div15, %d
+ %div17 = sdiv i32 %div16, %d
+ %div18 = sdiv i32 %div17, %d
+ %div19 = sdiv i32 %div18, %d
+ %div20 = sdiv i32 %div19, %d
+ %div21 = sdiv i32 %div20, %d
+ %div22 = sdiv i32 %div21, %d
+ %div23 = sdiv i32 %div22, %d
+ %div24 = sdiv i32 %div23, %d
+ %div25 = sdiv i32 %div24, %d
+ %div26 = sdiv i32 %div25, %d
+ %div27 = sdiv i32 %div26, %d
+ %div28 = sdiv i32 %div27, %d
+ %div29 = sdiv i32 %div28, %d
+ %div30 = sdiv i32 %div29, %d
+ %div31 = sdiv i32 %div30, %d
+ %div32 = sdiv i32 %div31, %d
+ %div33 = sdiv i32 %div32, %div31
+ %div34 = sdiv i32 %div33, %div30
+ %div35 = sdiv i32 %div34, %div29
+ %div36 = sdiv i32 %div35, %div28
+ %div37 = sdiv i32 %div36, %div27
+ %div38 = sdiv i32 %div37, %div26
+ %div39 = sdiv i32 %div38, %div25
+ %div40 = sdiv i32 %div39, %div24
+ %div41 = sdiv i32 %div40, %div23
+ %div42 = sdiv i32 %div41, %div22
+ %div43 = sdiv i32 %div42, %div21
+ %div44 = sdiv i32 %div43, %div20
+ %div45 = sdiv i32 %div44, %div19
+ %div46 = sdiv i32 %div45, %div18
+ %div47 = sdiv i32 %div46, %div17
+ %div48 = sdiv i32 %div47, %div16
+ %div49 = sdiv i32 %div48, %div15
+ %div50 = sdiv i32 %div49, %div14
+ %div51 = sdiv i32 %div50, %div13
+ %div52 = sdiv i32 %div51, %div12
+ %div53 = sdiv i32 %div52, %div11
+ %div54 = sdiv i32 %div53, %div10
+ %div55 = sdiv i32 %div54, %div9
+ %div56 = sdiv i32 %div55, %div8
+ %div57 = sdiv i32 %div56, %div7
+ %div58 = sdiv i32 %div57, %div6
+ %div59 = sdiv i32 %div58, %div5
+ %div60 = sdiv i32 %div59, %div4
+ %div61 = sdiv i32 %div60, %div3
+ %div62 = sdiv i32 %div61, %div2
+ %div63 = sdiv i32 %div62, %div1
+ %div64 = sdiv i32 %div63, %div
+ ret i32 %div64
+}
+
+; This function will need to use all non-reserved GPRs (and then some); make
+; sure that r2 is among them.
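+; (The second half of the divide chain still needs every %div value from the
+; first half as a divisor, so more than 30 values are live at once.)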
+; CHECK-LABEL: @foo
+; CHECK: std 2,
+; CHECK: ld 2,
+; CHECK: blr
+
diff --git a/test/CodeGen/PowerPC/ppc64-stackmap-nops.ll b/test/CodeGen/PowerPC/ppc64-stackmap-nops.ll
new file mode 100644
index 0000000..368ddc5
--- /dev/null
+++ b/test/CodeGen/PowerPC/ppc64-stackmap-nops.ll
@@ -0,0 +1,24 @@
+; RUN: llc < %s -mtriple=powerpc64-unknown-gnu-linux | FileCheck %s
+
+define void @test_shadow_optimization() {
+entry:
+; Expect 12 bytes worth of nops here rather than 32: with the shadow
+; optimization in place, 20 bytes are consumed by the frame teardown and the
+; return instruction.
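+; That leaves 32 - 20 = 12 bytes, i.e. exactly three 4-byte nops.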
+; CHECK-LABEL: test_shadow_optimization:
+
+; CHECK: nop
+; CHECK-NEXT: nop
+; CHECK-NEXT: nop
+; CHECK-NOT: nop
+; CHECK: addi 1, 1, 64
+; CHECK: ld [[REG1:[0-9]+]], 16(1)
+; CHECK: ld 31, -8(1)
+; CHECK: mtlr [[REG1]]
+; CHECK: blr
+
+ tail call void (i64, i32, ...)* @llvm.experimental.stackmap(i64 0, i32 32)
+ ret void
+}
+
+declare void @llvm.experimental.stackmap(i64, i32, ...)
+
diff --git a/test/CodeGen/PowerPC/ppc64-stackmap.ll b/test/CodeGen/PowerPC/ppc64-stackmap.ll
new file mode 100644
index 0000000..714d363
--- /dev/null
+++ b/test/CodeGen/PowerPC/ppc64-stackmap.ll
@@ -0,0 +1,289 @@
+; RUN: llc < %s | FileCheck %s
+;
+; Note: Print verbose stackmaps using -debug-only=stackmaps.
+
+; We are not getting the correct stack alignment when cross-compiling, so
+; specify a datalayout here.
+target datalayout = "E-m:e-i64:64-n32:64"
+target triple = "powerpc64-unknown-linux-gnu"
+
+; CHECK-LABEL: .section .llvm_stackmaps
+; CHECK-NEXT: __LLVM_StackMaps:
+; Header
+; CHECK-NEXT: .byte 1
+; CHECK-NEXT: .byte 0
+; CHECK-NEXT: .short 0
+; Num Functions
+; CHECK-NEXT: .long 11
+; Num LargeConstants
+; CHECK-NEXT: .long 2
+; Num Callsites
+; CHECK-NEXT: .long 11
+
+; Functions and stack size
+; CHECK-NEXT: .quad constantargs
+; CHECK-NEXT: .quad 128
+; CHECK-NEXT: .quad osrinline
+; CHECK-NEXT: .quad 144
+; CHECK-NEXT: .quad osrcold
+; CHECK-NEXT: .quad 128
+; CHECK-NEXT: .quad propertyRead
+; CHECK-NEXT: .quad 128
+; CHECK-NEXT: .quad propertyWrite
+; CHECK-NEXT: .quad 128
+; CHECK-NEXT: .quad jsVoidCall
+; CHECK-NEXT: .quad 128
+; CHECK-NEXT: .quad jsIntCall
+; CHECK-NEXT: .quad 128
+; CHECK-NEXT: .quad spilledValue
+; CHECK-NEXT: .quad 304
+; CHECK-NEXT: .quad spilledStackMapValue
+; CHECK-NEXT: .quad 224
+; CHECK-NEXT: .quad liveConstant
+; CHECK-NEXT: .quad 64
+; CHECK-NEXT: .quad clobberLR
+; CHECK-NEXT: .quad 208
+
+; Num LargeConstants
+; CHECK-NEXT: .quad 4294967295
+; CHECK-NEXT: .quad 4294967296
+
+; Constant arguments
+;
+; CHECK-NEXT: .quad 1
+; CHECK-NEXT: .long .L{{.*}}-.L.constantargs
+; CHECK-NEXT: .short 0
+; CHECK-NEXT: .short 4
+; SmallConstant
+; CHECK-NEXT: .byte 4
+; CHECK-NEXT: .byte 8
+; CHECK-NEXT: .short 0
+; CHECK-NEXT: .long 65535
+; SmallConstant
+; CHECK-NEXT: .byte 4
+; CHECK-NEXT: .byte 8
+; CHECK-NEXT: .short 0
+; CHECK-NEXT: .long 65536
+; SmallConstant
+; CHECK-NEXT: .byte 5
+; CHECK-NEXT: .byte 8
+; CHECK-NEXT: .short 0
+; CHECK-NEXT: .long 0
+; LargeConstant at index 0
+; CHECK-NEXT: .byte 5
+; CHECK-NEXT: .byte 8
+; CHECK-NEXT: .short 0
+; CHECK-NEXT: .long 1
+
+define void @constantargs() {
+entry:
+ %0 = inttoptr i64 244837814094590 to i8*
+ tail call void (i64, i32, i8*, i32, ...)* @llvm.experimental.patchpoint.void(i64 1, i32 24, i8* %0, i32 0, i64 65535, i64 65536, i64 4294967295, i64 4294967296)
+ ret void
+}
+
+; Inline OSR Exit
+;
+; CHECK-LABEL: .long .L{{.*}}-.L.osrinline
+; CHECK-NEXT: .short 0
+; CHECK-NEXT: .short 2
+; CHECK-NEXT: .byte 1
+; CHECK-NEXT: .byte 8
+; CHECK-NEXT: .short {{[0-9]+}}
+; CHECK-NEXT: .long 0
+; CHECK-NEXT: .byte 1
+; CHECK-NEXT: .byte 8
+; CHECK-NEXT: .short {{[0-9]+}}
+; CHECK-NEXT: .long 0
+define void @osrinline(i64 %a, i64 %b) {
+entry:
+ ; Runtime void->void call.
+ call void inttoptr (i64 244837814094590 to void ()*)()
+ ; Followed by inline OSR patchpoint with 12-byte shadow and 2 live vars.
+ call void (i64, i32, ...)* @llvm.experimental.stackmap(i64 3, i32 12, i64 %a, i64 %b)
+ ret void
+}
+
+; Cold OSR Exit
+;
+; 2 live variables in registers.
+;
+; CHECK-LABEL: .long .L{{.*}}-.L.osrcold
+; CHECK-NEXT: .short 0
+; CHECK-NEXT: .short 2
+; CHECK-NEXT: .byte 1
+; CHECK-NEXT: .byte 8
+; CHECK-NEXT: .short {{[0-9]+}}
+; CHECK-NEXT: .long 0
+; CHECK-NEXT: .byte 1
+; CHECK-NEXT: .byte 8
+; CHECK-NEXT: .short {{[0-9]+}}
+; CHECK-NEXT: .long 0
+define void @osrcold(i64 %a, i64 %b) {
+entry:
+ %test = icmp slt i64 %a, %b
+ br i1 %test, label %ret, label %cold
+cold:
+ ; OSR patchpoint with 12-byte nop-slide and 2 live vars.
+ %thunk = inttoptr i64 244837814094590 to i8*
+ call void (i64, i32, i8*, i32, ...)* @llvm.experimental.patchpoint.void(i64 4, i32 24, i8* %thunk, i32 0, i64 %a, i64 %b)
+ unreachable
+ret:
+ ret void
+}
+
+; Property Read
+; CHECK-LABEL: .long .L{{.*}}-.L.propertyRead
+; CHECK-NEXT: .short 0
+; CHECK-NEXT: .short 0
+;
+; FIXME: There are currently no stackmap entries. After moving to
+; AnyRegCC, we will have entries for the object and return value.
+define i64 @propertyRead(i64* %obj) {
+entry:
+ %resolveRead = inttoptr i64 244837814094590 to i8*
+ %result = call i64 (i64, i32, i8*, i32, ...)* @llvm.experimental.patchpoint.i64(i64 5, i32 24, i8* %resolveRead, i32 1, i64* %obj)
+ %add = add i64 %result, 3
+ ret i64 %add
+}
+
+; Property Write
+; CHECK-LABEL: .long .L{{.*}}-.L.propertyWrite
+; CHECK-NEXT: .short 0
+; CHECK-NEXT: .short 2
+; CHECK-NEXT: .byte 1
+; CHECK-NEXT: .byte 8
+; CHECK-NEXT: .short {{[0-9]+}}
+; CHECK-NEXT: .long 0
+; CHECK-NEXT: .byte 1
+; CHECK-NEXT: .byte 8
+; CHECK-NEXT: .short {{[0-9]+}}
+; CHECK-NEXT: .long 0
+define void @propertyWrite(i64 %dummy1, i64* %obj, i64 %dummy2, i64 %a) {
+entry:
+ %resolveWrite = inttoptr i64 244837814094590 to i8*
+ call anyregcc void (i64, i32, i8*, i32, ...)* @llvm.experimental.patchpoint.void(i64 6, i32 24, i8* %resolveWrite, i32 2, i64* %obj, i64 %a)
+ ret void
+}
+
+; Void JS Call
+;
+; 2 live variables in registers.
+;
+; CHECK-LABEL: .long .L{{.*}}-.L.jsVoidCall
+; CHECK-NEXT: .short 0
+; CHECK-NEXT: .short 2
+; CHECK-NEXT: .byte 1
+; CHECK-NEXT: .byte 8
+; CHECK-NEXT: .short {{[0-9]+}}
+; CHECK-NEXT: .long 0
+; CHECK-NEXT: .byte 1
+; CHECK-NEXT: .byte 8
+; CHECK-NEXT: .short {{[0-9]+}}
+; CHECK-NEXT: .long 0
+define void @jsVoidCall(i64 %dummy1, i64* %obj, i64 %arg, i64 %l1, i64 %l2) {
+entry:
+ %resolveCall = inttoptr i64 244837814094590 to i8*
+ call void (i64, i32, i8*, i32, ...)* @llvm.experimental.patchpoint.void(i64 7, i32 24, i8* %resolveCall, i32 2, i64* %obj, i64 %arg, i64 %l1, i64 %l2)
+ ret void
+}
+
+; i64 JS Call
+;
+; 2 live variables in registers.
+;
+; CHECK-LABEL: .long .L{{.*}}-.L.jsIntCall
+; CHECK-NEXT: .short 0
+; CHECK-NEXT: .short 2
+; CHECK-NEXT: .byte 1
+; CHECK-NEXT: .byte 8
+; CHECK-NEXT: .short {{[0-9]+}}
+; CHECK-NEXT: .long 0
+; CHECK-NEXT: .byte 1
+; CHECK-NEXT: .byte 8
+; CHECK-NEXT: .short {{[0-9]+}}
+; CHECK-NEXT: .long 0
+define i64 @jsIntCall(i64 %dummy1, i64* %obj, i64 %arg, i64 %l1, i64 %l2) {
+entry:
+ %resolveCall = inttoptr i64 244837814094590 to i8*
+ %result = call i64 (i64, i32, i8*, i32, ...)* @llvm.experimental.patchpoint.i64(i64 8, i32 24, i8* %resolveCall, i32 2, i64* %obj, i64 %arg, i64 %l1, i64 %l2)
+ %add = add i64 %result, 3
+ ret i64 %add
+}
+
+; Spilled stack map values.
+;
+; Verify 28 stack map entries.
+;
+; CHECK-LABEL: .long .L{{.*}}-.L.spilledValue
+; CHECK-NEXT: .short 0
+; CHECK-NEXT: .short 28
+;
+; Check that at least one is a spilled entry from r31.
+; Location: Indirect FP + ...
+; CHECK: .byte 3
+; CHECK-NEXT: .byte 8
+; CHECK-NEXT: .short 31
+define void @spilledValue(i64 %arg0, i64 %arg1, i64 %arg2, i64 %arg3, i64 %arg4, i64 %l0, i64 %l1, i64 %l2, i64 %l3, i64 %l4, i64 %l5, i64 %l6, i64 %l7, i64 %l8, i64 %l9, i64 %l10, i64 %l11, i64 %l12, i64 %l13, i64 %l14, i64 %l15, i64 %l16, i64 %l17, i64 %l18, i64 %l19, i64 %l20, i64 %l21, i64 %l22, i64 %l23, i64 %l24, i64 %l25, i64 %l26, i64 %l27) {
+entry:
+ call void (i64, i32, i8*, i32, ...)* @llvm.experimental.patchpoint.void(i64 11, i32 24, i8* null, i32 5, i64 %arg0, i64 %arg1, i64 %arg2, i64 %arg3, i64 %arg4, i64 %l0, i64 %l1, i64 %l2, i64 %l3, i64 %l4, i64 %l5, i64 %l6, i64 %l7, i64 %l8, i64 %l9, i64 %l10, i64 %l11, i64 %l12, i64 %l13, i64 %l14, i64 %l15, i64 %l16, i64 %l17, i64 %l18, i64 %l19, i64 %l20, i64 %l21, i64 %l22, i64 %l23, i64 %l24, i64 %l25, i64 %l26, i64 %l27)
+ ret void
+}
+
+; Spilled stack map values.
+;
+; Verify 30 stack map entries.
+;
+; CHECK-LABEL: .long .L{{.*}}-.L.spilledStackMapValue
+; CHECK-NEXT: .short 0
+; CHECK-NEXT: .short 30
+;
+; Check that at least one is a spilled entry from r31.
+; Location: Indirect FP + ...
+; CHECK: .byte 3
+; CHECK-NEXT: .byte 8
+; CHECK-NEXT: .short 31
+define webkit_jscc void @spilledStackMapValue(i64 %l0, i64 %l1, i64 %l2, i64 %l3, i64 %l4, i64 %l5, i64 %l6, i64 %l7, i64 %l8, i64 %l9, i64 %l10, i64 %l11, i64 %l12, i64 %l13, i64 %l14, i64 %l15, i64 %l16, i64 %l17, i64 %l18, i64 %l19, i64 %l20, i64 %l21, i64 %l22, i64 %l23, i64 %l24, i64 %l25, i64 %l26, i64 %l27, i64 %l28, i64 %l29) {
+entry:
+ call void (i64, i32, ...)* @llvm.experimental.stackmap(i64 12, i32 16, i64 %l0, i64 %l1, i64 %l2, i64 %l3, i64 %l4, i64 %l5, i64 %l6, i64 %l7, i64 %l8, i64 %l9, i64 %l10, i64 %l11, i64 %l12, i64 %l13, i64 %l14, i64 %l15, i64 %l16, i64 %l17, i64 %l18, i64 %l19, i64 %l20, i64 %l21, i64 %l22, i64 %l23, i64 %l24, i64 %l25, i64 %l26, i64 %l27, i64 %l28, i64 %l29)
+ ret void
+}
+
+
+; Map a constant value.
+;
+; CHECK-LABEL: .long .L{{.*}}-.L.liveConstant
+; CHECK-NEXT: .short 0
+; 1 location
+; CHECK-NEXT: .short 1
+; Loc 0: SmallConstant
+; CHECK-NEXT: .byte 4
+; CHECK-NEXT: .byte 8
+; CHECK-NEXT: .short 0
+; CHECK-NEXT: .long 33
+
+define void @liveConstant() {
+ tail call void (i64, i32, ...)* @llvm.experimental.stackmap(i64 15, i32 8, i32 33)
+ ret void
+}
+
+; Map a value when LR is the only free register.
+;
+; CHECK-LABEL: .long .L{{.*}}-.L.clobberLR
+; CHECK-NEXT: .short 0
+; 1 location
+; CHECK-NEXT: .short 1
+; Loc 0: Indirect FP (r31) - offset
+; CHECK-NEXT: .byte 3
+; CHECK-NEXT: .byte 4
+; CHECK-NEXT: .short 31
+; CHECK-NEXT: .long {{[0-9]+}}
+define void @clobberLR(i32 %a) {
+ tail call void asm sideeffect "nop", "~{r0},~{r3},~{r4},~{r5},~{r6},~{r7},~{r8},~{r9},~{r10},~{r11},~{r12},~{r14},~{r15},~{r16},~{r17},~{r18},~{r19},~{r20},~{r21},~{r22},~{r23},~{r24},~{r25},~{r26},~{r27},~{r28},~{r29},~{r30},~{r31}"() nounwind
+ tail call void (i64, i32, ...)* @llvm.experimental.stackmap(i64 16, i32 8, i32 %a)
+ ret void
+}
+
+declare void @llvm.experimental.stackmap(i64, i32, ...)
+declare void @llvm.experimental.patchpoint.void(i64, i32, i8*, i32, ...)
+declare i64 @llvm.experimental.patchpoint.i64(i64, i32, i8*, i32, ...)
diff --git a/test/CodeGen/PowerPC/ppc64-vaarg-int.ll b/test/CodeGen/PowerPC/ppc64-vaarg-int.ll
index 5a63b01..c9a4f91 100644
--- a/test/CodeGen/PowerPC/ppc64-vaarg-int.ll
+++ b/test/CodeGen/PowerPC/ppc64-vaarg-int.ll
@@ -16,5 +16,5 @@ declare void @llvm.va_start(i8*) nounwind
; CHECK: @intvaarg
; Make sure that the va pointer is incremented by 8 (not 4).
-; CHECK: addi{{.*}}, 8
+; CHECK: addi{{.*}}, 1, 64
diff --git a/test/CodeGen/PowerPC/ppc64le-aggregates.ll b/test/CodeGen/PowerPC/ppc64le-aggregates.ll
index 9eed623..3fce36e 100644
--- a/test/CodeGen/PowerPC/ppc64le-aggregates.ll
+++ b/test/CodeGen/PowerPC/ppc64le-aggregates.ll
@@ -1,4 +1,11 @@
-; RUN: llc < %s -march=ppc64le -mcpu=pwr8 -mattr=+altivec | FileCheck %s
+; RUN: llc < %s -march=ppc64le -mcpu=pwr8 -mattr=+altivec -mattr=-vsx | FileCheck %s
+; RUN: llc < %s -march=ppc64le -mattr=+altivec -mattr=-vsx | FileCheck %s
+
+; Currently VSX support is disabled for this test because we generate lxsdx
+; instead of lfd, and stxsdx instead of stfd. That is a poor choice when we
+; have reg+imm addressing, and is on the list of things to be fixed.
+; The second RUN line is there to ensure that -march=ppc64le alone selects
+; the same feature set as -mcpu=pwr8, since that is the baseline for ppc64le.
target datalayout = "e-m:e-i64:64-n32:64"
target triple = "powerpc64le-unknown-linux-gnu"
@@ -257,26 +264,26 @@ entry:
ret void
}
; CHECK-LABEL: @caller2
-; CHECK: ld [[REG:[0-9]+]], .LC
-; CHECK-DAG: lfs 1, 0([[REG]])
-; CHECK-DAG: lfs 2, 4([[REG]])
-; CHECK-DAG: lfs 3, 8([[REG]])
-; CHECK-DAG: lfs 4, 12([[REG]])
-; CHECK-DAG: lfs 5, 16([[REG]])
-; CHECK-DAG: lfs 6, 20([[REG]])
-; CHECK-DAG: lfs 7, 24([[REG]])
-; CHECK-DAG: lfs 8, 28([[REG]])
-; CHECK: ld [[REG:[0-9]+]], .LC
-; CHECK-DAG: lfs 9, 0([[REG]])
-; CHECK-DAG: lfs 10, 4([[REG]])
-; CHECK-DAG: lfs 11, 8([[REG]])
-; CHECK-DAG: lfs 12, 12([[REG]])
-; CHECK-DAG: lfs 13, 16([[REG]])
-; CHECK: ld [[REG:[0-9]+]], .LC
-; CHECK-DAG: lwz [[REG0:[0-9]+]], 0([[REG]])
-; CHECK-DAG: lwz [[REG1:[0-9]+]], 4([[REG]])
-; CHECK-DAG: sldi [[REG1]], [[REG1]], 32
-; CHECK-DAG: or 10, [[REG0]], [[REG1]]
+; CHECK: ld {{[0-9]+}}, .LC
+; CHECK-DAG: lfs 1, 0({{[0-9]+}})
+; CHECK-DAG: lfs 2, 4({{[0-9]+}})
+; CHECK-DAG: lfs 3, 8({{[0-9]+}})
+; CHECK-DAG: lfs 4, 12({{[0-9]+}})
+; CHECK-DAG: lfs 5, 16({{[0-9]+}})
+; CHECK-DAG: lfs 6, 20({{[0-9]+}})
+; CHECK-DAG: lfs 7, 24({{[0-9]+}})
+; CHECK-DAG: lfs 8, 28({{[0-9]+}})
+
+; CHECK-DAG: lfs 9, 0({{[0-9]+}})
+; CHECK-DAG: lfs 10, 4({{[0-9]+}})
+; CHECK-DAG: lfs 11, 8({{[0-9]+}})
+; CHECK-DAG: lfs 12, 12({{[0-9]+}})
+; CHECK-DAG: lfs 13, 16({{[0-9]+}})
+
+; CHECK-DAG: lwz [[REG0:[0-9]+]], 0({{[0-9]+}})
+; CHECK-DAG: lwz [[REG1:[0-9]+]], 4({{[0-9]+}})
+; CHECK-DAG: sldi [[REG2:[0-9]+]], [[REG1]], 32
+; CHECK-DAG: or 10, [[REG0]], [[REG2]]
; CHECK: bl test2
declare void @test2([8 x float], [5 x float], [2 x float])
diff --git a/test/CodeGen/PowerPC/ppc64le-calls.ll b/test/CodeGen/PowerPC/ppc64le-calls.ll
index 0d667dd..b65b954 100644
--- a/test/CodeGen/PowerPC/ppc64le-calls.ll
+++ b/test/CodeGen/PowerPC/ppc64le-calls.ll
@@ -1,4 +1,8 @@
; RUN: llc -march=ppc64le -mcpu=pwr8 < %s | FileCheck %s
+; RUN: llc -march=ppc64le < %s | FileCheck %s
+
+; The second run of the test case is to ensure the behaviour is the same
+; without specifying -mcpu=pwr8, as that is now the baseline for ppc64le.
target datalayout = "e-m:e-i64:64-n32:64"
target triple = "powerpc64le-unknown-linux-gnu"
diff --git a/test/CodeGen/PowerPC/ppc64le-localentry.ll b/test/CodeGen/PowerPC/ppc64le-localentry.ll
index 4676ce8..d9995de 100644
--- a/test/CodeGen/PowerPC/ppc64le-localentry.ll
+++ b/test/CodeGen/PowerPC/ppc64le-localentry.ll
@@ -1,5 +1,10 @@
; RUN: llc -march=ppc64le -mcpu=pwr8 < %s | FileCheck %s
; RUN: llc -march=ppc64le -mcpu=pwr8 -O0 < %s | FileCheck %s
+; RUN: llc -march=ppc64le < %s | FileCheck %s
+; RUN: llc -march=ppc64le -O0 < %s | FileCheck %s
+
+; The additional runs of the test case are to ensure the behaviour is the same
+; without specifying -mcpu=pwr8, as that is now the baseline for ppc64le.
target datalayout = "e-m:e-i64:64-n32:64"
target triple = "powerpc64le-unknown-linux-gnu"
diff --git a/test/CodeGen/PowerPC/ppcf128-endian.ll b/test/CodeGen/PowerPC/ppcf128-endian.ll
index 2a5f13a..180fedf 100644
--- a/test/CodeGen/PowerPC/ppcf128-endian.ll
+++ b/test/CodeGen/PowerPC/ppcf128-endian.ll
@@ -1,4 +1,4 @@
-; RUN: llc -mcpu=pwr7 -mattr=+altivec < %s | FileCheck %s
+; RUN: llc -mcpu=pwr7 -mattr=+altivec -mattr=-vsx < %s | FileCheck %s
target datalayout = "e-m:e-i64:64-n32:64"
target triple = "powerpc64le-unknown-linux-gnu"
diff --git a/test/CodeGen/PowerPC/pr17168.ll b/test/CodeGen/PowerPC/pr17168.ll
index c3f0162..62a9ede 100644
--- a/test/CodeGen/PowerPC/pr17168.ll
+++ b/test/CodeGen/PowerPC/pr17168.ll
@@ -25,7 +25,7 @@ for.cond968.preheader: ; preds = %for.cond968.prehead
for.end1042: ; preds = %for.cond968.preheader, %for.cond964.preheader, %entry
%0 = phi i32 [ undef, %for.cond964.preheader ], [ undef, %for.cond968.preheader ], [ undef, %entry ]
%1 = load i32* getelementptr inbounds ([3 x i32]* @grid_points, i64 0, i64 0), align 4, !dbg !443, !tbaa !444
- tail call void @llvm.dbg.value(metadata !447, i64 0, metadata !119, metadata !{metadata !"0x102"}), !dbg !448
+ tail call void @llvm.dbg.value(metadata i32 1, i64 0, metadata !119, metadata !{!"0x102"}), !dbg !448
%sub10454270 = add nsw i32 %0, -1, !dbg !448
%cmp10464271 = icmp sgt i32 %sub10454270, 1, !dbg !448
%sub11134263 = add nsw i32 %1, -1, !dbg !450
@@ -54,468 +54,468 @@ attributes #1 = { nounwind readnone }
!llvm.dbg.cu = !{!0}
!llvm.module.flags = !{!438, !464}
-!0 = metadata !{metadata !"0x11\0012\00clang version 3.4 (trunk 190311)\001\00\000\00\000", metadata !1, metadata !2, metadata !2, metadata !3, metadata !298, metadata !2} ; [ DW_TAG_compile_unit ] [/home/hfinkel/src/NPB2.3-omp-C/BT/bt.c] [DW_LANG_C99]
-!1 = metadata !{metadata !"bt.c", metadata !"/home/hfinkel/src/NPB2.3-omp-C/BT"}
-!2 = metadata !{}
-!3 = metadata !{metadata !4, metadata !82, metadata !102, metadata !114, metadata !132, metadata !145, metadata !154, metadata !155, metadata !162, metadata !183, metadata !200, metadata !201, metadata !207, metadata !208, metadata !215, metadata !221, metadata !230, metadata !238, metadata !246, metadata !255, metadata !260, metadata !261, metadata !268, metadata !274, metadata !279, metadata !280, metadata !287, metadata !293}
-!4 = metadata !{metadata !"0x2e\00main\00main\00\0074\000\001\000\006\00256\001\0074", metadata !1, metadata !5, metadata !6, null, null, null, null, metadata !12} ; [ DW_TAG_subprogram ] [line 74] [def] [main]
-!5 = metadata !{metadata !"0x29", metadata !1} ; [ DW_TAG_file_type ] [/home/hfinkel/src/NPB2.3-omp-C/BT/bt.c]
-!6 = metadata !{metadata !"0x15\00\000\000\000\000\000\000", i32 0, null, null, metadata !7, null, null, null} ; [ DW_TAG_subroutine_type ] [line 0, size 0, align 0, offset 0] [from ]
-!7 = metadata !{metadata !8, metadata !8, metadata !9}
-!8 = metadata !{metadata !"0x24\00int\000\0032\0032\000\000\005", null, null} ; [ DW_TAG_base_type ] [int] [line 0, size 32, align 32, offset 0, enc DW_ATE_signed]
-!9 = metadata !{metadata !"0xf\00\000\0064\0064\000\000", null, null, metadata !10} ; [ DW_TAG_pointer_type ] [line 0, size 64, align 64, offset 0] [from ]
-!10 = metadata !{metadata !"0xf\00\000\0064\0064\000\000", null, null, metadata !11} ; [ DW_TAG_pointer_type ] [line 0, size 64, align 64, offset 0] [from char]
-!11 = metadata !{metadata !"0x24\00char\000\008\008\000\000\008", null, null} ; [ DW_TAG_base_type ] [char] [line 0, size 8, align 8, offset 0, enc DW_ATE_unsigned_char]
-!12 = metadata !{metadata !13, metadata !14, metadata !15, metadata !16, metadata !17, metadata !18, metadata !19, metadata !21, metadata !22, metadata !23, metadata !25, metadata !26}
-!13 = metadata !{metadata !"0x101\00argc\0016777290\000", metadata !4, metadata !5, metadata !8} ; [ DW_TAG_arg_variable ] [argc] [line 74]
-!14 = metadata !{metadata !"0x101\00argv\0033554506\000", metadata !4, metadata !5, metadata !9} ; [ DW_TAG_arg_variable ] [argv] [line 74]
-!15 = metadata !{metadata !"0x100\00niter\0076\000", metadata !4, metadata !5, metadata !8} ; [ DW_TAG_auto_variable ] [niter] [line 76]
-!16 = metadata !{metadata !"0x100\00step\0076\000", metadata !4, metadata !5, metadata !8} ; [ DW_TAG_auto_variable ] [step] [line 76]
-!17 = metadata !{metadata !"0x100\00n3\0076\000", metadata !4, metadata !5, metadata !8} ; [ DW_TAG_auto_variable ] [n3] [line 76]
-!18 = metadata !{metadata !"0x100\00nthreads\0077\000", metadata !4, metadata !5, metadata !8} ; [ DW_TAG_auto_variable ] [nthreads] [line 77]
-!19 = metadata !{metadata !"0x100\00navg\0078\000", metadata !4, metadata !5, metadata !20} ; [ DW_TAG_auto_variable ] [navg] [line 78]
-!20 = metadata !{metadata !"0x24\00double\000\0064\0064\000\000\004", null, null} ; [ DW_TAG_base_type ] [double] [line 0, size 64, align 64, offset 0, enc DW_ATE_float]
-!21 = metadata !{metadata !"0x100\00mflops\0078\000", metadata !4, metadata !5, metadata !20} ; [ DW_TAG_auto_variable ] [mflops] [line 78]
-!22 = metadata !{metadata !"0x100\00tmax\0080\000", metadata !4, metadata !5, metadata !20} ; [ DW_TAG_auto_variable ] [tmax] [line 80]
-!23 = metadata !{metadata !"0x100\00verified\0081\000", metadata !4, metadata !5, metadata !24} ; [ DW_TAG_auto_variable ] [verified] [line 81]
-!24 = metadata !{metadata !"0x16\00boolean\0012\000\000\000\000", metadata !1, null, metadata !8} ; [ DW_TAG_typedef ] [boolean] [line 12, size 0, align 0, offset 0] [from int]
-!25 = metadata !{metadata !"0x100\00class\0082\000", metadata !4, metadata !5, metadata !11} ; [ DW_TAG_auto_variable ] [class] [line 82]
-!26 = metadata !{metadata !"0x100\00fp\0083\000", metadata !4, metadata !5, metadata !27} ; [ DW_TAG_auto_variable ] [fp] [line 83]
-!27 = metadata !{metadata !"0xf\00\000\0064\0064\000\000", null, null, metadata !28} ; [ DW_TAG_pointer_type ] [line 0, size 64, align 64, offset 0] [from FILE]
-!28 = metadata !{metadata !"0x16\00FILE\0049\000\000\000\000", metadata !1, null, metadata !29} ; [ DW_TAG_typedef ] [FILE] [line 49, size 0, align 0, offset 0] [from _IO_FILE]
-!29 = metadata !{metadata !"0x13\00_IO_FILE\00271\001728\0064\000\000\000", metadata !30, null, null, metadata !31, null, null, null} ; [ DW_TAG_structure_type ] [_IO_FILE] [line 271, size 1728, align 64, offset 0] [def] [from ]
-!30 = metadata !{metadata !"/usr/include/libio.h", metadata !"/home/hfinkel/src/NPB2.3-omp-C/BT"}
-!31 = metadata !{metadata !32, metadata !33, metadata !34, metadata !35, metadata !36, metadata !37, metadata !38, metadata !39, metadata !40, metadata !41, metadata !42, metadata !43, metadata !44, metadata !52, metadata !53, metadata !54, metadata !55, metadata !58, metadata !60, metadata !62, metadata !66, metadata !68, metadata !70, metadata !71, metadata !72, metadata !73, metadata !74, metadata !77, metadata !78}
-!32 = metadata !{metadata !"0xd\00_flags\00272\0032\0032\000\000", metadata !30, metadata !29, metadata !8} ; [ DW_TAG_member ] [_flags] [line 272, size 32, align 32, offset 0] [from int]
-!33 = metadata !{metadata !"0xd\00_IO_read_ptr\00277\0064\0064\0064\000", metadata !30, metadata !29, metadata !10} ; [ DW_TAG_member ] [_IO_read_ptr] [line 277, size 64, align 64, offset 64] [from ]
-!34 = metadata !{metadata !"0xd\00_IO_read_end\00278\0064\0064\00128\000", metadata !30, metadata !29, metadata !10} ; [ DW_TAG_member ] [_IO_read_end] [line 278, size 64, align 64, offset 128] [from ]
-!35 = metadata !{metadata !"0xd\00_IO_read_base\00279\0064\0064\00192\000", metadata !30, metadata !29, metadata !10} ; [ DW_TAG_member ] [_IO_read_base] [line 279, size 64, align 64, offset 192] [from ]
-!36 = metadata !{metadata !"0xd\00_IO_write_base\00280\0064\0064\00256\000", metadata !30, metadata !29, metadata !10} ; [ DW_TAG_member ] [_IO_write_base] [line 280, size 64, align 64, offset 256] [from ]
-!37 = metadata !{metadata !"0xd\00_IO_write_ptr\00281\0064\0064\00320\000", metadata !30, metadata !29, metadata !10} ; [ DW_TAG_member ] [_IO_write_ptr] [line 281, size 64, align 64, offset 320] [from ]
-!38 = metadata !{metadata !"0xd\00_IO_write_end\00282\0064\0064\00384\000", metadata !30, metadata !29, metadata !10} ; [ DW_TAG_member ] [_IO_write_end] [line 282, size 64, align 64, offset 384] [from ]
-!39 = metadata !{metadata !"0xd\00_IO_buf_base\00283\0064\0064\00448\000", metadata !30, metadata !29, metadata !10} ; [ DW_TAG_member ] [_IO_buf_base] [line 283, size 64, align 64, offset 448] [from ]
-!40 = metadata !{metadata !"0xd\00_IO_buf_end\00284\0064\0064\00512\000", metadata !30, metadata !29, metadata !10} ; [ DW_TAG_member ] [_IO_buf_end] [line 284, size 64, align 64, offset 512] [from ]
-!41 = metadata !{metadata !"0xd\00_IO_save_base\00286\0064\0064\00576\000", metadata !30, metadata !29, metadata !10} ; [ DW_TAG_member ] [_IO_save_base] [line 286, size 64, align 64, offset 576] [from ]
-!42 = metadata !{metadata !"0xd\00_IO_backup_base\00287\0064\0064\00640\000", metadata !30, metadata !29, metadata !10} ; [ DW_TAG_member ] [_IO_backup_base] [line 287, size 64, align 64, offset 640] [from ]
-!43 = metadata !{metadata !"0xd\00_IO_save_end\00288\0064\0064\00704\000", metadata !30, metadata !29, metadata !10} ; [ DW_TAG_member ] [_IO_save_end] [line 288, size 64, align 64, offset 704] [from ]
-!44 = metadata !{metadata !"0xd\00_markers\00290\0064\0064\00768\000", metadata !30, metadata !29, metadata !45} ; [ DW_TAG_member ] [_markers] [line 290, size 64, align 64, offset 768] [from ]
-!45 = metadata !{metadata !"0xf\00\000\0064\0064\000\000", null, null, metadata !46} ; [ DW_TAG_pointer_type ] [line 0, size 64, align 64, offset 0] [from _IO_marker]
-!46 = metadata !{metadata !"0x13\00_IO_marker\00186\00192\0064\000\000\000", metadata !30, null, null, metadata !47, null, null, null} ; [ DW_TAG_structure_type ] [_IO_marker] [line 186, size 192, align 64, offset 0] [def] [from ]
-!47 = metadata !{metadata !48, metadata !49, metadata !51}
-!48 = metadata !{metadata !"0xd\00_next\00187\0064\0064\000\000", metadata !30, metadata !46, metadata !45} ; [ DW_TAG_member ] [_next] [line 187, size 64, align 64, offset 0] [from ]
-!49 = metadata !{metadata !"0xd\00_sbuf\00188\0064\0064\0064\000", metadata !30, metadata !46, metadata !50} ; [ DW_TAG_member ] [_sbuf] [line 188, size 64, align 64, offset 64] [from ]
-!50 = metadata !{metadata !"0xf\00\000\0064\0064\000\000", null, null, metadata !29} ; [ DW_TAG_pointer_type ] [line 0, size 64, align 64, offset 0] [from _IO_FILE]
-!51 = metadata !{metadata !"0xd\00_pos\00192\0032\0032\00128\000", metadata !30, metadata !46, metadata !8} ; [ DW_TAG_member ] [_pos] [line 192, size 32, align 32, offset 128] [from int]
-!52 = metadata !{metadata !"0xd\00_chain\00292\0064\0064\00832\000", metadata !30, metadata !29, metadata !50} ; [ DW_TAG_member ] [_chain] [line 292, size 64, align 64, offset 832] [from ]
-!53 = metadata !{metadata !"0xd\00_fileno\00294\0032\0032\00896\000", metadata !30, metadata !29, metadata !8} ; [ DW_TAG_member ] [_fileno] [line 294, size 32, align 32, offset 896] [from int]
-!54 = metadata !{metadata !"0xd\00_flags2\00298\0032\0032\00928\000", metadata !30, metadata !29, metadata !8} ; [ DW_TAG_member ] [_flags2] [line 298, size 32, align 32, offset 928] [from int]
-!55 = metadata !{metadata !"0xd\00_old_offset\00300\0064\0064\00960\000", metadata !30, metadata !29, metadata !56} ; [ DW_TAG_member ] [_old_offset] [line 300, size 64, align 64, offset 960] [from __off_t]
-!56 = metadata !{metadata !"0x16\00__off_t\00141\000\000\000\000", metadata !30, null, metadata !57} ; [ DW_TAG_typedef ] [__off_t] [line 141, size 0, align 0, offset 0] [from long int]
-!57 = metadata !{metadata !"0x24\00long int\000\0064\0064\000\000\005", null, null} ; [ DW_TAG_base_type ] [long int] [line 0, size 64, align 64, offset 0, enc DW_ATE_signed]
-!58 = metadata !{metadata !"0xd\00_cur_column\00304\0016\0016\001024\000", metadata !30, metadata !29, metadata !59} ; [ DW_TAG_member ] [_cur_column] [line 304, size 16, align 16, offset 1024] [from unsigned short]
-!59 = metadata !{metadata !"0x24\00unsigned short\000\0016\0016\000\000\007", null, null} ; [ DW_TAG_base_type ] [unsigned short] [line 0, size 16, align 16, offset 0, enc DW_ATE_unsigned]
-!60 = metadata !{metadata !"0xd\00_vtable_offset\00305\008\008\001040\000", metadata !30, metadata !29, metadata !61} ; [ DW_TAG_member ] [_vtable_offset] [line 305, size 8, align 8, offset 1040] [from signed char]
-!61 = metadata !{metadata !"0x24\00signed char\000\008\008\000\000\006", null, null} ; [ DW_TAG_base_type ] [signed char] [line 0, size 8, align 8, offset 0, enc DW_ATE_signed_char]
-!62 = metadata !{metadata !"0xd\00_shortbuf\00306\008\008\001048\000", metadata !30, metadata !29, metadata !63} ; [ DW_TAG_member ] [_shortbuf] [line 306, size 8, align 8, offset 1048] [from ]
-!63 = metadata !{metadata !"0x1\00\000\008\008\000\000", null, null, metadata !11, metadata !64, i32 0, null, null, null} ; [ DW_TAG_array_type ] [line 0, size 8, align 8, offset 0] [from char]
-!64 = metadata !{metadata !65}
-!65 = metadata !{metadata !"0x21\000\001"} ; [ DW_TAG_subrange_type ] [0, 0]
-!66 = metadata !{metadata !"0xd\00_lock\00310\0064\0064\001088\000", metadata !30, metadata !29, metadata !67} ; [ DW_TAG_member ] [_lock] [line 310, size 64, align 64, offset 1088] [from ]
-!67 = metadata !{metadata !"0xf\00\000\0064\0064\000\000", null, null, null} ; [ DW_TAG_pointer_type ] [line 0, size 64, align 64, offset 0] [from ]
-!68 = metadata !{metadata !"0xd\00_offset\00319\0064\0064\001152\000", metadata !30, metadata !29, metadata !69} ; [ DW_TAG_member ] [_offset] [line 319, size 64, align 64, offset 1152] [from __off64_t]
-!69 = metadata !{metadata !"0x16\00__off64_t\00142\000\000\000\000", metadata !30, null, metadata !57} ; [ DW_TAG_typedef ] [__off64_t] [line 142, size 0, align 0, offset 0] [from long int]
-!70 = metadata !{metadata !"0xd\00__pad1\00328\0064\0064\001216\000", metadata !30, metadata !29, metadata !67} ; [ DW_TAG_member ] [__pad1] [line 328, size 64, align 64, offset 1216] [from ]
-!71 = metadata !{metadata !"0xd\00__pad2\00329\0064\0064\001280\000", metadata !30, metadata !29, metadata !67} ; [ DW_TAG_member ] [__pad2] [line 329, size 64, align 64, offset 1280] [from ]
-!72 = metadata !{metadata !"0xd\00__pad3\00330\0064\0064\001344\000", metadata !30, metadata !29, metadata !67} ; [ DW_TAG_member ] [__pad3] [line 330, size 64, align 64, offset 1344] [from ]
-!73 = metadata !{metadata !"0xd\00__pad4\00331\0064\0064\001408\000", metadata !30, metadata !29, metadata !67} ; [ DW_TAG_member ] [__pad4] [line 331, size 64, align 64, offset 1408] [from ]
-!74 = metadata !{metadata !"0xd\00__pad5\00332\0064\0064\001472\000", metadata !30, metadata !29, metadata !75} ; [ DW_TAG_member ] [__pad5] [line 332, size 64, align 64, offset 1472] [from size_t]
-!75 = metadata !{metadata !"0x16\00size_t\0042\000\000\000\000", metadata !30, null, metadata !76} ; [ DW_TAG_typedef ] [size_t] [line 42, size 0, align 0, offset 0] [from long unsigned int]
-!76 = metadata !{metadata !"0x24\00long unsigned int\000\0064\0064\000\000\007", null, null} ; [ DW_TAG_base_type ] [long unsigned int] [line 0, size 64, align 64, offset 0, enc DW_ATE_unsigned]
-!77 = metadata !{metadata !"0xd\00_mode\00334\0032\0032\001536\000", metadata !30, metadata !29, metadata !8} ; [ DW_TAG_member ] [_mode] [line 334, size 32, align 32, offset 1536] [from int]
-!78 = metadata !{metadata !"0xd\00_unused2\00336\00160\008\001568\000", metadata !30, metadata !29, metadata !79} ; [ DW_TAG_member ] [_unused2] [line 336, size 160, align 8, offset 1568] [from ]
-!79 = metadata !{metadata !"0x1\00\000\00160\008\000\000", null, null, metadata !11, metadata !80, i32 0, null, null, null} ; [ DW_TAG_array_type ] [line 0, size 160, align 8, offset 0] [from char]
-!80 = metadata !{metadata !81}
-!81 = metadata !{metadata !"0x21\000\0020"} ; [ DW_TAG_subrange_type ] [0, 19]
-!82 = metadata !{metadata !"0x2e\00verify\00verify\00\002388\001\001\000\006\00256\001\002388", metadata !1, metadata !5, metadata !83, null, null, null, null, metadata !86} ; [ DW_TAG_subprogram ] [line 2388] [local] [def] [verify]
-!83 = metadata !{metadata !"0x15\00\000\000\000\000\000\000", i32 0, null, null, metadata !84, null, null, null} ; [ DW_TAG_subroutine_type ] [line 0, size 0, align 0, offset 0] [from ]
-!84 = metadata !{null, metadata !8, metadata !10, metadata !85}
-!85 = metadata !{metadata !"0xf\00\000\0064\0064\000\000", null, null, metadata !24} ; [ DW_TAG_pointer_type ] [line 0, size 64, align 64, offset 0] [from boolean]
-!86 = metadata !{metadata !87, metadata !88, metadata !89, metadata !90, metadata !94, metadata !95, metadata !96, metadata !97, metadata !98, metadata !99, metadata !100, metadata !101}
-!87 = metadata !{metadata !"0x101\00no_time_steps\0016779604\000", metadata !82, metadata !5, metadata !8} ; [ DW_TAG_arg_variable ] [no_time_steps] [line 2388]
-!88 = metadata !{metadata !"0x101\00class\0033556820\000", metadata !82, metadata !5, metadata !10} ; [ DW_TAG_arg_variable ] [class] [line 2388]
-!89 = metadata !{metadata !"0x101\00verified\0050334036\000", metadata !82, metadata !5, metadata !85} ; [ DW_TAG_arg_variable ] [verified] [line 2388]
-!90 = metadata !{metadata !"0x100\00xcrref\002397\000", metadata !82, metadata !5, metadata !91} ; [ DW_TAG_auto_variable ] [xcrref] [line 2397]
-!91 = metadata !{metadata !"0x1\00\000\00320\0064\000\000", null, null, metadata !20, metadata !92, i32 0, null, null, null} ; [ DW_TAG_array_type ] [line 0, size 320, align 64, offset 0] [from double]
-!92 = metadata !{metadata !93}
-!93 = metadata !{metadata !"0x21\000\005"} ; [ DW_TAG_subrange_type ] [0, 4]
-!94 = metadata !{metadata !"0x100\00xceref\002397\000", metadata !82, metadata !5, metadata !91} ; [ DW_TAG_auto_variable ] [xceref] [line 2397]
-!95 = metadata !{metadata !"0x100\00xcrdif\002397\000", metadata !82, metadata !5, metadata !91} ; [ DW_TAG_auto_variable ] [xcrdif] [line 2397]
-!96 = metadata !{metadata !"0x100\00xcedif\002397\000", metadata !82, metadata !5, metadata !91} ; [ DW_TAG_auto_variable ] [xcedif] [line 2397]
-!97 = metadata !{metadata !"0x100\00epsilon\002398\000", metadata !82, metadata !5, metadata !20} ; [ DW_TAG_auto_variable ] [epsilon] [line 2398]
-!98 = metadata !{metadata !"0x100\00xce\002398\000", metadata !82, metadata !5, metadata !91} ; [ DW_TAG_auto_variable ] [xce] [line 2398]
-!99 = metadata !{metadata !"0x100\00xcr\002398\000", metadata !82, metadata !5, metadata !91} ; [ DW_TAG_auto_variable ] [xcr] [line 2398]
-!100 = metadata !{metadata !"0x100\00dtref\002398\000", metadata !82, metadata !5, metadata !20} ; [ DW_TAG_auto_variable ] [dtref] [line 2398]
-!101 = metadata !{metadata !"0x100\00m\002399\000", metadata !82, metadata !5, metadata !8} ; [ DW_TAG_auto_variable ] [m] [line 2399]
-!102 = metadata !{metadata !"0x2e\00rhs_norm\00rhs_norm\00\00266\001\001\000\006\00256\001\00266", metadata !1, metadata !5, metadata !103, null, null, null, null, metadata !106} ; [ DW_TAG_subprogram ] [line 266] [local] [def] [rhs_norm]
-!103 = metadata !{metadata !"0x15\00\000\000\000\000\000\000", i32 0, null, null, metadata !104, null, null, null} ; [ DW_TAG_subroutine_type ] [line 0, size 0, align 0, offset 0] [from ]
-!104 = metadata !{null, metadata !105}
-!105 = metadata !{metadata !"0xf\00\000\0064\0064\000\000", null, null, metadata !20} ; [ DW_TAG_pointer_type ] [line 0, size 64, align 64, offset 0] [from double]
-!106 = metadata !{metadata !107, metadata !108, metadata !109, metadata !110, metadata !111, metadata !112, metadata !113}
-!107 = metadata !{metadata !"0x101\00rms\0016777482\000", metadata !102, metadata !5, metadata !105} ; [ DW_TAG_arg_variable ] [rms] [line 266]
-!108 = metadata !{metadata !"0x100\00i\00271\000", metadata !102, metadata !5, metadata !8} ; [ DW_TAG_auto_variable ] [i] [line 271]
-!109 = metadata !{metadata !"0x100\00j\00271\000", metadata !102, metadata !5, metadata !8} ; [ DW_TAG_auto_variable ] [j] [line 271]
-!110 = metadata !{metadata !"0x100\00k\00271\000", metadata !102, metadata !5, metadata !8} ; [ DW_TAG_auto_variable ] [k] [line 271]
-!111 = metadata !{metadata !"0x100\00d\00271\000", metadata !102, metadata !5, metadata !8} ; [ DW_TAG_auto_variable ] [d] [line 271]
-!112 = metadata !{metadata !"0x100\00m\00271\000", metadata !102, metadata !5, metadata !8} ; [ DW_TAG_auto_variable ] [m] [line 271]
-!113 = metadata !{metadata !"0x100\00add\00272\000", metadata !102, metadata !5, metadata !20} ; [ DW_TAG_auto_variable ] [add] [line 272]
-!114 = metadata !{metadata !"0x2e\00compute_rhs\00compute_rhs\00\001767\001\001\000\006\00256\001\001767", metadata !1, metadata !5, metadata !115, null, void ()* @compute_rhs, null, null, metadata !117} ; [ DW_TAG_subprogram ] [line 1767] [local] [def] [compute_rhs]
-!115 = metadata !{metadata !"0x15\00\000\000\000\000\000\000", i32 0, null, null, metadata !116, null, null, null} ; [ DW_TAG_subroutine_type ] [line 0, size 0, align 0, offset 0] [from ]
-!116 = metadata !{null}
-!117 = metadata !{metadata !118, metadata !119, metadata !120, metadata !121, metadata !122, metadata !123, metadata !124, metadata !125, metadata !126, metadata !127, metadata !128, metadata !129, metadata !130, metadata !131}
-!118 = metadata !{metadata !"0x100\00i\001769\000", metadata !114, metadata !5, metadata !8} ; [ DW_TAG_auto_variable ] [i] [line 1769]
-!119 = metadata !{metadata !"0x100\00j\001769\000", metadata !114, metadata !5, metadata !8} ; [ DW_TAG_auto_variable ] [j] [line 1769]
-!120 = metadata !{metadata !"0x100\00k\001769\000", metadata !114, metadata !5, metadata !8} ; [ DW_TAG_auto_variable ] [k] [line 1769]
-!121 = metadata !{metadata !"0x100\00m\001769\000", metadata !114, metadata !5, metadata !8} ; [ DW_TAG_auto_variable ] [m] [line 1769]
-!122 = metadata !{metadata !"0x100\00rho_inv\001770\000", metadata !114, metadata !5, metadata !20} ; [ DW_TAG_auto_variable ] [rho_inv] [line 1770]
-!123 = metadata !{metadata !"0x100\00uijk\001770\000", metadata !114, metadata !5, metadata !20} ; [ DW_TAG_auto_variable ] [uijk] [line 1770]
-!124 = metadata !{metadata !"0x100\00up1\001770\000", metadata !114, metadata !5, metadata !20} ; [ DW_TAG_auto_variable ] [up1] [line 1770]
-!125 = metadata !{metadata !"0x100\00um1\001770\000", metadata !114, metadata !5, metadata !20} ; [ DW_TAG_auto_variable ] [um1] [line 1770]
-!126 = metadata !{metadata !"0x100\00vijk\001770\000", metadata !114, metadata !5, metadata !20} ; [ DW_TAG_auto_variable ] [vijk] [line 1770]
-!127 = metadata !{metadata !"0x100\00vp1\001770\000", metadata !114, metadata !5, metadata !20} ; [ DW_TAG_auto_variable ] [vp1] [line 1770]
-!128 = metadata !{metadata !"0x100\00vm1\001770\000", metadata !114, metadata !5, metadata !20} ; [ DW_TAG_auto_variable ] [vm1] [line 1770]
-!129 = metadata !{metadata !"0x100\00wijk\001770\000", metadata !114, metadata !5, metadata !20} ; [ DW_TAG_auto_variable ] [wijk] [line 1770]
-!130 = metadata !{metadata !"0x100\00wp1\001770\000", metadata !114, metadata !5, metadata !20} ; [ DW_TAG_auto_variable ] [wp1] [line 1770]
-!131 = metadata !{metadata !"0x100\00wm1\001770\000", metadata !114, metadata !5, metadata !20} ; [ DW_TAG_auto_variable ] [wm1] [line 1770]
-!132 = metadata !{metadata !"0x2e\00error_norm\00error_norm\00\00225\001\001\000\006\00256\001\00225", metadata !1, metadata !5, metadata !103, null, null, null, null, metadata !133} ; [ DW_TAG_subprogram ] [line 225] [local] [def] [error_norm]
-!133 = metadata !{metadata !134, metadata !135, metadata !136, metadata !137, metadata !138, metadata !139, metadata !140, metadata !141, metadata !142, metadata !143, metadata !144}
-!134 = metadata !{metadata !"0x101\00rms\0016777441\000", metadata !132, metadata !5, metadata !105} ; [ DW_TAG_arg_variable ] [rms] [line 225]
-!135 = metadata !{metadata !"0x100\00i\00232\000", metadata !132, metadata !5, metadata !8} ; [ DW_TAG_auto_variable ] [i] [line 232]
-!136 = metadata !{metadata !"0x100\00j\00232\000", metadata !132, metadata !5, metadata !8} ; [ DW_TAG_auto_variable ] [j] [line 232]
-!137 = metadata !{metadata !"0x100\00k\00232\000", metadata !132, metadata !5, metadata !8} ; [ DW_TAG_auto_variable ] [k] [line 232]
-!138 = metadata !{metadata !"0x100\00m\00232\000", metadata !132, metadata !5, metadata !8} ; [ DW_TAG_auto_variable ] [m] [line 232]
-!139 = metadata !{metadata !"0x100\00d\00232\000", metadata !132, metadata !5, metadata !8} ; [ DW_TAG_auto_variable ] [d] [line 232]
-!140 = metadata !{metadata !"0x100\00xi\00233\000", metadata !132, metadata !5, metadata !20} ; [ DW_TAG_auto_variable ] [xi] [line 233]
-!141 = metadata !{metadata !"0x100\00eta\00233\000", metadata !132, metadata !5, metadata !20} ; [ DW_TAG_auto_variable ] [eta] [line 233]
-!142 = metadata !{metadata !"0x100\00zeta\00233\000", metadata !132, metadata !5, metadata !20} ; [ DW_TAG_auto_variable ] [zeta] [line 233]
-!143 = metadata !{metadata !"0x100\00u_exact\00233\000", metadata !132, metadata !5, metadata !91} ; [ DW_TAG_auto_variable ] [u_exact] [line 233]
-!144 = metadata !{metadata !"0x100\00add\00233\000", metadata !132, metadata !5, metadata !20} ; [ DW_TAG_auto_variable ] [add] [line 233]
-!145 = metadata !{metadata !"0x2e\00exact_solution\00exact_solution\00\00643\001\001\000\006\00256\001\00644", metadata !1, metadata !5, metadata !146, null, null, null, null, metadata !148} ; [ DW_TAG_subprogram ] [line 643] [local] [def] [scope 644] [exact_solution]
-!146 = metadata !{metadata !"0x15\00\000\000\000\000\000\000", i32 0, null, null, metadata !147, null, null, null} ; [ DW_TAG_subroutine_type ] [line 0, size 0, align 0, offset 0] [from ]
-!147 = metadata !{null, metadata !20, metadata !20, metadata !20, metadata !105}
-!148 = metadata !{metadata !149, metadata !150, metadata !151, metadata !152, metadata !153}
-!149 = metadata !{metadata !"0x101\00xi\0016777859\000", metadata !145, metadata !5, metadata !20} ; [ DW_TAG_arg_variable ] [xi] [line 643]
-!150 = metadata !{metadata !"0x101\00eta\0033555075\000", metadata !145, metadata !5, metadata !20} ; [ DW_TAG_arg_variable ] [eta] [line 643]
-!151 = metadata !{metadata !"0x101\00zeta\0050332291\000", metadata !145, metadata !5, metadata !20} ; [ DW_TAG_arg_variable ] [zeta] [line 643]
-!152 = metadata !{metadata !"0x101\00dtemp\0067109508\000", metadata !145, metadata !5, metadata !105} ; [ DW_TAG_arg_variable ] [dtemp] [line 644]
-!153 = metadata !{metadata !"0x100\00m\00653\000", metadata !145, metadata !5, metadata !8} ; [ DW_TAG_auto_variable ] [m] [line 653]
-!154 = metadata !{metadata !"0x2e\00set_constants\00set_constants\00\002191\001\001\000\006\00256\001\002191", metadata !1, metadata !5, metadata !115, null, null, null, null, metadata !2} ; [ DW_TAG_subprogram ] [line 2191] [local] [def] [set_constants]
-!155 = metadata !{metadata !"0x2e\00lhsinit\00lhsinit\00\00855\001\001\000\006\00256\001\00855", metadata !1, metadata !5, metadata !115, null, null, null, null, metadata !156} ; [ DW_TAG_subprogram ] [line 855] [local] [def] [lhsinit]
-!156 = metadata !{metadata !157, metadata !158, metadata !159, metadata !160, metadata !161}
-!157 = metadata !{metadata !"0x100\00i\00857\000", metadata !155, metadata !5, metadata !8} ; [ DW_TAG_auto_variable ] [i] [line 857]
-!158 = metadata !{metadata !"0x100\00j\00857\000", metadata !155, metadata !5, metadata !8} ; [ DW_TAG_auto_variable ] [j] [line 857]
-!159 = metadata !{metadata !"0x100\00k\00857\000", metadata !155, metadata !5, metadata !8} ; [ DW_TAG_auto_variable ] [k] [line 857]
-!160 = metadata !{metadata !"0x100\00m\00857\000", metadata !155, metadata !5, metadata !8} ; [ DW_TAG_auto_variable ] [m] [line 857]
-!161 = metadata !{metadata !"0x100\00n\00857\000", metadata !155, metadata !5, metadata !8} ; [ DW_TAG_auto_variable ] [n] [line 857]
-!162 = metadata !{metadata !"0x2e\00initialize\00initialize\00\00669\001\001\000\006\00256\001\00669", metadata !1, metadata !5, metadata !115, null, null, null, null, metadata !163} ; [ DW_TAG_subprogram ] [line 669] [local] [def] [initialize]
-!163 = metadata !{metadata !164, metadata !165, metadata !166, metadata !167, metadata !168, metadata !169, metadata !170, metadata !171, metadata !172, metadata !173, metadata !174, metadata !179, metadata !180, metadata !181, metadata !182}
-!164 = metadata !{metadata !"0x100\00i\00679\000", metadata !162, metadata !5, metadata !8} ; [ DW_TAG_auto_variable ] [i] [line 679]
-!165 = metadata !{metadata !"0x100\00j\00679\000", metadata !162, metadata !5, metadata !8} ; [ DW_TAG_auto_variable ] [j] [line 679]
-!166 = metadata !{metadata !"0x100\00k\00679\000", metadata !162, metadata !5, metadata !8} ; [ DW_TAG_auto_variable ] [k] [line 679]
-!167 = metadata !{metadata !"0x100\00m\00679\000", metadata !162, metadata !5, metadata !8} ; [ DW_TAG_auto_variable ] [m] [line 679]
-!168 = metadata !{metadata !"0x100\00ix\00679\000", metadata !162, metadata !5, metadata !8} ; [ DW_TAG_auto_variable ] [ix] [line 679]
-!169 = metadata !{metadata !"0x100\00iy\00679\000", metadata !162, metadata !5, metadata !8} ; [ DW_TAG_auto_variable ] [iy] [line 679]
-!170 = metadata !{metadata !"0x100\00iz\00679\000", metadata !162, metadata !5, metadata !8} ; [ DW_TAG_auto_variable ] [iz] [line 679]
-!171 = metadata !{metadata !"0x100\00xi\00680\000", metadata !162, metadata !5, metadata !20} ; [ DW_TAG_auto_variable ] [xi] [line 680]
-!172 = metadata !{metadata !"0x100\00eta\00680\000", metadata !162, metadata !5, metadata !20} ; [ DW_TAG_auto_variable ] [eta] [line 680]
-!173 = metadata !{metadata !"0x100\00zeta\00680\000", metadata !162, metadata !5, metadata !20} ; [ DW_TAG_auto_variable ] [zeta] [line 680]
-!174 = metadata !{metadata !"0x100\00Pface\00680\000", metadata !162, metadata !5, metadata !175} ; [ DW_TAG_auto_variable ] [Pface] [line 680]
-!175 = metadata !{metadata !"0x1\00\000\001920\0064\000\000", null, null, metadata !20, metadata !176, i32 0, null, null, null} ; [ DW_TAG_array_type ] [line 0, size 1920, align 64, offset 0] [from double]
-!176 = metadata !{metadata !177, metadata !178, metadata !93}
-!177 = metadata !{metadata !"0x21\000\002"} ; [ DW_TAG_subrange_type ] [0, 1]
-!178 = metadata !{metadata !"0x21\000\003"} ; [ DW_TAG_subrange_type ] [0, 2]
-!179 = metadata !{metadata !"0x100\00Pxi\00680\000", metadata !162, metadata !5, metadata !20} ; [ DW_TAG_auto_variable ] [Pxi] [line 680]
-!180 = metadata !{metadata !"0x100\00Peta\00680\000", metadata !162, metadata !5, metadata !20} ; [ DW_TAG_auto_variable ] [Peta] [line 680]
-!181 = metadata !{metadata !"0x100\00Pzeta\00680\000", metadata !162, metadata !5, metadata !20} ; [ DW_TAG_auto_variable ] [Pzeta] [line 680]
-!182 = metadata !{metadata !"0x100\00temp\00680\000", metadata !162, metadata !5, metadata !91} ; [ DW_TAG_auto_variable ] [temp] [line 680]
-!183 = metadata !{metadata !"0x2e\00exact_rhs\00exact_rhs\00\00301\001\001\000\006\00256\001\00301", metadata !1, metadata !5, metadata !115, null, null, null, null, metadata !184} ; [ DW_TAG_subprogram ] [line 301] [local] [def] [exact_rhs]
-!184 = metadata !{metadata !185, metadata !186, metadata !187, metadata !188, metadata !189, metadata !190, metadata !191, metadata !192, metadata !193, metadata !194, metadata !195, metadata !196, metadata !197, metadata !198, metadata !199}
-!185 = metadata !{metadata !"0x100\00dtemp\00310\000", metadata !183, metadata !5, metadata !91} ; [ DW_TAG_auto_variable ] [dtemp] [line 310]
-!186 = metadata !{metadata !"0x100\00xi\00310\000", metadata !183, metadata !5, metadata !20} ; [ DW_TAG_auto_variable ] [xi] [line 310]
-!187 = metadata !{metadata !"0x100\00eta\00310\000", metadata !183, metadata !5, metadata !20} ; [ DW_TAG_auto_variable ] [eta] [line 310]
-!188 = metadata !{metadata !"0x100\00zeta\00310\000", metadata !183, metadata !5, metadata !20} ; [ DW_TAG_auto_variable ] [zeta] [line 310]
-!189 = metadata !{metadata !"0x100\00dtpp\00310\000", metadata !183, metadata !5, metadata !20} ; [ DW_TAG_auto_variable ] [dtpp] [line 310]
-!190 = metadata !{metadata !"0x100\00m\00311\000", metadata !183, metadata !5, metadata !8} ; [ DW_TAG_auto_variable ] [m] [line 311]
-!191 = metadata !{metadata !"0x100\00i\00311\000", metadata !183, metadata !5, metadata !8} ; [ DW_TAG_auto_variable ] [i] [line 311]
-!192 = metadata !{metadata !"0x100\00j\00311\000", metadata !183, metadata !5, metadata !8} ; [ DW_TAG_auto_variable ] [j] [line 311]
-!193 = metadata !{metadata !"0x100\00k\00311\000", metadata !183, metadata !5, metadata !8} ; [ DW_TAG_auto_variable ] [k] [line 311]
-!194 = metadata !{metadata !"0x100\00ip1\00311\000", metadata !183, metadata !5, metadata !8} ; [ DW_TAG_auto_variable ] [ip1] [line 311]
-!195 = metadata !{metadata !"0x100\00im1\00311\000", metadata !183, metadata !5, metadata !8} ; [ DW_TAG_auto_variable ] [im1] [line 311]
-!196 = metadata !{metadata !"0x100\00jp1\00311\000", metadata !183, metadata !5, metadata !8} ; [ DW_TAG_auto_variable ] [jp1] [line 311]
-!197 = metadata !{metadata !"0x100\00jm1\00311\000", metadata !183, metadata !5, metadata !8} ; [ DW_TAG_auto_variable ] [jm1] [line 311]
-!198 = metadata !{metadata !"0x100\00km1\00311\000", metadata !183, metadata !5, metadata !8} ; [ DW_TAG_auto_variable ] [km1] [line 311]
-!199 = metadata !{metadata !"0x100\00kp1\00311\000", metadata !183, metadata !5, metadata !8} ; [ DW_TAG_auto_variable ] [kp1] [line 311]
-!200 = metadata !{metadata !"0x2e\00adi\00adi\00\00210\001\001\000\006\00256\001\00210", metadata !1, metadata !5, metadata !115, null, null, null, null, metadata !2} ; [ DW_TAG_subprogram ] [line 210] [local] [def] [adi]
-!201 = metadata !{metadata !"0x2e\00add\00add\00\00187\001\001\000\006\00256\001\00187", metadata !1, metadata !5, metadata !115, null, null, null, null, metadata !202} ; [ DW_TAG_subprogram ] [line 187] [local] [def] [add]
-!202 = metadata !{metadata !203, metadata !204, metadata !205, metadata !206}
-!203 = metadata !{metadata !"0x100\00i\00193\000", metadata !201, metadata !5, metadata !8} ; [ DW_TAG_auto_variable ] [i] [line 193]
-!204 = metadata !{metadata !"0x100\00j\00193\000", metadata !201, metadata !5, metadata !8} ; [ DW_TAG_auto_variable ] [j] [line 193]
-!205 = metadata !{metadata !"0x100\00k\00193\000", metadata !201, metadata !5, metadata !8} ; [ DW_TAG_auto_variable ] [k] [line 193]
-!206 = metadata !{metadata !"0x100\00m\00193\000", metadata !201, metadata !5, metadata !8} ; [ DW_TAG_auto_variable ] [m] [line 193]
-!207 = metadata !{metadata !"0x2e\00z_solve\00z_solve\00\003457\001\001\000\006\00256\001\003457", metadata !1, metadata !5, metadata !115, null, null, null, null, metadata !2} ; [ DW_TAG_subprogram ] [line 3457] [local] [def] [z_solve]
-!208 = metadata !{metadata !"0x2e\00z_backsubstitute\00z_backsubstitute\00\003480\001\001\000\006\00256\001\003480", metadata !1, metadata !5, metadata !115, null, null, null, null, metadata !209} ; [ DW_TAG_subprogram ] [line 3480] [local] [def] [z_backsubstitute]
-!209 = metadata !{metadata !210, metadata !211, metadata !212, metadata !213, metadata !214}
-!210 = metadata !{metadata !"0x100\00i\003492\000", metadata !208, metadata !5, metadata !8} ; [ DW_TAG_auto_variable ] [i] [line 3492]
-!211 = metadata !{metadata !"0x100\00j\003492\000", metadata !208, metadata !5, metadata !8} ; [ DW_TAG_auto_variable ] [j] [line 3492]
-!212 = metadata !{metadata !"0x100\00k\003492\000", metadata !208, metadata !5, metadata !8} ; [ DW_TAG_auto_variable ] [k] [line 3492]
-!213 = metadata !{metadata !"0x100\00m\003492\000", metadata !208, metadata !5, metadata !8} ; [ DW_TAG_auto_variable ] [m] [line 3492]
-!214 = metadata !{metadata !"0x100\00n\003492\000", metadata !208, metadata !5, metadata !8} ; [ DW_TAG_auto_variable ] [n] [line 3492]
-!215 = metadata !{metadata !"0x2e\00z_solve_cell\00z_solve_cell\00\003512\001\001\000\006\00256\001\003512", metadata !1, metadata !5, metadata !115, null, null, null, null, metadata !216} ; [ DW_TAG_subprogram ] [line 3512] [local] [def] [z_solve_cell]
-!216 = metadata !{metadata !217, metadata !218, metadata !219, metadata !220}
-!217 = metadata !{metadata !"0x100\00i\003527\000", metadata !215, metadata !5, metadata !8} ; [ DW_TAG_auto_variable ] [i] [line 3527]
-!218 = metadata !{metadata !"0x100\00j\003527\000", metadata !215, metadata !5, metadata !8} ; [ DW_TAG_auto_variable ] [j] [line 3527]
-!219 = metadata !{metadata !"0x100\00k\003527\000", metadata !215, metadata !5, metadata !8} ; [ DW_TAG_auto_variable ] [k] [line 3527]
-!220 = metadata !{metadata !"0x100\00ksize\003527\000", metadata !215, metadata !5, metadata !8} ; [ DW_TAG_auto_variable ] [ksize] [line 3527]
-!221 = metadata !{metadata !"0x2e\00binvrhs\00binvrhs\00\003154\001\001\000\006\00256\001\003154", metadata !1, metadata !5, metadata !222, null, null, null, null, metadata !225} ; [ DW_TAG_subprogram ] [line 3154] [local] [def] [binvrhs]
-!222 = metadata !{metadata !"0x15\00\000\000\000\000\000\000", i32 0, null, null, metadata !223, null, null, null} ; [ DW_TAG_subroutine_type ] [line 0, size 0, align 0, offset 0] [from ]
-!223 = metadata !{null, metadata !224, metadata !105}
-!224 = metadata !{metadata !"0xf\00\000\0064\0064\000\000", null, null, metadata !91} ; [ DW_TAG_pointer_type ] [line 0, size 64, align 64, offset 0] [from ]
-!225 = metadata !{metadata !226, metadata !227, metadata !228, metadata !229}
-!226 = metadata !{metadata !"0x101\00lhs\0016780370\000", metadata !221, metadata !5, metadata !224} ; [ DW_TAG_arg_variable ] [lhs] [line 3154]
-!227 = metadata !{metadata !"0x101\00r\0033557586\000", metadata !221, metadata !5, metadata !105} ; [ DW_TAG_arg_variable ] [r] [line 3154]
-!228 = metadata !{metadata !"0x100\00pivot\003159\000", metadata !221, metadata !5, metadata !20} ; [ DW_TAG_auto_variable ] [pivot] [line 3159]
-!229 = metadata !{metadata !"0x100\00coeff\003159\000", metadata !221, metadata !5, metadata !20} ; [ DW_TAG_auto_variable ] [coeff] [line 3159]
-!230 = metadata !{metadata !"0x2e\00matmul_sub\00matmul_sub\00\002841\001\001\000\006\00256\001\002842", metadata !1, metadata !5, metadata !231, null, null, null, null, metadata !233} ; [ DW_TAG_subprogram ] [line 2841] [local] [def] [scope 2842] [matmul_sub]
-!231 = metadata !{metadata !"0x15\00\000\000\000\000\000\000", i32 0, null, null, metadata !232, null, null, null} ; [ DW_TAG_subroutine_type ] [line 0, size 0, align 0, offset 0] [from ]
-!232 = metadata !{null, metadata !224, metadata !224, metadata !224}
-!233 = metadata !{metadata !234, metadata !235, metadata !236, metadata !237}
-!234 = metadata !{metadata !"0x101\00ablock\0016780057\000", metadata !230, metadata !5, metadata !224} ; [ DW_TAG_arg_variable ] [ablock] [line 2841]
-!235 = metadata !{metadata !"0x101\00bblock\0033557273\000", metadata !230, metadata !5, metadata !224} ; [ DW_TAG_arg_variable ] [bblock] [line 2841]
-!236 = metadata !{metadata !"0x101\00cblock\0050334490\000", metadata !230, metadata !5, metadata !224} ; [ DW_TAG_arg_variable ] [cblock] [line 2842]
-!237 = metadata !{metadata !"0x100\00j\002851\000", metadata !230, metadata !5, metadata !8} ; [ DW_TAG_auto_variable ] [j] [line 2851]
-!238 = metadata !{metadata !"0x2e\00matvec_sub\00matvec_sub\00\002814\001\001\000\006\00256\001\002814", metadata !1, metadata !5, metadata !239, null, null, null, null, metadata !241} ; [ DW_TAG_subprogram ] [line 2814] [local] [def] [matvec_sub]
-!239 = metadata !{metadata !"0x15\00\000\000\000\000\000\000", i32 0, null, null, metadata !240, null, null, null} ; [ DW_TAG_subroutine_type ] [line 0, size 0, align 0, offset 0] [from ]
-!240 = metadata !{null, metadata !224, metadata !105, metadata !105}
-!241 = metadata !{metadata !242, metadata !243, metadata !244, metadata !245}
-!242 = metadata !{metadata !"0x101\00ablock\0016780030\000", metadata !238, metadata !5, metadata !224} ; [ DW_TAG_arg_variable ] [ablock] [line 2814]
-!243 = metadata !{metadata !"0x101\00avec\0033557246\000", metadata !238, metadata !5, metadata !105} ; [ DW_TAG_arg_variable ] [avec] [line 2814]
-!244 = metadata !{metadata !"0x101\00bvec\0050334462\000", metadata !238, metadata !5, metadata !105} ; [ DW_TAG_arg_variable ] [bvec] [line 2814]
-!245 = metadata !{metadata !"0x100\00i\002823\000", metadata !238, metadata !5, metadata !8} ; [ DW_TAG_auto_variable ] [i] [line 2823]
-!246 = metadata !{metadata !"0x2e\00binvcrhs\00binvcrhs\00\002885\001\001\000\006\00256\001\002885", metadata !1, metadata !5, metadata !247, null, null, null, null, metadata !249} ; [ DW_TAG_subprogram ] [line 2885] [local] [def] [binvcrhs]
-!247 = metadata !{metadata !"0x15\00\000\000\000\000\000\000", i32 0, null, null, metadata !248, null, null, null} ; [ DW_TAG_subroutine_type ] [line 0, size 0, align 0, offset 0] [from ]
-!248 = metadata !{null, metadata !224, metadata !224, metadata !105}
-!249 = metadata !{metadata !250, metadata !251, metadata !252, metadata !253, metadata !254}
-!250 = metadata !{metadata !"0x101\00lhs\0016780101\000", metadata !246, metadata !5, metadata !224} ; [ DW_TAG_arg_variable ] [lhs] [line 2885]
-!251 = metadata !{metadata !"0x101\00c\0033557317\000", metadata !246, metadata !5, metadata !224} ; [ DW_TAG_arg_variable ] [c] [line 2885]
-!252 = metadata !{metadata !"0x101\00r\0050334533\000", metadata !246, metadata !5, metadata !105} ; [ DW_TAG_arg_variable ] [r] [line 2885]
-!253 = metadata !{metadata !"0x100\00pivot\002890\000", metadata !246, metadata !5, metadata !20} ; [ DW_TAG_auto_variable ] [pivot] [line 2890]
-!254 = metadata !{metadata !"0x100\00coeff\002890\000", metadata !246, metadata !5, metadata !20} ; [ DW_TAG_auto_variable ] [coeff] [line 2890]
-!255 = metadata !{metadata !"0x2e\00lhsz\00lhsz\00\001475\001\001\000\006\00256\001\001475", metadata !1, metadata !5, metadata !115, null, null, null, null, metadata !256} ; [ DW_TAG_subprogram ] [line 1475] [local] [def] [lhsz]
-!256 = metadata !{metadata !257, metadata !258, metadata !259}
-!257 = metadata !{metadata !"0x100\00i\001484\000", metadata !255, metadata !5, metadata !8} ; [ DW_TAG_auto_variable ] [i] [line 1484]
-!258 = metadata !{metadata !"0x100\00j\001484\000", metadata !255, metadata !5, metadata !8} ; [ DW_TAG_auto_variable ] [j] [line 1484]
-!259 = metadata !{metadata !"0x100\00k\001484\000", metadata !255, metadata !5, metadata !8} ; [ DW_TAG_auto_variable ] [k] [line 1484]
-!260 = metadata !{metadata !"0x2e\00y_solve\00y_solve\00\003299\001\001\000\006\00256\001\003299", metadata !1, metadata !5, metadata !115, null, null, null, null, metadata !2} ; [ DW_TAG_subprogram ] [line 3299] [local] [def] [y_solve]
-!261 = metadata !{metadata !"0x2e\00y_backsubstitute\00y_backsubstitute\00\003323\001\001\000\006\00256\001\003323", metadata !1, metadata !5, metadata !115, null, null, null, null, metadata !262} ; [ DW_TAG_subprogram ] [line 3323] [local] [def] [y_backsubstitute]
-!262 = metadata !{metadata !263, metadata !264, metadata !265, metadata !266, metadata !267}
-!263 = metadata !{metadata !"0x100\00i\003335\000", metadata !261, metadata !5, metadata !8} ; [ DW_TAG_auto_variable ] [i] [line 3335]
-!264 = metadata !{metadata !"0x100\00j\003335\000", metadata !261, metadata !5, metadata !8} ; [ DW_TAG_auto_variable ] [j] [line 3335]
-!265 = metadata !{metadata !"0x100\00k\003335\000", metadata !261, metadata !5, metadata !8} ; [ DW_TAG_auto_variable ] [k] [line 3335]
-!266 = metadata !{metadata !"0x100\00m\003335\000", metadata !261, metadata !5, metadata !8} ; [ DW_TAG_auto_variable ] [m] [line 3335]
-!267 = metadata !{metadata !"0x100\00n\003335\000", metadata !261, metadata !5, metadata !8} ; [ DW_TAG_auto_variable ] [n] [line 3335]
-!268 = metadata !{metadata !"0x2e\00y_solve_cell\00y_solve_cell\00\003355\001\001\000\006\00256\001\003355", metadata !1, metadata !5, metadata !115, null, null, null, null, metadata !269} ; [ DW_TAG_subprogram ] [line 3355] [local] [def] [y_solve_cell]
-!269 = metadata !{metadata !270, metadata !271, metadata !272, metadata !273}
-!270 = metadata !{metadata !"0x100\00i\003370\000", metadata !268, metadata !5, metadata !8} ; [ DW_TAG_auto_variable ] [i] [line 3370]
-!271 = metadata !{metadata !"0x100\00j\003370\000", metadata !268, metadata !5, metadata !8} ; [ DW_TAG_auto_variable ] [j] [line 3370]
-!272 = metadata !{metadata !"0x100\00k\003370\000", metadata !268, metadata !5, metadata !8} ; [ DW_TAG_auto_variable ] [k] [line 3370]
-!273 = metadata !{metadata !"0x100\00jsize\003370\000", metadata !268, metadata !5, metadata !8} ; [ DW_TAG_auto_variable ] [jsize] [line 3370]
-!274 = metadata !{metadata !"0x2e\00lhsy\00lhsy\00\001181\001\001\000\006\00256\001\001181", metadata !1, metadata !5, metadata !115, null, null, null, null, metadata !275} ; [ DW_TAG_subprogram ] [line 1181] [local] [def] [lhsy]
-!275 = metadata !{metadata !276, metadata !277, metadata !278}
-!276 = metadata !{metadata !"0x100\00i\001190\000", metadata !274, metadata !5, metadata !8} ; [ DW_TAG_auto_variable ] [i] [line 1190]
-!277 = metadata !{metadata !"0x100\00j\001190\000", metadata !274, metadata !5, metadata !8} ; [ DW_TAG_auto_variable ] [j] [line 1190]
-!278 = metadata !{metadata !"0x100\00k\001190\000", metadata !274, metadata !5, metadata !8} ; [ DW_TAG_auto_variable ] [k] [line 1190]
-!279 = metadata !{metadata !"0x2e\00x_solve\00x_solve\00\002658\001\001\000\006\00256\001\002658", metadata !1, metadata !5, metadata !115, null, null, null, null, metadata !2} ; [ DW_TAG_subprogram ] [line 2658] [local] [def] [x_solve]
-!280 = metadata !{metadata !"0x2e\00x_backsubstitute\00x_backsubstitute\00\002684\001\001\000\006\00256\001\002684", metadata !1, metadata !5, metadata !115, null, null, null, null, metadata !281} ; [ DW_TAG_subprogram ] [line 2684] [local] [def] [x_backsubstitute]
-!281 = metadata !{metadata !282, metadata !283, metadata !284, metadata !285, metadata !286}
-!282 = metadata !{metadata !"0x100\00i\002696\000", metadata !280, metadata !5, metadata !8} ; [ DW_TAG_auto_variable ] [i] [line 2696]
-!283 = metadata !{metadata !"0x100\00j\002696\000", metadata !280, metadata !5, metadata !8} ; [ DW_TAG_auto_variable ] [j] [line 2696]
-!284 = metadata !{metadata !"0x100\00k\002696\000", metadata !280, metadata !5, metadata !8} ; [ DW_TAG_auto_variable ] [k] [line 2696]
-!285 = metadata !{metadata !"0x100\00m\002696\000", metadata !280, metadata !5, metadata !8} ; [ DW_TAG_auto_variable ] [m] [line 2696]
-!286 = metadata !{metadata !"0x100\00n\002696\000", metadata !280, metadata !5, metadata !8} ; [ DW_TAG_auto_variable ] [n] [line 2696]
-!287 = metadata !{metadata !"0x2e\00x_solve_cell\00x_solve_cell\00\002716\001\001\000\006\00256\001\002716", metadata !1, metadata !5, metadata !115, null, null, null, null, metadata !288} ; [ DW_TAG_subprogram ] [line 2716] [local] [def] [x_solve_cell]
-!288 = metadata !{metadata !289, metadata !290, metadata !291, metadata !292}
-!289 = metadata !{metadata !"0x100\00i\002728\000", metadata !287, metadata !5, metadata !8} ; [ DW_TAG_auto_variable ] [i] [line 2728]
-!290 = metadata !{metadata !"0x100\00j\002728\000", metadata !287, metadata !5, metadata !8} ; [ DW_TAG_auto_variable ] [j] [line 2728]
-!291 = metadata !{metadata !"0x100\00k\002728\000", metadata !287, metadata !5, metadata !8} ; [ DW_TAG_auto_variable ] [k] [line 2728]
-!292 = metadata !{metadata !"0x100\00isize\002728\000", metadata !287, metadata !5, metadata !8} ; [ DW_TAG_auto_variable ] [isize] [line 2728]
-!293 = metadata !{metadata !"0x2e\00lhsx\00lhsx\00\00898\001\001\000\006\00256\001\00898", metadata !1, metadata !5, metadata !115, null, null, null, null, metadata !294} ; [ DW_TAG_subprogram ] [line 898] [local] [def] [lhsx]
-!294 = metadata !{metadata !295, metadata !296, metadata !297}
-!295 = metadata !{metadata !"0x100\00i\00907\000", metadata !293, metadata !5, metadata !8} ; [ DW_TAG_auto_variable ] [i] [line 907]
-!296 = metadata !{metadata !"0x100\00j\00907\000", metadata !293, metadata !5, metadata !8} ; [ DW_TAG_auto_variable ] [j] [line 907]
-!297 = metadata !{metadata !"0x100\00k\00907\000", metadata !293, metadata !5, metadata !8} ; [ DW_TAG_auto_variable ] [k] [line 907]
-!298 = metadata !{metadata !299, metadata !304, metadata !305, metadata !309, metadata !310, metadata !311, metadata !312, metadata !313, metadata !314, metadata !315, metadata !316, metadata !317, metadata !318, metadata !319, metadata !320, metadata !321, metadata !322, metadata !323, metadata !324, metadata !325, metadata !326, metadata !327, metadata !328, metadata !329, metadata !330, metadata !331, metadata !332, metadata !333, metadata !334, metadata !335, metadata !336, metadata !337, metadata !338, metadata !339, metadata !340, metadata !341, metadata !342, metadata !343, metadata !347, metadata !350, metadata !351, metadata !352, metadata !353, metadata !354, metadata !355, metadata !356, metadata !360, metadata !361, metadata !362, metadata !363, metadata !364, metadata !365, metadata !366, metadata !367, metadata !368, metadata !369, metadata !370, metadata !371, metadata !372, metadata !373, metadata !374, metadata !375, metadata !376, metadata !377, metadata !378, metadata !379, metadata !380, metadata !381, metadata !382, metadata !383, metadata !384, metadata !385, metadata !386, metadata !387, metadata !388, metadata !389, metadata !390, metadata !391, metadata !392, metadata !393, metadata !394, metadata !395, metadata !396, metadata !397, metadata !398, metadata !399, metadata !400, metadata !401, metadata !402, metadata !403, metadata !404, metadata !405, metadata !406, metadata !407, metadata !408, metadata !409, metadata !410, metadata !411, metadata !412, metadata !413, metadata !414, metadata !415, metadata !416, metadata !417, metadata !418, metadata !419, metadata !422, metadata !426, metadata !427, metadata !430, metadata !431, metadata !434, metadata !435, metadata !436, metadata !437}
-!299 = metadata !{metadata !"0x34\00grid_points\00grid_points\00\0028\001\001", null, metadata !300, metadata !302, [3 x i32]* @grid_points, null} ; [ DW_TAG_variable ] [grid_points] [line 28] [local] [def]
-!300 = metadata !{metadata !"0x29", metadata !301} ; [ DW_TAG_file_type ] [/home/hfinkel/src/NPB2.3-omp-C/BT/./header.h]
-!301 = metadata !{metadata !"./header.h", metadata !"/home/hfinkel/src/NPB2.3-omp-C/BT"}
-!302 = metadata !{metadata !"0x1\00\000\0096\0032\000\000", null, null, metadata !8, metadata !303, i32 0, null, null, null} ; [ DW_TAG_array_type ] [line 0, size 96, align 32, offset 0] [from int]
-!303 = metadata !{metadata !178}
-!304 = metadata !{metadata !"0x34\00dt\00dt\00\0035\001\001", null, metadata !300, metadata !20, null, null} ; [ DW_TAG_variable ] [dt] [line 35] [local] [def]
-!305 = metadata !{metadata !"0x34\00rhs\00rhs\00\0068\001\001", null, metadata !300, metadata !306, null, null} ; [ DW_TAG_variable ] [rhs] [line 68] [local] [def]
-!306 = metadata !{metadata !"0x1\00\000\001385839040\0064\000\000", null, null, metadata !20, metadata !307, i32 0, null, null, null} ; [ DW_TAG_array_type ] [line 0, size 1385839040, align 64, offset 0] [from double]
-!307 = metadata !{metadata !308, metadata !308, metadata !308, metadata !93}
-!308 = metadata !{metadata !"0x21\000\00163"} ; [ DW_TAG_subrange_type ] [0, 162]
-!309 = metadata !{metadata !"0x34\00zzcon5\00zzcon5\00\0042\001\001", null, metadata !300, metadata !20, null, null} ; [ DW_TAG_variable ] [zzcon5] [line 42] [local] [def]
-!310 = metadata !{metadata !"0x34\00zzcon4\00zzcon4\00\0042\001\001", null, metadata !300, metadata !20, null, null} ; [ DW_TAG_variable ] [zzcon4] [line 42] [local] [def]
-!311 = metadata !{metadata !"0x34\00zzcon3\00zzcon3\00\0042\001\001", null, metadata !300, metadata !20, null, null} ; [ DW_TAG_variable ] [zzcon3] [line 42] [local] [def]
-!312 = metadata !{metadata !"0x34\00dz5tz1\00dz5tz1\00\0043\001\001", null, metadata !300, metadata !20, null, null} ; [ DW_TAG_variable ] [dz5tz1] [line 43] [local] [def]
-!313 = metadata !{metadata !"0x34\00dz4tz1\00dz4tz1\00\0043\001\001", null, metadata !300, metadata !20, null, null} ; [ DW_TAG_variable ] [dz4tz1] [line 43] [local] [def]
-!314 = metadata !{metadata !"0x34\00dz3tz1\00dz3tz1\00\0043\001\001", null, metadata !300, metadata !20, null, null} ; [ DW_TAG_variable ] [dz3tz1] [line 43] [local] [def]
-!315 = metadata !{metadata !"0x34\00zzcon2\00zzcon2\00\0042\001\001", null, metadata !300, metadata !20, null, null} ; [ DW_TAG_variable ] [zzcon2] [line 42] [local] [def]
-!316 = metadata !{metadata !"0x34\00dz2tz1\00dz2tz1\00\0043\001\001", null, metadata !300, metadata !20, null, null} ; [ DW_TAG_variable ] [dz2tz1] [line 43] [local] [def]
-!317 = metadata !{metadata !"0x34\00tz2\00tz2\00\0031\001\001", null, metadata !300, metadata !20, null, null} ; [ DW_TAG_variable ] [tz2] [line 31] [local] [def]
-!318 = metadata !{metadata !"0x34\00dz1tz1\00dz1tz1\00\0043\001\001", null, metadata !300, metadata !20, null, null} ; [ DW_TAG_variable ] [dz1tz1] [line 43] [local] [def]
-!319 = metadata !{metadata !"0x34\00yycon5\00yycon5\00\0040\001\001", null, metadata !300, metadata !20, null, null} ; [ DW_TAG_variable ] [yycon5] [line 40] [local] [def]
-!320 = metadata !{metadata !"0x34\00yycon4\00yycon4\00\0040\001\001", null, metadata !300, metadata !20, null, null} ; [ DW_TAG_variable ] [yycon4] [line 40] [local] [def]
-!321 = metadata !{metadata !"0x34\00yycon3\00yycon3\00\0040\001\001", null, metadata !300, metadata !20, null, null} ; [ DW_TAG_variable ] [yycon3] [line 40] [local] [def]
-!322 = metadata !{metadata !"0x34\00dy5ty1\00dy5ty1\00\0041\001\001", null, metadata !300, metadata !20, null, null} ; [ DW_TAG_variable ] [dy5ty1] [line 41] [local] [def]
-!323 = metadata !{metadata !"0x34\00dy4ty1\00dy4ty1\00\0041\001\001", null, metadata !300, metadata !20, null, null} ; [ DW_TAG_variable ] [dy4ty1] [line 41] [local] [def]
-!324 = metadata !{metadata !"0x34\00dy3ty1\00dy3ty1\00\0041\001\001", null, metadata !300, metadata !20, null, null} ; [ DW_TAG_variable ] [dy3ty1] [line 41] [local] [def]
-!325 = metadata !{metadata !"0x34\00yycon2\00yycon2\00\0040\001\001", null, metadata !300, metadata !20, null, null} ; [ DW_TAG_variable ] [yycon2] [line 40] [local] [def]
-!326 = metadata !{metadata !"0x34\00dy2ty1\00dy2ty1\00\0041\001\001", null, metadata !300, metadata !20, null, null} ; [ DW_TAG_variable ] [dy2ty1] [line 41] [local] [def]
-!327 = metadata !{metadata !"0x34\00ty2\00ty2\00\0031\001\001", null, metadata !300, metadata !20, null, null} ; [ DW_TAG_variable ] [ty2] [line 31] [local] [def]
-!328 = metadata !{metadata !"0x34\00dy1ty1\00dy1ty1\00\0041\001\001", null, metadata !300, metadata !20, null, null} ; [ DW_TAG_variable ] [dy1ty1] [line 41] [local] [def]
-!329 = metadata !{metadata !"0x34\00dssp\00dssp\00\0035\001\001", null, metadata !300, metadata !20, null, null} ; [ DW_TAG_variable ] [dssp] [line 35] [local] [def]
-!330 = metadata !{metadata !"0x34\00c1\00c1\00\0045\001\001", null, metadata !300, metadata !20, null, null} ; [ DW_TAG_variable ] [c1] [line 45] [local] [def]
-!331 = metadata !{metadata !"0x34\00xxcon5\00xxcon5\00\0038\001\001", null, metadata !300, metadata !20, null, null} ; [ DW_TAG_variable ] [xxcon5] [line 38] [local] [def]
-!332 = metadata !{metadata !"0x34\00xxcon4\00xxcon4\00\0038\001\001", null, metadata !300, metadata !20, null, null} ; [ DW_TAG_variable ] [xxcon4] [line 38] [local] [def]
-!333 = metadata !{metadata !"0x34\00xxcon3\00xxcon3\00\0038\001\001", null, metadata !300, metadata !20, null, null} ; [ DW_TAG_variable ] [xxcon3] [line 38] [local] [def]
-!334 = metadata !{metadata !"0x34\00dx5tx1\00dx5tx1\00\0039\001\001", null, metadata !300, metadata !20, null, null} ; [ DW_TAG_variable ] [dx5tx1] [line 39] [local] [def]
-!335 = metadata !{metadata !"0x34\00dx4tx1\00dx4tx1\00\0039\001\001", null, metadata !300, metadata !20, null, null} ; [ DW_TAG_variable ] [dx4tx1] [line 39] [local] [def]
-!336 = metadata !{metadata !"0x34\00dx3tx1\00dx3tx1\00\0039\001\001", null, metadata !300, metadata !20, null, null} ; [ DW_TAG_variable ] [dx3tx1] [line 39] [local] [def]
-!337 = metadata !{metadata !"0x34\00c2\00c2\00\0045\001\001", null, metadata !300, metadata !20, null, null} ; [ DW_TAG_variable ] [c2] [line 45] [local] [def]
-!338 = metadata !{metadata !"0x34\00con43\00con43\00\0048\001\001", null, metadata !300, metadata !20, null, null} ; [ DW_TAG_variable ] [con43] [line 48] [local] [def]
-!339 = metadata !{metadata !"0x34\00xxcon2\00xxcon2\00\0038\001\001", null, metadata !300, metadata !20, null, null} ; [ DW_TAG_variable ] [xxcon2] [line 38] [local] [def]
-!340 = metadata !{metadata !"0x34\00dx2tx1\00dx2tx1\00\0039\001\001", null, metadata !300, metadata !20, null, null} ; [ DW_TAG_variable ] [dx2tx1] [line 39] [local] [def]
-!341 = metadata !{metadata !"0x34\00tx2\00tx2\00\0031\001\001", null, metadata !300, metadata !20, null, null} ; [ DW_TAG_variable ] [tx2] [line 31] [local] [def]
-!342 = metadata !{metadata !"0x34\00dx1tx1\00dx1tx1\00\0039\001\001", null, metadata !300, metadata !20, null, null} ; [ DW_TAG_variable ] [dx1tx1] [line 39] [local] [def]
-!343 = metadata !{metadata !"0x34\00forcing\00forcing\00\0066\001\001", null, metadata !300, metadata !344, null, null} ; [ DW_TAG_variable ] [forcing] [line 66] [local] [def]
-!344 = metadata !{metadata !"0x1\00\000\001663006848\0064\000\000", null, null, metadata !20, metadata !345, i32 0, null, null, null} ; [ DW_TAG_array_type ] [line 0, size 1663006848, align 64, offset 0] [from double]
-!345 = metadata !{metadata !308, metadata !308, metadata !308, metadata !346}
-!346 = metadata !{metadata !"0x21\000\006"} ; [ DW_TAG_subrange_type ] [0, 5]
-!347 = metadata !{metadata !"0x34\00qs\00qs\00\0063\001\001", null, metadata !300, metadata !348, null, null} ; [ DW_TAG_variable ] [qs] [line 63] [local] [def]
-!348 = metadata !{metadata !"0x1\00\000\00277167808\0064\000\000", null, null, metadata !20, metadata !349, i32 0, null, null, null} ; [ DW_TAG_array_type ] [line 0, size 277167808, align 64, offset 0] [from double]
-!349 = metadata !{metadata !308, metadata !308, metadata !308}
-!350 = metadata !{metadata !"0x34\00square\00square\00\0065\001\001", null, metadata !300, metadata !348, null, null} ; [ DW_TAG_variable ] [square] [line 65] [local] [def]
-!351 = metadata !{metadata !"0x34\00ws\00ws\00\0062\001\001", null, metadata !300, metadata !348, null, null} ; [ DW_TAG_variable ] [ws] [line 62] [local] [def]
-!352 = metadata !{metadata !"0x34\00vs\00vs\00\0061\001\001", null, metadata !300, metadata !348, null, null} ; [ DW_TAG_variable ] [vs] [line 61] [local] [def]
-!353 = metadata !{metadata !"0x34\00us\00us\00\0060\001\001", null, metadata !300, metadata !348, null, null} ; [ DW_TAG_variable ] [us] [line 60] [local] [def]
-!354 = metadata !{metadata !"0x34\00rho_i\00rho_i\00\0064\001\001", null, metadata !300, metadata !348, null, null} ; [ DW_TAG_variable ] [rho_i] [line 64] [local] [def]
-!355 = metadata !{metadata !"0x34\00u\00u\00\0067\001\001", null, metadata !300, metadata !306, null, null} ; [ DW_TAG_variable ] [u] [line 67] [local] [def]
-!356 = metadata !{metadata !"0x34\00ce\00ce\00\0036\001\001", null, metadata !300, metadata !357, null, null} ; [ DW_TAG_variable ] [ce] [line 36] [local] [def]
-!357 = metadata !{metadata !"0x1\00\000\004160\0064\000\000", null, null, metadata !20, metadata !358, i32 0, null, null, null} ; [ DW_TAG_array_type ] [line 0, size 4160, align 64, offset 0] [from double]
-!358 = metadata !{metadata !93, metadata !359}
-!359 = metadata !{metadata !"0x21\000\0013"} ; [ DW_TAG_subrange_type ] [0, 12]
-!360 = metadata !{metadata !"0x34\00dnzm1\00dnzm1\00\0044\001\001", null, metadata !300, metadata !20, null, null} ; [ DW_TAG_variable ] [dnzm1] [line 44] [local] [def]
-!361 = metadata !{metadata !"0x34\00dnym1\00dnym1\00\0044\001\001", null, metadata !300, metadata !20, null, null} ; [ DW_TAG_variable ] [dnym1] [line 44] [local] [def]
-!362 = metadata !{metadata !"0x34\00dnxm1\00dnxm1\00\0044\001\001", null, metadata !300, metadata !20, null, null} ; [ DW_TAG_variable ] [dnxm1] [line 44] [local] [def]
-!363 = metadata !{metadata !"0x34\00zzcon1\00zzcon1\00\0042\001\001", null, metadata !300, metadata !20, null, null} ; [ DW_TAG_variable ] [zzcon1] [line 42] [local] [def]
-!364 = metadata !{metadata !"0x34\00yycon1\00yycon1\00\0040\001\001", null, metadata !300, metadata !20, null, null} ; [ DW_TAG_variable ] [yycon1] [line 40] [local] [def]
-!365 = metadata !{metadata !"0x34\00xxcon1\00xxcon1\00\0038\001\001", null, metadata !300, metadata !20, null, null} ; [ DW_TAG_variable ] [xxcon1] [line 38] [local] [def]
-!366 = metadata !{metadata !"0x34\00con16\00con16\00\0048\001\001", null, metadata !300, metadata !20, null, null} ; [ DW_TAG_variable ] [con16] [line 48] [local] [def]
-!367 = metadata !{metadata !"0x34\00c2iv\00c2iv\00\0048\001\001", null, metadata !300, metadata !20, null, null} ; [ DW_TAG_variable ] [c2iv] [line 48] [local] [def]
-!368 = metadata !{metadata !"0x34\00c3c4tz3\00c3c4tz3\00\0048\001\001", null, metadata !300, metadata !20, null, null} ; [ DW_TAG_variable ] [c3c4tz3] [line 48] [local] [def]
-!369 = metadata !{metadata !"0x34\00c3c4ty3\00c3c4ty3\00\0048\001\001", null, metadata !300, metadata !20, null, null} ; [ DW_TAG_variable ] [c3c4ty3] [line 48] [local] [def]
-!370 = metadata !{metadata !"0x34\00c3c4tx3\00c3c4tx3\00\0048\001\001", null, metadata !300, metadata !20, null, null} ; [ DW_TAG_variable ] [c3c4tx3] [line 48] [local] [def]
-!371 = metadata !{metadata !"0x34\00comz6\00comz6\00\0047\001\001", null, metadata !300, metadata !20, null, null} ; [ DW_TAG_variable ] [comz6] [line 47] [local] [def]
-!372 = metadata !{metadata !"0x34\00comz5\00comz5\00\0047\001\001", null, metadata !300, metadata !20, null, null} ; [ DW_TAG_variable ] [comz5] [line 47] [local] [def]
-!373 = metadata !{metadata !"0x34\00comz4\00comz4\00\0047\001\001", null, metadata !300, metadata !20, null, null} ; [ DW_TAG_variable ] [comz4] [line 47] [local] [def]
-!374 = metadata !{metadata !"0x34\00comz1\00comz1\00\0047\001\001", null, metadata !300, metadata !20, null, null} ; [ DW_TAG_variable ] [comz1] [line 47] [local] [def]
-!375 = metadata !{metadata !"0x34\00dtdssp\00dtdssp\00\0045\001\001", null, metadata !300, metadata !20, null, null} ; [ DW_TAG_variable ] [dtdssp] [line 45] [local] [def]
-!376 = metadata !{metadata !"0x34\00c2dttz1\00c2dttz1\00\0047\001\001", null, metadata !300, metadata !20, null, null} ; [ DW_TAG_variable ] [c2dttz1] [line 47] [local] [def]
-!377 = metadata !{metadata !"0x34\00c2dtty1\00c2dtty1\00\0047\001\001", null, metadata !300, metadata !20, null, null} ; [ DW_TAG_variable ] [c2dtty1] [line 47] [local] [def]
-!378 = metadata !{metadata !"0x34\00c2dttx1\00c2dttx1\00\0047\001\001", null, metadata !300, metadata !20, null, null} ; [ DW_TAG_variable ] [c2dttx1] [line 47] [local] [def]
-!379 = metadata !{metadata !"0x34\00dttz2\00dttz2\00\0046\001\001", null, metadata !300, metadata !20, null, null} ; [ DW_TAG_variable ] [dttz2] [line 46] [local] [def]
-!380 = metadata !{metadata !"0x34\00dttz1\00dttz1\00\0046\001\001", null, metadata !300, metadata !20, null, null} ; [ DW_TAG_variable ] [dttz1] [line 46] [local] [def]
-!381 = metadata !{metadata !"0x34\00dtty2\00dtty2\00\0046\001\001", null, metadata !300, metadata !20, null, null} ; [ DW_TAG_variable ] [dtty2] [line 46] [local] [def]
-!382 = metadata !{metadata !"0x34\00dtty1\00dtty1\00\0046\001\001", null, metadata !300, metadata !20, null, null} ; [ DW_TAG_variable ] [dtty1] [line 46] [local] [def]
-!383 = metadata !{metadata !"0x34\00dttx2\00dttx2\00\0046\001\001", null, metadata !300, metadata !20, null, null} ; [ DW_TAG_variable ] [dttx2] [line 46] [local] [def]
-!384 = metadata !{metadata !"0x34\00dttx1\00dttx1\00\0046\001\001", null, metadata !300, metadata !20, null, null} ; [ DW_TAG_variable ] [dttx1] [line 46] [local] [def]
-!385 = metadata !{metadata !"0x34\00c5dssp\00c5dssp\00\0045\001\001", null, metadata !300, metadata !20, null, null} ; [ DW_TAG_variable ] [c5dssp] [line 45] [local] [def]
-!386 = metadata !{metadata !"0x34\00c4dssp\00c4dssp\00\0045\001\001", null, metadata !300, metadata !20, null, null} ; [ DW_TAG_variable ] [c4dssp] [line 45] [local] [def]
-!387 = metadata !{metadata !"0x34\00dzmax\00dzmax\00\0037\001\001", null, metadata !300, metadata !20, null, null} ; [ DW_TAG_variable ] [dzmax] [line 37] [local] [def]
-!388 = metadata !{metadata !"0x34\00dymax\00dymax\00\0037\001\001", null, metadata !300, metadata !20, null, null} ; [ DW_TAG_variable ] [dymax] [line 37] [local] [def]
-!389 = metadata !{metadata !"0x34\00dxmax\00dxmax\00\0037\001\001", null, metadata !300, metadata !20, null, null} ; [ DW_TAG_variable ] [dxmax] [line 37] [local] [def]
-!390 = metadata !{metadata !"0x34\00dz5\00dz5\00\0034\001\001", null, metadata !300, metadata !20, null, null} ; [ DW_TAG_variable ] [dz5] [line 34] [local] [def]
-!391 = metadata !{metadata !"0x34\00dz4\00dz4\00\0034\001\001", null, metadata !300, metadata !20, null, null} ; [ DW_TAG_variable ] [dz4] [line 34] [local] [def]
-!392 = metadata !{metadata !"0x34\00dz3\00dz3\00\0034\001\001", null, metadata !300, metadata !20, null, null} ; [ DW_TAG_variable ] [dz3] [line 34] [local] [def]
-!393 = metadata !{metadata !"0x34\00dz2\00dz2\00\0034\001\001", null, metadata !300, metadata !20, null, null} ; [ DW_TAG_variable ] [dz2] [line 34] [local] [def]
-!394 = metadata !{metadata !"0x34\00dz1\00dz1\00\0034\001\001", null, metadata !300, metadata !20, null, null} ; [ DW_TAG_variable ] [dz1] [line 34] [local] [def]
-!395 = metadata !{metadata !"0x34\00dy5\00dy5\00\0033\001\001", null, metadata !300, metadata !20, null, null} ; [ DW_TAG_variable ] [dy5] [line 33] [local] [def]
-!396 = metadata !{metadata !"0x34\00dy4\00dy4\00\0033\001\001", null, metadata !300, metadata !20, null, null} ; [ DW_TAG_variable ] [dy4] [line 33] [local] [def]
-!397 = metadata !{metadata !"0x34\00dy3\00dy3\00\0033\001\001", null, metadata !300, metadata !20, null, null} ; [ DW_TAG_variable ] [dy3] [line 33] [local] [def]
-!398 = metadata !{metadata !"0x34\00dy2\00dy2\00\0033\001\001", null, metadata !300, metadata !20, null, null} ; [ DW_TAG_variable ] [dy2] [line 33] [local] [def]
-!399 = metadata !{metadata !"0x34\00dy1\00dy1\00\0033\001\001", null, metadata !300, metadata !20, null, null} ; [ DW_TAG_variable ] [dy1] [line 33] [local] [def]
-!400 = metadata !{metadata !"0x34\00dx5\00dx5\00\0032\001\001", null, metadata !300, metadata !20, null, null} ; [ DW_TAG_variable ] [dx5] [line 32] [local] [def]
-!401 = metadata !{metadata !"0x34\00dx4\00dx4\00\0032\001\001", null, metadata !300, metadata !20, null, null} ; [ DW_TAG_variable ] [dx4] [line 32] [local] [def]
-!402 = metadata !{metadata !"0x34\00dx3\00dx3\00\0032\001\001", null, metadata !300, metadata !20, null, null} ; [ DW_TAG_variable ] [dx3] [line 32] [local] [def]
-!403 = metadata !{metadata !"0x34\00dx2\00dx2\00\0032\001\001", null, metadata !300, metadata !20, null, null} ; [ DW_TAG_variable ] [dx2] [line 32] [local] [def]
-!404 = metadata !{metadata !"0x34\00dx1\00dx1\00\0032\001\001", null, metadata !300, metadata !20, null, null} ; [ DW_TAG_variable ] [dx1] [line 32] [local] [def]
-!405 = metadata !{metadata !"0x34\00tz3\00tz3\00\0031\001\001", null, metadata !300, metadata !20, null, null} ; [ DW_TAG_variable ] [tz3] [line 31] [local] [def]
-!406 = metadata !{metadata !"0x34\00tz1\00tz1\00\0031\001\001", null, metadata !300, metadata !20, null, null} ; [ DW_TAG_variable ] [tz1] [line 31] [local] [def]
-!407 = metadata !{metadata !"0x34\00ty3\00ty3\00\0031\001\001", null, metadata !300, metadata !20, null, null} ; [ DW_TAG_variable ] [ty3] [line 31] [local] [def]
-!408 = metadata !{metadata !"0x34\00ty1\00ty1\00\0031\001\001", null, metadata !300, metadata !20, null, null} ; [ DW_TAG_variable ] [ty1] [line 31] [local] [def]
-!409 = metadata !{metadata !"0x34\00tx3\00tx3\00\0031\001\001", null, metadata !300, metadata !20, null, null} ; [ DW_TAG_variable ] [tx3] [line 31] [local] [def]
-!410 = metadata !{metadata !"0x34\00tx1\00tx1\00\0031\001\001", null, metadata !300, metadata !20, null, null} ; [ DW_TAG_variable ] [tx1] [line 31] [local] [def]
-!411 = metadata !{metadata !"0x34\00conz1\00conz1\00\0045\001\001", null, metadata !300, metadata !20, null, null} ; [ DW_TAG_variable ] [conz1] [line 45] [local] [def]
-!412 = metadata !{metadata !"0x34\00c1345\00c1345\00\0044\001\001", null, metadata !300, metadata !20, null, null} ; [ DW_TAG_variable ] [c1345] [line 44] [local] [def]
-!413 = metadata !{metadata !"0x34\00c3c4\00c3c4\00\0044\001\001", null, metadata !300, metadata !20, null, null} ; [ DW_TAG_variable ] [c3c4] [line 44] [local] [def]
-!414 = metadata !{metadata !"0x34\00c1c5\00c1c5\00\0044\001\001", null, metadata !300, metadata !20, null, null} ; [ DW_TAG_variable ] [c1c5] [line 44] [local] [def]
-!415 = metadata !{metadata !"0x34\00c1c2\00c1c2\00\0044\001\001", null, metadata !300, metadata !20, null, null} ; [ DW_TAG_variable ] [c1c2] [line 44] [local] [def]
-!416 = metadata !{metadata !"0x34\00c5\00c5\00\0045\001\001", null, metadata !300, metadata !20, null, null} ; [ DW_TAG_variable ] [c5] [line 45] [local] [def]
-!417 = metadata !{metadata !"0x34\00c4\00c4\00\0045\001\001", null, metadata !300, metadata !20, null, null} ; [ DW_TAG_variable ] [c4] [line 45] [local] [def]
-!418 = metadata !{metadata !"0x34\00c3\00c3\00\0045\001\001", null, metadata !300, metadata !20, null, null} ; [ DW_TAG_variable ] [c3] [line 45] [local] [def]
-!419 = metadata !{metadata !"0x34\00lhs\00lhs\00\0069\001\001", null, metadata !300, metadata !420, null, null} ; [ DW_TAG_variable ] [lhs] [line 69] [local] [def]
-!420 = metadata !{metadata !"0x1\00\000\0020787585600\0064\000\000", null, null, metadata !20, metadata !421, i32 0, null, null, null} ; [ DW_TAG_array_type ] [line 0, size 20787585600, align 64, offset 0] [from double]
-!421 = metadata !{metadata !308, metadata !308, metadata !308, metadata !178, metadata !93, metadata !93}
-!422 = metadata !{metadata !"0x34\00q\00q\00\0073\001\001", null, metadata !300, metadata !423, null, null} ; [ DW_TAG_variable ] [q] [line 73] [local] [def]
-!423 = metadata !{metadata !"0x1\00\000\0010368\0064\000\000", null, null, metadata !20, metadata !424, i32 0, null, null, null} ; [ DW_TAG_array_type ] [line 0, size 10368, align 64, offset 0] [from double]
-!424 = metadata !{metadata !425}
-!425 = metadata !{metadata !"0x21\000\00162"} ; [ DW_TAG_subrange_type ] [0, 161]
-!426 = metadata !{metadata !"0x34\00cuf\00cuf\00\0072\001\001", null, metadata !300, metadata !423, null, null} ; [ DW_TAG_variable ] [cuf] [line 72] [local] [def]
-!427 = metadata !{metadata !"0x34\00buf\00buf\00\0075\001\001", null, metadata !300, metadata !428, null, null} ; [ DW_TAG_variable ] [buf] [line 75] [local] [def]
-!428 = metadata !{metadata !"0x1\00\000\0051840\0064\000\000", null, null, metadata !20, metadata !429, i32 0, null, null, null} ; [ DW_TAG_array_type ] [line 0, size 51840, align 64, offset 0] [from double]
-!429 = metadata !{metadata !425, metadata !93}
-!430 = metadata !{metadata !"0x34\00ue\00ue\00\0074\001\001", null, metadata !300, metadata !428, null, null} ; [ DW_TAG_variable ] [ue] [line 74] [local] [def]
-!431 = metadata !{metadata !"0x34\00njac\00njac\00\0086\001\001", null, metadata !300, metadata !432, null, null} ; [ DW_TAG_variable ] [njac] [line 86] [local] [def]
-!432 = metadata !{metadata !"0x1\00\000\006886684800\0064\000\000", null, null, metadata !20, metadata !433, i32 0, null, null, null} ; [ DW_TAG_array_type ] [line 0, size 6886684800, align 64, offset 0] [from double]
-!433 = metadata !{metadata !308, metadata !308, metadata !425, metadata !93, metadata !93}
-!434 = metadata !{metadata !"0x34\00fjac\00fjac\00\0084\001\001", null, metadata !300, metadata !432, null, null} ; [ DW_TAG_variable ] [fjac] [line 84] [local] [def]
-!435 = metadata !{metadata !"0x34\00tmp3\00tmp3\00\0088\001\001", null, metadata !300, metadata !20, null, null} ; [ DW_TAG_variable ] [tmp3] [line 88] [local] [def]
-!436 = metadata !{metadata !"0x34\00tmp2\00tmp2\00\0088\001\001", null, metadata !300, metadata !20, null, null} ; [ DW_TAG_variable ] [tmp2] [line 88] [local] [def]
-!437 = metadata !{metadata !"0x34\00tmp1\00tmp1\00\0088\001\001", null, metadata !300, metadata !20, null, null} ; [ DW_TAG_variable ] [tmp1] [line 88] [local] [def]
-!438 = metadata !{i32 2, metadata !"Dwarf Version", i32 4}
-!439 = metadata !{i32 1898, i32 0, metadata !440, null}
-!440 = metadata !{metadata !"0xb\001898\000\00107", metadata !1, metadata !114} ; [ DW_TAG_lexical_block ] [/home/hfinkel/src/NPB2.3-omp-C/BT/bt.c]
-!441 = metadata !{i32 1913, i32 0, metadata !442, null}
-!442 = metadata !{metadata !"0xb\001913\000\00115", metadata !1, metadata !114} ; [ DW_TAG_lexical_block ] [/home/hfinkel/src/NPB2.3-omp-C/BT/bt.c]
-!443 = metadata !{i32 1923, i32 0, metadata !114, null}
-!444 = metadata !{metadata !"int", metadata !445}
-!445 = metadata !{metadata !"omnipotent char", metadata !446}
-!446 = metadata !{metadata !"Simple C/C++ TBAA"}
-!447 = metadata !{i32 1}
-!448 = metadata !{i32 1925, i32 0, metadata !449, null}
-!449 = metadata !{metadata !"0xb\001925\000\00121", metadata !1, metadata !114} ; [ DW_TAG_lexical_block ] [/home/hfinkel/src/NPB2.3-omp-C/BT/bt.c]
-!450 = metadata !{i32 1939, i32 0, metadata !451, null}
-!451 = metadata !{metadata !"0xb\001939\000\00127", metadata !1, metadata !114} ; [ DW_TAG_lexical_block ] [/home/hfinkel/src/NPB2.3-omp-C/BT/bt.c]
-!452 = metadata !{i32 1940, i32 0, metadata !453, null}
-!453 = metadata !{metadata !"0xb\001940\000\00129", metadata !1, metadata !454} ; [ DW_TAG_lexical_block ] [/home/hfinkel/src/NPB2.3-omp-C/BT/bt.c]
-!454 = metadata !{metadata !"0xb\001939\000\00128", metadata !1, metadata !451} ; [ DW_TAG_lexical_block ] [/home/hfinkel/src/NPB2.3-omp-C/BT/bt.c]
-!455 = metadata !{i32 1941, i32 0, metadata !456, null}
-!456 = metadata !{metadata !"0xb\001941\000\00131", metadata !1, metadata !457} ; [ DW_TAG_lexical_block ] [/home/hfinkel/src/NPB2.3-omp-C/BT/bt.c]
-!457 = metadata !{metadata !"0xb\001940\000\00130", metadata !1, metadata !453} ; [ DW_TAG_lexical_block ] [/home/hfinkel/src/NPB2.3-omp-C/BT/bt.c]
-!458 = metadata !{i32 2020, i32 0, metadata !459, null}
-!459 = metadata !{metadata !"0xb\002020\000\00149", metadata !1, metadata !460} ; [ DW_TAG_lexical_block ] [/home/hfinkel/src/NPB2.3-omp-C/BT/bt.c]
-!460 = metadata !{metadata !"0xb\002019\000\00148", metadata !1, metadata !461} ; [ DW_TAG_lexical_block ] [/home/hfinkel/src/NPB2.3-omp-C/BT/bt.c]
-!461 = metadata !{metadata !"0xb\002019\000\00147", metadata !1, metadata !462} ; [ DW_TAG_lexical_block ] [/home/hfinkel/src/NPB2.3-omp-C/BT/bt.c]
-!462 = metadata !{metadata !"0xb\002018\000\00146", metadata !1, metadata !463} ; [ DW_TAG_lexical_block ] [/home/hfinkel/src/NPB2.3-omp-C/BT/bt.c]
-!463 = metadata !{metadata !"0xb\002018\000\00145", metadata !1, metadata !114} ; [ DW_TAG_lexical_block ] [/home/hfinkel/src/NPB2.3-omp-C/BT/bt.c]
-!464 = metadata !{i32 1, metadata !"Debug Info Version", i32 2}
+!0 = !{!"0x11\0012\00clang version 3.4 (trunk 190311)\001\00\000\00\000", !1, !2, !2, !3, !298, !2} ; [ DW_TAG_compile_unit ] [/home/hfinkel/src/NPB2.3-omp-C/BT/bt.c] [DW_LANG_C99]
+!1 = !{!"bt.c", !"/home/hfinkel/src/NPB2.3-omp-C/BT"}
+!2 = !{}
+!3 = !{!4, !82, !102, !114, !132, !145, !154, !155, !162, !183, !200, !201, !207, !208, !215, !221, !230, !238, !246, !255, !260, !261, !268, !274, !279, !280, !287, !293}
+!4 = !{!"0x2e\00main\00main\00\0074\000\001\000\006\00256\001\0074", !1, !5, !6, null, null, null, null, !12} ; [ DW_TAG_subprogram ] [line 74] [def] [main]
+!5 = !{!"0x29", !1} ; [ DW_TAG_file_type ] [/home/hfinkel/src/NPB2.3-omp-C/BT/bt.c]
+!6 = !{!"0x15\00\000\000\000\000\000\000", i32 0, null, null, !7, null, null, null} ; [ DW_TAG_subroutine_type ] [line 0, size 0, align 0, offset 0] [from ]
+!7 = !{!8, !8, !9}
+!8 = !{!"0x24\00int\000\0032\0032\000\000\005", null, null} ; [ DW_TAG_base_type ] [int] [line 0, size 32, align 32, offset 0, enc DW_ATE_signed]
+!9 = !{!"0xf\00\000\0064\0064\000\000", null, null, !10} ; [ DW_TAG_pointer_type ] [line 0, size 64, align 64, offset 0] [from ]
+!10 = !{!"0xf\00\000\0064\0064\000\000", null, null, !11} ; [ DW_TAG_pointer_type ] [line 0, size 64, align 64, offset 0] [from char]
+!11 = !{!"0x24\00char\000\008\008\000\000\008", null, null} ; [ DW_TAG_base_type ] [char] [line 0, size 8, align 8, offset 0, enc DW_ATE_unsigned_char]
+!12 = !{!13, !14, !15, !16, !17, !18, !19, !21, !22, !23, !25, !26}
+!13 = !{!"0x101\00argc\0016777290\000", !4, !5, !8} ; [ DW_TAG_arg_variable ] [argc] [line 74]
+!14 = !{!"0x101\00argv\0033554506\000", !4, !5, !9} ; [ DW_TAG_arg_variable ] [argv] [line 74]
+!15 = !{!"0x100\00niter\0076\000", !4, !5, !8} ; [ DW_TAG_auto_variable ] [niter] [line 76]
+!16 = !{!"0x100\00step\0076\000", !4, !5, !8} ; [ DW_TAG_auto_variable ] [step] [line 76]
+!17 = !{!"0x100\00n3\0076\000", !4, !5, !8} ; [ DW_TAG_auto_variable ] [n3] [line 76]
+!18 = !{!"0x100\00nthreads\0077\000", !4, !5, !8} ; [ DW_TAG_auto_variable ] [nthreads] [line 77]
+!19 = !{!"0x100\00navg\0078\000", !4, !5, !20} ; [ DW_TAG_auto_variable ] [navg] [line 78]
+!20 = !{!"0x24\00double\000\0064\0064\000\000\004", null, null} ; [ DW_TAG_base_type ] [double] [line 0, size 64, align 64, offset 0, enc DW_ATE_float]
+!21 = !{!"0x100\00mflops\0078\000", !4, !5, !20} ; [ DW_TAG_auto_variable ] [mflops] [line 78]
+!22 = !{!"0x100\00tmax\0080\000", !4, !5, !20} ; [ DW_TAG_auto_variable ] [tmax] [line 80]
+!23 = !{!"0x100\00verified\0081\000", !4, !5, !24} ; [ DW_TAG_auto_variable ] [verified] [line 81]
+!24 = !{!"0x16\00boolean\0012\000\000\000\000", !1, null, !8} ; [ DW_TAG_typedef ] [boolean] [line 12, size 0, align 0, offset 0] [from int]
+!25 = !{!"0x100\00class\0082\000", !4, !5, !11} ; [ DW_TAG_auto_variable ] [class] [line 82]
+!26 = !{!"0x100\00fp\0083\000", !4, !5, !27} ; [ DW_TAG_auto_variable ] [fp] [line 83]
+!27 = !{!"0xf\00\000\0064\0064\000\000", null, null, !28} ; [ DW_TAG_pointer_type ] [line 0, size 64, align 64, offset 0] [from FILE]
+!28 = !{!"0x16\00FILE\0049\000\000\000\000", !1, null, !29} ; [ DW_TAG_typedef ] [FILE] [line 49, size 0, align 0, offset 0] [from _IO_FILE]
+!29 = !{!"0x13\00_IO_FILE\00271\001728\0064\000\000\000", !30, null, null, !31, null, null, null} ; [ DW_TAG_structure_type ] [_IO_FILE] [line 271, size 1728, align 64, offset 0] [def] [from ]
+!30 = !{!"/usr/include/libio.h", !"/home/hfinkel/src/NPB2.3-omp-C/BT"}
+!31 = !{!32, !33, !34, !35, !36, !37, !38, !39, !40, !41, !42, !43, !44, !52, !53, !54, !55, !58, !60, !62, !66, !68, !70, !71, !72, !73, !74, !77, !78}
+!32 = !{!"0xd\00_flags\00272\0032\0032\000\000", !30, !29, !8} ; [ DW_TAG_member ] [_flags] [line 272, size 32, align 32, offset 0] [from int]
+!33 = !{!"0xd\00_IO_read_ptr\00277\0064\0064\0064\000", !30, !29, !10} ; [ DW_TAG_member ] [_IO_read_ptr] [line 277, size 64, align 64, offset 64] [from ]
+!34 = !{!"0xd\00_IO_read_end\00278\0064\0064\00128\000", !30, !29, !10} ; [ DW_TAG_member ] [_IO_read_end] [line 278, size 64, align 64, offset 128] [from ]
+!35 = !{!"0xd\00_IO_read_base\00279\0064\0064\00192\000", !30, !29, !10} ; [ DW_TAG_member ] [_IO_read_base] [line 279, size 64, align 64, offset 192] [from ]
+!36 = !{!"0xd\00_IO_write_base\00280\0064\0064\00256\000", !30, !29, !10} ; [ DW_TAG_member ] [_IO_write_base] [line 280, size 64, align 64, offset 256] [from ]
+!37 = !{!"0xd\00_IO_write_ptr\00281\0064\0064\00320\000", !30, !29, !10} ; [ DW_TAG_member ] [_IO_write_ptr] [line 281, size 64, align 64, offset 320] [from ]
+!38 = !{!"0xd\00_IO_write_end\00282\0064\0064\00384\000", !30, !29, !10} ; [ DW_TAG_member ] [_IO_write_end] [line 282, size 64, align 64, offset 384] [from ]
+!39 = !{!"0xd\00_IO_buf_base\00283\0064\0064\00448\000", !30, !29, !10} ; [ DW_TAG_member ] [_IO_buf_base] [line 283, size 64, align 64, offset 448] [from ]
+!40 = !{!"0xd\00_IO_buf_end\00284\0064\0064\00512\000", !30, !29, !10} ; [ DW_TAG_member ] [_IO_buf_end] [line 284, size 64, align 64, offset 512] [from ]
+!41 = !{!"0xd\00_IO_save_base\00286\0064\0064\00576\000", !30, !29, !10} ; [ DW_TAG_member ] [_IO_save_base] [line 286, size 64, align 64, offset 576] [from ]
+!42 = !{!"0xd\00_IO_backup_base\00287\0064\0064\00640\000", !30, !29, !10} ; [ DW_TAG_member ] [_IO_backup_base] [line 287, size 64, align 64, offset 640] [from ]
+!43 = !{!"0xd\00_IO_save_end\00288\0064\0064\00704\000", !30, !29, !10} ; [ DW_TAG_member ] [_IO_save_end] [line 288, size 64, align 64, offset 704] [from ]
+!44 = !{!"0xd\00_markers\00290\0064\0064\00768\000", !30, !29, !45} ; [ DW_TAG_member ] [_markers] [line 290, size 64, align 64, offset 768] [from ]
+!45 = !{!"0xf\00\000\0064\0064\000\000", null, null, !46} ; [ DW_TAG_pointer_type ] [line 0, size 64, align 64, offset 0] [from _IO_marker]
+!46 = !{!"0x13\00_IO_marker\00186\00192\0064\000\000\000", !30, null, null, !47, null, null, null} ; [ DW_TAG_structure_type ] [_IO_marker] [line 186, size 192, align 64, offset 0] [def] [from ]
+!47 = !{!48, !49, !51}
+!48 = !{!"0xd\00_next\00187\0064\0064\000\000", !30, !46, !45} ; [ DW_TAG_member ] [_next] [line 187, size 64, align 64, offset 0] [from ]
+!49 = !{!"0xd\00_sbuf\00188\0064\0064\0064\000", !30, !46, !50} ; [ DW_TAG_member ] [_sbuf] [line 188, size 64, align 64, offset 64] [from ]
+!50 = !{!"0xf\00\000\0064\0064\000\000", null, null, !29} ; [ DW_TAG_pointer_type ] [line 0, size 64, align 64, offset 0] [from _IO_FILE]
+!51 = !{!"0xd\00_pos\00192\0032\0032\00128\000", !30, !46, !8} ; [ DW_TAG_member ] [_pos] [line 192, size 32, align 32, offset 128] [from int]
+!52 = !{!"0xd\00_chain\00292\0064\0064\00832\000", !30, !29, !50} ; [ DW_TAG_member ] [_chain] [line 292, size 64, align 64, offset 832] [from ]
+!53 = !{!"0xd\00_fileno\00294\0032\0032\00896\000", !30, !29, !8} ; [ DW_TAG_member ] [_fileno] [line 294, size 32, align 32, offset 896] [from int]
+!54 = !{!"0xd\00_flags2\00298\0032\0032\00928\000", !30, !29, !8} ; [ DW_TAG_member ] [_flags2] [line 298, size 32, align 32, offset 928] [from int]
+!55 = !{!"0xd\00_old_offset\00300\0064\0064\00960\000", !30, !29, !56} ; [ DW_TAG_member ] [_old_offset] [line 300, size 64, align 64, offset 960] [from __off_t]
+!56 = !{!"0x16\00__off_t\00141\000\000\000\000", !30, null, !57} ; [ DW_TAG_typedef ] [__off_t] [line 141, size 0, align 0, offset 0] [from long int]
+!57 = !{!"0x24\00long int\000\0064\0064\000\000\005", null, null} ; [ DW_TAG_base_type ] [long int] [line 0, size 64, align 64, offset 0, enc DW_ATE_signed]
+!58 = !{!"0xd\00_cur_column\00304\0016\0016\001024\000", !30, !29, !59} ; [ DW_TAG_member ] [_cur_column] [line 304, size 16, align 16, offset 1024] [from unsigned short]
+!59 = !{!"0x24\00unsigned short\000\0016\0016\000\000\007", null, null} ; [ DW_TAG_base_type ] [unsigned short] [line 0, size 16, align 16, offset 0, enc DW_ATE_unsigned]
+!60 = !{!"0xd\00_vtable_offset\00305\008\008\001040\000", !30, !29, !61} ; [ DW_TAG_member ] [_vtable_offset] [line 305, size 8, align 8, offset 1040] [from signed char]
+!61 = !{!"0x24\00signed char\000\008\008\000\000\006", null, null} ; [ DW_TAG_base_type ] [signed char] [line 0, size 8, align 8, offset 0, enc DW_ATE_signed_char]
+!62 = !{!"0xd\00_shortbuf\00306\008\008\001048\000", !30, !29, !63} ; [ DW_TAG_member ] [_shortbuf] [line 306, size 8, align 8, offset 1048] [from ]
+!63 = !{!"0x1\00\000\008\008\000\000", null, null, !11, !64, i32 0, null, null, null} ; [ DW_TAG_array_type ] [line 0, size 8, align 8, offset 0] [from char]
+!64 = !{!65}
+!65 = !{!"0x21\000\001"} ; [ DW_TAG_subrange_type ] [0, 0]
+!66 = !{!"0xd\00_lock\00310\0064\0064\001088\000", !30, !29, !67} ; [ DW_TAG_member ] [_lock] [line 310, size 64, align 64, offset 1088] [from ]
+!67 = !{!"0xf\00\000\0064\0064\000\000", null, null, null} ; [ DW_TAG_pointer_type ] [line 0, size 64, align 64, offset 0] [from ]
+!68 = !{!"0xd\00_offset\00319\0064\0064\001152\000", !30, !29, !69} ; [ DW_TAG_member ] [_offset] [line 319, size 64, align 64, offset 1152] [from __off64_t]
+!69 = !{!"0x16\00__off64_t\00142\000\000\000\000", !30, null, !57} ; [ DW_TAG_typedef ] [__off64_t] [line 142, size 0, align 0, offset 0] [from long int]
+!70 = !{!"0xd\00__pad1\00328\0064\0064\001216\000", !30, !29, !67} ; [ DW_TAG_member ] [__pad1] [line 328, size 64, align 64, offset 1216] [from ]
+!71 = !{!"0xd\00__pad2\00329\0064\0064\001280\000", !30, !29, !67} ; [ DW_TAG_member ] [__pad2] [line 329, size 64, align 64, offset 1280] [from ]
+!72 = !{!"0xd\00__pad3\00330\0064\0064\001344\000", !30, !29, !67} ; [ DW_TAG_member ] [__pad3] [line 330, size 64, align 64, offset 1344] [from ]
+!73 = !{!"0xd\00__pad4\00331\0064\0064\001408\000", !30, !29, !67} ; [ DW_TAG_member ] [__pad4] [line 331, size 64, align 64, offset 1408] [from ]
+!74 = !{!"0xd\00__pad5\00332\0064\0064\001472\000", !30, !29, !75} ; [ DW_TAG_member ] [__pad5] [line 332, size 64, align 64, offset 1472] [from size_t]
+!75 = !{!"0x16\00size_t\0042\000\000\000\000", !30, null, !76} ; [ DW_TAG_typedef ] [size_t] [line 42, size 0, align 0, offset 0] [from long unsigned int]
+!76 = !{!"0x24\00long unsigned int\000\0064\0064\000\000\007", null, null} ; [ DW_TAG_base_type ] [long unsigned int] [line 0, size 64, align 64, offset 0, enc DW_ATE_unsigned]
+!77 = !{!"0xd\00_mode\00334\0032\0032\001536\000", !30, !29, !8} ; [ DW_TAG_member ] [_mode] [line 334, size 32, align 32, offset 1536] [from int]
+!78 = !{!"0xd\00_unused2\00336\00160\008\001568\000", !30, !29, !79} ; [ DW_TAG_member ] [_unused2] [line 336, size 160, align 8, offset 1568] [from ]
+!79 = !{!"0x1\00\000\00160\008\000\000", null, null, !11, !80, i32 0, null, null, null} ; [ DW_TAG_array_type ] [line 0, size 160, align 8, offset 0] [from char]
+!80 = !{!81}
+!81 = !{!"0x21\000\0020"} ; [ DW_TAG_subrange_type ] [0, 19]
+!82 = !{!"0x2e\00verify\00verify\00\002388\001\001\000\006\00256\001\002388", !1, !5, !83, null, null, null, null, !86} ; [ DW_TAG_subprogram ] [line 2388] [local] [def] [verify]
+!83 = !{!"0x15\00\000\000\000\000\000\000", i32 0, null, null, !84, null, null, null} ; [ DW_TAG_subroutine_type ] [line 0, size 0, align 0, offset 0] [from ]
+!84 = !{null, !8, !10, !85}
+!85 = !{!"0xf\00\000\0064\0064\000\000", null, null, !24} ; [ DW_TAG_pointer_type ] [line 0, size 64, align 64, offset 0] [from boolean]
+!86 = !{!87, !88, !89, !90, !94, !95, !96, !97, !98, !99, !100, !101}
+!87 = !{!"0x101\00no_time_steps\0016779604\000", !82, !5, !8} ; [ DW_TAG_arg_variable ] [no_time_steps] [line 2388]
+!88 = !{!"0x101\00class\0033556820\000", !82, !5, !10} ; [ DW_TAG_arg_variable ] [class] [line 2388]
+!89 = !{!"0x101\00verified\0050334036\000", !82, !5, !85} ; [ DW_TAG_arg_variable ] [verified] [line 2388]
+!90 = !{!"0x100\00xcrref\002397\000", !82, !5, !91} ; [ DW_TAG_auto_variable ] [xcrref] [line 2397]
+!91 = !{!"0x1\00\000\00320\0064\000\000", null, null, !20, !92, i32 0, null, null, null} ; [ DW_TAG_array_type ] [line 0, size 320, align 64, offset 0] [from double]
+!92 = !{!93}
+!93 = !{!"0x21\000\005"} ; [ DW_TAG_subrange_type ] [0, 4]
+!94 = !{!"0x100\00xceref\002397\000", !82, !5, !91} ; [ DW_TAG_auto_variable ] [xceref] [line 2397]
+!95 = !{!"0x100\00xcrdif\002397\000", !82, !5, !91} ; [ DW_TAG_auto_variable ] [xcrdif] [line 2397]
+!96 = !{!"0x100\00xcedif\002397\000", !82, !5, !91} ; [ DW_TAG_auto_variable ] [xcedif] [line 2397]
+!97 = !{!"0x100\00epsilon\002398\000", !82, !5, !20} ; [ DW_TAG_auto_variable ] [epsilon] [line 2398]
+!98 = !{!"0x100\00xce\002398\000", !82, !5, !91} ; [ DW_TAG_auto_variable ] [xce] [line 2398]
+!99 = !{!"0x100\00xcr\002398\000", !82, !5, !91} ; [ DW_TAG_auto_variable ] [xcr] [line 2398]
+!100 = !{!"0x100\00dtref\002398\000", !82, !5, !20} ; [ DW_TAG_auto_variable ] [dtref] [line 2398]
+!101 = !{!"0x100\00m\002399\000", !82, !5, !8} ; [ DW_TAG_auto_variable ] [m] [line 2399]
+!102 = !{!"0x2e\00rhs_norm\00rhs_norm\00\00266\001\001\000\006\00256\001\00266", !1, !5, !103, null, null, null, null, !106} ; [ DW_TAG_subprogram ] [line 266] [local] [def] [rhs_norm]
+!103 = !{!"0x15\00\000\000\000\000\000\000", i32 0, null, null, !104, null, null, null} ; [ DW_TAG_subroutine_type ] [line 0, size 0, align 0, offset 0] [from ]
+!104 = !{null, !105}
+!105 = !{!"0xf\00\000\0064\0064\000\000", null, null, !20} ; [ DW_TAG_pointer_type ] [line 0, size 64, align 64, offset 0] [from double]
+!106 = !{!107, !108, !109, !110, !111, !112, !113}
+!107 = !{!"0x101\00rms\0016777482\000", !102, !5, !105} ; [ DW_TAG_arg_variable ] [rms] [line 266]
+!108 = !{!"0x100\00i\00271\000", !102, !5, !8} ; [ DW_TAG_auto_variable ] [i] [line 271]
+!109 = !{!"0x100\00j\00271\000", !102, !5, !8} ; [ DW_TAG_auto_variable ] [j] [line 271]
+!110 = !{!"0x100\00k\00271\000", !102, !5, !8} ; [ DW_TAG_auto_variable ] [k] [line 271]
+!111 = !{!"0x100\00d\00271\000", !102, !5, !8} ; [ DW_TAG_auto_variable ] [d] [line 271]
+!112 = !{!"0x100\00m\00271\000", !102, !5, !8} ; [ DW_TAG_auto_variable ] [m] [line 271]
+!113 = !{!"0x100\00add\00272\000", !102, !5, !20} ; [ DW_TAG_auto_variable ] [add] [line 272]
+!114 = !{!"0x2e\00compute_rhs\00compute_rhs\00\001767\001\001\000\006\00256\001\001767", !1, !5, !115, null, void ()* @compute_rhs, null, null, !117} ; [ DW_TAG_subprogram ] [line 1767] [local] [def] [compute_rhs]
+!115 = !{!"0x15\00\000\000\000\000\000\000", i32 0, null, null, !116, null, null, null} ; [ DW_TAG_subroutine_type ] [line 0, size 0, align 0, offset 0] [from ]
+!116 = !{null}
+!117 = !{!118, !119, !120, !121, !122, !123, !124, !125, !126, !127, !128, !129, !130, !131}
+!118 = !{!"0x100\00i\001769\000", !114, !5, !8} ; [ DW_TAG_auto_variable ] [i] [line 1769]
+!119 = !{!"0x100\00j\001769\000", !114, !5, !8} ; [ DW_TAG_auto_variable ] [j] [line 1769]
+!120 = !{!"0x100\00k\001769\000", !114, !5, !8} ; [ DW_TAG_auto_variable ] [k] [line 1769]
+!121 = !{!"0x100\00m\001769\000", !114, !5, !8} ; [ DW_TAG_auto_variable ] [m] [line 1769]
+!122 = !{!"0x100\00rho_inv\001770\000", !114, !5, !20} ; [ DW_TAG_auto_variable ] [rho_inv] [line 1770]
+!123 = !{!"0x100\00uijk\001770\000", !114, !5, !20} ; [ DW_TAG_auto_variable ] [uijk] [line 1770]
+!124 = !{!"0x100\00up1\001770\000", !114, !5, !20} ; [ DW_TAG_auto_variable ] [up1] [line 1770]
+!125 = !{!"0x100\00um1\001770\000", !114, !5, !20} ; [ DW_TAG_auto_variable ] [um1] [line 1770]
+!126 = !{!"0x100\00vijk\001770\000", !114, !5, !20} ; [ DW_TAG_auto_variable ] [vijk] [line 1770]
+!127 = !{!"0x100\00vp1\001770\000", !114, !5, !20} ; [ DW_TAG_auto_variable ] [vp1] [line 1770]
+!128 = !{!"0x100\00vm1\001770\000", !114, !5, !20} ; [ DW_TAG_auto_variable ] [vm1] [line 1770]
+!129 = !{!"0x100\00wijk\001770\000", !114, !5, !20} ; [ DW_TAG_auto_variable ] [wijk] [line 1770]
+!130 = !{!"0x100\00wp1\001770\000", !114, !5, !20} ; [ DW_TAG_auto_variable ] [wp1] [line 1770]
+!131 = !{!"0x100\00wm1\001770\000", !114, !5, !20} ; [ DW_TAG_auto_variable ] [wm1] [line 1770]
+!132 = !{!"0x2e\00error_norm\00error_norm\00\00225\001\001\000\006\00256\001\00225", !1, !5, !103, null, null, null, null, !133} ; [ DW_TAG_subprogram ] [line 225] [local] [def] [error_norm]
+!133 = !{!134, !135, !136, !137, !138, !139, !140, !141, !142, !143, !144}
+!134 = !{!"0x101\00rms\0016777441\000", !132, !5, !105} ; [ DW_TAG_arg_variable ] [rms] [line 225]
+!135 = !{!"0x100\00i\00232\000", !132, !5, !8} ; [ DW_TAG_auto_variable ] [i] [line 232]
+!136 = !{!"0x100\00j\00232\000", !132, !5, !8} ; [ DW_TAG_auto_variable ] [j] [line 232]
+!137 = !{!"0x100\00k\00232\000", !132, !5, !8} ; [ DW_TAG_auto_variable ] [k] [line 232]
+!138 = !{!"0x100\00m\00232\000", !132, !5, !8} ; [ DW_TAG_auto_variable ] [m] [line 232]
+!139 = !{!"0x100\00d\00232\000", !132, !5, !8} ; [ DW_TAG_auto_variable ] [d] [line 232]
+!140 = !{!"0x100\00xi\00233\000", !132, !5, !20} ; [ DW_TAG_auto_variable ] [xi] [line 233]
+!141 = !{!"0x100\00eta\00233\000", !132, !5, !20} ; [ DW_TAG_auto_variable ] [eta] [line 233]
+!142 = !{!"0x100\00zeta\00233\000", !132, !5, !20} ; [ DW_TAG_auto_variable ] [zeta] [line 233]
+!143 = !{!"0x100\00u_exact\00233\000", !132, !5, !91} ; [ DW_TAG_auto_variable ] [u_exact] [line 233]
+!144 = !{!"0x100\00add\00233\000", !132, !5, !20} ; [ DW_TAG_auto_variable ] [add] [line 233]
+!145 = !{!"0x2e\00exact_solution\00exact_solution\00\00643\001\001\000\006\00256\001\00644", !1, !5, !146, null, null, null, null, !148} ; [ DW_TAG_subprogram ] [line 643] [local] [def] [scope 644] [exact_solution]
+!146 = !{!"0x15\00\000\000\000\000\000\000", i32 0, null, null, !147, null, null, null} ; [ DW_TAG_subroutine_type ] [line 0, size 0, align 0, offset 0] [from ]
+!147 = !{null, !20, !20, !20, !105}
+!148 = !{!149, !150, !151, !152, !153}
+!149 = !{!"0x101\00xi\0016777859\000", !145, !5, !20} ; [ DW_TAG_arg_variable ] [xi] [line 643]
+!150 = !{!"0x101\00eta\0033555075\000", !145, !5, !20} ; [ DW_TAG_arg_variable ] [eta] [line 643]
+!151 = !{!"0x101\00zeta\0050332291\000", !145, !5, !20} ; [ DW_TAG_arg_variable ] [zeta] [line 643]
+!152 = !{!"0x101\00dtemp\0067109508\000", !145, !5, !105} ; [ DW_TAG_arg_variable ] [dtemp] [line 644]
+!153 = !{!"0x100\00m\00653\000", !145, !5, !8} ; [ DW_TAG_auto_variable ] [m] [line 653]
+!154 = !{!"0x2e\00set_constants\00set_constants\00\002191\001\001\000\006\00256\001\002191", !1, !5, !115, null, null, null, null, !2} ; [ DW_TAG_subprogram ] [line 2191] [local] [def] [set_constants]
+!155 = !{!"0x2e\00lhsinit\00lhsinit\00\00855\001\001\000\006\00256\001\00855", !1, !5, !115, null, null, null, null, !156} ; [ DW_TAG_subprogram ] [line 855] [local] [def] [lhsinit]
+!156 = !{!157, !158, !159, !160, !161}
+!157 = !{!"0x100\00i\00857\000", !155, !5, !8} ; [ DW_TAG_auto_variable ] [i] [line 857]
+!158 = !{!"0x100\00j\00857\000", !155, !5, !8} ; [ DW_TAG_auto_variable ] [j] [line 857]
+!159 = !{!"0x100\00k\00857\000", !155, !5, !8} ; [ DW_TAG_auto_variable ] [k] [line 857]
+!160 = !{!"0x100\00m\00857\000", !155, !5, !8} ; [ DW_TAG_auto_variable ] [m] [line 857]
+!161 = !{!"0x100\00n\00857\000", !155, !5, !8} ; [ DW_TAG_auto_variable ] [n] [line 857]
+!162 = !{!"0x2e\00initialize\00initialize\00\00669\001\001\000\006\00256\001\00669", !1, !5, !115, null, null, null, null, !163} ; [ DW_TAG_subprogram ] [line 669] [local] [def] [initialize]
+!163 = !{!164, !165, !166, !167, !168, !169, !170, !171, !172, !173, !174, !179, !180, !181, !182}
+!164 = !{!"0x100\00i\00679\000", !162, !5, !8} ; [ DW_TAG_auto_variable ] [i] [line 679]
+!165 = !{!"0x100\00j\00679\000", !162, !5, !8} ; [ DW_TAG_auto_variable ] [j] [line 679]
+!166 = !{!"0x100\00k\00679\000", !162, !5, !8} ; [ DW_TAG_auto_variable ] [k] [line 679]
+!167 = !{!"0x100\00m\00679\000", !162, !5, !8} ; [ DW_TAG_auto_variable ] [m] [line 679]
+!168 = !{!"0x100\00ix\00679\000", !162, !5, !8} ; [ DW_TAG_auto_variable ] [ix] [line 679]
+!169 = !{!"0x100\00iy\00679\000", !162, !5, !8} ; [ DW_TAG_auto_variable ] [iy] [line 679]
+!170 = !{!"0x100\00iz\00679\000", !162, !5, !8} ; [ DW_TAG_auto_variable ] [iz] [line 679]
+!171 = !{!"0x100\00xi\00680\000", !162, !5, !20} ; [ DW_TAG_auto_variable ] [xi] [line 680]
+!172 = !{!"0x100\00eta\00680\000", !162, !5, !20} ; [ DW_TAG_auto_variable ] [eta] [line 680]
+!173 = !{!"0x100\00zeta\00680\000", !162, !5, !20} ; [ DW_TAG_auto_variable ] [zeta] [line 680]
+!174 = !{!"0x100\00Pface\00680\000", !162, !5, !175} ; [ DW_TAG_auto_variable ] [Pface] [line 680]
+!175 = !{!"0x1\00\000\001920\0064\000\000", null, null, !20, !176, i32 0, null, null, null} ; [ DW_TAG_array_type ] [line 0, size 1920, align 64, offset 0] [from double]
+!176 = !{!177, !178, !93}
+!177 = !{!"0x21\000\002"} ; [ DW_TAG_subrange_type ] [0, 1]
+!178 = !{!"0x21\000\003"} ; [ DW_TAG_subrange_type ] [0, 2]
+!179 = !{!"0x100\00Pxi\00680\000", !162, !5, !20} ; [ DW_TAG_auto_variable ] [Pxi] [line 680]
+!180 = !{!"0x100\00Peta\00680\000", !162, !5, !20} ; [ DW_TAG_auto_variable ] [Peta] [line 680]
+!181 = !{!"0x100\00Pzeta\00680\000", !162, !5, !20} ; [ DW_TAG_auto_variable ] [Pzeta] [line 680]
+!182 = !{!"0x100\00temp\00680\000", !162, !5, !91} ; [ DW_TAG_auto_variable ] [temp] [line 680]
+!183 = !{!"0x2e\00exact_rhs\00exact_rhs\00\00301\001\001\000\006\00256\001\00301", !1, !5, !115, null, null, null, null, !184} ; [ DW_TAG_subprogram ] [line 301] [local] [def] [exact_rhs]
+!184 = !{!185, !186, !187, !188, !189, !190, !191, !192, !193, !194, !195, !196, !197, !198, !199}
+!185 = !{!"0x100\00dtemp\00310\000", !183, !5, !91} ; [ DW_TAG_auto_variable ] [dtemp] [line 310]
+!186 = !{!"0x100\00xi\00310\000", !183, !5, !20} ; [ DW_TAG_auto_variable ] [xi] [line 310]
+!187 = !{!"0x100\00eta\00310\000", !183, !5, !20} ; [ DW_TAG_auto_variable ] [eta] [line 310]
+!188 = !{!"0x100\00zeta\00310\000", !183, !5, !20} ; [ DW_TAG_auto_variable ] [zeta] [line 310]
+!189 = !{!"0x100\00dtpp\00310\000", !183, !5, !20} ; [ DW_TAG_auto_variable ] [dtpp] [line 310]
+!190 = !{!"0x100\00m\00311\000", !183, !5, !8} ; [ DW_TAG_auto_variable ] [m] [line 311]
+!191 = !{!"0x100\00i\00311\000", !183, !5, !8} ; [ DW_TAG_auto_variable ] [i] [line 311]
+!192 = !{!"0x100\00j\00311\000", !183, !5, !8} ; [ DW_TAG_auto_variable ] [j] [line 311]
+!193 = !{!"0x100\00k\00311\000", !183, !5, !8} ; [ DW_TAG_auto_variable ] [k] [line 311]
+!194 = !{!"0x100\00ip1\00311\000", !183, !5, !8} ; [ DW_TAG_auto_variable ] [ip1] [line 311]
+!195 = !{!"0x100\00im1\00311\000", !183, !5, !8} ; [ DW_TAG_auto_variable ] [im1] [line 311]
+!196 = !{!"0x100\00jp1\00311\000", !183, !5, !8} ; [ DW_TAG_auto_variable ] [jp1] [line 311]
+!197 = !{!"0x100\00jm1\00311\000", !183, !5, !8} ; [ DW_TAG_auto_variable ] [jm1] [line 311]
+!198 = !{!"0x100\00km1\00311\000", !183, !5, !8} ; [ DW_TAG_auto_variable ] [km1] [line 311]
+!199 = !{!"0x100\00kp1\00311\000", !183, !5, !8} ; [ DW_TAG_auto_variable ] [kp1] [line 311]
+!200 = !{!"0x2e\00adi\00adi\00\00210\001\001\000\006\00256\001\00210", !1, !5, !115, null, null, null, null, !2} ; [ DW_TAG_subprogram ] [line 210] [local] [def] [adi]
+!201 = !{!"0x2e\00add\00add\00\00187\001\001\000\006\00256\001\00187", !1, !5, !115, null, null, null, null, !202} ; [ DW_TAG_subprogram ] [line 187] [local] [def] [add]
+!202 = !{!203, !204, !205, !206}
+!203 = !{!"0x100\00i\00193\000", !201, !5, !8} ; [ DW_TAG_auto_variable ] [i] [line 193]
+!204 = !{!"0x100\00j\00193\000", !201, !5, !8} ; [ DW_TAG_auto_variable ] [j] [line 193]
+!205 = !{!"0x100\00k\00193\000", !201, !5, !8} ; [ DW_TAG_auto_variable ] [k] [line 193]
+!206 = !{!"0x100\00m\00193\000", !201, !5, !8} ; [ DW_TAG_auto_variable ] [m] [line 193]
+!207 = !{!"0x2e\00z_solve\00z_solve\00\003457\001\001\000\006\00256\001\003457", !1, !5, !115, null, null, null, null, !2} ; [ DW_TAG_subprogram ] [line 3457] [local] [def] [z_solve]
+!208 = !{!"0x2e\00z_backsubstitute\00z_backsubstitute\00\003480\001\001\000\006\00256\001\003480", !1, !5, !115, null, null, null, null, !209} ; [ DW_TAG_subprogram ] [line 3480] [local] [def] [z_backsubstitute]
+!209 = !{!210, !211, !212, !213, !214}
+!210 = !{!"0x100\00i\003492\000", !208, !5, !8} ; [ DW_TAG_auto_variable ] [i] [line 3492]
+!211 = !{!"0x100\00j\003492\000", !208, !5, !8} ; [ DW_TAG_auto_variable ] [j] [line 3492]
+!212 = !{!"0x100\00k\003492\000", !208, !5, !8} ; [ DW_TAG_auto_variable ] [k] [line 3492]
+!213 = !{!"0x100\00m\003492\000", !208, !5, !8} ; [ DW_TAG_auto_variable ] [m] [line 3492]
+!214 = !{!"0x100\00n\003492\000", !208, !5, !8} ; [ DW_TAG_auto_variable ] [n] [line 3492]
+!215 = !{!"0x2e\00z_solve_cell\00z_solve_cell\00\003512\001\001\000\006\00256\001\003512", !1, !5, !115, null, null, null, null, !216} ; [ DW_TAG_subprogram ] [line 3512] [local] [def] [z_solve_cell]
+!216 = !{!217, !218, !219, !220}
+!217 = !{!"0x100\00i\003527\000", !215, !5, !8} ; [ DW_TAG_auto_variable ] [i] [line 3527]
+!218 = !{!"0x100\00j\003527\000", !215, !5, !8} ; [ DW_TAG_auto_variable ] [j] [line 3527]
+!219 = !{!"0x100\00k\003527\000", !215, !5, !8} ; [ DW_TAG_auto_variable ] [k] [line 3527]
+!220 = !{!"0x100\00ksize\003527\000", !215, !5, !8} ; [ DW_TAG_auto_variable ] [ksize] [line 3527]
+!221 = !{!"0x2e\00binvrhs\00binvrhs\00\003154\001\001\000\006\00256\001\003154", !1, !5, !222, null, null, null, null, !225} ; [ DW_TAG_subprogram ] [line 3154] [local] [def] [binvrhs]
+!222 = !{!"0x15\00\000\000\000\000\000\000", i32 0, null, null, !223, null, null, null} ; [ DW_TAG_subroutine_type ] [line 0, size 0, align 0, offset 0] [from ]
+!223 = !{null, !224, !105}
+!224 = !{!"0xf\00\000\0064\0064\000\000", null, null, !91} ; [ DW_TAG_pointer_type ] [line 0, size 64, align 64, offset 0] [from ]
+!225 = !{!226, !227, !228, !229}
+!226 = !{!"0x101\00lhs\0016780370\000", !221, !5, !224} ; [ DW_TAG_arg_variable ] [lhs] [line 3154]
+!227 = !{!"0x101\00r\0033557586\000", !221, !5, !105} ; [ DW_TAG_arg_variable ] [r] [line 3154]
+!228 = !{!"0x100\00pivot\003159\000", !221, !5, !20} ; [ DW_TAG_auto_variable ] [pivot] [line 3159]
+!229 = !{!"0x100\00coeff\003159\000", !221, !5, !20} ; [ DW_TAG_auto_variable ] [coeff] [line 3159]
+!230 = !{!"0x2e\00matmul_sub\00matmul_sub\00\002841\001\001\000\006\00256\001\002842", !1, !5, !231, null, null, null, null, !233} ; [ DW_TAG_subprogram ] [line 2841] [local] [def] [scope 2842] [matmul_sub]
+!231 = !{!"0x15\00\000\000\000\000\000\000", i32 0, null, null, !232, null, null, null} ; [ DW_TAG_subroutine_type ] [line 0, size 0, align 0, offset 0] [from ]
+!232 = !{null, !224, !224, !224}
+!233 = !{!234, !235, !236, !237}
+!234 = !{!"0x101\00ablock\0016780057\000", !230, !5, !224} ; [ DW_TAG_arg_variable ] [ablock] [line 2841]
+!235 = !{!"0x101\00bblock\0033557273\000", !230, !5, !224} ; [ DW_TAG_arg_variable ] [bblock] [line 2841]
+!236 = !{!"0x101\00cblock\0050334490\000", !230, !5, !224} ; [ DW_TAG_arg_variable ] [cblock] [line 2842]
+!237 = !{!"0x100\00j\002851\000", !230, !5, !8} ; [ DW_TAG_auto_variable ] [j] [line 2851]
+!238 = !{!"0x2e\00matvec_sub\00matvec_sub\00\002814\001\001\000\006\00256\001\002814", !1, !5, !239, null, null, null, null, !241} ; [ DW_TAG_subprogram ] [line 2814] [local] [def] [matvec_sub]
+!239 = !{!"0x15\00\000\000\000\000\000\000", i32 0, null, null, !240, null, null, null} ; [ DW_TAG_subroutine_type ] [line 0, size 0, align 0, offset 0] [from ]
+!240 = !{null, !224, !105, !105}
+!241 = !{!242, !243, !244, !245}
+!242 = !{!"0x101\00ablock\0016780030\000", !238, !5, !224} ; [ DW_TAG_arg_variable ] [ablock] [line 2814]
+!243 = !{!"0x101\00avec\0033557246\000", !238, !5, !105} ; [ DW_TAG_arg_variable ] [avec] [line 2814]
+!244 = !{!"0x101\00bvec\0050334462\000", !238, !5, !105} ; [ DW_TAG_arg_variable ] [bvec] [line 2814]
+!245 = !{!"0x100\00i\002823\000", !238, !5, !8} ; [ DW_TAG_auto_variable ] [i] [line 2823]
+!246 = !{!"0x2e\00binvcrhs\00binvcrhs\00\002885\001\001\000\006\00256\001\002885", !1, !5, !247, null, null, null, null, !249} ; [ DW_TAG_subprogram ] [line 2885] [local] [def] [binvcrhs]
+!247 = !{!"0x15\00\000\000\000\000\000\000", i32 0, null, null, !248, null, null, null} ; [ DW_TAG_subroutine_type ] [line 0, size 0, align 0, offset 0] [from ]
+!248 = !{null, !224, !224, !105}
+!249 = !{!250, !251, !252, !253, !254}
+!250 = !{!"0x101\00lhs\0016780101\000", !246, !5, !224} ; [ DW_TAG_arg_variable ] [lhs] [line 2885]
+!251 = !{!"0x101\00c\0033557317\000", !246, !5, !224} ; [ DW_TAG_arg_variable ] [c] [line 2885]
+!252 = !{!"0x101\00r\0050334533\000", !246, !5, !105} ; [ DW_TAG_arg_variable ] [r] [line 2885]
+!253 = !{!"0x100\00pivot\002890\000", !246, !5, !20} ; [ DW_TAG_auto_variable ] [pivot] [line 2890]
+!254 = !{!"0x100\00coeff\002890\000", !246, !5, !20} ; [ DW_TAG_auto_variable ] [coeff] [line 2890]
+!255 = !{!"0x2e\00lhsz\00lhsz\00\001475\001\001\000\006\00256\001\001475", !1, !5, !115, null, null, null, null, !256} ; [ DW_TAG_subprogram ] [line 1475] [local] [def] [lhsz]
+!256 = !{!257, !258, !259}
+!257 = !{!"0x100\00i\001484\000", !255, !5, !8} ; [ DW_TAG_auto_variable ] [i] [line 1484]
+!258 = !{!"0x100\00j\001484\000", !255, !5, !8} ; [ DW_TAG_auto_variable ] [j] [line 1484]
+!259 = !{!"0x100\00k\001484\000", !255, !5, !8} ; [ DW_TAG_auto_variable ] [k] [line 1484]
+!260 = !{!"0x2e\00y_solve\00y_solve\00\003299\001\001\000\006\00256\001\003299", !1, !5, !115, null, null, null, null, !2} ; [ DW_TAG_subprogram ] [line 3299] [local] [def] [y_solve]
+!261 = !{!"0x2e\00y_backsubstitute\00y_backsubstitute\00\003323\001\001\000\006\00256\001\003323", !1, !5, !115, null, null, null, null, !262} ; [ DW_TAG_subprogram ] [line 3323] [local] [def] [y_backsubstitute]
+!262 = !{!263, !264, !265, !266, !267}
+!263 = !{!"0x100\00i\003335\000", !261, !5, !8} ; [ DW_TAG_auto_variable ] [i] [line 3335]
+!264 = !{!"0x100\00j\003335\000", !261, !5, !8} ; [ DW_TAG_auto_variable ] [j] [line 3335]
+!265 = !{!"0x100\00k\003335\000", !261, !5, !8} ; [ DW_TAG_auto_variable ] [k] [line 3335]
+!266 = !{!"0x100\00m\003335\000", !261, !5, !8} ; [ DW_TAG_auto_variable ] [m] [line 3335]
+!267 = !{!"0x100\00n\003335\000", !261, !5, !8} ; [ DW_TAG_auto_variable ] [n] [line 3335]
+!268 = !{!"0x2e\00y_solve_cell\00y_solve_cell\00\003355\001\001\000\006\00256\001\003355", !1, !5, !115, null, null, null, null, !269} ; [ DW_TAG_subprogram ] [line 3355] [local] [def] [y_solve_cell]
+!269 = !{!270, !271, !272, !273}
+!270 = !{!"0x100\00i\003370\000", !268, !5, !8} ; [ DW_TAG_auto_variable ] [i] [line 3370]
+!271 = !{!"0x100\00j\003370\000", !268, !5, !8} ; [ DW_TAG_auto_variable ] [j] [line 3370]
+!272 = !{!"0x100\00k\003370\000", !268, !5, !8} ; [ DW_TAG_auto_variable ] [k] [line 3370]
+!273 = !{!"0x100\00jsize\003370\000", !268, !5, !8} ; [ DW_TAG_auto_variable ] [jsize] [line 3370]
+!274 = !{!"0x2e\00lhsy\00lhsy\00\001181\001\001\000\006\00256\001\001181", !1, !5, !115, null, null, null, null, !275} ; [ DW_TAG_subprogram ] [line 1181] [local] [def] [lhsy]
+!275 = !{!276, !277, !278}
+!276 = !{!"0x100\00i\001190\000", !274, !5, !8} ; [ DW_TAG_auto_variable ] [i] [line 1190]
+!277 = !{!"0x100\00j\001190\000", !274, !5, !8} ; [ DW_TAG_auto_variable ] [j] [line 1190]
+!278 = !{!"0x100\00k\001190\000", !274, !5, !8} ; [ DW_TAG_auto_variable ] [k] [line 1190]
+!279 = !{!"0x2e\00x_solve\00x_solve\00\002658\001\001\000\006\00256\001\002658", !1, !5, !115, null, null, null, null, !2} ; [ DW_TAG_subprogram ] [line 2658] [local] [def] [x_solve]
+!280 = !{!"0x2e\00x_backsubstitute\00x_backsubstitute\00\002684\001\001\000\006\00256\001\002684", !1, !5, !115, null, null, null, null, !281} ; [ DW_TAG_subprogram ] [line 2684] [local] [def] [x_backsubstitute]
+!281 = !{!282, !283, !284, !285, !286}
+!282 = !{!"0x100\00i\002696\000", !280, !5, !8} ; [ DW_TAG_auto_variable ] [i] [line 2696]
+!283 = !{!"0x100\00j\002696\000", !280, !5, !8} ; [ DW_TAG_auto_variable ] [j] [line 2696]
+!284 = !{!"0x100\00k\002696\000", !280, !5, !8} ; [ DW_TAG_auto_variable ] [k] [line 2696]
+!285 = !{!"0x100\00m\002696\000", !280, !5, !8} ; [ DW_TAG_auto_variable ] [m] [line 2696]
+!286 = !{!"0x100\00n\002696\000", !280, !5, !8} ; [ DW_TAG_auto_variable ] [n] [line 2696]
+!287 = !{!"0x2e\00x_solve_cell\00x_solve_cell\00\002716\001\001\000\006\00256\001\002716", !1, !5, !115, null, null, null, null, !288} ; [ DW_TAG_subprogram ] [line 2716] [local] [def] [x_solve_cell]
+!288 = !{!289, !290, !291, !292}
+!289 = !{!"0x100\00i\002728\000", !287, !5, !8} ; [ DW_TAG_auto_variable ] [i] [line 2728]
+!290 = !{!"0x100\00j\002728\000", !287, !5, !8} ; [ DW_TAG_auto_variable ] [j] [line 2728]
+!291 = !{!"0x100\00k\002728\000", !287, !5, !8} ; [ DW_TAG_auto_variable ] [k] [line 2728]
+!292 = !{!"0x100\00isize\002728\000", !287, !5, !8} ; [ DW_TAG_auto_variable ] [isize] [line 2728]
+!293 = !{!"0x2e\00lhsx\00lhsx\00\00898\001\001\000\006\00256\001\00898", !1, !5, !115, null, null, null, null, !294} ; [ DW_TAG_subprogram ] [line 898] [local] [def] [lhsx]
+!294 = !{!295, !296, !297}
+!295 = !{!"0x100\00i\00907\000", !293, !5, !8} ; [ DW_TAG_auto_variable ] [i] [line 907]
+!296 = !{!"0x100\00j\00907\000", !293, !5, !8} ; [ DW_TAG_auto_variable ] [j] [line 907]
+!297 = !{!"0x100\00k\00907\000", !293, !5, !8} ; [ DW_TAG_auto_variable ] [k] [line 907]
+!298 = !{!299, !304, !305, !309, !310, !311, !312, !313, !314, !315, !316, !317, !318, !319, !320, !321, !322, !323, !324, !325, !326, !327, !328, !329, !330, !331, !332, !333, !334, !335, !336, !337, !338, !339, !340, !341, !342, !343, !347, !350, !351, !352, !353, !354, !355, !356, !360, !361, !362, !363, !364, !365, !366, !367, !368, !369, !370, !371, !372, !373, !374, !375, !376, !377, !378, !379, !380, !381, !382, !383, !384, !385, !386, !387, !388, !389, !390, !391, !392, !393, !394, !395, !396, !397, !398, !399, !400, !401, !402, !403, !404, !405, !406, !407, !408, !409, !410, !411, !412, !413, !414, !415, !416, !417, !418, !419, !422, !426, !427, !430, !431, !434, !435, !436, !437}
+!299 = !{!"0x34\00grid_points\00grid_points\00\0028\001\001", null, !300, !302, [3 x i32]* @grid_points, null} ; [ DW_TAG_variable ] [grid_points] [line 28] [local] [def]
+!300 = !{!"0x29", !301} ; [ DW_TAG_file_type ] [/home/hfinkel/src/NPB2.3-omp-C/BT/./header.h]
+!301 = !{!"./header.h", !"/home/hfinkel/src/NPB2.3-omp-C/BT"}
+!302 = !{!"0x1\00\000\0096\0032\000\000", null, null, !8, !303, i32 0, null, null, null} ; [ DW_TAG_array_type ] [line 0, size 96, align 32, offset 0] [from int]
+!303 = !{!178}
+!304 = !{!"0x34\00dt\00dt\00\0035\001\001", null, !300, !20, null, null} ; [ DW_TAG_variable ] [dt] [line 35] [local] [def]
+!305 = !{!"0x34\00rhs\00rhs\00\0068\001\001", null, !300, !306, null, null} ; [ DW_TAG_variable ] [rhs] [line 68] [local] [def]
+!306 = !{!"0x1\00\000\001385839040\0064\000\000", null, null, !20, !307, i32 0, null, null, null} ; [ DW_TAG_array_type ] [line 0, size 1385839040, align 64, offset 0] [from double]
+!307 = !{!308, !308, !308, !93}
+!308 = !{!"0x21\000\00163"} ; [ DW_TAG_subrange_type ] [0, 162]
+!309 = !{!"0x34\00zzcon5\00zzcon5\00\0042\001\001", null, !300, !20, null, null} ; [ DW_TAG_variable ] [zzcon5] [line 42] [local] [def]
+!310 = !{!"0x34\00zzcon4\00zzcon4\00\0042\001\001", null, !300, !20, null, null} ; [ DW_TAG_variable ] [zzcon4] [line 42] [local] [def]
+!311 = !{!"0x34\00zzcon3\00zzcon3\00\0042\001\001", null, !300, !20, null, null} ; [ DW_TAG_variable ] [zzcon3] [line 42] [local] [def]
+!312 = !{!"0x34\00dz5tz1\00dz5tz1\00\0043\001\001", null, !300, !20, null, null} ; [ DW_TAG_variable ] [dz5tz1] [line 43] [local] [def]
+!313 = !{!"0x34\00dz4tz1\00dz4tz1\00\0043\001\001", null, !300, !20, null, null} ; [ DW_TAG_variable ] [dz4tz1] [line 43] [local] [def]
+!314 = !{!"0x34\00dz3tz1\00dz3tz1\00\0043\001\001", null, !300, !20, null, null} ; [ DW_TAG_variable ] [dz3tz1] [line 43] [local] [def]
+!315 = !{!"0x34\00zzcon2\00zzcon2\00\0042\001\001", null, !300, !20, null, null} ; [ DW_TAG_variable ] [zzcon2] [line 42] [local] [def]
+!316 = !{!"0x34\00dz2tz1\00dz2tz1\00\0043\001\001", null, !300, !20, null, null} ; [ DW_TAG_variable ] [dz2tz1] [line 43] [local] [def]
+!317 = !{!"0x34\00tz2\00tz2\00\0031\001\001", null, !300, !20, null, null} ; [ DW_TAG_variable ] [tz2] [line 31] [local] [def]
+!318 = !{!"0x34\00dz1tz1\00dz1tz1\00\0043\001\001", null, !300, !20, null, null} ; [ DW_TAG_variable ] [dz1tz1] [line 43] [local] [def]
+!319 = !{!"0x34\00yycon5\00yycon5\00\0040\001\001", null, !300, !20, null, null} ; [ DW_TAG_variable ] [yycon5] [line 40] [local] [def]
+!320 = !{!"0x34\00yycon4\00yycon4\00\0040\001\001", null, !300, !20, null, null} ; [ DW_TAG_variable ] [yycon4] [line 40] [local] [def]
+!321 = !{!"0x34\00yycon3\00yycon3\00\0040\001\001", null, !300, !20, null, null} ; [ DW_TAG_variable ] [yycon3] [line 40] [local] [def]
+!322 = !{!"0x34\00dy5ty1\00dy5ty1\00\0041\001\001", null, !300, !20, null, null} ; [ DW_TAG_variable ] [dy5ty1] [line 41] [local] [def]
+!323 = !{!"0x34\00dy4ty1\00dy4ty1\00\0041\001\001", null, !300, !20, null, null} ; [ DW_TAG_variable ] [dy4ty1] [line 41] [local] [def]
+!324 = !{!"0x34\00dy3ty1\00dy3ty1\00\0041\001\001", null, !300, !20, null, null} ; [ DW_TAG_variable ] [dy3ty1] [line 41] [local] [def]
+!325 = !{!"0x34\00yycon2\00yycon2\00\0040\001\001", null, !300, !20, null, null} ; [ DW_TAG_variable ] [yycon2] [line 40] [local] [def]
+!326 = !{!"0x34\00dy2ty1\00dy2ty1\00\0041\001\001", null, !300, !20, null, null} ; [ DW_TAG_variable ] [dy2ty1] [line 41] [local] [def]
+!327 = !{!"0x34\00ty2\00ty2\00\0031\001\001", null, !300, !20, null, null} ; [ DW_TAG_variable ] [ty2] [line 31] [local] [def]
+!328 = !{!"0x34\00dy1ty1\00dy1ty1\00\0041\001\001", null, !300, !20, null, null} ; [ DW_TAG_variable ] [dy1ty1] [line 41] [local] [def]
+!329 = !{!"0x34\00dssp\00dssp\00\0035\001\001", null, !300, !20, null, null} ; [ DW_TAG_variable ] [dssp] [line 35] [local] [def]
+!330 = !{!"0x34\00c1\00c1\00\0045\001\001", null, !300, !20, null, null} ; [ DW_TAG_variable ] [c1] [line 45] [local] [def]
+!331 = !{!"0x34\00xxcon5\00xxcon5\00\0038\001\001", null, !300, !20, null, null} ; [ DW_TAG_variable ] [xxcon5] [line 38] [local] [def]
+!332 = !{!"0x34\00xxcon4\00xxcon4\00\0038\001\001", null, !300, !20, null, null} ; [ DW_TAG_variable ] [xxcon4] [line 38] [local] [def]
+!333 = !{!"0x34\00xxcon3\00xxcon3\00\0038\001\001", null, !300, !20, null, null} ; [ DW_TAG_variable ] [xxcon3] [line 38] [local] [def]
+!334 = !{!"0x34\00dx5tx1\00dx5tx1\00\0039\001\001", null, !300, !20, null, null} ; [ DW_TAG_variable ] [dx5tx1] [line 39] [local] [def]
+!335 = !{!"0x34\00dx4tx1\00dx4tx1\00\0039\001\001", null, !300, !20, null, null} ; [ DW_TAG_variable ] [dx4tx1] [line 39] [local] [def]
+!336 = !{!"0x34\00dx3tx1\00dx3tx1\00\0039\001\001", null, !300, !20, null, null} ; [ DW_TAG_variable ] [dx3tx1] [line 39] [local] [def]
+!337 = !{!"0x34\00c2\00c2\00\0045\001\001", null, !300, !20, null, null} ; [ DW_TAG_variable ] [c2] [line 45] [local] [def]
+!338 = !{!"0x34\00con43\00con43\00\0048\001\001", null, !300, !20, null, null} ; [ DW_TAG_variable ] [con43] [line 48] [local] [def]
+!339 = !{!"0x34\00xxcon2\00xxcon2\00\0038\001\001", null, !300, !20, null, null} ; [ DW_TAG_variable ] [xxcon2] [line 38] [local] [def]
+!340 = !{!"0x34\00dx2tx1\00dx2tx1\00\0039\001\001", null, !300, !20, null, null} ; [ DW_TAG_variable ] [dx2tx1] [line 39] [local] [def]
+!341 = !{!"0x34\00tx2\00tx2\00\0031\001\001", null, !300, !20, null, null} ; [ DW_TAG_variable ] [tx2] [line 31] [local] [def]
+!342 = !{!"0x34\00dx1tx1\00dx1tx1\00\0039\001\001", null, !300, !20, null, null} ; [ DW_TAG_variable ] [dx1tx1] [line 39] [local] [def]
+!343 = !{!"0x34\00forcing\00forcing\00\0066\001\001", null, !300, !344, null, null} ; [ DW_TAG_variable ] [forcing] [line 66] [local] [def]
+!344 = !{!"0x1\00\000\001663006848\0064\000\000", null, null, !20, !345, i32 0, null, null, null} ; [ DW_TAG_array_type ] [line 0, size 1663006848, align 64, offset 0] [from double]
+!345 = !{!308, !308, !308, !346}
+!346 = !{!"0x21\000\006"} ; [ DW_TAG_subrange_type ] [0, 5]
+!347 = !{!"0x34\00qs\00qs\00\0063\001\001", null, !300, !348, null, null} ; [ DW_TAG_variable ] [qs] [line 63] [local] [def]
+!348 = !{!"0x1\00\000\00277167808\0064\000\000", null, null, !20, !349, i32 0, null, null, null} ; [ DW_TAG_array_type ] [line 0, size 277167808, align 64, offset 0] [from double]
+!349 = !{!308, !308, !308}
+!350 = !{!"0x34\00square\00square\00\0065\001\001", null, !300, !348, null, null} ; [ DW_TAG_variable ] [square] [line 65] [local] [def]
+!351 = !{!"0x34\00ws\00ws\00\0062\001\001", null, !300, !348, null, null} ; [ DW_TAG_variable ] [ws] [line 62] [local] [def]
+!352 = !{!"0x34\00vs\00vs\00\0061\001\001", null, !300, !348, null, null} ; [ DW_TAG_variable ] [vs] [line 61] [local] [def]
+!353 = !{!"0x34\00us\00us\00\0060\001\001", null, !300, !348, null, null} ; [ DW_TAG_variable ] [us] [line 60] [local] [def]
+!354 = !{!"0x34\00rho_i\00rho_i\00\0064\001\001", null, !300, !348, null, null} ; [ DW_TAG_variable ] [rho_i] [line 64] [local] [def]
+!355 = !{!"0x34\00u\00u\00\0067\001\001", null, !300, !306, null, null} ; [ DW_TAG_variable ] [u] [line 67] [local] [def]
+!356 = !{!"0x34\00ce\00ce\00\0036\001\001", null, !300, !357, null, null} ; [ DW_TAG_variable ] [ce] [line 36] [local] [def]
+!357 = !{!"0x1\00\000\004160\0064\000\000", null, null, !20, !358, i32 0, null, null, null} ; [ DW_TAG_array_type ] [line 0, size 4160, align 64, offset 0] [from double]
+!358 = !{!93, !359}
+!359 = !{!"0x21\000\0013"} ; [ DW_TAG_subrange_type ] [0, 12]
+!360 = !{!"0x34\00dnzm1\00dnzm1\00\0044\001\001", null, !300, !20, null, null} ; [ DW_TAG_variable ] [dnzm1] [line 44] [local] [def]
+!361 = !{!"0x34\00dnym1\00dnym1\00\0044\001\001", null, !300, !20, null, null} ; [ DW_TAG_variable ] [dnym1] [line 44] [local] [def]
+!362 = !{!"0x34\00dnxm1\00dnxm1\00\0044\001\001", null, !300, !20, null, null} ; [ DW_TAG_variable ] [dnxm1] [line 44] [local] [def]
+!363 = !{!"0x34\00zzcon1\00zzcon1\00\0042\001\001", null, !300, !20, null, null} ; [ DW_TAG_variable ] [zzcon1] [line 42] [local] [def]
+!364 = !{!"0x34\00yycon1\00yycon1\00\0040\001\001", null, !300, !20, null, null} ; [ DW_TAG_variable ] [yycon1] [line 40] [local] [def]
+!365 = !{!"0x34\00xxcon1\00xxcon1\00\0038\001\001", null, !300, !20, null, null} ; [ DW_TAG_variable ] [xxcon1] [line 38] [local] [def]
+!366 = !{!"0x34\00con16\00con16\00\0048\001\001", null, !300, !20, null, null} ; [ DW_TAG_variable ] [con16] [line 48] [local] [def]
+!367 = !{!"0x34\00c2iv\00c2iv\00\0048\001\001", null, !300, !20, null, null} ; [ DW_TAG_variable ] [c2iv] [line 48] [local] [def]
+!368 = !{!"0x34\00c3c4tz3\00c3c4tz3\00\0048\001\001", null, !300, !20, null, null} ; [ DW_TAG_variable ] [c3c4tz3] [line 48] [local] [def]
+!369 = !{!"0x34\00c3c4ty3\00c3c4ty3\00\0048\001\001", null, !300, !20, null, null} ; [ DW_TAG_variable ] [c3c4ty3] [line 48] [local] [def]
+!370 = !{!"0x34\00c3c4tx3\00c3c4tx3\00\0048\001\001", null, !300, !20, null, null} ; [ DW_TAG_variable ] [c3c4tx3] [line 48] [local] [def]
+!371 = !{!"0x34\00comz6\00comz6\00\0047\001\001", null, !300, !20, null, null} ; [ DW_TAG_variable ] [comz6] [line 47] [local] [def]
+!372 = !{!"0x34\00comz5\00comz5\00\0047\001\001", null, !300, !20, null, null} ; [ DW_TAG_variable ] [comz5] [line 47] [local] [def]
+!373 = !{!"0x34\00comz4\00comz4\00\0047\001\001", null, !300, !20, null, null} ; [ DW_TAG_variable ] [comz4] [line 47] [local] [def]
+!374 = !{!"0x34\00comz1\00comz1\00\0047\001\001", null, !300, !20, null, null} ; [ DW_TAG_variable ] [comz1] [line 47] [local] [def]
+!375 = !{!"0x34\00dtdssp\00dtdssp\00\0045\001\001", null, !300, !20, null, null} ; [ DW_TAG_variable ] [dtdssp] [line 45] [local] [def]
+!376 = !{!"0x34\00c2dttz1\00c2dttz1\00\0047\001\001", null, !300, !20, null, null} ; [ DW_TAG_variable ] [c2dttz1] [line 47] [local] [def]
+!377 = !{!"0x34\00c2dtty1\00c2dtty1\00\0047\001\001", null, !300, !20, null, null} ; [ DW_TAG_variable ] [c2dtty1] [line 47] [local] [def]
+!378 = !{!"0x34\00c2dttx1\00c2dttx1\00\0047\001\001", null, !300, !20, null, null} ; [ DW_TAG_variable ] [c2dttx1] [line 47] [local] [def]
+!379 = !{!"0x34\00dttz2\00dttz2\00\0046\001\001", null, !300, !20, null, null} ; [ DW_TAG_variable ] [dttz2] [line 46] [local] [def]
+!380 = !{!"0x34\00dttz1\00dttz1\00\0046\001\001", null, !300, !20, null, null} ; [ DW_TAG_variable ] [dttz1] [line 46] [local] [def]
+!381 = !{!"0x34\00dtty2\00dtty2\00\0046\001\001", null, !300, !20, null, null} ; [ DW_TAG_variable ] [dtty2] [line 46] [local] [def]
+!382 = !{!"0x34\00dtty1\00dtty1\00\0046\001\001", null, !300, !20, null, null} ; [ DW_TAG_variable ] [dtty1] [line 46] [local] [def]
+!383 = !{!"0x34\00dttx2\00dttx2\00\0046\001\001", null, !300, !20, null, null} ; [ DW_TAG_variable ] [dttx2] [line 46] [local] [def]
+!384 = !{!"0x34\00dttx1\00dttx1\00\0046\001\001", null, !300, !20, null, null} ; [ DW_TAG_variable ] [dttx1] [line 46] [local] [def]
+!385 = !{!"0x34\00c5dssp\00c5dssp\00\0045\001\001", null, !300, !20, null, null} ; [ DW_TAG_variable ] [c5dssp] [line 45] [local] [def]
+!386 = !{!"0x34\00c4dssp\00c4dssp\00\0045\001\001", null, !300, !20, null, null} ; [ DW_TAG_variable ] [c4dssp] [line 45] [local] [def]
+!387 = !{!"0x34\00dzmax\00dzmax\00\0037\001\001", null, !300, !20, null, null} ; [ DW_TAG_variable ] [dzmax] [line 37] [local] [def]
+!388 = !{!"0x34\00dymax\00dymax\00\0037\001\001", null, !300, !20, null, null} ; [ DW_TAG_variable ] [dymax] [line 37] [local] [def]
+!389 = !{!"0x34\00dxmax\00dxmax\00\0037\001\001", null, !300, !20, null, null} ; [ DW_TAG_variable ] [dxmax] [line 37] [local] [def]
+!390 = !{!"0x34\00dz5\00dz5\00\0034\001\001", null, !300, !20, null, null} ; [ DW_TAG_variable ] [dz5] [line 34] [local] [def]
+!391 = !{!"0x34\00dz4\00dz4\00\0034\001\001", null, !300, !20, null, null} ; [ DW_TAG_variable ] [dz4] [line 34] [local] [def]
+!392 = !{!"0x34\00dz3\00dz3\00\0034\001\001", null, !300, !20, null, null} ; [ DW_TAG_variable ] [dz3] [line 34] [local] [def]
+!393 = !{!"0x34\00dz2\00dz2\00\0034\001\001", null, !300, !20, null, null} ; [ DW_TAG_variable ] [dz2] [line 34] [local] [def]
+!394 = !{!"0x34\00dz1\00dz1\00\0034\001\001", null, !300, !20, null, null} ; [ DW_TAG_variable ] [dz1] [line 34] [local] [def]
+!395 = !{!"0x34\00dy5\00dy5\00\0033\001\001", null, !300, !20, null, null} ; [ DW_TAG_variable ] [dy5] [line 33] [local] [def]
+!396 = !{!"0x34\00dy4\00dy4\00\0033\001\001", null, !300, !20, null, null} ; [ DW_TAG_variable ] [dy4] [line 33] [local] [def]
+!397 = !{!"0x34\00dy3\00dy3\00\0033\001\001", null, !300, !20, null, null} ; [ DW_TAG_variable ] [dy3] [line 33] [local] [def]
+!398 = !{!"0x34\00dy2\00dy2\00\0033\001\001", null, !300, !20, null, null} ; [ DW_TAG_variable ] [dy2] [line 33] [local] [def]
+!399 = !{!"0x34\00dy1\00dy1\00\0033\001\001", null, !300, !20, null, null} ; [ DW_TAG_variable ] [dy1] [line 33] [local] [def]
+!400 = !{!"0x34\00dx5\00dx5\00\0032\001\001", null, !300, !20, null, null} ; [ DW_TAG_variable ] [dx5] [line 32] [local] [def]
+!401 = !{!"0x34\00dx4\00dx4\00\0032\001\001", null, !300, !20, null, null} ; [ DW_TAG_variable ] [dx4] [line 32] [local] [def]
+!402 = !{!"0x34\00dx3\00dx3\00\0032\001\001", null, !300, !20, null, null} ; [ DW_TAG_variable ] [dx3] [line 32] [local] [def]
+!403 = !{!"0x34\00dx2\00dx2\00\0032\001\001", null, !300, !20, null, null} ; [ DW_TAG_variable ] [dx2] [line 32] [local] [def]
+!404 = !{!"0x34\00dx1\00dx1\00\0032\001\001", null, !300, !20, null, null} ; [ DW_TAG_variable ] [dx1] [line 32] [local] [def]
+!405 = !{!"0x34\00tz3\00tz3\00\0031\001\001", null, !300, !20, null, null} ; [ DW_TAG_variable ] [tz3] [line 31] [local] [def]
+!406 = !{!"0x34\00tz1\00tz1\00\0031\001\001", null, !300, !20, null, null} ; [ DW_TAG_variable ] [tz1] [line 31] [local] [def]
+!407 = !{!"0x34\00ty3\00ty3\00\0031\001\001", null, !300, !20, null, null} ; [ DW_TAG_variable ] [ty3] [line 31] [local] [def]
+!408 = !{!"0x34\00ty1\00ty1\00\0031\001\001", null, !300, !20, null, null} ; [ DW_TAG_variable ] [ty1] [line 31] [local] [def]
+!409 = !{!"0x34\00tx3\00tx3\00\0031\001\001", null, !300, !20, null, null} ; [ DW_TAG_variable ] [tx3] [line 31] [local] [def]
+!410 = !{!"0x34\00tx1\00tx1\00\0031\001\001", null, !300, !20, null, null} ; [ DW_TAG_variable ] [tx1] [line 31] [local] [def]
+!411 = !{!"0x34\00conz1\00conz1\00\0045\001\001", null, !300, !20, null, null} ; [ DW_TAG_variable ] [conz1] [line 45] [local] [def]
+!412 = !{!"0x34\00c1345\00c1345\00\0044\001\001", null, !300, !20, null, null} ; [ DW_TAG_variable ] [c1345] [line 44] [local] [def]
+!413 = !{!"0x34\00c3c4\00c3c4\00\0044\001\001", null, !300, !20, null, null} ; [ DW_TAG_variable ] [c3c4] [line 44] [local] [def]
+!414 = !{!"0x34\00c1c5\00c1c5\00\0044\001\001", null, !300, !20, null, null} ; [ DW_TAG_variable ] [c1c5] [line 44] [local] [def]
+!415 = !{!"0x34\00c1c2\00c1c2\00\0044\001\001", null, !300, !20, null, null} ; [ DW_TAG_variable ] [c1c2] [line 44] [local] [def]
+!416 = !{!"0x34\00c5\00c5\00\0045\001\001", null, !300, !20, null, null} ; [ DW_TAG_variable ] [c5] [line 45] [local] [def]
+!417 = !{!"0x34\00c4\00c4\00\0045\001\001", null, !300, !20, null, null} ; [ DW_TAG_variable ] [c4] [line 45] [local] [def]
+!418 = !{!"0x34\00c3\00c3\00\0045\001\001", null, !300, !20, null, null} ; [ DW_TAG_variable ] [c3] [line 45] [local] [def]
+!419 = !{!"0x34\00lhs\00lhs\00\0069\001\001", null, !300, !420, null, null} ; [ DW_TAG_variable ] [lhs] [line 69] [local] [def]
+!420 = !{!"0x1\00\000\0020787585600\0064\000\000", null, null, !20, !421, i32 0, null, null, null} ; [ DW_TAG_array_type ] [line 0, size 20787585600, align 64, offset 0] [from double]
+!421 = !{!308, !308, !308, !178, !93, !93}
+!422 = !{!"0x34\00q\00q\00\0073\001\001", null, !300, !423, null, null} ; [ DW_TAG_variable ] [q] [line 73] [local] [def]
+!423 = !{!"0x1\00\000\0010368\0064\000\000", null, null, !20, !424, i32 0, null, null, null} ; [ DW_TAG_array_type ] [line 0, size 10368, align 64, offset 0] [from double]
+!424 = !{!425}
+!425 = !{!"0x21\000\00162"} ; [ DW_TAG_subrange_type ] [0, 161]
+!426 = !{!"0x34\00cuf\00cuf\00\0072\001\001", null, !300, !423, null, null} ; [ DW_TAG_variable ] [cuf] [line 72] [local] [def]
+!427 = !{!"0x34\00buf\00buf\00\0075\001\001", null, !300, !428, null, null} ; [ DW_TAG_variable ] [buf] [line 75] [local] [def]
+!428 = !{!"0x1\00\000\0051840\0064\000\000", null, null, !20, !429, i32 0, null, null, null} ; [ DW_TAG_array_type ] [line 0, size 51840, align 64, offset 0] [from double]
+!429 = !{!425, !93}
+!430 = !{!"0x34\00ue\00ue\00\0074\001\001", null, !300, !428, null, null} ; [ DW_TAG_variable ] [ue] [line 74] [local] [def]
+!431 = !{!"0x34\00njac\00njac\00\0086\001\001", null, !300, !432, null, null} ; [ DW_TAG_variable ] [njac] [line 86] [local] [def]
+!432 = !{!"0x1\00\000\006886684800\0064\000\000", null, null, !20, !433, i32 0, null, null, null} ; [ DW_TAG_array_type ] [line 0, size 6886684800, align 64, offset 0] [from double]
+!433 = !{!308, !308, !425, !93, !93}
+!434 = !{!"0x34\00fjac\00fjac\00\0084\001\001", null, !300, !432, null, null} ; [ DW_TAG_variable ] [fjac] [line 84] [local] [def]
+!435 = !{!"0x34\00tmp3\00tmp3\00\0088\001\001", null, !300, !20, null, null} ; [ DW_TAG_variable ] [tmp3] [line 88] [local] [def]
+!436 = !{!"0x34\00tmp2\00tmp2\00\0088\001\001", null, !300, !20, null, null} ; [ DW_TAG_variable ] [tmp2] [line 88] [local] [def]
+!437 = !{!"0x34\00tmp1\00tmp1\00\0088\001\001", null, !300, !20, null, null} ; [ DW_TAG_variable ] [tmp1] [line 88] [local] [def]
+!438 = !{i32 2, !"Dwarf Version", i32 4}
+!439 = !MDLocation(line: 1898, scope: !440)
+!440 = !{!"0xb\001898\000\00107", !1, !114} ; [ DW_TAG_lexical_block ] [/home/hfinkel/src/NPB2.3-omp-C/BT/bt.c]
+!441 = !MDLocation(line: 1913, scope: !442)
+!442 = !{!"0xb\001913\000\00115", !1, !114} ; [ DW_TAG_lexical_block ] [/home/hfinkel/src/NPB2.3-omp-C/BT/bt.c]
+!443 = !MDLocation(line: 1923, scope: !114)
+!444 = !{!"int", !445}
+!445 = !{!"omnipotent char", !446}
+!446 = !{!"Simple C/C++ TBAA"}
+!447 = !{i32 1}
+!448 = !MDLocation(line: 1925, scope: !449)
+!449 = !{!"0xb\001925\000\00121", !1, !114} ; [ DW_TAG_lexical_block ] [/home/hfinkel/src/NPB2.3-omp-C/BT/bt.c]
+!450 = !MDLocation(line: 1939, scope: !451)
+!451 = !{!"0xb\001939\000\00127", !1, !114} ; [ DW_TAG_lexical_block ] [/home/hfinkel/src/NPB2.3-omp-C/BT/bt.c]
+!452 = !MDLocation(line: 1940, scope: !453)
+!453 = !{!"0xb\001940\000\00129", !1, !454} ; [ DW_TAG_lexical_block ] [/home/hfinkel/src/NPB2.3-omp-C/BT/bt.c]
+!454 = !{!"0xb\001939\000\00128", !1, !451} ; [ DW_TAG_lexical_block ] [/home/hfinkel/src/NPB2.3-omp-C/BT/bt.c]
+!455 = !MDLocation(line: 1941, scope: !456)
+!456 = !{!"0xb\001941\000\00131", !1, !457} ; [ DW_TAG_lexical_block ] [/home/hfinkel/src/NPB2.3-omp-C/BT/bt.c]
+!457 = !{!"0xb\001940\000\00130", !1, !453} ; [ DW_TAG_lexical_block ] [/home/hfinkel/src/NPB2.3-omp-C/BT/bt.c]
+!458 = !MDLocation(line: 2020, scope: !459)
+!459 = !{!"0xb\002020\000\00149", !1, !460} ; [ DW_TAG_lexical_block ] [/home/hfinkel/src/NPB2.3-omp-C/BT/bt.c]
+!460 = !{!"0xb\002019\000\00148", !1, !461} ; [ DW_TAG_lexical_block ] [/home/hfinkel/src/NPB2.3-omp-C/BT/bt.c]
+!461 = !{!"0xb\002019\000\00147", !1, !462} ; [ DW_TAG_lexical_block ] [/home/hfinkel/src/NPB2.3-omp-C/BT/bt.c]
+!462 = !{!"0xb\002018\000\00146", !1, !463} ; [ DW_TAG_lexical_block ] [/home/hfinkel/src/NPB2.3-omp-C/BT/bt.c]
+!463 = !{!"0xb\002018\000\00145", !1, !114} ; [ DW_TAG_lexical_block ] [/home/hfinkel/src/NPB2.3-omp-C/BT/bt.c]
+!464 = !{i32 1, !"Debug Info Version", i32 2}
diff --git a/test/CodeGen/PowerPC/preincprep-invoke.ll b/test/CodeGen/PowerPC/preincprep-invoke.ll
new file mode 100644
index 0000000..473b7d0
--- /dev/null
+++ b/test/CodeGen/PowerPC/preincprep-invoke.ll
@@ -0,0 +1,50 @@
+; RUN: llc -mcpu=pwr7 < %s | FileCheck %s
+target datalayout = "E-m:e-i64:64-n32:64"
+target triple = "powerpc64-unknown-linux-gnu"
+
+@.str1 = external unnamed_addr constant [1 x i8], align 1
+@.str2 = external unnamed_addr constant [39 x i8], align 1
+
+declare void @_ZN13CStdOutStreamlsEPKc()
+
+declare void @_ZN13CStdOutStream5FlushEv()
+
+declare i32 @__gxx_personality_v0(...)
+
+define void @_Z11GetPasswordP13CStdOutStreamb() {
+entry:
+ br label %for.cond.i.i
+
+for.cond.i.i: ; preds = %for.cond.i.i, %entry
+ br i1 undef, label %_ZN11CStringBaseIcEC2EPKc.exit.critedge, label %for.cond.i.i
+
+_ZN11CStringBaseIcEC2EPKc.exit.critedge: ; preds = %for.cond.i.i
+ invoke void @_ZN13CStdOutStreamlsEPKc()
+ to label %invoke.cont unwind label %lpad
+
+invoke.cont: ; preds = %_ZN11CStringBaseIcEC2EPKc.exit.critedge
+ invoke void @_ZN13CStdOutStream5FlushEv()
+ to label %invoke.cont4 unwind label %lpad
+
+invoke.cont4: ; preds = %invoke.cont
+ %call7 = invoke i8* @getpass()
+ to label %for.cond.i.i30 unwind label %lpad
+
+; CHECK-LABEL: @_Z11GetPasswordP13CStdOutStreamb
+; CHECK: addi {{[0-9]+}}, 3, -1
+
+for.cond.i.i30: ; preds = %for.cond.i.i30, %invoke.cont4
+ %indvars.iv.i.i26 = phi i64 [ %indvars.iv.next.i.i29, %for.cond.i.i30 ], [ 0, %invoke.cont4 ]
+ %arrayidx.i.i27 = getelementptr inbounds i8* %call7, i64 %indvars.iv.i.i26
+ %0 = load i8* %arrayidx.i.i27, align 1
+ %indvars.iv.next.i.i29 = add nuw nsw i64 %indvars.iv.i.i26, 1
+ br label %for.cond.i.i30
+
+lpad: ; preds = %invoke.cont4, %invoke.cont, %_ZN11CStringBaseIcEC2EPKc.exit.critedge
+ %1 = landingpad { i8*, i32 } personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*)
+ cleanup
+ resume { i8*, i32 } undef
+}
+
+declare i8* @getpass()
+
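The `addi {{[0-9]+}}, 3, -1` this test expects is the base-pointer bias that PowerPC pre-increment load preparation introduces: biasing the base by the negative stride lets every loop iteration use an update-form load (lbzu). A minimal sketch of the rewritten loop shape, assuming stride 1 and using this file's IR dialect (`@preincprep_sketch` and `%base.biased` are hypothetical names, not the pass's actual output):

define void @preincprep_sketch(i8* %base.biased) {
entry:
  br label %loop

loop:
  ; the address update is now a pointer phi bumped by the stride, so
  ; the backend can fold the bump into a load-with-update; the first
  ; updated address (%base.biased + 1) lands on element 0
  %p = phi i8* [ %base.biased, %entry ], [ %p.next, %loop ]
  %p.next = getelementptr inbounds i8* %p, i64 1
  %v = load i8* %p.next, align 1
  br label %loop
}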
diff --git a/test/CodeGen/PowerPC/qpx-bv-sint.ll b/test/CodeGen/PowerPC/qpx-bv-sint.ll
new file mode 100644
index 0000000..0bc14ed
--- /dev/null
+++ b/test/CodeGen/PowerPC/qpx-bv-sint.ll
@@ -0,0 +1,33 @@
+target datalayout = "E-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-f128:128:128-v128:128:128-n32:64"
+target triple = "powerpc64-bgq-linux"
+; RUN: llc < %s -march=ppc64 -mcpu=a2q | FileCheck %s
+
+define void @s452() nounwind {
+entry:
+ br label %for.body4
+
+for.body4: ; preds = %for.body4, %entry
+ %conv.4 = sitofp i32 undef to double
+ %conv.5 = sitofp i32 undef to double
+ %mul.4.v.i0.1 = insertelement <2 x double> undef, double %conv.4, i32 0
+ %mul.4.v.i0.2 = insertelement <2 x double> %mul.4.v.i0.1, double %conv.5, i32 1
+ %mul.4 = fmul <2 x double> %mul.4.v.i0.2, undef
+ %add7.4 = fadd <2 x double> undef, %mul.4
+ store <2 x double> %add7.4, <2 x double>* undef, align 16
+ br i1 undef, label %for.end, label %for.body4
+
+for.end: ; preds = %for.body4
+ unreachable
+; CHECK-LABEL: @s452
+; CHECK: lfiwax [[REG1:[0-9]+]],
+; CHECK: fcfid [[REG2:[0-9]+]], [[REG1]]
+; FIXME: We could 'promote' this to a vector earlier and remove this splat.
+; CHECK: qvesplati {{[0-9]+}}, [[REG2]], 0
+; CHECK: qvfmul
+; CHECK: qvfadd
+; CHECK: qvesplati {{[0-9]+}},
+; FIXME: We can use qvstfcdx here instead of two stores.
+; CHECK: stfd
+; CHECK: stfd
+}
+
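The first FIXME above hints at converting the integer pair as a vector up front. A minimal sketch of that promoted form, assuming the FIXME's intent (the function name is hypothetical; this is not a committed transform):

define <2 x double> @promoted_sitofp_sketch(i32 %a, i32 %b) {
entry:
  ; build the integer pair first, then convert once as a vector,
  ; replacing the two scalar fcfid + qvesplati sequences
  %v0 = insertelement <2 x i32> undef, i32 %a, i32 0
  %v1 = insertelement <2 x i32> %v0, i32 %b, i32 1
  %f = sitofp <2 x i32> %v1 to <2 x double>
  ret <2 x double> %f
}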
diff --git a/test/CodeGen/PowerPC/qpx-bv.ll b/test/CodeGen/PowerPC/qpx-bv.ll
new file mode 100644
index 0000000..ae181de
--- /dev/null
+++ b/test/CodeGen/PowerPC/qpx-bv.ll
@@ -0,0 +1,37 @@
+; RUN: llc < %s -mcpu=a2q | FileCheck %s
+
+target datalayout = "E-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-f128:128:128-v128:128:128-n32:64"
+target triple = "powerpc64-bgq-linux"
+
+define <4 x double> @foo(double %f1, double %f2, double %f3, double %f4) {
+ %v1 = insertelement <4 x double> undef, double %f1, i32 0
+ %v2 = insertelement <4 x double> %v1, double %f2, i32 1
+ %v3 = insertelement <4 x double> %v2, double %f3, i32 2
+ %v4 = insertelement <4 x double> %v3, double %f4, i32 3
+ ret <4 x double> %v4
+
+; CHECK-LABEL: @foo
+; CHECK: qvgpci [[REG1:[0-9]+]], 275
+; CHECK-DAG: qvgpci [[REG2:[0-9]+]], 101
+; CHECK-DAG: qvfperm [[REG3:[0-9]+]], 3, 4, [[REG1]]
+; CHECK-DAG: qvfperm [[REG4:[0-9]+]], 1, 2, [[REG1]]
+; CHECK-DAG: qvfperm 1, [[REG4]], [[REG3]], [[REG2]]
+; CHECK: blr
+}
+
+define <4 x float> @goo(float %f1, float %f2, float %f3, float %f4) {
+ %v1 = insertelement <4 x float> undef, float %f1, i32 0
+ %v2 = insertelement <4 x float> %v1, float %f2, i32 1
+ %v3 = insertelement <4 x float> %v2, float %f3, i32 2
+ %v4 = insertelement <4 x float> %v3, float %f4, i32 3
+ ret <4 x float> %v4
+
+; CHECK-LABEL: @goo
+; CHECK: qvgpci [[REG1:[0-9]+]], 275
+; CHECK-DAG: qvgpci [[REG2:[0-9]+]], 101
+; CHECK-DAG: qvfperm [[REG3:[0-9]+]], 3, 4, [[REG1]]
+; CHECK-DAG: qvfperm [[REG4:[0-9]+]], 1, 2, [[REG1]]
+; CHECK-DAG: qvfperm 1, [[REG4]], [[REG3]], [[REG2]]
+; CHECK: blr
+}
+
diff --git a/test/CodeGen/PowerPC/qpx-func-clobber.ll b/test/CodeGen/PowerPC/qpx-func-clobber.ll
new file mode 100644
index 0000000..511fa38
--- /dev/null
+++ b/test/CodeGen/PowerPC/qpx-func-clobber.ll
@@ -0,0 +1,22 @@
+; RUN: llc < %s -march=ppc64 -mcpu=a2q | FileCheck %s
+target triple = "powerpc64-bgq-linux"
+
+declare <4 x double> @foo(<4 x double> %p)
+
+define <4 x double> @bar(<4 x double> %p, <4 x double> %q) {
+entry:
+ %v = call <4 x double> @foo(<4 x double> %p)
+ %w = call <4 x double> @foo(<4 x double> %q)
+ %x = fadd <4 x double> %v, %w
+ ret <4 x double> %x
+
+; CHECK-LABEL: @bar
+; CHECK: qvstfdx 2,
+; CHECK: bl foo
+; CHECK: qvstfdx 1,
+; CHECK: qvlfdx 1,
+; CHECK: bl foo
+; CHECK: qvlfdx [[REG:[0-9]+]],
+; CHECK: qvfadd 1, [[REG]], 1
+}
+
diff --git a/test/CodeGen/PowerPC/qpx-load.ll b/test/CodeGen/PowerPC/qpx-load.ll
new file mode 100644
index 0000000..bea3477
--- /dev/null
+++ b/test/CodeGen/PowerPC/qpx-load.ll
@@ -0,0 +1,26 @@
+; RUN: llc < %s -march=ppc64 -mcpu=a2q | FileCheck %s
+target triple = "powerpc64-bgq-linux"
+
+define <4 x double> @foo(<4 x double>* %p) {
+entry:
+ %v = load <4 x double>* %p, align 8
+ ret <4 x double> %v
+}
+
+; CHECK: @foo
+; CHECK-DAG: li [[REG1:[0-9]+]], 31
+; CHECK-DAG: qvlfdx [[REG4:[0-9]+]], 0, 3
+; CHECK-DAG: qvlfdx [[REG2:[0-9]+]], 3, [[REG1]]
+; CHECK-DAG: qvlpcldx [[REG3:[0-9]+]], 0, 3
+; CHECK-DAG: qvfperm 1, [[REG4]], [[REG2]], [[REG3]]
+; CHECK: blr
+
+define <4 x double> @bar(<4 x double>* %p) {
+entry:
+ %v = load <4 x double>* %p, align 32
+ ret <4 x double> %v
+}
+
+; CHECK: @bar
+; CHECK: qvlfdx
+
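The CHECK lines for @foo describe the usual QPX handling of an under-aligned vector load: two naturally aligned 32-byte loads bracketing the address (QPX loads ignore the low address bits), a permute control derived from those low bits (qvlpcldx), and a qvfperm to splice out the 32 bytes starting at %p. @bar avoids all of this because align 32 is QPX's natural alignment. A sketch of the address arithmetic, as an assumed reading of the lowering (IR has no data-dependent shuffle, so the final permute is described only in comments):

define <4 x double> @unaligned_load_sketch(<4 x double>* %p) {
entry:
  %addr = ptrtoint <4 x double>* %p to i64
  %lo.addr = and i64 %addr, -32              ; aligned quad at or below %p
  %hi.raw = add i64 %addr, 31
  %hi.addr = and i64 %hi.raw, -32            ; aligned quad covering the tail
  %lo.p = inttoptr i64 %lo.addr to <4 x double>*
  %hi.p = inttoptr i64 %hi.addr to <4 x double>*
  %lo = load <4 x double>* %lo.p, align 32
  %hi = load <4 x double>* %hi.p, align 32
  ; qvfperm would select the 32 bytes beginning at %p from %lo:%hi;
  ; returning %lo alone is only to keep the sketch compilable
  ret <4 x double> %lo
}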
diff --git a/test/CodeGen/PowerPC/qpx-recipest.ll b/test/CodeGen/PowerPC/qpx-recipest.ll
new file mode 100644
index 0000000..0e01358
--- /dev/null
+++ b/test/CodeGen/PowerPC/qpx-recipest.ll
@@ -0,0 +1,194 @@
+; RUN: llc < %s -mtriple=powerpc64-unknown-linux-gnu -mcpu=a2q -enable-unsafe-fp-math | FileCheck %s
+; RUN: llc < %s -mtriple=powerpc64-unknown-linux-gnu -mcpu=a2q | FileCheck -check-prefix=CHECK-SAFE %s
+target datalayout = "E-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-f128:128:128-v128:128:128-n32:64"
+target triple = "powerpc64-unknown-linux-gnu"
+
+declare <4 x double> @llvm.sqrt.v4f64(<4 x double>)
+declare <4 x float> @llvm.sqrt.v4f32(<4 x float>)
+
+define <4 x double> @foo(<4 x double> %a, <4 x double> %b) nounwind {
+entry:
+ %x = call <4 x double> @llvm.sqrt.v4f64(<4 x double> %b)
+ %r = fdiv <4 x double> %a, %x
+ ret <4 x double> %r
+
+; CHECK-LABEL: @foo
+; CHECK: qvfrsqrte
+; CHECK: qvfmul
+; FIXME: We're currently loading two constants here (1.5 and -1.5), and using
+; a qvfmadd instead of a qvfnmsub
+; CHECK: qvfmadd
+; CHECK: qvfmadd
+; CHECK: qvfmul
+; CHECK: qvfmul
+; CHECK: qvfmadd
+; CHECK: qvfmul
+; CHECK: qvfmul
+; CHECK: blr
+
+; CHECK-SAFE-LABEL: @foo
+; CHECK-SAFE: fsqrt
+; CHECK-SAFE: fdiv
+; CHECK-SAFE: blr
+}
+
+define <4 x double> @foof(<4 x double> %a, <4 x float> %b) nounwind {
+entry:
+ %x = call <4 x float> @llvm.sqrt.v4f32(<4 x float> %b)
+ %y = fpext <4 x float> %x to <4 x double>
+ %r = fdiv <4 x double> %a, %y
+ ret <4 x double> %r
+
+; CHECK-LABEL: @foof
+; CHECK: qvfrsqrtes
+; CHECK: qvfmuls
+; FIXME: We're currently loading two constants here (1.5 and -1.5), and using
+; a qvfmadd instead of a qvfnmsubs
+; CHECK: qvfmadds
+; CHECK: qvfmadds
+; CHECK: qvfmuls
+; CHECK: qvfmul
+; CHECK: blr
+
+; CHECK-SAFE-LABEL: @foof
+; CHECK-SAFE: fsqrts
+; CHECK-SAFE: fdiv
+; CHECK-SAFE: blr
+}
+
+define <4 x float> @food(<4 x float> %a, <4 x double> %b) nounwind {
+entry:
+ %x = call <4 x double> @llvm.sqrt.v4f64(<4 x double> %b)
+ %y = fptrunc <4 x double> %x to <4 x float>
+ %r = fdiv <4 x float> %a, %y
+ ret <4 x float> %r
+
+; CHECK-LABEL: @food
+; CHECK: qvfrsqrte
+; CHECK: qvfmul
+; FIXME: We're currently loading two constants here (1.5 and -1.5), and using
+; a qvfmadd instead of a qvfnmsub
+; CHECK: qvfmadd
+; CHECK: qvfmadd
+; CHECK: qvfmul
+; CHECK: qvfmul
+; CHECK: qvfmadd
+; CHECK: qvfmul
+; CHECK: qvfrsp
+; CHECK: qvfmuls
+; CHECK: blr
+
+; CHECK-SAFE-LABEL: @food
+; CHECK-SAFE: fsqrt
+; CHECK-SAFE: fdivs
+; CHECK-SAFE: blr
+}
+
+define <4 x float> @goo(<4 x float> %a, <4 x float> %b) nounwind {
+entry:
+ %x = call <4 x float> @llvm.sqrt.v4f32(<4 x float> %b)
+ %r = fdiv <4 x float> %a, %x
+ ret <4 x float> %r
+
+; CHECK-LABEL: @goo
+; CHECK: qvfrsqrtes
+; CHECK: qvfmuls
+; FIXME: We're currently loading two constants here (1.5 and -1.5), and using
+; a qvfmadd instead of a qvfnmsubs
+; CHECK: qvfmadds
+; CHECK: qvfmadds
+; CHECK: qvfmuls
+; CHECK: qvfmuls
+; CHECK: blr
+
+; CHECK-SAFE-LABEL: @goo
+; CHECK-SAFE: fsqrts
+; CHECK-SAFE: fdivs
+; CHECK-SAFE: blr
+}
+
+define <4 x double> @foo2(<4 x double> %a, <4 x double> %b) nounwind {
+entry:
+ %r = fdiv <4 x double> %a, %b
+ ret <4 x double> %r
+
+; CHECK-LABEL: @foo2
+; CHECK: qvfre
+; CHECK: qvfnmsub
+; CHECK: qvfmadd
+; CHECK: qvfnmsub
+; CHECK: qvfmadd
+; CHECK: qvfmul
+; CHECK: blr
+
+; CHECK-SAFE-LABEL: @foo2
+; CHECK-SAFE: fdiv
+; CHECK-SAFE: blr
+}
+
+define <4 x float> @goo2(<4 x float> %a, <4 x float> %b) nounwind {
+entry:
+ %r = fdiv <4 x float> %a, %b
+ ret <4 x float> %r
+
+; CHECK-LABEL: @goo2
+; CHECK: qvfres
+; CHECK: qvfnmsubs
+; CHECK: qvfmadds
+; CHECK: qvfmuls
+; CHECK: blr
+
+; CHECK-SAFE-LABEL: @goo2
+; CHECK-SAFE: fdivs
+; CHECK-SAFE: blr
+}
+
+define <4 x double> @foo3(<4 x double> %a) nounwind {
+entry:
+ %r = call <4 x double> @llvm.sqrt.v4f64(<4 x double> %a)
+ ret <4 x double> %r
+
+; CHECK-LABEL: @foo3
+; CHECK: qvfrsqrte
+; CHECK: qvfmul
+; FIXME: We're currently loading two constants here (1.5 and -1.5), and using
+; a qvfmadd instead of a qvfnmsub
+; CHECK-DAG: qvfmadd
+; CHECK-DAG: qvfcmpeq
+; CHECK-DAG: qvfmadd
+; CHECK-DAG: qvfmul
+; CHECK-DAG: qvfmul
+; CHECK-DAG: qvfmadd
+; CHECK-DAG: qvfmul
+; CHECK-DAG: qvfmul
+; CHECK: qvfsel
+; CHECK: blr
+
+; CHECK-SAFE-LABEL: @foo3
+; CHECK-SAFE: fsqrt
+; CHECK-SAFE: blr
+}
+
+define <4 x float> @goo3(<4 x float> %a) nounwind {
+entry:
+ %r = call <4 x float> @llvm.sqrt.v4f32(<4 x float> %a)
+ ret <4 x float> %r
+
+; CHECK-LABEL: @goo3
+; CHECK: qvfrsqrtes
+; CHECK: qvfmuls
+; FIXME: We're currently loading two constants here (1.5 and -1.5), and using
+; a qvfmadds instead of a qvfnmsubs
+; CHECK-DAG: qvfmadds
+; CHECK-DAG: qvfcmpeq
+; CHECK-DAG: qvfmadds
+; CHECK-DAG: qvfmuls
+; CHECK-DAG: qvfmuls
+; CHECK: qvfsel
+; CHECK: blr
+
+; CHECK-SAFE-LABEL: @goo3
+; CHECK-SAFE: fsqrts
+; CHECK-SAFE: blr
+}
+
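The recurring FIXME in this file concerns the Newton-Raphson refinement that -enable-unsafe-fp-math expands these divides and square roots into. For reference, the standard iterations behind the checked qvfrsqrte/qvfre sequences are (the pairing of the 1.5/-1.5 constants with the first form is inferred from the FIXME, not taken from the backend source):

\[ y_{n+1} = y_n \left( \tfrac{3}{2} - \tfrac{1}{2}\, a\, y_n^2 \right) \quad \text{refines } y \approx 1/\sqrt{a} \text{ (seeded by qvfrsqrte)} \]
\[ x_{n+1} = x_n \left( 2 - b\, x_n \right) \quad \text{refines } x \approx 1/b \text{ (seeded by qvfre, as in @foo2/@goo2)} \]

Each step decomposes into multiplies and fused multiply-adds, which is what the runs of qvfmul/qvfmadd in the CHECK lines count; a qvfnmsub formulation would fold the negated constant and avoid materializing both 1.5 and -1.5.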
diff --git a/test/CodeGen/PowerPC/qpx-rounding-ops.ll b/test/CodeGen/PowerPC/qpx-rounding-ops.ll
new file mode 100644
index 0000000..6fdd8e6
--- /dev/null
+++ b/test/CodeGen/PowerPC/qpx-rounding-ops.ll
@@ -0,0 +1,109 @@
+; RUN: llc < %s -mtriple=powerpc64-unknown-linux-gnu -mcpu=a2q | FileCheck %s
+; RUN: llc < %s -mtriple=powerpc64-unknown-linux-gnu -mcpu=a2q -enable-unsafe-fp-math | FileCheck -check-prefix=CHECK-FM %s
+target datalayout = "E-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-f128:128:128-v128:128:128-n32:64"
+target triple = "powerpc64-unknown-linux-gnu"
+
+define <4 x float> @test1(<4 x float> %x) nounwind {
+ %call = tail call <4 x float> @llvm.floor.v4f32(<4 x float> %x) nounwind readnone
+ ret <4 x float> %call
+
+; CHECK: test1:
+; CHECK: qvfrim 1, 1
+
+; CHECK-FM: test1:
+; CHECK-FM: qvfrim 1, 1
+}
+
+declare <4 x float> @llvm.floor.v4f32(<4 x float>) nounwind readnone
+
+define <4 x double> @test2(<4 x double> %x) nounwind {
+ %call = tail call <4 x double> @llvm.floor.v4f64(<4 x double> %x) nounwind readnone
+ ret <4 x double> %call
+
+; CHECK: test2:
+; CHECK: qvfrim 1, 1
+
+; CHECK-FM: test2:
+; CHECK-FM: qvfrim 1, 1
+}
+
+declare <4 x double> @llvm.floor.v4f64(<4 x double>) nounwind readnone
+
+define <4 x float> @test3(<4 x float> %x) nounwind {
+ %call = tail call <4 x float> @llvm.nearbyint.v4f32(<4 x float> %x) nounwind readnone
+ ret <4 x float> %call
+
+; CHECK: test3:
+; CHECK-NOT: qvfrin
+
+; CHECK-FM: test3:
+; CHECK-FM-NOT: qvfrin
+}
+
+declare <4 x float> @llvm.nearbyint.v4f32(<4 x float>) nounwind readnone
+
+define <4 x double> @test4(<4 x double> %x) nounwind {
+ %call = tail call <4 x double> @llvm.nearbyint.v4f64(<4 x double> %x) nounwind readnone
+ ret <4 x double> %call
+
+; CHECK: test4:
+; CHECK-NOT: qvfrin
+
+; CHECK-FM: test4:
+; CHECK-FM-NOT: qvfrin
+}
+
+declare <4 x double> @llvm.nearbyint.v4f64(<4 x double>) nounwind readnone
+
+define <4 x float> @test5(<4 x float> %x) nounwind {
+ %call = tail call <4 x float> @llvm.ceil.v4f32(<4 x float> %x) nounwind readnone
+ ret <4 x float> %call
+
+; CHECK: test5:
+; CHECK: qvfrip 1, 1
+
+; CHECK-FM: test5:
+; CHECK-FM: qvfrip 1, 1
+}
+
+declare <4 x float> @llvm.ceil.v4f32(<4 x float>) nounwind readnone
+
+define <4 x double> @test6(<4 x double> %x) nounwind {
+ %call = tail call <4 x double> @llvm.ceil.v4f64(<4 x double> %x) nounwind readnone
+ ret <4 x double> %call
+
+; CHECK: test6:
+; CHECK: qvfrip 1, 1
+
+; CHECK-FM: test6:
+; CHECK-FM: qvfrip 1, 1
+}
+
+declare <4 x double> @llvm.ceil.v4f64(<4 x double>) nounwind readnone
+
+define <4 x float> @test9(<4 x float> %x) nounwind {
+ %call = tail call <4 x float> @llvm.trunc.v4f32(<4 x float> %x) nounwind readnone
+ ret <4 x float> %call
+
+; CHECK: test9:
+; CHECK: qvfriz 1, 1
+
+; CHECK-FM: test9:
+; CHECK-FM: qvfriz 1, 1
+}
+
+declare <4 x float> @llvm.trunc.v4f32(<4 x float>) nounwind readnone
+
+define <4 x double> @test10(<4 x double> %x) nounwind {
+ %call = tail call <4 x double> @llvm.trunc.v4f64(<4 x double> %x) nounwind readnone
+ ret <4 x double> %call
+
+; CHECK: test10:
+; CHECK: qvfriz 1, 1
+
+; CHECK-FM: test10:
+; CHECK-FM: qvfriz 1, 1
+}
+
+declare <4 x double> @llvm.trunc.v4f64(<4 x double>) nounwind readnone
+
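Taken together, these tests pin down the intrinsic-to-instruction mapping for QPX rounding. As a summary (the rounding semantics follow the standard PowerPC fri* mnemonics; the nearbyint rationale is an assumption):

; llvm.floor.v4f32/v4f64     -> qvfrim  (round toward -infinity)
; llvm.ceil.v4f32/v4f64      -> qvfrip  (round toward +infinity)
; llvm.trunc.v4f32/v4f64     -> qvfriz  (round toward zero)
; llvm.nearbyint.v4f32/v4f64 -> no qvfrin: it rounds to nearest
;    unconditionally, while nearbyint must honor the dynamic rounding
;    mode, so the tests check only for its absence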
diff --git a/test/CodeGen/PowerPC/qpx-s-load.ll b/test/CodeGen/PowerPC/qpx-s-load.ll
new file mode 100644
index 0000000..1ca0ae6
--- /dev/null
+++ b/test/CodeGen/PowerPC/qpx-s-load.ll
@@ -0,0 +1,26 @@
+; RUN: llc < %s -march=ppc64 -mcpu=a2q | FileCheck %s
+target triple = "powerpc64-bgq-linux"
+
+define <4 x float> @foo(<4 x float>* %p) {
+entry:
+ %v = load <4 x float>* %p, align 4
+ ret <4 x float> %v
+}
+
+; CHECK: @foo
+; CHECK-DAG: li [[REG1:[0-9]+]], 15
+; CHECK-DAG: qvlfsx [[REG4:[0-9]+]], 0, 3
+; CHECK-DAG: qvlfsx [[REG2:[0-9]+]], 3, [[REG1]]
+; CHECK-DAG: qvlpclsx [[REG3:[0-9]+]], 0, 3
+; CHECK-DAG: qvfperm 1, [[REG4]], [[REG2]], [[REG3]]
+; CHECK: blr
+
+define <4 x float> @bar(<4 x float>* %p) {
+entry:
+ %v = load <4 x float>* %p, align 16
+ ret <4 x float> %v
+}
+
+; CHECK: @bar
+; CHECK: qvlfsx
+
diff --git a/test/CodeGen/PowerPC/qpx-s-sel.ll b/test/CodeGen/PowerPC/qpx-s-sel.ll
new file mode 100644
index 0000000..e3a2dd9
--- /dev/null
+++ b/test/CodeGen/PowerPC/qpx-s-sel.ll
@@ -0,0 +1,144 @@
+; RUN: llc < %s -march=ppc64 -mcpu=a2q | FileCheck %s
+target triple = "powerpc64-bgq-linux"
+
+@Q = constant <4 x i1> <i1 0, i1 undef, i1 1, i1 1>, align 16
+@R = global <4 x i1> <i1 0, i1 0, i1 0, i1 0>, align 16
+
+define <4 x float> @test1(<4 x float> %a, <4 x float> %b, <4 x i1> %c) nounwind readnone {
+entry:
+ %r = select <4 x i1> %c, <4 x float> %a, <4 x float> %b
+ ret <4 x float> %r
+
+; CHECK-LABEL: @test1
+; CHECK: qvfsel 1, 3, 1, 2
+; CHECK: blr
+}
+
+define <4 x float> @test2(<4 x float> %a, <4 x float> %b, i1 %c1, i1 %c2, i1 %c3, i1 %c4) nounwind readnone {
+entry:
+ %v = insertelement <4 x i1> undef, i1 %c1, i32 0
+ %v2 = insertelement <4 x i1> %v, i1 %c2, i32 1
+ %v3 = insertelement <4 x i1> %v2, i1 %c3, i32 2
+ %v4 = insertelement <4 x i1> %v3, i1 %c4, i32 3
+ %r = select <4 x i1> %v4, <4 x float> %a, <4 x float> %b
+ ret <4 x float> %r
+
+; CHECK-LABEL: @test2
+; CHECK: stw
+; CHECK-DAG: qvlfiwzx [[REG1:[0-9]+]],
+; CHECK-DAG: qvlfdx [[REG2:[0-9]+]],
+; CHECK-DAG: qvfcfidu [[REG3:[0-9]+]], [[REG1]]
+; CHECK: qvfcmpeq [[REG4:[0-9]+]], [[REG3]], [[REG2]]
+; CHECK: qvfsel 1, [[REG4]], 1, 2
+; CHECK: blr
+}
+
+define <4 x i1> @test3(<4 x i1> %a) nounwind readnone {
+entry:
+ %v = and <4 x i1> %a, <i1 0, i1 undef, i1 1, i1 1>
+ ret <4 x i1> %v
+
+; CHECK-LABEL: @test3
+; CHECK: qvlfsx [[REG:[0-9]+]],
+; qvflogical 1, 1, [[REG]], 1
+; blr
+}
+
+define <4 x i1> @test4(<4 x i1> %a) nounwind {
+entry:
+ %q = load <4 x i1>* @Q, align 16
+ %v = and <4 x i1> %a, %q
+ ret <4 x i1> %v
+
+; CHECK-LABEL: @test4
+; CHECK-DAG: lbz
+; CHECK-DAG: qvlfdx [[REG1:[0-9]+]],
+; CHECK-DAG: stw
+; CHECK-DAG: qvlfiwzx [[REG2:[0-9]+]],
+; CHECK-DAG: qvfcfidu [[REG3:[0-9]+]], [[REG2]]
+; CHECK: qvfcmpeq [[REG4:[0-9]+]], [[REG3]], [[REG1]]
+; CHECK: qvflogical 1, 1, [[REG4]], 1
+; CHECK: blr
+}
+
+define void @test5(<4 x i1> %a) nounwind {
+entry:
+ store <4 x i1> %a, <4 x i1>* @R
+ ret void
+
+; CHECK-LABEL: @test5
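+; Storing a boolean vector goes the other way: the floating-point mask is
+; remapped to integer values (qvfmadd), converted (qvfctiwu), and stored
+; via qvstfiwx before the final byte is written: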
+; CHECK: qvlfdx [[REG1:[0-9]+]],
+; CHECK: qvfmadd [[REG2:[0-9]+]], 1, [[REG1]], [[REG1]]
+; CHECK: qvfctiwu [[REG3:[0-9]+]], [[REG2]]
+; CHECK: qvstfiwx [[REG3]],
+; CHECK: lwz
+; CHECK: stb
+; CHECK: blr
+}
+
+define i1 @test6(<4 x i1> %a) nounwind {
+entry:
+ %r = extractelement <4 x i1> %a, i32 2
+ ret i1 %r
+
+; CHECK-LABEL: @test6
+; CHECK: qvlfdx [[REG1:[0-9]+]],
+; CHECK: qvfmadd [[REG2:[0-9]+]], 1, [[REG1]], [[REG1]]
+; CHECK: qvfctiwu [[REG3:[0-9]+]], [[REG2]]
+; CHECK: qvstfiwx [[REG3]],
+; CHECK: lwz
+; CHECK: blr
+}
+
+define i1 @test7(<4 x i1> %a) nounwind {
+entry:
+ %r = extractelement <4 x i1> %a, i32 2
+ %s = extractelement <4 x i1> %a, i32 3
+ %q = and i1 %r, %s
+ ret i1 %q
+
+; CHECK-LABEL: @test7
+; CHECK: qvlfdx [[REG1:[0-9]+]],
+; CHECK: qvfmadd [[REG2:[0-9]+]], 1, [[REG1]], [[REG1]]
+; CHECK: qvfctiwu [[REG3:[0-9]+]], [[REG2]]
+; CHECK: qvstfiwx [[REG3]],
+; CHECK-DAG: lwz [[REG4:[0-9]+]],
+; FIXME: We're storing the vector twice, and that's silly.
+; CHECK-DAG: qvstfiwx [[REG3]],
+; CHECK: lwz [[REG5:[0-9]+]],
+; CHECK: and 3,
+; CHECK: blr
+}
+
+define i1 @test8(<3 x i1> %a) nounwind {
+entry:
+ %r = extractelement <3 x i1> %a, i32 2
+ ret i1 %r
+
+; CHECK-LABEL: @test8
+; CHECK: qvlfdx [[REG1:[0-9]+]],
+; CHECK: qvfmadd [[REG2:[0-9]+]], 1, [[REG1]], [[REG1]]
+; CHECK: qvfctiwu [[REG3:[0-9]+]], [[REG2]]
+; CHECK: qvstfiwx [[REG3]],
+; CHECK: lwz
+; CHECK: blr
+}
+
+define <3 x float> @test9(<3 x float> %a, <3 x float> %b, i1 %c1, i1 %c2, i1 %c3) nounwind readnone {
+entry:
+ %v = insertelement <3 x i1> undef, i1 %c1, i32 0
+ %v2 = insertelement <3 x i1> %v, i1 %c2, i32 1
+ %v3 = insertelement <3 x i1> %v2, i1 %c3, i32 2
+ %r = select <3 x i1> %v3, <3 x float> %a, <3 x float> %b
+ ret <3 x float> %r
+
+; CHECK-LABEL: @test9
+; CHECK: stw
+; CHECK-DAG: qvlfiwzx [[REG1:[0-9]+]],
+; CHECK-DAG: qvlfdx [[REG2:[0-9]+]],
+; CHECK-DAG: qvfcfidu [[REG3:[0-9]+]], [[REG1]]
+; CHECK: qvfcmpeq [[REG4:[0-9]+]], [[REG3]], [[REG2]]
+; CHECK: qvfsel 1, [[REG4]], 1, 2
+; CHECK: blr
+}
+
diff --git a/test/CodeGen/PowerPC/qpx-s-store.ll b/test/CodeGen/PowerPC/qpx-s-store.ll
new file mode 100644
index 0000000..0bd6201
--- /dev/null
+++ b/test/CodeGen/PowerPC/qpx-s-store.ll
@@ -0,0 +1,25 @@
+; RUN: llc < %s -march=ppc64 -mcpu=a2q | FileCheck %s
+target triple = "powerpc64-bgq-linux"
+
+define void @foo(<4 x float> %v, <4 x float>* %p) {
+entry:
+ store <4 x float> %v, <4 x float>* %p, align 4
+ ret void
+}
+
+; CHECK: @foo
+; CHECK: stfs
+; CHECK: stfs
+; CHECK: stfs
+; CHECK: stfs
+; CHECK: blr
+
+define void @bar(<4 x float> %v, <4 x float>* %p) {
+entry:
+ store <4 x float> %v, <4 x float>* %p, align 16
+ ret void
+}
+
+; CHECK: @bar
+; CHECK: qvstfsx
+
diff --git a/test/CodeGen/PowerPC/qpx-sel.ll b/test/CodeGen/PowerPC/qpx-sel.ll
new file mode 100644
index 0000000..6822735
--- /dev/null
+++ b/test/CodeGen/PowerPC/qpx-sel.ll
@@ -0,0 +1,152 @@
+; RUN: llc < %s -march=ppc64 -mcpu=a2q | FileCheck %s
+target triple = "powerpc64-bgq-linux"
+
+@Q = constant <4 x i1> <i1 0, i1 undef, i1 1, i1 1>, align 16
+@R = global <4 x i1> <i1 0, i1 0, i1 0, i1 0>, align 16
+
+define <4 x double> @test1(<4 x double> %a, <4 x double> %b, <4 x i1> %c) nounwind readnone {
+entry:
+ %r = select <4 x i1> %c, <4 x double> %a, <4 x double> %b
+ ret <4 x double> %r
+
+; CHECK-LABEL: @test1
+; CHECK: qvfsel 1, 3, 1, 2
+; CHECK: blr
+}
+
+define <4 x double> @test2(<4 x double> %a, <4 x double> %b, i1 %c1, i1 %c2, i1 %c3, i1 %c4) nounwind readnone {
+entry:
+ %v = insertelement <4 x i1> undef, i1 %c1, i32 0
+ %v2 = insertelement <4 x i1> %v, i1 %c2, i32 1
+ %v3 = insertelement <4 x i1> %v2, i1 %c3, i32 2
+ %v4 = insertelement <4 x i1> %v3, i1 %c4, i32 3
+ %r = select <4 x i1> %v4, <4 x double> %a, <4 x double> %b
+ ret <4 x double> %r
+
+; CHECK-LABEL: @test2
+
+; FIXME: This load/store sequence is unnecessary.
+; CHECK-DAG: lbz
+; CHECK-DAG: stw
+
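+; The spilled words are reloaded as a vector, converted to floating point,
+; and compared to rebuild the boolean mask for qvfsel: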
+; CHECK-DAG: qvlfiwzx [[REG1:[0-9]+]],
+; CHECK-DAG: qvlfdx [[REG2:[0-9]+]],
+; CHECK-DAG: qvfcfidu [[REG3:[0-9]+]], [[REG1]]
+; CHECK: qvfcmpeq [[REG4:[0-9]+]], [[REG3]], [[REG2]]
+; CHECK: qvfsel 1, [[REG4]], 1, 2
+; CHECK: blr
+}
+
+define <4 x i1> @test3(<4 x i1> %a) nounwind readnone {
+entry:
+ %v = and <4 x i1> %a, <i1 0, i1 undef, i1 1, i1 1>
+ ret <4 x i1> %v
+
+; CHECK-LABEL: @test3
+; CHECK: qvlfsx [[REG:[0-9]+]],
+; qvflogical 1, 1, [[REG]], 1
+; blr
+}
+
+define <4 x i1> @test4(<4 x i1> %a) nounwind {
+entry:
+ %q = load <4 x i1>* @Q, align 16
+ %v = and <4 x i1> %a, %q
+ ret <4 x i1> %v
+
+; CHECK-LABEL: @test4
+; CHECK-DAG: lbz
+; CHECK-DAG: qvlfdx [[REG1:[0-9]+]],
+; CHECK-DAG: stw
+; CHECK-DAG: qvlfiwzx [[REG2:[0-9]+]],
+; CHECK-DAG: qvfcfidu [[REG3:[0-9]+]], [[REG2]]
+; CHECK: qvfcmpeq [[REG4:[0-9]+]], [[REG3]], [[REG1]]
+; CHECK: qvflogical 1, 1, [[REG4]], 1
+; CHECK: blr
+}
+
+define void @test5(<4 x i1> %a) nounwind {
+entry:
+ store <4 x i1> %a, <4 x i1>* @R
+ ret void
+
+; CHECK-LABEL: @test5
+; CHECK: qvlfdx [[REG1:[0-9]+]],
+; CHECK: qvfmadd [[REG2:[0-9]+]], 1, [[REG1]], [[REG1]]
+; CHECK: qvfctiwu [[REG3:[0-9]+]], [[REG2]]
+; CHECK: qvstfiwx [[REG3]],
+; CHECK: lwz
+; CHECK: stb
+; CHECK: blr
+}
+
+define i1 @test6(<4 x i1> %a) nounwind {
+entry:
+ %r = extractelement <4 x i1> %a, i32 2
+ ret i1 %r
+
+; CHECK-LABEL: @test6
+; CHECK: qvlfdx [[REG1:[0-9]+]],
+; CHECK: qvfmadd [[REG2:[0-9]+]], 1, [[REG1]], [[REG1]]
+; CHECK: qvfctiwu [[REG3:[0-9]+]], [[REG2]]
+; CHECK: qvstfiwx [[REG3]],
+; CHECK: lwz
+; CHECK: blr
+}
+
+define i1 @test7(<4 x i1> %a) nounwind {
+entry:
+ %r = extractelement <4 x i1> %a, i32 2
+ %s = extractelement <4 x i1> %a, i32 3
+ %q = and i1 %r, %s
+ ret i1 %q
+
+; CHECK-LABEL: @test7
+; CHECK: qvlfdx [[REG1:[0-9]+]],
+; CHECK: qvfmadd [[REG2:[0-9]+]], 1, [[REG1]], [[REG1]]
+; CHECK: qvfctiwu [[REG3:[0-9]+]], [[REG2]]
+; CHECK: qvstfiwx [[REG3]],
+; CHECK-DAG: lwz [[REG4:[0-9]+]],
+; FIXME: We're storing the vector twice, and that's silly.
+; CHECK-DAG: qvstfiwx [[REG3]],
+; CHECK-DAG: lwz [[REG5:[0-9]+]],
+; CHECK: and 3,
+; CHECK: blr
+}
+
+define i1 @test8(<3 x i1> %a) nounwind {
+entry:
+ %r = extractelement <3 x i1> %a, i32 2
+ ret i1 %r
+
+; CHECK-LABEL: @test8
+; CHECK: qvlfdx [[REG1:[0-9]+]],
+; CHECK: qvfmadd [[REG2:[0-9]+]], 1, [[REG1]], [[REG1]]
+; CHECK: qvfctiwu [[REG3:[0-9]+]], [[REG2]]
+; CHECK: qvstfiwx [[REG3]],
+; CHECK: lwz
+; CHECK: blr
+}
+
+define <3 x double> @test9(<3 x double> %a, <3 x double> %b, i1 %c1, i1 %c2, i1 %c3) nounwind readnone {
+entry:
+ %v = insertelement <3 x i1> undef, i1 %c1, i32 0
+ %v2 = insertelement <3 x i1> %v, i1 %c2, i32 1
+ %v3 = insertelement <3 x i1> %v2, i1 %c3, i32 2
+ %r = select <3 x i1> %v3, <3 x double> %a, <3 x double> %b
+ ret <3 x double> %r
+
+; CHECK-LABEL: @test9
+
+; FIXME: This load/store sequence is unnecessary.
+; CHECK-DAG: lbz
+; CHECK-DAG: stw
+
+; CHECK-DAG: qvlfiwzx [[REG1:[0-9]+]],
+; CHECK-DAG: qvlfdx [[REG2:[0-9]+]],
+; CHECK-DAG: qvfcfidu [[REG3:[0-9]+]], [[REG1]]
+; CHECK: qvfcmpeq [[REG4:[0-9]+]], [[REG3]], [[REG2]]
+; CHECK: qvfsel 1, [[REG4]], 1, 2
+; CHECK: blr
+}
+
diff --git a/test/CodeGen/PowerPC/qpx-store.ll b/test/CodeGen/PowerPC/qpx-store.ll
new file mode 100644
index 0000000..2579d2c
--- /dev/null
+++ b/test/CodeGen/PowerPC/qpx-store.ll
@@ -0,0 +1,25 @@
+; RUN: llc < %s -march=ppc64 -mcpu=a2q | FileCheck %s
+target triple = "powerpc64-bgq-linux"
+
+define void @foo(<4 x double> %v, <4 x double>* %p) {
+entry:
+ store <4 x double> %v, <4 x double>* %p, align 8
+ ret void
+}
+
+; CHECK: @foo
+; CHECK: stfd
+; CHECK: stfd
+; CHECK: stfd
+; CHECK: stfd
+; CHECK: blr
+
+define void @bar(<4 x double> %v, <4 x double>* %p) {
+entry:
+ store <4 x double> %v, <4 x double>* %p, align 32
+ ret void
+}
+
+; CHECK: @bar
+; CHECK: qvstfdx
+
diff --git a/test/CodeGen/PowerPC/qpx-unalperm.ll b/test/CodeGen/PowerPC/qpx-unalperm.ll
new file mode 100644
index 0000000..e765b46
--- /dev/null
+++ b/test/CodeGen/PowerPC/qpx-unalperm.ll
@@ -0,0 +1,64 @@
+; RUN: llc < %s -mcpu=a2q | FileCheck %s
+target datalayout = "E-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-f128:128:128-v128:128:128-n32:64"
+target triple = "powerpc64-bgq-linux"
+
+define <4 x double> @foo(<4 x double>* %a) {
+entry:
+ %r = load <4 x double>* %a, align 32
+ ret <4 x double> %r
+; CHECK: qvlfdx
+; CHECK: blr
+}
+
+define <4 x double> @bar(<4 x double>* %a) {
+entry:
+ %r = load <4 x double>* %a, align 8
+ %b = getelementptr <4 x double>* %a, i32 16
+ %s = load <4 x double>* %b, align 32
+ %t = fadd <4 x double> %r, %s
+ ret <4 x double> %t
+; CHECK: qvlpcldx
+; CHECK: qvlfdx
+; CHECK: qvfperm
+; CHECK: blr
+}
+
+define <4 x double> @bar1(<4 x double>* %a) {
+entry:
+ %r = load <4 x double>* %a, align 8
+ %b = getelementptr <4 x double>* %a, i32 16
+ %s = load <4 x double>* %b, align 8
+ %t = fadd <4 x double> %r, %s
+ ret <4 x double> %t
+}
+
+define <4 x double> @bar2(<4 x double>* %a) {
+entry:
+ %r = load <4 x double>* %a, align 8
+ %b = getelementptr <4 x double>* %a, i32 1
+ %s = load <4 x double>* %b, align 32
+ %t = fadd <4 x double> %r, %s
+ ret <4 x double> %t
+}
+
+define <4 x double> @bar3(<4 x double>* %a) {
+entry:
+ %r = load <4 x double>* %a, align 8
+ %b = getelementptr <4 x double>* %a, i32 1
+ %s = load <4 x double>* %b, align 8
+ %t = fadd <4 x double> %r, %s
+ ret <4 x double> %t
+}
+
+define <4 x double> @bar4(<4 x double>* %a) {
+entry:
+ %r = load <4 x double>* %a, align 8
+ %b = getelementptr <4 x double>* %a, i32 1
+ %s = load <4 x double>* %b, align 8
+ %c = getelementptr <4 x double>* %b, i32 1
+ %t = load <4 x double>* %c, align 8
+ %u = fadd <4 x double> %r, %s
+ %v = fadd <4 x double> %u, %t
+ ret <4 x double> %v
+}
+
diff --git a/test/CodeGen/PowerPC/retaddr2.ll b/test/CodeGen/PowerPC/retaddr2.ll
new file mode 100644
index 0000000..8581f6c
--- /dev/null
+++ b/test/CodeGen/PowerPC/retaddr2.ll
@@ -0,0 +1,24 @@
+; RUN: llc -mcpu=pwr7 < %s | FileCheck %s
+target datalayout = "E-m:e-i64:64-n32:64"
+target triple = "powerpc64-unknown-linux-gnu"
+
+; Function Attrs: nounwind readnone
+define i8* @test1() #0 {
+entry:
+ %0 = tail call i8* @llvm.returnaddress(i32 0)
+ ret i8* %0
+}
+
+; CHECK-LABEL: @test1
+; CHECK: mflr 0
+; CHECK: std 0, 16(1)
+; CHECK-DAG: ld 3, 64(1)
+; CHECK-DAG: ld 0, 16(1)
+; CHECK: mtlr 0
+; CHECK: blr
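+; (llvm.returnaddress(0) reads the saved link register back from the frame,
+; hence the mflr/std spill and the ld above)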
+
+; Function Attrs: nounwind readnone
+declare i8* @llvm.returnaddress(i32) #0
+
+attributes #0 = { nounwind readnone }
+
diff --git a/test/CodeGen/PowerPC/rlwimi-and.ll b/test/CodeGen/PowerPC/rlwimi-and.ll
index 213363e..9433f8e 100644
--- a/test/CodeGen/PowerPC/rlwimi-and.ll
+++ b/test/CodeGen/PowerPC/rlwimi-and.ll
@@ -28,11 +28,9 @@ codeRepl17: ; preds = %codeRepl4
store i16 %rvml38.sroa.0.0.insert.insert, i16* undef, align 2
unreachable
-; FIXME: the SLWI could be folded into the RLWIMI to give a rotate of 8.
; CHECK: @test
-; CHECK-DAG: slwi [[R1:[0-9]+]], {{[0-9]+}}, 31
-; CHECK-DAG: rlwinm [[R2:[0-9]+]], {{[0-9]+}}, 0, 31, 31
-; CHECK: rlwimi [[R2]], [[R1]], 9, 23, 23
+; CHECK: rlwinm [[R1:[0-9]+]], {{[0-9]+}}, 0, 31, 31
+; CHECK: rlwimi [[R1]], {{[0-9]+}}, 8, 23, 23
codeRepl29: ; preds = %codeRepl1
unreachable
diff --git a/test/CodeGen/PowerPC/rlwimi2.ll b/test/CodeGen/PowerPC/rlwimi2.ll
index 1bee4e0..7978718 100644
--- a/test/CodeGen/PowerPC/rlwimi2.ll
+++ b/test/CodeGen/PowerPC/rlwimi2.ll
@@ -1,7 +1,7 @@
; All of these ands and shifts should be folded into rlwimi's
; RUN: llc < %s -march=ppc32 -o %t
-; RUN: grep rlwimi %t | count 3
-; RUN: grep srwi %t | count 1
+; RUN: grep rlwimi %t | count 4
+; RUN: not grep srwi %t
; RUN: not grep slwi %t
define i16 @test1(i32 %srcA, i32 %srcB, i32 %alpha) nounwind {
diff --git a/test/CodeGen/PowerPC/rm-zext.ll b/test/CodeGen/PowerPC/rm-zext.ll
new file mode 100644
index 0000000..33995e1
--- /dev/null
+++ b/test/CodeGen/PowerPC/rm-zext.ll
@@ -0,0 +1,89 @@
+; RUN: llc -mcpu=pwr7 < %s | FileCheck %s
+target datalayout = "E-m:e-i64:64-n32:64"
+target triple = "powerpc64-unknown-linux-gnu"
+
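+; Each result below is already zero-extended by construction (shifted,
+; masked, byte-swapped, or a leading-zero count), so no clearing
+; rldicl ..., 0, 32 should survive.
+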
+; Function Attrs: nounwind readnone
+define signext i32 @foo(i32 signext %a) #0 {
+entry:
+ %mul = mul nsw i32 %a, %a
+ %shr2 = lshr i32 %mul, 5
+ ret i32 %shr2
+
+; CHECK-LABEL: @foo
+; CHECK-NOT: rldicl 3, {{[0-9]+}}, 0, 32
+; CHECK: blr
+}
+
+define zeroext i32 @test6(i32 zeroext %x) #0 {
+entry:
+ %and = lshr i32 %x, 16
+ %shr = and i32 %and, 255
+ %and1 = shl i32 %x, 16
+ %shl = and i32 %and1, 16711680
+ %or = or i32 %shr, %shl
+ ret i32 %or
+
+; CHECK-LABEL: @test6
+; CHECK-NOT: rldicl 3, {{[0-9]+}}, 0, 32
+; CHECK: blr
+}
+
+define zeroext i32 @min(i32 zeroext %a, i32 zeroext %b) #0 {
+entry:
+ %cmp = icmp ule i32 %a, %b
+ %cond = select i1 %cmp, i32 %a, i32 %b
+ ret i32 %cond
+
+; CHECK-LABEL: @min
+; CHECK-NOT: rldicl 3, {{[0-9]+}}, 0, 32
+; CHECK: blr
+}
+
+; Function Attrs: nounwind readnone
+declare i32 @llvm.bswap.i32(i32) #0
+
+; Function Attrs: nounwind readonly
+define zeroext i32 @bs32(i32* nocapture readonly %x) #1 {
+entry:
+ %0 = load i32* %x, align 4
+ %1 = tail call i32 @llvm.bswap.i32(i32 %0)
+ ret i32 %1
+
+; CHECK-LABEL: @bs32
+; CHECK-NOT: rldicl 3, {{[0-9]+}}, 0, 32
+; CHECK: blr
+}
+
+; Function Attrs: nounwind readonly
+define zeroext i16 @bs16(i16* nocapture readonly %x) #1 {
+entry:
+ %0 = load i16* %x, align 2
+ %1 = tail call i16 @llvm.bswap.i16(i16 %0)
+ ret i16 %1
+
+; CHECK-LABEL: @bs16
+; CHECK-NOT: rldicl 3, {{[0-9]+}}, 0, 32
+; CHECK: blr
+}
+
+; Function Attrs: nounwind readnone
+declare i16 @llvm.bswap.i16(i16) #0
+
+; Function Attrs: nounwind readnone
+define zeroext i32 @ctlz32(i32 zeroext %x) #0 {
+entry:
+ %0 = tail call i32 @llvm.ctlz.i32(i32 %x, i1 false)
+ ret i32 %0
+
+; CHECK-LABEL: @ctlz32
+; CHECK-NOT: rldicl 3, {{[0-9]+}}, 0, 32
+; CHECK: blr
+}
+
+; Function Attrs: nounwind readnone
+declare i32 @llvm.ctlz.i32(i32, i1) #0
+
+attributes #0 = { nounwind readnone }
+attributes #1 = { nounwind readonly }
+
diff --git a/test/CodeGen/PowerPC/sdiv-pow2.ll b/test/CodeGen/PowerPC/sdiv-pow2.ll
new file mode 100644
index 0000000..5ec019d
--- /dev/null
+++ b/test/CodeGen/PowerPC/sdiv-pow2.ll
@@ -0,0 +1,67 @@
+; RUN: llc -mcpu=ppc64 < %s | FileCheck %s
+; RUN: llc -mtriple=powerpc-unknown-linux-gnu -mcpu=ppc < %s | FileCheck -check-prefix=CHECK-32 %s
+target datalayout = "E-m:e-i64:64-n32:64"
+target triple = "powerpc64-unknown-linux-gnu"
+
+; Function Attrs: nounwind readnone
+define signext i32 @foo4(i32 signext %a) #0 {
+entry:
+ %div = sdiv i32 %a, 8
+ ret i32 %div
+
+; CHECK-LABEL: @foo4
+; CHECK: srawi [[REG1:[0-9]+]], 3, 3
+; CHECK: addze [[REG2:[0-9]+]], [[REG1]]
+; CHECK: extsw 3, [[REG2]]
+; CHECK: blr
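+
+; srawi shifts right arithmetically and sets CA when the input is negative
+; and one bits were shifted out; addze adds that carry back, rounding the
+; quotient toward zero as sdiv requires (for the negative divisors below,
+; the result is then negated).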
+}
+
+; Function Attrs: nounwind readnone
+define i64 @foo8(i64 %a) #0 {
+entry:
+ %div = sdiv i64 %a, 8
+ ret i64 %div
+
+; CHECK-LABEL: @foo8
+; CHECK: sradi [[REG1:[0-9]+]], 3, 3
+; CHECK: addze 3, [[REG1]]
+; CHECK: blr
+
+; CHECK-32-LABEL: @foo8
+; CHECK-32-NOT: sradi
+; CHECK-32: blr
+}
+
+; Function Attrs: nounwind readnone
+define signext i32 @foo4n(i32 signext %a) #0 {
+entry:
+ %div = sdiv i32 %a, -8
+ ret i32 %div
+
+; CHECK-LABEL: @foo4n
+; CHECK: srawi [[REG1:[0-9]+]], 3, 3
+; CHECK: addze [[REG2:[0-9]+]], [[REG1]]
+; CHECK: neg [[REG3:[0-9]+]], [[REG2]]
+; CHECK: extsw 3, [[REG3]]
+; CHECK: blr
+}
+
+; Function Attrs: nounwind readnone
+define i64 @foo8n(i64 %a) #0 {
+entry:
+ %div = sdiv i64 %a, -8
+ ret i64 %div
+
+; CHECK-LABEL: @foo8n
+; CHECK: sradi [[REG1:[0-9]+]], 3, 3
+; CHECK: addze [[REG2:[0-9]+]], [[REG1]]
+; CHECK: neg 3, [[REG2]]
+; CHECK: blr
+
+; CHECK-32-LABEL: @foo8n
+; CHECK-32-NOT: sradi
+; CHECK-32: blr
+}
+
+attributes #0 = { nounwind readnone }
+
diff --git a/test/CodeGen/PowerPC/stack-realign.ll b/test/CodeGen/PowerPC/stack-realign.ll
index a59fceb..762f50a 100644
--- a/test/CodeGen/PowerPC/stack-realign.ll
+++ b/test/CodeGen/PowerPC/stack-realign.ll
@@ -37,6 +37,7 @@ entry:
; CHECK-DAG: subfic 0, [[REG]], -160
; CHECK: stdux 1, 1, 0
+; CHECK: .cfi_def_cfa_register r30
; CHECK: .cfi_offset r30, -16
; CHECK: .cfi_offset lr, 16
@@ -59,6 +60,7 @@ entry:
; CHECK-FP-DAG: subfic 0, [[REG]], -160
; CHECK-FP: stdux 1, 1, 0
+; CHECK-FP: .cfi_def_cfa_register r30
; CHECK-FP: .cfi_offset r31, -8
; CHECK-FP: .cfi_offset r30, -16
; CHECK-FP: .cfi_offset lr, 16
@@ -120,6 +122,8 @@ entry:
; CHECK-DAG: subfc 0, [[REG3]], [[REG2]]
; CHECK: stdux 1, 1, 0
+; CHECK: .cfi_def_cfa_register r30
+
; CHECK: blr
; CHECK-32-LABEL: @hoo
@@ -178,6 +182,8 @@ entry:
; CHECK-DAG: subfic 0, [[REG]], -192
; CHECK: stdux 1, 1, 0
+; CHECK: .cfi_def_cfa_register r30
+
; CHECK: stfd 30, -16(30)
; CHECK: blr
@@ -193,6 +199,8 @@ entry:
; CHECK-FP-DAG: subfic 0, [[REG]], -192
; CHECK-FP: stdux 1, 1, 0
+; CHECK-FP: .cfi_def_cfa_register r30
+
; CHECK-FP: stfd 30, -16(30)
; CHECK-FP: blr
diff --git a/test/CodeGen/PowerPC/subreg-postra-2.ll b/test/CodeGen/PowerPC/subreg-postra-2.ll
new file mode 100644
index 0000000..2faaa61
--- /dev/null
+++ b/test/CodeGen/PowerPC/subreg-postra-2.ll
@@ -0,0 +1,175 @@
+; RUN: llc -mcpu=pwr7 < %s | FileCheck %s
+target datalayout = "E-m:e-i64:64-n32:64"
+target triple = "powerpc64-unknown-linux-gnu"
+
+; Function Attrs: nounwind
+define void @jbd2_journal_commit_transaction() #0 {
+entry:
+ br i1 undef, label %do.body, label %if.then5
+
+if.then5: ; preds = %entry
+ unreachable
+
+do.body: ; preds = %entry
+ br i1 undef, label %do.body.i, label %trace_jbd2_start_commit.exit
+
+do.body.i: ; preds = %do.body
+ unreachable
+
+trace_jbd2_start_commit.exit: ; preds = %do.body
+ br i1 undef, label %do.body.i1116, label %trace_jbd2_commit_locking.exit
+
+do.body.i1116: ; preds = %trace_jbd2_start_commit.exit
+ unreachable
+
+trace_jbd2_commit_locking.exit: ; preds = %trace_jbd2_start_commit.exit
+ br i1 undef, label %while.end, label %while.body.lr.ph
+
+while.body.lr.ph: ; preds = %trace_jbd2_commit_locking.exit
+ unreachable
+
+while.end: ; preds = %trace_jbd2_commit_locking.exit
+ br i1 undef, label %spin_unlock.exit1146, label %if.then.i.i.i.i1144
+
+if.then.i.i.i.i1144: ; preds = %while.end
+ unreachable
+
+spin_unlock.exit1146: ; preds = %while.end
+ br i1 undef, label %spin_unlock.exit1154, label %if.then.i.i.i.i1152
+
+if.then.i.i.i.i1152: ; preds = %spin_unlock.exit1146
+ unreachable
+
+spin_unlock.exit1154: ; preds = %spin_unlock.exit1146
+ br i1 undef, label %do.body.i1159, label %trace_jbd2_commit_flushing.exit
+
+do.body.i1159: ; preds = %spin_unlock.exit1154
+ br i1 undef, label %if.end.i1166, label %do.body5.i1165
+
+do.body5.i1165: ; preds = %do.body.i1159
+ unreachable
+
+if.end.i1166: ; preds = %do.body.i1159
+ unreachable
+
+trace_jbd2_commit_flushing.exit: ; preds = %spin_unlock.exit1154
+ br i1 undef, label %for.end.i, label %for.body.lr.ph.i
+
+for.body.lr.ph.i: ; preds = %trace_jbd2_commit_flushing.exit
+ unreachable
+
+for.end.i: ; preds = %trace_jbd2_commit_flushing.exit
+ br i1 undef, label %journal_submit_data_buffers.exit, label %if.then.i.i.i.i31.i
+
+if.then.i.i.i.i31.i: ; preds = %for.end.i
+ br label %journal_submit_data_buffers.exit
+
+journal_submit_data_buffers.exit: ; preds = %if.then.i.i.i.i31.i, %for.end.i
+ br i1 undef, label %if.end103, label %if.then102
+
+if.then102: ; preds = %journal_submit_data_buffers.exit
+ unreachable
+
+if.end103: ; preds = %journal_submit_data_buffers.exit
+ br i1 undef, label %do.body.i1182, label %trace_jbd2_commit_logging.exit
+
+do.body.i1182: ; preds = %if.end103
+ br i1 undef, label %if.end.i1189, label %do.body5.i1188
+
+do.body5.i1188: ; preds = %do.body5.i1188, %do.body.i1182
+ br i1 undef, label %if.end.i1189, label %do.body5.i1188
+
+if.end.i1189: ; preds = %do.body5.i1188, %do.body.i1182
+ unreachable
+
+trace_jbd2_commit_logging.exit: ; preds = %if.end103
+ br label %while.cond129.outer1451
+
+while.cond129.outer1451: ; preds = %start_journal_io, %trace_jbd2_commit_logging.exit
+ br label %while.cond129
+
+while.cond129: ; preds = %if.then135, %while.cond129.outer1451
+ br i1 undef, label %while.end246, label %if.then135
+
+if.then135: ; preds = %while.cond129
+ br i1 undef, label %start_journal_io, label %while.cond129
+
+start_journal_io: ; preds = %if.then135
+ br label %while.cond129.outer1451
+
+while.end246: ; preds = %while.cond129
+ br i1 undef, label %for.end.i1287, label %for.body.i1277
+
+for.body.i1277: ; preds = %while.end246
+ unreachable
+
+for.end.i1287: ; preds = %while.end246
+ br i1 undef, label %journal_finish_inode_data_buffers.exit, label %if.then.i.i.i.i84.i
+
+if.then.i.i.i.i84.i: ; preds = %for.end.i1287
+ unreachable
+
+journal_finish_inode_data_buffers.exit: ; preds = %for.end.i1287
+ br i1 undef, label %if.end256, label %if.then249
+
+if.then249: ; preds = %journal_finish_inode_data_buffers.exit
+ unreachable
+
+if.end256: ; preds = %journal_finish_inode_data_buffers.exit
+ br label %while.body318
+
+while.body318: ; preds = %wait_on_buffer.exit, %if.end256
+ br i1 undef, label %wait_on_buffer.exit, label %if.then.i1296
+
+if.then.i1296: ; preds = %while.body318
+ br label %wait_on_buffer.exit
+
+wait_on_buffer.exit: ; preds = %if.then.i1296, %while.body318
+ br i1 undef, label %do.body378, label %while.body318
+
+do.body378: ; preds = %wait_on_buffer.exit
+ br i1 undef, label %while.end418, label %while.body392.lr.ph
+
+while.body392.lr.ph: ; preds = %do.body378
+ br label %while.body392
+
+while.body392: ; preds = %wait_on_buffer.exit1319, %while.body392.lr.ph
+ %0 = load i8** undef, align 8
+ %add.ptr399 = getelementptr inbounds i8* %0, i64 -72
+ %b_state.i.i1314 = bitcast i8* %add.ptr399 to i64*
+ %tobool.i1316 = icmp eq i64 undef, 0
+ br i1 %tobool.i1316, label %wait_on_buffer.exit1319, label %if.then.i1317
+
+if.then.i1317: ; preds = %while.body392
+ unreachable
+
+wait_on_buffer.exit1319: ; preds = %while.body392
+ %1 = load volatile i64* %b_state.i.i1314, align 8
+ %conv.i.i1322 = and i64 %1, 1
+ %lnot404 = icmp eq i64 %conv.i.i1322, 0
+ %.err.4 = select i1 %lnot404, i32 -5, i32 undef
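+ ; The .long 0x7c0000a8 below hand-encodes ldarx; with andc and stdcx. it
+ ; forms an atomic retry loop clearing bits in %b_state.i.i1314.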
+ %2 = call i64 asm sideeffect "1:.long 0x7c0000a8 $| ((($0) & 0x1f) << 21) $| (((0) & 0x1f) << 16) $| ((($3) & 0x1f) << 11) $| (((0) & 0x1) << 0) \0Aandc $0,$0,$2\0Astdcx. $0,0,$3\0Abne- 1b\0A", "=&r,=*m,r,r,*m,~{cc},~{memory}"(i64* %b_state.i.i1314, i64 262144, i64* %b_state.i.i1314, i64* %b_state.i.i1314) #0
+ store i8* %0, i8** undef, align 8
+ %cmp.i1312 = icmp eq i32* undef, undef
+ br i1 %cmp.i1312, label %while.end418, label %while.body392
+
+while.end418: ; preds = %wait_on_buffer.exit1319, %do.body378
+ %err.4.lcssa = phi i32 [ undef, %do.body378 ], [ %.err.4, %wait_on_buffer.exit1319 ]
+ %tobool419 = icmp eq i32 %err.4.lcssa, 0
+ br i1 %tobool419, label %if.end421, label %if.then420
+
+; CHECK-LABEL: @jbd2_journal_commit_transaction
+; CHECK: andi.
+; CHECK: cror [[REG:[0-9]+]], 1, 1
+; CHECK: stdcx.
+; CHECK: isel {{[0-9]+}}, {{[0-9]+}}, {{[0-9]+}}, [[REG]]
+
+if.then420: ; preds = %while.end418
+ unreachable
+
+if.end421: ; preds = %while.end418
+ unreachable
+}
+
+attributes #0 = { nounwind }
+
diff --git a/test/CodeGen/PowerPC/subreg-postra.ll b/test/CodeGen/PowerPC/subreg-postra.ll
new file mode 100644
index 0000000..b10fa66
--- /dev/null
+++ b/test/CodeGen/PowerPC/subreg-postra.ll
@@ -0,0 +1,168 @@
+; RUN: llc -mcpu=pwr7 < %s | FileCheck %s
+target datalayout = "E-m:e-i64:64-n32:64"
+target triple = "powerpc64-unknown-linux-gnu"
+
+; Function Attrs: nounwind
+define void @jbd2_journal_commit_transaction(i32* %journal) #0 {
+entry:
+ br i1 undef, label %do.body, label %if.then5
+
+if.then5: ; preds = %entry
+ unreachable
+
+do.body: ; preds = %entry
+ br i1 undef, label %do.body.i, label %trace_jbd2_start_commit.exit
+
+do.body.i: ; preds = %do.body
+ unreachable
+
+trace_jbd2_start_commit.exit: ; preds = %do.body
+ br i1 undef, label %do.body.i1116, label %trace_jbd2_commit_locking.exit
+
+do.body.i1116: ; preds = %trace_jbd2_start_commit.exit
+ br i1 undef, label %if.end.i1123, label %do.body5.i1122
+
+do.body5.i1122: ; preds = %do.body.i1116
+ unreachable
+
+if.end.i1123: ; preds = %do.body.i1116
+ br label %trace_jbd2_commit_locking.exit
+
+trace_jbd2_commit_locking.exit: ; preds = %if.end.i1123, %trace_jbd2_start_commit.exit
+ br i1 undef, label %spin_unlock.exit1146, label %if.then.i.i.i.i1144
+
+if.then.i.i.i.i1144: ; preds = %trace_jbd2_commit_locking.exit
+ unreachable
+
+spin_unlock.exit1146: ; preds = %trace_jbd2_commit_locking.exit
+ br i1 undef, label %spin_unlock.exit1154, label %if.then.i.i.i.i1152
+
+if.then.i.i.i.i1152: ; preds = %spin_unlock.exit1146
+ br label %spin_unlock.exit1154
+
+spin_unlock.exit1154: ; preds = %if.then.i.i.i.i1152, %spin_unlock.exit1146
+ br i1 undef, label %do.body.i1159, label %trace_jbd2_commit_flushing.exit
+
+do.body.i1159: ; preds = %spin_unlock.exit1154
+ unreachable
+
+trace_jbd2_commit_flushing.exit: ; preds = %spin_unlock.exit1154
+ br i1 undef, label %for.end.i, label %for.body.lr.ph.i
+
+for.body.lr.ph.i: ; preds = %trace_jbd2_commit_flushing.exit
+ br i1 undef, label %spin_unlock.exit.i, label %if.then.i.i.i.i.i
+
+if.then.i.i.i.i.i: ; preds = %for.body.lr.ph.i
+ unreachable
+
+spin_unlock.exit.i: ; preds = %for.body.lr.ph.i
+ unreachable
+
+for.end.i: ; preds = %trace_jbd2_commit_flushing.exit
+ br i1 undef, label %journal_submit_data_buffers.exit, label %if.then.i.i.i.i31.i
+
+if.then.i.i.i.i31.i: ; preds = %for.end.i
+ unreachable
+
+journal_submit_data_buffers.exit: ; preds = %for.end.i
+ br i1 undef, label %if.end103, label %if.then102
+
+if.then102: ; preds = %journal_submit_data_buffers.exit
+ unreachable
+
+if.end103: ; preds = %journal_submit_data_buffers.exit
+ br i1 undef, label %do.body.i1182, label %trace_jbd2_commit_logging.exit
+
+do.body.i1182: ; preds = %if.end103
+ unreachable
+
+trace_jbd2_commit_logging.exit: ; preds = %if.end103
+ br i1 undef, label %for.end.i1287, label %for.body.i1277
+
+for.body.i1277: ; preds = %trace_jbd2_commit_logging.exit
+ unreachable
+
+for.end.i1287: ; preds = %trace_jbd2_commit_logging.exit
+ br i1 undef, label %journal_finish_inode_data_buffers.exit, label %if.then.i.i.i.i84.i
+
+if.then.i.i.i.i84.i: ; preds = %for.end.i1287
+ unreachable
+
+journal_finish_inode_data_buffers.exit: ; preds = %for.end.i1287
+ br i1 undef, label %if.end256, label %if.then249
+
+if.then249: ; preds = %journal_finish_inode_data_buffers.exit
+ unreachable
+
+if.end256: ; preds = %journal_finish_inode_data_buffers.exit
+ br i1 undef, label %do.body277, label %if.then260
+
+if.then260: ; preds = %if.end256
+ br label %do.body277
+
+do.body277: ; preds = %if.then260, %if.end256
+ br label %while.body318
+
+while.body318: ; preds = %wait_on_buffer.exit, %do.body277
+ %tobool.i1295 = icmp eq i64 undef, 0
+ br i1 %tobool.i1295, label %wait_on_buffer.exit, label %if.then.i1296
+
+if.then.i1296: ; preds = %while.body318
+ unreachable
+
+wait_on_buffer.exit: ; preds = %while.body318
+ br i1 undef, label %do.body378, label %while.body318
+
+do.body378: ; preds = %wait_on_buffer.exit
+ br i1 undef, label %while.end418, label %while.body392.lr.ph
+
+while.body392.lr.ph: ; preds = %do.body378
+ br label %while.body392
+
+while.body392: ; preds = %wait_on_buffer.exit1319, %while.body392.lr.ph
+ %0 = load i8** undef, align 8
+ %add.ptr399 = getelementptr inbounds i8* %0, i64 -72
+ %b_state.i.i1314 = bitcast i8* %add.ptr399 to i64*
+ %tobool.i1316 = icmp eq i64 undef, 0
+ br i1 %tobool.i1316, label %wait_on_buffer.exit1319, label %if.then.i1317
+
+if.then.i1317: ; preds = %while.body392
+ unreachable
+
+wait_on_buffer.exit1319: ; preds = %while.body392
+ %1 = load volatile i64* %b_state.i.i1314, align 8
+ %conv.i.i1322 = and i64 %1, 1
+ %lnot404 = icmp eq i64 %conv.i.i1322, 0
+ %.err.4 = select i1 %lnot404, i32 -5, i32 undef
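+ ; The .long 0x7c0000a8 below hand-encodes ldarx; with andc and stdcx. it
+ ; forms an atomic retry loop clearing bits in %b_state.i.i1314.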
+ %2 = call i64 asm sideeffect "1:.long 0x7c0000a8 $| ((($0) & 0x1f) << 21) $| (((0) & 0x1f) << 16) $| ((($3) & 0x1f) << 11) $| (((0) & 0x1) << 0) \0Aandc $0,$0,$2\0Astdcx. $0,0,$3\0Abne- 1b\0A", "=&r,=*m,r,r,*m,~{cc},~{memory}"(i64* %b_state.i.i1314, i64 262144, i64* %b_state.i.i1314, i64* %b_state.i.i1314) #1
+ %prev.i.i.i1325 = getelementptr inbounds i8* %0, i64 8
+ %3 = load i32** null, align 8
+ store i32* %3, i32** undef, align 8
+ call void @__brelse(i32* undef) #1
+ br i1 undef, label %while.end418, label %while.body392
+
+; CHECK-LABEL: @jbd2_journal_commit_transaction
+; CHECK: andi.
+; CHECK: cror [[REG:[0-9]+]], 1, 1
+; CHECK: stdcx.
+; CHECK: isel {{[0-9]+}}, {{[0-9]+}}, {{[0-9]+}}, [[REG]]
+
+while.end418: ; preds = %wait_on_buffer.exit1319, %do.body378
+ %err.4.lcssa = phi i32 [ undef, %do.body378 ], [ %.err.4, %wait_on_buffer.exit1319 ]
+ br i1 undef, label %if.end421, label %if.then420
+
+if.then420: ; preds = %while.end418
+ call void @jbd2_journal_abort(i32* %journal, i32 signext %err.4.lcssa) #1
+ br label %if.end421
+
+if.end421: ; preds = %if.then420, %while.end418
+ unreachable
+}
+
+declare void @jbd2_journal_abort(i32*, i32 signext)
+
+declare void @__brelse(i32*)
+
+attributes #0 = { nounwind }
+attributes #1 = { nounwind }
+
diff --git a/test/CodeGen/PowerPC/tls-cse.ll b/test/CodeGen/PowerPC/tls-cse.ll
new file mode 100644
index 0000000..2aa75f9
--- /dev/null
+++ b/test/CodeGen/PowerPC/tls-cse.ll
@@ -0,0 +1,52 @@
+; RUN: llc -march=ppc64 -mcpu=pwr7 -O2 -relocation-model=pic < %s | FileCheck %s
+; RUN: llc -march=ppc64 -mcpu=pwr7 -O2 -relocation-model=pic < %s | grep "__tls_get_addr" | count 1
+
+; This test was derived from LLVM's own
+; PrettyStackTraceEntry::~PrettyStackTraceEntry(). It demonstrates an
+; opportunity for CSE of calls to __tls_get_addr().
+
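+; Without the CSE, each of the two thread-local accesses would materialize
+; the address separately, roughly:
+;   addis 3, 2, _ZL20PrettyStackTraceHead@got@tlsld@ha
+;   addi  3, 3, _ZL20PrettyStackTraceHead@got@tlsld@l
+;   bl    __tls_get_addr(_ZL20PrettyStackTraceHead@tlsld)
+; The 'count 1' RUN line above verifies that only one call survives.
+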
+target datalayout = "e-m:e-i64:64-n32:64"
+target triple = "powerpc64le-unknown-linux-gnu"
+
+%"class.llvm::PrettyStackTraceEntry" = type { i32 (...)**, %"class.llvm::PrettyStackTraceEntry"* }
+
+@_ZTVN4llvm21PrettyStackTraceEntryE = unnamed_addr constant [5 x i8*] [i8* null, i8* null, i8* bitcast (void (%"class.llvm::PrettyStackTraceEntry"*)* @_ZN4llvm21PrettyStackTraceEntryD2Ev to i8*), i8* bitcast (void (%"class.llvm::PrettyStackTraceEntry"*)* @_ZN4llvm21PrettyStackTraceEntryD0Ev to i8*), i8* bitcast (void ()* @__cxa_pure_virtual to i8*)], align 8
+@_ZL20PrettyStackTraceHead = internal thread_local unnamed_addr global %"class.llvm::PrettyStackTraceEntry"* null, align 8
+@.str = private unnamed_addr constant [87 x i8] c"PrettyStackTraceHead == this && \22Pretty stack trace entry destruction is out of order\22\00", align 1
+@.str1 = private unnamed_addr constant [64 x i8] c"/home/wschmidt/llvm/llvm-test2/lib/Support/PrettyStackTrace.cpp\00", align 1
+@__PRETTY_FUNCTION__._ZN4llvm21PrettyStackTraceEntryD2Ev = private unnamed_addr constant [62 x i8] c"virtual llvm::PrettyStackTraceEntry::~PrettyStackTraceEntry()\00", align 1
+
+declare void @_ZN4llvm21PrettyStackTraceEntryD2Ev(%"class.llvm::PrettyStackTraceEntry"* %this) unnamed_addr
+declare void @__cxa_pure_virtual()
+declare void @__assert_fail(i8*, i8*, i32 zeroext, i8*)
+declare void @_ZdlPv(i8*)
+
+define void @_ZN4llvm21PrettyStackTraceEntryD0Ev(%"class.llvm::PrettyStackTraceEntry"* %this) unnamed_addr align 2 {
+entry:
+ %0 = getelementptr inbounds %"class.llvm::PrettyStackTraceEntry"* %this, i64 0, i32 0
+ store i32 (...)** bitcast (i8** getelementptr inbounds ([5 x i8*]* @_ZTVN4llvm21PrettyStackTraceEntryE, i64 0, i64 2) to i32 (...)**), i32 (...)*** %0, align 8
+ %1 = load %"class.llvm::PrettyStackTraceEntry"** @_ZL20PrettyStackTraceHead, align 8
+ %cmp.i = icmp eq %"class.llvm::PrettyStackTraceEntry"* %1, %this
+ br i1 %cmp.i, label %_ZN4llvm21PrettyStackTraceEntryD2Ev.exit, label %cond.false.i
+
+cond.false.i: ; preds = %entry
+ tail call void @__assert_fail(i8* getelementptr inbounds ([87 x i8]* @.str, i64 0, i64 0), i8* getelementptr inbounds ([64 x i8]* @.str1, i64 0, i64 0), i32 zeroext 119, i8* getelementptr inbounds ([62 x i8]* @__PRETTY_FUNCTION__._ZN4llvm21PrettyStackTraceEntryD2Ev, i64 0, i64 0))
+ unreachable
+
+_ZN4llvm21PrettyStackTraceEntryD2Ev.exit: ; preds = %entry
+ %NextEntry.i.i = getelementptr inbounds %"class.llvm::PrettyStackTraceEntry"* %this, i64 0, i32 1
+ %2 = bitcast %"class.llvm::PrettyStackTraceEntry"** %NextEntry.i.i to i64*
+ %3 = load i64* %2, align 8
+ store i64 %3, i64* bitcast (%"class.llvm::PrettyStackTraceEntry"** @_ZL20PrettyStackTraceHead to i64*), align 8
+ %4 = bitcast %"class.llvm::PrettyStackTraceEntry"* %this to i8*
+ tail call void @_ZdlPv(i8* %4)
+ ret void
+}
+
+; CHECK-LABEL: _ZN4llvm21PrettyStackTraceEntryD0Ev:
+; CHECK: addis [[REG1:[0-9]+]], 2, _ZL20PrettyStackTraceHead@got@tlsld@ha
+; CHECK: addi 3, [[REG1]], _ZL20PrettyStackTraceHead@got@tlsld@l
+; CHECK: bl __tls_get_addr(_ZL20PrettyStackTraceHead@tlsld)
+; CHECK: addis 3, 3, _ZL20PrettyStackTraceHead@dtprel@ha
+; CHECK: ld {{[0-9]+}}, _ZL20PrettyStackTraceHead@dtprel@l(3)
+; CHECK: std {{[0-9]+}}, _ZL20PrettyStackTraceHead@dtprel@l(3)
diff --git a/test/CodeGen/PowerPC/tls-pic.ll b/test/CodeGen/PowerPC/tls-pic.ll
index 9ba3725..6c671b0 100644
--- a/test/CodeGen/PowerPC/tls-pic.ll
+++ b/test/CodeGen/PowerPC/tls-pic.ll
@@ -19,32 +19,32 @@ entry:
; OPT0-LABEL: main:
; OPT0: addis [[REG:[0-9]+]], 2, a@got@tlsld@ha
-; OPT0-NEXT: addi 3, [[REG]], a@got@tlsld@l
+; OPT0: addi 3, [[REG]], a@got@tlsld@l
; OPT0: bl __tls_get_addr(a@tlsld)
; OPT0-NEXT: nop
; OPT0: addis [[REG2:[0-9]+]], 3, a@dtprel@ha
-; OPT0-NEXT: addi {{[0-9]+}}, [[REG2]], a@dtprel@l
+; OPT0: addi {{[0-9]+}}, [[REG2]], a@dtprel@l
; OPT0-32-LABEL: main
; OPT0-32: addi {{[0-9]+}}, {{[0-9]+}}, a@got@tlsld
; OPT0-32: bl __tls_get_addr(a@tlsld)@PLT
; OPT0-32: addis [[REG:[0-9]+]], 3, a@dtprel@ha
-; OPT0-32-NEXT: addi {{[0-9]+}}, [[REG]], a@dtprel@l
+; OPT0-32: addi {{[0-9]+}}, [[REG]], a@dtprel@l
; OPT1-32-LABEL: main
; OPT1-32: addi 3, {{[0-9]+}}, a@got@tlsld
; OPT1-32: bl __tls_get_addr(a@tlsld)@PLT
; OPT1-32: addis [[REG:[0-9]+]], 3, a@dtprel@ha
-; OPT1-32-NEXT: addi {{[0-9]+}}, [[REG]], a@dtprel@l
+; OPT1-32: addi {{[0-9]+}}, [[REG]], a@dtprel@l
; Test peephole optimization for thread-local storage using the
; local dynamic model.
; OPT1-LABEL: main:
; OPT1: addis [[REG:[0-9]+]], 2, a@got@tlsld@ha
-; OPT1-NEXT: addi 3, [[REG]], a@got@tlsld@l
+; OPT1: addi 3, [[REG]], a@got@tlsld@l
; OPT1: bl __tls_get_addr(a@tlsld)
; OPT1-NEXT: nop
; OPT1: addis [[REG2:[0-9]+]], 3, a@dtprel@ha
-; OPT1-NEXT: lwa {{[0-9]+}}, a@dtprel@l([[REG2]])
+; OPT1: lwa {{[0-9]+}}, a@dtprel@l([[REG2]])
; Test correct assembly code generation for thread-local storage using
; the general dynamic model.
@@ -60,8 +60,8 @@ entry:
}
; OPT1-LABEL: main2
-; OPT1: addis [[REG:[0-9]+]], 2, a2@got@tlsgd@ha
-; OPT1-NEXT: addi 3, [[REG]], a2@got@tlsgd@l
+; OPT1: addis [[REG:[0-9]+]], 2, a2@got@tlsgd@ha
+; OPT1: addi 3, [[REG]], a2@got@tlsgd@l
; OPT1: bl __tls_get_addr(a2@tlsgd)
; OPT1-NEXT: nop
; OPT1-32-LABEL: main2
diff --git a/test/CodeGen/PowerPC/tls-store2.ll b/test/CodeGen/PowerPC/tls-store2.ll
index f884dd8..e9aa17e 100644
--- a/test/CodeGen/PowerPC/tls-store2.ll
+++ b/test/CodeGen/PowerPC/tls-store2.ll
@@ -19,13 +19,14 @@ entry:
}
; CHECK-LABEL: call_once:
-; CHECK: addis 3, 2, __once_callable@got@tlsgd@ha
-; CHECK: addi 3, 3, __once_callable@got@tlsgd@l
+; CHECK: addi 3, {{[0-9]+}}, __once_callable@got@tlsgd@l
; CHECK: bl __tls_get_addr(__once_callable@tlsgd)
; CHECK-NEXT: nop
-; CHECK: std {{[0-9]+}}, 0(3)
-; CHECK: addis 3, 2, __once_call@got@tlsgd@ha
-; CHECK: addi 3, 3, __once_call@got@tlsgd@l
+; FIXME: We could check here for 'std {{[0-9]+}}, 0(3)', but that no longer
+; works: with the new scheduling freedom we create a copy of R3 based on the
+; initial schedule, and we don't re-coalesce it after the instructions are
+; moved, even though the copy is then unnecessary.
+; CHECK: addi 3, {{[0-9]+}}, __once_call@got@tlsgd@l
; CHECK: bl __tls_get_addr(__once_call@tlsgd)
; CHECK-NEXT: nop
; CHECK: std {{[0-9]+}}, 0(3)
diff --git a/test/CodeGen/PowerPC/toc-load-sched-bug.ll b/test/CodeGen/PowerPC/toc-load-sched-bug.ll
index d437915..e92c4f4 100644
--- a/test/CodeGen/PowerPC/toc-load-sched-bug.ll
+++ b/test/CodeGen/PowerPC/toc-load-sched-bug.ll
@@ -484,51 +484,51 @@ attributes #7 = { noreturn nounwind }
!llvm.ident = !{!0}
-!0 = metadata !{metadata !"clang version 3.6.0 (trunk 215115) (llvm/trunk 215117)"}
-!1 = metadata !{metadata !2, metadata !4, i64 0}
-!2 = metadata !{metadata !"_ZTSSs", metadata !3, i64 0}
-!3 = metadata !{metadata !"_ZTSNSs12_Alloc_hiderE", metadata !4, i64 0}
-!4 = metadata !{metadata !"any pointer", metadata !5, i64 0}
-!5 = metadata !{metadata !"omnipotent char", metadata !6, i64 0}
-!6 = metadata !{metadata !"Simple C/C++ TBAA"}
-!7 = metadata !{metadata !8, metadata !9, i64 0}
-!8 = metadata !{metadata !"_ZTSNSs9_Rep_baseE", metadata !9, i64 0, metadata !9, i64 8, metadata !10, i64 16}
-!9 = metadata !{metadata !"long", metadata !5, i64 0}
-!10 = metadata !{metadata !"int", metadata !5, i64 0}
-!11 = metadata !{metadata !12, metadata !12, i64 0}
-!12 = metadata !{metadata !"vtable pointer", metadata !6, i64 0}
-!13 = metadata !{metadata !3, metadata !4, i64 0}
-!14 = metadata !{metadata !15, metadata !10, i64 24}
-!15 = metadata !{metadata !"_ZTSN4llvm12SMDiagnosticE", metadata !4, i64 0, metadata !16, i64 8, metadata !2, i64 16, metadata !10, i64 24, metadata !10, i64 28, metadata !17, i64 32, metadata !2, i64 40, metadata !2, i64 48, metadata !18, i64 56, metadata !19, i64 80}
-!16 = metadata !{metadata !"_ZTSN4llvm5SMLocE", metadata !4, i64 0}
-!17 = metadata !{metadata !"_ZTSN4llvm9SourceMgr8DiagKindE", metadata !5, i64 0}
-!18 = metadata !{metadata !"_ZTSSt6vectorISt4pairIjjESaIS1_EE"}
-!19 = metadata !{metadata !"_ZTSN4llvm11SmallVectorINS_7SMFixItELj4EEE", metadata !20, i64 48}
-!20 = metadata !{metadata !"_ZTSN4llvm18SmallVectorStorageINS_7SMFixItELj4EEE", metadata !5, i64 0}
-!21 = metadata !{metadata !15, metadata !10, i64 28}
-!22 = metadata !{metadata !15, metadata !17, i64 32}
-!23 = metadata !{metadata !24, metadata !4, i64 0}
-!24 = metadata !{metadata !"_ZTSN4llvm15SmallVectorBaseE", metadata !4, i64 0, metadata !4, i64 8, metadata !4, i64 16}
-!25 = metadata !{metadata !24, metadata !4, i64 8}
-!26 = metadata !{metadata !24, metadata !4, i64 16}
-!27 = metadata !{metadata !4, metadata !4, i64 0}
-!28 = metadata !{metadata !"branch_weights", i32 64, i32 4}
-!29 = metadata !{metadata !10, metadata !10, i64 0}
-!30 = metadata !{metadata !31, metadata !4, i64 8}
-!31 = metadata !{metadata !"_ZTSN4llvm12MemoryBufferE", metadata !4, i64 8, metadata !4, i64 16}
-!32 = metadata !{metadata !31, metadata !4, i64 16}
-!33 = metadata !{metadata !5, metadata !5, i64 0}
-!34 = metadata !{metadata !35, metadata !4, i64 0}
-!35 = metadata !{metadata !"_ZTSSt12_Vector_baseISt4pairIjjESaIS1_EE", metadata !36, i64 0}
-!36 = metadata !{metadata !"_ZTSNSt12_Vector_baseISt4pairIjjESaIS1_EE12_Vector_implE", metadata !4, i64 0, metadata !4, i64 8, metadata !4, i64 16}
-!37 = metadata !{metadata !38, metadata !38, i64 0}
-!38 = metadata !{metadata !"bool", metadata !5, i64 0}
-!39 = metadata !{i8 0, i8 2}
-!40 = metadata !{metadata !41, metadata !4, i64 0}
-!41 = metadata !{metadata !"_ZTSN4llvm10TimeRegionE", metadata !4, i64 0}
-!42 = metadata !{metadata !43, metadata !44, i64 32}
-!43 = metadata !{metadata !"_ZTSN4llvm11raw_ostreamE", metadata !4, i64 8, metadata !4, i64 16, metadata !4, i64 24, metadata !44, i64 32}
-!44 = metadata !{metadata !"_ZTSN4llvm11raw_ostream10BufferKindE", metadata !5, i64 0}
-!45 = metadata !{metadata !43, metadata !4, i64 24}
-!46 = metadata !{metadata !43, metadata !4, i64 8}
-!47 = metadata !{i64 0, i64 8, metadata !27, i64 8, i64 8, metadata !27}
+!0 = !{!"clang version 3.6.0 (trunk 215115) (llvm/trunk 215117)"}
+!1 = !{!2, !4, i64 0}
+!2 = !{!"_ZTSSs", !3, i64 0}
+!3 = !{!"_ZTSNSs12_Alloc_hiderE", !4, i64 0}
+!4 = !{!"any pointer", !5, i64 0}
+!5 = !{!"omnipotent char", !6, i64 0}
+!6 = !{!"Simple C/C++ TBAA"}
+!7 = !{!8, !9, i64 0}
+!8 = !{!"_ZTSNSs9_Rep_baseE", !9, i64 0, !9, i64 8, !10, i64 16}
+!9 = !{!"long", !5, i64 0}
+!10 = !{!"int", !5, i64 0}
+!11 = !{!12, !12, i64 0}
+!12 = !{!"vtable pointer", !6, i64 0}
+!13 = !{!3, !4, i64 0}
+!14 = !{!15, !10, i64 24}
+!15 = !{!"_ZTSN4llvm12SMDiagnosticE", !4, i64 0, !16, i64 8, !2, i64 16, !10, i64 24, !10, i64 28, !17, i64 32, !2, i64 40, !2, i64 48, !18, i64 56, !19, i64 80}
+!16 = !{!"_ZTSN4llvm5SMLocE", !4, i64 0}
+!17 = !{!"_ZTSN4llvm9SourceMgr8DiagKindE", !5, i64 0}
+!18 = !{!"_ZTSSt6vectorISt4pairIjjESaIS1_EE"}
+!19 = !{!"_ZTSN4llvm11SmallVectorINS_7SMFixItELj4EEE", !20, i64 48}
+!20 = !{!"_ZTSN4llvm18SmallVectorStorageINS_7SMFixItELj4EEE", !5, i64 0}
+!21 = !{!15, !10, i64 28}
+!22 = !{!15, !17, i64 32}
+!23 = !{!24, !4, i64 0}
+!24 = !{!"_ZTSN4llvm15SmallVectorBaseE", !4, i64 0, !4, i64 8, !4, i64 16}
+!25 = !{!24, !4, i64 8}
+!26 = !{!24, !4, i64 16}
+!27 = !{!4, !4, i64 0}
+!28 = !{!"branch_weights", i32 64, i32 4}
+!29 = !{!10, !10, i64 0}
+!30 = !{!31, !4, i64 8}
+!31 = !{!"_ZTSN4llvm12MemoryBufferE", !4, i64 8, !4, i64 16}
+!32 = !{!31, !4, i64 16}
+!33 = !{!5, !5, i64 0}
+!34 = !{!35, !4, i64 0}
+!35 = !{!"_ZTSSt12_Vector_baseISt4pairIjjESaIS1_EE", !36, i64 0}
+!36 = !{!"_ZTSNSt12_Vector_baseISt4pairIjjESaIS1_EE12_Vector_implE", !4, i64 0, !4, i64 8, !4, i64 16}
+!37 = !{!38, !38, i64 0}
+!38 = !{!"bool", !5, i64 0}
+!39 = !{i8 0, i8 2}
+!40 = !{!41, !4, i64 0}
+!41 = !{!"_ZTSN4llvm10TimeRegionE", !4, i64 0}
+!42 = !{!43, !44, i64 32}
+!43 = !{!"_ZTSN4llvm11raw_ostreamE", !4, i64 8, !4, i64 16, !4, i64 24, !44, i64 32}
+!44 = !{!"_ZTSN4llvm11raw_ostream10BufferKindE", !5, i64 0}
+!45 = !{!43, !4, i64 24}
+!46 = !{!43, !4, i64 8}
+!47 = !{i64 0, i64 8, !27, i64 8, i64 8, !27}
diff --git a/test/CodeGen/PowerPC/unwind-dw2-g.ll b/test/CodeGen/PowerPC/unwind-dw2-g.ll
index 54d3189..4ae6ff2 100644
--- a/test/CodeGen/PowerPC/unwind-dw2-g.ll
+++ b/test/CodeGen/PowerPC/unwind-dw2-g.ll
@@ -21,15 +21,15 @@ attributes #0 = { nounwind }
!llvm.dbg.cu = !{!0}
!llvm.module.flags = !{!8, !11}
-!0 = metadata !{metadata !"0x11\0012\00clang version 3.4\000\00\000\00\000", metadata !1, metadata !2, metadata !2, metadata !3, metadata !2, metadata !2} ; [ DW_TAG_compile_unit ] [/tmp/unwind-dw2.c] [DW_LANG_C99]
-!1 = metadata !{metadata !"/tmp/unwind-dw2.c", metadata !"/tmp"}
-!2 = metadata !{}
-!3 = metadata !{metadata !4}
-!4 = metadata !{metadata !"0x2e\00foo\00foo\00\001\000\001\000\006\000\000\001", metadata !1, metadata !5, metadata !6, null, void ()* @foo, null, null, metadata !2} ; [ DW_TAG_subprogram ] [line 1] [def] [foo]
-!5 = metadata !{metadata !"0x29", metadata !1} ; [ DW_TAG_file_type ] [/tmp/unwind-dw2.c]
-!6 = metadata !{metadata !"0x15\00\000\000\000\000\000\000", i32 0, null, null, metadata !7, null, null, null} ; [ DW_TAG_subroutine_type ] [line 0, size 0, align 0, offset 0] [from ]
-!7 = metadata !{null}
-!8 = metadata !{i32 2, metadata !"Dwarf Version", i32 3}
-!9 = metadata !{i32 2, i32 0, metadata !4, null}
-!10 = metadata !{i32 3, i32 0, metadata !4, null}
-!11 = metadata !{i32 1, metadata !"Debug Info Version", i32 2}
+!0 = !{!"0x11\0012\00clang version 3.4\000\00\000\00\000", !1, !2, !2, !3, !2, !2} ; [ DW_TAG_compile_unit ] [/tmp/unwind-dw2.c] [DW_LANG_C99]
+!1 = !{!"/tmp/unwind-dw2.c", !"/tmp"}
+!2 = !{}
+!3 = !{!4}
+!4 = !{!"0x2e\00foo\00foo\00\001\000\001\000\006\000\000\001", !1, !5, !6, null, void ()* @foo, null, null, !2} ; [ DW_TAG_subprogram ] [line 1] [def] [foo]
+!5 = !{!"0x29", !1} ; [ DW_TAG_file_type ] [/tmp/unwind-dw2.c]
+!6 = !{!"0x15\00\000\000\000\000\000\000", i32 0, null, null, !7, null, null, null} ; [ DW_TAG_subroutine_type ] [line 0, size 0, align 0, offset 0] [from ]
+!7 = !{null}
+!8 = !{i32 2, !"Dwarf Version", i32 3}
+!9 = !MDLocation(line: 2, scope: !4)
+!10 = !MDLocation(line: 3, scope: !4)
+!11 = !{i32 1, !"Debug Info Version", i32 2}
diff --git a/test/CodeGen/PowerPC/vec-abi-align.ll b/test/CodeGen/PowerPC/vec-abi-align.ll
index 5075ff2..2ec57af 100644
--- a/test/CodeGen/PowerPC/vec-abi-align.ll
+++ b/test/CodeGen/PowerPC/vec-abi-align.ll
@@ -35,17 +35,17 @@ entry:
ret void
; CHECK-LABEL: @test2
-; CHECK: ld {{[0-9]+}}, 112(1)
-; CHECK: li [[REG16:[0-9]+]], 16
-; CHECK: addi [[REGB:[0-9]+]], 1, 112
-; CHECK: lvx 2, [[REGB]], [[REG16]]
+; CHECK-DAG: ld {{[0-9]+}}, 112(1)
+; CHECK-DAG: li [[REG16:[0-9]+]], 16
+; CHECK-DAG: addi [[REGB:[0-9]+]], 1, 112
+; CHECK-DAG: lvx 2, [[REGB]], [[REG16]]
; CHECK: blr
; CHECK-VSX-LABEL: @test2
-; CHECK-VSX: ld {{[0-9]+}}, 112(1)
-; CHECK-VSX: li [[REG16:[0-9]+]], 16
-; CHECK-VSX: addi [[REGB:[0-9]+]], 1, 112
-; CHECK-VSX: lxvw4x {{[0-9]+}}, [[REGB]], [[REG16]]
+; CHECK-VSX-DAG: ld {{[0-9]+}}, 112(1)
+; CHECK-VSX-DAG: li [[REG16:[0-9]+]], 16
+; CHECK-VSX-DAG: addi [[REGB:[0-9]+]], 1, 112
+; CHECK-VSX-DAG: lxvw4x {{[0-9]+}}, [[REGB]], [[REG16]]
; CHECK-VSX: blr
}
@@ -61,17 +61,17 @@ entry:
ret void
; CHECK-LABEL: @test3
-; CHECK: ld {{[0-9]+}}, 128(1)
-; CHECK: li [[REG16:[0-9]+]], 16
-; CHECK: addi [[REGB:[0-9]+]], 1, 128
-; CHECK: lvx 2, [[REGB]], [[REG16]]
+; CHECK-DAG: ld {{[0-9]+}}, 128(1)
+; CHECK-DAG: li [[REG16:[0-9]+]], 16
+; CHECK-DAG: addi [[REGB:[0-9]+]], 1, 128
+; CHECK-DAG: lvx 2, [[REGB]], [[REG16]]
; CHECK: blr
; CHECK-VSX-LABEL: @test3
-; CHECK-VSX: ld {{[0-9]+}}, 128(1)
-; CHECK-VSX: li [[REG16:[0-9]+]], 16
-; CHECK-VSX: addi [[REGB:[0-9]+]], 1, 128
-; CHECK-VSX: lxvw4x {{[0-9]+}}, [[REGB]], [[REG16]]
+; CHECK-VSX-DAG: ld {{[0-9]+}}, 128(1)
+; CHECK-VSX-DAG: li [[REG16:[0-9]+]], 16
+; CHECK-VSX-DAG: addi [[REGB:[0-9]+]], 1, 128
+; CHECK-VSX-DAG: lxvw4x {{[0-9]+}}, [[REGB]], [[REG16]]
; CHECK-VSX: blr
}
diff --git a/test/CodeGen/PowerPC/vec_clz.ll b/test/CodeGen/PowerPC/vec_clz.ll
new file mode 100644
index 0000000..01cdecd
--- /dev/null
+++ b/test/CodeGen/PowerPC/vec_clz.ll
@@ -0,0 +1,40 @@
+; Check the vclz* instructions that were added in P8
+; RUN: llc -mtriple=powerpc64-unknown-linux-gnu -mcpu=pwr8 < %s | FileCheck %s
+; RUN: llc -mtriple=powerpc64-unknown-linux-gnu -mcpu=pwr8 -mattr=-vsx < %s | FileCheck %s
+
+declare <16 x i8> @llvm.ctlz.v16i8(<16 x i8>) nounwind readnone
+declare <8 x i16> @llvm.ctlz.v8i16(<8 x i16>) nounwind readnone
+declare <4 x i32> @llvm.ctlz.v4i32(<4 x i32>) nounwind readnone
+declare <2 x i64> @llvm.ctlz.v2i64(<2 x i64>) nounwind readnone
+
+define <16 x i8> @test_v16i8(<16 x i8> %x) nounwind readnone {
+ %vcnt = tail call <16 x i8> @llvm.ctlz.v16i8(<16 x i8> %x)
+ ret <16 x i8> %vcnt
+; CHECK: @test_v16i8
+; CHECK: vclzb 2, 2
+; CHECK: blr
+}
+
+define <8 x i16> @test_v8i16(<8 x i16> %x) nounwind readnone {
+ %vcnt = tail call <8 x i16> @llvm.ctlz.v8i16(<8 x i16> %x)
+ ret <8 x i16> %vcnt
+; CHECK: @test_v8i16
+; CHECK: vclzh 2, 2
+; CHECK: blr
+}
+
+define <4 x i32> @test_v4i32(<4 x i32> %x) nounwind readnone {
+ %vcnt = tail call <4 x i32> @llvm.ctlz.v4i32(<4 x i32> %x)
+ ret <4 x i32> %vcnt
+; CHECK: @test_v4i32
+; CHECK: vclzw 2, 2
+; CHECK: blr
+}
+
+define <2 x i64> @test_v2i64(<2 x i64> %x) nounwind readnone {
+ %vcnt = tail call <2 x i64> @llvm.ctlz.v2i64(<2 x i64> %x)
+ ret <2 x i64> %vcnt
+; CHECK: @test_v2i64
+; CHECK: vclzd 2, 2
+; CHECK: blr
+}
diff --git a/test/CodeGen/PowerPC/vec_misaligned.ll b/test/CodeGen/PowerPC/vec_misaligned.ll
index 73a4a4d..49f11e4 100644
--- a/test/CodeGen/PowerPC/vec_misaligned.ll
+++ b/test/CodeGen/PowerPC/vec_misaligned.ll
@@ -1,6 +1,6 @@
; RUN: llc < %s -march=ppc32 -mcpu=g5 | FileCheck %s
; RUN: llc < %s -mtriple=powerpc64-unknown-linux-gnu -mattr=+altivec -mattr=-vsx -mattr=-power8-vector | FileCheck %s
-; RUN: llc < %s -mtriple=powerpc64le-unknown-linux-gnu -mattr=+altivec | FileCheck %s -check-prefix=CHECK-LE
+; RUN: llc < %s -mtriple=powerpc64le-unknown-linux-gnu -mattr=+altivec -mattr=-vsx -mattr=-power8-vector | FileCheck %s -check-prefix=CHECK-LE
target datalayout = "E-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f128:64:128"
target triple = "powerpc-apple-darwin8"
diff --git a/test/CodeGen/PowerPC/vec_popcnt.ll b/test/CodeGen/PowerPC/vec_popcnt.ll
new file mode 100644
index 0000000..0ce9dfa
--- /dev/null
+++ b/test/CodeGen/PowerPC/vec_popcnt.ll
@@ -0,0 +1,72 @@
+; Check the vpopcnt* instructions that were added in P8.
+; In addition, check the conversions to/from the v2i64 VMX register class that was also added in P8.
+; RUN: llc -mtriple=powerpc64-unknown-linux-gnu -mcpu=pwr8 < %s | FileCheck %s
+; RUN: llc -mtriple=powerpc64-unknown-linux-gnu -mcpu=pwr8 -mattr=-vsx < %s | FileCheck %s
+
+declare <16 x i8> @llvm.ctpop.v16i8(<16 x i8>) nounwind readnone
+declare <8 x i16> @llvm.ctpop.v8i16(<8 x i16>) nounwind readnone
+declare <4 x i32> @llvm.ctpop.v4i32(<4 x i32>) nounwind readnone
+declare <2 x i64> @llvm.ctpop.v2i64(<2 x i64>) nounwind readnone
+
+define <16 x i8> @test_v16i8_v2i64(<2 x i64> %x) nounwind readnone {
+ %tmp = bitcast <2 x i64> %x to <16 x i8>
+ %vcnt = tail call <16 x i8> @llvm.ctpop.v16i8(<16 x i8> %tmp)
+ ret <16 x i8> %vcnt
+; CHECK: @test_v16i8_v2i64
+; CHECK: vpopcntb 2, 2
+; CHECK: blr
+}
+
+define <8 x i16> @test_v8i16_v2i64(<2 x i64> %x) nounwind readnone {
+ %tmp = bitcast <2 x i64> %x to <8 x i16>
+ %vcnt = tail call <8 x i16> @llvm.ctpop.v8i16(<8 x i16> %tmp)
+ ret <8 x i16> %vcnt
+; CHECK: @test_v8i16_v2i64
+; CHECK: vpopcnth 2, 2
+; CHECK: blr
+}
+
+define <4 x i32> @test_v4i32_v2i64(<2 x i64> %x) nounwind readnone {
+ %tmp = bitcast <2 x i64> %x to <4 x i32>
+ %vcnt = tail call <4 x i32> @llvm.ctpop.v4i32(<4 x i32> %tmp)
+ ret <4 x i32> %vcnt
+; CHECK: @test_v4i32_v2i64
+; CHECK: vpopcntw 2, 2
+; CHECK: blr
+}
+
+define <2 x i64> @test_v2i64_v2i64(<2 x i64> %x) nounwind readnone {
+ %vcnt = tail call <2 x i64> @llvm.ctpop.v2i64(<2 x i64> %x)
+ ret <2 x i64> %vcnt
+; CHECK: @test_v2i64_v2i64
+; CHECK: vpopcntd 2, 2
+; CHECK: blr
+}
+
+define <2 x i64> @test_v2i64_v4i32(<4 x i32> %x) nounwind readnone {
+ %tmp = bitcast <4 x i32> %x to <2 x i64>
+ %vcnt = tail call <2 x i64> @llvm.ctpop.v2i64(<2 x i64> %tmp)
+ ret <2 x i64> %vcnt
+; CHECK: @test_v2i64_v4i32
+; CHECK: vpopcntd 2, 2
+; CHECK: blr
+}
+
+define <2 x i64> @test_v2i64_v8i16(<8 x i16> %x) nounwind readnone {
+ %tmp = bitcast <8 x i16> %x to <2 x i64>
+ %vcnt = tail call <2 x i64> @llvm.ctpop.v2i64(<2 x i64> %tmp)
+ ret <2 x i64> %vcnt
+; CHECK: @test_v2i64_v8i16
+; CHECK: vpopcntd 2, 2
+; CHECK: blr
+}
+
+define <2 x i64> @test_v2i64_v16i8(<16 x i8> %x) nounwind readnone {
+ %tmp = bitcast <16 x i8> %x to <2 x i64>
+ %vcnt = tail call <2 x i64> @llvm.ctpop.v2i64(<2 x i64> %tmp)
+ ret <2 x i64> %vcnt
+; CHECK: @test_v2i64_v16i8
+; CHECK: vpopcntd 2, 2
+; CHECK: blr
+}
diff --git a/test/CodeGen/PowerPC/vec_shuffle_le.ll b/test/CodeGen/PowerPC/vec_shuffle_le.ll
index a4b2119..c7fc1c6 100644
--- a/test/CodeGen/PowerPC/vec_shuffle_le.ll
+++ b/test/CodeGen/PowerPC/vec_shuffle_le.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -mtriple=powerpc64le-unknown-linux-gnu -mattr=+altivec | FileCheck %s
+; RUN: llc < %s -mtriple=powerpc64le-unknown-linux-gnu -mattr=+altivec -mattr=-vsx -mcpu=pwr7 | FileCheck %s
define void @VPKUHUM_xy(<16 x i8>* %A, <16 x i8>* %B) {
entry:
diff --git a/test/CodeGen/PowerPC/vec_veqv_vnand_vorc.ll b/test/CodeGen/PowerPC/vec_veqv_vnand_vorc.ll
new file mode 100644
index 0000000..f7d5a51
--- /dev/null
+++ b/test/CodeGen/PowerPC/vec_veqv_vnand_vorc.ll
@@ -0,0 +1,29 @@
+; Check the miscellaneous logical vector operations added in P8
+;
+; RUN: llc -mtriple=powerpc64-unknown-linux-gnu -mcpu=pwr8 -mattr=-vsx < %s | FileCheck %s
+; Test x eqv y
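+; (veqv computes ~(x ^ y), so the xor followed by the xor with all-ones
+; below matches it directly)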
+define <4 x i32> @test_veqv(<4 x i32> %x, <4 x i32> %y) nounwind {
+ %tmp = xor <4 x i32> %x, %y
+ %ret_val = xor <4 x i32> %tmp, < i32 -1, i32 -1, i32 -1, i32 -1>
+ ret <4 x i32> %ret_val
+; CHECK: veqv 2, 2, 3
+}
+
+; Test x vnand y
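+; (vnand computes ~(x & y))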
+define <4 x i32> @test_vnand(<4 x i32> %x, <4 x i32> %y) nounwind {
+ %tmp = and <4 x i32> %x, %y
+ %ret_val = xor <4 x i32> %tmp, <i32 -1, i32 -1, i32 -1, i32 -1>
+ ret <4 x i32> %ret_val
+; CHECK: vnand 2, 2, 3
+}
+
+; Test x vorc y and variants
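+; (vorc computes x | ~y; both the or with the complemented operand and the
+; complement of the intermediate or should map onto it)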
+define <4 x i32> @test_vorc(<4 x i32> %x, <4 x i32> %y) nounwind {
+ %tmp1 = xor <4 x i32> %y, <i32 -1, i32 -1, i32 -1, i32 -1>
+ %tmp2 = or <4 x i32> %x, %tmp1
+; CHECK: vorc 3, 2, 3
+ %tmp3 = xor <4 x i32> %tmp2, <i32 -1, i32 -1, i32 -1, i32 -1>
+ %tmp4 = or <4 x i32> %tmp3, %x
+; CHECK: vorc 2, 2, 3
+ ret <4 x i32> %tmp4
+}
diff --git a/test/CodeGen/PowerPC/vsel-prom.ll b/test/CodeGen/PowerPC/vsel-prom.ll
new file mode 100644
index 0000000..dd219ec
--- /dev/null
+++ b/test/CodeGen/PowerPC/vsel-prom.ll
@@ -0,0 +1,23 @@
+; RUN: llc -mcpu=pwr7 < %s | FileCheck %s
+target datalayout = "E-m:e-i64:64-n32:64"
+target triple = "powerpc64-unknown-linux-gnu"
+
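+; Only the function label is checked; the test guards against a crash while
+; lowering the promoted vector select below.
+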
+; Function Attrs: nounwind
+define void @Compute_Lateral() #0 {
+entry:
+ br i1 undef, label %if.then, label %if.end
+
+if.then: ; preds = %entry
+ unreachable
+
+if.end: ; preds = %entry
+ %0 = select i1 undef, <2 x double> undef, <2 x double> zeroinitializer
+ %1 = extractelement <2 x double> %0, i32 1
+ store double %1, double* undef, align 8
+ ret void
+
+; CHECK-LABEL: @Compute_Lateral
+}
+
+attributes #0 = { nounwind }
+
diff --git a/test/CodeGen/PowerPC/vsx-args.ll b/test/CodeGen/PowerPC/vsx-args.ll
index 520aeb5..2b53c0a 100644
--- a/test/CodeGen/PowerPC/vsx-args.ll
+++ b/test/CodeGen/PowerPC/vsx-args.ll
@@ -1,4 +1,5 @@
; RUN: llc < %s -mcpu=pwr7 -mattr=+vsx | FileCheck %s
+; RUN: llc < %s -mcpu=pwr7 -mattr=+vsx -fast-isel -O0 | FileCheck %s
target datalayout = "E-m:e-i64:64-n32:64"
target triple = "powerpc64-unknown-linux-gnu"
diff --git a/test/CodeGen/PowerPC/vsx-fma-m.ll b/test/CodeGen/PowerPC/vsx-fma-m.ll
index 9dff9a7..ab36072 100644
--- a/test/CodeGen/PowerPC/vsx-fma-m.ll
+++ b/test/CodeGen/PowerPC/vsx-fma-m.ll
@@ -1,4 +1,5 @@
; RUN: llc < %s -mcpu=pwr7 -mattr=+vsx | FileCheck %s
+; RUN: llc < %s -mcpu=pwr7 -mattr=+vsx -fast-isel -O0 | FileCheck -check-prefix=CHECK-FISL %s
; Also run with -schedule-ppc-vsx-fma-mutation-early as a stress test for the
; live-interval-updating logic.
@@ -22,6 +23,15 @@ entry:
; CHECK-DAG: stxsdx 3, 0, 7
; CHECK-DAG: stxsdx 1, 7, [[C1]]
; CHECK: blr
+
+; CHECK-FISL-LABEL: @test1
+; CHECK-FISL-DAG: fmr 0, 1
+; CHECK-FISL-DAG: xsmaddadp 0, 2, 3
+; CHECK-FISL-DAG: stxsdx 0, 0, 7
+; CHECK-FISL-DAG: xsmaddadp 1, 2, 4
+; CHECK-FISL-DAG: li [[C1:[0-9]+]], 8
+; CHECK-FISL-DAG: stxsdx 1, 7, [[C1]]
+; CHECK-FISL: blr
}
define void @test2(double %a, double %b, double %c, double %e, double %f, double* nocapture %d) #0 {
@@ -46,6 +56,19 @@ entry:
; CHECK-DAG: stxsdx 4, 8, [[C1]]
; CHECK-DAG: stxsdx 1, 8, [[C2]]
; CHECK: blr
+
+; CHECK-FISL-LABEL: @test2
+; CHECK-FISL-DAG: fmr 0, 1
+; CHECK-FISL-DAG: xsmaddadp 0, 2, 3
+; CHECK-FISL-DAG: stxsdx 0, 0, 8
+; CHECK-FISL-DAG: fmr 0, 1
+; CHECK-FISL-DAG: xsmaddadp 0, 2, 4
+; CHECK-FISL-DAG: li [[C1:[0-9]+]], 8
+; CHECK-FISL-DAG: stxsdx 0, 8, [[C1]]
+; CHECK-FISL-DAG: xsmaddadp 1, 2, 5
+; CHECK-FISL-DAG: li [[C2:[0-9]+]], 16
+; CHECK-FISL-DAG: stxsdx 1, 8, [[C2]]
+; CHECK-FISL: blr
}
define void @test3(double %a, double %b, double %c, double %e, double %f, double* nocapture %d) #0 {
@@ -81,6 +104,20 @@ entry:
; CHECK-DAG: stxsdx 1, 8, [[C2]]
; CHECK-DAG: stxsdx 4, 8, [[C3]]
; CHECK: blr
+
+; CHECK-FISL-LABEL: @test3
+; CHECK-FISL-DAG: fmr [[F1:[0-9]+]], 1
+; CHECK-FISL-DAG: xsmaddadp [[F1]], 2, 4
+; CHECK-FISL-DAG: fmr 4, [[F1]]
+; CHECK-FISL-DAG: xsmaddadp 4, 2, 3
+; CHECK-FISL-DAG: li [[C1:[0-9]+]], 24
+; CHECK-FISL-DAG: stxsdx 4, 8, [[C1]]
+; CHECK-FISL-DAG: xsmaddadp 1, 2, 5
+; CHECK-FISL-DAG: li [[C2:[0-9]+]], 16
+; CHECK-FISL-DAG: stxsdx 1, 8, [[C2]]
+; CHECK-FISL-DAG: li [[C3:[0-9]+]], 8
+; CHECK-FISL-DAG: stxsdx 0, 8, [[C3]]
+; CHECK-FISL: blr
}
define void @test4(double %a, double %b, double %c, double %e, double %f, double* nocapture %d) #0 {
@@ -116,6 +153,22 @@ entry:
; CHECK-DAG: stxsdx 4, 8, [[C3]]
; CHECK-DAG: stxsdx 1, 8, [[C2]]
; CHECK: blr
+
+; CHECK-FISL-LABEL: @test4
+; CHECK-FISL-DAG: fmr [[F1:[0-9]+]], 1
+; CHECK-FISL-DAG: xsmaddadp [[F1]], 2, 3
+; CHECK-FISL-DAG: stxsdx 0, 0, 8
+; CHECK-FISL-DAG: fmr [[F1]], 1
+; CHECK-FISL-DAG: xsmaddadp [[F1]], 2, 4
+; CHECK-FISL-DAG: li [[C3:[0-9]+]], 8
+; CHECK-FISL-DAG: stxsdx 0, 8, [[C3]]
+; CHECK-FISL-DAG: xsmaddadp 0, 2, 3
+; CHECK-FISL-DAG: li [[C1:[0-9]+]], 24
+; CHECK-FISL-DAG: stxsdx 0, 8, [[C1]]
+; CHECK-FISL-DAG: xsmaddadp 1, 2, 5
+; CHECK-FISL-DAG: li [[C2:[0-9]+]], 16
+; CHECK-FISL-DAG: stxsdx 1, 8, [[C2]]
+; CHECK-FISL: blr
}
declare double @llvm.fma.f64(double, double, double) #0
@@ -136,6 +189,15 @@ entry:
; CHECK-DAG: stxvd2x 36, 0, 3
; CHECK-DAG: stxvd2x 34, 3, [[C1:[0-9]+]]
; CHECK: blr
+
+; CHECK-FISL-LABEL: @testv1
+; CHECK-FISL-DAG: xxlor 0, 34, 34
+; CHECK-FISL-DAG: xvmaddadp 0, 35, 36
+; CHECK-FISL-DAG: stxvd2x 0, 0, 3
+; CHECK-FISL-DAG: xvmaddadp 34, 35, 37
+; CHECK-FISL-DAG: li [[C1:[0-9]+]], 16
+; CHECK-FISL-DAG: stxvd2x 34, 3, [[C1]]
+; CHECK-FISL: blr
}
define void @testv2(<2 x double> %a, <2 x double> %b, <2 x double> %c, <2 x double> %e, <2 x double> %f, <2 x double>* nocapture %d) #0 {
@@ -160,6 +222,19 @@ entry:
; CHECK-DAG: stxvd2x 37, 3, [[C1:[0-9]+]]
; CHECK-DAG: stxvd2x 34, 3, [[C2:[0-9]+]]
; CHECK: blr
+
+; CHECK-FISL-LABEL: @testv2
+; CHECK-FISL-DAG: xxlor 0, 34, 34
+; CHECK-FISL-DAG: xvmaddadp 0, 35, 36
+; CHECK-FISL-DAG: stxvd2x 0, 0, 3
+; CHECK-FISL-DAG: xxlor 0, 34, 34
+; CHECK-FISL-DAG: xvmaddadp 0, 35, 37
+; CHECK-FISL-DAG: li [[C1:[0-9]+]], 16
+; CHECK-FISL-DAG: stxvd2x 0, 3, [[C1]]
+; CHECK-FISL-DAG: xvmaddadp 34, 35, 38
+; CHECK-FISL-DAG: li [[C2:[0-9]+]], 32
+; CHECK-FISL-DAG: stxvd2x 34, 3, [[C2]]
+; CHECK-FISL: blr
}
define void @testv3(<2 x double> %a, <2 x double> %b, <2 x double> %c, <2 x double> %e, <2 x double> %f, <2 x double>* nocapture %d) #0 {
@@ -194,13 +269,30 @@ entry:
; re-ordering the instructions.
; CHECK-DAG: xvmaddadp [[V1]], 35, 36
-; CHECK-DAG: xvmaddmdp 36, 35, 37
+; CHECK-DAG: xvmaddmdp 35, 36, 37
; CHECK-DAG: xvmaddadp 34, 35, 38
; CHECK-DAG: stxvd2x 32, 0, 3
-; CHECK-DAG: stxvd2x 36, 3, [[C1]]
+; CHECK-DAG: stxvd2x 35, 3, [[C1]]
; CHECK-DAG: stxvd2x 34, 3, [[C2]]
; CHECK-DAG: stxvd2x 37, 3, [[C3]]
; CHECK: blr
+
+; CHECK-FISL-LABEL: @testv3
+; CHECK-FISL-DAG: xxlor [[V1:[0-9]+]], 34, 34
+; CHECK-FISL-DAG: xvmaddadp [[V1]], 35, 36
+; CHECK-FISL-DAG: stxvd2x [[V1]], 0, 3
+; CHECK-FISL-DAG: xxlor [[V2:[0-9]+]], 34, 34
+; CHECK-FISL-DAG: xvmaddadp [[V2]], 35, 37
+; CHECK-FISL-DAG: xxlor [[V3:[0-9]+]], 0, 0
+; CHECK-FISL-DAG: xvmaddadp [[V3]], 35, 36
+; CHECK-FISL-DAG: li [[C1:[0-9]+]], 48
+; CHECK-FISL-DAG: stxvd2x [[V3]], 3, [[C1]]
+; CHECK-FISL-DAG: xvmaddadp 34, 35, 38
+; CHECK-FISL-DAG: li [[C2:[0-9]+]], 32
+; CHECK-FISL-DAG: stxvd2x 34, 3, [[C2]]
+; CHECK-FISL-DAG: li [[C3:[0-9]+]], 16
+; CHECK-FISL-DAG: stxvd2x 0, 3, [[C3]]
+; CHECK-FISL: blr
}
define void @testv4(<2 x double> %a, <2 x double> %b, <2 x double> %c, <2 x double> %e, <2 x double> %f, <2 x double>* nocapture %d) #0 {
@@ -236,6 +328,22 @@ entry:
; CHECK-DAG: stxvd2x 37, 3, [[C3]]
; CHECK-DAG: stxvd2x 34, 3, [[C2]]
; CHECK: blr
+
+; CHECK-FISL-LABEL: @testv4
+; CHECK-FISL-DAG: xxlor [[V1:[0-9]+]], 34, 34
+; CHECK-FISL-DAG: xvmaddadp [[V1]], 35, 36
+; CHECK-FISL-DAG: stxvd2x 0, 0, 3
+; CHECK-FISL-DAG: xxlor [[V2:[0-9]+]], 34, 34
+; CHECK-FISL-DAG: xvmaddadp [[V2]], 35, 37
+; CHECK-FISL-DAG: li [[C1:[0-9]+]], 16
+; CHECK-FISL-DAG: stxvd2x 0, 3, [[C1]]
+; CHECK-FISL-DAG: xvmaddadp 0, 35, 37
+; CHECK-FISL-DAG: li [[C3:[0-9]+]], 48
+; CHECK-FISL-DAG: stxvd2x 0, 3, [[C3]]
+; CHECK-FISL-DAG: xvmaddadp 0, 35, 36
+; CHECK-FISL-DAG: li [[C2:[0-9]+]], 32
+; CHECK-FISL-DAG: stxvd2x 34, 3, [[C2]]
+; CHECK-FISL: blr
}
declare <2 x double> @llvm.fma.v2f64(<2 x double>, <2 x double>, <2 x double>) #0
diff --git a/test/CodeGen/PowerPC/vsx-infl-copy1.ll b/test/CodeGen/PowerPC/vsx-infl-copy1.ll
new file mode 100644
index 0000000..cff7f8f
--- /dev/null
+++ b/test/CodeGen/PowerPC/vsx-infl-copy1.ll
@@ -0,0 +1,133 @@
+; RUN: llc -mcpu=pwr7 < %s | FileCheck %s
+target datalayout = "E-m:e-i64:64-n32:64"
+target triple = "powerpc64-unknown-linux-gnu"
+
+@ub = external global [1024 x i32], align 4
+@uc = external global [1024 x i32], align 4
+
+; Function Attrs: noinline nounwind
+define void @_Z8example9Pj() #0 {
+entry:
+ br label %vector.body
+
+; CHECK-LABEL: @_Z8example9Pj
+; CHECK: xxlor
+
+vector.body: ; preds = %vector.body, %entry
+ %index = phi i64 [ 0, %entry ], [ %index.next, %vector.body ]
+ %vec.phi = phi <4 x i32> [ zeroinitializer, %entry ], [ %43, %vector.body ]
+ %vec.phi20 = phi <4 x i32> [ zeroinitializer, %entry ], [ %44, %vector.body ]
+ %vec.phi21 = phi <4 x i32> [ zeroinitializer, %entry ], [ %45, %vector.body ]
+ %vec.phi23 = phi <4 x i32> [ zeroinitializer, %entry ], [ %46, %vector.body ]
+ %vec.phi24 = phi <4 x i32> [ zeroinitializer, %entry ], [ %47, %vector.body ]
+ %vec.phi25 = phi <4 x i32> [ zeroinitializer, %entry ], [ %48, %vector.body ]
+ %vec.phi26 = phi <4 x i32> [ zeroinitializer, %entry ], [ %49, %vector.body ]
+ %vec.phi27 = phi <4 x i32> [ zeroinitializer, %entry ], [ %50, %vector.body ]
+ %vec.phi28 = phi <4 x i32> [ zeroinitializer, %entry ], [ %51, %vector.body ]
+ %vec.phi29 = phi <4 x i32> [ zeroinitializer, %entry ], [ %52, %vector.body ]
+ %vec.phi30 = phi <4 x i32> [ zeroinitializer, %entry ], [ %53, %vector.body ]
+ %wide.load32 = load <4 x i32>* null, align 4
+ %.sum82 = add i64 %index, 24
+ %0 = getelementptr [1024 x i32]* @ub, i64 0, i64 %.sum82
+ %1 = bitcast i32* %0 to <4 x i32>*
+ %wide.load36 = load <4 x i32>* %1, align 4
+ %wide.load37 = load <4 x i32>* undef, align 4
+ %.sum84 = add i64 %index, 32
+ %2 = getelementptr [1024 x i32]* @ub, i64 0, i64 %.sum84
+ %3 = bitcast i32* %2 to <4 x i32>*
+ %wide.load38 = load <4 x i32>* %3, align 4
+ %.sum85 = add i64 %index, 36
+ %4 = getelementptr [1024 x i32]* @ub, i64 0, i64 %.sum85
+ %5 = bitcast i32* %4 to <4 x i32>*
+ %wide.load39 = load <4 x i32>* %5, align 4
+ %6 = getelementptr [1024 x i32]* @ub, i64 0, i64 undef
+ %7 = bitcast i32* %6 to <4 x i32>*
+ %wide.load40 = load <4 x i32>* %7, align 4
+ %.sum87 = add i64 %index, 44
+ %8 = getelementptr [1024 x i32]* @ub, i64 0, i64 %.sum87
+ %9 = bitcast i32* %8 to <4 x i32>*
+ %wide.load41 = load <4 x i32>* %9, align 4
+ %10 = getelementptr inbounds [1024 x i32]* @uc, i64 0, i64 %index
+ %11 = bitcast i32* %10 to <4 x i32>*
+ %wide.load42 = load <4 x i32>* %11, align 4
+ %.sum8889 = or i64 %index, 4
+ %12 = getelementptr [1024 x i32]* @uc, i64 0, i64 %.sum8889
+ %13 = bitcast i32* %12 to <4 x i32>*
+ %wide.load43 = load <4 x i32>* %13, align 4
+ %.sum9091 = or i64 %index, 8
+ %14 = getelementptr [1024 x i32]* @uc, i64 0, i64 %.sum9091
+ %15 = bitcast i32* %14 to <4 x i32>*
+ %wide.load44 = load <4 x i32>* %15, align 4
+ %.sum94 = add i64 %index, 16
+ %16 = getelementptr [1024 x i32]* @uc, i64 0, i64 %.sum94
+ %17 = bitcast i32* %16 to <4 x i32>*
+ %wide.load46 = load <4 x i32>* %17, align 4
+ %.sum95 = add i64 %index, 20
+ %18 = getelementptr [1024 x i32]* @uc, i64 0, i64 %.sum95
+ %19 = bitcast i32* %18 to <4 x i32>*
+ %wide.load47 = load <4 x i32>* %19, align 4
+ %20 = getelementptr [1024 x i32]* @uc, i64 0, i64 undef
+ %21 = bitcast i32* %20 to <4 x i32>*
+ %wide.load48 = load <4 x i32>* %21, align 4
+ %.sum97 = add i64 %index, 28
+ %22 = getelementptr [1024 x i32]* @uc, i64 0, i64 %.sum97
+ %23 = bitcast i32* %22 to <4 x i32>*
+ %wide.load49 = load <4 x i32>* %23, align 4
+ %.sum98 = add i64 %index, 32
+ %24 = getelementptr [1024 x i32]* @uc, i64 0, i64 %.sum98
+ %25 = bitcast i32* %24 to <4 x i32>*
+ %wide.load50 = load <4 x i32>* %25, align 4
+ %.sum99 = add i64 %index, 36
+ %26 = getelementptr [1024 x i32]* @uc, i64 0, i64 %.sum99
+ %27 = bitcast i32* %26 to <4 x i32>*
+ %wide.load51 = load <4 x i32>* %27, align 4
+ %.sum100 = add i64 %index, 40
+ %28 = getelementptr [1024 x i32]* @uc, i64 0, i64 %.sum100
+ %29 = bitcast i32* %28 to <4 x i32>*
+ %wide.load52 = load <4 x i32>* %29, align 4
+ %.sum101 = add i64 %index, 44
+ %30 = getelementptr [1024 x i32]* @uc, i64 0, i64 %.sum101
+ %31 = bitcast i32* %30 to <4 x i32>*
+ %wide.load53 = load <4 x i32>* %31, align 4
+ %32 = add <4 x i32> zeroinitializer, %vec.phi
+ %33 = add <4 x i32> zeroinitializer, %vec.phi20
+ %34 = add <4 x i32> %wide.load32, %vec.phi21
+ %35 = add <4 x i32> zeroinitializer, %vec.phi23
+ %36 = add <4 x i32> zeroinitializer, %vec.phi24
+ %37 = add <4 x i32> %wide.load36, %vec.phi25
+ %38 = add <4 x i32> %wide.load37, %vec.phi26
+ %39 = add <4 x i32> %wide.load38, %vec.phi27
+ %40 = add <4 x i32> %wide.load39, %vec.phi28
+ %41 = add <4 x i32> %wide.load40, %vec.phi29
+ %42 = add <4 x i32> %wide.load41, %vec.phi30
+ %43 = sub <4 x i32> %32, %wide.load42
+ %44 = sub <4 x i32> %33, %wide.load43
+ %45 = sub <4 x i32> %34, %wide.load44
+ %46 = sub <4 x i32> %35, %wide.load46
+ %47 = sub <4 x i32> %36, %wide.load47
+ %48 = sub <4 x i32> %37, %wide.load48
+ %49 = sub <4 x i32> %38, %wide.load49
+ %50 = sub <4 x i32> %39, %wide.load50
+ %51 = sub <4 x i32> %40, %wide.load51
+ %52 = sub <4 x i32> %41, %wide.load52
+ %53 = sub <4 x i32> %42, %wide.load53
+ %index.next = add i64 %index, 48
+ br i1 false, label %middle.block, label %vector.body
+
+middle.block: ; preds = %vector.body
+ %.lcssa112 = phi <4 x i32> [ %53, %vector.body ]
+ %.lcssa111 = phi <4 x i32> [ %52, %vector.body ]
+ %.lcssa110 = phi <4 x i32> [ %51, %vector.body ]
+ %.lcssa109 = phi <4 x i32> [ %50, %vector.body ]
+ %.lcssa108 = phi <4 x i32> [ %49, %vector.body ]
+ %.lcssa107 = phi <4 x i32> [ %48, %vector.body ]
+ %.lcssa106 = phi <4 x i32> [ %47, %vector.body ]
+ %.lcssa105 = phi <4 x i32> [ %46, %vector.body ]
+ %.lcssa103 = phi <4 x i32> [ %45, %vector.body ]
+ %.lcssa102 = phi <4 x i32> [ %44, %vector.body ]
+ %.lcssa = phi <4 x i32> [ %43, %vector.body ]
+ ret void
+}
+
+attributes #0 = { noinline nounwind }
+
diff --git a/test/CodeGen/PowerPC/vsx-infl-copy2.ll b/test/CodeGen/PowerPC/vsx-infl-copy2.ll
new file mode 100644
index 0000000..0f27906
--- /dev/null
+++ b/test/CodeGen/PowerPC/vsx-infl-copy2.ll
@@ -0,0 +1,114 @@
+; RUN: llc -mcpu=pwr7 < %s | FileCheck %s
+target datalayout = "E-m:e-i64:64-n32:64"
+target triple = "powerpc64-unknown-linux-gnu"
+
+; Function Attrs: nounwind
+define void @_Z28test_goto_loop_unroll_factorILi22EiEvPKT0_iPKc(i32* nocapture readonly %first) #0 {
+entry:
+ br i1 false, label %loop2_start, label %if.end5
+
+; CHECK-LABEL: @_Z28test_goto_loop_unroll_factorILi22EiEvPKT0_iPKc
+
+loop2_start: ; preds = %loop2_start, %entry
+ br i1 undef, label %loop2_start, label %if.then.i31
+
+if.end5: ; preds = %entry
+ br i1 undef, label %loop_start.preheader, label %if.then.i31
+
+loop_start.preheader: ; preds = %if.end5
+ br i1 false, label %middle.block, label %vector.body
+
+vector.body: ; preds = %vector.body, %loop_start.preheader
+ %vec.phi61 = phi <4 x i32> [ %34, %vector.body ], [ zeroinitializer, %loop_start.preheader ]
+ %vec.phi62 = phi <4 x i32> [ %35, %vector.body ], [ zeroinitializer, %loop_start.preheader ]
+ %vec.phi63 = phi <4 x i32> [ %36, %vector.body ], [ zeroinitializer, %loop_start.preheader ]
+ %vec.phi65 = phi <4 x i32> [ %37, %vector.body ], [ zeroinitializer, %loop_start.preheader ]
+ %vec.phi67 = phi <4 x i32> [ %38, %vector.body ], [ zeroinitializer, %loop_start.preheader ]
+ %vec.phi68 = phi <4 x i32> [ %39, %vector.body ], [ zeroinitializer, %loop_start.preheader ]
+ %vec.phi69 = phi <4 x i32> [ %40, %vector.body ], [ zeroinitializer, %loop_start.preheader ]
+ %vec.phi70 = phi <4 x i32> [ %41, %vector.body ], [ zeroinitializer, %loop_start.preheader ]
+ %vec.phi71 = phi <4 x i32> [ %42, %vector.body ], [ zeroinitializer, %loop_start.preheader ]
+ %.sum = add i64 0, 4
+ %wide.load72 = load <4 x i32>* null, align 4
+ %.sum109 = add i64 0, 8
+ %0 = getelementptr i32* %first, i64 %.sum109
+ %1 = bitcast i32* %0 to <4 x i32>*
+ %wide.load73 = load <4 x i32>* %1, align 4
+ %.sum110 = add i64 0, 12
+ %2 = getelementptr i32* %first, i64 %.sum110
+ %3 = bitcast i32* %2 to <4 x i32>*
+ %wide.load74 = load <4 x i32>* %3, align 4
+ %.sum112 = add i64 0, 20
+ %4 = getelementptr i32* %first, i64 %.sum112
+ %5 = bitcast i32* %4 to <4 x i32>*
+ %wide.load76 = load <4 x i32>* %5, align 4
+ %.sum114 = add i64 0, 28
+ %6 = getelementptr i32* %first, i64 %.sum114
+ %7 = bitcast i32* %6 to <4 x i32>*
+ %wide.load78 = load <4 x i32>* %7, align 4
+ %.sum115 = add i64 0, 32
+ %8 = getelementptr i32* %first, i64 %.sum115
+ %9 = bitcast i32* %8 to <4 x i32>*
+ %wide.load79 = load <4 x i32>* %9, align 4
+ %.sum116 = add i64 0, 36
+ %10 = getelementptr i32* %first, i64 %.sum116
+ %11 = bitcast i32* %10 to <4 x i32>*
+ %wide.load80 = load <4 x i32>* %11, align 4
+ %.sum117 = add i64 0, 40
+ %12 = getelementptr i32* %first, i64 %.sum117
+ %13 = bitcast i32* %12 to <4 x i32>*
+ %wide.load81 = load <4 x i32>* %13, align 4
+ %.sum118 = add i64 0, 44
+ %14 = getelementptr i32* %first, i64 %.sum118
+ %15 = bitcast i32* %14 to <4 x i32>*
+ %wide.load82 = load <4 x i32>* %15, align 4
+ %16 = mul <4 x i32> %wide.load72, <i32 269850533, i32 269850533, i32 269850533, i32 269850533>
+ %17 = mul <4 x i32> %wide.load73, <i32 269850533, i32 269850533, i32 269850533, i32 269850533>
+ %18 = mul <4 x i32> %wide.load74, <i32 269850533, i32 269850533, i32 269850533, i32 269850533>
+ %19 = mul <4 x i32> %wide.load76, <i32 269850533, i32 269850533, i32 269850533, i32 269850533>
+ %20 = mul <4 x i32> %wide.load78, <i32 269850533, i32 269850533, i32 269850533, i32 269850533>
+ %21 = mul <4 x i32> %wide.load79, <i32 269850533, i32 269850533, i32 269850533, i32 269850533>
+ %22 = mul <4 x i32> %wide.load80, <i32 269850533, i32 269850533, i32 269850533, i32 269850533>
+ %23 = mul <4 x i32> %wide.load81, <i32 269850533, i32 269850533, i32 269850533, i32 269850533>
+ %24 = mul <4 x i32> %wide.load82, <i32 269850533, i32 269850533, i32 269850533, i32 269850533>
+ %25 = add <4 x i32> %16, <i32 -1138325064, i32 -1138325064, i32 -1138325064, i32 -1138325064>
+ %26 = add <4 x i32> %17, <i32 -1138325064, i32 -1138325064, i32 -1138325064, i32 -1138325064>
+ %27 = add <4 x i32> %18, <i32 -1138325064, i32 -1138325064, i32 -1138325064, i32 -1138325064>
+ %28 = add <4 x i32> %19, <i32 -1138325064, i32 -1138325064, i32 -1138325064, i32 -1138325064>
+ %29 = add <4 x i32> %20, <i32 -1138325064, i32 -1138325064, i32 -1138325064, i32 -1138325064>
+ %30 = add <4 x i32> %21, <i32 -1138325064, i32 -1138325064, i32 -1138325064, i32 -1138325064>
+ %31 = add <4 x i32> %22, <i32 -1138325064, i32 -1138325064, i32 -1138325064, i32 -1138325064>
+ %32 = add <4 x i32> %23, <i32 -1138325064, i32 -1138325064, i32 -1138325064, i32 -1138325064>
+ %33 = add <4 x i32> %24, <i32 -1138325064, i32 -1138325064, i32 -1138325064, i32 -1138325064>
+ %34 = add nsw <4 x i32> %25, %vec.phi61
+ %35 = add nsw <4 x i32> %26, %vec.phi62
+ %36 = add nsw <4 x i32> %27, %vec.phi63
+ %37 = add nsw <4 x i32> %28, %vec.phi65
+ %38 = add nsw <4 x i32> %29, %vec.phi67
+ %39 = add nsw <4 x i32> %30, %vec.phi68
+ %40 = add nsw <4 x i32> %31, %vec.phi69
+ %41 = add nsw <4 x i32> %32, %vec.phi70
+ %42 = add nsw <4 x i32> %33, %vec.phi71
+ br i1 false, label %middle.block, label %vector.body
+
+middle.block: ; preds = %vector.body, %loop_start.preheader
+ %rdx.vec.exit.phi85 = phi <4 x i32> [ zeroinitializer, %loop_start.preheader ], [ %34, %vector.body ]
+ %rdx.vec.exit.phi86 = phi <4 x i32> [ zeroinitializer, %loop_start.preheader ], [ %35, %vector.body ]
+ %rdx.vec.exit.phi87 = phi <4 x i32> [ zeroinitializer, %loop_start.preheader ], [ %36, %vector.body ]
+ %rdx.vec.exit.phi89 = phi <4 x i32> [ zeroinitializer, %loop_start.preheader ], [ %37, %vector.body ]
+ %rdx.vec.exit.phi91 = phi <4 x i32> [ zeroinitializer, %loop_start.preheader ], [ %38, %vector.body ]
+ %rdx.vec.exit.phi92 = phi <4 x i32> [ zeroinitializer, %loop_start.preheader ], [ %39, %vector.body ]
+ %rdx.vec.exit.phi93 = phi <4 x i32> [ zeroinitializer, %loop_start.preheader ], [ %40, %vector.body ]
+ %rdx.vec.exit.phi94 = phi <4 x i32> [ zeroinitializer, %loop_start.preheader ], [ %41, %vector.body ]
+ %rdx.vec.exit.phi95 = phi <4 x i32> [ zeroinitializer, %loop_start.preheader ], [ %42, %vector.body ]
+ br i1 false, label %if.then.i31, label %loop_start.prol
+
+loop_start.prol: ; preds = %loop_start.prol, %middle.block
+ br label %loop_start.prol
+
+if.then.i31: ; preds = %middle.block, %if.end5, %loop2_start
+ unreachable
+}
+
+attributes #0 = { nounwind }
+
diff --git a/test/CodeGen/PowerPC/vsx-ldst-builtin-le.ll b/test/CodeGen/PowerPC/vsx-ldst-builtin-le.ll
new file mode 100644
index 0000000..7367672
--- /dev/null
+++ b/test/CodeGen/PowerPC/vsx-ldst-builtin-le.ll
@@ -0,0 +1,172 @@
+; RUN: llc -mcpu=pwr8 -mattr=+vsx -O2 -mtriple=powerpc64le-unknown-linux-gnu < %s > %t
+; RUN: grep lxvd2x < %t | count 18
+; RUN: grep stxvd2x < %t | count 18
+; RUN: grep xxpermdi < %t | count 36
+
+@vf = global <4 x float> <float -1.500000e+00, float 2.500000e+00, float -3.500000e+00, float 4.500000e+00>, align 16
+@vd = global <2 x double> <double 3.500000e+00, double -7.500000e+00>, align 16
+@vsi = global <4 x i32> <i32 -1, i32 2, i32 -3, i32 4>, align 16
+@vui = global <4 x i32> <i32 0, i32 1, i32 2, i32 3>, align 16
+@vsll = global <2 x i64> <i64 255, i64 -937>, align 16
+@vull = global <2 x i64> <i64 1447, i64 2894>, align 16
+@res_vsi = common global <4 x i32> zeroinitializer, align 16
+@res_vui = common global <4 x i32> zeroinitializer, align 16
+@res_vf = common global <4 x float> zeroinitializer, align 16
+@res_vsll = common global <2 x i64> zeroinitializer, align 16
+@res_vull = common global <2 x i64> zeroinitializer, align 16
+@res_vd = common global <2 x double> zeroinitializer, align 16
+
+define void @test1() {
+entry:
+; CHECK-LABEL: test1
+ %__a.addr.i31 = alloca i32, align 4
+ %__b.addr.i32 = alloca <4 x i32>*, align 8
+ %__a.addr.i29 = alloca i32, align 4
+ %__b.addr.i30 = alloca <4 x float>*, align 8
+ %__a.addr.i27 = alloca i32, align 4
+ %__b.addr.i28 = alloca <2 x i64>*, align 8
+ %__a.addr.i25 = alloca i32, align 4
+ %__b.addr.i26 = alloca <2 x i64>*, align 8
+ %__a.addr.i23 = alloca i32, align 4
+ %__b.addr.i24 = alloca <2 x double>*, align 8
+ %__a.addr.i20 = alloca <4 x i32>, align 16
+ %__b.addr.i21 = alloca i32, align 4
+ %__c.addr.i22 = alloca <4 x i32>*, align 8
+ %__a.addr.i17 = alloca <4 x i32>, align 16
+ %__b.addr.i18 = alloca i32, align 4
+ %__c.addr.i19 = alloca <4 x i32>*, align 8
+ %__a.addr.i14 = alloca <4 x float>, align 16
+ %__b.addr.i15 = alloca i32, align 4
+ %__c.addr.i16 = alloca <4 x float>*, align 8
+ %__a.addr.i11 = alloca <2 x i64>, align 16
+ %__b.addr.i12 = alloca i32, align 4
+ %__c.addr.i13 = alloca <2 x i64>*, align 8
+ %__a.addr.i8 = alloca <2 x i64>, align 16
+ %__b.addr.i9 = alloca i32, align 4
+ %__c.addr.i10 = alloca <2 x i64>*, align 8
+ %__a.addr.i6 = alloca <2 x double>, align 16
+ %__b.addr.i7 = alloca i32, align 4
+ %__c.addr.i = alloca <2 x double>*, align 8
+ %__a.addr.i = alloca i32, align 4
+ %__b.addr.i = alloca <4 x i32>*, align 8
+ store i32 0, i32* %__a.addr.i, align 4
+ store <4 x i32>* @vsi, <4 x i32>** %__b.addr.i, align 8
+ %0 = load i32* %__a.addr.i, align 4
+ %1 = load <4 x i32>** %__b.addr.i, align 8
+ %2 = bitcast <4 x i32>* %1 to i8*
+ %3 = getelementptr i8* %2, i32 %0
+ %4 = call <4 x i32> @llvm.ppc.vsx.lxvw4x(i8* %3)
+ store <4 x i32> %4, <4 x i32>* @res_vsi, align 16
+ store i32 0, i32* %__a.addr.i31, align 4
+ store <4 x i32>* @vui, <4 x i32>** %__b.addr.i32, align 8
+ %5 = load i32* %__a.addr.i31, align 4
+ %6 = load <4 x i32>** %__b.addr.i32, align 8
+ %7 = bitcast <4 x i32>* %6 to i8*
+ %8 = getelementptr i8* %7, i32 %5
+ %9 = call <4 x i32> @llvm.ppc.vsx.lxvw4x(i8* %8)
+ store <4 x i32> %9, <4 x i32>* @res_vui, align 16
+ store i32 0, i32* %__a.addr.i29, align 4
+ store <4 x float>* @vf, <4 x float>** %__b.addr.i30, align 8
+ %10 = load i32* %__a.addr.i29, align 4
+ %11 = load <4 x float>** %__b.addr.i30, align 8
+ %12 = bitcast <4 x float>* %11 to i8*
+ %13 = getelementptr i8* %12, i32 %10
+ %14 = call <4 x i32> @llvm.ppc.vsx.lxvw4x(i8* %13)
+ %15 = bitcast <4 x i32> %14 to <4 x float>
+ store <4 x float> %15, <4 x float>* @res_vf, align 16
+ store i32 0, i32* %__a.addr.i27, align 4
+ store <2 x i64>* @vsll, <2 x i64>** %__b.addr.i28, align 8
+ %16 = load i32* %__a.addr.i27, align 4
+ %17 = load <2 x i64>** %__b.addr.i28, align 8
+ %18 = bitcast <2 x i64>* %17 to i8*
+ %19 = getelementptr i8* %18, i32 %16
+ %20 = call <2 x double> @llvm.ppc.vsx.lxvd2x(i8* %19)
+ %21 = bitcast <2 x double> %20 to <2 x i64>
+ store <2 x i64> %21, <2 x i64>* @res_vsll, align 16
+ store i32 0, i32* %__a.addr.i25, align 4
+ store <2 x i64>* @vull, <2 x i64>** %__b.addr.i26, align 8
+ %22 = load i32* %__a.addr.i25, align 4
+ %23 = load <2 x i64>** %__b.addr.i26, align 8
+ %24 = bitcast <2 x i64>* %23 to i8*
+ %25 = getelementptr i8* %24, i32 %22
+ %26 = call <2 x double> @llvm.ppc.vsx.lxvd2x(i8* %25)
+ %27 = bitcast <2 x double> %26 to <2 x i64>
+ store <2 x i64> %27, <2 x i64>* @res_vull, align 16
+ store i32 0, i32* %__a.addr.i23, align 4
+ store <2 x double>* @vd, <2 x double>** %__b.addr.i24, align 8
+ %28 = load i32* %__a.addr.i23, align 4
+ %29 = load <2 x double>** %__b.addr.i24, align 8
+ %30 = bitcast <2 x double>* %29 to i8*
+ %31 = getelementptr i8* %30, i32 %28
+ %32 = call <2 x double> @llvm.ppc.vsx.lxvd2x(i8* %31)
+ store <2 x double> %32, <2 x double>* @res_vd, align 16
+ %33 = load <4 x i32>* @vsi, align 16
+ store <4 x i32> %33, <4 x i32>* %__a.addr.i20, align 16
+ store i32 0, i32* %__b.addr.i21, align 4
+ store <4 x i32>* @res_vsi, <4 x i32>** %__c.addr.i22, align 8
+ %34 = load <4 x i32>* %__a.addr.i20, align 16
+ %35 = load i32* %__b.addr.i21, align 4
+ %36 = load <4 x i32>** %__c.addr.i22, align 8
+ %37 = bitcast <4 x i32>* %36 to i8*
+ %38 = getelementptr i8* %37, i32 %35
+ call void @llvm.ppc.vsx.stxvw4x(<4 x i32> %34, i8* %38)
+ %39 = load <4 x i32>* @vui, align 16
+ store <4 x i32> %39, <4 x i32>* %__a.addr.i17, align 16
+ store i32 0, i32* %__b.addr.i18, align 4
+ store <4 x i32>* @res_vui, <4 x i32>** %__c.addr.i19, align 8
+ %40 = load <4 x i32>* %__a.addr.i17, align 16
+ %41 = load i32* %__b.addr.i18, align 4
+ %42 = load <4 x i32>** %__c.addr.i19, align 8
+ %43 = bitcast <4 x i32>* %42 to i8*
+ %44 = getelementptr i8* %43, i32 %41
+ call void @llvm.ppc.vsx.stxvw4x(<4 x i32> %40, i8* %44)
+ %45 = load <4 x float>* @vf, align 16
+ store <4 x float> %45, <4 x float>* %__a.addr.i14, align 16
+ store i32 0, i32* %__b.addr.i15, align 4
+ store <4 x float>* @res_vf, <4 x float>** %__c.addr.i16, align 8
+ %46 = load <4 x float>* %__a.addr.i14, align 16
+ %47 = bitcast <4 x float> %46 to <4 x i32>
+ %48 = load i32* %__b.addr.i15, align 4
+ %49 = load <4 x float>** %__c.addr.i16, align 8
+ %50 = bitcast <4 x float>* %49 to i8*
+ %51 = getelementptr i8* %50, i32 %48
+ call void @llvm.ppc.vsx.stxvw4x(<4 x i32> %47, i8* %51) #1
+ %52 = load <2 x i64>* @vsll, align 16
+ store <2 x i64> %52, <2 x i64>* %__a.addr.i11, align 16
+ store i32 0, i32* %__b.addr.i12, align 4
+ store <2 x i64>* @res_vsll, <2 x i64>** %__c.addr.i13, align 8
+ %53 = load <2 x i64>* %__a.addr.i11, align 16
+ %54 = bitcast <2 x i64> %53 to <2 x double>
+ %55 = load i32* %__b.addr.i12, align 4
+ %56 = load <2 x i64>** %__c.addr.i13, align 8
+ %57 = bitcast <2 x i64>* %56 to i8*
+ %58 = getelementptr i8* %57, i32 %55
+ call void @llvm.ppc.vsx.stxvd2x(<2 x double> %54, i8* %58)
+ %59 = load <2 x i64>* @vull, align 16
+ store <2 x i64> %59, <2 x i64>* %__a.addr.i8, align 16
+ store i32 0, i32* %__b.addr.i9, align 4
+ store <2 x i64>* @res_vull, <2 x i64>** %__c.addr.i10, align 8
+ %60 = load <2 x i64>* %__a.addr.i8, align 16
+ %61 = bitcast <2 x i64> %60 to <2 x double>
+ %62 = load i32* %__b.addr.i9, align 4
+ %63 = load <2 x i64>** %__c.addr.i10, align 8
+ %64 = bitcast <2 x i64>* %63 to i8*
+ %65 = getelementptr i8* %64, i32 %62
+ call void @llvm.ppc.vsx.stxvd2x(<2 x double> %61, i8* %65)
+ %66 = load <2 x double>* @vd, align 16
+ store <2 x double> %66, <2 x double>* %__a.addr.i6, align 16
+ store i32 0, i32* %__b.addr.i7, align 4
+ store <2 x double>* @res_vd, <2 x double>** %__c.addr.i, align 8
+ %67 = load <2 x double>* %__a.addr.i6, align 16
+ %68 = load i32* %__b.addr.i7, align 4
+ %69 = load <2 x double>** %__c.addr.i, align 8
+ %70 = bitcast <2 x double>* %69 to i8*
+ %71 = getelementptr i8* %70, i32 %68
+ call void @llvm.ppc.vsx.stxvd2x(<2 x double> %67, i8* %71)
+ ret void
+}
+
+declare void @llvm.ppc.vsx.stxvd2x(<2 x double>, i8*)
+declare void @llvm.ppc.vsx.stxvw4x(<4 x i32>, i8*)
+declare <2 x double> @llvm.ppc.vsx.lxvd2x(i8*)
+declare <4 x i32> @llvm.ppc.vsx.lxvw4x(i8*)
diff --git a/test/CodeGen/PowerPC/vsx-ldst.ll b/test/CodeGen/PowerPC/vsx-ldst.ll
index 0c9ebef..688187d 100644
--- a/test/CodeGen/PowerPC/vsx-ldst.ll
+++ b/test/CodeGen/PowerPC/vsx-ldst.ll
@@ -3,6 +3,16 @@
; RUN: grep lxvd2x < %t | count 3
; RUN: grep stxvw4x < %t | count 3
; RUN: grep stxvd2x < %t | count 3
+; RUN: llc -mcpu=pwr8 -mattr=+vsx -O0 -fast-isel=1 -mtriple=powerpc64-unknown-linux-gnu < %s > %t
+; RUN: grep lxvw4x < %t | count 3
+; RUN: grep lxvd2x < %t | count 3
+; RUN: grep stxvw4x < %t | count 3
+; RUN: grep stxvd2x < %t | count 3
+
+; RUN: llc -mcpu=pwr8 -mattr=+vsx -O2 -mtriple=powerpc64le-unknown-linux-gnu < %s > %t
+; RUN: grep lxvd2x < %t | count 6
+; RUN: grep stxvd2x < %t | count 6
+; RUN: grep xxpermdi < %t | count 12
@vsi = global <4 x i32> <i32 -1, i32 2, i32 -3, i32 4>, align 16
@vui = global <4 x i32> <i32 0, i32 1, i32 2, i32 3>, align 16
diff --git a/test/CodeGen/PowerPC/vsx-p8.ll b/test/CodeGen/PowerPC/vsx-p8.ll
index 81406b6..d5a1905 100644
--- a/test/CodeGen/PowerPC/vsx-p8.ll
+++ b/test/CodeGen/PowerPC/vsx-p8.ll
@@ -1,4 +1,7 @@
; RUN: llc -mcpu=pwr8 -mattr=+power8-vector < %s | FileCheck %s
+; RUN: llc -mcpu=pwr8 -mattr=+power8-vector < %s | FileCheck -check-prefix=CHECK-REG %s
+; RUN: llc -mcpu=pwr8 -mattr=+power8-vector -fast-isel -O0 < %s | FileCheck %s
+; RUN: llc -mcpu=pwr8 -mattr=+power8-vector -fast-isel -O0 < %s | FileCheck -check-prefix=CHECK-FISL %s
target datalayout = "E-m:e-i64:64-n32:64"
target triple = "powerpc64-unknown-linux-gnu"
@@ -26,17 +29,27 @@ define <4 x float> @test32u(<4 x float>* %a) {
%v = load <4 x float>* %a, align 8
ret <4 x float> %v
-; CHECK-LABEL: @test32u
-; CHECK: lxvw4x 34, 0, 3
-; CHECK: blr
+; CHECK-REG-LABEL: @test32u
+; CHECK-REG: lxvw4x 34, 0, 3
+; CHECK-REG: blr
+
+; CHECK-FISL-LABEL: @test32u
+; CHECK-FISL: lxvw4x 0, 0, 3
+; CHECK-FISL: xxlor 34, 0, 0
+; CHECK-FISL: blr
}
define void @test33u(<4 x float>* %a, <4 x float> %b) {
store <4 x float> %b, <4 x float>* %a, align 8
ret void
-; CHECK-LABEL: @test33u
-; CHECK: stxvw4x 34, 0, 3
-; CHECK: blr
+; CHECK-REG-LABEL: @test33u
+; CHECK-REG: stxvw4x 34, 0, 3
+; CHECK-REG: blr
+
+; CHECK-FISL-LABEL: @test33u
+; CHECK-FISL: vor 3, 2, 2
+; CHECK-FISL: stxvw4x 35, 0, 3
+; CHECK-FISL: blr
}
diff --git a/test/CodeGen/PowerPC/vsx-self-copy.ll b/test/CodeGen/PowerPC/vsx-self-copy.ll
index 23615ca..787ac4b 100644
--- a/test/CodeGen/PowerPC/vsx-self-copy.ll
+++ b/test/CodeGen/PowerPC/vsx-self-copy.ll
@@ -1,4 +1,5 @@
; RUN: llc -mcpu=pwr7 -mattr=+vsx < %s | FileCheck %s
+; RUN: llc -mcpu=pwr7 -mattr=+vsx -fast-isel -O0 < %s | FileCheck %s
target datalayout = "E-m:e-i64:64-n32:64"
target triple = "powerpc64-unknown-linux-gnu"
diff --git a/test/CodeGen/PowerPC/vsx-spill-norwstore.ll b/test/CodeGen/PowerPC/vsx-spill-norwstore.ll
new file mode 100644
index 0000000..a3c4aa5
--- /dev/null
+++ b/test/CodeGen/PowerPC/vsx-spill-norwstore.ll
@@ -0,0 +1,63 @@
+; RUN: llc -mcpu=pwr7 -verify-machineinstrs < %s | FileCheck %s
+target datalayout = "E-m:e-i64:64-n32:64"
+target triple = "powerpc64-unknown-linux-gnu"
+
+@.str1 = external unnamed_addr constant [5 x i8], align 1
+@.str10 = external unnamed_addr constant [9 x i8], align 1
+
+; Function Attrs: nounwind
+define void @main() #0 {
+; CHECK-LABEL: @main
+; Make sure that the stxvd2x passes -verify-machineinstrs
+; CHECK: stxvd2x
+
+entry:
+ %0 = tail call <8 x i16> @llvm.ppc.altivec.vupkhsb(<16 x i8> <i8 0, i8 -1, i8 -1, i8 0, i8 0, i8 0, i8 -1, i8 0, i8 -1, i8 0, i8 0, i8 -1, i8 -1, i8 -1, i8 0, i8 -1>) #0
+ %1 = tail call <8 x i16> @llvm.ppc.altivec.vupklsb(<16 x i8> <i8 0, i8 -1, i8 -1, i8 0, i8 0, i8 0, i8 -1, i8 0, i8 -1, i8 0, i8 0, i8 -1, i8 -1, i8 -1, i8 0, i8 -1>) #0
+ br i1 false, label %if.then.i68.i, label %check.exit69.i
+
+if.then.i68.i: ; preds = %entry
+ unreachable
+
+check.exit69.i: ; preds = %entry
+ br i1 undef, label %if.then.i63.i, label %check.exit64.i
+
+if.then.i63.i: ; preds = %check.exit69.i
+ tail call void (i8*, ...)* @printf(i8* getelementptr inbounds ([9 x i8]* @.str10, i64 0, i64 0), i8* getelementptr inbounds ([5 x i8]* @.str1, i64 0, i64 0)) #0
+ br label %check.exit64.i
+
+check.exit64.i: ; preds = %if.then.i63.i, %check.exit69.i
+ %2 = tail call i32 @llvm.ppc.altivec.vcmpequh.p(i32 2, <8 x i16> %0, <8 x i16> <i16 0, i16 -1, i16 -1, i16 0, i16 0, i16 0, i16 -1, i16 0>) #0
+ %tobool.i55.i = icmp eq i32 %2, 0
+ br i1 %tobool.i55.i, label %if.then.i58.i, label %check.exit59.i
+
+if.then.i58.i: ; preds = %check.exit64.i
+ unreachable
+
+check.exit59.i: ; preds = %check.exit64.i
+ %3 = tail call i32 @llvm.ppc.altivec.vcmpequh.p(i32 2, <8 x i16> %1, <8 x i16> <i16 -1, i16 0, i16 0, i16 -1, i16 -1, i16 -1, i16 0, i16 -1>) #0
+ %tobool.i50.i = icmp eq i32 %3, 0
+ br i1 %tobool.i50.i, label %if.then.i53.i, label %check.exit54.i
+
+if.then.i53.i: ; preds = %check.exit59.i
+ unreachable
+
+check.exit54.i: ; preds = %check.exit59.i
+ unreachable
+}
+
+; Function Attrs: nounwind readnone
+declare <8 x i16> @llvm.ppc.altivec.vupkhsb(<16 x i8>) #1
+
+; Function Attrs: nounwind readnone
+declare <8 x i16> @llvm.ppc.altivec.vupklsb(<16 x i8>) #1
+
+; Function Attrs: nounwind
+declare void @printf(i8* nocapture readonly, ...) #0
+
+; Function Attrs: nounwind readnone
+declare i32 @llvm.ppc.altivec.vcmpequh.p(i32, <8 x i16>, <8 x i16>) #1
+
+attributes #0 = { nounwind }
+attributes #1 = { nounwind readnone }
+
diff --git a/test/CodeGen/PowerPC/vsx-spill.ll b/test/CodeGen/PowerPC/vsx-spill.ll
index 29bc6fc..032bcf6 100644
--- a/test/CodeGen/PowerPC/vsx-spill.ll
+++ b/test/CodeGen/PowerPC/vsx-spill.ll
@@ -1,4 +1,7 @@
; RUN: llc -mcpu=pwr7 -mattr=+vsx < %s | FileCheck %s
+; RUN: llc -mcpu=pwr7 -mattr=+vsx < %s | FileCheck -check-prefix=CHECK-REG %s
+; RUN: llc -mcpu=pwr7 -mattr=+vsx -fast-isel -O0 < %s | FileCheck %s
+; RUN: llc -mcpu=pwr7 -mattr=+vsx -fast-isel -O0 < %s | FileCheck -check-prefix=CHECK-FISL %s
target datalayout = "E-m:e-i64:64-n32:64"
target triple = "powerpc64-unknown-linux-gnu"
@@ -7,10 +10,16 @@ entry:
call void asm sideeffect "", "~{f0},~{f1},~{f2},~{f3},~{f4},~{f5},~{f6},~{f7},~{f8},~{f9},~{f10},~{f11},~{f12},~{f13},~{f14},~{f15},~{f16},~{f17},~{f18},~{f19},~{f20},~{f21},~{f22},~{f23},~{f24},~{f25},~{f26},~{f27},~{f28},~{f29},~{f30},~{f31}"() nounwind
br label %return
-; CHECK: @foo1
-; CHECK: xxlor [[R1:[0-9]+]], 1, 1
-; CHECK: xxlor 1, [[R1]], [[R1]]
-; CHECK: blr
+; CHECK-REG: @foo1
+; CHECK-REG: xxlor [[R1:[0-9]+]], 1, 1
+; CHECK-REG: xxlor 1, [[R1]], [[R1]]
+; CHECK-REG: blr
+
+; CHECK-FISL: @foo1
+; CHECK-FISL: lis 0, -1
+; CHECK-FISL: ori 0, 0, 65384
+; CHECK-FISL: stxsdx 1, 1, 0
+; CHECK-FISL: blr
return: ; preds = %entry
ret double %a
@@ -22,10 +31,16 @@ entry:
call void asm sideeffect "", "~{f0},~{f1},~{f2},~{f3},~{f4},~{f5},~{f6},~{f7},~{f8},~{f9},~{f10},~{f11},~{f12},~{f13},~{f14},~{f15},~{f16},~{f17},~{f18},~{f19},~{f20},~{f21},~{f22},~{f23},~{f24},~{f25},~{f26},~{f27},~{f28},~{f29},~{f30},~{f31}"() nounwind
br label %return
-; CHECK: @foo2
-; CHECK: {{xxlor|xsadddp}} [[R1:[0-9]+]], 1, 1
-; CHECK: {{xxlor|xsadddp}} 1, [[R1]], [[R1]]
-; CHECK: blr
+; CHECK-REG: @foo2
+; CHECK-REG: {{xxlor|xsadddp}} [[R1:[0-9]+]], 1, 1
+; CHECK-REG: {{xxlor|xsadddp}} 1, [[R1]], [[R1]]
+; CHECK-REG: blr
+
+; CHECK-FISL: @foo2
+; CHECK-FISL: xsadddp [[R1:[0-9]+]], 1, 1
+; CHECK-FISL: stxsdx [[R1]], [[R1]], 0
+; CHECK-FISL: lxsdx [[R1]], [[R1]], 0
+; CHECK-FISL: blr
return: ; preds = %entry
ret double %b
diff --git a/test/CodeGen/PowerPC/vsx.ll b/test/CodeGen/PowerPC/vsx.ll
index 333b75a..f91ffdb 100644
--- a/test/CodeGen/PowerPC/vsx.ll
+++ b/test/CodeGen/PowerPC/vsx.ll
@@ -1,4 +1,7 @@
; RUN: llc -mcpu=pwr7 -mattr=+vsx < %s | FileCheck %s
+; RUN: llc -mcpu=pwr7 -mattr=+vsx < %s | FileCheck -check-prefix=CHECK-REG %s
+; RUN: llc -mcpu=pwr7 -mattr=+vsx -fast-isel -O0 < %s | FileCheck %s
+; RUN: llc -mcpu=pwr7 -mattr=+vsx -fast-isel -O0 < %s | FileCheck -check-prefix=CHECK-FISL %s
target datalayout = "E-m:e-i64:64-n32:64"
target triple = "powerpc64-unknown-linux-gnu"
@@ -47,9 +50,16 @@ entry:
%v = xor <4 x i32> %a, %b
ret <4 x i32> %v
-; CHECK-LABEL: @test5
-; CHECK: xxlxor 34, 34, 35
-; CHECK: blr
+; CHECK-REG-LABEL: @test5
+; CHECK-REG: xxlxor 34, 34, 35
+; CHECK-REG: blr
+
+; CHECK-FISL-LABEL: @test5
+; CHECK-FISL: vor 4, 2, 2
+; CHECK-FISL: vor 5, 3, 3
+; CHECK-FISL: xxlxor 36, 36, 37
+; CHECK-FISL: vor 2, 4, 4
+; CHECK-FISL: blr
}
define <8 x i16> @test6(<8 x i16> %a, <8 x i16> %b) {
@@ -57,9 +67,16 @@ entry:
%v = xor <8 x i16> %a, %b
ret <8 x i16> %v
-; CHECK-LABEL: @test6
-; CHECK: xxlxor 34, 34, 35
-; CHECK: blr
+; CHECK-REG-LABEL: @test6
+; CHECK-REG: xxlxor 34, 34, 35
+; CHECK-REG: blr
+
+; CHECK-FISL-LABEL: @test6
+; CHECK-FISL: vor 4, 2, 2
+; CHECK-FISL: vor 5, 3, 3
+; CHECK-FISL: xxlxor 36, 36, 37
+; CHECK-FISL: vor 2, 4, 4
+; CHECK-FISL: blr
}
define <16 x i8> @test7(<16 x i8> %a, <16 x i8> %b) {
@@ -67,9 +84,16 @@ entry:
%v = xor <16 x i8> %a, %b
ret <16 x i8> %v
-; CHECK-LABEL: @test7
-; CHECK: xxlxor 34, 34, 35
-; CHECK: blr
+; CHECK-REG-LABEL: @test7
+; CHECK-REG: xxlxor 34, 34, 35
+; CHECK-REG: blr
+
+; CHECK-FISL-LABEL: @test7
+; CHECK-FISL: vor 4, 2, 2
+; CHECK-FISL: vor 5, 3, 3
+; CHECK-FISL: xxlxor 36, 36, 37
+; CHECK-FISL: vor 2, 4, 4
+; CHECK-FISL: blr
}
define <4 x i32> @test8(<4 x i32> %a, <4 x i32> %b) {
@@ -77,9 +101,16 @@ entry:
%v = or <4 x i32> %a, %b
ret <4 x i32> %v
-; CHECK-LABEL: @test8
-; CHECK: xxlor 34, 34, 35
-; CHECK: blr
+; CHECK-REG-LABEL: @test8
+; CHECK-REG: xxlor 34, 34, 35
+; CHECK-REG: blr
+
+; CHECK-FISL-LABEL: @test8
+; CHECK-FISL: vor 4, 2, 2
+; CHECK-FISL: vor 5, 3, 3
+; CHECK-FISL: xxlor 36, 36, 37
+; CHECK-FISL: vor 2, 4, 4
+; CHECK-FISL: blr
}
define <8 x i16> @test9(<8 x i16> %a, <8 x i16> %b) {
@@ -87,9 +118,16 @@ entry:
%v = or <8 x i16> %a, %b
ret <8 x i16> %v
-; CHECK-LABEL: @test9
-; CHECK: xxlor 34, 34, 35
-; CHECK: blr
+; CHECK-REG-LABEL: @test9
+; CHECK-REG: xxlor 34, 34, 35
+; CHECK-REG: blr
+
+; CHECK-FISL-LABEL: @test9
+; CHECK-FISL: vor 4, 2, 2
+; CHECK-FISL: vor 5, 3, 3
+; CHECK-FISL: xxlor 36, 36, 37
+; CHECK-FISL: vor 2, 4, 4
+; CHECK-FISL: blr
}
define <16 x i8> @test10(<16 x i8> %a, <16 x i8> %b) {
@@ -97,9 +135,16 @@ entry:
%v = or <16 x i8> %a, %b
ret <16 x i8> %v
-; CHECK-LABEL: @test10
-; CHECK: xxlor 34, 34, 35
-; CHECK: blr
+; CHECK-REG-LABEL: @test10
+; CHECK-REG: xxlor 34, 34, 35
+; CHECK-REG: blr
+
+; CHECK-FISL-LABEL: @test10
+; CHECK-FISL: vor 4, 2, 2
+; CHECK-FISL: vor 5, 3, 3
+; CHECK-FISL: xxlor 36, 36, 37
+; CHECK-FISL: vor 2, 4, 4
+; CHECK-FISL: blr
}
define <4 x i32> @test11(<4 x i32> %a, <4 x i32> %b) {
@@ -107,9 +152,16 @@ entry:
%v = and <4 x i32> %a, %b
ret <4 x i32> %v
-; CHECK-LABEL: @test11
-; CHECK: xxland 34, 34, 35
-; CHECK: blr
+; CHECK-REG-LABEL: @test11
+; CHECK-REG: xxland 34, 34, 35
+; CHECK-REG: blr
+
+; CHECK-FISL-LABEL: @test11
+; CHECK-FISL: vor 4, 2, 2
+; CHECK-FISL: vor 5, 3, 3
+; CHECK-FISL: xxland 36, 36, 37
+; CHECK-FISL: vor 2, 4, 4
+; CHECK-FISL: blr
}
define <8 x i16> @test12(<8 x i16> %a, <8 x i16> %b) {
@@ -117,9 +169,16 @@ entry:
%v = and <8 x i16> %a, %b
ret <8 x i16> %v
-; CHECK-LABEL: @test12
-; CHECK: xxland 34, 34, 35
-; CHECK: blr
+; CHECK-REG-LABEL: @test12
+; CHECK-REG: xxland 34, 34, 35
+; CHECK-REG: blr
+
+; CHECK-FISL-LABEL: @test12
+; CHECK-FISL: vor 4, 2, 2
+; CHECK-FISL: vor 5, 3, 3
+; CHECK-FISL: xxland 36, 36, 37
+; CHECK-FISL: vor 2, 4, 4
+; CHECK-FISL: blr
}
define <16 x i8> @test13(<16 x i8> %a, <16 x i8> %b) {
@@ -127,9 +186,16 @@ entry:
%v = and <16 x i8> %a, %b
ret <16 x i8> %v
-; CHECK-LABEL: @test13
-; CHECK: xxland 34, 34, 35
-; CHECK: blr
+; CHECK-REG-LABEL: @test13
+; CHECK-REG: xxland 34, 34, 35
+; CHECK-REG: blr
+
+; CHECK-FISL-LABEL: @test13
+; CHECK-FISL: vor 4, 2, 2
+; CHECK-FISL: vor 5, 3, 3
+; CHECK-FISL: xxland 36, 36, 37
+; CHECK-FISL: vor 2, 4, 4
+; CHECK-FISL: blr
}
define <4 x i32> @test14(<4 x i32> %a, <4 x i32> %b) {
@@ -138,9 +204,23 @@ entry:
%w = xor <4 x i32> %v, <i32 -1, i32 -1, i32 -1, i32 -1>
ret <4 x i32> %w
-; CHECK-LABEL: @test14
-; CHECK: xxlnor 34, 34, 35
-; CHECK: blr
+; CHECK-REG-LABEL: @test14
+; CHECK-REG: xxlnor 34, 34, 35
+; CHECK-REG: blr
+
+; CHECK-FISL-LABEL: @test14
+; CHECK-FISL: vor 4, 2, 2
+; CHECK-FISL: vor 5, 3, 3
+; CHECK-FISL: xxlor 36, 36, 37
+; CHECK-FISL: vor 0, 4, 4
+; CHECK-FISL: vor 4, 2, 2
+; CHECK-FISL: vor 5, 3, 3
+; CHECK-FISL: xxlnor 36, 36, 37
+; CHECK-FISL: vor 2, 4, 4
+; CHECK-FISL: lis 0, -1
+; CHECK-FISL: ori 0, 0, 65520
+; CHECK-FISL: stvx 0, 1, 0
+; CHECK-FISL: blr
}
define <8 x i16> @test15(<8 x i16> %a, <8 x i16> %b) {
@@ -149,9 +229,23 @@ entry:
%w = xor <8 x i16> %v, <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>
ret <8 x i16> %w
-; CHECK-LABEL: @test15
-; CHECK: xxlnor 34, 34, 35
-; CHECK: blr
+; CHECK-REG-LABEL: @test15
+; CHECK-REG: xxlnor 34, 34, 35
+; CHECK-REG: blr
+
+; CHECK-FISL-LABEL: @test15
+; CHECK-FISL: vor 4, 2, 2
+; CHECK-FISL: vor 5, 3, 3
+; CHECK-FISL: xxlor 36, 36, 37
+; CHECK-FISL: vor 0, 4, 4
+; CHECK-FISL: vor 4, 2, 2
+; CHECK-FISL: vor 5, 3, 3
+; CHECK-FISL: xxlnor 36, 36, 37
+; CHECK-FISL: vor 2, 4, 4
+; CHECK-FISL: lis 0, -1
+; CHECK-FISL: ori 0, 0, 65520
+; CHECK-FISL: stvx 0, 1, 0
+; CHECK-FISL: blr
}
define <16 x i8> @test16(<16 x i8> %a, <16 x i8> %b) {
@@ -160,9 +254,23 @@ entry:
%w = xor <16 x i8> %v, <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>
ret <16 x i8> %w
-; CHECK-LABEL: @test16
-; CHECK: xxlnor 34, 34, 35
-; CHECK: blr
+; CHECK-REG-LABEL: @test16
+; CHECK-REG: xxlnor 34, 34, 35
+; CHECK-REG: blr
+
+; CHECK-FISL-LABEL: @test16
+; CHECK-FISL: vor 4, 2, 2
+; CHECK-FISL: vor 5, 3, 3
+; CHECK-FISL: xxlor 36, 36, 37
+; CHECK-FISL: vor 0, 4, 4
+; CHECK-FISL: vor 4, 2, 2
+; CHECK-FISL: vor 5, 3, 3
+; CHECK-FISL: xxlnor 36, 36, 37
+; CHECK-FISL: vor 2, 4, 4
+; CHECK-FISL: lis 0, -1
+; CHECK-FISL: ori 0, 0, 65520
+; CHECK-FISL: stvx 0, 1, 0
+; CHECK-FISL: blr
}
define <4 x i32> @test17(<4 x i32> %a, <4 x i32> %b) {
@@ -171,9 +279,21 @@ entry:
%v = and <4 x i32> %a, %w
ret <4 x i32> %v
-; CHECK-LABEL: @test17
-; CHECK: xxlandc 34, 34, 35
-; CHECK: blr
+; CHECK-REG-LABEL: @test17
+; CHECK-REG: xxlandc 34, 34, 35
+; CHECK-REG: blr
+
+; CHECK-FISL-LABEL: @test17
+; CHECK-FISL: vspltisb 4, -1
+; CHECK-FISL: vor 5, 3, 3
+; CHECK-FISL: vor 0, 4, 4
+; CHECK-FISL: xxlxor 37, 37, 32
+; CHECK-FISL: vor 3, 5, 5
+; CHECK-FISL: vor 5, 2, 2
+; CHECK-FISL: vor 0, 3, 3
+; CHECK-FISL: xxland 37, 37, 32
+; CHECK-FISL: vor 2, 5, 5
+; CHECK-FISL: blr
}
define <8 x i16> @test18(<8 x i16> %a, <8 x i16> %b) {
@@ -182,9 +302,24 @@ entry:
%v = and <8 x i16> %a, %w
ret <8 x i16> %v
-; CHECK-LABEL: @test18
-; CHECK: xxlandc 34, 34, 35
-; CHECK: blr
+; CHECK-REG-LABEL: @test18
+; CHECK-REG: xxlandc 34, 34, 35
+; CHECK-REG: blr
+
+; CHECK-FISL-LABEL: @test18
+; CHECK-FISL: vspltisb 4, -1
+; CHECK-FISL: vor 5, 3, 3
+; CHECK-FISL: vor 0, 4, 4
+; CHECK-FISL: xxlxor 37, 37, 32
+; CHECK-FISL: vor 4, 5, 5
+; CHECK-FISL: vor 5, 2, 2
+; CHECK-FISL: vor 0, 3, 3
+; CHECK-FISL: xxlandc 37, 37, 32
+; CHECK-FISL: vor 2, 5, 5
+; CHECK-FISL: lis 0, -1
+; CHECK-FISL: ori 0, 0, 65520
+; CHECK-FISL: stvx 4, 1, 0
+; CHECK-FISL: blr
}
define <16 x i8> @test19(<16 x i8> %a, <16 x i8> %b) {
@@ -193,9 +328,24 @@ entry:
%v = and <16 x i8> %a, %w
ret <16 x i8> %v
-; CHECK-LABEL: @test19
-; CHECK: xxlandc 34, 34, 35
-; CHECK: blr
+; CHECK-REG-LABEL: @test19
+; CHECK-REG: xxlandc 34, 34, 35
+; CHECK-REG: blr
+
+; CHECK-FISL-LABEL: @test19
+; CHECK-FISL: vspltisb 4, -1
+; CHECK-FISL: vor 5, 3, 3
+; CHECK-FISL: vor 0, 4, 4
+; CHECK-FISL: xxlxor 37, 37, 32
+; CHECK-FISL: vor 4, 5, 5
+; CHECK-FISL: vor 5, 2, 2
+; CHECK-FISL: vor 0, 3, 3
+; CHECK-FISL: xxlandc 37, 37, 32
+; CHECK-FISL: vor 2, 5, 5
+; CHECK-FISL: lis 0, -1
+; CHECK-FISL: ori 0, 0, 65520
+; CHECK-FISL: stvx 4, 1, 0
+; CHECK-FISL: blr
}
define <4 x i32> @test20(<4 x i32> %a, <4 x i32> %b, <4 x i32> %c, <4 x i32> %d) {
@@ -204,10 +354,19 @@ entry:
%v = select <4 x i1> %m, <4 x i32> %a, <4 x i32> %b
ret <4 x i32> %v
-; CHECK-LABEL: @test20
-; CHECK: vcmpequw {{[0-9]+}}, 4, 5
-; CHECK: xxsel 34, 35, 34, {{[0-9]+}}
-; CHECK: blr
+; CHECK-REG-LABEL: @test20
+; CHECK-REG: vcmpequw {{[0-9]+}}, 4, 5
+; CHECK-REG: xxsel 34, 35, 34, {{[0-9]+}}
+; CHECK-REG: blr
+
+; CHECK-FISL-LABEL: @test20
+; CHECK-FISL: vcmpequw 4, 4, 5
+; CHECK-FISL: vor 0, 3, 3
+; CHECK-FISL: vor 1, 2, 2
+; CHECK-FISL: vor 6, 4, 4
+; CHECK-FISL: xxsel 32, 32, 33, 38
+; CHECK-FISL: vor 2, 0, 0
+; CHECK-FISL: blr
}
define <4 x float> @test21(<4 x float> %a, <4 x float> %b, <4 x float> %c, <4 x float> %d) {
@@ -216,10 +375,20 @@ entry:
%v = select <4 x i1> %m, <4 x float> %a, <4 x float> %b
ret <4 x float> %v
-; CHECK-LABEL: @test21
-; CHECK: xvcmpeqsp [[V1:[0-9]+]], 36, 37
-; CHECK: xxsel 34, 35, 34, [[V1]]
-; CHECK: blr
+; CHECK-REG-LABEL: @test21
+; CHECK-REG: xvcmpeqsp [[V1:[0-9]+]], 36, 37
+; CHECK-REG: xxsel 34, 35, 34, [[V1]]
+; CHECK-REG: blr
+
+; CHECK-FISL-LABEL: @test21
+; CHECK-FISL: vor 0, 5, 5
+; CHECK-FISL: vor 1, 4, 4
+; CHECK-FISL: vor 6, 3, 3
+; CHECK-FISL: vor 7, 2, 2
+; CHECK-FISL: xvcmpeqsp 32, 33, 32
+; CHECK-FISL: xxsel 32, 38, 39, 32
+; CHECK-FISL: vor 2, 0, 0
+; CHECK-FISL: blr
}
define <4 x float> @test22(<4 x float> %a, <4 x float> %b, <4 x float> %c, <4 x float> %d) {
@@ -228,16 +397,27 @@ entry:
%v = select <4 x i1> %m, <4 x float> %a, <4 x float> %b
ret <4 x float> %v
-; CHECK-LABEL: @test22
-; CHECK-DAG: xvcmpeqsp {{[0-9]+}}, 37, 37
-; CHECK-DAG: xvcmpeqsp {{[0-9]+}}, 36, 36
-; CHECK-DAG: xvcmpeqsp {{[0-9]+}}, 36, 37
-; CHECK-DAG: xxlnor
-; CHECK-DAG: xxlnor
-; CHECK-DAG: xxlor
-; CHECK-DAG: xxlor
-; CHECK: xxsel 34, 35, 34, {{[0-9]+}}
-; CHECK: blr
+; CHECK-REG-LABEL: @test22
+; CHECK-REG-DAG: xvcmpeqsp {{[0-9]+}}, 37, 37
+; CHECK-REG-DAG: xvcmpeqsp {{[0-9]+}}, 36, 36
+; CHECK-REG-DAG: xvcmpeqsp {{[0-9]+}}, 36, 37
+; CHECK-REG-DAG: xxlnor
+; CHECK-REG-DAG: xxlnor
+; CHECK-REG-DAG: xxlor
+; CHECK-REG-DAG: xxlor
+; CHECK-REG: xxsel 34, 35, 34, {{[0-9]+}}
+; CHECK-REG: blr
+
+; CHECK-FISL-LABEL: @test22
+; CHECK-FISL-DAG: xvcmpeqsp {{[0-9]+}}, 33, 32
+; CHECK-FISL-DAG: xvcmpeqsp {{[0-9]+}}, 32, 32
+; CHECK-FISL-DAG: xvcmpeqsp {{[0-9]+}}, 33, 33
+; CHECK-FISL-DAG: xxlnor
+; CHECK-FISL-DAG: xxlnor
+; CHECK-FISL-DAG: xxlor
+; CHECK-FISL-DAG: xxlor
+; CHECK-FISL: xxsel 0, 38, 39, {{[0-9]+}}
+; CHECK-FISL: blr
}
define <8 x i16> @test23(<8 x i16> %a, <8 x i16> %b, <8 x i16> %c, <8 x i16> %d) {
@@ -246,10 +426,19 @@ entry:
%v = select <8 x i1> %m, <8 x i16> %a, <8 x i16> %b
ret <8 x i16> %v
-; CHECK-LABEL: @test23
-; CHECK: vcmpequh {{[0-9]+}}, 4, 5
-; CHECK: xxsel 34, 35, 34, {{[0-9]+}}
-; CHECK: blr
+; CHECK-REG-LABEL: @test23
+; CHECK-REG: vcmpequh {{[0-9]+}}, 4, 5
+; CHECK-REG: xxsel 34, 35, 34, {{[0-9]+}}
+; CHECK-REG: blr
+
+; CHECK-FISL-LABEL: @test23
+; CHECK-FISL: vcmpequh 4, 4, 5
+; CHECK-FISL: vor 0, 3, 3
+; CHECK-FISL: vor 1, 2, 2
+; CHECK-FISL: vor 6, 4, 4
+; CHECK-FISL: xxsel 32, 32, 33, 38
+; CHECK-FISL: vor 2, 0, 0
+; CHECK-FISL: blr
}
define <16 x i8> @test24(<16 x i8> %a, <16 x i8> %b, <16 x i8> %c, <16 x i8> %d) {
@@ -258,10 +447,19 @@ entry:
%v = select <16 x i1> %m, <16 x i8> %a, <16 x i8> %b
ret <16 x i8> %v
-; CHECK-LABEL: @test24
-; CHECK: vcmpequb {{[0-9]+}}, 4, 5
-; CHECK: xxsel 34, 35, 34, {{[0-9]+}}
-; CHECK: blr
+; CHECK-REG-LABEL: @test24
+; CHECK-REG: vcmpequb {{[0-9]+}}, 4, 5
+; CHECK-REG: xxsel 34, 35, 34, {{[0-9]+}}
+; CHECK-REG: blr
+
+; CHECK-FISL-LABEL: @test24
+; CHECK-FISL: vcmpequb 4, 4, 5
+; CHECK-FISL: vor 0, 3, 3
+; CHECK-FISL: vor 1, 2, 2
+; CHECK-FISL: vor 6, 4, 4
+; CHECK-FISL: xxsel 32, 32, 33, 38
+; CHECK-FISL: vor 2, 0, 0
+; CHECK-FISL: blr
}
define <2 x double> @test25(<2 x double> %a, <2 x double> %b, <2 x double> %c, <2 x double> %d) {
@@ -342,9 +540,16 @@ define <2 x i64> @test30(<2 x i64>* %a) {
%v = load <2 x i64>* %a, align 16
ret <2 x i64> %v
-; CHECK-LABEL: @test30
-; CHECK: lxvd2x 34, 0, 3
-; CHECK: blr
+; CHECK-REG-LABEL: @test30
+; CHECK-REG: lxvd2x 34, 0, 3
+; CHECK-REG: blr
+
+; CHECK-FISL-LABEL: @test30
+; CHECK-FISL: lxvd2x 0, 0, 3
+; CHECK-FISL: xxlor 34, 0, 0
+; CHECK-FISL: vor 3, 2, 2
+; CHECK-FISL: vor 2, 3, 3
+; CHECK-FISL: blr
}
define void @test31(<2 x i64>* %a, <2 x i64> %b) {
@@ -360,18 +565,28 @@ define <4 x float> @test32(<4 x float>* %a) {
%v = load <4 x float>* %a, align 16
ret <4 x float> %v
-; CHECK-LABEL: @test32
-; CHECK: lxvw4x 34, 0, 3
-; CHECK: blr
+; CHECK-REG-LABEL: @test32
+; CHECK-REG: lxvw4x 34, 0, 3
+; CHECK-REG: blr
+
+; CHECK-FISL-LABEL: @test32
+; CHECK-FISL: lxvw4x 0, 0, 3
+; CHECK-FISL: xxlor 34, 0, 0
+; CHECK-FISL: blr
}
define void @test33(<4 x float>* %a, <4 x float> %b) {
store <4 x float> %b, <4 x float>* %a, align 16
ret void
-; CHECK-LABEL: @test33
-; CHECK: stxvw4x 34, 0, 3
-; CHECK: blr
+; CHECK-REG-LABEL: @test33
+; CHECK-REG: stxvw4x 34, 0, 3
+; CHECK-REG: blr
+
+; CHECK-FISL-LABEL: @test33
+; CHECK-FISL: vor 3, 2, 2
+; CHECK-FISL: stxvw4x 35, 0, 3
+; CHECK-FISL: blr
}
define <4 x float> @test32u(<4 x float>* %a) {
@@ -390,27 +605,44 @@ define void @test33u(<4 x float>* %a, <4 x float> %b) {
store <4 x float> %b, <4 x float>* %a, align 8
ret void
-; CHECK-LABEL: @test33u
-; CHECK: stxvw4x 34, 0, 3
-; CHECK: blr
+; CHECK-REG-LABEL: @test33u
+; CHECK-REG: stxvw4x 34, 0, 3
+; CHECK-REG: blr
+
+; CHECK-FISL-LABEL: @test33u
+; CHECK-FISL: vor 3, 2, 2
+; CHECK-FISL: stxvw4x 35, 0, 3
+; CHECK-FISL: blr
}
define <4 x i32> @test34(<4 x i32>* %a) {
%v = load <4 x i32>* %a, align 16
ret <4 x i32> %v
-; CHECK-LABEL: @test34
-; CHECK: lxvw4x 34, 0, 3
-; CHECK: blr
+; CHECK-REG-LABEL: @test34
+; CHECK-REG: lxvw4x 34, 0, 3
+; CHECK-REG: blr
+
+; CHECK-FISL-LABEL: @test34
+; CHECK-FISL: lxvw4x 0, 0, 3
+; CHECK-FISL: xxlor 34, 0, 0
+; CHECK-FISL: vor 3, 2, 2
+; CHECK-FISL: vor 2, 3, 3
+; CHECK-FISL: blr
}
define void @test35(<4 x i32>* %a, <4 x i32> %b) {
store <4 x i32> %b, <4 x i32>* %a, align 16
ret void
-; CHECK-LABEL: @test35
-; CHECK: stxvw4x 34, 0, 3
-; CHECK: blr
+; CHECK-REG-LABEL: @test35
+; CHECK-REG: stxvw4x 34, 0, 3
+; CHECK-REG: blr
+
+; CHECK-FISL-LABEL: @test35
+; CHECK-FISL: vor 3, 2, 2
+; CHECK-FISL: stxvw4x 35, 0, 3
+; CHECK-FISL: blr
}
define <2 x double> @test40(<2 x i64> %a) {
@@ -596,37 +828,60 @@ define double @test63(<2 x double> %a) {
%v = extractelement <2 x double> %a, i32 0
ret double %v
-; CHECK-LABEL: @test63
-; CHECK: xxlor 1, 34, 34
-; CHECK: blr
+; CHECK-REG-LABEL: @test63
+; CHECK-REG: xxlor 1, 34, 34
+; CHECK-REG: blr
+
+; CHECK-FISL-LABEL: @test63
+; CHECK-FISL: xxlor 0, 34, 34
+; CHECK-FISL: fmr 1, 0
+; CHECK-FISL: blr
}
define double @test64(<2 x double> %a) {
%v = extractelement <2 x double> %a, i32 1
ret double %v
-; CHECK-LABEL: @test64
-; CHECK: xxpermdi 1, 34, 34, 2
-; CHECK: blr
+; CHECK-REG-LABEL: @test64
+; CHECK-REG: xxpermdi 1, 34, 34, 2
+; CHECK-REG: blr
+
+; CHECK-FISL-LABEL: @test64
+; CHECK-FISL: xxpermdi 34, 34, 34, 2
+; CHECK-FISL: xxlor 0, 34, 34
+; CHECK-FISL: fmr 1, 0
+; CHECK-FISL: blr
}
define <2 x i1> @test65(<2 x i64> %a, <2 x i64> %b) {
%w = icmp eq <2 x i64> %a, %b
ret <2 x i1> %w
-; CHECK-LABEL: @test65
-; CHECK: vcmpequw 2, 2, 3
-; CHECK: blr
+; CHECK-REG-LABEL: @test65
+; CHECK-REG: vcmpequw 2, 2, 3
+; CHECK-REG: blr
+
+; CHECK-FISL-LABEL: @test65
+; CHECK-FISL: vor 4, 3, 3
+; CHECK-FISL: vor 5, 2, 2
+; CHECK-FISL: vcmpequw 4, 5, 4
+; CHECK-FISL: vor 2, 4, 4
+; CHECK-FISL: blr
}
define <2 x i1> @test66(<2 x i64> %a, <2 x i64> %b) {
%w = icmp ne <2 x i64> %a, %b
ret <2 x i1> %w
-; CHECK-LABEL: @test66
-; CHECK: vcmpequw {{[0-9]+}}, 2, 3
-; CHECK: xxlnor 34, {{[0-9]+}}, {{[0-9]+}}
-; CHECK: blr
+; CHECK-REG-LABEL: @test66
+; CHECK-REG: vcmpequw {{[0-9]+}}, 2, 3
+; CHECK-REG: xxlnor 34, {{[0-9]+}}, {{[0-9]+}}
+; CHECK-REG: blr
+
+; CHECK-FISL-LABEL: @test66
+; CHECK-FISL: vcmpequw {{[0-9]+}}, 5, 4
+; CHECK-FISL: xxlnor 34, {{[0-9]+}}, {{[0-9]+}}
+; CHECK-FISL: blr
}
define <2 x i1> @test67(<2 x i64> %a, <2 x i64> %b) {
@@ -660,7 +915,7 @@ define <2 x double> @test69(<2 x i16> %a) {
; CHECK-LABEL: @test69
; CHECK: vspltisw [[V1:[0-9]+]], 8
; CHECK: vadduwm [[V2:[0-9]+]], [[V1]], [[V1]]
-; CHECK: vslw [[V3:[0-9]+]], 2, [[V2]]
+; CHECK: vslw [[V3:[0-9]+]], {{[0-9]+}}, [[V2]]
; CHECK: vsraw {{[0-9]+}}, [[V3]], [[V2]]
; CHECK: xxsldwi [[V4:[0-9]+]], {{[0-9]+}}, {{[0-9]+}}, 1
; CHECK: xvcvsxwdp 34, [[V4]]
@@ -674,7 +929,7 @@ define <2 x double> @test70(<2 x i8> %a) {
; CHECK-LABEL: @test70
; CHECK: vspltisw [[V1:[0-9]+]], 12
; CHECK: vadduwm [[V2:[0-9]+]], [[V1]], [[V1]]
-; CHECK: vslw [[V3:[0-9]+]], 2, [[V2]]
+; CHECK: vslw [[V3:[0-9]+]], {{[0-9]+}}, [[V2]]
; CHECK: vsraw {{[0-9]+}}, [[V3]], [[V2]]
; CHECK: xxsldwi [[V4:[0-9]+]], {{[0-9]+}}, {{[0-9]+}}, 1
; CHECK: xvcvsxwdp 34, [[V4]]
@@ -687,15 +942,24 @@ define <2 x i32> @test80(i32 %v) {
%i = add <2 x i32> %b2, <i32 2, i32 3>
ret <2 x i32> %i
-; CHECK-LABEL: @test80
-; CHECK-DAG: addi [[R1:[0-9]+]], 3, 3
-; CHECK-DAG: addi [[R2:[0-9]+]], 1, -16
-; CHECK-DAG: addi [[R3:[0-9]+]], 3, 2
-; CHECK: std [[R1]], -8(1)
-; CHECK: std [[R3]], -16(1)
-; CHECK: lxvd2x 34, 0, [[R2]]
-; CHECK-NOT: stxvd2x
-; CHECK: blr
+; CHECK-REG-LABEL: @test80
+; CHECK-REG-DAG: addi [[R1:[0-9]+]], 3, 3
+; CHECK-REG-DAG: addi [[R2:[0-9]+]], 1, -16
+; CHECK-REG-DAG: addi [[R3:[0-9]+]], 3, 2
+; CHECK-REG: std [[R1]], -8(1)
+; CHECK-REG: std [[R3]], -16(1)
+; CHECK-REG: lxvd2x 34, 0, [[R2]]
+; CHECK-REG-NOT: stxvd2x
+; CHECK-REG: blr
+
+; CHECK-FISL-LABEL: @test80
+; CHECK-FISL-DAG: addi [[R1:[0-9]+]], 3, 3
+; CHECK-FISL-DAG: addi [[R2:[0-9]+]], 1, -16
+; CHECK-FISL-DAG: addi [[R3:[0-9]+]], 3, 2
+; CHECK-FISL-DAG: std [[R1]], -8(1)
+; CHECK-FISL-DAG: std [[R3]], -16(1)
+; CHECK-FISL-DAG: lxvd2x 0, 0, [[R2]]
+; CHECK-FISL: blr
}
define <2 x double> @test81(<4 x float> %b) {
@@ -712,8 +976,11 @@ entry:
%v = select i1 %m, double %a, double %b
ret double %v
-; CHECK-LABEL: @test82
-; CHECK: xscmpudp [[REG:[0-9]+]], 3, 4
-; CHECK: beqlr [[REG]]
-}
+; CHECK-REG-LABEL: @test82
+; CHECK-REG: xscmpudp [[REG:[0-9]+]], 3, 4
+; CHECK-REG: beqlr [[REG]]
+; CHECK-FISL-LABEL: @test82
+; CHECK-FISL: xscmpudp [[REG:[0-9]+]], 3, 4
+; CHECK-FISL: beq [[REG]], {{.*}}
+}
diff --git a/test/CodeGen/PowerPC/vsx_insert_extract_le.ll b/test/CodeGen/PowerPC/vsx_insert_extract_le.ll
new file mode 100644
index 0000000..0a9df37
--- /dev/null
+++ b/test/CodeGen/PowerPC/vsx_insert_extract_le.ll
@@ -0,0 +1,52 @@
+; RUN: llc -mcpu=pwr8 -mattr=+vsx -mtriple=powerpc64le-unknown-linux-gnu < %s | FileCheck %s
+
+define <2 x double> @testi0(<2 x double>* %p1, double* %p2) {
+ %v = load <2 x double>* %p1
+ %s = load double* %p2
+ %r = insertelement <2 x double> %v, double %s, i32 0
+ ret <2 x double> %r
+
+; CHECK-LABEL: testi0
+; CHECK: lxvd2x 0, 0, 3
+; CHECK: lxsdx 34, 0, 4
+; CHECK: xxpermdi 0, 0, 0, 2
+; CHECK: xxpermdi 1, 34, 34, 0
+; CHECK: xxpermdi 34, 0, 1, 1
+}
+
+define <2 x double> @testi1(<2 x double>* %p1, double* %p2) {
+ %v = load <2 x double>* %p1
+ %s = load double* %p2
+ %r = insertelement <2 x double> %v, double %s, i32 1
+ ret <2 x double> %r
+
+; CHECK-LABEL: testi1
+; CHECK: lxvd2x 0, 0, 3
+; CHECK: lxsdx 34, 0, 4
+; CHECK: xxpermdi 0, 0, 0, 2
+; CHECK: xxpermdi 1, 34, 34, 0
+; CHECK: xxpermdi 34, 1, 0, 3
+}
+
+define double @teste0(<2 x double>* %p1) {
+ %v = load <2 x double>* %p1
+ %r = extractelement <2 x double> %v, i32 0
+ ret double %r
+
+; FIXME: Swap optimization will collapse this into lxvd2x 1, 0, 3.
+
+; CHECK-LABEL: teste0
+; CHECK: lxvd2x 0, 0, 3
+; CHECK: xxpermdi 0, 0, 0, 2
+; CHECK: xxpermdi 1, 0, 0, 2
+}
+
+define double @teste1(<2 x double>* %p1) {
+ %v = load <2 x double>* %p1
+ %r = extractelement <2 x double> %v, i32 1
+ ret double %r
+
+; CHECK-LABEL: teste1
+; CHECK: lxvd2x 0, 0, 3
+; CHECK: xxpermdi 1, 0, 0, 2
+}
diff --git a/test/CodeGen/PowerPC/vsx_shuffle_le.ll b/test/CodeGen/PowerPC/vsx_shuffle_le.ll
new file mode 100644
index 0000000..588cfda
--- /dev/null
+++ b/test/CodeGen/PowerPC/vsx_shuffle_le.ll
@@ -0,0 +1,207 @@
+; RUN: llc -mcpu=pwr8 -mattr=+vsx -mtriple=powerpc64le-unknown-linux-gnu < %s | FileCheck %s
+
+define <2 x double> @test00(<2 x double>* %p1, <2 x double>* %p2) {
+ %v1 = load <2 x double>* %p1
+ %v2 = load <2 x double>* %p2
+ %v3 = shufflevector <2 x double> %v1, <2 x double> %v2, <2 x i32> < i32 0, i32 0>
+ ret <2 x double> %v3
+
+; CHECK-LABEL: @test00
+; CHECK: lxvd2x 0, 0, 3
+; CHECK: xxpermdi 0, 0, 0, 2
+; CHECK: xxpermdi 34, 0, 0, 3
+}
+
+define <2 x double> @test01(<2 x double>* %p1, <2 x double>* %p2) {
+ %v1 = load <2 x double>* %p1
+ %v2 = load <2 x double>* %p2
+ %v3 = shufflevector <2 x double> %v1, <2 x double> %v2, <2 x i32> < i32 0, i32 1>
+ ret <2 x double> %v3
+
+; CHECK-LABEL: @test01
+; CHECK: lxvd2x 0, 0, 3
+; CHECK: xxpermdi 34, 0, 0, 2
+}
+
+define <2 x double> @test02(<2 x double>* %p1, <2 x double>* %p2) {
+ %v1 = load <2 x double>* %p1
+ %v2 = load <2 x double>* %p2
+ %v3 = shufflevector <2 x double> %v1, <2 x double> %v2, <2 x i32> < i32 0, i32 2>
+ ret <2 x double> %v3
+
+; CHECK-LABEL: @test02
+; CHECK: lxvd2x 0, 0, 3
+; CHECK: lxvd2x 1, 0, 4
+; CHECK: xxpermdi 0, 0, 0, 2
+; CHECK: xxpermdi 1, 1, 1, 2
+; CHECK: xxpermdi 34, 1, 0, 3
+}
+
+define <2 x double> @test03(<2 x double>* %p1, <2 x double>* %p2) {
+ %v1 = load <2 x double>* %p1
+ %v2 = load <2 x double>* %p2
+ %v3 = shufflevector <2 x double> %v1, <2 x double> %v2, <2 x i32> < i32 0, i32 3>
+ ret <2 x double> %v3
+
+; CHECK-LABEL: @test03
+; CHECK: lxvd2x 0, 0, 3
+; CHECK: lxvd2x 1, 0, 4
+; CHECK: xxpermdi 0, 0, 0, 2
+; CHECK: xxpermdi 1, 1, 1, 2
+; CHECK: xxpermdi 34, 1, 0, 1
+}
+
+define <2 x double> @test10(<2 x double>* %p1, <2 x double>* %p2) {
+ %v1 = load <2 x double>* %p1
+ %v2 = load <2 x double>* %p2
+ %v3 = shufflevector <2 x double> %v1, <2 x double> %v2, <2 x i32> < i32 1, i32 0>
+ ret <2 x double> %v3
+
+; CHECK-LABEL: @test10
+; CHECK: lxvd2x 0, 0, 3
+; CHECK: xxpermdi 0, 0, 0, 2
+; CHECK: xxpermdi 34, 0, 0, 2
+}
+
+define <2 x double> @test11(<2 x double>* %p1, <2 x double>* %p2) {
+ %v1 = load <2 x double>* %p1
+ %v2 = load <2 x double>* %p2
+ %v3 = shufflevector <2 x double> %v1, <2 x double> %v2, <2 x i32> < i32 1, i32 1>
+ ret <2 x double> %v3
+
+; CHECK-LABEL: @test11
+; CHECK: lxvd2x 0, 0, 3
+; CHECK: xxpermdi 0, 0, 0, 2
+; CHECK: xxpermdi 34, 0, 0, 0
+}
+
+define <2 x double> @test12(<2 x double>* %p1, <2 x double>* %p2) {
+ %v1 = load <2 x double>* %p1
+ %v2 = load <2 x double>* %p2
+ %v3 = shufflevector <2 x double> %v1, <2 x double> %v2, <2 x i32> < i32 1, i32 2>
+ ret <2 x double> %v3
+
+; CHECK-LABEL: @test12
+; CHECK: lxvd2x 0, 0, 3
+; CHECK: lxvd2x 1, 0, 4
+; CHECK: xxpermdi 0, 0, 0, 2
+; CHECK: xxpermdi 1, 1, 1, 2
+; CHECK: xxpermdi 34, 1, 0, 2
+}
+
+define <2 x double> @test13(<2 x double>* %p1, <2 x double>* %p2) {
+ %v1 = load <2 x double>* %p1
+ %v2 = load <2 x double>* %p2
+ %v3 = shufflevector <2 x double> %v1, <2 x double> %v2, <2 x i32> < i32 1, i32 3>
+ ret <2 x double> %v3
+
+; CHECK-LABEL: @test13
+; CHECK: lxvd2x 0, 0, 3
+; CHECK: lxvd2x 1, 0, 4
+; CHECK: xxpermdi 0, 0, 0, 2
+; CHECK: xxpermdi 1, 1, 1, 2
+; CHECK: xxpermdi 34, 1, 0, 0
+}
+
+define <2 x double> @test20(<2 x double>* %p1, <2 x double>* %p2) {
+ %v1 = load <2 x double>* %p1
+ %v2 = load <2 x double>* %p2
+ %v3 = shufflevector <2 x double> %v1, <2 x double> %v2, <2 x i32> < i32 2, i32 0>
+ ret <2 x double> %v3
+
+; CHECK-LABEL: @test20
+; CHECK: lxvd2x 0, 0, 3
+; CHECK: lxvd2x 1, 0, 4
+; CHECK: xxpermdi 0, 0, 0, 2
+; CHECK: xxpermdi 1, 1, 1, 2
+; CHECK: xxpermdi 34, 0, 1, 3
+}
+
+define <2 x double> @test21(<2 x double>* %p1, <2 x double>* %p2) {
+ %v1 = load <2 x double>* %p1
+ %v2 = load <2 x double>* %p2
+ %v3 = shufflevector <2 x double> %v1, <2 x double> %v2, <2 x i32> < i32 2, i32 1>
+ ret <2 x double> %v3
+
+; CHECK-LABEL: @test21
+; CHECK: lxvd2x 0, 0, 3
+; CHECK: lxvd2x 1, 0, 4
+; CHECK: xxpermdi 0, 0, 0, 2
+; CHECK: xxpermdi 1, 1, 1, 2
+; CHECK: xxpermdi 34, 0, 1, 1
+}
+
+define <2 x double> @test22(<2 x double>* %p1, <2 x double>* %p2) {
+ %v1 = load <2 x double>* %p1
+ %v2 = load <2 x double>* %p2
+ %v3 = shufflevector <2 x double> %v1, <2 x double> %v2, <2 x i32> < i32 2, i32 2>
+ ret <2 x double> %v3
+
+; CHECK-LABEL: @test22
+; CHECK: lxvd2x 0, 0, 4
+; CHECK: xxpermdi 0, 0, 0, 2
+; CHECK: xxpermdi 34, 0, 0, 3
+}
+
+define <2 x double> @test23(<2 x double>* %p1, <2 x double>* %p2) {
+ %v1 = load <2 x double>* %p1
+ %v2 = load <2 x double>* %p2
+ %v3 = shufflevector <2 x double> %v1, <2 x double> %v2, <2 x i32> < i32 2, i32 3>
+ ret <2 x double> %v3
+
+; CHECK-LABEL: @test23
+; CHECK: lxvd2x 0, 0, 4
+; CHECK: xxpermdi 34, 0, 0, 2
+}
+
+define <2 x double> @test30(<2 x double>* %p1, <2 x double>* %p2) {
+ %v1 = load <2 x double>* %p1
+ %v2 = load <2 x double>* %p2
+ %v3 = shufflevector <2 x double> %v1, <2 x double> %v2, <2 x i32> < i32 3, i32 0>
+ ret <2 x double> %v3
+
+; CHECK-LABEL: @test30
+; CHECK: lxvd2x 0, 0, 3
+; CHECK: lxvd2x 1, 0, 4
+; CHECK: xxpermdi 0, 0, 0, 2
+; CHECK: xxpermdi 1, 1, 1, 2
+; CHECK: xxpermdi 34, 0, 1, 2
+}
+
+define <2 x double> @test31(<2 x double>* %p1, <2 x double>* %p2) {
+ %v1 = load <2 x double>* %p1
+ %v2 = load <2 x double>* %p2
+ %v3 = shufflevector <2 x double> %v1, <2 x double> %v2, <2 x i32> < i32 3, i32 1>
+ ret <2 x double> %v3
+
+; CHECK-LABEL: @test31
+; CHECK: lxvd2x 0, 0, 3
+; CHECK: lxvd2x 1, 0, 4
+; CHECK: xxpermdi 0, 0, 0, 2
+; CHECK: xxpermdi 1, 1, 1, 2
+; CHECK: xxpermdi 34, 0, 1, 0
+}
+
+define <2 x double> @test32(<2 x double>* %p1, <2 x double>* %p2) {
+ %v1 = load <2 x double>* %p1
+ %v2 = load <2 x double>* %p2
+ %v3 = shufflevector <2 x double> %v1, <2 x double> %v2, <2 x i32> < i32 3, i32 2>
+ ret <2 x double> %v3
+
+; CHECK-LABEL: @test32
+; CHECK: lxvd2x 0, 0, 4
+; CHECK: xxpermdi 0, 0, 0, 2
+; CHECK: xxpermdi 34, 0, 0, 2
+}
+
+define <2 x double> @test33(<2 x double>* %p1, <2 x double>* %p2) {
+ %v1 = load <2 x double>* %p1
+ %v2 = load <2 x double>* %p2
+ %v3 = shufflevector <2 x double> %v1, <2 x double> %v2, <2 x i32> < i32 3, i32 3>
+ ret <2 x double> %v3
+
+; CHECK-LABEL: @test33
+; CHECK: lxvd2x 0, 0, 4
+; CHECK: xxpermdi 0, 0, 0, 2
+; CHECK: xxpermdi 34, 0, 0, 0
+}
diff --git a/test/CodeGen/PowerPC/xxleqv_xxlnand_xxlorc.ll b/test/CodeGen/PowerPC/xxleqv_xxlnand_xxlorc.ll
new file mode 100644
index 0000000..4d929c6
--- /dev/null
+++ b/test/CodeGen/PowerPC/xxleqv_xxlnand_xxlorc.ll
@@ -0,0 +1,52 @@
+; Check the miscellaneous logical vector operations added in P8
+;
+; RUN: llc -mtriple=powerpc64-unknown-linux-gnu -mcpu=pwr8 < %s | FileCheck %s
+; RUN: llc -mtriple=powerpc64le-unknown-linux-gnu < %s | FileCheck %s
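+; Each function below builds one of these operations from basic IR, roughly:
+;   eqv(x, y)  = ~(x ^ y)   (xor, then xor with all-ones)
+;   nand(x, y) = ~(x & y)   (and, then xor with all-ones)
+;   orc(x, y)  = x | ~y     (or with the complement of y)
+; The complements are spelled 'xor %v, -1' because LLVM IR has no 'not'
+; instruction.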
+; Test x eqv y
+define <4 x i32> @test_xxleqv(<4 x i32> %x, <4 x i32> %y) nounwind {
+ %tmp = xor <4 x i32> %x, %y
+ %ret_val = xor <4 x i32> %tmp, < i32 -1, i32 -1, i32 -1, i32 -1>
+ ret <4 x i32> %ret_val
+; CHECK: xxleqv 34, 34, 35
+}
+
+; Test x xxlnand y
+define <4 x i32> @test_xxlnand(<4 x i32> %x, <4 x i32> %y) nounwind {
+ %tmp = and <4 x i32> %x, %y
+ %ret_val = xor <4 x i32> %tmp, <i32 -1, i32 -1, i32 -1, i32 -1>
+ ret <4 x i32> %ret_val
+; CHECK: xxlnand 34, 34, 35
+}
+
+; Test x xxlorc y
+define <4 x i32> @test_xxlorc(<4 x i32> %x, <4 x i32> %y) nounwind {
+ %tmp = xor <4 x i32> %y, <i32 -1, i32 -1, i32 -1, i32 -1>
+ %ret_val = or <4 x i32> %x, %tmp
+ ret <4 x i32> %ret_val
+; CHECK: xxlorc 34, 34, 35
+}
+
+; Test x eqv y
+define <8 x i16> @test_xxleqvv8i16(<8 x i16> %x, <8 x i16> %y) nounwind {
+ %tmp = xor <8 x i16> %x, %y
+ %ret_val = xor <8 x i16> %tmp, <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>
+ ret <8 x i16> %ret_val
+; CHECK: xxleqv 34, 34, 35
+}
+
+; Test x xxlnand y
+define <8 x i16> @test_xxlnandv8i16(<8 x i16> %x, <8 x i16> %y) nounwind {
+ %tmp = and <8 x i16> %x, %y
+ %ret_val = xor <8 x i16> %tmp, <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>
+ ret <8 x i16> %ret_val
+; CHECK: xxlnand 34, 34, 35
+}
+
+; Test x xxlorc y
+define <8 x i16> @test_xxlorcv8i16(<8 x i16> %x, <8 x i16> %y) nounwind {
+ %tmp = xor <8 x i16> %y, <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>
+ %ret_val = or <8 x i16> %x, %tmp
+ ret <8 x i16> %ret_val
+; CHECK: xxlorc 34, 34, 35
+}
+
diff --git a/test/CodeGen/PowerPC/zext-free.ll b/test/CodeGen/PowerPC/zext-free.ll
new file mode 100644
index 0000000..080dbaa
--- /dev/null
+++ b/test/CodeGen/PowerPC/zext-free.ll
@@ -0,0 +1,37 @@
+; RUN: llc -mcpu=ppc64 < %s | FileCheck %s
+target datalayout = "E-m:e-i64:64-n32:64"
+target triple = "powerpc64-unknown-linux-gnu"
+
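+; The i8 loads below feed only comparisons against zero. Since lbz already
+; zero-extends the loaded byte, the zext should be free and no masking
+; (rlwinm/clrlwi) should survive, which is what the CHECK-NOT lines verify.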
+; Function Attrs: noreturn nounwind
+define signext i32 @_Z1fRPc(i8** nocapture dereferenceable(8) %p) #0 {
+entry:
+ %.pre = load i8** %p, align 8
+ br label %loop
+
+loop: ; preds = %loop.backedge, %entry
+ %0 = phi i8* [ %.pre, %entry ], [ %.be, %loop.backedge ]
+ %1 = load i8* %0, align 1
+ %tobool = icmp eq i8 %1, 0
+ %incdec.ptr = getelementptr inbounds i8* %0, i64 1
+ store i8* %incdec.ptr, i8** %p, align 8
+ %2 = load i8* %incdec.ptr, align 1
+ %tobool2 = icmp ne i8 %2, 0
+ %or.cond = and i1 %tobool, %tobool2
+ br i1 %or.cond, label %if.then3, label %loop.backedge
+
+if.then3: ; preds = %loop
+ %incdec.ptr4 = getelementptr inbounds i8* %0, i64 2
+ store i8* %incdec.ptr4, i8** %p, align 8
+ br label %loop.backedge
+
+loop.backedge: ; preds = %if.then3, %loop
+ %.be = phi i8* [ %incdec.ptr4, %if.then3 ], [ %incdec.ptr, %loop ]
+ br label %loop
+
+; CHECK-LABEL: @_Z1fRPc
+; CHECK-NOT: rlwinm {{[0-9]+}}, {{[0-9]+}}, 0, 24, 31
+; CHECK-NOT: clrlwi {{[0-9]+}}, {{[0-9]+}}, 24
+}
+
+attributes #0 = { noreturn nounwind }
+
diff --git a/test/CodeGen/R600/128bit-kernel-args.ll b/test/CodeGen/R600/128bit-kernel-args.ll
index d9b0ff2..557d86a 100644
--- a/test/CodeGen/R600/128bit-kernel-args.ll
+++ b/test/CodeGen/R600/128bit-kernel-args.ll
@@ -1,26 +1,27 @@
-; RUN: llc < %s -march=r600 -mcpu=redwood | FileCheck %s --check-prefix=R600-CHECK
-; RUN: llc < %s -march=r600 -mcpu=SI -verify-machineinstrs | FileCheck %s --check-prefix=SI-CHECK
+; RUN: llc < %s -march=r600 -mcpu=redwood | FileCheck %s --check-prefix=R600
+; RUN: llc < %s -march=amdgcn -mcpu=SI -verify-machineinstrs | FileCheck %s --check-prefix=SI
+; RUN: llc < %s -march=amdgcn -mcpu=tonga -verify-machineinstrs | FileCheck %s --check-prefix=SI
-; R600-CHECK: {{^}}v4i32_kernel_arg:
-; R600-CHECK-DAG: MOV {{[* ]*}}T[[GPR:[0-9]]].X, KC0[3].Y
-; R600-CHECK-DAG: MOV {{[* ]*}}T[[GPR]].Y, KC0[3].Z
-; R600-CHECK-DAG: MOV {{[* ]*}}T[[GPR]].Z, KC0[3].W
-; R600-CHECK-DAG: MOV {{[* ]*}}T[[GPR]].W, KC0[4].X
-; SI-CHECK: {{^}}v4i32_kernel_arg:
-; SI-CHECK: buffer_store_dwordx4
+; R600: {{^}}v4i32_kernel_arg:
+; R600-DAG: MOV {{[* ]*}}T[[GPR:[0-9]]].X, KC0[3].Y
+; R600-DAG: MOV {{[* ]*}}T[[GPR]].Y, KC0[3].Z
+; R600-DAG: MOV {{[* ]*}}T[[GPR]].Z, KC0[3].W
+; R600-DAG: MOV {{[* ]*}}T[[GPR]].W, KC0[4].X
+; SI: {{^}}v4i32_kernel_arg:
+; SI: buffer_store_dwordx4
define void @v4i32_kernel_arg(<4 x i32> addrspace(1)* %out, <4 x i32> %in) {
entry:
store <4 x i32> %in, <4 x i32> addrspace(1)* %out
ret void
}
-; R600-CHECK: {{^}}v4f32_kernel_arg:
-; R600-CHECK-DAG: MOV {{[* ]*}}T[[GPR:[0-9]]].X, KC0[3].Y
-; R600-CHECK-DAG: MOV {{[* ]*}}T[[GPR]].Y, KC0[3].Z
-; R600-CHECK-DAG: MOV {{[* ]*}}T[[GPR]].Z, KC0[3].W
-; R600-CHECK-DAG: MOV {{[* ]*}}T[[GPR]].W, KC0[4].X
-; SI-CHECK: {{^}}v4f32_kernel_arg:
-; SI-CHECK: buffer_store_dwordx4
+; R600: {{^}}v4f32_kernel_arg:
+; R600-DAG: MOV {{[* ]*}}T[[GPR:[0-9]]].X, KC0[3].Y
+; R600-DAG: MOV {{[* ]*}}T[[GPR]].Y, KC0[3].Z
+; R600-DAG: MOV {{[* ]*}}T[[GPR]].Z, KC0[3].W
+; R600-DAG: MOV {{[* ]*}}T[[GPR]].W, KC0[4].X
+; SI: {{^}}v4f32_kernel_arg:
+; SI: buffer_store_dwordx4
define void @v4f32_kernel_arg(<4 x float> addrspace(1)* %out, <4 x float> %in) {
entry:
store <4 x float> %in, <4 x float> addrspace(1)* %out
diff --git a/test/CodeGen/R600/32-bit-local-address-space.ll b/test/CodeGen/R600/32-bit-local-address-space.ll
index 4ff2762..6aca826 100644
--- a/test/CodeGen/R600/32-bit-local-address-space.ll
+++ b/test/CodeGen/R600/32-bit-local-address-space.ll
@@ -1,4 +1,5 @@
-; RUN: llc -march=r600 -mcpu=verde -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
+; RUN: llc -march=amdgcn -mcpu=verde -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
+; RUN: llc -march=amdgcn -mcpu=tonga -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
; On Southern Islands GPUs the local address space(3) uses 32-bit pointers and
; the global address space(1) uses 64-bit pointers. These tests check to make sure
@@ -130,7 +131,7 @@ define void @local_address_gep_const_offset_store(i32 addrspace(3)* %out, i32 %v
; FUNC-LABEL: {{^}}local_address_gep_large_const_offset_store:
; SI: s_add_i32 [[SPTR:s[0-9]]], s{{[0-9]+}}, 0x10004
; SI: v_mov_b32_e32 [[VPTR:v[0-9]+]], [[SPTR]]
-; SI: ds_write_b32 [[VPTR]], v{{[0-9]+}} [M0]{{$}}
+; SI: ds_write_b32 [[VPTR]], v{{[0-9]+$}}
define void @local_address_gep_large_const_offset_store(i32 addrspace(3)* %out, i32 %val) {
%gep = getelementptr i32 addrspace(3)* %out, i32 16385
store i32 %val, i32 addrspace(3)* %gep, align 4
diff --git a/test/CodeGen/R600/64bit-kernel-args.ll b/test/CodeGen/R600/64bit-kernel-args.ll
index cf4e055..2e08901 100644
--- a/test/CodeGen/R600/64bit-kernel-args.ll
+++ b/test/CodeGen/R600/64bit-kernel-args.ll
@@ -1,9 +1,12 @@
-; RUN: llc < %s -march=r600 -mcpu=tahiti -verify-machineinstrs | FileCheck %s --check-prefix=SI-CHECK
+; RUN: llc < %s -march=amdgcn -mcpu=tahiti -verify-machineinstrs | FileCheck %s --check-prefix=GCN --check-prefix=SI
+; RUN: llc < %s -march=amdgcn -mcpu=tonga -verify-machineinstrs | FileCheck %s --check-prefix=GCN --check-prefix=VI
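+; The SI and VI offsets name the same kernel argument slots: SI/CI SMRD
+; encodes the offset in dwords (0x9, 0xb), while VI SMEM encodes it in
+; bytes (0x24 = 36, 0x2c = 44).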
-; SI-CHECK: {{^}}f64_kernel_arg:
-; SI-CHECK-DAG: s_load_dwordx2 s[{{[0-9]:[0-9]}}], s[0:1], 0x9
-; SI-CHECK-DAG: s_load_dwordx2 s[{{[0-9]:[0-9]}}], s[0:1], 0xb
-; SI-CHECK: buffer_store_dwordx2
+; GCN: {{^}}f64_kernel_arg:
+; SI-DAG: s_load_dwordx2 s[{{[0-9]:[0-9]}}], s[0:1], 0x9
+; SI-DAG: s_load_dwordx2 s[{{[0-9]:[0-9]}}], s[0:1], 0xb
+; VI-DAG: s_load_dwordx2 s[{{[0-9]:[0-9]}}], s[0:1], 0x24
+; VI-DAG: s_load_dwordx2 s[{{[0-9]:[0-9]}}], s[0:1], 0x2c
+; GCN: buffer_store_dwordx2
define void @f64_kernel_arg(double addrspace(1)* %out, double %in) {
entry:
store double %in, double addrspace(1)* %out
diff --git a/test/CodeGen/R600/add-debug.ll b/test/CodeGen/R600/add-debug.ll
index 166e0f6..a83c689 100644
--- a/test/CodeGen/R600/add-debug.ll
+++ b/test/CodeGen/R600/add-debug.ll
@@ -1,4 +1,5 @@
-; RUN: llc < %s -march=r600 -mcpu=tahiti -debug
+; RUN: llc < %s -march=amdgcn -mcpu=tahiti -debug
+; RUN: llc < %s -march=amdgcn -mcpu=tonga -debug
; REQUIRES: asserts
; Check that SelectionDAGDumper does not crash on int_SI_if.
diff --git a/test/CodeGen/R600/add.ll b/test/CodeGen/R600/add.ll
index 767a642..3a8b97c 100644
--- a/test/CodeGen/R600/add.ll
+++ b/test/CodeGen/R600/add.ll
@@ -1,12 +1,13 @@
-; RUN: llc < %s -march=r600 -mcpu=redwood | FileCheck --check-prefix=EG-CHECK --check-prefix=FUNC %s
-; RUN: llc < %s -march=r600 -mcpu=verde -verify-machineinstrs | FileCheck --check-prefix=SI-CHECK --check-prefix=FUNC %s
+; RUN: llc < %s -march=r600 -mcpu=redwood | FileCheck --check-prefix=EG --check-prefix=FUNC %s
+; RUN: llc < %s -march=amdgcn -mcpu=verde -verify-machineinstrs | FileCheck --check-prefix=SI --check-prefix=FUNC %s
+; RUN: llc < %s -march=amdgcn -mcpu=tonga -verify-machineinstrs | FileCheck --check-prefix=SI --check-prefix=FUNC %s
;FUNC-LABEL: {{^}}test1:
-;EG-CHECK: ADD_INT {{[* ]*}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
+;EG: ADD_INT {{[* ]*}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
-;SI-CHECK: v_add_i32_e32 [[REG:v[0-9]+]], {{v[0-9]+, v[0-9]+}}
-;SI-CHECK-NOT: [[REG]]
-;SI-CHECK: buffer_store_dword [[REG]],
+;SI: v_add_i32_e32 [[REG:v[0-9]+]], {{v[0-9]+, v[0-9]+}}
+;SI-NOT: [[REG]]
+;SI: buffer_store_dword [[REG]],
define void @test1(i32 addrspace(1)* %out, i32 addrspace(1)* %in) {
%b_ptr = getelementptr i32 addrspace(1)* %in, i32 1
%a = load i32 addrspace(1)* %in
@@ -17,11 +18,11 @@ define void @test1(i32 addrspace(1)* %out, i32 addrspace(1)* %in) {
}
;FUNC-LABEL: {{^}}test2:
-;EG-CHECK: ADD_INT {{[* ]*}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
-;EG-CHECK: ADD_INT {{[* ]*}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
+;EG: ADD_INT {{[* ]*}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
+;EG: ADD_INT {{[* ]*}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
-;SI-CHECK: v_add_i32_e32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
-;SI-CHECK: v_add_i32_e32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
+;SI: v_add_i32_e32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
+;SI: v_add_i32_e32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
define void @test2(<2 x i32> addrspace(1)* %out, <2 x i32> addrspace(1)* %in) {
%b_ptr = getelementptr <2 x i32> addrspace(1)* %in, i32 1
@@ -33,15 +34,15 @@ define void @test2(<2 x i32> addrspace(1)* %out, <2 x i32> addrspace(1)* %in) {
}
;FUNC-LABEL: {{^}}test4:
-;EG-CHECK: ADD_INT {{[* ]*}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
-;EG-CHECK: ADD_INT {{[* ]*}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
-;EG-CHECK: ADD_INT {{[* ]*}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
-;EG-CHECK: ADD_INT {{[* ]*}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
+;EG: ADD_INT {{[* ]*}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
+;EG: ADD_INT {{[* ]*}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
+;EG: ADD_INT {{[* ]*}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
+;EG: ADD_INT {{[* ]*}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
-;SI-CHECK: v_add_i32_e32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
-;SI-CHECK: v_add_i32_e32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
-;SI-CHECK: v_add_i32_e32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
-;SI-CHECK: v_add_i32_e32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
+;SI: v_add_i32_e32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
+;SI: v_add_i32_e32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
+;SI: v_add_i32_e32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
+;SI: v_add_i32_e32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
define void @test4(<4 x i32> addrspace(1)* %out, <4 x i32> addrspace(1)* %in) {
%b_ptr = getelementptr <4 x i32> addrspace(1)* %in, i32 1
@@ -53,22 +54,22 @@ define void @test4(<4 x i32> addrspace(1)* %out, <4 x i32> addrspace(1)* %in) {
}
; FUNC-LABEL: {{^}}test8:
-; EG-CHECK: ADD_INT
-; EG-CHECK: ADD_INT
-; EG-CHECK: ADD_INT
-; EG-CHECK: ADD_INT
-; EG-CHECK: ADD_INT
-; EG-CHECK: ADD_INT
-; EG-CHECK: ADD_INT
-; EG-CHECK: ADD_INT
-; SI-CHECK: s_add_i32
-; SI-CHECK: s_add_i32
-; SI-CHECK: s_add_i32
-; SI-CHECK: s_add_i32
-; SI-CHECK: s_add_i32
-; SI-CHECK: s_add_i32
-; SI-CHECK: s_add_i32
-; SI-CHECK: s_add_i32
+; EG: ADD_INT
+; EG: ADD_INT
+; EG: ADD_INT
+; EG: ADD_INT
+; EG: ADD_INT
+; EG: ADD_INT
+; EG: ADD_INT
+; EG: ADD_INT
+; SI: s_add_i32
+; SI: s_add_i32
+; SI: s_add_i32
+; SI: s_add_i32
+; SI: s_add_i32
+; SI: s_add_i32
+; SI: s_add_i32
+; SI: s_add_i32
define void @test8(<8 x i32> addrspace(1)* %out, <8 x i32> %a, <8 x i32> %b) {
entry:
%0 = add <8 x i32> %a, %b
@@ -77,38 +78,38 @@ entry:
}
; FUNC-LABEL: {{^}}test16:
-; EG-CHECK: ADD_INT
-; EG-CHECK: ADD_INT
-; EG-CHECK: ADD_INT
-; EG-CHECK: ADD_INT
-; EG-CHECK: ADD_INT
-; EG-CHECK: ADD_INT
-; EG-CHECK: ADD_INT
-; EG-CHECK: ADD_INT
-; EG-CHECK: ADD_INT
-; EG-CHECK: ADD_INT
-; EG-CHECK: ADD_INT
-; EG-CHECK: ADD_INT
-; EG-CHECK: ADD_INT
-; EG-CHECK: ADD_INT
-; EG-CHECK: ADD_INT
-; EG-CHECK: ADD_INT
-; SI-CHECK: s_add_i32
-; SI-CHECK: s_add_i32
-; SI-CHECK: s_add_i32
-; SI-CHECK: s_add_i32
-; SI-CHECK: s_add_i32
-; SI-CHECK: s_add_i32
-; SI-CHECK: s_add_i32
-; SI-CHECK: s_add_i32
-; SI-CHECK: s_add_i32
-; SI-CHECK: s_add_i32
-; SI-CHECK: s_add_i32
-; SI-CHECK: s_add_i32
-; SI-CHECK: s_add_i32
-; SI-CHECK: s_add_i32
-; SI-CHECK: s_add_i32
-; SI-CHECK: s_add_i32
+; EG: ADD_INT
+; EG: ADD_INT
+; EG: ADD_INT
+; EG: ADD_INT
+; EG: ADD_INT
+; EG: ADD_INT
+; EG: ADD_INT
+; EG: ADD_INT
+; EG: ADD_INT
+; EG: ADD_INT
+; EG: ADD_INT
+; EG: ADD_INT
+; EG: ADD_INT
+; EG: ADD_INT
+; EG: ADD_INT
+; EG: ADD_INT
+; SI: s_add_i32
+; SI: s_add_i32
+; SI: s_add_i32
+; SI: s_add_i32
+; SI: s_add_i32
+; SI: s_add_i32
+; SI: s_add_i32
+; SI: s_add_i32
+; SI: s_add_i32
+; SI: s_add_i32
+; SI: s_add_i32
+; SI: s_add_i32
+; SI: s_add_i32
+; SI: s_add_i32
+; SI: s_add_i32
+; SI: s_add_i32
define void @test16(<16 x i32> addrspace(1)* %out, <16 x i32> %a, <16 x i32> %b) {
entry:
%0 = add <16 x i32> %a, %b
@@ -117,8 +118,8 @@ entry:
}
; FUNC-LABEL: {{^}}add64:
-; SI-CHECK: s_add_u32
-; SI-CHECK: s_addc_u32
+; SI: s_add_u32
+; SI: s_addc_u32
define void @add64(i64 addrspace(1)* %out, i64 %a, i64 %b) {
entry:
%0 = add i64 %a, %b
@@ -132,7 +133,7 @@ entry:
; to a VGPR before doing the add.
; FUNC-LABEL: {{^}}add64_sgpr_vgpr:
-; SI-CHECK-NOT: v_addc_u32_e32 s
+; SI-NOT: v_addc_u32_e32 s
define void @add64_sgpr_vgpr(i64 addrspace(1)* %out, i64 %a, i64 addrspace(1)* %in) {
entry:
%0 = load i64 addrspace(1)* %in
@@ -143,8 +144,8 @@ entry:
; Test i64 add inside a branch.
; FUNC-LABEL: {{^}}add64_in_branch:
-; SI-CHECK: s_add_u32
-; SI-CHECK: s_addc_u32
+; SI: s_add_u32
+; SI: s_addc_u32
define void @add64_in_branch(i64 addrspace(1)* %out, i64 addrspace(1)* %in, i64 %a, i64 %b, i64 %c) {
entry:
%0 = icmp eq i64 %a, 0
diff --git a/test/CodeGen/R600/add_i64.ll b/test/CodeGen/R600/add_i64.ll
index 47ecf6d..1769409 100644
--- a/test/CodeGen/R600/add_i64.ll
+++ b/test/CodeGen/R600/add_i64.ll
@@ -1,4 +1,4 @@
-; RUN: llc -march=r600 -mcpu=SI -verify-machineinstrs< %s | FileCheck -check-prefix=SI %s
+; RUN: llc -march=amdgcn -mcpu=SI -verify-machineinstrs < %s | FileCheck -check-prefix=SI %s
declare i32 @llvm.r600.read.tidig.x() readnone
diff --git a/test/CodeGen/R600/address-space.ll b/test/CodeGen/R600/address-space.ll
index d04afe6..74ea9f0 100644
--- a/test/CodeGen/R600/address-space.ll
+++ b/test/CodeGen/R600/address-space.ll
@@ -1,4 +1,5 @@
-; RUN: llc -march=r600 -mcpu=SI -verify-machineinstrs < %s | FileCheck %s
+; RUN: llc -march=amdgcn -mcpu=SI -verify-machineinstrs < %s | FileCheck %s
+; RUN: llc -march=amdgcn -mcpu=tonga -verify-machineinstrs < %s | FileCheck %s
; Test that codegenprepare understands address space sizes
@@ -9,9 +10,10 @@
; CHECK-LABEL: {{^}}do_as_ptr_calcs:
; CHECK: s_load_dword [[SREG1:s[0-9]+]],
+; CHECK: v_mov_b32_e32 [[VREG2:v[0-9]+]], [[SREG1]]
; CHECK: v_mov_b32_e32 [[VREG1:v[0-9]+]], [[SREG1]]
; CHECK-DAG: ds_read_b32 v{{[0-9]+}}, [[VREG1]] offset:12
-; CHECK-DAG: ds_read_b32 v{{[0-9]+}}, v{{[0-9]+}} offset:20
+; CHECK-DAG: ds_read_b32 v{{[0-9]+}}, [[VREG2]] offset:20
define void @do_as_ptr_calcs(%struct.foo addrspace(3)* nocapture %ptr) nounwind {
entry:
%x = getelementptr inbounds %struct.foo addrspace(3)* %ptr, i32 0, i32 1, i32 0
diff --git a/test/CodeGen/R600/and.ll b/test/CodeGen/R600/and.ll
index 9a76fce..bb7cba3 100644
--- a/test/CodeGen/R600/and.ll
+++ b/test/CodeGen/R600/and.ll
@@ -1,5 +1,6 @@
; RUN: llc -march=r600 -mcpu=redwood < %s | FileCheck -check-prefix=EG -check-prefix=FUNC %s
-; RUN: llc -march=r600 -mcpu=verde -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
+; RUN: llc -march=amdgcn -mcpu=verde -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
+; RUN: llc -march=amdgcn -mcpu=tonga -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
; FUNC-LABEL: {{^}}test2:
; EG: AND_INT {{\*? *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
@@ -63,8 +64,8 @@ define void @v_and_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %aptr, i32 addr
ret void
}
-; FUNC-LABEL: {{^}}v_and_constant_i32:
-; SI: v_and_b32
+; FUNC-LABEL: {{^}}v_and_constant_i32
+; SI: v_and_b32_e32 v{{[0-9]+}}, 0x12d687, v{{[0-9]+}}
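+; (1234567 = 0x12d687 is outside the inline constant range, so it must be
+; encoded as a 32-bit literal operand.)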
define void @v_and_constant_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %aptr) {
%a = load i32 addrspace(1)* %aptr, align 4
%and = and i32 %a, 1234567
@@ -72,7 +73,25 @@ define void @v_and_constant_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %aptr)
ret void
}
-; FUNC-LABEL: {{^}}s_and_i64:
+; FUNC-LABEL: {{^}}v_and_inline_imm_64_i32
+; SI: v_and_b32_e32 v{{[0-9]+}}, 64, v{{[0-9]+}}
+define void @v_and_inline_imm_64_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %aptr) {
+ %a = load i32 addrspace(1)* %aptr, align 4
+ %and = and i32 %a, 64
+ store i32 %and, i32 addrspace(1)* %out, align 4
+ ret void
+}
+
+; FUNC-LABEL: {{^}}v_and_inline_imm_neg_16_i32
+; SI: v_and_b32_e32 v{{[0-9]+}}, -16, v{{[0-9]+}}
+define void @v_and_inline_imm_neg_16_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %aptr) {
+ %a = load i32 addrspace(1)* %aptr, align 4
+ %and = and i32 %a, -16
+ store i32 %and, i32 addrspace(1)* %out, align 4
+ ret void
+}
+
+; FUNC-LABEL: {{^}}s_and_i64
; SI: s_and_b64
define void @s_and_i64(i64 addrspace(1)* %out, i64 %a, i64 %b) {
%and = and i64 %a, %b
@@ -89,8 +108,8 @@ define void @s_and_i1(i1 addrspace(1)* %out, i1 %a, i1 %b) {
ret void
}
-; FUNC-LABEL: {{^}}s_and_constant_i64:
-; SI: s_and_b64
+; FUNC-LABEL: {{^}}s_and_constant_i64
+; SI: s_and_b64 s{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}
define void @s_and_constant_i64(i64 addrspace(1)* %out, i64 %a) {
%and = and i64 %a, 281474976710655
store i64 %and, i64 addrspace(1)* %out, align 8
@@ -149,10 +168,129 @@ define void @v_and_inline_imm_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %apt
ret void
}
-; FUNC-LABEL: {{^}}s_and_inline_imm_i64:
+; FUNC-LABEL: {{^}}s_and_inline_imm_64_i64
; SI: s_and_b64 s{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, 64
-define void @s_and_inline_imm_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %aptr, i64 %a) {
+define void @s_and_inline_imm_64_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %aptr, i64 %a) {
%and = and i64 %a, 64
store i64 %and, i64 addrspace(1)* %out, align 8
ret void
}
+
+; FUNC-LABEL: {{^}}s_and_inline_imm_1_i64
+; SI: s_and_b64 s{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, 1
+define void @s_and_inline_imm_1_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %aptr, i64 %a) {
+ %and = and i64 %a, 1
+ store i64 %and, i64 addrspace(1)* %out, align 8
+ ret void
+}
+
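+; The remaining constants are the i64 bit patterns of floating-point inline
+; immediates, e.g. 4607182418800017408 = 0x3FF0000000000000 (double 1.0) and
+; 4611686018427387904 = 0x4000000000000000 (double 2.0); the s_and_b64 checks
+; expect them folded as 1.0, 2.0, etc. rather than emitted as literals.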
+; FUNC-LABEL: {{^}}s_and_inline_imm_1.0_i64
+; SI: s_and_b64 s{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, 1.0
+define void @s_and_inline_imm_1.0_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %aptr, i64 %a) {
+ %and = and i64 %a, 4607182418800017408
+ store i64 %and, i64 addrspace(1)* %out, align 8
+ ret void
+}
+
+; FUNC-LABEL: {{^}}s_and_inline_imm_neg_1.0_i64
+; SI: s_and_b64 s{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, -1.0
+define void @s_and_inline_imm_neg_1.0_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %aptr, i64 %a) {
+ %and = and i64 %a, 13830554455654793216
+ store i64 %and, i64 addrspace(1)* %out, align 8
+ ret void
+}
+
+; FUNC-LABEL: {{^}}s_and_inline_imm_0.5_i64
+; SI: s_and_b64 s{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, 0.5
+define void @s_and_inline_imm_0.5_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %aptr, i64 %a) {
+ %and = and i64 %a, 4602678819172646912
+ store i64 %and, i64 addrspace(1)* %out, align 8
+ ret void
+}
+
+; FUNC-LABEL: {{^}}s_and_inline_imm_neg_0.5_i64
+; SI: s_and_b64 s{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, -0.5
+define void @s_and_inline_imm_neg_0.5_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %aptr, i64 %a) {
+ %and = and i64 %a, 13826050856027422720
+ store i64 %and, i64 addrspace(1)* %out, align 8
+ ret void
+}
+
+; FUNC-LABEL: {{^}}s_and_inline_imm_2.0_i64
+; SI: s_and_b64 s{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, 2.0
+define void @s_and_inline_imm_2.0_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %aptr, i64 %a) {
+ %and = and i64 %a, 4611686018427387904
+ store i64 %and, i64 addrspace(1)* %out, align 8
+ ret void
+}
+
+; FUNC-LABEL: {{^}}s_and_inline_imm_neg_2.0_i64
+; SI: s_and_b64 s{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, -2.0
+define void @s_and_inline_imm_neg_2.0_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %aptr, i64 %a) {
+ %and = and i64 %a, 13835058055282163712
+ store i64 %and, i64 addrspace(1)* %out, align 8
+ ret void
+}
+
+; FUNC-LABEL: {{^}}s_and_inline_imm_4.0_i64
+; SI: s_and_b64 s{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, 4.0
+define void @s_and_inline_imm_4.0_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %aptr, i64 %a) {
+ %and = and i64 %a, 4616189618054758400
+ store i64 %and, i64 addrspace(1)* %out, align 8
+ ret void
+}
+
+; FUNC-LABEL: {{^}}s_and_inline_imm_neg_4.0_i64
+; SI: s_and_b64 s{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, -4.0
+define void @s_and_inline_imm_neg_4.0_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %aptr, i64 %a) {
+ %and = and i64 %a, 13839561654909534208
+ store i64 %and, i64 addrspace(1)* %out, align 8
+ ret void
+}
+
+
+; Test with the 64-bit integer bitpattern for a 32-bit float in the
+; low 32 bits, which is not a valid 64-bit inline immediate.
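+; For example, 1082130432 = 0x40800000, the bit pattern of 4.0f, so the low
+; word is materialized as 4.0 with s_mov_b32 and the high word as 0 (or -1
+; in the negative case, where the i64 mask is sign-extended).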
+
+; FUNC-LABEL: {{^}}s_and_inline_imm_f32_4.0_i64
+; SI-DAG: s_mov_b32 s[[K_LO:[0-9]+]], 4.0
+; SI-DAG: s_mov_b32 s[[K_HI:[0-9]+]], 0{{$}}
+; SI: s_and_b64 s{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, s{{\[}}[[K_LO]]:[[K_HI]]{{\]}}
+define void @s_and_inline_imm_f32_4.0_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %aptr, i64 %a) {
+ %and = and i64 %a, 1082130432
+ store i64 %and, i64 addrspace(1)* %out, align 8
+ ret void
+}
+
+; FIXME: Copy of -1 register
+; FUNC-LABEL: {{^}}s_and_inline_imm_f32_neg_4.0_i64
+; SI-DAG: s_mov_b32 s[[K_LO:[0-9]+]], -4.0
+; SI-DAG: s_mov_b32 s[[K_HI:[0-9]+]], -1{{$}}
+; SI-DAG: s_mov_b32 s[[K_HI_COPY:[0-9]+]], s[[K_HI]]
+; SI: s_and_b64 s{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, s{{\[}}[[K_LO]]:[[K_HI_COPY]]{{\]}}
+define void @s_and_inline_imm_f32_neg_4.0_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %aptr, i64 %a) {
+ %and = and i64 %a, -1065353216
+ store i64 %and, i64 addrspace(1)* %out, align 8
+ ret void
+}
+
+; Shift the f32 bitpattern into the upper 32 bits.
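+; (4647714815446351872 = 0x4080000000000000 = 0x40800000 << 32, i.e. the
+; 4.0f pattern in the high word with a zero low word.)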
+; FUNC-LABEL: {{^}}s_and_inline_high_imm_f32_4.0_i64
+; SI-DAG: s_mov_b32 s[[K_HI:[0-9]+]], 4.0
+; SI-DAG: s_mov_b32 s[[K_LO:[0-9]+]], 0{{$}}
+; SI: s_and_b64 s{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, s{{\[}}[[K_LO]]:[[K_HI]]{{\]}}
+define void @s_and_inline_high_imm_f32_4.0_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %aptr, i64 %a) {
+ %and = and i64 %a, 4647714815446351872
+ store i64 %and, i64 addrspace(1)* %out, align 8
+ ret void
+}
+
+; FUNC-LABEL: {{^}}s_and_inline_high_imm_f32_neg_4.0_i64
+; SI-DAG: s_mov_b32 s[[K_HI:[0-9]+]], -4.0
+; SI-DAG: s_mov_b32 s[[K_LO:[0-9]+]], 0{{$}}
+; SI: s_and_b64 s{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, s{{\[}}[[K_LO]]:[[K_HI]]{{\]}}
+define void @s_and_inline_high_imm_f32_neg_4.0_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %aptr, i64 %a) {
+ %and = and i64 %a, 13871086852301127680
+ store i64 %and, i64 addrspace(1)* %out, align 8
+ ret void
+}
diff --git a/test/CodeGen/R600/anyext.ll b/test/CodeGen/R600/anyext.ll
index 23fdcbb..48d8f31 100644
--- a/test/CodeGen/R600/anyext.ll
+++ b/test/CodeGen/R600/anyext.ll
@@ -1,4 +1,5 @@
-; RUN: llc < %s -march=r600 -mcpu=verde -verify-machineinstrs | FileCheck %s
+; RUN: llc < %s -march=amdgcn -mcpu=verde -verify-machineinstrs | FileCheck %s
+; RUN: llc < %s -march=amdgcn -mcpu=tonga -verify-machineinstrs | FileCheck %s
; CHECK-LABEL: {{^}}anyext_i1_i32:
; CHECK: v_cndmask_b32_e64
diff --git a/test/CodeGen/R600/array-ptr-calc-i32.ll b/test/CodeGen/R600/array-ptr-calc-i32.ll
index 84d3540..33a8aee 100644
--- a/test/CodeGen/R600/array-ptr-calc-i32.ll
+++ b/test/CodeGen/R600/array-ptr-calc-i32.ll
@@ -1,5 +1,5 @@
-; RUN: llc -verify-machineinstrs -march=r600 -mcpu=SI -mattr=-promote-alloca < %s | FileCheck -check-prefix=SI-ALLOCA -check-prefix=SI %s
-; RUN: llc -verify-machineinstrs -march=r600 -mcpu=SI -mattr=+promote-alloca < %s | FileCheck -check-prefix=SI-PROMOTE -check-prefix=SI %s
+; RUN: llc -verify-machineinstrs -march=amdgcn -mcpu=SI -mattr=-promote-alloca < %s | FileCheck -check-prefix=SI-ALLOCA -check-prefix=SI %s
+; RUN: llc -verify-machineinstrs -march=amdgcn -mcpu=SI -mattr=+promote-alloca < %s | FileCheck -check-prefix=SI-PROMOTE -check-prefix=SI %s
declare i32 @llvm.SI.tid() nounwind readnone
declare void @llvm.AMDGPU.barrier.local() nounwind noduplicate
diff --git a/test/CodeGen/R600/array-ptr-calc-i64.ll b/test/CodeGen/R600/array-ptr-calc-i64.ll
index 75f6394..32e657d 100644
--- a/test/CodeGen/R600/array-ptr-calc-i64.ll
+++ b/test/CodeGen/R600/array-ptr-calc-i64.ll
@@ -1,4 +1,4 @@
-; RUN: llc -march=r600 -mcpu=SI -verify-machineinstrs < %s | FileCheck -check-prefix=SI %s
+; RUN: llc -march=amdgcn -mcpu=SI -verify-machineinstrs < %s | FileCheck -check-prefix=SI %s
declare i32 @llvm.SI.tid() readnone
diff --git a/test/CodeGen/R600/atomic_cmp_swap_local.ll b/test/CodeGen/R600/atomic_cmp_swap_local.ll
index 223f4d3..6c76ad7 100644
--- a/test/CodeGen/R600/atomic_cmp_swap_local.ll
+++ b/test/CodeGen/R600/atomic_cmp_swap_local.ll
@@ -1,14 +1,17 @@
-; RUN: llc -march=r600 -mcpu=SI -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
-; RUN: llc -march=r600 -mcpu=bonaire -verify-machineinstrs < %s | FileCheck -strict-whitespace -check-prefix=CI -check-prefix=FUNC %s
+; RUN: llc -march=amdgcn -mcpu=SI -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=SICI -check-prefix=GCN -check-prefix=FUNC %s
+; RUN: llc -march=amdgcn -mcpu=bonaire -verify-machineinstrs < %s | FileCheck -check-prefix=SICI -check-prefix=CIVI -check-prefix=GCN -check-prefix=FUNC %s
+; RUN: llc -march=amdgcn -mcpu=tonga -verify-machineinstrs < %s | FileCheck -check-prefix=VI -check-prefix=CIVI -check-prefix=GCN -check-prefix=FUNC %s
; FUNC-LABEL: {{^}}lds_atomic_cmpxchg_ret_i32_offset:
-; SI: s_load_dword [[PTR:s[0-9]+]], s{{\[[0-9]+:[0-9]+\]}}, 0xb
-; SI: s_load_dword [[SWAP:s[0-9]+]], s{{\[[0-9]+:[0-9]+\]}}, 0xc
-; SI-DAG: v_mov_b32_e32 [[VCMP:v[0-9]+]], 7
-; SI-DAG: v_mov_b32_e32 [[VPTR:v[0-9]+]], [[PTR]]
-; SI-DAG: v_mov_b32_e32 [[VSWAP:v[0-9]+]], [[SWAP]]
-; SI: ds_cmpst_rtn_b32 [[RESULT:v[0-9]+]], [[VPTR]], [[VCMP]], [[VSWAP]] offset:16 [M0]
-; SI: s_endpgm
+; GCN: v_mov_b32_e32 [[VCMP:v[0-9]+]], 7
+; SICI: s_load_dword [[PTR:s[0-9]+]], s{{\[[0-9]+:[0-9]+\]}}, 0xb
+; SICI: s_load_dword [[SWAP:s[0-9]+]], s{{\[[0-9]+:[0-9]+\]}}, 0xc
+; VI: s_load_dword [[PTR:s[0-9]+]], s{{\[[0-9]+:[0-9]+\]}}, 0x2c
+; VI: s_load_dword [[SWAP:s[0-9]+]], s{{\[[0-9]+:[0-9]+\]}}, 0x30
+; GCN-DAG: v_mov_b32_e32 [[VPTR:v[0-9]+]], [[PTR]]
+; GCN-DAG: v_mov_b32_e32 [[VSWAP:v[0-9]+]], [[SWAP]]
+; GCN: ds_cmpst_rtn_b32 [[RESULT:v[0-9]+]], [[VPTR]], [[VCMP]], [[VSWAP]] offset:16
+; GCN: s_endpgm
define void @lds_atomic_cmpxchg_ret_i32_offset(i32 addrspace(1)* %out, i32 addrspace(3)* %ptr, i32 %swap) nounwind {
%gep = getelementptr i32 addrspace(3)* %ptr, i32 4
%pair = cmpxchg i32 addrspace(3)* %gep, i32 7, i32 %swap seq_cst monotonic
@@ -18,17 +21,18 @@ define void @lds_atomic_cmpxchg_ret_i32_offset(i32 addrspace(1)* %out, i32 addrs
}
; FUNC-LABEL: {{^}}lds_atomic_cmpxchg_ret_i64_offset:
-; SI: s_load_dword [[PTR:s[0-9]+]], s{{\[[0-9]+:[0-9]+\]}}, 0xb
-; SI: s_load_dwordx2 s{{\[}}[[LOSWAP:[0-9]+]]:[[HISWAP:[0-9]+]]{{\]}}, s{{\[[0-9]+:[0-9]+\]}}, 0xd
-; SI: s_mov_b64 s{{\[}}[[LOSCMP:[0-9]+]]:[[HISCMP:[0-9]+]]{{\]}}, 7
-; SI-DAG: v_mov_b32_e32 v[[LOVCMP:[0-9]+]], s[[LOSCMP]]
-; SI-DAG: v_mov_b32_e32 v[[HIVCMP:[0-9]+]], s[[HISCMP]]
-; SI-DAG: v_mov_b32_e32 [[VPTR:v[0-9]+]], [[PTR]]
-; SI-DAG: v_mov_b32_e32 v[[LOSWAPV:[0-9]+]], s[[LOSWAP]]
-; SI-DAG: v_mov_b32_e32 v[[HISWAPV:[0-9]+]], s[[HISWAP]]
-; SI: ds_cmpst_rtn_b64 [[RESULT:v\[[0-9]+:[0-9]+\]]], [[VPTR]], v{{\[}}[[LOVCMP]]:[[HIVCMP]]{{\]}}, v{{\[}}[[LOSWAPV]]:[[HISWAPV]]{{\]}} offset:32 [M0]
-; SI: buffer_store_dwordx2 [[RESULT]],
-; SI: s_endpgm
+; GCN-DAG: v_mov_b32_e32 v[[LOVCMP:[0-9]+]], 7
+; GCN-DAG: v_mov_b32_e32 v[[HIVCMP:[0-9]+]], 0
+; SICI: s_load_dword [[PTR:s[0-9]+]], s{{\[[0-9]+:[0-9]+\]}}, 0xb
+; SICI: s_load_dwordx2 s{{\[}}[[LOSWAP:[0-9]+]]:[[HISWAP:[0-9]+]]{{\]}}, s{{\[[0-9]+:[0-9]+\]}}, 0xd
+; VI: s_load_dword [[PTR:s[0-9]+]], s{{\[[0-9]+:[0-9]+\]}}, 0x2c
+; VI: s_load_dwordx2 s{{\[}}[[LOSWAP:[0-9]+]]:[[HISWAP:[0-9]+]]{{\]}}, s{{\[[0-9]+:[0-9]+\]}}, 0x34
+; GCN-DAG: v_mov_b32_e32 [[VPTR:v[0-9]+]], [[PTR]]
+; GCN-DAG: v_mov_b32_e32 v[[LOSWAPV:[0-9]+]], s[[LOSWAP]]
+; GCN-DAG: v_mov_b32_e32 v[[HISWAPV:[0-9]+]], s[[HISWAP]]
+; GCN: ds_cmpst_rtn_b64 [[RESULT:v\[[0-9]+:[0-9]+\]]], [[VPTR]], v{{\[}}[[LOVCMP]]:[[HIVCMP]]{{\]}}, v{{\[}}[[LOSWAPV]]:[[HISWAPV]]{{\]}} offset:32
+; GCN: buffer_store_dwordx2 [[RESULT]],
+; GCN: s_endpgm
define void @lds_atomic_cmpxchg_ret_i64_offset(i64 addrspace(1)* %out, i64 addrspace(3)* %ptr, i64 %swap) nounwind {
%gep = getelementptr i64 addrspace(3)* %ptr, i32 4
%pair = cmpxchg i64 addrspace(3)* %gep, i64 7, i64 %swap seq_cst monotonic
@@ -39,8 +43,8 @@ define void @lds_atomic_cmpxchg_ret_i64_offset(i64 addrspace(1)* %out, i64 addrs
; FUNC-LABEL: {{^}}lds_atomic_cmpxchg_ret_i32_bad_si_offset
; SI: ds_cmpst_rtn_b32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
-; CI: ds_cmpst_rtn_b32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}} offset:16 [M0]
-; SI: s_endpgm
+; CIVI: ds_cmpst_rtn_b32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}} offset:16
+; GCN: s_endpgm
define void @lds_atomic_cmpxchg_ret_i32_bad_si_offset(i32 addrspace(1)* %out, i32 addrspace(3)* %ptr, i32 %swap, i32 %a, i32 %b) nounwind {
%sub = sub i32 %a, %b
%add = add i32 %sub, 4
@@ -52,13 +56,15 @@ define void @lds_atomic_cmpxchg_ret_i32_bad_si_offset(i32 addrspace(1)* %out, i3
}
; FUNC-LABEL: {{^}}lds_atomic_cmpxchg_noret_i32_offset:
-; SI: s_load_dword [[PTR:s[0-9]+]], s{{\[[0-9]+:[0-9]+\]}}, 0x9
-; SI: s_load_dword [[SWAP:s[0-9]+]], s{{\[[0-9]+:[0-9]+\]}}, 0xa
-; SI-DAG: v_mov_b32_e32 [[VCMP:v[0-9]+]], 7
-; SI-DAG: v_mov_b32_e32 [[VPTR:v[0-9]+]], [[PTR]]
-; SI-DAG: v_mov_b32_e32 [[VSWAP:v[0-9]+]], [[SWAP]]
-; SI: ds_cmpst_b32 [[VPTR]], [[VCMP]], [[VSWAP]] offset:16 [M0]
-; SI: s_endpgm
+; SICI: s_load_dword [[PTR:s[0-9]+]], s{{\[[0-9]+:[0-9]+\]}}, 0x9
+; SICI: s_load_dword [[SWAP:s[0-9]+]], s{{\[[0-9]+:[0-9]+\]}}, 0xa
+; VI: s_load_dword [[PTR:s[0-9]+]], s{{\[[0-9]+:[0-9]+\]}}, 0x24
+; VI: s_load_dword [[SWAP:s[0-9]+]], s{{\[[0-9]+:[0-9]+\]}}, 0x28
+; GCN-DAG: v_mov_b32_e32 [[VCMP:v[0-9]+]], 7
+; GCN-DAG: v_mov_b32_e32 [[VPTR:v[0-9]+]], [[PTR]]
+; GCN-DAG: v_mov_b32_e32 [[VSWAP:v[0-9]+]], [[SWAP]]
+; GCN: ds_cmpst_b32 [[VPTR]], [[VCMP]], [[VSWAP]] offset:16
+; GCN: s_endpgm
define void @lds_atomic_cmpxchg_noret_i32_offset(i32 addrspace(3)* %ptr, i32 %swap) nounwind {
%gep = getelementptr i32 addrspace(3)* %ptr, i32 4
%pair = cmpxchg i32 addrspace(3)* %gep, i32 7, i32 %swap seq_cst monotonic
@@ -67,16 +73,17 @@ define void @lds_atomic_cmpxchg_noret_i32_offset(i32 addrspace(3)* %ptr, i32 %sw
}
; FUNC-LABEL: {{^}}lds_atomic_cmpxchg_noret_i64_offset:
-; SI: s_load_dword [[PTR:s[0-9]+]], s{{\[[0-9]+:[0-9]+\]}}, 0x9
-; SI: s_load_dwordx2 s{{\[}}[[LOSWAP:[0-9]+]]:[[HISWAP:[0-9]+]]{{\]}}, s{{\[[0-9]+:[0-9]+\]}}, 0xb
-; SI: s_mov_b64 s{{\[}}[[LOSCMP:[0-9]+]]:[[HISCMP:[0-9]+]]{{\]}}, 7
-; SI-DAG: v_mov_b32_e32 v[[LOVCMP:[0-9]+]], s[[LOSCMP]]
-; SI-DAG: v_mov_b32_e32 v[[HIVCMP:[0-9]+]], s[[HISCMP]]
-; SI-DAG: v_mov_b32_e32 [[VPTR:v[0-9]+]], [[PTR]]
-; SI-DAG: v_mov_b32_e32 v[[LOSWAPV:[0-9]+]], s[[LOSWAP]]
-; SI-DAG: v_mov_b32_e32 v[[HISWAPV:[0-9]+]], s[[HISWAP]]
-; SI: ds_cmpst_b64 [[VPTR]], v{{\[}}[[LOVCMP]]:[[HIVCMP]]{{\]}}, v{{\[}}[[LOSWAPV]]:[[HISWAPV]]{{\]}} offset:32 [M0]
-; SI: s_endpgm
+; SICI: s_load_dword [[PTR:s[0-9]+]], s{{\[[0-9]+:[0-9]+\]}}, 0x9
+; SICI: s_load_dwordx2 s{{\[}}[[LOSWAP:[0-9]+]]:[[HISWAP:[0-9]+]]{{\]}}, s{{\[[0-9]+:[0-9]+\]}}, 0xb
+; VI: s_load_dword [[PTR:s[0-9]+]], s{{\[[0-9]+:[0-9]+\]}}, 0x24
+; VI: s_load_dwordx2 s{{\[}}[[LOSWAP:[0-9]+]]:[[HISWAP:[0-9]+]]{{\]}}, s{{\[[0-9]+:[0-9]+\]}}, 0x2c
+; GCN-DAG: v_mov_b32_e32 v[[LOVCMP:[0-9]+]], 7
+; GCN-DAG: v_mov_b32_e32 v[[HIVCMP:[0-9]+]], 0
+; GCN-DAG: v_mov_b32_e32 [[VPTR:v[0-9]+]], [[PTR]]
+; GCN-DAG: v_mov_b32_e32 v[[LOSWAPV:[0-9]+]], s[[LOSWAP]]
+; GCN-DAG: v_mov_b32_e32 v[[HISWAPV:[0-9]+]], s[[HISWAP]]
+; GCN: ds_cmpst_b64 [[VPTR]], v{{\[}}[[LOVCMP]]:[[HIVCMP]]{{\]}}, v{{\[}}[[LOSWAPV]]:[[HISWAPV]]{{\]}} offset:32
+; GCN: s_endpgm
define void @lds_atomic_cmpxchg_noret_i64_offset(i64 addrspace(3)* %ptr, i64 %swap) nounwind {
%gep = getelementptr i64 addrspace(3)* %ptr, i32 4
%pair = cmpxchg i64 addrspace(3)* %gep, i64 7, i64 %swap seq_cst monotonic
diff --git a/test/CodeGen/R600/atomic_load_add.ll b/test/CodeGen/R600/atomic_load_add.ll
index f0eff21..5fe05f2 100644
--- a/test/CodeGen/R600/atomic_load_add.ll
+++ b/test/CodeGen/R600/atomic_load_add.ll
@@ -1,4 +1,5 @@
-; RUN: llc -march=r600 -mcpu=SI -verify-machineinstrs < %s | FileCheck %s -check-prefix=SI -check-prefix=FUNC
+; RUN: llc -march=amdgcn -mcpu=SI -verify-machineinstrs < %s | FileCheck %s -check-prefix=SI -check-prefix=FUNC
+; RUN: llc -march=amdgcn -mcpu=tonga -verify-machineinstrs < %s | FileCheck %s -check-prefix=SI -check-prefix=FUNC
; RUN: llc -march=r600 -mcpu=redwood < %s | FileCheck -check-prefix=R600 -check-prefix=FUNC %s
; FUNC-LABEL: {{^}}atomic_add_local:
diff --git a/test/CodeGen/R600/atomic_load_sub.ll b/test/CodeGen/R600/atomic_load_sub.ll
index 61ff296..4072283 100644
--- a/test/CodeGen/R600/atomic_load_sub.ll
+++ b/test/CodeGen/R600/atomic_load_sub.ll
@@ -1,5 +1,6 @@
; RUN: llc -march=r600 -mcpu=redwood < %s | FileCheck -check-prefix=R600 -check-prefix=FUNC %s
-; RUN: llc -march=r600 -mcpu=SI -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
+; RUN: llc -march=amdgcn -mcpu=SI -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
+; RUN: llc -march=amdgcn -mcpu=tonga -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
; FUNC-LABEL: {{^}}atomic_sub_local:
; R600: LDS_SUB *
diff --git a/test/CodeGen/R600/basic-branch.ll b/test/CodeGen/R600/basic-branch.ll
index 073ab79..abdc4af 100644
--- a/test/CodeGen/R600/basic-branch.ll
+++ b/test/CodeGen/R600/basic-branch.ll
@@ -1,5 +1,6 @@
; XFAIL: *
-; RUN: llc -O0 -march=r600 -mcpu=SI -verify-machineinstrs < %s | FileCheck %s
+; RUN: llc -O0 -march=amdgcn -mcpu=SI -verify-machineinstrs < %s | FileCheck %s
+; RUN: llc -O0 -march=amdgcn -mcpu=tonga -verify-machineinstrs < %s | FileCheck %s
; CHECK-LABEL: {{^}}test_branch(
define void @test_branch(i32 addrspace(1)* noalias %out, i32 addrspace(1)* noalias %in, i32 %val) nounwind {
diff --git a/test/CodeGen/R600/basic-loop.ll b/test/CodeGen/R600/basic-loop.ll
index 3cd609135..f0263ca 100644
--- a/test/CodeGen/R600/basic-loop.ll
+++ b/test/CodeGen/R600/basic-loop.ll
@@ -1,5 +1,5 @@
-; XFAIL: *
-; RUN: llc -O0 -verify-machineinstrs -march=r600 -mcpu=SI < %s | FileCheck %s
+; RUN: llc -O0 -verify-machineinstrs -march=amdgcn -mcpu=SI < %s | FileCheck %s
+; RUN: llc -O0 -verify-machineinstrs -march=amdgcn -mcpu=tonga < %s | FileCheck %s
; CHECK-LABEL: {{^}}test_loop:
define void @test_loop(i32 addrspace(1)* noalias %out, i32 addrspace(1)* noalias %in, i32 %val) nounwind {
diff --git a/test/CodeGen/R600/bfi_int.ll b/test/CodeGen/R600/bfi_int.ll
index 2a0bb37..0334934 100644
--- a/test/CodeGen/R600/bfi_int.ll
+++ b/test/CodeGen/R600/bfi_int.ll
@@ -1,13 +1,14 @@
-; RUN: llc < %s -march=r600 -mcpu=redwood | FileCheck --check-prefix=R600-CHECK %s
-; RUN: llc < %s -march=r600 -mcpu=SI -verify-machineinstrs | FileCheck --check-prefix=SI-CHECK %s
+; RUN: llc < %s -march=r600 -mcpu=redwood | FileCheck --check-prefix=R600 %s
+; RUN: llc < %s -march=amdgcn -mcpu=SI -verify-machineinstrs | FileCheck --check-prefix=SI %s
+; RUN: llc < %s -march=amdgcn -mcpu=tonga -verify-machineinstrs | FileCheck --check-prefix=SI %s
; BFI_INT Definition pattern from ISA docs
; (y & x) | (z & ~x)
;
-; R600-CHECK: {{^}}bfi_def:
-; R600-CHECK: BFI_INT
-; SI-CHECK: @bfi_def
-; SI-CHECK: v_bfi_b32
+; R600: {{^}}bfi_def:
+; R600: BFI_INT
+; SI: @bfi_def
+; SI: v_bfi_b32
define void @bfi_def(i32 addrspace(1)* %out, i32 %x, i32 %y, i32 %z) {
entry:
%0 = xor i32 %x, -1
@@ -20,10 +21,10 @@ entry:
; SHA-256 Ch function
; z ^ (x & (y ^ z))
-; R600-CHECK: {{^}}bfi_sha256_ch:
-; R600-CHECK: BFI_INT
-; SI-CHECK: @bfi_sha256_ch
-; SI-CHECK: v_bfi_b32
+; R600: {{^}}bfi_sha256_ch:
+; R600: BFI_INT
+; SI: @bfi_sha256_ch
+; SI: v_bfi_b32
define void @bfi_sha256_ch(i32 addrspace(1)* %out, i32 %x, i32 %y, i32 %z) {
entry:
%0 = xor i32 %y, %z
@@ -35,11 +36,11 @@ entry:
; SHA-256 Ma function
; ((x & z) | (y & (x | z)))
-; R600-CHECK: {{^}}bfi_sha256_ma:
-; R600-CHECK: XOR_INT * [[DST:T[0-9]+\.[XYZW]]], KC0[2].Z, KC0[2].W
-; R600-CHECK: BFI_INT * {{T[0-9]+\.[XYZW]}}, {{[[DST]]|PV\.[XYZW]}}, KC0[3].X, KC0[2].W
-; SI-CHECK: v_xor_b32_e32 [[DST:v[0-9]+]], {{s[0-9]+, v[0-9]+}}
-; SI-CHECK: v_bfi_b32 {{v[0-9]+}}, [[DST]], {{s[0-9]+, v[0-9]+}}
+; R600: {{^}}bfi_sha256_ma:
+; R600: XOR_INT * [[DST:T[0-9]+\.[XYZW]]], KC0[2].Z, KC0[2].W
+; R600: BFI_INT * {{T[0-9]+\.[XYZW]}}, {{[[DST]]|PV\.[XYZW]}}, KC0[3].X, KC0[2].W
+; SI: v_xor_b32_e32 [[DST:v[0-9]+]], {{s[0-9]+, v[0-9]+}}
+; SI: v_bfi_b32 {{v[0-9]+}}, [[DST]], {{s[0-9]+, v[0-9]+}}
define void @bfi_sha256_ma(i32 addrspace(1)* %out, i32 %x, i32 %y, i32 %z) {
entry:
diff --git a/test/CodeGen/R600/bitcast.ll b/test/CodeGen/R600/bitcast.ll
index 725d5ba..1ba64af 100644
--- a/test/CodeGen/R600/bitcast.ll
+++ b/test/CodeGen/R600/bitcast.ll
@@ -1,4 +1,5 @@
-; RUN: llc -march=r600 -mcpu=SI -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
+; RUN: llc -march=amdgcn -mcpu=SI -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
+; RUN: llc -march=amdgcn -mcpu=tonga -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
; This test just checks that the compiler doesn't crash.
diff --git a/test/CodeGen/R600/bswap.ll b/test/CodeGen/R600/bswap.ll
index 1c5a0c6..e93543d 100644
--- a/test/CodeGen/R600/bswap.ll
+++ b/test/CodeGen/R600/bswap.ll
@@ -1,4 +1,5 @@
-; RUN: llc -march=r600 -mcpu=SI -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
+; RUN: llc -march=amdgcn -mcpu=SI -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
+; RUN: llc -march=amdgcn -mcpu=tonga -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
declare i32 @llvm.bswap.i32(i32) nounwind readnone
declare <2 x i32> @llvm.bswap.v2i32(<2 x i32>) nounwind readnone
diff --git a/test/CodeGen/R600/build_vector.ll b/test/CodeGen/R600/build_vector.ll
index 9137eee..65eacf5 100644
--- a/test/CodeGen/R600/build_vector.ll
+++ b/test/CodeGen/R600/build_vector.ll
@@ -1,32 +1,33 @@
-; RUN: llc < %s -march=r600 -mcpu=redwood | FileCheck %s --check-prefix=R600-CHECK
-; RUN: llc < %s -march=r600 -mcpu=SI -verify-machineinstrs | FileCheck %s --check-prefix=SI-CHECK
+; RUN: llc < %s -march=r600 -mcpu=redwood | FileCheck %s --check-prefix=R600
+; RUN: llc < %s -march=amdgcn -mcpu=SI -verify-machineinstrs | FileCheck %s --check-prefix=SI
+; RUN: llc < %s -march=amdgcn -mcpu=tonga -verify-machineinstrs | FileCheck %s --check-prefix=SI
-; R600-CHECK: {{^}}build_vector2:
-; R600-CHECK: MOV
-; R600-CHECK: MOV
-; R600-CHECK-NOT: MOV
-; SI-CHECK: {{^}}build_vector2:
-; SI-CHECK-DAG: v_mov_b32_e32 v[[X:[0-9]]], 5
-; SI-CHECK-DAG: v_mov_b32_e32 v[[Y:[0-9]]], 6
-; SI-CHECK: buffer_store_dwordx2 v{{\[}}[[X]]:[[Y]]{{\]}}
+; R600: {{^}}build_vector2:
+; R600: MOV
+; R600: MOV
+; R600-NOT: MOV
+; SI: {{^}}build_vector2:
+; SI-DAG: v_mov_b32_e32 v[[X:[0-9]]], 5
+; SI-DAG: v_mov_b32_e32 v[[Y:[0-9]]], 6
+; SI: buffer_store_dwordx2 v{{\[}}[[X]]:[[Y]]{{\]}}
define void @build_vector2 (<2 x i32> addrspace(1)* %out) {
entry:
store <2 x i32> <i32 5, i32 6>, <2 x i32> addrspace(1)* %out
ret void
}
-; R600-CHECK: {{^}}build_vector4:
-; R600-CHECK: MOV
-; R600-CHECK: MOV
-; R600-CHECK: MOV
-; R600-CHECK: MOV
-; R600-CHECK-NOT: MOV
-; SI-CHECK: {{^}}build_vector4:
-; SI-CHECK-DAG: v_mov_b32_e32 v[[X:[0-9]]], 5
-; SI-CHECK-DAG: v_mov_b32_e32 v[[Y:[0-9]]], 6
-; SI-CHECK-DAG: v_mov_b32_e32 v[[Z:[0-9]]], 7
-; SI-CHECK-DAG: v_mov_b32_e32 v[[W:[0-9]]], 8
-; SI-CHECK: buffer_store_dwordx4 v{{\[}}[[X]]:[[W]]{{\]}}
+; R600: {{^}}build_vector4:
+; R600: MOV
+; R600: MOV
+; R600: MOV
+; R600: MOV
+; R600-NOT: MOV
+; SI: {{^}}build_vector4:
+; SI-DAG: v_mov_b32_e32 v[[X:[0-9]]], 5
+; SI-DAG: v_mov_b32_e32 v[[Y:[0-9]]], 6
+; SI-DAG: v_mov_b32_e32 v[[Z:[0-9]]], 7
+; SI-DAG: v_mov_b32_e32 v[[W:[0-9]]], 8
+; SI: buffer_store_dwordx4 v{{\[}}[[X]]:[[W]]{{\]}}
define void @build_vector4 (<4 x i32> addrspace(1)* %out) {
entry:
store <4 x i32> <i32 5, i32 6, i32 7, i32 8>, <4 x i32> addrspace(1)* %out
diff --git a/test/CodeGen/R600/call.ll b/test/CodeGen/R600/call.ll
index 1448f04..6de51f1 100644
--- a/test/CodeGen/R600/call.ll
+++ b/test/CodeGen/R600/call.ll
@@ -1,4 +1,5 @@
-; RUN: not llc -march=r600 -mcpu=SI -verify-machineinstrs< %s 2>&1 | FileCheck %s
+; RUN: not llc -march=amdgcn -mcpu=SI -verify-machineinstrs < %s 2>&1 | FileCheck %s
+; RUN: not llc -march=amdgcn -mcpu=tonga -verify-machineinstrs < %s 2>&1 | FileCheck %s
; RUN: not llc -march=r600 -mcpu=cypress < %s 2>&1 | FileCheck %s
; CHECK: error: unsupported call to function external_function in test_call_external
diff --git a/test/CodeGen/R600/call_fs.ll b/test/CodeGen/R600/call_fs.ll
index 7df2240..db2cb6e 100644
--- a/test/CodeGen/R600/call_fs.ll
+++ b/test/CodeGen/R600/call_fs.ll
@@ -1,13 +1,13 @@
-; RUN: llc < %s -march=r600 -mcpu=redwood -show-mc-encoding -o - | FileCheck --check-prefix=EG-CHECK %s
-; RUN: llc < %s -march=r600 -mcpu=rv710 -show-mc-encoding -o - | FileCheck --check-prefix=R600-CHECK %s
+; RUN: llc < %s -march=r600 -mcpu=redwood -show-mc-encoding -o - | FileCheck --check-prefix=EG %s
+; RUN: llc < %s -march=r600 -mcpu=rv710 -show-mc-encoding -o - | FileCheck --check-prefix=R600 %s
-; EG-CHECK: {{^}}call_fs:
-; EG-CHECK: .long 257
-; EG-CHECK: CALL_FS ; encoding: [0x00,0x00,0x00,0x00,0x00,0x00,0xc0,0x84]
-; R600-CHECK: {{^}}call_fs:
-; R600-CHECK: .long 257
-; R600-CHECK:CALL_FS ; encoding: [0x00,0x00,0x00,0x00,0x00,0x00,0x80,0x89]
+; EG: {{^}}call_fs:
+; EG: .long 257
+; EG: CALL_FS ; encoding: [0x00,0x00,0x00,0x00,0x00,0x00,0xc0,0x84]
+; R600: {{^}}call_fs:
+; R600: .long 257
+; R600: CALL_FS ; encoding: [0x00,0x00,0x00,0x00,0x00,0x00,0x80,0x89]
define void @call_fs() #0 {
diff --git a/test/CodeGen/R600/cf_end.ll b/test/CodeGen/R600/cf_end.ll
index 138004d..c74ee22 100644
--- a/test/CodeGen/R600/cf_end.ll
+++ b/test/CodeGen/R600/cf_end.ll
@@ -1,9 +1,9 @@
-; RUN: llc < %s -march=r600 -mcpu=redwood --show-mc-encoding | FileCheck --check-prefix=EG-CHECK %s
-; RUN: llc < %s -march=r600 -mcpu=caicos --show-mc-encoding | FileCheck --check-prefix=EG-CHECK %s
-; RUN: llc < %s -march=r600 -mcpu=cayman --show-mc-encoding | FileCheck --check-prefix=CM-CHECK %s
+; RUN: llc < %s -march=r600 -mcpu=redwood --show-mc-encoding | FileCheck --check-prefix=EG %s
+; RUN: llc < %s -march=r600 -mcpu=caicos --show-mc-encoding | FileCheck --check-prefix=EG %s
+; RUN: llc < %s -march=r600 -mcpu=cayman --show-mc-encoding | FileCheck --check-prefix=CM %s
-; EG-CHECK: CF_END ; encoding: [0x00,0x00,0x00,0x00,0x00,0x00,0x20,0x80]
-; CM-CHECK: CF_END ; encoding: [0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x88]
+; EG: CF_END ; encoding: [0x00,0x00,0x00,0x00,0x00,0x00,0x20,0x80]
+; CM: CF_END ; encoding: [0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x88]
define void @eop() {
ret void
}
diff --git a/test/CodeGen/R600/codegen-prepare-addrmode-sext.ll b/test/CodeGen/R600/codegen-prepare-addrmode-sext.ll
index b42b904..e16a397 100644
--- a/test/CodeGen/R600/codegen-prepare-addrmode-sext.ll
+++ b/test/CodeGen/R600/codegen-prepare-addrmode-sext.ll
@@ -1,5 +1,5 @@
; RUN: opt -codegenprepare -S -o - %s | FileCheck --check-prefix=OPT %s
-; RUN: llc < %s -march=r600 -mcpu=verde -verify-machineinstrs | FileCheck --check-prefix=SI-LLC %s
+; RUN: llc < %s -march=amdgcn -mcpu=verde -verify-machineinstrs | FileCheck --check-prefix=SI-LLC %s
target datalayout = "e-p:32:32-p1:64:64-p2:64:64-p3:32:32-p4:32:32-p5:64:64-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128-v192:256-v256:256-v512:512-v1024:1024-v2048:2048-n32:64"
target triple = "r600--"
diff --git a/test/CodeGen/R600/commute_modifiers.ll b/test/CodeGen/R600/commute_modifiers.ll
index 30c8067..6fddb6d 100644
--- a/test/CodeGen/R600/commute_modifiers.ll
+++ b/test/CodeGen/R600/commute_modifiers.ll
@@ -1,4 +1,4 @@
-; RUN: llc -march=r600 -mcpu=SI -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
+; RUN: llc -march=amdgcn -mcpu=SI -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
declare i32 @llvm.r600.read.tidig.x() #1
declare float @llvm.fabs.f32(float) #1
@@ -65,7 +65,7 @@ define void @commute_add_lit_fabs_f32(float addrspace(1)* %out, float addrspace(
; FUNC-LABEL: @commute_add_fabs_f32
; SI-DAG: buffer_load_dword [[X:v[0-9]+]], {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, 0 addr64{{$}}
-; SI-DAG: buffer_load_dword [[Y:v[0-9]+]], {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, 0 addr64 offset:0x4
+; SI-DAG: buffer_load_dword [[Y:v[0-9]+]], {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, 0 addr64 offset:4
; SI: v_add_f32_e64 [[REG:v[0-9]+]], [[X]], |[[Y]]|
; SI-NEXT: buffer_store_dword [[REG]]
define void @commute_add_fabs_f32(float addrspace(1)* %out, float addrspace(1)* %in) #0 {
@@ -82,7 +82,7 @@ define void @commute_add_fabs_f32(float addrspace(1)* %out, float addrspace(1)*
; FUNC-LABEL: @commute_mul_fneg_f32
; SI-DAG: buffer_load_dword [[X:v[0-9]+]], {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, 0 addr64{{$}}
-; SI-DAG: buffer_load_dword [[Y:v[0-9]+]], {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, 0 addr64 offset:0x4
+; SI-DAG: buffer_load_dword [[Y:v[0-9]+]], {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, 0 addr64 offset:4
; SI: v_mul_f32_e64 [[REG:v[0-9]+]], [[X]], -[[Y]]
; SI-NEXT: buffer_store_dword [[REG]]
define void @commute_mul_fneg_f32(float addrspace(1)* %out, float addrspace(1)* %in) #0 {
@@ -99,7 +99,7 @@ define void @commute_mul_fneg_f32(float addrspace(1)* %out, float addrspace(1)*
; FUNC-LABEL: @commute_mul_fabs_fneg_f32
; SI-DAG: buffer_load_dword [[X:v[0-9]+]], {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, 0 addr64{{$}}
-; SI-DAG: buffer_load_dword [[Y:v[0-9]+]], {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, 0 addr64 offset:0x4
+; SI-DAG: buffer_load_dword [[Y:v[0-9]+]], {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, 0 addr64 offset:4
; SI: v_mul_f32_e64 [[REG:v[0-9]+]], [[X]], -|[[Y]]|
; SI-NEXT: buffer_store_dword [[REG]]
define void @commute_mul_fabs_fneg_f32(float addrspace(1)* %out, float addrspace(1)* %in) #0 {
@@ -118,7 +118,7 @@ define void @commute_mul_fabs_fneg_f32(float addrspace(1)* %out, float addrspace
; There's no reason to commute this.
; FUNC-LABEL: @commute_mul_fabs_x_fabs_y_f32
; SI-DAG: buffer_load_dword [[X:v[0-9]+]], {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, 0 addr64{{$}}
-; SI-DAG: buffer_load_dword [[Y:v[0-9]+]], {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, 0 addr64 offset:0x4
+; SI-DAG: buffer_load_dword [[Y:v[0-9]+]], {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, 0 addr64 offset:4
; SI: v_mul_f32_e64 [[REG:v[0-9]+]], |[[X]]|, |[[Y]]|
; SI-NEXT: buffer_store_dword [[REG]]
define void @commute_mul_fabs_x_fabs_y_f32(float addrspace(1)* %out, float addrspace(1)* %in) #0 {
@@ -136,7 +136,7 @@ define void @commute_mul_fabs_x_fabs_y_f32(float addrspace(1)* %out, float addrs
; FUNC-LABEL: @commute_mul_fabs_x_fneg_fabs_y_f32
; SI-DAG: buffer_load_dword [[X:v[0-9]+]], {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, 0 addr64{{$}}
-; SI-DAG: buffer_load_dword [[Y:v[0-9]+]], {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, 0 addr64 offset:0x4
+; SI-DAG: buffer_load_dword [[Y:v[0-9]+]], {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, 0 addr64 offset:4
; SI: v_mul_f32_e64 [[REG:v[0-9]+]], |[[X]]|, -|[[Y]]|
; SI-NEXT: buffer_store_dword [[REG]]
define void @commute_mul_fabs_x_fneg_fabs_y_f32(float addrspace(1)* %out, float addrspace(1)* %in) #0 {
@@ -158,7 +158,7 @@ define void @commute_mul_fabs_x_fneg_fabs_y_f32(float addrspace(1)* %out, float
; SI-LABEL: {{^}}fma_a_2.0_neg_b_f32
; SI-DAG: buffer_load_dword [[R1:v[0-9]+]], {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, 0 addr64{{$}}
-; SI-DAG: buffer_load_dword [[R2:v[0-9]+]], {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, 0 addr64 offset:0x4
+; SI-DAG: buffer_load_dword [[R2:v[0-9]+]], {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, 0 addr64 offset:4
; SI: v_fma_f32 [[RESULT:v[0-9]+]], 2.0, [[R1]], |[[R2]]|
; SI: buffer_store_dword [[RESULT]]
define void @fma_a_2.0_neg_b_f32(float addrspace(1)* %out, float addrspace(1)* %in) {
diff --git a/test/CodeGen/R600/concat_vectors.ll b/test/CodeGen/R600/concat_vectors.ll
index 19992eb..6b3fae3 100644
--- a/test/CodeGen/R600/concat_vectors.ll
+++ b/test/CodeGen/R600/concat_vectors.ll
@@ -1,4 +1,5 @@
-; RUN: llc -march=r600 -mcpu=SI -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
+; RUN: llc -march=amdgcn -mcpu=SI -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
+; RUN: llc -march=amdgcn -mcpu=tonga -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
; FUNC-LABEL: {{^}}test_concat_v1i32:
; 0x80f000 is the high 32 bits of the resource descriptor used by MUBUF
diff --git a/test/CodeGen/R600/copy-illegal-type.ll b/test/CodeGen/R600/copy-illegal-type.ll
index 66ea88e..56c43d2 100644
--- a/test/CodeGen/R600/copy-illegal-type.ll
+++ b/test/CodeGen/R600/copy-illegal-type.ll
@@ -1,4 +1,5 @@
-; RUN: llc -march=r600 -mcpu=tahiti < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
+; RUN: llc -march=amdgcn -mcpu=tahiti < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
+; RUN: llc -march=amdgcn -mcpu=tonga < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
; FUNC-LABEL: {{^}}test_copy_v4i8:
; SI: buffer_load_dword [[REG:v[0-9]+]]
diff --git a/test/CodeGen/R600/copy-to-reg.ll b/test/CodeGen/R600/copy-to-reg.ll
index f90ee78..9c1de73 100644
--- a/test/CodeGen/R600/copy-to-reg.ll
+++ b/test/CodeGen/R600/copy-to-reg.ll
@@ -1,4 +1,5 @@
-; RUN: llc -march=r600 -mcpu=SI -mattr=-promote-alloca -verify-machineinstrs < %s
+; RUN: llc -march=amdgcn -mcpu=SI -mattr=-promote-alloca -verify-machineinstrs < %s
+; RUN: llc -march=amdgcn -mcpu=tonga -mattr=-promote-alloca -verify-machineinstrs < %s
; Test that CopyToReg instructions don't have non-register operands prior
; to being emitted.
diff --git a/test/CodeGen/R600/ctlz_zero_undef.ll b/test/CodeGen/R600/ctlz_zero_undef.ll
index f699127..1a4317b 100644
--- a/test/CodeGen/R600/ctlz_zero_undef.ll
+++ b/test/CodeGen/R600/ctlz_zero_undef.ll
@@ -1,4 +1,5 @@
-; RUN: llc -march=r600 -mcpu=SI -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
+; RUN: llc -march=amdgcn -mcpu=SI -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
+; RUN: llc -march=amdgcn -mcpu=tonga -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
; RUN: llc -march=r600 -mcpu=cypress -verify-machineinstrs < %s | FileCheck -check-prefix=EG -check-prefix=FUNC %s
declare i32 @llvm.ctlz.i32(i32, i1) nounwind readnone
diff --git a/test/CodeGen/R600/ctpop.ll b/test/CodeGen/R600/ctpop.ll
index 5cfdaef..6f7d92b 100644
--- a/test/CodeGen/R600/ctpop.ll
+++ b/test/CodeGen/R600/ctpop.ll
@@ -1,4 +1,5 @@
-; RUN: llc -march=r600 -mcpu=SI -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
+; RUN: llc -march=amdgcn -mcpu=SI -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=FUNC -check-prefix=SI %s
+; RUN: llc -march=amdgcn -mcpu=tonga -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=FUNC -check-prefix=VI %s
; RUN: llc -march=r600 -mcpu=cypress -verify-machineinstrs < %s | FileCheck -check-prefix=EG -check-prefix=FUNC %s
declare i32 @llvm.ctpop.i32(i32) nounwind readnone
@@ -8,11 +9,11 @@ declare <8 x i32> @llvm.ctpop.v8i32(<8 x i32>) nounwind readnone
declare <16 x i32> @llvm.ctpop.v16i32(<16 x i32>) nounwind readnone
; FUNC-LABEL: {{^}}s_ctpop_i32:
-; SI: s_load_dword [[SVAL:s[0-9]+]],
-; SI: s_bcnt1_i32_b32 [[SRESULT:s[0-9]+]], [[SVAL]]
-; SI: v_mov_b32_e32 [[VRESULT:v[0-9]+]], [[SRESULT]]
-; SI: buffer_store_dword [[VRESULT]],
-; SI: s_endpgm
+; GCN: s_load_dword [[SVAL:s[0-9]+]],
+; GCN: s_bcnt1_i32_b32 [[SRESULT:s[0-9]+]], [[SVAL]]
+; GCN: v_mov_b32_e32 [[VRESULT:v[0-9]+]], [[SRESULT]]
+; GCN: buffer_store_dword [[VRESULT]],
+; GCN: s_endpgm
; EG: BCNT_INT
define void @s_ctpop_i32(i32 addrspace(1)* noalias %out, i32 %val) nounwind {
@@ -23,11 +24,10 @@ define void @s_ctpop_i32(i32 addrspace(1)* noalias %out, i32 %val) nounwind {
; XXX - Why 0 in register?
; FUNC-LABEL: {{^}}v_ctpop_i32:
-; SI: buffer_load_dword [[VAL:v[0-9]+]],
-; SI: v_mov_b32_e32 [[VZERO:v[0-9]+]], 0
-; SI: v_bcnt_u32_b32_e32 [[RESULT:v[0-9]+]], [[VAL]], [[VZERO]]
-; SI: buffer_store_dword [[RESULT]],
-; SI: s_endpgm
+; GCN: buffer_load_dword [[VAL:v[0-9]+]],
+; GCN: v_bcnt_u32_b32_e64 [[RESULT:v[0-9]+]], [[VAL]], 0
+; GCN: buffer_store_dword [[RESULT]],
+; GCN: s_endpgm
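+; The VOP3 (_e64) encoding accepts an inline constant for src1, so the zero
+; no longer has to be copied into a VGPR first; that copy is what the old
+; v_mov_b32/_e32 sequence was doing.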
; EG: BCNT_INT
define void @v_ctpop_i32(i32 addrspace(1)* noalias %out, i32 addrspace(1)* noalias %in) nounwind {
@@ -38,13 +38,13 @@ define void @v_ctpop_i32(i32 addrspace(1)* noalias %out, i32 addrspace(1)* noali
}
; FUNC-LABEL: {{^}}v_ctpop_add_chain_i32:
-; SI: buffer_load_dword [[VAL0:v[0-9]+]],
-; SI: buffer_load_dword [[VAL1:v[0-9]+]],
-; SI: v_mov_b32_e32 [[VZERO:v[0-9]+]], 0
-; SI: v_bcnt_u32_b32_e32 [[MIDRESULT:v[0-9]+]], [[VAL1]], [[VZERO]]
-; SI-NEXT: v_bcnt_u32_b32_e32 [[RESULT:v[0-9]+]], [[VAL0]], [[MIDRESULT]]
-; SI: buffer_store_dword [[RESULT]],
-; SI: s_endpgm
+; GCN: buffer_load_dword [[VAL1:v[0-9]+]],
+; GCN: buffer_load_dword [[VAL0:v[0-9]+]],
+; GCN: v_bcnt_u32_b32_e64 [[MIDRESULT:v[0-9]+]], [[VAL1]], 0
+; SI: v_bcnt_u32_b32_e32 [[RESULT:v[0-9]+]], [[VAL0]], [[MIDRESULT]]
+; VI: v_bcnt_u32_b32_e64 [[RESULT:v[0-9]+]], [[VAL0]], [[MIDRESULT]]
+; GCN: buffer_store_dword [[RESULT]],
+; GCN: s_endpgm
; EG: BCNT_INT
; EG: BCNT_INT
@@ -59,11 +59,11 @@ define void @v_ctpop_add_chain_i32(i32 addrspace(1)* noalias %out, i32 addrspace
}
; FUNC-LABEL: {{^}}v_ctpop_add_sgpr_i32:
-; SI: buffer_load_dword [[VAL0:v[0-9]+]],
-; SI-NEXT: s_waitcnt
-; SI-NEXT: v_bcnt_u32_b32_e64 [[RESULT:v[0-9]+]], [[VAL0]], s{{[0-9]+}}
-; SI-NEXT: buffer_store_dword [[RESULT]],
-; SI: s_endpgm
+; GCN: buffer_load_dword [[VAL0:v[0-9]+]],
+; GCN-NEXT: s_waitcnt
+; GCN-NEXT: v_bcnt_u32_b32_e64 [[RESULT:v[0-9]+]], [[VAL0]], s{{[0-9]+}}
+; GCN-NEXT: buffer_store_dword [[RESULT]],
+; GCN: s_endpgm
define void @v_ctpop_add_sgpr_i32(i32 addrspace(1)* noalias %out, i32 addrspace(1)* noalias %in0, i32 addrspace(1)* noalias %in1, i32 %sval) nounwind {
%val0 = load i32 addrspace(1)* %in0, align 4
%ctpop0 = call i32 @llvm.ctpop.i32(i32 %val0) nounwind readnone
@@ -73,9 +73,9 @@ define void @v_ctpop_add_sgpr_i32(i32 addrspace(1)* noalias %out, i32 addrspace(
}
; FUNC-LABEL: {{^}}v_ctpop_v2i32:
-; SI: v_bcnt_u32_b32_e32
-; SI: v_bcnt_u32_b32_e32
-; SI: s_endpgm
+; GCN: v_bcnt_u32_b32_e64
+; GCN: v_bcnt_u32_b32_e64
+; GCN: s_endpgm
; EG: BCNT_INT
; EG: BCNT_INT
@@ -87,11 +87,11 @@ define void @v_ctpop_v2i32(<2 x i32> addrspace(1)* noalias %out, <2 x i32> addrs
}
; FUNC-LABEL: {{^}}v_ctpop_v4i32:
-; SI: v_bcnt_u32_b32_e32
-; SI: v_bcnt_u32_b32_e32
-; SI: v_bcnt_u32_b32_e32
-; SI: v_bcnt_u32_b32_e32
-; SI: s_endpgm
+; GCN: v_bcnt_u32_b32_e64
+; GCN: v_bcnt_u32_b32_e64
+; GCN: v_bcnt_u32_b32_e64
+; GCN: v_bcnt_u32_b32_e64
+; GCN: s_endpgm
; EG: BCNT_INT
; EG: BCNT_INT
@@ -105,15 +105,15 @@ define void @v_ctpop_v4i32(<4 x i32> addrspace(1)* noalias %out, <4 x i32> addrs
}
; FUNC-LABEL: {{^}}v_ctpop_v8i32:
-; SI: v_bcnt_u32_b32_e32
-; SI: v_bcnt_u32_b32_e32
-; SI: v_bcnt_u32_b32_e32
-; SI: v_bcnt_u32_b32_e32
-; SI: v_bcnt_u32_b32_e32
-; SI: v_bcnt_u32_b32_e32
-; SI: v_bcnt_u32_b32_e32
-; SI: v_bcnt_u32_b32_e32
-; SI: s_endpgm
+; GCN: v_bcnt_u32_b32_e64
+; GCN: v_bcnt_u32_b32_e64
+; GCN: v_bcnt_u32_b32_e64
+; GCN: v_bcnt_u32_b32_e64
+; GCN: v_bcnt_u32_b32_e64
+; GCN: v_bcnt_u32_b32_e64
+; GCN: v_bcnt_u32_b32_e64
+; GCN: v_bcnt_u32_b32_e64
+; GCN: s_endpgm
; EG: BCNT_INT
; EG: BCNT_INT
@@ -131,23 +131,23 @@ define void @v_ctpop_v8i32(<8 x i32> addrspace(1)* noalias %out, <8 x i32> addrs
}
; FUNC-LABEL: {{^}}v_ctpop_v16i32:
-; SI: v_bcnt_u32_b32_e32
-; SI: v_bcnt_u32_b32_e32
-; SI: v_bcnt_u32_b32_e32
-; SI: v_bcnt_u32_b32_e32
-; SI: v_bcnt_u32_b32_e32
-; SI: v_bcnt_u32_b32_e32
-; SI: v_bcnt_u32_b32_e32
-; SI: v_bcnt_u32_b32_e32
-; SI: v_bcnt_u32_b32_e32
-; SI: v_bcnt_u32_b32_e32
-; SI: v_bcnt_u32_b32_e32
-; SI: v_bcnt_u32_b32_e32
-; SI: v_bcnt_u32_b32_e32
-; SI: v_bcnt_u32_b32_e32
-; SI: v_bcnt_u32_b32_e32
-; SI: v_bcnt_u32_b32_e32
-; SI: s_endpgm
+; GCN: v_bcnt_u32_b32_e64
+; GCN: v_bcnt_u32_b32_e64
+; GCN: v_bcnt_u32_b32_e64
+; GCN: v_bcnt_u32_b32_e64
+; GCN: v_bcnt_u32_b32_e64
+; GCN: v_bcnt_u32_b32_e64
+; GCN: v_bcnt_u32_b32_e64
+; GCN: v_bcnt_u32_b32_e64
+; GCN: v_bcnt_u32_b32_e64
+; GCN: v_bcnt_u32_b32_e64
+; GCN: v_bcnt_u32_b32_e64
+; GCN: v_bcnt_u32_b32_e64
+; GCN: v_bcnt_u32_b32_e64
+; GCN: v_bcnt_u32_b32_e64
+; GCN: v_bcnt_u32_b32_e64
+; GCN: v_bcnt_u32_b32_e64
+; GCN: s_endpgm
; EG: BCNT_INT
; EG: BCNT_INT
@@ -173,10 +173,10 @@ define void @v_ctpop_v16i32(<16 x i32> addrspace(1)* noalias %out, <16 x i32> ad
}
; FUNC-LABEL: {{^}}v_ctpop_i32_add_inline_constant:
-; SI: buffer_load_dword [[VAL:v[0-9]+]],
-; SI: v_bcnt_u32_b32_e64 [[RESULT:v[0-9]+]], [[VAL]], 4
-; SI: buffer_store_dword [[RESULT]],
-; SI: s_endpgm
+; GCN: buffer_load_dword [[VAL:v[0-9]+]],
+; GCN: v_bcnt_u32_b32_e64 [[RESULT:v[0-9]+]], [[VAL]], 4
+; GCN: buffer_store_dword [[RESULT]],
+; GCN: s_endpgm
; EG: BCNT_INT
define void @v_ctpop_i32_add_inline_constant(i32 addrspace(1)* noalias %out, i32 addrspace(1)* noalias %in) nounwind {
@@ -188,10 +188,10 @@ define void @v_ctpop_i32_add_inline_constant(i32 addrspace(1)* noalias %out, i32
}
; FUNC-LABEL: {{^}}v_ctpop_i32_add_inline_constant_inv:
-; SI: buffer_load_dword [[VAL:v[0-9]+]],
-; SI: v_bcnt_u32_b32_e64 [[RESULT:v[0-9]+]], [[VAL]], 4
-; SI: buffer_store_dword [[RESULT]],
-; SI: s_endpgm
+; GCN: buffer_load_dword [[VAL:v[0-9]+]],
+; GCN: v_bcnt_u32_b32_e64 [[RESULT:v[0-9]+]], [[VAL]], 4
+; GCN: buffer_store_dword [[RESULT]],
+; GCN: s_endpgm
; EG: BCNT_INT
define void @v_ctpop_i32_add_inline_constant_inv(i32 addrspace(1)* noalias %out, i32 addrspace(1)* noalias %in) nounwind {
@@ -203,11 +203,12 @@ define void @v_ctpop_i32_add_inline_constant_inv(i32 addrspace(1)* noalias %out,
}
; FUNC-LABEL: {{^}}v_ctpop_i32_add_literal:
-; SI: buffer_load_dword [[VAL:v[0-9]+]],
-; SI: v_mov_b32_e32 [[LIT:v[0-9]+]], 0x1869f
+; GCN: buffer_load_dword [[VAL:v[0-9]+]],
+; GCN: v_mov_b32_e32 [[LIT:v[0-9]+]], 0x1869f
; SI: v_bcnt_u32_b32_e32 [[RESULT:v[0-9]+]], [[VAL]], [[LIT]]
-; SI: buffer_store_dword [[RESULT]],
-; SI: s_endpgm
+; VI: v_bcnt_u32_b32_e64 [[RESULT:v[0-9]+]], [[VAL]], [[LIT]]
+; GCN: buffer_store_dword [[RESULT]],
+; GCN: s_endpgm
define void @v_ctpop_i32_add_literal(i32 addrspace(1)* noalias %out, i32 addrspace(1)* noalias %in) nounwind {
%val = load i32 addrspace(1)* %in, align 4
%ctpop = call i32 @llvm.ctpop.i32(i32 %val) nounwind readnone
@@ -217,11 +218,11 @@ define void @v_ctpop_i32_add_literal(i32 addrspace(1)* noalias %out, i32 addrspa
}
; FUNC-LABEL: {{^}}v_ctpop_i32_add_var:
-; SI-DAG: buffer_load_dword [[VAL:v[0-9]+]],
-; SI-DAG: s_load_dword [[VAR:s[0-9]+]],
-; SI: v_bcnt_u32_b32_e64 [[RESULT:v[0-9]+]], [[VAL]], [[VAR]]
-; SI: buffer_store_dword [[RESULT]],
-; SI: s_endpgm
+; GCN-DAG: buffer_load_dword [[VAL:v[0-9]+]],
+; GCN-DAG: s_load_dword [[VAR:s[0-9]+]],
+; GCN: v_bcnt_u32_b32_e64 [[RESULT:v[0-9]+]], [[VAL]], [[VAR]]
+; GCN: buffer_store_dword [[RESULT]],
+; GCN: s_endpgm
; EG: BCNT_INT
define void @v_ctpop_i32_add_var(i32 addrspace(1)* noalias %out, i32 addrspace(1)* noalias %in, i32 %const) nounwind {
@@ -233,11 +234,11 @@ define void @v_ctpop_i32_add_var(i32 addrspace(1)* noalias %out, i32 addrspace(1
}
; FUNC-LABEL: {{^}}v_ctpop_i32_add_var_inv:
-; SI-DAG: buffer_load_dword [[VAL:v[0-9]+]],
-; SI-DAG: s_load_dword [[VAR:s[0-9]+]],
-; SI: v_bcnt_u32_b32_e64 [[RESULT:v[0-9]+]], [[VAL]], [[VAR]]
-; SI: buffer_store_dword [[RESULT]],
-; SI: s_endpgm
+; GCN-DAG: buffer_load_dword [[VAL:v[0-9]+]],
+; GCN-DAG: s_load_dword [[VAR:s[0-9]+]],
+; GCN: v_bcnt_u32_b32_e64 [[RESULT:v[0-9]+]], [[VAL]], [[VAR]]
+; GCN: buffer_store_dword [[RESULT]],
+; GCN: s_endpgm
; EG: BCNT_INT
define void @v_ctpop_i32_add_var_inv(i32 addrspace(1)* noalias %out, i32 addrspace(1)* noalias %in, i32 %const) nounwind {
@@ -249,11 +250,12 @@ define void @v_ctpop_i32_add_var_inv(i32 addrspace(1)* noalias %out, i32 addrspa
}
; FUNC-LABEL: {{^}}v_ctpop_i32_add_vvar_inv:
-; SI-DAG: buffer_load_dword [[VAL:v[0-9]+]], s[{{[0-9]+:[0-9]+}}], {{0$}}
-; SI-DAG: buffer_load_dword [[VAR:v[0-9]+]], s[{{[0-9]+:[0-9]+}}], 0 offset:0x10
+; GCN-DAG: buffer_load_dword [[VAL:v[0-9]+]], s[{{[0-9]+:[0-9]+}}], {{0$}}
+; GCN-DAG: buffer_load_dword [[VAR:v[0-9]+]], s[{{[0-9]+:[0-9]+}}], 0 offset:16
; SI: v_bcnt_u32_b32_e32 [[RESULT:v[0-9]+]], [[VAL]], [[VAR]]
-; SI: buffer_store_dword [[RESULT]],
-; SI: s_endpgm
+; VI: v_bcnt_u32_b32_e64 [[RESULT:v[0-9]+]], [[VAL]], [[VAR]]
+; GCN: buffer_store_dword [[RESULT]],
+; GCN: s_endpgm
; EG: BCNT_INT
define void @v_ctpop_i32_add_vvar_inv(i32 addrspace(1)* noalias %out, i32 addrspace(1)* noalias %in, i32 addrspace(1)* noalias %constptr) nounwind {
@@ -271,10 +273,11 @@ define void @v_ctpop_i32_add_vvar_inv(i32 addrspace(1)* noalias %out, i32 addrsp
; FUNC-LABEL: {{^}}ctpop_i32_in_br:
; SI: s_load_dword [[VAL:s[0-9]+]], s[{{[0-9]+:[0-9]+}}], 0xd
-; SI: s_bcnt1_i32_b32 [[SRESULT:s[0-9]+]], [[VAL]]
-; SI: v_mov_b32_e32 [[RESULT]], [[SRESULT]]
-; SI: buffer_store_dword [[RESULT]],
-; SI: s_endpgm
+; VI: s_load_dword [[VAL:s[0-9]+]], s[{{[0-9]+:[0-9]+}}], 0x34
+; GCN: s_bcnt1_i32_b32 [[SRESULT:s[0-9]+]], [[VAL]]
+; GCN: v_mov_b32_e32 [[RESULT:v[0-9]+]], [[SRESULT]]
+; GCN: buffer_store_dword [[RESULT]],
+; GCN: s_endpgm
; EG: BCNT_INT
define void @ctpop_i32_in_br(i32 addrspace(1)* %out, i32 addrspace(1)* %in, i32 %ctpop_arg, i32 %cond) {
entry:
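
Two VI-driven changes drive the check updates in this file. The VOP3 (_e64) form of v_bcnt_u32_b32 accepts an inline constant for the second source, so the v_mov_b32 that previously materialized the 0 disappears; the VOP2 (_e32) form only takes a VGPR there. And on VI the checks expect _e64 even for a VGPR operand, consistent with the instruction having no VOP2 encoding on that target (inferred from the diff, not checked against the ISA manual). A sketch of the before/after, with illustrative register numbers:

    v_mov_b32_e32 v1, 0             ; old SI code: put the 0 in a VGPR first
    v_bcnt_u32_b32_e32 v2, v0, v1   ; VOP2: src1 must be a VGPR

    v_bcnt_u32_b32_e64 v2, v0, 0    ; VOP3: inline constant allowed directly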
diff --git a/test/CodeGen/R600/ctpop64.ll b/test/CodeGen/R600/ctpop64.ll
index 2efac8f..8bcd818 100644
--- a/test/CodeGen/R600/ctpop64.ll
+++ b/test/CodeGen/R600/ctpop64.ll
@@ -1,4 +1,5 @@
-; RUN: llc -march=r600 -mcpu=SI -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
+; RUN: llc -march=amdgcn -mcpu=SI -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=GCN -check-prefix=FUNC %s
+; RUN: llc -march=amdgcn -mcpu=tonga -verify-machineinstrs < %s | FileCheck -check-prefix=VI -check-prefix=GCN -check-prefix=FUNC %s
declare i64 @llvm.ctpop.i64(i64) nounwind readnone
declare <2 x i64> @llvm.ctpop.v2i64(<2 x i64>) nounwind readnone
@@ -8,10 +9,11 @@ declare <16 x i64> @llvm.ctpop.v16i64(<16 x i64>) nounwind readnone
; FUNC-LABEL: {{^}}s_ctpop_i64:
; SI: s_load_dwordx2 [[SVAL:s\[[0-9]+:[0-9]+\]]], s{{\[[0-9]+:[0-9]+\]}}, 0xb
-; SI: s_bcnt1_i32_b64 [[SRESULT:s[0-9]+]], [[SVAL]]
-; SI: v_mov_b32_e32 [[VRESULT:v[0-9]+]], [[SRESULT]]
-; SI: buffer_store_dword [[VRESULT]],
-; SI: s_endpgm
+; VI: s_load_dwordx2 [[SVAL:s\[[0-9]+:[0-9]+\]]], s{{\[[0-9]+:[0-9]+\]}}, 0x2c
+; GCN: s_bcnt1_i32_b64 [[SRESULT:s[0-9]+]], [[SVAL]]
+; GCN: v_mov_b32_e32 [[VRESULT:v[0-9]+]], [[SRESULT]]
+; GCN: buffer_store_dword [[VRESULT]],
+; GCN: s_endpgm
define void @s_ctpop_i64(i32 addrspace(1)* noalias %out, i64 %val) nounwind {
%ctpop = call i64 @llvm.ctpop.i64(i64 %val) nounwind readnone
%truncctpop = trunc i64 %ctpop to i32
@@ -20,12 +22,12 @@ define void @s_ctpop_i64(i32 addrspace(1)* noalias %out, i64 %val) nounwind {
}
; FUNC-LABEL: {{^}}v_ctpop_i64:
-; SI: buffer_load_dwordx2 v{{\[}}[[LOVAL:[0-9]+]]:[[HIVAL:[0-9]+]]{{\]}},
-; SI: v_mov_b32_e32 [[VZERO:v[0-9]+]], 0
-; SI: v_bcnt_u32_b32_e32 [[MIDRESULT:v[0-9]+]], v[[LOVAL]], [[VZERO]]
+; GCN: buffer_load_dwordx2 v{{\[}}[[LOVAL:[0-9]+]]:[[HIVAL:[0-9]+]]{{\]}},
+; GCN: v_bcnt_u32_b32_e64 [[MIDRESULT:v[0-9]+]], v[[LOVAL]], 0
; SI-NEXT: v_bcnt_u32_b32_e32 [[RESULT:v[0-9]+]], v[[HIVAL]], [[MIDRESULT]]
-; SI: buffer_store_dword [[RESULT]],
-; SI: s_endpgm
+; VI-NEXT: v_bcnt_u32_b32_e64 [[RESULT:v[0-9]+]], v[[HIVAL]], [[MIDRESULT]]
+; GCN: buffer_store_dword [[RESULT]],
+; GCN: s_endpgm
define void @v_ctpop_i64(i32 addrspace(1)* noalias %out, i64 addrspace(1)* noalias %in) nounwind {
%val = load i64 addrspace(1)* %in, align 8
%ctpop = call i64 @llvm.ctpop.i64(i64 %val) nounwind readnone
@@ -35,9 +37,9 @@ define void @v_ctpop_i64(i32 addrspace(1)* noalias %out, i64 addrspace(1)* noali
}
; FUNC-LABEL: {{^}}s_ctpop_v2i64:
-; SI: s_bcnt1_i32_b64
-; SI: s_bcnt1_i32_b64
-; SI: s_endpgm
+; GCN: s_bcnt1_i32_b64
+; GCN: s_bcnt1_i32_b64
+; GCN: s_endpgm
define void @s_ctpop_v2i64(<2 x i32> addrspace(1)* noalias %out, <2 x i64> %val) nounwind {
%ctpop = call <2 x i64> @llvm.ctpop.v2i64(<2 x i64> %val) nounwind readnone
%truncctpop = trunc <2 x i64> %ctpop to <2 x i32>
@@ -46,11 +48,11 @@ define void @s_ctpop_v2i64(<2 x i32> addrspace(1)* noalias %out, <2 x i64> %val)
}
; FUNC-LABEL: {{^}}s_ctpop_v4i64:
-; SI: s_bcnt1_i32_b64
-; SI: s_bcnt1_i32_b64
-; SI: s_bcnt1_i32_b64
-; SI: s_bcnt1_i32_b64
-; SI: s_endpgm
+; GCN: s_bcnt1_i32_b64
+; GCN: s_bcnt1_i32_b64
+; GCN: s_bcnt1_i32_b64
+; GCN: s_bcnt1_i32_b64
+; GCN: s_endpgm
define void @s_ctpop_v4i64(<4 x i32> addrspace(1)* noalias %out, <4 x i64> %val) nounwind {
%ctpop = call <4 x i64> @llvm.ctpop.v4i64(<4 x i64> %val) nounwind readnone
%truncctpop = trunc <4 x i64> %ctpop to <4 x i32>
@@ -59,11 +61,11 @@ define void @s_ctpop_v4i64(<4 x i32> addrspace(1)* noalias %out, <4 x i64> %val)
}
; FUNC-LABEL: {{^}}v_ctpop_v2i64:
-; SI: v_bcnt_u32_b32
-; SI: v_bcnt_u32_b32
-; SI: v_bcnt_u32_b32
-; SI: v_bcnt_u32_b32
-; SI: s_endpgm
+; GCN: v_bcnt_u32_b32
+; GCN: v_bcnt_u32_b32
+; GCN: v_bcnt_u32_b32
+; GCN: v_bcnt_u32_b32
+; GCN: s_endpgm
define void @v_ctpop_v2i64(<2 x i32> addrspace(1)* noalias %out, <2 x i64> addrspace(1)* noalias %in) nounwind {
%val = load <2 x i64> addrspace(1)* %in, align 16
%ctpop = call <2 x i64> @llvm.ctpop.v2i64(<2 x i64> %val) nounwind readnone
@@ -73,15 +75,15 @@ define void @v_ctpop_v2i64(<2 x i32> addrspace(1)* noalias %out, <2 x i64> addrs
}
; FUNC-LABEL: {{^}}v_ctpop_v4i64:
-; SI: v_bcnt_u32_b32
-; SI: v_bcnt_u32_b32
-; SI: v_bcnt_u32_b32
-; SI: v_bcnt_u32_b32
-; SI: v_bcnt_u32_b32
-; SI: v_bcnt_u32_b32
-; SI: v_bcnt_u32_b32
-; SI: v_bcnt_u32_b32
-; SI: s_endpgm
+; GCN: v_bcnt_u32_b32
+; GCN: v_bcnt_u32_b32
+; GCN: v_bcnt_u32_b32
+; GCN: v_bcnt_u32_b32
+; GCN: v_bcnt_u32_b32
+; GCN: v_bcnt_u32_b32
+; GCN: v_bcnt_u32_b32
+; GCN: v_bcnt_u32_b32
+; GCN: s_endpgm
define void @v_ctpop_v4i64(<4 x i32> addrspace(1)* noalias %out, <4 x i64> addrspace(1)* noalias %in) nounwind {
%val = load <4 x i64> addrspace(1)* %in, align 32
%ctpop = call <4 x i64> @llvm.ctpop.v4i64(<4 x i64> %val) nounwind readnone
@@ -95,11 +97,12 @@ define void @v_ctpop_v4i64(<4 x i32> addrspace(1)* noalias %out, <4 x i64> addrs
; FUNC-LABEL: {{^}}ctpop_i64_in_br:
; SI: s_load_dwordx2 s{{\[}}[[LOVAL:[0-9]+]]:[[HIVAL:[0-9]+]]{{\]}}, s[{{[0-9]+:[0-9]+}}], 0xd
-; SI: s_bcnt1_i32_b64 [[RESULT:s[0-9]+]], {{s\[}}[[LOVAL]]:[[HIVAL]]{{\]}}
-; SI: v_mov_b32_e32 v[[VLO:[0-9]+]], [[RESULT]]
-; SI: v_mov_b32_e32 v[[VHI:[0-9]+]], s[[HIVAL]]
-; SI: buffer_store_dwordx2 {{v\[}}[[VLO]]:[[VHI]]{{\]}}
-; SI: s_endpgm
+; VI: s_load_dwordx2 s{{\[}}[[LOVAL:[0-9]+]]:[[HIVAL:[0-9]+]]{{\]}}, s[{{[0-9]+:[0-9]+}}], 0x34
+; GCN: s_bcnt1_i32_b64 [[RESULT:s[0-9]+]], {{s\[}}[[LOVAL]]:[[HIVAL]]{{\]}}
+; GCN: v_mov_b32_e32 v[[VLO:[0-9]+]], [[RESULT]]
+; GCN: v_mov_b32_e32 v[[VHI:[0-9]+]], s[[HIVAL]]
+; GCN: buffer_store_dwordx2 {{v\[}}[[VLO]]:[[VHI]]{{\]}}
+; GCN: s_endpgm
define void @ctpop_i64_in_br(i64 addrspace(1)* %out, i64 addrspace(1)* %in, i64 %ctpop_arg, i32 %cond) {
entry:
%tmp0 = icmp eq i32 %cond, 0
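
The changed s_load offsets in these checks (0xb vs 0x2c, 0xd vs 0x34) name the same kernel-argument locations in different units: the SI SMRD encoding counts its immediate offset in dwords, while the VI SMEM encoding counts it in bytes. The conversion works out exactly:

    0xb dwords = 11 * 4 bytes = 44 = 0x2c bytes
    0xd dwords = 13 * 4 bytes = 52 = 0x34 bytes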
diff --git a/test/CodeGen/R600/cttz_zero_undef.ll b/test/CodeGen/R600/cttz_zero_undef.ll
index c4b1463..d9d284c 100644
--- a/test/CodeGen/R600/cttz_zero_undef.ll
+++ b/test/CodeGen/R600/cttz_zero_undef.ll
@@ -1,4 +1,5 @@
-; RUN: llc -march=r600 -mcpu=SI -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
+; RUN: llc -march=amdgcn -mcpu=SI -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
+; RUN: llc -march=amdgcn -mcpu=tonga -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
; RUN: llc -march=r600 -mcpu=cypress -verify-machineinstrs < %s | FileCheck -check-prefix=EG -check-prefix=FUNC %s
declare i32 @llvm.cttz.i32(i32, i1) nounwind readnone
diff --git a/test/CodeGen/R600/cvt_f32_ubyte.ll b/test/CodeGen/R600/cvt_f32_ubyte.ll
index 0d1db19..4d4bf93 100644
--- a/test/CodeGen/R600/cvt_f32_ubyte.ll
+++ b/test/CodeGen/R600/cvt_f32_ubyte.ll
@@ -1,4 +1,5 @@
-; RUN: llc -march=r600 -mcpu=SI -verify-machineinstrs < %s | FileCheck -check-prefix=SI %s
+; RUN: llc -march=amdgcn -mcpu=SI -verify-machineinstrs < %s | FileCheck -check-prefix=SI %s
+; RUN: llc -march=amdgcn -mcpu=tonga -verify-machineinstrs < %s | FileCheck -check-prefix=SI %s
; SI-LABEL: {{^}}load_i8_to_f32:
; SI: buffer_load_ubyte [[LOADREG:v[0-9]+]],
@@ -22,7 +23,7 @@ define void @load_i8_to_f32(float addrspace(1)* noalias %out, i8 addrspace(1)* n
; SI-DAG: v_cvt_f32_ubyte0_e32 v[[LORESULT:[0-9]+]], [[LOADREG]]
; SI: buffer_store_dwordx2 v{{\[}}[[LORESULT]]:[[HIRESULT]]{{\]}},
define void @load_v2i8_to_v2f32(<2 x float> addrspace(1)* noalias %out, <2 x i8> addrspace(1)* noalias %in) nounwind {
- %load = load <2 x i8> addrspace(1)* %in, align 1
+ %load = load <2 x i8> addrspace(1)* %in, align 2
%cvt = uitofp <2 x i8> %load to <2 x float>
store <2 x float> %cvt, <2 x float> addrspace(1)* %out, align 16
ret void
@@ -36,18 +37,14 @@ define void @load_v2i8_to_v2f32(<2 x float> addrspace(1)* noalias %out, <2 x i8>
; SI-DAG: v_cvt_f32_ubyte0_e32
; SI: buffer_store_dwordx2 v{{\[}}[[LORESULT]]:[[HIRESULT]]{{\]}},
define void @load_v3i8_to_v3f32(<3 x float> addrspace(1)* noalias %out, <3 x i8> addrspace(1)* noalias %in) nounwind {
- %load = load <3 x i8> addrspace(1)* %in, align 1
+ %load = load <3 x i8> addrspace(1)* %in, align 4
%cvt = uitofp <3 x i8> %load to <3 x float>
store <3 x float> %cvt, <3 x float> addrspace(1)* %out, align 16
ret void
}
; SI-LABEL: {{^}}load_v4i8_to_v4f32:
-; We can't use buffer_load_dword here, because the load is byte aligned, and
-; buffer_load_dword requires dword alignment.
-; SI: buffer_load_ushort
-; SI: buffer_load_ushort
-; SI: v_or_b32_e32 [[LOADREG:v[0-9]+]]
+; SI: buffer_load_dword [[LOADREG:v[0-9]+]]
; SI-NOT: bfe
; SI-NOT: lshr
; SI-DAG: v_cvt_f32_ubyte3_e32 v[[HIRESULT:[0-9]+]], [[LOADREG]]
@@ -56,6 +53,30 @@ define void @load_v3i8_to_v3f32(<3 x float> addrspace(1)* noalias %out, <3 x i8>
; SI-DAG: v_cvt_f32_ubyte0_e32 v[[LORESULT:[0-9]+]], [[LOADREG]]
; SI: buffer_store_dwordx4 v{{\[}}[[LORESULT]]:[[HIRESULT]]{{\]}},
define void @load_v4i8_to_v4f32(<4 x float> addrspace(1)* noalias %out, <4 x i8> addrspace(1)* noalias %in) nounwind {
+ %load = load <4 x i8> addrspace(1)* %in, align 4
+ %cvt = uitofp <4 x i8> %load to <4 x float>
+ store <4 x float> %cvt, <4 x float> addrspace(1)* %out, align 16
+ ret void
+}
+
+; This should not add instructions to shift each component into the
+; correct position in the word.
+
+; SI-LABEL: {{^}}load_v4i8_to_v4f32_unaligned:
+; SI: buffer_load_ubyte [[LOADREG3:v[0-9]+]]
+; SI: buffer_load_ubyte [[LOADREG2:v[0-9]+]]
+; SI: buffer_load_ubyte [[LOADREG1:v[0-9]+]]
+; SI: buffer_load_ubyte [[LOADREG0:v[0-9]+]]
+; SI-NOT: v_lshlrev_b32
+; SI-NOT: v_or_b32
+
+; SI-DAG: v_cvt_f32_ubyte0_e32 v[[LORESULT:[0-9]+]], [[LOADREG0]]
+; SI-DAG: v_cvt_f32_ubyte0_e32 v{{[0-9]+}}, [[LOADREG1]]
+; SI-DAG: v_cvt_f32_ubyte0_e32 v{{[0-9]+}}, [[LOADREG2]]
+; SI-DAG: v_cvt_f32_ubyte0_e32 v[[HIRESULT:[0-9]+]], [[LOADREG3]]
+
+; SI: buffer_store_dwordx4 v{{\[}}[[LORESULT]]:[[HIRESULT]]{{\]}},
+define void @load_v4i8_to_v4f32_unaligned(<4 x float> addrspace(1)* noalias %out, <4 x i8> addrspace(1)* noalias %in) nounwind {
%load = load <4 x i8> addrspace(1)* %in, align 1
%cvt = uitofp <4 x i8> %load to <4 x float>
store <4 x float> %cvt, <4 x float> addrspace(1)* %out, align 16
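
The align bumps in this file (1 to 2/4/8) give each vector load its natural alignment, so a single wide buffer load can be selected, which is what these tests mean to cover; the byte-aligned case keeps its own coverage in the new _unaligned test above. A self-contained sketch of the distinction, with a hypothetical kernel name and my summary of the expected selection in the comments:

    define void @align_sketch(<4 x float> addrspace(1)* %out, <4 x i8> addrspace(1)* %p) nounwind {
      %a = load <4 x i8> addrspace(1)* %p, align 4   ; dword-aligned: one buffer_load_dword
      %b = load <4 x i8> addrspace(1)* %p, align 1   ; byte-aligned: four buffer_load_ubyte
      %fa = uitofp <4 x i8> %a to <4 x float>
      %fb = uitofp <4 x i8> %b to <4 x float>
      %sum = fadd <4 x float> %fa, %fb
      store <4 x float> %sum, <4 x float> addrspace(1)* %out, align 16
      ret void
    }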
@@ -125,7 +146,7 @@ define void @load_v7i8_to_v7f32(<7 x float> addrspace(1)* noalias %out, <7 x i8>
; SI: buffer_store_dword
; SI: buffer_store_dword
define void @load_v8i8_to_v8f32(<8 x float> addrspace(1)* noalias %out, <8 x i8> addrspace(1)* noalias %in) nounwind {
- %load = load <8 x i8> addrspace(1)* %in, align 1
+ %load = load <8 x i8> addrspace(1)* %in, align 8
%cvt = uitofp <8 x i8> %load to <8 x float>
store <8 x float> %cvt, <8 x float> addrspace(1)* %out, align 16
ret void
diff --git a/test/CodeGen/R600/cvt_flr_i32_f32.ll b/test/CodeGen/R600/cvt_flr_i32_f32.ll
new file mode 100644
index 0000000..2dd3a9f
--- /dev/null
+++ b/test/CodeGen/R600/cvt_flr_i32_f32.ll
@@ -0,0 +1,86 @@
+; RUN: llc -march=amdgcn -mcpu=SI -verify-machineinstrs < %s | FileCheck -check-prefix=SI-SAFE -check-prefix=SI -check-prefix=FUNC %s
+; RUN: llc -march=amdgcn -mcpu=SI -enable-no-nans-fp-math -verify-machineinstrs < %s | FileCheck -check-prefix=SI-NONAN -check-prefix=SI -check-prefix=FUNC %s
+; RUN: llc -march=amdgcn -mcpu=tonga -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
+
+declare float @llvm.fabs.f32(float) #1
+declare float @llvm.floor.f32(float) #1
+
+; FUNC-LABEL: {{^}}cvt_flr_i32_f32_0:
+; SI-SAFE-NOT: v_cvt_flr_i32_f32
+; SI-NOT: add
+; SI-NONAN: v_cvt_flr_i32_f32_e32 v{{[0-9]+}}, s{{[0-9]+}}
+; SI: s_endpgm
+define void @cvt_flr_i32_f32_0(i32 addrspace(1)* %out, float %x) #0 {
+ %floor = call float @llvm.floor.f32(float %x) #1
+ %cvt = fptosi float %floor to i32
+ store i32 %cvt, i32 addrspace(1)* %out
+ ret void
+}
+
+; FUNC-LABEL: {{^}}cvt_flr_i32_f32_1:
+; SI: v_add_f32_e64 [[TMP:v[0-9]+]], 1.0, s{{[0-9]+}}
+; SI-SAFE-NOT: v_cvt_flr_i32_f32
+; SI-NONAN: v_cvt_flr_i32_f32_e32 v{{[0-9]+}}, [[TMP]]
+; SI: s_endpgm
+define void @cvt_flr_i32_f32_1(i32 addrspace(1)* %out, float %x) #0 {
+ %fadd = fadd float %x, 1.0
+ %floor = call float @llvm.floor.f32(float %fadd) #1
+ %cvt = fptosi float %floor to i32
+ store i32 %cvt, i32 addrspace(1)* %out
+ ret void
+}
+
+; FUNC-LABEL: {{^}}cvt_flr_i32_f32_fabs:
+; SI-NOT: add
+; SI-SAFE-NOT: v_cvt_flr_i32_f32
+; SI-NONAN: v_cvt_flr_i32_f32_e64 v{{[0-9]+}}, |s{{[0-9]+}}|
+; SI: s_endpgm
+define void @cvt_flr_i32_f32_fabs(i32 addrspace(1)* %out, float %x) #0 {
+ %x.fabs = call float @llvm.fabs.f32(float %x) #1
+ %floor = call float @llvm.floor.f32(float %x.fabs) #1
+ %cvt = fptosi float %floor to i32
+ store i32 %cvt, i32 addrspace(1)* %out
+ ret void
+}
+
+; FUNC-LABEL: {{^}}cvt_flr_i32_f32_fneg:
+; SI-NOT: add
+; SI-SAFE-NOT: v_cvt_flr_i32_f32
+; SI-NONAN: v_cvt_flr_i32_f32_e64 v{{[0-9]+}}, -s{{[0-9]+}}
+; SI: s_endpgm
+define void @cvt_flr_i32_f32_fneg(i32 addrspace(1)* %out, float %x) #0 {
+ %x.fneg = fsub float -0.000000e+00, %x
+ %floor = call float @llvm.floor.f32(float %x.fneg) #1
+ %cvt = fptosi float %floor to i32
+ store i32 %cvt, i32 addrspace(1)* %out
+ ret void
+}
+
+; FUNC-LABEL: {{^}}cvt_flr_i32_f32_fabs_fneg:
+; SI-NOT: add
+; SI-SAFE-NOT: v_cvt_flr_i32_f32
+; SI-NONAN: v_cvt_flr_i32_f32_e64 v{{[0-9]+}}, -|s{{[0-9]+}}|
+; SI: s_endpgm
+define void @cvt_flr_i32_f32_fabs_fneg(i32 addrspace(1)* %out, float %x) #0 {
+ %x.fabs = call float @llvm.fabs.f32(float %x) #1
+ %x.fabs.fneg = fsub float -0.000000e+00, %x.fabs
+ %floor = call float @llvm.floor.f32(float %x.fabs.fneg) #1
+ %cvt = fptosi float %floor to i32
+ store i32 %cvt, i32 addrspace(1)* %out
+ ret void
+}
+
+; FUNC-LABEL: {{^}}no_cvt_flr_i32_f32_0:
+; SI-NOT: v_cvt_flr_i32_f32
+; SI: v_floor_f32
+; SI: v_cvt_u32_f32_e32
+; SI: s_endpgm
+define void @no_cvt_flr_i32_f32_0(i32 addrspace(1)* %out, float %x) #0 {
+ %floor = call float @llvm.floor.f32(float %x) #1
+ %cvt = fptoui float %floor to i32
+ store i32 %cvt, i32 addrspace(1)* %out
+ ret void
+}
+
+attributes #0 = { nounwind }
+attributes #1 = { nounwind readnone }
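
A note on reading the prefixes in this new file: SI-SAFE lines apply only to the default-FP run, SI-NONAN lines only to the -enable-no-nans-fp-math run, and plain SI lines to every run (the tonga run enables just SI and FUNC, so it checks only the shared lines). The fold also fires only for fptosi; no_cvt_flr_i32_f32_0 shows fptoui keeping the separate floor and convert. The shape being folded, as a standalone function with a hypothetical name:

    declare float @llvm.floor.f32(float) nounwind readnone

    define void @flr_sketch(i32 addrspace(1)* %out, float %x) nounwind {
      %floor = call float @llvm.floor.f32(float %x)
      %cvt = fptosi float %floor to i32   ; becomes v_cvt_flr_i32_f32 under no-nans
      store i32 %cvt, i32 addrspace(1)* %out
      ret void
    }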
diff --git a/test/CodeGen/R600/cvt_rpi_i32_f32.ll b/test/CodeGen/R600/cvt_rpi_i32_f32.ll
new file mode 100644
index 0000000..864ac40
--- /dev/null
+++ b/test/CodeGen/R600/cvt_rpi_i32_f32.ll
@@ -0,0 +1,83 @@
+; RUN: llc -march=amdgcn -mcpu=SI -verify-machineinstrs < %s | FileCheck -check-prefix=SI-SAFE -check-prefix=SI -check-prefix=FUNC %s
+; RUN: llc -march=amdgcn -mcpu=SI -enable-no-nans-fp-math -verify-machineinstrs < %s | FileCheck -check-prefix=SI-NONAN -check-prefix=SI -check-prefix=FUNC %s
+; RUN: llc -march=amdgcn -mcpu=tonga < %s | FileCheck -check-prefix=SI-SAFE -check-prefix=SI -check-prefix=FUNC %s
+
+declare float @llvm.fabs.f32(float) #1
+declare float @llvm.floor.f32(float) #1
+
+; FUNC-LABEL: {{^}}cvt_rpi_i32_f32:
+; SI-SAFE-NOT: v_cvt_rpi_i32_f32
+; SI-NONAN: v_cvt_rpi_i32_f32_e32 v{{[0-9]+}}, s{{[0-9]+}}
+; SI: s_endpgm
+define void @cvt_rpi_i32_f32(i32 addrspace(1)* %out, float %x) #0 {
+ %fadd = fadd float %x, 0.5
+ %floor = call float @llvm.floor.f32(float %fadd) #1
+ %cvt = fptosi float %floor to i32
+ store i32 %cvt, i32 addrspace(1)* %out
+ ret void
+}
+
+; FUNC-LABEL: {{^}}cvt_rpi_i32_f32_fabs:
+; SI-SAFE-NOT: v_cvt_rpi_i32_f32
+; SI-NONAN: v_cvt_rpi_i32_f32_e64 v{{[0-9]+}}, |s{{[0-9]+}}|{{$}}
+; SI: s_endpgm
+define void @cvt_rpi_i32_f32_fabs(i32 addrspace(1)* %out, float %x) #0 {
+ %x.fabs = call float @llvm.fabs.f32(float %x) #1
+ %fadd = fadd float %x.fabs, 0.5
+ %floor = call float @llvm.floor.f32(float %fadd) #1
+ %cvt = fptosi float %floor to i32
+ store i32 %cvt, i32 addrspace(1)* %out
+ ret void
+}
+
+; FIXME: This doesn't work because it forms fsub 0.5, x
+; FUNC-LABEL: {{^}}cvt_rpi_i32_f32_fneg:
+; XSI-NONAN: v_cvt_rpi_i32_f32_e64 v{{[0-9]+}}, -s{{[0-9]+}}
+; SI: v_sub_f32_e64 [[TMP:v[0-9]+]], 0.5, s{{[0-9]+}}
+; SI-SAFE-NOT: v_cvt_flr_i32_f32
+; SI-NONAN: v_cvt_flr_i32_f32_e32 {{v[0-9]+}}, [[TMP]]
+; SI: s_endpgm
+define void @cvt_rpi_i32_f32_fneg(i32 addrspace(1)* %out, float %x) #0 {
+ %x.fneg = fsub float -0.000000e+00, %x
+ %fadd = fadd float %x.fneg, 0.5
+ %floor = call float @llvm.floor.f32(float %fadd) #1
+ %cvt = fptosi float %floor to i32
+ store i32 %cvt, i32 addrspace(1)* %out
+ ret void
+}
+
+; FIXME: This doesn't work for the same reason as above
+; FUNC-LABEL: {{^}}cvt_rpi_i32_f32_fabs_fneg:
+; SI-SAFE-NOT: v_cvt_rpi_i32_f32
+; XSI-NONAN: v_cvt_rpi_i32_f32_e64 v{{[0-9]+}}, -|s{{[0-9]+}}|
+
+; SI: v_sub_f32_e64 [[TMP:v[0-9]+]], 0.5, |s{{[0-9]+}}|
+; SI-SAFE-NOT: v_cvt_flr_i32_f32
+; SI-NONAN: v_cvt_flr_i32_f32_e32 {{v[0-9]+}}, [[TMP]]
+; SI: s_endpgm
+define void @cvt_rpi_i32_f32_fabs_fneg(i32 addrspace(1)* %out, float %x) #0 {
+ %x.fabs = call float @llvm.fabs.f32(float %x) #1
+ %x.fabs.fneg = fsub float -0.000000e+00, %x.fabs
+ %fadd = fadd float %x.fabs.fneg, 0.5
+ %floor = call float @llvm.floor.f32(float %fadd) #1
+ %cvt = fptosi float %floor to i32
+ store i32 %cvt, i32 addrspace(1)* %out
+ ret void
+}
+
+; FUNC-LABEL: {{^}}no_cvt_rpi_i32_f32_0:
+; SI-NOT: v_cvt_rpi_i32_f32
+; SI: v_add_f32
+; SI: v_floor_f32
+; SI: v_cvt_u32_f32
+; SI: s_endpgm
+define void @no_cvt_rpi_i32_f32_0(i32 addrspace(1)* %out, float %x) #0 {
+ %fadd = fadd float %x, 0.5
+ %floor = call float @llvm.floor.f32(float %fadd) #1
+ %cvt = fptoui float %floor to i32
+ store i32 %cvt, i32 addrspace(1)* %out
+ ret void
+}
+
+attributes #0 = { nounwind }
+attributes #1 = { nounwind readnone }
diff --git a/test/CodeGen/R600/default-fp-mode.ll b/test/CodeGen/R600/default-fp-mode.ll
index 935bf97..da8e914 100644
--- a/test/CodeGen/R600/default-fp-mode.ll
+++ b/test/CodeGen/R600/default-fp-mode.ll
@@ -1,10 +1,17 @@
-; RUN: llc -march=r600 -mcpu=SI -mattr=-fp32-denormals,+fp64-denormals < %s | FileCheck -check-prefix=FP64-DENORMAL -check-prefix=FUNC %s
-; RUN: llc -march=r600 -mcpu=SI -mattr=+fp32-denormals,-fp64-denormals < %s | FileCheck -check-prefix=FP32-DENORMAL -check-prefix=FUNC %s
-; RUN: llc -march=r600 -mcpu=SI -mattr=+fp32-denormals,+fp64-denormals < %s | FileCheck -check-prefix=BOTH-DENORMAL -check-prefix=FUNC %s
-; RUN: llc -march=r600 -mcpu=SI -mattr=-fp32-denormals,-fp64-denormals < %s | FileCheck -check-prefix=NO-DENORMAL -check-prefix=FUNC %s
-; RUN: llc -march=r600 -mcpu=SI < %s | FileCheck -check-prefix=DEFAULT -check-prefix=FUNC %s
-; RUN: llc -march=r600 -mcpu=SI -mattr=-fp32-denormals < %s | FileCheck -check-prefix=DEFAULT -check-prefix=FUNC %s
-; RUN: llc -march=r600 -mcpu=SI -mattr=+fp64-denormals < %s | FileCheck -check-prefix=DEFAULT -check-prefix=FUNC %s
+; RUN: llc -march=amdgcn -mcpu=SI -mattr=-fp32-denormals,+fp64-denormals < %s | FileCheck -check-prefix=FP64-DENORMAL -check-prefix=FUNC %s
+; RUN: llc -march=amdgcn -mcpu=SI -mattr=+fp32-denormals,-fp64-denormals < %s | FileCheck -check-prefix=FP32-DENORMAL -check-prefix=FUNC %s
+; RUN: llc -march=amdgcn -mcpu=SI -mattr=+fp32-denormals,+fp64-denormals < %s | FileCheck -check-prefix=BOTH-DENORMAL -check-prefix=FUNC %s
+; RUN: llc -march=amdgcn -mcpu=SI -mattr=-fp32-denormals,-fp64-denormals < %s | FileCheck -check-prefix=NO-DENORMAL -check-prefix=FUNC %s
+; RUN: llc -march=amdgcn -mcpu=SI < %s | FileCheck -check-prefix=DEFAULT -check-prefix=FUNC %s
+; RUN: llc -march=amdgcn -mcpu=SI -mattr=-fp32-denormals < %s | FileCheck -check-prefix=DEFAULT -check-prefix=FUNC %s
+; RUN: llc -march=amdgcn -mcpu=SI -mattr=+fp64-denormals < %s | FileCheck -check-prefix=DEFAULT -check-prefix=FUNC %s
+; RUN: llc -march=amdgcn -mcpu=tonga -mattr=-fp32-denormals,+fp64-denormals < %s | FileCheck -check-prefix=FP64-DENORMAL -check-prefix=FUNC %s
+; RUN: llc -march=amdgcn -mcpu=tonga -mattr=+fp32-denormals,-fp64-denormals < %s | FileCheck -check-prefix=FP32-DENORMAL -check-prefix=FUNC %s
+; RUN: llc -march=amdgcn -mcpu=tonga -mattr=+fp32-denormals,+fp64-denormals < %s | FileCheck -check-prefix=BOTH-DENORMAL -check-prefix=FUNC %s
+; RUN: llc -march=amdgcn -mcpu=tonga -mattr=-fp32-denormals,-fp64-denormals < %s | FileCheck -check-prefix=NO-DENORMAL -check-prefix=FUNC %s
+; RUN: llc -march=amdgcn -mcpu=tonga < %s | FileCheck -check-prefix=DEFAULT -check-prefix=FUNC %s
+; RUN: llc -march=amdgcn -mcpu=tonga -mattr=-fp32-denormals < %s | FileCheck -check-prefix=DEFAULT -check-prefix=FUNC %s
+; RUN: llc -march=amdgcn -mcpu=tonga -mattr=+fp64-denormals < %s | FileCheck -check-prefix=DEFAULT -check-prefix=FUNC %s
; FUNC-LABEL: {{^}}test_kernel:
diff --git a/test/CodeGen/R600/ds-negative-offset-addressing-mode-loop.ll b/test/CodeGen/R600/ds-negative-offset-addressing-mode-loop.ll
index f334062..41afd50 100644
--- a/test/CodeGen/R600/ds-negative-offset-addressing-mode-loop.ll
+++ b/test/CodeGen/R600/ds-negative-offset-addressing-mode-loop.ll
@@ -1,5 +1,5 @@
-; RUN: llc -march=r600 -mcpu=SI -verify-machineinstrs -mattr=+load-store-opt -enable-misched < %s | FileCheck -check-prefix=SI --check-prefix=CHECK %s
-; RUN: llc -march=r600 -mcpu=bonaire -verify-machineinstrs -mattr=+load-store-opt -enable-misched < %s | FileCheck -check-prefix=CI --check-prefix=CHECK %s
+; RUN: llc -march=amdgcn -mcpu=SI -verify-machineinstrs -mattr=+load-store-opt -enable-misched < %s | FileCheck -check-prefix=SI --check-prefix=CHECK %s
+; RUN: llc -march=amdgcn -mcpu=bonaire -verify-machineinstrs -mattr=+load-store-opt -enable-misched < %s | FileCheck -check-prefix=CI --check-prefix=CHECK %s
declare i32 @llvm.r600.read.tidig.x() #0
declare void @llvm.AMDGPU.barrier.local() #1
diff --git a/test/CodeGen/R600/ds_read2.ll b/test/CodeGen/R600/ds_read2.ll
index 6e0c8be..c06b0b1 100644
--- a/test/CodeGen/R600/ds_read2.ll
+++ b/test/CodeGen/R600/ds_read2.ll
@@ -1,4 +1,4 @@
-; RUN: llc -march=r600 -mcpu=bonaire -verify-machineinstrs -mattr=+load-store-opt -enable-misched < %s | FileCheck -strict-whitespace -check-prefix=SI %s
+; RUN: llc -march=amdgcn -mcpu=bonaire -verify-machineinstrs -mattr=+load-store-opt -enable-misched < %s | FileCheck -strict-whitespace -check-prefix=SI %s
; FIXME: We don't get cases where the address was an SGPR because we
; get a copy to the address register for each one.
diff --git a/test/CodeGen/R600/ds_read2_offset_order.ll b/test/CodeGen/R600/ds_read2_offset_order.ll
new file mode 100644
index 0000000..44306bc
--- /dev/null
+++ b/test/CodeGen/R600/ds_read2_offset_order.ll
@@ -0,0 +1,45 @@
+; RUN: llc -march=amdgcn -mcpu=bonaire -verify-machineinstrs -mattr=+load-store-opt -enable-misched < %s | FileCheck -strict-whitespace -check-prefix=SI %s
+; RUN: llc -march=amdgcn -mcpu=tonga -verify-machineinstrs -mattr=+load-store-opt -enable-misched < %s | FileCheck -strict-whitespace -check-prefix=SI %s
+
+; XFAIL: *
+
+@lds = addrspace(3) global [512 x float] undef, align 4
+
+; SI-LABEL: {{^}}offset_order:
+
+; SI: ds_read_b32 v{{[0-9]+}}, v{{[0-9]+}} offset:56
+; SI: ds_read2st64_b32 v[{{[0-9]+}}:{{[0-9]+}}], v{{[0-9]+}} offset0:0 offset1:4
+; SI: ds_read2_b32 v[{{[0-9]+}}:{{[0-9]+}}], v{{[0-9]+}} offset0:2 offset1:3
+; SI: ds_read2_b32 v[{{[0-9]+}}:{{[0-9]+}}], v{{[0-9]+}} offset0:11 offset1:1
+
+define void @offset_order(float addrspace(1)* %out) {
+entry:
+ %ptr0 = getelementptr inbounds [512 x float] addrspace(3)* @lds, i32 0, i32 0
+ %val0 = load float addrspace(3)* %ptr0
+
+ %ptr1 = getelementptr inbounds [512 x float] addrspace(3)* @lds, i32 0, i32 256
+ %val1 = load float addrspace(3)* %ptr1
+ %add1 = fadd float %val0, %val1
+
+ %ptr2 = getelementptr inbounds [512 x float] addrspace(3)* @lds, i32 0, i32 3
+ %val2 = load float addrspace(3)* %ptr2
+ %add2 = fadd float %add1, %val2
+
+ %ptr3 = getelementptr inbounds [512 x float] addrspace(3)* @lds, i32 0, i32 2
+ %val3 = load float addrspace(3)* %ptr3
+ %add3 = fadd float %add2, %val3
+
+ %ptr4 = getelementptr inbounds [512 x float] addrspace(3)* @lds, i32 0, i32 12
+ %val4 = load float addrspace(3)* %ptr4
+ %add4 = fadd float %add3, %val4
+
+ %ptr5 = getelementptr inbounds [512 x float] addrspace(3)* @lds, i32 0, i32 14
+ %val5 = load float addrspace(3)* %ptr5
+ %add5 = fadd float %add4, %val5
+
+ %ptr6 = getelementptr inbounds [512 x float] addrspace(3)* @lds, i32 0, i32 11
+ %val6 = load float addrspace(3)* %ptr6
+ %add6 = fadd float %add5, %val6
+ store float %add6, float addrspace(1)* %out
+ ret void
+}
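
For decoding the offsets this XFAILed test expects: ds_read_b32 takes a byte offset, while the read2 forms count in element-size units, and st64 multiplies that by a further 64. Against the 4-byte float LDS array, the checks line up with the getelementptr indices in the IR:

    offset:56 on ds_read_b32           -> element 56 / 4 = 14  (%ptr5)
    offset1:4 on ds_read2st64_b32      -> element 4 * 64 = 256 (%ptr1)
    offset0:2 offset1:3 on ds_read2    -> elements 2 and 3     (%ptr3, %ptr2)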
diff --git a/test/CodeGen/R600/ds_read2st64.ll b/test/CodeGen/R600/ds_read2st64.ll
index 3e98e59..efd875e 100644
--- a/test/CodeGen/R600/ds_read2st64.ll
+++ b/test/CodeGen/R600/ds_read2st64.ll
@@ -1,4 +1,4 @@
-; RUN: llc -march=r600 -mcpu=bonaire -verify-machineinstrs -mattr=+load-store-opt -enable-misched < %s | FileCheck -check-prefix=SI %s
+; RUN: llc -march=amdgcn -mcpu=bonaire -verify-machineinstrs -mattr=+load-store-opt -enable-misched < %s | FileCheck -check-prefix=SI %s
@lds = addrspace(3) global [512 x float] undef, align 4
@lds.f64 = addrspace(3) global [512 x double] undef, align 8
@@ -65,8 +65,8 @@ define void @simple_read2st64_f32_max_offset(float addrspace(1)* %out, float add
; SI-LABEL: @simple_read2st64_f32_over_max_offset
; SI-NOT: ds_read2st64_b32
-; SI: ds_read_b32 {{v[0-9]+}}, {{v[0-9]+}} offset:256
; SI: v_add_i32_e32 [[BIGADD:v[0-9]+]], 0x10000, {{v[0-9]+}}
+; SI: ds_read_b32 {{v[0-9]+}}, {{v[0-9]+}} offset:256
; SI: ds_read_b32 {{v[0-9]+}}, [[BIGADD]]
; SI: s_endpgm
define void @simple_read2st64_f32_over_max_offset(float addrspace(1)* %out, float addrspace(3)* %lds) #0 {
@@ -197,8 +197,8 @@ define void @simple_read2st64_f64_max_offset(double addrspace(1)* %out, double a
; SI-LABEL: @simple_read2st64_f64_over_max_offset
; SI-NOT: ds_read2st64_b64
-; SI: ds_read_b64 {{v\[[0-9]+:[0-9]+\]}}, {{v[0-9]+}} offset:512
; SI: v_add_i32_e32 [[BIGADD:v[0-9]+]], 0x10000, {{v[0-9]+}}
+; SI: ds_read_b64 {{v\[[0-9]+:[0-9]+\]}}, {{v[0-9]+}} offset:512
; SI: ds_read_b64 {{v\[[0-9]+:[0-9]+\]}}, [[BIGADD]]
; SI: s_endpgm
define void @simple_read2st64_f64_over_max_offset(double addrspace(1)* %out, double addrspace(3)* %lds) #0 {
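
The two hunks above only reorder the checks: the v_add_i32 of the base address is now expected before the first ds_read, a scheduling change, with the instructions themselves unchanged. The 0x10000 constant is where the 8-bit offset fields run out for the b32 case: the largest encodable slot is offset1:255, i.e. 255 * 64 elements, and one stride past that is

    256 * 64 elements * 4 bytes = 65536 = 0x10000 bytes

so the out-of-range address has to be formed with an explicit add.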
diff --git a/test/CodeGen/R600/ds_write2.ll b/test/CodeGen/R600/ds_write2.ll
index 1807fb5..e2db81a 100644
--- a/test/CodeGen/R600/ds_write2.ll
+++ b/test/CodeGen/R600/ds_write2.ll
@@ -1,4 +1,4 @@
-; RUN: llc -march=r600 -mcpu=bonaire -verify-machineinstrs -mattr=+load-store-opt -enable-misched < %s | FileCheck -strict-whitespace -check-prefix=SI %s
+; RUN: llc -march=amdgcn -mcpu=bonaire -verify-machineinstrs -mattr=+load-store-opt -enable-misched < %s | FileCheck -strict-whitespace -check-prefix=SI %s
@lds = addrspace(3) global [512 x float] undef, align 4
@lds.f64 = addrspace(3) global [512 x double] undef, align 8
@@ -7,7 +7,7 @@
; SI-LABEL: @simple_write2_one_val_f32
; SI-DAG: buffer_load_dword [[VAL:v[0-9]+]]
; SI-DAG: v_lshlrev_b32_e32 [[VPTR:v[0-9]+]], 2, v{{[0-9]+}}
-; SI: ds_write2_b32 [[VPTR]], [[VAL]], [[VAL]] offset0:0 offset1:8 [M0]
+; SI: ds_write2_b32 [[VPTR]], [[VAL]], [[VAL]] offset0:0 offset1:8
; SI: s_endpgm
define void @simple_write2_one_val_f32(float addrspace(1)* %C, float addrspace(1)* %in) #0 {
%x.i = tail call i32 @llvm.r600.read.tidig.x() #1
@@ -23,9 +23,9 @@ define void @simple_write2_one_val_f32(float addrspace(1)* %C, float addrspace(1
; SI-LABEL: @simple_write2_two_val_f32
; SI-DAG: buffer_load_dword [[VAL0:v[0-9]+]], {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, 0 addr64{{$}}
-; SI-DAG: buffer_load_dword [[VAL1:v[0-9]+]], {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, 0 addr64 offset:0x4
+; SI-DAG: buffer_load_dword [[VAL1:v[0-9]+]], {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, 0 addr64 offset:4
; SI-DAG: v_lshlrev_b32_e32 [[VPTR:v[0-9]+]], 2, v{{[0-9]+}}
-; SI: ds_write2_b32 [[VPTR]], [[VAL0]], [[VAL1]] offset0:0 offset1:8 [M0]
+; SI: ds_write2_b32 [[VPTR]], [[VAL0]], [[VAL1]] offset0:0 offset1:8
; SI: s_endpgm
define void @simple_write2_two_val_f32(float addrspace(1)* %C, float addrspace(1)* %in) #0 {
%x.i = tail call i32 @llvm.r600.read.tidig.x() #1
@@ -84,7 +84,7 @@ define void @simple_write2_two_val_f32_volatile_1(float addrspace(1)* %C, float
; SI: buffer_load_dwordx2 v{{\[}}[[VAL0:[0-9]+]]:{{[0-9]+\]}}
; SI: buffer_load_dwordx2 v{{\[[0-9]+}}:[[VAL1:[0-9]+]]{{\]}}
; SI: v_lshlrev_b32_e32 [[VPTR:v[0-9]+]], 2, v{{[0-9]+}}
-; SI: ds_write2_b32 [[VPTR]], v[[VAL0]], v[[VAL1]] offset0:0 offset1:8 [M0]
+; SI: ds_write2_b32 [[VPTR]], v[[VAL0]], v[[VAL1]] offset0:0 offset1:8
; SI: s_endpgm
define void @simple_write2_two_val_subreg2_mixed_f32(float addrspace(1)* %C, <2 x float> addrspace(1)* %in) #0 {
%x.i = tail call i32 @llvm.r600.read.tidig.x() #1
@@ -105,7 +105,7 @@ define void @simple_write2_two_val_subreg2_mixed_f32(float addrspace(1)* %C, <2
; SI-LABEL: @simple_write2_two_val_subreg2_f32
; SI-DAG: buffer_load_dwordx2 v{{\[}}[[VAL0:[0-9]+]]:[[VAL1:[0-9]+]]{{\]}}
; SI-DAG: v_lshlrev_b32_e32 [[VPTR:v[0-9]+]], 2, v{{[0-9]+}}
-; SI: ds_write2_b32 [[VPTR]], v[[VAL0]], v[[VAL1]] offset0:0 offset1:8 [M0]
+; SI: ds_write2_b32 [[VPTR]], v[[VAL0]], v[[VAL1]] offset0:0 offset1:8
; SI: s_endpgm
define void @simple_write2_two_val_subreg2_f32(float addrspace(1)* %C, <2 x float> addrspace(1)* %in) #0 {
%x.i = tail call i32 @llvm.r600.read.tidig.x() #1
@@ -124,7 +124,7 @@ define void @simple_write2_two_val_subreg2_f32(float addrspace(1)* %C, <2 x floa
; SI-LABEL: @simple_write2_two_val_subreg4_f32
; SI-DAG: buffer_load_dwordx4 v{{\[}}[[VAL0:[0-9]+]]:[[VAL1:[0-9]+]]{{\]}}
; SI-DAG: v_lshlrev_b32_e32 [[VPTR:v[0-9]+]], 2, v{{[0-9]+}}
-; SI: ds_write2_b32 [[VPTR]], v[[VAL0]], v[[VAL1]] offset0:0 offset1:8 [M0]
+; SI: ds_write2_b32 [[VPTR]], v[[VAL0]], v[[VAL1]] offset0:0 offset1:8
; SI: s_endpgm
define void @simple_write2_two_val_subreg4_f32(float addrspace(1)* %C, <4 x float> addrspace(1)* %in) #0 {
%x.i = tail call i32 @llvm.r600.read.tidig.x() #1
@@ -142,9 +142,9 @@ define void @simple_write2_two_val_subreg4_f32(float addrspace(1)* %C, <4 x floa
; SI-LABEL: @simple_write2_two_val_max_offset_f32
; SI-DAG: buffer_load_dword [[VAL0:v[0-9]+]], {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, 0 addr64{{$}}
-; SI-DAG: buffer_load_dword [[VAL1:v[0-9]+]], {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, 0 addr64 offset:0x4
+; SI-DAG: buffer_load_dword [[VAL1:v[0-9]+]], {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, 0 addr64 offset:4
; SI-DAG: v_lshlrev_b32_e32 [[VPTR:v[0-9]+]], 2, v{{[0-9]+}}
-; SI: ds_write2_b32 [[VPTR]], [[VAL0]], [[VAL1]] offset0:0 offset1:255 [M0]
+; SI: ds_write2_b32 [[VPTR]], [[VAL0]], [[VAL1]] offset0:0 offset1:255
; SI: s_endpgm
define void @simple_write2_two_val_max_offset_f32(float addrspace(1)* %C, float addrspace(1)* %in) #0 {
%x.i = tail call i32 @llvm.r600.read.tidig.x() #1
@@ -268,7 +268,7 @@ define void @write2_ptr_subreg_arg_two_val_f32(float addrspace(1)* %C, float add
; SI-LABEL: @simple_write2_one_val_f64
; SI: buffer_load_dwordx2 [[VAL:v\[[0-9]+:[0-9]+\]]],
; SI: v_lshlrev_b32_e32 [[VPTR:v[0-9]+]], 3, v{{[0-9]+}}
-; SI: ds_write2_b64 [[VPTR]], [[VAL]], [[VAL]] offset0:0 offset1:8 [M0]
+; SI: ds_write2_b64 [[VPTR]], [[VAL]], [[VAL]] offset0:0 offset1:8
; SI: s_endpgm
define void @simple_write2_one_val_f64(double addrspace(1)* %C, double addrspace(1)* %in) #0 {
%x.i = tail call i32 @llvm.r600.read.tidig.x() #1
@@ -285,8 +285,8 @@ define void @simple_write2_one_val_f64(double addrspace(1)* %C, double addrspace
; SI-LABEL: @misaligned_simple_write2_one_val_f64
; SI-DAG: buffer_load_dwordx2 v{{\[}}[[VAL0:[0-9]+]]:[[VAL1:[0-9]+]]{{\]}}
; SI-DAG: v_lshlrev_b32_e32 [[VPTR:v[0-9]+]], 3, v{{[0-9]+}}
-; SI: ds_write2_b32 [[VPTR]], v[[VAL0]], v[[VAL1]] offset0:0 offset1:1 [M0]
-; SI: ds_write2_b32 [[VPTR]], v[[VAL0]], v[[VAL1]] offset0:14 offset1:15 [M0]
+; SI: ds_write2_b32 [[VPTR]], v[[VAL0]], v[[VAL1]] offset0:0 offset1:1
+; SI: ds_write2_b32 [[VPTR]], v[[VAL0]], v[[VAL1]] offset0:14 offset1:15
; SI: s_endpgm
define void @misaligned_simple_write2_one_val_f64(double addrspace(1)* %C, double addrspace(1)* %in, double addrspace(3)* %lds) #0 {
%x.i = tail call i32 @llvm.r600.read.tidig.x() #1
@@ -302,9 +302,9 @@ define void @misaligned_simple_write2_one_val_f64(double addrspace(1)* %C, doubl
; SI-LABEL: @simple_write2_two_val_f64
; SI-DAG: buffer_load_dwordx2 [[VAL0:v\[[0-9]+:[0-9]+\]]], {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, 0 addr64{{$}}
-; SI-DAG: buffer_load_dwordx2 [[VAL1:v\[[0-9]+:[0-9]+\]]], {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, 0 addr64 offset:0x8
+; SI-DAG: buffer_load_dwordx2 [[VAL1:v\[[0-9]+:[0-9]+\]]], {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, 0 addr64 offset:8
; SI-DAG: v_lshlrev_b32_e32 [[VPTR:v[0-9]+]], 3, v{{[0-9]+}}
-; SI: ds_write2_b64 [[VPTR]], [[VAL0]], [[VAL1]] offset0:0 offset1:8 [M0]
+; SI: ds_write2_b64 [[VPTR]], [[VAL0]], [[VAL1]] offset0:0 offset1:8
; SI: s_endpgm
define void @simple_write2_two_val_f64(double addrspace(1)* %C, double addrspace(1)* %in) #0 {
%x.i = tail call i32 @llvm.r600.read.tidig.x() #1
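
The remaining churn in these DS tests is assembler printing, not codegen: mubuf offsets are now printed in decimal (offset:0x4 becomes offset:4 here, offset:0x10 became offset:16 in ctpop.ll above), and the DS printer dropped the trailing [M0] annotation. One check line before and after, quoted from the hunks above:

    ds_write2_b32 [[VPTR]], [[VAL]], [[VAL]] offset0:0 offset1:8 [M0]
    ds_write2_b32 [[VPTR]], [[VAL]], [[VAL]] offset0:0 offset1:8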
diff --git a/test/CodeGen/R600/ds_write2st64.ll b/test/CodeGen/R600/ds_write2st64.ll
index 4cafb7c..0f1c662 100644
--- a/test/CodeGen/R600/ds_write2st64.ll
+++ b/test/CodeGen/R600/ds_write2st64.ll
@@ -1,4 +1,4 @@
-; RUN: llc -march=r600 -mcpu=bonaire -verify-machineinstrs -mattr=+load-store-opt -enable-misched < %s | FileCheck -check-prefix=SI %s
+; RUN: llc -march=amdgcn -mcpu=bonaire -verify-machineinstrs -mattr=+load-store-opt -enable-misched < %s | FileCheck -check-prefix=SI %s
@lds = addrspace(3) global [512 x float] undef, align 4
@@ -7,7 +7,7 @@
; SI-LABEL: @simple_write2st64_one_val_f32_0_1
; SI-DAG: buffer_load_dword [[VAL:v[0-9]+]]
; SI-DAG: v_lshlrev_b32_e32 [[VPTR:v[0-9]+]], 2, v{{[0-9]+}}
-; SI: ds_write2st64_b32 [[VPTR]], [[VAL]], [[VAL]] offset0:0 offset1:1 [M0]
+; SI: ds_write2st64_b32 [[VPTR]], [[VAL]], [[VAL]] offset0:0 offset1:1
; SI: s_endpgm
define void @simple_write2st64_one_val_f32_0_1(float addrspace(1)* %C, float addrspace(1)* %in) #0 {
%x.i = tail call i32 @llvm.r600.read.tidig.x() #1
@@ -23,9 +23,9 @@ define void @simple_write2st64_one_val_f32_0_1(float addrspace(1)* %C, float add
; SI-LABEL: @simple_write2st64_two_val_f32_2_5
; SI-DAG: buffer_load_dword [[VAL0:v[0-9]+]], {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, 0 addr64{{$}}
-; SI-DAG: buffer_load_dword [[VAL1:v[0-9]+]], {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, 0 addr64 offset:0x4
+; SI-DAG: buffer_load_dword [[VAL1:v[0-9]+]], {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, 0 addr64 offset:4
; SI-DAG: v_lshlrev_b32_e32 [[VPTR:v[0-9]+]], 2, v{{[0-9]+}}
-; SI: ds_write2st64_b32 [[VPTR]], [[VAL0]], [[VAL1]] offset0:2 offset1:5 [M0]
+; SI: ds_write2st64_b32 [[VPTR]], [[VAL0]], [[VAL1]] offset0:2 offset1:5
; SI: s_endpgm
define void @simple_write2st64_two_val_f32_2_5(float addrspace(1)* %C, float addrspace(1)* %in) #0 {
%x.i = tail call i32 @llvm.r600.read.tidig.x() #1
@@ -44,9 +44,9 @@ define void @simple_write2st64_two_val_f32_2_5(float addrspace(1)* %C, float add
; SI-LABEL: @simple_write2st64_two_val_max_offset_f32
; SI-DAG: buffer_load_dword [[VAL0:v[0-9]+]], {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, 0 addr64{{$}}
-; SI-DAG: buffer_load_dword [[VAL1:v[0-9]+]], {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, 0 addr64 offset:0x4
+; SI-DAG: buffer_load_dword [[VAL1:v[0-9]+]], {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, 0 addr64 offset:4
; SI-DAG: v_lshlrev_b32_e32 [[VPTR:v[0-9]+]], 2, v{{[0-9]+}}
-; SI: ds_write2st64_b32 [[VPTR]], [[VAL0]], [[VAL1]] offset0:0 offset1:255 [M0]
+; SI: ds_write2st64_b32 [[VPTR]], [[VAL0]], [[VAL1]] offset0:0 offset1:255
; SI: s_endpgm
define void @simple_write2st64_two_val_max_offset_f32(float addrspace(1)* %C, float addrspace(1)* %in, float addrspace(3)* %lds) #0 {
%x.i = tail call i32 @llvm.r600.read.tidig.x() #1
@@ -64,9 +64,9 @@ define void @simple_write2st64_two_val_max_offset_f32(float addrspace(1)* %C, fl
; SI-LABEL: @simple_write2st64_two_val_max_offset_f64
; SI-DAG: buffer_load_dwordx2 [[VAL0:v\[[0-9]+:[0-9]+\]]], {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, 0 addr64{{$}}
-; SI-DAG: buffer_load_dwordx2 [[VAL1:v\[[0-9]+:[0-9]+\]]], {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, 0 addr64 offset:0x8
+; SI-DAG: buffer_load_dwordx2 [[VAL1:v\[[0-9]+:[0-9]+\]]], {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, 0 addr64 offset:8
; SI-DAG: v_add_i32_e32 [[VPTR:v[0-9]+]],
-; SI: ds_write2st64_b64 [[VPTR]], [[VAL0]], [[VAL1]] offset0:4 offset1:127 [M0]
+; SI: ds_write2st64_b64 [[VPTR]], [[VAL0]], [[VAL1]] offset0:4 offset1:127
; SI: s_endpgm
define void @simple_write2st64_two_val_max_offset_f64(double addrspace(1)* %C, double addrspace(1)* %in, double addrspace(3)* %lds) #0 {
%x.i = tail call i32 @llvm.r600.read.tidig.x() #1
diff --git a/test/CodeGen/R600/elf.ll b/test/CodeGen/R600/elf.ll
index 6c521d0..aca3109 100644
--- a/test/CodeGen/R600/elf.ll
+++ b/test/CodeGen/R600/elf.ll
@@ -1,15 +1,21 @@
-; RUN: llc < %s -march=r600 -mcpu=SI -verify-machineinstrs -filetype=obj | llvm-readobj -s - | FileCheck --check-prefix=ELF-CHECK %s
-; RUN: llc < %s -march=r600 -mcpu=SI -verify-machineinstrs -o - | FileCheck --check-prefix=CONFIG-CHECK %s
+; RUN: llc < %s -march=amdgcn -mcpu=SI -verify-machineinstrs -filetype=obj | llvm-readobj -s -symbols - | FileCheck --check-prefix=ELF %s
+; RUN: llc < %s -march=amdgcn -mcpu=SI -verify-machineinstrs -o - | FileCheck --check-prefix=CONFIG %s
+; RUN: llc < %s -march=amdgcn -mcpu=tonga -verify-machineinstrs -filetype=obj | llvm-readobj -s -symbols - | FileCheck --check-prefix=ELF %s
+; RUN: llc < %s -march=amdgcn -mcpu=tonga -verify-machineinstrs -o - | FileCheck --check-prefix=CONFIG %s
-; ELF-CHECK: Format: ELF32
-; ELF-CHECK: Name: .AMDGPU.config
-; ELF-CHECK: Type: SHT_PROGBITS
+; ELF: Format: ELF32
+; ELF: Name: .AMDGPU.config
+; ELF: Type: SHT_PROGBITS
-; CONFIG-CHECK: .align 256
-; CONFIG-CHECK: test:
-; CONFIG-CHECK: .section .AMDGPU.config
-; CONFIG-CHECK-NEXT: .long 45096
-; CONFIG-CHECK-NEXT: .long 0
+; ELF: Symbol {
+; ELF: Name: test
+; ELF: Binding: Global
+
+; CONFIG: .align 256
+; CONFIG: test:
+; CONFIG: .section .AMDGPU.config
+; CONFIG-NEXT: .long 45096
+; CONFIG-NEXT: .long 0
define void @test(i32 %p) #0 {
%i = add i32 %p, 2
%r = bitcast i32 %i to float
diff --git a/test/CodeGen/R600/elf.r600.ll b/test/CodeGen/R600/elf.r600.ll
index 4436c07..51cd085 100644
--- a/test/CodeGen/R600/elf.r600.ll
+++ b/test/CodeGen/R600/elf.r600.ll
@@ -1,14 +1,14 @@
-; RUN: llc < %s -march=r600 -mcpu=redwood -filetype=obj | llvm-readobj -s - | FileCheck --check-prefix=ELF-CHECK %s
-; RUN: llc < %s -march=r600 -mcpu=redwood -o - | FileCheck --check-prefix=CONFIG-CHECK %s
+; RUN: llc < %s -march=r600 -mcpu=redwood -filetype=obj | llvm-readobj -s - | FileCheck --check-prefix=ELF %s
+; RUN: llc < %s -march=r600 -mcpu=redwood -o - | FileCheck --check-prefix=CONFIG %s
-; ELF-CHECK: Format: ELF32
-; ELF-CHECK: Name: .AMDGPU.config
+; ELF: Format: ELF32
+; ELF: Name: .AMDGPU.config
-; CONFIG-CHECK: .section .AMDGPU.config
-; CONFIG-CHECK-NEXT: .long 166100
-; CONFIG-CHECK-NEXT: .long 2
-; CONFIG-CHECK-NEXT: .long 165900
-; CONFIG-CHECK-NEXT: .long 0
+; CONFIG: .section .AMDGPU.config
+; CONFIG-NEXT: .long 166100
+; CONFIG-NEXT: .long 2
+; CONFIG-NEXT: .long 165900
+; CONFIG-NEXT: .long 0
define void @test(float addrspace(1)* %out, i32 %p) {
%i = add i32 %p, 2
%r = bitcast i32 %i to float
diff --git a/test/CodeGen/R600/empty-function.ll b/test/CodeGen/R600/empty-function.ll
index d4ff803..b5593eb 100644
--- a/test/CodeGen/R600/empty-function.ll
+++ b/test/CodeGen/R600/empty-function.ll
@@ -1,4 +1,5 @@
-; RUN: llc -march=r600 -mcpu=SI -verify-machineinstrs < %s | FileCheck -check-prefix=SI %s
+; RUN: llc -march=amdgcn -mcpu=SI -verify-machineinstrs < %s | FileCheck -check-prefix=SI %s
+; RUN: llc -march=amdgcn -mcpu=tonga -verify-machineinstrs < %s | FileCheck -check-prefix=SI %s
; Make sure we don't assert on empty functions
diff --git a/test/CodeGen/R600/endcf-loop-header.ll b/test/CodeGen/R600/endcf-loop-header.ll
new file mode 100644
index 0000000..e3c5b3c
--- /dev/null
+++ b/test/CodeGen/R600/endcf-loop-header.ll
@@ -0,0 +1,34 @@
+; RUN: llc < %s -march=amdgcn -mcpu=SI -verify-machineinstrs | FileCheck %s
+
+; This tests that the llvm.SI.end.cf intrinsic is not inserted into the
+; loop block. This intrinsic will be lowered to s_or_b64 by the code
+; generator.
+
+; CHECK-LABEL: {{^}}test:
+
+; This was lowered from the llvm.SI.end.cf intrinsic:
+; CHECK: s_or_b64 exec, exec
+
+; CHECK: [[LOOP_LABEL:[0-9A-Za-z_]+]]: ; %loop{{$}}
+; CHECK-NOT: s_or_b64 exec, exec
+; CHECK: s_cbranch_execnz [[LOOP_LABEL]]
+define void @test(i32 addrspace(1)* %out, i32 %cond) {
+entry:
+ %tmp0 = icmp eq i32 %cond, 0
+ br i1 %tmp0, label %if, label %loop
+
+if:
+ store i32 0, i32 addrspace(1)* %out
+ br label %loop
+
+loop:
+ %tmp1 = phi i32 [0, %entry], [0, %if], [%inc, %loop]
+ %inc = add i32 %tmp1, %cond
+ %tmp2 = icmp ugt i32 %inc, 10
+ br i1 %tmp2, label %done, label %loop
+
+done:
+ %tmp3 = getelementptr i32 addrspace(1)* %out, i64 1
+ store i32 %inc, i32 addrspace(1)* %tmp3
+ ret void
+}
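
For context on the checks: llvm.SI.end.cf re-enables the exec lanes that were masked off for the preceding structured if, via s_or_b64 on exec. If that restore landed inside the loop block, the mask would be rewritten on every iteration, which is what the CHECK-NOT guards against. The intended layout, as an illustrative sketch rather than actual compiler output:

    s_or_b64 exec, exec, s[0:1]   ; end.cf before the loop: restore lanes once
    BB0_1:                        ; %loop
    ...                           ; no s_or_b64 exec, exec in here
    s_cbranch_execnz BB0_1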
diff --git a/test/CodeGen/R600/extload-private.ll b/test/CodeGen/R600/extload-private.ll
new file mode 100644
index 0000000..fec8682
--- /dev/null
+++ b/test/CodeGen/R600/extload-private.ll
@@ -0,0 +1,46 @@
+; RUN: llc < %s -march=amdgcn -mcpu=SI -mattr=-promote-alloca -verify-machineinstrs | FileCheck --check-prefix=SI --check-prefix=FUNC %s
+; RUN: llc < %s -march=amdgcn -mcpu=tonga -mattr=-promote-alloca -verify-machineinstrs | FileCheck --check-prefix=SI --check-prefix=FUNC %s
+
+; FUNC-LABEL: {{^}}load_i8_sext_private:
+; SI: buffer_load_sbyte v{{[0-9]+}}, v{{[0-9]+}}, s[{{[0-9]+:[0-9]+}}], s{{[0-9]+}} offen
+define void @load_i8_sext_private(i32 addrspace(1)* %out) {
+entry:
+ %tmp0 = alloca i8
+ %tmp1 = load i8* %tmp0
+ %tmp2 = sext i8 %tmp1 to i32
+ store i32 %tmp2, i32 addrspace(1)* %out
+ ret void
+}
+
+; FUNC-LABEL: {{^}}load_i8_zext_private:
+; SI: buffer_load_ubyte v{{[0-9]+}}, v{{[0-9]+}}, s[{{[0-9]+:[0-9]+}}], s{{[0-9]+}} offen
+define void @load_i8_zext_private(i32 addrspace(1)* %out) {
+entry:
+ %tmp0 = alloca i8
+ %tmp1 = load i8* %tmp0
+ %tmp2 = zext i8 %tmp1 to i32
+ store i32 %tmp2, i32 addrspace(1)* %out
+ ret void
+}
+
+; FUNC-LABEL: {{^}}load_i16_sext_private:
+; SI: buffer_load_sshort v{{[0-9]+}}, v{{[0-9]+}}, s[{{[0-9]+:[0-9]+}}], s{{[0-9]+}} offen
+define void @load_i16_sext_private(i32 addrspace(1)* %out) {
+entry:
+ %tmp0 = alloca i16
+ %tmp1 = load i16* %tmp0
+ %tmp2 = sext i16 %tmp1 to i32
+ store i32 %tmp2, i32 addrspace(1)* %out
+ ret void
+}
+
+; FUNC-LABEL: {{^}}load_i16_zext_private:
+; SI: buffer_load_ushort v{{[0-9]+}}, v{{[0-9]+}}, s[{{[0-9]+:[0-9]+}}], s{{[0-9]+}} offen
+define void @load_i16_zext_private(i32 addrspace(1)* %out) {
+entry:
+ %tmp0 = alloca i16
+ %tmp1 = load i16* %tmp0
+ %tmp2 = zext i16 %tmp1 to i32
+ store i32 %tmp2, i32 addrspace(1)* %out
+ ret void
+}
diff --git a/test/CodeGen/R600/extload.ll b/test/CodeGen/R600/extload.ll
index 5bda8f8..77e5dc3 100644
--- a/test/CodeGen/R600/extload.ll
+++ b/test/CodeGen/R600/extload.ll
@@ -1,9 +1,11 @@
; RUN: llc -march=r600 -mcpu=cypress < %s | FileCheck -check-prefix=EG -check-prefix=FUNC %s
-; RUN: llc -march=r600 -mcpu=SI -verify-machineinstrs< %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
+; RUN: llc -march=amdgcn -mcpu=SI -verify-machineinstrs< %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
+; RUN: llc -march=amdgcn -mcpu=tonga -verify-machineinstrs< %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
; FUNC-LABEL: {{^}}anyext_load_i8:
-; EG: AND_INT
-; EG: 255
+; EG: MEM_RAT_CACHELESS STORE_RAW [[VAL:T[0-9]+.[XYZW]]],
+; EG: VTX_READ_32 [[VAL]]
+
define void @anyext_load_i8(i8 addrspace(1)* nocapture noalias %out, i8 addrspace(1)* nocapture noalias %src) nounwind {
%cast = bitcast i8 addrspace(1)* %src to i32 addrspace(1)*
%load = load i32 addrspace(1)* %cast, align 1
@@ -14,10 +16,9 @@ define void @anyext_load_i8(i8 addrspace(1)* nocapture noalias %out, i8 addrspac
}
; FUNC-LABEL: {{^}}anyext_load_i16:
-; EG: AND_INT
-; EG: AND_INT
-; EG-DAG: 65535
-; EG-DAG: -65536
+; EG: MEM_RAT_CACHELESS STORE_RAW [[VAL:T[0-9]+.[XYZW]]],
+; EG: VTX_READ_32 [[VAL]]
+
define void @anyext_load_i16(i16 addrspace(1)* nocapture noalias %out, i16 addrspace(1)* nocapture noalias %src) nounwind {
%cast = bitcast i16 addrspace(1)* %src to i32 addrspace(1)*
%load = load i32 addrspace(1)* %cast, align 1
@@ -28,8 +29,8 @@ define void @anyext_load_i16(i16 addrspace(1)* nocapture noalias %out, i16 addrs
}
; FUNC-LABEL: {{^}}anyext_load_lds_i8:
-; EG: AND_INT
-; EG: 255
+; EG: LDS_READ_RET {{.*}}, [[VAL:T[0-9]+.[XYZW]]]
+; EG: LDS_WRITE * [[VAL]]
define void @anyext_load_lds_i8(i8 addrspace(3)* nocapture noalias %out, i8 addrspace(3)* nocapture noalias %src) nounwind {
%cast = bitcast i8 addrspace(3)* %src to i32 addrspace(3)*
%load = load i32 addrspace(3)* %cast, align 1
@@ -40,10 +41,8 @@ define void @anyext_load_lds_i8(i8 addrspace(3)* nocapture noalias %out, i8 addr
}
; FUNC-LABEL: {{^}}anyext_load_lds_i16:
-; EG: AND_INT
-; EG: AND_INT
-; EG-DAG: 65535
-; EG-DAG: -65536
+; EG: LDS_READ_RET {{.*}}, [[VAL:T[0-9]+.[XYZW]]]
+; EG: LDS_WRITE * [[VAL]]
define void @anyext_load_lds_i16(i16 addrspace(3)* nocapture noalias %out, i16 addrspace(3)* nocapture noalias %src) nounwind {
%cast = bitcast i16 addrspace(3)* %src to i32 addrspace(3)*
%load = load i32 addrspace(3)* %cast, align 1
@@ -52,72 +51,3 @@ define void @anyext_load_lds_i16(i16 addrspace(3)* nocapture noalias %out, i16 a
store <2 x i16> %x, <2 x i16> addrspace(3)* %castOut, align 1
ret void
}
-
-; FUNC-LABEL: {{^}}sextload_global_i8_to_i64:
-; SI: buffer_load_sbyte [[LOAD:v[0-9]+]],
-; SI: v_ashrrev_i32_e32 v{{[0-9]+}}, 31, [[LOAD]]
-; SI: buffer_store_dwordx2
-define void @sextload_global_i8_to_i64(i64 addrspace(1)* %out, i8 addrspace(1)* %in) nounwind {
- %a = load i8 addrspace(1)* %in, align 8
- %ext = sext i8 %a to i64
- store i64 %ext, i64 addrspace(1)* %out, align 8
- ret void
-}
-
-; FUNC-LABEL: {{^}}sextload_global_i16_to_i64:
-; SI: buffer_load_sshort [[LOAD:v[0-9]+]],
-; SI: v_ashrrev_i32_e32 v{{[0-9]+}}, 31, [[LOAD]]
-; SI: buffer_store_dwordx2
-define void @sextload_global_i16_to_i64(i64 addrspace(1)* %out, i16 addrspace(1)* %in) nounwind {
- %a = load i16 addrspace(1)* %in, align 8
- %ext = sext i16 %a to i64
- store i64 %ext, i64 addrspace(1)* %out, align 8
- ret void
-}
-
-; FUNC-LABEL: {{^}}sextload_global_i32_to_i64:
-; SI: buffer_load_dword [[LOAD:v[0-9]+]],
-; SI: v_ashrrev_i32_e32 v{{[0-9]+}}, 31, [[LOAD]]
-; SI: buffer_store_dwordx2
-define void @sextload_global_i32_to_i64(i64 addrspace(1)* %out, i32 addrspace(1)* %in) nounwind {
- %a = load i32 addrspace(1)* %in, align 8
- %ext = sext i32 %a to i64
- store i64 %ext, i64 addrspace(1)* %out, align 8
- ret void
-}
-
-; FUNC-LABEL: {{^}}zextload_global_i8_to_i64:
-; SI-DAG: s_mov_b32 [[ZERO:s[0-9]+]], 0{{$}}
-; SI-DAG: buffer_load_ubyte [[LOAD:v[0-9]+]],
-; SI: v_mov_b32_e32 {{v[0-9]+}}, [[ZERO]]
-; SI: buffer_store_dwordx2
-define void @zextload_global_i8_to_i64(i64 addrspace(1)* %out, i8 addrspace(1)* %in) nounwind {
- %a = load i8 addrspace(1)* %in, align 8
- %ext = zext i8 %a to i64
- store i64 %ext, i64 addrspace(1)* %out, align 8
- ret void
-}
-
-; FUNC-LABEL: {{^}}zextload_global_i16_to_i64:
-; SI-DAG: s_mov_b32 [[ZERO:s[0-9]+]], 0{{$}}
-; SI-DAG: buffer_load_ushort [[LOAD:v[0-9]+]],
-; SI: v_mov_b32_e32 {{v[0-9]+}}, [[ZERO]]
-; SI: buffer_store_dwordx2
-define void @zextload_global_i16_to_i64(i64 addrspace(1)* %out, i16 addrspace(1)* %in) nounwind {
- %a = load i16 addrspace(1)* %in, align 8
- %ext = zext i16 %a to i64
- store i64 %ext, i64 addrspace(1)* %out, align 8
- ret void
-}
-
-; FUNC-LABEL: {{^}}zextload_global_i32_to_i64:
-; SI-DAG: s_mov_b32 [[ZERO:s[0-9]+]], 0{{$}}
-; SI-DAG: buffer_load_dword [[LOAD:v[0-9]+]],
-; SI: v_mov_b32_e32 {{v[0-9]+}}, [[ZERO]]
-; SI: buffer_store_dwordx2
-define void @zextload_global_i32_to_i64(i64 addrspace(1)* %out, i32 addrspace(1)* %in) nounwind {
- %a = load i32 addrspace(1)* %in, align 8
- %ext = zext i32 %a to i64
- store i64 %ext, i64 addrspace(1)* %out, align 8
- ret void
-}
diff --git a/test/CodeGen/R600/extract_vector_elt_i16.ll b/test/CodeGen/R600/extract_vector_elt_i16.ll
index efdc1c8..0774a9a 100644
--- a/test/CodeGen/R600/extract_vector_elt_i16.ll
+++ b/test/CodeGen/R600/extract_vector_elt_i16.ll
@@ -1,4 +1,5 @@
-; RUN: llc -march=r600 -mcpu=SI -verify-machineinstrs< %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
+; RUN: llc -march=amdgcn -mcpu=SI -verify-machineinstrs< %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
+; RUN: llc -march=amdgcn -mcpu=tonga -verify-machineinstrs< %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
; FUNC-LABEL: {{^}}extract_vector_elt_v2i16:
; SI: buffer_load_ushort
diff --git a/test/CodeGen/R600/fabs.f64.ll b/test/CodeGen/R600/fabs.f64.ll
index d2ba320..d87c082 100644
--- a/test/CodeGen/R600/fabs.f64.ll
+++ b/test/CodeGen/R600/fabs.f64.ll
@@ -1,4 +1,4 @@
-; RUN: llc -march=r600 -mcpu=SI -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
+; RUN: llc -march=amdgcn -mcpu=SI -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
declare i32 @llvm.r600.read.tidig.x() nounwind readnone
diff --git a/test/CodeGen/R600/fabs.ll b/test/CodeGen/R600/fabs.ll
index 06cc97f..419a73d 100644
--- a/test/CodeGen/R600/fabs.ll
+++ b/test/CodeGen/R600/fabs.ll
@@ -1,4 +1,5 @@
-; RUN: llc -march=r600 -mcpu=SI -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
+; RUN: llc -march=amdgcn -mcpu=SI -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=SI -check-prefix=FUNC %s
+; RUN: llc -march=amdgcn -mcpu=tonga -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=VI -check-prefix=FUNC %s
; RUN: llc -march=r600 -mcpu=redwood < %s | FileCheck -check-prefix=R600 -check-prefix=FUNC %s
@@ -10,7 +11,7 @@
; R600-NOT: AND
; R600: |PV.{{[XYZW]}}|
-; SI: v_and_b32
+; GCN: v_and_b32
define void @fabs_fn_free(float addrspace(1)* %out, i32 %in) {
%bc= bitcast i32 %in to float
@@ -23,7 +24,7 @@ define void @fabs_fn_free(float addrspace(1)* %out, i32 %in) {
; R600-NOT: AND
; R600: |PV.{{[XYZW]}}|
-; SI: v_and_b32
+; GCN: v_and_b32
define void @fabs_free(float addrspace(1)* %out, i32 %in) {
%bc= bitcast i32 %in to float
@@ -35,7 +36,7 @@ define void @fabs_free(float addrspace(1)* %out, i32 %in) {
; FUNC-LABEL: {{^}}fabs_f32:
; R600: |{{(PV|T[0-9])\.[XYZW]}}|
-; SI: v_and_b32
+; GCN: v_and_b32
define void @fabs_f32(float addrspace(1)* %out, float %in) {
%fabs = call float @llvm.fabs.f32(float %in)
store float %fabs, float addrspace(1)* %out
@@ -46,8 +47,8 @@ define void @fabs_f32(float addrspace(1)* %out, float %in) {
; R600: |{{(PV|T[0-9])\.[XYZW]}}|
; R600: |{{(PV|T[0-9])\.[XYZW]}}|
-; SI: v_and_b32
-; SI: v_and_b32
+; GCN: v_and_b32
+; GCN: v_and_b32
define void @fabs_v2f32(<2 x float> addrspace(1)* %out, <2 x float> %in) {
%fabs = call <2 x float> @llvm.fabs.v2f32(<2 x float> %in)
store <2 x float> %fabs, <2 x float> addrspace(1)* %out
@@ -60,20 +61,21 @@ define void @fabs_v2f32(<2 x float> addrspace(1)* %out, <2 x float> %in) {
; R600: |{{(PV|T[0-9])\.[XYZW]}}|
; R600: |{{(PV|T[0-9])\.[XYZW]}}|
-; SI: v_and_b32
-; SI: v_and_b32
-; SI: v_and_b32
-; SI: v_and_b32
+; GCN: v_and_b32
+; GCN: v_and_b32
+; GCN: v_and_b32
+; GCN: v_and_b32
define void @fabs_v4f32(<4 x float> addrspace(1)* %out, <4 x float> %in) {
%fabs = call <4 x float> @llvm.fabs.v4f32(<4 x float> %in)
store <4 x float> %fabs, <4 x float> addrspace(1)* %out
ret void
}
-; SI-LABEL: {{^}}fabs_fn_fold:
+; GCN-LABEL: {{^}}fabs_fn_fold:
; SI: s_load_dword [[ABS_VALUE:s[0-9]+]], s[{{[0-9]+:[0-9]+}}], 0xb
-; SI-NOT: and
-; SI: v_mul_f32_e64 v{{[0-9]+}}, |[[ABS_VALUE]]|, v{{[0-9]+}}
+; VI: s_load_dword [[ABS_VALUE:s[0-9]+]], s[{{[0-9]+:[0-9]+}}], 0x2c
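+; (SI encodes SMRD offsets in dwords while VI uses bytes, so the same kernarg
+; is loaded from 0xb dwords = 0xb * 4 = 0x2c bytes.)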
+; GCN-NOT: and
+; GCN: v_mul_f32_e64 v{{[0-9]+}}, |[[ABS_VALUE]]|, v{{[0-9]+}}
define void @fabs_fn_fold(float addrspace(1)* %out, float %in0, float %in1) {
%fabs = call float @fabs(float %in0)
%fmul = fmul float %fabs, %in1
@@ -81,10 +83,11 @@ define void @fabs_fn_fold(float addrspace(1)* %out, float %in0, float %in1) {
ret void
}
-; SI-LABEL: {{^}}fabs_fold:
+; GCN-LABEL: {{^}}fabs_fold:
; SI: s_load_dword [[ABS_VALUE:s[0-9]+]], s[{{[0-9]+:[0-9]+}}], 0xb
-; SI-NOT: and
-; SI: v_mul_f32_e64 v{{[0-9]+}}, |[[ABS_VALUE]]|, v{{[0-9]+}}
+; VI: s_load_dword [[ABS_VALUE:s[0-9]+]], s[{{[0-9]+:[0-9]+}}], 0x2c
+; GCN-NOT: and
+; GCN: v_mul_f32_e64 v{{[0-9]+}}, |[[ABS_VALUE]]|, v{{[0-9]+}}
define void @fabs_fold(float addrspace(1)* %out, float %in0, float %in1) {
%fabs = call float @llvm.fabs.f32(float %in0)
%fmul = fmul float %fabs, %in1
diff --git a/test/CodeGen/R600/fadd.ll b/test/CodeGen/R600/fadd.ll
index 774dd0b..365af9b 100644
--- a/test/CodeGen/R600/fadd.ll
+++ b/test/CodeGen/R600/fadd.ll
@@ -1,5 +1,6 @@
; RUN: llc -march=r600 -mcpu=redwood < %s | FileCheck %s -check-prefix=R600 -check-prefix=FUNC
-; RUN: llc -march=r600 -mcpu=SI -verify-machineinstrs < %s | FileCheck %s -check-prefix=SI -check-prefix=FUNC
+; RUN: llc -march=amdgcn -mcpu=SI -verify-machineinstrs < %s | FileCheck %s -check-prefix=SI -check-prefix=FUNC
+; RUN: llc -march=amdgcn -mcpu=tonga -verify-machineinstrs < %s | FileCheck %s -check-prefix=SI -check-prefix=FUNC
; FUNC-LABEL: {{^}}fadd_f32:
; R600: ADD {{\** *}}T{{[0-9]+\.[XYZW]}}, KC0[2].Z, KC0[2].W
diff --git a/test/CodeGen/R600/fadd64.ll b/test/CodeGen/R600/fadd64.ll
index 3ca8500..f1f6fef 100644
--- a/test/CodeGen/R600/fadd64.ll
+++ b/test/CodeGen/R600/fadd64.ll
@@ -1,4 +1,5 @@
-; RUN: llc < %s -march=r600 -mcpu=tahiti -verify-machineinstrs | FileCheck %s
+; RUN: llc < %s -march=amdgcn -mcpu=tahiti -verify-machineinstrs | FileCheck %s
+; RUN: llc < %s -march=amdgcn -mcpu=tonga -verify-machineinstrs | FileCheck %s
; CHECK: {{^}}fadd_f64:
; CHECK: v_add_f64 {{v[[0-9]+:[0-9]+]}}, {{v[[0-9]+:[0-9]+]}}, {{v[[0-9]+:[0-9]+]}}
diff --git a/test/CodeGen/R600/fceil.ll b/test/CodeGen/R600/fceil.ll
index 56dc796..f23e891 100644
--- a/test/CodeGen/R600/fceil.ll
+++ b/test/CodeGen/R600/fceil.ll
@@ -1,4 +1,5 @@
-; RUN: llc -march=r600 -mcpu=SI < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
+; RUN: llc -march=amdgcn -mcpu=SI -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
+; RUN: llc -march=amdgcn -mcpu=tonga -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
; RUN: llc -march=r600 -mcpu=cypress < %s | FileCheck -check-prefix=EG -check-prefix=FUNC %s
declare float @llvm.ceil.f32(float) nounwind readnone
diff --git a/test/CodeGen/R600/fceil64.ll b/test/CodeGen/R600/fceil64.ll
index 029f41d..e3244fa 100644
--- a/test/CodeGen/R600/fceil64.ll
+++ b/test/CodeGen/R600/fceil64.ll
@@ -1,5 +1,6 @@
-; RUN: llc -march=r600 -mcpu=bonaire < %s | FileCheck -check-prefix=CI -check-prefix=FUNC %s
-; RUN: llc -march=r600 -mcpu=SI < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
+; RUN: llc -march=amdgcn -mcpu=SI -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
+; RUN: llc -march=amdgcn -mcpu=bonaire -verify-machineinstrs < %s | FileCheck -check-prefix=CI -check-prefix=FUNC %s
+; RUN: llc -march=amdgcn -mcpu=tonga -verify-machineinstrs < %s | FileCheck -check-prefix=CI -check-prefix=FUNC %s
declare double @llvm.ceil.f64(double) nounwind readnone
declare <2 x double> @llvm.ceil.v2f64(<2 x double>) nounwind readnone
@@ -11,23 +12,24 @@ declare <16 x double> @llvm.ceil.v16f64(<16 x double>) nounwind readnone
; FUNC-LABEL: {{^}}fceil_f64:
; CI: v_ceil_f64_e32
; SI: s_bfe_u32 [[SEXP:s[0-9]+]], {{s[0-9]+}}, 0xb0014
+; SI: s_and_b32 s{{[0-9]+}}, s{{[0-9]+}}, 0x80000000
; SI: s_add_i32 s{{[0-9]+}}, [[SEXP]], 0xfffffc01
; SI: s_lshr_b64
; SI: s_not_b64
; SI: s_and_b64
-; SI-DAG: s_and_b32 s{{[0-9]+}}, s{{[0-9]+}}, 0x80000000
-; SI-DAG: cmp_lt_i32
+; SI: cmp_lt_i32
; SI: cndmask_b32
; SI: cndmask_b32
; SI: cmp_gt_i32
; SI: cndmask_b32
; SI: cndmask_b32
-; SI: cmp_gt_f64
-; SI: cndmask_b32
-; SI: cmp_ne_i32
-; SI: cndmask_b32
-; SI: cndmask_b32
+; SI-DAG: v_cmp_gt_f64
+; SI-DAG: v_cmp_lg_f64
+; SI: s_and_b64
+; SI: v_cndmask_b32
+; SI: v_cndmask_b32
; SI: v_add_f64
+; SI: s_endpgm
define void @fceil_f64(double addrspace(1)* %out, double %x) {
%y = call double @llvm.ceil.f64(double %x) nounwind readnone
store double %y, double addrspace(1)* %out
diff --git a/test/CodeGen/R600/fcmp64.ll b/test/CodeGen/R600/fcmp64.ll
index dc24443..9dc8b50 100644
--- a/test/CodeGen/R600/fcmp64.ll
+++ b/test/CodeGen/R600/fcmp64.ll
@@ -1,7 +1,8 @@
-; RUN: llc < %s -march=r600 -mcpu=tahiti -verify-machineinstrs | FileCheck %s
+; RUN: llc < %s -march=amdgcn -mcpu=tahiti -verify-machineinstrs | FileCheck %s
+; RUN: llc < %s -march=amdgcn -mcpu=tonga -verify-machineinstrs | FileCheck %s
; CHECK-LABEL: {{^}}flt_f64:
-; CHECK: v_cmp_lt_f64_e64 {{s[[0-9]+:[0-9]+], v[[0-9]+:[0-9]+], v[[0-9]+:[0-9]+]}}
+; CHECK: v_cmp_nge_f64_e32 vcc, {{v[[0-9]+:[0-9]+], v[[0-9]+:[0-9]+]}}
define void @flt_f64(i32 addrspace(1)* %out, double addrspace(1)* %in1,
double addrspace(1)* %in2) {
%r0 = load double addrspace(1)* %in1
@@ -13,7 +14,7 @@ define void @flt_f64(i32 addrspace(1)* %out, double addrspace(1)* %in1,
}
; CHECK-LABEL: {{^}}fle_f64:
-; CHECK: v_cmp_le_f64_e64 {{s[[0-9]+:[0-9]+], v[[0-9]+:[0-9]+], v[[0-9]+:[0-9]+]}}
+; CHECK: v_cmp_ngt_f64_e32 vcc, {{v[[0-9]+:[0-9]+], v[[0-9]+:[0-9]+]}}
define void @fle_f64(i32 addrspace(1)* %out, double addrspace(1)* %in1,
double addrspace(1)* %in2) {
%r0 = load double addrspace(1)* %in1
@@ -25,7 +26,7 @@ define void @fle_f64(i32 addrspace(1)* %out, double addrspace(1)* %in1,
}
; CHECK-LABEL: {{^}}fgt_f64:
-; CHECK: v_cmp_gt_f64_e64 {{s[[0-9]+:[0-9]+], v[[0-9]+:[0-9]+], v[[0-9]+:[0-9]+]}}
+; CHECK: v_cmp_nle_f64_e32 vcc, {{v[[0-9]+:[0-9]+], v[[0-9]+:[0-9]+]}}
define void @fgt_f64(i32 addrspace(1)* %out, double addrspace(1)* %in1,
double addrspace(1)* %in2) {
%r0 = load double addrspace(1)* %in1
@@ -37,7 +38,7 @@ define void @fgt_f64(i32 addrspace(1)* %out, double addrspace(1)* %in1,
}
; CHECK-LABEL: {{^}}fge_f64:
-; CHECK: v_cmp_ge_f64_e64 {{s[[0-9]+:[0-9]+], v[[0-9]+:[0-9]+], v[[0-9]+:[0-9]+]}}
+; CHECK: v_cmp_nlt_f64_e32 vcc, {{v[[0-9]+:[0-9]+], v[[0-9]+:[0-9]+]}}
define void @fge_f64(i32 addrspace(1)* %out, double addrspace(1)* %in1,
double addrspace(1)* %in2) {
%r0 = load double addrspace(1)* %in1
@@ -61,7 +62,7 @@ define void @fne_f64(double addrspace(1)* %out, double addrspace(1)* %in1,
}
; CHECK-LABEL: {{^}}feq_f64:
-; CHECK: v_cmp_eq_f64_e64 {{s[[0-9]+:[0-9]+], v[[0-9]+:[0-9]+], v[[0-9]+:[0-9]+]}}
+; CHECK: v_cmp_nlg_f64_e32 vcc, {{v[[0-9]+:[0-9]+], v[[0-9]+:[0-9]+]}}
define void @feq_f64(double addrspace(1)* %out, double addrspace(1)* %in1,
double addrspace(1)* %in2) {
%r0 = load double addrspace(1)* %in1
diff --git a/test/CodeGen/R600/fconst64.ll b/test/CodeGen/R600/fconst64.ll
index 097c89f..28e0c90 100644
--- a/test/CodeGen/R600/fconst64.ll
+++ b/test/CodeGen/R600/fconst64.ll
@@ -1,4 +1,5 @@
-; RUN: llc < %s -march=r600 -mcpu=tahiti -verify-machineinstrs | FileCheck %s
+; RUN: llc < %s -march=amdgcn -mcpu=tahiti -verify-machineinstrs | FileCheck %s
+; RUN: llc < %s -march=amdgcn -mcpu=tonga -verify-machineinstrs | FileCheck %s
; CHECK: {{^}}fconst_f64:
; CHECK-DAG: s_mov_b32 {{s[0-9]+}}, 0x40140000
diff --git a/test/CodeGen/R600/fcopysign.f32.ll b/test/CodeGen/R600/fcopysign.f32.ll
index 897830e..b719d5a 100644
--- a/test/CodeGen/R600/fcopysign.f32.ll
+++ b/test/CodeGen/R600/fcopysign.f32.ll
@@ -1,4 +1,5 @@
-; RUN: llc -march=r600 -mcpu=SI -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
+; RUN: llc -march=amdgcn -mcpu=SI -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=GCN -check-prefix=FUNC %s
+; RUN: llc -march=amdgcn -mcpu=tonga -verify-machineinstrs < %s | FileCheck -check-prefix=VI -check-prefix=GCN -check-prefix=FUNC %s
; RUN: llc -march=r600 -mcpu=cypress -verify-machineinstrs < %s | FileCheck -check-prefix=EG -check-prefix=FUNC %s
@@ -10,12 +11,14 @@ declare <4 x float> @llvm.copysign.v4f32(<4 x float>, <4 x float>) nounwind read
; FUNC-LABEL: {{^}}test_copysign_f32:
; SI: s_load_dword [[SMAG:s[0-9]+]], {{.*}} 0xb
; SI: s_load_dword [[SSIGN:s[0-9]+]], {{.*}} 0xc
-; SI-DAG: v_mov_b32_e32 [[VSIGN:v[0-9]+]], [[SSIGN]]
-; SI-DAG: v_mov_b32_e32 [[VMAG:v[0-9]+]], [[SMAG]]
-; SI-DAG: s_mov_b32 [[SCONST:s[0-9]+]], 0x7fffffff
-; SI: v_bfi_b32 [[RESULT:v[0-9]+]], [[SCONST]], [[VMAG]], [[VSIGN]]
-; SI: buffer_store_dword [[RESULT]],
-; SI: s_endpgm
+; VI: s_load_dword [[SMAG:s[0-9]+]], {{.*}} 0x2c
+; VI: s_load_dword [[SSIGN:s[0-9]+]], {{.*}} 0x30
+; GCN-DAG: v_mov_b32_e32 [[VSIGN:v[0-9]+]], [[SSIGN]]
+; GCN-DAG: v_mov_b32_e32 [[VMAG:v[0-9]+]], [[SMAG]]
+; GCN-DAG: s_mov_b32 [[SCONST:s[0-9]+]], 0x7fffffff
+; GCN: v_bfi_b32 [[RESULT:v[0-9]+]], [[SCONST]], [[VMAG]], [[VSIGN]]
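+; (v_bfi_b32 computes (s0 & s1) | (~s0 & s2), so with mask 0x7fffffff it takes
+; the exponent/mantissa bits from [[VMAG]] and the sign bit from [[VSIGN]].)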
+; GCN: buffer_store_dword [[RESULT]],
+; GCN: s_endpgm
; EG: BFI_INT
define void @test_copysign_f32(float addrspace(1)* %out, float %mag, float %sign) nounwind {
@@ -25,7 +28,7 @@ define void @test_copysign_f32(float addrspace(1)* %out, float %mag, float %sign
}
; FUNC-LABEL: {{^}}test_copysign_v2f32:
-; SI: s_endpgm
+; GCN: s_endpgm
; EG: BFI_INT
; EG: BFI_INT
@@ -36,7 +39,7 @@ define void @test_copysign_v2f32(<2 x float> addrspace(1)* %out, <2 x float> %ma
}
; FUNC-LABEL: {{^}}test_copysign_v4f32:
-; SI: s_endpgm
+; GCN: s_endpgm
; EG: BFI_INT
; EG: BFI_INT
diff --git a/test/CodeGen/R600/fcopysign.f64.ll b/test/CodeGen/R600/fcopysign.f64.ll
index 90f0ce3..3d8c559 100644
--- a/test/CodeGen/R600/fcopysign.f64.ll
+++ b/test/CodeGen/R600/fcopysign.f64.ll
@@ -1,4 +1,5 @@
-; RUN: llc -march=r600 -mcpu=SI -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
+; RUN: llc -march=amdgcn -mcpu=SI -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=GCN -check-prefix=FUNC %s
+; RUN: llc -march=amdgcn -mcpu=tonga -verify-machineinstrs < %s | FileCheck -check-prefix=VI -check-prefix=GCN -check-prefix=FUNC %s
declare double @llvm.copysign.f64(double, double) nounwind readnone
declare <2 x double> @llvm.copysign.v2f64(<2 x double>, <2 x double>) nounwind readnone
@@ -7,13 +8,15 @@ declare <4 x double> @llvm.copysign.v4f64(<4 x double>, <4 x double>) nounwind r
; FUNC-LABEL: {{^}}test_copysign_f64:
; SI-DAG: s_load_dwordx2 s{{\[}}[[SMAG_LO:[0-9]+]]:[[SMAG_HI:[0-9]+]]{{\]}}, s{{\[[0-9]+:[0-9]+\]}}, 0xb
; SI-DAG: s_load_dwordx2 s{{\[}}[[SSIGN_LO:[0-9]+]]:[[SSIGN_HI:[0-9]+]]{{\]}}, s{{\[[0-9]+:[0-9]+\]}}, 0xd
-; SI-DAG: v_mov_b32_e32 v[[VSIGN_HI:[0-9]+]], s[[SSIGN_HI]]
-; SI-DAG: v_mov_b32_e32 v[[VMAG_HI:[0-9]+]], s[[SMAG_HI]]
-; SI-DAG: s_mov_b32 [[SCONST:s[0-9]+]], 0x7fffffff
-; SI: v_bfi_b32 v[[VRESULT_HI:[0-9]+]], [[SCONST]], v[[VMAG_HI]], v[[VSIGN_HI]]
-; SI: v_mov_b32_e32 v[[VMAG_LO:[0-9]+]], s[[SMAG_LO]]
-; SI: buffer_store_dwordx2 v{{\[}}[[VMAG_LO]]:[[VRESULT_HI]]{{\]}}
-; SI: s_endpgm
+; VI-DAG: s_load_dwordx2 s{{\[}}[[SMAG_LO:[0-9]+]]:[[SMAG_HI:[0-9]+]]{{\]}}, s{{\[[0-9]+:[0-9]+\]}}, 0x2c
+; VI-DAG: s_load_dwordx2 s{{\[}}[[SSIGN_LO:[0-9]+]]:[[SSIGN_HI:[0-9]+]]{{\]}}, s{{\[[0-9]+:[0-9]+\]}}, 0x34
+; GCN-DAG: v_mov_b32_e32 v[[VSIGN_HI:[0-9]+]], s[[SSIGN_HI]]
+; GCN-DAG: v_mov_b32_e32 v[[VMAG_HI:[0-9]+]], s[[SMAG_HI]]
+; GCN-DAG: s_mov_b32 [[SCONST:s[0-9]+]], 0x7fffffff
+; GCN: v_bfi_b32 v[[VRESULT_HI:[0-9]+]], [[SCONST]], v[[VMAG_HI]], v[[VSIGN_HI]]
+; GCN: v_mov_b32_e32 v[[VMAG_LO:[0-9]+]], s[[SMAG_LO]]
+; GCN: buffer_store_dwordx2 v{{\[}}[[VMAG_LO]]:[[VRESULT_HI]]{{\]}}
+; GCN: s_endpgm
define void @test_copysign_f64(double addrspace(1)* %out, double %mag, double %sign) nounwind {
%result = call double @llvm.copysign.f64(double %mag, double %sign)
store double %result, double addrspace(1)* %out, align 8
@@ -21,7 +24,7 @@ define void @test_copysign_f64(double addrspace(1)* %out, double %mag, double %s
}
; FUNC-LABEL: {{^}}test_copysign_v2f64:
-; SI: s_endpgm
+; GCN: s_endpgm
define void @test_copysign_v2f64(<2 x double> addrspace(1)* %out, <2 x double> %mag, <2 x double> %sign) nounwind {
%result = call <2 x double> @llvm.copysign.v2f64(<2 x double> %mag, <2 x double> %sign)
store <2 x double> %result, <2 x double> addrspace(1)* %out, align 8
@@ -29,7 +32,7 @@ define void @test_copysign_v2f64(<2 x double> addrspace(1)* %out, <2 x double> %
}
; FUNC-LABEL: {{^}}test_copysign_v4f64:
-; SI: s_endpgm
+; GCN: s_endpgm
define void @test_copysign_v4f64(<4 x double> addrspace(1)* %out, <4 x double> %mag, <4 x double> %sign) nounwind {
%result = call <4 x double> @llvm.copysign.v4f64(<4 x double> %mag, <4 x double> %sign)
store <4 x double> %result, <4 x double> addrspace(1)* %out, align 8
diff --git a/test/CodeGen/R600/fdiv.f64.ll b/test/CodeGen/R600/fdiv.f64.ll
new file mode 100644
index 0000000..6367f32
--- /dev/null
+++ b/test/CodeGen/R600/fdiv.f64.ll
@@ -0,0 +1,96 @@
+; RUN: llc -march=amdgcn -mcpu=hawaii -verify-machineinstrs < %s | FileCheck -check-prefix=CI -check-prefix=COMMON %s
+; RUN: llc -march=amdgcn -mcpu=tahiti -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=COMMON %s
+; RUN: llc -march=amdgcn -mcpu=tonga -verify-machineinstrs < %s | FileCheck -check-prefix=CI -check-prefix=COMMON %s
+
+
+; COMMON-LABEL: {{^}}fdiv_f64:
+; COMMON-DAG: buffer_load_dwordx2 [[NUM:v\[[0-9]+:[0-9]+\]]], {{s\[[0-9]+:[0-9]+\]}}, 0
+; COMMON-DAG: buffer_load_dwordx2 [[DEN:v\[[0-9]+:[0-9]+\]]], {{s\[[0-9]+:[0-9]+\]}}, 0 offset:8
+; CI-DAG: v_div_scale_f64 [[SCALE0:v\[[0-9]+:[0-9]+\]]], {{s\[[0-9]+:[0-9]+\]}}, [[DEN]], [[DEN]], [[NUM]]
+; CI-DAG: v_div_scale_f64 [[SCALE1:v\[[0-9]+:[0-9]+\]]], vcc, [[NUM]], [[DEN]], [[NUM]]
+
+; Check for div_scale bug workaround on SI
+; SI-DAG: v_div_scale_f64 [[SCALE0:v\[[0-9]+:[0-9]+\]]], {{s\[[0-9]+:[0-9]+\]}}, [[DEN]], [[DEN]], [[NUM]]
+; SI-DAG: v_div_scale_f64 [[SCALE1:v\[[0-9]+:[0-9]+\]]], {{s\[[0-9]+:[0-9]+\]}}, [[NUM]], [[DEN]], [[NUM]]
+
+; COMMON-DAG: v_rcp_f64_e32 [[RCP_SCALE0:v\[[0-9]+:[0-9]+\]]], [[SCALE0]]
+
+; SI-DAG: v_cmp_eq_i32_e32 vcc, {{v[0-9]+}}, {{v[0-9]+}}
+; SI-DAG: v_cmp_eq_i32_e64 [[CMP0:s\[[0-9]+:[0-9]+\]]], {{v[0-9]+}}, {{v[0-9]+}}
+; SI-DAG: s_xor_b64 vcc, [[CMP0]], vcc
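+; (As checked above, SI does not use the vcc written by v_div_scale_f64
+; directly; the condition is rebuilt from two v_cmp_eq_i32 results combined
+; with s_xor_b64 before v_div_fmas_f64 reads vcc.)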
+
+; COMMON-DAG: v_fma_f64 [[FMA0:v\[[0-9]+:[0-9]+\]]], -[[SCALE0]], [[RCP_SCALE0]], 1.0
+; COMMON-DAG: v_fma_f64 [[FMA1:v\[[0-9]+:[0-9]+\]]], [[RCP_SCALE0]], [[FMA0]], [[RCP_SCALE0]]
+; COMMON-DAG: v_fma_f64 [[FMA2:v\[[0-9]+:[0-9]+\]]], -[[SCALE0]], [[FMA1]], 1.0
+; COMMON-DAG: v_fma_f64 [[FMA3:v\[[0-9]+:[0-9]+\]]], [[FMA1]], [[FMA2]], [[FMA1]]
+; COMMON-DAG: v_mul_f64 [[MUL:v\[[0-9]+:[0-9]+\]]], [[SCALE1]], [[FMA3]]
+; COMMON-DAG: v_fma_f64 [[FMA4:v\[[0-9]+:[0-9]+\]]], -[[SCALE0]], [[MUL]], [[SCALE1]]
+; COMMON: v_div_fmas_f64 [[FMAS:v\[[0-9]+:[0-9]+\]]], [[FMA4]], [[FMA3]], [[MUL]]
+; COMMON: v_div_fixup_f64 [[RESULT:v\[[0-9]+:[0-9]+\]]], [[FMAS]], [[DEN]], [[NUM]]
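+; (The fma chain is the usual Newton-Raphson refinement of v_rcp_f64's
+; estimate; v_div_fmas_f64 and v_div_fixup_f64 then apply the final
+; correction and handle the special cases.)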
+; COMMON: buffer_store_dwordx2 [[RESULT]]
+; COMMON: s_endpgm
+define void @fdiv_f64(double addrspace(1)* %out, double addrspace(1)* %in) nounwind {
+ %gep.1 = getelementptr double addrspace(1)* %in, i32 1
+ %num = load double addrspace(1)* %in
+ %den = load double addrspace(1)* %gep.1
+ %result = fdiv double %num, %den
+ store double %result, double addrspace(1)* %out
+ ret void
+}
+
+; COMMON-LABEL: {{^}}fdiv_f64_s_v:
+define void @fdiv_f64_s_v(double addrspace(1)* %out, double addrspace(1)* %in, double %num) nounwind {
+ %den = load double addrspace(1)* %in
+ %result = fdiv double %num, %den
+ store double %result, double addrspace(1)* %out
+ ret void
+}
+
+; COMMON-LABEL: {{^}}fdiv_f64_v_s:
+define void @fdiv_f64_v_s(double addrspace(1)* %out, double addrspace(1)* %in, double %den) nounwind {
+ %num = load double addrspace(1)* %in
+ %result = fdiv double %num, %den
+ store double %result, double addrspace(1)* %out
+ ret void
+}
+
+; COMMON-LABEL: {{^}}fdiv_f64_s_s:
+define void @fdiv_f64_s_s(double addrspace(1)* %out, double %num, double %den) nounwind {
+ %result = fdiv double %num, %den
+ store double %result, double addrspace(1)* %out
+ ret void
+}
+
+; COMMON-LABEL: {{^}}v_fdiv_v2f64:
+define void @v_fdiv_v2f64(<2 x double> addrspace(1)* %out, <2 x double> addrspace(1)* %in) nounwind {
+ %gep.1 = getelementptr <2 x double> addrspace(1)* %in, i32 1
+ %num = load <2 x double> addrspace(1)* %in
+ %den = load <2 x double> addrspace(1)* %gep.1
+ %result = fdiv <2 x double> %num, %den
+ store <2 x double> %result, <2 x double> addrspace(1)* %out
+ ret void
+}
+
+; COMMON-LABEL: {{^}}s_fdiv_v2f64:
+define void @s_fdiv_v2f64(<2 x double> addrspace(1)* %out, <2 x double> %num, <2 x double> %den) {
+ %result = fdiv <2 x double> %num, %den
+ store <2 x double> %result, <2 x double> addrspace(1)* %out
+ ret void
+}
+
+; COMMON-LABEL: {{^}}v_fdiv_v4f64:
+define void @v_fdiv_v4f64(<4 x double> addrspace(1)* %out, <4 x double> addrspace(1)* %in) nounwind {
+ %gep.1 = getelementptr <4 x double> addrspace(1)* %in, i32 1
+ %num = load <4 x double> addrspace(1)* %in
+ %den = load <4 x double> addrspace(1)* %gep.1
+ %result = fdiv <4 x double> %num, %den
+ store <4 x double> %result, <4 x double> addrspace(1)* %out
+ ret void
+}
+
+; COMMON-LABEL: {{^}}s_fdiv_v4f64:
+define void @s_fdiv_v4f64(<4 x double> addrspace(1)* %out, <4 x double> %num, <4 x double> %den) {
+ %result = fdiv <4 x double> %num, %den
+ store <4 x double> %result, <4 x double> addrspace(1)* %out
+ ret void
+}
diff --git a/test/CodeGen/R600/fdiv.ll b/test/CodeGen/R600/fdiv.ll
index 5321fdb..603287f 100644
--- a/test/CodeGen/R600/fdiv.ll
+++ b/test/CodeGen/R600/fdiv.ll
@@ -1,5 +1,6 @@
; RUN: llc -march=r600 -mcpu=redwood < %s | FileCheck -check-prefix=R600 %s
-; RUN: llc -march=r600 -mcpu=SI -verify-machineinstrs < %s | FileCheck -check-prefix=SI %s
+; RUN: llc -march=amdgcn -mcpu=SI -verify-machineinstrs < %s | FileCheck -check-prefix=SI %s
+; RUN: llc -march=amdgcn -mcpu=tonga -verify-machineinstrs < %s | FileCheck -check-prefix=SI %s
; These tests check that fdiv is expanded correctly and also test that the
; scheduler is scheduling the RECIP_IEEE and MUL_IEEE instructions in separate
diff --git a/test/CodeGen/R600/fdiv64.ll b/test/CodeGen/R600/fdiv64.ll
deleted file mode 100644
index d424898..0000000
--- a/test/CodeGen/R600/fdiv64.ll
+++ /dev/null
@@ -1,14 +0,0 @@
-; RUN: llc < %s -march=r600 -mcpu=tahiti -verify-machineinstrs | FileCheck %s
-
-; CHECK: {{^}}fdiv_f64:
-; CHECK: v_rcp_f64_e32 {{v\[[0-9]+:[0-9]+\]}}
-; CHECK: v_mul_f64 {{v\[[0-9]+:[0-9]+\]}}, {{v\[[0-9]+:[0-9]+\]}}, {{v\[[0-9]+:[0-9]+\]}}
-
-define void @fdiv_f64(double addrspace(1)* %out, double addrspace(1)* %in1,
- double addrspace(1)* %in2) {
- %r0 = load double addrspace(1)* %in1
- %r1 = load double addrspace(1)* %in2
- %r2 = fdiv double %r0, %r1
- store double %r2, double addrspace(1)* %out
- ret void
-}
diff --git a/test/CodeGen/R600/ffloor.f64.ll b/test/CodeGen/R600/ffloor.f64.ll
new file mode 100644
index 0000000..745ad3b
--- /dev/null
+++ b/test/CodeGen/R600/ffloor.f64.ll
@@ -0,0 +1,106 @@
+; RUN: llc -march=amdgcn -mcpu=SI -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
+; RUN: llc -march=amdgcn -mcpu=bonaire -verify-machineinstrs < %s | FileCheck -check-prefix=CI -check-prefix=FUNC %s
+; RUN: llc -march=amdgcn -mcpu=tonga -verify-machineinstrs < %s | FileCheck -check-prefix=CI -check-prefix=FUNC %s
+
+declare double @llvm.floor.f64(double) nounwind readnone
+declare <2 x double> @llvm.floor.v2f64(<2 x double>) nounwind readnone
+declare <3 x double> @llvm.floor.v3f64(<3 x double>) nounwind readnone
+declare <4 x double> @llvm.floor.v4f64(<4 x double>) nounwind readnone
+declare <8 x double> @llvm.floor.v8f64(<8 x double>) nounwind readnone
+declare <16 x double> @llvm.floor.v16f64(<16 x double>) nounwind readnone
+
+; FUNC-LABEL: {{^}}ffloor_f64:
+; CI: v_floor_f64_e32
+
+; SI: s_bfe_u32 [[SEXP:s[0-9]+]], {{s[0-9]+}}, 0xb0014
+; SI: s_and_b32 s{{[0-9]+}}, s{{[0-9]+}}, 0x80000000
+; SI: s_add_i32 s{{[0-9]+}}, [[SEXP]], 0xfffffc01
+; SI: s_lshr_b64
+; SI: s_not_b64
+; SI: s_and_b64
+; SI: cmp_lt_i32
+; SI: cndmask_b32
+; SI: cndmask_b32
+; SI: cmp_gt_i32
+; SI: cndmask_b32
+; SI: cndmask_b32
+; SI-DAG: v_cmp_lt_f64
+; SI-DAG: v_cmp_lg_f64
+; SI-DAG: s_and_b64
+; SI-DAG: v_cndmask_b32
+; SI-DAG: v_cndmask_b32
+; SI: v_add_f64
+; SI: s_endpgm
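+; (SI has no v_floor_f64; the sequence above open-codes it as a trunc built by
+; exponent masking plus a v_add_f64 of a cndmask-selected -1.0 when
+; trunc(x) > x, i.e. for negative fractional inputs.)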
+define void @ffloor_f64(double addrspace(1)* %out, double %x) {
+ %y = call double @llvm.floor.f64(double %x) nounwind readnone
+ store double %y, double addrspace(1)* %out
+ ret void
+}
+
+; FUNC-LABEL: {{^}}ffloor_v2f64:
+; CI: v_floor_f64_e32
+; CI: v_floor_f64_e32
+define void @ffloor_v2f64(<2 x double> addrspace(1)* %out, <2 x double> %x) {
+ %y = call <2 x double> @llvm.floor.v2f64(<2 x double> %x) nounwind readnone
+ store <2 x double> %y, <2 x double> addrspace(1)* %out
+ ret void
+}
+
+; FIXME-FUNC-LABEL: {{^}}ffloor_v3f64:
+; FIXME-CI: v_floor_f64_e32
+; FIXME-CI: v_floor_f64_e32
+; FIXME-CI: v_floor_f64_e32
+; define void @ffloor_v3f64(<3 x double> addrspace(1)* %out, <3 x double> %x) {
+; %y = call <3 x double> @llvm.floor.v3f64(<3 x double> %x) nounwind readnone
+; store <3 x double> %y, <3 x double> addrspace(1)* %out
+; ret void
+; }
+
+; FUNC-LABEL: {{^}}ffloor_v4f64:
+; CI: v_floor_f64_e32
+; CI: v_floor_f64_e32
+; CI: v_floor_f64_e32
+; CI: v_floor_f64_e32
+define void @ffloor_v4f64(<4 x double> addrspace(1)* %out, <4 x double> %x) {
+ %y = call <4 x double> @llvm.floor.v4f64(<4 x double> %x) nounwind readnone
+ store <4 x double> %y, <4 x double> addrspace(1)* %out
+ ret void
+}
+
+; FUNC-LABEL: {{^}}ffloor_v8f64:
+; CI: v_floor_f64_e32
+; CI: v_floor_f64_e32
+; CI: v_floor_f64_e32
+; CI: v_floor_f64_e32
+; CI: v_floor_f64_e32
+; CI: v_floor_f64_e32
+; CI: v_floor_f64_e32
+; CI: v_floor_f64_e32
+define void @ffloor_v8f64(<8 x double> addrspace(1)* %out, <8 x double> %x) {
+ %y = call <8 x double> @llvm.floor.v8f64(<8 x double> %x) nounwind readnone
+ store <8 x double> %y, <8 x double> addrspace(1)* %out
+ ret void
+}
+
+; FUNC-LABEL: {{^}}ffloor_v16f64:
+; CI: v_floor_f64_e32
+; CI: v_floor_f64_e32
+; CI: v_floor_f64_e32
+; CI: v_floor_f64_e32
+; CI: v_floor_f64_e32
+; CI: v_floor_f64_e32
+; CI: v_floor_f64_e32
+; CI: v_floor_f64_e32
+; CI: v_floor_f64_e32
+; CI: v_floor_f64_e32
+; CI: v_floor_f64_e32
+; CI: v_floor_f64_e32
+; CI: v_floor_f64_e32
+; CI: v_floor_f64_e32
+; CI: v_floor_f64_e32
+; CI: v_floor_f64_e32
+define void @ffloor_v16f64(<16 x double> addrspace(1)* %out, <16 x double> %x) {
+ %y = call <16 x double> @llvm.floor.v16f64(<16 x double> %x) nounwind readnone
+ store <16 x double> %y, <16 x double> addrspace(1)* %out
+ ret void
+}
diff --git a/test/CodeGen/R600/ffloor.ll b/test/CodeGen/R600/ffloor.ll
index 166f705..61c46ac 100644
--- a/test/CodeGen/R600/ffloor.ll
+++ b/test/CodeGen/R600/ffloor.ll
@@ -1,104 +1,49 @@
-; RUN: llc -march=r600 -mcpu=bonaire < %s | FileCheck -check-prefix=CI -check-prefix=FUNC %s
-; RUN: llc -march=r600 -mcpu=SI < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
-
-declare double @llvm.floor.f64(double) nounwind readnone
-declare <2 x double> @llvm.floor.v2f64(<2 x double>) nounwind readnone
-declare <3 x double> @llvm.floor.v3f64(<3 x double>) nounwind readnone
-declare <4 x double> @llvm.floor.v4f64(<4 x double>) nounwind readnone
-declare <8 x double> @llvm.floor.v8f64(<8 x double>) nounwind readnone
-declare <16 x double> @llvm.floor.v16f64(<16 x double>) nounwind readnone
+; RUN: llc -march=amdgcn -mcpu=SI -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
+; RUN: llc -march=amdgcn -mcpu=tonga -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
+; RUN: llc -march=r600 -mcpu=redwood < %s | FileCheck -check-prefix=R600 -check-prefix=FUNC %s
+
+; FUNC-LABEL: {{^}}floor_f32:
+; SI: v_floor_f32_e32
+; R600: FLOOR
+define void @floor_f32(float addrspace(1)* %out, float %in) {
+ %tmp = call float @llvm.floor.f32(float %in) #0
+ store float %tmp, float addrspace(1)* %out
+ ret void
+}
-; FUNC-LABEL: {{^}}ffloor_f64:
-; CI: v_floor_f64_e32
+; FUNC-LABEL: {{^}}floor_v2f32:
+; SI: v_floor_f32_e32
+; SI: v_floor_f32_e32
-; SI: s_bfe_u32 [[SEXP:s[0-9]+]], {{s[0-9]+}}, 0xb0014
-; SI: s_add_i32 s{{[0-9]+}}, [[SEXP]], 0xfffffc01
-; SI: s_lshr_b64
-; SI: s_not_b64
-; SI: s_and_b64
-; SI-DAG: s_and_b32 s{{[0-9]+}}, s{{[0-9]+}}, 0x80000000
-; SI-DAG: cmp_lt_i32
-; SI: cndmask_b32
-; SI: cndmask_b32
-; SI: cmp_gt_i32
-; SI: cndmask_b32
-; SI: cndmask_b32
-; SI: cmp_lt_f64
-; SI: cndmask_b32
-; SI: cmp_ne_i32
-; SI: cndmask_b32
-; SI: cndmask_b32
-; SI: v_add_f64
-define void @ffloor_f64(double addrspace(1)* %out, double %x) {
- %y = call double @llvm.floor.f64(double %x) nounwind readnone
- store double %y, double addrspace(1)* %out
+define void @floor_v2f32(<2 x float> addrspace(1)* %out, <2 x float> %in) {
+ %tmp = call <2 x float> @llvm.floor.v2f32(<2 x float> %in) #0
+ store <2 x float> %tmp, <2 x float> addrspace(1)* %out
ret void
}
-; FUNC-LABEL: {{^}}ffloor_v2f64:
-; CI: v_floor_f64_e32
-; CI: v_floor_f64_e32
-define void @ffloor_v2f64(<2 x double> addrspace(1)* %out, <2 x double> %x) {
- %y = call <2 x double> @llvm.floor.v2f64(<2 x double> %x) nounwind readnone
- store <2 x double> %y, <2 x double> addrspace(1)* %out
+; FUNC-LABEL: {{^}}floor_v4f32:
+; SI: v_floor_f32_e32
+; SI: v_floor_f32_e32
+; SI: v_floor_f32_e32
+; SI: v_floor_f32_e32
+
+; R600: FLOOR
+; R600: FLOOR
+; R600: FLOOR
+; R600: FLOOR
+define void @floor_v4f32(<4 x float> addrspace(1)* %out, <4 x float> %in) {
+ %tmp = call <4 x float> @llvm.floor.v4f32(<4 x float> %in) #0
+ store <4 x float> %tmp, <4 x float> addrspace(1)* %out
ret void
}
-; FIXME-FUNC-LABEL: {{^}}ffloor_v3f64:
-; FIXME-CI: v_floor_f64_e32
-; FIXME-CI: v_floor_f64_e32
-; FIXME-CI: v_floor_f64_e32
-; define void @ffloor_v3f64(<3 x double> addrspace(1)* %out, <3 x double> %x) {
-; %y = call <3 x double> @llvm.floor.v3f64(<3 x double> %x) nounwind readnone
-; store <3 x double> %y, <3 x double> addrspace(1)* %out
-; ret void
-; }
+; Function Attrs: nounwind readnone
+declare float @llvm.floor.f32(float) #0
-; FUNC-LABEL: {{^}}ffloor_v4f64:
-; CI: v_floor_f64_e32
-; CI: v_floor_f64_e32
-; CI: v_floor_f64_e32
-; CI: v_floor_f64_e32
-define void @ffloor_v4f64(<4 x double> addrspace(1)* %out, <4 x double> %x) {
- %y = call <4 x double> @llvm.floor.v4f64(<4 x double> %x) nounwind readnone
- store <4 x double> %y, <4 x double> addrspace(1)* %out
- ret void
-}
+; Function Attrs: nounwind readnone
+declare <2 x float> @llvm.floor.v2f32(<2 x float>) #0
-; FUNC-LABEL: {{^}}ffloor_v8f64:
-; CI: v_floor_f64_e32
-; CI: v_floor_f64_e32
-; CI: v_floor_f64_e32
-; CI: v_floor_f64_e32
-; CI: v_floor_f64_e32
-; CI: v_floor_f64_e32
-; CI: v_floor_f64_e32
-; CI: v_floor_f64_e32
-define void @ffloor_v8f64(<8 x double> addrspace(1)* %out, <8 x double> %x) {
- %y = call <8 x double> @llvm.floor.v8f64(<8 x double> %x) nounwind readnone
- store <8 x double> %y, <8 x double> addrspace(1)* %out
- ret void
-}
+; Function Attrs: nounwind readnone
+declare <4 x float> @llvm.floor.v4f32(<4 x float>) #0
-; FUNC-LABEL: {{^}}ffloor_v16f64:
-; CI: v_floor_f64_e32
-; CI: v_floor_f64_e32
-; CI: v_floor_f64_e32
-; CI: v_floor_f64_e32
-; CI: v_floor_f64_e32
-; CI: v_floor_f64_e32
-; CI: v_floor_f64_e32
-; CI: v_floor_f64_e32
-; CI: v_floor_f64_e32
-; CI: v_floor_f64_e32
-; CI: v_floor_f64_e32
-; CI: v_floor_f64_e32
-; CI: v_floor_f64_e32
-; CI: v_floor_f64_e32
-; CI: v_floor_f64_e32
-; CI: v_floor_f64_e32
-define void @ffloor_v16f64(<16 x double> addrspace(1)* %out, <16 x double> %x) {
- %y = call <16 x double> @llvm.floor.v16f64(<16 x double> %x) nounwind readnone
- store <16 x double> %y, <16 x double> addrspace(1)* %out
- ret void
-}
+attributes #0 = { nounwind readnone }
diff --git a/test/CodeGen/R600/flat-address-space.ll b/test/CodeGen/R600/flat-address-space.ll
index fc5af7c..2e98bf5 100644
--- a/test/CodeGen/R600/flat-address-space.ll
+++ b/test/CodeGen/R600/flat-address-space.ll
@@ -1,5 +1,7 @@
-; RUN: llc -O0 -march=r600 -mcpu=bonaire -mattr=-promote-alloca < %s | FileCheck -check-prefix=CHECK -check-prefix=CHECK-NO-PROMOTE %s
-; RUN: llc -O0 -march=r600 -mcpu=bonaire -mattr=+promote-alloca < %s | FileCheck -check-prefix=CHECK -check-prefix=CHECK-PROMOTE %s
+; RUN: llc -O0 -march=amdgcn -mcpu=bonaire -mattr=-promote-alloca < %s | FileCheck -check-prefix=CHECK -check-prefix=CHECK-NO-PROMOTE %s
+; RUN: llc -O0 -march=amdgcn -mcpu=bonaire -mattr=+promote-alloca < %s | FileCheck -check-prefix=CHECK -check-prefix=CHECK-PROMOTE %s
+; RUN: llc -O0 -march=amdgcn -mcpu=tonga -mattr=-promote-alloca < %s | FileCheck -check-prefix=CHECK -check-prefix=CHECK-NO-PROMOTE %s
+; RUN: llc -O0 -march=amdgcn -mcpu=tonga -mattr=+promote-alloca < %s | FileCheck -check-prefix=CHECK -check-prefix=CHECK-PROMOTE %s
; Disable optimizations in case there are optimizations added that
; specialize away generic pointer accesses.
diff --git a/test/CodeGen/R600/floor.ll b/test/CodeGen/R600/floor.ll
index 67e86c4..c6bfb85 100644
--- a/test/CodeGen/R600/floor.ll
+++ b/test/CodeGen/R600/floor.ll
@@ -1,7 +1,6 @@
-;RUN: llc < %s -march=r600 -mcpu=redwood | FileCheck %s
-
-;CHECK: FLOOR * T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
+; RUN: llc -march=r600 -mcpu=redwood < %s | FileCheck %s
+; CHECK: FLOOR * T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
define void @test(<4 x float> inreg %reg0) #0 {
%r0 = extractelement <4 x float> %reg0, i32 0
%r1 = call float @floor(float %r0)
@@ -13,4 +12,4 @@ define void @test(<4 x float> inreg %reg0) #0 {
declare float @floor(float) readonly
declare void @llvm.R600.store.swizzle(<4 x float>, i32, i32)
-attributes #0 = { "ShaderType"="0" } \ No newline at end of file
+attributes #0 = { "ShaderType"="0" }
diff --git a/test/CodeGen/R600/fma-combine.ll b/test/CodeGen/R600/fma-combine.ll
new file mode 100644
index 0000000..9aac90c
--- /dev/null
+++ b/test/CodeGen/R600/fma-combine.ll
@@ -0,0 +1,368 @@
+; RUN: llc -march=amdgcn -mcpu=tahiti -verify-machineinstrs -fp-contract=fast < %s | FileCheck -check-prefix=SI-FASTFMAF -check-prefix=SI -check-prefix=FUNC %s
+; RUN: llc -march=amdgcn -mcpu=verde -verify-machineinstrs -fp-contract=fast < %s | FileCheck -check-prefix=SI-SLOWFMAF -check-prefix=SI -check-prefix=FUNC %s
+
+declare i32 @llvm.r600.read.tidig.x() #0
+declare double @llvm.fabs.f64(double) #0
+declare double @llvm.fma.f64(double, double, double) #0
+declare float @llvm.fma.f32(float, float, float) #0
+
+; (fadd (fmul x, y), z) -> (fma x, y, z)
+; FUNC-LABEL: {{^}}combine_to_fma_f64_0:
+; SI-DAG: buffer_load_dwordx2 [[A:v\[[0-9]+:[0-9]+\]]], v{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, 0 addr64{{$}}
+; SI-DAG: buffer_load_dwordx2 [[B:v\[[0-9]+:[0-9]+\]]], v{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, 0 addr64 offset:8{{$}}
+; SI-DAG: buffer_load_dwordx2 [[C:v\[[0-9]+:[0-9]+\]]], v{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, 0 addr64 offset:16{{$}}
+; SI: v_fma_f64 [[RESULT:v\[[0-9]+:[0-9]+\]]], [[A]], [[B]], [[C]]
+; SI: buffer_store_dwordx2 [[RESULT]]
+define void @combine_to_fma_f64_0(double addrspace(1)* noalias %out, double addrspace(1)* noalias %in) #1 {
+ %tid = tail call i32 @llvm.r600.read.tidig.x() #0
+ %gep.0 = getelementptr double addrspace(1)* %in, i32 %tid
+ %gep.1 = getelementptr double addrspace(1)* %gep.0, i32 1
+ %gep.2 = getelementptr double addrspace(1)* %gep.0, i32 2
+ %gep.out = getelementptr double addrspace(1)* %out, i32 %tid
+
+ %a = load double addrspace(1)* %gep.0
+ %b = load double addrspace(1)* %gep.1
+ %c = load double addrspace(1)* %gep.2
+
+ %mul = fmul double %a, %b
+ %fma = fadd double %mul, %c
+ store double %fma, double addrspace(1)* %gep.out
+ ret void
+}
+
+; (fadd (fmul x, y), z) -> (fma x, y, z)
+; FUNC-LABEL: {{^}}combine_to_fma_f64_0_2use:
+; SI-DAG: buffer_load_dwordx2 [[A:v\[[0-9]+:[0-9]+\]]], v{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, 0 addr64{{$}}
+; SI-DAG: buffer_load_dwordx2 [[B:v\[[0-9]+:[0-9]+\]]], v{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, 0 addr64 offset:8{{$}}
+; SI-DAG: buffer_load_dwordx2 [[C:v\[[0-9]+:[0-9]+\]]], v{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, 0 addr64 offset:16{{$}}
+; SI-DAG: buffer_load_dwordx2 [[D:v\[[0-9]+:[0-9]+\]]], v{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, 0 addr64 offset:24{{$}}
+; SI-DAG: v_fma_f64 [[RESULT0:v\[[0-9]+:[0-9]+\]]], [[A]], [[B]], [[C]]
+; SI-DAG: v_fma_f64 [[RESULT1:v\[[0-9]+:[0-9]+\]]], [[A]], [[B]], [[D]]
+; SI-DAG: buffer_store_dwordx2 [[RESULT0]], v{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, 0 addr64{{$}}
+; SI-DAG: buffer_store_dwordx2 [[RESULT1]], v{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, 0 addr64 offset:8{{$}}
+; SI: s_endpgm
+define void @combine_to_fma_f64_0_2use(double addrspace(1)* noalias %out, double addrspace(1)* noalias %in) #1 {
+ %tid = tail call i32 @llvm.r600.read.tidig.x() #0
+ %gep.0 = getelementptr double addrspace(1)* %in, i32 %tid
+ %gep.1 = getelementptr double addrspace(1)* %gep.0, i32 1
+ %gep.2 = getelementptr double addrspace(1)* %gep.0, i32 2
+ %gep.3 = getelementptr double addrspace(1)* %gep.0, i32 3
+ %gep.out.0 = getelementptr double addrspace(1)* %out, i32 %tid
+ %gep.out.1 = getelementptr double addrspace(1)* %gep.out.0, i32 1
+
+ %a = load double addrspace(1)* %gep.0
+ %b = load double addrspace(1)* %gep.1
+ %c = load double addrspace(1)* %gep.2
+ %d = load double addrspace(1)* %gep.3
+
+ %mul = fmul double %a, %b
+ %fma0 = fadd double %mul, %c
+ %fma1 = fadd double %mul, %d
+ store double %fma0, double addrspace(1)* %gep.out.0
+ store double %fma1, double addrspace(1)* %gep.out.1
+ ret void
+}
+
+; (fadd x, (fmul y, z)) -> (fma y, z, x)
+; FUNC-LABEL: {{^}}combine_to_fma_f64_1:
+; SI-DAG: buffer_load_dwordx2 [[A:v\[[0-9]+:[0-9]+\]]], v{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, 0 addr64{{$}}
+; SI-DAG: buffer_load_dwordx2 [[B:v\[[0-9]+:[0-9]+\]]], v{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, 0 addr64 offset:8{{$}}
+; SI-DAG: buffer_load_dwordx2 [[C:v\[[0-9]+:[0-9]+\]]], v{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, 0 addr64 offset:16{{$}}
+; SI: v_fma_f64 [[RESULT:v\[[0-9]+:[0-9]+\]]], [[A]], [[B]], [[C]]
+; SI: buffer_store_dwordx2 [[RESULT]]
+define void @combine_to_fma_f64_1(double addrspace(1)* noalias %out, double addrspace(1)* noalias %in) #1 {
+ %tid = tail call i32 @llvm.r600.read.tidig.x() #0
+ %gep.0 = getelementptr double addrspace(1)* %in, i32 %tid
+ %gep.1 = getelementptr double addrspace(1)* %gep.0, i32 1
+ %gep.2 = getelementptr double addrspace(1)* %gep.0, i32 2
+ %gep.out = getelementptr double addrspace(1)* %out, i32 %tid
+
+ %a = load double addrspace(1)* %gep.0
+ %b = load double addrspace(1)* %gep.1
+ %c = load double addrspace(1)* %gep.2
+
+ %mul = fmul double %a, %b
+ %fma = fadd double %c, %mul
+ store double %fma, double addrspace(1)* %gep.out
+ ret void
+}
+
+; (fsub (fmul x, y), z) -> (fma x, y, (fneg z))
+; FUNC-LABEL: {{^}}combine_to_fma_fsub_0_f64:
+; SI-DAG: buffer_load_dwordx2 [[A:v\[[0-9]+:[0-9]+\]]], v{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, 0 addr64{{$}}
+; SI-DAG: buffer_load_dwordx2 [[B:v\[[0-9]+:[0-9]+\]]], v{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, 0 addr64 offset:8{{$}}
+; SI-DAG: buffer_load_dwordx2 [[C:v\[[0-9]+:[0-9]+\]]], v{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, 0 addr64 offset:16{{$}}
+; SI: v_fma_f64 [[RESULT:v\[[0-9]+:[0-9]+\]]], [[A]], [[B]], -[[C]]
+; SI: buffer_store_dwordx2 [[RESULT]]
+define void @combine_to_fma_fsub_0_f64(double addrspace(1)* noalias %out, double addrspace(1)* noalias %in) #1 {
+ %tid = tail call i32 @llvm.r600.read.tidig.x() #0
+ %gep.0 = getelementptr double addrspace(1)* %in, i32 %tid
+ %gep.1 = getelementptr double addrspace(1)* %gep.0, i32 1
+ %gep.2 = getelementptr double addrspace(1)* %gep.0, i32 2
+ %gep.out = getelementptr double addrspace(1)* %out, i32 %tid
+
+ %a = load double addrspace(1)* %gep.0
+ %b = load double addrspace(1)* %gep.1
+ %c = load double addrspace(1)* %gep.2
+
+ %mul = fmul double %a, %b
+ %fma = fsub double %mul, %c
+ store double %fma, double addrspace(1)* %gep.out
+ ret void
+}
+
+; (fsub (fmul x, y), z) -> (fma x, y, (fneg z))
+; FUNC-LABEL: {{^}}combine_to_fma_fsub_f64_0_2use:
+; SI-DAG: buffer_load_dwordx2 [[A:v\[[0-9]+:[0-9]+\]]], v{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, 0 addr64{{$}}
+; SI-DAG: buffer_load_dwordx2 [[B:v\[[0-9]+:[0-9]+\]]], v{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, 0 addr64 offset:8{{$}}
+; SI-DAG: buffer_load_dwordx2 [[C:v\[[0-9]+:[0-9]+\]]], v{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, 0 addr64 offset:16{{$}}
+; SI-DAG: buffer_load_dwordx2 [[D:v\[[0-9]+:[0-9]+\]]], v{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, 0 addr64 offset:24{{$}}
+; SI-DAG: v_fma_f64 [[RESULT0:v\[[0-9]+:[0-9]+\]]], [[A]], [[B]], -[[C]]
+; SI-DAG: v_fma_f64 [[RESULT1:v\[[0-9]+:[0-9]+\]]], [[A]], [[B]], -[[D]]
+; SI-DAG: buffer_store_dwordx2 [[RESULT0]], v{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, 0 addr64{{$}}
+; SI-DAG: buffer_store_dwordx2 [[RESULT1]], v{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, 0 addr64 offset:8{{$}}
+; SI: s_endpgm
+define void @combine_to_fma_fsub_f64_0_2use(double addrspace(1)* noalias %out, double addrspace(1)* noalias %in) #1 {
+ %tid = tail call i32 @llvm.r600.read.tidig.x() #0
+ %gep.0 = getelementptr double addrspace(1)* %in, i32 %tid
+ %gep.1 = getelementptr double addrspace(1)* %gep.0, i32 1
+ %gep.2 = getelementptr double addrspace(1)* %gep.0, i32 2
+ %gep.3 = getelementptr double addrspace(1)* %gep.0, i32 3
+ %gep.out.0 = getelementptr double addrspace(1)* %out, i32 %tid
+ %gep.out.1 = getelementptr double addrspace(1)* %gep.out.0, i32 1
+
+ %a = load double addrspace(1)* %gep.0
+ %b = load double addrspace(1)* %gep.1
+ %c = load double addrspace(1)* %gep.2
+ %d = load double addrspace(1)* %gep.3
+
+ %mul = fmul double %a, %b
+ %fma0 = fsub double %mul, %c
+ %fma1 = fsub double %mul, %d
+ store double %fma0, double addrspace(1)* %gep.out.0
+ store double %fma1, double addrspace(1)* %gep.out.1
+ ret void
+}
+
+; (fsub x, (fmul y, z)) -> (fma (fneg y), z, x)
+; FUNC-LABEL: {{^}}combine_to_fma_fsub_1_f64:
+; SI-DAG: buffer_load_dwordx2 [[A:v\[[0-9]+:[0-9]+\]]], v{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, 0 addr64{{$}}
+; SI-DAG: buffer_load_dwordx2 [[B:v\[[0-9]+:[0-9]+\]]], v{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, 0 addr64 offset:8{{$}}
+; SI-DAG: buffer_load_dwordx2 [[C:v\[[0-9]+:[0-9]+\]]], v{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, 0 addr64 offset:16{{$}}
+; SI: v_fma_f64 [[RESULT:v\[[0-9]+:[0-9]+\]]], -[[A]], [[B]], [[C]]
+; SI: buffer_store_dwordx2 [[RESULT]]
+define void @combine_to_fma_fsub_1_f64(double addrspace(1)* noalias %out, double addrspace(1)* noalias %in) #1 {
+ %tid = tail call i32 @llvm.r600.read.tidig.x() #0
+ %gep.0 = getelementptr double addrspace(1)* %in, i32 %tid
+ %gep.1 = getelementptr double addrspace(1)* %gep.0, i32 1
+ %gep.2 = getelementptr double addrspace(1)* %gep.0, i32 2
+ %gep.out = getelementptr double addrspace(1)* %out, i32 %tid
+
+ %a = load double addrspace(1)* %gep.0
+ %b = load double addrspace(1)* %gep.1
+ %c = load double addrspace(1)* %gep.2
+
+ %mul = fmul double %a, %b
+ %fma = fsub double %c, %mul
+ store double %fma, double addrspace(1)* %gep.out
+ ret void
+}
+
+; (fsub x, (fmul y, z)) -> (fma (fneg y), z, x)
+; FUNC-LABEL: {{^}}combine_to_fma_fsub_1_f64_2use:
+; SI-DAG: buffer_load_dwordx2 [[A:v\[[0-9]+:[0-9]+\]]], v{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, 0 addr64{{$}}
+; SI-DAG: buffer_load_dwordx2 [[B:v\[[0-9]+:[0-9]+\]]], v{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, 0 addr64 offset:8{{$}}
+; SI-DAG: buffer_load_dwordx2 [[C:v\[[0-9]+:[0-9]+\]]], v{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, 0 addr64 offset:16{{$}}
+; SI-DAG: buffer_load_dwordx2 [[D:v\[[0-9]+:[0-9]+\]]], v{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, 0 addr64 offset:24{{$}}
+; SI-DAG: v_fma_f64 [[RESULT0:v\[[0-9]+:[0-9]+\]]], -[[A]], [[B]], [[C]]
+; SI-DAG: v_fma_f64 [[RESULT1:v\[[0-9]+:[0-9]+\]]], -[[A]], [[B]], [[D]]
+; SI-DAG: buffer_store_dwordx2 [[RESULT0]], v{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, 0 addr64{{$}}
+; SI-DAG: buffer_store_dwordx2 [[RESULT1]], v{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, 0 addr64 offset:8{{$}}
+; SI: s_endpgm
+define void @combine_to_fma_fsub_1_f64_2use(double addrspace(1)* noalias %out, double addrspace(1)* noalias %in) #1 {
+ %tid = tail call i32 @llvm.r600.read.tidig.x() #0
+ %gep.0 = getelementptr double addrspace(1)* %in, i32 %tid
+ %gep.1 = getelementptr double addrspace(1)* %gep.0, i32 1
+ %gep.2 = getelementptr double addrspace(1)* %gep.0, i32 2
+ %gep.3 = getelementptr double addrspace(1)* %gep.0, i32 3
+ %gep.out.0 = getelementptr double addrspace(1)* %out, i32 %tid
+ %gep.out.1 = getelementptr double addrspace(1)* %gep.out.0, i32 1
+
+ %a = load double addrspace(1)* %gep.0
+ %b = load double addrspace(1)* %gep.1
+ %c = load double addrspace(1)* %gep.2
+ %d = load double addrspace(1)* %gep.3
+
+ %mul = fmul double %a, %b
+ %fma0 = fsub double %c, %mul
+ %fma1 = fsub double %d, %mul
+ store double %fma0, double addrspace(1)* %gep.out.0
+ store double %fma1, double addrspace(1)* %gep.out.1
+ ret void
+}
+
+; (fsub (fneg (fmul x, y)), z) -> (fma (fneg x), y, (fneg z))
+; FUNC-LABEL: {{^}}combine_to_fma_fsub_2_f64:
+; SI-DAG: buffer_load_dwordx2 [[A:v\[[0-9]+:[0-9]+\]]], v{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, 0 addr64{{$}}
+; SI-DAG: buffer_load_dwordx2 [[B:v\[[0-9]+:[0-9]+\]]], v{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, 0 addr64 offset:8{{$}}
+; SI-DAG: buffer_load_dwordx2 [[C:v\[[0-9]+:[0-9]+\]]], v{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, 0 addr64 offset:16{{$}}
+; SI: v_fma_f64 [[RESULT:v\[[0-9]+:[0-9]+\]]], -[[A]], [[B]], -[[C]]
+; SI: buffer_store_dwordx2 [[RESULT]]
+define void @combine_to_fma_fsub_2_f64(double addrspace(1)* noalias %out, double addrspace(1)* noalias %in) #1 {
+ %tid = tail call i32 @llvm.r600.read.tidig.x() #0
+ %gep.0 = getelementptr double addrspace(1)* %in, i32 %tid
+ %gep.1 = getelementptr double addrspace(1)* %gep.0, i32 1
+ %gep.2 = getelementptr double addrspace(1)* %gep.0, i32 2
+ %gep.out = getelementptr double addrspace(1)* %out, i32 %tid
+
+ %a = load double addrspace(1)* %gep.0
+ %b = load double addrspace(1)* %gep.1
+ %c = load double addrspace(1)* %gep.2
+
+ %mul = fmul double %a, %b
+ %mul.neg = fsub double -0.0, %mul
+ %fma = fsub double %mul.neg, %c
+
+ store double %fma, double addrspace(1)* %gep.out
+ ret void
+}
+
+; (fsub (fneg (fmul x, y)), z) -> (fma (fneg x), y, (fneg z))
+; FUNC-LABEL: {{^}}combine_to_fma_fsub_2_f64_2uses_neg:
+; SI-DAG: buffer_load_dwordx2 [[A:v\[[0-9]+:[0-9]+\]]], v{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, 0 addr64{{$}}
+; SI-DAG: buffer_load_dwordx2 [[B:v\[[0-9]+:[0-9]+\]]], v{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, 0 addr64 offset:8{{$}}
+; SI-DAG: buffer_load_dwordx2 [[C:v\[[0-9]+:[0-9]+\]]], v{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, 0 addr64 offset:16{{$}}
+; SI-DAG: buffer_load_dwordx2 [[D:v\[[0-9]+:[0-9]+\]]], v{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, 0 addr64 offset:24{{$}}
+; SI-DAG: v_fma_f64 [[RESULT0:v\[[0-9]+:[0-9]+\]]], -[[A]], [[B]], -[[C]]
+; SI-DAG: v_fma_f64 [[RESULT1:v\[[0-9]+:[0-9]+\]]], -[[A]], [[B]], -[[D]]
+; SI-DAG: buffer_store_dwordx2 [[RESULT0]], v{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, 0 addr64{{$}}
+; SI-DAG: buffer_store_dwordx2 [[RESULT1]], v{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, 0 addr64 offset:8{{$}}
+; SI: s_endpgm
+define void @combine_to_fma_fsub_2_f64_2uses_neg(double addrspace(1)* noalias %out, double addrspace(1)* noalias %in) #1 {
+ %tid = tail call i32 @llvm.r600.read.tidig.x() #0
+ %gep.0 = getelementptr double addrspace(1)* %in, i32 %tid
+ %gep.1 = getelementptr double addrspace(1)* %gep.0, i32 1
+ %gep.2 = getelementptr double addrspace(1)* %gep.0, i32 2
+ %gep.3 = getelementptr double addrspace(1)* %gep.0, i32 3
+ %gep.out.0 = getelementptr double addrspace(1)* %out, i32 %tid
+ %gep.out.1 = getelementptr double addrspace(1)* %gep.out.0, i32 1
+
+ %a = load double addrspace(1)* %gep.0
+ %b = load double addrspace(1)* %gep.1
+ %c = load double addrspace(1)* %gep.2
+ %d = load double addrspace(1)* %gep.3
+
+ %mul = fmul double %a, %b
+ %mul.neg = fsub double -0.0, %mul
+ %fma0 = fsub double %mul.neg, %c
+ %fma1 = fsub double %mul.neg, %d
+
+ store double %fma0, double addrspace(1)* %gep.out.0
+ store double %fma1, double addrspace(1)* %gep.out.1
+ ret void
+}
+
+; (fsub (fneg (fmul x, y)), z) -> (fma (fneg x), y, (fneg z))
+; FUNC-LABEL: {{^}}combine_to_fma_fsub_2_f64_2uses_mul:
+; SI-DAG: buffer_load_dwordx2 [[A:v\[[0-9]+:[0-9]+\]]], v{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, 0 addr64{{$}}
+; SI-DAG: buffer_load_dwordx2 [[B:v\[[0-9]+:[0-9]+\]]], v{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, 0 addr64 offset:8{{$}}
+; SI-DAG: buffer_load_dwordx2 [[C:v\[[0-9]+:[0-9]+\]]], v{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, 0 addr64 offset:16{{$}}
+; SI-DAG: buffer_load_dwordx2 [[D:v\[[0-9]+:[0-9]+\]]], v{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, 0 addr64 offset:24{{$}}
+; SI-DAG: v_fma_f64 [[RESULT0:v\[[0-9]+:[0-9]+\]]], -[[A]], [[B]], -[[C]]
+; SI-DAG: v_fma_f64 [[RESULT1:v\[[0-9]+:[0-9]+\]]], [[A]], [[B]], -[[D]]
+; SI-DAG: buffer_store_dwordx2 [[RESULT0]], v{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, 0 addr64{{$}}
+; SI-DAG: buffer_store_dwordx2 [[RESULT1]], v{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, 0 addr64 offset:8{{$}}
+; SI: s_endpgm
+define void @combine_to_fma_fsub_2_f64_2uses_mul(double addrspace(1)* noalias %out, double addrspace(1)* noalias %in) #1 {
+ %tid = tail call i32 @llvm.r600.read.tidig.x() #0
+ %gep.0 = getelementptr double addrspace(1)* %in, i32 %tid
+ %gep.1 = getelementptr double addrspace(1)* %gep.0, i32 1
+ %gep.2 = getelementptr double addrspace(1)* %gep.0, i32 2
+ %gep.3 = getelementptr double addrspace(1)* %gep.0, i32 3
+ %gep.out.0 = getelementptr double addrspace(1)* %out, i32 %tid
+ %gep.out.1 = getelementptr double addrspace(1)* %gep.out.0, i32 1
+
+ %a = load double addrspace(1)* %gep.0
+ %b = load double addrspace(1)* %gep.1
+ %c = load double addrspace(1)* %gep.2
+ %d = load double addrspace(1)* %gep.3
+
+ %mul = fmul double %a, %b
+ %mul.neg = fsub double -0.0, %mul
+ %fma0 = fsub double %mul.neg, %c
+ %fma1 = fsub double %mul, %d
+
+ store double %fma0, double addrspace(1)* %gep.out.0
+ store double %fma1, double addrspace(1)* %gep.out.1
+ ret void
+}
+
+; fold (fsub (fma x, y, (fmul u, v)), z) -> (fma x, y (fma u, v, (fneg z)))
+
+; FUNC-LABEL: {{^}}aggressive_combine_to_fma_fsub_0_f64:
+; SI-DAG: buffer_load_dwordx2 [[X:v\[[0-9]+:[0-9]+\]]], v{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, 0 addr64{{$}}
+; SI-DAG: buffer_load_dwordx2 [[Y:v\[[0-9]+:[0-9]+\]]], v{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, 0 addr64 offset:8{{$}}
+; SI-DAG: buffer_load_dwordx2 [[Z:v\[[0-9]+:[0-9]+\]]], v{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, 0 addr64 offset:16{{$}}
+; SI-DAG: buffer_load_dwordx2 [[U:v\[[0-9]+:[0-9]+\]]], v{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, 0 addr64 offset:24{{$}}
+; SI-DAG: buffer_load_dwordx2 [[V:v\[[0-9]+:[0-9]+\]]], v{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, 0 addr64 offset:32{{$}}
+; SI: v_fma_f64 [[FMA0:v\[[0-9]+:[0-9]+\]]], [[U]], [[V]], -[[Z]]
+; SI: v_fma_f64 [[RESULT:v\[[0-9]+:[0-9]+\]]], [[X]], [[Y]], [[FMA0]]
+; SI: buffer_store_dwordx2 [[RESULT]]
+define void @aggressive_combine_to_fma_fsub_0_f64(double addrspace(1)* noalias %out, double addrspace(1)* noalias %in) #1 {
+ %tid = tail call i32 @llvm.r600.read.tidig.x() #0
+ %gep.0 = getelementptr double addrspace(1)* %in, i32 %tid
+ %gep.1 = getelementptr double addrspace(1)* %gep.0, i32 1
+ %gep.2 = getelementptr double addrspace(1)* %gep.0, i32 2
+ %gep.3 = getelementptr double addrspace(1)* %gep.0, i32 3
+ %gep.4 = getelementptr double addrspace(1)* %gep.0, i32 4
+ %gep.out = getelementptr double addrspace(1)* %out, i32 %tid
+
+ %x = load double addrspace(1)* %gep.0
+ %y = load double addrspace(1)* %gep.1
+ %z = load double addrspace(1)* %gep.2
+ %u = load double addrspace(1)* %gep.3
+ %v = load double addrspace(1)* %gep.4
+
+ %tmp0 = fmul double %u, %v
+ %tmp1 = call double @llvm.fma.f64(double %x, double %y, double %tmp0) #0
+ %tmp2 = fsub double %tmp1, %z
+
+ store double %tmp2, double addrspace(1)* %gep.out
+ ret void
+}
+
+; fold (fsub x, (fma y, z, (fmul u, v)))
+; -> (fma (fneg y), z, (fma (fneg u), v, x))
+
+; FUNC-LABEL: {{^}}aggressive_combine_to_fma_fsub_1_f64:
+; SI-DAG: buffer_load_dwordx2 [[X:v\[[0-9]+:[0-9]+\]]], v{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, 0 addr64{{$}}
+; SI-DAG: buffer_load_dwordx2 [[Y:v\[[0-9]+:[0-9]+\]]], v{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, 0 addr64 offset:8{{$}}
+; SI-DAG: buffer_load_dwordx2 [[Z:v\[[0-9]+:[0-9]+\]]], v{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, 0 addr64 offset:16{{$}}
+; SI-DAG: buffer_load_dwordx2 [[U:v\[[0-9]+:[0-9]+\]]], v{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, 0 addr64 offset:24{{$}}
+; SI-DAG: buffer_load_dwordx2 [[V:v\[[0-9]+:[0-9]+\]]], v{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, 0 addr64 offset:32{{$}}
+; SI: v_fma_f64 [[FMA0:v\[[0-9]+:[0-9]+\]]], -[[U]], [[V]], [[X]]
+; SI: v_fma_f64 [[RESULT:v\[[0-9]+:[0-9]+\]]], -[[Y]], [[Z]], [[FMA0]]
+; SI: buffer_store_dwordx2 [[RESULT]]
+define void @aggressive_combine_to_fma_fsub_1_f64(double addrspace(1)* noalias %out, double addrspace(1)* noalias %in) #1 {
+ %tid = tail call i32 @llvm.r600.read.tidig.x() #0
+ %gep.0 = getelementptr double addrspace(1)* %in, i32 %tid
+ %gep.1 = getelementptr double addrspace(1)* %gep.0, i32 1
+ %gep.2 = getelementptr double addrspace(1)* %gep.0, i32 2
+ %gep.3 = getelementptr double addrspace(1)* %gep.0, i32 3
+ %gep.4 = getelementptr double addrspace(1)* %gep.0, i32 4
+ %gep.out = getelementptr double addrspace(1)* %out, i32 %tid
+
+ %x = load double addrspace(1)* %gep.0
+ %y = load double addrspace(1)* %gep.1
+ %z = load double addrspace(1)* %gep.2
+ %u = load double addrspace(1)* %gep.3
+ %v = load double addrspace(1)* %gep.4
+
+ %tmp0 = fmul double %u, %v
+ %tmp1 = call double @llvm.fma.f64(double %y, double %z, double %tmp0) #0
+ %tmp2 = fsub double %x, %tmp1
+
+ store double %tmp2, double addrspace(1)* %gep.out
+ ret void
+}
+
+attributes #0 = { nounwind readnone }
+attributes #1 = { nounwind }
diff --git a/test/CodeGen/R600/fma.f64.ll b/test/CodeGen/R600/fma.f64.ll
index 4b0ab76..bca312b 100644
--- a/test/CodeGen/R600/fma.f64.ll
+++ b/test/CodeGen/R600/fma.f64.ll
@@ -1,4 +1,5 @@
-; RUN: llc -march=r600 -mcpu=SI -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
+; RUN: llc -march=amdgcn -mcpu=SI -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
+; RUN: llc -march=amdgcn -mcpu=tonga -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
declare double @llvm.fma.f64(double, double, double) nounwind readnone
declare <2 x double> @llvm.fma.v2f64(<2 x double>, <2 x double>, <2 x double>) nounwind readnone
diff --git a/test/CodeGen/R600/fma.ll b/test/CodeGen/R600/fma.ll
index 637e799..f3861ff 100644
--- a/test/CodeGen/R600/fma.ll
+++ b/test/CodeGen/R600/fma.ll
@@ -1,4 +1,4 @@
-; RUN: llc -march=r600 -mcpu=SI -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
+; RUN: llc -march=amdgcn -mcpu=SI -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
; RUN: llc -march=r600 -mcpu=cypress -verify-machineinstrs < %s | FileCheck -check-prefix=EG -check-prefix=FUNC %s
declare float @llvm.fma.f32(float, float, float) nounwind readnone
diff --git a/test/CodeGen/R600/fmax3.ll b/test/CodeGen/R600/fmax3.ll
index cf371b3..629c032 100644
--- a/test/CodeGen/R600/fmax3.ll
+++ b/test/CodeGen/R600/fmax3.ll
@@ -1,11 +1,12 @@
-; RUN: llc -march=r600 -mcpu=SI -verify-machineinstrs < %s | FileCheck -check-prefix=SI %s
+; RUN: llc -march=amdgcn -mcpu=SI -verify-machineinstrs < %s | FileCheck -check-prefix=SI %s
+; RUN: llc -march=amdgcn -mcpu=tonga -verify-machineinstrs < %s | FileCheck -check-prefix=SI %s
declare float @llvm.maxnum.f32(float, float) nounwind readnone
; SI-LABEL: {{^}}test_fmax3_olt_0:
-; SI: buffer_load_dword [[REGA:v[0-9]+]]
-; SI: buffer_load_dword [[REGB:v[0-9]+]]
; SI: buffer_load_dword [[REGC:v[0-9]+]]
+; SI: buffer_load_dword [[REGB:v[0-9]+]]
+; SI: buffer_load_dword [[REGA:v[0-9]+]]
; SI: v_max3_f32 [[RESULT:v[0-9]+]], [[REGC]], [[REGB]], [[REGA]]
; SI: buffer_store_dword [[RESULT]],
; SI: s_endpgm
@@ -21,8 +22,8 @@ define void @test_fmax3_olt_0(float addrspace(1)* %out, float addrspace(1)* %apt
; Commute operand of second fmax
; SI-LABEL: {{^}}test_fmax3_olt_1:
-; SI: buffer_load_dword [[REGA:v[0-9]+]]
; SI: buffer_load_dword [[REGB:v[0-9]+]]
+; SI: buffer_load_dword [[REGA:v[0-9]+]]
; SI: buffer_load_dword [[REGC:v[0-9]+]]
; SI: v_max3_f32 [[RESULT:v[0-9]+]], [[REGC]], [[REGB]], [[REGA]]
; SI: buffer_store_dword [[RESULT]],
diff --git a/test/CodeGen/R600/fmax_legacy.f64.ll b/test/CodeGen/R600/fmax_legacy.f64.ll
new file mode 100644
index 0000000..762853d
--- /dev/null
+++ b/test/CodeGen/R600/fmax_legacy.f64.ll
@@ -0,0 +1,67 @@
+; RUN: llc -march=amdgcn -mcpu=SI < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
+; Make sure we don't try to form FMAX_LEGACY nodes with f64
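+; (Only the labels are checked: there is no f64 form of the legacy max
+; instruction, so these just need to select without forming the node.)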
+
+declare i32 @llvm.r600.read.tidig.x() #1
+
+; FUNC-LABEL: @test_fmax_legacy_uge_f64
+define void @test_fmax_legacy_uge_f64(double addrspace(1)* %out, double addrspace(1)* %in) #0 {
+ %tid = call i32 @llvm.r600.read.tidig.x() #1
+ %gep.0 = getelementptr double addrspace(1)* %in, i32 %tid
+ %gep.1 = getelementptr double addrspace(1)* %gep.0, i32 1
+
+ %a = load double addrspace(1)* %gep.0, align 8
+ %b = load double addrspace(1)* %gep.1, align 8
+
+ %cmp = fcmp uge double %a, %b
+ %val = select i1 %cmp, double %a, double %b
+ store double %val, double addrspace(1)* %out, align 8
+ ret void
+}
+
+; FUNC-LABEL: @test_fmax_legacy_oge_f64
+define void @test_fmax_legacy_oge_f64(double addrspace(1)* %out, double addrspace(1)* %in) #0 {
+ %tid = call i32 @llvm.r600.read.tidig.x() #1
+ %gep.0 = getelementptr double addrspace(1)* %in, i32 %tid
+ %gep.1 = getelementptr double addrspace(1)* %gep.0, i32 1
+
+ %a = load double addrspace(1)* %gep.0, align 8
+ %b = load double addrspace(1)* %gep.1, align 8
+
+ %cmp = fcmp oge double %a, %b
+ %val = select i1 %cmp, double %a, double %b
+ store double %val, double addrspace(1)* %out, align 8
+ ret void
+}
+
+; FUNC-LABEL: @test_fmax_legacy_ugt_f64
+define void @test_fmax_legacy_ugt_f64(double addrspace(1)* %out, double addrspace(1)* %in) #0 {
+ %tid = call i32 @llvm.r600.read.tidig.x() #1
+ %gep.0 = getelementptr double addrspace(1)* %in, i32 %tid
+ %gep.1 = getelementptr double addrspace(1)* %gep.0, i32 1
+
+ %a = load double addrspace(1)* %gep.0, align 8
+ %b = load double addrspace(1)* %gep.1, align 8
+
+ %cmp = fcmp ugt double %a, %b
+ %val = select i1 %cmp, double %a, double %b
+ store double %val, double addrspace(1)* %out, align 8
+ ret void
+}
+
+; FUNC-LABEL: @test_fmax_legacy_ogt_f64
+define void @test_fmax_legacy_ogt_f64(double addrspace(1)* %out, double addrspace(1)* %in) #0 {
+ %tid = call i32 @llvm.r600.read.tidig.x() #1
+ %gep.0 = getelementptr double addrspace(1)* %in, i32 %tid
+ %gep.1 = getelementptr double addrspace(1)* %gep.0, i32 1
+
+ %a = load double addrspace(1)* %gep.0, align 8
+ %b = load double addrspace(1)* %gep.1, align 8
+
+ %cmp = fcmp ogt double %a, %b
+ %val = select i1 %cmp, double %a, double %b
+ store double %val, double addrspace(1)* %out, align 8
+ ret void
+}
+
+attributes #0 = { nounwind }
+attributes #1 = { nounwind readnone }
diff --git a/test/CodeGen/R600/fmax_legacy.ll b/test/CodeGen/R600/fmax_legacy.ll
index e9d837b..46f0e98 100644
--- a/test/CodeGen/R600/fmax_legacy.ll
+++ b/test/CodeGen/R600/fmax_legacy.ll
@@ -1,12 +1,17 @@
-; RUN: llc -march=r600 -mcpu=SI < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
+; RUN: llc -march=amdgcn -mcpu=SI < %s | FileCheck -check-prefix=SI -check-prefix=SI-SAFE -check-prefix=FUNC %s
+; RUN: llc -enable-no-nans-fp-math -enable-unsafe-fp-math -march=amdgcn -mcpu=SI < %s | FileCheck -check-prefix=SI-NONAN -check-prefix=SI -check-prefix=FUNC %s
; RUN: llc -march=r600 -mcpu=redwood < %s | FileCheck -check-prefix=EG -check-prefix=FUNC %s
+; FIXME: Should replace unsafe-fp-math with no signed zeros.
+
declare i32 @llvm.r600.read.tidig.x() #1
; FUNC-LABEL: @test_fmax_legacy_uge_f32
; SI: buffer_load_dword [[A:v[0-9]+]], {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, 0 addr64{{$}}
-; SI: buffer_load_dword [[B:v[0-9]+]], {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, 0 addr64 offset:0x4
-; SI: v_max_legacy_f32_e32 {{v[0-9]+}}, [[A]], [[B]]
+; SI: buffer_load_dword [[B:v[0-9]+]], {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, 0 addr64 offset:4
+; SI-SAFE: v_max_legacy_f32_e32 {{v[0-9]+}}, [[B]], [[A]]
+; SI-NONAN: v_max_f32_e32 {{v[0-9]+}}, [[B]], [[A]]
+
; EG: MAX
define void @test_fmax_legacy_uge_f32(float addrspace(1)* %out, float addrspace(1)* %in) #0 {
%tid = call i32 @llvm.r600.read.tidig.x() #1
@@ -24,8 +29,9 @@ define void @test_fmax_legacy_uge_f32(float addrspace(1)* %out, float addrspace(
; FUNC-LABEL: @test_fmax_legacy_oge_f32
; SI: buffer_load_dword [[A:v[0-9]+]], {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, 0 addr64{{$}}
-; SI: buffer_load_dword [[B:v[0-9]+]], {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, 0 addr64 offset:0x4
-; SI: v_max_legacy_f32_e32 {{v[0-9]+}}, [[A]], [[B]]
+; SI: buffer_load_dword [[B:v[0-9]+]], {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, 0 addr64 offset:4
+; SI-SAFE: v_max_legacy_f32_e32 {{v[0-9]+}}, [[A]], [[B]]
+; SI-NONAN: v_max_f32_e32 {{v[0-9]+}}, [[B]], [[A]]
; EG: MAX
define void @test_fmax_legacy_oge_f32(float addrspace(1)* %out, float addrspace(1)* %in) #0 {
%tid = call i32 @llvm.r600.read.tidig.x() #1
@@ -43,8 +49,9 @@ define void @test_fmax_legacy_oge_f32(float addrspace(1)* %out, float addrspace(
; FUNC-LABEL: @test_fmax_legacy_ugt_f32
; SI: buffer_load_dword [[A:v[0-9]+]], {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, 0 addr64{{$}}
-; SI: buffer_load_dword [[B:v[0-9]+]], {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, 0 addr64 offset:0x4
-; SI: v_max_legacy_f32_e32 {{v[0-9]+}}, [[A]], [[B]]
+; SI: buffer_load_dword [[B:v[0-9]+]], {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, 0 addr64 offset:4
+; SI-SAFE: v_max_legacy_f32_e32 {{v[0-9]+}}, [[B]], [[A]]
+; SI-NONAN: v_max_f32_e32 {{v[0-9]+}}, [[B]], [[A]]
; EG: MAX
define void @test_fmax_legacy_ugt_f32(float addrspace(1)* %out, float addrspace(1)* %in) #0 {
%tid = call i32 @llvm.r600.read.tidig.x() #1
@@ -62,8 +69,9 @@ define void @test_fmax_legacy_ugt_f32(float addrspace(1)* %out, float addrspace(
; FUNC-LABEL: @test_fmax_legacy_ogt_f32
; SI: buffer_load_dword [[A:v[0-9]+]], {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, 0 addr64{{$}}
-; SI: buffer_load_dword [[B:v[0-9]+]], {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, 0 addr64 offset:0x4
-; SI: v_max_legacy_f32_e32 {{v[0-9]+}}, [[A]], [[B]]
+; SI: buffer_load_dword [[B:v[0-9]+]], {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, 0 addr64 offset:4
+; SI-SAFE: v_max_legacy_f32_e32 {{v[0-9]+}}, [[A]], [[B]]
+; SI-NONAN: v_max_f32_e32 {{v[0-9]+}}, [[B]], [[A]]
; EG: MAX
define void @test_fmax_legacy_ogt_f32(float addrspace(1)* %out, float addrspace(1)* %in) #0 {
%tid = call i32 @llvm.r600.read.tidig.x() #1
@@ -79,5 +87,30 @@ define void @test_fmax_legacy_ogt_f32(float addrspace(1)* %out, float addrspace(
ret void
}
+
+; FUNC-LABEL: @test_fmax_legacy_ogt_f32_multi_use
+; SI: buffer_load_dword [[A:v[0-9]+]], {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, 0 addr64{{$}}
+; SI: buffer_load_dword [[B:v[0-9]+]], {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, 0 addr64 offset:4
+; SI-NOT: v_max_
+; SI: v_cmp_gt_f32
+; SI-NEXT: v_cndmask_b32
+; SI-NOT: v_max_
+
+; EG: MAX
+define void @test_fmax_legacy_ogt_f32_multi_use(float addrspace(1)* %out0, i1 addrspace(1)* %out1, float addrspace(1)* %in) #0 {
+ %tid = call i32 @llvm.r600.read.tidig.x() #1
+ %gep.0 = getelementptr float addrspace(1)* %in, i32 %tid
+ %gep.1 = getelementptr float addrspace(1)* %gep.0, i32 1
+
+ %a = load float addrspace(1)* %gep.0, align 4
+ %b = load float addrspace(1)* %gep.1, align 4
+
+ %cmp = fcmp ogt float %a, %b
+ %val = select i1 %cmp, float %a, float %b
+ store float %val, float addrspace(1)* %out0, align 4
+ store i1 %cmp, i1 addrspace(1)* %out1
+ ret void
+}
+
attributes #0 = { nounwind }
attributes #1 = { nounwind readnone }
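A note on the SI-SAFE/SI-NONAN split above: the legacy max preserves the source's NaN behavior, so it is the only safe lowering of the fcmp+select idiom, while -enable-no-nans-fp-math licenses the plain IEEE max. A minimal sketch of the idiom and its two lowerings (register numbers are illustrative):

;   %cmp = fcmp ugt float %a, %b              ; unordered-or-gt: a NaN %b selects %a
;   %val = select i1 %cmp, float %a, float %b
; safe:    v_max_legacy_f32_e32 v2, v1, v0    ; operand order encodes the NaN pick
; no-nans: v_max_f32_e32 v2, v1, v0           ; NaNs assumed absent, order is free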
diff --git a/test/CodeGen/R600/fmaxnum.f64.ll b/test/CodeGen/R600/fmaxnum.f64.ll
index 51cbf4d..de563ce 100644
--- a/test/CodeGen/R600/fmaxnum.f64.ll
+++ b/test/CodeGen/R600/fmaxnum.f64.ll
@@ -1,4 +1,5 @@
-; RUN: llc -march=r600 -mcpu=SI < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
+; RUN: llc -march=amdgcn -mcpu=SI < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
+; RUN: llc -march=amdgcn -mcpu=tonga < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
declare double @llvm.maxnum.f64(double, double) #0
declare <2 x double> @llvm.maxnum.v2f64(<2 x double>, <2 x double>) #0
diff --git a/test/CodeGen/R600/fmaxnum.ll b/test/CodeGen/R600/fmaxnum.ll
index 01d30b0..c105598 100644
--- a/test/CodeGen/R600/fmaxnum.ll
+++ b/test/CodeGen/R600/fmaxnum.ll
@@ -1,4 +1,5 @@
-; RUN: llc -march=r600 -mcpu=SI < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
+; RUN: llc -march=amdgcn -mcpu=SI < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
+; RUN: llc -march=amdgcn -mcpu=tonga < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
declare float @llvm.maxnum.f32(float, float) #0
declare <2 x float> @llvm.maxnum.v2f32(<2 x float>, <2 x float>) #0
diff --git a/test/CodeGen/R600/fmin3.ll b/test/CodeGen/R600/fmin3.ll
index 7420368..e3acb31 100644
--- a/test/CodeGen/R600/fmin3.ll
+++ b/test/CodeGen/R600/fmin3.ll
@@ -1,11 +1,12 @@
-; RUN: llc -march=r600 -mcpu=SI -verify-machineinstrs < %s | FileCheck -check-prefix=SI %s
+; RUN: llc -march=amdgcn -mcpu=SI -verify-machineinstrs < %s | FileCheck -check-prefix=SI %s
+; RUN: llc -march=amdgcn -mcpu=tonga -verify-machineinstrs < %s | FileCheck -check-prefix=SI %s
declare float @llvm.minnum.f32(float, float) nounwind readnone
; SI-LABEL: {{^}}test_fmin3_olt_0:
-; SI: buffer_load_dword [[REGA:v[0-9]+]]
-; SI: buffer_load_dword [[REGB:v[0-9]+]]
; SI: buffer_load_dword [[REGC:v[0-9]+]]
+; SI: buffer_load_dword [[REGB:v[0-9]+]]
+; SI: buffer_load_dword [[REGA:v[0-9]+]]
; SI: v_min3_f32 [[RESULT:v[0-9]+]], [[REGC]], [[REGB]], [[REGA]]
; SI: buffer_store_dword [[RESULT]],
; SI: s_endpgm
@@ -21,8 +23,8 @@ define void @test_fmin3_olt_0(float addrspace(1)* %out, float addrspace(1)* %apt
; Commute operand of second fmin
; SI-LABEL: {{^}}test_fmin3_olt_1:
-; SI: buffer_load_dword [[REGA:v[0-9]+]]
; SI: buffer_load_dword [[REGB:v[0-9]+]]
+; SI: buffer_load_dword [[REGA:v[0-9]+]]
; SI: buffer_load_dword [[REGC:v[0-9]+]]
; SI: v_min3_f32 [[RESULT:v[0-9]+]], [[REGC]], [[REGB]], [[REGA]]
; SI: buffer_store_dword [[RESULT]],
diff --git a/test/CodeGen/R600/fmin_legacy.f64.ll b/test/CodeGen/R600/fmin_legacy.f64.ll
new file mode 100644
index 0000000..83043cd
--- /dev/null
+++ b/test/CodeGen/R600/fmin_legacy.f64.ll
@@ -0,0 +1,77 @@
+; RUN: llc -march=amdgcn -mcpu=SI < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
+
+declare i32 @llvm.r600.read.tidig.x() #1
+
+; FUNC-LABEL: @test_fmin_legacy_f64
+define void @test_fmin_legacy_f64(<4 x double> addrspace(1)* %out, <4 x double> inreg %reg0) #0 {
+ %r0 = extractelement <4 x double> %reg0, i32 0
+ %r1 = extractelement <4 x double> %reg0, i32 1
+ %r2 = fcmp uge double %r0, %r1
+ %r3 = select i1 %r2, double %r1, double %r0
+ %vec = insertelement <4 x double> undef, double %r3, i32 0
+ store <4 x double> %vec, <4 x double> addrspace(1)* %out, align 16
+ ret void
+}
+
+; FUNC-LABEL: @test_fmin_legacy_ule_f64
+define void @test_fmin_legacy_ule_f64(double addrspace(1)* %out, double addrspace(1)* %in) #0 {
+ %tid = call i32 @llvm.r600.read.tidig.x() #1
+ %gep.0 = getelementptr double addrspace(1)* %in, i32 %tid
+ %gep.1 = getelementptr double addrspace(1)* %gep.0, i32 1
+
+ %a = load double addrspace(1)* %gep.0, align 8
+ %b = load double addrspace(1)* %gep.1, align 8
+
+ %cmp = fcmp ule double %a, %b
+ %val = select i1 %cmp, double %a, double %b
+ store double %val, double addrspace(1)* %out, align 8
+ ret void
+}
+
+; FUNC-LABEL: @test_fmin_legacy_ole_f64
+define void @test_fmin_legacy_ole_f64(double addrspace(1)* %out, double addrspace(1)* %in) #0 {
+ %tid = call i32 @llvm.r600.read.tidig.x() #1
+ %gep.0 = getelementptr double addrspace(1)* %in, i32 %tid
+ %gep.1 = getelementptr double addrspace(1)* %gep.0, i32 1
+
+ %a = load double addrspace(1)* %gep.0, align 8
+ %b = load double addrspace(1)* %gep.1, align 8
+
+ %cmp = fcmp ole double %a, %b
+ %val = select i1 %cmp, double %a, double %b
+ store double %val, double addrspace(1)* %out, align 8
+ ret void
+}
+
+; FUNC-LABEL: @test_fmin_legacy_olt_f64
+define void @test_fmin_legacy_olt_f64(double addrspace(1)* %out, double addrspace(1)* %in) #0 {
+ %tid = call i32 @llvm.r600.read.tidig.x() #1
+ %gep.0 = getelementptr double addrspace(1)* %in, i32 %tid
+ %gep.1 = getelementptr double addrspace(1)* %gep.0, i32 1
+
+ %a = load double addrspace(1)* %gep.0, align 8
+ %b = load double addrspace(1)* %gep.1, align 8
+
+ %cmp = fcmp olt double %a, %b
+ %val = select i1 %cmp, double %a, double %b
+ store double %val, double addrspace(1)* %out, align 8
+ ret void
+}
+
+; FUNC-LABEL: @test_fmin_legacy_ult_f64
+define void @test_fmin_legacy_ult_f64(double addrspace(1)* %out, double addrspace(1)* %in) #0 {
+ %tid = call i32 @llvm.r600.read.tidig.x() #1
+ %gep.0 = getelementptr double addrspace(1)* %in, i32 %tid
+ %gep.1 = getelementptr double addrspace(1)* %gep.0, i32 1
+
+ %a = load double addrspace(1)* %gep.0, align 8
+ %b = load double addrspace(1)* %gep.1, align 8
+
+ %cmp = fcmp ult double %a, %b
+ %val = select i1 %cmp, double %a, double %b
+ store double %val, double addrspace(1)* %out, align 8
+ ret void
+}
+
+attributes #0 = { nounwind }
+attributes #1 = { nounwind readnone }
diff --git a/test/CodeGen/R600/fmin_legacy.ll b/test/CodeGen/R600/fmin_legacy.ll
index 2fbdb6b..5014f6c 100644
--- a/test/CodeGen/R600/fmin_legacy.ll
+++ b/test/CodeGen/R600/fmin_legacy.ll
@@ -1,11 +1,15 @@
-; RUN: llc -march=r600 -mcpu=SI < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
+; RUN: llc -march=amdgcn -mcpu=SI < %s | FileCheck -check-prefix=SI-SAFE -check-prefix=SI -check-prefix=FUNC %s
+; RUN: llc -enable-no-nans-fp-math -enable-unsafe-fp-math -march=amdgcn -mcpu=SI < %s | FileCheck -check-prefix=SI-NONAN -check-prefix=SI -check-prefix=FUNC %s
; RUN: llc -march=r600 -mcpu=redwood < %s | FileCheck -check-prefix=EG -check-prefix=FUNC %s
+; FIXME: Should replace unsafe-fp-math with no signed zeros.
+
declare i32 @llvm.r600.read.tidig.x() #1
; FUNC-LABEL: @test_fmin_legacy_f32
; EG: MIN *
-; SI: v_min_legacy_f32_e32
+; SI-SAFE: v_min_legacy_f32_e32
+; SI-NONAN: v_min_f32_e32
define void @test_fmin_legacy_f32(<4 x float> addrspace(1)* %out, <4 x float> inreg %reg0) #0 {
%r0 = extractelement <4 x float> %reg0, i32 0
%r1 = extractelement <4 x float> %reg0, i32 1
@@ -18,8 +22,9 @@ define void @test_fmin_legacy_f32(<4 x float> addrspace(1)* %out, <4 x float> in
; FUNC-LABEL: @test_fmin_legacy_ule_f32
; SI: buffer_load_dword [[A:v[0-9]+]], {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, 0 addr64{{$}}
-; SI: buffer_load_dword [[B:v[0-9]+]], {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, 0 addr64 offset:0x4
-; SI: v_min_legacy_f32_e32 {{v[0-9]+}}, [[A]], [[B]]
+; SI: buffer_load_dword [[B:v[0-9]+]], {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, 0 addr64 offset:4
+; SI-SAFE: v_min_legacy_f32_e32 {{v[0-9]+}}, [[B]], [[A]]
+; SI-NONAN: v_min_f32_e32 {{v[0-9]+}}, [[B]], [[A]]
define void @test_fmin_legacy_ule_f32(float addrspace(1)* %out, float addrspace(1)* %in) #0 {
%tid = call i32 @llvm.r600.read.tidig.x() #1
%gep.0 = getelementptr float addrspace(1)* %in, i32 %tid
@@ -36,8 +41,9 @@ define void @test_fmin_legacy_ule_f32(float addrspace(1)* %out, float addrspace(
; FUNC-LABEL: @test_fmin_legacy_ole_f32
; SI: buffer_load_dword [[A:v[0-9]+]], {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, 0 addr64{{$}}
-; SI: buffer_load_dword [[B:v[0-9]+]], {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, 0 addr64 offset:0x4
-; SI: v_min_legacy_f32_e32 {{v[0-9]+}}, [[A]], [[B]]
+; SI: buffer_load_dword [[B:v[0-9]+]], {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, 0 addr64 offset:4
+; SI-SAFE: v_min_legacy_f32_e32 {{v[0-9]+}}, [[A]], [[B]]
+; SI-NONAN: v_min_f32_e32 {{v[0-9]+}}, [[B]], [[A]]
define void @test_fmin_legacy_ole_f32(float addrspace(1)* %out, float addrspace(1)* %in) #0 {
%tid = call i32 @llvm.r600.read.tidig.x() #1
%gep.0 = getelementptr float addrspace(1)* %in, i32 %tid
@@ -54,8 +60,9 @@ define void @test_fmin_legacy_ole_f32(float addrspace(1)* %out, float addrspace(
; FUNC-LABEL: @test_fmin_legacy_olt_f32
; SI: buffer_load_dword [[A:v[0-9]+]], {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, 0 addr64{{$}}
-; SI: buffer_load_dword [[B:v[0-9]+]], {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, 0 addr64 offset:0x4
-; SI: v_min_legacy_f32_e32 {{v[0-9]+}}, [[A]], [[B]]
+; SI: buffer_load_dword [[B:v[0-9]+]], {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, 0 addr64 offset:4
+; SI-SAFE: v_min_legacy_f32_e32 {{v[0-9]+}}, [[A]], [[B]]
+; SI-NONAN: v_min_f32_e32 {{v[0-9]+}}, [[B]], [[A]]
define void @test_fmin_legacy_olt_f32(float addrspace(1)* %out, float addrspace(1)* %in) #0 {
%tid = call i32 @llvm.r600.read.tidig.x() #1
%gep.0 = getelementptr float addrspace(1)* %in, i32 %tid
@@ -72,8 +79,9 @@ define void @test_fmin_legacy_olt_f32(float addrspace(1)* %out, float addrspace(
; FUNC-LABEL: @test_fmin_legacy_ult_f32
; SI: buffer_load_dword [[A:v[0-9]+]], {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, 0 addr64{{$}}
-; SI: buffer_load_dword [[B:v[0-9]+]], {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, 0 addr64 offset:0x4
-; SI: v_min_legacy_f32_e32 {{v[0-9]+}}, [[A]], [[B]]
+; SI: buffer_load_dword [[B:v[0-9]+]], {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, 0 addr64 offset:4
+; SI-SAFE: v_min_legacy_f32_e32 {{v[0-9]+}}, [[B]], [[A]]
+; SI-NONAN: v_min_f32_e32 {{v[0-9]+}}, [[B]], [[A]]
define void @test_fmin_legacy_ult_f32(float addrspace(1)* %out, float addrspace(1)* %in) #0 {
%tid = call i32 @llvm.r600.read.tidig.x() #1
%gep.0 = getelementptr float addrspace(1)* %in, i32 %tid
@@ -88,5 +96,28 @@ define void @test_fmin_legacy_ult_f32(float addrspace(1)* %out, float addrspace(
ret void
}
+; FUNC-LABEL: @test_fmin_legacy_ole_f32_multi_use
+; SI: buffer_load_dword [[A:v[0-9]+]], {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, 0 addr64{{$}}
+; SI: buffer_load_dword [[B:v[0-9]+]], {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, 0 addr64 offset:4
+; SI-NOT: v_min
+; SI: v_cmp_le_f32
+; SI-NEXT: v_cndmask_b32
+; SI-NOT: v_min
+; SI: s_endpgm
+define void @test_fmin_legacy_ole_f32_multi_use(float addrspace(1)* %out0, i1 addrspace(1)* %out1, float addrspace(1)* %in) #0 {
+ %tid = call i32 @llvm.r600.read.tidig.x() #1
+ %gep.0 = getelementptr float addrspace(1)* %in, i32 %tid
+ %gep.1 = getelementptr float addrspace(1)* %gep.0, i32 1
+
+ %a = load float addrspace(1)* %gep.0, align 4
+ %b = load float addrspace(1)* %gep.1, align 4
+
+ %cmp = fcmp ole float %a, %b
+ %val0 = select i1 %cmp, float %a, float %b
+ store float %val0, float addrspace(1)* %out0, align 4
+ store i1 %cmp, i1 addrspace(1)* %out1
+ ret void
+}
+
attributes #0 = { nounwind }
attributes #1 = { nounwind readnone }
diff --git a/test/CodeGen/R600/fminnum.f64.ll b/test/CodeGen/R600/fminnum.f64.ll
index 11b0c20..0f929d6 100644
--- a/test/CodeGen/R600/fminnum.f64.ll
+++ b/test/CodeGen/R600/fminnum.f64.ll
@@ -1,4 +1,5 @@
-; RUN: llc -march=r600 -mcpu=SI < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
+; RUN: llc -march=amdgcn -mcpu=SI < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
+; RUN: llc -march=amdgcn -mcpu=tonga < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
declare double @llvm.minnum.f64(double, double) #0
declare <2 x double> @llvm.minnum.v2f64(<2 x double>, <2 x double>) #0
diff --git a/test/CodeGen/R600/fminnum.ll b/test/CodeGen/R600/fminnum.ll
index 65adab6..6b93b83 100644
--- a/test/CodeGen/R600/fminnum.ll
+++ b/test/CodeGen/R600/fminnum.ll
@@ -1,4 +1,5 @@
-; RUN: llc -march=r600 -mcpu=SI < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
+; RUN: llc -march=amdgcn -mcpu=SI < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
+; RUN: llc -march=amdgcn -mcpu=tonga < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
declare float @llvm.minnum.f32(float, float) #0
declare <2 x float> @llvm.minnum.v2f32(<2 x float>, <2 x float>) #0
diff --git a/test/CodeGen/R600/fmul.ll b/test/CodeGen/R600/fmul.ll
index eabb271..6c09aa2 100644
--- a/test/CodeGen/R600/fmul.ll
+++ b/test/CodeGen/R600/fmul.ll
@@ -1,4 +1,5 @@
-; RUN: llc -march=r600 -mcpu=SI -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
+; RUN: llc -march=amdgcn -mcpu=SI -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
+; RUN: llc -march=amdgcn -mcpu=tonga -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
; RUN: llc -march=r600 -mcpu=redwood < %s | FileCheck -check-prefix=R600 -check-prefix=FUNC %s
diff --git a/test/CodeGen/R600/fmul64.ll b/test/CodeGen/R600/fmul64.ll
index 0a5f707..9d7787c 100644
--- a/test/CodeGen/R600/fmul64.ll
+++ b/test/CodeGen/R600/fmul64.ll
@@ -1,4 +1,5 @@
-; RUN: llc -march=r600 -mcpu=tahiti -verify-machineinstrs < %s | FileCheck -check-prefix=FUNC -check-prefix=SI %s
+; RUN: llc -march=amdgcn -mcpu=tahiti -verify-machineinstrs < %s | FileCheck -check-prefix=FUNC -check-prefix=SI %s
+; RUN: llc -march=amdgcn -mcpu=tonga -verify-machineinstrs < %s | FileCheck -check-prefix=FUNC -check-prefix=SI %s
; FUNC-LABEL: {{^}}fmul_f64:
; SI: v_mul_f64 {{v\[[0-9]+:[0-9]+\], v\[[0-9]+:[0-9]+\], v\[[0-9]+:[0-9]+\]}}
diff --git a/test/CodeGen/R600/fmuladd.ll b/test/CodeGen/R600/fmuladd.ll
index 16003a5..2b70863 100644
--- a/test/CodeGen/R600/fmuladd.ll
+++ b/test/CodeGen/R600/fmuladd.ll
@@ -1,4 +1,4 @@
-; RUN: llc -march=r600 -mcpu=SI -verify-machineinstrs < %s | FileCheck %s
+; RUN: llc -march=amdgcn -mcpu=SI -verify-machineinstrs < %s | FileCheck %s
declare float @llvm.fmuladd.f32(float, float, float)
declare double @llvm.fmuladd.f64(double, double, double)
@@ -33,7 +33,7 @@ define void @fmuladd_f64(double addrspace(1)* %out, double addrspace(1)* %in1,
; CHECK-LABEL: {{^}}fmuladd_2.0_a_b_f32
; CHECK-DAG: buffer_load_dword [[R1:v[0-9]+]], {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, 0 addr64{{$}}
-; CHECK-DAG: buffer_load_dword [[R2:v[0-9]+]], {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, 0 addr64 offset:0x4
+; CHECK-DAG: buffer_load_dword [[R2:v[0-9]+]], {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, 0 addr64 offset:4
; CHECK: v_mad_f32 [[RESULT:v[0-9]+]], 2.0, [[R1]], [[R2]]
; CHECK: buffer_store_dword [[RESULT]]
define void @fmuladd_2.0_a_b_f32(float addrspace(1)* %out, float addrspace(1)* %in) {
@@ -52,7 +52,7 @@ define void @fmuladd_2.0_a_b_f32(float addrspace(1)* %out, float addrspace(1)* %
; CHECK-LABEL: {{^}}fmuladd_a_2.0_b_f32
; CHECK-DAG: buffer_load_dword [[R1:v[0-9]+]], {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, 0 addr64{{$}}
-; CHECK-DAG: buffer_load_dword [[R2:v[0-9]+]], {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, 0 addr64 offset:0x4
+; CHECK-DAG: buffer_load_dword [[R2:v[0-9]+]], {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, 0 addr64 offset:4
; CHECK: v_mad_f32 [[RESULT:v[0-9]+]], 2.0, [[R1]], [[R2]]
; CHECK: buffer_store_dword [[RESULT]]
define void @fmuladd_a_2.0_b_f32(float addrspace(1)* %out, float addrspace(1)* %in) {
@@ -71,7 +71,7 @@ define void @fmuladd_a_2.0_b_f32(float addrspace(1)* %out, float addrspace(1)* %
; CHECK-LABEL: {{^}}fadd_a_a_b_f32:
; CHECK-DAG: buffer_load_dword [[R1:v[0-9]+]], {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, 0 addr64{{$}}
-; CHECK-DAG: buffer_load_dword [[R2:v[0-9]+]], {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, 0 addr64 offset:0x4
+; CHECK-DAG: buffer_load_dword [[R2:v[0-9]+]], {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, 0 addr64 offset:4
; CHECK: v_mad_f32 [[RESULT:v[0-9]+]], 2.0, [[R1]], [[R2]]
; CHECK: buffer_store_dword [[RESULT]]
define void @fadd_a_a_b_f32(float addrspace(1)* %out,
@@ -93,7 +93,7 @@ define void @fadd_a_a_b_f32(float addrspace(1)* %out,
; CHECK-LABEL: {{^}}fadd_b_a_a_f32:
; CHECK-DAG: buffer_load_dword [[R1:v[0-9]+]], {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, 0 addr64{{$}}
-; CHECK-DAG: buffer_load_dword [[R2:v[0-9]+]], {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, 0 addr64 offset:0x4
+; CHECK-DAG: buffer_load_dword [[R2:v[0-9]+]], {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, 0 addr64 offset:4
; CHECK: v_mad_f32 [[RESULT:v[0-9]+]], 2.0, [[R1]], [[R2]]
; CHECK: buffer_store_dword [[RESULT]]
define void @fadd_b_a_a_f32(float addrspace(1)* %out,
@@ -115,7 +115,7 @@ define void @fadd_b_a_a_f32(float addrspace(1)* %out,
; CHECK-LABEL: {{^}}fmuladd_neg_2.0_a_b_f32
; CHECK-DAG: buffer_load_dword [[R1:v[0-9]+]], {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, 0 addr64{{$}}
-; CHECK-DAG: buffer_load_dword [[R2:v[0-9]+]], {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, 0 addr64 offset:0x4
+; CHECK-DAG: buffer_load_dword [[R2:v[0-9]+]], {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, 0 addr64 offset:4
; CHECK: v_mad_f32 [[RESULT:v[0-9]+]], -2.0, [[R1]], [[R2]]
; CHECK: buffer_store_dword [[RESULT]]
define void @fmuladd_neg_2.0_a_b_f32(float addrspace(1)* %out, float addrspace(1)* %in) {
@@ -135,7 +135,7 @@ define void @fmuladd_neg_2.0_a_b_f32(float addrspace(1)* %out, float addrspace(1
; CHECK-LABEL: {{^}}fmuladd_neg_2.0_neg_a_b_f32
; CHECK-DAG: buffer_load_dword [[R1:v[0-9]+]], {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, 0 addr64{{$}}
-; CHECK-DAG: buffer_load_dword [[R2:v[0-9]+]], {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, 0 addr64 offset:0x4
+; CHECK-DAG: buffer_load_dword [[R2:v[0-9]+]], {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, 0 addr64 offset:4
; CHECK: v_mad_f32 [[RESULT:v[0-9]+]], 2.0, [[R1]], [[R2]]
; CHECK: buffer_store_dword [[RESULT]]
define void @fmuladd_neg_2.0_neg_a_b_f32(float addrspace(1)* %out, float addrspace(1)* %in) {
@@ -157,7 +157,7 @@ define void @fmuladd_neg_2.0_neg_a_b_f32(float addrspace(1)* %out, float addrspa
; CHECK-LABEL: {{^}}fmuladd_2.0_neg_a_b_f32
; CHECK-DAG: buffer_load_dword [[R1:v[0-9]+]], {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, 0 addr64{{$}}
-; CHECK-DAG: buffer_load_dword [[R2:v[0-9]+]], {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, 0 addr64 offset:0x4
+; CHECK-DAG: buffer_load_dword [[R2:v[0-9]+]], {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, 0 addr64 offset:4
; CHECK: v_mad_f32 [[RESULT:v[0-9]+]], -2.0, [[R1]], [[R2]]
; CHECK: buffer_store_dword [[RESULT]]
define void @fmuladd_2.0_neg_a_b_f32(float addrspace(1)* %out, float addrspace(1)* %in) {
@@ -179,7 +179,7 @@ define void @fmuladd_2.0_neg_a_b_f32(float addrspace(1)* %out, float addrspace(1
; CHECK-LABEL: {{^}}fmuladd_2.0_a_neg_b_f32
; CHECK-DAG: buffer_load_dword [[R1:v[0-9]+]], {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, 0 addr64{{$}}
-; CHECK-DAG: buffer_load_dword [[R2:v[0-9]+]], {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, 0 addr64 offset:0x4
+; CHECK-DAG: buffer_load_dword [[R2:v[0-9]+]], {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, 0 addr64 offset:4
; CHECK: v_mad_f32 [[RESULT:v[0-9]+]], 2.0, [[R1]], -[[R2]]
; CHECK: buffer_store_dword [[RESULT]]
define void @fmuladd_2.0_a_neg_b_f32(float addrspace(1)* %out, float addrspace(1)* %in) {
diff --git a/test/CodeGen/R600/fnearbyint.ll b/test/CodeGen/R600/fnearbyint.ll
index 1c1d731..4fa9ada 100644
--- a/test/CodeGen/R600/fnearbyint.ll
+++ b/test/CodeGen/R600/fnearbyint.ll
@@ -1,5 +1,6 @@
-; RUN: llc -march=r600 -mcpu=SI -verify-machineinstrs < %s
-; RUN: llc -march=r600 -mcpu=bonaire -verify-machineinstrs < %s
+; RUN: llc -march=amdgcn -mcpu=SI -verify-machineinstrs < %s
+; RUN: llc -march=amdgcn -mcpu=bonaire -verify-machineinstrs < %s
+; RUN: llc -march=amdgcn -mcpu=tonga -verify-machineinstrs < %s
; This should have exactly the same output as the test for rint,
; so no need to check anything.
diff --git a/test/CodeGen/R600/fneg-fabs.f64.ll b/test/CodeGen/R600/fneg-fabs.f64.ll
index 555f4cc..7e6ede6 100644
--- a/test/CodeGen/R600/fneg-fabs.f64.ll
+++ b/test/CodeGen/R600/fneg-fabs.f64.ll
@@ -1,12 +1,11 @@
-; RUN: llc -march=r600 -mcpu=SI -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
+; RUN: llc -march=amdgcn -mcpu=SI -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
+; RUN: llc -march=amdgcn -mcpu=tonga -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
; FIXME: Check something here. Currently it seems fabs + fneg aren't
; folded into 2 modifiers, although theoretically that should work.
; FUNC-LABEL: {{^}}fneg_fabs_fadd_f64:
-; SI: v_mov_b32_e32 [[IMMREG:v[0-9]+]], 0x7fffffff
-; SI: v_and_b32_e32 v[[FABS:[0-9]+]], {{s[0-9]+}}, [[IMMREG]]
-; SI: v_add_f64 {{v\[[0-9]+:[0-9]+\]}}, {{v\[[0-9]+:[0-9]+\]}}, -v{{\[[0-9]+}}:[[FABS]]{{\]}}
+; SI: v_add_f64 {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, -|v{{\[[0-9]+:[0-9]+\]}}|
define void @fneg_fabs_fadd_f64(double addrspace(1)* %out, double %x, double %y) {
%fabs = call double @llvm.fabs.f64(double %x)
%fsub = fsub double -0.000000e+00, %fabs
@@ -56,8 +55,8 @@ define void @fneg_fabs_fn_free_f64(double addrspace(1)* %out, i64 %in) {
}
; FUNC-LABEL: {{^}}fneg_fabs_f64:
-; SI: s_load_dwordx2
; SI: s_load_dwordx2 s{{\[}}[[LO_X:[0-9]+]]:[[HI_X:[0-9]+]]{{\]}}
+; SI: s_load_dwordx2
; SI: v_mov_b32_e32 [[IMMREG:v[0-9]+]], 0x80000000
; SI-DAG: v_or_b32_e32 v[[HI_V:[0-9]+]], s[[HI_X]], [[IMMREG]]
; SI-DAG: v_mov_b32_e32 v[[LO_V:[0-9]+]], s[[LO_X]]
diff --git a/test/CodeGen/R600/fneg-fabs.ll b/test/CodeGen/R600/fneg-fabs.ll
index 3cc832f..4fde048 100644
--- a/test/CodeGen/R600/fneg-fabs.ll
+++ b/test/CodeGen/R600/fneg-fabs.ll
@@ -1,4 +1,5 @@
-; RUN: llc -march=r600 -mcpu=SI -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
+; RUN: llc -march=amdgcn -mcpu=SI -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
+; RUN: llc -march=amdgcn -mcpu=tonga -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
; RUN: llc -march=r600 -mcpu=redwood < %s | FileCheck -check-prefix=R600 -check-prefix=FUNC %s
; FUNC-LABEL: {{^}}fneg_fabs_fadd_f32:
diff --git a/test/CodeGen/R600/fneg.f64.ll b/test/CodeGen/R600/fneg.f64.ll
index 7aa08a9..aa6df20 100644
--- a/test/CodeGen/R600/fneg.f64.ll
+++ b/test/CodeGen/R600/fneg.f64.ll
@@ -1,7 +1,8 @@
-; RUN: llc -march=r600 -mcpu=SI -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
+; RUN: llc -march=amdgcn -mcpu=SI -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=GCN -check-prefix=FUNC %s
+; RUN: llc -march=amdgcn -mcpu=tonga -verify-machineinstrs < %s | FileCheck -check-prefix=VI -check-prefix=GCN -check-prefix=FUNC %s
; FUNC-LABEL: {{^}}fneg_f64:
-; SI: v_xor_b32
+; GCN: v_xor_b32
define void @fneg_f64(double addrspace(1)* %out, double %in) {
%fneg = fsub double -0.000000e+00, %in
store double %fneg, double addrspace(1)* %out
@@ -9,8 +10,8 @@ define void @fneg_f64(double addrspace(1)* %out, double %in) {
}
; FUNC-LABEL: {{^}}fneg_v2f64:
-; SI: v_xor_b32
-; SI: v_xor_b32
+; GCN: v_xor_b32
+; GCN: v_xor_b32
define void @fneg_v2f64(<2 x double> addrspace(1)* nocapture %out, <2 x double> %in) {
%fneg = fsub <2 x double> <double -0.000000e+00, double -0.000000e+00>, %in
store <2 x double> %fneg, <2 x double> addrspace(1)* %out
@@ -23,10 +24,10 @@ define void @fneg_v2f64(<2 x double> addrspace(1)* nocapture %out, <2 x double>
; R600: -PV
; R600: -PV
-; SI: v_xor_b32
-; SI: v_xor_b32
-; SI: v_xor_b32
-; SI: v_xor_b32
+; GCN: v_xor_b32
+; GCN: v_xor_b32
+; GCN: v_xor_b32
+; GCN: v_xor_b32
define void @fneg_v4f64(<4 x double> addrspace(1)* nocapture %out, <4 x double> %in) {
%fneg = fsub <4 x double> <double -0.000000e+00, double -0.000000e+00, double -0.000000e+00, double -0.000000e+00>, %in
store <4 x double> %fneg, <4 x double> addrspace(1)* %out
@@ -38,8 +39,7 @@ define void @fneg_v4f64(<4 x double> addrspace(1)* nocapture %out, <4 x double>
; unless the target returns true for isNegFree()
; FUNC-LABEL: {{^}}fneg_free_f64:
-; FIXME: Unnecessary copy to VGPRs
-; SI: v_add_f64 {{v\[[0-9]+:[0-9]+\]}}, {{v\[[0-9]+:[0-9]+\]}}, -{{v\[[0-9]+:[0-9]+\]$}}
+; GCN: v_add_f64 {{v\[[0-9]+:[0-9]+\]}}, 0, -{{s\[[0-9]+:[0-9]+\]$}}
define void @fneg_free_f64(double addrspace(1)* %out, i64 %in) {
%bc = bitcast i64 %in to double
%fsub = fsub double 0.0, %bc
@@ -47,10 +47,11 @@ define void @fneg_free_f64(double addrspace(1)* %out, i64 %in) {
ret void
}
-; SI-LABEL: {{^}}fneg_fold_f64:
+; GCN-LABEL: {{^}}fneg_fold_f64:
; SI: s_load_dwordx2 [[NEG_VALUE:s\[[0-9]+:[0-9]+\]]], {{s\[[0-9]+:[0-9]+\]}}, 0xb
-; SI-NOT: xor
-; SI: v_mul_f64 {{v\[[0-9]+:[0-9]+\]}}, -[[NEG_VALUE]], [[NEG_VALUE]]
+; VI: s_load_dwordx2 [[NEG_VALUE:s\[[0-9]+:[0-9]+\]]], {{s\[[0-9]+:[0-9]+\]}}, 0x2c
+; GCN-NOT: xor
+; GCN: v_mul_f64 {{v\[[0-9]+:[0-9]+\]}}, -[[NEG_VALUE]], [[NEG_VALUE]]
define void @fneg_fold_f64(double addrspace(1)* %out, double %in) {
%fsub = fsub double -0.0, %in
%fmul = fmul double %fsub, %in
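Two details in the fneg checks above are worth decoding. fneg is a pure sign-bit flip, so f64 touches only the high dword (hence the 0x80000000 immediate and the v_xor_b32/v_or_b32 patterns rather than any FP op). And the SI/VI s_load offsets differ only in units: SI SMRD offsets count dwords while VI SMEM offsets count bytes, so both lines below name the same kernel-argument slot (registers illustrative):

; SI: s_load_dwordx2 s[0:1], s[4:5], 0xb    ; 11 dwords * 4 = 44 bytes
; VI: s_load_dwordx2 s[0:1], s[4:5], 0x2c   ; 0x2c = 44 bytes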
diff --git a/test/CodeGen/R600/fneg.ll b/test/CodeGen/R600/fneg.ll
index c20cf24..a0fd539 100644
--- a/test/CodeGen/R600/fneg.ll
+++ b/test/CodeGen/R600/fneg.ll
@@ -1,10 +1,11 @@
-; RUN: llc -march=r600 -mcpu=SI -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
+; RUN: llc -march=amdgcn -mcpu=SI -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=GCN -check-prefix=FUNC %s
+; RUN: llc -march=amdgcn -mcpu=tonga -verify-machineinstrs < %s | FileCheck -check-prefix=VI -check-prefix=GCN -check-prefix=FUNC %s
; RUN: llc -march=r600 -mcpu=redwood < %s | FileCheck -check-prefix=R600 -check-prefix=FUNC %s
; FUNC-LABEL: {{^}}fneg_f32:
; R600: -PV
-; SI: v_xor_b32
+; GCN: v_xor_b32
define void @fneg_f32(float addrspace(1)* %out, float %in) {
%fneg = fsub float -0.000000e+00, %in
store float %fneg, float addrspace(1)* %out
@@ -15,8 +16,8 @@ define void @fneg_f32(float addrspace(1)* %out, float %in) {
; R600: -PV
; R600: -PV
-; SI: v_xor_b32
-; SI: v_xor_b32
+; GCN: v_xor_b32
+; GCN: v_xor_b32
define void @fneg_v2f32(<2 x float> addrspace(1)* nocapture %out, <2 x float> %in) {
%fneg = fsub <2 x float> <float -0.000000e+00, float -0.000000e+00>, %in
store <2 x float> %fneg, <2 x float> addrspace(1)* %out
@@ -29,10 +30,10 @@ define void @fneg_v2f32(<2 x float> addrspace(1)* nocapture %out, <2 x float> %i
; R600: -PV
; R600: -PV
-; SI: v_xor_b32
-; SI: v_xor_b32
-; SI: v_xor_b32
-; SI: v_xor_b32
+; GCN: v_xor_b32
+; GCN: v_xor_b32
+; GCN: v_xor_b32
+; GCN: v_xor_b32
define void @fneg_v4f32(<4 x float> addrspace(1)* nocapture %out, <4 x float> %in) {
%fneg = fsub <4 x float> <float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00>, %in
store <4 x float> %fneg, <4 x float> addrspace(1)* %out
@@ -48,7 +49,7 @@ define void @fneg_v4f32(<4 x float> addrspace(1)* nocapture %out, <4 x float> %i
; R600: -KC0[2].Z
; XXX: We could use v_add_f32_e64 with the negate bit here instead.
-; SI: v_sub_f32_e64 v{{[0-9]}}, 0.0, s{{[0-9]+$}}
+; GCN: v_sub_f32_e64 v{{[0-9]}}, 0, s{{[0-9]+$}}
define void @fneg_free_f32(float addrspace(1)* %out, i32 %in) {
%bc = bitcast i32 %in to float
%fsub = fsub float 0.0, %bc
@@ -58,8 +59,9 @@ define void @fneg_free_f32(float addrspace(1)* %out, i32 %in) {
; FUNC-LABEL: {{^}}fneg_fold_f32:
; SI: s_load_dword [[NEG_VALUE:s[0-9]+]], s[{{[0-9]+:[0-9]+}}], 0xb
-; SI-NOT: xor
-; SI: v_mul_f32_e64 v{{[0-9]+}}, -[[NEG_VALUE]], [[NEG_VALUE]]
+; VI: s_load_dword [[NEG_VALUE:s[0-9]+]], s[{{[0-9]+:[0-9]+}}], 0x2c
+; GCN-NOT: xor
+; GCN: v_mul_f32_e64 v{{[0-9]+}}, -[[NEG_VALUE]], [[NEG_VALUE]]
define void @fneg_fold_f32(float addrspace(1)* %out, float %in) {
%fsub = fsub float -0.0, %in
%fmul = fmul float %fsub, %in
diff --git a/test/CodeGen/R600/fp-classify.ll b/test/CodeGen/R600/fp-classify.ll
new file mode 100644
index 0000000..4fac517
--- /dev/null
+++ b/test/CodeGen/R600/fp-classify.ll
@@ -0,0 +1,131 @@
+; RUN: llc -march=amdgcn -mcpu=SI -verify-machineinstrs < %s | FileCheck -check-prefix=SI %s
+; RUN: llc -march=amdgcn -mcpu=tonga -verify-machineinstrs < %s | FileCheck -check-prefix=SI %s
+
+declare i1 @llvm.AMDGPU.class.f32(float, i32) #1
+declare i1 @llvm.AMDGPU.class.f64(double, i32) #1
+declare i32 @llvm.r600.read.tidig.x() #1
+declare float @llvm.fabs.f32(float) #1
+declare double @llvm.fabs.f64(double) #1
+
+; SI-LABEL: {{^}}test_isinf_pattern:
+; SI: v_mov_b32_e32 [[MASK:v[0-9]+]], 0x204{{$}}
+; SI: v_cmp_class_f32_e32 vcc, s{{[0-9]+}}, [[MASK]]
+; SI-NOT: v_cmp
+; SI: s_endpgm
+define void @test_isinf_pattern(i32 addrspace(1)* nocapture %out, float %x) #0 {
+ %fabs = tail call float @llvm.fabs.f32(float %x) #1
+ %cmp = fcmp oeq float %fabs, 0x7FF0000000000000
+ %ext = zext i1 %cmp to i32
+ store i32 %ext, i32 addrspace(1)* %out, align 4
+ ret void
+}
+
+; SI-LABEL: {{^}}test_not_isinf_pattern_0:
+; SI-NOT: v_cmp_class
+; SI: s_endpgm
+define void @test_not_isinf_pattern_0(i32 addrspace(1)* nocapture %out, float %x) #0 {
+ %fabs = tail call float @llvm.fabs.f32(float %x) #1
+ %cmp = fcmp ueq float %fabs, 0x7FF0000000000000
+ %ext = zext i1 %cmp to i32
+ store i32 %ext, i32 addrspace(1)* %out, align 4
+ ret void
+}
+
+; SI-LABEL: {{^}}test_not_isinf_pattern_1:
+; SI-NOT: v_cmp_class
+; SI: s_endpgm
+define void @test_not_isinf_pattern_1(i32 addrspace(1)* nocapture %out, float %x) #0 {
+ %fabs = tail call float @llvm.fabs.f32(float %x) #1
+ %cmp = fcmp oeq float %fabs, 0xFFF0000000000000
+ %ext = zext i1 %cmp to i32
+ store i32 %ext, i32 addrspace(1)* %out, align 4
+ ret void
+}
+
+; SI-LABEL: {{^}}test_isfinite_pattern_0:
+; SI-NOT: v_cmp
+; SI: v_mov_b32_e32 [[MASK:v[0-9]+]], 0x1f8{{$}}
+; SI: v_cmp_class_f32_e32 vcc, s{{[0-9]+}}, [[MASK]]
+; SI-NOT: v_cmp
+; SI: s_endpgm
+define void @test_isfinite_pattern_0(i32 addrspace(1)* nocapture %out, float %x) #0 {
+ %ord = fcmp ord float %x, 0.000000e+00
+ %x.fabs = tail call float @llvm.fabs.f32(float %x) #1
+ %ninf = fcmp une float %x.fabs, 0x7FF0000000000000
+ %and = and i1 %ord, %ninf
+ %ext = zext i1 %and to i32
+ store i32 %ext, i32 addrspace(1)* %out, align 4
+ ret void
+}
+
+; Use negative infinity
+; SI-LABEL: {{^}}test_isfinite_not_pattern_0:
+; SI-NOT: v_cmp_class_f32
+; SI: s_endpgm
+define void @test_isfinite_not_pattern_0(i32 addrspace(1)* nocapture %out, float %x) #0 {
+ %ord = fcmp ord float %x, 0.000000e+00
+ %x.fabs = tail call float @llvm.fabs.f32(float %x) #1
+ %ninf = fcmp une float %x.fabs, 0xFFF0000000000000
+ %and = and i1 %ord, %ninf
+ %ext = zext i1 %and to i32
+ store i32 %ext, i32 addrspace(1)* %out, align 4
+ ret void
+}
+
+; No fabs
+; SI-LABEL: {{^}}test_isfinite_not_pattern_1:
+; SI-NOT: v_cmp_class_f32
+; SI: s_endpgm
+define void @test_isfinite_not_pattern_1(i32 addrspace(1)* nocapture %out, float %x) #0 {
+ %ord = fcmp ord float %x, 0.000000e+00
+ %ninf = fcmp une float %x, 0x7FF0000000000000
+ %and = and i1 %ord, %ninf
+ %ext = zext i1 %and to i32
+ store i32 %ext, i32 addrspace(1)* %out, align 4
+ ret void
+}
+
+; fabs of different value
+; SI-LABEL: {{^}}test_isfinite_not_pattern_2:
+; SI-NOT: v_cmp_class_f32
+; SI: s_endpgm
+define void @test_isfinite_not_pattern_2(i32 addrspace(1)* nocapture %out, float %x, float %y) #0 {
+ %ord = fcmp ord float %x, 0.000000e+00
+ %x.fabs = tail call float @llvm.fabs.f32(float %y) #1
+ %ninf = fcmp une float %x.fabs, 0x7FF0000000000000
+ %and = and i1 %ord, %ninf
+ %ext = zext i1 %and to i32
+ store i32 %ext, i32 addrspace(1)* %out, align 4
+ ret void
+}
+
+; Wrong ordered compare type
+; SI-LABEL: {{^}}test_isfinite_not_pattern_3:
+; SI-NOT: v_cmp_class_f32
+; SI: s_endpgm
+define void @test_isfinite_not_pattern_3(i32 addrspace(1)* nocapture %out, float %x) #0 {
+ %ord = fcmp uno float %x, 0.000000e+00
+ %x.fabs = tail call float @llvm.fabs.f32(float %x) #1
+ %ninf = fcmp une float %x.fabs, 0x7FF0000000000000
+ %and = and i1 %ord, %ninf
+ %ext = zext i1 %and to i32
+ store i32 %ext, i32 addrspace(1)* %out, align 4
+ ret void
+}
+
+; Wrong unordered compare
+; SI-LABEL: {{^}}test_isfinite_not_pattern_4:
+; SI-NOT: v_cmp_class_f32
+; SI: s_endpgm
+define void @test_isfinite_not_pattern_4(i32 addrspace(1)* nocapture %out, float %x) #0 {
+ %ord = fcmp ord float %x, 0.000000e+00
+ %x.fabs = tail call float @llvm.fabs.f32(float %x) #1
+ %ninf = fcmp one float %x.fabs, 0x7FF0000000000000
+ %and = and i1 %ord, %ninf
+ %ext = zext i1 %and to i32
+ store i32 %ext, i32 addrspace(1)* %out, align 4
+ ret void
+}
+
+attributes #0 = { nounwind }
+attributes #1 = { nounwind readnone }
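The magic masks in these checks decode against the v_cmp_class bit layout, one bit per IEEE class: sNaN=0x1, qNaN=0x2, -inf=0x4, -normal=0x8, -denormal=0x10, -0=0x20, +0=0x40, +denormal=0x80, +normal=0x100, +inf=0x200. Worked out:

; 0x204 = 0x200 | 0x4                    ; +inf or -inf (the isinf idiom)
; 0x1f8 = 0x8|0x10|0x20|0x40|0x80|0x100  ; bits 3-8, everything finite

so test_isinf_pattern and test_isfinite_pattern_0 each fold their fcmp/and chain into a single class compare.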
diff --git a/test/CodeGen/R600/fp16_to_fp.ll b/test/CodeGen/R600/fp16_to_fp.ll
index ec3e051..da78f61 100644
--- a/test/CodeGen/R600/fp16_to_fp.ll
+++ b/test/CodeGen/R600/fp16_to_fp.ll
@@ -1,4 +1,5 @@
-; RUN: llc -march=r600 -mcpu=SI -verify-machineinstrs < %s | FileCheck -check-prefix=SI %s
+; RUN: llc -march=amdgcn -mcpu=SI -verify-machineinstrs < %s | FileCheck -check-prefix=SI %s
+; RUN: llc -march=amdgcn -mcpu=tonga -verify-machineinstrs < %s | FileCheck -check-prefix=SI %s
declare float @llvm.convert.from.fp16.f32(i16) nounwind readnone
declare double @llvm.convert.from.fp16.f64(i16) nounwind readnone
diff --git a/test/CodeGen/R600/fp32_to_fp16.ll b/test/CodeGen/R600/fp32_to_fp16.ll
index e86ee62..c3c65ae 100644
--- a/test/CodeGen/R600/fp32_to_fp16.ll
+++ b/test/CodeGen/R600/fp32_to_fp16.ll
@@ -1,4 +1,5 @@
-; RUN: llc -march=r600 -mcpu=SI -verify-machineinstrs < %s | FileCheck -check-prefix=SI %s
+; RUN: llc -march=amdgcn -mcpu=SI -verify-machineinstrs < %s | FileCheck -check-prefix=SI %s
+; RUN: llc -march=amdgcn -mcpu=tonga -verify-machineinstrs < %s | FileCheck -check-prefix=SI %s
declare i16 @llvm.convert.to.fp16.f32(float) nounwind readnone
diff --git a/test/CodeGen/R600/fp_to_sint.f64.ll b/test/CodeGen/R600/fp_to_sint.f64.ll
index 09edb40..e641847 100644
--- a/test/CodeGen/R600/fp_to_sint.f64.ll
+++ b/test/CodeGen/R600/fp_to_sint.f64.ll
@@ -1,5 +1,5 @@
-; RUN: llc -march=r600 -mcpu=SI -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
-; RUN: llc -march=r600 -mcpu=bonaire -verify-machineinstrs < %s | FileCheck -check-prefix=CI -check-prefix=FUNC %s
+; RUN: llc -march=amdgcn -mcpu=SI -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
+; RUN: llc -march=amdgcn -mcpu=bonaire -verify-machineinstrs < %s | FileCheck -check-prefix=CI -check-prefix=FUNC %s
declare i32 @llvm.r600.read.tidig.x() nounwind readnone
diff --git a/test/CodeGen/R600/fp_to_sint.ll b/test/CodeGen/R600/fp_to_sint.ll
index c583ec3..16549c3 100644
--- a/test/CodeGen/R600/fp_to_sint.ll
+++ b/test/CodeGen/R600/fp_to_sint.ll
@@ -1,16 +1,28 @@
; RUN: llc -march=r600 -mcpu=redwood < %s | FileCheck %s --check-prefix=EG --check-prefix=FUNC
-; RUN: llc -march=r600 -mcpu=SI -verify-machineinstrs < %s | FileCheck %s --check-prefix=SI --check-prefix=FUNC
+; RUN: llc -march=amdgcn -mcpu=SI -verify-machineinstrs < %s | FileCheck %s --check-prefix=SI --check-prefix=FUNC
+; RUN: llc -march=amdgcn -mcpu=tonga -verify-machineinstrs < %s | FileCheck %s --check-prefix=SI --check-prefix=FUNC
+
+declare float @llvm.fabs.f32(float) #0
; FUNC-LABEL: {{^}}fp_to_sint_i32:
; EG: FLT_TO_INT {{\** *}}T{{[0-9]+\.[XYZW], PV\.[XYZW]}}
; SI: v_cvt_i32_f32_e32
; SI: s_endpgm
-define void @fp_to_sint_i32 (i32 addrspace(1)* %out, float %in) {
+define void @fp_to_sint_i32(i32 addrspace(1)* %out, float %in) {
%conv = fptosi float %in to i32
store i32 %conv, i32 addrspace(1)* %out
ret void
}
+; FUNC-LABEL: {{^}}fp_to_sint_i32_fabs:
+; SI: v_cvt_i32_f32_e64 v{{[0-9]+}}, |s{{[0-9]+}}|{{$}}
+define void @fp_to_sint_i32_fabs(i32 addrspace(1)* %out, float %in) {
+ %in.fabs = call float @llvm.fabs.f32(float %in) #0
+ %conv = fptosi float %in.fabs to i32
+ store i32 %conv, i32 addrspace(1)* %out
+ ret void
+}
+
; FUNC-LABEL: {{^}}fp_to_sint_v2i32:
; EG: FLT_TO_INT {{\** *}}T{{[0-9]+\.[XYZW], PV\.[XYZW]}}
; EG: FLT_TO_INT {{\** *}}T{{[0-9]+\.[XYZW], PV\.[XYZW]}}
@@ -214,3 +226,5 @@ define void @fp_to_sint_v4i64(<4 x i64> addrspace(1)* %out, <4 x float> %x) {
store <4 x i64> %conv, <4 x i64> addrspace(1)* %out
ret void
}
+
+attributes #0 = { nounwind readnone }
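The new fabs test depends on VOP3 source modifiers: the _e64 encoding can apply |x| (and -x) to a source operand directly, so the fabs folds into the convert instead of costing a separate v_and_b32. A sketch of the fold (registers illustrative):

;   %in.fabs = call float @llvm.fabs.f32(float %in)
;   %conv = fptosi float %in.fabs to i32
; -> v_cvt_i32_f32_e64 v0, |s2|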
diff --git a/test/CodeGen/R600/fp_to_uint.f64.ll b/test/CodeGen/R600/fp_to_uint.f64.ll
index 25859bb..1ffe2fa 100644
--- a/test/CodeGen/R600/fp_to_uint.f64.ll
+++ b/test/CodeGen/R600/fp_to_uint.f64.ll
@@ -1,5 +1,5 @@
-; RUN: llc -march=r600 -mcpu=SI -verify-machineinstrs < %s | FileCheck -check-prefix=SI %s
-; RUN: llc -march=r600 -mcpu=bonaire -verify-machineinstrs < %s | FileCheck -check-prefix=CI -check-prefix=FUNC %s
+; RUN: llc -march=amdgcn -mcpu=SI -verify-machineinstrs < %s | FileCheck -check-prefix=SI %s
+; RUN: llc -march=amdgcn -mcpu=bonaire -verify-machineinstrs < %s | FileCheck -check-prefix=CI -check-prefix=FUNC %s
declare i32 @llvm.r600.read.tidig.x() nounwind readnone
diff --git a/test/CodeGen/R600/fp_to_uint.ll b/test/CodeGen/R600/fp_to_uint.ll
index 91bf4b7..804d90f 100644
--- a/test/CodeGen/R600/fp_to_uint.ll
+++ b/test/CodeGen/R600/fp_to_uint.ll
@@ -1,29 +1,31 @@
-; RUN: llc -march=r600 -mcpu=redwood < %s | FileCheck %s --check-prefix=EG --check-prefix=FUNC
-; RUN: llc -march=r600 -mcpu=SI -verify-machineinstrs < %s | FileCheck %s --check-prefix=SI --check-prefix=FUNC
+; RUN: llc -march=r600 -mcpu=redwood < %s | FileCheck %s -check-prefix=EG -check-prefix=FUNC
+; RUN: llc -march=amdgcn -mcpu=SI -verify-machineinstrs < %s | FileCheck %s -check-prefix=SI -check-prefix=FUNC
+; RUN: llc -march=amdgcn -mcpu=tonga -verify-machineinstrs < %s | FileCheck %s -check-prefix=SI -check-prefix=FUNC
-; FUNC-LABEL: {{^}}fp_to_uint_i32:
+; FUNC-LABEL: {{^}}fp_to_uint_f32_to_i32:
; EG: FLT_TO_UINT {{\** *}}T{{[0-9]+\.[XYZW], PV\.[XYZW]}}
+
; SI: v_cvt_u32_f32_e32
; SI: s_endpgm
-define void @fp_to_uint_i32 (i32 addrspace(1)* %out, float %in) {
+define void @fp_to_uint_f32_to_i32(i32 addrspace(1)* %out, float %in) {
%conv = fptoui float %in to i32
store i32 %conv, i32 addrspace(1)* %out
ret void
}
-; FUNC-LABEL: {{^}}fp_to_uint_v2i32:
+; FUNC-LABEL: {{^}}fp_to_uint_v2f32_to_v2i32:
; EG: FLT_TO_UINT {{\** *}}T{{[0-9]+\.[XYZW], PV\.[XYZW]}}
; EG: FLT_TO_UINT {{\** *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
+
; SI: v_cvt_u32_f32_e32
; SI: v_cvt_u32_f32_e32
-
-define void @fp_to_uint_v2i32(<2 x i32> addrspace(1)* %out, <2 x float> %in) {
+define void @fp_to_uint_v2f32_to_v2i32(<2 x i32> addrspace(1)* %out, <2 x float> %in) {
%result = fptoui <2 x float> %in to <2 x i32>
store <2 x i32> %result, <2 x i32> addrspace(1)* %out
ret void
}
-; FUNC-LABEL: {{^}}fp_to_uint_v4i32:
+; FUNC-LABEL: {{^}}fp_to_uint_v4f32_to_v4i32:
; EG: FLT_TO_UINT {{\** *}}T{{[0-9]+\.[XYZW], PV\.[XYZW]}}
; EG: FLT_TO_UINT {{\** *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
; EG: FLT_TO_UINT {{\** *}}T{{[0-9]+\.[XYZW], PV\.[XYZW]}}
@@ -33,14 +35,14 @@ define void @fp_to_uint_v2i32(<2 x i32> addrspace(1)* %out, <2 x float> %in) {
; SI: v_cvt_u32_f32_e32
; SI: v_cvt_u32_f32_e32
-define void @fp_to_uint_v4i32(<4 x i32> addrspace(1)* %out, <4 x float> addrspace(1)* %in) {
+define void @fp_to_uint_v4f32_to_v4i32(<4 x i32> addrspace(1)* %out, <4 x float> addrspace(1)* %in) {
%value = load <4 x float> addrspace(1) * %in
%result = fptoui <4 x float> %value to <4 x i32>
store <4 x i32> %result, <4 x i32> addrspace(1)* %out
ret void
}
-; FUNC: {{^}}fp_to_uint_i64:
+; FUNC: {{^}}fp_to_uint_f32_to_i64:
; EG-DAG: AND_INT
; EG-DAG: LSHR
; EG-DAG: SUB_INT
@@ -64,13 +66,13 @@ define void @fp_to_uint_v4i32(<4 x i32> addrspace(1)* %out, <4 x float> addrspac
; EG-DAG: CNDE_INT
; SI: s_endpgm
-define void @fp_to_uint_i64(i64 addrspace(1)* %out, float %x) {
+define void @fp_to_uint_f32_to_i64(i64 addrspace(1)* %out, float %x) {
%conv = fptoui float %x to i64
store i64 %conv, i64 addrspace(1)* %out
ret void
}
-; FUNC: {{^}}fp_to_uint_v2i64:
+; FUNC: {{^}}fp_to_uint_v2f32_to_v2i64:
; EG-DAG: AND_INT
; EG-DAG: LSHR
; EG-DAG: SUB_INT
@@ -115,13 +117,13 @@ define void @fp_to_uint_i64(i64 addrspace(1)* %out, float %x) {
; EG-DAG: CNDE_INT
; SI: s_endpgm
-define void @fp_to_uint_v2i64(<2 x i64> addrspace(1)* %out, <2 x float> %x) {
+define void @fp_to_uint_v2f32_to_v2i64(<2 x i64> addrspace(1)* %out, <2 x float> %x) {
%conv = fptoui <2 x float> %x to <2 x i64>
store <2 x i64> %conv, <2 x i64> addrspace(1)* %out
ret void
}
-; FUNC: {{^}}fp_to_uint_v4i64:
+; FUNC: {{^}}fp_to_uint_v4f32_to_v4i64:
; EG-DAG: AND_INT
; EG-DAG: LSHR
; EG-DAG: SUB_INT
@@ -208,7 +210,7 @@ define void @fp_to_uint_v2i64(<2 x i64> addrspace(1)* %out, <2 x float> %x) {
; EG-DAG: CNDE_INT
; SI: s_endpgm
-define void @fp_to_uint_v4i64(<4 x i64> addrspace(1)* %out, <4 x float> %x) {
+define void @fp_to_uint_v4f32_to_v4i64(<4 x i64> addrspace(1)* %out, <4 x float> %x) {
%conv = fptoui <4 x float> %x to <4 x i64>
store <4 x i64> %conv, <4 x i64> addrspace(1)* %out
ret void
diff --git a/test/CodeGen/R600/fpext.ll b/test/CodeGen/R600/fpext.ll
index 418395f..734a43b 100644
--- a/test/CodeGen/R600/fpext.ll
+++ b/test/CodeGen/R600/fpext.ll
@@ -1,9 +1,45 @@
-; RUN: llc < %s -march=r600 -mcpu=SI -verify-machineinstrs | FileCheck %s --check-prefix=CHECK
+; RUN: llc -march=amdgcn -mcpu=SI -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
+; RUN: llc -march=amdgcn -mcpu=tonga -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
-; CHECK: {{^}}fpext:
-; CHECK: v_cvt_f64_f32_e32
-define void @fpext(double addrspace(1)* %out, float %in) {
+; FUNC-LABEL: {{^}}fpext_f32_to_f64:
+; SI: v_cvt_f64_f32_e32 {{v\[[0-9]+:[0-9]+\]}}, s{{[0-9]+}}
+define void @fpext_f32_to_f64(double addrspace(1)* %out, float %in) {
%result = fpext float %in to double
store double %result, double addrspace(1)* %out
ret void
}
+
+; FUNC-LABEL: {{^}}fpext_v2f32_to_v2f64:
+; SI: v_cvt_f64_f32_e32
+; SI: v_cvt_f64_f32_e32
+define void @fpext_v2f32_to_v2f64(<2 x double> addrspace(1)* %out, <2 x float> %in) {
+ %result = fpext <2 x float> %in to <2 x double>
+ store <2 x double> %result, <2 x double> addrspace(1)* %out
+ ret void
+}
+
+; FUNC-LABEL: {{^}}fpext_v4f32_to_v4f64:
+; SI: v_cvt_f64_f32_e32
+; SI: v_cvt_f64_f32_e32
+; SI: v_cvt_f64_f32_e32
+; SI: v_cvt_f64_f32_e32
+define void @fpext_v4f32_to_v4f64(<4 x double> addrspace(1)* %out, <4 x float> %in) {
+ %result = fpext <4 x float> %in to <4 x double>
+ store <4 x double> %result, <4 x double> addrspace(1)* %out
+ ret void
+}
+
+; FUNC-LABEL: {{^}}fpext_v8f32_to_v8f64:
+; SI: v_cvt_f64_f32_e32
+; SI: v_cvt_f64_f32_e32
+; SI: v_cvt_f64_f32_e32
+; SI: v_cvt_f64_f32_e32
+; SI: v_cvt_f64_f32_e32
+; SI: v_cvt_f64_f32_e32
+; SI: v_cvt_f64_f32_e32
+; SI: v_cvt_f64_f32_e32
+define void @fpext_v8f32_to_v8f64(<8 x double> addrspace(1)* %out, <8 x float> %in) {
+ %result = fpext <8 x float> %in to <8 x double>
+ store <8 x double> %result, <8 x double> addrspace(1)* %out
+ ret void
+}
diff --git a/test/CodeGen/R600/fptrunc.ll b/test/CodeGen/R600/fptrunc.ll
index 8ac8d3b..385e10e 100644
--- a/test/CodeGen/R600/fptrunc.ll
+++ b/test/CodeGen/R600/fptrunc.ll
@@ -1,9 +1,45 @@
-; RUN: llc < %s -march=r600 -mcpu=SI -verify-machineinstrs | FileCheck %s --check-prefix=CHECK
+; RUN: llc -march=amdgcn -mcpu=SI -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
+; RUN: llc -march=amdgcn -mcpu=tonga -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
-; CHECK: {{^}}fptrunc:
-; CHECK: v_cvt_f32_f64_e32
-define void @fptrunc(float addrspace(1)* %out, double %in) {
+; FUNC-LABEL: {{^}}fptrunc_f64_to_f32:
+; SI: v_cvt_f32_f64_e32 {{v[0-9]+}}, {{s\[[0-9]+:[0-9]+\]}}
+define void @fptrunc_f64_to_f32(float addrspace(1)* %out, double %in) {
%result = fptrunc double %in to float
store float %result, float addrspace(1)* %out
ret void
}
+
+; FUNC-LABEL: {{^}}fptrunc_v2f64_to_v2f32:
+; SI: v_cvt_f32_f64_e32
+; SI: v_cvt_f32_f64_e32
+define void @fptrunc_v2f64_to_v2f32(<2 x float> addrspace(1)* %out, <2 x double> %in) {
+ %result = fptrunc <2 x double> %in to <2 x float>
+ store <2 x float> %result, <2 x float> addrspace(1)* %out
+ ret void
+}
+
+; FUNC-LABEL: {{^}}fptrunc_v4f64_to_v4f32:
+; SI: v_cvt_f32_f64_e32
+; SI: v_cvt_f32_f64_e32
+; SI: v_cvt_f32_f64_e32
+; SI: v_cvt_f32_f64_e32
+define void @fptrunc_v4f64_to_v4f32(<4 x float> addrspace(1)* %out, <4 x double> %in) {
+ %result = fptrunc <4 x double> %in to <4 x float>
+ store <4 x float> %result, <4 x float> addrspace(1)* %out
+ ret void
+}
+
+; FUNC-LABEL: {{^}}fptrunc_v8f64_to_v8f32:
+; SI: v_cvt_f32_f64_e32
+; SI: v_cvt_f32_f64_e32
+; SI: v_cvt_f32_f64_e32
+; SI: v_cvt_f32_f64_e32
+; SI: v_cvt_f32_f64_e32
+; SI: v_cvt_f32_f64_e32
+; SI: v_cvt_f32_f64_e32
+; SI: v_cvt_f32_f64_e32
+define void @fptrunc_v8f64_to_v8f32(<8 x float> addrspace(1)* %out, <8 x double> %in) {
+ %result = fptrunc <8 x double> %in to <8 x float>
+ store <8 x float> %result, <8 x float> addrspace(1)* %out
+ ret void
+}
diff --git a/test/CodeGen/R600/frem.ll b/test/CodeGen/R600/frem.ll
index c846a77..02a0070 100644
--- a/test/CodeGen/R600/frem.ll
+++ b/test/CodeGen/R600/frem.ll
@@ -1,16 +1,18 @@
-; RUN: llc -march=r600 -mcpu=SI -enable-misched < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
+; RUN: llc -march=amdgcn -mcpu=SI -enable-misched < %s | FileCheck -check-prefix=SI -check-prefix=GCN -check-prefix=FUNC %s
+; RUN: llc -march=amdgcn -mcpu=bonaire -enable-misched < %s | FileCheck -check-prefix=CI -check-prefix=GCN -check-prefix=FUNC %s
+; RUN: llc -march=amdgcn -mcpu=tonga -enable-misched < %s | FileCheck -check-prefix=CI -check-prefix=GCN -check-prefix=FUNC %s
; FUNC-LABEL: {{^}}frem_f32:
-; SI-DAG: buffer_load_dword [[X:v[0-9]+]], {{.*$}}
-; SI-DAG: buffer_load_dword [[Y:v[0-9]+]], {{.*}} offset:0x10
-; SI-DAG: v_cmp
-; SI-DAG: v_mul_f32
-; SI: v_rcp_f32_e32
-; SI: v_mul_f32_e32
-; SI: v_mul_f32_e32
-; SI: v_trunc_f32_e32
-; SI: v_mad_f32
-; SI: s_endpgm
+; GCN-DAG: buffer_load_dword [[X:v[0-9]+]], {{.*$}}
+; GCN-DAG: buffer_load_dword [[Y:v[0-9]+]], {{.*}} offset:16
+; GCN-DAG: v_cmp
+; GCN-DAG: v_mul_f32
+; GCN: v_rcp_f32_e32
+; GCN: v_mul_f32_e32
+; GCN: v_mul_f32_e32
+; GCN: v_trunc_f32_e32
+; GCN: v_mad_f32
+; GCN: s_endpgm
define void @frem_f32(float addrspace(1)* %out, float addrspace(1)* %in1,
float addrspace(1)* %in2) #0 {
%gep2 = getelementptr float addrspace(1)* %in2, i32 4
@@ -22,14 +24,14 @@ define void @frem_f32(float addrspace(1)* %out, float addrspace(1)* %in1,
}
; FUNC-LABEL: {{^}}unsafe_frem_f32:
-; SI: buffer_load_dword [[Y:v[0-9]+]], {{.*}} offset:0x10
-; SI: buffer_load_dword [[X:v[0-9]+]], {{.*}}
-; SI: v_rcp_f32_e32 [[INVY:v[0-9]+]], [[Y]]
-; SI: v_mul_f32_e32 [[DIV:v[0-9]+]], [[INVY]], [[X]]
-; SI: v_trunc_f32_e32 [[TRUNC:v[0-9]+]], [[DIV]]
-; SI: v_mad_f32 [[RESULT:v[0-9]+]], -[[TRUNC]], [[Y]], [[X]]
-; SI: buffer_store_dword [[RESULT]]
-; SI: s_endpgm
+; GCN: buffer_load_dword [[Y:v[0-9]+]], {{.*}} offset:16
+; GCN: buffer_load_dword [[X:v[0-9]+]], {{.*}}
+; GCN: v_rcp_f32_e32 [[INVY:v[0-9]+]], [[Y]]
+; GCN: v_mul_f32_e32 [[DIV:v[0-9]+]], [[INVY]], [[X]]
+; GCN: v_trunc_f32_e32 [[TRUNC:v[0-9]+]], [[DIV]]
+; GCN: v_mad_f32 [[RESULT:v[0-9]+]], -[[TRUNC]], [[Y]], [[X]]
+; GCN: buffer_store_dword [[RESULT]]
+; GCN: s_endpgm
define void @unsafe_frem_f32(float addrspace(1)* %out, float addrspace(1)* %in1,
float addrspace(1)* %in2) #1 {
%gep2 = getelementptr float addrspace(1)* %in2, i32 4
@@ -40,11 +42,17 @@ define void @unsafe_frem_f32(float addrspace(1)* %out, float addrspace(1)* %in1,
ret void
}
-; TODO: This should check something when f64 fdiv is implemented
-; correctly
-
; FUNC-LABEL: {{^}}frem_f64:
-; SI: s_endpgm
+; GCN: buffer_load_dwordx2 [[Y:v\[[0-9]+:[0-9]+\]]], {{.*}}, 0
+; GCN: buffer_load_dwordx2 [[X:v\[[0-9]+:[0-9]+\]]], {{.*}}, 0
+; GCN-DAG: v_div_fmas_f64
+; GCN-DAG: v_div_scale_f64
+; GCN-DAG: v_mul_f64
+; CI: v_trunc_f64_e32
+; CI: v_mul_f64
+; GCN: v_add_f64
+; GCN: buffer_store_dwordx2
+; GCN: s_endpgm
define void @frem_f64(double addrspace(1)* %out, double addrspace(1)* %in1,
double addrspace(1)* %in2) #0 {
%r0 = load double addrspace(1)* %in1, align 8
@@ -55,11 +63,12 @@ define void @frem_f64(double addrspace(1)* %out, double addrspace(1)* %in1,
}
; FUNC-LABEL: {{^}}unsafe_frem_f64:
-; SI: v_rcp_f64_e32
-; SI: v_mul_f64
+; GCN: v_rcp_f64_e32
+; GCN: v_mul_f64
; SI: v_bfe_u32
-; SI: v_fma_f64
-; SI: s_endpgm
+; CI: v_trunc_f64_e32
+; GCN: v_fma_f64
+; GCN: s_endpgm
define void @unsafe_frem_f64(double addrspace(1)* %out, double addrspace(1)* %in1,
double addrspace(1)* %in2) #1 {
%r0 = load double addrspace(1)* %in1, align 8
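The check sequences above spell out the standard frem expansion, frem(x, y) = x - trunc(x / y) * y; in the unsafe f32 variant the division is approximated with a reciprocal and the final subtract is fused into a mad:

; div = x * rcp(y)    ; v_rcp_f32_e32, v_mul_f32_e32
; t   = trunc(div)    ; v_trunc_f32_e32
; rem = -t * y + x    ; v_mad_f32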
diff --git a/test/CodeGen/R600/fsqrt.ll b/test/CodeGen/R600/fsqrt.ll
index 1f91faf..1fdf3e4 100644
--- a/test/CodeGen/R600/fsqrt.ll
+++ b/test/CodeGen/R600/fsqrt.ll
@@ -1,4 +1,9 @@
-; RUN: llc < %s -march=r600 -mcpu=tahiti -verify-machineinstrs | FileCheck %s
+; RUN: llc -march=amdgcn -mcpu=tahiti -verify-machineinstrs < %s | FileCheck %s
+; RUN: llc -march=amdgcn -mcpu=tahiti -verify-machineinstrs -enable-unsafe-fp-math < %s | FileCheck %s
+; RUN: llc -march=amdgcn -mcpu=tonga -verify-machineinstrs < %s | FileCheck %s
+; RUN: llc -march=amdgcn -mcpu=tonga -verify-machineinstrs -enable-unsafe-fp-math < %s | FileCheck %s
+
+; Run with unsafe-fp-math to make sure nothing tries to turn this into 1 / rsqrt(x)
; CHECK: {{^}}fsqrt_f32:
; CHECK: v_sqrt_f32_e32 {{v[0-9]+, v[0-9]+}}
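A sketch of the rewrite those unsafe-fp-math RUN lines rule out: expanding the square root through the hardware reciprocal square root compounds two approximations, so the test pins the single-instruction form even under fast math (registers illustrative):

; unwanted: v_rsq_f32_e32 v0, v0
;           v_rcp_f32_e32 v0, v0    ; sqrt(x) ~= 1 / rsqrt(x)
; required: v_sqrt_f32_e32 v0, v0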
diff --git a/test/CodeGen/R600/fsub.ll b/test/CodeGen/R600/fsub.ll
index 6e5ccf1..ef90fea 100644
--- a/test/CodeGen/R600/fsub.ll
+++ b/test/CodeGen/R600/fsub.ll
@@ -1,5 +1,6 @@
; RUN: llc -march=r600 -mcpu=redwood < %s | FileCheck -check-prefix=R600 -check-prefix=FUNC %s
-; RUN: llc -march=r600 -mcpu=SI -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
+; RUN: llc -march=amdgcn -mcpu=SI -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
+; RUN: llc -march=amdgcn -mcpu=tonga -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
; FUNC-LABEL: {{^}}v_fsub_f32:
diff --git a/test/CodeGen/R600/fsub64.ll b/test/CodeGen/R600/fsub64.ll
index eca1b62..2d85cc5 100644
--- a/test/CodeGen/R600/fsub64.ll
+++ b/test/CodeGen/R600/fsub64.ll
@@ -1,12 +1,107 @@
-; RUN: llc -march=r600 -mcpu=tahiti -verify-machineinstrs < %s | FileCheck -check-prefix=SI %s
+; RUN: llc -march=amdgcn -mcpu=tahiti -verify-machineinstrs < %s | FileCheck -check-prefix=SI %s
+; RUN: llc -march=amdgcn -mcpu=tonga -verify-machineinstrs < %s | FileCheck -check-prefix=SI %s
+
+declare double @llvm.fabs.f64(double) #0
; SI-LABEL: {{^}}fsub_f64:
; SI: v_add_f64 {{v\[[0-9]+:[0-9]+\], v\[[0-9]+:[0-9]+\], -v\[[0-9]+:[0-9]+\]}}
define void @fsub_f64(double addrspace(1)* %out, double addrspace(1)* %in1,
double addrspace(1)* %in2) {
- %r0 = load double addrspace(1)* %in1
- %r1 = load double addrspace(1)* %in2
- %r2 = fsub double %r0, %r1
- store double %r2, double addrspace(1)* %out
- ret void
+ %r0 = load double addrspace(1)* %in1
+ %r1 = load double addrspace(1)* %in2
+ %r2 = fsub double %r0, %r1
+ store double %r2, double addrspace(1)* %out
+ ret void
+}
+
+; SI-LABEL: {{^}}fsub_fabs_f64:
+; SI: v_add_f64 {{v\[[0-9]+:[0-9]+\], v\[[0-9]+:[0-9]+\], -\|v\[[0-9]+:[0-9]+\]\|}}
+define void @fsub_fabs_f64(double addrspace(1)* %out, double addrspace(1)* %in1,
+ double addrspace(1)* %in2) {
+ %r0 = load double addrspace(1)* %in1
+ %r1 = load double addrspace(1)* %in2
+ %r1.fabs = call double @llvm.fabs.f64(double %r1) #0
+ %r2 = fsub double %r0, %r1.fabs
+ store double %r2, double addrspace(1)* %out
+ ret void
+}
+
+; SI-LABEL: {{^}}fsub_fabs_inv_f64:
+; SI: v_add_f64 {{v\[[0-9]+:[0-9]+\], \|v\[[0-9]+:[0-9]+\]\|, -v\[[0-9]+:[0-9]+\]}}
+define void @fsub_fabs_inv_f64(double addrspace(1)* %out, double addrspace(1)* %in1,
+ double addrspace(1)* %in2) {
+ %r0 = load double addrspace(1)* %in1
+ %r1 = load double addrspace(1)* %in2
+ %r0.fabs = call double @llvm.fabs.f64(double %r0) #0
+ %r2 = fsub double %r0.fabs, %r1
+ store double %r2, double addrspace(1)* %out
+ ret void
+}
+
+; SI-LABEL: {{^}}s_fsub_f64:
+; SI: v_add_f64 {{v\[[0-9]+:[0-9]+\], s\[[0-9]+:[0-9]+\], -v\[[0-9]+:[0-9]+\]}}
+define void @s_fsub_f64(double addrspace(1)* %out, double %a, double %b) {
+ %sub = fsub double %a, %b
+ store double %sub, double addrspace(1)* %out
+ ret void
+}
+
+; SI-LABEL: {{^}}s_fsub_imm_f64:
+; SI: v_add_f64 {{v\[[0-9]+:[0-9]+\], 4.0, -s\[[0-9]+:[0-9]+\]}}
+define void @s_fsub_imm_f64(double addrspace(1)* %out, double %a, double %b) {
+ %sub = fsub double 4.0, %a
+ store double %sub, double addrspace(1)* %out
+ ret void
+}
+
+; SI-LABEL: {{^}}s_fsub_imm_inv_f64:
+; SI: v_add_f64 {{v\[[0-9]+:[0-9]+\], -4.0, s\[[0-9]+:[0-9]+\]}}
+define void @s_fsub_imm_inv_f64(double addrspace(1)* %out, double %a, double %b) {
+ %sub = fsub double %a, 4.0
+ store double %sub, double addrspace(1)* %out
+ ret void
+}
+
+; SI-LABEL: {{^}}s_fsub_self_f64:
+; SI: v_add_f64 {{v\[[0-9]+:[0-9]+\], s\[[0-9]+:[0-9]+\], -s\[[0-9]+:[0-9]+\]}}
+define void @s_fsub_self_f64(double addrspace(1)* %out, double %a) {
+ %sub = fsub double %a, %a
+ store double %sub, double addrspace(1)* %out
+ ret void
+}
+
+; SI-LABEL: {{^}}fsub_v2f64:
+; SI: v_add_f64 {{v\[[0-9]+:[0-9]+\], s\[[0-9]+:[0-9]+\], -v\[[0-9]+:[0-9]+\]}}
+; SI: v_add_f64 {{v\[[0-9]+:[0-9]+\], s\[[0-9]+:[0-9]+\], -v\[[0-9]+:[0-9]+\]}}
+define void @fsub_v2f64(<2 x double> addrspace(1)* %out, <2 x double> %a, <2 x double> %b) {
+ %sub = fsub <2 x double> %a, %b
+ store <2 x double> %sub, <2 x double> addrspace(1)* %out
+ ret void
+}
+
+; SI-LABEL: {{^}}fsub_v4f64:
+; SI: v_add_f64 {{v\[[0-9]+:[0-9]+\], v\[[0-9]+:[0-9]+\], -v\[[0-9]+:[0-9]+\]}}
+; SI: v_add_f64 {{v\[[0-9]+:[0-9]+\], v\[[0-9]+:[0-9]+\], -v\[[0-9]+:[0-9]+\]}}
+; SI: v_add_f64 {{v\[[0-9]+:[0-9]+\], v\[[0-9]+:[0-9]+\], -v\[[0-9]+:[0-9]+\]}}
+; SI: v_add_f64 {{v\[[0-9]+:[0-9]+\], v\[[0-9]+:[0-9]+\], -v\[[0-9]+:[0-9]+\]}}
+define void @fsub_v4f64(<4 x double> addrspace(1)* %out, <4 x double> addrspace(1)* %in) {
+ %b_ptr = getelementptr <4 x double> addrspace(1)* %in, i32 1
+ %a = load <4 x double> addrspace(1)* %in
+ %b = load <4 x double> addrspace(1)* %b_ptr
+ %result = fsub <4 x double> %a, %b
+ store <4 x double> %result, <4 x double> addrspace(1)* %out
+ ret void
}
+
+; SI-LABEL: {{^}}s_fsub_v4f64:
+; SI: v_add_f64 {{v\[[0-9]+:[0-9]+\], s\[[0-9]+:[0-9]+\], -v\[[0-9]+:[0-9]+\]}}
+; SI: v_add_f64 {{v\[[0-9]+:[0-9]+\], s\[[0-9]+:[0-9]+\], -v\[[0-9]+:[0-9]+\]}}
+; SI: v_add_f64 {{v\[[0-9]+:[0-9]+\], s\[[0-9]+:[0-9]+\], -v\[[0-9]+:[0-9]+\]}}
+; SI: v_add_f64 {{v\[[0-9]+:[0-9]+\], s\[[0-9]+:[0-9]+\], -v\[[0-9]+:[0-9]+\]}}
+define void @s_fsub_v4f64(<4 x double> addrspace(1)* %out, <4 x double> %a, <4 x double> %b) {
+ %result = fsub <4 x double> %a, %b
+ store <4 x double> %result, <4 x double> addrspace(1)* %out, align 16
+ ret void
+}
+
+attributes #0 = { nounwind readnone }
diff --git a/test/CodeGen/R600/ftrunc.f64.ll b/test/CodeGen/R600/ftrunc.f64.ll
index fba6154..21399a8 100644
--- a/test/CodeGen/R600/ftrunc.f64.ll
+++ b/test/CodeGen/R600/ftrunc.f64.ll
@@ -1,5 +1,6 @@
-; RUN: llc -march=r600 -mcpu=bonaire < %s | FileCheck -check-prefix=CI -check-prefix=FUNC %s
-; RUN: llc -march=r600 -mcpu=SI < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
+; RUN: llc -march=amdgcn -mcpu=SI < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
+; RUN: llc -march=amdgcn -mcpu=bonaire < %s | FileCheck -check-prefix=CI -check-prefix=FUNC %s
+; RUN: llc -march=amdgcn -mcpu=tonga < %s | FileCheck -check-prefix=CI -check-prefix=FUNC %s
declare double @llvm.trunc.f64(double) nounwind readnone
declare <2 x double> @llvm.trunc.v2f64(<2 x double>) nounwind readnone
@@ -23,12 +24,12 @@ define void @v_ftrunc_f64(double addrspace(1)* %out, double addrspace(1)* %in) {
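+; The SI expansion of f64 trunc is all integer ops: s_bfe_u32 extracts the
+; 11-bit exponent from the high dword (0xb0014 = width 11, offset 20), the
+; 0xfffffc01 add subtracts the 1023 bias, and the compare/cndmask pairs
+; select between the exponent-too-small and already-integral cases.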
; CI: v_trunc_f64_e32
; SI: s_bfe_u32 [[SEXP:s[0-9]+]], {{s[0-9]+}}, 0xb0014
+; SI: s_and_b32 s{{[0-9]+}}, s{{[0-9]+}}, 0x80000000
; SI: s_add_i32 s{{[0-9]+}}, [[SEXP]], 0xfffffc01
; SI: s_lshr_b64
+; SI: cmp_lt_i32
; SI: s_not_b64
; SI: s_and_b64
-; SI: s_and_b32 s{{[0-9]+}}, s{{[0-9]+}}, 0x80000000
-; SI: cmp_lt_i32
; SI: cndmask_b32
; SI: cndmask_b32
; SI: cmp_gt_i32
diff --git a/test/CodeGen/R600/ftrunc.ll b/test/CodeGen/R600/ftrunc.ll
index 0eb1d7d..edc0860 100644
--- a/test/CodeGen/R600/ftrunc.ll
+++ b/test/CodeGen/R600/ftrunc.ll
@@ -1,5 +1,6 @@
; RUN: llc -march=r600 -mcpu=redwood < %s | FileCheck -check-prefix=EG --check-prefix=FUNC %s
-; RUN: llc -march=r600 -mcpu=SI < %s | FileCheck -check-prefix=SI --check-prefix=FUNC %s
+; RUN: llc -march=amdgcn -mcpu=SI < %s | FileCheck -check-prefix=SI --check-prefix=FUNC %s
+; RUN: llc -march=amdgcn -mcpu=tonga < %s | FileCheck -check-prefix=SI --check-prefix=FUNC %s
declare float @llvm.trunc.f32(float) nounwind readnone
declare <2 x float> @llvm.trunc.v2f32(<2 x float>) nounwind readnone
diff --git a/test/CodeGen/R600/gep-address-space.ll b/test/CodeGen/R600/gep-address-space.ll
index 036daaf..5c6920d 100644
--- a/test/CodeGen/R600/gep-address-space.ll
+++ b/test/CodeGen/R600/gep-address-space.ll
@@ -1,5 +1,6 @@
-; RUN: llc -march=r600 -mcpu=SI -verify-machineinstrs< %s | FileCheck --check-prefix=SI --check-prefix=CHECK %s
-; RUN: llc -march=r600 -mcpu=bonaire -verify-machineinstrs< %s | FileCheck --check-prefix=CI --check-prefix=CHECK %s
+; RUN: llc -march=amdgcn -mcpu=SI -verify-machineinstrs < %s | FileCheck --check-prefix=SI --check-prefix=CHECK %s
+; RUN: llc -march=amdgcn -mcpu=bonaire -verify-machineinstrs < %s | FileCheck --check-prefix=CI --check-prefix=CHECK %s
+; RUN: llc -march=amdgcn -mcpu=tonga -verify-machineinstrs < %s | FileCheck --check-prefix=CI --check-prefix=CHECK %s
define void @use_gep_address_space([1024 x i32] addrspace(3)* %array) nounwind {
; CHECK-LABEL: {{^}}use_gep_address_space:
diff --git a/test/CodeGen/R600/global-directive.ll b/test/CodeGen/R600/global-directive.ll
index d1244b8..3ba12c2 100644
--- a/test/CodeGen/R600/global-directive.ll
+++ b/test/CodeGen/R600/global-directive.ll
@@ -1,4 +1,5 @@
-; RUN: llc -march=r600 -mcpu=SI -verify-machineinstrs < %s | FileCheck -check-prefix=SI %s
+; RUN: llc -march=amdgcn -mcpu=SI -verify-machineinstrs < %s | FileCheck -check-prefix=SI %s
+; RUN: llc -march=amdgcn -mcpu=tonga -verify-machineinstrs < %s | FileCheck -check-prefix=SI %s
; Make sure the GlobalDirective isn't merged with the function name
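+; A minimal sketch of the expected output shape (symbol name assumed):
+;   .globl  foo
+; foo: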
diff --git a/test/CodeGen/R600/global-extload-i1.ll b/test/CodeGen/R600/global-extload-i1.ll
new file mode 100644
index 0000000..67d36ce
--- /dev/null
+++ b/test/CodeGen/R600/global-extload-i1.ll
@@ -0,0 +1,302 @@
+; RUN: llc -march=amdgcn -mcpu=SI -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
+; RUN: llc -march=amdgcn -mcpu=tonga -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
+; XUN: llc -march=r600 -mcpu=cypress < %s | FileCheck -check-prefix=EG -check-prefix=FUNC %s
+; FIXME: Evergreen broken
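+;
+; i1 values are stored as bytes, so zext loads use the loaded byte
+; directly; sext re-extends it with v_bfe_i32 ..., 0, 1 (plus
+; v_ashrrev_i32 for the high dword of i64 results).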
+
+; FUNC-LABEL: {{^}}zextload_global_i1_to_i32:
+; SI: buffer_load_ubyte
+; SI: buffer_store_dword
+; SI: s_endpgm
+define void @zextload_global_i1_to_i32(i32 addrspace(1)* %out, i1 addrspace(1)* %in) nounwind {
+ %a = load i1 addrspace(1)* %in
+ %ext = zext i1 %a to i32
+ store i32 %ext, i32 addrspace(1)* %out
+ ret void
+}
+
+; FUNC-LABEL: {{^}}sextload_global_i1_to_i32:
+; SI: buffer_load_ubyte
+; SI: v_bfe_i32 {{v[0-9]+}}, {{v[0-9]+}}, 0, 1{{$}}
+; SI: buffer_store_dword
+; SI: s_endpgm
+define void @sextload_global_i1_to_i32(i32 addrspace(1)* %out, i1 addrspace(1)* %in) nounwind {
+ %a = load i1 addrspace(1)* %in
+ %ext = sext i1 %a to i32
+ store i32 %ext, i32 addrspace(1)* %out
+ ret void
+}
+
+; FUNC-LABEL: {{^}}zextload_global_v1i1_to_v1i32:
+; SI: s_endpgm
+define void @zextload_global_v1i1_to_v1i32(<1 x i32> addrspace(1)* %out, <1 x i1> addrspace(1)* nocapture %in) nounwind {
+ %load = load <1 x i1> addrspace(1)* %in
+ %ext = zext <1 x i1> %load to <1 x i32>
+ store <1 x i32> %ext, <1 x i32> addrspace(1)* %out
+ ret void
+}
+
+; FUNC-LABEL: {{^}}sextload_global_v1i1_to_v1i32:
+; SI: s_endpgm
+define void @sextload_global_v1i1_to_v1i32(<1 x i32> addrspace(1)* %out, <1 x i1> addrspace(1)* nocapture %in) nounwind {
+ %load = load <1 x i1> addrspace(1)* %in
+ %ext = sext <1 x i1> %load to <1 x i32>
+ store <1 x i32> %ext, <1 x i32> addrspace(1)* %out
+ ret void
+}
+
+; FUNC-LABEL: {{^}}zextload_global_v2i1_to_v2i32:
+; SI: s_endpgm
+define void @zextload_global_v2i1_to_v2i32(<2 x i32> addrspace(1)* %out, <2 x i1> addrspace(1)* nocapture %in) nounwind {
+ %load = load <2 x i1> addrspace(1)* %in
+ %ext = zext <2 x i1> %load to <2 x i32>
+ store <2 x i32> %ext, <2 x i32> addrspace(1)* %out
+ ret void
+}
+
+; FUNC-LABEL: {{^}}sextload_global_v2i1_to_v2i32:
+; SI: s_endpgm
+define void @sextload_global_v2i1_to_v2i32(<2 x i32> addrspace(1)* %out, <2 x i1> addrspace(1)* nocapture %in) nounwind {
+ %load = load <2 x i1> addrspace(1)* %in
+ %ext = sext <2 x i1> %load to <2 x i32>
+ store <2 x i32> %ext, <2 x i32> addrspace(1)* %out
+ ret void
+}
+
+; FUNC-LABEL: {{^}}zextload_global_v4i1_to_v4i32:
+; SI: s_endpgm
+define void @zextload_global_v4i1_to_v4i32(<4 x i32> addrspace(1)* %out, <4 x i1> addrspace(1)* nocapture %in) nounwind {
+ %load = load <4 x i1> addrspace(1)* %in
+ %ext = zext <4 x i1> %load to <4 x i32>
+ store <4 x i32> %ext, <4 x i32> addrspace(1)* %out
+ ret void
+}
+
+; FUNC-LABEL: {{^}}sextload_global_v4i1_to_v4i32:
+; SI: s_endpgm
+define void @sextload_global_v4i1_to_v4i32(<4 x i32> addrspace(1)* %out, <4 x i1> addrspace(1)* nocapture %in) nounwind {
+ %load = load <4 x i1> addrspace(1)* %in
+ %ext = sext <4 x i1> %load to <4 x i32>
+ store <4 x i32> %ext, <4 x i32> addrspace(1)* %out
+ ret void
+}
+
+; FUNC-LABEL: {{^}}zextload_global_v8i1_to_v8i32:
+; SI: s_endpgm
+define void @zextload_global_v8i1_to_v8i32(<8 x i32> addrspace(1)* %out, <8 x i1> addrspace(1)* nocapture %in) nounwind {
+ %load = load <8 x i1> addrspace(1)* %in
+ %ext = zext <8 x i1> %load to <8 x i32>
+ store <8 x i32> %ext, <8 x i32> addrspace(1)* %out
+ ret void
+}
+
+; FUNC-LABEL: {{^}}sextload_global_v8i1_to_v8i32:
+; SI: s_endpgm
+define void @sextload_global_v8i1_to_v8i32(<8 x i32> addrspace(1)* %out, <8 x i1> addrspace(1)* nocapture %in) nounwind {
+ %load = load <8 x i1> addrspace(1)* %in
+ %ext = sext <8 x i1> %load to <8 x i32>
+ store <8 x i32> %ext, <8 x i32> addrspace(1)* %out
+ ret void
+}
+
+; FUNC-LABEL: {{^}}zextload_global_v16i1_to_v16i32:
+; SI: s_endpgm
+define void @zextload_global_v16i1_to_v16i32(<16 x i32> addrspace(1)* %out, <16 x i1> addrspace(1)* nocapture %in) nounwind {
+ %load = load <16 x i1> addrspace(1)* %in
+ %ext = zext <16 x i1> %load to <16 x i32>
+ store <16 x i32> %ext, <16 x i32> addrspace(1)* %out
+ ret void
+}
+
+; FUNC-LABEL: {{^}}sextload_global_v16i1_to_v16i32:
+; SI: s_endpgm
+define void @sextload_global_v16i1_to_v16i32(<16 x i32> addrspace(1)* %out, <16 x i1> addrspace(1)* nocapture %in) nounwind {
+ %load = load <16 x i1> addrspace(1)* %in
+ %ext = sext <16 x i1> %load to <16 x i32>
+ store <16 x i32> %ext, <16 x i32> addrspace(1)* %out
+ ret void
+}
+
+; XFUNC-LABEL: {{^}}zextload_global_v32i1_to_v32i32:
+; XSI: s_endpgm
+; define void @zextload_global_v32i1_to_v32i32(<32 x i32> addrspace(1)* %out, <32 x i1> addrspace(1)* nocapture %in) nounwind {
+; %load = load <32 x i1> addrspace(1)* %in
+; %ext = zext <32 x i1> %load to <32 x i32>
+; store <32 x i32> %ext, <32 x i32> addrspace(1)* %out
+; ret void
+; }
+
+; XFUNC-LABEL: {{^}}sextload_global_v32i1_to_v32i32:
+; XSI: s_endpgm
+; define void @sextload_global_v32i1_to_v32i32(<32 x i32> addrspace(1)* %out, <32 x i1> addrspace(1)* nocapture %in) nounwind {
+; %load = load <32 x i1> addrspace(1)* %in
+; %ext = sext <32 x i1> %load to <32 x i32>
+; store <32 x i32> %ext, <32 x i32> addrspace(1)* %out
+; ret void
+; }
+
+; XFUNC-LABEL: {{^}}zextload_global_v64i1_to_v64i32:
+; XSI: s_endpgm
+; define void @zextload_global_v64i1_to_v64i32(<64 x i32> addrspace(1)* %out, <64 x i1> addrspace(1)* nocapture %in) nounwind {
+; %load = load <64 x i1> addrspace(1)* %in
+; %ext = zext <64 x i1> %load to <64 x i32>
+; store <64 x i32> %ext, <64 x i32> addrspace(1)* %out
+; ret void
+; }
+
+; XFUNC-LABEL: {{^}}sextload_global_v64i1_to_v64i32:
+; XSI: s_endpgm
+; define void @sextload_global_v64i1_to_v64i32(<64 x i32> addrspace(1)* %out, <64 x i1> addrspace(1)* nocapture %in) nounwind {
+; %load = load <64 x i1> addrspace(1)* %in
+; %ext = sext <64 x i1> %load to <64 x i32>
+; store <64 x i32> %ext, <64 x i32> addrspace(1)* %out
+; ret void
+; }
+
+; FUNC-LABEL: {{^}}zextload_global_i1_to_i64:
+; SI: buffer_load_ubyte [[LOAD:v[0-9]+]],
+; SI: v_mov_b32_e32 {{v[0-9]+}}, 0{{$}}
+; SI: buffer_store_dwordx2
+define void @zextload_global_i1_to_i64(i64 addrspace(1)* %out, i1 addrspace(1)* %in) nounwind {
+ %a = load i1 addrspace(1)* %in
+ %ext = zext i1 %a to i64
+ store i64 %ext, i64 addrspace(1)* %out
+ ret void
+}
+
+; FUNC-LABEL: {{^}}sextload_global_i1_to_i64:
+; SI: buffer_load_ubyte [[LOAD:v[0-9]+]],
+; SI: v_bfe_i32 [[BFE:v[0-9]+]], {{v[0-9]+}}, 0, 1{{$}}
+; SI: v_ashrrev_i32_e32 v{{[0-9]+}}, 31, [[BFE]]
+; SI: buffer_store_dwordx2
+define void @sextload_global_i1_to_i64(i64 addrspace(1)* %out, i1 addrspace(1)* %in) nounwind {
+ %a = load i1 addrspace(1)* %in
+ %ext = sext i1 %a to i64
+ store i64 %ext, i64 addrspace(1)* %out
+ ret void
+}
+
+; FUNC-LABEL: {{^}}zextload_global_v1i1_to_v1i64:
+; SI: s_endpgm
+define void @zextload_global_v1i1_to_v1i64(<1 x i64> addrspace(1)* %out, <1 x i1> addrspace(1)* nocapture %in) nounwind {
+ %load = load <1 x i1> addrspace(1)* %in
+ %ext = zext <1 x i1> %load to <1 x i64>
+ store <1 x i64> %ext, <1 x i64> addrspace(1)* %out
+ ret void
+}
+
+; FUNC-LABEL: {{^}}sextload_global_v1i1_to_v1i64:
+; SI: s_endpgm
+define void @sextload_global_v1i1_to_v1i64(<1 x i64> addrspace(1)* %out, <1 x i1> addrspace(1)* nocapture %in) nounwind {
+ %load = load <1 x i1> addrspace(1)* %in
+ %ext = sext <1 x i1> %load to <1 x i64>
+ store <1 x i64> %ext, <1 x i64> addrspace(1)* %out
+ ret void
+}
+
+; FUNC-LABEL: {{^}}zextload_global_v2i1_to_v2i64:
+; SI: s_endpgm
+define void @zextload_global_v2i1_to_v2i64(<2 x i64> addrspace(1)* %out, <2 x i1> addrspace(1)* nocapture %in) nounwind {
+ %load = load <2 x i1> addrspace(1)* %in
+ %ext = zext <2 x i1> %load to <2 x i64>
+ store <2 x i64> %ext, <2 x i64> addrspace(1)* %out
+ ret void
+}
+
+; FUNC-LABEL: {{^}}sextload_global_v2i1_to_v2i64:
+; SI: s_endpgm
+define void @sextload_global_v2i1_to_v2i64(<2 x i64> addrspace(1)* %out, <2 x i1> addrspace(1)* nocapture %in) nounwind {
+ %load = load <2 x i1> addrspace(1)* %in
+ %ext = sext <2 x i1> %load to <2 x i64>
+ store <2 x i64> %ext, <2 x i64> addrspace(1)* %out
+ ret void
+}
+
+; FUNC-LABEL: {{^}}zextload_global_v4i1_to_v4i64:
+; SI: s_endpgm
+define void @zextload_global_v4i1_to_v4i64(<4 x i64> addrspace(1)* %out, <4 x i1> addrspace(1)* nocapture %in) nounwind {
+ %load = load <4 x i1> addrspace(1)* %in
+ %ext = zext <4 x i1> %load to <4 x i64>
+ store <4 x i64> %ext, <4 x i64> addrspace(1)* %out
+ ret void
+}
+
+; FUNC-LABEL: {{^}}sextload_global_v4i1_to_v4i64:
+; SI: s_endpgm
+define void @sextload_global_v4i1_to_v4i64(<4 x i64> addrspace(1)* %out, <4 x i1> addrspace(1)* nocapture %in) nounwind {
+ %load = load <4 x i1> addrspace(1)* %in
+ %ext = sext <4 x i1> %load to <4 x i64>
+ store <4 x i64> %ext, <4 x i64> addrspace(1)* %out
+ ret void
+}
+
+; FUNC-LABEL: {{^}}zextload_global_v8i1_to_v8i64:
+; SI: s_endpgm
+define void @zextload_global_v8i1_to_v8i64(<8 x i64> addrspace(1)* %out, <8 x i1> addrspace(1)* nocapture %in) nounwind {
+ %load = load <8 x i1> addrspace(1)* %in
+ %ext = zext <8 x i1> %load to <8 x i64>
+ store <8 x i64> %ext, <8 x i64> addrspace(1)* %out
+ ret void
+}
+
+; FUNC-LABEL: {{^}}sextload_global_v8i1_to_v8i64:
+; SI: s_endpgm
+define void @sextload_global_v8i1_to_v8i64(<8 x i64> addrspace(1)* %out, <8 x i1> addrspace(1)* nocapture %in) nounwind {
+ %load = load <8 x i1> addrspace(1)* %in
+ %ext = sext <8 x i1> %load to <8 x i64>
+ store <8 x i64> %ext, <8 x i64> addrspace(1)* %out
+ ret void
+}
+
+; FUNC-LABEL: {{^}}zextload_global_v16i1_to_v16i64:
+; SI: s_endpgm
+define void @zextload_global_v16i1_to_v16i64(<16 x i64> addrspace(1)* %out, <16 x i1> addrspace(1)* nocapture %in) nounwind {
+ %load = load <16 x i1> addrspace(1)* %in
+ %ext = zext <16 x i1> %load to <16 x i64>
+ store <16 x i64> %ext, <16 x i64> addrspace(1)* %out
+ ret void
+}
+
+; FUNC-LABEL: {{^}}sextload_global_v16i1_to_v16i64:
+; SI: s_endpgm
+define void @sextload_global_v16i1_to_v16i64(<16 x i64> addrspace(1)* %out, <16 x i1> addrspace(1)* nocapture %in) nounwind {
+ %load = load <16 x i1> addrspace(1)* %in
+ %ext = sext <16 x i1> %load to <16 x i64>
+ store <16 x i64> %ext, <16 x i64> addrspace(1)* %out
+ ret void
+}
+
+; XFUNC-LABEL: {{^}}zextload_global_v32i1_to_v32i64:
+; XSI: s_endpgm
+; define void @zextload_global_v32i1_to_v32i64(<32 x i64> addrspace(1)* %out, <32 x i1> addrspace(1)* nocapture %in) nounwind {
+; %load = load <32 x i1> addrspace(1)* %in
+; %ext = zext <32 x i1> %load to <32 x i64>
+; store <32 x i64> %ext, <32 x i64> addrspace(1)* %out
+; ret void
+; }
+
+; XFUNC-LABEL: {{^}}sextload_global_v32i1_to_v32i64:
+; XSI: s_endpgm
+; define void @sextload_global_v32i1_to_v32i64(<32 x i64> addrspace(1)* %out, <32 x i1> addrspace(1)* nocapture %in) nounwind {
+; %load = load <32 x i1> addrspace(1)* %in
+; %ext = sext <32 x i1> %load to <32 x i64>
+; store <32 x i64> %ext, <32 x i64> addrspace(1)* %out
+; ret void
+; }
+
+; XFUNC-LABEL: {{^}}zextload_global_v64i1_to_v64i64:
+; XSI: s_endpgm
+; define void @zextload_global_v64i1_to_v64i64(<64 x i64> addrspace(1)* %out, <64 x i1> addrspace(1)* nocapture %in) nounwind {
+; %load = load <64 x i1> addrspace(1)* %in
+; %ext = zext <64 x i1> %load to <64 x i64>
+; store <64 x i64> %ext, <64 x i64> addrspace(1)* %out
+; ret void
+; }
+
+; XFUNC-LABEL: {{^}}sextload_global_v64i1_to_v64i64:
+; XSI: s_endpgm
+; define void @sextload_global_v64i1_to_v64i64(<64 x i64> addrspace(1)* %out, <64 x i1> addrspace(1)* nocapture %in) nounwind {
+; %load = load <64 x i1> addrspace(1)* %in
+; %ext = sext <64 x i1> %load to <64 x i64>
+; store <64 x i64> %ext, <64 x i64> addrspace(1)* %out
+; ret void
+; }
diff --git a/test/CodeGen/R600/global-extload-i16.ll b/test/CodeGen/R600/global-extload-i16.ll
new file mode 100644
index 0000000..f3e3312
--- /dev/null
+++ b/test/CodeGen/R600/global-extload-i16.ll
@@ -0,0 +1,302 @@
+; RUN: llc -march=amdgcn -mcpu=SI -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
+; RUN: llc -march=amdgcn -mcpu=tonga -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
+; XUN: llc -march=r600 -mcpu=cypress < %s | FileCheck -check-prefix=EG -check-prefix=FUNC %s
+; FIXME: cypress is broken because the larger test cases spill, and spilling is not implemented
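+;
+; zext loads select buffer_load_ushort and sext loads buffer_load_sshort;
+; for i64 results the high dword is zero (v_mov_b32 0) or the sign
+; (v_ashrrev_i32 ..., 31, ...).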
+
+; FUNC-LABEL: {{^}}zextload_global_i16_to_i32:
+; SI: buffer_load_ushort
+; SI: buffer_store_dword
+; SI: s_endpgm
+define void @zextload_global_i16_to_i32(i32 addrspace(1)* %out, i16 addrspace(1)* %in) nounwind {
+ %a = load i16 addrspace(1)* %in
+ %ext = zext i16 %a to i32
+ store i32 %ext, i32 addrspace(1)* %out
+ ret void
+}
+
+; FUNC-LABEL: {{^}}sextload_global_i16_to_i32:
+; SI: buffer_load_sshort
+; SI: buffer_store_dword
+; SI: s_endpgm
+define void @sextload_global_i16_to_i32(i32 addrspace(1)* %out, i16 addrspace(1)* %in) nounwind {
+ %a = load i16 addrspace(1)* %in
+ %ext = sext i16 %a to i32
+ store i32 %ext, i32 addrspace(1)* %out
+ ret void
+}
+
+; FUNC-LABEL: {{^}}zextload_global_v1i16_to_v1i32:
+; SI: buffer_load_ushort
+; SI: s_endpgm
+define void @zextload_global_v1i16_to_v1i32(<1 x i32> addrspace(1)* %out, <1 x i16> addrspace(1)* nocapture %in) nounwind {
+ %load = load <1 x i16> addrspace(1)* %in
+ %ext = zext <1 x i16> %load to <1 x i32>
+ store <1 x i32> %ext, <1 x i32> addrspace(1)* %out
+ ret void
+}
+
+; FUNC-LABEL: {{^}}sextload_global_v1i16_to_v1i32:
+; SI: buffer_load_sshort
+; SI: s_endpgm
+define void @sextload_global_v1i16_to_v1i32(<1 x i32> addrspace(1)* %out, <1 x i16> addrspace(1)* nocapture %in) nounwind {
+ %load = load <1 x i16> addrspace(1)* %in
+ %ext = sext <1 x i16> %load to <1 x i32>
+ store <1 x i32> %ext, <1 x i32> addrspace(1)* %out
+ ret void
+}
+
+; FUNC-LABEL: {{^}}zextload_global_v2i16_to_v2i32:
+; SI: s_endpgm
+define void @zextload_global_v2i16_to_v2i32(<2 x i32> addrspace(1)* %out, <2 x i16> addrspace(1)* nocapture %in) nounwind {
+ %load = load <2 x i16> addrspace(1)* %in
+ %ext = zext <2 x i16> %load to <2 x i32>
+ store <2 x i32> %ext, <2 x i32> addrspace(1)* %out
+ ret void
+}
+
+; FUNC-LABEL: {{^}}sextload_global_v2i16_to_v2i32:
+; SI: s_endpgm
+define void @sextload_global_v2i16_to_v2i32(<2 x i32> addrspace(1)* %out, <2 x i16> addrspace(1)* nocapture %in) nounwind {
+ %load = load <2 x i16> addrspace(1)* %in
+ %ext = sext <2 x i16> %load to <2 x i32>
+ store <2 x i32> %ext, <2 x i32> addrspace(1)* %out
+ ret void
+}
+
+; FUNC-LABEL: {{^}}zextload_global_v4i16_to_v4i32:
+; SI: s_endpgm
+define void @zextload_global_v4i16_to_v4i32(<4 x i32> addrspace(1)* %out, <4 x i16> addrspace(1)* nocapture %in) nounwind {
+ %load = load <4 x i16> addrspace(1)* %in
+ %ext = zext <4 x i16> %load to <4 x i32>
+ store <4 x i32> %ext, <4 x i32> addrspace(1)* %out
+ ret void
+}
+
+; FUNC-LABEL: {{^}}sextload_global_v4i16_to_v4i32:
+; SI: s_endpgm
+define void @sextload_global_v4i16_to_v4i32(<4 x i32> addrspace(1)* %out, <4 x i16> addrspace(1)* nocapture %in) nounwind {
+ %load = load <4 x i16> addrspace(1)* %in
+ %ext = sext <4 x i16> %load to <4 x i32>
+ store <4 x i32> %ext, <4 x i32> addrspace(1)* %out
+ ret void
+}
+
+; FUNC-LABEL: {{^}}zextload_global_v8i16_to_v8i32:
+; SI: s_endpgm
+define void @zextload_global_v8i16_to_v8i32(<8 x i32> addrspace(1)* %out, <8 x i16> addrspace(1)* nocapture %in) nounwind {
+ %load = load <8 x i16> addrspace(1)* %in
+ %ext = zext <8 x i16> %load to <8 x i32>
+ store <8 x i32> %ext, <8 x i32> addrspace(1)* %out
+ ret void
+}
+
+; FUNC-LABEL: {{^}}sextload_global_v8i16_to_v8i32:
+; SI: s_endpgm
+define void @sextload_global_v8i16_to_v8i32(<8 x i32> addrspace(1)* %out, <8 x i16> addrspace(1)* nocapture %in) nounwind {
+ %load = load <8 x i16> addrspace(1)* %in
+ %ext = sext <8 x i16> %load to <8 x i32>
+ store <8 x i32> %ext, <8 x i32> addrspace(1)* %out
+ ret void
+}
+
+; FUNC-LABEL: {{^}}zextload_global_v16i16_to_v16i32:
+; SI: s_endpgm
+define void @zextload_global_v16i16_to_v16i32(<16 x i32> addrspace(1)* %out, <16 x i16> addrspace(1)* nocapture %in) nounwind {
+ %load = load <16 x i16> addrspace(1)* %in
+ %ext = zext <16 x i16> %load to <16 x i32>
+ store <16 x i32> %ext, <16 x i32> addrspace(1)* %out
+ ret void
+}
+
+; FUNC-LABEL: {{^}}sextload_global_v16i16_to_v16i32:
+; SI: s_endpgm
+define void @sextload_global_v16i16_to_v16i32(<16 x i32> addrspace(1)* %out, <16 x i16> addrspace(1)* nocapture %in) nounwind {
+ %load = load <16 x i16> addrspace(1)* %in
+ %ext = sext <16 x i16> %load to <16 x i32>
+ store <16 x i32> %ext, <16 x i32> addrspace(1)* %out
+ ret void
+}
+
+; FUNC-LABEL: {{^}}zextload_global_v32i16_to_v32i32:
+; SI: s_endpgm
+define void @zextload_global_v32i16_to_v32i32(<32 x i32> addrspace(1)* %out, <32 x i16> addrspace(1)* nocapture %in) nounwind {
+ %load = load <32 x i16> addrspace(1)* %in
+ %ext = zext <32 x i16> %load to <32 x i32>
+ store <32 x i32> %ext, <32 x i32> addrspace(1)* %out
+ ret void
+}
+
+; FUNC-LABEL: {{^}}sextload_global_v32i16_to_v32i32:
+; SI: s_endpgm
+define void @sextload_global_v32i16_to_v32i32(<32 x i32> addrspace(1)* %out, <32 x i16> addrspace(1)* nocapture %in) nounwind {
+ %load = load <32 x i16> addrspace(1)* %in
+ %ext = sext <32 x i16> %load to <32 x i32>
+ store <32 x i32> %ext, <32 x i32> addrspace(1)* %out
+ ret void
+}
+
+; FUNC-LABEL: {{^}}zextload_global_v64i16_to_v64i32:
+; SI: s_endpgm
+define void @zextload_global_v64i16_to_v64i32(<64 x i32> addrspace(1)* %out, <64 x i16> addrspace(1)* nocapture %in) nounwind {
+ %load = load <64 x i16> addrspace(1)* %in
+ %ext = zext <64 x i16> %load to <64 x i32>
+ store <64 x i32> %ext, <64 x i32> addrspace(1)* %out
+ ret void
+}
+
+; FUNC-LABEL: {{^}}sextload_global_v64i16_to_v64i32:
+; SI: s_endpgm
+define void @sextload_global_v64i16_to_v64i32(<64 x i32> addrspace(1)* %out, <64 x i16> addrspace(1)* nocapture %in) nounwind {
+ %load = load <64 x i16> addrspace(1)* %in
+ %ext = sext <64 x i16> %load to <64 x i32>
+ store <64 x i32> %ext, <64 x i32> addrspace(1)* %out
+ ret void
+}
+
+; FUNC-LABEL: {{^}}zextload_global_i16_to_i64:
+; SI: buffer_load_ushort v[[LO:[0-9]+]],
+; SI: v_mov_b32_e32 v[[HI:[0-9]+]], 0{{$}}
+; SI: buffer_store_dwordx2 v{{\[}}[[LO]]:[[HI]]]
+define void @zextload_global_i16_to_i64(i64 addrspace(1)* %out, i16 addrspace(1)* %in) nounwind {
+ %a = load i16 addrspace(1)* %in
+ %ext = zext i16 %a to i64
+ store i64 %ext, i64 addrspace(1)* %out
+ ret void
+}
+
+; FUNC-LABEL: {{^}}sextload_global_i16_to_i64:
+; SI: buffer_load_sshort [[LOAD:v[0-9]+]],
+; SI: v_ashrrev_i32_e32 v{{[0-9]+}}, 31, [[LOAD]]
+; SI: buffer_store_dwordx2
+define void @sextload_global_i16_to_i64(i64 addrspace(1)* %out, i16 addrspace(1)* %in) nounwind {
+ %a = load i16 addrspace(1)* %in
+ %ext = sext i16 %a to i64
+ store i64 %ext, i64 addrspace(1)* %out
+ ret void
+}
+
+; FUNC-LABEL: {{^}}zextload_global_v1i16_to_v1i64:
+; SI: s_endpgm
+define void @zextload_global_v1i16_to_v1i64(<1 x i64> addrspace(1)* %out, <1 x i16> addrspace(1)* nocapture %in) nounwind {
+ %load = load <1 x i16> addrspace(1)* %in
+ %ext = zext <1 x i16> %load to <1 x i64>
+ store <1 x i64> %ext, <1 x i64> addrspace(1)* %out
+ ret void
+}
+
+; FUNC-LABEL: {{^}}sextload_global_v1i16_to_v1i64:
+; SI: s_endpgm
+define void @sextload_global_v1i16_to_v1i64(<1 x i64> addrspace(1)* %out, <1 x i16> addrspace(1)* nocapture %in) nounwind {
+ %load = load <1 x i16> addrspace(1)* %in
+ %ext = sext <1 x i16> %load to <1 x i64>
+ store <1 x i64> %ext, <1 x i64> addrspace(1)* %out
+ ret void
+}
+
+; FUNC-LABEL: {{^}}zextload_global_v2i16_to_v2i64:
+; SI: s_endpgm
+define void @zextload_global_v2i16_to_v2i64(<2 x i64> addrspace(1)* %out, <2 x i16> addrspace(1)* nocapture %in) nounwind {
+ %load = load <2 x i16> addrspace(1)* %in
+ %ext = zext <2 x i16> %load to <2 x i64>
+ store <2 x i64> %ext, <2 x i64> addrspace(1)* %out
+ ret void
+}
+
+; FUNC-LABEL: {{^}}sextload_global_v2i16_to_v2i64:
+; SI: s_endpgm
+define void @sextload_global_v2i16_to_v2i64(<2 x i64> addrspace(1)* %out, <2 x i16> addrspace(1)* nocapture %in) nounwind {
+ %load = load <2 x i16> addrspace(1)* %in
+ %ext = sext <2 x i16> %load to <2 x i64>
+ store <2 x i64> %ext, <2 x i64> addrspace(1)* %out
+ ret void
+}
+
+; FUNC-LABEL: {{^}}zextload_global_v4i16_to_v4i64:
+; SI: s_endpgm
+define void @zextload_global_v4i16_to_v4i64(<4 x i64> addrspace(1)* %out, <4 x i16> addrspace(1)* nocapture %in) nounwind {
+ %load = load <4 x i16> addrspace(1)* %in
+ %ext = zext <4 x i16> %load to <4 x i64>
+ store <4 x i64> %ext, <4 x i64> addrspace(1)* %out
+ ret void
+}
+
+; FUNC-LABEL: {{^}}sextload_global_v4i16_to_v4i64:
+; SI: s_endpgm
+define void @sextload_global_v4i16_to_v4i64(<4 x i64> addrspace(1)* %out, <4 x i16> addrspace(1)* nocapture %in) nounwind {
+ %load = load <4 x i16> addrspace(1)* %in
+ %ext = sext <4 x i16> %load to <4 x i64>
+ store <4 x i64> %ext, <4 x i64> addrspace(1)* %out
+ ret void
+}
+
+; FUNC-LABEL: {{^}}zextload_global_v8i16_to_v8i64:
+; SI: s_endpgm
+define void @zextload_global_v8i16_to_v8i64(<8 x i64> addrspace(1)* %out, <8 x i16> addrspace(1)* nocapture %in) nounwind {
+ %load = load <8 x i16> addrspace(1)* %in
+ %ext = zext <8 x i16> %load to <8 x i64>
+ store <8 x i64> %ext, <8 x i64> addrspace(1)* %out
+ ret void
+}
+
+; FUNC-LABEL: {{^}}sextload_global_v8i16_to_v8i64:
+; SI: s_endpgm
+define void @sextload_global_v8i16_to_v8i64(<8 x i64> addrspace(1)* %out, <8 x i16> addrspace(1)* nocapture %in) nounwind {
+ %load = load <8 x i16> addrspace(1)* %in
+ %ext = sext <8 x i16> %load to <8 x i64>
+ store <8 x i64> %ext, <8 x i64> addrspace(1)* %out
+ ret void
+}
+
+; FUNC-LABEL: {{^}}zextload_global_v16i16_to_v16i64:
+; SI: s_endpgm
+define void @zextload_global_v16i16_to_v16i64(<16 x i64> addrspace(1)* %out, <16 x i16> addrspace(1)* nocapture %in) nounwind {
+ %load = load <16 x i16> addrspace(1)* %in
+ %ext = zext <16 x i16> %load to <16 x i64>
+ store <16 x i64> %ext, <16 x i64> addrspace(1)* %out
+ ret void
+}
+
+; FUNC-LABEL: {{^}}sextload_global_v16i16_to_v16i64:
+; SI: s_endpgm
+define void @sextload_global_v16i16_to_v16i64(<16 x i64> addrspace(1)* %out, <16 x i16> addrspace(1)* nocapture %in) nounwind {
+ %load = load <16 x i16> addrspace(1)* %in
+ %ext = sext <16 x i16> %load to <16 x i64>
+ store <16 x i64> %ext, <16 x i64> addrspace(1)* %out
+ ret void
+}
+
+; FUNC-LABEL: {{^}}zextload_global_v32i16_to_v32i64:
+; SI: s_endpgm
+define void @zextload_global_v32i16_to_v32i64(<32 x i64> addrspace(1)* %out, <32 x i16> addrspace(1)* nocapture %in) nounwind {
+ %load = load <32 x i16> addrspace(1)* %in
+ %ext = zext <32 x i16> %load to <32 x i64>
+ store <32 x i64> %ext, <32 x i64> addrspace(1)* %out
+ ret void
+}
+
+; FUNC-LABEL: {{^}}sextload_global_v32i16_to_v32i64:
+; SI: s_endpgm
+define void @sextload_global_v32i16_to_v32i64(<32 x i64> addrspace(1)* %out, <32 x i16> addrspace(1)* nocapture %in) nounwind {
+ %load = load <32 x i16> addrspace(1)* %in
+ %ext = sext <32 x i16> %load to <32 x i64>
+ store <32 x i64> %ext, <32 x i64> addrspace(1)* %out
+ ret void
+}
+
+; FUNC-LABEL: {{^}}zextload_global_v64i16_to_v64i64:
+; SI: s_endpgm
+define void @zextload_global_v64i16_to_v64i64(<64 x i64> addrspace(1)* %out, <64 x i16> addrspace(1)* nocapture %in) nounwind {
+ %load = load <64 x i16> addrspace(1)* %in
+ %ext = zext <64 x i16> %load to <64 x i64>
+ store <64 x i64> %ext, <64 x i64> addrspace(1)* %out
+ ret void
+}
+
+; FUNC-LABEL: {{^}}sextload_global_v64i16_to_v64i64:
+; SI: s_endpgm
+define void @sextload_global_v64i16_to_v64i64(<64 x i64> addrspace(1)* %out, <64 x i16> addrspace(1)* nocapture %in) nounwind {
+ %load = load <64 x i16> addrspace(1)* %in
+ %ext = sext <64 x i16> %load to <64 x i64>
+ store <64 x i64> %ext, <64 x i64> addrspace(1)* %out
+ ret void
+}
diff --git a/test/CodeGen/R600/global-extload-i32.ll b/test/CodeGen/R600/global-extload-i32.ll
new file mode 100644
index 0000000..b3d5438
--- /dev/null
+++ b/test/CodeGen/R600/global-extload-i32.ll
@@ -0,0 +1,457 @@
+; RUN: llc -march=amdgcn -mcpu=SI -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
+; RUN: llc -march=amdgcn -mcpu=tonga -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
+; RUN: llc -march=r600 -mcpu=cypress < %s | FileCheck -check-prefix=EG -check-prefix=FUNC %s
+
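+; These cover the i32 -> i64 extension pattern: zext materializes a zero
+; high dword with v_mov_b32, while sext shifts the sign down with
+; v_ashrrev_i32 ..., 31, ...
+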
+; FUNC-LABEL: {{^}}zextload_global_i32_to_i64:
+; SI: buffer_load_dword v[[LO:[0-9]+]],
+; SI: v_mov_b32_e32 v[[HI:[0-9]+]], 0{{$}}
+; SI: buffer_store_dwordx2 v{{\[}}[[LO]]:[[HI]]]
+define void @zextload_global_i32_to_i64(i64 addrspace(1)* %out, i32 addrspace(1)* %in) nounwind {
+ %a = load i32 addrspace(1)* %in
+ %ext = zext i32 %a to i64
+ store i64 %ext, i64 addrspace(1)* %out
+ ret void
+}
+
+; FUNC-LABEL: {{^}}sextload_global_i32_to_i64:
+; SI: buffer_load_dword [[LOAD:v[0-9]+]],
+; SI: v_ashrrev_i32_e32 v{{[0-9]+}}, 31, [[LOAD]]
+; SI: buffer_store_dwordx2
+define void @sextload_global_i32_to_i64(i64 addrspace(1)* %out, i32 addrspace(1)* %in) nounwind {
+ %a = load i32 addrspace(1)* %in
+ %ext = sext i32 %a to i64
+ store i64 %ext, i64 addrspace(1)* %out
+ ret void
+}
+
+; FUNC-LABEL: {{^}}zextload_global_v1i32_to_v1i64:
+; SI: buffer_load_dword
+; SI: buffer_store_dwordx2
+; SI: s_endpgm
+define void @zextload_global_v1i32_to_v1i64(<1 x i64> addrspace(1)* %out, <1 x i32> addrspace(1)* nocapture %in) nounwind {
+ %load = load <1 x i32> addrspace(1)* %in
+ %ext = zext <1 x i32> %load to <1 x i64>
+ store <1 x i64> %ext, <1 x i64> addrspace(1)* %out
+ ret void
+}
+
+; FUNC-LABEL: {{^}}sextload_global_v1i32_to_v1i64:
+; SI: buffer_load_dword
+; SI: v_ashrrev_i32
+; SI: buffer_store_dwordx2
+; SI: s_endpgm
+define void @sextload_global_v1i32_to_v1i64(<1 x i64> addrspace(1)* %out, <1 x i32> addrspace(1)* nocapture %in) nounwind {
+ %load = load <1 x i32> addrspace(1)* %in
+ %ext = sext <1 x i32> %load to <1 x i64>
+ store <1 x i64> %ext, <1 x i64> addrspace(1)* %out
+ ret void
+}
+
+; FUNC-LABEL: {{^}}zextload_global_v2i32_to_v2i64:
+; SI: buffer_load_dwordx2
+; SI: buffer_store_dwordx2
+; SI: buffer_store_dwordx2
+; SI: s_endpgm
+define void @zextload_global_v2i32_to_v2i64(<2 x i64> addrspace(1)* %out, <2 x i32> addrspace(1)* nocapture %in) nounwind {
+ %load = load <2 x i32> addrspace(1)* %in
+ %ext = zext <2 x i32> %load to <2 x i64>
+ store <2 x i64> %ext, <2 x i64> addrspace(1)* %out
+ ret void
+}
+
+; FUNC-LABEL: {{^}}sextload_global_v2i32_to_v2i64:
+; SI: buffer_load_dwordx2
+; SI-DAG: v_ashrrev_i32
+; SI-DAG: v_ashrrev_i32
+; SI-DAG: buffer_store_dwordx2
+; SI-DAG: buffer_store_dwordx2
+; SI: s_endpgm
+define void @sextload_global_v2i32_to_v2i64(<2 x i64> addrspace(1)* %out, <2 x i32> addrspace(1)* nocapture %in) nounwind {
+ %load = load <2 x i32> addrspace(1)* %in
+ %ext = sext <2 x i32> %load to <2 x i64>
+ store <2 x i64> %ext, <2 x i64> addrspace(1)* %out
+ ret void
+}
+
+; FUNC-LABEL: {{^}}zextload_global_v4i32_to_v4i64:
+; SI: buffer_load_dwordx4
+; SI: buffer_store_dwordx2
+; SI: buffer_store_dwordx2
+; SI: buffer_store_dwordx2
+; SI: buffer_store_dwordx2
+; SI: s_endpgm
+define void @zextload_global_v4i32_to_v4i64(<4 x i64> addrspace(1)* %out, <4 x i32> addrspace(1)* nocapture %in) nounwind {
+ %load = load <4 x i32> addrspace(1)* %in
+ %ext = zext <4 x i32> %load to <4 x i64>
+ store <4 x i64> %ext, <4 x i64> addrspace(1)* %out
+ ret void
+}
+
+; FUNC-LABEL: {{^}}sextload_global_v4i32_to_v4i64:
+; SI: buffer_load_dwordx4
+; SI-DAG: v_ashrrev_i32
+; SI-DAG: v_ashrrev_i32
+; SI-DAG: v_ashrrev_i32
+; SI-DAG: v_ashrrev_i32
+; SI-DAG: buffer_store_dwordx2
+; SI-DAG: buffer_store_dwordx2
+; SI-DAG: buffer_store_dwordx2
+; SI-DAG: buffer_store_dwordx2
+; SI: s_endpgm
+define void @sextload_global_v4i32_to_v4i64(<4 x i64> addrspace(1)* %out, <4 x i32> addrspace(1)* nocapture %in) nounwind {
+ %load = load <4 x i32> addrspace(1)* %in
+ %ext = sext <4 x i32> %load to <4 x i64>
+ store <4 x i64> %ext, <4 x i64> addrspace(1)* %out
+ ret void
+}
+
+; FUNC-LABEL: {{^}}zextload_global_v8i32_to_v8i64:
+; SI: buffer_load_dword
+; SI: buffer_load_dword
+; SI: buffer_load_dword
+; SI: buffer_load_dword
+; SI: buffer_load_dword
+; SI: buffer_load_dword
+; SI: buffer_load_dword
+; SI: buffer_load_dword
+; SI-DAG: buffer_store_dwordx2
+; SI-DAG: buffer_store_dwordx2
+; SI-DAG: buffer_store_dwordx2
+; SI-DAG: buffer_store_dwordx2
+; SI-DAG: buffer_store_dwordx2
+; SI-DAG: buffer_store_dwordx2
+; SI-DAG: buffer_store_dwordx2
+; SI-DAG: buffer_store_dwordx2
+; SI: s_endpgm
+define void @zextload_global_v8i32_to_v8i64(<8 x i64> addrspace(1)* %out, <8 x i32> addrspace(1)* nocapture %in) nounwind {
+ %load = load <8 x i32> addrspace(1)* %in
+ %ext = zext <8 x i32> %load to <8 x i64>
+ store <8 x i64> %ext, <8 x i64> addrspace(1)* %out
+ ret void
+}
+
+; FUNC-LABEL: {{^}}sextload_global_v8i32_to_v8i64:
+; SI: buffer_load_dword
+; SI: buffer_load_dword
+; SI: buffer_load_dword
+; SI: buffer_load_dword
+; SI: buffer_load_dword
+; SI: buffer_load_dword
+; SI: buffer_load_dword
+; SI: buffer_load_dword
+
+; SI-DAG: v_ashrrev_i32
+; SI-DAG: v_ashrrev_i32
+; SI-DAG: v_ashrrev_i32
+; SI-DAG: v_ashrrev_i32
+; SI-DAG: v_ashrrev_i32
+; SI-DAG: v_ashrrev_i32
+; SI-DAG: v_ashrrev_i32
+; SI-DAG: v_ashrrev_i32
+; SI-DAG: buffer_store_dwordx2
+; SI-DAG: buffer_store_dwordx2
+; SI-DAG: buffer_store_dwordx2
+; SI-DAG: buffer_store_dwordx2
+; SI-DAG: buffer_store_dwordx2
+; SI-DAG: buffer_store_dwordx2
+; SI-DAG: buffer_store_dwordx2
+; SI-DAG: buffer_store_dwordx2
+
+; SI: s_endpgm
+define void @sextload_global_v8i32_to_v8i64(<8 x i64> addrspace(1)* %out, <8 x i32> addrspace(1)* nocapture %in) nounwind {
+ %load = load <8 x i32> addrspace(1)* %in
+ %ext = sext <8 x i32> %load to <8 x i64>
+ store <8 x i64> %ext, <8 x i64> addrspace(1)* %out
+ ret void
+}
+
+; FUNC-LABEL: {{^}}sextload_global_v16i32_to_v16i64:
+; SI: buffer_load_dword
+; SI: buffer_load_dword
+; SI: buffer_load_dword
+; SI: buffer_load_dword
+; SI: buffer_load_dword
+; SI: buffer_load_dword
+; SI: buffer_load_dword
+; SI: buffer_load_dword
+; SI: buffer_load_dword
+; SI: buffer_load_dword
+; SI: buffer_load_dword
+; SI: buffer_load_dword
+; SI: buffer_load_dword
+; SI: buffer_load_dword
+; SI: buffer_load_dword
+; SI: buffer_load_dword
+
+; SI-DAG: v_ashrrev_i32
+; SI-DAG: v_ashrrev_i32
+; SI-DAG: v_ashrrev_i32
+; SI-DAG: v_ashrrev_i32
+; SI-DAG: buffer_store_dwordx2
+; SI-DAG: buffer_store_dwordx2
+
+; SI-DAG: v_ashrrev_i32
+; SI-DAG: v_ashrrev_i32
+; SI-DAG: v_ashrrev_i32
+; SI-DAG: v_ashrrev_i32
+; SI-DAG: buffer_store_dwordx2
+; SI-DAG: buffer_store_dwordx2
+
+; SI-DAG: v_ashrrev_i32
+; SI-DAG: v_ashrrev_i32
+; SI-DAG: v_ashrrev_i32
+; SI-DAG: v_ashrrev_i32
+; SI-DAG: buffer_store_dwordx2
+; SI-DAG: buffer_store_dwordx2
+
+; SI-DAG: v_ashrrev_i32
+; SI-DAG: v_ashrrev_i32
+; SI-DAG: v_ashrrev_i32
+; SI-DAG: v_ashrrev_i32
+; SI-DAG: buffer_store_dwordx2
+; SI-DAG: buffer_store_dwordx2
+; SI: s_endpgm
+define void @sextload_global_v16i32_to_v16i64(<16 x i64> addrspace(1)* %out, <16 x i32> addrspace(1)* nocapture %in) nounwind {
+ %load = load <16 x i32> addrspace(1)* %in
+ %ext = sext <16 x i32> %load to <16 x i64>
+ store <16 x i64> %ext, <16 x i64> addrspace(1)* %out
+ ret void
+}
+
+; FUNC-LABEL: {{^}}zextload_global_v16i32_to_v16i64:
+; SI: buffer_load_dword
+; SI: buffer_load_dword
+; SI: buffer_load_dword
+; SI: buffer_load_dword
+; SI: buffer_load_dword
+; SI: buffer_load_dword
+; SI: buffer_load_dword
+; SI: buffer_load_dword
+; SI: buffer_load_dword
+; SI: buffer_load_dword
+; SI: buffer_load_dword
+; SI: buffer_load_dword
+; SI: buffer_load_dword
+; SI: buffer_load_dword
+; SI: buffer_load_dword
+; SI: buffer_load_dword
+
+; SI: buffer_store_dwordx2
+; SI: buffer_store_dwordx2
+; SI: buffer_store_dwordx2
+; SI: buffer_store_dwordx2
+; SI: buffer_store_dwordx2
+; SI: buffer_store_dwordx2
+; SI: buffer_store_dwordx2
+; SI: buffer_store_dwordx2
+; SI: buffer_store_dwordx2
+; SI: buffer_store_dwordx2
+; SI: buffer_store_dwordx2
+; SI: buffer_store_dwordx2
+; SI: buffer_store_dwordx2
+; SI: buffer_store_dwordx2
+; SI: buffer_store_dwordx2
+; SI: buffer_store_dwordx2
+
+; SI: s_endpgm
+define void @zextload_global_v16i32_to_v16i64(<16 x i64> addrspace(1)* %out, <16 x i32> addrspace(1)* nocapture %in) nounwind {
+ %load = load <16 x i32> addrspace(1)* %in
+ %ext = zext <16 x i32> %load to <16 x i64>
+ store <16 x i64> %ext, <16 x i64> addrspace(1)* %out
+ ret void
+}
+
+; FUNC-LABEL: {{^}}sextload_global_v32i32_to_v32i64:
+; SI: buffer_load_dword
+; SI: buffer_load_dword
+; SI: buffer_load_dword
+; SI: buffer_load_dword
+; SI: buffer_load_dword
+; SI: buffer_load_dword
+; SI: buffer_load_dword
+; SI: buffer_load_dword
+
+; SI: buffer_load_dword
+; SI: buffer_load_dword
+; SI: buffer_load_dword
+; SI: buffer_load_dword
+; SI: buffer_load_dword
+; SI: buffer_load_dword
+; SI: buffer_load_dword
+; SI: buffer_load_dword
+
+; SI: buffer_load_dword
+; SI: buffer_load_dword
+; SI: buffer_load_dword
+; SI: buffer_load_dword
+; SI: buffer_load_dword
+; SI: buffer_load_dword
+; SI: buffer_load_dword
+; SI: buffer_load_dword
+
+; SI: buffer_load_dword
+; SI: buffer_load_dword
+; SI: buffer_load_dword
+; SI: buffer_load_dword
+; SI: buffer_load_dword
+; SI: buffer_load_dword
+; SI: buffer_load_dword
+; SI: buffer_load_dword
+
+; SI-DAG: v_ashrrev_i32
+; SI-DAG: v_ashrrev_i32
+; SI-DAG: v_ashrrev_i32
+; SI-DAG: v_ashrrev_i32
+; SI-DAG: v_ashrrev_i32
+; SI-DAG: v_ashrrev_i32
+; SI-DAG: v_ashrrev_i32
+; SI-DAG: v_ashrrev_i32
+; SI-DAG: v_ashrrev_i32
+; SI-DAG: v_ashrrev_i32
+; SI-DAG: v_ashrrev_i32
+; SI-DAG: v_ashrrev_i32
+; SI-DAG: v_ashrrev_i32
+; SI-DAG: v_ashrrev_i32
+; SI-DAG: v_ashrrev_i32
+; SI-DAG: v_ashrrev_i32
+; SI-DAG: v_ashrrev_i32
+; SI-DAG: v_ashrrev_i32
+; SI-DAG: v_ashrrev_i32
+; SI-DAG: v_ashrrev_i32
+; SI-DAG: v_ashrrev_i32
+; SI-DAG: v_ashrrev_i32
+; SI-DAG: v_ashrrev_i32
+; SI-DAG: v_ashrrev_i32
+; SI-DAG: v_ashrrev_i32
+; SI-DAG: v_ashrrev_i32
+; SI-DAG: v_ashrrev_i32
+; SI-DAG: v_ashrrev_i32
+; SI-DAG: v_ashrrev_i32
+; SI-DAG: v_ashrrev_i32
+; SI-DAG: v_ashrrev_i32
+; SI-DAG: v_ashrrev_i32
+
+; SI-DAG: buffer_store_dwordx2
+; SI-DAG: buffer_store_dwordx2
+; SI-DAG: buffer_store_dwordx2
+; SI-DAG: buffer_store_dwordx2
+; SI-DAG: buffer_store_dwordx2
+; SI-DAG: buffer_store_dwordx2
+; SI-DAG: buffer_store_dwordx2
+; SI-DAG: buffer_store_dwordx2
+
+; SI-DAG: buffer_store_dwordx2
+; SI-DAG: buffer_store_dwordx2
+; SI-DAG: buffer_store_dwordx2
+; SI-DAG: buffer_store_dwordx2
+; SI-DAG: buffer_store_dwordx2
+; SI-DAG: buffer_store_dwordx2
+; SI-DAG: buffer_store_dwordx2
+; SI-DAG: buffer_store_dwordx2
+
+; SI-DAG: buffer_store_dwordx2
+; SI-DAG: buffer_store_dwordx2
+; SI-DAG: buffer_store_dwordx2
+; SI-DAG: buffer_store_dwordx2
+; SI-DAG: buffer_store_dwordx2
+; SI-DAG: buffer_store_dwordx2
+; SI-DAG: buffer_store_dwordx2
+; SI-DAG: buffer_store_dwordx2
+
+; SI-DAG: buffer_store_dwordx2
+; SI-DAG: buffer_store_dwordx2
+; SI-DAG: buffer_store_dwordx2
+; SI-DAG: buffer_store_dwordx2
+; SI-DAG: buffer_store_dwordx2
+; SI-DAG: buffer_store_dwordx2
+; SI-DAG: buffer_store_dwordx2
+; SI-DAG: buffer_store_dwordx2
+
+; SI: s_endpgm
+define void @sextload_global_v32i32_to_v32i64(<32 x i64> addrspace(1)* %out, <32 x i32> addrspace(1)* nocapture %in) nounwind {
+ %load = load <32 x i32> addrspace(1)* %in
+ %ext = sext <32 x i32> %load to <32 x i64>
+ store <32 x i64> %ext, <32 x i64> addrspace(1)* %out
+ ret void
+}
+
+; FUNC-LABEL: {{^}}zextload_global_v32i32_to_v32i64:
+; SI: buffer_load_dword
+; SI: buffer_load_dword
+; SI: buffer_load_dword
+; SI: buffer_load_dword
+; SI: buffer_load_dword
+; SI: buffer_load_dword
+; SI: buffer_load_dword
+; SI: buffer_load_dword
+
+; SI: buffer_load_dword
+; SI: buffer_load_dword
+; SI: buffer_load_dword
+; SI: buffer_load_dword
+; SI: buffer_load_dword
+; SI: buffer_load_dword
+; SI: buffer_load_dword
+; SI: buffer_load_dword
+
+; SI: buffer_load_dword
+; SI: buffer_load_dword
+; SI: buffer_load_dword
+; SI: buffer_load_dword
+; SI: buffer_load_dword
+; SI: buffer_load_dword
+; SI: buffer_load_dword
+; SI: buffer_load_dword
+
+; SI: buffer_load_dword
+; SI: buffer_load_dword
+; SI: buffer_load_dword
+; SI: buffer_load_dword
+; SI: buffer_load_dword
+; SI: buffer_load_dword
+; SI: buffer_load_dword
+; SI: buffer_load_dword
+
+; SI-DAG: buffer_store_dwordx2
+; SI-DAG: buffer_store_dwordx2
+; SI-DAG: buffer_store_dwordx2
+; SI-DAG: buffer_store_dwordx2
+; SI-DAG: buffer_store_dwordx2
+; SI-DAG: buffer_store_dwordx2
+; SI-DAG: buffer_store_dwordx2
+; SI-DAG: buffer_store_dwordx2
+
+; SI-DAG: buffer_store_dwordx2
+; SI-DAG: buffer_store_dwordx2
+; SI-DAG: buffer_store_dwordx2
+; SI-DAG: buffer_store_dwordx2
+; SI-DAG: buffer_store_dwordx2
+; SI-DAG: buffer_store_dwordx2
+; SI-DAG: buffer_store_dwordx2
+; SI-DAG: buffer_store_dwordx2
+
+; SI-DAG: buffer_store_dwordx2
+; SI-DAG: buffer_store_dwordx2
+; SI-DAG: buffer_store_dwordx2
+; SI-DAG: buffer_store_dwordx2
+; SI-DAG: buffer_store_dwordx2
+; SI-DAG: buffer_store_dwordx2
+; SI-DAG: buffer_store_dwordx2
+; SI-DAG: buffer_store_dwordx2
+
+; SI-DAG: buffer_store_dwordx2
+; SI-DAG: buffer_store_dwordx2
+; SI-DAG: buffer_store_dwordx2
+; SI-DAG: buffer_store_dwordx2
+; SI-DAG: buffer_store_dwordx2
+; SI-DAG: buffer_store_dwordx2
+; SI-DAG: buffer_store_dwordx2
+; SI-DAG: buffer_store_dwordx2
+
+; SI: s_endpgm
+define void @zextload_global_v32i32_to_v32i64(<32 x i64> addrspace(1)* %out, <32 x i32> addrspace(1)* nocapture %in) nounwind {
+ %load = load <32 x i32> addrspace(1)* %in
+ %ext = zext <32 x i32> %load to <32 x i64>
+ store <32 x i64> %ext, <32 x i64> addrspace(1)* %out
+ ret void
+}
diff --git a/test/CodeGen/R600/global-extload-i8.ll b/test/CodeGen/R600/global-extload-i8.ll
new file mode 100644
index 0000000..4c37f3f
--- /dev/null
+++ b/test/CodeGen/R600/global-extload-i8.ll
@@ -0,0 +1,299 @@
+; RUN: llc -march=amdgcn -mcpu=SI -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
+; RUN: llc -march=amdgcn -mcpu=tonga -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
+; RUN: llc -march=r600 -mcpu=cypress < %s | FileCheck -check-prefix=EG -check-prefix=FUNC %s
+
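+; Same pattern as the i16 variants: buffer_load_ubyte for zext,
+; buffer_load_sbyte for sext, with a zero or sign-shifted high dword for
+; the i64 cases.
+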
+; FUNC-LABEL: {{^}}zextload_global_i8_to_i32:
+; SI: buffer_load_ubyte
+; SI: buffer_store_dword
+; SI: s_endpgm
+define void @zextload_global_i8_to_i32(i32 addrspace(1)* %out, i8 addrspace(1)* %in) nounwind {
+ %a = load i8 addrspace(1)* %in
+ %ext = zext i8 %a to i32
+ store i32 %ext, i32 addrspace(1)* %out
+ ret void
+}
+
+; FUNC-LABEL: {{^}}sextload_global_i8_to_i32:
+; SI: buffer_load_sbyte
+; SI: buffer_store_dword
+; SI: s_endpgm
+define void @sextload_global_i8_to_i32(i32 addrspace(1)* %out, i8 addrspace(1)* %in) nounwind {
+ %a = load i8 addrspace(1)* %in
+ %ext = sext i8 %a to i32
+ store i32 %ext, i32 addrspace(1)* %out
+ ret void
+}
+
+; FUNC-LABEL: {{^}}zextload_global_v1i8_to_v1i32:
+; SI: s_endpgm
+define void @zextload_global_v1i8_to_v1i32(<1 x i32> addrspace(1)* %out, <1 x i8> addrspace(1)* nocapture %in) nounwind {
+ %load = load <1 x i8> addrspace(1)* %in
+ %ext = zext <1 x i8> %load to <1 x i32>
+ store <1 x i32> %ext, <1 x i32> addrspace(1)* %out
+ ret void
+}
+
+; FUNC-LABEL: {{^}}sextload_global_v1i8_to_v1i32:
+; SI: s_endpgm
+define void @sextload_global_v1i8_to_v1i32(<1 x i32> addrspace(1)* %out, <1 x i8> addrspace(1)* nocapture %in) nounwind {
+ %load = load <1 x i8> addrspace(1)* %in
+ %ext = sext <1 x i8> %load to <1 x i32>
+ store <1 x i32> %ext, <1 x i32> addrspace(1)* %out
+ ret void
+}
+
+; FUNC-LABEL: {{^}}zextload_global_v2i8_to_v2i32:
+; SI: s_endpgm
+define void @zextload_global_v2i8_to_v2i32(<2 x i32> addrspace(1)* %out, <2 x i8> addrspace(1)* nocapture %in) nounwind {
+ %load = load <2 x i8> addrspace(1)* %in
+ %ext = zext <2 x i8> %load to <2 x i32>
+ store <2 x i32> %ext, <2 x i32> addrspace(1)* %out
+ ret void
+}
+
+; FUNC-LABEL: {{^}}sextload_global_v2i8_to_v2i32:
+; SI: s_endpgm
+define void @sextload_global_v2i8_to_v2i32(<2 x i32> addrspace(1)* %out, <2 x i8> addrspace(1)* nocapture %in) nounwind {
+ %load = load <2 x i8> addrspace(1)* %in
+ %ext = sext <2 x i8> %load to <2 x i32>
+ store <2 x i32> %ext, <2 x i32> addrspace(1)* %out
+ ret void
+}
+
+; FUNC-LABEL: {{^}}zextload_global_v4i8_to_v4i32:
+; SI: s_endpgm
+define void @zextload_global_v4i8_to_v4i32(<4 x i32> addrspace(1)* %out, <4 x i8> addrspace(1)* nocapture %in) nounwind {
+ %load = load <4 x i8> addrspace(1)* %in
+ %ext = zext <4 x i8> %load to <4 x i32>
+ store <4 x i32> %ext, <4 x i32> addrspace(1)* %out
+ ret void
+}
+
+; FUNC-LABEL: {{^}}sextload_global_v4i8_to_v4i32:
+; SI: s_endpgm
+define void @sextload_global_v4i8_to_v4i32(<4 x i32> addrspace(1)* %out, <4 x i8> addrspace(1)* nocapture %in) nounwind {
+ %load = load <4 x i8> addrspace(1)* %in
+ %ext = sext <4 x i8> %load to <4 x i32>
+ store <4 x i32> %ext, <4 x i32> addrspace(1)* %out
+ ret void
+}
+
+; FUNC-LABEL: {{^}}zextload_global_v8i8_to_v8i32:
+; SI: s_endpgm
+define void @zextload_global_v8i8_to_v8i32(<8 x i32> addrspace(1)* %out, <8 x i8> addrspace(1)* nocapture %in) nounwind {
+ %load = load <8 x i8> addrspace(1)* %in
+ %ext = zext <8 x i8> %load to <8 x i32>
+ store <8 x i32> %ext, <8 x i32> addrspace(1)* %out
+ ret void
+}
+
+; FUNC-LABEL: {{^}}sextload_global_v8i8_to_v8i32:
+; SI: s_endpgm
+define void @sextload_global_v8i8_to_v8i32(<8 x i32> addrspace(1)* %out, <8 x i8> addrspace(1)* nocapture %in) nounwind {
+ %load = load <8 x i8> addrspace(1)* %in
+ %ext = sext <8 x i8> %load to <8 x i32>
+ store <8 x i32> %ext, <8 x i32> addrspace(1)* %out
+ ret void
+}
+
+; FUNC-LABEL: {{^}}zextload_global_v16i8_to_v16i32:
+; SI: s_endpgm
+define void @zextload_global_v16i8_to_v16i32(<16 x i32> addrspace(1)* %out, <16 x i8> addrspace(1)* nocapture %in) nounwind {
+ %load = load <16 x i8> addrspace(1)* %in
+ %ext = zext <16 x i8> %load to <16 x i32>
+ store <16 x i32> %ext, <16 x i32> addrspace(1)* %out
+ ret void
+}
+
+; FUNC-LABEL: {{^}}sextload_global_v16i8_to_v16i32:
+; SI: s_endpgm
+define void @sextload_global_v16i8_to_v16i32(<16 x i32> addrspace(1)* %out, <16 x i8> addrspace(1)* nocapture %in) nounwind {
+ %load = load <16 x i8> addrspace(1)* %in
+ %ext = sext <16 x i8> %load to <16 x i32>
+ store <16 x i32> %ext, <16 x i32> addrspace(1)* %out
+ ret void
+}
+
+; XFUNC-LABEL: {{^}}zextload_global_v32i8_to_v32i32:
+; XSI: s_endpgm
+; define void @zextload_global_v32i8_to_v32i32(<32 x i32> addrspace(1)* %out, <32 x i8> addrspace(1)* nocapture %in) nounwind {
+; %load = load <32 x i8> addrspace(1)* %in
+; %ext = zext <32 x i8> %load to <32 x i32>
+; store <32 x i32> %ext, <32 x i32> addrspace(1)* %out
+; ret void
+; }
+
+; XFUNC-LABEL: {{^}}sextload_global_v32i8_to_v32i32:
+; XSI: s_endpgm
+; define void @sextload_global_v32i8_to_v32i32(<32 x i32> addrspace(1)* %out, <32 x i8> addrspace(1)* nocapture %in) nounwind {
+; %load = load <32 x i8> addrspace(1)* %in
+; %ext = sext <32 x i8> %load to <32 x i32>
+; store <32 x i32> %ext, <32 x i32> addrspace(1)* %out
+; ret void
+; }
+
+; XFUNC-LABEL: {{^}}zextload_global_v64i8_to_v64i32:
+; XSI: s_endpgm
+; define void @zextload_global_v64i8_to_v64i32(<64 x i32> addrspace(1)* %out, <64 x i8> addrspace(1)* nocapture %in) nounwind {
+; %load = load <64 x i8> addrspace(1)* %in
+; %ext = zext <64 x i8> %load to <64 x i32>
+; store <64 x i32> %ext, <64 x i32> addrspace(1)* %out
+; ret void
+; }
+
+; XFUNC-LABEL: {{^}}sextload_global_v64i8_to_v64i32:
+; XSI: s_endpgm
+; define void @sextload_global_v64i8_to_v64i32(<64 x i32> addrspace(1)* %out, <64 x i8> addrspace(1)* nocapture %in) nounwind {
+; %load = load <64 x i8> addrspace(1)* %in
+; %ext = sext <64 x i8> %load to <64 x i32>
+; store <64 x i32> %ext, <64 x i32> addrspace(1)* %out
+; ret void
+; }
+
+; FUNC-LABEL: {{^}}zextload_global_i8_to_i64:
+; SI: buffer_load_ubyte v[[LO:[0-9]+]],
+; SI: v_mov_b32_e32 v[[HI:[0-9]+]], 0{{$}}
+; SI: buffer_store_dwordx2 v{{\[}}[[LO]]:[[HI]]]
+define void @zextload_global_i8_to_i64(i64 addrspace(1)* %out, i8 addrspace(1)* %in) nounwind {
+ %a = load i8 addrspace(1)* %in
+ %ext = zext i8 %a to i64
+ store i64 %ext, i64 addrspace(1)* %out
+ ret void
+}
+
+; FUNC-LABEL: {{^}}sextload_global_i8_to_i64:
+; SI: buffer_load_sbyte [[LOAD:v[0-9]+]],
+; SI: v_ashrrev_i32_e32 v{{[0-9]+}}, 31, [[LOAD]]
+; SI: buffer_store_dwordx2
+define void @sextload_global_i8_to_i64(i64 addrspace(1)* %out, i8 addrspace(1)* %in) nounwind {
+ %a = load i8 addrspace(1)* %in
+ %ext = sext i8 %a to i64
+ store i64 %ext, i64 addrspace(1)* %out
+ ret void
+}
+
+; FUNC-LABEL: {{^}}zextload_global_v1i8_to_v1i64:
+; SI: s_endpgm
+define void @zextload_global_v1i8_to_v1i64(<1 x i64> addrspace(1)* %out, <1 x i8> addrspace(1)* nocapture %in) nounwind {
+ %load = load <1 x i8> addrspace(1)* %in
+ %ext = zext <1 x i8> %load to <1 x i64>
+ store <1 x i64> %ext, <1 x i64> addrspace(1)* %out
+ ret void
+}
+
+; FUNC-LABEL: {{^}}sextload_global_v1i8_to_v1i64:
+; SI: s_endpgm
+define void @sextload_global_v1i8_to_v1i64(<1 x i64> addrspace(1)* %out, <1 x i8> addrspace(1)* nocapture %in) nounwind {
+ %load = load <1 x i8> addrspace(1)* %in
+ %ext = sext <1 x i8> %load to <1 x i64>
+ store <1 x i64> %ext, <1 x i64> addrspace(1)* %out
+ ret void
+}
+
+; FUNC-LABEL: {{^}}zextload_global_v2i8_to_v2i64:
+; SI: s_endpgm
+define void @zextload_global_v2i8_to_v2i64(<2 x i64> addrspace(1)* %out, <2 x i8> addrspace(1)* nocapture %in) nounwind {
+ %load = load <2 x i8> addrspace(1)* %in
+ %ext = zext <2 x i8> %load to <2 x i64>
+ store <2 x i64> %ext, <2 x i64> addrspace(1)* %out
+ ret void
+}
+
+; FUNC-LABEL: {{^}}sextload_global_v2i8_to_v2i64:
+; SI: s_endpgm
+define void @sextload_global_v2i8_to_v2i64(<2 x i64> addrspace(1)* %out, <2 x i8> addrspace(1)* nocapture %in) nounwind {
+ %load = load <2 x i8> addrspace(1)* %in
+ %ext = sext <2 x i8> %load to <2 x i64>
+ store <2 x i64> %ext, <2 x i64> addrspace(1)* %out
+ ret void
+}
+
+; FUNC-LABEL: {{^}}zextload_global_v4i8_to_v4i64:
+; SI: s_endpgm
+define void @zextload_global_v4i8_to_v4i64(<4 x i64> addrspace(1)* %out, <4 x i8> addrspace(1)* nocapture %in) nounwind {
+ %load = load <4 x i8> addrspace(1)* %in
+ %ext = zext <4 x i8> %load to <4 x i64>
+ store <4 x i64> %ext, <4 x i64> addrspace(1)* %out
+ ret void
+}
+
+; FUNC-LABEL: {{^}}sextload_global_v4i8_to_v4i64:
+; SI: s_endpgm
+define void @sextload_global_v4i8_to_v4i64(<4 x i64> addrspace(1)* %out, <4 x i8> addrspace(1)* nocapture %in) nounwind {
+ %load = load <4 x i8> addrspace(1)* %in
+ %ext = sext <4 x i8> %load to <4 x i64>
+ store <4 x i64> %ext, <4 x i64> addrspace(1)* %out
+ ret void
+}
+
+; FUNC-LABEL: {{^}}zextload_global_v8i8_to_v8i64:
+; SI: s_endpgm
+define void @zextload_global_v8i8_to_v8i64(<8 x i64> addrspace(1)* %out, <8 x i8> addrspace(1)* nocapture %in) nounwind {
+ %load = load <8 x i8> addrspace(1)* %in
+ %ext = zext <8 x i8> %load to <8 x i64>
+ store <8 x i64> %ext, <8 x i64> addrspace(1)* %out
+ ret void
+}
+
+; FUNC-LABEL: {{^}}sextload_global_v8i8_to_v8i64:
+; SI: s_endpgm
+define void @sextload_global_v8i8_to_v8i64(<8 x i64> addrspace(1)* %out, <8 x i8> addrspace(1)* nocapture %in) nounwind {
+ %load = load <8 x i8> addrspace(1)* %in
+ %ext = sext <8 x i8> %load to <8 x i64>
+ store <8 x i64> %ext, <8 x i64> addrspace(1)* %out
+ ret void
+}
+
+; FUNC-LABEL: {{^}}zextload_global_v16i8_to_v16i64:
+; SI: s_endpgm
+define void @zextload_global_v16i8_to_v16i64(<16 x i64> addrspace(1)* %out, <16 x i8> addrspace(1)* nocapture %in) nounwind {
+ %load = load <16 x i8> addrspace(1)* %in
+ %ext = zext <16 x i8> %load to <16 x i64>
+ store <16 x i64> %ext, <16 x i64> addrspace(1)* %out
+ ret void
+}
+
+; FUNC-LABEL: {{^}}sextload_global_v16i8_to_v16i64:
+; SI: s_endpgm
+define void @sextload_global_v16i8_to_v16i64(<16 x i64> addrspace(1)* %out, <16 x i8> addrspace(1)* nocapture %in) nounwind {
+ %load = load <16 x i8> addrspace(1)* %in
+ %ext = sext <16 x i8> %load to <16 x i64>
+ store <16 x i64> %ext, <16 x i64> addrspace(1)* %out
+ ret void
+}
+
+; XFUNC-LABEL: {{^}}zextload_global_v32i8_to_v32i64:
+; XSI: s_endpgm
+; define void @zextload_global_v32i8_to_v32i64(<32 x i64> addrspace(1)* %out, <32 x i8> addrspace(1)* nocapture %in) nounwind {
+; %load = load <32 x i8> addrspace(1)* %in
+; %ext = zext <32 x i8> %load to <32 x i64>
+; store <32 x i64> %ext, <32 x i64> addrspace(1)* %out
+; ret void
+; }
+
+; XFUNC-LABEL: {{^}}sextload_global_v32i8_to_v32i64:
+; XSI: s_endpgm
+; define void @sextload_global_v32i8_to_v32i64(<32 x i64> addrspace(1)* %out, <32 x i8> addrspace(1)* nocapture %in) nounwind {
+; %load = load <32 x i8> addrspace(1)* %in
+; %ext = sext <32 x i8> %load to <32 x i64>
+; store <32 x i64> %ext, <32 x i64> addrspace(1)* %out
+; ret void
+; }
+
+; XFUNC-LABEL: {{^}}zextload_global_v64i8_to_v64i64:
+; XSI: s_endpgm
+; define void @zextload_global_v64i8_to_v64i64(<64 x i64> addrspace(1)* %out, <64 x i8> addrspace(1)* nocapture %in) nounwind {
+; %load = load <64 x i8> addrspace(1)* %in
+; %ext = zext <64 x i8> %load to <64 x i64>
+; store <64 x i64> %ext, <64 x i64> addrspace(1)* %out
+; ret void
+; }
+
+; XFUNC-LABEL: {{^}}sextload_global_v64i8_to_v64i64:
+; XSI: s_endpgm
+; define void @sextload_global_v64i8_to_v64i64(<64 x i64> addrspace(1)* %out, <64 x i8> addrspace(1)* nocapture %in) nounwind {
+; %load = load <64 x i8> addrspace(1)* %in
+; %ext = sext <64 x i8> %load to <64 x i64>
+; store <64 x i64> %ext, <64 x i64> addrspace(1)* %out
+; ret void
+; }
diff --git a/test/CodeGen/R600/global-zero-initializer.ll b/test/CodeGen/R600/global-zero-initializer.ll
index b69b061..6909c58 100644
--- a/test/CodeGen/R600/global-zero-initializer.ll
+++ b/test/CodeGen/R600/global-zero-initializer.ll
@@ -1,4 +1,5 @@
-; RUN: not llc -march=r600 -mcpu=SI < %s 2>&1 | FileCheck %s
+; RUN: not llc -march=amdgcn -mcpu=SI < %s 2>&1 | FileCheck %s
+; RUN: not llc -march=amdgcn -mcpu=tonga < %s 2>&1 | FileCheck %s
; CHECK: error: unsupported initializer for address space in load_init_global_global
diff --git a/test/CodeGen/R600/global_atomics.ll b/test/CodeGen/R600/global_atomics.ll
index 533a964..5a07a02 100644
--- a/test/CodeGen/R600/global_atomics.ll
+++ b/test/CodeGen/R600/global_atomics.ll
@@ -1,7 +1,7 @@
-; RUN: llc < %s -march=r600 -mcpu=SI -verify-machineinstrs | FileCheck --check-prefix=SI --check-prefix=FUNC %s
+; RUN: llc < %s -march=amdgcn -mcpu=SI -verify-machineinstrs | FileCheck --check-prefix=SI --check-prefix=FUNC %s
; FUNC-LABEL: {{^}}atomic_add_i32_offset:
-; SI: buffer_atomic_add v{{[0-9]+}}, s[{{[0-9]+}}:{{[0-9]+}}], 0 offset:0x10{{$}}
+; SI: buffer_atomic_add v{{[0-9]+}}, s[{{[0-9]+}}:{{[0-9]+}}], 0 offset:16{{$}}
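+; (the 16-byte offset comes from the "i32 4" GEP below: 4 elements * 4 bytes)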
define void @atomic_add_i32_offset(i32 addrspace(1)* %out, i32 %in) {
entry:
%gep = getelementptr i32 addrspace(1)* %out, i32 4
@@ -10,7 +10,7 @@ entry:
}
; FUNC-LABEL: {{^}}atomic_add_i32_ret_offset:
-; SI: buffer_atomic_add [[RET:v[0-9]+]], s[{{[0-9]+}}:{{[0-9]+}}], 0 offset:0x10 glc {{$}}
+; SI: buffer_atomic_add [[RET:v[0-9]+]], s[{{[0-9]+}}:{{[0-9]+}}], 0 offset:16 glc {{$}}
; SI: buffer_store_dword [[RET]]
define void @atomic_add_i32_ret_offset(i32 addrspace(1)* %out, i32 addrspace(1)* %out2, i32 %in) {
entry:
@@ -21,7 +21,7 @@ entry:
}
; FUNC-LABEL: {{^}}atomic_add_i32_addr64_offset:
-; SI: buffer_atomic_add v{{[0-9]+}}, v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64 offset:0x10{{$}}
+; SI: buffer_atomic_add v{{[0-9]+}}, v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64 offset:16{{$}}
define void @atomic_add_i32_addr64_offset(i32 addrspace(1)* %out, i32 %in, i64 %index) {
entry:
%ptr = getelementptr i32 addrspace(1)* %out, i64 %index
@@ -31,7 +31,7 @@ entry:
}
; FUNC-LABEL: {{^}}atomic_add_i32_ret_addr64_offset:
-; SI: buffer_atomic_add [[RET:v[0-9]+]], v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64 offset:0x10 glc{{$}}
+; SI: buffer_atomic_add [[RET:v[0-9]+]], v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64 offset:16 glc{{$}}
; SI: buffer_store_dword [[RET]]
define void @atomic_add_i32_ret_addr64_offset(i32 addrspace(1)* %out, i32 addrspace(1)* %out2, i32 %in, i64 %index) {
entry:
@@ -81,7 +81,7 @@ entry:
}
; FUNC-LABEL: {{^}}atomic_and_i32_offset:
-; SI: buffer_atomic_and v{{[0-9]+}}, s[{{[0-9]+}}:{{[0-9]+}}], 0 offset:0x10{{$}}
+; SI: buffer_atomic_and v{{[0-9]+}}, s[{{[0-9]+}}:{{[0-9]+}}], 0 offset:16{{$}}
define void @atomic_and_i32_offset(i32 addrspace(1)* %out, i32 %in) {
entry:
%gep = getelementptr i32 addrspace(1)* %out, i32 4
@@ -90,7 +90,7 @@ entry:
}
; FUNC-LABEL: {{^}}atomic_and_i32_ret_offset:
-; SI: buffer_atomic_and [[RET:v[0-9]+]], s[{{[0-9]+}}:{{[0-9]+}}], 0 offset:0x10 glc {{$}}
+; SI: buffer_atomic_and [[RET:v[0-9]+]], s[{{[0-9]+}}:{{[0-9]+}}], 0 offset:16 glc {{$}}
; SI: buffer_store_dword [[RET]]
define void @atomic_and_i32_ret_offset(i32 addrspace(1)* %out, i32 addrspace(1)* %out2, i32 %in) {
entry:
@@ -101,7 +101,7 @@ entry:
}
; FUNC-LABEL: {{^}}atomic_and_i32_addr64_offset:
-; SI: buffer_atomic_and v{{[0-9]+}}, v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64 offset:0x10{{$}}
+; SI: buffer_atomic_and v{{[0-9]+}}, v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64 offset:16{{$}}
define void @atomic_and_i32_addr64_offset(i32 addrspace(1)* %out, i32 %in, i64 %index) {
entry:
%ptr = getelementptr i32 addrspace(1)* %out, i64 %index
@@ -111,7 +111,7 @@ entry:
}
; FUNC-LABEL: {{^}}atomic_and_i32_ret_addr64_offset:
-; SI: buffer_atomic_and [[RET:v[0-9]+]], v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64 offset:0x10 glc{{$}}
+; SI: buffer_atomic_and [[RET:v[0-9]+]], v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64 offset:16 glc{{$}}
; SI: buffer_store_dword [[RET]]
define void @atomic_and_i32_ret_addr64_offset(i32 addrspace(1)* %out, i32 addrspace(1)* %out2, i32 %in, i64 %index) {
entry:
@@ -161,7 +161,7 @@ entry:
}
; FUNC-LABEL: {{^}}atomic_sub_i32_offset:
-; SI: buffer_atomic_sub v{{[0-9]+}}, s[{{[0-9]+}}:{{[0-9]+}}], 0 offset:0x10{{$}}
+; SI: buffer_atomic_sub v{{[0-9]+}}, s[{{[0-9]+}}:{{[0-9]+}}], 0 offset:16{{$}}
define void @atomic_sub_i32_offset(i32 addrspace(1)* %out, i32 %in) {
entry:
%gep = getelementptr i32 addrspace(1)* %out, i32 4
@@ -170,7 +170,7 @@ entry:
}
; FUNC-LABEL: {{^}}atomic_sub_i32_ret_offset:
-; SI: buffer_atomic_sub [[RET:v[0-9]+]], s[{{[0-9]+}}:{{[0-9]+}}], 0 offset:0x10 glc {{$}}
+; SI: buffer_atomic_sub [[RET:v[0-9]+]], s[{{[0-9]+}}:{{[0-9]+}}], 0 offset:16 glc {{$}}
; SI: buffer_store_dword [[RET]]
define void @atomic_sub_i32_ret_offset(i32 addrspace(1)* %out, i32 addrspace(1)* %out2, i32 %in) {
entry:
@@ -181,7 +181,7 @@ entry:
}
; FUNC-LABEL: {{^}}atomic_sub_i32_addr64_offset:
-; SI: buffer_atomic_sub v{{[0-9]+}}, v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64 offset:0x10{{$}}
+; SI: buffer_atomic_sub v{{[0-9]+}}, v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64 offset:16{{$}}
define void @atomic_sub_i32_addr64_offset(i32 addrspace(1)* %out, i32 %in, i64 %index) {
entry:
%ptr = getelementptr i32 addrspace(1)* %out, i64 %index
@@ -191,7 +191,7 @@ entry:
}
; FUNC-LABEL: {{^}}atomic_sub_i32_ret_addr64_offset:
-; SI: buffer_atomic_sub [[RET:v[0-9]+]], v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64 offset:0x10 glc{{$}}
+; SI: buffer_atomic_sub [[RET:v[0-9]+]], v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64 offset:16 glc{{$}}
; SI: buffer_store_dword [[RET]]
define void @atomic_sub_i32_ret_addr64_offset(i32 addrspace(1)* %out, i32 addrspace(1)* %out2, i32 %in, i64 %index) {
entry:
@@ -241,7 +241,7 @@ entry:
}
; FUNC-LABEL: {{^}}atomic_max_i32_offset:
-; SI: buffer_atomic_smax v{{[0-9]+}}, s[{{[0-9]+}}:{{[0-9]+}}], 0 offset:0x10{{$}}
+; SI: buffer_atomic_smax v{{[0-9]+}}, s[{{[0-9]+}}:{{[0-9]+}}], 0 offset:16{{$}}
define void @atomic_max_i32_offset(i32 addrspace(1)* %out, i32 %in) {
entry:
%gep = getelementptr i32 addrspace(1)* %out, i32 4
@@ -250,7 +250,7 @@ entry:
}
; FUNC-LABEL: {{^}}atomic_max_i32_ret_offset:
-; SI: buffer_atomic_smax [[RET:v[0-9]+]], s[{{[0-9]+}}:{{[0-9]+}}], 0 offset:0x10 glc {{$}}
+; SI: buffer_atomic_smax [[RET:v[0-9]+]], s[{{[0-9]+}}:{{[0-9]+}}], 0 offset:16 glc {{$}}
; SI: buffer_store_dword [[RET]]
define void @atomic_max_i32_ret_offset(i32 addrspace(1)* %out, i32 addrspace(1)* %out2, i32 %in) {
entry:
@@ -261,7 +261,7 @@ entry:
}
; FUNC-LABEL: {{^}}atomic_max_i32_addr64_offset:
-; SI: buffer_atomic_smax v{{[0-9]+}}, v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64 offset:0x10{{$}}
+; SI: buffer_atomic_smax v{{[0-9]+}}, v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64 offset:16{{$}}
define void @atomic_max_i32_addr64_offset(i32 addrspace(1)* %out, i32 %in, i64 %index) {
entry:
%ptr = getelementptr i32 addrspace(1)* %out, i64 %index
@@ -271,7 +271,7 @@ entry:
}
; FUNC-LABEL: {{^}}atomic_max_i32_ret_addr64_offset:
-; SI: buffer_atomic_smax [[RET:v[0-9]+]], v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64 offset:0x10 glc{{$}}
+; SI: buffer_atomic_smax [[RET:v[0-9]+]], v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64 offset:16 glc{{$}}
; SI: buffer_store_dword [[RET]]
define void @atomic_max_i32_ret_addr64_offset(i32 addrspace(1)* %out, i32 addrspace(1)* %out2, i32 %in, i64 %index) {
entry:
@@ -321,7 +321,7 @@ entry:
}
; FUNC-LABEL: {{^}}atomic_umax_i32_offset:
-; SI: buffer_atomic_umax v{{[0-9]+}}, s[{{[0-9]+}}:{{[0-9]+}}], 0 offset:0x10{{$}}
+; SI: buffer_atomic_umax v{{[0-9]+}}, s[{{[0-9]+}}:{{[0-9]+}}], 0 offset:16{{$}}
define void @atomic_umax_i32_offset(i32 addrspace(1)* %out, i32 %in) {
entry:
%gep = getelementptr i32 addrspace(1)* %out, i32 4
@@ -330,7 +330,7 @@ entry:
}
; FUNC-LABEL: {{^}}atomic_umax_i32_ret_offset:
-; SI: buffer_atomic_umax [[RET:v[0-9]+]], s[{{[0-9]+}}:{{[0-9]+}}], 0 offset:0x10 glc {{$}}
+; SI: buffer_atomic_umax [[RET:v[0-9]+]], s[{{[0-9]+}}:{{[0-9]+}}], 0 offset:16 glc {{$}}
; SI: buffer_store_dword [[RET]]
define void @atomic_umax_i32_ret_offset(i32 addrspace(1)* %out, i32 addrspace(1)* %out2, i32 %in) {
entry:
@@ -341,7 +341,7 @@ entry:
}
; FUNC-LABEL: {{^}}atomic_umax_i32_addr64_offset:
-; SI: buffer_atomic_umax v{{[0-9]+}}, v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64 offset:0x10{{$}}
+; SI: buffer_atomic_umax v{{[0-9]+}}, v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64 offset:16{{$}}
define void @atomic_umax_i32_addr64_offset(i32 addrspace(1)* %out, i32 %in, i64 %index) {
entry:
%ptr = getelementptr i32 addrspace(1)* %out, i64 %index
@@ -351,7 +351,7 @@ entry:
}
; FUNC-LABEL: {{^}}atomic_umax_i32_ret_addr64_offset:
-; SI: buffer_atomic_umax [[RET:v[0-9]+]], v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64 offset:0x10 glc{{$}}
+; SI: buffer_atomic_umax [[RET:v[0-9]+]], v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64 offset:16 glc{{$}}
; SI: buffer_store_dword [[RET]]
define void @atomic_umax_i32_ret_addr64_offset(i32 addrspace(1)* %out, i32 addrspace(1)* %out2, i32 %in, i64 %index) {
entry:
@@ -401,7 +401,7 @@ entry:
}
; FUNC-LABEL: {{^}}atomic_min_i32_offset:
-; SI: buffer_atomic_smin v{{[0-9]+}}, s[{{[0-9]+}}:{{[0-9]+}}], 0 offset:0x10{{$}}
+; SI: buffer_atomic_smin v{{[0-9]+}}, s[{{[0-9]+}}:{{[0-9]+}}], 0 offset:16{{$}}
define void @atomic_min_i32_offset(i32 addrspace(1)* %out, i32 %in) {
entry:
%gep = getelementptr i32 addrspace(1)* %out, i32 4
@@ -410,7 +410,7 @@ entry:
}
; FUNC-LABEL: {{^}}atomic_min_i32_ret_offset:
-; SI: buffer_atomic_smin [[RET:v[0-9]+]], s[{{[0-9]+}}:{{[0-9]+}}], 0 offset:0x10 glc {{$}}
+; SI: buffer_atomic_smin [[RET:v[0-9]+]], s[{{[0-9]+}}:{{[0-9]+}}], 0 offset:16 glc {{$}}
; SI: buffer_store_dword [[RET]]
define void @atomic_min_i32_ret_offset(i32 addrspace(1)* %out, i32 addrspace(1)* %out2, i32 %in) {
entry:
@@ -421,7 +421,7 @@ entry:
}
; FUNC-LABEL: {{^}}atomic_min_i32_addr64_offset:
-; SI: buffer_atomic_smin v{{[0-9]+}}, v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64 offset:0x10{{$}}
+; SI: buffer_atomic_smin v{{[0-9]+}}, v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64 offset:16{{$}}
define void @atomic_min_i32_addr64_offset(i32 addrspace(1)* %out, i32 %in, i64 %index) {
entry:
%ptr = getelementptr i32 addrspace(1)* %out, i64 %index
@@ -431,7 +431,7 @@ entry:
}
; FUNC-LABEL: {{^}}atomic_min_i32_ret_addr64_offset:
-; SI: buffer_atomic_smin [[RET:v[0-9]+]], v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64 offset:0x10 glc{{$}}
+; SI: buffer_atomic_smin [[RET:v[0-9]+]], v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64 offset:16 glc{{$}}
; SI: buffer_store_dword [[RET]]
define void @atomic_min_i32_ret_addr64_offset(i32 addrspace(1)* %out, i32 addrspace(1)* %out2, i32 %in, i64 %index) {
entry:
@@ -481,7 +481,7 @@ entry:
}
; FUNC-LABEL: {{^}}atomic_umin_i32_offset:
-; SI: buffer_atomic_umin v{{[0-9]+}}, s[{{[0-9]+}}:{{[0-9]+}}], 0 offset:0x10{{$}}
+; SI: buffer_atomic_umin v{{[0-9]+}}, s[{{[0-9]+}}:{{[0-9]+}}], 0 offset:16{{$}}
define void @atomic_umin_i32_offset(i32 addrspace(1)* %out, i32 %in) {
entry:
%gep = getelementptr i32 addrspace(1)* %out, i32 4
@@ -490,7 +490,7 @@ entry:
}
; FUNC-LABEL: {{^}}atomic_umin_i32_ret_offset:
-; SI: buffer_atomic_umin [[RET:v[0-9]+]], s[{{[0-9]+}}:{{[0-9]+}}], 0 offset:0x10 glc {{$}}
+; SI: buffer_atomic_umin [[RET:v[0-9]+]], s[{{[0-9]+}}:{{[0-9]+}}], 0 offset:16 glc {{$}}
; SI: buffer_store_dword [[RET]]
define void @atomic_umin_i32_ret_offset(i32 addrspace(1)* %out, i32 addrspace(1)* %out2, i32 %in) {
entry:
@@ -501,7 +501,7 @@ entry:
}
; FUNC-LABEL: {{^}}atomic_umin_i32_addr64_offset:
-; SI: buffer_atomic_umin v{{[0-9]+}}, v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64 offset:0x10{{$}}
+; SI: buffer_atomic_umin v{{[0-9]+}}, v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64 offset:16{{$}}
define void @atomic_umin_i32_addr64_offset(i32 addrspace(1)* %out, i32 %in, i64 %index) {
entry:
%ptr = getelementptr i32 addrspace(1)* %out, i64 %index
@@ -511,7 +511,7 @@ entry:
}
; FUNC-LABEL: {{^}}atomic_umin_i32_ret_addr64_offset:
-; SI: buffer_atomic_umin [[RET:v[0-9]+]], v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64 offset:0x10 glc{{$}}
+; SI: buffer_atomic_umin [[RET:v[0-9]+]], v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64 offset:16 glc{{$}}
; SI: buffer_store_dword [[RET]]
define void @atomic_umin_i32_ret_addr64_offset(i32 addrspace(1)* %out, i32 addrspace(1)* %out2, i32 %in, i64 %index) {
entry:
@@ -561,7 +561,7 @@ entry:
}
; FUNC-LABEL: {{^}}atomic_or_i32_offset:
-; SI: buffer_atomic_or v{{[0-9]+}}, s[{{[0-9]+}}:{{[0-9]+}}], 0 offset:0x10{{$}}
+; SI: buffer_atomic_or v{{[0-9]+}}, s[{{[0-9]+}}:{{[0-9]+}}], 0 offset:16{{$}}
define void @atomic_or_i32_offset(i32 addrspace(1)* %out, i32 %in) {
entry:
%gep = getelementptr i32 addrspace(1)* %out, i32 4
@@ -570,7 +570,7 @@ entry:
}
; FUNC-LABEL: {{^}}atomic_or_i32_ret_offset:
-; SI: buffer_atomic_or [[RET:v[0-9]+]], s[{{[0-9]+}}:{{[0-9]+}}], 0 offset:0x10 glc {{$}}
+; SI: buffer_atomic_or [[RET:v[0-9]+]], s[{{[0-9]+}}:{{[0-9]+}}], 0 offset:16 glc {{$}}
; SI: buffer_store_dword [[RET]]
define void @atomic_or_i32_ret_offset(i32 addrspace(1)* %out, i32 addrspace(1)* %out2, i32 %in) {
entry:
@@ -581,7 +581,7 @@ entry:
}
; FUNC-LABEL: {{^}}atomic_or_i32_addr64_offset:
-; SI: buffer_atomic_or v{{[0-9]+}}, v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64 offset:0x10{{$}}
+; SI: buffer_atomic_or v{{[0-9]+}}, v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64 offset:16{{$}}
define void @atomic_or_i32_addr64_offset(i32 addrspace(1)* %out, i32 %in, i64 %index) {
entry:
%ptr = getelementptr i32 addrspace(1)* %out, i64 %index
@@ -591,7 +591,7 @@ entry:
}
; FUNC-LABEL: {{^}}atomic_or_i32_ret_addr64_offset:
-; SI: buffer_atomic_or [[RET:v[0-9]+]], v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64 offset:0x10 glc{{$}}
+; SI: buffer_atomic_or [[RET:v[0-9]+]], v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64 offset:16 glc{{$}}
; SI: buffer_store_dword [[RET]]
define void @atomic_or_i32_ret_addr64_offset(i32 addrspace(1)* %out, i32 addrspace(1)* %out2, i32 %in, i64 %index) {
entry:
@@ -641,7 +641,7 @@ entry:
}
; FUNC-LABEL: {{^}}atomic_xchg_i32_offset:
-; SI: buffer_atomic_swap v{{[0-9]+}}, s[{{[0-9]+}}:{{[0-9]+}}], 0 offset:0x10{{$}}
+; SI: buffer_atomic_swap v{{[0-9]+}}, s[{{[0-9]+}}:{{[0-9]+}}], 0 offset:16{{$}}
define void @atomic_xchg_i32_offset(i32 addrspace(1)* %out, i32 %in) {
entry:
%gep = getelementptr i32 addrspace(1)* %out, i32 4
@@ -650,7 +650,7 @@ entry:
}
; FUNC-LABEL: {{^}}atomic_xchg_i32_ret_offset:
-; SI: buffer_atomic_swap [[RET:v[0-9]+]], s[{{[0-9]+}}:{{[0-9]+}}], 0 offset:0x10 glc {{$}}
+; SI: buffer_atomic_swap [[RET:v[0-9]+]], s[{{[0-9]+}}:{{[0-9]+}}], 0 offset:16 glc {{$}}
; SI: buffer_store_dword [[RET]]
define void @atomic_xchg_i32_ret_offset(i32 addrspace(1)* %out, i32 addrspace(1)* %out2, i32 %in) {
entry:
@@ -661,7 +661,7 @@ entry:
}
; FUNC-LABEL: {{^}}atomic_xchg_i32_addr64_offset:
-; SI: buffer_atomic_swap v{{[0-9]+}}, v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64 offset:0x10{{$}}
+; SI: buffer_atomic_swap v{{[0-9]+}}, v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64 offset:16{{$}}
define void @atomic_xchg_i32_addr64_offset(i32 addrspace(1)* %out, i32 %in, i64 %index) {
entry:
%ptr = getelementptr i32 addrspace(1)* %out, i64 %index
@@ -671,7 +671,7 @@ entry:
}
; FUNC-LABEL: {{^}}atomic_xchg_i32_ret_addr64_offset:
-; SI: buffer_atomic_swap [[RET:v[0-9]+]], v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64 offset:0x10 glc{{$}}
+; SI: buffer_atomic_swap [[RET:v[0-9]+]], v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64 offset:16 glc{{$}}
; SI: buffer_store_dword [[RET]]
define void @atomic_xchg_i32_ret_addr64_offset(i32 addrspace(1)* %out, i32 addrspace(1)* %out2, i32 %in, i64 %index) {
entry:
@@ -721,7 +721,7 @@ entry:
}
; FUNC-LABEL: {{^}}atomic_xor_i32_offset:
-; SI: buffer_atomic_xor v{{[0-9]+}}, s[{{[0-9]+}}:{{[0-9]+}}], 0 offset:0x10{{$}}
+; SI: buffer_atomic_xor v{{[0-9]+}}, s[{{[0-9]+}}:{{[0-9]+}}], 0 offset:16{{$}}
define void @atomic_xor_i32_offset(i32 addrspace(1)* %out, i32 %in) {
entry:
%gep = getelementptr i32 addrspace(1)* %out, i32 4
@@ -730,7 +730,7 @@ entry:
}
; FUNC-LABEL: {{^}}atomic_xor_i32_ret_offset:
-; SI: buffer_atomic_xor [[RET:v[0-9]+]], s[{{[0-9]+}}:{{[0-9]+}}], 0 offset:0x10 glc {{$}}
+; SI: buffer_atomic_xor [[RET:v[0-9]+]], s[{{[0-9]+}}:{{[0-9]+}}], 0 offset:16 glc {{$}}
; SI: buffer_store_dword [[RET]]
define void @atomic_xor_i32_ret_offset(i32 addrspace(1)* %out, i32 addrspace(1)* %out2, i32 %in) {
entry:
@@ -741,7 +741,7 @@ entry:
}
; FUNC-LABEL: {{^}}atomic_xor_i32_addr64_offset:
-; SI: buffer_atomic_xor v{{[0-9]+}}, v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64 offset:0x10{{$}}
+; SI: buffer_atomic_xor v{{[0-9]+}}, v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64 offset:16{{$}}
define void @atomic_xor_i32_addr64_offset(i32 addrspace(1)* %out, i32 %in, i64 %index) {
entry:
%ptr = getelementptr i32 addrspace(1)* %out, i64 %index
@@ -751,7 +751,7 @@ entry:
}
; FUNC-LABEL: {{^}}atomic_xor_i32_ret_addr64_offset:
-; SI: buffer_atomic_xor [[RET:v[0-9]+]], v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64 offset:0x10 glc{{$}}
+; SI: buffer_atomic_xor [[RET:v[0-9]+]], v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64 offset:16 glc{{$}}
; SI: buffer_store_dword [[RET]]
define void @atomic_xor_i32_ret_addr64_offset(i32 addrspace(1)* %out, i32 addrspace(1)* %out2, i32 %in, i64 %index) {
entry:
diff --git a/test/CodeGen/R600/gv-const-addrspace-fail.ll b/test/CodeGen/R600/gv-const-addrspace-fail.ll
index 905948f..af0df41 100644
--- a/test/CodeGen/R600/gv-const-addrspace-fail.ll
+++ b/test/CodeGen/R600/gv-const-addrspace-fail.ll
@@ -1,4 +1,4 @@
-; RUN: llc -march=r600 -mcpu=SI -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
+; RUN: llc -march=amdgcn -mcpu=SI -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
; XUN: llc -march=r600 -mcpu=redwood < %s | FileCheck -check-prefix=EG -check-prefix=FUNC %s
diff --git a/test/CodeGen/R600/gv-const-addrspace.ll b/test/CodeGen/R600/gv-const-addrspace.ll
index 6aa20b8..45af71d 100644
--- a/test/CodeGen/R600/gv-const-addrspace.ll
+++ b/test/CodeGen/R600/gv-const-addrspace.ll
@@ -1,5 +1,6 @@
+; RUN: llc -march=amdgcn -mcpu=SI -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=GCN -check-prefix=FUNC %s
+; RUN: llc -march=amdgcn -mcpu=tonga -verify-machineinstrs < %s | FileCheck -check-prefix=VI -check-prefix=GCN -check-prefix=FUNC %s
; RUN: llc -march=r600 -mcpu=redwood < %s | FileCheck -check-prefix=EG -check-prefix=FUNC %s
-; RUN: llc -march=r600 -mcpu=SI -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
@b = internal addrspace(2) constant [1 x i16] [ i16 7 ], align 2
@@ -9,6 +10,7 @@
; FUNC-LABEL: {{^}}float:
; FIXME: We should be using s_load_dword here.
; SI: buffer_load_dword
+; VI: s_load_dword
; EG-DAG: MOV {{\** *}}T2.X
; EG-DAG: MOV {{\** *}}T3.X
@@ -31,6 +33,7 @@ entry:
; FIXME: We should be using s_load_dword here.
; SI: buffer_load_dword
+; VI: s_load_dword
; EG-DAG: MOV {{\** *}}T2.X
; EG-DAG: MOV {{\** *}}T3.X
@@ -53,7 +56,7 @@ entry:
@struct_foo_gv = internal unnamed_addr addrspace(2) constant [1 x %struct.foo] [ %struct.foo { float 16.0, [5 x i32] [i32 0, i32 1, i32 2, i32 3, i32 4] } ]
; FUNC-LABEL: {{^}}struct_foo_gv_load:
-; SI: s_load_dword
+; GCN: s_load_dword
define void @struct_foo_gv_load(i32 addrspace(1)* %out, i32 %index) {
%gep = getelementptr inbounds [1 x %struct.foo] addrspace(2)* @struct_foo_gv, i32 0, i32 0, i32 1, i32 %index
@@ -70,6 +73,7 @@ define void @struct_foo_gv_load(i32 addrspace(1)* %out, i32 %index) {
; FUNC-LABEL: {{^}}array_v1_gv_load:
; FIXME: We should be using s_load_dword here.
; SI: buffer_load_dword
+; VI: s_load_dword
define void @array_v1_gv_load(<1 x i32> addrspace(1)* %out, i32 %index) {
%gep = getelementptr inbounds [4 x <1 x i32>] addrspace(2)* @array_v1_gv, i32 0, i32 %index
%load = load <1 x i32> addrspace(2)* %gep, align 4
diff --git a/test/CodeGen/R600/half.ll b/test/CodeGen/R600/half.ll
index 6ad9b2f..35a41c5 100644
--- a/test/CodeGen/R600/half.ll
+++ b/test/CodeGen/R600/half.ll
@@ -1,4 +1,5 @@
-; RUN: llc < %s -march=r600 -mcpu=SI | FileCheck %s
+; RUN: llc < %s -march=amdgcn -mcpu=SI | FileCheck %s
+; RUN: llc < %s -march=amdgcn -mcpu=tonga | FileCheck %s
define void @test_load_store(half addrspace(1)* %in, half addrspace(1)* %out) {
; CHECK-LABEL: {{^}}test_load_store:
diff --git a/test/CodeGen/R600/hsa.ll b/test/CodeGen/R600/hsa.ll
new file mode 100644
index 0000000..ff75b90
--- /dev/null
+++ b/test/CodeGen/R600/hsa.ll
@@ -0,0 +1,14 @@
+; RUN: llc < %s -mtriple=amdgcn--amdhsa -mcpu=kaveri | FileCheck --check-prefix=HSA %s
+
+; HSA: {{^}}simple:
+; HSA: .section .hsa.version
+; HSA-NEXT: .ascii "HSA Code Unit:0.0:AMD:0.1:GFX8.1:0"
+; Make sure we are setting the ATC bit:
+; HSA: s_mov_b32 s[[HI:[0-9]]], 0x100f000
+; HSA: buffer_store_dword v{{[0-9]+}}, s[0:[[HI]]], 0
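+; (0x100f000 is taken here to be the high word of the buffer resource
+; descriptor with the ATC bit set; the exact field layout is an assumption,
+; not something this test spells out)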
+
+define void @simple(i32 addrspace(1)* %out) {
+entry:
+ store i32 0, i32 addrspace(1)* %out
+ ret void
+}
diff --git a/test/CodeGen/R600/i1-copy-implicit-def.ll b/test/CodeGen/R600/i1-copy-implicit-def.ll
index 7c5bc04..b11a211 100644
--- a/test/CodeGen/R600/i1-copy-implicit-def.ll
+++ b/test/CodeGen/R600/i1-copy-implicit-def.ll
@@ -1,4 +1,5 @@
-; RUN: llc -march=r600 -mcpu=SI -verify-machineinstrs < %s | FileCheck -check-prefix=SI %s
+; RUN: llc -march=amdgcn -mcpu=SI -verify-machineinstrs < %s | FileCheck -check-prefix=SI %s
+; RUN: llc -march=amdgcn -mcpu=tonga -verify-machineinstrs < %s | FileCheck -check-prefix=SI %s
; SILowerI1Copies was not handling IMPLICIT_DEF
; SI-LABEL: {{^}}br_implicit_def:
diff --git a/test/CodeGen/R600/i1-copy-phi.ll b/test/CodeGen/R600/i1-copy-phi.ll
index bfa8672..430466e 100644
--- a/test/CodeGen/R600/i1-copy-phi.ll
+++ b/test/CodeGen/R600/i1-copy-phi.ll
@@ -1,4 +1,5 @@
-; RUN: llc -march=r600 -mcpu=SI -verify-machineinstrs < %s | FileCheck -check-prefix=SI %s
+; RUN: llc -march=amdgcn -mcpu=SI -verify-machineinstrs < %s | FileCheck -check-prefix=SI %s
+; RUN: llc -march=amdgcn -mcpu=tonga -verify-machineinstrs < %s | FileCheck -check-prefix=SI %s
; SI-LABEL: {{^}}br_i1_phi:
; SI: v_mov_b32_e32 [[REG:v[0-9]+]], 0{{$}}
diff --git a/test/CodeGen/R600/icmp64.ll b/test/CodeGen/R600/icmp64.ll
index 870bf7f..0eaa33e 100644
--- a/test/CodeGen/R600/icmp64.ll
+++ b/test/CodeGen/R600/icmp64.ll
@@ -1,4 +1,5 @@
-; RUN: llc -march=r600 -mcpu=SI -verify-machineinstrs < %s | FileCheck -check-prefix=SI %s
+; RUN: llc -march=amdgcn -mcpu=SI -verify-machineinstrs < %s | FileCheck -check-prefix=SI %s
+; RUN: llc -march=amdgcn -mcpu=tonga -verify-machineinstrs < %s | FileCheck -check-prefix=SI %s
; SI-LABEL: {{^}}test_i64_eq:
; SI: v_cmp_eq_i64
diff --git a/test/CodeGen/R600/imm.ll b/test/CodeGen/R600/imm.ll
index 1fcaf29..9b95fd6 100644
--- a/test/CodeGen/R600/imm.ll
+++ b/test/CodeGen/R600/imm.ll
@@ -1,4 +1,5 @@
-; RUN: llc -march=r600 -mcpu=verde -verify-machineinstrs < %s | FileCheck %s
+; RUN: llc -march=amdgcn -mcpu=verde -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=CHECK %s
+; RUN: llc -march=amdgcn -mcpu=tonga -verify-machineinstrs < %s | FileCheck -check-prefix=VI -check-prefix=CHECK %s
; Use a 64-bit value with lo bits that can be represented as an inline constant
; CHECK-LABEL: {{^}}i64_imm_inline_lo:
@@ -22,73 +23,100 @@ entry:
ret void
}
-; CHECK-LABEL: {{^}}store_inline_imm_0.0_f32
-; CHECK: v_mov_b32_e32 [[REG:v[0-9]+]], 0{{$}}
+; CHECK-LABEL: {{^}}store_imm_neg_0.0_i64:
+; CHECK-DAG: s_mov_b32 s[[HI_SREG:[0-9]+]], 0x80000000
+; CHECK-DAG: s_mov_b32 s[[LO_SREG:[0-9]+]], 0{{$}}
+; CHECK-DAG: v_mov_b32_e32 v[[LO_VREG:[0-9]+]], s[[LO_SREG]]
+; CHECK-DAG: v_mov_b32_e32 v[[HI_VREG:[0-9]+]], s[[HI_SREG]]
+; CHECK: buffer_store_dwordx2 v{{\[}}[[LO_VREG]]:[[HI_VREG]]{{\]}}
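+; (-9223372036854775808 is 0x8000000000000000, i.e. the bit pattern of -0.0)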
+define void @store_imm_neg_0.0_i64(i64 addrspace(1)* %out) {
+ store i64 -9223372036854775808, i64 addrspace(1)* %out
+ ret void
+}
+
+; CHECK-LABEL: {{^}}store_inline_imm_neg_0.0_i32:
+; CHECK: v_mov_b32_e32 [[REG:v[0-9]+]], 0x80000000
; CHECK-NEXT: buffer_store_dword [[REG]]
+define void @store_inline_imm_neg_0.0_i32(i32 addrspace(1)* %out) {
+ store i32 -2147483648, i32 addrspace(1)* %out
+ ret void
+}
+
+; CHECK-LABEL: {{^}}store_inline_imm_0.0_f32:
+; CHECK: v_mov_b32_e32 [[REG:v[0-9]+]], 0{{$}}
+; CHECK: buffer_store_dword [[REG]]
define void @store_inline_imm_0.0_f32(float addrspace(1)* %out) {
store float 0.0, float addrspace(1)* %out
ret void
}
-; CHECK-LABEL: {{^}}store_inline_imm_0.5_f32
+; CHECK-LABEL: {{^}}store_imm_neg_0.0_f32:
+; CHECK: v_mov_b32_e32 [[REG:v[0-9]+]], 0x80000000
+; CHECK: buffer_store_dword [[REG]]
+define void @store_imm_neg_0.0_f32(float addrspace(1)* %out) {
+ store float -0.0, float addrspace(1)* %out
+ ret void
+}
+
+; CHECK-LABEL: {{^}}store_inline_imm_0.5_f32:
; CHECK: v_mov_b32_e32 [[REG:v[0-9]+]], 0.5{{$}}
-; CHECK-NEXT: buffer_store_dword [[REG]]
+; CHECK: buffer_store_dword [[REG]]
define void @store_inline_imm_0.5_f32(float addrspace(1)* %out) {
store float 0.5, float addrspace(1)* %out
ret void
}
-; CHECK-LABEL: {{^}}store_inline_imm_m_0.5_f32
+; CHECK-LABEL: {{^}}store_inline_imm_m_0.5_f32:
; CHECK: v_mov_b32_e32 [[REG:v[0-9]+]], -0.5{{$}}
-; CHECK-NEXT: buffer_store_dword [[REG]]
+; CHECK: buffer_store_dword [[REG]]
define void @store_inline_imm_m_0.5_f32(float addrspace(1)* %out) {
store float -0.5, float addrspace(1)* %out
ret void
}
-; CHECK-LABEL: {{^}}store_inline_imm_1.0_f32
+; CHECK-LABEL: {{^}}store_inline_imm_1.0_f32:
; CHECK: v_mov_b32_e32 [[REG:v[0-9]+]], 1.0{{$}}
-; CHECK-NEXT: buffer_store_dword [[REG]]
+; CHECK: buffer_store_dword [[REG]]
define void @store_inline_imm_1.0_f32(float addrspace(1)* %out) {
store float 1.0, float addrspace(1)* %out
ret void
}
-; CHECK-LABEL: {{^}}store_inline_imm_m_1.0_f32
+; CHECK-LABEL: {{^}}store_inline_imm_m_1.0_f32:
; CHECK: v_mov_b32_e32 [[REG:v[0-9]+]], -1.0{{$}}
-; CHECK-NEXT: buffer_store_dword [[REG]]
+; CHECK: buffer_store_dword [[REG]]
define void @store_inline_imm_m_1.0_f32(float addrspace(1)* %out) {
store float -1.0, float addrspace(1)* %out
ret void
}
-; CHECK-LABEL: {{^}}store_inline_imm_2.0_f32
+; CHECK-LABEL: {{^}}store_inline_imm_2.0_f32:
; CHECK: v_mov_b32_e32 [[REG:v[0-9]+]], 2.0{{$}}
-; CHECK-NEXT: buffer_store_dword [[REG]]
+; CHECK: buffer_store_dword [[REG]]
define void @store_inline_imm_2.0_f32(float addrspace(1)* %out) {
store float 2.0, float addrspace(1)* %out
ret void
}
-; CHECK-LABEL: {{^}}store_inline_imm_m_2.0_f32
+; CHECK-LABEL: {{^}}store_inline_imm_m_2.0_f32:
; CHECK: v_mov_b32_e32 [[REG:v[0-9]+]], -2.0{{$}}
-; CHECK-NEXT: buffer_store_dword [[REG]]
+; CHECK: buffer_store_dword [[REG]]
define void @store_inline_imm_m_2.0_f32(float addrspace(1)* %out) {
store float -2.0, float addrspace(1)* %out
ret void
}
-; CHECK-LABEL: {{^}}store_inline_imm_4.0_f32
+; CHECK-LABEL: {{^}}store_inline_imm_4.0_f32:
; CHECK: v_mov_b32_e32 [[REG:v[0-9]+]], 4.0{{$}}
-; CHECK-NEXT: buffer_store_dword [[REG]]
+; CHECK: buffer_store_dword [[REG]]
define void @store_inline_imm_4.0_f32(float addrspace(1)* %out) {
store float 4.0, float addrspace(1)* %out
ret void
}
-; CHECK-LABEL: {{^}}store_inline_imm_m_4.0_f32
+; CHECK-LABEL: {{^}}store_inline_imm_m_4.0_f32:
; CHECK: v_mov_b32_e32 [[REG:v[0-9]+]], -4.0{{$}}
-; CHECK-NEXT: buffer_store_dword [[REG]]
+; CHECK: buffer_store_dword [[REG]]
define void @store_inline_imm_m_4.0_f32(float addrspace(1)* %out) {
store float -4.0, float addrspace(1)* %out
ret void
@@ -96,106 +124,106 @@ define void @store_inline_imm_m_4.0_f32(float addrspace(1)* %out) {
; CHECK-LABEL: {{^}}store_literal_imm_f32:
; CHECK: v_mov_b32_e32 [[REG:v[0-9]+]], 0x45800000
-; CHECK-NEXT: buffer_store_dword [[REG]]
+; CHECK: buffer_store_dword [[REG]]
define void @store_literal_imm_f32(float addrspace(1)* %out) {
store float 4096.0, float addrspace(1)* %out
ret void
}
-; CHECK-LABEL: {{^}}add_inline_imm_0.0_f32
+; CHECK-LABEL: {{^}}add_inline_imm_0.0_f32:
; CHECK: s_load_dword [[VAL:s[0-9]+]]
-; CHECK: v_add_f32_e64 [[REG:v[0-9]+]], 0.0, [[VAL]]{{$}}
-; CHECK-NEXT: buffer_store_dword [[REG]]
+; CHECK: v_add_f32_e64 [[REG:v[0-9]+]], 0, [[VAL]]{{$}}
+; CHECK: buffer_store_dword [[REG]]
define void @add_inline_imm_0.0_f32(float addrspace(1)* %out, float %x) {
%y = fadd float %x, 0.0
store float %y, float addrspace(1)* %out
ret void
}
-; CHECK-LABEL: {{^}}add_inline_imm_0.5_f32
+; CHECK-LABEL: {{^}}add_inline_imm_0.5_f32:
; CHECK: s_load_dword [[VAL:s[0-9]+]]
; CHECK: v_add_f32_e64 [[REG:v[0-9]+]], 0.5, [[VAL]]{{$}}
-; CHECK-NEXT: buffer_store_dword [[REG]]
+; CHECK: buffer_store_dword [[REG]]
define void @add_inline_imm_0.5_f32(float addrspace(1)* %out, float %x) {
%y = fadd float %x, 0.5
store float %y, float addrspace(1)* %out
ret void
}
-; CHECK-LABEL: {{^}}add_inline_imm_neg_0.5_f32
+; CHECK-LABEL: {{^}}add_inline_imm_neg_0.5_f32:
; CHECK: s_load_dword [[VAL:s[0-9]+]]
; CHECK: v_add_f32_e64 [[REG:v[0-9]+]], -0.5, [[VAL]]{{$}}
-; CHECK-NEXT: buffer_store_dword [[REG]]
+; CHECK: buffer_store_dword [[REG]]
define void @add_inline_imm_neg_0.5_f32(float addrspace(1)* %out, float %x) {
%y = fadd float %x, -0.5
store float %y, float addrspace(1)* %out
ret void
}
-; CHECK-LABEL: {{^}}add_inline_imm_1.0_f32
+; CHECK-LABEL: {{^}}add_inline_imm_1.0_f32:
; CHECK: s_load_dword [[VAL:s[0-9]+]]
; CHECK: v_add_f32_e64 [[REG:v[0-9]+]], 1.0, [[VAL]]{{$}}
-; CHECK-NEXT: buffer_store_dword [[REG]]
+; CHECK: buffer_store_dword [[REG]]
define void @add_inline_imm_1.0_f32(float addrspace(1)* %out, float %x) {
%y = fadd float %x, 1.0
store float %y, float addrspace(1)* %out
ret void
}
-; CHECK-LABEL: {{^}}add_inline_imm_neg_1.0_f32
+; CHECK-LABEL: {{^}}add_inline_imm_neg_1.0_f32:
; CHECK: s_load_dword [[VAL:s[0-9]+]]
; CHECK: v_add_f32_e64 [[REG:v[0-9]+]], -1.0, [[VAL]]{{$}}
-; CHECK-NEXT: buffer_store_dword [[REG]]
+; CHECK: buffer_store_dword [[REG]]
define void @add_inline_imm_neg_1.0_f32(float addrspace(1)* %out, float %x) {
%y = fadd float %x, -1.0
store float %y, float addrspace(1)* %out
ret void
}
-; CHECK-LABEL: {{^}}add_inline_imm_2.0_f32
+; CHECK-LABEL: {{^}}add_inline_imm_2.0_f32:
; CHECK: s_load_dword [[VAL:s[0-9]+]]
; CHECK: v_add_f32_e64 [[REG:v[0-9]+]], 2.0, [[VAL]]{{$}}
-; CHECK-NEXT: buffer_store_dword [[REG]]
+; CHECK: buffer_store_dword [[REG]]
define void @add_inline_imm_2.0_f32(float addrspace(1)* %out, float %x) {
%y = fadd float %x, 2.0
store float %y, float addrspace(1)* %out
ret void
}
-; CHECK-LABEL: {{^}}add_inline_imm_neg_2.0_f32
+; CHECK-LABEL: {{^}}add_inline_imm_neg_2.0_f32:
; CHECK: s_load_dword [[VAL:s[0-9]+]]
; CHECK: v_add_f32_e64 [[REG:v[0-9]+]], -2.0, [[VAL]]{{$}}
-; CHECK-NEXT: buffer_store_dword [[REG]]
+; CHECK: buffer_store_dword [[REG]]
define void @add_inline_imm_neg_2.0_f32(float addrspace(1)* %out, float %x) {
%y = fadd float %x, -2.0
store float %y, float addrspace(1)* %out
ret void
}
-; CHECK-LABEL: {{^}}add_inline_imm_4.0_f32
+; CHECK-LABEL: {{^}}add_inline_imm_4.0_f32:
; CHECK: s_load_dword [[VAL:s[0-9]+]]
; CHECK: v_add_f32_e64 [[REG:v[0-9]+]], 4.0, [[VAL]]{{$}}
-; CHECK-NEXT: buffer_store_dword [[REG]]
+; CHECK: buffer_store_dword [[REG]]
define void @add_inline_imm_4.0_f32(float addrspace(1)* %out, float %x) {
%y = fadd float %x, 4.0
store float %y, float addrspace(1)* %out
ret void
}
-; CHECK-LABEL: {{^}}add_inline_imm_neg_4.0_f32
+; CHECK-LABEL: {{^}}add_inline_imm_neg_4.0_f32:
; CHECK: s_load_dword [[VAL:s[0-9]+]]
; CHECK: v_add_f32_e64 [[REG:v[0-9]+]], -4.0, [[VAL]]{{$}}
-; CHECK-NEXT: buffer_store_dword [[REG]]
+; CHECK: buffer_store_dword [[REG]]
define void @add_inline_imm_neg_4.0_f32(float addrspace(1)* %out, float %x) {
%y = fadd float %x, -4.0
store float %y, float addrspace(1)* %out
ret void
}
-; CHECK-LABEL: @commute_add_inline_imm_0.5_f32
+; CHECK-LABEL: {{^}}commute_add_inline_imm_0.5_f32:
; CHECK: buffer_load_dword [[VAL:v[0-9]+]]
; CHECK: v_add_f32_e32 [[REG:v[0-9]+]], 0.5, [[VAL]]
-; CHECK-NEXT: buffer_store_dword [[REG]]
+; CHECK: buffer_store_dword [[REG]]
define void @commute_add_inline_imm_0.5_f32(float addrspace(1)* %out, float addrspace(1)* %in) {
%x = load float addrspace(1)* %in
%y = fadd float %x, 0.5
@@ -203,13 +231,387 @@ define void @commute_add_inline_imm_0.5_f32(float addrspace(1)* %out, float addr
ret void
}
-; CHECK-LABEL: @commute_add_literal_f32
+; CHECK-LABEL: {{^}}commute_add_literal_f32:
; CHECK: buffer_load_dword [[VAL:v[0-9]+]]
; CHECK: v_add_f32_e32 [[REG:v[0-9]+]], 0x44800000, [[VAL]]
-; CHECK-NEXT: buffer_store_dword [[REG]]
+; CHECK: buffer_store_dword [[REG]]
define void @commute_add_literal_f32(float addrspace(1)* %out, float addrspace(1)* %in) {
%x = load float addrspace(1)* %in
%y = fadd float %x, 1024.0
store float %y, float addrspace(1)* %out
ret void
}
+
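+; The f32 constants in the next tests are hex double literals chosen so that
+; the value, as a float, has an IEEE-754 bit pattern equal to a small integer
+; (e.g. 0x36a0000000000000 is 2^-149, the float with bit pattern 0x00000001);
+; the checks expect those operands to be encoded as integer inline constants.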
+; CHECK-LABEL: {{^}}add_inline_imm_1_f32:
+; CHECK: s_load_dword [[VAL:s[0-9]+]]
+; CHECK: v_add_f32_e64 [[REG:v[0-9]+]], 1, [[VAL]]{{$}}
+; CHECK: buffer_store_dword [[REG]]
+define void @add_inline_imm_1_f32(float addrspace(1)* %out, float %x) {
+ %y = fadd float %x, 0x36a0000000000000
+ store float %y, float addrspace(1)* %out
+ ret void
+}
+
+; CHECK-LABEL: {{^}}add_inline_imm_2_f32:
+; CHECK: s_load_dword [[VAL:s[0-9]+]]
+; CHECK: v_add_f32_e64 [[REG:v[0-9]+]], 2, [[VAL]]{{$}}
+; CHECK: buffer_store_dword [[REG]]
+define void @add_inline_imm_2_f32(float addrspace(1)* %out, float %x) {
+ %y = fadd float %x, 0x36b0000000000000
+ store float %y, float addrspace(1)* %out
+ ret void
+}
+
+; CHECK-LABEL: {{^}}add_inline_imm_16_f32:
+; CHECK: s_load_dword [[VAL:s[0-9]+]]
+; CHECK: v_add_f32_e64 [[REG:v[0-9]+]], 16, [[VAL]]
+; CHECK: buffer_store_dword [[REG]]
+define void @add_inline_imm_16_f32(float addrspace(1)* %out, float %x) {
+ %y = fadd float %x, 0x36e0000000000000
+ store float %y, float addrspace(1)* %out
+ ret void
+}
+
+; CHECK-LABEL: {{^}}add_inline_imm_neg_1_f32:
+; CHECK: s_load_dword [[VAL:s[0-9]+]]
+; CHECK: v_add_f32_e64 [[REG:v[0-9]+]], -1, [[VAL]]
+; CHECK: buffer_store_dword [[REG]]
+define void @add_inline_imm_neg_1_f32(float addrspace(1)* %out, float %x) {
+ %y = fadd float %x, 0xffffffffe0000000
+ store float %y, float addrspace(1)* %out
+ ret void
+}
+
+; CHECK-LABEL: {{^}}add_inline_imm_neg_2_f32:
+; CHECK: s_load_dword [[VAL:s[0-9]+]]
+; CHECK: v_add_f32_e64 [[REG:v[0-9]+]], -2, [[VAL]]
+; CHECK: buffer_store_dword [[REG]]
+define void @add_inline_imm_neg_2_f32(float addrspace(1)* %out, float %x) {
+ %y = fadd float %x, 0xffffffffc0000000
+ store float %y, float addrspace(1)* %out
+ ret void
+}
+
+; CHECK-LABEL: {{^}}add_inline_imm_neg_16_f32:
+; CHECK: s_load_dword [[VAL:s[0-9]+]]
+; CHECK: v_add_f32_e64 [[REG:v[0-9]+]], -16, [[VAL]]
+; CHECK: buffer_store_dword [[REG]]
+define void @add_inline_imm_neg_16_f32(float addrspace(1)* %out, float %x) {
+ %y = fadd float %x, 0xfffffffe00000000
+ store float %y, float addrspace(1)* %out
+ ret void
+}
+
+; CHECK-LABEL: {{^}}add_inline_imm_63_f32:
+; CHECK: s_load_dword [[VAL:s[0-9]+]]
+; CHECK: v_add_f32_e64 [[REG:v[0-9]+]], 63, [[VAL]]
+; CHECK: buffer_store_dword [[REG]]
+define void @add_inline_imm_63_f32(float addrspace(1)* %out, float %x) {
+ %y = fadd float %x, 0x36ff800000000000
+ store float %y, float addrspace(1)* %out
+ ret void
+}
+
+; CHECK-LABEL: {{^}}add_inline_imm_64_f32:
+; CHECK: s_load_dword [[VAL:s[0-9]+]]
+; CHECK: v_add_f32_e64 [[REG:v[0-9]+]], 64, [[VAL]]
+; CHECK: buffer_store_dword [[REG]]
+define void @add_inline_imm_64_f32(float addrspace(1)* %out, float %x) {
+ %y = fadd float %x, 0x3700000000000000
+ store float %y, float addrspace(1)* %out
+ ret void
+}
+
+
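+; SI encodes the SMRD offset in dwords (0xb = 44 bytes), while VI uses a byte
+; offset (0x2c = 44), hence the separate SI/VI check lines below.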
+; CHECK-LABEL: {{^}}add_inline_imm_0.0_f64:
+; SI: s_load_dwordx2 [[VAL:s\[[0-9]+:[0-9]+\]]], {{s\[[0-9]+:[0-9]+\]}}, 0xb
+; VI: s_load_dwordx2 [[VAL:s\[[0-9]+:[0-9]+\]]], {{s\[[0-9]+:[0-9]+\]}}, 0x2c
+; CHECK: v_add_f64 [[REG:v\[[0-9]+:[0-9]+\]]], 0, [[VAL]]
+; CHECK: buffer_store_dwordx2 [[REG]]
+define void @add_inline_imm_0.0_f64(double addrspace(1)* %out, double %x) {
+ %y = fadd double %x, 0.0
+ store double %y, double addrspace(1)* %out
+ ret void
+}
+
+; CHECK-LABEL: {{^}}add_inline_imm_0.5_f64:
+; SI: s_load_dwordx2 [[VAL:s\[[0-9]+:[0-9]+\]]], {{s\[[0-9]+:[0-9]+\]}}, 0xb
+; VI: s_load_dwordx2 [[VAL:s\[[0-9]+:[0-9]+\]]], {{s\[[0-9]+:[0-9]+\]}}, 0x2c
+; CHECK: v_add_f64 [[REG:v\[[0-9]+:[0-9]+\]]], 0.5, [[VAL]]
+; CHECK: buffer_store_dwordx2 [[REG]]
+define void @add_inline_imm_0.5_f64(double addrspace(1)* %out, double %x) {
+ %y = fadd double %x, 0.5
+ store double %y, double addrspace(1)* %out
+ ret void
+}
+
+; CHECK-LABEL: {{^}}add_inline_imm_neg_0.5_f64:
+; SI: s_load_dwordx2 [[VAL:s\[[0-9]+:[0-9]+\]]], {{s\[[0-9]+:[0-9]+\]}}, 0xb
+; VI: s_load_dwordx2 [[VAL:s\[[0-9]+:[0-9]+\]]], {{s\[[0-9]+:[0-9]+\]}}, 0x2c
+; CHECK: v_add_f64 [[REG:v\[[0-9]+:[0-9]+\]]], -0.5, [[VAL]]
+; CHECK: buffer_store_dwordx2 [[REG]]
+define void @add_inline_imm_neg_0.5_f64(double addrspace(1)* %out, double %x) {
+ %y = fadd double %x, -0.5
+ store double %y, double addrspace(1)* %out
+ ret void
+}
+
+; CHECK-LABEL: {{^}}add_inline_imm_1.0_f64:
+; SI: s_load_dwordx2 [[VAL:s\[[0-9]+:[0-9]+\]]], {{s\[[0-9]+:[0-9]+\]}}, 0xb
+; VI: s_load_dwordx2 [[VAL:s\[[0-9]+:[0-9]+\]]], {{s\[[0-9]+:[0-9]+\]}}, 0x2c
+; CHECK: v_add_f64 [[REG:v\[[0-9]+:[0-9]+\]]], 1.0, [[VAL]]
+; CHECK: buffer_store_dwordx2 [[REG]]
+define void @add_inline_imm_1.0_f64(double addrspace(1)* %out, double %x) {
+ %y = fadd double %x, 1.0
+ store double %y, double addrspace(1)* %out
+ ret void
+}
+
+; CHECK-LABEL: {{^}}add_inline_imm_neg_1.0_f64:
+; SI: s_load_dwordx2 [[VAL:s\[[0-9]+:[0-9]+\]]], {{s\[[0-9]+:[0-9]+\]}}, 0xb
+; VI: s_load_dwordx2 [[VAL:s\[[0-9]+:[0-9]+\]]], {{s\[[0-9]+:[0-9]+\]}}, 0x2c
+; CHECK: v_add_f64 [[REG:v\[[0-9]+:[0-9]+\]]], -1.0, [[VAL]]
+; CHECK: buffer_store_dwordx2 [[REG]]
+define void @add_inline_imm_neg_1.0_f64(double addrspace(1)* %out, double %x) {
+ %y = fadd double %x, -1.0
+ store double %y, double addrspace(1)* %out
+ ret void
+}
+
+; CHECK-LABEL: {{^}}add_inline_imm_2.0_f64:
+; SI: s_load_dwordx2 [[VAL:s\[[0-9]+:[0-9]+\]]], {{s\[[0-9]+:[0-9]+\]}}, 0xb
+; VI: s_load_dwordx2 [[VAL:s\[[0-9]+:[0-9]+\]]], {{s\[[0-9]+:[0-9]+\]}}, 0x2c
+; CHECK: v_add_f64 [[REG:v\[[0-9]+:[0-9]+\]]], 2.0, [[VAL]]
+; CHECK: buffer_store_dwordx2 [[REG]]
+define void @add_inline_imm_2.0_f64(double addrspace(1)* %out, double %x) {
+ %y = fadd double %x, 2.0
+ store double %y, double addrspace(1)* %out
+ ret void
+}
+
+; CHECK-LABEL: {{^}}add_inline_imm_neg_2.0_f64:
+; SI: s_load_dwordx2 [[VAL:s\[[0-9]+:[0-9]+\]]], {{s\[[0-9]+:[0-9]+\]}}, 0xb
+; VI: s_load_dwordx2 [[VAL:s\[[0-9]+:[0-9]+\]]], {{s\[[0-9]+:[0-9]+\]}}, 0x2c
+; CHECK: v_add_f64 [[REG:v\[[0-9]+:[0-9]+\]]], -2.0, [[VAL]]
+; CHECK: buffer_store_dwordx2 [[REG]]
+define void @add_inline_imm_neg_2.0_f64(double addrspace(1)* %out, double %x) {
+ %y = fadd double %x, -2.0
+ store double %y, double addrspace(1)* %out
+ ret void
+}
+
+; CHECK-LABEL: {{^}}add_inline_imm_4.0_f64:
+; SI: s_load_dwordx2 [[VAL:s\[[0-9]+:[0-9]+\]]], {{s\[[0-9]+:[0-9]+\]}}, 0xb
+; VI: s_load_dwordx2 [[VAL:s\[[0-9]+:[0-9]+\]]], {{s\[[0-9]+:[0-9]+\]}}, 0x2c
+; CHECK: v_add_f64 [[REG:v\[[0-9]+:[0-9]+\]]], 4.0, [[VAL]]
+; CHECK: buffer_store_dwordx2 [[REG]]
+define void @add_inline_imm_4.0_f64(double addrspace(1)* %out, double %x) {
+ %y = fadd double %x, 4.0
+ store double %y, double addrspace(1)* %out
+ ret void
+}
+
+; CHECK-LABEL: {{^}}add_inline_imm_neg_4.0_f64:
+; SI: s_load_dwordx2 [[VAL:s\[[0-9]+:[0-9]+\]]], {{s\[[0-9]+:[0-9]+\]}}, 0xb
+; VI: s_load_dwordx2 [[VAL:s\[[0-9]+:[0-9]+\]]], {{s\[[0-9]+:[0-9]+\]}}, 0x2c
+; CHECK: v_add_f64 [[REG:v\[[0-9]+:[0-9]+\]]], -4.0, [[VAL]]
+; CHECK: buffer_store_dwordx2 [[REG]]
+define void @add_inline_imm_neg_4.0_f64(double addrspace(1)* %out, double %x) {
+ %y = fadd double %x, -4.0
+ store double %y, double addrspace(1)* %out
+ ret void
+}
+
+
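+; For f64 the literals below are raw bit patterns equal to small integers
+; (0x1, 0x2, 0x10, 0x3F, 0x40, and 0xffffffffffffffff for -1), again expected
+; to be emitted as integer inline operands.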
+; CHECK-LABEL: {{^}}add_inline_imm_1_f64:
+; SI: s_load_dwordx2 [[VAL:s\[[0-9]+:[0-9]+\]]], {{s\[[0-9]+:[0-9]+\]}}, 0xb
+; VI: s_load_dwordx2 [[VAL:s\[[0-9]+:[0-9]+\]]], {{s\[[0-9]+:[0-9]+\]}}, 0x2c
+; CHECK: v_add_f64 [[REG:v\[[0-9]+:[0-9]+\]]], 1, [[VAL]]
+; CHECK: buffer_store_dwordx2 [[REG]]
+define void @add_inline_imm_1_f64(double addrspace(1)* %out, double %x) {
+ %y = fadd double %x, 0x0000000000000001
+ store double %y, double addrspace(1)* %out
+ ret void
+}
+
+; CHECK-LABEL: {{^}}add_inline_imm_2_f64:
+; SI: s_load_dwordx2 [[VAL:s\[[0-9]+:[0-9]+\]]], {{s\[[0-9]+:[0-9]+\]}}, 0xb
+; VI: s_load_dwordx2 [[VAL:s\[[0-9]+:[0-9]+\]]], {{s\[[0-9]+:[0-9]+\]}}, 0x2c
+; CHECK: v_add_f64 [[REG:v\[[0-9]+:[0-9]+\]]], 2, [[VAL]]
+; CHECK: buffer_store_dwordx2 [[REG]]
+define void @add_inline_imm_2_f64(double addrspace(1)* %out, double %x) {
+ %y = fadd double %x, 0x0000000000000002
+ store double %y, double addrspace(1)* %out
+ ret void
+}
+
+; CHECK-LABEL: {{^}}add_inline_imm_16_f64:
+; SI: s_load_dwordx2 [[VAL:s\[[0-9]+:[0-9]+\]]], {{s\[[0-9]+:[0-9]+\]}}, 0xb
+; VI: s_load_dwordx2 [[VAL:s\[[0-9]+:[0-9]+\]]], {{s\[[0-9]+:[0-9]+\]}}, 0x2c
+; CHECK: v_add_f64 [[REG:v\[[0-9]+:[0-9]+\]]], 16, [[VAL]]
+; CHECK: buffer_store_dwordx2 [[REG]]
+define void @add_inline_imm_16_f64(double addrspace(1)* %out, double %x) {
+ %y = fadd double %x, 0x0000000000000010
+ store double %y, double addrspace(1)* %out
+ ret void
+}
+
+; CHECK-LABEL: {{^}}add_inline_imm_neg_1_f64:
+; SI: s_load_dwordx2 [[VAL:s\[[0-9]+:[0-9]+\]]], {{s\[[0-9]+:[0-9]+\]}}, 0xb
+; VI: s_load_dwordx2 [[VAL:s\[[0-9]+:[0-9]+\]]], {{s\[[0-9]+:[0-9]+\]}}, 0x2c
+; CHECK: v_add_f64 [[REG:v\[[0-9]+:[0-9]+\]]], -1, [[VAL]]
+; CHECK: buffer_store_dwordx2 [[REG]]
+define void @add_inline_imm_neg_1_f64(double addrspace(1)* %out, double %x) {
+ %y = fadd double %x, 0xffffffffffffffff
+ store double %y, double addrspace(1)* %out
+ ret void
+}
+
+; CHECK-LABEL: {{^}}add_inline_imm_neg_2_f64:
+; SI: s_load_dwordx2 [[VAL:s\[[0-9]+:[0-9]+\]]], {{s\[[0-9]+:[0-9]+\]}}, 0xb
+; VI: s_load_dwordx2 [[VAL:s\[[0-9]+:[0-9]+\]]], {{s\[[0-9]+:[0-9]+\]}}, 0x2c
+; CHECK: v_add_f64 [[REG:v\[[0-9]+:[0-9]+\]]], -2, [[VAL]]
+; CHECK: buffer_store_dwordx2 [[REG]]
+define void @add_inline_imm_neg_2_f64(double addrspace(1)* %out, double %x) {
+ %y = fadd double %x, 0xfffffffffffffffe
+ store double %y, double addrspace(1)* %out
+ ret void
+}
+
+; CHECK-LABEL: {{^}}add_inline_imm_neg_16_f64:
+; SI: s_load_dwordx2 [[VAL:s\[[0-9]+:[0-9]+\]]], {{s\[[0-9]+:[0-9]+\]}}, 0xb
+; VI: s_load_dwordx2 [[VAL:s\[[0-9]+:[0-9]+\]]], {{s\[[0-9]+:[0-9]+\]}}, 0x2c
+; CHECK: v_add_f64 [[REG:v\[[0-9]+:[0-9]+\]]], -16, [[VAL]]
+; CHECK: buffer_store_dwordx2 [[REG]]
+define void @add_inline_imm_neg_16_f64(double addrspace(1)* %out, double %x) {
+ %y = fadd double %x, 0xfffffffffffffff0
+ store double %y, double addrspace(1)* %out
+ ret void
+}
+
+; CHECK-LABEL: {{^}}add_inline_imm_63_f64:
+; SI: s_load_dwordx2 [[VAL:s\[[0-9]+:[0-9]+\]]], {{s\[[0-9]+:[0-9]+\]}}, 0xb
+; VI: s_load_dwordx2 [[VAL:s\[[0-9]+:[0-9]+\]]], {{s\[[0-9]+:[0-9]+\]}}, 0x2c
+; CHECK: v_add_f64 [[REG:v\[[0-9]+:[0-9]+\]]], 63, [[VAL]]
+; CHECK: buffer_store_dwordx2 [[REG]]
+define void @add_inline_imm_63_f64(double addrspace(1)* %out, double %x) {
+ %y = fadd double %x, 0x000000000000003F
+ store double %y, double addrspace(1)* %out
+ ret void
+}
+
+; CHECK-LABEL: {{^}}add_inline_imm_64_f64:
+; SI: s_load_dwordx2 [[VAL:s\[[0-9]+:[0-9]+\]]], {{s\[[0-9]+:[0-9]+\]}}, 0xb
+; VI: s_load_dwordx2 [[VAL:s\[[0-9]+:[0-9]+\]]], {{s\[[0-9]+:[0-9]+\]}}, 0x2c
+; CHECK: v_add_f64 [[REG:v\[[0-9]+:[0-9]+\]]], 64, [[VAL]]
+; CHECK: buffer_store_dwordx2 [[REG]]
+define void @add_inline_imm_64_f64(double addrspace(1)* %out, double %x) {
+ %y = fadd double %x, 0x0000000000000040
+ store double %y, double addrspace(1)* %out
+ ret void
+}
+
+
+; CHECK-LABEL: {{^}}store_inline_imm_0.0_f64:
+; CHECK: v_mov_b32_e32 v[[LO_VREG:[0-9]+]], 0
+; CHECK: v_mov_b32_e32 v[[HI_VREG:[0-9]+]], 0
+; CHECK: buffer_store_dwordx2 v{{\[}}[[LO_VREG]]:[[HI_VREG]]{{\]}}
+define void @store_inline_imm_0.0_f64(double addrspace(1)* %out) {
+ store double 0.0, double addrspace(1)* %out
+ ret void
+}
+
+
+; CHECK-LABEL: {{^}}store_literal_imm_neg_0.0_f64:
+; CHECK-DAG: s_mov_b32 s[[HI_SREG:[0-9]+]], 0x80000000
+; CHECK-DAG: s_mov_b32 s[[LO_SREG:[0-9]+]], 0{{$}}
+; CHECK-DAG: v_mov_b32_e32 v[[LO_VREG:[0-9]+]], s[[LO_SREG]]
+; CHECK-DAG: v_mov_b32_e32 v[[HI_VREG:[0-9]+]], s[[HI_SREG]]
+; CHECK: buffer_store_dwordx2 v{{\[}}[[LO_VREG]]:[[HI_VREG]]{{\]}}
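+; (-0.0 is not one of the f64 inline constants, so both halves are
+; materialized through s_mov_b32)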
+define void @store_literal_imm_neg_0.0_f64(double addrspace(1)* %out) {
+ store double -0.0, double addrspace(1)* %out
+ ret void
+}
+
+; CHECK-LABEL: {{^}}store_inline_imm_0.5_f64:
+; CHECK-DAG: v_mov_b32_e32 v[[LO_VREG:[0-9]+]], 0{{$}}
+; CHECK-DAG: v_mov_b32_e32 v[[HI_VREG:[0-9]+]], 0x3fe00000
+; CHECK: buffer_store_dwordx2 v{{\[}}[[LO_VREG]]:[[HI_VREG]]{{\]}}
+define void @store_inline_imm_0.5_f64(double addrspace(1)* %out) {
+ store double 0.5, double addrspace(1)* %out
+ ret void
+}
+
+; CHECK-LABEL: {{^}}store_inline_imm_m_0.5_f64:
+; CHECK-DAG: v_mov_b32_e32 v[[LO_VREG:[0-9]+]], 0{{$}}
+; CHECK-DAG: v_mov_b32_e32 v[[HI_VREG:[0-9]+]], 0xbfe00000
+; CHECK: buffer_store_dwordx2 v{{\[}}[[LO_VREG]]:[[HI_VREG]]{{\]}}
+define void @store_inline_imm_m_0.5_f64(double addrspace(1)* %out) {
+ store double -0.5, double addrspace(1)* %out
+ ret void
+}
+
+; CHECK-LABEL: {{^}}store_inline_imm_1.0_f64:
+; CHECK-DAG: v_mov_b32_e32 v[[LO_VREG:[0-9]+]], 0{{$}}
+; CHECK-DAG: v_mov_b32_e32 v[[HI_VREG:[0-9]+]], 0x3ff00000
+; CHECK: buffer_store_dwordx2 v{{\[}}[[LO_VREG]]:[[HI_VREG]]{{\]}}
+define void @store_inline_imm_1.0_f64(double addrspace(1)* %out) {
+ store double 1.0, double addrspace(1)* %out
+ ret void
+}
+
+; CHECK-LABEL: {{^}}store_inline_imm_m_1.0_f64:
+; CHECK-DAG: v_mov_b32_e32 v[[LO_VREG:[0-9]+]], 0{{$}}
+; CHECK-DAG: v_mov_b32_e32 v[[HI_VREG:[0-9]+]], 0xbff00000
+; CHECK: buffer_store_dwordx2 v{{\[}}[[LO_VREG]]:[[HI_VREG]]{{\]}}
+define void @store_inline_imm_m_1.0_f64(double addrspace(1)* %out) {
+ store double -1.0, double addrspace(1)* %out
+ ret void
+}
+
+; CHECK-LABEL: {{^}}store_inline_imm_2.0_f64:
+; CHECK-DAG: v_mov_b32_e32 v[[LO_VREG:[0-9]+]], 0{{$}}
+; CHECK-DAG: v_mov_b32_e32 v[[HI_VREG:[0-9]+]], 2.0
+; CHECK: buffer_store_dwordx2 v{{\[}}[[LO_VREG]]:[[HI_VREG]]{{\]}}
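+; (0x40000000, the high dword of double 2.0, is itself the f32 inline
+; constant 2.0, which is why it prints as "2.0" above)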
+define void @store_inline_imm_2.0_f64(double addrspace(1)* %out) {
+ store double 2.0, double addrspace(1)* %out
+ ret void
+}
+
+; CHECK-LABEL: {{^}}store_inline_imm_m_2.0_f64:
+; CHECK-DAG: v_mov_b32_e32 v[[LO_VREG:[0-9]+]], 0{{$}}
+; CHECK-DAG: v_mov_b32_e32 v[[HI_VREG:[0-9]+]], -2.0
+; CHECK: buffer_store_dwordx2 v{{\[}}[[LO_VREG]]:[[HI_VREG]]{{\]}}
+define void @store_inline_imm_m_2.0_f64(double addrspace(1)* %out) {
+ store double -2.0, double addrspace(1)* %out
+ ret void
+}
+
+; CHECK-LABEL: {{^}}store_inline_imm_4.0_f64:
+; CHECK-DAG: v_mov_b32_e32 v[[LO_VREG:[0-9]+]], 0{{$}}
+; CHECK-DAG: v_mov_b32_e32 v[[HI_VREG:[0-9]+]], 0x40100000
+; CHECK: buffer_store_dwordx2 v{{\[}}[[LO_VREG]]:[[HI_VREG]]{{\]}}
+define void @store_inline_imm_4.0_f64(double addrspace(1)* %out) {
+ store double 4.0, double addrspace(1)* %out
+ ret void
+}
+
+; CHECK-LABEL: {{^}}store_inline_imm_m_4.0_f64:
+; CHECK-DAG: v_mov_b32_e32 v[[LO_VREG:[0-9]+]], 0{{$}}
+; CHECK-DAG: v_mov_b32_e32 v[[HI_VREG:[0-9]+]], 0xc0100000
+; CHECK: buffer_store_dwordx2 v{{\[}}[[LO_VREG]]:[[HI_VREG]]{{\]}}
+define void @store_inline_imm_m_4.0_f64(double addrspace(1)* %out) {
+ store double -4.0, double addrspace(1)* %out
+ ret void
+}
+
+; CHECK-LABEL: {{^}}store_literal_imm_f64:
+; CHECK-DAG: s_mov_b32 s[[HI_SREG:[0-9]+]], 0x40b00000
+; CHECK-DAG: s_mov_b32 s[[LO_SREG:[0-9]+]], 0{{$}}
+; CHECK-DAG: v_mov_b32_e32 v[[LO_VREG:[0-9]+]], s[[LO_SREG]]
+; CHECK-DAG: v_mov_b32_e32 v[[HI_VREG:[0-9]+]], s[[HI_SREG]]
+; CHECK: buffer_store_dwordx2 v{{\[}}[[LO_VREG]]:[[HI_VREG]]{{\]}}
+define void @store_literal_imm_f64(double addrspace(1)* %out) {
+ store double 4096.0, double addrspace(1)* %out
+ ret void
+}
diff --git a/test/CodeGen/R600/indirect-addressing-si.ll b/test/CodeGen/R600/indirect-addressing-si.ll
index 0ba1614..9cd2d84 100644
--- a/test/CodeGen/R600/indirect-addressing-si.ll
+++ b/test/CodeGen/R600/indirect-addressing-si.ll
@@ -1,4 +1,5 @@
-; RUN: llc < %s -march=r600 -mcpu=SI -verify-machineinstrs | FileCheck %s
+; RUN: llc < %s -march=amdgcn -mcpu=SI -verify-machineinstrs | FileCheck %s
+; RUN: llc < %s -march=amdgcn -mcpu=tonga -verify-machineinstrs | FileCheck %s
; Tests for indirect addressing on SI, which is implemented using dynamic
; indexing of vectors.
diff --git a/test/CodeGen/R600/indirect-private-64.ll b/test/CodeGen/R600/indirect-private-64.ll
index e0a6ce1..cb06d60 100644
--- a/test/CodeGen/R600/indirect-private-64.ll
+++ b/test/CodeGen/R600/indirect-private-64.ll
@@ -1,5 +1,7 @@
-; RUN: llc -march=r600 -mcpu=SI -mattr=-promote-alloca -verify-machineinstrs < %s | FileCheck -check-prefix=SI-ALLOCA -check-prefix=SI %s
-; RUN: llc -march=r600 -mcpu=SI -mattr=+promote-alloca -verify-machineinstrs < %s | FileCheck -check-prefix=SI-PROMOTE -check-prefix=SI %s
+; RUN: llc -march=amdgcn -mcpu=SI -mattr=-promote-alloca -verify-machineinstrs < %s | FileCheck -check-prefix=SI-ALLOCA -check-prefix=SI %s
+; RUN: llc -march=amdgcn -mcpu=SI -mattr=+promote-alloca -verify-machineinstrs < %s | FileCheck -check-prefix=SI-PROMOTE -check-prefix=SI %s
+; RUN: llc -march=amdgcn -mcpu=tonga -mattr=-promote-alloca -verify-machineinstrs < %s | FileCheck -check-prefix=SI-ALLOCA -check-prefix=SI %s
+; RUN: llc -march=amdgcn -mcpu=tonga -mattr=+promote-alloca -verify-machineinstrs < %s | FileCheck -check-prefix=SI-PROMOTE -check-prefix=SI %s
declare void @llvm.AMDGPU.barrier.local() noduplicate nounwind
diff --git a/test/CodeGen/R600/infinite-loop.ll b/test/CodeGen/R600/infinite-loop.ll
index 48edab0..7233aa5 100644
--- a/test/CodeGen/R600/infinite-loop.ll
+++ b/test/CodeGen/R600/infinite-loop.ll
@@ -1,4 +1,5 @@
-; RUN: llc -march=r600 -mcpu=SI -verify-machineinstrs < %s | FileCheck -check-prefix=SI %s
+; RUN: llc -march=amdgcn -mcpu=SI -verify-machineinstrs < %s | FileCheck -check-prefix=SI %s
+; RUN: llc -march=amdgcn -mcpu=tonga -verify-machineinstrs < %s | FileCheck -check-prefix=SI %s
; SI-LABEL: {{^}}infinite_loop:
; SI: v_mov_b32_e32 [[REG:v[0-9]+]], 0x3e7
diff --git a/test/CodeGen/R600/inline-asm.ll b/test/CodeGen/R600/inline-asm.ll
new file mode 100644
index 0000000..efc2292
--- /dev/null
+++ b/test/CodeGen/R600/inline-asm.ll
@@ -0,0 +1,12 @@
+; RUN: llc < %s -march=amdgcn -mcpu=SI -verify-machineinstrs | FileCheck %s
+; RUN: llc < %s -march=amdgcn -mcpu=tonga -verify-machineinstrs | FileCheck %s
+
+; CHECK: {{^}}inline_asm:
+; CHECK: s_endpgm
+; CHECK: s_endpgm
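+; (two s_endpgm matches are expected: one from the inline asm string below,
+; one from the normal kernel epilogue)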
+define void @inline_asm(i32 addrspace(1)* %out) {
+entry:
+ store i32 5, i32 addrspace(1)* %out
+ call void asm sideeffect "s_endpgm", ""()
+ ret void
+}
diff --git a/test/CodeGen/R600/inline-calls.ll b/test/CodeGen/R600/inline-calls.ll
index 3bceeca..33a4c83 100644
--- a/test/CodeGen/R600/inline-calls.ll
+++ b/test/CodeGen/R600/inline-calls.ll
@@ -1,4 +1,5 @@
-; RUN: llc -march=r600 -mcpu=SI -verify-machineinstrs < %s | FileCheck %s
+; RUN: llc -march=amdgcn -mcpu=SI -verify-machineinstrs < %s | FileCheck %s
+; RUN: llc -march=amdgcn -mcpu=tonga -verify-machineinstrs < %s | FileCheck %s
; RUN: llc -march=r600 -mcpu=redwood -verify-machineinstrs < %s | FileCheck %s
; CHECK-NOT: {{^}}func:
diff --git a/test/CodeGen/R600/input-mods.ll b/test/CodeGen/R600/input-mods.ll
index e3e9499..1c4d285 100644
--- a/test/CodeGen/R600/input-mods.ll
+++ b/test/CodeGen/R600/input-mods.ll
@@ -1,13 +1,13 @@
-;RUN: llc < %s -march=r600 -mcpu=redwood | FileCheck %s --check-prefix=EG-CHECK
-;RUN: llc < %s -march=r600 -mcpu=cayman | FileCheck %s --check-prefix=CM-CHECK
+;RUN: llc < %s -march=r600 -mcpu=redwood | FileCheck %s --check-prefix=EG
+;RUN: llc < %s -march=r600 -mcpu=cayman | FileCheck %s --check-prefix=CM
-;EG-CHECK-LABEL: {{^}}test:
-;EG-CHECK: EXP_IEEE *
-;CM-CHECK-LABEL: {{^}}test:
-;CM-CHECK: EXP_IEEE T{{[0-9]+}}.X, -|T{{[0-9]+}}.X|
-;CM-CHECK: EXP_IEEE T{{[0-9]+}}.Y (MASKED), -|T{{[0-9]+}}.X|
-;CM-CHECK: EXP_IEEE T{{[0-9]+}}.Z (MASKED), -|T{{[0-9]+}}.X|
-;CM-CHECK: EXP_IEEE * T{{[0-9]+}}.W (MASKED), -|T{{[0-9]+}}.X|
+;EG-LABEL: {{^}}test:
+;EG: EXP_IEEE *
+;CM-LABEL: {{^}}test:
+;CM: EXP_IEEE T{{[0-9]+}}.X, -|T{{[0-9]+}}.X|
+;CM: EXP_IEEE T{{[0-9]+}}.Y (MASKED), -|T{{[0-9]+}}.X|
+;CM: EXP_IEEE T{{[0-9]+}}.Z (MASKED), -|T{{[0-9]+}}.X|
+;CM: EXP_IEEE * T{{[0-9]+}}.W (MASKED), -|T{{[0-9]+}}.X|
define void @test(<4 x float> inreg %reg0) #0 {
%r0 = extractelement <4 x float> %reg0, i32 0
diff --git a/test/CodeGen/R600/insert_subreg.ll b/test/CodeGen/R600/insert_subreg.ll
index e311e19..4a5e886 100644
--- a/test/CodeGen/R600/insert_subreg.ll
+++ b/test/CodeGen/R600/insert_subreg.ll
@@ -1,4 +1,5 @@
-; RUN: llc -march=r600 -mcpu=SI -mattr=-promote-alloca -verify-machineinstrs < %s
+; RUN: llc -march=amdgcn -mcpu=SI -mattr=-promote-alloca -verify-machineinstrs < %s
+; RUN: llc -march=amdgcn -mcpu=tonga -mattr=-promote-alloca -verify-machineinstrs < %s
; Test that INSERT_SUBREG instructions don't have non-register operands after
; instruction selection.
diff --git a/test/CodeGen/R600/insert_vector_elt.ll b/test/CodeGen/R600/insert_vector_elt.ll
index 857c414..64afddc 100644
--- a/test/CodeGen/R600/insert_vector_elt.ll
+++ b/test/CodeGen/R600/insert_vector_elt.ll
@@ -1,4 +1,5 @@
-; RUN: llc -verify-machineinstrs -march=r600 -mcpu=SI < %s | FileCheck -check-prefix=SI %s
+; RUN: llc -verify-machineinstrs -march=amdgcn -mcpu=SI < %s | FileCheck -check-prefix=SI %s
+; RUN: llc -verify-machineinstrs -march=amdgcn -mcpu=tonga < %s | FileCheck -check-prefix=SI %s
; FIXME: Broken on evergreen
; FIXME: For some reason the 8 and 16 vectors are being stored as
diff --git a/test/CodeGen/R600/kernel-args.ll b/test/CodeGen/R600/kernel-args.ll
index 9a7da90..5db45ce 100644
--- a/test/CodeGen/R600/kernel-args.ll
+++ b/test/CodeGen/R600/kernel-args.ll
@@ -1,11 +1,11 @@
-; RUN: llc < %s -march=r600 -mcpu=redwood | FileCheck %s --check-prefix=EG-CHECK
-; RUN: llc < %s -march=r600 -mcpu=cayman | FileCheck %s --check-prefix=EG-CHECK
-; RUN: llc < %s -march=r600 -mcpu=SI -verify-machineinstrs | FileCheck %s --check-prefix=SI-CHECK
+; RUN: llc < %s -march=amdgcn -mcpu=SI -verify-machineinstrs | FileCheck %s --check-prefix=SI --check-prefix=GCN --check-prefix=FUNC
+; RUN: llc < %s -march=amdgcn -mcpu=tonga -verify-machineinstrs | FileCheck %s --check-prefix=VI --check-prefix=GCN --check-prefix=FUNC
+; RUN: llc < %s -march=r600 -mcpu=redwood | FileCheck %s --check-prefix=EG --check-prefix=FUNC
+; RUN: llc < %s -march=r600 -mcpu=cayman | FileCheck %s --check-prefix=EG --check-prefix=FUNC
-; EG-CHECK-LABEL: {{^}}i8_arg:
-; EG-CHECK: MOV {{[ *]*}}T{{[0-9]+\.[XYZW]}}, KC0[2].Z
-; SI-CHECK-LABEL: {{^}}i8_arg:
-; SI-CHECK: buffer_load_ubyte
+; FUNC-LABEL: {{^}}i8_arg:
+; EG: MOV {{[ *]*}}T{{[0-9]+\.[XYZW]}}, KC0[2].Z
+; GCN: buffer_load_ubyte
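+; (a plain i8 argument carries no zeroext/signext attribute, so it is
+; re-loaded as a byte; the extended variants below can use a scalar dword
+; load instead)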
define void @i8_arg(i32 addrspace(1)* nocapture %out, i8 %in) nounwind {
entry:
@@ -14,10 +14,10 @@ entry:
ret void
}
-; EG-CHECK-LABEL: {{^}}i8_zext_arg:
-; EG-CHECK: MOV {{[ *]*}}T{{[0-9]+\.[XYZW]}}, KC0[2].Z
-; SI-CHECK-LABEL: {{^}}i8_zext_arg:
-; SI-CHECK: s_load_dword s{{[0-9]}}, s[0:1], 0xb
+; FUNC-LABEL: {{^}}i8_zext_arg:
+; EG: MOV {{[ *]*}}T{{[0-9]+\.[XYZW]}}, KC0[2].Z
+; SI: s_load_dword s{{[0-9]}}, s[0:1], 0xb
+; VI: s_load_dword s{{[0-9]}}, s[0:1], 0x2c
define void @i8_zext_arg(i32 addrspace(1)* nocapture %out, i8 zeroext %in) nounwind {
entry:
@@ -26,10 +26,10 @@ entry:
ret void
}
-; EG-CHECK-LABEL: {{^}}i8_sext_arg:
-; EG-CHECK: MOV {{[ *]*}}T{{[0-9]+\.[XYZW]}}, KC0[2].Z
-; SI-CHECK-LABEL: {{^}}i8_sext_arg:
-; SI-CHECK: s_load_dword s{{[0-9]}}, s[0:1], 0xb
+; FUNC-LABEL: {{^}}i8_sext_arg:
+; EG: MOV {{[ *]*}}T{{[0-9]+\.[XYZW]}}, KC0[2].Z
+; SI: s_load_dword s{{[0-9]}}, s[0:1], 0xb
+; VI: s_load_dword s{{[0-9]}}, s[0:1], 0x2c
define void @i8_sext_arg(i32 addrspace(1)* nocapture %out, i8 signext %in) nounwind {
entry:
@@ -38,10 +38,9 @@ entry:
ret void
}
-; EG-CHECK-LABEL: {{^}}i16_arg:
-; EG-CHECK: MOV {{[ *]*}}T{{[0-9]+\.[XYZW]}}, KC0[2].Z
-; SI-CHECK-LABEL: {{^}}i16_arg:
-; SI-CHECK: buffer_load_ushort
+; FUNC-LABEL: {{^}}i16_arg:
+; EG: MOV {{[ *]*}}T{{[0-9]+\.[XYZW]}}, KC0[2].Z
+; GCN: buffer_load_ushort
define void @i16_arg(i32 addrspace(1)* nocapture %out, i16 %in) nounwind {
entry:
@@ -50,10 +49,10 @@ entry:
ret void
}
-; EG-CHECK-LABEL: {{^}}i16_zext_arg:
-; EG-CHECK: MOV {{[ *]*}}T{{[0-9]+\.[XYZW]}}, KC0[2].Z
-; SI-CHECK-LABEL: {{^}}i16_zext_arg:
-; SI-CHECK: s_load_dword s{{[0-9]}}, s[0:1], 0xb
+; FUNC-LABEL: {{^}}i16_zext_arg:
+; EG: MOV {{[ *]*}}T{{[0-9]+\.[XYZW]}}, KC0[2].Z
+; SI: s_load_dword s{{[0-9]}}, s[0:1], 0xb
+; VI: s_load_dword s{{[0-9]}}, s[0:1], 0x2c
define void @i16_zext_arg(i32 addrspace(1)* nocapture %out, i16 zeroext %in) nounwind {
entry:
@@ -62,10 +61,10 @@ entry:
ret void
}
-; EG-CHECK-LABEL: {{^}}i16_sext_arg:
-; EG-CHECK: MOV {{[ *]*}}T{{[0-9]+\.[XYZW]}}, KC0[2].Z
-; SI-CHECK-LABEL: {{^}}i16_sext_arg:
-; SI-CHECK: s_load_dword s{{[0-9]}}, s[0:1], 0xb
+; FUNC-LABEL: {{^}}i16_sext_arg:
+; EG: MOV {{[ *]*}}T{{[0-9]+\.[XYZW]}}, KC0[2].Z
+; SI: s_load_dword s{{[0-9]}}, s[0:1], 0xb
+; VI: s_load_dword s{{[0-9]}}, s[0:1], 0x2c
define void @i16_sext_arg(i32 addrspace(1)* nocapture %out, i16 signext %in) nounwind {
entry:
@@ -74,380 +73,369 @@ entry:
ret void
}
-; EG-CHECK-LABEL: {{^}}i32_arg:
-; EG-CHECK: T{{[0-9]\.[XYZW]}}, KC0[2].Z
-; SI-CHECK-LABEL: {{^}}i32_arg:
-; s_load_dword s{{[0-9]}}, s[0:1], 0xb
+; FUNC-LABEL: {{^}}i32_arg:
+; EG: T{{[0-9]\.[XYZW]}}, KC0[2].Z
+; SI: s_load_dword s{{[0-9]}}, s[0:1], 0xb
+; VI: s_load_dword s{{[0-9]}}, s[0:1], 0x2c
define void @i32_arg(i32 addrspace(1)* nocapture %out, i32 %in) nounwind {
entry:
store i32 %in, i32 addrspace(1)* %out, align 4
ret void
}
-; EG-CHECK-LABEL: {{^}}f32_arg:
-; EG-CHECK: T{{[0-9]\.[XYZW]}}, KC0[2].Z
-; SI-CHECK-LABEL: {{^}}f32_arg:
-; s_load_dword s{{[0-9]}}, s[0:1], 0xb
+; FUNC-LABEL: {{^}}f32_arg:
+; EG: T{{[0-9]\.[XYZW]}}, KC0[2].Z
+; SI: s_load_dword s{{[0-9]}}, s[0:1], 0xb
+; VI: s_load_dword s{{[0-9]}}, s[0:1], 0x2c
define void @f32_arg(float addrspace(1)* nocapture %out, float %in) nounwind {
entry:
store float %in, float addrspace(1)* %out, align 4
ret void
}
-; EG-CHECK-LABEL: {{^}}v2i8_arg:
-; EG-CHECK: VTX_READ_8
-; EG-CHECK: VTX_READ_8
-; SI-CHECK-LABEL: {{^}}v2i8_arg:
-; SI-CHECK: buffer_load_ubyte
-; SI-CHECK: buffer_load_ubyte
+; FUNC-LABEL: {{^}}v2i8_arg:
+; EG: VTX_READ_8
+; EG: VTX_READ_8
+; GCN: buffer_load_ubyte
+; GCN: buffer_load_ubyte
define void @v2i8_arg(<2 x i8> addrspace(1)* %out, <2 x i8> %in) {
entry:
store <2 x i8> %in, <2 x i8> addrspace(1)* %out
ret void
}
-; EG-CHECK-LABEL: {{^}}v2i16_arg:
-; EG-CHECK: VTX_READ_16
-; EG-CHECK: VTX_READ_16
-; SI-CHECK-LABEL: {{^}}v2i16_arg:
-; SI-CHECK-DAG: buffer_load_ushort
-; SI-CHECK-DAG: buffer_load_ushort
+; FUNC-LABEL: {{^}}v2i16_arg:
+; EG: VTX_READ_16
+; EG: VTX_READ_16
+; GCN-DAG: buffer_load_ushort
+; GCN-DAG: buffer_load_ushort
define void @v2i16_arg(<2 x i16> addrspace(1)* %out, <2 x i16> %in) {
entry:
store <2 x i16> %in, <2 x i16> addrspace(1)* %out
ret void
}
-; EG-CHECK-LABEL: {{^}}v2i32_arg:
-; EG-CHECK-DAG: T{{[0-9]\.[XYZW]}}, KC0[3].X
-; EG-CHECK-DAG: T{{[0-9]\.[XYZW]}}, KC0[2].W
-; SI-CHECK-LABEL: {{^}}v2i32_arg:
-; SI-CHECK: s_load_dwordx2 s{{\[[0-9]:[0-9]\]}}, s[0:1], 0xb
+; FUNC-LABEL: {{^}}v2i32_arg:
+; EG-DAG: T{{[0-9]\.[XYZW]}}, KC0[3].X
+; EG-DAG: T{{[0-9]\.[XYZW]}}, KC0[2].W
+; SI: s_load_dwordx2 s{{\[[0-9]:[0-9]\]}}, s[0:1], 0xb
+; VI: s_load_dwordx2 s{{\[[0-9]:[0-9]\]}}, s[0:1], 0x2c
define void @v2i32_arg(<2 x i32> addrspace(1)* nocapture %out, <2 x i32> %in) nounwind {
entry:
store <2 x i32> %in, <2 x i32> addrspace(1)* %out, align 4
ret void
}
-; EG-CHECK-LABEL: {{^}}v2f32_arg:
-; EG-CHECK-DAG: T{{[0-9]\.[XYZW]}}, KC0[3].X
-; EG-CHECK-DAG: T{{[0-9]\.[XYZW]}}, KC0[2].W
-; SI-CHECK-LABEL: {{^}}v2f32_arg:
-; SI-CHECK: s_load_dwordx2 s{{\[[0-9]:[0-9]\]}}, s[0:1], 0xb
+; FUNC-LABEL: {{^}}v2f32_arg:
+; EG-DAG: T{{[0-9]\.[XYZW]}}, KC0[3].X
+; EG-DAG: T{{[0-9]\.[XYZW]}}, KC0[2].W
+; SI: s_load_dwordx2 s{{\[[0-9]:[0-9]\]}}, s[0:1], 0xb
+; VI: s_load_dwordx2 s{{\[[0-9]:[0-9]\]}}, s[0:1], 0x2c
define void @v2f32_arg(<2 x float> addrspace(1)* nocapture %out, <2 x float> %in) nounwind {
entry:
store <2 x float> %in, <2 x float> addrspace(1)* %out, align 4
ret void
}
-; EG-CHECK-LABEL: {{^}}v3i8_arg:
+; FUNC-LABEL: {{^}}v3i8_arg:
; VTX_READ_8 T{{[0-9]}}.X, T{{[0-9]}}.X, 40
; VTX_READ_8 T{{[0-9]}}.X, T{{[0-9]}}.X, 41
; VTX_READ_8 T{{[0-9]}}.X, T{{[0-9]}}.X, 42
-; SI-CHECK-LABEL: {{^}}v3i8_arg:
define void @v3i8_arg(<3 x i8> addrspace(1)* nocapture %out, <3 x i8> %in) nounwind {
entry:
store <3 x i8> %in, <3 x i8> addrspace(1)* %out, align 4
ret void
}
-; EG-CHECK-LABEL: {{^}}v3i16_arg:
+; FUNC-LABEL: {{^}}v3i16_arg:
; VTX_READ_16 T{{[0-9]}}.X, T{{[0-9]}}.X, 44
; VTX_READ_16 T{{[0-9]}}.X, T{{[0-9]}}.X, 46
; VTX_READ_16 T{{[0-9]}}.X, T{{[0-9]}}.X, 48
-; SI-CHECK-LABEL: {{^}}v3i16_arg:
define void @v3i16_arg(<3 x i16> addrspace(1)* nocapture %out, <3 x i16> %in) nounwind {
entry:
store <3 x i16> %in, <3 x i16> addrspace(1)* %out, align 4
ret void
}
-; EG-CHECK-LABEL: {{^}}v3i32_arg:
-; EG-CHECK-DAG: T{{[0-9]\.[XYZW]}}, KC0[3].Y
-; EG-CHECK-DAG: T{{[0-9]\.[XYZW]}}, KC0[3].Z
-; EG-CHECK-DAG: T{{[0-9]\.[XYZW]}}, KC0[3].W
-; SI-CHECK-LABEL: {{^}}v3i32_arg:
-; SI-CHECK: s_load_dwordx4 s{{\[[0-9]:[0-9]+\]}}, s[0:1], 0xd
+; FUNC-LABEL: {{^}}v3i32_arg:
+; EG-DAG: T{{[0-9]\.[XYZW]}}, KC0[3].Y
+; EG-DAG: T{{[0-9]\.[XYZW]}}, KC0[3].Z
+; EG-DAG: T{{[0-9]\.[XYZW]}}, KC0[3].W
+; SI: s_load_dwordx4 s{{\[[0-9]:[0-9]+\]}}, s[0:1], 0xd
+; VI: s_load_dwordx4 s{{\[[0-9]:[0-9]+\]}}, s[0:1], 0x34
define void @v3i32_arg(<3 x i32> addrspace(1)* nocapture %out, <3 x i32> %in) nounwind {
entry:
store <3 x i32> %in, <3 x i32> addrspace(1)* %out, align 4
ret void
}
-; EG-CHECK-LABEL: {{^}}v3f32_arg:
-; EG-CHECK-DAG: T{{[0-9]\.[XYZW]}}, KC0[3].Y
-; EG-CHECK-DAG: T{{[0-9]\.[XYZW]}}, KC0[3].Z
-; EG-CHECK-DAG: T{{[0-9]\.[XYZW]}}, KC0[3].W
-; SI-CHECK-LABEL: {{^}}v3f32_arg:
-; SI-CHECK: s_load_dwordx4 s{{\[[0-9]:[0-9]+\]}}, s[0:1], 0xd
+; FUNC-LABEL: {{^}}v3f32_arg:
+; EG-DAG: T{{[0-9]\.[XYZW]}}, KC0[3].Y
+; EG-DAG: T{{[0-9]\.[XYZW]}}, KC0[3].Z
+; EG-DAG: T{{[0-9]\.[XYZW]}}, KC0[3].W
+; SI: s_load_dwordx4 s{{\[[0-9]:[0-9]+\]}}, s[0:1], 0xd
+; VI: s_load_dwordx4 s{{\[[0-9]:[0-9]+\]}}, s[0:1], 0x34
define void @v3f32_arg(<3 x float> addrspace(1)* nocapture %out, <3 x float> %in) nounwind {
entry:
store <3 x float> %in, <3 x float> addrspace(1)* %out, align 4
ret void
}
-; EG-CHECK-LABEL: {{^}}v4i8_arg:
-; EG-CHECK: VTX_READ_8
-; EG-CHECK: VTX_READ_8
-; EG-CHECK: VTX_READ_8
-; EG-CHECK: VTX_READ_8
-; SI-CHECK-LABEL: {{^}}v4i8_arg:
-; SI-CHECK: buffer_load_ubyte
-; SI-CHECK: buffer_load_ubyte
-; SI-CHECK: buffer_load_ubyte
-; SI-CHECK: buffer_load_ubyte
+; FUNC-LABEL: {{^}}v4i8_arg:
+; EG: VTX_READ_8
+; EG: VTX_READ_8
+; EG: VTX_READ_8
+; EG: VTX_READ_8
+; GCN: buffer_load_ubyte
+; GCN: buffer_load_ubyte
+; GCN: buffer_load_ubyte
+; GCN: buffer_load_ubyte
define void @v4i8_arg(<4 x i8> addrspace(1)* %out, <4 x i8> %in) {
entry:
store <4 x i8> %in, <4 x i8> addrspace(1)* %out
ret void
}
-; EG-CHECK-LABEL: {{^}}v4i16_arg:
-; EG-CHECK: VTX_READ_16
-; EG-CHECK: VTX_READ_16
-; EG-CHECK: VTX_READ_16
-; EG-CHECK: VTX_READ_16
-; SI-CHECK-LABEL: {{^}}v4i16_arg:
-; SI-CHECK: buffer_load_ushort
-; SI-CHECK: buffer_load_ushort
-; SI-CHECK: buffer_load_ushort
-; SI-CHECK: buffer_load_ushort
+; FUNC-LABEL: {{^}}v4i16_arg:
+; EG: VTX_READ_16
+; EG: VTX_READ_16
+; EG: VTX_READ_16
+; EG: VTX_READ_16
+; GCN: buffer_load_ushort
+; GCN: buffer_load_ushort
+; GCN: buffer_load_ushort
+; GCN: buffer_load_ushort
define void @v4i16_arg(<4 x i16> addrspace(1)* %out, <4 x i16> %in) {
entry:
store <4 x i16> %in, <4 x i16> addrspace(1)* %out
ret void
}
-; EG-CHECK-LABEL: {{^}}v4i32_arg:
-; EG-CHECK-DAG: T{{[0-9]\.[XYZW]}}, KC0[3].Y
-; EG-CHECK-DAG: T{{[0-9]\.[XYZW]}}, KC0[3].Z
-; EG-CHECK-DAG: T{{[0-9]\.[XYZW]}}, KC0[3].W
-; EG-CHECK-DAG: T{{[0-9]\.[XYZW]}}, KC0[4].X
-; SI-CHECK-LABEL: {{^}}v4i32_arg:
-; SI-CHECK: s_load_dwordx4 s{{\[[0-9]:[0-9]\]}}, s[0:1], 0xd
+; FUNC-LABEL: {{^}}v4i32_arg:
+; EG-DAG: T{{[0-9]\.[XYZW]}}, KC0[3].Y
+; EG-DAG: T{{[0-9]\.[XYZW]}}, KC0[3].Z
+; EG-DAG: T{{[0-9]\.[XYZW]}}, KC0[3].W
+; EG-DAG: T{{[0-9]\.[XYZW]}}, KC0[4].X
+; SI: s_load_dwordx4 s{{\[[0-9]:[0-9]\]}}, s[0:1], 0xd
+; VI: s_load_dwordx4 s{{\[[0-9]:[0-9]\]}}, s[0:1], 0x34
define void @v4i32_arg(<4 x i32> addrspace(1)* nocapture %out, <4 x i32> %in) nounwind {
entry:
store <4 x i32> %in, <4 x i32> addrspace(1)* %out, align 4
ret void
}
-; EG-CHECK-LABEL: {{^}}v4f32_arg:
-; EG-CHECK-DAG: T{{[0-9]\.[XYZW]}}, KC0[3].Y
-; EG-CHECK-DAG: T{{[0-9]\.[XYZW]}}, KC0[3].Z
-; EG-CHECK-DAG: T{{[0-9]\.[XYZW]}}, KC0[3].W
-; EG-CHECK-DAG: T{{[0-9]\.[XYZW]}}, KC0[4].X
-; SI-CHECK-LABEL: {{^}}v4f32_arg:
-; SI-CHECK: s_load_dwordx4 s{{\[[0-9]:[0-9]\]}}, s[0:1], 0xd
+; FUNC-LABEL: {{^}}v4f32_arg:
+; EG-DAG: T{{[0-9]\.[XYZW]}}, KC0[3].Y
+; EG-DAG: T{{[0-9]\.[XYZW]}}, KC0[3].Z
+; EG-DAG: T{{[0-9]\.[XYZW]}}, KC0[3].W
+; EG-DAG: T{{[0-9]\.[XYZW]}}, KC0[4].X
+; SI: s_load_dwordx4 s{{\[[0-9]:[0-9]\]}}, s[0:1], 0xd
+; VI: s_load_dwordx4 s{{\[[0-9]:[0-9]\]}}, s[0:1], 0x34
define void @v4f32_arg(<4 x float> addrspace(1)* nocapture %out, <4 x float> %in) nounwind {
entry:
store <4 x float> %in, <4 x float> addrspace(1)* %out, align 4
ret void
}
-; EG-CHECK-LABEL: {{^}}v8i8_arg:
-; EG-CHECK: VTX_READ_8
-; EG-CHECK: VTX_READ_8
-; EG-CHECK: VTX_READ_8
-; EG-CHECK: VTX_READ_8
-; EG-CHECK: VTX_READ_8
-; EG-CHECK: VTX_READ_8
-; EG-CHECK: VTX_READ_8
-; EG-CHECK: VTX_READ_8
-; SI-CHECK-LABEL: {{^}}v8i8_arg:
-; SI-CHECK: buffer_load_ubyte
-; SI-CHECK: buffer_load_ubyte
-; SI-CHECK: buffer_load_ubyte
-; SI-CHECK: buffer_load_ubyte
-; SI-CHECK: buffer_load_ubyte
-; SI-CHECK: buffer_load_ubyte
-; SI-CHECK: buffer_load_ubyte
+; FUNC-LABEL: {{^}}v8i8_arg:
+; EG: VTX_READ_8
+; EG: VTX_READ_8
+; EG: VTX_READ_8
+; EG: VTX_READ_8
+; EG: VTX_READ_8
+; EG: VTX_READ_8
+; EG: VTX_READ_8
+; EG: VTX_READ_8
+; GCN: buffer_load_ubyte
+; GCN: buffer_load_ubyte
+; GCN: buffer_load_ubyte
+; GCN: buffer_load_ubyte
+; GCN: buffer_load_ubyte
+; GCN: buffer_load_ubyte
+; GCN: buffer_load_ubyte
define void @v8i8_arg(<8 x i8> addrspace(1)* %out, <8 x i8> %in) {
entry:
store <8 x i8> %in, <8 x i8> addrspace(1)* %out
ret void
}
-; EG-CHECK-LABEL: {{^}}v8i16_arg:
-; EG-CHECK: VTX_READ_16
-; EG-CHECK: VTX_READ_16
-; EG-CHECK: VTX_READ_16
-; EG-CHECK: VTX_READ_16
-; EG-CHECK: VTX_READ_16
-; EG-CHECK: VTX_READ_16
-; EG-CHECK: VTX_READ_16
-; EG-CHECK: VTX_READ_16
-; SI-CHECK-LABEL: {{^}}v8i16_arg:
-; SI-CHECK: buffer_load_ushort
-; SI-CHECK: buffer_load_ushort
-; SI-CHECK: buffer_load_ushort
-; SI-CHECK: buffer_load_ushort
-; SI-CHECK: buffer_load_ushort
-; SI-CHECK: buffer_load_ushort
-; SI-CHECK: buffer_load_ushort
-; SI-CHECK: buffer_load_ushort
+; FUNC-LABEL: {{^}}v8i16_arg:
+; EG: VTX_READ_16
+; EG: VTX_READ_16
+; EG: VTX_READ_16
+; EG: VTX_READ_16
+; EG: VTX_READ_16
+; EG: VTX_READ_16
+; EG: VTX_READ_16
+; EG: VTX_READ_16
+; GCN: buffer_load_ushort
+; GCN: buffer_load_ushort
+; GCN: buffer_load_ushort
+; GCN: buffer_load_ushort
+; GCN: buffer_load_ushort
+; GCN: buffer_load_ushort
+; GCN: buffer_load_ushort
+; GCN: buffer_load_ushort
define void @v8i16_arg(<8 x i16> addrspace(1)* %out, <8 x i16> %in) {
entry:
store <8 x i16> %in, <8 x i16> addrspace(1)* %out
ret void
}
-; EG-CHECK-LABEL: {{^}}v8i32_arg:
-; EG-CHECK-DAG: T{{[0-9]\.[XYZW]}}, KC0[4].Y
-; EG-CHECK-DAG: T{{[0-9]\.[XYZW]}}, KC0[4].Z
-; EG-CHECK-DAG: T{{[0-9]\.[XYZW]}}, KC0[4].W
-; EG-CHECK-DAG: T{{[0-9]\.[XYZW]}}, KC0[5].X
-; EG-CHECK-DAG: T{{[0-9]\.[XYZW]}}, KC0[5].Y
-; EG-CHECK-DAG: T{{[0-9]\.[XYZW]}}, KC0[5].Z
-; EG-CHECK-DAG: T{{[0-9]\.[XYZW]}}, KC0[5].W
-; EG-CHECK-DAG: T{{[0-9]\.[XYZW]}}, KC0[6].X
-; SI-CHECK-LABEL: {{^}}v8i32_arg:
-; SI-CHECK: s_load_dwordx8 s{{\[[0-9]:[0-9]+\]}}, s[0:1], 0x11
+; FUNC-LABEL: {{^}}v8i32_arg:
+; EG-DAG: T{{[0-9]\.[XYZW]}}, KC0[4].Y
+; EG-DAG: T{{[0-9]\.[XYZW]}}, KC0[4].Z
+; EG-DAG: T{{[0-9]\.[XYZW]}}, KC0[4].W
+; EG-DAG: T{{[0-9]\.[XYZW]}}, KC0[5].X
+; EG-DAG: T{{[0-9]\.[XYZW]}}, KC0[5].Y
+; EG-DAG: T{{[0-9]\.[XYZW]}}, KC0[5].Z
+; EG-DAG: T{{[0-9]\.[XYZW]}}, KC0[5].W
+; EG-DAG: T{{[0-9]\.[XYZW]}}, KC0[6].X
+; SI: s_load_dwordx8 s{{\[[0-9]:[0-9]+\]}}, s[0:1], 0x11
+; VI: s_load_dwordx8 s{{\[[0-9]:[0-9]+\]}}, s[0:1], 0x44
define void @v8i32_arg(<8 x i32> addrspace(1)* nocapture %out, <8 x i32> %in) nounwind {
entry:
store <8 x i32> %in, <8 x i32> addrspace(1)* %out, align 4
ret void
}
-; EG-CHECK-LABEL: {{^}}v8f32_arg:
-; EG-CHECK-DAG: T{{[0-9]\.[XYZW]}}, KC0[4].Y
-; EG-CHECK-DAG: T{{[0-9]\.[XYZW]}}, KC0[4].Z
-; EG-CHECK-DAG: T{{[0-9]\.[XYZW]}}, KC0[4].W
-; EG-CHECK-DAG: T{{[0-9]\.[XYZW]}}, KC0[5].X
-; EG-CHECK-DAG: T{{[0-9]\.[XYZW]}}, KC0[5].Y
-; EG-CHECK-DAG: T{{[0-9]\.[XYZW]}}, KC0[5].Z
-; EG-CHECK-DAG: T{{[0-9]\.[XYZW]}}, KC0[5].W
-; EG-CHECK-DAG: T{{[0-9]\.[XYZW]}}, KC0[6].X
-; SI-CHECK-LABEL: {{^}}v8f32_arg:
-; SI-CHECK: s_load_dwordx8 s{{\[[0-9]:[0-9]+\]}}, s[0:1], 0x11
+; FUNC-LABEL: {{^}}v8f32_arg:
+; EG-DAG: T{{[0-9]\.[XYZW]}}, KC0[4].Y
+; EG-DAG: T{{[0-9]\.[XYZW]}}, KC0[4].Z
+; EG-DAG: T{{[0-9]\.[XYZW]}}, KC0[4].W
+; EG-DAG: T{{[0-9]\.[XYZW]}}, KC0[5].X
+; EG-DAG: T{{[0-9]\.[XYZW]}}, KC0[5].Y
+; EG-DAG: T{{[0-9]\.[XYZW]}}, KC0[5].Z
+; EG-DAG: T{{[0-9]\.[XYZW]}}, KC0[5].W
+; EG-DAG: T{{[0-9]\.[XYZW]}}, KC0[6].X
+; SI: s_load_dwordx8 s{{\[[0-9]:[0-9]+\]}}, s[0:1], 0x11
define void @v8f32_arg(<8 x float> addrspace(1)* nocapture %out, <8 x float> %in) nounwind {
entry:
store <8 x float> %in, <8 x float> addrspace(1)* %out, align 4
ret void
}
-; EG-CHECK-LABEL: {{^}}v16i8_arg:
-; EG-CHECK: VTX_READ_8
-; EG-CHECK: VTX_READ_8
-; EG-CHECK: VTX_READ_8
-; EG-CHECK: VTX_READ_8
-; EG-CHECK: VTX_READ_8
-; EG-CHECK: VTX_READ_8
-; EG-CHECK: VTX_READ_8
-; EG-CHECK: VTX_READ_8
-; EG-CHECK: VTX_READ_8
-; EG-CHECK: VTX_READ_8
-; EG-CHECK: VTX_READ_8
-; EG-CHECK: VTX_READ_8
-; EG-CHECK: VTX_READ_8
-; EG-CHECK: VTX_READ_8
-; EG-CHECK: VTX_READ_8
-; EG-CHECK: VTX_READ_8
-; SI-CHECK-LABEL: {{^}}v16i8_arg:
-; SI-CHECK: buffer_load_ubyte
-; SI-CHECK: buffer_load_ubyte
-; SI-CHECK: buffer_load_ubyte
-; SI-CHECK: buffer_load_ubyte
-; SI-CHECK: buffer_load_ubyte
-; SI-CHECK: buffer_load_ubyte
-; SI-CHECK: buffer_load_ubyte
-; SI-CHECK: buffer_load_ubyte
-; SI-CHECK: buffer_load_ubyte
-; SI-CHECK: buffer_load_ubyte
-; SI-CHECK: buffer_load_ubyte
-; SI-CHECK: buffer_load_ubyte
-; SI-CHECK: buffer_load_ubyte
-; SI-CHECK: buffer_load_ubyte
-; SI-CHECK: buffer_load_ubyte
-; SI-CHECK: buffer_load_ubyte
+; FUNC-LABEL: {{^}}v16i8_arg:
+; EG: VTX_READ_8
+; EG: VTX_READ_8
+; EG: VTX_READ_8
+; EG: VTX_READ_8
+; EG: VTX_READ_8
+; EG: VTX_READ_8
+; EG: VTX_READ_8
+; EG: VTX_READ_8
+; EG: VTX_READ_8
+; EG: VTX_READ_8
+; EG: VTX_READ_8
+; EG: VTX_READ_8
+; EG: VTX_READ_8
+; EG: VTX_READ_8
+; EG: VTX_READ_8
+; EG: VTX_READ_8
+; GCN: buffer_load_ubyte
+; GCN: buffer_load_ubyte
+; GCN: buffer_load_ubyte
+; GCN: buffer_load_ubyte
+; GCN: buffer_load_ubyte
+; GCN: buffer_load_ubyte
+; GCN: buffer_load_ubyte
+; GCN: buffer_load_ubyte
+; GCN: buffer_load_ubyte
+; GCN: buffer_load_ubyte
+; GCN: buffer_load_ubyte
+; GCN: buffer_load_ubyte
+; GCN: buffer_load_ubyte
+; GCN: buffer_load_ubyte
+; GCN: buffer_load_ubyte
+; GCN: buffer_load_ubyte
define void @v16i8_arg(<16 x i8> addrspace(1)* %out, <16 x i8> %in) {
entry:
store <16 x i8> %in, <16 x i8> addrspace(1)* %out
ret void
}
-; EG-CHECK-LABEL: {{^}}v16i16_arg:
-; EG-CHECK: VTX_READ_16
-; EG-CHECK: VTX_READ_16
-; EG-CHECK: VTX_READ_16
-; EG-CHECK: VTX_READ_16
-; EG-CHECK: VTX_READ_16
-; EG-CHECK: VTX_READ_16
-; EG-CHECK: VTX_READ_16
-; EG-CHECK: VTX_READ_16
-; EG-CHECK: VTX_READ_16
-; EG-CHECK: VTX_READ_16
-; EG-CHECK: VTX_READ_16
-; EG-CHECK: VTX_READ_16
-; EG-CHECK: VTX_READ_16
-; EG-CHECK: VTX_READ_16
-; EG-CHECK: VTX_READ_16
-; EG-CHECK: VTX_READ_16
-; SI-CHECK-LABEL: {{^}}v16i16_arg:
-; SI-CHECK: buffer_load_ushort
-; SI-CHECK: buffer_load_ushort
-; SI-CHECK: buffer_load_ushort
-; SI-CHECK: buffer_load_ushort
-; SI-CHECK: buffer_load_ushort
-; SI-CHECK: buffer_load_ushort
-; SI-CHECK: buffer_load_ushort
-; SI-CHECK: buffer_load_ushort
-; SI-CHECK: buffer_load_ushort
-; SI-CHECK: buffer_load_ushort
-; SI-CHECK: buffer_load_ushort
-; SI-CHECK: buffer_load_ushort
-; SI-CHECK: buffer_load_ushort
-; SI-CHECK: buffer_load_ushort
-; SI-CHECK: buffer_load_ushort
-; SI-CHECK: buffer_load_ushort
+; FUNC-LABEL: {{^}}v16i16_arg:
+; EG: VTX_READ_16
+; EG: VTX_READ_16
+; EG: VTX_READ_16
+; EG: VTX_READ_16
+; EG: VTX_READ_16
+; EG: VTX_READ_16
+; EG: VTX_READ_16
+; EG: VTX_READ_16
+; EG: VTX_READ_16
+; EG: VTX_READ_16
+; EG: VTX_READ_16
+; EG: VTX_READ_16
+; EG: VTX_READ_16
+; EG: VTX_READ_16
+; EG: VTX_READ_16
+; EG: VTX_READ_16
+; GCN: buffer_load_ushort
+; GCN: buffer_load_ushort
+; GCN: buffer_load_ushort
+; GCN: buffer_load_ushort
+; GCN: buffer_load_ushort
+; GCN: buffer_load_ushort
+; GCN: buffer_load_ushort
+; GCN: buffer_load_ushort
+; GCN: buffer_load_ushort
+; GCN: buffer_load_ushort
+; GCN: buffer_load_ushort
+; GCN: buffer_load_ushort
+; GCN: buffer_load_ushort
+; GCN: buffer_load_ushort
+; GCN: buffer_load_ushort
+; GCN: buffer_load_ushort
define void @v16i16_arg(<16 x i16> addrspace(1)* %out, <16 x i16> %in) {
entry:
store <16 x i16> %in, <16 x i16> addrspace(1)* %out
ret void
}
-; EG-CHECK-LABEL: {{^}}v16i32_arg:
-; EG-CHECK-DAG: T{{[0-9]\.[XYZW]}}, KC0[6].Y
-; EG-CHECK-DAG: T{{[0-9]\.[XYZW]}}, KC0[6].Z
-; EG-CHECK-DAG: T{{[0-9]\.[XYZW]}}, KC0[6].W
-; EG-CHECK-DAG: T{{[0-9]\.[XYZW]}}, KC0[7].X
-; EG-CHECK-DAG: T{{[0-9]\.[XYZW]}}, KC0[7].Y
-; EG-CHECK-DAG: T{{[0-9]\.[XYZW]}}, KC0[7].Z
-; EG-CHECK-DAG: T{{[0-9]\.[XYZW]}}, KC0[7].W
-; EG-CHECK-DAG: T{{[0-9]\.[XYZW]}}, KC0[8].X
-; EG-CHECK-DAG: T{{[0-9]\.[XYZW]}}, KC0[8].Y
-; EG-CHECK-DAG: T{{[0-9]\.[XYZW]}}, KC0[8].Z
-; EG-CHECK-DAG: T{{[0-9]\.[XYZW]}}, KC0[8].W
-; EG-CHECK-DAG: T{{[0-9]\.[XYZW]}}, KC0[9].X
-; EG-CHECK-DAG: T{{[0-9]\.[XYZW]}}, KC0[9].Y
-; EG-CHECK-DAG: T{{[0-9]\.[XYZW]}}, KC0[9].Z
-; EG-CHECK-DAG: T{{[0-9]\.[XYZW]}}, KC0[9].W
-; EG-CHECK-DAG: T{{[0-9]\.[XYZW]}}, KC0[10].X
-; SI-CHECK-LABEL: {{^}}v16i32_arg:
-; SI-CHECK: s_load_dwordx16 s{{\[[0-9]:[0-9]+\]}}, s[0:1], 0x19
+; FUNC-LABEL: {{^}}v16i32_arg:
+; EG-DAG: T{{[0-9]\.[XYZW]}}, KC0[6].Y
+; EG-DAG: T{{[0-9]\.[XYZW]}}, KC0[6].Z
+; EG-DAG: T{{[0-9]\.[XYZW]}}, KC0[6].W
+; EG-DAG: T{{[0-9]\.[XYZW]}}, KC0[7].X
+; EG-DAG: T{{[0-9]\.[XYZW]}}, KC0[7].Y
+; EG-DAG: T{{[0-9]\.[XYZW]}}, KC0[7].Z
+; EG-DAG: T{{[0-9]\.[XYZW]}}, KC0[7].W
+; EG-DAG: T{{[0-9]\.[XYZW]}}, KC0[8].X
+; EG-DAG: T{{[0-9]\.[XYZW]}}, KC0[8].Y
+; EG-DAG: T{{[0-9]\.[XYZW]}}, KC0[8].Z
+; EG-DAG: T{{[0-9]\.[XYZW]}}, KC0[8].W
+; EG-DAG: T{{[0-9]\.[XYZW]}}, KC0[9].X
+; EG-DAG: T{{[0-9]\.[XYZW]}}, KC0[9].Y
+; EG-DAG: T{{[0-9]\.[XYZW]}}, KC0[9].Z
+; EG-DAG: T{{[0-9]\.[XYZW]}}, KC0[9].W
+; EG-DAG: T{{[0-9]\.[XYZW]}}, KC0[10].X
+; SI: s_load_dwordx16 s{{\[[0-9]:[0-9]+\]}}, s[0:1], 0x19
+; VI: s_load_dwordx16 s{{\[[0-9]:[0-9]+\]}}, s[0:1], 0x64
define void @v16i32_arg(<16 x i32> addrspace(1)* nocapture %out, <16 x i32> %in) nounwind {
entry:
store <16 x i32> %in, <16 x i32> addrspace(1)* %out, align 4
ret void
}
-; EG-CHECK-LABEL: {{^}}v16f32_arg:
-; EG-CHECK-DAG: T{{[0-9]\.[XYZW]}}, KC0[6].Y
-; EG-CHECK-DAG: T{{[0-9]\.[XYZW]}}, KC0[6].Z
-; EG-CHECK-DAG: T{{[0-9]\.[XYZW]}}, KC0[6].W
-; EG-CHECK-DAG: T{{[0-9]\.[XYZW]}}, KC0[7].X
-; EG-CHECK-DAG: T{{[0-9]\.[XYZW]}}, KC0[7].Y
-; EG-CHECK-DAG: T{{[0-9]\.[XYZW]}}, KC0[7].Z
-; EG-CHECK-DAG: T{{[0-9]\.[XYZW]}}, KC0[7].W
-; EG-CHECK-DAG: T{{[0-9]\.[XYZW]}}, KC0[8].X
-; EG-CHECK-DAG: T{{[0-9]\.[XYZW]}}, KC0[8].Y
-; EG-CHECK-DAG: T{{[0-9]\.[XYZW]}}, KC0[8].Z
-; EG-CHECK-DAG: T{{[0-9]\.[XYZW]}}, KC0[8].W
-; EG-CHECK-DAG: T{{[0-9]\.[XYZW]}}, KC0[9].X
-; EG-CHECK-DAG: T{{[0-9]\.[XYZW]}}, KC0[9].Y
-; EG-CHECK-DAG: T{{[0-9]\.[XYZW]}}, KC0[9].Z
-; EG-CHECK-DAG: T{{[0-9]\.[XYZW]}}, KC0[9].W
-; EG-CHECK-DAG: T{{[0-9]\.[XYZW]}}, KC0[10].X
-; SI-CHECK-LABEL: {{^}}v16f32_arg:
-; SI-CHECK: s_load_dwordx16 s{{\[[0-9]:[0-9]+\]}}, s[0:1], 0x19
+; FUNC-LABEL: {{^}}v16f32_arg:
+; EG-DAG: T{{[0-9]\.[XYZW]}}, KC0[6].Y
+; EG-DAG: T{{[0-9]\.[XYZW]}}, KC0[6].Z
+; EG-DAG: T{{[0-9]\.[XYZW]}}, KC0[6].W
+; EG-DAG: T{{[0-9]\.[XYZW]}}, KC0[7].X
+; EG-DAG: T{{[0-9]\.[XYZW]}}, KC0[7].Y
+; EG-DAG: T{{[0-9]\.[XYZW]}}, KC0[7].Z
+; EG-DAG: T{{[0-9]\.[XYZW]}}, KC0[7].W
+; EG-DAG: T{{[0-9]\.[XYZW]}}, KC0[8].X
+; EG-DAG: T{{[0-9]\.[XYZW]}}, KC0[8].Y
+; EG-DAG: T{{[0-9]\.[XYZW]}}, KC0[8].Z
+; EG-DAG: T{{[0-9]\.[XYZW]}}, KC0[8].W
+; EG-DAG: T{{[0-9]\.[XYZW]}}, KC0[9].X
+; EG-DAG: T{{[0-9]\.[XYZW]}}, KC0[9].Y
+; EG-DAG: T{{[0-9]\.[XYZW]}}, KC0[9].Z
+; EG-DAG: T{{[0-9]\.[XYZW]}}, KC0[9].W
+; EG-DAG: T{{[0-9]\.[XYZW]}}, KC0[10].X
+; SI: s_load_dwordx16 s{{\[[0-9]:[0-9]+\]}}, s[0:1], 0x19
+; VI: s_load_dwordx16 s{{\[[0-9]:[0-9]+\]}}, s[0:1], 0x64
define void @v16f32_arg(<16 x float> addrspace(1)* nocapture %out, <16 x float> %in) nounwind {
entry:
store <16 x float> %in, <16 x float> addrspace(1)* %out, align 4
@@ -455,18 +443,18 @@ entry:
}
; FUNC-LABEL: {{^}}kernel_arg_i64:
-; SI: s_load_dwordx2
-; SI: s_load_dwordx2
-; SI: buffer_store_dwordx2
+; GCN: s_load_dwordx2
+; GCN: s_load_dwordx2
+; GCN: buffer_store_dwordx2
define void @kernel_arg_i64(i64 addrspace(1)* %out, i64 %a) nounwind {
store i64 %a, i64 addrspace(1)* %out, align 8
ret void
}
; XFUNC-LABEL: {{^}}kernel_arg_v1i64:
-; XSI: s_load_dwordx2
-; XSI: s_load_dwordx2
-; XSI: buffer_store_dwordx2
+; XGCN: s_load_dwordx2
+; XGCN: s_load_dwordx2
+; XGCN: buffer_store_dwordx2
; define void @kernel_arg_v1i64(<1 x i64> addrspace(1)* %out, <1 x i64> %a) nounwind {
; store <1 x i64> %a, <1 x i64> addrspace(1)* %out, align 8
; ret void
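The offset rewrites in this file follow one rule: SI encodes s_load (SMRD) offsets in dwords, while VI (tonga) SMEM offsets are in bytes, so every kernel-argument offset above scales by 4. Checking the constants, assuming those encodings:

  0xb  dwords * 4 = 44  bytes = 0x2c
  0xd  dwords * 4 = 52  bytes = 0x34
  0x11 dwords * 4 = 68  bytes = 0x44
  0x19 dwords * 4 = 100 bytes = 0x64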
diff --git a/test/CodeGen/R600/large-alloca.ll b/test/CodeGen/R600/large-alloca.ll
index d8be6d4..788816c 100644
--- a/test/CodeGen/R600/large-alloca.ll
+++ b/test/CodeGen/R600/large-alloca.ll
@@ -1,6 +1,7 @@
; XFAIL: *
; REQUIRES: asserts
-; RUN: llc -march=r600 -mcpu=SI < %s
+; RUN: llc -march=amdgcn -mcpu=SI < %s
+; RUN: llc -march=amdgcn -mcpu=tonga < %s
define void @large_alloca(i32 addrspace(1)* %out, i32 %x, i32 %y) nounwind {
%large = alloca [8192 x i32], align 4
diff --git a/test/CodeGen/R600/large-constant-initializer.ll b/test/CodeGen/R600/large-constant-initializer.ll
index 5612dd3..c8671ef 100644
--- a/test/CodeGen/R600/large-constant-initializer.ll
+++ b/test/CodeGen/R600/large-constant-initializer.ll
@@ -1,4 +1,5 @@
-; RUN: llc -march=r600 -mcpu=SI < %s
+; RUN: llc -march=amdgcn -mcpu=SI < %s
+; RUN: llc -march=amdgcn -mcpu=tonga < %s
; CHECK: s_endpgm
@gv = external unnamed_addr addrspace(2) constant [239 x i32], align 4
diff --git a/test/CodeGen/R600/lds-initializer.ll b/test/CodeGen/R600/lds-initializer.ll
index 91d5d12..7344eff 100644
--- a/test/CodeGen/R600/lds-initializer.ll
+++ b/test/CodeGen/R600/lds-initializer.ll
@@ -1,4 +1,5 @@
-; RUN: not llc -march=r600 -mcpu=SI < %s 2>&1 | FileCheck %s
+; RUN: not llc -march=amdgcn -mcpu=SI < %s 2>&1 | FileCheck %s
+; RUN: not llc -march=amdgcn -mcpu=tonga < %s 2>&1 | FileCheck %s
; CHECK: error: unsupported initializer for address space in load_init_lds_global
diff --git a/test/CodeGen/R600/lds-zero-initializer.ll b/test/CodeGen/R600/lds-zero-initializer.ll
index 23912a9..1fb6f52 100644
--- a/test/CodeGen/R600/lds-zero-initializer.ll
+++ b/test/CodeGen/R600/lds-zero-initializer.ll
@@ -1,4 +1,5 @@
-; RUN: not llc -march=r600 -mcpu=SI < %s 2>&1 | FileCheck %s
+; RUN: not llc -march=amdgcn -mcpu=SI < %s 2>&1 | FileCheck %s
+; RUN: not llc -march=amdgcn -mcpu=tonga < %s 2>&1 | FileCheck %s
; CHECK: error: unsupported initializer for address space in load_zeroinit_lds_global
diff --git a/test/CodeGen/R600/llvm.AMDGPU.abs.ll b/test/CodeGen/R600/llvm.AMDGPU.abs.ll
index b4aede8..8bc2583 100644
--- a/test/CodeGen/R600/llvm.AMDGPU.abs.ll
+++ b/test/CodeGen/R600/llvm.AMDGPU.abs.ll
@@ -1,4 +1,5 @@
-; RUN: llc -march=r600 -mcpu=SI -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
+; RUN: llc -march=amdgcn -mcpu=SI -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
+; RUN: llc -march=amdgcn -mcpu=tonga -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
; RUN: llc -march=r600 -mcpu=cypress -verify-machineinstrs < %s | FileCheck -check-prefix=EG -check-prefix=FUNC %s
declare i32 @llvm.AMDGPU.abs(i32) nounwind readnone
diff --git a/test/CodeGen/R600/llvm.AMDGPU.barrier.global.ll b/test/CodeGen/R600/llvm.AMDGPU.barrier.global.ll
index 98f6695..a11d9ae 100644
--- a/test/CodeGen/R600/llvm.AMDGPU.barrier.global.ll
+++ b/test/CodeGen/R600/llvm.AMDGPU.barrier.global.ll
@@ -1,8 +1,10 @@
-; RUN: llc -march=r600 -mcpu=SI < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
+; RUN: llc -march=amdgcn -mcpu=SI < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
; RUN: llc -march=r600 -mcpu=redwood < %s | FileCheck -check-prefix=EG -check-prefix=FUNC %s
; FUNC-LABEL: {{^}}test_barrier_global:
; EG: GROUP_BARRIER
+; SI: buffer_store_dword
+; SI: s_waitcnt
; SI: s_barrier
define void @test_barrier_global(i32 addrspace(1)* %out) {
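The two added SI checks assert ordering rather than new codegen: the global store must drain through s_waitcnt before s_barrier executes, so the rest of the workgroup can rely on the write being visible once it passes the barrier. A minimal sketch of the shape under test, with an assumed body and function name since the test body lies outside this hunk:

  declare void @llvm.AMDGPU.barrier.global() nounwind noduplicate

  define void @store_then_barrier(i32 addrspace(1)* %out) {
    store i32 0, i32 addrspace(1)* %out      ; SI: buffer_store_dword
    call void @llvm.AMDGPU.barrier.global()  ; SI: s_waitcnt, then s_barrier
    ret void
  }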
diff --git a/test/CodeGen/R600/llvm.AMDGPU.barrier.local.ll b/test/CodeGen/R600/llvm.AMDGPU.barrier.local.ll
index 92fe9f2..76c2453 100644
--- a/test/CodeGen/R600/llvm.AMDGPU.barrier.local.ll
+++ b/test/CodeGen/R600/llvm.AMDGPU.barrier.local.ll
@@ -1,8 +1,11 @@
-; RUN: llc -march=r600 -mcpu=SI < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
+; RUN: llc -march=amdgcn -mcpu=SI < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
; RUN: llc -march=r600 -mcpu=redwood < %s | FileCheck -check-prefix=EG -check-prefix=FUNC %s
; FUNC-LABEL: {{^}}test_barrier_local:
; EG: GROUP_BARRIER
+
+; SI: buffer_store_dword
+; SI: s_waitcnt
; SI: s_barrier
define void @test_barrier_local(i32 addrspace(1)* %out) {
diff --git a/test/CodeGen/R600/llvm.AMDGPU.bfe.i32.ll b/test/CodeGen/R600/llvm.AMDGPU.bfe.i32.ll
index 0b60d0d..2ec2546 100644
--- a/test/CodeGen/R600/llvm.AMDGPU.bfe.i32.ll
+++ b/test/CodeGen/R600/llvm.AMDGPU.bfe.i32.ll
@@ -1,4 +1,5 @@
-; RUN: llc -march=r600 -mcpu=SI -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
+; RUN: llc -march=amdgcn -mcpu=SI -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
+; RUN: llc -march=amdgcn -mcpu=tonga -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
; RUN: llc -march=r600 -mcpu=redwood -show-mc-encoding -verify-machineinstrs < %s | FileCheck -check-prefix=EG -check-prefix=FUNC %s
declare i32 @llvm.AMDGPU.bfe.i32(i32, i32, i32) nounwind readnone
diff --git a/test/CodeGen/R600/llvm.AMDGPU.bfe.u32.ll b/test/CodeGen/R600/llvm.AMDGPU.bfe.u32.ll
index 0794ac4..6cd0108 100644
--- a/test/CodeGen/R600/llvm.AMDGPU.bfe.u32.ll
+++ b/test/CodeGen/R600/llvm.AMDGPU.bfe.u32.ll
@@ -1,4 +1,5 @@
-; RUN: llc -march=r600 -mcpu=SI -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
+; RUN: llc -march=amdgcn -mcpu=SI -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
+; RUN: llc -march=amdgcn -mcpu=tonga -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
; RUN: llc -march=r600 -mcpu=redwood -verify-machineinstrs < %s | FileCheck -check-prefix=EG -check-prefix=FUNC %s
declare i32 @llvm.AMDGPU.bfe.u32(i32, i32, i32) nounwind readnone
diff --git a/test/CodeGen/R600/llvm.AMDGPU.bfi.ll b/test/CodeGen/R600/llvm.AMDGPU.bfi.ll
index df61b0b..517a55a 100644
--- a/test/CodeGen/R600/llvm.AMDGPU.bfi.ll
+++ b/test/CodeGen/R600/llvm.AMDGPU.bfi.ll
@@ -1,4 +1,5 @@
-; RUN: llc -march=r600 -mcpu=SI -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
+; RUN: llc -march=amdgcn -mcpu=SI -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
+; RUN: llc -march=amdgcn -mcpu=tonga -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
; RUN: llc -march=r600 -mcpu=redwood -verify-machineinstrs < %s | FileCheck -check-prefix=EG -check-prefix=FUNC %s
declare i32 @llvm.AMDGPU.bfi(i32, i32, i32) nounwind readnone
diff --git a/test/CodeGen/R600/llvm.AMDGPU.bfm.ll b/test/CodeGen/R600/llvm.AMDGPU.bfm.ll
index 0ba4af5..2346f40 100644
--- a/test/CodeGen/R600/llvm.AMDGPU.bfm.ll
+++ b/test/CodeGen/R600/llvm.AMDGPU.bfm.ll
@@ -1,4 +1,5 @@
-; RUN: llc -march=r600 -mcpu=SI -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
+; RUN: llc -march=amdgcn -mcpu=SI -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
+; RUN: llc -march=amdgcn -mcpu=tonga -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
; RUN: llc -march=r600 -mcpu=redwood -verify-machineinstrs < %s | FileCheck -check-prefix=EG -check-prefix=FUNC %s
declare i32 @llvm.AMDGPU.bfm(i32, i32) nounwind readnone
diff --git a/test/CodeGen/R600/llvm.AMDGPU.brev.ll b/test/CodeGen/R600/llvm.AMDGPU.brev.ll
index 647df34..3973f53 100644
--- a/test/CodeGen/R600/llvm.AMDGPU.brev.ll
+++ b/test/CodeGen/R600/llvm.AMDGPU.brev.ll
@@ -1,4 +1,5 @@
-; RUN: llc -march=r600 -mcpu=SI -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
+; RUN: llc -march=amdgcn -mcpu=SI -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
+; RUN: llc -march=amdgcn -mcpu=tonga -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
declare i32 @llvm.AMDGPU.brev(i32) nounwind readnone
diff --git a/test/CodeGen/R600/llvm.AMDGPU.clamp.ll b/test/CodeGen/R600/llvm.AMDGPU.clamp.ll
index c6efdb9..11ec963 100644
--- a/test/CodeGen/R600/llvm.AMDGPU.clamp.ll
+++ b/test/CodeGen/R600/llvm.AMDGPU.clamp.ll
@@ -1,4 +1,5 @@
-; RUN: llc -march=r600 -mcpu=SI -verify-machineinstrs < %s | FileCheck -strict-whitespace -check-prefix=SI -check-prefix=FUNC %s
+; RUN: llc -march=amdgcn -mcpu=SI -verify-machineinstrs < %s | FileCheck -strict-whitespace -check-prefix=SI -check-prefix=FUNC %s
+; RUN: llc -march=amdgcn -mcpu=tonga -verify-machineinstrs < %s | FileCheck -strict-whitespace -check-prefix=SI -check-prefix=FUNC %s
; RUN: llc -march=r600 -mcpu=cypress -verify-machineinstrs < %s | FileCheck -check-prefix=EG -check-prefix=FUNC %s
declare float @llvm.fabs.f32(float) nounwind readnone
diff --git a/test/CodeGen/R600/llvm.AMDGPU.class.ll b/test/CodeGen/R600/llvm.AMDGPU.class.ll
new file mode 100644
index 0000000..f111eb9
--- /dev/null
+++ b/test/CodeGen/R600/llvm.AMDGPU.class.ll
@@ -0,0 +1,497 @@
+; RUN: llc -march=amdgcn -mcpu=SI -verify-machineinstrs < %s | FileCheck -check-prefix=SI %s
+
+declare i1 @llvm.AMDGPU.class.f32(float, i32) #1
+declare i1 @llvm.AMDGPU.class.f64(double, i32) #1
+declare i32 @llvm.r600.read.tidig.x() #1
+declare float @llvm.fabs.f32(float) #1
+declare double @llvm.fabs.f64(double) #1
+
+; SI-LABEL: {{^}}test_class_f32:
+; SI-DAG: s_load_dword [[SA:s[0-9]+]], s{{\[[0-9]+:[0-9]+\]}}, 0xb
+; SI-DAG: s_load_dword [[SB:s[0-9]+]], s{{\[[0-9]+:[0-9]+\]}}, 0xc
+; SI: v_mov_b32_e32 [[VB:v[0-9]+]], [[SB]]
+; SI: v_cmp_class_f32_e32 vcc, [[SA]], [[VB]]
+; SI-NEXT: v_cndmask_b32_e64 [[RESULT:v[0-9]+]], 0, -1, vcc
+; SI-NEXT: buffer_store_dword [[RESULT]]
+; SI: s_endpgm
+define void @test_class_f32(i32 addrspace(1)* %out, float %a, i32 %b) #0 {
+ %result = call i1 @llvm.AMDGPU.class.f32(float %a, i32 %b) #1
+ %sext = sext i1 %result to i32
+ store i32 %sext, i32 addrspace(1)* %out, align 4
+ ret void
+}
+
+; SI-LABEL: {{^}}test_class_fabs_f32:
+; SI-DAG: s_load_dword [[SA:s[0-9]+]], s{{\[[0-9]+:[0-9]+\]}}, 0xb
+; SI-DAG: s_load_dword [[SB:s[0-9]+]], s{{\[[0-9]+:[0-9]+\]}}, 0xc
+; SI: v_mov_b32_e32 [[VB:v[0-9]+]], [[SB]]
+; SI: v_cmp_class_f32_e64 [[CMP:s\[[0-9]+:[0-9]+\]]], |[[SA]]|, [[VB]]
+; SI-NEXT: v_cndmask_b32_e64 [[RESULT:v[0-9]+]], 0, -1, [[CMP]]
+; SI-NEXT: buffer_store_dword [[RESULT]]
+; SI: s_endpgm
+define void @test_class_fabs_f32(i32 addrspace(1)* %out, float %a, i32 %b) #0 {
+ %a.fabs = call float @llvm.fabs.f32(float %a) #1
+ %result = call i1 @llvm.AMDGPU.class.f32(float %a.fabs, i32 %b) #1
+ %sext = sext i1 %result to i32
+ store i32 %sext, i32 addrspace(1)* %out, align 4
+ ret void
+}
+
+; SI-LABEL: {{^}}test_class_fneg_f32:
+; SI-DAG: s_load_dword [[SA:s[0-9]+]], s{{\[[0-9]+:[0-9]+\]}}, 0xb
+; SI-DAG: s_load_dword [[SB:s[0-9]+]], s{{\[[0-9]+:[0-9]+\]}}, 0xc
+; SI: v_mov_b32_e32 [[VB:v[0-9]+]], [[SB]]
+; SI: v_cmp_class_f32_e64 [[CMP:s\[[0-9]+:[0-9]+\]]], -[[SA]], [[VB]]
+; SI-NEXT: v_cndmask_b32_e64 [[RESULT:v[0-9]+]], 0, -1, [[CMP]]
+; SI-NEXT: buffer_store_dword [[RESULT]]
+; SI: s_endpgm
+define void @test_class_fneg_f32(i32 addrspace(1)* %out, float %a, i32 %b) #0 {
+ %a.fneg = fsub float -0.0, %a
+ %result = call i1 @llvm.AMDGPU.class.f32(float %a.fneg, i32 %b) #1
+ %sext = sext i1 %result to i32
+ store i32 %sext, i32 addrspace(1)* %out, align 4
+ ret void
+}
+
+; SI-LABEL: {{^}}test_class_fneg_fabs_f32:
+; SI-DAG: s_load_dword [[SA:s[0-9]+]], s{{\[[0-9]+:[0-9]+\]}}, 0xb
+; SI-DAG: s_load_dword [[SB:s[0-9]+]], s{{\[[0-9]+:[0-9]+\]}}, 0xc
+; SI: v_mov_b32_e32 [[VB:v[0-9]+]], [[SB]]
+; SI: v_cmp_class_f32_e64 [[CMP:s\[[0-9]+:[0-9]+\]]], -|[[SA]]|, [[VB]]
+; SI-NEXT: v_cndmask_b32_e64 [[RESULT:v[0-9]+]], 0, -1, [[CMP]]
+; SI-NEXT: buffer_store_dword [[RESULT]]
+; SI: s_endpgm
+define void @test_class_fneg_fabs_f32(i32 addrspace(1)* %out, float %a, i32 %b) #0 {
+ %a.fabs = call float @llvm.fabs.f32(float %a) #1
+ %a.fneg.fabs = fsub float -0.0, %a.fabs
+ %result = call i1 @llvm.AMDGPU.class.f32(float %a.fneg.fabs, i32 %b) #1
+ %sext = sext i1 %result to i32
+ store i32 %sext, i32 addrspace(1)* %out, align 4
+ ret void
+}
+
+; SI-LABEL: {{^}}test_class_1_f32:
+; SI: s_load_dword [[SA:s[0-9]+]], s{{\[[0-9]+:[0-9]+\]}}, 0xb
+; SI: v_cmp_class_f32_e64 [[COND:s\[[0-9]+:[0-9]+\]]], [[SA]], 1{{$}}
+; SI-NEXT: v_cndmask_b32_e64 [[RESULT:v[0-9]+]], 0, -1, [[COND]]
+; SI-NEXT: buffer_store_dword [[RESULT]]
+; SI: s_endpgm
+define void @test_class_1_f32(i32 addrspace(1)* %out, float %a) #0 {
+ %result = call i1 @llvm.AMDGPU.class.f32(float %a, i32 1) #1
+ %sext = sext i1 %result to i32
+ store i32 %sext, i32 addrspace(1)* %out, align 4
+ ret void
+}
+
+; SI-LABEL: {{^}}test_class_64_f32:
+; SI: s_load_dword [[SA:s[0-9]+]], s{{\[[0-9]+:[0-9]+\]}}, 0xb
+; SI: v_cmp_class_f32_e64 [[COND:s\[[0-9]+:[0-9]+\]]], [[SA]], 64{{$}}
+; SI-NEXT: v_cndmask_b32_e64 [[RESULT:v[0-9]+]], 0, -1, [[COND]]
+; SI-NEXT: buffer_store_dword [[RESULT]]
+; SI: s_endpgm
+define void @test_class_64_f32(i32 addrspace(1)* %out, float %a) #0 {
+ %result = call i1 @llvm.AMDGPU.class.f32(float %a, i32 64) #1
+ %sext = sext i1 %result to i32
+ store i32 %sext, i32 addrspace(1)* %out, align 4
+ ret void
+}
+
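+; For reference, the class mask selects FP classes by bit, assuming the GCN
+; ISA encoding: 0 sNaN, 1 qNaN, 2 -inf, 3 -normal, 4 -denormal, 5 -0, 6 +0,
+; 7 +denormal, 8 +normal, 9 +inf. So 1 tests sNaN, 64 tests +0, 0x3ff covers
+; all ten classes, and 0x1ff covers everything except +inf.
+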
+; Set all 10 bits of mask
+; SI-LABEL: {{^}}test_class_full_mask_f32:
+; SI: s_load_dword [[SA:s[0-9]+]], s{{\[[0-9]+:[0-9]+\]}}, 0xb
+; SI: v_mov_b32_e32 [[MASK:v[0-9]+]], 0x3ff{{$}}
+; SI: v_cmp_class_f32_e32 vcc, [[SA]], [[MASK]]
+; SI-NEXT: v_cndmask_b32_e64 [[RESULT:v[0-9]+]], 0, -1, vcc
+; SI-NEXT: buffer_store_dword [[RESULT]]
+; SI: s_endpgm
+define void @test_class_full_mask_f32(i32 addrspace(1)* %out, float %a) #0 {
+ %result = call i1 @llvm.AMDGPU.class.f32(float %a, i32 1023) #1
+ %sext = sext i1 %result to i32
+ store i32 %sext, i32 addrspace(1)* %out, align 4
+ ret void
+}
+
+; SI-LABEL: {{^}}test_class_9bit_mask_f32:
+; SI: s_load_dword [[SA:s[0-9]+]], s{{\[[0-9]+:[0-9]+\]}}, 0xb
+; SI: v_mov_b32_e32 [[MASK:v[0-9]+]], 0x1ff{{$}}
+; SI: v_cmp_class_f32_e32 vcc, [[SA]], [[MASK]]
+; SI-NEXT: v_cndmask_b32_e64 [[RESULT:v[0-9]+]], 0, -1, vcc
+; SI-NEXT: buffer_store_dword [[RESULT]]
+; SI: s_endpgm
+define void @test_class_9bit_mask_f32(i32 addrspace(1)* %out, float %a) #0 {
+ %result = call i1 @llvm.AMDGPU.class.f32(float %a, i32 511) #1
+ %sext = sext i1 %result to i32
+ store i32 %sext, i32 addrspace(1)* %out, align 4
+ ret void
+}
+
+; SI-LABEL: {{^}}v_test_class_full_mask_f32:
+; SI-DAG: buffer_load_dword [[VA:v[0-9]+]]
+; SI-DAG: v_mov_b32_e32 [[MASK:v[0-9]+]], 0x1ff{{$}}
+; SI: v_cmp_class_f32_e32 vcc, [[VA]], [[MASK]]
+; SI-NEXT: v_cndmask_b32_e64 [[RESULT:v[0-9]+]], 0, -1, vcc
+; SI: buffer_store_dword [[RESULT]]
+; SI: s_endpgm
+define void @v_test_class_full_mask_f32(i32 addrspace(1)* %out, float addrspace(1)* %in) #0 {
+ %tid = call i32 @llvm.r600.read.tidig.x() #1
+ %gep.in = getelementptr float addrspace(1)* %in, i32 %tid
+ %gep.out = getelementptr i32 addrspace(1)* %out, i32 %tid
+ %a = load float addrspace(1)* %gep.in
+
+ %result = call i1 @llvm.AMDGPU.class.f32(float %a, i32 511) #1
+ %sext = sext i1 %result to i32
+ store i32 %sext, i32 addrspace(1)* %gep.out, align 4
+ ret void
+}
+
+; SI-LABEL: {{^}}test_class_inline_imm_constant_dynamic_mask_f32:
+; SI-DAG: buffer_load_dword [[VB:v[0-9]+]]
+; SI: v_cmp_class_f32_e32 vcc, 1.0, [[VB]]
+; SI-NEXT: v_cndmask_b32_e64 [[RESULT:v[0-9]+]], 0, -1, vcc
+; SI: buffer_store_dword [[RESULT]]
+; SI: s_endpgm
+define void @test_class_inline_imm_constant_dynamic_mask_f32(i32 addrspace(1)* %out, i32 addrspace(1)* %in) #0 {
+ %tid = call i32 @llvm.r600.read.tidig.x() #1
+ %gep.in = getelementptr i32 addrspace(1)* %in, i32 %tid
+ %gep.out = getelementptr i32 addrspace(1)* %out, i32 %tid
+ %b = load i32 addrspace(1)* %gep.in
+
+ %result = call i1 @llvm.AMDGPU.class.f32(float 1.0, i32 %b) #1
+ %sext = sext i1 %result to i32
+ store i32 %sext, i32 addrspace(1)* %gep.out, align 4
+ ret void
+}
+
+; FIXME: Why isn't this using a literal constant operand?
+; SI-LABEL: {{^}}test_class_lit_constant_dynamic_mask_f32:
+; SI-DAG: buffer_load_dword [[VB:v[0-9]+]]
+; SI-DAG: v_mov_b32_e32 [[VK:v[0-9]+]], 0x44800000
+; SI: v_cmp_class_f32_e32 vcc, [[VK]], [[VB]]
+; SI-NEXT: v_cndmask_b32_e64 [[RESULT:v[0-9]+]], 0, -1, vcc
+; SI: buffer_store_dword [[RESULT]]
+; SI: s_endpgm
+define void @test_class_lit_constant_dynamic_mask_f32(i32 addrspace(1)* %out, i32 addrspace(1)* %in) #0 {
+ %tid = call i32 @llvm.r600.read.tidig.x() #1
+ %gep.in = getelementptr i32 addrspace(1)* %in, i32 %tid
+ %gep.out = getelementptr i32 addrspace(1)* %out, i32 %tid
+ %b = load i32 addrspace(1)* %gep.in
+
+ %result = call i1 @llvm.AMDGPU.class.f32(float 1024.0, i32 %b) #1
+ %sext = sext i1 %result to i32
+ store i32 %sext, i32 addrspace(1)* %gep.out, align 4
+ ret void
+}
+
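+; A plausible answer to the FIXME above: 1.0 is a GCN inline immediate and is
+; encoded directly in the compare, while 1024.0 (0x44800000) is not in the
+; inline set (0.0, +/-0.5, +/-1.0, +/-2.0, +/-4.0, integers -16..64), so for
+; now it is materialized with v_mov instead of a literal operand.
+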
+; SI-LABEL: {{^}}test_class_f64:
+; SI-DAG: s_load_dwordx2 [[SA:s\[[0-9]+:[0-9]+\]]], s{{\[[0-9]+:[0-9]+\]}}, 0xb
+; SI-DAG: s_load_dword [[SB:s[0-9]+]], s{{\[[0-9]+:[0-9]+\]}}, 0xd
+; SI-DAG: v_mov_b32_e32 [[VB:v[0-9]+]], [[SB]]
+; SI: v_cmp_class_f64_e32 vcc, [[SA]], [[VB]]
+; SI-NEXT: v_cndmask_b32_e64 [[RESULT:v[0-9]+]], 0, -1, vcc
+; SI-NEXT: buffer_store_dword [[RESULT]]
+; SI: s_endpgm
+define void @test_class_f64(i32 addrspace(1)* %out, double %a, i32 %b) #0 {
+ %result = call i1 @llvm.AMDGPU.class.f64(double %a, i32 %b) #1
+ %sext = sext i1 %result to i32
+ store i32 %sext, i32 addrspace(1)* %out, align 4
+ ret void
+}
+
+; SI-LABEL: {{^}}test_class_fabs_f64:
+; SI-DAG: s_load_dwordx2 [[SA:s\[[0-9]+:[0-9]+\]]], s{{\[[0-9]+:[0-9]+\]}}, 0xb
+; SI-DAG: s_load_dword [[SB:s[0-9]+]], s{{\[[0-9]+:[0-9]+\]}}, 0xd
+; SI-DAG: v_mov_b32_e32 [[VB:v[0-9]+]], [[SB]]
+; SI: v_cmp_class_f64_e64 [[CMP:s\[[0-9]+:[0-9]+\]]], |[[SA]]|, [[VB]]
+; SI-NEXT: v_cndmask_b32_e64 [[RESULT:v[0-9]+]], 0, -1, [[CMP]]
+; SI-NEXT: buffer_store_dword [[RESULT]]
+; SI: s_endpgm
+define void @test_class_fabs_f64(i32 addrspace(1)* %out, double %a, i32 %b) #0 {
+ %a.fabs = call double @llvm.fabs.f64(double %a) #1
+ %result = call i1 @llvm.AMDGPU.class.f64(double %a.fabs, i32 %b) #1
+ %sext = sext i1 %result to i32
+ store i32 %sext, i32 addrspace(1)* %out, align 4
+ ret void
+}
+
+; SI-LABEL: {{^}}test_class_fneg_f64:
+; SI-DAG: s_load_dwordx2 [[SA:s\[[0-9]+:[0-9]+\]]], s{{\[[0-9]+:[0-9]+\]}}, 0xb
+; SI-DAG: s_load_dword [[SB:s[0-9]+]], s{{\[[0-9]+:[0-9]+\]}}, 0xd
+; SI-DAG: v_mov_b32_e32 [[VB:v[0-9]+]], [[SB]]
+; SI: v_cmp_class_f64_e64 [[CMP:s\[[0-9]+:[0-9]+\]]], -[[SA]], [[VB]]
+; SI-NEXT: v_cndmask_b32_e64 [[RESULT:v[0-9]+]], 0, -1, [[CMP]]
+; SI-NEXT: buffer_store_dword [[RESULT]]
+; SI: s_endpgm
+define void @test_class_fneg_f64(i32 addrspace(1)* %out, double %a, i32 %b) #0 {
+ %a.fneg = fsub double -0.0, %a
+ %result = call i1 @llvm.AMDGPU.class.f64(double %a.fneg, i32 %b) #1
+ %sext = sext i1 %result to i32
+ store i32 %sext, i32 addrspace(1)* %out, align 4
+ ret void
+}
+
+; SI-LABEL: {{^}}test_class_fneg_fabs_f64:
+; SI-DAG: s_load_dwordx2 [[SA:s\[[0-9]+:[0-9]+\]]], s{{\[[0-9]+:[0-9]+\]}}, 0xb
+; SI-DAG: s_load_dword [[SB:s[0-9]+]], s{{\[[0-9]+:[0-9]+\]}}, 0xd
+; SI-DAG: v_mov_b32_e32 [[VB:v[0-9]+]], [[SB]]
+; SI: v_cmp_class_f64_e64 [[CMP:s\[[0-9]+:[0-9]+\]]], -|[[SA]]|, [[VB]]
+; SI-NEXT: v_cndmask_b32_e64 [[RESULT:v[0-9]+]], 0, -1, [[CMP]]
+; SI-NEXT: buffer_store_dword [[RESULT]]
+; SI: s_endpgm
+define void @test_class_fneg_fabs_f64(i32 addrspace(1)* %out, double %a, i32 %b) #0 {
+ %a.fabs = call double @llvm.fabs.f64(double %a) #1
+ %a.fneg.fabs = fsub double -0.0, %a.fabs
+ %result = call i1 @llvm.AMDGPU.class.f64(double %a.fneg.fabs, i32 %b) #1
+ %sext = sext i1 %result to i32
+ store i32 %sext, i32 addrspace(1)* %out, align 4
+ ret void
+}
+
+; SI-LABEL: {{^}}test_class_1_f64:
+; SI: v_cmp_class_f64_e64 {{s\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, 1{{$}}
+; SI: s_endpgm
+define void @test_class_1_f64(i32 addrspace(1)* %out, double %a) #0 {
+ %result = call i1 @llvm.AMDGPU.class.f64(double %a, i32 1) #1
+ %sext = sext i1 %result to i32
+ store i32 %sext, i32 addrspace(1)* %out, align 4
+ ret void
+}
+
+; SI-LABEL: {{^}}test_class_64_f64:
+; SI: v_cmp_class_f64_e64 {{s\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, 64{{$}}
+; SI: s_endpgm
+define void @test_class_64_f64(i32 addrspace(1)* %out, double %a) #0 {
+ %result = call i1 @llvm.AMDGPU.class.f64(double %a, i32 64) #1
+ %sext = sext i1 %result to i32
+ store i32 %sext, i32 addrspace(1)* %out, align 4
+ ret void
+}
+
+; Set all 9 bits of mask
+; SI-LABEL: {{^}}test_class_full_mask_f64:
+; SI: s_load_dwordx2 [[SA:s\[[0-9]+:[0-9]+\]]], s{{\[[0-9]+:[0-9]+\]}}, 0xb
+; SI: v_mov_b32_e32 [[MASK:v[0-9]+]], 0x1ff{{$}}
+; SI: v_cmp_class_f64_e32 vcc, [[SA]], [[MASK]]
+; SI-NEXT: v_cndmask_b32_e64 [[RESULT:v[0-9]+]], 0, -1, vcc
+; SI-NEXT: buffer_store_dword [[RESULT]]
+; SI: s_endpgm
+define void @test_class_full_mask_f64(i32 addrspace(1)* %out, double %a) #0 {
+ %result = call i1 @llvm.AMDGPU.class.f64(double %a, i32 511) #1
+ %sext = sext i1 %result to i32
+ store i32 %sext, i32 addrspace(1)* %out, align 4
+ ret void
+}
+
+; SI-LABEL: {{^}}v_test_class_full_mask_f64:
+; SI-DAG: buffer_load_dwordx2 [[VA:v\[[0-9]+:[0-9]+\]]]
+; SI-DAG: v_mov_b32_e32 [[MASK:v[0-9]+]], 0x1ff{{$}}
+; SI: v_cmp_class_f64_e32 vcc, [[VA]], [[MASK]]
+; SI-NEXT: v_cndmask_b32_e64 [[RESULT:v[0-9]+]], 0, -1, vcc
+; SI: buffer_store_dword [[RESULT]]
+; SI: s_endpgm
+define void @v_test_class_full_mask_f64(i32 addrspace(1)* %out, double addrspace(1)* %in) #0 {
+ %tid = call i32 @llvm.r600.read.tidig.x() #1
+ %gep.in = getelementptr double addrspace(1)* %in, i32 %tid
+ %gep.out = getelementptr i32 addrspace(1)* %out, i32 %tid
+ %a = load double addrspace(1)* %gep.in
+
+ %result = call i1 @llvm.AMDGPU.class.f64(double %a, i32 511) #1
+ %sext = sext i1 %result to i32
+ store i32 %sext, i32 addrspace(1)* %gep.out, align 4
+ ret void
+}
+
+; SI-LABEL: {{^}}test_class_inline_imm_constant_dynamic_mask_f64:
+; XSI: v_cmp_class_f64_e32 vcc, 1.0,
+; SI: v_cmp_class_f64_e32 vcc,
+; SI: s_endpgm
+define void @test_class_inline_imm_constant_dynamic_mask_f64(i32 addrspace(1)* %out, i32 addrspace(1)* %in) #0 {
+ %tid = call i32 @llvm.r600.read.tidig.x() #1
+ %gep.in = getelementptr i32 addrspace(1)* %in, i32 %tid
+ %gep.out = getelementptr i32 addrspace(1)* %out, i32 %tid
+ %b = load i32 addrspace(1)* %gep.in
+
+ %result = call i1 @llvm.AMDGPU.class.f64(double 1.0, i32 %b) #1
+ %sext = sext i1 %result to i32
+ store i32 %sext, i32 addrspace(1)* %gep.out, align 4
+ ret void
+}
+
+; SI-LABEL: {{^}}test_class_lit_constant_dynamic_mask_f64:
+; SI: v_cmp_class_f64_e32 vcc, s{{\[[0-9]+:[0-9]+\]}}, v{{[0-9]+}}
+; SI: s_endpgm
+define void @test_class_lit_constant_dynamic_mask_f64(i32 addrspace(1)* %out, i32 addrspace(1)* %in) #0 {
+ %tid = call i32 @llvm.r600.read.tidig.x() #1
+ %gep.in = getelementptr i32 addrspace(1)* %in, i32 %tid
+ %gep.out = getelementptr i32 addrspace(1)* %out, i32 %tid
+ %b = load i32 addrspace(1)* %gep.in
+
+ %result = call i1 @llvm.AMDGPU.class.f64(double 1024.0, i32 %b) #1
+ %sext = sext i1 %result to i32
+ store i32 %sext, i32 addrspace(1)* %gep.out, align 4
+ ret void
+}
+
+; SI-LABEL: {{^}}test_fold_or_class_f32_0:
+; SI-NOT: v_cmp_class
+; SI: v_cmp_class_f32_e64 {{s\[[0-9]+:[0-9]+\]}}, v{{[0-9]+}}, 3{{$}}
+; SI-NOT: v_cmp_class
+; SI: s_endpgm
+define void @test_fold_or_class_f32_0(i32 addrspace(1)* %out, float addrspace(1)* %in) #0 {
+ %tid = call i32 @llvm.r600.read.tidig.x() #1
+ %gep.in = getelementptr float addrspace(1)* %in, i32 %tid
+ %gep.out = getelementptr i32 addrspace(1)* %out, i32 %tid
+ %a = load float addrspace(1)* %gep.in
+
+ %class0 = call i1 @llvm.AMDGPU.class.f32(float %a, i32 1) #1
+ %class1 = call i1 @llvm.AMDGPU.class.f32(float %a, i32 3) #1
+ %or = or i1 %class0, %class1
+
+ %sext = sext i1 %or to i32
+ store i32 %sext, i32 addrspace(1)* %out, align 4
+ ret void
+}
+
+; SI-LABEL: {{^}}test_fold_or3_class_f32_0:
+; SI-NOT: v_cmp_class
+; SI: v_cmp_class_f32_e64 s{{\[[0-9]+:[0-9]+\]}}, v{{[0-9]+}}, 7{{$}}
+; SI-NOT: v_cmp_class
+; SI: s_endpgm
+define void @test_fold_or3_class_f32_0(i32 addrspace(1)* %out, float addrspace(1)* %in) #0 {
+ %tid = call i32 @llvm.r600.read.tidig.x() #1
+ %gep.in = getelementptr float addrspace(1)* %in, i32 %tid
+ %gep.out = getelementptr i32 addrspace(1)* %out, i32 %tid
+ %a = load float addrspace(1)* %gep.in
+
+ %class0 = call i1 @llvm.AMDGPU.class.f32(float %a, i32 1) #1
+ %class1 = call i1 @llvm.AMDGPU.class.f32(float %a, i32 2) #1
+ %class2 = call i1 @llvm.AMDGPU.class.f32(float %a, i32 4) #1
+ %or.0 = or i1 %class0, %class1
+ %or.1 = or i1 %or.0, %class2
+
+ %sext = sext i1 %or.1 to i32
+ store i32 %sext, i32 addrspace(1)* %out, align 4
+ ret void
+}
+
+; SI-LABEL: {{^}}test_fold_or_all_tests_class_f32_0:
+; SI-NOT: v_cmp_class
+; SI: v_mov_b32_e32 [[MASK:v[0-9]+]], 0x3ff{{$}}
+; SI: v_cmp_class_f32_e32 vcc, v{{[0-9]+}}, [[MASK]]{{$}}
+; SI-NOT: v_cmp_class
+; SI: s_endpgm
+define void @test_fold_or_all_tests_class_f32_0(i32 addrspace(1)* %out, float addrspace(1)* %in) #0 {
+ %tid = call i32 @llvm.r600.read.tidig.x() #1
+ %gep.in = getelementptr float addrspace(1)* %in, i32 %tid
+ %gep.out = getelementptr i32 addrspace(1)* %out, i32 %tid
+ %a = load float addrspace(1)* %gep.in
+
+ %class0 = call i1 @llvm.AMDGPU.class.f32(float %a, i32 1) #1
+ %class1 = call i1 @llvm.AMDGPU.class.f32(float %a, i32 2) #1
+ %class2 = call i1 @llvm.AMDGPU.class.f32(float %a, i32 4) #1
+ %class3 = call i1 @llvm.AMDGPU.class.f32(float %a, i32 8) #1
+ %class4 = call i1 @llvm.AMDGPU.class.f32(float %a, i32 16) #1
+ %class5 = call i1 @llvm.AMDGPU.class.f32(float %a, i32 32) #1
+ %class6 = call i1 @llvm.AMDGPU.class.f32(float %a, i32 64) #1
+ %class7 = call i1 @llvm.AMDGPU.class.f32(float %a, i32 128) #1
+ %class8 = call i1 @llvm.AMDGPU.class.f32(float %a, i32 256) #1
+ %class9 = call i1 @llvm.AMDGPU.class.f32(float %a, i32 512) #1
+ %or.0 = or i1 %class0, %class1
+ %or.1 = or i1 %or.0, %class2
+ %or.2 = or i1 %or.1, %class3
+ %or.3 = or i1 %or.2, %class4
+ %or.4 = or i1 %or.3, %class5
+ %or.5 = or i1 %or.4, %class6
+ %or.6 = or i1 %or.5, %class7
+ %or.7 = or i1 %or.6, %class8
+ %or.8 = or i1 %or.7, %class9
+ %sext = sext i1 %or.8 to i32
+ store i32 %sext, i32 addrspace(1)* %out, align 4
+ ret void
+}
+
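+; These folds rely on class(x, m0) | class(x, m1) == class(x, m0 | m1) when
+; both tests share an operand: 1|3 = 3 and 1|2|4 = 7 above, 4|8 = 12 and
+; 7|7 = 7 below, and the ten single-bit masks OR to 0x3ff.
+; test_no_fold_or_class_f32_0 keeps two compares since %a and %b differ.
+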
+; SI-LABEL: {{^}}test_fold_or_class_f32_1:
+; SI-NOT: v_cmp_class
+; SI: v_cmp_class_f32_e64 {{s\[[0-9]+:[0-9]+\]}}, v{{[0-9]+}}, 12{{$}}
+; SI-NOT: v_cmp_class
+; SI: s_endpgm
+define void @test_fold_or_class_f32_1(i32 addrspace(1)* %out, float addrspace(1)* %in) #0 {
+ %tid = call i32 @llvm.r600.read.tidig.x() #1
+ %gep.in = getelementptr float addrspace(1)* %in, i32 %tid
+ %gep.out = getelementptr i32 addrspace(1)* %out, i32 %tid
+ %a = load float addrspace(1)* %gep.in
+
+ %class0 = call i1 @llvm.AMDGPU.class.f32(float %a, i32 4) #1
+ %class1 = call i1 @llvm.AMDGPU.class.f32(float %a, i32 8) #1
+ %or = or i1 %class0, %class1
+
+ %sext = sext i1 %or to i32
+ store i32 %sext, i32 addrspace(1)* %out, align 4
+ ret void
+}
+
+; SI-LABEL: {{^}}test_fold_or_class_f32_2:
+; SI-NOT: v_cmp_class
+; SI: v_cmp_class_f32_e64 {{s\[[0-9]+:[0-9]+\]}}, v{{[0-9]+}}, 7{{$}}
+; SI-NOT: v_cmp_class
+; SI: s_endpgm
+define void @test_fold_or_class_f32_2(i32 addrspace(1)* %out, float addrspace(1)* %in) #0 {
+ %tid = call i32 @llvm.r600.read.tidig.x() #1
+ %gep.in = getelementptr float addrspace(1)* %in, i32 %tid
+ %gep.out = getelementptr i32 addrspace(1)* %out, i32 %tid
+ %a = load float addrspace(1)* %gep.in
+
+ %class0 = call i1 @llvm.AMDGPU.class.f32(float %a, i32 7) #1
+ %class1 = call i1 @llvm.AMDGPU.class.f32(float %a, i32 7) #1
+ %or = or i1 %class0, %class1
+
+ %sext = sext i1 %or to i32
+ store i32 %sext, i32 addrspace(1)* %out, align 4
+ ret void
+}
+
+; SI-LABEL: {{^}}test_no_fold_or_class_f32_0:
+; SI-DAG: v_cmp_class_f32_e64 {{s\[[0-9]+:[0-9]+\]}}, v{{[0-9]+}}, 4{{$}}
+; SI-DAG: v_cmp_class_f32_e64 {{s\[[0-9]+:[0-9]+\]}}, s{{[0-9]+}}, 8{{$}}
+; SI: s_or_b64
+; SI: s_endpgm
+define void @test_no_fold_or_class_f32_0(i32 addrspace(1)* %out, float addrspace(1)* %in, float %b) #0 {
+ %tid = call i32 @llvm.r600.read.tidig.x() #1
+ %gep.in = getelementptr float addrspace(1)* %in, i32 %tid
+ %gep.out = getelementptr i32 addrspace(1)* %out, i32 %tid
+ %a = load float addrspace(1)* %gep.in
+
+ %class0 = call i1 @llvm.AMDGPU.class.f32(float %a, i32 4) #1
+ %class1 = call i1 @llvm.AMDGPU.class.f32(float %b, i32 8) #1
+ %or = or i1 %class0, %class1
+
+ %sext = sext i1 %or to i32
+ store i32 %sext, i32 addrspace(1)* %out, align 4
+ ret void
+}
+
+; SI-LABEL: {{^}}test_class_0_f32:
+; SI-NOT: v_cmp_class
+; SI: v_mov_b32_e32 [[RESULT:v[0-9]+]], 0{{$}}
+; SI: buffer_store_dword [[RESULT]]
+; SI: s_endpgm
+define void @test_class_0_f32(i32 addrspace(1)* %out, float %a) #0 {
+ %result = call i1 @llvm.AMDGPU.class.f32(float %a, i32 0) #1
+ %sext = sext i1 %result to i32
+ store i32 %sext, i32 addrspace(1)* %out, align 4
+ ret void
+}
+
+; SI-LABEL: {{^}}test_class_0_f64:
+; SI-NOT: v_cmp_class
+; SI: v_mov_b32_e32 [[RESULT:v[0-9]+]], 0{{$}}
+; SI: buffer_store_dword [[RESULT]]
+; SI: s_endpgm
+define void @test_class_0_f64(i32 addrspace(1)* %out, double %a) #0 {
+ %result = call i1 @llvm.AMDGPU.class.f64(double %a, i32 0) #1
+ %sext = sext i1 %result to i32
+ store i32 %sext, i32 addrspace(1)* %out, align 4
+ ret void
+}
+
+attributes #0 = { nounwind }
+attributes #1 = { nounwind readnone }
diff --git a/test/CodeGen/R600/llvm.AMDGPU.cvt_f32_ubyte.ll b/test/CodeGen/R600/llvm.AMDGPU.cvt_f32_ubyte.ll
index 7aacbb9..799817e 100644
--- a/test/CodeGen/R600/llvm.AMDGPU.cvt_f32_ubyte.ll
+++ b/test/CodeGen/R600/llvm.AMDGPU.cvt_f32_ubyte.ll
@@ -1,4 +1,5 @@
-; RUN: llc -march=r600 -mcpu=SI < %s | FileCheck -check-prefix=SI %s
+; RUN: llc -march=amdgcn -mcpu=SI < %s | FileCheck -check-prefix=SI %s
+; RUN: llc -march=amdgcn -mcpu=tonga < %s | FileCheck -check-prefix=SI %s
declare float @llvm.AMDGPU.cvt.f32.ubyte0(i32) nounwind readnone
declare float @llvm.AMDGPU.cvt.f32.ubyte1(i32) nounwind readnone
diff --git a/test/CodeGen/R600/llvm.AMDGPU.div_fixup.ll b/test/CodeGen/R600/llvm.AMDGPU.div_fixup.ll
index 009fd73..55ca9c7 100644
--- a/test/CodeGen/R600/llvm.AMDGPU.div_fixup.ll
+++ b/test/CodeGen/R600/llvm.AMDGPU.div_fixup.ll
@@ -1,25 +1,29 @@
-; RUN: llc -march=r600 -mcpu=SI -verify-machineinstrs < %s | FileCheck -check-prefix=SI %s
+; RUN: llc -march=amdgcn -mcpu=SI -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=GCN %s
+; RUN: llc -march=amdgcn -mcpu=tonga -verify-machineinstrs < %s | FileCheck -check-prefix=VI -check-prefix=GCN %s
declare float @llvm.AMDGPU.div.fixup.f32(float, float, float) nounwind readnone
declare double @llvm.AMDGPU.div.fixup.f64(double, double, double) nounwind readnone
-; SI-LABEL: {{^}}test_div_fixup_f32:
+; GCN-LABEL: {{^}}test_div_fixup_f32:
; SI-DAG: s_load_dword [[SA:s[0-9]+]], s{{\[[0-9]+:[0-9]+\]}}, 0xb
; SI-DAG: s_load_dword [[SC:s[0-9]+]], s{{\[[0-9]+:[0-9]+\]}}, 0xd
; SI-DAG: s_load_dword [[SB:s[0-9]+]], s{{\[[0-9]+:[0-9]+\]}}, 0xc
-; SI-DAG: v_mov_b32_e32 [[VC:v[0-9]+]], [[SC]]
-; SI-DAG: v_mov_b32_e32 [[VB:v[0-9]+]], [[SB]]
-; SI: v_div_fixup_f32 [[RESULT:v[0-9]+]], [[SA]], [[VB]], [[VC]]
-; SI: buffer_store_dword [[RESULT]],
-; SI: s_endpgm
+; VI-DAG: s_load_dword [[SA:s[0-9]+]], s{{\[[0-9]+:[0-9]+\]}}, 0x2c
+; VI-DAG: s_load_dword [[SC:s[0-9]+]], s{{\[[0-9]+:[0-9]+\]}}, 0x34
+; VI-DAG: s_load_dword [[SB:s[0-9]+]], s{{\[[0-9]+:[0-9]+\]}}, 0x30
+; GCN-DAG: v_mov_b32_e32 [[VC:v[0-9]+]], [[SC]]
+; GCN-DAG: v_mov_b32_e32 [[VB:v[0-9]+]], [[SB]]
+; GCN: v_div_fixup_f32 [[RESULT:v[0-9]+]], [[SA]], [[VB]], [[VC]]
+; GCN: buffer_store_dword [[RESULT]],
+; GCN: s_endpgm
define void @test_div_fixup_f32(float addrspace(1)* %out, float %a, float %b, float %c) nounwind {
%result = call float @llvm.AMDGPU.div.fixup.f32(float %a, float %b, float %c) nounwind readnone
store float %result, float addrspace(1)* %out, align 4
ret void
}
-; SI-LABEL: {{^}}test_div_fixup_f64:
-; SI: v_div_fixup_f64
+; GCN-LABEL: {{^}}test_div_fixup_f64:
+; GCN: v_div_fixup_f64
define void @test_div_fixup_f64(double addrspace(1)* %out, double %a, double %b, double %c) nounwind {
%result = call double @llvm.AMDGPU.div.fixup.f64(double %a, double %b, double %c) nounwind readnone
store double %result, double addrspace(1)* %out, align 8
diff --git a/test/CodeGen/R600/llvm.AMDGPU.div_fmas.ll b/test/CodeGen/R600/llvm.AMDGPU.div_fmas.ll
index dcca9e9..239fd53 100644
--- a/test/CodeGen/R600/llvm.AMDGPU.div_fmas.ll
+++ b/test/CodeGen/R600/llvm.AMDGPU.div_fmas.ll
@@ -1,27 +1,179 @@
-; RUN: llc -march=r600 -mcpu=SI -verify-machineinstrs < %s | FileCheck -check-prefix=SI %s
+; RUN: llc -march=amdgcn -mcpu=SI -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=SI %s
+; XUN: llc -march=amdgcn -mcpu=tonga -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=VI %s
+; FIXME: Enable for VI.
+
+declare i32 @llvm.r600.read.tidig.x() nounwind readnone
+declare void @llvm.AMDGPU.barrier.global() nounwind noduplicate
declare float @llvm.AMDGPU.div.fmas.f32(float, float, float, i1) nounwind readnone
declare double @llvm.AMDGPU.div.fmas.f64(double, double, double, i1) nounwind readnone
-; SI-LABEL: {{^}}test_div_fmas_f32:
+; GCN-LABEL: {{^}}test_div_fmas_f32:
; SI-DAG: s_load_dword [[SA:s[0-9]+]], s{{\[[0-9]+:[0-9]+\]}}, 0xb
; SI-DAG: s_load_dword [[SC:s[0-9]+]], s{{\[[0-9]+:[0-9]+\]}}, 0xd
; SI-DAG: s_load_dword [[SB:s[0-9]+]], s{{\[[0-9]+:[0-9]+\]}}, 0xc
+; VI-DAG: s_load_dword [[SA:s[0-9]+]], s{{\[[0-9]+:[0-9]+\]}}, 0x2c
+; VI-DAG: s_load_dword [[SC:s[0-9]+]], s{{\[[0-9]+:[0-9]+\]}}, 0x34
+; VI-DAG: s_load_dword [[SB:s[0-9]+]], s{{\[[0-9]+:[0-9]+\]}}, 0x30
+; GCN-DAG: v_mov_b32_e32 [[VC:v[0-9]+]], [[SC]]
+; GCN-DAG: v_mov_b32_e32 [[VB:v[0-9]+]], [[SB]]
+; GCN-DAG: v_mov_b32_e32 [[VA:v[0-9]+]], [[SA]]
+; GCN: v_div_fmas_f32 [[RESULT:v[0-9]+]], [[VB]], [[VA]], [[VC]]
+; GCN: buffer_store_dword [[RESULT]],
+; GCN: s_endpgm
+define void @test_div_fmas_f32(float addrspace(1)* %out, float %a, float %b, float %c, i1 %d) nounwind {
+ %result = call float @llvm.AMDGPU.div.fmas.f32(float %a, float %b, float %c, i1 %d) nounwind readnone
+ store float %result, float addrspace(1)* %out, align 4
+ ret void
+}
+
+; GCN-LABEL: {{^}}test_div_fmas_f32_inline_imm_0:
+; SI-DAG: s_load_dword [[SC:s[0-9]+]], s{{\[[0-9]+:[0-9]+\]}}, 0xd
+; SI-DAG: s_load_dword [[SB:s[0-9]+]], s{{\[[0-9]+:[0-9]+\]}}, 0xc
; SI-DAG: v_mov_b32_e32 [[VC:v[0-9]+]], [[SC]]
; SI-DAG: v_mov_b32_e32 [[VB:v[0-9]+]], [[SB]]
-; SI: v_div_fmas_f32 [[RESULT:v[0-9]+]], [[SA]], [[VB]], [[VC]]
+; SI: v_div_fmas_f32 [[RESULT:v[0-9]+]], 1.0, [[VB]], [[VC]]
; SI: buffer_store_dword [[RESULT]],
; SI: s_endpgm
-define void @test_div_fmas_f32(float addrspace(1)* %out, float %a, float %b, float %c, i1 %d) nounwind {
- %result = call float @llvm.AMDGPU.div.fmas.f32(float %a, float %b, float %c, i1 %d) nounwind readnone
+define void @test_div_fmas_f32_inline_imm_0(float addrspace(1)* %out, float %a, float %b, float %c, i1 %d) nounwind {
+ %result = call float @llvm.AMDGPU.div.fmas.f32(float 1.0, float %b, float %c, i1 %d) nounwind readnone
+ store float %result, float addrspace(1)* %out, align 4
+ ret void
+}
+
+; GCN-LABEL: {{^}}test_div_fmas_f32_inline_imm_1:
+; SI-DAG: s_load_dword [[SA:s[0-9]+]], s{{\[[0-9]+:[0-9]+\]}}, 0xb
+; SI-DAG: s_load_dword [[SC:s[0-9]+]], s{{\[[0-9]+:[0-9]+\]}}, 0xd
+; SI-DAG: v_mov_b32_e32 [[VC:v[0-9]+]], [[SC]]
+; SI-DAG: v_mov_b32_e32 [[VA:v[0-9]+]], [[SA]]
+; SI: v_div_fmas_f32 [[RESULT:v[0-9]+]], 1.0, [[VA]], [[VC]]
+; SI: buffer_store_dword [[RESULT]],
+; SI: s_endpgm
+define void @test_div_fmas_f32_inline_imm_1(float addrspace(1)* %out, float %a, float %b, float %c, i1 %d) nounwind {
+ %result = call float @llvm.AMDGPU.div.fmas.f32(float %a, float 1.0, float %c, i1 %d) nounwind readnone
store float %result, float addrspace(1)* %out, align 4
ret void
}
-; SI-LABEL: {{^}}test_div_fmas_f64:
-; SI: v_div_fmas_f64
+; GCN-LABEL: {{^}}test_div_fmas_f32_inline_imm_2:
+; SI-DAG: s_load_dword [[SA:s[0-9]+]], s{{\[[0-9]+:[0-9]+\]}}, 0xb
+; SI-DAG: s_load_dword [[SB:s[0-9]+]], s{{\[[0-9]+:[0-9]+\]}}, 0xc
+; SI-DAG: v_mov_b32_e32 [[VA:v[0-9]+]], [[SA]]
+; SI-DAG: v_mov_b32_e32 [[VB:v[0-9]+]], [[SB]]
+; SI: v_div_fmas_f32 [[RESULT:v[0-9]+]], [[VA]], [[VB]], 1.0
+; SI: buffer_store_dword [[RESULT]],
+; SI: s_endpgm
+define void @test_div_fmas_f32_inline_imm_2(float addrspace(1)* %out, float %a, float %b, float %c, i1 %d) nounwind {
+ %result = call float @llvm.AMDGPU.div.fmas.f32(float %a, float %b, float 1.0, i1 %d) nounwind readnone
+ store float %result, float addrspace(1)* %out, align 4
+ ret void
+}
+
+; GCN-LABEL: {{^}}test_div_fmas_f64:
+; GCN: v_div_fmas_f64
define void @test_div_fmas_f64(double addrspace(1)* %out, double %a, double %b, double %c, i1 %d) nounwind {
%result = call double @llvm.AMDGPU.div.fmas.f64(double %a, double %b, double %c, i1 %d) nounwind readnone
store double %result, double addrspace(1)* %out, align 8
ret void
}
+
+; GCN-LABEL: {{^}}test_div_fmas_f32_cond_to_vcc:
+; SI: v_cmp_eq_i32_e64 vcc, s{{[0-9]+}}, 0
+; SI: v_div_fmas_f32 {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+define void @test_div_fmas_f32_cond_to_vcc(float addrspace(1)* %out, float %a, float %b, float %c, i32 %i) nounwind {
+ %cmp = icmp eq i32 %i, 0
+ %result = call float @llvm.AMDGPU.div.fmas.f32(float %a, float %b, float %c, i1 %cmp) nounwind readnone
+ store float %result, float addrspace(1)* %out, align 4
+ ret void
+}
+
+; GCN-LABEL: {{^}}test_div_fmas_f32_imm_false_cond_to_vcc:
+; SI: s_mov_b64 vcc, 0
+; SI: v_div_fmas_f32 {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+define void @test_div_fmas_f32_imm_false_cond_to_vcc(float addrspace(1)* %out, float %a, float %b, float %c) nounwind {
+ %result = call float @llvm.AMDGPU.div.fmas.f32(float %a, float %b, float %c, i1 false) nounwind readnone
+ store float %result, float addrspace(1)* %out, align 4
+ ret void
+}
+
+; GCN-LABEL: {{^}}test_div_fmas_f32_imm_true_cond_to_vcc:
+; SI: s_mov_b64 vcc, -1
+; SI: v_div_fmas_f32 {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+define void @test_div_fmas_f32_imm_true_cond_to_vcc(float addrspace(1)* %out, float %a, float %b, float %c) nounwind {
+ %result = call float @llvm.AMDGPU.div.fmas.f32(float %a, float %b, float %c, i1 true) nounwind readnone
+ store float %result, float addrspace(1)* %out, align 4
+ ret void
+}
+
+; GCN-LABEL: {{^}}test_div_fmas_f32_logical_cond_to_vcc:
+; SI-DAG: buffer_load_dword [[A:v[0-9]+]], {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, 0 addr64{{$}}
+; SI-DAG: buffer_load_dword [[B:v[0-9]+]], {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, 0 addr64 offset:4{{$}}
+; SI-DAG: buffer_load_dword [[C:v[0-9]+]], {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, 0 addr64 offset:8{{$}}
+
+; SI-DAG: v_cmp_eq_i32_e64 [[CMP0:s\[[0-9]+:[0-9]+\]]], v{{[0-9]+}}, 0
+; SI-DAG: v_cmp_ne_i32_e64 [[CMP1:s\[[0-9]+:[0-9]+\]]], s{{[0-9]+}}, 0
+; SI: s_and_b64 vcc, [[CMP0]], [[CMP1]]
+; SI: v_div_fmas_f32 {{v[0-9]+}}, [[A]], [[B]], [[C]]
+; SI: s_endpgm
+define void @test_div_fmas_f32_logical_cond_to_vcc(float addrspace(1)* %out, float addrspace(1)* %in, i32 %d) nounwind {
+ %tid = call i32 @llvm.r600.read.tidig.x() nounwind readnone
+ %gep.a = getelementptr float addrspace(1)* %in, i32 %tid
+ %gep.b = getelementptr float addrspace(1)* %gep.a, i32 1
+ %gep.c = getelementptr float addrspace(1)* %gep.a, i32 2
+ %gep.out = getelementptr float addrspace(1)* %out, i32 2
+
+ %a = load float addrspace(1)* %gep.a
+ %b = load float addrspace(1)* %gep.b
+ %c = load float addrspace(1)* %gep.c
+
+ %cmp0 = icmp eq i32 %tid, 0
+ %cmp1 = icmp ne i32 %d, 0
+ %and = and i1 %cmp0, %cmp1
+
+ %result = call float @llvm.AMDGPU.div.fmas.f32(float %a, float %b, float %c, i1 %and) nounwind readnone
+ store float %result, float addrspace(1)* %gep.out, align 4
+ ret void
+}
+
+; GCN-LABEL: {{^}}test_div_fmas_f32_i1_phi_vcc:
+; SI: v_cmp_eq_i32_e64 [[CMPTID:s\[[0-9]+:[0-9]+\]]], v{{[0-9]+}}, 0
+; SI: s_and_saveexec_b64 [[CMPTID]], [[CMPTID]]
+; SI: s_xor_b64 [[CMPTID]], exec, [[CMPTID]]
+
+; SI: buffer_load_dword [[LOAD:v[0-9]+]]
+; SI: v_cmp_ne_i32_e64 [[CMPLOAD:s\[[0-9]+:[0-9]+\]]], [[LOAD]], 0
+; SI: v_cndmask_b32_e64 {{v[0-9]+}}, 0, -1, [[CMPLOAD]]
+
+
+; SI: BB9_2:
+; SI: s_or_b64 exec, exec, [[CMPTID]]
+; SI: v_cmp_ne_i32_e32 vcc, 0, v0
+; SI: v_div_fmas_f32 {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+; SI: buffer_store_dword
+; SI: s_endpgm
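+; (The i1 phi is materialized as a 0/-1 mask by the v_cndmask_b32 above and
+; converted back into a VCC condition by the v_cmp_ne against zero.)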
+define void @test_div_fmas_f32_i1_phi_vcc(float addrspace(1)* %out, float addrspace(1)* %in, i32 addrspace(1)* %dummy) nounwind {
+entry:
+ %tid = call i32 @llvm.r600.read.tidig.x() nounwind readnone
+ %gep.out = getelementptr float addrspace(1)* %out, i32 2
+ %gep.a = getelementptr float addrspace(1)* %in, i32 %tid
+ %gep.b = getelementptr float addrspace(1)* %gep.a, i32 1
+ %gep.c = getelementptr float addrspace(1)* %gep.a, i32 2
+
+ %a = load float addrspace(1)* %gep.a
+ %b = load float addrspace(1)* %gep.b
+ %c = load float addrspace(1)* %gep.c
+
+ %cmp0 = icmp eq i32 %tid, 0
+ br i1 %cmp0, label %bb, label %exit
+
+bb:
+ %val = load i32 addrspace(1)* %dummy
+ %cmp1 = icmp ne i32 %val, 0
+ br label %exit
+
+exit:
+ %cond = phi i1 [false, %entry], [%cmp1, %bb]
+ %result = call float @llvm.AMDGPU.div.fmas.f32(float %a, float %b, float %c, i1 %cond) nounwind readnone
+ store float %result, float addrspace(1)* %gep.out, align 4
+ ret void
+}
diff --git a/test/CodeGen/R600/llvm.AMDGPU.div_scale.ll b/test/CodeGen/R600/llvm.AMDGPU.div_scale.ll
index 641c8ca..5773da0 100644
--- a/test/CodeGen/R600/llvm.AMDGPU.div_scale.ll
+++ b/test/CodeGen/R600/llvm.AMDGPU.div_scale.ll
@@ -1,12 +1,13 @@
-; RUN: llc -march=r600 -mcpu=SI -verify-machineinstrs < %s | FileCheck -check-prefix=SI %s
+; RUN: llc -march=amdgcn -mcpu=SI -verify-machineinstrs < %s | FileCheck -check-prefix=SI %s
declare i32 @llvm.r600.read.tidig.x() nounwind readnone
declare { float, i1 } @llvm.AMDGPU.div.scale.f32(float, float, i1) nounwind readnone
declare { double, i1 } @llvm.AMDGPU.div.scale.f64(double, double, i1) nounwind readnone
+declare float @llvm.fabs.f32(float) nounwind readnone
; SI-LABEL: {{^}}test_div_scale_f32_1:
; SI-DAG: buffer_load_dword [[A:v[0-9]+]], {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, 0 addr64
-; SI-DAG: buffer_load_dword [[B:v[0-9]+]], {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, 0 addr64 offset:0x4
+; SI-DAG: buffer_load_dword [[B:v[0-9]+]], {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, 0 addr64 offset:4
; SI: v_div_scale_f32 [[RESULT0:v[0-9]+]], [[RESULT1:s\[[0-9]+:[0-9]+\]]], [[B]], [[B]], [[A]]
; SI: buffer_store_dword [[RESULT0]]
; SI: s_endpgm
@@ -26,7 +27,7 @@ define void @test_div_scale_f32_1(float addrspace(1)* %out, float addrspace(1)*
; SI-LABEL: {{^}}test_div_scale_f32_2:
; SI-DAG: buffer_load_dword [[A:v[0-9]+]], {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, 0 addr64
-; SI-DAG: buffer_load_dword [[B:v[0-9]+]], {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, 0 addr64 offset:0x4
+; SI-DAG: buffer_load_dword [[B:v[0-9]+]], {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, 0 addr64 offset:4
; SI: v_div_scale_f32 [[RESULT0:v[0-9]+]], [[RESULT1:s\[[0-9]+:[0-9]+\]]], [[A]], [[B]], [[A]]
; SI: buffer_store_dword [[RESULT0]]
; SI: s_endpgm
@@ -46,7 +47,7 @@ define void @test_div_scale_f32_2(float addrspace(1)* %out, float addrspace(1)*
; SI-LABEL: {{^}}test_div_scale_f64_1:
; SI-DAG: buffer_load_dwordx2 [[A:v\[[0-9]+:[0-9]+\]]], {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, 0 addr64
-; SI-DAG: buffer_load_dwordx2 [[B:v\[[0-9]+:[0-9]+\]]], {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, 0 addr64 offset:0x8
+; SI-DAG: buffer_load_dwordx2 [[B:v\[[0-9]+:[0-9]+\]]], {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, 0 addr64 offset:8
; SI: v_div_scale_f64 [[RESULT0:v\[[0-9]+:[0-9]+\]]], [[RESULT1:s\[[0-9]+:[0-9]+\]]], [[B]], [[B]], [[A]]
; SI: buffer_store_dwordx2 [[RESULT0]]
; SI: s_endpgm
@@ -66,7 +67,7 @@ define void @test_div_scale_f64_1(double addrspace(1)* %out, double addrspace(1)
; SI-LABEL: {{^}}test_div_scale_f64_2:
; SI-DAG: buffer_load_dwordx2 [[A:v\[[0-9]+:[0-9]+\]]], {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, 0 addr64
-; SI-DAG: buffer_load_dwordx2 [[B:v\[[0-9]+:[0-9]+\]]], {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, 0 addr64 offset:0x8
+; SI-DAG: buffer_load_dwordx2 [[B:v\[[0-9]+:[0-9]+\]]], {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, 0 addr64 offset:8
; SI: v_div_scale_f64 [[RESULT0:v\[[0-9]+:[0-9]+\]]], [[RESULT1:s\[[0-9]+:[0-9]+\]]], [[A]], [[B]], [[A]]
; SI: buffer_store_dwordx2 [[RESULT0]]
; SI: s_endpgm
@@ -285,3 +286,79 @@ define void @test_div_scale_f64_all_scalar_2(double addrspace(1)* %out, double %
store double %result0, double addrspace(1)* %out, align 8
ret void
}
+
+; SI-LABEL: {{^}}test_div_scale_f32_inline_imm_num:
+; SI-DAG: buffer_load_dword [[A:v[0-9]+]], {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, 0 addr64{{$}}
+; SI: v_div_scale_f32 [[RESULT0:v[0-9]+]], [[RESULT1:s\[[0-9]+:[0-9]+\]]], [[A]], [[A]], 1.0
+; SI: buffer_store_dword [[RESULT0]]
+; SI: s_endpgm
+define void @test_div_scale_f32_inline_imm_num(float addrspace(1)* %out, float addrspace(1)* %in) nounwind {
+ %tid = call i32 @llvm.r600.read.tidig.x() nounwind readnone
+ %gep.0 = getelementptr float addrspace(1)* %in, i32 %tid
+ %a = load float addrspace(1)* %gep.0, align 4
+
+ %result = call { float, i1 } @llvm.AMDGPU.div.scale.f32(float 1.0, float %a, i1 false) nounwind readnone
+ %result0 = extractvalue { float, i1 } %result, 0
+ store float %result0, float addrspace(1)* %out, align 4
+ ret void
+}
+
+; SI-LABEL: {{^}}test_div_scale_f32_inline_imm_den:
+; SI-DAG: buffer_load_dword [[A:v[0-9]+]], {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, 0 addr64{{$}}
+; SI: v_div_scale_f32 [[RESULT0:v[0-9]+]], [[RESULT1:s\[[0-9]+:[0-9]+\]]], 2.0, 2.0, [[A]]
+; SI: buffer_store_dword [[RESULT0]]
+; SI: s_endpgm
+define void @test_div_scale_f32_inline_imm_den(float addrspace(1)* %out, float addrspace(1)* %in) nounwind {
+ %tid = call i32 @llvm.r600.read.tidig.x() nounwind readnone
+ %gep.0 = getelementptr float addrspace(1)* %in, i32 %tid
+ %a = load float addrspace(1)* %gep.0, align 4
+
+ %result = call { float, i1 } @llvm.AMDGPU.div.scale.f32(float %a, float 2.0, i1 false) nounwind readnone
+ %result0 = extractvalue { float, i1 } %result, 0
+ store float %result0, float addrspace(1)* %out, align 4
+ ret void
+}
+
+; SI-LABEL: {{^}}test_div_scale_f32_fabs_num:
+; SI-DAG: buffer_load_dword [[A:v[0-9]+]], {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, 0 addr64
+; SI-DAG: buffer_load_dword [[B:v[0-9]+]], {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, 0 addr64 offset:4
+; SI: v_div_scale_f32 [[RESULT0:v[0-9]+]], [[RESULT1:s\[[0-9]+:[0-9]+\]]], [[B]], [[B]], |[[A]]|
+; SI: buffer_store_dword [[RESULT0]]
+; SI: s_endpgm
+define void @test_div_scale_f32_fabs_num(float addrspace(1)* %out, float addrspace(1)* %in) nounwind {
+ %tid = call i32 @llvm.r600.read.tidig.x() nounwind readnone
+ %gep.0 = getelementptr float addrspace(1)* %in, i32 %tid
+ %gep.1 = getelementptr float addrspace(1)* %gep.0, i32 1
+
+ %a = load float addrspace(1)* %gep.0, align 4
+ %b = load float addrspace(1)* %gep.1, align 4
+
+ %a.fabs = call float @llvm.fabs.f32(float %a) nounwind readnone
+
+ %result = call { float, i1 } @llvm.AMDGPU.div.scale.f32(float %a.fabs, float %b, i1 false) nounwind readnone
+ %result0 = extractvalue { float, i1 } %result, 0
+ store float %result0, float addrspace(1)* %out, align 4
+ ret void
+}
+
+; SI-LABEL: {{^}}test_div_scale_f32_fabs_den:
+; SI-DAG: buffer_load_dword [[A:v[0-9]+]], {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, 0 addr64
+; SI-DAG: buffer_load_dword [[B:v[0-9]+]], {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, 0 addr64 offset:4
+; SI: v_div_scale_f32 [[RESULT0:v[0-9]+]], [[RESULT1:s\[[0-9]+:[0-9]+\]]], |[[B]]|, |[[B]]|, [[A]]
+; SI: buffer_store_dword [[RESULT0]]
+; SI: s_endpgm
+define void @test_div_scale_f32_fabs_den(float addrspace(1)* %out, float addrspace(1)* %in) nounwind {
+ %tid = call i32 @llvm.r600.read.tidig.x() nounwind readnone
+ %gep.0 = getelementptr float addrspace(1)* %in, i32 %tid
+ %gep.1 = getelementptr float addrspace(1)* %gep.0, i32 1
+
+ %a = load float addrspace(1)* %gep.0, align 4
+ %b = load float addrspace(1)* %gep.1, align 4
+
+ %b.fabs = call float @llvm.fabs.f32(float %b) nounwind readnone
+
+ %result = call { float, i1 } @llvm.AMDGPU.div.scale.f32(float %a, float %b.fabs, i1 false) nounwind readnone
+ %result0 = extractvalue { float, i1 } %result, 0
+ store float %result0, float addrspace(1)* %out, align 4
+ ret void
+}
diff --git a/test/CodeGen/R600/llvm.AMDGPU.fract.ll b/test/CodeGen/R600/llvm.AMDGPU.fract.ll
index 235068c..7d15300 100644
--- a/test/CodeGen/R600/llvm.AMDGPU.fract.ll
+++ b/test/CodeGen/R600/llvm.AMDGPU.fract.ll
@@ -1,4 +1,5 @@
-; RUN: llc -march=r600 -mcpu=SI -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
+; RUN: llc -march=amdgcn -mcpu=SI -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
+; RUN: llc -march=amdgcn -mcpu=tonga -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
; RUN: llc -march=r600 -mcpu=cypress -verify-machineinstrs < %s | FileCheck -check-prefix=EG -check-prefix=FUNC %s
declare float @llvm.AMDGPU.fract.f32(float) nounwind readnone
diff --git a/test/CodeGen/R600/llvm.AMDGPU.imad24.ll b/test/CodeGen/R600/llvm.AMDGPU.imad24.ll
index 8998840..42102e3 100644
--- a/test/CodeGen/R600/llvm.AMDGPU.imad24.ll
+++ b/test/CodeGen/R600/llvm.AMDGPU.imad24.ll
@@ -1,4 +1,5 @@
-; RUN: llc -march=r600 -mcpu=SI -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
+; RUN: llc -march=amdgcn -mcpu=SI -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
+; RUN: llc -march=amdgcn -mcpu=tonga -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
; RUN: llc -march=r600 -mcpu=cayman -verify-machineinstrs < %s | FileCheck -check-prefix=CM -check-prefix=FUNC %s
; RUN: llc -march=r600 -mcpu=redwood -verify-machineinstrs < %s | FileCheck -check-prefix=R600 -check-prefix=FUNC %s
; XUN: llc -march=r600 -mcpu=r600 -verify-machineinstrs < %s | FileCheck -check-prefix=R600 -check-prefix=FUNC %s
diff --git a/test/CodeGen/R600/llvm.AMDGPU.imax.ll b/test/CodeGen/R600/llvm.AMDGPU.imax.ll
index dac21a4..ce7fca0 100644
--- a/test/CodeGen/R600/llvm.AMDGPU.imax.ll
+++ b/test/CodeGen/R600/llvm.AMDGPU.imax.ll
@@ -1,4 +1,5 @@
-; RUN: llc < %s -march=r600 -mcpu=verde -verify-machineinstrs | FileCheck -check-prefix=SI %s
+; RUN: llc < %s -march=amdgcn -mcpu=verde -verify-machineinstrs | FileCheck -check-prefix=SI %s
+; RUN: llc < %s -march=amdgcn -mcpu=tonga -verify-machineinstrs | FileCheck -check-prefix=SI %s
; SI-LABEL: {{^}}vector_imax:
; SI: v_max_i32_e32
@@ -29,4 +30,4 @@ declare void @llvm.SI.export(i32, i32, i32, i32, i32, float, float, float, float
attributes #0 = { nounwind }
attributes #1 = { nounwind readnone }
-!0 = metadata !{metadata !"const", null, i32 1}
+!0 = !{!"const", null, i32 1}
diff --git a/test/CodeGen/R600/llvm.AMDGPU.imin.ll b/test/CodeGen/R600/llvm.AMDGPU.imin.ll
index 462c497..15cd38b 100644
--- a/test/CodeGen/R600/llvm.AMDGPU.imin.ll
+++ b/test/CodeGen/R600/llvm.AMDGPU.imin.ll
@@ -1,4 +1,5 @@
-; RUN: llc < %s -march=r600 -mcpu=verde -verify-machineinstrs | FileCheck -check-prefix=SI %s
+; RUN: llc < %s -march=amdgcn -mcpu=verde -verify-machineinstrs | FileCheck -check-prefix=SI %s
+; RUN: llc < %s -march=amdgcn -mcpu=tonga -verify-machineinstrs | FileCheck -check-prefix=SI %s
; SI-LABEL: {{^}}vector_imin:
; SI: v_min_i32_e32
@@ -29,4 +30,4 @@ declare void @llvm.SI.export(i32, i32, i32, i32, i32, float, float, float, float
attributes #0 = { nounwind }
attributes #1 = { nounwind readnone }
-!0 = metadata !{metadata !"const", null, i32 1}
+!0 = !{!"const", null, i32 1}
diff --git a/test/CodeGen/R600/llvm.AMDGPU.imul24.ll b/test/CodeGen/R600/llvm.AMDGPU.imul24.ll
index db563dd..fdc1172 100644
--- a/test/CodeGen/R600/llvm.AMDGPU.imul24.ll
+++ b/test/CodeGen/R600/llvm.AMDGPU.imul24.ll
@@ -1,4 +1,5 @@
-; RUN: llc -march=r600 -mcpu=SI -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
+; RUN: llc -march=amdgcn -mcpu=SI -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
+; RUN: llc -march=amdgcn -mcpu=tonga -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
; RUN: llc -march=r600 -mcpu=cayman -verify-machineinstrs < %s | FileCheck -check-prefix=CM -check-prefix=FUNC %s
; RUN: llc -march=r600 -mcpu=redwood -verify-machineinstrs < %s | FileCheck -check-prefix=R600 -check-prefix=FUNC %s
diff --git a/test/CodeGen/R600/llvm.AMDGPU.kill.ll b/test/CodeGen/R600/llvm.AMDGPU.kill.ll
index 988b43c..d1ff3b1 100644
--- a/test/CodeGen/R600/llvm.AMDGPU.kill.ll
+++ b/test/CodeGen/R600/llvm.AMDGPU.kill.ll
@@ -1,4 +1,5 @@
-; RUN: llc -march=r600 -mcpu=verde -verify-machineinstrs < %s | FileCheck -check-prefix=SI %s
+; RUN: llc -march=amdgcn -mcpu=verde -verify-machineinstrs < %s | FileCheck -check-prefix=SI %s
+; RUN: llc -march=amdgcn -mcpu=tonga -verify-machineinstrs < %s | FileCheck -check-prefix=SI %s
; SI-LABEL: {{^}}kill_gs_const:
; SI-NOT: v_cmpx_le_f32
@@ -19,4 +20,4 @@ declare void @llvm.AMDGPU.kill(float)
attributes #0 = { "ShaderType"="2" }
-!0 = metadata !{metadata !"const", null, i32 1}
+!0 = !{!"const", null, i32 1}
diff --git a/test/CodeGen/R600/llvm.AMDGPU.ldexp.ll b/test/CodeGen/R600/llvm.AMDGPU.ldexp.ll
index 72719fe..a59c0ce 100644
--- a/test/CodeGen/R600/llvm.AMDGPU.ldexp.ll
+++ b/test/CodeGen/R600/llvm.AMDGPU.ldexp.ll
@@ -1,4 +1,5 @@
-; RUN: llc -march=r600 -mcpu=SI -verify-machineinstrs < %s | FileCheck -check-prefix=SI %s
+; RUN: llc -march=amdgcn -mcpu=SI -verify-machineinstrs < %s | FileCheck -check-prefix=SI %s
+; RUN: llc -march=amdgcn -mcpu=tonga -verify-machineinstrs < %s | FileCheck -check-prefix=SI %s
declare float @llvm.AMDGPU.ldexp.f32(float, i32) nounwind readnone
declare double @llvm.AMDGPU.ldexp.f64(double, i32) nounwind readnone
diff --git a/test/CodeGen/R600/llvm.AMDGPU.legacy.rsq.ll b/test/CodeGen/R600/llvm.AMDGPU.legacy.rsq.ll
index 6e3fa25..4cafd56 100644
--- a/test/CodeGen/R600/llvm.AMDGPU.legacy.rsq.ll
+++ b/test/CodeGen/R600/llvm.AMDGPU.legacy.rsq.ll
@@ -1,4 +1,4 @@
-; RUN: llc -march=r600 -mcpu=SI -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
+; RUN: llc -march=amdgcn -mcpu=SI -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
; RUN: llc -march=r600 -mcpu=cypress -verify-machineinstrs < %s | FileCheck -check-prefix=EG -check-prefix=FUNC %s
declare float @llvm.AMDGPU.legacy.rsq(float) nounwind readnone
diff --git a/test/CodeGen/R600/llvm.AMDGPU.rcp.f64.ll b/test/CodeGen/R600/llvm.AMDGPU.rcp.f64.ll
index c4b04c5..d2a655b 100644
--- a/test/CodeGen/R600/llvm.AMDGPU.rcp.f64.ll
+++ b/test/CodeGen/R600/llvm.AMDGPU.rcp.f64.ll
@@ -1,4 +1,5 @@
-; RUN: llc -march=r600 -mcpu=SI -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
+; RUN: llc -march=amdgcn -mcpu=SI -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
+; RUN: llc -march=amdgcn -mcpu=tonga -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
declare double @llvm.AMDGPU.rcp.f64(double) nounwind readnone
declare double @llvm.sqrt.f64(double) nounwind readnone
@@ -22,6 +23,8 @@ define void @rcp_pat_f64(double addrspace(1)* %out, double %src) nounwind {
; FUNC-LABEL: {{^}}rsq_rcp_pat_f64:
; SI-UNSAFE: v_rsq_f64_e32
; SI-SAFE-NOT: v_rsq_f64_e32
+; SI-SAFE: v_sqrt_f64
+; SI-SAFE: v_rcp_f64
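+; Without -enable-unsafe-fp-math the separate sqrt and rcp must be kept
+; instead of being folded into a single rsq.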
define void @rsq_rcp_pat_f64(double addrspace(1)* %out, double %src) nounwind {
%sqrt = call double @llvm.sqrt.f64(double %src) nounwind readnone
%rcp = call double @llvm.AMDGPU.rcp.f64(double %sqrt) nounwind readnone
diff --git a/test/CodeGen/R600/llvm.AMDGPU.rcp.ll b/test/CodeGen/R600/llvm.AMDGPU.rcp.ll
index 3ee3e6b..edd6e9a 100644
--- a/test/CodeGen/R600/llvm.AMDGPU.rcp.ll
+++ b/test/CodeGen/R600/llvm.AMDGPU.rcp.ll
@@ -1,6 +1,9 @@
-; RUN: llc -march=r600 -mcpu=SI -mattr=-fp32-denormals -enable-unsafe-fp-math -verify-machineinstrs < %s | FileCheck -check-prefix=SI-UNSAFE -check-prefix=SI -check-prefix=FUNC %s
-; RUN: llc -march=r600 -mcpu=SI -mattr=-fp32-denormals -verify-machineinstrs < %s | FileCheck -check-prefix=SI-SAFE -check-prefix=SI -check-prefix=FUNC %s
-; XUN: llc -march=r600 -mcpu=SI -mattr=+fp32-denormals -verify-machineinstrs < %s | FileCheck -check-prefix=SI-SAFE-SPDENORM -check-prefix=SI -check-prefix=FUNC %s
+; RUN: llc -march=amdgcn -mcpu=SI -mattr=-fp32-denormals -enable-unsafe-fp-math -verify-machineinstrs < %s | FileCheck -check-prefix=SI-UNSAFE -check-prefix=SI -check-prefix=FUNC %s
+; RUN: llc -march=amdgcn -mcpu=SI -mattr=-fp32-denormals -verify-machineinstrs < %s | FileCheck -check-prefix=SI-SAFE -check-prefix=SI -check-prefix=FUNC %s
+; XUN: llc -march=amdgcn -mcpu=SI -mattr=+fp32-denormals -verify-machineinstrs < %s | FileCheck -check-prefix=SI-SAFE-SPDENORM -check-prefix=SI -check-prefix=FUNC %s
+; RUN: llc -march=amdgcn -mcpu=tonga -mattr=-fp32-denormals -enable-unsafe-fp-math -verify-machineinstrs < %s | FileCheck -check-prefix=SI-UNSAFE -check-prefix=SI -check-prefix=FUNC %s
+; RUN: llc -march=amdgcn -mcpu=tonga -mattr=-fp32-denormals -verify-machineinstrs < %s | FileCheck -check-prefix=SI-SAFE -check-prefix=SI -check-prefix=FUNC %s
+; XUN: llc -march=amdgcn -mcpu=tonga -mattr=+fp32-denormals -verify-machineinstrs < %s | FileCheck -check-prefix=SI-SAFE-SPDENORM -check-prefix=SI -check-prefix=FUNC %s
; RUN: llc -march=r600 -mcpu=cypress -verify-machineinstrs < %s | FileCheck -check-prefix=EG-SAFE -check-prefix=FUNC %s
; RUN: llc -march=r600 -mcpu=cayman -verify-machineinstrs < %s | FileCheck -check-prefix=EG -check-prefix=FUNC %s
diff --git a/test/CodeGen/R600/llvm.AMDGPU.rsq.clamped.f64.ll b/test/CodeGen/R600/llvm.AMDGPU.rsq.clamped.f64.ll
index 18854be..67f1d22 100644
--- a/test/CodeGen/R600/llvm.AMDGPU.rsq.clamped.f64.ll
+++ b/test/CodeGen/R600/llvm.AMDGPU.rsq.clamped.f64.ll
@@ -1,9 +1,21 @@
-; RUN: llc -march=r600 -mcpu=SI -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
+; RUN: llc -march=amdgcn -mcpu=SI -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
+; RUN: llc -march=amdgcn -mcpu=tonga -verify-machineinstrs < %s | FileCheck -check-prefix=VI -check-prefix=FUNC %s
declare double @llvm.AMDGPU.rsq.clamped.f64(double) nounwind readnone
; FUNC-LABEL: {{^}}rsq_clamped_f64:
; SI: v_rsq_clamp_f64_e32
+
+; VI: v_rsq_f64_e32 [[RSQ:v\[[0-9]+:[0-9]+\]]], s[2:3]
+; TODO: this constant should be folded:
+; VI: s_mov_b32 s[[ALLBITS:[0-9]+]], -1
+; VI: s_mov_b32 s[[HIGH1:[0-9]+]], 0x7fefffff
+; VI: s_mov_b32 s[[LOW1:[0-9]+]], s[[ALLBITS]]
+; VI: v_min_f64 v[0:1], [[RSQ]], s{{\[}}[[LOW1]]:[[HIGH1]]]
+; VI: s_mov_b32 s[[HIGH2:[0-9]+]], 0xffefffff
+; VI: s_mov_b32 s[[LOW2:[0-9]+]], s[[ALLBITS]]
+; VI: v_max_f64 v[0:1], v[0:1], s{{\[}}[[LOW2]]:[[HIGH2]]]
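+; (The clamp bounds are +/-DBL_MAX: high words 0x7fefffff and 0xffefffff with
+; an all-ones low word, materialized here instead of being folded.)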
+
define void @rsq_clamped_f64(double addrspace(1)* %out, double %src) nounwind {
%rsq_clamped = call double @llvm.AMDGPU.rsq.clamped.f64(double %src) nounwind readnone
store double %rsq_clamped, double addrspace(1)* %out, align 8
diff --git a/test/CodeGen/R600/llvm.AMDGPU.rsq.clamped.ll b/test/CodeGen/R600/llvm.AMDGPU.rsq.clamped.ll
index 6bf9f0c..eeff253 100644
--- a/test/CodeGen/R600/llvm.AMDGPU.rsq.clamped.ll
+++ b/test/CodeGen/R600/llvm.AMDGPU.rsq.clamped.ll
@@ -1,4 +1,5 @@
-; RUN: llc -march=r600 -mcpu=SI -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
+; RUN: llc -march=amdgcn -mcpu=SI -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
+; RUN: llc -march=amdgcn -mcpu=tonga -verify-machineinstrs < %s | FileCheck -check-prefix=VI -check-prefix=FUNC %s
; RUN: llc -march=r600 -mcpu=cypress -verify-machineinstrs < %s | FileCheck -check-prefix=EG -check-prefix=FUNC %s
@@ -6,7 +7,15 @@ declare float @llvm.AMDGPU.rsq.clamped.f32(float) nounwind readnone
; FUNC-LABEL: {{^}}rsq_clamped_f32:
; SI: v_rsq_clamp_f32_e32
+
+; VI: v_rsq_f32_e32 [[RSQ:v[0-9]+]], {{s[0-9]+}}
+; VI: v_min_f32_e32 [[MIN:v[0-9]+]], 0x7f7fffff, [[RSQ]]
+; TODO: this constant should be folded:
+; VI: v_mov_b32_e32 [[MINFLT:v[0-9]+]], 0xff7fffff
+; VI: v_max_f32_e32 {{v[0-9]+}}, [[MIN]], [[MINFLT]]
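+; (VI has no v_rsq_clamp_f32, so the clamp is expanded into an explicit
+; min/max against +/-FLT_MAX, 0x7f7fffff / 0xff7fffff.)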
+
; EG: RECIPSQRT_CLAMPED
+
define void @rsq_clamped_f32(float addrspace(1)* %out, float %src) nounwind {
%rsq_clamped = call float @llvm.AMDGPU.rsq.clamped.f32(float %src) nounwind readnone
store float %rsq_clamped, float addrspace(1)* %out, align 4
diff --git a/test/CodeGen/R600/llvm.AMDGPU.rsq.ll b/test/CodeGen/R600/llvm.AMDGPU.rsq.ll
index d6299b8..36b72f1 100644
--- a/test/CodeGen/R600/llvm.AMDGPU.rsq.ll
+++ b/test/CodeGen/R600/llvm.AMDGPU.rsq.ll
@@ -1,4 +1,5 @@
-; RUN: llc -march=r600 -mcpu=SI -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
+; RUN: llc -march=amdgcn -mcpu=SI -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
+; RUN: llc -march=amdgcn -mcpu=tonga -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
; RUN: llc -march=r600 -mcpu=cypress -verify-machineinstrs < %s | FileCheck -check-prefix=EG -check-prefix=FUNC %s
declare float @llvm.AMDGPU.rsq.f32(float) nounwind readnone
diff --git a/test/CodeGen/R600/llvm.AMDGPU.trig_preop.ll b/test/CodeGen/R600/llvm.AMDGPU.trig_preop.ll
index 2e6bd5c..5829f73 100644
--- a/test/CodeGen/R600/llvm.AMDGPU.trig_preop.ll
+++ b/test/CodeGen/R600/llvm.AMDGPU.trig_preop.ll
@@ -1,4 +1,5 @@
-; RUN: llc -march=r600 -mcpu=SI -verify-machineinstrs < %s | FileCheck -check-prefix=SI %s
+; RUN: llc -march=amdgcn -mcpu=SI -verify-machineinstrs < %s | FileCheck -check-prefix=SI %s
+; RUN: llc -march=amdgcn -mcpu=tonga -verify-machineinstrs < %s | FileCheck -check-prefix=SI %s
declare double @llvm.AMDGPU.trig.preop.f64(double, i32) nounwind readnone
diff --git a/test/CodeGen/R600/llvm.AMDGPU.trunc.ll b/test/CodeGen/R600/llvm.AMDGPU.trunc.ll
index fdd531d..74792e5 100644
--- a/test/CodeGen/R600/llvm.AMDGPU.trunc.ll
+++ b/test/CodeGen/R600/llvm.AMDGPU.trunc.ll
@@ -1,10 +1,11 @@
-; RUN: llc < %s -march=r600 -mcpu=redwood | FileCheck --check-prefix=R600-CHECK %s
-; RUN: llc < %s -march=r600 -mcpu=verde -verify-machineinstrs | FileCheck --check-prefix=SI-CHECK %s
+; RUN: llc < %s -march=r600 -mcpu=redwood | FileCheck --check-prefix=R600 %s
+; RUN: llc < %s -march=amdgcn -mcpu=verde -verify-machineinstrs | FileCheck --check-prefix=SI %s
+; RUN: llc < %s -march=amdgcn -mcpu=tonga -verify-machineinstrs | FileCheck --check-prefix=SI %s
-; R600-CHECK: {{^}}amdgpu_trunc:
-; R600-CHECK: TRUNC T{{[0-9]+\.[XYZW]}}, KC0[2].Z
-; SI-CHECK: {{^}}amdgpu_trunc:
-; SI-CHECK: v_trunc_f32
+; R600: {{^}}amdgpu_trunc:
+; R600: TRUNC T{{[0-9]+\.[XYZW]}}, KC0[2].Z
+; SI: {{^}}amdgpu_trunc:
+; SI: v_trunc_f32
define void @amdgpu_trunc(float addrspace(1)* %out, float %x) {
entry:
diff --git a/test/CodeGen/R600/llvm.AMDGPU.umad24.ll b/test/CodeGen/R600/llvm.AMDGPU.umad24.ll
index 59d6248..88613db 100644
--- a/test/CodeGen/R600/llvm.AMDGPU.umad24.ll
+++ b/test/CodeGen/R600/llvm.AMDGPU.umad24.ll
@@ -1,4 +1,4 @@
-; RUN: llc -march=r600 -mcpu=SI -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
+; RUN: llc -march=amdgcn -mcpu=SI -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
; RUN: llc -march=r600 -mcpu=cayman -verify-machineinstrs < %s | FileCheck -check-prefix=EG -check-prefix=FUNC %s
; RUN: llc -march=r600 -mcpu=redwood -verify-machineinstrs < %s | FileCheck -check-prefix=EG -check-prefix=FUNC %s
; XUN: llc -march=r600 -mcpu=r600 -verify-machineinstrs < %s | FileCheck -check-prefix=R600 -check-prefix=FUNC %s
@@ -20,7 +20,7 @@ define void @test_umad24(i32 addrspace(1)* %out, i32 %src0, i32 %src1, i32 %src2
; FUNC-LABEL: {{^}}commute_umad24:
; SI-DAG: buffer_load_dword [[SRC0:v[0-9]+]], {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, 0 addr64{{$}}
-; SI-DAG: buffer_load_dword [[SRC2:v[0-9]+]], {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, 0 addr64 offset:0x4
+; SI-DAG: buffer_load_dword [[SRC2:v[0-9]+]], {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, 0 addr64 offset:4
; SI: v_mad_u32_u24 [[RESULT:v[0-9]+]], 4, [[SRC0]], [[SRC2]]
; SI: buffer_store_dword [[RESULT]]
define void @commute_umad24(i32 addrspace(1)* %out, i32 addrspace(1)* %in) nounwind {
diff --git a/test/CodeGen/R600/llvm.AMDGPU.umax.ll b/test/CodeGen/R600/llvm.AMDGPU.umax.ll
index ee854ec..4320dfe 100644
--- a/test/CodeGen/R600/llvm.AMDGPU.umax.ll
+++ b/test/CodeGen/R600/llvm.AMDGPU.umax.ll
@@ -1,4 +1,5 @@
-; RUN: llc < %s -march=r600 -mcpu=verde -verify-machineinstrs | FileCheck -check-prefix=SI %s
+; RUN: llc < %s -march=amdgcn -mcpu=verde -verify-machineinstrs | FileCheck -check-prefix=SI %s
+; RUN: llc < %s -march=amdgcn -mcpu=tonga -verify-machineinstrs | FileCheck -check-prefix=SI %s
; SI-LABEL: {{^}}vector_umax:
; SI: v_max_u32_e32
@@ -44,4 +45,4 @@ declare void @llvm.SI.export(i32, i32, i32, i32, i32, float, float, float, float
attributes #0 = { nounwind }
attributes #1 = { nounwind readnone }
-!0 = metadata !{metadata !"const", null, i32 1}
+!0 = !{!"const", null, i32 1}
diff --git a/test/CodeGen/R600/llvm.AMDGPU.umin.ll b/test/CodeGen/R600/llvm.AMDGPU.umin.ll
index 2eaa372..e4cac33 100644
--- a/test/CodeGen/R600/llvm.AMDGPU.umin.ll
+++ b/test/CodeGen/R600/llvm.AMDGPU.umin.ll
@@ -1,4 +1,5 @@
-; RUN: llc < %s -march=r600 -mcpu=verde -verify-machineinstrs | FileCheck -check-prefix=SI %s
+; RUN: llc < %s -march=amdgcn -mcpu=verde -verify-machineinstrs | FileCheck -check-prefix=SI %s
+; RUN: llc < %s -march=amdgcn -mcpu=tonga -verify-machineinstrs | FileCheck -check-prefix=SI %s
; SI-LABEL: {{^}}vector_umin:
; SI: v_min_u32_e32
@@ -44,4 +45,4 @@ declare void @llvm.SI.export(i32, i32, i32, i32, i32, float, float, float, float
attributes #0 = { nounwind }
attributes #1 = { nounwind readnone }
-!0 = metadata !{metadata !"const", null, i32 1}
+!0 = !{!"const", null, i32 1}
diff --git a/test/CodeGen/R600/llvm.AMDGPU.umul24.ll b/test/CodeGen/R600/llvm.AMDGPU.umul24.ll
index 567ac31..76624a0 100644
--- a/test/CodeGen/R600/llvm.AMDGPU.umul24.ll
+++ b/test/CodeGen/R600/llvm.AMDGPU.umul24.ll
@@ -1,4 +1,5 @@
-; RUN: llc -march=r600 -mcpu=SI -verify-machineinstrs < %s | FileCheck -check-prefix=SI %s
+; RUN: llc -march=amdgcn -mcpu=SI -verify-machineinstrs < %s | FileCheck -check-prefix=SI %s
+; RUN: llc -march=amdgcn -mcpu=tonga -verify-machineinstrs < %s | FileCheck -check-prefix=SI %s
; RUN: llc -march=r600 -mcpu=cayman -verify-machineinstrs < %s | FileCheck -check-prefix=EG -check-prefix=FUNC %s
; RUN: llc -march=r600 -mcpu=redwood -verify-machineinstrs < %s | FileCheck -check-prefix=EG -check-prefix=FUNC %s
; XUN: llc -march=r600 -mcpu=r600 -verify-machineinstrs < %s | FileCheck -check-prefix=R600 -check-prefix=FUNC %s
diff --git a/test/CodeGen/R600/llvm.SI.fs.interp.constant.ll b/test/CodeGen/R600/llvm.SI.fs.interp.constant.ll
deleted file mode 100644
index d26bc32..0000000
--- a/test/CodeGen/R600/llvm.SI.fs.interp.constant.ll
+++ /dev/null
@@ -1,21 +0,0 @@
-;RUN: llc < %s -march=r600 -mcpu=verde -verify-machineinstrs | FileCheck %s
-
-;CHECK: s_mov_b32
-;CHECK-NEXT: v_interp_mov_f32
-
-define void @main(<16 x i8> addrspace(2)* inreg, <16 x i8> addrspace(2)* inreg, <32 x i8> addrspace(2)* inreg, i32 inreg) "ShaderType"="0" {
-main_body:
- %4 = call float @llvm.SI.fs.constant(i32 0, i32 0, i32 %3)
- %5 = call i32 @llvm.SI.packf16(float %4, float %4)
- %6 = bitcast i32 %5 to float
- call void @llvm.SI.export(i32 15, i32 1, i32 1, i32 0, i32 1, float %6, float %6, float %6, float %6)
- ret void
-}
-
-declare void @llvm.AMDGPU.shader.type(i32)
-
-declare float @llvm.SI.fs.constant(i32, i32, i32) readnone
-
-declare i32 @llvm.SI.packf16(float, float) readnone
-
-declare void @llvm.SI.export(i32, i32, i32, i32, i32, float, float, float, float)
diff --git a/test/CodeGen/R600/llvm.SI.fs.interp.ll b/test/CodeGen/R600/llvm.SI.fs.interp.ll
new file mode 100644
index 0000000..6b36140
--- /dev/null
+++ b/test/CodeGen/R600/llvm.SI.fs.interp.ll
@@ -0,0 +1,30 @@
+;RUN: llc < %s -march=amdgcn -mcpu=verde -verify-machineinstrs | FileCheck %s
+;RUN: llc < %s -march=amdgcn -mcpu=tonga -verify-machineinstrs | FileCheck %s
+
+;CHECK-NOT: s_wqm
+;CHECK: s_mov_b32
+;CHECK-NEXT: v_interp_mov_f32
+;CHECK: v_interp_p1_f32
+;CHECK: v_interp_p2_f32
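+; Interpolation alone does not need whole-quad mode; only instructions with
+; implicit derivatives do, so s_wqm must not appear here.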
+
+define void @main(<16 x i8> addrspace(2)* inreg, <16 x i8> addrspace(2)* inreg, <32 x i8> addrspace(2)* inreg, i32 inreg, <2 x i32>) #0 {
+main_body:
+ %5 = call float @llvm.SI.fs.constant(i32 0, i32 0, i32 %3)
+ %6 = call float @llvm.SI.fs.interp(i32 0, i32 0, i32 %3, <2 x i32> %4)
+ %7 = call float @llvm.SI.fs.interp(i32 1, i32 0, i32 %3, <2 x i32> %4)
+ call void @llvm.SI.export(i32 15, i32 1, i32 1, i32 0, i32 1, float %5, float %6, float %7, float %7)
+ ret void
+}
+
+declare void @llvm.AMDGPU.shader.type(i32)
+
+; Function Attrs: nounwind readnone
+declare float @llvm.SI.fs.constant(i32, i32, i32) #1
+
+; Function Attrs: nounwind readnone
+declare float @llvm.SI.fs.interp(i32, i32, i32, <2 x i32>) #1
+
+declare void @llvm.SI.export(i32, i32, i32, i32, i32, float, float, float, float)
+
+attributes #0 = { "ShaderType"="0" }
+attributes #1 = { nounwind readnone }
diff --git a/test/CodeGen/R600/llvm.SI.gather4.ll b/test/CodeGen/R600/llvm.SI.gather4.ll
index 91a2012..275cb58 100644
--- a/test/CodeGen/R600/llvm.SI.gather4.ll
+++ b/test/CodeGen/R600/llvm.SI.gather4.ll
@@ -1,4 +1,5 @@
-;RUN: llc < %s -march=r600 -mcpu=verde -verify-machineinstrs | FileCheck %s
+;RUN: llc < %s -march=amdgcn -mcpu=verde -verify-machineinstrs | FileCheck %s
+;RUN: llc < %s -march=amdgcn -mcpu=tonga -verify-machineinstrs | FileCheck %s
;CHECK-LABEL: {{^}}gather4_v2:
;CHECK: image_gather4 {{v\[[0-9]+:[0-9]+\]}}, 1, 0, 0, -1, 0, 0, 0, 0, {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}
diff --git a/test/CodeGen/R600/llvm.SI.getlod.ll b/test/CodeGen/R600/llvm.SI.getlod.ll
index ec26fe5..06ee98e 100644
--- a/test/CodeGen/R600/llvm.SI.getlod.ll
+++ b/test/CodeGen/R600/llvm.SI.getlod.ll
@@ -1,4 +1,5 @@
-;RUN: llc < %s -march=r600 -mcpu=verde -verify-machineinstrs | FileCheck %s
+;RUN: llc < %s -march=amdgcn -mcpu=verde -verify-machineinstrs | FileCheck %s
+;RUN: llc < %s -march=amdgcn -mcpu=tonga -verify-machineinstrs | FileCheck %s
;CHECK-LABEL: {{^}}getlod:
;CHECK: image_get_lod {{v\[[0-9]+:[0-9]+\]}}, 3, 0, 0, -1, 0, 0, 0, 0, {{v[0-9]+}}, {{s\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}
diff --git a/test/CodeGen/R600/llvm.SI.image.ll b/test/CodeGen/R600/llvm.SI.image.ll
index 4eec543..0fac8d7 100644
--- a/test/CodeGen/R600/llvm.SI.image.ll
+++ b/test/CodeGen/R600/llvm.SI.image.ll
@@ -1,4 +1,5 @@
-;RUN: llc < %s -march=r600 -mcpu=verde -verify-machineinstrs | FileCheck %s
+;RUN: llc < %s -march=amdgcn -mcpu=verde -verify-machineinstrs | FileCheck %s
+;RUN: llc < %s -march=amdgcn -mcpu=tonga -verify-machineinstrs | FileCheck %s
;CHECK-LABEL: {{^}}image_load:
;CHECK: image_load {{v\[[0-9]+:[0-9]+\]}}, 15, 0, 0, 0, 0, 0, 0, 0, {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}
diff --git a/test/CodeGen/R600/llvm.SI.image.sample.ll b/test/CodeGen/R600/llvm.SI.image.sample.ll
index ebff391..4bc638a 100644
--- a/test/CodeGen/R600/llvm.SI.image.sample.ll
+++ b/test/CodeGen/R600/llvm.SI.image.sample.ll
@@ -1,6 +1,8 @@
-;RUN: llc < %s -march=r600 -mcpu=verde -verify-machineinstrs | FileCheck %s
+;RUN: llc < %s -march=amdgcn -mcpu=verde -verify-machineinstrs | FileCheck %s
+;RUN: llc < %s -march=amdgcn -mcpu=tonga -verify-machineinstrs | FileCheck %s
;CHECK-LABEL: {{^}}sample:
+;CHECK: s_wqm
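+; s_wqm enables whole-quad mode, which is only required for sample variants
+; that compute implicit derivatives; the variants with explicit derivatives
+; or LOD (_d, _cd, _l, _lz) check below that it is absent.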
;CHECK: image_sample {{v\[[0-9]+:[0-9]+\]}}, 15, 0, 0, 0, 0, 0, 0, 0, {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}
define void @sample() #0 {
main_body:
@@ -14,6 +16,7 @@ main_body:
}
;CHECK-LABEL: {{^}}sample_cl:
+;CHECK: s_wqm
;CHECK: image_sample_cl {{v\[[0-9]+:[0-9]+\]}}, 15, 0, 0, 0, 0, 0, 0, 0, {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}
define void @sample_cl() #0 {
main_body:
@@ -27,6 +30,7 @@ main_body:
}
;CHECK-LABEL: {{^}}sample_d:
+;CHECK-NOT: s_wqm
;CHECK: image_sample_d {{v\[[0-9]+:[0-9]+\]}}, 15, 0, 0, 0, 0, 0, 0, 0, {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}
define void @sample_d() #0 {
main_body:
@@ -40,6 +44,7 @@ main_body:
}
;CHECK-LABEL: {{^}}sample_d_cl:
+;CHECK-NOT: s_wqm
;CHECK: image_sample_d_cl {{v\[[0-9]+:[0-9]+\]}}, 15, 0, 0, 0, 0, 0, 0, 0, {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}
define void @sample_d_cl() #0 {
main_body:
@@ -53,6 +58,7 @@ main_body:
}
;CHECK-LABEL: {{^}}sample_l:
+;CHECK-NOT: s_wqm
;CHECK: image_sample_l {{v\[[0-9]+:[0-9]+\]}}, 15, 0, 0, 0, 0, 0, 0, 0, {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}
define void @sample_l() #0 {
main_body:
@@ -66,6 +72,7 @@ main_body:
}
;CHECK-LABEL: {{^}}sample_b:
+;CHECK: s_wqm
;CHECK: image_sample_b {{v\[[0-9]+:[0-9]+\]}}, 15, 0, 0, 0, 0, 0, 0, 0, {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}
define void @sample_b() #0 {
main_body:
@@ -79,6 +86,7 @@ main_body:
}
;CHECK-LABEL: {{^}}sample_b_cl:
+;CHECK: s_wqm
;CHECK: image_sample_b_cl {{v\[[0-9]+:[0-9]+\]}}, 15, 0, 0, 0, 0, 0, 0, 0, {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}
define void @sample_b_cl() #0 {
main_body:
@@ -92,6 +100,7 @@ main_body:
}
;CHECK-LABEL: {{^}}sample_lz:
+;CHECK-NOT: s_wqm
;CHECK: image_sample_lz {{v\[[0-9]+:[0-9]+\]}}, 15, 0, 0, 0, 0, 0, 0, 0, {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}
define void @sample_lz() #0 {
main_body:
@@ -105,6 +114,7 @@ main_body:
}
;CHECK-LABEL: {{^}}sample_cd:
+;CHECK-NOT: s_wqm
;CHECK: image_sample_cd {{v\[[0-9]+:[0-9]+\]}}, 15, 0, 0, 0, 0, 0, 0, 0, {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}
define void @sample_cd() #0 {
main_body:
@@ -118,6 +128,7 @@ main_body:
}
;CHECK-LABEL: {{^}}sample_cd_cl:
+;CHECK-NOT: s_wqm
;CHECK: image_sample_cd_cl {{v\[[0-9]+:[0-9]+\]}}, 15, 0, 0, 0, 0, 0, 0, 0, {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}
define void @sample_cd_cl() #0 {
main_body:
@@ -131,6 +142,7 @@ main_body:
}
;CHECK-LABEL: {{^}}sample_c:
+;CHECK: s_wqm
;CHECK: image_sample_c {{v\[[0-9]+:[0-9]+\]}}, 15, 0, 0, 0, 0, 0, 0, 0, {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}
define void @sample_c() #0 {
main_body:
@@ -144,6 +156,7 @@ main_body:
}
;CHECK-LABEL: {{^}}sample_c_cl:
+;CHECK: s_wqm
;CHECK: image_sample_c_cl {{v\[[0-9]+:[0-9]+\]}}, 15, 0, 0, 0, 0, 0, 0, 0, {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}
define void @sample_c_cl() #0 {
main_body:
@@ -157,6 +170,7 @@ main_body:
}
;CHECK-LABEL: {{^}}sample_c_d:
+;CHECK-NOT: s_wqm
;CHECK: image_sample_c_d {{v\[[0-9]+:[0-9]+\]}}, 15, 0, 0, 0, 0, 0, 0, 0, {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}
define void @sample_c_d() #0 {
main_body:
@@ -170,6 +184,7 @@ main_body:
}
;CHECK-LABEL: {{^}}sample_c_d_cl:
+;CHECK-NOT: s_wqm
;CHECK: image_sample_c_d_cl {{v\[[0-9]+:[0-9]+\]}}, 15, 0, 0, 0, 0, 0, 0, 0, {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}
define void @sample_c_d_cl() #0 {
main_body:
@@ -183,6 +198,7 @@ main_body:
}
;CHECK-LABEL: {{^}}sample_c_l:
+;CHECK-NOT: s_wqm
;CHECK: image_sample_c_l {{v\[[0-9]+:[0-9]+\]}}, 15, 0, 0, 0, 0, 0, 0, 0, {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}
define void @sample_c_l() #0 {
main_body:
@@ -196,6 +212,7 @@ main_body:
}
;CHECK-LABEL: {{^}}sample_c_b:
+;CHECK: s_wqm
;CHECK: image_sample_c_b {{v\[[0-9]+:[0-9]+\]}}, 15, 0, 0, 0, 0, 0, 0, 0, {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}
define void @sample_c_b() #0 {
main_body:
@@ -209,6 +226,7 @@ main_body:
}
;CHECK-LABEL: {{^}}sample_c_b_cl:
+;CHECK: s_wqm
;CHECK: image_sample_c_b_cl {{v\[[0-9]+:[0-9]+\]}}, 15, 0, 0, 0, 0, 0, 0, 0, {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}
define void @sample_c_b_cl() #0 {
main_body:
@@ -222,6 +240,7 @@ main_body:
}
;CHECK-LABEL: {{^}}sample_c_lz:
+;CHECK-NOT: s_wqm
;CHECK: image_sample_c_lz {{v\[[0-9]+:[0-9]+\]}}, 15, 0, 0, 0, 0, 0, 0, 0, {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}
define void @sample_c_lz() #0 {
main_body:
@@ -235,6 +254,7 @@ main_body:
}
;CHECK-LABEL: {{^}}sample_c_cd:
+;CHECK-NOT: s_wqm
;CHECK: image_sample_c_cd {{v\[[0-9]+:[0-9]+\]}}, 15, 0, 0, 0, 0, 0, 0, 0, {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}
define void @sample_c_cd() #0 {
main_body:
@@ -248,6 +268,7 @@ main_body:
}
;CHECK-LABEL: {{^}}sample_c_cd_cl:
+;CHECK-NOT: s_wqm
;CHECK: image_sample_c_cd_cl {{v\[[0-9]+:[0-9]+\]}}, 15, 0, 0, 0, 0, 0, 0, 0, {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}
define void @sample_c_cd_cl() #0 {
main_body:
diff --git a/test/CodeGen/R600/llvm.SI.image.sample.o.ll b/test/CodeGen/R600/llvm.SI.image.sample.o.ll
index dbc1b2b..9d89354 100644
--- a/test/CodeGen/R600/llvm.SI.image.sample.o.ll
+++ b/test/CodeGen/R600/llvm.SI.image.sample.o.ll
@@ -1,6 +1,8 @@
-;RUN: llc < %s -march=r600 -mcpu=verde -verify-machineinstrs | FileCheck %s
+;RUN: llc < %s -march=amdgcn -mcpu=verde -verify-machineinstrs | FileCheck %s
+;RUN: llc < %s -march=amdgcn -mcpu=tonga -verify-machineinstrs | FileCheck %s
;CHECK-LABEL: {{^}}sample:
+;CHECK: s_wqm
;CHECK: image_sample_o {{v\[[0-9]+:[0-9]+\]}}, 15, 0, 0, 0, 0, 0, 0, 0, {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}
define void @sample() #0 {
main_body:
@@ -14,6 +16,7 @@ main_body:
}
;CHECK-LABEL: {{^}}sample_cl:
+;CHECK: s_wqm
;CHECK: image_sample_cl_o {{v\[[0-9]+:[0-9]+\]}}, 15, 0, 0, 0, 0, 0, 0, 0, {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}
define void @sample_cl() #0 {
main_body:
@@ -27,6 +30,7 @@ main_body:
}
;CHECK-LABEL: {{^}}sample_d:
+;CHECK-NOT: s_wqm
;CHECK: image_sample_d_o {{v\[[0-9]+:[0-9]+\]}}, 15, 0, 0, 0, 0, 0, 0, 0, {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}
define void @sample_d() #0 {
main_body:
@@ -40,6 +44,7 @@ main_body:
}
;CHECK-LABEL: {{^}}sample_d_cl:
+;CHECK-NOT: s_wqm
;CHECK: image_sample_d_cl_o {{v\[[0-9]+:[0-9]+\]}}, 15, 0, 0, 0, 0, 0, 0, 0, {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}
define void @sample_d_cl() #0 {
main_body:
@@ -53,6 +58,7 @@ main_body:
}
;CHECK-LABEL: {{^}}sample_l:
+;CHECK-NOT: s_wqm
;CHECK: image_sample_l_o {{v\[[0-9]+:[0-9]+\]}}, 15, 0, 0, 0, 0, 0, 0, 0, {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}
define void @sample_l() #0 {
main_body:
@@ -66,6 +72,7 @@ main_body:
}
;CHECK-LABEL: {{^}}sample_b:
+;CHECK: s_wqm
;CHECK: image_sample_b_o {{v\[[0-9]+:[0-9]+\]}}, 15, 0, 0, 0, 0, 0, 0, 0, {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}
define void @sample_b() #0 {
main_body:
@@ -79,6 +86,7 @@ main_body:
}
;CHECK-LABEL: {{^}}sample_b_cl:
+;CHECK: s_wqm
;CHECK: image_sample_b_cl_o {{v\[[0-9]+:[0-9]+\]}}, 15, 0, 0, 0, 0, 0, 0, 0, {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}
define void @sample_b_cl() #0 {
main_body:
@@ -92,6 +100,7 @@ main_body:
}
;CHECK-LABEL: {{^}}sample_lz:
+;CHECK-NOT: s_wqm
;CHECK: image_sample_lz_o {{v\[[0-9]+:[0-9]+\]}}, 15, 0, 0, 0, 0, 0, 0, 0, {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}
define void @sample_lz() #0 {
main_body:
@@ -105,6 +114,7 @@ main_body:
}
;CHECK-LABEL: {{^}}sample_cd:
+;CHECK-NOT: s_wqm
;CHECK: image_sample_cd_o {{v\[[0-9]+:[0-9]+\]}}, 15, 0, 0, 0, 0, 0, 0, 0, {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}
define void @sample_cd() #0 {
main_body:
@@ -118,6 +128,7 @@ main_body:
}
;CHECK-LABEL: {{^}}sample_cd_cl:
+;CHECK-NOT: s_wqm
;CHECK: image_sample_cd_cl_o {{v\[[0-9]+:[0-9]+\]}}, 15, 0, 0, 0, 0, 0, 0, 0, {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}
define void @sample_cd_cl() #0 {
main_body:
@@ -131,6 +142,7 @@ main_body:
}
;CHECK-LABEL: {{^}}sample_c:
+;CHECK: s_wqm
;CHECK: image_sample_c_o {{v\[[0-9]+:[0-9]+\]}}, 15, 0, 0, 0, 0, 0, 0, 0, {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}
define void @sample_c() #0 {
main_body:
@@ -144,6 +156,7 @@ main_body:
}
;CHECK-LABEL: {{^}}sample_c_cl:
+;CHECK: s_wqm
;CHECK: image_sample_c_cl_o {{v\[[0-9]+:[0-9]+\]}}, 15, 0, 0, 0, 0, 0, 0, 0, {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}
define void @sample_c_cl() #0 {
main_body:
@@ -157,6 +170,7 @@ main_body:
}
;CHECK-LABEL: {{^}}sample_c_d:
+;CHECK-NOT: s_wqm
;CHECK: image_sample_c_d_o {{v\[[0-9]+:[0-9]+\]}}, 15, 0, 0, 0, 0, 0, 0, 0, {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}
define void @sample_c_d() #0 {
main_body:
@@ -170,6 +184,7 @@ main_body:
}
;CHECK-LABEL: {{^}}sample_c_d_cl:
+;CHECK-NOT: s_wqm
;CHECK: image_sample_c_d_cl_o {{v\[[0-9]+:[0-9]+\]}}, 15, 0, 0, 0, 0, 0, 0, 0, {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}
define void @sample_c_d_cl() #0 {
main_body:
@@ -183,6 +198,7 @@ main_body:
}
;CHECK-LABEL: {{^}}sample_c_l:
+;CHECK-NOT: s_wqm
;CHECK: image_sample_c_l_o {{v\[[0-9]+:[0-9]+\]}}, 15, 0, 0, 0, 0, 0, 0, 0, {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}
define void @sample_c_l() #0 {
main_body:
@@ -196,6 +212,7 @@ main_body:
}
;CHECK-LABEL: {{^}}sample_c_b:
+;CHECK: s_wqm
;CHECK: image_sample_c_b_o {{v\[[0-9]+:[0-9]+\]}}, 15, 0, 0, 0, 0, 0, 0, 0, {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}
define void @sample_c_b() #0 {
main_body:
@@ -209,6 +226,7 @@ main_body:
}
;CHECK-LABEL: {{^}}sample_c_b_cl:
+;CHECK: s_wqm
;CHECK: image_sample_c_b_cl_o {{v\[[0-9]+:[0-9]+\]}}, 15, 0, 0, 0, 0, 0, 0, 0, {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}
define void @sample_c_b_cl() #0 {
main_body:
@@ -222,6 +240,7 @@ main_body:
}
;CHECK-LABEL: {{^}}sample_c_lz:
+;CHECK-NOT: s_wqm
;CHECK: image_sample_c_lz_o {{v\[[0-9]+:[0-9]+\]}}, 15, 0, 0, 0, 0, 0, 0, 0, {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}
define void @sample_c_lz() #0 {
main_body:
@@ -235,6 +254,7 @@ main_body:
}
;CHECK-LABEL: {{^}}sample_c_cd:
+;CHECK-NOT: s_wqm
;CHECK: image_sample_c_cd_o {{v\[[0-9]+:[0-9]+\]}}, 15, 0, 0, 0, 0, 0, 0, 0, {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}
define void @sample_c_cd() #0 {
main_body:
@@ -248,6 +268,7 @@ main_body:
}
;CHECK-LABEL: {{^}}sample_c_cd_cl:
+;CHECK-NOT: s_wqm
;CHECK: image_sample_c_cd_cl_o {{v\[[0-9]+:[0-9]+\]}}, 15, 0, 0, 0, 0, 0, 0, 0, {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}
define void @sample_c_cd_cl() #0 {
main_body:
diff --git a/test/CodeGen/R600/llvm.SI.imageload.ll b/test/CodeGen/R600/llvm.SI.imageload.ll
index 673d92d..35e4591 100644
--- a/test/CodeGen/R600/llvm.SI.imageload.ll
+++ b/test/CodeGen/R600/llvm.SI.imageload.ll
@@ -1,4 +1,5 @@
-;RUN: llc < %s -march=r600 -mcpu=verde -verify-machineinstrs | FileCheck %s
+;RUN: llc < %s -march=amdgcn -mcpu=verde -verify-machineinstrs | FileCheck %s
+;RUN: llc < %s -march=amdgcn -mcpu=tonga -verify-machineinstrs | FileCheck %s
;CHECK-DAG: image_load {{v\[[0-9]+:[0-9]+\]}}, 15, 0, 0, -1
;CHECK-DAG: image_load_mip {{v\[[0-9]+:[0-9]+\]}}, 3, 0, 0, 0
@@ -126,6 +127,6 @@ declare void @llvm.SI.export(i32, i32, i32, i32, i32, float, float, float, float
attributes #0 = { "ShaderType"="0" }
attributes #1 = { nounwind readnone }
-!0 = metadata !{metadata !"const", null}
-!1 = metadata !{}
-!2 = metadata !{metadata !0, metadata !0, i64 0, i32 1}
+!0 = !{!"const", null}
+!1 = !{}
+!2 = !{!0, !0, i64 0, i32 1}
diff --git a/test/CodeGen/R600/llvm.SI.load.dword.ll b/test/CodeGen/R600/llvm.SI.load.dword.ll
index e5c6201..d2e6a8e 100644
--- a/test/CodeGen/R600/llvm.SI.load.dword.ll
+++ b/test/CodeGen/R600/llvm.SI.load.dword.ll
@@ -1,28 +1,41 @@
-;RUN: llc < %s -march=r600 -mcpu=verde -verify-machineinstrs | FileCheck %s
+; RUN: llc -march=amdgcn -mcpu=verde -show-mc-encoding -verify-machineinstrs < %s | FileCheck %s
+; RUN: llc -march=amdgcn -mcpu=tonga -show-mc-encoding -verify-machineinstrs < %s | FileCheck %s
; Example of a simple geometry shader loading vertex attributes from the
; ESGS ring buffer
-; CHECK-LABEL: {{^}}main:
-; CHECK: buffer_load_dword
-; CHECK: buffer_load_dword
-; CHECK: buffer_load_dword
-; CHECK: buffer_load_dword
+; FIXME: Out of bounds immediate offset crashes
-define void @main([17 x <16 x i8>] addrspace(2)* byval, [32 x <16 x i8>] addrspace(2)* byval, [16 x <32 x i8>] addrspace(2)* byval, [2 x <16 x i8>] addrspace(2)* byval, [17 x <16 x i8>] addrspace(2)* inreg, [17 x <16 x i8>] addrspace(2)* inreg, i32, i32, i32, i32) #0 {
+; CHECK-LABEL: {{^}}main:
+; CHECK: buffer_load_dword {{v[0-9]+}}, {{s\[[0-9]+:[0-9]+\]}}, 0 glc slc
+; CHECK: buffer_load_dword {{v[0-9]+}}, {{v[0-9]+}}, {{s\[[0-9]+:[0-9]+\]}}, 0 offen glc slc
+; CHECK: buffer_load_dword {{v[0-9]+}}, {{v[0-9]+}}, {{s\[[0-9]+:[0-9]+\]}}, 0 idxen glc slc
+; CHECK: buffer_load_dword {{v[0-9]+}}, {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, 0 idxen offen glc slc
+; CHECK: s_movk_i32 [[K:s[0-9]+]], 0x4d2 ; encoding
+; CHECK: buffer_load_dword {{v[0-9]+}}, {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, [[K]] idxen offen offset:65535 glc slc
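+; Each load exercises a different MUBUF addressing mode; 0x4d2 is the 1234
+; soffset and offset:65535 the immediate offset passed to the final intrinsic
+; call below.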
+
+define void @main([17 x <16 x i8>] addrspace(2)* byval %arg, [32 x <16 x i8>] addrspace(2)* byval %arg1, [16 x <32 x i8>] addrspace(2)* byval %arg2, [2 x <16 x i8>] addrspace(2)* byval %arg3, [17 x <16 x i8>] addrspace(2)* inreg %arg4, [17 x <16 x i8>] addrspace(2)* inreg %arg5, i32 %arg6, i32 %arg7, i32 %arg8, i32 %arg9) #0 {
main_body:
- %10 = getelementptr [2 x <16 x i8>] addrspace(2)* %3, i64 0, i32 1
- %11 = load <16 x i8> addrspace(2)* %10, !tbaa !0
- %12 = shl i32 %6, 2
- %13 = call i32 @llvm.SI.buffer.load.dword.i32.i32(<16 x i8> %11, i32 0, i32 0, i32 0, i32 0, i32 0, i32 1, i32 1, i32 0)
- %14 = bitcast i32 %13 to float
- %15 = call i32 @llvm.SI.buffer.load.dword.i32.i32(<16 x i8> %11, i32 %12, i32 0, i32 0, i32 1, i32 0, i32 1, i32 1, i32 0)
- %16 = bitcast i32 %15 to float
- %17 = call i32 @llvm.SI.buffer.load.dword.i32.i32(<16 x i8> %11, i32 %12, i32 0, i32 0, i32 0, i32 1, i32 1, i32 1, i32 0)
- %18 = bitcast i32 %17 to float
- %19 = call i32 @llvm.SI.buffer.load.dword.i32.v2i32(<16 x i8> %11, <2 x i32> <i32 0, i32 0>, i32 0, i32 0, i32 1, i32 1, i32 1, i32 1, i32 0)
- %20 = bitcast i32 %19 to float
- call void @llvm.SI.export(i32 15, i32 0, i32 1, i32 12, i32 0, float %14, float %16, float %18, float %20)
+ %tmp = getelementptr [2 x <16 x i8>] addrspace(2)* %arg3, i64 0, i32 1
+ %tmp10 = load <16 x i8> addrspace(2)* %tmp, !tbaa !0
+ %tmp11 = shl i32 %arg6, 2
+ %tmp12 = call i32 @llvm.SI.buffer.load.dword.i32.i32(<16 x i8> %tmp10, i32 0, i32 0, i32 0, i32 0, i32 0, i32 1, i32 1, i32 0)
+ %tmp13 = bitcast i32 %tmp12 to float
+ %tmp14 = call i32 @llvm.SI.buffer.load.dword.i32.i32(<16 x i8> %tmp10, i32 %tmp11, i32 0, i32 0, i32 1, i32 0, i32 1, i32 1, i32 0)
+ %tmp15 = bitcast i32 %tmp14 to float
+ %tmp16 = call i32 @llvm.SI.buffer.load.dword.i32.i32(<16 x i8> %tmp10, i32 %tmp11, i32 0, i32 0, i32 0, i32 1, i32 1, i32 1, i32 0)
+ %tmp17 = bitcast i32 %tmp16 to float
+ %tmp18 = call i32 @llvm.SI.buffer.load.dword.i32.v2i32(<16 x i8> %tmp10, <2 x i32> zeroinitializer, i32 0, i32 0, i32 1, i32 1, i32 1, i32 1, i32 0)
+ %tmp19 = bitcast i32 %tmp18 to float
+
+ %tmp20 = call i32 @llvm.SI.buffer.load.dword.i32.v2i32(<16 x i8> %tmp10, <2 x i32> zeroinitializer, i32 0, i32 123, i32 1, i32 1, i32 1, i32 1, i32 0)
+ %tmp21 = bitcast i32 %tmp20 to float
+
+ %tmp22 = call i32 @llvm.SI.buffer.load.dword.i32.v2i32(<16 x i8> %tmp10, <2 x i32> zeroinitializer, i32 1234, i32 65535, i32 1, i32 1, i32 1, i32 1, i32 0)
+ %tmp23 = bitcast i32 %tmp22 to float
+
+ call void @llvm.SI.export(i32 15, i32 0, i32 1, i32 12, i32 0, float %tmp13, float %tmp15, float %tmp17, float %tmp19)
+ call void @llvm.SI.export(i32 15, i32 0, i32 1, i32 12, i32 0, float %tmp21, float %tmp23, float %tmp23, float %tmp23)
ret void
}
@@ -37,4 +50,4 @@ declare void @llvm.SI.export(i32, i32, i32, i32, i32, float, float, float, float
attributes #0 = { "ShaderType"="1" }
attributes #1 = { nounwind readonly }
-!0 = metadata !{metadata !"const", null, i32 1}
+!0 = !{!"const", null, i32 1}
diff --git a/test/CodeGen/R600/llvm.SI.resinfo.ll b/test/CodeGen/R600/llvm.SI.resinfo.ll
index d8f3722..ac95fd0 100644
--- a/test/CodeGen/R600/llvm.SI.resinfo.ll
+++ b/test/CodeGen/R600/llvm.SI.resinfo.ll
@@ -1,4 +1,5 @@
-; RUN: llc -march=r600 -mcpu=verde -verify-machineinstrs < %s | FileCheck %s
+; RUN: llc -march=amdgcn -mcpu=verde -verify-machineinstrs < %s | FileCheck %s
+; RUN: llc -march=amdgcn -mcpu=tonga -verify-machineinstrs < %s | FileCheck %s
; CHECK-DAG: image_get_resinfo {{v\[[0-9]+:[0-9]+\]}}, 15, 0, 0, -1
; CHECK-DAG: image_get_resinfo {{v\[[0-9]+:[0-9]+\]}}, 3, 0, 0, 0
diff --git a/test/CodeGen/R600/llvm.SI.sample-masked.ll b/test/CodeGen/R600/llvm.SI.sample-masked.ll
index 9e86bec..ce9558c 100644
--- a/test/CodeGen/R600/llvm.SI.sample-masked.ll
+++ b/test/CodeGen/R600/llvm.SI.sample-masked.ll
@@ -1,4 +1,5 @@
-;RUN: llc < %s -march=r600 -mcpu=verde | FileCheck %s
+;RUN: llc < %s -march=amdgcn -mcpu=verde | FileCheck %s
+;RUN: llc < %s -march=amdgcn -mcpu=tonga | FileCheck %s
; CHECK-LABEL: {{^}}v1:
; CHECK: image_sample {{v\[[0-9]+:[0-9]+\]}}, 13
diff --git a/test/CodeGen/R600/llvm.SI.sample.ll b/test/CodeGen/R600/llvm.SI.sample.ll
index a1d2c02..509c45f 100644
--- a/test/CodeGen/R600/llvm.SI.sample.ll
+++ b/test/CodeGen/R600/llvm.SI.sample.ll
@@ -1,4 +1,5 @@
-;RUN: llc < %s -march=r600 -mcpu=verde -verify-machineinstrs | FileCheck %s
+;RUN: llc < %s -march=amdgcn -mcpu=verde -verify-machineinstrs | FileCheck %s
+;RUN: llc < %s -march=amdgcn -mcpu=tonga -verify-machineinstrs | FileCheck %s
;CHECK-DAG: image_sample {{v\[[0-9]+:[0-9]+\]}}, 15
;CHECK-DAG: image_sample {{v\[[0-9]+:[0-9]+\]}}, 3
diff --git a/test/CodeGen/R600/llvm.SI.sampled.ll b/test/CodeGen/R600/llvm.SI.sampled.ll
index 91b71f3..f2badff 100644
--- a/test/CodeGen/R600/llvm.SI.sampled.ll
+++ b/test/CodeGen/R600/llvm.SI.sampled.ll
@@ -1,4 +1,5 @@
-;RUN: llc < %s -march=r600 -mcpu=verde -verify-machineinstrs | FileCheck %s
+;RUN: llc < %s -march=amdgcn -mcpu=verde -verify-machineinstrs | FileCheck %s
+;RUN: llc < %s -march=amdgcn -mcpu=tonga -verify-machineinstrs | FileCheck %s
;CHECK-DAG: image_sample_d {{v\[[0-9]+:[0-9]+\]}}, 15
;CHECK-DAG: image_sample_d {{v\[[0-9]+:[0-9]+\]}}, 3
diff --git a/test/CodeGen/R600/llvm.SI.sendmsg-m0.ll b/test/CodeGen/R600/llvm.SI.sendmsg-m0.ll
new file mode 100644
index 0000000..2198590
--- /dev/null
+++ b/test/CodeGen/R600/llvm.SI.sendmsg-m0.ll
@@ -0,0 +1,20 @@
+;RUN: llc < %s -march=amdgcn -mcpu=verde -verify-machineinstrs | FileCheck --check-prefix=SI --check-prefix=BOTH %s
+;RUN: llc < %s -march=amdgcn -mcpu=tonga -verify-machineinstrs | FileCheck --check-prefix=VI --check-prefix=BOTH %s
+
+; BOTH-LABEL: {{^}}main:
+; BOTH: s_mov_b32 m0, s0
+; VI-NEXT: s_nop 0
+; BOTH-NEXT: s_sendmsg Gs_done(nop)
+; BOTH-NEXT: s_endpgm
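+; The extra s_nop on VI covers the required wait state between the SALU write
+; to m0 and s_sendmsg.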
+
+define void @main(i32 inreg %a) #0 {
+main_body:
+ call void @llvm.SI.sendmsg(i32 3, i32 %a)
+ ret void
+}
+
+; Function Attrs: nounwind
+declare void @llvm.SI.sendmsg(i32, i32) #1
+
+attributes #0 = { "ShaderType"="2" "unsafe-fp-math"="true" }
+attributes #1 = { nounwind }
diff --git a/test/CodeGen/R600/llvm.SI.sendmsg.ll b/test/CodeGen/R600/llvm.SI.sendmsg.ll
index 042fc5b..ce38002 100644
--- a/test/CodeGen/R600/llvm.SI.sendmsg.ll
+++ b/test/CodeGen/R600/llvm.SI.sendmsg.ll
@@ -1,4 +1,5 @@
-;RUN: llc < %s -march=r600 -mcpu=verde -verify-machineinstrs | FileCheck %s
+;RUN: llc < %s -march=amdgcn -mcpu=verde -verify-machineinstrs | FileCheck %s
+;RUN: llc < %s -march=amdgcn -mcpu=tonga -verify-machineinstrs | FileCheck %s
; CHECK-LABEL: {{^}}main:
; CHECK: s_sendmsg Gs(emit stream 0)
diff --git a/test/CodeGen/R600/llvm.SI.tbuffer.store.ll b/test/CodeGen/R600/llvm.SI.tbuffer.store.ll
index 702daea..71f5154 100644
--- a/test/CodeGen/R600/llvm.SI.tbuffer.store.ll
+++ b/test/CodeGen/R600/llvm.SI.tbuffer.store.ll
@@ -1,4 +1,5 @@
-;RUN: llc < %s -march=r600 -mcpu=verde -verify-machineinstrs | FileCheck %s
+;RUN: llc < %s -march=amdgcn -mcpu=verde -verify-machineinstrs | FileCheck %s
+;RUN: llc < %s -march=amdgcn -mcpu=tonga -verify-machineinstrs | FileCheck %s
;CHECK-LABEL: {{^}}test1:
;CHECK: tbuffer_store_format_xyzw {{v\[[0-9]+:[0-9]+\]}}, 0x20, -1, 0, -1, 0, 14, 4, {{v[0-9]+}}, {{s\[[0-9]+:[0-9]+\]}}, -1, 0, 0
diff --git a/test/CodeGen/R600/llvm.SI.tid.ll b/test/CodeGen/R600/llvm.SI.tid.ll
index ee96124..f6e6d70 100644
--- a/test/CodeGen/R600/llvm.SI.tid.ll
+++ b/test/CodeGen/R600/llvm.SI.tid.ll
@@ -1,7 +1,9 @@
-;RUN: llc < %s -march=r600 -mcpu=verde -verify-machineinstrs | FileCheck %s
+;RUN: llc < %s -march=amdgcn -mcpu=verde -verify-machineinstrs | FileCheck --check-prefix=SI --check-prefix=GCN %s
+;RUN: llc < %s -march=amdgcn -mcpu=tonga -verify-machineinstrs | FileCheck --check-prefix=VI --check-prefix=GCN %s
-;CHECK: v_mbcnt_lo_u32_b32_e64
-;CHECK: v_mbcnt_hi_u32_b32_e32
+;GCN: v_mbcnt_lo_u32_b32_e64
+;SI: v_mbcnt_hi_u32_b32_e32
+;VI: v_mbcnt_hi_u32_b32_e64
define void @main(<16 x i8> addrspace(2)* inreg, <16 x i8> addrspace(2)* inreg, <32 x i8> addrspace(2)* inreg, i32 inreg) "ShaderType"="0" {
main_body:
diff --git a/test/CodeGen/R600/llvm.amdgpu.kilp.ll b/test/CodeGen/R600/llvm.amdgpu.kilp.ll
index 08bee38..42df6db 100644
--- a/test/CodeGen/R600/llvm.amdgpu.kilp.ll
+++ b/test/CodeGen/R600/llvm.amdgpu.kilp.ll
@@ -1,4 +1,5 @@
-; RUN: llc -march=r600 -mcpu=verde -verify-machineinstrs < %s | FileCheck -check-prefix=SI %s
+; RUN: llc -march=amdgcn -mcpu=verde -verify-machineinstrs < %s | FileCheck -check-prefix=SI %s
+; RUN: llc -march=amdgcn -mcpu=tonga -verify-machineinstrs < %s | FileCheck -check-prefix=SI %s
; SI-LABEL: {{^}}kilp_gs_const:
; SI: s_mov_b64 exec, 0
@@ -17,4 +18,4 @@ declare void @llvm.AMDGPU.kilp(float)
attributes #0 = { "ShaderType"="2" }
-!0 = metadata !{metadata !"const", null, i32 1}
+!0 = !{!"const", null, i32 1}
diff --git a/test/CodeGen/R600/llvm.amdgpu.lrp.ll b/test/CodeGen/R600/llvm.amdgpu.lrp.ll
index ee922fe..4e4c2ec 100644
--- a/test/CodeGen/R600/llvm.amdgpu.lrp.ll
+++ b/test/CodeGen/R600/llvm.amdgpu.lrp.ll
@@ -1,4 +1,5 @@
-; RUN: llc -march=r600 -mcpu=SI -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
+; RUN: llc -march=amdgcn -mcpu=SI -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
+; RUN: llc -march=amdgcn -mcpu=tonga -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
declare float @llvm.AMDGPU.lrp(float, float, float) nounwind readnone
diff --git a/test/CodeGen/R600/llvm.cos.ll b/test/CodeGen/R600/llvm.cos.ll
index 837340f..c65df8b 100644
--- a/test/CodeGen/R600/llvm.cos.ll
+++ b/test/CodeGen/R600/llvm.cos.ll
@@ -1,5 +1,6 @@
;RUN: llc < %s -march=r600 -mcpu=redwood | FileCheck %s -check-prefix=EG -check-prefix=FUNC
-;RUN: llc < %s -march=r600 -mcpu=SI | FileCheck %s -check-prefix=SI -check-prefix=FUNC
+;RUN: llc < %s -march=amdgcn -mcpu=SI | FileCheck %s -check-prefix=SI -check-prefix=FUNC
+;RUN: llc < %s -march=amdgcn -mcpu=tonga | FileCheck %s -check-prefix=SI -check-prefix=FUNC
;FUNC-LABEL: test
;EG: MULADD_IEEE *
diff --git a/test/CodeGen/R600/llvm.exp2.ll b/test/CodeGen/R600/llvm.exp2.ll
index 52dc67d..4269892 100644
--- a/test/CodeGen/R600/llvm.exp2.ll
+++ b/test/CodeGen/R600/llvm.exp2.ll
@@ -1,14 +1,15 @@
-;RUN: llc < %s -march=r600 -mcpu=redwood | FileCheck %s --check-prefix=EG-CHECK --check-prefix=FUNC
-;RUN: llc < %s -march=r600 -mcpu=cayman | FileCheck %s --check-prefix=CM-CHECK --check-prefix=FUNC
-;RUN: llc < %s -march=r600 -mcpu=SI | FileCheck %s --check-prefix=SI-CHECK --check-prefix=FUNC
+;RUN: llc < %s -march=r600 -mcpu=redwood | FileCheck %s --check-prefix=EG --check-prefix=FUNC
+;RUN: llc < %s -march=r600 -mcpu=cayman | FileCheck %s --check-prefix=CM --check-prefix=FUNC
+;RUN: llc < %s -march=amdgcn -mcpu=SI | FileCheck %s --check-prefix=SI --check-prefix=FUNC
+;RUN: llc < %s -march=amdgcn -mcpu=tonga | FileCheck %s --check-prefix=SI --check-prefix=FUNC
;FUNC-LABEL: {{^}}test:
-;EG-CHECK: EXP_IEEE
-;CM-CHECK-DAG: EXP_IEEE T{{[0-9]+\.[XYZW]}} (MASKED)
-;CM-CHECK-DAG: EXP_IEEE T{{[0-9]+\.[XYZW]}} (MASKED)
-;CM-CHECK-DAG: EXP_IEEE T{{[0-9]+\.[XYZW]}} (MASKED)
-;CM-CHECK-DAG: EXP_IEEE T{{[0-9]+\.[XYZW]}}
-;SI-CHECK: v_exp_f32
+;EG: EXP_IEEE
+;CM-DAG: EXP_IEEE T{{[0-9]+\.[XYZW]}} (MASKED)
+;CM-DAG: EXP_IEEE T{{[0-9]+\.[XYZW]}} (MASKED)
+;CM-DAG: EXP_IEEE T{{[0-9]+\.[XYZW]}} (MASKED)
+;CM-DAG: EXP_IEEE T{{[0-9]+\.[XYZW]}}
+;SI: v_exp_f32
define void @test(float addrspace(1)* %out, float %in) {
entry:
@@ -18,20 +19,20 @@ entry:
}
;FUNC-LABEL: {{^}}testv2:
-;EG-CHECK: EXP_IEEE
-;EG-CHECK: EXP_IEEE
+;EG: EXP_IEEE
+;EG: EXP_IEEE
; FIXME: We should be able to merge these packets together on Cayman so we
; have a maximum of 4 instructions.
-;CM-CHECK-DAG: EXP_IEEE T{{[0-9]+\.[XYZW]}} (MASKED)
-;CM-CHECK-DAG: EXP_IEEE T{{[0-9]+\.[XYZW]}} (MASKED)
-;CM-CHECK-DAG: EXP_IEEE T{{[0-9]+\.[XYZW]}} (MASKED)
-;CM-CHECK-DAG: EXP_IEEE T{{[0-9]+\.[XYZW]}} (MASKED)
-;CM-CHECK-DAG: EXP_IEEE T{{[0-9]+\.[XYZW]}} (MASKED)
-;CM-CHECK-DAG: EXP_IEEE T{{[0-9]+\.[XYZW]}} (MASKED)
-;CM-CHECK-DAG: EXP_IEEE T{{[0-9]+\.[XYZW]}}
-;CM-CHECK-DAG: EXP_IEEE T{{[0-9]+\.[XYZW]}}
-;SI-CHECK: v_exp_f32
-;SI-CHECK: v_exp_f32
+;CM-DAG: EXP_IEEE T{{[0-9]+\.[XYZW]}} (MASKED)
+;CM-DAG: EXP_IEEE T{{[0-9]+\.[XYZW]}} (MASKED)
+;CM-DAG: EXP_IEEE T{{[0-9]+\.[XYZW]}} (MASKED)
+;CM-DAG: EXP_IEEE T{{[0-9]+\.[XYZW]}} (MASKED)
+;CM-DAG: EXP_IEEE T{{[0-9]+\.[XYZW]}} (MASKED)
+;CM-DAG: EXP_IEEE T{{[0-9]+\.[XYZW]}} (MASKED)
+;CM-DAG: EXP_IEEE T{{[0-9]+\.[XYZW]}}
+;CM-DAG: EXP_IEEE T{{[0-9]+\.[XYZW]}}
+;SI: v_exp_f32
+;SI: v_exp_f32
define void @testv2(<2 x float> addrspace(1)* %out, <2 x float> %in) {
entry:
@@ -41,32 +42,32 @@ entry:
}
;FUNC-LABEL: {{^}}testv4:
-;EG-CHECK: EXP_IEEE
-;EG-CHECK: EXP_IEEE
-;EG-CHECK: EXP_IEEE
-;EG-CHECK: EXP_IEEE
+;EG: EXP_IEEE
+;EG: EXP_IEEE
+;EG: EXP_IEEE
+;EG: EXP_IEEE
; FIXME: We should be able to merge these packets together on Cayman so we
; have a maximum of 4 instructions.
-;CM-CHECK-DAG: EXP_IEEE T{{[0-9]+\.[XYZW]}} (MASKED)
-;CM-CHECK-DAG: EXP_IEEE T{{[0-9]+\.[XYZW]}} (MASKED)
-;CM-CHECK-DAG: EXP_IEEE T{{[0-9]+\.[XYZW]}} (MASKED)
-;CM-CHECK-DAG: EXP_IEEE T{{[0-9]+\.[XYZW]}} (MASKED)
-;CM-CHECK-DAG: EXP_IEEE T{{[0-9]+\.[XYZW]}} (MASKED)
-;CM-CHECK-DAG: EXP_IEEE T{{[0-9]+\.[XYZW]}} (MASKED)
-;CM-CHECK-DAG: EXP_IEEE T{{[0-9]+\.[XYZW]}} (MASKED)
-;CM-CHECK-DAG: EXP_IEEE T{{[0-9]+\.[XYZW]}} (MASKED)
-;CM-CHECK-DAG: EXP_IEEE T{{[0-9]+\.[XYZW]}} (MASKED)
-;CM-CHECK-DAG: EXP_IEEE T{{[0-9]+\.[XYZW]}} (MASKED)
-;CM-CHECK-DAG: EXP_IEEE T{{[0-9]+\.[XYZW]}} (MASKED)
-;CM-CHECK-DAG: EXP_IEEE T{{[0-9]+\.[XYZW]}} (MASKED)
-;CM-CHECK-DAG: EXP_IEEE T{{[0-9]+\.[XYZW]}}
-;CM-CHECK-DAG: EXP_IEEE T{{[0-9]+\.[XYZW]}}
-;CM-CHECK-DAG: EXP_IEEE T{{[0-9]+\.[XYZW]}}
-;CM-CHECK-DAG: EXP_IEEE T{{[0-9]+\.[XYZW]}}
-;SI-CHECK: v_exp_f32
-;SI-CHECK: v_exp_f32
-;SI-CHECK: v_exp_f32
-;SI-CHECK: v_exp_f32
+;CM-DAG: EXP_IEEE T{{[0-9]+\.[XYZW]}} (MASKED)
+;CM-DAG: EXP_IEEE T{{[0-9]+\.[XYZW]}} (MASKED)
+;CM-DAG: EXP_IEEE T{{[0-9]+\.[XYZW]}} (MASKED)
+;CM-DAG: EXP_IEEE T{{[0-9]+\.[XYZW]}} (MASKED)
+;CM-DAG: EXP_IEEE T{{[0-9]+\.[XYZW]}} (MASKED)
+;CM-DAG: EXP_IEEE T{{[0-9]+\.[XYZW]}} (MASKED)
+;CM-DAG: EXP_IEEE T{{[0-9]+\.[XYZW]}} (MASKED)
+;CM-DAG: EXP_IEEE T{{[0-9]+\.[XYZW]}} (MASKED)
+;CM-DAG: EXP_IEEE T{{[0-9]+\.[XYZW]}} (MASKED)
+;CM-DAG: EXP_IEEE T{{[0-9]+\.[XYZW]}} (MASKED)
+;CM-DAG: EXP_IEEE T{{[0-9]+\.[XYZW]}} (MASKED)
+;CM-DAG: EXP_IEEE T{{[0-9]+\.[XYZW]}} (MASKED)
+;CM-DAG: EXP_IEEE T{{[0-9]+\.[XYZW]}}
+;CM-DAG: EXP_IEEE T{{[0-9]+\.[XYZW]}}
+;CM-DAG: EXP_IEEE T{{[0-9]+\.[XYZW]}}
+;CM-DAG: EXP_IEEE T{{[0-9]+\.[XYZW]}}
+;SI: v_exp_f32
+;SI: v_exp_f32
+;SI: v_exp_f32
+;SI: v_exp_f32
define void @testv4(<4 x float> addrspace(1)* %out, <4 x float> %in) {
entry:
%0 = call <4 x float> @llvm.exp2.v4f32(<4 x float> %in)
diff --git a/test/CodeGen/R600/llvm.floor.ll b/test/CodeGen/R600/llvm.floor.ll
deleted file mode 100644
index 0c7a15b..0000000
--- a/test/CodeGen/R600/llvm.floor.ll
+++ /dev/null
@@ -1,54 +0,0 @@
-; RUN: llc < %s -march=r600 -mcpu=redwood | FileCheck %s --check-prefix=R600-CHECK
-; RUN: llc < %s -march=r600 -mcpu=SI -verify-machineinstrs | FileCheck %s --check-prefix=SI-CHECK
-
-; R600-CHECK: {{^}}f32:
-; R600-CHECK: FLOOR
-; SI-CHECK: {{^}}f32:
-; SI-CHECK: v_floor_f32_e32
-define void @f32(float addrspace(1)* %out, float %in) {
-entry:
- %0 = call float @llvm.floor.f32(float %in)
- store float %0, float addrspace(1)* %out
- ret void
-}
-
-; R600-CHECK: {{^}}v2f32:
-; R600-CHECK: FLOOR
-; R600-CHECK: FLOOR
-; SI-CHECK: {{^}}v2f32:
-; SI-CHECK: v_floor_f32_e32
-; SI-CHECK: v_floor_f32_e32
-define void @v2f32(<2 x float> addrspace(1)* %out, <2 x float> %in) {
-entry:
- %0 = call <2 x float> @llvm.floor.v2f32(<2 x float> %in)
- store <2 x float> %0, <2 x float> addrspace(1)* %out
- ret void
-}
-
-; R600-CHECK: {{^}}v4f32:
-; R600-CHECK: FLOOR
-; R600-CHECK: FLOOR
-; R600-CHECK: FLOOR
-; R600-CHECK: FLOOR
-; SI-CHECK: {{^}}v4f32:
-; SI-CHECK: v_floor_f32_e32
-; SI-CHECK: v_floor_f32_e32
-; SI-CHECK: v_floor_f32_e32
-; SI-CHECK: v_floor_f32_e32
-define void @v4f32(<4 x float> addrspace(1)* %out, <4 x float> %in) {
-entry:
- %0 = call <4 x float> @llvm.floor.v4f32(<4 x float> %in)
- store <4 x float> %0, <4 x float> addrspace(1)* %out
- ret void
-}
-
-; Function Attrs: nounwind readonly
-declare float @llvm.floor.f32(float) #0
-
-; Function Attrs: nounwind readonly
-declare <2 x float> @llvm.floor.v2f32(<2 x float>) #0
-
-; Function Attrs: nounwind readonly
-declare <4 x float> @llvm.floor.v4f32(<4 x float>) #0
-
-attributes #0 = { nounwind readonly }
diff --git a/test/CodeGen/R600/llvm.log2.ll b/test/CodeGen/R600/llvm.log2.ll
index 0b54a46..c75e785 100644
--- a/test/CodeGen/R600/llvm.log2.ll
+++ b/test/CodeGen/R600/llvm.log2.ll
@@ -1,14 +1,15 @@
-;RUN: llc < %s -march=r600 -mcpu=redwood | FileCheck %s --check-prefix=EG-CHECK --check-prefix=FUNC
-;RUN: llc < %s -march=r600 -mcpu=cayman | FileCheck %s --check-prefix=CM-CHECK --check-prefix=FUNC
-;RUN: llc < %s -march=r600 -mcpu=SI | FileCheck %s --check-prefix=SI-CHECK --check-prefix=FUNC
+;RUN: llc < %s -march=r600 -mcpu=redwood | FileCheck %s --check-prefix=EG --check-prefix=FUNC
+;RUN: llc < %s -march=r600 -mcpu=cayman | FileCheck %s --check-prefix=CM --check-prefix=FUNC
+;RUN: llc < %s -march=amdgcn -mcpu=SI | FileCheck %s --check-prefix=SI --check-prefix=FUNC
+;RUN: llc < %s -march=amdgcn -mcpu=tonga | FileCheck %s --check-prefix=SI --check-prefix=FUNC
;FUNC-LABEL: {{^}}test:
-;EG-CHECK: LOG_IEEE
-;CM-CHECK-DAG: LOG_IEEE T{{[0-9]+\.[XYZW]}} (MASKED)
-;CM-CHECK-DAG: LOG_IEEE T{{[0-9]+\.[XYZW]}} (MASKED)
-;CM-CHECK-DAG: LOG_IEEE T{{[0-9]+\.[XYZW]}} (MASKED)
-;CM-CHECK-DAG: LOG_IEEE T{{[0-9]+\.[XYZW]}}
-;SI-CHECK: v_log_f32
+;EG: LOG_IEEE
+;CM-DAG: LOG_IEEE T{{[0-9]+\.[XYZW]}} (MASKED)
+;CM-DAG: LOG_IEEE T{{[0-9]+\.[XYZW]}} (MASKED)
+;CM-DAG: LOG_IEEE T{{[0-9]+\.[XYZW]}} (MASKED)
+;CM-DAG: LOG_IEEE T{{[0-9]+\.[XYZW]}}
+;SI: v_log_f32
define void @test(float addrspace(1)* %out, float %in) {
entry:
@@ -18,20 +19,20 @@ entry:
}
;FUNC-LABEL: {{^}}testv2:
-;EG-CHECK: LOG_IEEE
-;EG-CHECK: LOG_IEEE
+;EG: LOG_IEEE
+;EG: LOG_IEEE
; FIXME: We should be able to merge these packets together on Cayman so we
; have a maximum of 4 instructions.
-;CM-CHECK-DAG: LOG_IEEE T{{[0-9]+\.[XYZW]}} (MASKED)
-;CM-CHECK-DAG: LOG_IEEE T{{[0-9]+\.[XYZW]}} (MASKED)
-;CM-CHECK-DAG: LOG_IEEE T{{[0-9]+\.[XYZW]}} (MASKED)
-;CM-CHECK-DAG: LOG_IEEE T{{[0-9]+\.[XYZW]}} (MASKED)
-;CM-CHECK-DAG: LOG_IEEE T{{[0-9]+\.[XYZW]}} (MASKED)
-;CM-CHECK-DAG: LOG_IEEE T{{[0-9]+\.[XYZW]}} (MASKED)
-;CM-CHECK-DAG: LOG_IEEE T{{[0-9]+\.[XYZW]}}
-;CM-CHECK-DAG: LOG_IEEE T{{[0-9]+\.[XYZW]}}
-;SI-CHECK: v_log_f32
-;SI-CHECK: v_log_f32
+;CM-DAG: LOG_IEEE T{{[0-9]+\.[XYZW]}} (MASKED)
+;CM-DAG: LOG_IEEE T{{[0-9]+\.[XYZW]}} (MASKED)
+;CM-DAG: LOG_IEEE T{{[0-9]+\.[XYZW]}} (MASKED)
+;CM-DAG: LOG_IEEE T{{[0-9]+\.[XYZW]}} (MASKED)
+;CM-DAG: LOG_IEEE T{{[0-9]+\.[XYZW]}} (MASKED)
+;CM-DAG: LOG_IEEE T{{[0-9]+\.[XYZW]}} (MASKED)
+;CM-DAG: LOG_IEEE T{{[0-9]+\.[XYZW]}}
+;CM-DAG: LOG_IEEE T{{[0-9]+\.[XYZW]}}
+;SI: v_log_f32
+;SI: v_log_f32
define void @testv2(<2 x float> addrspace(1)* %out, <2 x float> %in) {
entry:
@@ -41,32 +42,32 @@ entry:
}
;FUNC-LABEL: {{^}}testv4:
-;EG-CHECK: LOG_IEEE
-;EG-CHECK: LOG_IEEE
-;EG-CHECK: LOG_IEEE
-;EG-CHECK: LOG_IEEE
+;EG: LOG_IEEE
+;EG: LOG_IEEE
+;EG: LOG_IEEE
+;EG: LOG_IEEE
; FIXME: We should be able to merge these packets together on Cayman so we
; have a maximum of 4 instructions.
-;CM-CHECK-DAG: LOG_IEEE T{{[0-9]+\.[XYZW]}} (MASKED)
-;CM-CHECK-DAG: LOG_IEEE T{{[0-9]+\.[XYZW]}} (MASKED)
-;CM-CHECK-DAG: LOG_IEEE T{{[0-9]+\.[XYZW]}} (MASKED)
-;CM-CHECK-DAG: LOG_IEEE T{{[0-9]+\.[XYZW]}} (MASKED)
-;CM-CHECK-DAG: LOG_IEEE T{{[0-9]+\.[XYZW]}} (MASKED)
-;CM-CHECK-DAG: LOG_IEEE T{{[0-9]+\.[XYZW]}} (MASKED)
-;CM-CHECK-DAG: LOG_IEEE T{{[0-9]+\.[XYZW]}} (MASKED)
-;CM-CHECK-DAG: LOG_IEEE T{{[0-9]+\.[XYZW]}} (MASKED)
-;CM-CHECK-DAG: LOG_IEEE T{{[0-9]+\.[XYZW]}} (MASKED)
-;CM-CHECK-DAG: LOG_IEEE T{{[0-9]+\.[XYZW]}} (MASKED)
-;CM-CHECK-DAG: LOG_IEEE T{{[0-9]+\.[XYZW]}} (MASKED)
-;CM-CHECK-DAG: LOG_IEEE T{{[0-9]+\.[XYZW]}} (MASKED)
-;CM-CHECK-DAG: LOG_IEEE T{{[0-9]+\.[XYZW]}}
-;CM-CHECK-DAG: LOG_IEEE T{{[0-9]+\.[XYZW]}}
-;CM-CHECK-DAG: LOG_IEEE T{{[0-9]+\.[XYZW]}}
-;CM-CHECK-DAG: LOG_IEEE T{{[0-9]+\.[XYZW]}}
-;SI-CHECK: v_log_f32
-;SI-CHECK: v_log_f32
-;SI-CHECK: v_log_f32
-;SI-CHECK: v_log_f32
+;CM-DAG: LOG_IEEE T{{[0-9]+\.[XYZW]}} (MASKED)
+;CM-DAG: LOG_IEEE T{{[0-9]+\.[XYZW]}} (MASKED)
+;CM-DAG: LOG_IEEE T{{[0-9]+\.[XYZW]}} (MASKED)
+;CM-DAG: LOG_IEEE T{{[0-9]+\.[XYZW]}} (MASKED)
+;CM-DAG: LOG_IEEE T{{[0-9]+\.[XYZW]}} (MASKED)
+;CM-DAG: LOG_IEEE T{{[0-9]+\.[XYZW]}} (MASKED)
+;CM-DAG: LOG_IEEE T{{[0-9]+\.[XYZW]}} (MASKED)
+;CM-DAG: LOG_IEEE T{{[0-9]+\.[XYZW]}} (MASKED)
+;CM-DAG: LOG_IEEE T{{[0-9]+\.[XYZW]}} (MASKED)
+;CM-DAG: LOG_IEEE T{{[0-9]+\.[XYZW]}} (MASKED)
+;CM-DAG: LOG_IEEE T{{[0-9]+\.[XYZW]}} (MASKED)
+;CM-DAG: LOG_IEEE T{{[0-9]+\.[XYZW]}} (MASKED)
+;CM-DAG: LOG_IEEE T{{[0-9]+\.[XYZW]}}
+;CM-DAG: LOG_IEEE T{{[0-9]+\.[XYZW]}}
+;CM-DAG: LOG_IEEE T{{[0-9]+\.[XYZW]}}
+;CM-DAG: LOG_IEEE T{{[0-9]+\.[XYZW]}}
+;SI: v_log_f32
+;SI: v_log_f32
+;SI: v_log_f32
+;SI: v_log_f32
define void @testv4(<4 x float> addrspace(1)* %out, <4 x float> %in) {
entry:
%0 = call <4 x float> @llvm.log2.v4f32(<4 x float> %in)
diff --git a/test/CodeGen/R600/llvm.memcpy.ll b/test/CodeGen/R600/llvm.memcpy.ll
index 5f2710a..e491732 100644
--- a/test/CodeGen/R600/llvm.memcpy.ll
+++ b/test/CodeGen/R600/llvm.memcpy.ll
@@ -1,4 +1,5 @@
-; RUN: llc -march=r600 -mcpu=SI -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
+; RUN: llc -march=amdgcn -mcpu=SI -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
+; RUN: llc -march=amdgcn -mcpu=tonga -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
declare void @llvm.memcpy.p3i8.p3i8.i32(i8 addrspace(3)* nocapture, i8 addrspace(3)* nocapture, i32, i32, i1) nounwind
declare void @llvm.memcpy.p1i8.p1i8.i64(i8 addrspace(1)* nocapture, i8 addrspace(1)* nocapture, i64, i32, i1) nounwind
@@ -6,39 +7,23 @@ declare void @llvm.memcpy.p1i8.p1i8.i64(i8 addrspace(1)* nocapture, i8 addrspace
; FUNC-LABEL: {{^}}test_small_memcpy_i64_lds_to_lds_align1:
; SI: ds_read_u8
-; SI: ds_write_b8
; SI: ds_read_u8
-; SI: ds_write_b8
; SI: ds_read_u8
-; SI: ds_write_b8
; SI: ds_read_u8
-; SI: ds_write_b8
; SI: ds_read_u8
-; SI: ds_write_b8
-
; SI: ds_read_u8
-; SI: ds_write_b8
; SI: ds_read_u8
-; SI: ds_write_b8
; SI: ds_read_u8
-; SI: ds_write_b8
+
; SI: ds_read_u8
-; SI: ds_write_b8
; SI: ds_read_u8
-; SI: ds_write_b8
-
; SI: ds_read_u8
-; SI: ds_write_b8
; SI: ds_read_u8
-; SI: ds_write_b8
; SI: ds_read_u8
-; SI: ds_write_b8
; SI: ds_read_u8
-; SI: ds_write_b8
; SI: ds_read_u8
; SI: ds_read_u8
-
; SI: ds_read_u8
; SI: ds_read_u8
; SI: ds_read_u8
@@ -65,6 +50,14 @@ declare void @llvm.memcpy.p1i8.p1i8.i64(i8 addrspace(1)* nocapture, i8 addrspace
; SI: ds_write_b8
; SI: ds_write_b8
; SI: ds_write_b8
+
+; SI: ds_write_b8
+; SI: ds_write_b8
+; SI: ds_write_b8
+; SI: ds_write_b8
+; SI: ds_write_b8
+; SI: ds_write_b8
+; SI: ds_write_b8
; SI: ds_write_b8
; SI: ds_write_b8
@@ -75,6 +68,14 @@ declare void @llvm.memcpy.p1i8.p1i8.i64(i8 addrspace(1)* nocapture, i8 addrspace
; SI: ds_write_b8
; SI: ds_write_b8
; SI: ds_write_b8
+
+; SI: ds_write_b8
+; SI: ds_write_b8
+; SI: ds_write_b8
+; SI: ds_write_b8
+; SI: ds_write_b8
+; SI: ds_write_b8
+; SI: ds_write_b8
; SI: ds_write_b8
; SI: s_endpgm
diff --git a/test/CodeGen/R600/llvm.rint.f64.ll b/test/CodeGen/R600/llvm.rint.f64.ll
index 72b546e..c63fb17 100644
--- a/test/CodeGen/R600/llvm.rint.f64.ll
+++ b/test/CodeGen/R600/llvm.rint.f64.ll
@@ -1,5 +1,6 @@
-; RUN: llc -march=r600 -mcpu=bonaire -verify-machineinstrs < %s | FileCheck -check-prefix=CI -check-prefix=FUNC %s
-; RUN: llc -march=r600 -mcpu=SI -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
+; RUN: llc -march=amdgcn -mcpu=tonga -verify-machineinstrs < %s | FileCheck -check-prefix=CI -check-prefix=FUNC %s
+; RUN: llc -march=amdgcn -mcpu=bonaire -verify-machineinstrs < %s | FileCheck -check-prefix=CI -check-prefix=FUNC %s
+; RUN: llc -march=amdgcn -mcpu=SI -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
; FUNC-LABEL: {{^}}rint_f64:
; CI: v_rndne_f64_e32
diff --git a/test/CodeGen/R600/llvm.rint.ll b/test/CodeGen/R600/llvm.rint.ll
index 2e05964..661db51 100644
--- a/test/CodeGen/R600/llvm.rint.ll
+++ b/test/CodeGen/R600/llvm.rint.ll
@@ -1,5 +1,6 @@
; RUN: llc -march=r600 -mcpu=redwood < %s | FileCheck %s -check-prefix=R600 -check-prefix=FUNC
-; RUN: llc -march=r600 -mcpu=SI -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
+; RUN: llc -march=amdgcn -mcpu=SI -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
+; RUN: llc -march=amdgcn -mcpu=tonga -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
; FUNC-LABEL: {{^}}rint_f32:
; R600: RNDNE
diff --git a/test/CodeGen/R600/llvm.round.f64.ll b/test/CodeGen/R600/llvm.round.f64.ll
new file mode 100644
index 0000000..920dbb3
--- /dev/null
+++ b/test/CodeGen/R600/llvm.round.f64.ll
@@ -0,0 +1,74 @@
+; RUN: llc -march=amdgcn -mcpu=SI < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
+
+; FUNC-LABEL: {{^}}round_f64:
+; SI: s_endpgm
+define void @round_f64(double addrspace(1)* %out, double %x) #0 {
+ %result = call double @llvm.round.f64(double %x) #1
+ store double %result, double addrspace(1)* %out
+ ret void
+}
+
+; This is a pretty large function, so just test a few of the
+; instructions that are necessary.
+
+; FUNC-LABEL: {{^}}v_round_f64:
+; SI: buffer_load_dwordx2
+; SI: v_bfe_u32 [[EXP:v[0-9]+]], v{{[0-9]+}}, 20, 11
+
+; SI-DAG: v_not_b32_e32
+; SI-DAG: v_not_b32_e32
+
+; SI-DAG: v_cmp_eq_i32
+
+; SI-DAG: s_mov_b32 [[BFIMASK:s[0-9]+]], 0x7fffffff
+; SI-DAG: v_cmp_lt_i32_e64
+; SI-DAG: v_bfi_b32 [[COPYSIGN:v[0-9]+]], [[BFIMASK]]
+
+; SI-DAG: v_cmp_gt_i32_e64
+
+
+; SI: buffer_store_dwordx2
+; SI: s_endpgm
+define void @v_round_f64(double addrspace(1)* %out, double addrspace(1)* %in) #0 {
+ %tid = call i32 @llvm.r600.read.tidig.x() #1
+ %gep = getelementptr double addrspace(1)* %in, i32 %tid
+ %out.gep = getelementptr double addrspace(1)* %out, i32 %tid
+ %x = load double addrspace(1)* %gep
+ %result = call double @llvm.round.f64(double %x) #1
+ store double %result, double addrspace(1)* %out.gep
+ ret void
+}
+
+; FUNC-LABEL: {{^}}round_v2f64:
+; SI: s_endpgm
+define void @round_v2f64(<2 x double> addrspace(1)* %out, <2 x double> %in) #0 {
+ %result = call <2 x double> @llvm.round.v2f64(<2 x double> %in) #1
+ store <2 x double> %result, <2 x double> addrspace(1)* %out
+ ret void
+}
+
+; FUNC-LABEL: {{^}}round_v4f64:
+; SI: s_endpgm
+define void @round_v4f64(<4 x double> addrspace(1)* %out, <4 x double> %in) #0 {
+ %result = call <4 x double> @llvm.round.v4f64(<4 x double> %in) #1
+ store <4 x double> %result, <4 x double> addrspace(1)* %out
+ ret void
+}
+
+; FUNC-LABEL: {{^}}round_v8f64:
+; SI: s_endpgm
+define void @round_v8f64(<8 x double> addrspace(1)* %out, <8 x double> %in) #0 {
+ %result = call <8 x double> @llvm.round.v8f64(<8 x double> %in) #1
+ store <8 x double> %result, <8 x double> addrspace(1)* %out
+ ret void
+}
+
+declare i32 @llvm.r600.read.tidig.x() #1
+
+declare double @llvm.round.f64(double) #1
+declare <2 x double> @llvm.round.v2f64(<2 x double>) #1
+declare <4 x double> @llvm.round.v4f64(<4 x double>) #1
+declare <8 x double> @llvm.round.v8f64(<8 x double>) #1
+
+attributes #0 = { nounwind }
+attributes #1 = { nounwind readnone }
diff --git a/test/CodeGen/R600/llvm.round.ll b/test/CodeGen/R600/llvm.round.ll
index bedf4ba..8d1cfb6 100644
--- a/test/CodeGen/R600/llvm.round.ll
+++ b/test/CodeGen/R600/llvm.round.ll
@@ -1,17 +1,28 @@
-; RUN: llc -march=r600 -mcpu=redwood < %s | FileCheck %s --check-prefix=R600 --check-prefix=FUNC
-
-; FUNC-LABEL: {{^}}f32:
-; R600: FRACT {{.*}}, [[ARG:KC[0-9]\[[0-9]+\]\.[XYZW]]]
-; R600-DAG: ADD {{.*}}, -0.5
-; R600-DAG: CEIL {{.*}} [[ARG]]
-; R600-DAG: FLOOR {{.*}} [[ARG]]
-; R600-DAG: CNDGE
-; R600-DAG: CNDGT
-; R600: CNDGE {{[^,]+}}, [[ARG]]
-define void @f32(float addrspace(1)* %out, float %in) {
-entry:
- %0 = call float @llvm.round.f32(float %in)
- store float %0, float addrspace(1)* %out
+; RUN: llc -march=amdgcn -mcpu=SI < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
+; RUN: llc -march=amdgcn -mcpu=tonga < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
+; RUN: llc -march=r600 -mcpu=redwood < %s | FileCheck -check-prefix=R600 -check-prefix=FUNC %s
+
+; FUNC-LABEL: {{^}}round_f32:
+; SI-DAG: s_load_dword [[SX:s[0-9]+]]
+; SI-DAG: s_mov_b32 [[K:s[0-9]+]], 0x7fffffff
+; SI: v_trunc_f32_e32 [[TRUNC:v[0-9]+]], [[SX]]
+; SI: v_sub_f32_e32 [[SUB:v[0-9]+]], [[SX]], [[TRUNC]]
+; SI: v_mov_b32_e32 [[VX:v[0-9]+]], [[SX]]
+; SI: v_bfi_b32 [[COPYSIGN:v[0-9]+]], [[K]], 1.0, [[VX]]
+; SI: v_cmp_ge_f32_e64 [[CMP:s\[[0-9]+:[0-9]+\]]], |[[SUB]]|, 0.5
+; SI: v_cndmask_b32_e64 [[SEL:v[0-9]+]], 0, [[VX]], [[CMP]]
+; SI: v_add_f32_e32 [[RESULT:v[0-9]+]], [[SEL]], [[TRUNC]]
+; SI: buffer_store_dword [[RESULT]]
+
+; R600: TRUNC {{.*}}, [[ARG:KC[0-9]\[[0-9]+\]\.[XYZW]]]
+; R600-DAG: ADD {{.*}},
+; R600-DAG: BFI_INT
+; R600-DAG: SETGE
+; R600-DAG: CNDE
+; R600-DAG: ADD
+define void @round_f32(float addrspace(1)* %out, float %x) #0 {
+ %result = call float @llvm.round.f32(float %x) #1
+ store float %result, float addrspace(1)* %out
ret void
}
@@ -20,24 +31,37 @@ entry:
; a test for the scalar case, so the vector tests just check that the
; compiler doesn't crash.
-; FUNC-LABEL: v2f32
+; FUNC-LABEL: {{^}}round_v2f32:
+; SI: s_endpgm
; R600: CF_END
-define void @v2f32(<2 x float> addrspace(1)* %out, <2 x float> %in) {
-entry:
- %0 = call <2 x float> @llvm.round.v2f32(<2 x float> %in)
- store <2 x float> %0, <2 x float> addrspace(1)* %out
+define void @round_v2f32(<2 x float> addrspace(1)* %out, <2 x float> %in) #0 {
+ %result = call <2 x float> @llvm.round.v2f32(<2 x float> %in) #1
+ store <2 x float> %result, <2 x float> addrspace(1)* %out
ret void
}
-; FUNC-LABEL: v4f32
+; FUNC-LABEL: {{^}}round_v4f32:
+; SI: s_endpgm
; R600: CF_END
-define void @v4f32(<4 x float> addrspace(1)* %out, <4 x float> %in) {
-entry:
- %0 = call <4 x float> @llvm.round.v4f32(<4 x float> %in)
- store <4 x float> %0, <4 x float> addrspace(1)* %out
+define void @round_v4f32(<4 x float> addrspace(1)* %out, <4 x float> %in) #0 {
+ %result = call <4 x float> @llvm.round.v4f32(<4 x float> %in) #1
+ store <4 x float> %result, <4 x float> addrspace(1)* %out
ret void
}
-declare float @llvm.round.f32(float)
-declare <2 x float> @llvm.round.v2f32(<2 x float>)
-declare <4 x float> @llvm.round.v4f32(<4 x float>)
+; FUNC-LABEL: {{^}}round_v8f32:
+; SI: s_endpgm
+; R600: CF_END
+define void @round_v8f32(<8 x float> addrspace(1)* %out, <8 x float> %in) #0 {
+ %result = call <8 x float> @llvm.round.v8f32(<8 x float> %in) #1
+ store <8 x float> %result, <8 x float> addrspace(1)* %out
+ ret void
+}
+
+declare float @llvm.round.f32(float) #1
+declare <2 x float> @llvm.round.v2f32(<2 x float>) #1
+declare <4 x float> @llvm.round.v4f32(<4 x float>) #1
+declare <8 x float> @llvm.round.v8f32(<8 x float>) #1
+
+attributes #0 = { nounwind }
+attributes #1 = { nounwind readnone }
diff --git a/test/CodeGen/R600/llvm.sin.ll b/test/CodeGen/R600/llvm.sin.ll
index 7e45710..3bb245c 100644
--- a/test/CodeGen/R600/llvm.sin.ll
+++ b/test/CodeGen/R600/llvm.sin.ll
@@ -1,6 +1,8 @@
; RUN: llc -march=r600 -mcpu=redwood < %s | FileCheck -check-prefix=EG -check-prefix=FUNC %s
-; RUN: llc -march=r600 -mcpu=SI < %s | FileCheck -check-prefix=SI -check-prefix=SI-SAFE -check-prefix=FUNC %s
-; RUN: llc -march=r600 -mcpu=SI -enable-unsafe-fp-math < %s | FileCheck -check-prefix=SI -check-prefix=SI-UNSAFE -check-prefix=FUNC %s
+; RUN: llc -march=amdgcn -mcpu=SI < %s | FileCheck -check-prefix=SI -check-prefix=SI-SAFE -check-prefix=FUNC %s
+; RUN: llc -march=amdgcn -mcpu=SI -enable-unsafe-fp-math < %s | FileCheck -check-prefix=SI -check-prefix=SI-UNSAFE -check-prefix=FUNC %s
+; RUN: llc -march=amdgcn -mcpu=tonga < %s | FileCheck -check-prefix=SI -check-prefix=SI-SAFE -check-prefix=FUNC %s
+; RUN: llc -march=amdgcn -mcpu=tonga -enable-unsafe-fp-math < %s | FileCheck -check-prefix=SI -check-prefix=SI-UNSAFE -check-prefix=FUNC %s
; FUNC-LABEL: sin_f32
; EG: MULADD_IEEE *
diff --git a/test/CodeGen/R600/llvm.sqrt.ll b/test/CodeGen/R600/llvm.sqrt.ll
index c039225..cc4717a 100644
--- a/test/CodeGen/R600/llvm.sqrt.ll
+++ b/test/CodeGen/R600/llvm.sqrt.ll
@@ -1,11 +1,12 @@
-; RUN: llc < %s -march=r600 --mcpu=redwood | FileCheck %s --check-prefix=R600-CHECK
-; RUN: llc < %s -march=r600 --mcpu=SI -verify-machineinstrs| FileCheck %s --check-prefix=SI-CHECK
+; RUN: llc < %s -march=r600 --mcpu=redwood | FileCheck %s --check-prefix=R600
+; RUN: llc < %s -march=amdgcn --mcpu=SI -verify-machineinstrs| FileCheck %s --check-prefix=SI
+; RUN: llc < %s -march=amdgcn --mcpu=tonga -verify-machineinstrs| FileCheck %s --check-prefix=SI
-; R600-CHECK-LABEL: {{^}}sqrt_f32:
-; R600-CHECK: RECIPSQRT_CLAMPED * T{{[0-9]\.[XYZW]}}, KC0[2].Z
-; R600-CHECK: MUL NON-IEEE T{{[0-9]\.[XYZW]}}, KC0[2].Z, PS
-; SI-CHECK-LABEL: {{^}}sqrt_f32:
-; SI-CHECK: v_sqrt_f32_e32
+; R600-LABEL: {{^}}sqrt_f32:
+; R600: RECIPSQRT_CLAMPED * T{{[0-9]\.[XYZW]}}, KC0[2].Z
+; R600: MUL NON-IEEE T{{[0-9]\.[XYZW]}}, KC0[2].Z, PS
+; SI-LABEL: {{^}}sqrt_f32:
+; SI: v_sqrt_f32_e32
define void @sqrt_f32(float addrspace(1)* %out, float %in) {
entry:
%0 = call float @llvm.sqrt.f32(float %in)
@@ -13,14 +14,14 @@ entry:
ret void
}
-; R600-CHECK-LABEL: {{^}}sqrt_v2f32:
-; R600-CHECK-DAG: RECIPSQRT_CLAMPED * T{{[0-9]\.[XYZW]}}, KC0[2].W
-; R600-CHECK-DAG: MUL NON-IEEE T{{[0-9]\.[XYZW]}}, KC0[2].W, PS
-; R600-CHECK-DAG: RECIPSQRT_CLAMPED * T{{[0-9]\.[XYZW]}}, KC0[3].X
-; R600-CHECK-DAG: MUL NON-IEEE T{{[0-9]\.[XYZW]}}, KC0[3].X, PS
-; SI-CHECK-LABEL: {{^}}sqrt_v2f32:
-; SI-CHECK: v_sqrt_f32_e32
-; SI-CHECK: v_sqrt_f32_e32
+; R600-LABEL: {{^}}sqrt_v2f32:
+; R600-DAG: RECIPSQRT_CLAMPED * T{{[0-9]\.[XYZW]}}, KC0[2].W
+; R600-DAG: MUL NON-IEEE T{{[0-9]\.[XYZW]}}, KC0[2].W, PS
+; R600-DAG: RECIPSQRT_CLAMPED * T{{[0-9]\.[XYZW]}}, KC0[3].X
+; R600-DAG: MUL NON-IEEE T{{[0-9]\.[XYZW]}}, KC0[3].X, PS
+; SI-LABEL: {{^}}sqrt_v2f32:
+; SI: v_sqrt_f32_e32
+; SI: v_sqrt_f32_e32
define void @sqrt_v2f32(<2 x float> addrspace(1)* %out, <2 x float> %in) {
entry:
%0 = call <2 x float> @llvm.sqrt.v2f32(<2 x float> %in)
@@ -28,20 +29,20 @@ entry:
ret void
}
-; R600-CHECK-LABEL: {{^}}sqrt_v4f32:
-; R600-CHECK-DAG: RECIPSQRT_CLAMPED * T{{[0-9]\.[XYZW]}}, KC0[3].Y
-; R600-CHECK-DAG: MUL NON-IEEE T{{[0-9]\.[XYZW]}}, KC0[3].Y, PS
-; R600-CHECK-DAG: RECIPSQRT_CLAMPED * T{{[0-9]\.[XYZW]}}, KC0[3].Z
-; R600-CHECK-DAG: MUL NON-IEEE T{{[0-9]\.[XYZW]}}, KC0[3].Z, PS
-; R600-CHECK-DAG: RECIPSQRT_CLAMPED * T{{[0-9]\.[XYZW]}}, KC0[3].W
-; R600-CHECK-DAG: MUL NON-IEEE T{{[0-9]\.[XYZW]}}, KC0[3].W, PS
-; R600-CHECK-DAG: RECIPSQRT_CLAMPED * T{{[0-9]\.[XYZW]}}, KC0[4].X
-; R600-CHECK-DAG: MUL NON-IEEE T{{[0-9]\.[XYZW]}}, KC0[4].X, PS
-; SI-CHECK-LABEL: {{^}}sqrt_v4f32:
-; SI-CHECK: v_sqrt_f32_e32
-; SI-CHECK: v_sqrt_f32_e32
-; SI-CHECK: v_sqrt_f32_e32
-; SI-CHECK: v_sqrt_f32_e32
+; R600-LABEL: {{^}}sqrt_v4f32:
+; R600-DAG: RECIPSQRT_CLAMPED * T{{[0-9]\.[XYZW]}}, KC0[3].Y
+; R600-DAG: MUL NON-IEEE T{{[0-9]\.[XYZW]}}, KC0[3].Y, PS
+; R600-DAG: RECIPSQRT_CLAMPED * T{{[0-9]\.[XYZW]}}, KC0[3].Z
+; R600-DAG: MUL NON-IEEE T{{[0-9]\.[XYZW]}}, KC0[3].Z, PS
+; R600-DAG: RECIPSQRT_CLAMPED * T{{[0-9]\.[XYZW]}}, KC0[3].W
+; R600-DAG: MUL NON-IEEE T{{[0-9]\.[XYZW]}}, KC0[3].W, PS
+; R600-DAG: RECIPSQRT_CLAMPED * T{{[0-9]\.[XYZW]}}, KC0[4].X
+; R600-DAG: MUL NON-IEEE T{{[0-9]\.[XYZW]}}, KC0[4].X, PS
+; SI-LABEL: {{^}}sqrt_v4f32:
+; SI: v_sqrt_f32_e32
+; SI: v_sqrt_f32_e32
+; SI: v_sqrt_f32_e32
+; SI: v_sqrt_f32_e32
define void @sqrt_v4f32(<4 x float> addrspace(1)* %out, <4 x float> %in) {
entry:
%0 = call <4 x float> @llvm.sqrt.v4f32(<4 x float> %in)
diff --git a/test/CodeGen/R600/llvm.trunc.ll b/test/CodeGen/R600/llvm.trunc.ll
deleted file mode 100644
index 5585477..0000000
--- a/test/CodeGen/R600/llvm.trunc.ll
+++ /dev/null
@@ -1,13 +0,0 @@
-; RUN: llc < %s -march=r600 -mcpu=redwood | FileCheck %s
-
-; CHECK-LABEL: {{^}}trunc_f32:
-; CHECK: TRUNC
-
-define void @trunc_f32(float addrspace(1)* %out, float %in) {
-entry:
- %0 = call float @llvm.trunc.f32(float %in)
- store float %0, float addrspace(1)* %out
- ret void
-}
-
-declare float @llvm.trunc.f32(float)
diff --git a/test/CodeGen/R600/load-i1.ll b/test/CodeGen/R600/load-i1.ll
index d85e16f..315c0a3 100644
--- a/test/CodeGen/R600/load-i1.ll
+++ b/test/CodeGen/R600/load-i1.ll
@@ -1,21 +1,58 @@
-; RUN: llc -march=r600 -mcpu=SI -verify-machineinstrs< %s | FileCheck -check-prefix=SI %s
+; RUN: llc -march=amdgcn -mcpu=SI -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
+; RUN: llc -march=amdgcn -mcpu=tonga -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
+; RUN: llc -march=r600 -mcpu=cypress -verify-machineinstrs < %s | FileCheck -check-prefix=EG -check-prefix=FUNC %s
-
-; SI-LABEL: {{^}}global_copy_i1_to_i1:
+; FUNC-LABEL: {{^}}global_copy_i1_to_i1:
; SI: buffer_load_ubyte
; SI: v_and_b32_e32 v{{[0-9]+}}, 1
; SI: buffer_store_byte
; SI: s_endpgm
+
+; EG: VTX_READ_8
+; EG: AND_INT
define void @global_copy_i1_to_i1(i1 addrspace(1)* %out, i1 addrspace(1)* %in) nounwind {
%load = load i1 addrspace(1)* %in
store i1 %load, i1 addrspace(1)* %out, align 1
ret void
}
-; SI-LABEL: {{^}}global_sextload_i1_to_i32:
-; XSI: BUFFER_LOAD_BYTE
+; FUNC-LABEL: {{^}}local_copy_i1_to_i1:
+; SI: ds_read_u8
+; SI: v_and_b32_e32 v{{[0-9]+}}, 1
+; SI: ds_write_b8
+; SI: s_endpgm
+
+; EG: LDS_UBYTE_READ_RET
+; EG: AND_INT
+; EG: LDS_BYTE_WRITE
+define void @local_copy_i1_to_i1(i1 addrspace(3)* %out, i1 addrspace(3)* %in) nounwind {
+ %load = load i1 addrspace(3)* %in
+ store i1 %load, i1 addrspace(3)* %out, align 1
+ ret void
+}
+
+; FUNC-LABEL: {{^}}constant_copy_i1_to_i1:
+; SI: buffer_load_ubyte
+; SI: v_and_b32_e32 v{{[0-9]+}}, 1
+; SI: buffer_store_byte
+; SI: s_endpgm
+
+; EG: VTX_READ_8
+; EG: AND_INT
+define void @constant_copy_i1_to_i1(i1 addrspace(1)* %out, i1 addrspace(2)* %in) nounwind {
+ %load = load i1 addrspace(2)* %in
+ store i1 %load, i1 addrspace(1)* %out, align 1
+ ret void
+}
+
+; FUNC-LABEL: {{^}}global_sextload_i1_to_i32:
+; SI: buffer_load_ubyte
+; SI: v_bfe_i32
; SI: buffer_store_dword
; SI: s_endpgm
+
+; EG: VTX_READ_8
+; EG: BFE_INT
define void @global_sextload_i1_to_i32(i32 addrspace(1)* %out, i1 addrspace(1)* %in) nounwind {
%load = load i1 addrspace(1)* %in
%ext = sext i1 %load to i32
@@ -23,10 +60,11 @@ define void @global_sextload_i1_to_i32(i32 addrspace(1)* %out, i1 addrspace(1)*
ret void
}
-; SI-LABEL: {{^}}global_zextload_i1_to_i32:
+; FUNC-LABEL: {{^}}global_zextload_i1_to_i32:
; SI: buffer_load_ubyte
; SI: buffer_store_dword
; SI: s_endpgm
+
define void @global_zextload_i1_to_i32(i32 addrspace(1)* %out, i1 addrspace(1)* %in) nounwind {
%load = load i1 addrspace(1)* %in
%ext = zext i1 %load to i32
@@ -34,8 +72,9 @@ define void @global_zextload_i1_to_i32(i32 addrspace(1)* %out, i1 addrspace(1)*
ret void
}
-; SI-LABEL: {{^}}global_sextload_i1_to_i64:
-; XSI: BUFFER_LOAD_BYTE
+; FUNC-LABEL: {{^}}global_sextload_i1_to_i64:
+; SI: buffer_load_ubyte
+; SI: v_bfe_i32
; SI: buffer_store_dwordx2
; SI: s_endpgm
define void @global_sextload_i1_to_i64(i64 addrspace(1)* %out, i1 addrspace(1)* %in) nounwind {
@@ -45,8 +84,9 @@ define void @global_sextload_i1_to_i64(i64 addrspace(1)* %out, i1 addrspace(1)*
ret void
}
-; SI-LABEL: {{^}}global_zextload_i1_to_i64:
+; FUNC-LABEL: {{^}}global_zextload_i1_to_i64:
; SI: buffer_load_ubyte
+; SI: v_mov_b32_e32 {{v[0-9]+}}, 0
; SI: buffer_store_dwordx2
; SI: s_endpgm
define void @global_zextload_i1_to_i64(i64 addrspace(1)* %out, i1 addrspace(1)* %in) nounwind {
@@ -56,7 +96,7 @@ define void @global_zextload_i1_to_i64(i64 addrspace(1)* %out, i1 addrspace(1)*
ret void
}
-; SI-LABEL: {{^}}i1_arg:
+; FUNC-LABEL: {{^}}i1_arg:
; SI: buffer_load_ubyte
; SI: v_and_b32_e32
; SI: buffer_store_byte
@@ -66,7 +106,7 @@ define void @i1_arg(i1 addrspace(1)* %out, i1 %x) nounwind {
ret void
}
-; SI-LABEL: {{^}}i1_arg_zext_i32:
+; FUNC-LABEL: {{^}}i1_arg_zext_i32:
; SI: buffer_load_ubyte
; SI: buffer_store_dword
; SI: s_endpgm
@@ -76,7 +116,7 @@ define void @i1_arg_zext_i32(i32 addrspace(1)* %out, i1 %x) nounwind {
ret void
}
-; SI-LABEL: {{^}}i1_arg_zext_i64:
+; FUNC-LABEL: {{^}}i1_arg_zext_i64:
; SI: buffer_load_ubyte
; SI: buffer_store_dwordx2
; SI: s_endpgm
@@ -86,8 +126,8 @@ define void @i1_arg_zext_i64(i64 addrspace(1)* %out, i1 %x) nounwind {
ret void
}
-; SI-LABEL: {{^}}i1_arg_sext_i32:
-; XSI: BUFFER_LOAD_BYTE
+; FUNC-LABEL: {{^}}i1_arg_sext_i32:
+; SI: buffer_load_ubyte
; SI: buffer_store_dword
; SI: s_endpgm
define void @i1_arg_sext_i32(i32 addrspace(1)* %out, i1 %x) nounwind {
@@ -96,8 +136,10 @@ define void @i1_arg_sext_i32(i32 addrspace(1)* %out, i1 %x) nounwind {
ret void
}
-; SI-LABEL: {{^}}i1_arg_sext_i64:
-; XSI: BUFFER_LOAD_BYTE
+; FUNC-LABEL: {{^}}i1_arg_sext_i64:
+; SI: buffer_load_ubyte
+; SI: v_bfe_i32
+; SI: v_ashrrev_i32
; SI: buffer_store_dwordx2
; SI: s_endpgm
define void @i1_arg_sext_i64(i64 addrspace(1)* %out, i1 %x) nounwind {
diff --git a/test/CodeGen/R600/load.ll b/test/CodeGen/R600/load.ll
index 62d3063..b71b7cb 100644
--- a/test/CodeGen/R600/load.ll
+++ b/test/CodeGen/R600/load.ll
@@ -1,6 +1,7 @@
-; RUN: llc < %s -march=r600 -mcpu=redwood | FileCheck --check-prefix=R600-CHECK --check-prefix=FUNC %s
-; RUN: llc < %s -march=r600 -mcpu=cayman | FileCheck --check-prefix=R600-CHECK --check-prefix=FUNC %s
-; RUN: llc < %s -march=r600 -mcpu=SI -verify-machineinstrs | FileCheck --check-prefix=SI-CHECK --check-prefix=FUNC %s
+; RUN: llc < %s -march=r600 -mcpu=redwood | FileCheck --check-prefix=R600 --check-prefix=FUNC %s
+; RUN: llc < %s -march=r600 -mcpu=cayman | FileCheck --check-prefix=R600 --check-prefix=FUNC %s
+; RUN: llc < %s -march=amdgcn -mcpu=SI -verify-machineinstrs | FileCheck --check-prefix=SI --check-prefix=FUNC %s
+; RUN: llc < %s -march=amdgcn -mcpu=tonga -verify-machineinstrs | FileCheck --check-prefix=SI --check-prefix=FUNC %s
;===------------------------------------------------------------------------===;
; GLOBAL ADDRESS SPACE
@@ -8,9 +9,9 @@
; Load an i8 value from the global address space.
; FUNC-LABEL: {{^}}load_i8:
-; R600-CHECK: VTX_READ_8 T{{[0-9]+\.X, T[0-9]+\.X}}
+; R600: VTX_READ_8 T{{[0-9]+\.X, T[0-9]+\.X}}
-; SI-CHECK: buffer_load_ubyte v{{[0-9]+}},
+; SI: buffer_load_ubyte v{{[0-9]+}},
define void @load_i8(i32 addrspace(1)* %out, i8 addrspace(1)* %in) {
%1 = load i8 addrspace(1)* %in
%2 = zext i8 %1 to i32
@@ -19,12 +20,12 @@ define void @load_i8(i32 addrspace(1)* %out, i8 addrspace(1)* %in) {
}
; FUNC-LABEL: {{^}}load_i8_sext:
-; R600-CHECK: VTX_READ_8 [[DST:T[0-9]\.[XYZW]]], [[DST]]
-; R600-CHECK: LSHL {{[* ]*}}T{{[0-9]}}.[[LSHL_CHAN:[XYZW]]], [[DST]]
-; R600-CHECK: 24
-; R600-CHECK: ASHR {{[* ]*}}T{{[0-9]\.[XYZW]}}, PV.[[LSHL_CHAN]]
-; R600-CHECK: 24
-; SI-CHECK: buffer_load_sbyte
+; R600: VTX_READ_8 [[DST:T[0-9]\.[XYZW]]], [[DST]]
+; R600: LSHL {{[* ]*}}T{{[0-9]}}.[[LSHL_CHAN:[XYZW]]], [[DST]]
+; R600: 24
+; R600: ASHR {{[* ]*}}T{{[0-9]\.[XYZW]}}, PV.[[LSHL_CHAN]]
+; R600: 24
+; SI: buffer_load_sbyte
define void @load_i8_sext(i32 addrspace(1)* %out, i8 addrspace(1)* %in) {
entry:
%0 = load i8 addrspace(1)* %in
@@ -34,10 +35,10 @@ entry:
}
; FUNC-LABEL: {{^}}load_v2i8:
-; R600-CHECK: VTX_READ_8
-; R600-CHECK: VTX_READ_8
-; SI-CHECK: buffer_load_ubyte
-; SI-CHECK: buffer_load_ubyte
+; R600: VTX_READ_8
+; R600: VTX_READ_8
+; SI: buffer_load_ubyte
+; SI: buffer_load_ubyte
define void @load_v2i8(<2 x i32> addrspace(1)* %out, <2 x i8> addrspace(1)* %in) {
entry:
%0 = load <2 x i8> addrspace(1)* %in
@@ -47,18 +48,18 @@ entry:
}
; FUNC-LABEL: {{^}}load_v2i8_sext:
-; R600-CHECK-DAG: VTX_READ_8 [[DST_X:T[0-9]\.[XYZW]]], [[DST_X]]
-; R600-CHECK-DAG: VTX_READ_8 [[DST_Y:T[0-9]\.[XYZW]]], [[DST_Y]]
-; R600-CHECK-DAG: LSHL {{[* ]*}}T{{[0-9]}}.[[LSHL_X_CHAN:[XYZW]]], [[DST_X]]
-; R600-CHECK-DAG: 24
-; R600-CHECK-DAG: ASHR {{[* ]*}}T{{[0-9]\.[XYZW]}}, PV.[[LSHL_X_CHAN]]
-; R600-CHECK-DAG: 24
-; R600-CHECK-DAG: LSHL {{[* ]*}}T{{[0-9]}}.[[LSHL_Y_CHAN:[XYZW]]], [[DST_Y]]
-; R600-CHECK-DAG: 24
-; R600-CHECK-DAG: ASHR {{[* ]*}}T{{[0-9]\.[XYZW]}}, PV.[[LSHL_Y_CHAN]]
-; R600-CHECK-DAG: 24
-; SI-CHECK: buffer_load_sbyte
-; SI-CHECK: buffer_load_sbyte
+; R600-DAG: VTX_READ_8 [[DST_X:T[0-9]\.[XYZW]]], [[DST_X]]
+; R600-DAG: VTX_READ_8 [[DST_Y:T[0-9]\.[XYZW]]], [[DST_Y]]
+; R600-DAG: LSHL {{[* ]*}}T{{[0-9]}}.[[LSHL_X_CHAN:[XYZW]]], [[DST_X]]
+; R600-DAG: 24
+; R600-DAG: ASHR {{[* ]*}}T{{[0-9]\.[XYZW]}}, PV.[[LSHL_X_CHAN]]
+; R600-DAG: 24
+; R600-DAG: LSHL {{[* ]*}}T{{[0-9]}}.[[LSHL_Y_CHAN:[XYZW]]], [[DST_Y]]
+; R600-DAG: 24
+; R600-DAG: ASHR {{[* ]*}}T{{[0-9]\.[XYZW]}}, PV.[[LSHL_Y_CHAN]]
+; R600-DAG: 24
+; SI: buffer_load_sbyte
+; SI: buffer_load_sbyte
define void @load_v2i8_sext(<2 x i32> addrspace(1)* %out, <2 x i8> addrspace(1)* %in) {
entry:
%0 = load <2 x i8> addrspace(1)* %in
@@ -68,14 +69,14 @@ entry:
}
; FUNC-LABEL: {{^}}load_v4i8:
-; R600-CHECK: VTX_READ_8
-; R600-CHECK: VTX_READ_8
-; R600-CHECK: VTX_READ_8
-; R600-CHECK: VTX_READ_8
-; SI-CHECK: buffer_load_ubyte
-; SI-CHECK: buffer_load_ubyte
-; SI-CHECK: buffer_load_ubyte
-; SI-CHECK: buffer_load_ubyte
+; R600: VTX_READ_8
+; R600: VTX_READ_8
+; R600: VTX_READ_8
+; R600: VTX_READ_8
+; SI: buffer_load_ubyte
+; SI: buffer_load_ubyte
+; SI: buffer_load_ubyte
+; SI: buffer_load_ubyte
define void @load_v4i8(<4 x i32> addrspace(1)* %out, <4 x i8> addrspace(1)* %in) {
entry:
%0 = load <4 x i8> addrspace(1)* %in
@@ -85,30 +86,30 @@ entry:
}
; FUNC-LABEL: {{^}}load_v4i8_sext:
-; R600-CHECK-DAG: VTX_READ_8 [[DST_X:T[0-9]\.[XYZW]]], [[DST_X]]
-; R600-CHECK-DAG: VTX_READ_8 [[DST_Y:T[0-9]\.[XYZW]]], [[DST_Y]]
-; R600-CHECK-DAG: VTX_READ_8 [[DST_Z:T[0-9]\.[XYZW]]], [[DST_Z]]
-; R600-CHECK-DAG: VTX_READ_8 [[DST_W:T[0-9]\.[XYZW]]], [[DST_W]]
-; R600-CHECK-DAG: LSHL {{[* ]*}}T{{[0-9]}}.[[LSHL_X_CHAN:[XYZW]]], [[DST_X]]
-; R600-CHECK-DAG: 24
-; R600-CHECK-DAG: ASHR {{[* ]*}}T{{[0-9]\.[XYZW]}}, PV.[[LSHL_X_CHAN]]
-; R600-CHECK-DAG: 24
-; R600-CHECK-DAG: LSHL {{[* ]*}}T{{[0-9]}}.[[LSHL_Y_CHAN:[XYZW]]], [[DST_Y]]
-; R600-CHECK-DAG: 24
-; R600-CHECK-DAG: ASHR {{[* ]*}}T{{[0-9]\.[XYZW]}}, PV.[[LSHL_Y_CHAN]]
-; R600-CHECK-DAG: 24
-; R600-CHECK-DAG: LSHL {{[* ]*}}T{{[0-9]}}.[[LSHL_Z_CHAN:[XYZW]]], [[DST_Z]]
-; R600-CHECK-DAG: 24
-; R600-CHECK-DAG: ASHR {{[* ]*}}T{{[0-9]\.[XYZW]}}, PV.[[LSHL_Z_CHAN]]
-; R600-CHECK-DAG: 24
-; R600-CHECK-DAG: LSHL {{[* ]*}}T{{[0-9]}}.[[LSHL_W_CHAN:[XYZW]]], [[DST_W]]
-; R600-CHECK-DAG: 24
-; R600-CHECK-DAG: ASHR {{[* ]*}}T{{[0-9]\.[XYZW]}}, PV.[[LSHL_W_CHAN]]
-; R600-CHECK-DAG: 24
-; SI-CHECK: buffer_load_sbyte
-; SI-CHECK: buffer_load_sbyte
-; SI-CHECK: buffer_load_sbyte
-; SI-CHECK: buffer_load_sbyte
+; R600-DAG: VTX_READ_8 [[DST_X:T[0-9]\.[XYZW]]], [[DST_X]]
+; R600-DAG: VTX_READ_8 [[DST_Y:T[0-9]\.[XYZW]]], [[DST_Y]]
+; R600-DAG: VTX_READ_8 [[DST_Z:T[0-9]\.[XYZW]]], [[DST_Z]]
+; R600-DAG: VTX_READ_8 [[DST_W:T[0-9]\.[XYZW]]], [[DST_W]]
+; R600-DAG: LSHL {{[* ]*}}T{{[0-9]}}.[[LSHL_X_CHAN:[XYZW]]], [[DST_X]]
+; R600-DAG: 24
+; R600-DAG: ASHR {{[* ]*}}T{{[0-9]\.[XYZW]}}, PV.[[LSHL_X_CHAN]]
+; R600-DAG: 24
+; R600-DAG: LSHL {{[* ]*}}T{{[0-9]}}.[[LSHL_Y_CHAN:[XYZW]]], [[DST_Y]]
+; R600-DAG: 24
+; R600-DAG: ASHR {{[* ]*}}T{{[0-9]\.[XYZW]}}, PV.[[LSHL_Y_CHAN]]
+; R600-DAG: 24
+; R600-DAG: LSHL {{[* ]*}}T{{[0-9]}}.[[LSHL_Z_CHAN:[XYZW]]], [[DST_Z]]
+; R600-DAG: 24
+; R600-DAG: ASHR {{[* ]*}}T{{[0-9]\.[XYZW]}}, PV.[[LSHL_Z_CHAN]]
+; R600-DAG: 24
+; R600-DAG: LSHL {{[* ]*}}T{{[0-9]}}.[[LSHL_W_CHAN:[XYZW]]], [[DST_W]]
+; R600-DAG: 24
+; R600-DAG: ASHR {{[* ]*}}T{{[0-9]\.[XYZW]}}, PV.[[LSHL_W_CHAN]]
+; R600-DAG: 24
+; SI: buffer_load_sbyte
+; SI: buffer_load_sbyte
+; SI: buffer_load_sbyte
+; SI: buffer_load_sbyte
define void @load_v4i8_sext(<4 x i32> addrspace(1)* %out, <4 x i8> addrspace(1)* %in) {
entry:
%0 = load <4 x i8> addrspace(1)* %in
@@ -119,8 +120,8 @@ entry:
; Load an i16 value from the global address space.
; FUNC-LABEL: {{^}}load_i16:
-; R600-CHECK: VTX_READ_16 T{{[0-9]+\.X, T[0-9]+\.X}}
-; SI-CHECK: buffer_load_ushort
+; R600: VTX_READ_16 T{{[0-9]+\.X, T[0-9]+\.X}}
+; SI: buffer_load_ushort
define void @load_i16(i32 addrspace(1)* %out, i16 addrspace(1)* %in) {
entry:
%0 = load i16 addrspace(1)* %in
@@ -130,12 +131,12 @@ entry:
}
; FUNC-LABEL: {{^}}load_i16_sext:
-; R600-CHECK: VTX_READ_16 [[DST:T[0-9]\.[XYZW]]], [[DST]]
-; R600-CHECK: LSHL {{[* ]*}}T{{[0-9]}}.[[LSHL_CHAN:[XYZW]]], [[DST]]
-; R600-CHECK: 16
-; R600-CHECK: ASHR {{[* ]*}}T{{[0-9]\.[XYZW]}}, PV.[[LSHL_CHAN]]
-; R600-CHECK: 16
-; SI-CHECK: buffer_load_sshort
+; R600: VTX_READ_16 [[DST:T[0-9]\.[XYZW]]], [[DST]]
+; R600: LSHL {{[* ]*}}T{{[0-9]}}.[[LSHL_CHAN:[XYZW]]], [[DST]]
+; R600: 16
+; R600: ASHR {{[* ]*}}T{{[0-9]\.[XYZW]}}, PV.[[LSHL_CHAN]]
+; R600: 16
+; SI: buffer_load_sshort
define void @load_i16_sext(i32 addrspace(1)* %out, i16 addrspace(1)* %in) {
entry:
%0 = load i16 addrspace(1)* %in
@@ -145,10 +146,10 @@ entry:
}
; FUNC-LABEL: {{^}}load_v2i16:
-; R600-CHECK: VTX_READ_16
-; R600-CHECK: VTX_READ_16
-; SI-CHECK: buffer_load_ushort
-; SI-CHECK: buffer_load_ushort
+; R600: VTX_READ_16
+; R600: VTX_READ_16
+; SI: buffer_load_ushort
+; SI: buffer_load_ushort
define void @load_v2i16(<2 x i32> addrspace(1)* %out, <2 x i16> addrspace(1)* %in) {
entry:
%0 = load <2 x i16> addrspace(1)* %in
@@ -158,18 +159,18 @@ entry:
}
; FUNC-LABEL: {{^}}load_v2i16_sext:
-; R600-CHECK-DAG: VTX_READ_16 [[DST_X:T[0-9]\.[XYZW]]], [[DST_X]]
-; R600-CHECK-DAG: VTX_READ_16 [[DST_Y:T[0-9]\.[XYZW]]], [[DST_Y]]
-; R600-CHECK-DAG: LSHL {{[* ]*}}T{{[0-9]}}.[[LSHL_X_CHAN:[XYZW]]], [[DST_X]]
-; R600-CHECK-DAG: 16
-; R600-CHECK-DAG: ASHR {{[* ]*}}T{{[0-9]\.[XYZW]}}, PV.[[LSHL_X_CHAN]]
-; R600-CHECK-DAG: 16
-; R600-CHECK-DAG: LSHL {{[* ]*}}T{{[0-9]}}.[[LSHL_Y_CHAN:[XYZW]]], [[DST_Y]]
-; R600-CHECK-DAG: 16
-; R600-CHECK-DAG: ASHR {{[* ]*}}T{{[0-9]\.[XYZW]}}, PV.[[LSHL_Y_CHAN]]
-; R600-CHECK-DAG: 16
-; SI-CHECK: buffer_load_sshort
-; SI-CHECK: buffer_load_sshort
+; R600-DAG: VTX_READ_16 [[DST_X:T[0-9]\.[XYZW]]], [[DST_X]]
+; R600-DAG: VTX_READ_16 [[DST_Y:T[0-9]\.[XYZW]]], [[DST_Y]]
+; R600-DAG: LSHL {{[* ]*}}T{{[0-9]}}.[[LSHL_X_CHAN:[XYZW]]], [[DST_X]]
+; R600-DAG: 16
+; R600-DAG: ASHR {{[* ]*}}T{{[0-9]\.[XYZW]}}, PV.[[LSHL_X_CHAN]]
+; R600-DAG: 16
+; R600-DAG: LSHL {{[* ]*}}T{{[0-9]}}.[[LSHL_Y_CHAN:[XYZW]]], [[DST_Y]]
+; R600-DAG: 16
+; R600-DAG: ASHR {{[* ]*}}T{{[0-9]\.[XYZW]}}, PV.[[LSHL_Y_CHAN]]
+; R600-DAG: 16
+; SI: buffer_load_sshort
+; SI: buffer_load_sshort
define void @load_v2i16_sext(<2 x i32> addrspace(1)* %out, <2 x i16> addrspace(1)* %in) {
entry:
%0 = load <2 x i16> addrspace(1)* %in
@@ -179,14 +180,14 @@ entry:
}
; FUNC-LABEL: {{^}}load_v4i16:
-; R600-CHECK: VTX_READ_16
-; R600-CHECK: VTX_READ_16
-; R600-CHECK: VTX_READ_16
-; R600-CHECK: VTX_READ_16
-; SI-CHECK: buffer_load_ushort
-; SI-CHECK: buffer_load_ushort
-; SI-CHECK: buffer_load_ushort
-; SI-CHECK: buffer_load_ushort
+; R600: VTX_READ_16
+; R600: VTX_READ_16
+; R600: VTX_READ_16
+; R600: VTX_READ_16
+; SI: buffer_load_ushort
+; SI: buffer_load_ushort
+; SI: buffer_load_ushort
+; SI: buffer_load_ushort
define void @load_v4i16(<4 x i32> addrspace(1)* %out, <4 x i16> addrspace(1)* %in) {
entry:
%0 = load <4 x i16> addrspace(1)* %in
@@ -196,30 +197,30 @@ entry:
}
; FUNC-LABEL: {{^}}load_v4i16_sext:
-; R600-CHECK-DAG: VTX_READ_16 [[DST_X:T[0-9]\.[XYZW]]], [[DST_X]]
-; R600-CHECK-DAG: VTX_READ_16 [[DST_Y:T[0-9]\.[XYZW]]], [[DST_Y]]
-; R600-CHECK-DAG: VTX_READ_16 [[DST_Z:T[0-9]\.[XYZW]]], [[DST_Z]]
-; R600-CHECK-DAG: VTX_READ_16 [[DST_W:T[0-9]\.[XYZW]]], [[DST_W]]
-; R600-CHECK-DAG: LSHL {{[* ]*}}T{{[0-9]}}.[[LSHL_X_CHAN:[XYZW]]], [[DST_X]]
-; R600-CHECK-DAG: 16
-; R600-CHECK-DAG: ASHR {{[* ]*}}T{{[0-9]\.[XYZW]}}, PV.[[LSHL_X_CHAN]]
-; R600-CHECK-DAG: 16
-; R600-CHECK-DAG: LSHL {{[* ]*}}T{{[0-9]}}.[[LSHL_Y_CHAN:[XYZW]]], [[DST_Y]]
-; R600-CHECK-DAG: 16
-; R600-CHECK-DAG: ASHR {{[* ]*}}T{{[0-9]\.[XYZW]}}, PV.[[LSHL_Y_CHAN]]
-; R600-CHECK-DAG: 16
-; R600-CHECK-DAG: LSHL {{[* ]*}}T{{[0-9]}}.[[LSHL_Z_CHAN:[XYZW]]], [[DST_Z]]
-; R600-CHECK-DAG: 16
-; R600-CHECK-DAG: ASHR {{[* ]*}}T{{[0-9]\.[XYZW]}}, PV.[[LSHL_Z_CHAN]]
-; R600-CHECK-DAG: 16
-; R600-CHECK-DAG: LSHL {{[* ]*}}T{{[0-9]}}.[[LSHL_W_CHAN:[XYZW]]], [[DST_W]]
-; R600-CHECK-DAG: 16
-; R600-CHECK-DAG: ASHR {{[* ]*}}T{{[0-9]\.[XYZW]}}, PV.[[LSHL_W_CHAN]]
-; R600-CHECK-DAG: 16
-; SI-CHECK: buffer_load_sshort
-; SI-CHECK: buffer_load_sshort
-; SI-CHECK: buffer_load_sshort
-; SI-CHECK: buffer_load_sshort
+; R600-DAG: VTX_READ_16 [[DST_X:T[0-9]\.[XYZW]]], [[DST_X]]
+; R600-DAG: VTX_READ_16 [[DST_Y:T[0-9]\.[XYZW]]], [[DST_Y]]
+; R600-DAG: VTX_READ_16 [[DST_Z:T[0-9]\.[XYZW]]], [[DST_Z]]
+; R600-DAG: VTX_READ_16 [[DST_W:T[0-9]\.[XYZW]]], [[DST_W]]
+; R600-DAG: LSHL {{[* ]*}}T{{[0-9]}}.[[LSHL_X_CHAN:[XYZW]]], [[DST_X]]
+; R600-DAG: 16
+; R600-DAG: ASHR {{[* ]*}}T{{[0-9]\.[XYZW]}}, PV.[[LSHL_X_CHAN]]
+; R600-DAG: 16
+; R600-DAG: LSHL {{[* ]*}}T{{[0-9]}}.[[LSHL_Y_CHAN:[XYZW]]], [[DST_Y]]
+; R600-DAG: 16
+; R600-DAG: ASHR {{[* ]*}}T{{[0-9]\.[XYZW]}}, PV.[[LSHL_Y_CHAN]]
+; R600-DAG: 16
+; R600-DAG: LSHL {{[* ]*}}T{{[0-9]}}.[[LSHL_Z_CHAN:[XYZW]]], [[DST_Z]]
+; R600-DAG: 16
+; R600-DAG: ASHR {{[* ]*}}T{{[0-9]\.[XYZW]}}, PV.[[LSHL_Z_CHAN]]
+; R600-DAG: 16
+; R600-DAG: LSHL {{[* ]*}}T{{[0-9]}}.[[LSHL_W_CHAN:[XYZW]]], [[DST_W]]
+; R600-DAG: 16
+; R600-DAG: ASHR {{[* ]*}}T{{[0-9]\.[XYZW]}}, PV.[[LSHL_W_CHAN]]
+; R600-DAG: 16
+; SI: buffer_load_sshort
+; SI: buffer_load_sshort
+; SI: buffer_load_sshort
+; SI: buffer_load_sshort
define void @load_v4i16_sext(<4 x i32> addrspace(1)* %out, <4 x i16> addrspace(1)* %in) {
entry:
%0 = load <4 x i16> addrspace(1)* %in
@@ -230,9 +231,9 @@ entry:
; load an i32 value from the global address space.
; FUNC-LABEL: {{^}}load_i32:
-; R600-CHECK: VTX_READ_32 T{{[0-9]+}}.X, T{{[0-9]+}}.X, 0
+; R600: VTX_READ_32 T{{[0-9]+}}.X, T{{[0-9]+}}.X, 0
-; SI-CHECK: buffer_load_dword v{{[0-9]+}}
+; SI: buffer_load_dword v{{[0-9]+}}
define void @load_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %in) {
entry:
%0 = load i32 addrspace(1)* %in
@@ -242,9 +243,9 @@ entry:
; load a f32 value from the global address space.
; FUNC-LABEL: {{^}}load_f32:
-; R600-CHECK: VTX_READ_32 T{{[0-9]+}}.X, T{{[0-9]+}}.X, 0
+; R600: VTX_READ_32 T{{[0-9]+}}.X, T{{[0-9]+}}.X, 0
-; SI-CHECK: buffer_load_dword v{{[0-9]+}}
+; SI: buffer_load_dword v{{[0-9]+}}
define void @load_f32(float addrspace(1)* %out, float addrspace(1)* %in) {
entry:
%0 = load float addrspace(1)* %in
@@ -254,9 +255,9 @@ entry:
; load a v2f32 value from the global address space
; FUNC-LABEL: {{^}}load_v2f32:
-; R600-CHECK: MEM_RAT
-; R600-CHECK: VTX_READ_64
-; SI-CHECK: buffer_load_dwordx2
+; R600: MEM_RAT
+; R600: VTX_READ_64
+; SI: buffer_load_dwordx2
define void @load_v2f32(<2 x float> addrspace(1)* %out, <2 x float> addrspace(1)* %in) {
entry:
%0 = load <2 x float> addrspace(1)* %in
@@ -265,8 +266,8 @@ entry:
}
; FUNC-LABEL: {{^}}load_i64:
-; R600-CHECK: VTX_READ_64
-; SI-CHECK: buffer_load_dwordx2
+; R600: VTX_READ_64
+; SI: buffer_load_dwordx2
define void @load_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %in) {
entry:
%0 = load i64 addrspace(1)* %in
@@ -275,11 +276,11 @@ entry:
}
; FUNC-LABEL: {{^}}load_i64_sext:
-; R600-CHECK: MEM_RAT
-; R600-CHECK: MEM_RAT
-; R600-CHECK: ASHR {{[* ]*}}T{{[0-9]\.[XYZW]}}, T{{[0-9]\.[XYZW]}}, literal.x
-; R600-CHECK: 31
-; SI-CHECK: buffer_load_dword
+; R600: MEM_RAT
+; R600: MEM_RAT
+; R600: ASHR {{[* ]*}}T{{[0-9]\.[XYZW]}}, T{{[0-9]\.[XYZW]}}, literal.x
+; R600: 31
+; SI: buffer_load_dword
define void @load_i64_sext(i64 addrspace(1)* %out, i32 addrspace(1)* %in) {
entry:
@@ -290,8 +291,8 @@ entry:
}
; FUNC-LABEL: {{^}}load_i64_zext:
-; R600-CHECK: MEM_RAT
-; R600-CHECK: MEM_RAT
+; R600: MEM_RAT
+; R600: MEM_RAT
define void @load_i64_zext(i64 addrspace(1)* %out, i32 addrspace(1)* %in) {
entry:
%0 = load i32 addrspace(1)* %in
@@ -301,17 +302,17 @@ entry:
}
; FUNC-LABEL: {{^}}load_v8i32:
-; R600-CHECK: VTX_READ_128
-; R600-CHECK: VTX_READ_128
+; R600: VTX_READ_128
+; R600: VTX_READ_128
; XXX: We should be using DWORDX4 instructions on SI.
-; SI-CHECK: buffer_load_dword
-; SI-CHECK: buffer_load_dword
-; SI-CHECK: buffer_load_dword
-; SI-CHECK: buffer_load_dword
-; SI-CHECK: buffer_load_dword
-; SI-CHECK: buffer_load_dword
-; SI-CHECK: buffer_load_dword
-; SI-CHECK: buffer_load_dword
+; SI: buffer_load_dword
+; SI: buffer_load_dword
+; SI: buffer_load_dword
+; SI: buffer_load_dword
+; SI: buffer_load_dword
+; SI: buffer_load_dword
+; SI: buffer_load_dword
+; SI: buffer_load_dword
define void @load_v8i32(<8 x i32> addrspace(1)* %out, <8 x i32> addrspace(1)* %in) {
entry:
%0 = load <8 x i32> addrspace(1)* %in
@@ -320,27 +321,27 @@ entry:
}
; FUNC-LABEL: {{^}}load_v16i32:
-; R600-CHECK: VTX_READ_128
-; R600-CHECK: VTX_READ_128
-; R600-CHECK: VTX_READ_128
-; R600-CHECK: VTX_READ_128
+; R600: VTX_READ_128
+; R600: VTX_READ_128
+; R600: VTX_READ_128
+; R600: VTX_READ_128
; XXX: We should be using DWORDX4 instructions on SI.
-; SI-CHECK: buffer_load_dword
-; SI-CHECK: buffer_load_dword
-; SI-CHECK: buffer_load_dword
-; SI-CHECK: buffer_load_dword
-; SI-CHECK: buffer_load_dword
-; SI-CHECK: buffer_load_dword
-; SI-CHECK: buffer_load_dword
-; SI-CHECK: buffer_load_dword
-; SI-CHECK: buffer_load_dword
-; SI-CHECK: buffer_load_dword
-; SI-CHECK: buffer_load_dword
-; SI-CHECK: buffer_load_dword
-; SI-CHECK: buffer_load_dword
-; SI-CHECK: buffer_load_dword
-; SI-CHECK: buffer_load_dword
-; SI-CHECK: buffer_load_dword
+; SI: buffer_load_dword
+; SI: buffer_load_dword
+; SI: buffer_load_dword
+; SI: buffer_load_dword
+; SI: buffer_load_dword
+; SI: buffer_load_dword
+; SI: buffer_load_dword
+; SI: buffer_load_dword
+; SI: buffer_load_dword
+; SI: buffer_load_dword
+; SI: buffer_load_dword
+; SI: buffer_load_dword
+; SI: buffer_load_dword
+; SI: buffer_load_dword
+; SI: buffer_load_dword
+; SI: buffer_load_dword
define void @load_v16i32(<16 x i32> addrspace(1)* %out, <16 x i32> addrspace(1)* %in) {
entry:
%0 = load <16 x i32> addrspace(1)* %in
@@ -354,12 +355,12 @@ entry:
; Load a sign-extended i8 value
; FUNC-LABEL: {{^}}load_const_i8_sext:
-; R600-CHECK: VTX_READ_8 [[DST:T[0-9]\.[XYZW]]], [[DST]]
-; R600-CHECK: LSHL {{[* ]*}}T{{[0-9]}}.[[LSHL_CHAN:[XYZW]]], [[DST]]
-; R600-CHECK: 24
-; R600-CHECK: ASHR {{[* ]*}}T{{[0-9]\.[XYZW]}}, PV.[[LSHL_CHAN]]
-; R600-CHECK: 24
-; SI-CHECK: buffer_load_sbyte v{{[0-9]+}},
+; R600: VTX_READ_8 [[DST:T[0-9]\.[XYZW]]], [[DST]]
+; R600: LSHL {{[* ]*}}T{{[0-9]}}.[[LSHL_CHAN:[XYZW]]], [[DST]]
+; R600: 24
+; R600: ASHR {{[* ]*}}T{{[0-9]\.[XYZW]}}, PV.[[LSHL_CHAN]]
+; R600: 24
+; SI: buffer_load_sbyte v{{[0-9]+}},
define void @load_const_i8_sext(i32 addrspace(1)* %out, i8 addrspace(2)* %in) {
entry:
%0 = load i8 addrspace(2)* %in
@@ -370,8 +371,8 @@ entry:
; Load an aligned i8 value
; FUNC-LABEL: {{^}}load_const_i8_aligned:
-; R600-CHECK: VTX_READ_8 T{{[0-9]+\.X, T[0-9]+\.X}}
-; SI-CHECK: buffer_load_ubyte v{{[0-9]+}},
+; R600: VTX_READ_8 T{{[0-9]+\.X, T[0-9]+\.X}}
+; SI: buffer_load_ubyte v{{[0-9]+}},
define void @load_const_i8_aligned(i32 addrspace(1)* %out, i8 addrspace(2)* %in) {
entry:
%0 = load i8 addrspace(2)* %in
@@ -382,8 +383,8 @@ entry:
; Load an un-aligned i8 value
; FUNC-LABEL: {{^}}load_const_i8_unaligned:
-; R600-CHECK: VTX_READ_8 T{{[0-9]+\.X, T[0-9]+\.X}}
-; SI-CHECK: buffer_load_ubyte v{{[0-9]+}},
+; R600: VTX_READ_8 T{{[0-9]+\.X, T[0-9]+\.X}}
+; SI: buffer_load_ubyte v{{[0-9]+}},
define void @load_const_i8_unaligned(i32 addrspace(1)* %out, i8 addrspace(2)* %in) {
entry:
%0 = getelementptr i8 addrspace(2)* %in, i32 1
@@ -395,12 +396,12 @@ entry:
; Load a sign-extended i16 value
; FUNC-LABEL: {{^}}load_const_i16_sext:
-; R600-CHECK: VTX_READ_16 [[DST:T[0-9]\.[XYZW]]], [[DST]]
-; R600-CHECK: LSHL {{[* ]*}}T{{[0-9]}}.[[LSHL_CHAN:[XYZW]]], [[DST]]
-; R600-CHECK: 16
-; R600-CHECK: ASHR {{[* ]*}}T{{[0-9]\.[XYZW]}}, PV.[[LSHL_CHAN]]
-; R600-CHECK: 16
-; SI-CHECK: buffer_load_sshort
+; R600: VTX_READ_16 [[DST:T[0-9]\.[XYZW]]], [[DST]]
+; R600: LSHL {{[* ]*}}T{{[0-9]}}.[[LSHL_CHAN:[XYZW]]], [[DST]]
+; R600: 16
+; R600: ASHR {{[* ]*}}T{{[0-9]\.[XYZW]}}, PV.[[LSHL_CHAN]]
+; R600: 16
+; SI: buffer_load_sshort
define void @load_const_i16_sext(i32 addrspace(1)* %out, i16 addrspace(2)* %in) {
entry:
%0 = load i16 addrspace(2)* %in
@@ -411,8 +412,8 @@ entry:
; Load an aligned i16 value
; FUNC-LABEL: {{^}}load_const_i16_aligned:
-; R600-CHECK: VTX_READ_16 T{{[0-9]+\.X, T[0-9]+\.X}}
-; SI-CHECK: buffer_load_ushort
+; R600: VTX_READ_16 T{{[0-9]+\.X, T[0-9]+\.X}}
+; SI: buffer_load_ushort
define void @load_const_i16_aligned(i32 addrspace(1)* %out, i16 addrspace(2)* %in) {
entry:
%0 = load i16 addrspace(2)* %in
@@ -423,8 +424,8 @@ entry:
; Load an un-aligned i16 value
; FUNC-LABEL: {{^}}load_const_i16_unaligned:
-; R600-CHECK: VTX_READ_16 T{{[0-9]+\.X, T[0-9]+\.X}}
-; SI-CHECK: buffer_load_ushort
+; R600: VTX_READ_16 T{{[0-9]+\.X, T[0-9]+\.X}}
+; SI: buffer_load_ushort
define void @load_const_i16_unaligned(i32 addrspace(1)* %out, i16 addrspace(2)* %in) {
entry:
%0 = getelementptr i16 addrspace(2)* %in, i32 1
@@ -436,9 +437,9 @@ entry:
; Load an i32 value from the constant address space.
; FUNC-LABEL: {{^}}load_const_addrspace_i32:
-; R600-CHECK: VTX_READ_32 T{{[0-9]+}}.X, T{{[0-9]+}}.X, 0
+; R600: VTX_READ_32 T{{[0-9]+}}.X, T{{[0-9]+}}.X, 0
-; SI-CHECK: s_load_dword s{{[0-9]+}}
+; SI: s_load_dword s{{[0-9]+}}
define void @load_const_addrspace_i32(i32 addrspace(1)* %out, i32 addrspace(2)* %in) {
entry:
%0 = load i32 addrspace(2)* %in
@@ -448,9 +449,9 @@ entry:
; Load a f32 value from the constant address space.
; FUNC-LABEL: {{^}}load_const_addrspace_f32:
-; R600-CHECK: VTX_READ_32 T{{[0-9]+}}.X, T{{[0-9]+}}.X, 0
+; R600: VTX_READ_32 T{{[0-9]+}}.X, T{{[0-9]+}}.X, 0
-; SI-CHECK: s_load_dword s{{[0-9]+}}
+; SI: s_load_dword s{{[0-9]+}}
define void @load_const_addrspace_f32(float addrspace(1)* %out, float addrspace(2)* %in) {
%1 = load float addrspace(2)* %in
store float %1, float addrspace(1)* %out
@@ -463,10 +464,10 @@ define void @load_const_addrspace_f32(float addrspace(1)* %out, float addrspace(
; Load an i8 value from the local address space.
; FUNC-LABEL: {{^}}load_i8_local:
-; R600-CHECK: LDS_UBYTE_READ_RET
-; SI-CHECK-NOT: s_wqm_b64
-; SI-CHECK: s_mov_b32 m0
-; SI-CHECK: ds_read_u8
+; R600: LDS_UBYTE_READ_RET
+; SI-NOT: s_wqm_b64
+; SI: s_mov_b32 m0
+; SI: ds_read_u8
define void @load_i8_local(i32 addrspace(1)* %out, i8 addrspace(3)* %in) {
%1 = load i8 addrspace(3)* %in
%2 = zext i8 %1 to i32
@@ -475,11 +476,11 @@ define void @load_i8_local(i32 addrspace(1)* %out, i8 addrspace(3)* %in) {
}
; FUNC-LABEL: {{^}}load_i8_sext_local:
-; R600-CHECK: LDS_UBYTE_READ_RET
-; R600-CHECK: ASHR
-; SI-CHECK-NOT: s_wqm_b64
-; SI-CHECK: s_mov_b32 m0
-; SI-CHECK: ds_read_i8
+; R600: LDS_UBYTE_READ_RET
+; R600: ASHR
+; SI-NOT: s_wqm_b64
+; SI: s_mov_b32 m0
+; SI: ds_read_i8
define void @load_i8_sext_local(i32 addrspace(1)* %out, i8 addrspace(3)* %in) {
entry:
%0 = load i8 addrspace(3)* %in
@@ -489,12 +490,12 @@ entry:
}
; FUNC-LABEL: {{^}}load_v2i8_local:
-; R600-CHECK: LDS_UBYTE_READ_RET
-; R600-CHECK: LDS_UBYTE_READ_RET
-; SI-CHECK-NOT: s_wqm_b64
-; SI-CHECK: s_mov_b32 m0
-; SI-CHECK: ds_read_u8
-; SI-CHECK: ds_read_u8
+; R600: LDS_UBYTE_READ_RET
+; R600: LDS_UBYTE_READ_RET
+; SI-NOT: s_wqm_b64
+; SI: s_mov_b32 m0
+; SI: ds_read_u8
+; SI: ds_read_u8
define void @load_v2i8_local(<2 x i32> addrspace(1)* %out, <2 x i8> addrspace(3)* %in) {
entry:
%0 = load <2 x i8> addrspace(3)* %in
@@ -504,14 +505,14 @@ entry:
}
; FUNC-LABEL: {{^}}load_v2i8_sext_local:
-; R600-CHECK-DAG: LDS_UBYTE_READ_RET
-; R600-CHECK-DAG: LDS_UBYTE_READ_RET
-; R600-CHECK-DAG: ASHR
-; R600-CHECK-DAG: ASHR
-; SI-CHECK-NOT: s_wqm_b64
-; SI-CHECK: s_mov_b32 m0
-; SI-CHECK: ds_read_i8
-; SI-CHECK: ds_read_i8
+; R600-DAG: LDS_UBYTE_READ_RET
+; R600-DAG: LDS_UBYTE_READ_RET
+; R600-DAG: ASHR
+; R600-DAG: ASHR
+; SI-NOT: s_wqm_b64
+; SI: s_mov_b32 m0
+; SI: ds_read_i8
+; SI: ds_read_i8
define void @load_v2i8_sext_local(<2 x i32> addrspace(1)* %out, <2 x i8> addrspace(3)* %in) {
entry:
%0 = load <2 x i8> addrspace(3)* %in
@@ -521,16 +522,16 @@ entry:
}
; FUNC-LABEL: {{^}}load_v4i8_local:
-; R600-CHECK: LDS_UBYTE_READ_RET
-; R600-CHECK: LDS_UBYTE_READ_RET
-; R600-CHECK: LDS_UBYTE_READ_RET
-; R600-CHECK: LDS_UBYTE_READ_RET
-; SI-CHECK-NOT: s_wqm_b64
-; SI-CHECK: s_mov_b32 m0
-; SI-CHECK: ds_read_u8
-; SI-CHECK: ds_read_u8
-; SI-CHECK: ds_read_u8
-; SI-CHECK: ds_read_u8
+; R600: LDS_UBYTE_READ_RET
+; R600: LDS_UBYTE_READ_RET
+; R600: LDS_UBYTE_READ_RET
+; R600: LDS_UBYTE_READ_RET
+; SI-NOT: s_wqm_b64
+; SI: s_mov_b32 m0
+; SI: ds_read_u8
+; SI: ds_read_u8
+; SI: ds_read_u8
+; SI: ds_read_u8
define void @load_v4i8_local(<4 x i32> addrspace(1)* %out, <4 x i8> addrspace(3)* %in) {
entry:
%0 = load <4 x i8> addrspace(3)* %in
@@ -540,20 +541,20 @@ entry:
}
; FUNC-LABEL: {{^}}load_v4i8_sext_local:
-; R600-CHECK-DAG: LDS_UBYTE_READ_RET
-; R600-CHECK-DAG: LDS_UBYTE_READ_RET
-; R600-CHECK-DAG: LDS_UBYTE_READ_RET
-; R600-CHECK-DAG: LDS_UBYTE_READ_RET
-; R600-CHECK-DAG: ASHR
-; R600-CHECK-DAG: ASHR
-; R600-CHECK-DAG: ASHR
-; R600-CHECK-DAG: ASHR
-; SI-CHECK-NOT: s_wqm_b64
-; SI-CHECK: s_mov_b32 m0
-; SI-CHECK: ds_read_i8
-; SI-CHECK: ds_read_i8
-; SI-CHECK: ds_read_i8
-; SI-CHECK: ds_read_i8
+; R600-DAG: LDS_UBYTE_READ_RET
+; R600-DAG: LDS_UBYTE_READ_RET
+; R600-DAG: LDS_UBYTE_READ_RET
+; R600-DAG: LDS_UBYTE_READ_RET
+; R600-DAG: ASHR
+; R600-DAG: ASHR
+; R600-DAG: ASHR
+; R600-DAG: ASHR
+; SI-NOT: s_wqm_b64
+; SI: s_mov_b32 m0
+; SI: ds_read_i8
+; SI: ds_read_i8
+; SI: ds_read_i8
+; SI: ds_read_i8
define void @load_v4i8_sext_local(<4 x i32> addrspace(1)* %out, <4 x i8> addrspace(3)* %in) {
entry:
%0 = load <4 x i8> addrspace(3)* %in
@@ -564,10 +565,10 @@ entry:
; Load an i16 value from the local address space.
; FUNC-LABEL: {{^}}load_i16_local:
-; R600-CHECK: LDS_USHORT_READ_RET
-; SI-CHECK-NOT: s_wqm_b64
-; SI-CHECK: s_mov_b32 m0
-; SI-CHECK: ds_read_u16
+; R600: LDS_USHORT_READ_RET
+; SI-NOT: s_wqm_b64
+; SI: s_mov_b32 m0
+; SI: ds_read_u16
define void @load_i16_local(i32 addrspace(1)* %out, i16 addrspace(3)* %in) {
entry:
%0 = load i16 addrspace(3)* %in
@@ -577,11 +578,11 @@ entry:
}
; FUNC-LABEL: {{^}}load_i16_sext_local:
-; R600-CHECK: LDS_USHORT_READ_RET
-; R600-CHECK: ASHR
-; SI-CHECK-NOT: s_wqm_b64
-; SI-CHECK: s_mov_b32 m0
-; SI-CHECK: ds_read_i16
+; R600: LDS_USHORT_READ_RET
+; R600: ASHR
+; SI-NOT: s_wqm_b64
+; SI: s_mov_b32 m0
+; SI: ds_read_i16
define void @load_i16_sext_local(i32 addrspace(1)* %out, i16 addrspace(3)* %in) {
entry:
%0 = load i16 addrspace(3)* %in
@@ -591,12 +592,12 @@ entry:
}
; FUNC-LABEL: {{^}}load_v2i16_local:
-; R600-CHECK: LDS_USHORT_READ_RET
-; R600-CHECK: LDS_USHORT_READ_RET
-; SI-CHECK-NOT: s_wqm_b64
-; SI-CHECK: s_mov_b32 m0
-; SI-CHECK: ds_read_u16
-; SI-CHECK: ds_read_u16
+; R600: LDS_USHORT_READ_RET
+; R600: LDS_USHORT_READ_RET
+; SI-NOT: s_wqm_b64
+; SI: s_mov_b32 m0
+; SI: ds_read_u16
+; SI: ds_read_u16
define void @load_v2i16_local(<2 x i32> addrspace(1)* %out, <2 x i16> addrspace(3)* %in) {
entry:
%0 = load <2 x i16> addrspace(3)* %in
@@ -606,14 +607,14 @@ entry:
}
; FUNC-LABEL: {{^}}load_v2i16_sext_local:
-; R600-CHECK-DAG: LDS_USHORT_READ_RET
-; R600-CHECK-DAG: LDS_USHORT_READ_RET
-; R600-CHECK-DAG: ASHR
-; R600-CHECK-DAG: ASHR
-; SI-CHECK-NOT: s_wqm_b64
-; SI-CHECK: s_mov_b32 m0
-; SI-CHECK: ds_read_i16
-; SI-CHECK: ds_read_i16
+; R600-DAG: LDS_USHORT_READ_RET
+; R600-DAG: LDS_USHORT_READ_RET
+; R600-DAG: ASHR
+; R600-DAG: ASHR
+; SI-NOT: s_wqm_b64
+; SI: s_mov_b32 m0
+; SI: ds_read_i16
+; SI: ds_read_i16
define void @load_v2i16_sext_local(<2 x i32> addrspace(1)* %out, <2 x i16> addrspace(3)* %in) {
entry:
%0 = load <2 x i16> addrspace(3)* %in
@@ -623,16 +624,16 @@ entry:
}
; FUNC-LABEL: {{^}}load_v4i16_local:
-; R600-CHECK: LDS_USHORT_READ_RET
-; R600-CHECK: LDS_USHORT_READ_RET
-; R600-CHECK: LDS_USHORT_READ_RET
-; R600-CHECK: LDS_USHORT_READ_RET
-; SI-CHECK-NOT: s_wqm_b64
-; SI-CHECK: s_mov_b32 m0
-; SI-CHECK: ds_read_u16
-; SI-CHECK: ds_read_u16
-; SI-CHECK: ds_read_u16
-; SI-CHECK: ds_read_u16
+; R600: LDS_USHORT_READ_RET
+; R600: LDS_USHORT_READ_RET
+; R600: LDS_USHORT_READ_RET
+; R600: LDS_USHORT_READ_RET
+; SI-NOT: s_wqm_b64
+; SI: s_mov_b32 m0
+; SI: ds_read_u16
+; SI: ds_read_u16
+; SI: ds_read_u16
+; SI: ds_read_u16
define void @load_v4i16_local(<4 x i32> addrspace(1)* %out, <4 x i16> addrspace(3)* %in) {
entry:
%0 = load <4 x i16> addrspace(3)* %in
@@ -642,20 +643,20 @@ entry:
}
; FUNC-LABEL: {{^}}load_v4i16_sext_local:
-; R600-CHECK-DAG: LDS_USHORT_READ_RET
-; R600-CHECK-DAG: LDS_USHORT_READ_RET
-; R600-CHECK-DAG: LDS_USHORT_READ_RET
-; R600-CHECK-DAG: LDS_USHORT_READ_RET
-; R600-CHECK-DAG: ASHR
-; R600-CHECK-DAG: ASHR
-; R600-CHECK-DAG: ASHR
-; R600-CHECK-DAG: ASHR
-; SI-CHECK-NOT: s_wqm_b64
-; SI-CHECK: s_mov_b32 m0
-; SI-CHECK: ds_read_i16
-; SI-CHECK: ds_read_i16
-; SI-CHECK: ds_read_i16
-; SI-CHECK: ds_read_i16
+; R600-DAG: LDS_USHORT_READ_RET
+; R600-DAG: LDS_USHORT_READ_RET
+; R600-DAG: LDS_USHORT_READ_RET
+; R600-DAG: LDS_USHORT_READ_RET
+; R600-DAG: ASHR
+; R600-DAG: ASHR
+; R600-DAG: ASHR
+; R600-DAG: ASHR
+; SI-NOT: s_wqm_b64
+; SI: s_mov_b32 m0
+; SI: ds_read_i16
+; SI: ds_read_i16
+; SI: ds_read_i16
+; SI: ds_read_i16
define void @load_v4i16_sext_local(<4 x i32> addrspace(1)* %out, <4 x i16> addrspace(3)* %in) {
entry:
%0 = load <4 x i16> addrspace(3)* %in
@@ -666,10 +667,10 @@ entry:
; Load an i32 value from the local address space.
; FUNC-LABEL: {{^}}load_i32_local:
-; R600-CHECK: LDS_READ_RET
-; SI-CHECK-NOT: s_wqm_b64
-; SI-CHECK: s_mov_b32 m0
-; SI-CHECK: ds_read_b32
+; R600: LDS_READ_RET
+; SI-NOT: s_wqm_b64
+; SI: s_mov_b32 m0
+; SI: ds_read_b32
define void @load_i32_local(i32 addrspace(1)* %out, i32 addrspace(3)* %in) {
entry:
%0 = load i32 addrspace(3)* %in
@@ -679,9 +680,9 @@ entry:
; Load an f32 value from the local address space.
; FUNC-LABEL: {{^}}load_f32_local:
-; R600-CHECK: LDS_READ_RET
-; SI-CHECK: s_mov_b32 m0
-; SI-CHECK: ds_read_b32
+; R600: LDS_READ_RET
+; SI: s_mov_b32 m0
+; SI: ds_read_b32
define void @load_f32_local(float addrspace(1)* %out, float addrspace(3)* %in) {
entry:
%0 = load float addrspace(3)* %in
@@ -691,10 +692,10 @@ entry:
; Load a v2f32 value from the local address space.
; FUNC-LABEL: {{^}}load_v2f32_local:
-; R600-CHECK: LDS_READ_RET
-; R600-CHECK: LDS_READ_RET
-; SI-CHECK: s_mov_b32 m0
-; SI-CHECK: ds_read_b64
+; R600: LDS_READ_RET
+; R600: LDS_READ_RET
+; SI: s_mov_b32 m0
+; SI: ds_read_b64
define void @load_v2f32_local(<2 x float> addrspace(1)* %out, <2 x float> addrspace(3)* %in) {
entry:
%0 = load <2 x float> addrspace(3)* %in
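; Note on the checks above: with the natural 8-byte alignment of <2 x float>,
; the two lane loads are assumed to merge into a single ds_read_b64 on SI,
; while R600 still issues two 32-bit LDS reads. A minimal sketch with the
; alignment spelled out (hypothetical function name):
;   define void @load_v2f32_local_align8(<2 x float> addrspace(1)* %out,
;                                        <2 x float> addrspace(3)* %in) {
;   entry:
;     %val = load <2 x float> addrspace(3)* %in, align 8
;     store <2 x float> %val, <2 x float> addrspace(1)* %out
;     ret void
;   }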
@@ -704,11 +705,11 @@ entry:
; Test loading an i32 and a v2i32 value from the same base pointer.
; FUNC-LABEL: {{^}}load_i32_v2i32_local:
-; R600-CHECK: LDS_READ_RET
-; R600-CHECK: LDS_READ_RET
-; R600-CHECK: LDS_READ_RET
-; SI-CHECK-DAG: ds_read_b32
-; SI-CHECK-DAG: ds_read2_b32
+; R600: LDS_READ_RET
+; R600: LDS_READ_RET
+; R600: LDS_READ_RET
+; SI-DAG: ds_read_b32
+; SI-DAG: ds_read2_b32
define void @load_i32_v2i32_local(<2 x i32> addrspace(1)* %out, i32 addrspace(3)* %in) {
%scalar = load i32 addrspace(3)* %in
%tmp0 = bitcast i32 addrspace(3)* %in to <2 x i32> addrspace(3)*
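; Note on the ds_read2_b32 expected above: it is assumed to load two dwords in
; a single instruction, with two independent 8-bit offset fields counted in
; dword units, which lets the <2 x i32> load share one base register with the
; scalar ds_read_b32. Illustrative syntax (operand order assumed):
;   ds_read2_b32 v[0:1], v2 offset0:0 offset1:1   ; loads base+0 and base+4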
@@ -726,9 +727,9 @@ define void @load_i32_v2i32_local(<2 x i32> addrspace(1)* %out, i32 addrspace(3)
; On SI we need to make sure that the base address is in a register and not
; an immediate.
; FUNC-LABEL: {{^}}load_i32_local_const_ptr:
-; SI-CHECK: v_mov_b32_e32 v[[ZERO:[0-9]+]], 0
-; SI-CHECK: ds_read_b32 v0, v[[ZERO]] offset:4
-; R600-CHECK: LDS_READ_RET
+; SI: v_mov_b32_e32 v[[ZERO:[0-9]+]], 0
+; SI: ds_read_b32 v0, v[[ZERO]] offset:4
+; R600: LDS_READ_RET
define void @load_i32_local_const_ptr(i32 addrspace(1)* %out, i32 addrspace(3)* %in) {
entry:
%tmp0 = getelementptr [512 x i32] addrspace(3)* @lds, i32 0, i32 1
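; Sketch of the lowering the checks above expect, assuming GCN DS addressing
; rules: the ds_read_b32 address operand must be a VGPR, so even this
; compile-time-constant LDS address is first materialized with v_mov_b32, and
; only the remaining displacement lands in the 16-bit offset field
; (register numbers hypothetical):
;   v_mov_b32_e32 v0, 0           ; base address must live in a register
;   ds_read_b32 v1, v0 offset:4   ; element 1 of @lds = byte offset 4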
diff --git a/test/CodeGen/R600/load.vec.ll b/test/CodeGen/R600/load.vec.ll
index 0d6e213..346d8dc 100644
--- a/test/CodeGen/R600/load.vec.ll
+++ b/test/CodeGen/R600/load.vec.ll
@@ -1,11 +1,12 @@
-; RUN: llc < %s -march=r600 -mcpu=redwood | FileCheck --check-prefix=EG-CHECK %s
-; RUN: llc < %s -march=r600 -mcpu=SI -verify-machineinstrs | FileCheck --check-prefix=SI-CHECK %s
+; RUN: llc < %s -march=r600 -mcpu=redwood | FileCheck --check-prefix=EG %s
+; RUN: llc < %s -march=amdgcn -mcpu=SI -verify-machineinstrs | FileCheck --check-prefix=SI %s
+; RUN: llc < %s -march=amdgcn -mcpu=tonga -verify-machineinstrs | FileCheck --check-prefix=SI %s
; Load a v2i32 value from the global address space.
-; EG-CHECK: {{^}}load_v2i32:
-; EG-CHECK: VTX_READ_64 T{{[0-9]+}}.XY, T{{[0-9]+}}.X, 0
-; SI-CHECK: {{^}}load_v2i32:
-; SI-CHECK: buffer_load_dwordx2 v[{{[0-9]+:[0-9]+}}]
+; EG: {{^}}load_v2i32:
+; EG: VTX_READ_64 T{{[0-9]+}}.XY, T{{[0-9]+}}.X, 0
+; SI: {{^}}load_v2i32:
+; SI: buffer_load_dwordx2 v[{{[0-9]+:[0-9]+}}]
define void @load_v2i32(<2 x i32> addrspace(1)* %out, <2 x i32> addrspace(1)* %in) {
%a = load <2 x i32> addrspace(1)* %in
store <2 x i32> %a, <2 x i32> addrspace(1)* %out
@@ -13,10 +14,10 @@ define void @load_v2i32(<2 x i32> addrspace(1)* %out, <2 x i32> addrspace(1)* %i
}
; Load a v4i32 value from the global address space.
-; EG-CHECK: {{^}}load_v4i32:
-; EG-CHECK: VTX_READ_128 T{{[0-9]+}}.XYZW, T{{[0-9]+}}.X, 0
-; SI-CHECK: {{^}}load_v4i32:
-; SI-CHECK: buffer_load_dwordx4 v[{{[0-9]+:[0-9]+}}]
+; EG: {{^}}load_v4i32:
+; EG: VTX_READ_128 T{{[0-9]+}}.XYZW, T{{[0-9]+}}.X, 0
+; SI: {{^}}load_v4i32:
+; SI: buffer_load_dwordx4 v[{{[0-9]+:[0-9]+}}]
define void @load_v4i32(<4 x i32> addrspace(1)* %out, <4 x i32> addrspace(1)* %in) {
%a = load <4 x i32> addrspace(1)* %in
store <4 x i32> %a, <4 x i32> addrspace(1)* %out
diff --git a/test/CodeGen/R600/load64.ll b/test/CodeGen/R600/load64.ll
index a60c4eb..cb3d654 100644
--- a/test/CodeGen/R600/load64.ll
+++ b/test/CodeGen/R600/load64.ll
@@ -1,4 +1,5 @@
-; RUN: llc < %s -march=r600 -mcpu=tahiti -verify-machineinstrs | FileCheck %s
+; RUN: llc < %s -march=amdgcn -mcpu=tahiti -verify-machineinstrs | FileCheck %s
+; RUN: llc < %s -march=amdgcn -mcpu=tonga -verify-machineinstrs | FileCheck %s
; Load an f64 value from the global address space.
; CHECK-LABEL: {{^}}load_f64:
diff --git a/test/CodeGen/R600/local-64.ll b/test/CodeGen/R600/local-64.ll
index eb14b5f..4b45169 100644
--- a/test/CodeGen/R600/local-64.ll
+++ b/test/CodeGen/R600/local-64.ll
@@ -1,8 +1,9 @@
-; RUN: llc -march=r600 -mcpu=SI -verify-machineinstrs< %s | FileCheck --check-prefix=SI --check-prefix=BOTH %s
-; RUN: llc -march=r600 -mcpu=bonaire -verify-machineinstrs< %s | FileCheck --check-prefix=CI --check-prefix=BOTH %s
+; RUN: llc -march=amdgcn -mcpu=SI -verify-machineinstrs < %s | FileCheck --check-prefix=SI --check-prefix=BOTH %s
+; RUN: llc -march=amdgcn -mcpu=bonaire -verify-machineinstrs < %s | FileCheck --check-prefix=CI --check-prefix=BOTH %s
+; RUN: llc -march=amdgcn -mcpu=tonga -verify-machineinstrs < %s | FileCheck --check-prefix=CI --check-prefix=BOTH %s
; BOTH-LABEL: {{^}}local_i32_load
-; BOTH: ds_read_b32 [[REG:v[0-9]+]], v{{[0-9]+}} offset:28 [M0]
+; BOTH: ds_read_b32 [[REG:v[0-9]+]], v{{[0-9]+}} offset:28
; BOTH: buffer_store_dword [[REG]],
define void @local_i32_load(i32 addrspace(1)* %out, i32 addrspace(3)* %in) nounwind {
%gep = getelementptr i32 addrspace(3)* %in, i32 7
@@ -12,7 +13,7 @@ define void @local_i32_load(i32 addrspace(1)* %out, i32 addrspace(3)* %in) nounw
}
; BOTH-LABEL: {{^}}local_i32_load_0_offset
-; BOTH: ds_read_b32 [[REG:v[0-9]+]], v{{[0-9]+}} [M0]
+; BOTH: ds_read_b32 [[REG:v[0-9]+]], v{{[0-9]+}}
; BOTH: buffer_store_dword [[REG]],
define void @local_i32_load_0_offset(i32 addrspace(1)* %out, i32 addrspace(3)* %in) nounwind {
%val = load i32 addrspace(3)* %in, align 4
@@ -22,7 +23,7 @@ define void @local_i32_load_0_offset(i32 addrspace(1)* %out, i32 addrspace(3)* %
; BOTH-LABEL: {{^}}local_i8_load_i16_max_offset:
; BOTH-NOT: ADD
-; BOTH: ds_read_u8 [[REG:v[0-9]+]], {{v[0-9]+}} offset:65535 [M0]
+; BOTH: ds_read_u8 [[REG:v[0-9]+]], {{v[0-9]+}} offset:65535
; BOTH: buffer_store_byte [[REG]],
define void @local_i8_load_i16_max_offset(i8 addrspace(1)* %out, i8 addrspace(3)* %in) nounwind {
%gep = getelementptr i8 addrspace(3)* %in, i32 65535
@@ -37,7 +38,7 @@ define void @local_i8_load_i16_max_offset(i8 addrspace(1)* %out, i8 addrspace(3)
; SI: s_or_b32 [[ADDR:s[0-9]+]], s{{[0-9]+}}, 0x10000
; CI: s_add_i32 [[ADDR:s[0-9]+]], s{{[0-9]+}}, 0x10000
; BOTH: v_mov_b32_e32 [[VREGADDR:v[0-9]+]], [[ADDR]]
-; BOTH: ds_read_u8 [[REG:v[0-9]+]], [[VREGADDR]] [M0]
+; BOTH: ds_read_u8 [[REG:v[0-9]+]], [[VREGADDR]]
; BOTH: buffer_store_byte [[REG]],
define void @local_i8_load_over_i16_max_offset(i8 addrspace(1)* %out, i8 addrspace(3)* %in) nounwind {
%gep = getelementptr i8 addrspace(3)* %in, i32 65536
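; Offset arithmetic behind the two tests above: the DS offset field is 16 bits,
; so 65535 (0xffff) is the largest displacement that can be encoded directly.
; For 65536 (0x10000) the constant no longer fits and must be folded into the
; base register first; the checks show SI doing this with s_or_b32 and CI with
; s_add_i32 before a zero-offset ds_read_u8. Worked out:
;   65535 = 0xffff  -> encodable:     ds_read_u8 ... offset:65535
;   65536 = 0x10000 -> not encodable: base' = base + 0x10000, then ds_read_u8 base'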
@@ -48,7 +49,7 @@ define void @local_i8_load_over_i16_max_offset(i8 addrspace(1)* %out, i8 addrspa
; BOTH-LABEL: {{^}}local_i64_load:
; BOTH-NOT: ADD
-; BOTH: ds_read_b64 [[REG:v[[0-9]+:[0-9]+]]], v{{[0-9]+}} offset:56 [M0]
+; BOTH: ds_read_b64 [[REG:v[[0-9]+:[0-9]+]]], v{{[0-9]+}} offset:56
; BOTH: buffer_store_dwordx2 [[REG]],
define void @local_i64_load(i64 addrspace(1)* %out, i64 addrspace(3)* %in) nounwind {
%gep = getelementptr i64 addrspace(3)* %in, i32 7
@@ -58,7 +59,7 @@ define void @local_i64_load(i64 addrspace(1)* %out, i64 addrspace(3)* %in) nounw
}
; BOTH-LABEL: {{^}}local_i64_load_0_offset
-; BOTH: ds_read_b64 [[REG:v\[[0-9]+:[0-9]+\]]], v{{[0-9]+}} [M0]
+; BOTH: ds_read_b64 [[REG:v\[[0-9]+:[0-9]+\]]], v{{[0-9]+}}
; BOTH: buffer_store_dwordx2 [[REG]],
define void @local_i64_load_0_offset(i64 addrspace(1)* %out, i64 addrspace(3)* %in) nounwind {
%val = load i64 addrspace(3)* %in, align 8
@@ -68,7 +69,7 @@ define void @local_i64_load_0_offset(i64 addrspace(1)* %out, i64 addrspace(3)* %
; BOTH-LABEL: {{^}}local_f64_load:
; BOTH-NOT: ADD
-; BOTH: ds_read_b64 [[REG:v[[0-9]+:[0-9]+]]], v{{[0-9]+}} offset:56 [M0]
+; BOTH: ds_read_b64 [[REG:v[[0-9]+:[0-9]+]]], v{{[0-9]+}} offset:56
; BOTH: buffer_store_dwordx2 [[REG]],
define void @local_f64_load(double addrspace(1)* %out, double addrspace(3)* %in) nounwind {
%gep = getelementptr double addrspace(3)* %in, i32 7
@@ -78,7 +79,7 @@ define void @local_f64_load(double addrspace(1)* %out, double addrspace(3)* %in)
}
; BOTH-LABEL: {{^}}local_f64_load_0_offset
-; BOTH: ds_read_b64 [[REG:v\[[0-9]+:[0-9]+\]]], v{{[0-9]+}} [M0]
+; BOTH: ds_read_b64 [[REG:v\[[0-9]+:[0-9]+\]]], v{{[0-9]+}}
; BOTH: buffer_store_dwordx2 [[REG]],
define void @local_f64_load_0_offset(double addrspace(1)* %out, double addrspace(3)* %in) nounwind {
%val = load double addrspace(3)* %in, align 8
@@ -88,7 +89,7 @@ define void @local_f64_load_0_offset(double addrspace(1)* %out, double addrspace
; BOTH-LABEL: {{^}}local_i64_store:
; BOTH-NOT: ADD
-; BOTH: ds_write_b64 v{{[0-9]+}}, {{v\[[0-9]+:[0-9]+\]}} offset:56 [M0]
+; BOTH: ds_write_b64 v{{[0-9]+}}, {{v\[[0-9]+:[0-9]+\]}} offset:56
define void @local_i64_store(i64 addrspace(3)* %out) nounwind {
%gep = getelementptr i64 addrspace(3)* %out, i32 7
store i64 5678, i64 addrspace(3)* %gep, align 8
@@ -97,7 +98,7 @@ define void @local_i64_store(i64 addrspace(3)* %out) nounwind {
; BOTH-LABEL: {{^}}local_i64_store_0_offset:
; BOTH-NOT: ADD
-; BOTH: ds_write_b64 v{{[0-9]+}}, {{v\[[0-9]+:[0-9]+\]}} [M0]
+; BOTH: ds_write_b64 v{{[0-9]+}}, {{v\[[0-9]+:[0-9]+\]}}
define void @local_i64_store_0_offset(i64 addrspace(3)* %out) nounwind {
store i64 1234, i64 addrspace(3)* %out, align 8
ret void
@@ -105,7 +106,7 @@ define void @local_i64_store_0_offset(i64 addrspace(3)* %out) nounwind {
; BOTH-LABEL: {{^}}local_f64_store:
; BOTH-NOT: ADD
-; BOTH: ds_write_b64 v{{[0-9]+}}, {{v\[[0-9]+:[0-9]+\]}} offset:56 [M0]
+; BOTH: ds_write_b64 v{{[0-9]+}}, {{v\[[0-9]+:[0-9]+\]}} offset:56
define void @local_f64_store(double addrspace(3)* %out) nounwind {
%gep = getelementptr double addrspace(3)* %out, i32 7
store double 16.0, double addrspace(3)* %gep, align 8
@@ -113,7 +114,7 @@ define void @local_f64_store(double addrspace(3)* %out) nounwind {
}
; BOTH-LABEL: {{^}}local_f64_store_0_offset
-; BOTH: ds_write_b64 v{{[0-9]+}}, {{v\[[0-9]+:[0-9]+\]}} [M0]
+; BOTH: ds_write_b64 v{{[0-9]+}}, {{v\[[0-9]+:[0-9]+\]}}
define void @local_f64_store_0_offset(double addrspace(3)* %out) nounwind {
store double 20.0, double addrspace(3)* %out, align 8
ret void
@@ -121,8 +122,8 @@ define void @local_f64_store_0_offset(double addrspace(3)* %out) nounwind {
; BOTH-LABEL: {{^}}local_v2i64_store:
; BOTH-NOT: ADD
-; BOTH-DAG: ds_write_b64 v{{[0-9]+}}, {{v\[[0-9]+:[0-9]+\]}} offset:112 [M0]
-; BOTH-DAG: ds_write_b64 v{{[0-9]+}}, {{v\[[0-9]+:[0-9]+\]}} offset:120 [M0]
+; BOTH-DAG: ds_write_b64 v{{[0-9]+}}, {{v\[[0-9]+:[0-9]+\]}} offset:112
+; BOTH-DAG: ds_write_b64 v{{[0-9]+}}, {{v\[[0-9]+:[0-9]+\]}} offset:120
; BOTH: s_endpgm
define void @local_v2i64_store(<2 x i64> addrspace(3)* %out) nounwind {
%gep = getelementptr <2 x i64> addrspace(3)* %out, i32 7
@@ -132,8 +133,8 @@ define void @local_v2i64_store(<2 x i64> addrspace(3)* %out) nounwind {
; BOTH-LABEL: {{^}}local_v2i64_store_0_offset:
; BOTH-NOT: ADD
-; BOTH-DAG: ds_write_b64 v{{[0-9]+}}, {{v\[[0-9]+:[0-9]+\]}} [M0]
-; BOTH-DAG: ds_write_b64 v{{[0-9]+}}, {{v\[[0-9]+:[0-9]+\]}} offset:8 [M0]
+; BOTH-DAG: ds_write_b64 v{{[0-9]+}}, {{v\[[0-9]+:[0-9]+\]}}
+; BOTH-DAG: ds_write_b64 v{{[0-9]+}}, {{v\[[0-9]+:[0-9]+\]}} offset:8
; BOTH: s_endpgm
define void @local_v2i64_store_0_offset(<2 x i64> addrspace(3)* %out) nounwind {
store <2 x i64> <i64 1234, i64 1234>, <2 x i64> addrspace(3)* %out, align 16
@@ -142,10 +143,10 @@ define void @local_v2i64_store_0_offset(<2 x i64> addrspace(3)* %out) nounwind {
; BOTH-LABEL: {{^}}local_v4i64_store:
; BOTH-NOT: ADD
-; BOTH-DAG: ds_write_b64 v{{[0-9]+}}, {{v\[[0-9]+:[0-9]+\]}} offset:224 [M0]
-; BOTH-DAG: ds_write_b64 v{{[0-9]+}}, {{v\[[0-9]+:[0-9]+\]}} offset:232 [M0]
-; BOTH-DAG: ds_write_b64 v{{[0-9]+}}, {{v\[[0-9]+:[0-9]+\]}} offset:240 [M0]
-; BOTH-DAG: ds_write_b64 v{{[0-9]+}}, {{v\[[0-9]+:[0-9]+\]}} offset:248 [M0]
+; BOTH-DAG: ds_write_b64 v{{[0-9]+}}, {{v\[[0-9]+:[0-9]+\]}} offset:224
+; BOTH-DAG: ds_write_b64 v{{[0-9]+}}, {{v\[[0-9]+:[0-9]+\]}} offset:232
+; BOTH-DAG: ds_write_b64 v{{[0-9]+}}, {{v\[[0-9]+:[0-9]+\]}} offset:240
+; BOTH-DAG: ds_write_b64 v{{[0-9]+}}, {{v\[[0-9]+:[0-9]+\]}} offset:248
; BOTH: s_endpgm
define void @local_v4i64_store(<4 x i64> addrspace(3)* %out) nounwind {
%gep = getelementptr <4 x i64> addrspace(3)* %out, i32 7
@@ -155,10 +156,10 @@ define void @local_v4i64_store(<4 x i64> addrspace(3)* %out) nounwind {
; BOTH-LABEL: {{^}}local_v4i64_store_0_offset:
; BOTH-NOT: ADD
-; BOTH-DAG: ds_write_b64 v{{[0-9]+}}, {{v\[[0-9]+:[0-9]+\]}} [M0]
-; BOTH-DAG: ds_write_b64 v{{[0-9]+}}, {{v\[[0-9]+:[0-9]+\]}} offset:8 [M0]
-; BOTH-DAG: ds_write_b64 v{{[0-9]+}}, {{v\[[0-9]+:[0-9]+\]}} offset:16 [M0]
-; BOTH-DAG: ds_write_b64 v{{[0-9]+}}, {{v\[[0-9]+:[0-9]+\]}} offset:24 [M0]
+; BOTH-DAG: ds_write_b64 v{{[0-9]+}}, {{v\[[0-9]+:[0-9]+\]}}
+; BOTH-DAG: ds_write_b64 v{{[0-9]+}}, {{v\[[0-9]+:[0-9]+\]}} offset:8
+; BOTH-DAG: ds_write_b64 v{{[0-9]+}}, {{v\[[0-9]+:[0-9]+\]}} offset:16
+; BOTH-DAG: ds_write_b64 v{{[0-9]+}}, {{v\[[0-9]+:[0-9]+\]}} offset:24
; BOTH: s_endpgm
define void @local_v4i64_store_0_offset(<4 x i64> addrspace(3)* %out) nounwind {
store <4 x i64> <i64 1234, i64 1234, i64 1234, i64 1234>, <4 x i64> addrspace(3)* %out, align 16
diff --git a/test/CodeGen/R600/local-atomics.ll b/test/CodeGen/R600/local-atomics.ll
index 2ac811f..29921b6 100644
--- a/test/CodeGen/R600/local-atomics.ll
+++ b/test/CodeGen/R600/local-atomics.ll
@@ -1,15 +1,16 @@
-; RUN: llc -march=r600 -mcpu=SI -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
-; RUN: llc -march=r600 -mcpu=bonaire -verify-machineinstrs < %s | FileCheck -strict-whitespace -check-prefix=CI -check-prefix=FUNC %s
+; RUN: llc -march=amdgcn -mcpu=SI -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=GCN -check-prefix=FUNC %s
+; RUN: llc -march=amdgcn -mcpu=bonaire -verify-machineinstrs < %s | FileCheck -check-prefix=CIVI -check-prefix=GCN -check-prefix=FUNC %s
+; RUN: llc -march=amdgcn -mcpu=tonga -verify-machineinstrs < %s | FileCheck -check-prefix=CIVI -check-prefix=GCN -check-prefix=FUNC %s
; RUN: llc -march=r600 -mcpu=redwood -verify-machineinstrs < %s | FileCheck -check-prefix=EG -check-prefix=FUNC %s
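; How the prefixes above layer (as used throughout this file): FUNC matches in
; all four runs, GCN in the three amdgcn runs, CIVI only under bonaire and
; tonga, SI only under the SI run, and EG only under redwood. A check that
; holds for CI and VI but not SI is therefore written once, for example:
;   ; CIVI: ds_add_rtn_u32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}} offset:16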
; FUNC-LABEL: {{^}}lds_atomic_xchg_ret_i32:
; EG: LDS_WRXCHG_RET *
-; SI: s_load_dword [[SPTR:s[0-9]+]],
-; SI: v_mov_b32_e32 [[DATA:v[0-9]+]], 4
-; SI: v_mov_b32_e32 [[VPTR:v[0-9]+]], [[SPTR]]
-; SI: ds_wrxchg_rtn_b32 [[RESULT:v[0-9]+]], [[VPTR]], [[DATA]] [M0]
-; SI: buffer_store_dword [[RESULT]],
-; SI: s_endpgm
+; GCN: v_mov_b32_e32 [[DATA:v[0-9]+]], 4
+; GCN: s_load_dword [[SPTR:s[0-9]+]],
+; GCN: v_mov_b32_e32 [[VPTR:v[0-9]+]], [[SPTR]]
+; GCN: ds_wrxchg_rtn_b32 [[RESULT:v[0-9]+]], [[VPTR]], [[DATA]]
+; GCN: buffer_store_dword [[RESULT]],
+; GCN: s_endpgm
define void @lds_atomic_xchg_ret_i32(i32 addrspace(1)* %out, i32 addrspace(3)* %ptr) nounwind {
%result = atomicrmw xchg i32 addrspace(3)* %ptr, i32 4 seq_cst
store i32 %result, i32 addrspace(1)* %out, align 4
@@ -18,8 +19,8 @@ define void @lds_atomic_xchg_ret_i32(i32 addrspace(1)* %out, i32 addrspace(3)* %
; FUNC-LABEL: {{^}}lds_atomic_xchg_ret_i32_offset:
; EG: LDS_WRXCHG_RET *
-; SI: ds_wrxchg_rtn_b32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}} offset:16
-; SI: s_endpgm
+; GCN: ds_wrxchg_rtn_b32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}} offset:16
+; GCN: s_endpgm
define void @lds_atomic_xchg_ret_i32_offset(i32 addrspace(1)* %out, i32 addrspace(3)* %ptr) nounwind {
%gep = getelementptr i32 addrspace(3)* %ptr, i32 4
%result = atomicrmw xchg i32 addrspace(3)* %gep, i32 4 seq_cst
@@ -30,12 +31,12 @@ define void @lds_atomic_xchg_ret_i32_offset(i32 addrspace(1)* %out, i32 addrspac
; XXX - Is it really necessary to load 4 into a VGPR?
; FUNC-LABEL: {{^}}lds_atomic_add_ret_i32:
; EG: LDS_ADD_RET *
-; SI: s_load_dword [[SPTR:s[0-9]+]],
-; SI: v_mov_b32_e32 [[DATA:v[0-9]+]], 4
-; SI: v_mov_b32_e32 [[VPTR:v[0-9]+]], [[SPTR]]
-; SI: ds_add_rtn_u32 [[RESULT:v[0-9]+]], [[VPTR]], [[DATA]] [M0]
-; SI: buffer_store_dword [[RESULT]],
-; SI: s_endpgm
+; GCN: v_mov_b32_e32 [[DATA:v[0-9]+]], 4
+; GCN: s_load_dword [[SPTR:s[0-9]+]],
+; GCN: v_mov_b32_e32 [[VPTR:v[0-9]+]], [[SPTR]]
+; GCN: ds_add_rtn_u32 [[RESULT:v[0-9]+]], [[VPTR]], [[DATA]]
+; GCN: buffer_store_dword [[RESULT]],
+; GCN: s_endpgm
define void @lds_atomic_add_ret_i32(i32 addrspace(1)* %out, i32 addrspace(3)* %ptr) nounwind {
%result = atomicrmw add i32 addrspace(3)* %ptr, i32 4 seq_cst
store i32 %result, i32 addrspace(1)* %out, align 4
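; Partial answer to the XXX above, assuming GCN operand classes: DS atomics
; read both their address and data operands from VGPRs, so the constant 4
; cannot be encoded as an inline operand and has to be copied into a VGPR
; first. Illustrative sequence (register numbers hypothetical):
;   v_mov_b32_e32 v1, 4         ; data operand must be a VGPR
;   v_mov_b32_e32 v0, s2        ; LDS pointer copied from the loaded SGPR
;   ds_add_rtn_u32 v2, v0, v1   ; address and data both read from VGPRs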
@@ -44,8 +45,8 @@ define void @lds_atomic_add_ret_i32(i32 addrspace(1)* %out, i32 addrspace(3)* %p
; FUNC-LABEL: {{^}}lds_atomic_add_ret_i32_offset:
; EG: LDS_ADD_RET *
-; SI: ds_add_rtn_u32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}} offset:16
-; SI: s_endpgm
+; GCN: ds_add_rtn_u32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}} offset:16
+; GCN: s_endpgm
define void @lds_atomic_add_ret_i32_offset(i32 addrspace(1)* %out, i32 addrspace(3)* %ptr) nounwind {
%gep = getelementptr i32 addrspace(3)* %ptr, i32 4
%result = atomicrmw add i32 addrspace(3)* %gep, i32 4 seq_cst
@@ -55,9 +56,9 @@ define void @lds_atomic_add_ret_i32_offset(i32 addrspace(1)* %out, i32 addrspace
; FUNC-LABEL: {{^}}lds_atomic_add_ret_i32_bad_si_offset:
; EG: LDS_ADD_RET *
-; SI: ds_add_rtn_u32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}} [M0]
-; CI: ds_add_rtn_u32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}} offset:16
-; SI: s_endpgm
+; SI: ds_add_rtn_u32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
+; CIVI: ds_add_rtn_u32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}} offset:16
+; GCN: s_endpgm
define void @lds_atomic_add_ret_i32_bad_si_offset(i32 addrspace(1)* %out, i32 addrspace(3)* %ptr, i32 %a, i32 %b) nounwind {
%sub = sub i32 %a, %b
%add = add i32 %sub, 4
@@ -69,10 +70,9 @@ define void @lds_atomic_add_ret_i32_bad_si_offset(i32 addrspace(1)* %out, i32 ad
; FUNC-LABEL: {{^}}lds_atomic_inc_ret_i32:
; EG: LDS_ADD_RET *
-; SI: s_mov_b32 [[SNEGONE:s[0-9]+]], -1
-; SI: v_mov_b32_e32 [[NEGONE:v[0-9]+]], [[SNEGONE]]
-; SI: ds_inc_rtn_u32 v{{[0-9]+}}, v{{[0-9]+}}, [[NEGONE]] [M0]
-; SI: s_endpgm
+; GCN: v_mov_b32_e32 [[NEGONE:v[0-9]+]], -1
+; GCN: ds_inc_rtn_u32 v{{[0-9]+}}, v{{[0-9]+}}, [[NEGONE]]
+; GCN: s_endpgm
define void @lds_atomic_inc_ret_i32(i32 addrspace(1)* %out, i32 addrspace(3)* %ptr) nounwind {
%result = atomicrmw add i32 addrspace(3)* %ptr, i32 1 seq_cst
store i32 %result, i32 addrspace(1)* %out, align 4
@@ -81,10 +81,9 @@ define void @lds_atomic_inc_ret_i32(i32 addrspace(1)* %out, i32 addrspace(3)* %p
; FUNC-LABEL: {{^}}lds_atomic_inc_ret_i32_offset:
; EG: LDS_ADD_RET *
-; SI: s_mov_b32 [[SNEGONE:s[0-9]+]], -1
-; SI: v_mov_b32_e32 [[NEGONE:v[0-9]+]], [[SNEGONE]]
-; SI: ds_inc_rtn_u32 v{{[0-9]+}}, v{{[0-9]+}}, [[NEGONE]] offset:16
-; SI: s_endpgm
+; GCN: v_mov_b32_e32 [[NEGONE:v[0-9]+]], -1
+; GCN: ds_inc_rtn_u32 v{{[0-9]+}}, v{{[0-9]+}}, [[NEGONE]] offset:16
+; GCN: s_endpgm
define void @lds_atomic_inc_ret_i32_offset(i32 addrspace(1)* %out, i32 addrspace(3)* %ptr) nounwind {
%gep = getelementptr i32 addrspace(3)* %ptr, i32 4
%result = atomicrmw add i32 addrspace(3)* %gep, i32 1 seq_cst
@@ -94,9 +93,9 @@ define void @lds_atomic_inc_ret_i32_offset(i32 addrspace(1)* %out, i32 addrspace
; FUNC-LABEL: {{^}}lds_atomic_inc_ret_i32_bad_si_offset:
; EG: LDS_ADD_RET *
-; SI: ds_inc_rtn_u32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}} [M0]
-; CI: ds_inc_rtn_u32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}} offset:16
-; SI: s_endpgm
+; SI: ds_inc_rtn_u32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
+; CIVI: ds_inc_rtn_u32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}} offset:16
+; GCN: s_endpgm
define void @lds_atomic_inc_ret_i32_bad_si_offset(i32 addrspace(1)* %out, i32 addrspace(3)* %ptr, i32 %a, i32 %b) nounwind {
%sub = sub i32 %a, %b
%add = add i32 %sub, 4
@@ -108,8 +107,8 @@ define void @lds_atomic_inc_ret_i32_bad_si_offset(i32 addrspace(1)* %out, i32 ad
; FUNC-LABEL: {{^}}lds_atomic_sub_ret_i32:
; EG: LDS_SUB_RET *
-; SI: ds_sub_rtn_u32
-; SI: s_endpgm
+; GCN: ds_sub_rtn_u32
+; GCN: s_endpgm
define void @lds_atomic_sub_ret_i32(i32 addrspace(1)* %out, i32 addrspace(3)* %ptr) nounwind {
%result = atomicrmw sub i32 addrspace(3)* %ptr, i32 4 seq_cst
store i32 %result, i32 addrspace(1)* %out, align 4
@@ -118,8 +117,8 @@ define void @lds_atomic_sub_ret_i32(i32 addrspace(1)* %out, i32 addrspace(3)* %p
; FUNC-LABEL: {{^}}lds_atomic_sub_ret_i32_offset:
; EG: LDS_SUB_RET *
-; SI: ds_sub_rtn_u32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}} offset:16
-; SI: s_endpgm
+; GCN: ds_sub_rtn_u32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}} offset:16
+; GCN: s_endpgm
define void @lds_atomic_sub_ret_i32_offset(i32 addrspace(1)* %out, i32 addrspace(3)* %ptr) nounwind {
%gep = getelementptr i32 addrspace(3)* %ptr, i32 4
%result = atomicrmw sub i32 addrspace(3)* %gep, i32 4 seq_cst
@@ -129,10 +128,9 @@ define void @lds_atomic_sub_ret_i32_offset(i32 addrspace(1)* %out, i32 addrspace
; FUNC-LABEL: {{^}}lds_atomic_dec_ret_i32:
; EG: LDS_SUB_RET *
-; SI: s_mov_b32 [[SNEGONE:s[0-9]+]], -1
-; SI: v_mov_b32_e32 [[NEGONE:v[0-9]+]], [[SNEGONE]]
-; SI: ds_dec_rtn_u32 v{{[0-9]+}}, v{{[0-9]+}}, [[NEGONE]] [M0]
-; SI: s_endpgm
+; GCN: v_mov_b32_e32 [[NEGONE:v[0-9]+]], -1
+; GCN: ds_dec_rtn_u32 v{{[0-9]+}}, v{{[0-9]+}}, [[NEGONE]]
+; GCN: s_endpgm
define void @lds_atomic_dec_ret_i32(i32 addrspace(1)* %out, i32 addrspace(3)* %ptr) nounwind {
%result = atomicrmw sub i32 addrspace(3)* %ptr, i32 1 seq_cst
store i32 %result, i32 addrspace(1)* %out, align 4
@@ -141,10 +139,9 @@ define void @lds_atomic_dec_ret_i32(i32 addrspace(1)* %out, i32 addrspace(3)* %p
; FUNC-LABEL: {{^}}lds_atomic_dec_ret_i32_offset:
; EG: LDS_SUB_RET *
-; SI: s_mov_b32 [[SNEGONE:s[0-9]+]], -1
-; SI: v_mov_b32_e32 [[NEGONE:v[0-9]+]], [[SNEGONE]]
-; SI: ds_dec_rtn_u32 v{{[0-9]+}}, v{{[0-9]+}}, [[NEGONE]] offset:16
-; SI: s_endpgm
+; GCN: v_mov_b32_e32 [[NEGONE:v[0-9]+]], -1
+; GCN: ds_dec_rtn_u32 v{{[0-9]+}}, v{{[0-9]+}}, [[NEGONE]] offset:16
+; GCN: s_endpgm
define void @lds_atomic_dec_ret_i32_offset(i32 addrspace(1)* %out, i32 addrspace(3)* %ptr) nounwind {
%gep = getelementptr i32 addrspace(3)* %ptr, i32 4
%result = atomicrmw sub i32 addrspace(3)* %gep, i32 1 seq_cst
@@ -154,8 +151,8 @@ define void @lds_atomic_dec_ret_i32_offset(i32 addrspace(1)* %out, i32 addrspace
; FUNC-LABEL: {{^}}lds_atomic_and_ret_i32:
; EG: LDS_AND_RET *
-; SI: ds_and_rtn_b32
-; SI: s_endpgm
+; GCN: ds_and_rtn_b32
+; GCN: s_endpgm
define void @lds_atomic_and_ret_i32(i32 addrspace(1)* %out, i32 addrspace(3)* %ptr) nounwind {
%result = atomicrmw and i32 addrspace(3)* %ptr, i32 4 seq_cst
store i32 %result, i32 addrspace(1)* %out, align 4
@@ -164,8 +161,8 @@ define void @lds_atomic_and_ret_i32(i32 addrspace(1)* %out, i32 addrspace(3)* %p
; FUNC-LABEL: {{^}}lds_atomic_and_ret_i32_offset:
; EG: LDS_AND_RET *
-; SI: ds_and_rtn_b32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}} offset:16
-; SI: s_endpgm
+; GCN: ds_and_rtn_b32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}} offset:16
+; GCN: s_endpgm
define void @lds_atomic_and_ret_i32_offset(i32 addrspace(1)* %out, i32 addrspace(3)* %ptr) nounwind {
%gep = getelementptr i32 addrspace(3)* %ptr, i32 4
%result = atomicrmw and i32 addrspace(3)* %gep, i32 4 seq_cst
@@ -175,8 +172,8 @@ define void @lds_atomic_and_ret_i32_offset(i32 addrspace(1)* %out, i32 addrspace
; FUNC-LABEL: {{^}}lds_atomic_or_ret_i32:
; EG: LDS_OR_RET *
-; SI: ds_or_rtn_b32
-; SI: s_endpgm
+; GCN: ds_or_rtn_b32
+; GCN: s_endpgm
define void @lds_atomic_or_ret_i32(i32 addrspace(1)* %out, i32 addrspace(3)* %ptr) nounwind {
%result = atomicrmw or i32 addrspace(3)* %ptr, i32 4 seq_cst
store i32 %result, i32 addrspace(1)* %out, align 4
@@ -185,8 +182,8 @@ define void @lds_atomic_or_ret_i32(i32 addrspace(1)* %out, i32 addrspace(3)* %pt
; FUNC-LABEL: {{^}}lds_atomic_or_ret_i32_offset:
; EG: LDS_OR_RET *
-; SI: ds_or_rtn_b32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}} offset:16
-; SI: s_endpgm
+; GCN: ds_or_rtn_b32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}} offset:16
+; GCN: s_endpgm
define void @lds_atomic_or_ret_i32_offset(i32 addrspace(1)* %out, i32 addrspace(3)* %ptr) nounwind {
%gep = getelementptr i32 addrspace(3)* %ptr, i32 4
%result = atomicrmw or i32 addrspace(3)* %gep, i32 4 seq_cst
@@ -196,8 +193,8 @@ define void @lds_atomic_or_ret_i32_offset(i32 addrspace(1)* %out, i32 addrspace(
; FUNC-LABEL: {{^}}lds_atomic_xor_ret_i32:
; EG: LDS_XOR_RET *
-; SI: ds_xor_rtn_b32
-; SI: s_endpgm
+; GCN: ds_xor_rtn_b32
+; GCN: s_endpgm
define void @lds_atomic_xor_ret_i32(i32 addrspace(1)* %out, i32 addrspace(3)* %ptr) nounwind {
%result = atomicrmw xor i32 addrspace(3)* %ptr, i32 4 seq_cst
store i32 %result, i32 addrspace(1)* %out, align 4
@@ -206,8 +203,8 @@ define void @lds_atomic_xor_ret_i32(i32 addrspace(1)* %out, i32 addrspace(3)* %p
; FUNC-LABEL: {{^}}lds_atomic_xor_ret_i32_offset:
; EG: LDS_XOR_RET *
-; SI: ds_xor_rtn_b32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}} offset:16
-; SI: s_endpgm
+; GCN: ds_xor_rtn_b32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}} offset:16
+; GCN: s_endpgm
define void @lds_atomic_xor_ret_i32_offset(i32 addrspace(1)* %out, i32 addrspace(3)* %ptr) nounwind {
%gep = getelementptr i32 addrspace(3)* %ptr, i32 4
%result = atomicrmw xor i32 addrspace(3)* %gep, i32 4 seq_cst
@@ -225,8 +222,8 @@ define void @lds_atomic_xor_ret_i32_offset(i32 addrspace(1)* %out, i32 addrspace
; FUNC-LABEL: {{^}}lds_atomic_min_ret_i32:
; EG: LDS_MIN_INT_RET *
-; SI: ds_min_rtn_i32
-; SI: s_endpgm
+; GCN: ds_min_rtn_i32
+; GCN: s_endpgm
define void @lds_atomic_min_ret_i32(i32 addrspace(1)* %out, i32 addrspace(3)* %ptr) nounwind {
%result = atomicrmw min i32 addrspace(3)* %ptr, i32 4 seq_cst
store i32 %result, i32 addrspace(1)* %out, align 4
@@ -235,8 +232,8 @@ define void @lds_atomic_min_ret_i32(i32 addrspace(1)* %out, i32 addrspace(3)* %p
; FUNC-LABEL: {{^}}lds_atomic_min_ret_i32_offset:
; EG: LDS_MIN_INT_RET *
-; SI: ds_min_rtn_i32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}} offset:16
-; SI: s_endpgm
+; GCN: ds_min_rtn_i32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}} offset:16
+; GCN: s_endpgm
define void @lds_atomic_min_ret_i32_offset(i32 addrspace(1)* %out, i32 addrspace(3)* %ptr) nounwind {
%gep = getelementptr i32 addrspace(3)* %ptr, i32 4
%result = atomicrmw min i32 addrspace(3)* %gep, i32 4 seq_cst
@@ -246,8 +243,8 @@ define void @lds_atomic_min_ret_i32_offset(i32 addrspace(1)* %out, i32 addrspace
; FUNC-LABEL: {{^}}lds_atomic_max_ret_i32:
; EG: LDS_MAX_INT_RET *
-; SI: ds_max_rtn_i32
-; SI: s_endpgm
+; GCN: ds_max_rtn_i32
+; GCN: s_endpgm
define void @lds_atomic_max_ret_i32(i32 addrspace(1)* %out, i32 addrspace(3)* %ptr) nounwind {
%result = atomicrmw max i32 addrspace(3)* %ptr, i32 4 seq_cst
store i32 %result, i32 addrspace(1)* %out, align 4
@@ -256,8 +253,8 @@ define void @lds_atomic_max_ret_i32(i32 addrspace(1)* %out, i32 addrspace(3)* %p
; FUNC-LABEL: {{^}}lds_atomic_max_ret_i32_offset:
; EG: LDS_MAX_INT_RET *
-; SI: ds_max_rtn_i32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}} offset:16
-; SI: s_endpgm
+; GCN: ds_max_rtn_i32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}} offset:16
+; GCN: s_endpgm
define void @lds_atomic_max_ret_i32_offset(i32 addrspace(1)* %out, i32 addrspace(3)* %ptr) nounwind {
%gep = getelementptr i32 addrspace(3)* %ptr, i32 4
%result = atomicrmw max i32 addrspace(3)* %gep, i32 4 seq_cst
@@ -267,8 +264,8 @@ define void @lds_atomic_max_ret_i32_offset(i32 addrspace(1)* %out, i32 addrspace
; FUNC-LABEL: {{^}}lds_atomic_umin_ret_i32:
; EG: LDS_MIN_UINT_RET *
-; SI: ds_min_rtn_u32
-; SI: s_endpgm
+; GCN: ds_min_rtn_u32
+; GCN: s_endpgm
define void @lds_atomic_umin_ret_i32(i32 addrspace(1)* %out, i32 addrspace(3)* %ptr) nounwind {
%result = atomicrmw umin i32 addrspace(3)* %ptr, i32 4 seq_cst
store i32 %result, i32 addrspace(1)* %out, align 4
@@ -277,8 +274,8 @@ define void @lds_atomic_umin_ret_i32(i32 addrspace(1)* %out, i32 addrspace(3)* %
; FUNC-LABEL: {{^}}lds_atomic_umin_ret_i32_offset:
; EG: LDS_MIN_UINT_RET *
-; SI: ds_min_rtn_u32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}} offset:16
-; SI: s_endpgm
+; GCN: ds_min_rtn_u32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}} offset:16
+; GCN: s_endpgm
define void @lds_atomic_umin_ret_i32_offset(i32 addrspace(1)* %out, i32 addrspace(3)* %ptr) nounwind {
%gep = getelementptr i32 addrspace(3)* %ptr, i32 4
%result = atomicrmw umin i32 addrspace(3)* %gep, i32 4 seq_cst
@@ -288,8 +285,8 @@ define void @lds_atomic_umin_ret_i32_offset(i32 addrspace(1)* %out, i32 addrspac
; FUNC-LABEL: {{^}}lds_atomic_umax_ret_i32:
; EG: LDS_MAX_UINT_RET *
-; SI: ds_max_rtn_u32
-; SI: s_endpgm
+; GCN: ds_max_rtn_u32
+; GCN: s_endpgm
define void @lds_atomic_umax_ret_i32(i32 addrspace(1)* %out, i32 addrspace(3)* %ptr) nounwind {
%result = atomicrmw umax i32 addrspace(3)* %ptr, i32 4 seq_cst
store i32 %result, i32 addrspace(1)* %out, align 4
@@ -298,8 +295,8 @@ define void @lds_atomic_umax_ret_i32(i32 addrspace(1)* %out, i32 addrspace(3)* %
; FUNC-LABEL: {{^}}lds_atomic_umax_ret_i32_offset:
; EG: LDS_MAX_UINT_RET *
-; SI: ds_max_rtn_u32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}} offset:16
-; SI: s_endpgm
+; GCN: ds_max_rtn_u32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}} offset:16
+; GCN: s_endpgm
define void @lds_atomic_umax_ret_i32_offset(i32 addrspace(1)* %out, i32 addrspace(3)* %ptr) nounwind {
%gep = getelementptr i32 addrspace(3)* %ptr, i32 4
%result = atomicrmw umax i32 addrspace(3)* %gep, i32 4 seq_cst
@@ -308,19 +305,19 @@ define void @lds_atomic_umax_ret_i32_offset(i32 addrspace(1)* %out, i32 addrspac
}
; FUNC-LABEL: {{^}}lds_atomic_xchg_noret_i32:
-; SI: s_load_dword [[SPTR:s[0-9]+]],
-; SI: v_mov_b32_e32 [[DATA:v[0-9]+]], 4
-; SI: v_mov_b32_e32 [[VPTR:v[0-9]+]], [[SPTR]]
-; SI: ds_wrxchg_rtn_b32 [[RESULT:v[0-9]+]], [[VPTR]], [[DATA]] [M0]
-; SI: s_endpgm
+; GCN: s_load_dword [[SPTR:s[0-9]+]],
+; GCN: v_mov_b32_e32 [[DATA:v[0-9]+]], 4
+; GCN: v_mov_b32_e32 [[VPTR:v[0-9]+]], [[SPTR]]
+; GCN: ds_wrxchg_rtn_b32 [[RESULT:v[0-9]+]], [[VPTR]], [[DATA]]
+; GCN: s_endpgm
define void @lds_atomic_xchg_noret_i32(i32 addrspace(3)* %ptr) nounwind {
%result = atomicrmw xchg i32 addrspace(3)* %ptr, i32 4 seq_cst
ret void
}
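; Note on the check above: the exchange is assumed to have no non-returning DS
; encoding, so even this no-result test still matches ds_wrxchg_rtn_b32 and
; the destination VGPR is simply dead:
;   ds_wrxchg_rtn_b32 v2, v0, v1   ; v2 is written but never used
; The noret add/sub/and/or/xor tests below can instead use the plain forms
; (ds_add_u32, ds_sub_u32, ...), which exist without a returned value.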
; FUNC-LABEL: {{^}}lds_atomic_xchg_noret_i32_offset:
-; SI: ds_wrxchg_rtn_b32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}} offset:16
-; SI: s_endpgm
+; GCN: ds_wrxchg_rtn_b32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}} offset:16
+; GCN: s_endpgm
define void @lds_atomic_xchg_noret_i32_offset(i32 addrspace(3)* %ptr) nounwind {
%gep = getelementptr i32 addrspace(3)* %ptr, i32 4
%result = atomicrmw xchg i32 addrspace(3)* %gep, i32 4 seq_cst
@@ -329,19 +326,19 @@ define void @lds_atomic_xchg_noret_i32_offset(i32 addrspace(3)* %ptr) nounwind {
; XXX - Is it really necessary to load 4 into a VGPR?
; FUNC-LABEL: {{^}}lds_atomic_add_noret_i32:
-; SI: s_load_dword [[SPTR:s[0-9]+]],
-; SI: v_mov_b32_e32 [[DATA:v[0-9]+]], 4
-; SI: v_mov_b32_e32 [[VPTR:v[0-9]+]], [[SPTR]]
-; SI: ds_add_u32 [[VPTR]], [[DATA]] [M0]
-; SI: s_endpgm
+; GCN: s_load_dword [[SPTR:s[0-9]+]],
+; GCN: v_mov_b32_e32 [[DATA:v[0-9]+]], 4
+; GCN: v_mov_b32_e32 [[VPTR:v[0-9]+]], [[SPTR]]
+; GCN: ds_add_u32 [[VPTR]], [[DATA]]
+; GCN: s_endpgm
define void @lds_atomic_add_noret_i32(i32 addrspace(3)* %ptr) nounwind {
%result = atomicrmw add i32 addrspace(3)* %ptr, i32 4 seq_cst
ret void
}
; FUNC-LABEL: {{^}}lds_atomic_add_noret_i32_offset:
-; SI: ds_add_u32 v{{[0-9]+}}, v{{[0-9]+}} offset:16
-; SI: s_endpgm
+; GCN: ds_add_u32 v{{[0-9]+}}, v{{[0-9]+}} offset:16
+; GCN: s_endpgm
define void @lds_atomic_add_noret_i32_offset(i32 addrspace(3)* %ptr) nounwind {
%gep = getelementptr i32 addrspace(3)* %ptr, i32 4
%result = atomicrmw add i32 addrspace(3)* %gep, i32 4 seq_cst
@@ -349,9 +346,9 @@ define void @lds_atomic_add_noret_i32_offset(i32 addrspace(3)* %ptr) nounwind {
}
; FUNC-LABEL: {{^}}lds_atomic_add_noret_i32_bad_si_offset
-; SI: ds_add_u32 v{{[0-9]+}}, v{{[0-9]+}} [M0]
-; CI: ds_add_u32 v{{[0-9]+}}, v{{[0-9]+}} offset:16 [M0]
-; SI: s_endpgm
+; SI: ds_add_u32 v{{[0-9]+}}, v{{[0-9]+}}
+; CIVI: ds_add_u32 v{{[0-9]+}}, v{{[0-9]+}} offset:16
+; GCN: s_endpgm
define void @lds_atomic_add_noret_i32_bad_si_offset(i32 addrspace(3)* %ptr, i32 %a, i32 %b) nounwind {
%sub = sub i32 %a, %b
%add = add i32 %sub, 4
@@ -361,20 +358,18 @@ define void @lds_atomic_add_noret_i32_bad_si_offset(i32 addrspace(3)* %ptr, i32
}
; FUNC-LABEL: {{^}}lds_atomic_inc_noret_i32:
-; SI: s_mov_b32 [[SNEGONE:s[0-9]+]], -1
-; SI: v_mov_b32_e32 [[NEGONE:v[0-9]+]], [[SNEGONE]]
-; SI: ds_inc_u32 v{{[0-9]+}}, [[NEGONE]] [M0]
-; SI: s_endpgm
+; GCN: v_mov_b32_e32 [[NEGONE:v[0-9]+]], -1
+; GCN: ds_inc_u32 v{{[0-9]+}}, [[NEGONE]]
+; GCN: s_endpgm
define void @lds_atomic_inc_noret_i32(i32 addrspace(3)* %ptr) nounwind {
%result = atomicrmw add i32 addrspace(3)* %ptr, i32 1 seq_cst
ret void
}
; FUNC-LABEL: {{^}}lds_atomic_inc_noret_i32_offset:
-; SI: s_mov_b32 [[SNEGONE:s[0-9]+]], -1
-; SI: v_mov_b32_e32 [[NEGONE:v[0-9]+]], [[SNEGONE]]
-; SI: ds_inc_u32 v{{[0-9]+}}, [[NEGONE]] offset:16
-; SI: s_endpgm
+; GCN: v_mov_b32_e32 [[NEGONE:v[0-9]+]], -1
+; GCN: ds_inc_u32 v{{[0-9]+}}, [[NEGONE]] offset:16
+; GCN: s_endpgm
define void @lds_atomic_inc_noret_i32_offset(i32 addrspace(3)* %ptr) nounwind {
%gep = getelementptr i32 addrspace(3)* %ptr, i32 4
%result = atomicrmw add i32 addrspace(3)* %gep, i32 1 seq_cst
@@ -383,8 +378,8 @@ define void @lds_atomic_inc_noret_i32_offset(i32 addrspace(3)* %ptr) nounwind {
; FUNC-LABEL: {{^}}lds_atomic_inc_noret_i32_bad_si_offset:
; SI: ds_inc_u32 v{{[0-9]+}}, v{{[0-9]+}}
-; CI: ds_inc_u32 v{{[0-9]+}}, v{{[0-9]+}} offset:16
-; SI: s_endpgm
+; CIVI: ds_inc_u32 v{{[0-9]+}}, v{{[0-9]+}} offset:16
+; GCN: s_endpgm
define void @lds_atomic_inc_noret_i32_bad_si_offset(i32 addrspace(3)* %ptr, i32 %a, i32 %b) nounwind {
%sub = sub i32 %a, %b
%add = add i32 %sub, 4
@@ -394,16 +389,16 @@ define void @lds_atomic_inc_noret_i32_bad_si_offset(i32 addrspace(3)* %ptr, i32
}
; FUNC-LABEL: {{^}}lds_atomic_sub_noret_i32:
-; SI: ds_sub_u32
-; SI: s_endpgm
+; GCN: ds_sub_u32
+; GCN: s_endpgm
define void @lds_atomic_sub_noret_i32(i32 addrspace(3)* %ptr) nounwind {
%result = atomicrmw sub i32 addrspace(3)* %ptr, i32 4 seq_cst
ret void
}
; FUNC-LABEL: {{^}}lds_atomic_sub_noret_i32_offset:
-; SI: ds_sub_u32 v{{[0-9]+}}, v{{[0-9]+}} offset:16
-; SI: s_endpgm
+; GCN: ds_sub_u32 v{{[0-9]+}}, v{{[0-9]+}} offset:16
+; GCN: s_endpgm
define void @lds_atomic_sub_noret_i32_offset(i32 addrspace(3)* %ptr) nounwind {
%gep = getelementptr i32 addrspace(3)* %ptr, i32 4
%result = atomicrmw sub i32 addrspace(3)* %gep, i32 4 seq_cst
@@ -411,20 +406,18 @@ define void @lds_atomic_sub_noret_i32_offset(i32 addrspace(3)* %ptr) nounwind {
}
; FUNC-LABEL: {{^}}lds_atomic_dec_noret_i32:
-; SI: s_mov_b32 [[SNEGONE:s[0-9]+]], -1
-; SI: v_mov_b32_e32 [[NEGONE:v[0-9]+]], [[SNEGONE]]
-; SI: ds_dec_u32 v{{[0-9]+}}, [[NEGONE]]
-; SI: s_endpgm
+; GCN: v_mov_b32_e32 [[NEGONE:v[0-9]+]], -1
+; GCN: ds_dec_u32 v{{[0-9]+}}, [[NEGONE]]
+; GCN: s_endpgm
define void @lds_atomic_dec_noret_i32(i32 addrspace(3)* %ptr) nounwind {
%result = atomicrmw sub i32 addrspace(3)* %ptr, i32 1 seq_cst
ret void
}
; FUNC-LABEL: {{^}}lds_atomic_dec_noret_i32_offset:
-; SI: s_mov_b32 [[SNEGONE:s[0-9]+]], -1
-; SI: v_mov_b32_e32 [[NEGONE:v[0-9]+]], [[SNEGONE]]
-; SI: ds_dec_u32 v{{[0-9]+}}, [[NEGONE]] offset:16
-; SI: s_endpgm
+; GCN: v_mov_b32_e32 [[NEGONE:v[0-9]+]], -1
+; GCN: ds_dec_u32 v{{[0-9]+}}, [[NEGONE]] offset:16
+; GCN: s_endpgm
define void @lds_atomic_dec_noret_i32_offset(i32 addrspace(3)* %ptr) nounwind {
%gep = getelementptr i32 addrspace(3)* %ptr, i32 4
%result = atomicrmw sub i32 addrspace(3)* %gep, i32 1 seq_cst
@@ -432,16 +425,16 @@ define void @lds_atomic_dec_noret_i32_offset(i32 addrspace(3)* %ptr) nounwind {
}
; FUNC-LABEL: {{^}}lds_atomic_and_noret_i32:
-; SI: ds_and_b32
-; SI: s_endpgm
+; GCN: ds_and_b32
+; GCN: s_endpgm
define void @lds_atomic_and_noret_i32(i32 addrspace(3)* %ptr) nounwind {
%result = atomicrmw and i32 addrspace(3)* %ptr, i32 4 seq_cst
ret void
}
; FUNC-LABEL: {{^}}lds_atomic_and_noret_i32_offset:
-; SI: ds_and_b32 v{{[0-9]+}}, v{{[0-9]+}} offset:16
-; SI: s_endpgm
+; GCN: ds_and_b32 v{{[0-9]+}}, v{{[0-9]+}} offset:16
+; GCN: s_endpgm
define void @lds_atomic_and_noret_i32_offset(i32 addrspace(3)* %ptr) nounwind {
%gep = getelementptr i32 addrspace(3)* %ptr, i32 4
%result = atomicrmw and i32 addrspace(3)* %gep, i32 4 seq_cst
@@ -449,16 +442,16 @@ define void @lds_atomic_and_noret_i32_offset(i32 addrspace(3)* %ptr) nounwind {
}
; FUNC-LABEL: {{^}}lds_atomic_or_noret_i32:
-; SI: ds_or_b32
-; SI: s_endpgm
+; GCN: ds_or_b32
+; GCN: s_endpgm
define void @lds_atomic_or_noret_i32(i32 addrspace(3)* %ptr) nounwind {
%result = atomicrmw or i32 addrspace(3)* %ptr, i32 4 seq_cst
ret void
}
; FUNC-LABEL: {{^}}lds_atomic_or_noret_i32_offset:
-; SI: ds_or_b32 v{{[0-9]+}}, v{{[0-9]+}} offset:16
-; SI: s_endpgm
+; GCN: ds_or_b32 v{{[0-9]+}}, v{{[0-9]+}} offset:16
+; GCN: s_endpgm
define void @lds_atomic_or_noret_i32_offset(i32 addrspace(3)* %ptr) nounwind {
%gep = getelementptr i32 addrspace(3)* %ptr, i32 4
%result = atomicrmw or i32 addrspace(3)* %gep, i32 4 seq_cst
@@ -466,16 +459,16 @@ define void @lds_atomic_or_noret_i32_offset(i32 addrspace(3)* %ptr) nounwind {
}
; FUNC-LABEL: {{^}}lds_atomic_xor_noret_i32:
-; SI: ds_xor_b32
-; SI: s_endpgm
+; GCN: ds_xor_b32
+; GCN: s_endpgm
define void @lds_atomic_xor_noret_i32(i32 addrspace(3)* %ptr) nounwind {
%result = atomicrmw xor i32 addrspace(3)* %ptr, i32 4 seq_cst
ret void
}
; FUNC-LABEL: {{^}}lds_atomic_xor_noret_i32_offset:
-; SI: ds_xor_b32 v{{[0-9]+}}, v{{[0-9]+}} offset:16
-; SI: s_endpgm
+; GCN: ds_xor_b32 v{{[0-9]+}}, v{{[0-9]+}} offset:16
+; GCN: s_endpgm
define void @lds_atomic_xor_noret_i32_offset(i32 addrspace(3)* %ptr) nounwind {
%gep = getelementptr i32 addrspace(3)* %ptr, i32 4
%result = atomicrmw xor i32 addrspace(3)* %gep, i32 4 seq_cst
@@ -490,16 +483,16 @@ define void @lds_atomic_xor_noret_i32_offset(i32 addrspace(3)* %ptr) nounwind {
; }
; FUNC-LABEL: {{^}}lds_atomic_min_noret_i32:
-; SI: ds_min_i32
-; SI: s_endpgm
+; GCN: ds_min_i32
+; GCN: s_endpgm
define void @lds_atomic_min_noret_i32(i32 addrspace(3)* %ptr) nounwind {
%result = atomicrmw min i32 addrspace(3)* %ptr, i32 4 seq_cst
ret void
}
; FUNC-LABEL: {{^}}lds_atomic_min_noret_i32_offset:
-; SI: ds_min_i32 v{{[0-9]+}}, v{{[0-9]+}} offset:16
-; SI: s_endpgm
+; GCN: ds_min_i32 v{{[0-9]+}}, v{{[0-9]+}} offset:16
+; GCN: s_endpgm
define void @lds_atomic_min_noret_i32_offset(i32 addrspace(3)* %ptr) nounwind {
%gep = getelementptr i32 addrspace(3)* %ptr, i32 4
%result = atomicrmw min i32 addrspace(3)* %gep, i32 4 seq_cst
@@ -507,16 +500,16 @@ define void @lds_atomic_min_noret_i32_offset(i32 addrspace(3)* %ptr) nounwind {
}
; FUNC-LABEL: {{^}}lds_atomic_max_noret_i32:
-; SI: ds_max_i32
-; SI: s_endpgm
+; GCN: ds_max_i32
+; GCN: s_endpgm
define void @lds_atomic_max_noret_i32(i32 addrspace(3)* %ptr) nounwind {
%result = atomicrmw max i32 addrspace(3)* %ptr, i32 4 seq_cst
ret void
}
; FUNC-LABEL: {{^}}lds_atomic_max_noret_i32_offset:
-; SI: ds_max_i32 v{{[0-9]+}}, v{{[0-9]+}} offset:16
-; SI: s_endpgm
+; GCN: ds_max_i32 v{{[0-9]+}}, v{{[0-9]+}} offset:16
+; GCN: s_endpgm
define void @lds_atomic_max_noret_i32_offset(i32 addrspace(3)* %ptr) nounwind {
%gep = getelementptr i32 addrspace(3)* %ptr, i32 4
%result = atomicrmw max i32 addrspace(3)* %gep, i32 4 seq_cst
@@ -524,16 +517,16 @@ define void @lds_atomic_max_noret_i32_offset(i32 addrspace(3)* %ptr) nounwind {
}
; FUNC-LABEL: {{^}}lds_atomic_umin_noret_i32:
-; SI: ds_min_u32
-; SI: s_endpgm
+; GCN: ds_min_u32
+; GCN: s_endpgm
define void @lds_atomic_umin_noret_i32(i32 addrspace(3)* %ptr) nounwind {
%result = atomicrmw umin i32 addrspace(3)* %ptr, i32 4 seq_cst
ret void
}
; FUNC-LABEL: {{^}}lds_atomic_umin_noret_i32_offset:
-; SI: ds_min_u32 v{{[0-9]+}}, v{{[0-9]+}} offset:16
-; SI: s_endpgm
+; GCN: ds_min_u32 v{{[0-9]+}}, v{{[0-9]+}} offset:16
+; GCN: s_endpgm
define void @lds_atomic_umin_noret_i32_offset(i32 addrspace(3)* %ptr) nounwind {
%gep = getelementptr i32 addrspace(3)* %ptr, i32 4
%result = atomicrmw umin i32 addrspace(3)* %gep, i32 4 seq_cst
@@ -541,16 +534,16 @@ define void @lds_atomic_umin_noret_i32_offset(i32 addrspace(3)* %ptr) nounwind {
}
; FUNC-LABEL: {{^}}lds_atomic_umax_noret_i32:
-; SI: ds_max_u32
-; SI: s_endpgm
+; GCN: ds_max_u32
+; GCN: s_endpgm
define void @lds_atomic_umax_noret_i32(i32 addrspace(3)* %ptr) nounwind {
%result = atomicrmw umax i32 addrspace(3)* %ptr, i32 4 seq_cst
ret void
}
; FUNC-LABEL: {{^}}lds_atomic_umax_noret_i32_offset:
-; SI: ds_max_u32 v{{[0-9]+}}, v{{[0-9]+}} offset:16
-; SI: s_endpgm
+; GCN: ds_max_u32 v{{[0-9]+}}, v{{[0-9]+}} offset:16
+; GCN: s_endpgm
define void @lds_atomic_umax_noret_i32_offset(i32 addrspace(3)* %ptr) nounwind {
%gep = getelementptr i32 addrspace(3)* %ptr, i32 4
%result = atomicrmw umax i32 addrspace(3)* %gep, i32 4 seq_cst
diff --git a/test/CodeGen/R600/local-atomics64.ll b/test/CodeGen/R600/local-atomics64.ll
index ce0cf59..50d039f 100644
--- a/test/CodeGen/R600/local-atomics64.ll
+++ b/test/CodeGen/R600/local-atomics64.ll
@@ -1,8 +1,9 @@
-; RUN: llc -march=r600 -mcpu=SI -verify-machineinstrs < %s | FileCheck -strict-whitespace -check-prefix=SI %s
+; RUN: llc -march=amdgcn -mcpu=SI -verify-machineinstrs < %s | FileCheck -strict-whitespace -check-prefix=SI -check-prefix=GCN %s
+; RUN: llc -march=amdgcn -mcpu=tonga -verify-machineinstrs < %s | FileCheck -strict-whitespace -check-prefix=VI -check-prefix=GCN %s
; FUNC-LABEL: {{^}}lds_atomic_xchg_ret_i64:
-; SI: ds_wrxchg_rtn_b64
-; SI: s_endpgm
+; GCN: ds_wrxchg_rtn_b64
+; GCN: s_endpgm
define void @lds_atomic_xchg_ret_i64(i64 addrspace(1)* %out, i64 addrspace(3)* %ptr) nounwind {
%result = atomicrmw xchg i64 addrspace(3)* %ptr, i64 4 seq_cst
store i64 %result, i64 addrspace(1)* %out, align 8
@@ -10,8 +11,8 @@ define void @lds_atomic_xchg_ret_i64(i64 addrspace(1)* %out, i64 addrspace(3)* %
}
; FUNC-LABEL: {{^}}lds_atomic_xchg_ret_i64_offset:
-; SI: ds_wrxchg_rtn_b64 {{.*}} offset:32
-; SI: s_endpgm
+; GCN: ds_wrxchg_rtn_b64 {{.*}} offset:32
+; GCN: s_endpgm
define void @lds_atomic_xchg_ret_i64_offset(i64 addrspace(1)* %out, i64 addrspace(3)* %ptr) nounwind {
%gep = getelementptr i64 addrspace(3)* %ptr, i32 4
%result = atomicrmw xchg i64 addrspace(3)* %gep, i64 4 seq_cst
@@ -20,8 +21,8 @@ define void @lds_atomic_xchg_ret_i64_offset(i64 addrspace(1)* %out, i64 addrspac
}
; FUNC-LABEL: {{^}}lds_atomic_add_ret_i64:
-; SI: ds_add_rtn_u64
-; SI: s_endpgm
+; GCN: ds_add_rtn_u64
+; GCN: s_endpgm
define void @lds_atomic_add_ret_i64(i64 addrspace(1)* %out, i64 addrspace(3)* %ptr) nounwind {
%result = atomicrmw add i64 addrspace(3)* %ptr, i64 4 seq_cst
store i64 %result, i64 addrspace(1)* %out, align 8
@@ -29,14 +30,14 @@ define void @lds_atomic_add_ret_i64(i64 addrspace(1)* %out, i64 addrspace(3)* %p
}
; FUNC-LABEL: {{^}}lds_atomic_add_ret_i64_offset:
+; GCN: v_mov_b32_e32 v[[LOVDATA:[0-9]+]], 9
+; GCN: v_mov_b32_e32 v[[HIVDATA:[0-9]+]], 0
; SI: s_load_dword [[PTR:s[0-9]+]], s{{\[[0-9]+:[0-9]+\]}}, 0xb
-; SI: s_mov_b64 s{{\[}}[[LOSDATA:[0-9]+]]:[[HISDATA:[0-9]+]]{{\]}}, 9
-; SI-DAG: v_mov_b32_e32 v[[LOVDATA:[0-9]+]], s[[LOSDATA]]
-; SI-DAG: v_mov_b32_e32 v[[HIVDATA:[0-9]+]], s[[HISDATA]]
-; SI-DAG: v_mov_b32_e32 [[VPTR:v[0-9]+]], [[PTR]]
-; SI: ds_add_rtn_u64 [[RESULT:v\[[0-9]+:[0-9]+\]]], [[VPTR]], v{{\[}}[[LOVDATA]]:[[HIVDATA]]{{\]}} offset:32 [M0]
-; SI: buffer_store_dwordx2 [[RESULT]],
-; SI: s_endpgm
+; VI: s_load_dword [[PTR:s[0-9]+]], s{{\[[0-9]+:[0-9]+\]}}, 0x2c
+; GCN-DAG: v_mov_b32_e32 [[VPTR:v[0-9]+]], [[PTR]]
+; GCN: ds_add_rtn_u64 [[RESULT:v\[[0-9]+:[0-9]+\]]], [[VPTR]], v{{\[}}[[LOVDATA]]:[[HIVDATA]]{{\]}} offset:32
+; GCN: buffer_store_dwordx2 [[RESULT]],
+; GCN: s_endpgm
define void @lds_atomic_add_ret_i64_offset(i64 addrspace(1)* %out, i64 addrspace(3)* %ptr) nounwind {
%gep = getelementptr i64 addrspace(3)* %ptr, i64 4
%result = atomicrmw add i64 addrspace(3)* %gep, i64 9 seq_cst
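; Two encodings worth spelling out for the checks above. First, the 64-bit
; constant 9 is materialized as two 32-bit halves (lo 9, hi 0), since there is
; no 64-bit VGPR move. Second, the s_load_dword offsets differ only in scale:
; SI SMRD offsets are reportedly counted in dwords and VI SMEM offsets in
; bytes, so the same kernel-argument slot is 0xb on SI and 0x2c on VI:
;   0xb dwords * 4 bytes = 44 bytes = 0x2c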
@@ -45,12 +46,11 @@ define void @lds_atomic_add_ret_i64_offset(i64 addrspace(1)* %out, i64 addrspace
}
; FUNC-LABEL: {{^}}lds_atomic_inc_ret_i64:
-; SI: s_mov_b64 s{{\[}}[[LOSDATA:[0-9]+]]:[[HISDATA:[0-9]+]]{{\]}}, -1
-; SI-DAG: v_mov_b32_e32 v[[LOVDATA:[0-9]+]], s[[LOSDATA]]
-; SI-DAG: v_mov_b32_e32 v[[HIVDATA:[0-9]+]], s[[HISDATA]]
-; SI: ds_inc_rtn_u64 [[RESULT:v\[[0-9]+:[0-9]+\]]], [[VPTR]], v{{\[}}[[LOVDATA]]:[[HIVDATA]]{{\]}}
-; SI: buffer_store_dwordx2 [[RESULT]],
-; SI: s_endpgm
+; GCN: v_mov_b32_e32 v[[LOVDATA:[0-9]+]], -1
+; GCN: v_mov_b32_e32 v[[HIVDATA:[0-9]+]], -1
+; GCN: ds_inc_rtn_u64 [[RESULT:v\[[0-9]+:[0-9]+\]]], [[VPTR]], v{{\[}}[[LOVDATA]]:[[HIVDATA]]{{\]}}
+; GCN: buffer_store_dwordx2 [[RESULT]],
+; GCN: s_endpgm
define void @lds_atomic_inc_ret_i64(i64 addrspace(1)* %out, i64 addrspace(3)* %ptr) nounwind {
%result = atomicrmw add i64 addrspace(3)* %ptr, i64 1 seq_cst
store i64 %result, i64 addrspace(1)* %out, align 8
@@ -58,8 +58,8 @@ define void @lds_atomic_inc_ret_i64(i64 addrspace(1)* %out, i64 addrspace(3)* %p
}
; FUNC-LABEL: {{^}}lds_atomic_inc_ret_i64_offset:
-; SI: ds_inc_rtn_u64 {{.*}} offset:32
-; SI: s_endpgm
+; GCN: ds_inc_rtn_u64 {{.*}} offset:32
+; GCN: s_endpgm
define void @lds_atomic_inc_ret_i64_offset(i64 addrspace(1)* %out, i64 addrspace(3)* %ptr) nounwind {
%gep = getelementptr i64 addrspace(3)* %ptr, i32 4
%result = atomicrmw add i64 addrspace(3)* %gep, i64 1 seq_cst
@@ -68,8 +68,8 @@ define void @lds_atomic_inc_ret_i64_offset(i64 addrspace(1)* %out, i64 addrspace
}
; FUNC-LABEL: {{^}}lds_atomic_sub_ret_i64:
-; SI: ds_sub_rtn_u64
-; SI: s_endpgm
+; GCN: ds_sub_rtn_u64
+; GCN: s_endpgm
define void @lds_atomic_sub_ret_i64(i64 addrspace(1)* %out, i64 addrspace(3)* %ptr) nounwind {
%result = atomicrmw sub i64 addrspace(3)* %ptr, i64 4 seq_cst
store i64 %result, i64 addrspace(1)* %out, align 8
@@ -77,8 +77,8 @@ define void @lds_atomic_sub_ret_i64(i64 addrspace(1)* %out, i64 addrspace(3)* %p
}
; FUNC-LABEL: {{^}}lds_atomic_sub_ret_i64_offset:
-; SI: ds_sub_rtn_u64 {{.*}} offset:32
-; SI: s_endpgm
+; GCN: ds_sub_rtn_u64 {{.*}} offset:32
+; GCN: s_endpgm
define void @lds_atomic_sub_ret_i64_offset(i64 addrspace(1)* %out, i64 addrspace(3)* %ptr) nounwind {
%gep = getelementptr i64 addrspace(3)* %ptr, i32 4
%result = atomicrmw sub i64 addrspace(3)* %gep, i64 4 seq_cst
@@ -87,12 +87,11 @@ define void @lds_atomic_sub_ret_i64_offset(i64 addrspace(1)* %out, i64 addrspace
}
; FUNC-LABEL: {{^}}lds_atomic_dec_ret_i64:
-; SI: s_mov_b64 s{{\[}}[[LOSDATA:[0-9]+]]:[[HISDATA:[0-9]+]]{{\]}}, -1
-; SI-DAG: v_mov_b32_e32 v[[LOVDATA:[0-9]+]], s[[LOSDATA]]
-; SI-DAG: v_mov_b32_e32 v[[HIVDATA:[0-9]+]], s[[HISDATA]]
-; SI: ds_dec_rtn_u64 [[RESULT:v\[[0-9]+:[0-9]+\]]], [[VPTR]], v{{\[}}[[LOVDATA]]:[[HIVDATA]]{{\]}}
-; SI: buffer_store_dwordx2 [[RESULT]],
-; SI: s_endpgm
+; GCN: v_mov_b32_e32 v[[LOVDATA:[0-9]+]], -1
+; GCN: v_mov_b32_e32 v[[HIVDATA:[0-9]+]], -1
+; GCN: ds_dec_rtn_u64 [[RESULT:v\[[0-9]+:[0-9]+\]]], [[VPTR]], v{{\[}}[[LOVDATA]]:[[HIVDATA]]{{\]}}
+; GCN: buffer_store_dwordx2 [[RESULT]],
+; GCN: s_endpgm
define void @lds_atomic_dec_ret_i64(i64 addrspace(1)* %out, i64 addrspace(3)* %ptr) nounwind {
%result = atomicrmw sub i64 addrspace(3)* %ptr, i64 1 seq_cst
store i64 %result, i64 addrspace(1)* %out, align 8
@@ -100,8 +99,8 @@ define void @lds_atomic_dec_ret_i64(i64 addrspace(1)* %out, i64 addrspace(3)* %p
}
; FUNC-LABEL: {{^}}lds_atomic_dec_ret_i64_offset:
-; SI: ds_dec_rtn_u64 {{.*}} offset:32
-; SI: s_endpgm
+; GCN: ds_dec_rtn_u64 {{.*}} offset:32
+; GCN: s_endpgm
define void @lds_atomic_dec_ret_i64_offset(i64 addrspace(1)* %out, i64 addrspace(3)* %ptr) nounwind {
%gep = getelementptr i64 addrspace(3)* %ptr, i32 4
%result = atomicrmw sub i64 addrspace(3)* %gep, i64 1 seq_cst
@@ -110,8 +109,8 @@ define void @lds_atomic_dec_ret_i64_offset(i64 addrspace(1)* %out, i64 addrspace
}
; FUNC-LABEL: {{^}}lds_atomic_and_ret_i64:
-; SI: ds_and_rtn_b64
-; SI: s_endpgm
+; GCN: ds_and_rtn_b64
+; GCN: s_endpgm
define void @lds_atomic_and_ret_i64(i64 addrspace(1)* %out, i64 addrspace(3)* %ptr) nounwind {
%result = atomicrmw and i64 addrspace(3)* %ptr, i64 4 seq_cst
store i64 %result, i64 addrspace(1)* %out, align 8
@@ -119,8 +118,8 @@ define void @lds_atomic_and_ret_i64(i64 addrspace(1)* %out, i64 addrspace(3)* %p
}
; FUNC-LABEL: {{^}}lds_atomic_and_ret_i64_offset:
-; SI: ds_and_rtn_b64 {{.*}} offset:32
-; SI: s_endpgm
+; GCN: ds_and_rtn_b64 {{.*}} offset:32
+; GCN: s_endpgm
define void @lds_atomic_and_ret_i64_offset(i64 addrspace(1)* %out, i64 addrspace(3)* %ptr) nounwind {
%gep = getelementptr i64 addrspace(3)* %ptr, i32 4
%result = atomicrmw and i64 addrspace(3)* %gep, i64 4 seq_cst
@@ -129,8 +128,8 @@ define void @lds_atomic_and_ret_i64_offset(i64 addrspace(1)* %out, i64 addrspace
}
; FUNC-LABEL: {{^}}lds_atomic_or_ret_i64:
-; SI: ds_or_rtn_b64
-; SI: s_endpgm
+; GCN: ds_or_rtn_b64
+; GCN: s_endpgm
define void @lds_atomic_or_ret_i64(i64 addrspace(1)* %out, i64 addrspace(3)* %ptr) nounwind {
%result = atomicrmw or i64 addrspace(3)* %ptr, i64 4 seq_cst
store i64 %result, i64 addrspace(1)* %out, align 8
@@ -138,8 +137,8 @@ define void @lds_atomic_or_ret_i64(i64 addrspace(1)* %out, i64 addrspace(3)* %pt
}
; FUNC-LABEL: {{^}}lds_atomic_or_ret_i64_offset:
-; SI: ds_or_rtn_b64 {{.*}} offset:32
-; SI: s_endpgm
+; GCN: ds_or_rtn_b64 {{.*}} offset:32
+; GCN: s_endpgm
define void @lds_atomic_or_ret_i64_offset(i64 addrspace(1)* %out, i64 addrspace(3)* %ptr) nounwind {
%gep = getelementptr i64 addrspace(3)* %ptr, i32 4
%result = atomicrmw or i64 addrspace(3)* %gep, i64 4 seq_cst
@@ -148,8 +147,8 @@ define void @lds_atomic_or_ret_i64_offset(i64 addrspace(1)* %out, i64 addrspace(
}
; FUNC-LABEL: {{^}}lds_atomic_xor_ret_i64:
-; SI: ds_xor_rtn_b64
-; SI: s_endpgm
+; GCN: ds_xor_rtn_b64
+; GCN: s_endpgm
define void @lds_atomic_xor_ret_i64(i64 addrspace(1)* %out, i64 addrspace(3)* %ptr) nounwind {
%result = atomicrmw xor i64 addrspace(3)* %ptr, i64 4 seq_cst
store i64 %result, i64 addrspace(1)* %out, align 8
@@ -157,8 +156,8 @@ define void @lds_atomic_xor_ret_i64(i64 addrspace(1)* %out, i64 addrspace(3)* %p
}
; FUNC-LABEL: {{^}}lds_atomic_xor_ret_i64_offset:
-; SI: ds_xor_rtn_b64 {{.*}} offset:32
-; SI: s_endpgm
+; GCN: ds_xor_rtn_b64 {{.*}} offset:32
+; GCN: s_endpgm
define void @lds_atomic_xor_ret_i64_offset(i64 addrspace(1)* %out, i64 addrspace(3)* %ptr) nounwind {
%gep = getelementptr i64 addrspace(3)* %ptr, i32 4
%result = atomicrmw xor i64 addrspace(3)* %gep, i64 4 seq_cst
@@ -175,8 +174,8 @@ define void @lds_atomic_xor_ret_i64_offset(i64 addrspace(1)* %out, i64 addrspace
; }
; FUNC-LABEL: {{^}}lds_atomic_min_ret_i64:
-; SI: ds_min_rtn_i64
-; SI: s_endpgm
+; GCN: ds_min_rtn_i64
+; GCN: s_endpgm
define void @lds_atomic_min_ret_i64(i64 addrspace(1)* %out, i64 addrspace(3)* %ptr) nounwind {
%result = atomicrmw min i64 addrspace(3)* %ptr, i64 4 seq_cst
store i64 %result, i64 addrspace(1)* %out, align 8
@@ -184,8 +183,8 @@ define void @lds_atomic_min_ret_i64(i64 addrspace(1)* %out, i64 addrspace(3)* %p
}
; FUNC-LABEL: {{^}}lds_atomic_min_ret_i64_offset:
-; SI: ds_min_rtn_i64 {{.*}} offset:32
-; SI: s_endpgm
+; GCN: ds_min_rtn_i64 {{.*}} offset:32
+; GCN: s_endpgm
define void @lds_atomic_min_ret_i64_offset(i64 addrspace(1)* %out, i64 addrspace(3)* %ptr) nounwind {
%gep = getelementptr i64 addrspace(3)* %ptr, i32 4
%result = atomicrmw min i64 addrspace(3)* %gep, i64 4 seq_cst
@@ -194,8 +193,8 @@ define void @lds_atomic_min_ret_i64_offset(i64 addrspace(1)* %out, i64 addrspace
}
; FUNC-LABEL: {{^}}lds_atomic_max_ret_i64:
-; SI: ds_max_rtn_i64
-; SI: s_endpgm
+; GCN: ds_max_rtn_i64
+; GCN: s_endpgm
define void @lds_atomic_max_ret_i64(i64 addrspace(1)* %out, i64 addrspace(3)* %ptr) nounwind {
%result = atomicrmw max i64 addrspace(3)* %ptr, i64 4 seq_cst
store i64 %result, i64 addrspace(1)* %out, align 8
@@ -203,8 +202,8 @@ define void @lds_atomic_max_ret_i64(i64 addrspace(1)* %out, i64 addrspace(3)* %p
}
; FUNC-LABEL: {{^}}lds_atomic_max_ret_i64_offset:
-; SI: ds_max_rtn_i64 {{.*}} offset:32
-; SI: s_endpgm
+; GCN: ds_max_rtn_i64 {{.*}} offset:32
+; GCN: s_endpgm
define void @lds_atomic_max_ret_i64_offset(i64 addrspace(1)* %out, i64 addrspace(3)* %ptr) nounwind {
%gep = getelementptr i64 addrspace(3)* %ptr, i32 4
%result = atomicrmw max i64 addrspace(3)* %gep, i64 4 seq_cst
@@ -213,8 +212,8 @@ define void @lds_atomic_max_ret_i64_offset(i64 addrspace(1)* %out, i64 addrspace
}
; FUNC-LABEL: {{^}}lds_atomic_umin_ret_i64:
-; SI: ds_min_rtn_u64
-; SI: s_endpgm
+; GCN: ds_min_rtn_u64
+; GCN: s_endpgm
define void @lds_atomic_umin_ret_i64(i64 addrspace(1)* %out, i64 addrspace(3)* %ptr) nounwind {
%result = atomicrmw umin i64 addrspace(3)* %ptr, i64 4 seq_cst
store i64 %result, i64 addrspace(1)* %out, align 8
@@ -222,8 +221,8 @@ define void @lds_atomic_umin_ret_i64(i64 addrspace(1)* %out, i64 addrspace(3)* %
}
; FUNC-LABEL: {{^}}lds_atomic_umin_ret_i64_offset:
-; SI: ds_min_rtn_u64 {{.*}} offset:32
-; SI: s_endpgm
+; GCN: ds_min_rtn_u64 {{.*}} offset:32
+; GCN: s_endpgm
define void @lds_atomic_umin_ret_i64_offset(i64 addrspace(1)* %out, i64 addrspace(3)* %ptr) nounwind {
%gep = getelementptr i64 addrspace(3)* %ptr, i32 4
%result = atomicrmw umin i64 addrspace(3)* %gep, i64 4 seq_cst
@@ -232,8 +231,8 @@ define void @lds_atomic_umin_ret_i64_offset(i64 addrspace(1)* %out, i64 addrspac
}
; FUNC-LABEL: {{^}}lds_atomic_umax_ret_i64:
-; SI: ds_max_rtn_u64
-; SI: s_endpgm
+; GCN: ds_max_rtn_u64
+; GCN: s_endpgm
define void @lds_atomic_umax_ret_i64(i64 addrspace(1)* %out, i64 addrspace(3)* %ptr) nounwind {
%result = atomicrmw umax i64 addrspace(3)* %ptr, i64 4 seq_cst
store i64 %result, i64 addrspace(1)* %out, align 8
@@ -241,8 +240,8 @@ define void @lds_atomic_umax_ret_i64(i64 addrspace(1)* %out, i64 addrspace(3)* %
}
; FUNC-LABEL: {{^}}lds_atomic_umax_ret_i64_offset:
-; SI: ds_max_rtn_u64 {{.*}} offset:32
-; SI: s_endpgm
+; GCN: ds_max_rtn_u64 {{.*}} offset:32
+; GCN: s_endpgm
define void @lds_atomic_umax_ret_i64_offset(i64 addrspace(1)* %out, i64 addrspace(3)* %ptr) nounwind {
%gep = getelementptr i64 addrspace(3)* %ptr, i32 4
%result = atomicrmw umax i64 addrspace(3)* %gep, i64 4 seq_cst
@@ -251,16 +250,16 @@ define void @lds_atomic_umax_ret_i64_offset(i64 addrspace(1)* %out, i64 addrspac
}
; FUNC-LABEL: {{^}}lds_atomic_xchg_noret_i64:
-; SI: ds_wrxchg_rtn_b64
-; SI: s_endpgm
+; GCN: ds_wrxchg_rtn_b64
+; GCN: s_endpgm
define void @lds_atomic_xchg_noret_i64(i64 addrspace(3)* %ptr) nounwind {
%result = atomicrmw xchg i64 addrspace(3)* %ptr, i64 4 seq_cst
ret void
}
; FUNC-LABEL: {{^}}lds_atomic_xchg_noret_i64_offset:
-; SI: ds_wrxchg_rtn_b64 {{.*}} offset:32
-; SI: s_endpgm
+; GCN: ds_wrxchg_rtn_b64 {{.*}} offset:32
+; GCN: s_endpgm
define void @lds_atomic_xchg_noret_i64_offset(i64 addrspace(3)* %ptr) nounwind {
%gep = getelementptr i64 addrspace(3)* %ptr, i32 4
%result = atomicrmw xchg i64 addrspace(3)* %gep, i64 4 seq_cst
@@ -268,8 +267,8 @@ define void @lds_atomic_xchg_noret_i64_offset(i64 addrspace(3)* %ptr) nounwind {
}
; FUNC-LABEL: {{^}}lds_atomic_add_noret_i64:
-; SI: ds_add_u64
-; SI: s_endpgm
+; GCN: ds_add_u64
+; GCN: s_endpgm
define void @lds_atomic_add_noret_i64(i64 addrspace(3)* %ptr) nounwind {
%result = atomicrmw add i64 addrspace(3)* %ptr, i64 4 seq_cst
ret void
@@ -277,12 +276,12 @@ define void @lds_atomic_add_noret_i64(i64 addrspace(3)* %ptr) nounwind {
; FUNC-LABEL: {{^}}lds_atomic_add_noret_i64_offset:
; SI: s_load_dword [[PTR:s[0-9]+]], s{{\[[0-9]+:[0-9]+\]}}, 0x9
-; SI: s_mov_b64 s{{\[}}[[LOSDATA:[0-9]+]]:[[HISDATA:[0-9]+]]{{\]}}, 9
-; SI-DAG: v_mov_b32_e32 v[[LOVDATA:[0-9]+]], s[[LOSDATA]]
-; SI-DAG: v_mov_b32_e32 v[[HIVDATA:[0-9]+]], s[[HISDATA]]
-; SI-DAG: v_mov_b32_e32 [[VPTR:v[0-9]+]], [[PTR]]
-; SI: ds_add_u64 [[VPTR]], v{{\[}}[[LOVDATA]]:[[HIVDATA]]{{\]}} offset:32 [M0]
-; SI: s_endpgm
+; VI: s_load_dword [[PTR:s[0-9]+]], s{{\[[0-9]+:[0-9]+\]}}, 0x24
+; GCN: v_mov_b32_e32 v[[LOVDATA:[0-9]+]], 9
+; GCN: v_mov_b32_e32 v[[HIVDATA:[0-9]+]], 0
+; GCN: v_mov_b32_e32 [[VPTR:v[0-9]+]], [[PTR]]
+; GCN: ds_add_u64 [[VPTR]], v{{\[}}[[LOVDATA]]:[[HIVDATA]]{{\]}} offset:32
+; GCN: s_endpgm
define void @lds_atomic_add_noret_i64_offset(i64 addrspace(3)* %ptr) nounwind {
%gep = getelementptr i64 addrspace(3)* %ptr, i64 4
%result = atomicrmw add i64 addrspace(3)* %gep, i64 9 seq_cst
@@ -290,19 +289,18 @@ define void @lds_atomic_add_noret_i64_offset(i64 addrspace(3)* %ptr) nounwind {
}
; FUNC-LABEL: {{^}}lds_atomic_inc_noret_i64:
-; SI: s_mov_b64 s{{\[}}[[LOSDATA:[0-9]+]]:[[HISDATA:[0-9]+]]{{\]}}, -1
-; SI-DAG: v_mov_b32_e32 v[[LOVDATA:[0-9]+]], s[[LOSDATA]]
-; SI-DAG: v_mov_b32_e32 v[[HIVDATA:[0-9]+]], s[[HISDATA]]
-; SI: ds_inc_u64 [[VPTR]], v{{\[}}[[LOVDATA]]:[[HIVDATA]]{{\]}}
-; SI: s_endpgm
+; GCN: v_mov_b32_e32 v[[LOVDATA:[0-9]+]], -1
+; GCN: v_mov_b32_e32 v[[HIVDATA:[0-9]+]], -1
+; GCN: ds_inc_u64 [[VPTR]], v{{\[}}[[LOVDATA]]:[[HIVDATA]]{{\]}}
+; GCN: s_endpgm
define void @lds_atomic_inc_noret_i64(i64 addrspace(3)* %ptr) nounwind {
%result = atomicrmw add i64 addrspace(3)* %ptr, i64 1 seq_cst
ret void
}
; FUNC-LABEL: {{^}}lds_atomic_inc_noret_i64_offset:
-; SI: ds_inc_u64 {{.*}} offset:32
-; SI: s_endpgm
+; GCN: ds_inc_u64 {{.*}} offset:32
+; GCN: s_endpgm
define void @lds_atomic_inc_noret_i64_offset(i64 addrspace(3)* %ptr) nounwind {
%gep = getelementptr i64 addrspace(3)* %ptr, i32 4
%result = atomicrmw add i64 addrspace(3)* %gep, i64 1 seq_cst
@@ -310,16 +308,16 @@ define void @lds_atomic_inc_noret_i64_offset(i64 addrspace(3)* %ptr) nounwind {
}
; FUNC-LABEL: {{^}}lds_atomic_sub_noret_i64:
-; SI: ds_sub_u64
-; SI: s_endpgm
+; GCN: ds_sub_u64
+; GCN: s_endpgm
define void @lds_atomic_sub_noret_i64(i64 addrspace(3)* %ptr) nounwind {
%result = atomicrmw sub i64 addrspace(3)* %ptr, i64 4 seq_cst
ret void
}
; FUNC-LABEL: {{^}}lds_atomic_sub_noret_i64_offset:
-; SI: ds_sub_u64 {{.*}} offset:32
-; SI: s_endpgm
+; GCN: ds_sub_u64 {{.*}} offset:32
+; GCN: s_endpgm
define void @lds_atomic_sub_noret_i64_offset(i64 addrspace(3)* %ptr) nounwind {
%gep = getelementptr i64 addrspace(3)* %ptr, i32 4
%result = atomicrmw sub i64 addrspace(3)* %gep, i64 4 seq_cst
@@ -327,19 +325,18 @@ define void @lds_atomic_sub_noret_i64_offset(i64 addrspace(3)* %ptr) nounwind {
}
; FUNC-LABEL: {{^}}lds_atomic_dec_noret_i64:
-; SI: s_mov_b64 s{{\[}}[[LOSDATA:[0-9]+]]:[[HISDATA:[0-9]+]]{{\]}}, -1
-; SI-DAG: v_mov_b32_e32 v[[LOVDATA:[0-9]+]], s[[LOSDATA]]
-; SI-DAG: v_mov_b32_e32 v[[HIVDATA:[0-9]+]], s[[HISDATA]]
-; SI: ds_dec_u64 [[VPTR]], v{{\[}}[[LOVDATA]]:[[HIVDATA]]{{\]}}
-; SI: s_endpgm
+; GCN: v_mov_b32_e32 v[[LOVDATA:[0-9]+]], -1
+; GCN: v_mov_b32_e32 v[[HIVDATA:[0-9]+]], -1
+; GCN: ds_dec_u64 [[VPTR]], v{{\[}}[[LOVDATA]]:[[HIVDATA]]{{\]}}
+; GCN: s_endpgm
define void @lds_atomic_dec_noret_i64(i64 addrspace(3)* %ptr) nounwind {
%result = atomicrmw sub i64 addrspace(3)* %ptr, i64 1 seq_cst
ret void
}
; FUNC-LABEL: {{^}}lds_atomic_dec_noret_i64_offset:
-; SI: ds_dec_u64 {{.*}} offset:32
-; SI: s_endpgm
+; GCN: ds_dec_u64 {{.*}} offset:32
+; GCN: s_endpgm
define void @lds_atomic_dec_noret_i64_offset(i64 addrspace(3)* %ptr) nounwind {
%gep = getelementptr i64 addrspace(3)* %ptr, i32 4
%result = atomicrmw sub i64 addrspace(3)* %gep, i64 1 seq_cst
@@ -347,16 +344,16 @@ define void @lds_atomic_dec_noret_i64_offset(i64 addrspace(3)* %ptr) nounwind {
}
; FUNC-LABEL: {{^}}lds_atomic_and_noret_i64:
-; SI: ds_and_b64
-; SI: s_endpgm
+; GCN: ds_and_b64
+; GCN: s_endpgm
define void @lds_atomic_and_noret_i64(i64 addrspace(3)* %ptr) nounwind {
%result = atomicrmw and i64 addrspace(3)* %ptr, i64 4 seq_cst
ret void
}
; FUNC-LABEL: {{^}}lds_atomic_and_noret_i64_offset:
-; SI: ds_and_b64 {{.*}} offset:32
-; SI: s_endpgm
+; GCN: ds_and_b64 {{.*}} offset:32
+; GCN: s_endpgm
define void @lds_atomic_and_noret_i64_offset(i64 addrspace(3)* %ptr) nounwind {
%gep = getelementptr i64 addrspace(3)* %ptr, i32 4
%result = atomicrmw and i64 addrspace(3)* %gep, i64 4 seq_cst
@@ -364,16 +361,16 @@ define void @lds_atomic_and_noret_i64_offset(i64 addrspace(3)* %ptr) nounwind {
}
; FUNC-LABEL: {{^}}lds_atomic_or_noret_i64:
-; SI: ds_or_b64
-; SI: s_endpgm
+; GCN: ds_or_b64
+; GCN: s_endpgm
define void @lds_atomic_or_noret_i64(i64 addrspace(3)* %ptr) nounwind {
%result = atomicrmw or i64 addrspace(3)* %ptr, i64 4 seq_cst
ret void
}
; FUNC-LABEL: {{^}}lds_atomic_or_noret_i64_offset:
-; SI: ds_or_b64 {{.*}} offset:32
-; SI: s_endpgm
+; GCN: ds_or_b64 {{.*}} offset:32
+; GCN: s_endpgm
define void @lds_atomic_or_noret_i64_offset(i64 addrspace(3)* %ptr) nounwind {
%gep = getelementptr i64 addrspace(3)* %ptr, i32 4
%result = atomicrmw or i64 addrspace(3)* %gep, i64 4 seq_cst
@@ -381,16 +378,16 @@ define void @lds_atomic_or_noret_i64_offset(i64 addrspace(3)* %ptr) nounwind {
}
; FUNC-LABEL: {{^}}lds_atomic_xor_noret_i64:
-; SI: ds_xor_b64
-; SI: s_endpgm
+; GCN: ds_xor_b64
+; GCN: s_endpgm
define void @lds_atomic_xor_noret_i64(i64 addrspace(3)* %ptr) nounwind {
%result = atomicrmw xor i64 addrspace(3)* %ptr, i64 4 seq_cst
ret void
}
; FUNC-LABEL: {{^}}lds_atomic_xor_noret_i64_offset:
-; SI: ds_xor_b64 {{.*}} offset:32
-; SI: s_endpgm
+; GCN: ds_xor_b64 {{.*}} offset:32
+; GCN: s_endpgm
define void @lds_atomic_xor_noret_i64_offset(i64 addrspace(3)* %ptr) nounwind {
%gep = getelementptr i64 addrspace(3)* %ptr, i32 4
%result = atomicrmw xor i64 addrspace(3)* %gep, i64 4 seq_cst
@@ -405,16 +402,16 @@ define void @lds_atomic_xor_noret_i64_offset(i64 addrspace(3)* %ptr) nounwind {
; }
; FUNC-LABEL: {{^}}lds_atomic_min_noret_i64:
-; SI: ds_min_i64
-; SI: s_endpgm
+; GCN: ds_min_i64
+; GCN: s_endpgm
define void @lds_atomic_min_noret_i64(i64 addrspace(3)* %ptr) nounwind {
%result = atomicrmw min i64 addrspace(3)* %ptr, i64 4 seq_cst
ret void
}
; FUNC-LABEL: {{^}}lds_atomic_min_noret_i64_offset:
-; SI: ds_min_i64 {{.*}} offset:32
-; SI: s_endpgm
+; GCN: ds_min_i64 {{.*}} offset:32
+; GCN: s_endpgm
define void @lds_atomic_min_noret_i64_offset(i64 addrspace(3)* %ptr) nounwind {
%gep = getelementptr i64 addrspace(3)* %ptr, i32 4
%result = atomicrmw min i64 addrspace(3)* %gep, i64 4 seq_cst
@@ -422,16 +419,16 @@ define void @lds_atomic_min_noret_i64_offset(i64 addrspace(3)* %ptr) nounwind {
}
; FUNC-LABEL: {{^}}lds_atomic_max_noret_i64:
-; SI: ds_max_i64
-; SI: s_endpgm
+; GCN: ds_max_i64
+; GCN: s_endpgm
define void @lds_atomic_max_noret_i64(i64 addrspace(3)* %ptr) nounwind {
%result = atomicrmw max i64 addrspace(3)* %ptr, i64 4 seq_cst
ret void
}
; FUNC-LABEL: {{^}}lds_atomic_max_noret_i64_offset:
-; SI: ds_max_i64 {{.*}} offset:32
-; SI: s_endpgm
+; GCN: ds_max_i64 {{.*}} offset:32
+; GCN: s_endpgm
define void @lds_atomic_max_noret_i64_offset(i64 addrspace(3)* %ptr) nounwind {
%gep = getelementptr i64 addrspace(3)* %ptr, i32 4
%result = atomicrmw max i64 addrspace(3)* %gep, i64 4 seq_cst
@@ -439,16 +436,16 @@ define void @lds_atomic_max_noret_i64_offset(i64 addrspace(3)* %ptr) nounwind {
}
; FUNC-LABEL: {{^}}lds_atomic_umin_noret_i64:
-; SI: ds_min_u64
-; SI: s_endpgm
+; GCN: ds_min_u64
+; GCN: s_endpgm
define void @lds_atomic_umin_noret_i64(i64 addrspace(3)* %ptr) nounwind {
%result = atomicrmw umin i64 addrspace(3)* %ptr, i64 4 seq_cst
ret void
}
; FUNC-LABEL: {{^}}lds_atomic_umin_noret_i64_offset:
-; SI: ds_min_u64 {{.*}} offset:32
-; SI: s_endpgm
+; GCN: ds_min_u64 {{.*}} offset:32
+; GCN: s_endpgm
define void @lds_atomic_umin_noret_i64_offset(i64 addrspace(3)* %ptr) nounwind {
%gep = getelementptr i64 addrspace(3)* %ptr, i32 4
%result = atomicrmw umin i64 addrspace(3)* %gep, i64 4 seq_cst
@@ -456,16 +453,16 @@ define void @lds_atomic_umin_noret_i64_offset(i64 addrspace(3)* %ptr) nounwind {
}
; FUNC-LABEL: {{^}}lds_atomic_umax_noret_i64:
-; SI: ds_max_u64
-; SI: s_endpgm
+; GCN: ds_max_u64
+; GCN: s_endpgm
define void @lds_atomic_umax_noret_i64(i64 addrspace(3)* %ptr) nounwind {
%result = atomicrmw umax i64 addrspace(3)* %ptr, i64 4 seq_cst
ret void
}
; FUNC-LABEL: {{^}}lds_atomic_umax_noret_i64_offset:
-; SI: ds_max_u64 {{.*}} offset:32
-; SI: s_endpgm
+; GCN: ds_max_u64 {{.*}} offset:32
+; GCN: s_endpgm
define void @lds_atomic_umax_noret_i64_offset(i64 addrspace(3)* %ptr) nounwind {
%gep = getelementptr i64 addrspace(3)* %ptr, i32 4
%result = atomicrmw umax i64 addrspace(3)* %gep, i64 4 seq_cst
diff --git a/test/CodeGen/R600/local-memory-two-objects.ll b/test/CodeGen/R600/local-memory-two-objects.ll
index 88ef05d..3d90ab1 100644
--- a/test/CodeGen/R600/local-memory-two-objects.ll
+++ b/test/CodeGen/R600/local-memory-two-objects.ll
@@ -1,38 +1,38 @@
-; RUN: llc < %s -march=r600 -mcpu=redwood | FileCheck --check-prefix=EG-CHECK %s
-; RUN: llc < %s -march=r600 -mcpu=verde -verify-machineinstrs | FileCheck --check-prefix=SI-CHECK --check-prefix=SI %s
-; RUN: llc < %s -march=r600 -mcpu=bonaire -verify-machineinstrs | FileCheck --check-prefix=SI-CHECK --check-prefix=CI %s
+; RUN: llc < %s -march=r600 -mcpu=redwood | FileCheck --check-prefix=EG %s
+; RUN: llc < %s -march=amdgcn -mcpu=verde -verify-machineinstrs | FileCheck --check-prefix=GCN --check-prefix=SI %s
+; RUN: llc < %s -march=amdgcn -mcpu=bonaire -verify-machineinstrs | FileCheck --check-prefix=GCN --check-prefix=CI %s
@local_memory_two_objects.local_mem0 = internal unnamed_addr addrspace(3) global [4 x i32] undef, align 4
@local_memory_two_objects.local_mem1 = internal unnamed_addr addrspace(3) global [4 x i32] undef, align 4
-; EG-CHECK: {{^}}local_memory_two_objects:
+; EG: {{^}}local_memory_two_objects:
; Check that the LDS size is emitted correctly.
-; EG-CHECK: .long 166120
-; EG-CHECK-NEXT: .long 8
-; SI-CHECK: .long 47180
-; SI-CHECK-NEXT: .long 32768
+; EG: .long 166120
+; EG-NEXT: .long 8
+; GCN: .long 47180
+; GCN-NEXT: .long 38792
; We would like to check that the lds writes are using different
; addresses, but due to variations in the scheduler, we can't do
; this consistently on evergreen GPUs.
-; EG-CHECK: LDS_WRITE
-; EG-CHECK: LDS_WRITE
-; SI-CHECK: ds_write_b32 {{v[0-9]*}}, v[[ADDRW:[0-9]*]]
-; SI-CHECK-NOT: ds_write_b32 {{v[0-9]*}}, v[[ADDRW]]
+; EG: LDS_WRITE
+; EG: LDS_WRITE
+; GCN: ds_write_b32 {{v[0-9]*}}, v[[ADDRW:[0-9]*]]
+; GCN-NOT: ds_write_b32 {{v[0-9]*}}, v[[ADDRW]]
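+; (The capture-and-NOT pair above approximates that check on GCN: the
+; first ds_write_b32 captures its operand as [[ADDRW]], and the GCN-NOT
+; line asserts that no other ds_write_b32 reuses it.)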
; GROUP_BARRIER must be the last instruction in a clause
-; EG-CHECK: GROUP_BARRIER
-; EG-CHECK-NEXT: ALU clause
+; EG: GROUP_BARRIER
+; EG-NEXT: ALU clause
; Make sure the lds reads are using different addresses, at different
; constant offsets.
-; EG-CHECK: LDS_READ_RET {{[*]*}} OQAP, {{PV|T}}[[ADDRR:[0-9]*\.[XYZW]]]
-; EG-CHECK-NOT: LDS_READ_RET {{[*]*}} OQAP, T[[ADDRR]]
+; EG: LDS_READ_RET {{[*]*}} OQAP, {{PV|T}}[[ADDRR:[0-9]*\.[XYZW]]]
+; EG-NOT: LDS_READ_RET {{[*]*}} OQAP, T[[ADDRR]]
; SI: v_add_i32_e32 [[SIPTR:v[0-9]+]], 16, v{{[0-9]+}}
-; SI: ds_read_b32 {{v[0-9]+}}, [[SIPTR]] [M0]
-; CI: ds_read_b32 {{v[0-9]+}}, [[ADDRR:v[0-9]+]] offset:16 [M0]
-; CI: ds_read_b32 {{v[0-9]+}}, [[ADDRR]] [M0]
+; SI: ds_read_b32 {{v[0-9]+}}, [[SIPTR]]
+; CI: ds_read_b32 {{v[0-9]+}}, [[ADDRR:v[0-9]+]]
+; CI: ds_read_b32 {{v[0-9]+}}, [[ADDRR]] offset:16
define void @local_memory_two_objects(i32 addrspace(1)* %out) {
entry:
diff --git a/test/CodeGen/R600/local-memory.ll b/test/CodeGen/R600/local-memory.ll
index 9b13cb2..68e72c5 100644
--- a/test/CodeGen/R600/local-memory.ll
+++ b/test/CodeGen/R600/local-memory.ll
@@ -1,6 +1,6 @@
; RUN: llc -march=r600 -mcpu=redwood < %s | FileCheck -check-prefix=EG -check-prefix=FUNC %s
-; RUN: llc -march=r600 -mcpu=verde -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
-; RUN: llc -march=r600 -mcpu=bonaire -verify-machineinstrs < %s | FileCheck -check-prefix=CI -check-prefix=FUNC %s
+; RUN: llc -march=amdgcn -mcpu=verde -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
+; RUN: llc -march=amdgcn -mcpu=bonaire -verify-machineinstrs < %s | FileCheck -check-prefix=CI -check-prefix=FUNC %s
@local_memory.local_mem = internal unnamed_addr addrspace(3) global [128 x i32] undef, align 4
@@ -10,9 +10,9 @@
; EG: .long 166120
; EG-NEXT: .long 128
; SI: .long 47180
-; SI-NEXT: .long 65536
+; SI-NEXT: .long 71560
; CI: .long 47180
-; CI-NEXT: .long 32768
+; CI-NEXT: .long 38792
; EG: LDS_WRITE
; SI-NOT: s_wqm_b64
diff --git a/test/CodeGen/R600/loop-address.ll b/test/CodeGen/R600/loop-address.ll
index b46d8e9..03e0f01 100644
--- a/test/CodeGen/R600/loop-address.ll
+++ b/test/CodeGen/R600/loop-address.ll
@@ -31,7 +31,7 @@ attributes #0 = { nounwind "fp-contract-model"="standard" "relocation-model"="pi
!opencl.kernels = !{!0, !1, !2, !3}
-!0 = metadata !{void (i32 addrspace(1)*, i32)* @loop_ge}
-!1 = metadata !{null}
-!2 = metadata !{null}
-!3 = metadata !{null}
+!0 = !{void (i32 addrspace(1)*, i32)* @loop_ge}
+!1 = !{null}
+!2 = !{null}
+!3 = !{null}
diff --git a/test/CodeGen/R600/loop-idiom.ll b/test/CodeGen/R600/loop-idiom.ll
index 0478bdb..a0b00ab 100644
--- a/test/CodeGen/R600/loop-idiom.ll
+++ b/test/CodeGen/R600/loop-idiom.ll
@@ -1,5 +1,6 @@
; RUN: opt -basicaa -loop-idiom -S < %s -march=r600 -mcpu=redwood | FileCheck --check-prefix=R600 --check-prefix=FUNC %s
-; RUN: opt -basicaa -loop-idiom -S < %s -march=r600 -mcpu=SI -verify-machineinstrs| FileCheck --check-prefix=SI --check-prefix=FUNC %s
+; RUN: opt -basicaa -loop-idiom -S < %s -march=amdgcn -mcpu=SI -verify-machineinstrs| FileCheck --check-prefix=SI --check-prefix=FUNC %s
+; RUN: opt -basicaa -loop-idiom -S < %s -march=amdgcn -mcpu=tonga -verify-machineinstrs| FileCheck --check-prefix=SI --check-prefix=FUNC %s
target datalayout = "e-p:32:32-p1:64:64-p2:64:64-p3:32:32-p4:32:32-p5:64:64-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128-v192:256-v256:256-v512:512-v1024:1024-v2048:2048-n32:64"
target triple = "r600--"
diff --git a/test/CodeGen/R600/lshl.ll b/test/CodeGen/R600/lshl.ll
index 9785866..9ac988d 100644
--- a/test/CodeGen/R600/lshl.ll
+++ b/test/CodeGen/R600/lshl.ll
@@ -1,4 +1,5 @@
-;RUN: llc < %s -march=r600 -mcpu=verde -verify-machineinstrs | FileCheck %s
+;RUN: llc < %s -march=amdgcn -mcpu=verde -verify-machineinstrs | FileCheck %s
+;RUN: llc < %s -march=amdgcn -mcpu=tonga -verify-machineinstrs | FileCheck %s
;CHECK: s_lshl_b32 s{{[0-9]}}, s{{[0-9]}}, 1
diff --git a/test/CodeGen/R600/lshr.ll b/test/CodeGen/R600/lshr.ll
index acfc1fd..50e444a 100644
--- a/test/CodeGen/R600/lshr.ll
+++ b/test/CodeGen/R600/lshr.ll
@@ -1,4 +1,5 @@
-;RUN: llc < %s -march=r600 -mcpu=verde -verify-machineinstrs | FileCheck %s
+;RUN: llc < %s -march=amdgcn -mcpu=verde -verify-machineinstrs | FileCheck %s
+;RUN: llc < %s -march=amdgcn -mcpu=tonga -verify-machineinstrs | FileCheck %s
;CHECK: s_lshr_b32 s{{[0-9]}}, s{{[0-9]}}, 1
diff --git a/test/CodeGen/R600/m0-spill.ll b/test/CodeGen/R600/m0-spill.ll
index a8b0e0d..4dade82 100644
--- a/test/CodeGen/R600/m0-spill.ll
+++ b/test/CodeGen/R600/m0-spill.ll
@@ -1,4 +1,5 @@
-; RUN: llc < %s -march=r600 -mcpu=SI -verify-machineinstrs | FileCheck %s
+; RUN: llc < %s -march=amdgcn -mcpu=SI -verify-machineinstrs | FileCheck %s
+; RUN: llc < %s -march=amdgcn -mcpu=tonga -verify-machineinstrs | FileCheck %s
@lds = external addrspace(3) global [64 x float]
diff --git a/test/CodeGen/R600/mad-combine.ll b/test/CodeGen/R600/mad-combine.ll
new file mode 100644
index 0000000..8c4e09b
--- /dev/null
+++ b/test/CodeGen/R600/mad-combine.ll
@@ -0,0 +1,567 @@
+; Make sure we still form mad, rather than fma, even when unsafe math or fp-contract is allowed.
+
+; RUN: llc -march=amdgcn -mcpu=tahiti -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=SI-STD -check-prefix=FUNC %s
+; RUN: llc -march=amdgcn -mcpu=tahiti -verify-machineinstrs -fp-contract=fast < %s | FileCheck -check-prefix=SI -check-prefix=SI-STD -check-prefix=FUNC %s
+; RUN: llc -march=amdgcn -mcpu=tahiti -verify-machineinstrs -enable-unsafe-fp-math < %s | FileCheck -check-prefix=SI -check-prefix=SI-STD -check-prefix=FUNC %s
+
+; Make sure we don't form mad when fp32 denormals are enabled.
+; RUN: llc -march=amdgcn -mcpu=tahiti -mattr=+fp32-denormals -fp-contract=fast -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=SI-DENORM -check-prefix=FUNC %s
+; RUN: llc -march=amdgcn -mcpu=verde -mattr=+fp32-denormals -fp-contract=fast -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=SI-DENORM-SLOWFMAF -check-prefix=FUNC %s
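+; (v_mad_f32 flushes fp32 denormals, so when +fp32-denormals is requested
+; the combine has to use v_fma_f32 instead, or fall back to a separate
+; v_mul/v_add pair on parts where fma is slow. This reading is inferred
+; from the SI-DENORM and SI-DENORM-SLOWFMAF check prefixes below.)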
+
+declare i32 @llvm.r600.read.tidig.x() #0
+declare float @llvm.fabs.f32(float) #0
+declare float @llvm.fma.f32(float, float, float) #0
+declare float @llvm.fmuladd.f32(float, float, float) #0
+
+; (fadd (fmul x, y), z) -> (fma x, y, z)
+; FUNC-LABEL: {{^}}combine_to_mad_f32_0:
+; SI-DAG: buffer_load_dword [[A:v[0-9]+]], v{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, 0 addr64{{$}}
+; SI-DAG: buffer_load_dword [[B:v[0-9]+]], v{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, 0 addr64 offset:4{{$}}
+; SI-DAG: buffer_load_dword [[C:v[0-9]+]], v{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, 0 addr64 offset:8{{$}}
+
+; SI-STD: v_mad_f32 [[RESULT:v[0-9]+]], [[A]], [[B]], [[C]]
+
+; SI-DENORM: v_fma_f32 [[RESULT:v[0-9]+]], [[A]], [[B]], [[C]]
+
+; SI-DENORM-SLOWFMAF-NOT: v_fma
+; SI-DENORM-SLOWFMAF-NOT: v_mad
+
+; SI-DENORM-SLOWFMAF: v_mul_f32_e32 [[TMP:v[0-9]+]], [[B]], [[A]]
+; SI-DENORM-SLOWFMAF: v_add_f32_e32 [[RESULT:v[0-9]+]], [[C]], [[TMP]]
+
+; SI: buffer_store_dword [[RESULT]]
+define void @combine_to_mad_f32_0(float addrspace(1)* noalias %out, float addrspace(1)* noalias %in) #1 {
+ %tid = tail call i32 @llvm.r600.read.tidig.x() #0
+ %gep.0 = getelementptr float addrspace(1)* %in, i32 %tid
+ %gep.1 = getelementptr float addrspace(1)* %gep.0, i32 1
+ %gep.2 = getelementptr float addrspace(1)* %gep.0, i32 2
+ %gep.out = getelementptr float addrspace(1)* %out, i32 %tid
+
+ %a = load float addrspace(1)* %gep.0
+ %b = load float addrspace(1)* %gep.1
+ %c = load float addrspace(1)* %gep.2
+
+ %mul = fmul float %a, %b
+ %fma = fadd float %mul, %c
+ store float %fma, float addrspace(1)* %gep.out
+ ret void
+}
+
+; (fadd (fmul x, y), z) -> (fma x, y, z)
+; FUNC-LABEL: {{^}}combine_to_mad_f32_0_2use:
+; SI-DAG: buffer_load_dword [[A:v[0-9]+]], v{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, 0 addr64{{$}}
+; SI-DAG: buffer_load_dword [[B:v[0-9]+]], v{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, 0 addr64 offset:4{{$}}
+; SI-DAG: buffer_load_dword [[C:v[0-9]+]], v{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, 0 addr64 offset:8{{$}}
+; SI-DAG: buffer_load_dword [[D:v[0-9]+]], v{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, 0 addr64 offset:12{{$}}
+
+; SI-STD-DAG: v_mad_f32 [[RESULT0:v[0-9]+]], [[A]], [[B]], [[C]]
+; SI-STD-DAG: v_mad_f32 [[RESULT1:v[0-9]+]], [[A]], [[B]], [[D]]
+
+; SI-DENORM-DAG: v_fma_f32 [[RESULT0:v[0-9]+]], [[A]], [[B]], [[C]]
+; SI-DENORM-DAG: v_fma_f32 [[RESULT1:v[0-9]+]], [[A]], [[B]], [[D]]
+
+; SI-DENORM-SLOWFMAF: v_mul_f32_e32 [[TMP:v[0-9]+]], [[B]], [[A]]
+; SI-DENORM-SLOWFMAF-DAG: v_add_f32_e32 [[RESULT0:v[0-9]+]], [[C]], [[TMP]]
+; SI-DENORM-SLOWFMAF-DAG: v_add_f32_e32 [[RESULT1:v[0-9]+]], [[D]], [[TMP]]
+
+; SI-DAG: buffer_store_dword [[RESULT0]], v{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, 0 addr64{{$}}
+; SI-DAG: buffer_store_dword [[RESULT1]], v{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, 0 addr64 offset:4{{$}}
+; SI: s_endpgm
+define void @combine_to_mad_f32_0_2use(float addrspace(1)* noalias %out, float addrspace(1)* noalias %in) #1 {
+ %tid = tail call i32 @llvm.r600.read.tidig.x() #0
+ %gep.0 = getelementptr float addrspace(1)* %in, i32 %tid
+ %gep.1 = getelementptr float addrspace(1)* %gep.0, i32 1
+ %gep.2 = getelementptr float addrspace(1)* %gep.0, i32 2
+ %gep.3 = getelementptr float addrspace(1)* %gep.0, i32 3
+ %gep.out.0 = getelementptr float addrspace(1)* %out, i32 %tid
+ %gep.out.1 = getelementptr float addrspace(1)* %gep.out.0, i32 1
+
+ %a = load float addrspace(1)* %gep.0
+ %b = load float addrspace(1)* %gep.1
+ %c = load float addrspace(1)* %gep.2
+ %d = load float addrspace(1)* %gep.3
+
+ %mul = fmul float %a, %b
+ %fma0 = fadd float %mul, %c
+ %fma1 = fadd float %mul, %d
+
+ store float %fma0, float addrspace(1)* %gep.out.0
+ store float %fma1, float addrspace(1)* %gep.out.1
+ ret void
+}
+
+; (fadd x, (fmul y, z)) -> (fma y, z, x)
+; FUNC-LABEL: {{^}}combine_to_mad_f32_1:
+; SI-DAG: buffer_load_dword [[A:v[0-9]+]], v{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, 0 addr64{{$}}
+; SI-DAG: buffer_load_dword [[B:v[0-9]+]], v{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, 0 addr64 offset:4{{$}}
+; SI-DAG: buffer_load_dword [[C:v[0-9]+]], v{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, 0 addr64 offset:8{{$}}
+
+; SI-STD: v_mad_f32 [[RESULT:v[0-9]+]], [[A]], [[B]], [[C]]
+; SI-DENORM: v_fma_f32 [[RESULT:v[0-9]+]], [[A]], [[B]], [[C]]
+
+; SI-DENORM-SLOWFMAF: v_mul_f32_e32 [[TMP:v[0-9]+]], [[B]], [[A]]
+; SI-DENORM-SLOWFMAF: v_add_f32_e32 [[RESULT:v[0-9]+]], [[TMP]], [[C]]
+
+; SI: buffer_store_dword [[RESULT]]
+define void @combine_to_mad_f32_1(float addrspace(1)* noalias %out, float addrspace(1)* noalias %in) #1 {
+ %tid = tail call i32 @llvm.r600.read.tidig.x() #0
+ %gep.0 = getelementptr float addrspace(1)* %in, i32 %tid
+ %gep.1 = getelementptr float addrspace(1)* %gep.0, i32 1
+ %gep.2 = getelementptr float addrspace(1)* %gep.0, i32 2
+ %gep.out = getelementptr float addrspace(1)* %out, i32 %tid
+
+ %a = load float addrspace(1)* %gep.0
+ %b = load float addrspace(1)* %gep.1
+ %c = load float addrspace(1)* %gep.2
+
+ %mul = fmul float %a, %b
+ %fma = fadd float %c, %mul
+ store float %fma, float addrspace(1)* %gep.out
+ ret void
+}
+
+; (fsub (fmul x, y), z) -> (fma x, y, (fneg z))
+; FUNC-LABEL: {{^}}combine_to_mad_fsub_0_f32:
+; SI-DAG: buffer_load_dword [[A:v[0-9]+]], v{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, 0 addr64{{$}}
+; SI-DAG: buffer_load_dword [[B:v[0-9]+]], v{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, 0 addr64 offset:4{{$}}
+; SI-DAG: buffer_load_dword [[C:v[0-9]+]], v{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, 0 addr64 offset:8{{$}}
+
+; SI-STD: v_mad_f32 [[RESULT:v[0-9]+]], [[A]], [[B]], -[[C]]
+; SI-DENORM: v_fma_f32 [[RESULT:v[0-9]+]], [[A]], [[B]], -[[C]]
+
+; SI-DENORM-SLOWFMAF: v_mul_f32_e32 [[TMP:v[0-9]+]], [[B]], [[A]]
+; SI-DENORM-SLOWFMAF: v_subrev_f32_e32 [[RESULT:v[0-9]+]], [[C]], [[TMP]]
+
+; SI: buffer_store_dword [[RESULT]]
+define void @combine_to_mad_fsub_0_f32(float addrspace(1)* noalias %out, float addrspace(1)* noalias %in) #1 {
+ %tid = tail call i32 @llvm.r600.read.tidig.x() #0
+ %gep.0 = getelementptr float addrspace(1)* %in, i32 %tid
+ %gep.1 = getelementptr float addrspace(1)* %gep.0, i32 1
+ %gep.2 = getelementptr float addrspace(1)* %gep.0, i32 2
+ %gep.out = getelementptr float addrspace(1)* %out, i32 %tid
+
+ %a = load float addrspace(1)* %gep.0
+ %b = load float addrspace(1)* %gep.1
+ %c = load float addrspace(1)* %gep.2
+
+ %mul = fmul float %a, %b
+ %fma = fsub float %mul, %c
+ store float %fma, float addrspace(1)* %gep.out
+ ret void
+}
+
+; (fsub (fmul x, y), z) -> (fma x, y, (fneg z))
+; FUNC-LABEL: {{^}}combine_to_mad_fsub_0_f32_2use:
+; SI-DAG: buffer_load_dword [[A:v[0-9]+]], v{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, 0 addr64{{$}}
+; SI-DAG: buffer_load_dword [[B:v[0-9]+]], v{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, 0 addr64 offset:4{{$}}
+; SI-DAG: buffer_load_dword [[C:v[0-9]+]], v{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, 0 addr64 offset:8{{$}}
+; SI-DAG: buffer_load_dword [[D:v[0-9]+]], v{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, 0 addr64 offset:12{{$}}
+
+; SI-STD-DAG: v_mad_f32 [[RESULT0:v[0-9]+]], [[A]], [[B]], -[[C]]
+; SI-STD-DAG: v_mad_f32 [[RESULT1:v[0-9]+]], [[A]], [[B]], -[[D]]
+
+; SI-DENORM-DAG: v_fma_f32 [[RESULT0:v[0-9]+]], [[A]], [[B]], -[[C]]
+; SI-DENORM-DAG: v_fma_f32 [[RESULT1:v[0-9]+]], [[A]], [[B]], -[[D]]
+
+; SI-DENORM-SLOWFMAF: v_mul_f32_e32 [[TMP:v[0-9]+]], [[B]], [[A]]
+; SI-DENORM-SLOWFMAF-DAG: v_subrev_f32_e32 [[RESULT0:v[0-9]+]], [[C]], [[TMP]]
+; SI-DENORM-SLOWFMAF-DAG: v_subrev_f32_e32 [[RESULT1:v[0-9]+]], [[D]], [[TMP]]
+
+; SI-DAG: buffer_store_dword [[RESULT0]], v{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, 0 addr64{{$}}
+; SI-DAG: buffer_store_dword [[RESULT1]], v{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, 0 addr64 offset:4{{$}}
+; SI: s_endpgm
+define void @combine_to_mad_fsub_0_f32_2use(float addrspace(1)* noalias %out, float addrspace(1)* noalias %in) #1 {
+ %tid = tail call i32 @llvm.r600.read.tidig.x() #0
+ %gep.0 = getelementptr float addrspace(1)* %in, i32 %tid
+ %gep.1 = getelementptr float addrspace(1)* %gep.0, i32 1
+ %gep.2 = getelementptr float addrspace(1)* %gep.0, i32 2
+ %gep.3 = getelementptr float addrspace(1)* %gep.0, i32 3
+ %gep.out.0 = getelementptr float addrspace(1)* %out, i32 %tid
+ %gep.out.1 = getelementptr float addrspace(1)* %gep.out.0, i32 1
+
+ %a = load float addrspace(1)* %gep.0
+ %b = load float addrspace(1)* %gep.1
+ %c = load float addrspace(1)* %gep.2
+ %d = load float addrspace(1)* %gep.3
+
+ %mul = fmul float %a, %b
+ %fma0 = fsub float %mul, %c
+ %fma1 = fsub float %mul, %d
+ store float %fma0, float addrspace(1)* %gep.out.0
+ store float %fma1, float addrspace(1)* %gep.out.1
+ ret void
+}
+
+; (fsub x, (fmul y, z)) -> (fma (fneg y), z, x)
+; FUNC-LABEL: {{^}}combine_to_mad_fsub_1_f32:
+; SI-DAG: buffer_load_dword [[A:v[0-9]+]], v{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, 0 addr64{{$}}
+; SI-DAG: buffer_load_dword [[B:v[0-9]+]], v{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, 0 addr64 offset:4{{$}}
+; SI-DAG: buffer_load_dword [[C:v[0-9]+]], v{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, 0 addr64 offset:8{{$}}
+
+; SI-STD: v_mad_f32 [[RESULT:v[0-9]+]], -[[A]], [[B]], [[C]]
+; SI-DENORM: v_fma_f32 [[RESULT:v[0-9]+]], -[[A]], [[B]], [[C]]
+
+; SI-DENORM-SLOWFMAF: v_mul_f32_e32 [[TMP:v[0-9]+]], [[B]], [[A]]
+; SI-DENORM-SLOWFMAF: v_subrev_f32_e32 [[RESULT:v[0-9]+]], [[TMP]], [[C]]
+
+; SI: buffer_store_dword [[RESULT]]
+define void @combine_to_mad_fsub_1_f32(float addrspace(1)* noalias %out, float addrspace(1)* noalias %in) #1 {
+ %tid = tail call i32 @llvm.r600.read.tidig.x() #0
+ %gep.0 = getelementptr float addrspace(1)* %in, i32 %tid
+ %gep.1 = getelementptr float addrspace(1)* %gep.0, i32 1
+ %gep.2 = getelementptr float addrspace(1)* %gep.0, i32 2
+ %gep.out = getelementptr float addrspace(1)* %out, i32 %tid
+
+ %a = load float addrspace(1)* %gep.0
+ %b = load float addrspace(1)* %gep.1
+ %c = load float addrspace(1)* %gep.2
+
+ %mul = fmul float %a, %b
+ %fma = fsub float %c, %mul
+ store float %fma, float addrspace(1)* %gep.out
+ ret void
+}
+
+; (fsub x, (fmul y, z)) -> (fma (fneg y), z, x)
+; FUNC-LABEL: {{^}}combine_to_mad_fsub_1_f32_2use:
+; SI-DAG: buffer_load_dword [[A:v[0-9]+]], v{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, 0 addr64{{$}}
+; SI-DAG: buffer_load_dword [[B:v[0-9]+]], v{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, 0 addr64 offset:4{{$}}
+; SI-DAG: buffer_load_dword [[C:v[0-9]+]], v{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, 0 addr64 offset:8{{$}}
+; SI-DAG: buffer_load_dword [[D:v[0-9]+]], v{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, 0 addr64 offset:12{{$}}
+
+; SI-STD-DAG: v_mad_f32 [[RESULT0:v[0-9]+]], -[[A]], [[B]], [[C]]
+; SI-STD-DAG: v_mad_f32 [[RESULT1:v[0-9]+]], -[[A]], [[B]], [[D]]
+
+; SI-DENORM-DAG: v_fma_f32 [[RESULT0:v[0-9]+]], -[[A]], [[B]], [[C]]
+; SI-DENORM-DAG: v_fma_f32 [[RESULT1:v[0-9]+]], -[[A]], [[B]], [[D]]
+
+; SI-DENORM-SLOWFMAF: v_mul_f32_e32 [[TMP:v[0-9]+]], [[B]], [[A]]
+; SI-DENORM-SLOWFMAF-DAG: v_subrev_f32_e32 [[RESULT0:v[0-9]+]], [[TMP]], [[C]]
+; SI-DENORM-SLOWFMAF-DAG: v_subrev_f32_e32 [[RESULT1:v[0-9]+]], [[TMP]], [[D]]
+
+; SI-DAG: buffer_store_dword [[RESULT0]], v{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, 0 addr64{{$}}
+; SI-DAG: buffer_store_dword [[RESULT1]], v{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, 0 addr64 offset:4{{$}}
+; SI: s_endpgm
+define void @combine_to_mad_fsub_1_f32_2use(float addrspace(1)* noalias %out, float addrspace(1)* noalias %in) #1 {
+ %tid = tail call i32 @llvm.r600.read.tidig.x() #0
+ %gep.0 = getelementptr float addrspace(1)* %in, i32 %tid
+ %gep.1 = getelementptr float addrspace(1)* %gep.0, i32 1
+ %gep.2 = getelementptr float addrspace(1)* %gep.0, i32 2
+ %gep.3 = getelementptr float addrspace(1)* %gep.0, i32 3
+ %gep.out.0 = getelementptr float addrspace(1)* %out, i32 %tid
+ %gep.out.1 = getelementptr float addrspace(1)* %gep.out.0, i32 1
+
+ %a = load float addrspace(1)* %gep.0
+ %b = load float addrspace(1)* %gep.1
+ %c = load float addrspace(1)* %gep.2
+ %d = load float addrspace(1)* %gep.3
+
+ %mul = fmul float %a, %b
+ %fma0 = fsub float %c, %mul
+ %fma1 = fsub float %d, %mul
+ store float %fma0, float addrspace(1)* %gep.out.0
+ store float %fma1, float addrspace(1)* %gep.out.1
+ ret void
+}
+
+; (fsub (fneg (fmul x, y)), z) -> (fma (fneg x), y, (fneg z))
+; FUNC-LABEL: {{^}}combine_to_mad_fsub_2_f32:
+; SI-DAG: buffer_load_dword [[A:v[0-9]+]], v{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, 0 addr64{{$}}
+; SI-DAG: buffer_load_dword [[B:v[0-9]+]], v{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, 0 addr64 offset:4{{$}}
+; SI-DAG: buffer_load_dword [[C:v[0-9]+]], v{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, 0 addr64 offset:8{{$}}
+
+; SI-STD: v_mad_f32 [[RESULT:v[0-9]+]], -[[A]], [[B]], -[[C]]
+
+; SI-DENORM: v_fma_f32 [[RESULT:v[0-9]+]], -[[A]], [[B]], -[[C]]
+
+; SI-DENORM-SLOWFMAF: v_mul_f32_e32 [[TMP:v[0-9]+]], [[B]], [[A]]
+; SI-DENORM-SLOWFMAF: v_sub_f32_e64 [[RESULT:v[0-9]+]], -[[TMP]], [[C]]
+
+; SI: buffer_store_dword [[RESULT]]
+define void @combine_to_mad_fsub_2_f32(float addrspace(1)* noalias %out, float addrspace(1)* noalias %in) #1 {
+ %tid = tail call i32 @llvm.r600.read.tidig.x() #0
+ %gep.0 = getelementptr float addrspace(1)* %in, i32 %tid
+ %gep.1 = getelementptr float addrspace(1)* %gep.0, i32 1
+ %gep.2 = getelementptr float addrspace(1)* %gep.0, i32 2
+ %gep.out = getelementptr float addrspace(1)* %out, i32 %tid
+
+ %a = load float addrspace(1)* %gep.0
+ %b = load float addrspace(1)* %gep.1
+ %c = load float addrspace(1)* %gep.2
+
+ %mul = fmul float %a, %b
+ %mul.neg = fsub float -0.0, %mul
+ %fma = fsub float %mul.neg, %c
+
+ store float %fma, float addrspace(1)* %gep.out
+ ret void
+}
+
+; (fsub (fneg (fmul x, y)), z) -> (fma (fneg x), y, (fneg z))
+; FUNC-LABEL: {{^}}combine_to_mad_fsub_2_f32_2uses_neg:
+; SI-DAG: buffer_load_dword [[A:v[0-9]+]], v{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, 0 addr64{{$}}
+; SI-DAG: buffer_load_dword [[B:v[0-9]+]], v{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, 0 addr64 offset:4{{$}}
+; SI-DAG: buffer_load_dword [[C:v[0-9]+]], v{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, 0 addr64 offset:8{{$}}
+; SI-DAG: buffer_load_dword [[D:v[0-9]+]], v{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, 0 addr64 offset:12{{$}}
+
+; SI-STD-DAG: v_mad_f32 [[RESULT0:v[0-9]+]], -[[A]], [[B]], -[[C]]
+; SI-STD-DAG: v_mad_f32 [[RESULT1:v[0-9]+]], -[[A]], [[B]], -[[D]]
+
+; SI-DENORM-DAG: v_fma_f32 [[RESULT0:v[0-9]+]], -[[A]], [[B]], -[[C]]
+; SI-DENORM-DAG: v_fma_f32 [[RESULT1:v[0-9]+]], -[[A]], [[B]], -[[D]]
+
+; SI-DENORM-SLOWFMAF: v_mul_f32_e32 [[TMP:v[0-9]+]], [[B]], [[A]]
+; SI-DENORM-SLOWFMAF-DAG: v_sub_f32_e64 [[RESULT0:v[0-9]+]], -[[TMP]], [[C]]
+; SI-DENORM-SLOWFMAF-DAG: v_sub_f32_e64 [[RESULT1:v[0-9]+]], -[[TMP]], [[D]]
+
+; SI-DAG: buffer_store_dword [[RESULT0]], v{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, 0 addr64{{$}}
+; SI-DAG: buffer_store_dword [[RESULT1]], v{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, 0 addr64 offset:4{{$}}
+; SI: s_endpgm
+define void @combine_to_mad_fsub_2_f32_2uses_neg(float addrspace(1)* noalias %out, float addrspace(1)* noalias %in) #1 {
+ %tid = tail call i32 @llvm.r600.read.tidig.x() #0
+ %gep.0 = getelementptr float addrspace(1)* %in, i32 %tid
+ %gep.1 = getelementptr float addrspace(1)* %gep.0, i32 1
+ %gep.2 = getelementptr float addrspace(1)* %gep.0, i32 2
+ %gep.3 = getelementptr float addrspace(1)* %gep.0, i32 3
+ %gep.out.0 = getelementptr float addrspace(1)* %out, i32 %tid
+ %gep.out.1 = getelementptr float addrspace(1)* %gep.out.0, i32 1
+
+ %a = load float addrspace(1)* %gep.0
+ %b = load float addrspace(1)* %gep.1
+ %c = load float addrspace(1)* %gep.2
+ %d = load float addrspace(1)* %gep.3
+
+ %mul = fmul float %a, %b
+ %mul.neg = fsub float -0.0, %mul
+ %fma0 = fsub float %mul.neg, %c
+ %fma1 = fsub float %mul.neg, %d
+
+ store float %fma0, float addrspace(1)* %gep.out.0
+ store float %fma1, float addrspace(1)* %gep.out.1
+ ret void
+}
+
+; (fsub (fneg (fmul x, y)), z) -> (fma (fneg x), y, (fneg z))
+; FUNC-LABEL: {{^}}combine_to_mad_fsub_2_f32_2uses_mul:
+; SI-DAG: buffer_load_dword [[A:v[0-9]+]], v{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, 0 addr64{{$}}
+; SI-DAG: buffer_load_dword [[B:v[0-9]+]], v{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, 0 addr64 offset:4{{$}}
+; SI-DAG: buffer_load_dword [[C:v[0-9]+]], v{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, 0 addr64 offset:8{{$}}
+; SI-DAG: buffer_load_dword [[D:v[0-9]+]], v{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, 0 addr64 offset:12{{$}}
+
+; SI-STD-DAG: v_mad_f32 [[RESULT0:v[0-9]+]], -[[A]], [[B]], -[[C]]
+; SI-STD-DAG: v_mad_f32 [[RESULT1:v[0-9]+]], [[A]], [[B]], -[[D]]
+
+; SI-DENORM-DAG: v_fma_f32 [[RESULT0:v[0-9]+]], -[[A]], [[B]], -[[C]]
+; SI-DENORM-DAG: v_fma_f32 [[RESULT1:v[0-9]+]], [[A]], [[B]], -[[D]]
+
+; SI-DENORM-SLOWFMAF: v_mul_f32_e32 [[TMP:v[0-9]+]], [[B]], [[A]]
+; SI-DENORM-SLOWFMAF-DAG: v_sub_f32_e64 [[RESULT0:v[0-9]+]], -[[TMP]], [[C]]
+; SI-DENORM-SLOWFMAF-DAG: v_subrev_f32_e32 [[RESULT1:v[0-9]+]], [[D]], [[TMP]]
+
+; SI-DAG: buffer_store_dword [[RESULT0]], v{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, 0 addr64{{$}}
+; SI-DAG: buffer_store_dword [[RESULT1]], v{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, 0 addr64 offset:4{{$}}
+; SI: s_endpgm
+define void @combine_to_mad_fsub_2_f32_2uses_mul(float addrspace(1)* noalias %out, float addrspace(1)* noalias %in) #1 {
+ %tid = tail call i32 @llvm.r600.read.tidig.x() #0
+ %gep.0 = getelementptr float addrspace(1)* %in, i32 %tid
+ %gep.1 = getelementptr float addrspace(1)* %gep.0, i32 1
+ %gep.2 = getelementptr float addrspace(1)* %gep.0, i32 2
+ %gep.3 = getelementptr float addrspace(1)* %gep.0, i32 3
+ %gep.out.0 = getelementptr float addrspace(1)* %out, i32 %tid
+ %gep.out.1 = getelementptr float addrspace(1)* %gep.out.0, i32 1
+
+ %a = load float addrspace(1)* %gep.0
+ %b = load float addrspace(1)* %gep.1
+ %c = load float addrspace(1)* %gep.2
+ %d = load float addrspace(1)* %gep.3
+
+ %mul = fmul float %a, %b
+ %mul.neg = fsub float -0.0, %mul
+ %fma0 = fsub float %mul.neg, %c
+ %fma1 = fsub float %mul, %d
+
+ store float %fma0, float addrspace(1)* %gep.out.0
+ store float %fma1, float addrspace(1)* %gep.out.1
+ ret void
+}
+
+; fold (fsub (fma x, y, (fmul u, v)), z) -> (fma x, y, (fma u, v, (fneg z)))
+
+; FUNC-LABEL: {{^}}aggressive_combine_to_mad_fsub_0_f32:
+; SI-DAG: buffer_load_dword [[A:v[0-9]+]], v{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, 0 addr64{{$}}
+; SI-DAG: buffer_load_dword [[B:v[0-9]+]], v{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, 0 addr64 offset:4{{$}}
+; SI-DAG: buffer_load_dword [[C:v[0-9]+]], v{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, 0 addr64 offset:8{{$}}
+; SI-DAG: buffer_load_dword [[D:v[0-9]+]], v{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, 0 addr64 offset:12{{$}}
+; SI-DAG: buffer_load_dword [[E:v[0-9]+]], v{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, 0 addr64 offset:16{{$}}
+
+; SI-STD: v_mul_f32_e32 [[TMP0:v[0-9]+]], [[E]], [[D]]
+; SI-STD: v_fma_f32 [[TMP1:v[0-9]+]], [[A]], [[B]], [[TMP0]]
+; SI-STD: v_subrev_f32_e32 [[RESULT:v[0-9]+]], [[C]], [[TMP1]]
+
+; SI-DENORM: v_fma_f32 [[TMP0:v[0-9]+]], [[D]], [[E]], -[[C]]
+; SI-DENORM: v_fma_f32 [[RESULT:v[0-9]+]], [[A]], [[B]], [[TMP0]]
+
+; SI-DENORM-SLOWFMAF: v_mul_f32_e32 [[TMP0:v[0-9]+]], [[E]], [[D]]
+; SI-DENORM-SLOWFMAF: v_fma_f32 [[TMP1:v[0-9]+]], [[A]], [[B]], [[TMP0]]
+; SI-DENORM-SLOWFMAF: v_subrev_f32_e32 [[RESULT:v[0-9]+]], [[C]], [[TMP1]]
+
+; SI: buffer_store_dword [[RESULT]], v{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, 0 addr64{{$}}
+define void @aggressive_combine_to_mad_fsub_0_f32(float addrspace(1)* noalias %out, float addrspace(1)* noalias %in) #1 {
+ %tid = tail call i32 @llvm.r600.read.tidig.x() #0
+ %gep.0 = getelementptr float addrspace(1)* %in, i32 %tid
+ %gep.1 = getelementptr float addrspace(1)* %gep.0, i32 1
+ %gep.2 = getelementptr float addrspace(1)* %gep.0, i32 2
+ %gep.3 = getelementptr float addrspace(1)* %gep.0, i32 3
+ %gep.4 = getelementptr float addrspace(1)* %gep.0, i32 4
+ %gep.out = getelementptr float addrspace(1)* %out, i32 %tid
+
+ %x = load float addrspace(1)* %gep.0
+ %y = load float addrspace(1)* %gep.1
+ %z = load float addrspace(1)* %gep.2
+ %u = load float addrspace(1)* %gep.3
+ %v = load float addrspace(1)* %gep.4
+
+ %tmp0 = fmul float %u, %v
+ %tmp1 = call float @llvm.fma.f32(float %x, float %y, float %tmp0) #0
+ %tmp2 = fsub float %tmp1, %z
+
+ store float %tmp2, float addrspace(1)* %gep.out
+ ret void
+}
+
+; fold (fsub x, (fma y, z, (fmul u, v)))
+; -> (fma (fneg y), z, (fma (fneg u), v, x))
+
+; FUNC-LABEL: {{^}}aggressive_combine_to_mad_fsub_1_f32:
+; SI-DAG: buffer_load_dword [[A:v[0-9]+]], v{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, 0 addr64{{$}}
+; SI-DAG: buffer_load_dword [[B:v[0-9]+]], v{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, 0 addr64 offset:4{{$}}
+; SI-DAG: buffer_load_dword [[C:v[0-9]+]], v{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, 0 addr64 offset:8{{$}}
+; SI-DAG: buffer_load_dword [[D:v[0-9]+]], v{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, 0 addr64 offset:12{{$}}
+; SI-DAG: buffer_load_dword [[E:v[0-9]+]], v{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, 0 addr64 offset:16{{$}}
+
+; SI-STD: v_mul_f32_e32 [[TMP0:v[0-9]+]], [[E]], [[D]]
+; SI-STD: v_fma_f32 [[TMP1:v[0-9]+]], [[B]], [[C]], [[TMP0]]
+; SI-STD: v_subrev_f32_e32 [[RESULT:v[0-9]+]], [[TMP1]], [[A]]
+
+; SI-DENORM: v_fma_f32 [[TMP0:v[0-9]+]], -[[D]], [[E]], [[A]]
+; SI-DENORM: v_fma_f32 [[RESULT:v[0-9]+]], -[[B]], [[C]], [[TMP0]]
+
+; SI-DENORM-SLOWFMAF: v_mul_f32_e32 [[TMP0:v[0-9]+]], [[E]], [[D]]
+; SI-DENORM-SLOWFMAF: v_fma_f32 [[TMP1:v[0-9]+]], [[B]], [[C]], [[TMP0]]
+; SI-DENORM-SLOWFMAF: v_subrev_f32_e32 [[RESULT:v[0-9]+]], [[TMP1]], [[A]]
+
+; SI: buffer_store_dword [[RESULT]], v{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, 0 addr64{{$}}
+; SI: s_endpgm
+define void @aggressive_combine_to_mad_fsub_1_f32(float addrspace(1)* noalias %out, float addrspace(1)* noalias %in) #1 {
+ %tid = tail call i32 @llvm.r600.read.tidig.x() #0
+ %gep.0 = getelementptr float addrspace(1)* %in, i32 %tid
+ %gep.1 = getelementptr float addrspace(1)* %gep.0, i32 1
+ %gep.2 = getelementptr float addrspace(1)* %gep.0, i32 2
+ %gep.3 = getelementptr float addrspace(1)* %gep.0, i32 3
+ %gep.4 = getelementptr float addrspace(1)* %gep.0, i32 4
+ %gep.out = getelementptr float addrspace(1)* %out, i32 %tid
+
+ %x = load float addrspace(1)* %gep.0
+ %y = load float addrspace(1)* %gep.1
+ %z = load float addrspace(1)* %gep.2
+ %u = load float addrspace(1)* %gep.3
+ %v = load float addrspace(1)* %gep.4
+
+ %tmp0 = fmul float %u, %v
+ %tmp1 = call float @llvm.fma.f32(float %y, float %z, float %tmp0) #0
+ %tmp2 = fsub float %x, %tmp1
+
+ store float %tmp2, float addrspace(1)* %gep.out
+ ret void
+}
+
+; fold (fsub (fma x, y, (fmul u, v)), z) -> (fma x, y, (fma u, v, (fneg z)))
+
+; FUNC-LABEL: {{^}}aggressive_combine_to_mad_fsub_2_f32:
+; SI-DAG: buffer_load_dword [[A:v[0-9]+]], v{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, 0 addr64{{$}}
+; SI-DAG: buffer_load_dword [[B:v[0-9]+]], v{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, 0 addr64 offset:4{{$}}
+; SI-DAG: buffer_load_dword [[C:v[0-9]+]], v{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, 0 addr64 offset:8{{$}}
+; SI-DAG: buffer_load_dword [[D:v[0-9]+]], v{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, 0 addr64 offset:12{{$}}
+; SI-DAG: buffer_load_dword [[E:v[0-9]+]], v{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, 0 addr64 offset:16{{$}}
+
+; SI-STD: v_mad_f32 [[TMP:v[0-9]+]], [[D]], [[E]], -[[C]]
+; SI-STD: v_mad_f32 [[RESULT:v[0-9]+]], [[A]], [[B]], [[TMP]]
+
+; SI-DENORM: v_fma_f32 [[TMP:v[0-9]+]], [[D]], [[E]], -[[C]]
+; SI-DENORM: v_fma_f32 [[RESULT:v[0-9]+]], [[A]], [[B]], [[TMP]]
+
+; SI-DENORM-SLOWFMAF: v_mul_f32_e32 [[TMP0:v[0-9]+]], [[E]], [[D]]
+; SI-DENORM-SLOWFMAF: v_mul_f32_e32 [[TMP1:v[0-9]+]], [[B]], [[A]]
+; SI-DENORM-SLOWFMAF: v_add_f32_e32 [[TMP2:v[0-9]+]], [[TMP0]], [[TMP1]]
+; SI-DENORM-SLOWFMAF: v_subrev_f32_e32 [[RESULT:v[0-9]+]], [[C]], [[TMP2]]
+
+; SI: buffer_store_dword [[RESULT]], v{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, 0 addr64{{$}}
+; SI: s_endpgm
+define void @aggressive_combine_to_mad_fsub_2_f32(float addrspace(1)* noalias %out, float addrspace(1)* noalias %in) #1 {
+ %tid = tail call i32 @llvm.r600.read.tidig.x() #0
+ %gep.0 = getelementptr float addrspace(1)* %in, i32 %tid
+ %gep.1 = getelementptr float addrspace(1)* %gep.0, i32 1
+ %gep.2 = getelementptr float addrspace(1)* %gep.0, i32 2
+ %gep.3 = getelementptr float addrspace(1)* %gep.0, i32 3
+ %gep.4 = getelementptr float addrspace(1)* %gep.0, i32 4
+ %gep.out = getelementptr float addrspace(1)* %out, i32 %tid
+
+ %x = load float addrspace(1)* %gep.0
+ %y = load float addrspace(1)* %gep.1
+ %z = load float addrspace(1)* %gep.2
+ %u = load float addrspace(1)* %gep.3
+ %v = load float addrspace(1)* %gep.4
+
+ %tmp0 = fmul float %u, %v
+ %tmp1 = call float @llvm.fmuladd.f32(float %x, float %y, float %tmp0) #0
+ %tmp2 = fsub float %tmp1, %z
+
+ store float %tmp2, float addrspace(1)* %gep.out
+ ret void
+}
+
+; fold (fsub x, (fmuladd y, z, (fmul u, v)))
+; -> (fmuladd (fneg y), z, (fmuladd (fneg u), v, x))
+
+; FUNC-LABEL: {{^}}aggressive_combine_to_mad_fsub_3_f32:
+; SI-DAG: buffer_load_dword [[A:v[0-9]+]], v{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, 0 addr64{{$}}
+; SI-DAG: buffer_load_dword [[B:v[0-9]+]], v{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, 0 addr64 offset:4{{$}}
+; SI-DAG: buffer_load_dword [[C:v[0-9]+]], v{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, 0 addr64 offset:8{{$}}
+; SI-DAG: buffer_load_dword [[D:v[0-9]+]], v{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, 0 addr64 offset:12{{$}}
+; SI-DAG: buffer_load_dword [[E:v[0-9]+]], v{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, 0 addr64 offset:16{{$}}
+
+; SI-STD: v_mad_f32 [[TMP:v[0-9]+]], -[[D]], [[E]], [[A]]
+; SI-STD: v_mad_f32 [[RESULT:v[0-9]+]], -[[B]], [[C]], [[TMP]]
+
+; SI-DENORM: v_fma_f32 [[TMP:v[0-9]+]], -[[D]], [[E]], [[A]]
+; SI-DENORM: v_fma_f32 [[RESULT:v[0-9]+]], -[[B]], [[C]], [[TMP]]
+
+; SI-DENORM-SLOWFMAF: v_mul_f32_e32 [[TMP0:v[0-9]+]], [[E]], [[D]]
+; SI-DENORM-SLOWFMAF: v_mul_f32_e32 [[TMP1:v[0-9]+]], [[C]], [[B]]
+; SI-DENORM-SLOWFMAF: v_add_f32_e32 [[TMP2:v[0-9]+]], [[TMP0]], [[TMP1]]
+; SI-DENORM-SLOWFMAF: v_subrev_f32_e32 [[RESULT:v[0-9]+]], [[TMP2]], [[A]]
+
+; SI: buffer_store_dword [[RESULT]], v{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, 0 addr64{{$}}
+; SI: s_endpgm
+define void @aggressive_combine_to_mad_fsub_3_f32(float addrspace(1)* noalias %out, float addrspace(1)* noalias %in) #1 {
+ %tid = tail call i32 @llvm.r600.read.tidig.x() #0
+ %gep.0 = getelementptr float addrspace(1)* %in, i32 %tid
+ %gep.1 = getelementptr float addrspace(1)* %gep.0, i32 1
+ %gep.2 = getelementptr float addrspace(1)* %gep.0, i32 2
+ %gep.3 = getelementptr float addrspace(1)* %gep.0, i32 3
+ %gep.4 = getelementptr float addrspace(1)* %gep.0, i32 4
+ %gep.out = getelementptr float addrspace(1)* %out, i32 %tid
+
+ %x = load float addrspace(1)* %gep.0
+ %y = load float addrspace(1)* %gep.1
+ %z = load float addrspace(1)* %gep.2
+ %u = load float addrspace(1)* %gep.3
+ %v = load float addrspace(1)* %gep.4
+
+ %tmp0 = fmul float %u, %v
+ %tmp1 = call float @llvm.fmuladd.f32(float %y, float %z, float %tmp0) #0
+ %tmp2 = fsub float %x, %tmp1
+
+ store float %tmp2, float addrspace(1)* %gep.out
+ ret void
+}
+
+attributes #0 = { nounwind readnone }
+attributes #1 = { nounwind }
diff --git a/test/CodeGen/R600/mad-sub.ll b/test/CodeGen/R600/mad-sub.ll
index 240abd0..7b4020d 100644
--- a/test/CodeGen/R600/mad-sub.ll
+++ b/test/CodeGen/R600/mad-sub.ll
@@ -1,4 +1,4 @@
-; RUN: llc -march=r600 -mcpu=SI -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
+; RUN: llc -march=amdgcn -mcpu=SI -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
declare i32 @llvm.r600.read.tidig.x() #0
declare float @llvm.fabs.f32(float) #0
@@ -171,7 +171,7 @@ define void @mad_fabs_sub_f32(float addrspace(1)* noalias nocapture %out, float
; FUNC-LABEL: {{^}}fsub_c_fadd_a_a:
; SI-DAG: buffer_load_dword [[R1:v[0-9]+]], {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, 0 addr64{{$}}
-; SI-DAG: buffer_load_dword [[R2:v[0-9]+]], {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, 0 addr64 offset:0x4
+; SI-DAG: buffer_load_dword [[R2:v[0-9]+]], {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, 0 addr64 offset:4
; SI: v_mad_f32 [[RESULT:v[0-9]+]], -2.0, [[R1]], [[R2]]
; SI: buffer_store_dword [[RESULT]]
define void @fsub_c_fadd_a_a(float addrspace(1)* %out, float addrspace(1)* %in) {
@@ -192,7 +192,7 @@ define void @fsub_c_fadd_a_a(float addrspace(1)* %out, float addrspace(1)* %in)
; FUNC-LABEL: {{^}}fsub_fadd_a_a_c:
; SI-DAG: buffer_load_dword [[R1:v[0-9]+]], {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, 0 addr64{{$}}
-; SI-DAG: buffer_load_dword [[R2:v[0-9]+]], {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, 0 addr64 offset:0x4
+; SI-DAG: buffer_load_dword [[R2:v[0-9]+]], {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, 0 addr64 offset:4
; SI: v_mad_f32 [[RESULT:v[0-9]+]], 2.0, [[R1]], -[[R2]]
; SI: buffer_store_dword [[RESULT]]
define void @fsub_fadd_a_a_c(float addrspace(1)* %out, float addrspace(1)* %in) {
diff --git a/test/CodeGen/R600/mad_int24.ll b/test/CodeGen/R600/mad_int24.ll
index c8dd377..86d75a6 100644
--- a/test/CodeGen/R600/mad_int24.ll
+++ b/test/CodeGen/R600/mad_int24.ll
@@ -1,6 +1,7 @@
; RUN: llc < %s -march=r600 -mcpu=redwood | FileCheck %s --check-prefix=EG --check-prefix=FUNC
; RUN: llc < %s -march=r600 -mcpu=cayman | FileCheck %s --check-prefix=CM --check-prefix=FUNC
-; RUN: llc < %s -march=r600 -mcpu=SI -verify-machineinstrs | FileCheck %s --check-prefix=SI --check-prefix=FUNC
+; RUN: llc < %s -march=amdgcn -mcpu=SI -verify-machineinstrs | FileCheck %s --check-prefix=SI --check-prefix=FUNC
+; RUN: llc < %s -march=amdgcn -mcpu=tonga -verify-machineinstrs | FileCheck %s --check-prefix=SI --check-prefix=FUNC
declare i32 @llvm.AMDGPU.imul24(i32, i32) nounwind readnone
diff --git a/test/CodeGen/R600/mad_uint24.ll b/test/CodeGen/R600/mad_uint24.ll
index b7b32fe..95fe341 100644
--- a/test/CodeGen/R600/mad_uint24.ll
+++ b/test/CodeGen/R600/mad_uint24.ll
@@ -1,6 +1,7 @@
; RUN: llc < %s -march=r600 -mcpu=redwood | FileCheck %s --check-prefix=EG --check-prefix=FUNC
; RUN: llc < %s -march=r600 -mcpu=cayman | FileCheck %s --check-prefix=EG --check-prefix=FUNC
-; RUN: llc < %s -march=r600 -mcpu=SI -verify-machineinstrs | FileCheck %s --check-prefix=SI --check-prefix=FUNC
+; RUN: llc < %s -march=amdgcn -mcpu=SI -verify-machineinstrs | FileCheck %s --check-prefix=SI --check-prefix=FUNC
+; RUN: llc < %s -march=amdgcn -mcpu=tonga -verify-machineinstrs | FileCheck %s --check-prefix=SI --check-prefix=FUNC
; FUNC-LABEL: {{^}}u32_mad24:
; EG: MULADD_UINT24
diff --git a/test/CodeGen/R600/madak.ll b/test/CodeGen/R600/madak.ll
new file mode 100644
index 0000000..505a49b
--- /dev/null
+++ b/test/CodeGen/R600/madak.ll
@@ -0,0 +1,193 @@
+; RUN: llc -march=amdgcn -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=GCN %s
+; XUN: llc -march=amdgcn -mcpu=tonga -verify-machineinstrs < %s | FileCheck -check-prefix=VI -check-prefix=GCN %s
+
+; FIXME: Enable VI
+
+declare i32 @llvm.r600.read.tidig.x() nounwind readnone
+declare float @llvm.fabs.f32(float) nounwind readnone
+
+; GCN-LABEL: {{^}}madak_f32:
+; GCN: buffer_load_dword [[VA:v[0-9]+]]
+; GCN: buffer_load_dword [[VB:v[0-9]+]]
+; GCN: v_madak_f32 {{v[0-9]+}}, [[VB]], [[VA]], 0x41200000
+define void @madak_f32(float addrspace(1)* noalias %out, float addrspace(1)* noalias %in.a, float addrspace(1)* noalias %in.b) nounwind {
+ %tid = tail call i32 @llvm.r600.read.tidig.x() nounwind readnone
+ %in.a.gep = getelementptr float addrspace(1)* %in.a, i32 %tid
+ %in.b.gep = getelementptr float addrspace(1)* %in.b, i32 %tid
+ %out.gep = getelementptr float addrspace(1)* %out, i32 %tid
+
+ %a = load float addrspace(1)* %in.a.gep, align 4
+ %b = load float addrspace(1)* %in.b.gep, align 4
+
+ %mul = fmul float %a, %b
+ %madak = fadd float %mul, 10.0
+ store float %madak, float addrspace(1)* %out.gep, align 4
+ ret void
+}
+
+; Make sure the immediate is folded only when it has a single use. The
+; fold is a code-size optimization, and duplicating the immediate into
+; multiple instructions would undo it.
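+; (v_madak_f32 carries its constant as a 32-bit literal in the
+; instruction stream, so folding it into two instructions would encode
+; the literal twice; materializing it once in a VGPR and using v_mad_f32
+; is smaller. This rationale is an assumption; the checks below only pin
+; the selected instructions.)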
+
+; GCN-LABEL: {{^}}madak_2_use_f32:
+; GCN-DAG: buffer_load_dword [[VA:v[0-9]+]], {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, 0 addr64{{$}}
+; GCN-DAG: buffer_load_dword [[VB:v[0-9]+]], {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, 0 addr64 offset:4
+; GCN-DAG: buffer_load_dword [[VC:v[0-9]+]], {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, 0 addr64 offset:8
+; GCN-DAG: v_mov_b32_e32 [[VK:v[0-9]+]], 0x41200000
+; GCN-DAG: v_mad_f32 {{v[0-9]+}}, [[VA]], [[VB]], [[VK]]
+; GCN-DAG: v_mad_f32 {{v[0-9]+}}, [[VA]], [[VC]], [[VK]]
+; GCN: s_endpgm
+define void @madak_2_use_f32(float addrspace(1)* noalias %out, float addrspace(1)* noalias %in) nounwind {
+ %tid = tail call i32 @llvm.r600.read.tidig.x() nounwind readnone
+
+ %in.gep.0 = getelementptr float addrspace(1)* %in, i32 %tid
+ %in.gep.1 = getelementptr float addrspace(1)* %in.gep.0, i32 1
+ %in.gep.2 = getelementptr float addrspace(1)* %in.gep.0, i32 2
+
+ %out.gep.0 = getelementptr float addrspace(1)* %out, i32 %tid
+ %out.gep.1 = getelementptr float addrspace(1)* %out.gep.0, i32 1
+
+ %a = load float addrspace(1)* %in.gep.0, align 4
+ %b = load float addrspace(1)* %in.gep.1, align 4
+ %c = load float addrspace(1)* %in.gep.2, align 4
+
+ %mul0 = fmul float %a, %b
+ %mul1 = fmul float %a, %c
+ %madak0 = fadd float %mul0, 10.0
+ %madak1 = fadd float %mul1, 10.0
+
+ store float %madak0, float addrspace(1)* %out.gep.0, align 4
+ store float %madak1, float addrspace(1)* %out.gep.1, align 4
+ ret void
+}
+
+; GCN-LABEL: {{^}}madak_m_inline_imm_f32:
+; GCN: buffer_load_dword [[VA:v[0-9]+]]
+; GCN: v_madak_f32 {{v[0-9]+}}, 4.0, [[VA]], 0x41200000
+define void @madak_m_inline_imm_f32(float addrspace(1)* noalias %out, float addrspace(1)* noalias %in.a) nounwind {
+ %tid = tail call i32 @llvm.r600.read.tidig.x() nounwind readnone
+ %in.a.gep = getelementptr float addrspace(1)* %in.a, i32 %tid
+ %out.gep = getelementptr float addrspace(1)* %out, i32 %tid
+
+ %a = load float addrspace(1)* %in.a.gep, align 4
+
+ %mul = fmul float 4.0, %a
+ %madak = fadd float %mul, 10.0
+ store float %madak, float addrspace(1)* %out.gep, align 4
+ ret void
+}
+
+; Make sure nothing strange happens when the constant is also
+; representable as an inline immediate.
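+; (4.0 is an inline immediate, so a plain v_mad_f32 with the inline
+; operand is already the small encoding; there is nothing for madak to
+; save. Assumed rationale; the check below only pins v_mad_f32.)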
+
+; GCN-LABEL: {{^}}madak_inline_imm_f32:
+; GCN: buffer_load_dword [[VA:v[0-9]+]]
+; GCN: buffer_load_dword [[VB:v[0-9]+]]
+; GCN: v_mad_f32 {{v[0-9]+}}, [[VA]], [[VB]], 4.0
+define void @madak_inline_imm_f32(float addrspace(1)* noalias %out, float addrspace(1)* noalias %in.a, float addrspace(1)* noalias %in.b) nounwind {
+ %tid = tail call i32 @llvm.r600.read.tidig.x() nounwind readnone
+ %in.a.gep = getelementptr float addrspace(1)* %in.a, i32 %tid
+ %in.b.gep = getelementptr float addrspace(1)* %in.b, i32 %tid
+ %out.gep = getelementptr float addrspace(1)* %out, i32 %tid
+
+ %a = load float addrspace(1)* %in.a.gep, align 4
+ %b = load float addrspace(1)* %in.b.gep, align 4
+
+ %mul = fmul float %a, %b
+ %madak = fadd float %mul, 4.0
+ store float %madak, float addrspace(1)* %out.gep, align 4
+ ret void
+}
+
+; We can't use an SGPR source operand when forming madak.
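+; (v_madak_f32 is a VOP2 encoding whose trailing literal already uses the
+; instruction's single constant-bus slot, so no source may read an SGPR;
+; assumed rationale for the v_mad_f32 fallback checked below.)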
+; GCN-LABEL: {{^}}s_v_madak_f32:
+; GCN: s_load_dword [[SB:s[0-9]+]]
+; GCN-DAG: v_mov_b32_e32 [[VK:v[0-9]+]], 0x41200000
+; GCN-DAG: buffer_load_dword [[VA:v[0-9]+]]
+; GCN-NOT: v_madak_f32
+; GCN: v_mad_f32 {{v[0-9]+}}, [[SB]], [[VA]], [[VK]]
+define void @s_v_madak_f32(float addrspace(1)* noalias %out, float addrspace(1)* noalias %in.a, float %b) nounwind {
+ %tid = tail call i32 @llvm.r600.read.tidig.x() nounwind readnone
+ %in.a.gep = getelementptr float addrspace(1)* %in.a, i32 %tid
+ %out.gep = getelementptr float addrspace(1)* %out, i32 %tid
+
+ %a = load float addrspace(1)* %in.a.gep, align 4
+
+ %mul = fmul float %a, %b
+ %madak = fadd float %mul, 10.0
+ store float %madak, float addrspace(1)* %out.gep, align 4
+ ret void
+}
+
+; GCN-LABEL: {{^}}v_s_madak_f32:
+; GCN-DAG: s_load_dword [[SB:s[0-9]+]]
+; GCN-DAG: v_mov_b32_e32 [[VK:v[0-9]+]], 0x41200000
+; GCN-DAG: buffer_load_dword [[VA:v[0-9]+]]
+; GCN-NOT: v_madak_f32
+; GCN: v_mad_f32 {{v[0-9]+}}, [[VA]], [[SB]], [[VK]]
+define void @v_s_madak_f32(float addrspace(1)* noalias %out, float %a, float addrspace(1)* noalias %in.b) nounwind {
+ %tid = tail call i32 @llvm.r600.read.tidig.x() nounwind readnone
+ %in.b.gep = getelementptr float addrspace(1)* %in.b, i32 %tid
+ %out.gep = getelementptr float addrspace(1)* %out, i32 %tid
+
+ %b = load float addrspace(1)* %in.b.gep, align 4
+
+ %mul = fmul float %a, %b
+ %madak = fadd float %mul, 10.0
+ store float %madak, float addrspace(1)* %out.gep, align 4
+ ret void
+}
+
+; GCN-LABEL: {{^}}s_s_madak_f32:
+; GCN-NOT: v_madak_f32
+; GCN: v_mad_f32 {{v[0-9]+}}, {{s[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+define void @s_s_madak_f32(float addrspace(1)* %out, float %a, float %b) nounwind {
+ %mul = fmul float %a, %b
+ %madak = fadd float %mul, 10.0
+ store float %madak, float addrspace(1)* %out, align 4
+ ret void
+}
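+
+; (Editorial note on the three cases above, based on the usual GCN
+; constant-bus rule, stated here as an assumption: the madak literal already
+; occupies the single constant-bus slot the instruction gets, so an SGPR
+; source would be a second constant-bus read, and selection falls back to
+; v_mad_f32.)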
+
+; GCN-LABEL: {{^}}no_madak_src0_modifier_f32:
+; GCN: buffer_load_dword [[VA:v[0-9]+]]
+; GCN: buffer_load_dword [[VB:v[0-9]+]]
+; GCN: v_mad_f32 {{v[0-9]+}}, |{{v[0-9]+}}|, {{v[0-9]+}}, {{[sv][0-9]+}}
+; GCN: s_endpgm
+define void @no_madak_src0_modifier_f32(float addrspace(1)* noalias %out, float addrspace(1)* noalias %in.a, float addrspace(1)* noalias %in.b) nounwind {
+ %tid = tail call i32 @llvm.r600.read.tidig.x() nounwind readnone
+ %in.a.gep = getelementptr float addrspace(1)* %in.a, i32 %tid
+ %in.b.gep = getelementptr float addrspace(1)* %in.b, i32 %tid
+ %out.gep = getelementptr float addrspace(1)* %out, i32 %tid
+
+ %a = load float addrspace(1)* %in.a.gep, align 4
+ %b = load float addrspace(1)* %in.b.gep, align 4
+
+ %a.fabs = call float @llvm.fabs.f32(float %a) nounwind readnone
+
+ %mul = fmul float %a.fabs, %b
+ %madak = fadd float %mul, 10.0
+ store float %madak, float addrspace(1)* %out.gep, align 4
+ ret void
+}
+
+; GCN-LABEL: {{^}}no_madak_src1_modifier_f32:
+; GCN: buffer_load_dword [[VA:v[0-9]+]]
+; GCN: buffer_load_dword [[VB:v[0-9]+]]
+; GCN: v_mad_f32 {{v[0-9]+}}, {{v[0-9]+}}, |{{v[0-9]+}}|, {{[sv][0-9]+}}
+; GCN: s_endpgm
+define void @no_madak_src1_modifier_f32(float addrspace(1)* noalias %out, float addrspace(1)* noalias %in.a, float addrspace(1)* noalias %in.b) nounwind {
+ %tid = tail call i32 @llvm.r600.read.tidig.x() nounwind readnone
+ %in.a.gep = getelementptr float addrspace(1)* %in.a, i32 %tid
+ %in.b.gep = getelementptr float addrspace(1)* %in.b, i32 %tid
+ %out.gep = getelementptr float addrspace(1)* %out, i32 %tid
+
+ %a = load float addrspace(1)* %in.a.gep, align 4
+ %b = load float addrspace(1)* %in.b.gep, align 4
+
+ %b.fabs = call float @llvm.fabs.f32(float %b) nounwind readnone
+
+ %mul = fmul float %a, %b.fabs
+ %madak = fadd float %mul, 10.0
+ store float %madak, float addrspace(1)* %out.gep, align 4
+ ret void
+}
diff --git a/test/CodeGen/R600/madmk.ll b/test/CodeGen/R600/madmk.ll
new file mode 100644
index 0000000..249e48e
--- /dev/null
+++ b/test/CodeGen/R600/madmk.ll
@@ -0,0 +1,181 @@
+; RUN: llc -march=amdgcn -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=SI %s
+; XUN: llc -march=amdgcn -mcpu=tonga -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=VI %s
+
+declare i32 @llvm.r600.read.tidig.x() nounwind readnone
+declare float @llvm.fabs.f32(float) nounwind readnone
+
+; GCN-LABEL: {{^}}madmk_f32:
+; GCN-DAG: buffer_load_dword [[VA:v[0-9]+]], {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, 0 addr64{{$}}
+; GCN-DAG: buffer_load_dword [[VB:v[0-9]+]], {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, 0 addr64 offset:4
+; GCN: v_madmk_f32 {{v[0-9]+}}, [[VA]], [[VB]], 0x41200000
+define void @madmk_f32(float addrspace(1)* noalias %out, float addrspace(1)* noalias %in) nounwind {
+ %tid = tail call i32 @llvm.r600.read.tidig.x() nounwind readnone
+ %gep.0 = getelementptr float addrspace(1)* %in, i32 %tid
+ %gep.1 = getelementptr float addrspace(1)* %gep.0, i32 1
+ %out.gep = getelementptr float addrspace(1)* %out, i32 %tid
+
+ %a = load float addrspace(1)* %gep.0, align 4
+ %b = load float addrspace(1)* %gep.1, align 4
+
+ %mul = fmul float %a, 10.0
+ %madmk = fadd float %mul, %b
+ store float %madmk, float addrspace(1)* %out.gep, align 4
+ ret void
+}
+
+; GCN-LABEL: {{^}}madmk_2_use_f32:
+; GCN-DAG: buffer_load_dword [[VA:v[0-9]+]], {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, 0 addr64{{$}}
+; GCN-DAG: buffer_load_dword [[VB:v[0-9]+]], {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, 0 addr64 offset:4
+; GCN-DAG: buffer_load_dword [[VC:v[0-9]+]], {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, 0 addr64 offset:8
+; GCN-DAG: v_mov_b32_e32 [[VK:v[0-9]+]], 0x41200000
+; GCN-DAG: v_mad_f32 {{v[0-9]+}}, [[VA]], [[VK]], [[VB]]
+; GCN-DAG: v_mad_f32 {{v[0-9]+}}, [[VA]], [[VK]], [[VC]]
+; GCN: s_endpgm
+define void @madmk_2_use_f32(float addrspace(1)* noalias %out, float addrspace(1)* noalias %in) nounwind {
+ %tid = tail call i32 @llvm.r600.read.tidig.x() nounwind readnone
+
+ %in.gep.0 = getelementptr float addrspace(1)* %in, i32 %tid
+ %in.gep.1 = getelementptr float addrspace(1)* %in.gep.0, i32 1
+ %in.gep.2 = getelementptr float addrspace(1)* %in.gep.0, i32 2
+
+ %out.gep.0 = getelementptr float addrspace(1)* %out, i32 %tid
+ %out.gep.1 = getelementptr float addrspace(1)* %out.gep.0, i32 1
+
+ %a = load float addrspace(1)* %in.gep.0, align 4
+ %b = load float addrspace(1)* %in.gep.1, align 4
+ %c = load float addrspace(1)* %in.gep.2, align 4
+
+ %mul0 = fmul float %a, 10.0
+ %mul1 = fmul float %a, 10.0
+ %madmk0 = fadd float %mul0, %b
+ %madmk1 = fadd float %mul1, %c
+
+ store float %madmk0, float addrspace(1)* %out.gep.0, align 4
+ store float %madmk1, float addrspace(1)* %out.gep.1, align 4
+ ret void
+}
+
+; We don't get any benefit if the constant is an inline immediate.
+; GCN-LABEL: {{^}}madmk_inline_imm_f32:
+; GCN-DAG: buffer_load_dword [[VA:v[0-9]+]], {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, 0 addr64{{$}}
+; GCN-DAG: buffer_load_dword [[VB:v[0-9]+]], {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, 0 addr64 offset:4
+; GCN: v_mad_f32 {{v[0-9]+}}, 4.0, [[VA]], [[VB]]
+define void @madmk_inline_imm_f32(float addrspace(1)* noalias %out, float addrspace(1)* noalias %in) nounwind {
+ %tid = tail call i32 @llvm.r600.read.tidig.x() nounwind readnone
+ %gep.0 = getelementptr float addrspace(1)* %in, i32 %tid
+ %gep.1 = getelementptr float addrspace(1)* %gep.0, i32 1
+ %out.gep = getelementptr float addrspace(1)* %out, i32 %tid
+
+ %a = load float addrspace(1)* %gep.0, align 4
+ %b = load float addrspace(1)* %gep.1, align 4
+
+ %mul = fmul float %a, 4.0
+ %madmk = fadd float %mul, %b
+ store float %madmk, float addrspace(1)* %out.gep, align 4
+ ret void
+}
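+
+; Sketch of the same tradeoff for madmk (formats taken from the checks above,
+; registers illustrative): madmk folds the multiplicand as a 32-bit literal,
+; which buys nothing once the constant fits an inline immediate:
+;   v_madmk_f32 v0, v1, v2, 0x41200000  ; k = 10.0 as a trailing literal
+;   v_mad_f32   v0, 4.0, v1, v2         ; inline 4.0, no literal needed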
+
+; GCN-LABEL: {{^}}s_s_madmk_f32:
+; GCN-NOT: v_madmk_f32
+; GCN: v_mad_f32
+; GCN: s_endpgm
+define void @s_s_madmk_f32(float addrspace(1)* noalias %out, float %a, float %b) nounwind {
+ %tid = tail call i32 @llvm.r600.read.tidig.x() nounwind readnone
+ %out.gep = getelementptr float addrspace(1)* %out, i32 %tid
+
+ %mul = fmul float %a, 10.0
+ %madmk = fadd float %mul, %b
+ store float %madmk, float addrspace(1)* %out.gep, align 4
+ ret void
+}
+
+; GCN-LABEL: {{^}}v_s_madmk_f32:
+; GCN-NOT: v_madmk_f32
+; GCN: v_mad_f32
+; GCN: s_endpgm
+define void @v_s_madmk_f32(float addrspace(1)* noalias %out, float addrspace(1)* noalias %in, float %b) nounwind {
+ %tid = tail call i32 @llvm.r600.read.tidig.x() nounwind readnone
+ %gep.0 = getelementptr float addrspace(1)* %in, i32 %tid
+ %out.gep = getelementptr float addrspace(1)* %out, i32 %tid
+ %a = load float addrspace(1)* %gep.0, align 4
+
+ %mul = fmul float %a, 10.0
+ %madmk = fadd float %mul, %b
+ store float %madmk, float addrspace(1)* %out.gep, align 4
+ ret void
+}
+
+; GCN-LABEL: {{^}}scalar_vector_madmk_f32:
+; GCN-NOT: v_madmk_f32
+; GCN: v_mad_f32
+; GCN: s_endpgm
+define void @scalar_vector_madmk_f32(float addrspace(1)* noalias %out, float addrspace(1)* noalias %in, float %a) nounwind {
+ %tid = tail call i32 @llvm.r600.read.tidig.x() nounwind readnone
+ %gep.0 = getelementptr float addrspace(1)* %in, i32 %tid
+ %out.gep = getelementptr float addrspace(1)* %out, i32 %tid
+ %b = load float addrspace(1)* %gep.0, align 4
+
+ %mul = fmul float %a, 10.0
+ %madmk = fadd float %mul, %b
+ store float %madmk, float addrspace(1)* %out.gep, align 4
+ ret void
+}
+
+; GCN-LABEL: {{^}}no_madmk_src0_modifier_f32:
+; GCN-DAG: buffer_load_dword [[VA:v[0-9]+]], {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, 0 addr64{{$}}
+; GCN-DAG: buffer_load_dword [[VB:v[0-9]+]], {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, 0 addr64 offset:4
+; GCN: v_mad_f32 {{v[0-9]+}}, |{{v[0-9]+}}|, {{v[0-9]+}}, {{[sv][0-9]+}}
+define void @no_madmk_src0_modifier_f32(float addrspace(1)* noalias %out, float addrspace(1)* noalias %in) nounwind {
+ %tid = tail call i32 @llvm.r600.read.tidig.x() nounwind readnone
+ %gep.0 = getelementptr float addrspace(1)* %in, i32 %tid
+ %gep.1 = getelementptr float addrspace(1)* %gep.0, i32 1
+ %out.gep = getelementptr float addrspace(1)* %out, i32 %tid
+
+ %a = load float addrspace(1)* %gep.0, align 4
+ %b = load float addrspace(1)* %gep.1, align 4
+
+ %a.fabs = call float @llvm.fabs.f32(float %a) nounwind readnone
+
+ %mul = fmul float %a.fabs, 10.0
+ %madmk = fadd float %mul, %b
+ store float %madmk, float addrspace(1)* %out.gep, align 4
+ ret void
+}
+
+; GCN-LABEL: {{^}}no_madmk_src2_modifier_f32:
+; GCN-DAG: buffer_load_dword [[VA:v[0-9]+]], {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, 0 addr64{{$}}
+; GCN-DAG: buffer_load_dword [[VB:v[0-9]+]], {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, 0 addr64 offset:4
+; GCN: v_mad_f32 {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, |{{[sv][0-9]+}}|
+define void @no_madmk_src2_modifier_f32(float addrspace(1)* noalias %out, float addrspace(1)* noalias %in) nounwind {
+ %tid = tail call i32 @llvm.r600.read.tidig.x() nounwind readnone
+ %gep.0 = getelementptr float addrspace(1)* %in, i32 %tid
+ %gep.1 = getelementptr float addrspace(1)* %gep.0, i32 1
+ %out.gep = getelementptr float addrspace(1)* %out, i32 %tid
+
+ %a = load float addrspace(1)* %gep.0, align 4
+ %b = load float addrspace(1)* %gep.1, align 4
+
+ %b.fabs = call float @llvm.fabs.f32(float %b) nounwind readnone
+
+ %mul = fmul float %a, 10.0
+ %madmk = fadd float %mul, %b.fabs
+ store float %madmk, float addrspace(1)* %out.gep, align 4
+ ret void
+}
+
+; GCN-LABEL: {{^}}madmk_add_inline_imm_f32:
+; GCN: buffer_load_dword [[A:v[0-9]+]]
+; GCN: v_mov_b32_e32 [[VK:v[0-9]+]], 0x41200000
+; GCN: v_mad_f32 {{v[0-9]+}}, [[VK]], [[A]], 2.0
+define void @madmk_add_inline_imm_f32(float addrspace(1)* noalias %out, float addrspace(1)* noalias %in) nounwind {
+ %tid = tail call i32 @llvm.r600.read.tidig.x() nounwind readnone
+ %gep.0 = getelementptr float addrspace(1)* %in, i32 %tid
+ %out.gep = getelementptr float addrspace(1)* %out, i32 %tid
+
+ %a = load float addrspace(1)* %gep.0, align 4
+
+ %mul = fmul float %a, 10.0
+ %madmk = fadd float %mul, 2.0
+ store float %madmk, float addrspace(1)* %out.gep, align 4
+ ret void
+}
diff --git a/test/CodeGen/R600/max.ll b/test/CodeGen/R600/max.ll
index d67ef47..20af993 100644
--- a/test/CodeGen/R600/max.ll
+++ b/test/CodeGen/R600/max.ll
@@ -1,4 +1,4 @@
-; RUN: llc -march=r600 -mcpu=SI < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
+; RUN: llc -march=amdgcn -mcpu=SI < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
declare i32 @llvm.r600.read.tidig.x() nounwind readnone
diff --git a/test/CodeGen/R600/max3.ll b/test/CodeGen/R600/max3.ll
index 74b08f6..f905e17 100644
--- a/test/CodeGen/R600/max3.ll
+++ b/test/CodeGen/R600/max3.ll
@@ -1,4 +1,4 @@
-; RUN: llc -march=r600 -mcpu=SI < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
+; RUN: llc -march=amdgcn -mcpu=SI < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
declare i32 @llvm.r600.read.tidig.x() nounwind readnone
diff --git a/test/CodeGen/R600/min.ll b/test/CodeGen/R600/min.ll
index 88c0dff..00ba5c6 100644
--- a/test/CodeGen/R600/min.ll
+++ b/test/CodeGen/R600/min.ll
@@ -1,4 +1,4 @@
-; RUN: llc -march=r600 -mcpu=SI < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
+; RUN: llc -march=amdgcn -mcpu=SI < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
declare i32 @llvm.r600.read.tidig.x() nounwind readnone
@@ -97,3 +97,24 @@ define void @s_test_umin_ult_i32(i32 addrspace(1)* %out, i32 %a, i32 %b) nounwin
store i32 %val, i32 addrspace(1)* %out, align 4
ret void
}
+
+; FUNC-LABEL: {{^}}v_test_umin_ult_i32_multi_use:
+; SI-NOT: v_min
+; SI: v_cmp_lt_u32
+; SI-NEXT: v_cndmask_b32
+; SI-NOT: v_min
+; SI: s_endpgm
+define void @v_test_umin_ult_i32_multi_use(i32 addrspace(1)* %out0, i1 addrspace(1)* %out1, i32 addrspace(1)* %aptr, i32 addrspace(1)* %bptr) nounwind {
+ %tid = call i32 @llvm.r600.read.tidig.x() nounwind readnone
+ %gep0 = getelementptr i32 addrspace(1)* %aptr, i32 %tid
+ %gep1 = getelementptr i32 addrspace(1)* %bptr, i32 %tid
+ %outgep0 = getelementptr i32 addrspace(1)* %out0, i32 %tid
+ %outgep1 = getelementptr i1 addrspace(1)* %out1, i32 %tid
+ %a = load i32 addrspace(1)* %gep0, align 4
+ %b = load i32 addrspace(1)* %gep1, align 4
+ %cmp = icmp ult i32 %a, %b
+ %val = select i1 %cmp, i32 %a, i32 %b
+ store i32 %val, i32 addrspace(1)* %outgep0, align 4
+ store i1 %cmp, i1 addrspace(1)* %outgep1
+ ret void
+}
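+
+; (Editorial note, an assumption about the rationale: %cmp is itself stored
+; to %out1, so the v_cmp must survive regardless; folding the select into
+; v_min_u32 would not eliminate it, hence min formation is skipped for
+; multi-use compares.)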
diff --git a/test/CodeGen/R600/min3.ll b/test/CodeGen/R600/min3.ll
index f852cff..6c11a65 100644
--- a/test/CodeGen/R600/min3.ll
+++ b/test/CodeGen/R600/min3.ll
@@ -1,4 +1,4 @@
-; RUN: llc -march=r600 -mcpu=SI < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
+; RUN: llc -march=amdgcn -mcpu=SI < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
declare i32 @llvm.r600.read.tidig.x() nounwind readnone
diff --git a/test/CodeGen/R600/missing-store.ll b/test/CodeGen/R600/missing-store.ll
index 5346046..8ddef35 100644
--- a/test/CodeGen/R600/missing-store.ll
+++ b/test/CodeGen/R600/missing-store.ll
@@ -1,4 +1,4 @@
-; RUN: llc -march=r600 -mcpu=SI -verify-machineinstrs < %s | FileCheck -check-prefix=FUNC -check-prefix=SI %s
+; RUN: llc -march=amdgcn -mcpu=SI -verify-machineinstrs < %s | FileCheck -check-prefix=FUNC -check-prefix=SI %s
@ptr_load = addrspace(3) global i32 addrspace(2)* undef, align 8
diff --git a/test/CodeGen/R600/mubuf.ll b/test/CodeGen/R600/mubuf.ll
index c2efda4..988e5c1 100644
--- a/test/CodeGen/R600/mubuf.ll
+++ b/test/CodeGen/R600/mubuf.ll
@@ -1,4 +1,4 @@
-; RUN: llc -march=r600 -mcpu=SI -show-mc-encoding -verify-machineinstrs < %s | FileCheck %s
+; RUN: llc -march=amdgcn -mcpu=SI -show-mc-encoding -verify-machineinstrs < %s | FileCheck %s
declare i32 @llvm.r600.read.tidig.x() readnone
@@ -8,7 +8,7 @@ declare i32 @llvm.r600.read.tidig.x() readnone
; MUBUF load with an immediate byte offset that fits into 12-bits
; CHECK-LABEL: {{^}}mubuf_load0:
-; CHECK: buffer_load_dword v{{[0-9]}}, s[{{[0-9]:[0-9]}}], 0 offset:0x4 ; encoding: [0x04,0x00,0x30,0xe0
+; CHECK: buffer_load_dword v{{[0-9]}}, s[{{[0-9]:[0-9]}}], 0 offset:4 ; encoding: [0x04,0x00,0x30,0xe0
define void @mubuf_load0(i32 addrspace(1)* %out, i32 addrspace(1)* %in) {
entry:
%0 = getelementptr i32 addrspace(1)* %in, i64 1
@@ -19,7 +19,7 @@ entry:
; MUBUF load with the largest possible immediate offset
; CHECK-LABEL: {{^}}mubuf_load1:
-; CHECK: buffer_load_ubyte v{{[0-9]}}, s[{{[0-9]:[0-9]}}], 0 offset:0xfff ; encoding: [0xff,0x0f,0x20,0xe0
+; CHECK: buffer_load_ubyte v{{[0-9]}}, s[{{[0-9]:[0-9]}}], 0 offset:4095 ; encoding: [0xff,0x0f,0x20,0xe0
define void @mubuf_load1(i8 addrspace(1)* %out, i8 addrspace(1)* %in) {
entry:
%0 = getelementptr i8 addrspace(1)* %in, i64 4095
@@ -30,7 +30,8 @@ entry:
; MUBUF load with an immediate byte offset that doesn't fit into 12-bits
; CHECK-LABEL: {{^}}mubuf_load2:
-; CHECK: buffer_load_dword v{{[0-9]}}, v[{{[0-9]+:[0-9]+}}], s[{{[0-9]+:[0-9]+}}], 0 addr64 ; encoding: [0x00,0x80,0x30,0xe0
+; CHECK: s_movk_i32 [[SOFFSET:s[0-9]+]], 0x1000
+; CHECK: buffer_load_dword v{{[0-9]}}, s[{{[0-9]+:[0-9]+}}], [[SOFFSET]] ; encoding: [0x00,0x00,0x30,0xe0
define void @mubuf_load2(i32 addrspace(1)* %out, i32 addrspace(1)* %in) {
entry:
%0 = getelementptr i32 addrspace(1)* %in, i64 1024
@@ -42,7 +43,7 @@ entry:
; MUBUF load with a 12-bit immediate offset and a register offset
; CHECK-LABEL: {{^}}mubuf_load3:
; CHECK-NOT: ADD
-; CHECK: buffer_load_dword v{{[0-9]}}, v[{{[0-9]+:[0-9]+}}], s[{{[0-9]+:[0-9]+}}], 0 addr64 offset:0x4 ; encoding: [0x04,0x80,0x30,0xe0
+; CHECK: buffer_load_dword v{{[0-9]}}, v[{{[0-9]+:[0-9]+}}], s[{{[0-9]+:[0-9]+}}], 0 addr64 offset:4 ; encoding: [0x04,0x80,0x30,0xe0
define void @mubuf_load3(i32 addrspace(1)* %out, i32 addrspace(1)* %in, i64 %offset) {
entry:
%0 = getelementptr i32 addrspace(1)* %in, i64 %offset
@@ -52,13 +53,46 @@ entry:
ret void
}
+; CHECK-LABEL: {{^}}soffset_max_imm:
+; CHECK: buffer_load_dword v{{[0-9]+}}, v{{[0-9]+}}, s[{{[0-9]+}}:{{[0-9]+}}], 64 offen glc
+define void @soffset_max_imm([6 x <16 x i8>] addrspace(2)* byval, [17 x <16 x i8>] addrspace(2)* byval, [16 x <4 x i32>] addrspace(2)* byval, [32 x <8 x i32>] addrspace(2)* byval, i32 inreg, i32 inreg, i32, i32, i32, i32, i32, i32, i32, i32) #1 {
+main_body:
+ %tmp0 = getelementptr [6 x <16 x i8>] addrspace(2)* %0, i32 0, i32 0
+ %tmp1 = load <16 x i8> addrspace(2)* %tmp0
+ %tmp2 = shl i32 %6, 2
+ %tmp3 = call i32 @llvm.SI.buffer.load.dword.i32.i32(<16 x i8> %tmp1, i32 %tmp2, i32 64, i32 0, i32 1, i32 0, i32 1, i32 0, i32 0)
+ %tmp4 = add i32 %6, 16
+ %tmp5 = bitcast float 0.0 to i32
+ call void @llvm.SI.tbuffer.store.i32(<16 x i8> %tmp1, i32 %tmp5, i32 1, i32 %tmp4, i32 %4, i32 0, i32 4, i32 4, i32 1, i32 0, i32 1, i32 1, i32 0)
+ ret void
+}
+
+; Make sure immediates that aren't inline constants don't get folded into
+; the soffset operand.
+; FIXME: For this test we should be smart enough to shift the immediate into
+; the offset field.
+; CHECK-LABEL: {{^}}soffset_no_fold:
+; CHECK: s_movk_i32 [[SOFFSET:s[0-9]+]], 0x41
+; CHECK: buffer_load_dword v{{[0-9]+}}, v{{[0-9]+}}, s[{{[0-9]+}}:{{[0-9]+}}], [[SOFFSET]] offen glc
+define void @soffset_no_fold([6 x <16 x i8>] addrspace(2)* byval, [17 x <16 x i8>] addrspace(2)* byval, [16 x <4 x i32>] addrspace(2)* byval, [32 x <8 x i32>] addrspace(2)* byval, i32 inreg, i32 inreg, i32, i32, i32, i32, i32, i32, i32, i32) #1 {
+main_body:
+ %tmp0 = getelementptr [6 x <16 x i8>] addrspace(2)* %0, i32 0, i32 0
+ %tmp1 = load <16 x i8> addrspace(2)* %tmp0
+ %tmp2 = shl i32 %6, 2
+ %tmp3 = call i32 @llvm.SI.buffer.load.dword.i32.i32(<16 x i8> %tmp1, i32 %tmp2, i32 65, i32 0, i32 1, i32 0, i32 1, i32 0, i32 0)
+ %tmp4 = add i32 %6, 16
+ %tmp5 = bitcast float 0.0 to i32
+ call void @llvm.SI.tbuffer.store.i32(<16 x i8> %tmp1, i32 %tmp5, i32 1, i32 %tmp4, i32 %4, i32 0, i32 4, i32 4, i32 1, i32 0, i32 1, i32 1, i32 0)
+ ret void
+}
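+
+; A hypothetical lowering for the FIXME above (illustrative only, not current
+; output): split 0x41 into an inline soffset plus the immediate offset field,
+; i.e. 65 = 64 + 1:
+;   buffer_load_dword v0, v1, s[4:7], 64 offen offset:1 glc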
+
;;;==========================================================================;;;
;;; MUBUF STORE TESTS
;;;==========================================================================;;;
; MUBUF store with an immediate byte offset that fits into 12-bits
; CHECK-LABEL: {{^}}mubuf_store0:
-; CHECK: buffer_store_dword v{{[0-9]}}, s[{{[0-9]:[0-9]}}], 0 offset:0x4 ; encoding: [0x04,0x00,0x70,0xe0
+; CHECK: buffer_store_dword v{{[0-9]}}, s[{{[0-9]:[0-9]}}], 0 offset:4 ; encoding: [0x04,0x00,0x70,0xe0
define void @mubuf_store0(i32 addrspace(1)* %out) {
entry:
%0 = getelementptr i32 addrspace(1)* %out, i64 1
@@ -68,7 +102,7 @@ entry:
; MUBUF store with the largest possible immediate offset
; CHECK-LABEL: {{^}}mubuf_store1:
-; CHECK: buffer_store_byte v{{[0-9]}}, s[{{[0-9]:[0-9]}}], 0 offset:0xfff ; encoding: [0xff,0x0f,0x60,0xe0
+; CHECK: buffer_store_byte v{{[0-9]}}, s[{{[0-9]:[0-9]}}], 0 offset:4095 ; encoding: [0xff,0x0f,0x60,0xe0
define void @mubuf_store1(i8 addrspace(1)* %out) {
entry:
@@ -79,7 +113,8 @@ entry:
; MUBUF store with an immediate byte offset that doesn't fit into 12-bits
; CHECK-LABEL: {{^}}mubuf_store2:
-; CHECK: buffer_store_dword v{{[0-9]}}, v[{{[0-9]+:[0-9]+}}], s[{{[0-9]:[0-9]}}], 0 addr64 ; encoding: [0x00,0x80,0x70,0xe0
+; CHECK: s_movk_i32 [[SOFFSET:s[0-9]+]], 0x1000
+; CHECK: buffer_store_dword v{{[0-9]}}, s[{{[0-9]:[0-9]}}], [[SOFFSET]] ; encoding: [0x00,0x00,0x70,0xe0
define void @mubuf_store2(i32 addrspace(1)* %out) {
entry:
%0 = getelementptr i32 addrspace(1)* %out, i64 1024
@@ -90,7 +125,7 @@ entry:
; MUBUF store with a 12-bit immediate offset and a register offset
; CHECK-LABEL: {{^}}mubuf_store3:
; CHECK-NOT: ADD
-; CHECK: buffer_store_dword v{{[0-9]}}, v[{{[0-9]:[0-9]}}], s[{{[0-9]:[0-9]}}], 0 addr64 offset:0x4 ; encoding: [0x04,0x80,0x70,0xe0
+; CHECK: buffer_store_dword v{{[0-9]}}, v[{{[0-9]:[0-9]}}], s[{{[0-9]:[0-9]}}], 0 addr64 offset:4 ; encoding: [0x04,0x80,0x70,0xe0
define void @mubuf_store3(i32 addrspace(1)* %out, i64 %offset) {
entry:
%0 = getelementptr i32 addrspace(1)* %out, i64 %offset
@@ -107,7 +142,7 @@ define void @store_sgpr_ptr(i32 addrspace(1)* %out) #0 {
}
; CHECK-LABEL: {{^}}store_sgpr_ptr_offset:
-; CHECK: buffer_store_dword v{{[0-9]+}}, s{{\[[0-9]+:[0-9]+\]}}, 0 offset:0x28
+; CHECK: buffer_store_dword v{{[0-9]+}}, s{{\[[0-9]+:[0-9]+\]}}, 0 offset:40
define void @store_sgpr_ptr_offset(i32 addrspace(1)* %out) #0 {
%out.gep = getelementptr i32 addrspace(1)* %out, i32 10
store i32 99, i32 addrspace(1)* %out.gep, align 4
@@ -115,13 +150,23 @@ define void @store_sgpr_ptr_offset(i32 addrspace(1)* %out) #0 {
}
; CHECK-LABEL: {{^}}store_sgpr_ptr_large_offset:
-; CHECK: buffer_store_dword v{{[0-9]+}}, v{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, 0 addr64
+; CHECK: s_mov_b32 [[SOFFSET:s[0-9]+]], 0x20000
+; CHECK: buffer_store_dword v{{[0-9]+}}, s{{\[[0-9]+:[0-9]+\]}}, [[SOFFSET]]
define void @store_sgpr_ptr_large_offset(i32 addrspace(1)* %out) #0 {
%out.gep = getelementptr i32 addrspace(1)* %out, i32 32768
store i32 99, i32 addrspace(1)* %out.gep, align 4
ret void
}
+; CHECK-LABEL: {{^}}store_sgpr_ptr_large_offset_atomic:
+; CHECK: s_mov_b32 [[SOFFSET:s[0-9]+]], 0x20000
+; CHECK: buffer_atomic_add v{{[0-9]+}}, s{{\[[0-9]+:[0-9]+\]}}, [[SOFFSET]]
+define void @store_sgpr_ptr_large_offset_atomic(i32 addrspace(1)* %out) #0 {
+ %gep = getelementptr i32 addrspace(1)* %out, i32 32768
+ %val = atomicrmw volatile add i32 addrspace(1)* %gep, i32 5 seq_cst
+ ret void
+}
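+
+; (Editorial note: 32768 i32 elements is 32768 * 4 = 0x20000 bytes, which
+; overflows the 12-bit immediate offset field, so both tests above expect the
+; offset materialized into an SGPR soffset instead.)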
+
; CHECK-LABEL: {{^}}store_vgpr_ptr:
; CHECK: buffer_store_dword v{{[0-9]+}}, v{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, 0 addr64
define void @store_vgpr_ptr(i32 addrspace(1)* %out) #0 {
@@ -130,3 +175,9 @@ define void @store_vgpr_ptr(i32 addrspace(1)* %out) #0 {
store i32 99, i32 addrspace(1)* %out.gep, align 4
ret void
}
+
+declare i32 @llvm.SI.buffer.load.dword.i32.i32(<16 x i8>, i32, i32, i32, i32, i32, i32, i32, i32) #3
+declare void @llvm.SI.tbuffer.store.i32(<16 x i8>, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32)
+
+attributes #1 = { "ShaderType"="2" "unsafe-fp-math"="true" }
+attributes #3 = { nounwind readonly }
diff --git a/test/CodeGen/R600/mul.ll b/test/CodeGen/R600/mul.ll
index be5d6a0..6f15e70 100644
--- a/test/CodeGen/R600/mul.ll
+++ b/test/CodeGen/R600/mul.ll
@@ -1,5 +1,6 @@
; RUN: llc -march=r600 -mcpu=redwood < %s | FileCheck -check-prefix=EG %s -check-prefix=FUNC
-; RUN: llc -march=r600 -mcpu=verde -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
+; RUN: llc -march=amdgcn -mcpu=verde -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
+; RUN: llc -march=amdgcn -mcpu=tonga -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
; mul24 and mad24 are affected
diff --git a/test/CodeGen/R600/mul_int24.ll b/test/CodeGen/R600/mul_int24.ll
index be58f7e..7609dcc 100644
--- a/test/CodeGen/R600/mul_int24.ll
+++ b/test/CodeGen/R600/mul_int24.ll
@@ -1,6 +1,7 @@
; RUN: llc < %s -march=r600 -mcpu=redwood | FileCheck %s --check-prefix=EG --check-prefix=FUNC
; RUN: llc < %s -march=r600 -mcpu=cayman | FileCheck %s --check-prefix=CM --check-prefix=FUNC
-; RUN: llc < %s -march=r600 -mcpu=SI -verify-machineinstrs | FileCheck %s --check-prefix=SI --check-prefix=FUNC
+; RUN: llc < %s -march=amdgcn -mcpu=SI -verify-machineinstrs | FileCheck %s --check-prefix=SI --check-prefix=FUNC
+; RUN: llc < %s -march=amdgcn -mcpu=tonga -verify-machineinstrs | FileCheck %s --check-prefix=SI --check-prefix=FUNC
; FUNC-LABEL: {{^}}i32_mul24:
; Signed 24-bit multiply is not supported on pre-Cayman GPUs.
diff --git a/test/CodeGen/R600/mul_uint24.ll b/test/CodeGen/R600/mul_uint24.ll
index 8d1cda8..e640a7c 100644
--- a/test/CodeGen/R600/mul_uint24.ll
+++ b/test/CodeGen/R600/mul_uint24.ll
@@ -1,6 +1,7 @@
; RUN: llc < %s -march=r600 -mcpu=redwood | FileCheck %s --check-prefix=EG --check-prefix=FUNC
; RUN: llc < %s -march=r600 -mcpu=cayman | FileCheck %s --check-prefix=EG --check-prefix=FUNC
-; RUN: llc < %s -march=r600 -mcpu=SI -verify-machineinstrs | FileCheck %s --check-prefix=SI --check-prefix=FUNC
+; RUN: llc < %s -march=amdgcn -mcpu=SI -verify-machineinstrs | FileCheck %s --check-prefix=SI --check-prefix=FUNC
+; RUN: llc < %s -march=amdgcn -mcpu=tonga -verify-machineinstrs | FileCheck %s --check-prefix=SI --check-prefix=FUNC
; FUNC-LABEL: {{^}}u32_mul24:
; EG: MUL_UINT24 {{[* ]*}}T{{[0-9]\.[XYZW]}}, KC0[2].Z, KC0[2].W
diff --git a/test/CodeGen/R600/mulhu.ll b/test/CodeGen/R600/mulhu.ll
index 82a0783..29b0944 100644
--- a/test/CodeGen/R600/mulhu.ll
+++ b/test/CodeGen/R600/mulhu.ll
@@ -1,7 +1,8 @@
-;RUN: llc < %s -march=r600 -mcpu=verde -verify-machineinstrs | FileCheck %s
+;RUN: llc < %s -march=amdgcn -mcpu=verde -verify-machineinstrs | FileCheck %s
+;RUN: llc < %s -march=amdgcn -mcpu=tonga -verify-machineinstrs | FileCheck %s
;CHECK: v_mov_b32_e32 v{{[0-9]+}}, 0xaaaaaaab
-;CHECK: v_mul_hi_u32 v0, {{[sv][0-9]+}}, {{v[0-9]+}}
+;CHECK: v_mul_hi_u32 v0, {{v[0-9]+}}, {{s[0-9]+}}
;CHECK-NEXT: v_lshrrev_b32_e32 v0, 1, v0
define void @test(i32 %p) {
diff --git a/test/CodeGen/R600/no-initializer-constant-addrspace.ll b/test/CodeGen/R600/no-initializer-constant-addrspace.ll
index cd2dca3..532edf0 100644
--- a/test/CodeGen/R600/no-initializer-constant-addrspace.ll
+++ b/test/CodeGen/R600/no-initializer-constant-addrspace.ll
@@ -1,4 +1,5 @@
-; RUN: llc -march=r600 -mcpu=SI -o /dev/null %s
+; RUN: llc -march=amdgcn -mcpu=SI -o /dev/null %s
+; RUN: llc -march=amdgcn -mcpu=tonga -o /dev/null %s
; RUN: llc -march=r600 -mcpu=cypress -o /dev/null %s
@extern_const_addrspace = external unnamed_addr addrspace(2) constant [5 x i32], align 4
diff --git a/test/CodeGen/R600/no-shrink-extloads.ll b/test/CodeGen/R600/no-shrink-extloads.ll
new file mode 100644
index 0000000..3079492
--- /dev/null
+++ b/test/CodeGen/R600/no-shrink-extloads.ll
@@ -0,0 +1,191 @@
+; RUN: llc -march=amdgcn -mcpu=SI -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
+
+declare i32 @llvm.r600.read.tidig.x() nounwind readnone
+
+; Make sure we don't turn the 32-bit argument load into a 16-bit
+; load. There are no extending scalar loads, so that would require
+; using a buffer_load instruction.
+
+; FUNC-LABEL: {{^}}truncate_kernarg_i32_to_i16:
+; SI: s_load_dword s
+; SI: buffer_store_short v
+define void @truncate_kernarg_i32_to_i16(i16 addrspace(1)* %out, i32 %arg) nounwind {
+ %trunc = trunc i32 %arg to i16
+ store i16 %trunc, i16 addrspace(1)* %out
+ ret void
+}
+
+; It should be OK (and probably performance neutral) to reduce this,
+; but we don't know if the load is uniform yet.
+
+; FUNC-LABEL: {{^}}truncate_buffer_load_i32_to_i16:
+; SI: buffer_load_dword v
+; SI: buffer_store_short v
+define void @truncate_buffer_load_i32_to_i16(i16 addrspace(1)* %out, i32 addrspace(1)* %in) nounwind {
+ %tid = call i32 @llvm.r600.read.tidig.x() nounwind readnone
+ %gep.in = getelementptr i32 addrspace(1)* %in, i32 %tid
+ %gep.out = getelementptr i16 addrspace(1)* %out, i32 %tid
+ %load = load i32 addrspace(1)* %gep.in
+ %trunc = trunc i32 %load to i16
+ store i16 %trunc, i16 addrspace(1)* %gep.out
+ ret void
+}
+
+; FUNC-LABEL: {{^}}truncate_kernarg_i32_to_i8:
+; SI: s_load_dword s
+; SI: buffer_store_byte v
+define void @truncate_kernarg_i32_to_i8(i8 addrspace(1)* %out, i32 %arg) nounwind {
+ %trunc = trunc i32 %arg to i8
+ store i8 %trunc, i8 addrspace(1)* %out
+ ret void
+}
+
+; FUNC-LABEL: {{^}}truncate_buffer_load_i32_to_i8:
+; SI: buffer_load_dword v
+; SI: buffer_store_byte v
+define void @truncate_buffer_load_i32_to_i8(i8 addrspace(1)* %out, i32 addrspace(1)* %in) nounwind {
+ %tid = call i32 @llvm.r600.read.tidig.x() nounwind readnone
+ %gep.in = getelementptr i32 addrspace(1)* %in, i32 %tid
+ %gep.out = getelementptr i8 addrspace(1)* %out, i32 %tid
+ %load = load i32 addrspace(1)* %gep.in
+ %trunc = trunc i32 %load to i8
+ store i8 %trunc, i8 addrspace(1)* %gep.out
+ ret void
+}
+
+; FUNC-LABEL: {{^}}truncate_kernarg_i32_to_i1:
+; SI: s_load_dword s
+; SI: buffer_store_byte v
+define void @truncate_kernarg_i32_to_i1(i1 addrspace(1)* %out, i32 %arg) nounwind {
+ %trunc = trunc i32 %arg to i1
+ store i1 %trunc, i1 addrspace(1)* %out
+ ret void
+}
+
+; FUNC-LABEL: {{^}}truncate_buffer_load_i32_to_i1:
+; SI: buffer_load_dword v
+; SI: buffer_store_byte v
+define void @truncate_buffer_load_i32_to_i1(i1 addrspace(1)* %out, i32 addrspace(1)* %in) nounwind {
+ %tid = call i32 @llvm.r600.read.tidig.x() nounwind readnone
+ %gep.in = getelementptr i32 addrspace(1)* %in, i32 %tid
+ %gep.out = getelementptr i1 addrspace(1)* %out, i32 %tid
+ %load = load i32 addrspace(1)* %gep.in
+ %trunc = trunc i32 %load to i1
+ store i1 %trunc, i1 addrspace(1)* %gep.out
+ ret void
+}
+
+; FUNC-LABEL: {{^}}truncate_kernarg_i64_to_i32:
+; SI: s_load_dword s
+; SI: buffer_store_dword v
+define void @truncate_kernarg_i64_to_i32(i32 addrspace(1)* %out, i64 %arg) nounwind {
+ %trunc = trunc i64 %arg to i32
+ store i32 %trunc, i32 addrspace(1)* %out
+ ret void
+}
+
+; FUNC-LABEL: {{^}}truncate_buffer_load_i64_to_i32:
+; SI: buffer_load_dword v
+; SI: buffer_store_dword v
+define void @truncate_buffer_load_i64_to_i32(i32 addrspace(1)* %out, i64 addrspace(1)* %in) nounwind {
+ %tid = call i32 @llvm.r600.read.tidig.x() nounwind readnone
+ %gep.in = getelementptr i64 addrspace(1)* %in, i32 %tid
+ %gep.out = getelementptr i32 addrspace(1)* %out, i32 %tid
+ %load = load i64 addrspace(1)* %gep.in
+ %trunc = trunc i64 %load to i32
+ store i32 %trunc, i32 addrspace(1)* %gep.out
+ ret void
+}
+
+; FUNC-LABEL: {{^}}srl_kernarg_i64_to_i32:
+; SI: s_load_dword s
+; SI: buffer_store_dword v
+define void @srl_kernarg_i64_to_i32(i32 addrspace(1)* %out, i64 %arg) nounwind {
+ %srl = lshr i64 %arg, 32
+ %trunc = trunc i64 %srl to i32
+ store i32 %trunc, i32 addrspace(1)* %out
+ ret void
+}
+
+; FUNC-LABEL: {{^}}srl_buffer_load_i64_to_i32:
+; SI: buffer_load_dword v
+; SI: buffer_store_dword v
+define void @srl_buffer_load_i64_to_i32(i32 addrspace(1)* %out, i64 addrspace(1)* %in) nounwind {
+ %tid = call i32 @llvm.r600.read.tidig.x() nounwind readnone
+ %gep.in = getelementptr i64 addrspace(1)* %in, i32 %tid
+ %gep.out = getelementptr i32 addrspace(1)* %out, i32 %tid
+ %load = load i64 addrspace(1)* %gep.in
+ %srl = lshr i64 %load, 32
+ %trunc = trunc i64 %srl to i32
+ store i32 %trunc, i32 addrspace(1)* %gep.out
+ ret void
+}
+
+; Might as well reduce to 8-bit loads.
+; FUNC-LABEL: {{^}}truncate_kernarg_i16_to_i8:
+; SI: s_load_dword s
+; SI: buffer_store_byte v
+define void @truncate_kernarg_i16_to_i8(i8 addrspace(1)* %out, i16 %arg) nounwind {
+ %trunc = trunc i16 %arg to i8
+ store i8 %trunc, i8 addrspace(1)* %out
+ ret void
+}
+
+; FUNC-LABEL: {{^}}truncate_buffer_load_i16_to_i8:
+; SI: buffer_load_ubyte v
+; SI: buffer_store_byte v
+define void @truncate_buffer_load_i16_to_i8(i8 addrspace(1)* %out, i16 addrspace(1)* %in) nounwind {
+ %tid = call i32 @llvm.r600.read.tidig.x() nounwind readnone
+ %gep.in = getelementptr i16 addrspace(1)* %in, i32 %tid
+ %gep.out = getelementptr i8 addrspace(1)* %out, i32 %tid
+ %load = load i16 addrspace(1)* %gep.in
+ %trunc = trunc i16 %load to i8
+ store i8 %trunc, i8 addrspace(1)* %gep.out
+ ret void
+}
+
+; FUNC-LABEL: {{^}}srl_kernarg_i64_to_i8:
+; SI: s_load_dword s
+; SI: buffer_store_byte v
+define void @srl_kernarg_i64_to_i8(i8 addrspace(1)* %out, i64 %arg) nounwind {
+ %srl = lshr i64 %arg, 32
+ %trunc = trunc i64 %srl to i8
+ store i8 %trunc, i8 addrspace(1)* %out
+ ret void
+}
+
+; FUNC-LABEL: {{^}}srl_buffer_load_i64_to_i8:
+; SI: buffer_load_dword v
+; SI: buffer_store_byte v
+define void @srl_buffer_load_i64_to_i8(i8 addrspace(1)* %out, i64 addrspace(1)* %in) nounwind {
+ %tid = call i32 @llvm.r600.read.tidig.x() nounwind readnone
+ %gep.in = getelementptr i64 addrspace(1)* %in, i32 %tid
+ %gep.out = getelementptr i8 addrspace(1)* %out, i32 %tid
+ %load = load i64 addrspace(1)* %gep.in
+ %srl = lshr i64 %load, 32
+ %trunc = trunc i64 %srl to i8
+ store i8 %trunc, i8 addrspace(1)* %gep.out
+ ret void
+}
+
+; FUNC-LABEL: {{^}}truncate_kernarg_i64_to_i8:
+; SI: s_load_dword s
+; SI: buffer_store_byte v
+define void @truncate_kernarg_i64_to_i8(i8 addrspace(1)* %out, i64 %arg) nounwind {
+ %trunc = trunc i64 %arg to i8
+ store i8 %trunc, i8 addrspace(1)* %out
+ ret void
+}
+
+; FUNC-LABEL: {{^}}truncate_buffer_load_i64_to_i8:
+; SI: buffer_load_dword v
+; SI: buffer_store_byte v
+define void @truncate_buffer_load_i64_to_i8(i8 addrspace(1)* %out, i64 addrspace(1)* %in) nounwind {
+ %tid = call i32 @llvm.r600.read.tidig.x() nounwind readnone
+ %gep.in = getelementptr i64 addrspace(1)* %in, i32 %tid
+ %gep.out = getelementptr i8 addrspace(1)* %out, i32 %tid
+ %load = load i64 addrspace(1)* %gep.in
+ %trunc = trunc i64 %load to i8
+ store i8 %trunc, i8 addrspace(1)* %gep.out
+ ret void
+}
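+
+; Illustrative companion (an editorial sketch, not part of the original test
+; set): the same reasoning extends to other shifts that select a sub-dword,
+; e.g.
+;
+; define void @srl_kernarg_i64_to_i16(i16 addrspace(1)* %out, i64 %arg) nounwind {
+;   %srl = lshr i64 %arg, 32
+;   %trunc = trunc i64 %srl to i16
+;   store i16 %trunc, i16 addrspace(1)* %out
+;   ret void
+; }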
diff --git a/test/CodeGen/R600/operand-folding.ll b/test/CodeGen/R600/operand-folding.ll
new file mode 100644
index 0000000..88a8145
--- /dev/null
+++ b/test/CodeGen/R600/operand-folding.ll
@@ -0,0 +1,113 @@
+; RUN: llc < %s -march=amdgcn -mcpu=SI -verify-machineinstrs | FileCheck %s
+
+; CHECK-LABEL: {{^}}fold_sgpr:
+; CHECK: v_add_i32_e32 v{{[0-9]+}}, s
+define void @fold_sgpr(i32 addrspace(1)* %out, i32 %fold) {
+entry:
+ %tmp0 = icmp ne i32 %fold, 0
+ br i1 %tmp0, label %if, label %endif
+
+if:
+ %id = call i32 @llvm.r600.read.tidig.x()
+ %offset = add i32 %fold, %id
+ %tmp1 = getelementptr i32 addrspace(1)* %out, i32 %offset
+ store i32 0, i32 addrspace(1)* %tmp1
+ br label %endif
+
+endif:
+ ret void
+}
+
+; CHECK-LABEL: {{^}}fold_imm:
+; CHECK: v_or_b32_e32 v{{[0-9]+}}, 5
+define void @fold_imm(i32 addrspace(1)* %out, i32 %cmp) {
+entry:
+ %fold = add i32 3, 2
+ %tmp0 = icmp ne i32 %cmp, 0
+ br i1 %tmp0, label %if, label %endif
+
+if:
+ %id = call i32 @llvm.r600.read.tidig.x()
+ %val = or i32 %id, %fold
+ store i32 %val, i32 addrspace(1)* %out
+ br label %endif
+
+endif:
+ ret void
+}
+
+; CHECK-LABEL: {{^}}fold_64bit_constant_add:
+; CHECK-NOT: s_mov_b64
+; FIXME: It would be better if we could use v_add here and drop the extra
+; v_mov_b32 instructions.
+; CHECK-DAG: s_add_u32 [[LO:s[0-9]+]], s{{[0-9]+}}, 1
+; CHECK-DAG: s_addc_u32 [[HI:s[0-9]+]], s{{[0-9]+}}, 0
+; CHECK-DAG: v_mov_b32_e32 v[[VLO:[0-9]+]], [[LO]]
+; CHECK-DAG: v_mov_b32_e32 v[[VHI:[0-9]+]], [[HI]]
+; CHECK: buffer_store_dwordx2 v{{\[}}[[VLO]]:[[VHI]]{{\]}},
+
+define void @fold_64bit_constant_add(i64 addrspace(1)* %out, i32 %cmp, i64 %val) {
+entry:
+ %tmp0 = add i64 %val, 1
+ store i64 %tmp0, i64 addrspace(1)* %out
+ ret void
+}
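+
+; One possible shape for the FIXME above (purely illustrative; assumes %val
+; already lives in VGPRs v2:v3):
+;   v_add_i32_e32  v0, vcc, 1, v2
+;   v_addc_u32_e32 v1, vcc, 0, v3, vcc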
+
+; Inline constants should always be folded.
+
+; CHECK-LABEL: {{^}}vector_inline:
+; CHECK: v_xor_b32_e32 v{{[0-9]+}}, 5, v{{[0-9]+}}
+; CHECK: v_xor_b32_e32 v{{[0-9]+}}, 5, v{{[0-9]+}}
+; CHECK: v_xor_b32_e32 v{{[0-9]+}}, 5, v{{[0-9]+}}
+; CHECK: v_xor_b32_e32 v{{[0-9]+}}, 5, v{{[0-9]+}}
+
+define void @vector_inline(<4 x i32> addrspace(1)* %out) {
+entry:
+ %tmp0 = call i32 @llvm.r600.read.tidig.x()
+ %tmp1 = add i32 %tmp0, 1
+ %tmp2 = add i32 %tmp0, 2
+ %tmp3 = add i32 %tmp0, 3
+ %vec0 = insertelement <4 x i32> undef, i32 %tmp0, i32 0
+ %vec1 = insertelement <4 x i32> %vec0, i32 %tmp1, i32 1
+ %vec2 = insertelement <4 x i32> %vec1, i32 %tmp2, i32 2
+ %vec3 = insertelement <4 x i32> %vec2, i32 %tmp3, i32 3
+ %tmp4 = xor <4 x i32> <i32 5, i32 5, i32 5, i32 5>, %vec3
+ store <4 x i32> %tmp4, <4 x i32> addrspace(1)* %out
+ ret void
+}
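+
+; (Editorial note: 5 sits in the -16..64 integer inline range, so each xor
+; encodes it directly; contrast vector_imm below, where 100 = 0x64 is out of
+; range and must first be materialized into an SGPR with s_movk_i32.)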
+
+; Immediates with one use should be folded.
+; CHECK-LABEL: {{^}}imm_one_use:
+; CHECK: v_xor_b32_e32 v{{[0-9]+}}, 0x64, v{{[0-9]+}}
+
+define void @imm_one_use(i32 addrspace(1)* %out) {
+entry:
+ %tmp0 = call i32 @llvm.r600.read.tidig.x()
+ %tmp1 = xor i32 %tmp0, 100
+ store i32 %tmp1, i32 addrspace(1)* %out
+ ret void
+}
+; CHECK-LABEL: {{^}}vector_imm:
+; CHECK: s_movk_i32 [[IMM:s[0-9]+]], 0x64
+; CHECK: v_xor_b32_e32 v{{[0-9]}}, [[IMM]], v{{[0-9]}}
+; CHECK: v_xor_b32_e32 v{{[0-9]}}, [[IMM]], v{{[0-9]}}
+; CHECK: v_xor_b32_e32 v{{[0-9]}}, [[IMM]], v{{[0-9]}}
+; CHECK: v_xor_b32_e32 v{{[0-9]}}, [[IMM]], v{{[0-9]}}
+
+define void @vector_imm(<4 x i32> addrspace(1)* %out) {
+entry:
+ %tmp0 = call i32 @llvm.r600.read.tidig.x()
+ %tmp1 = add i32 %tmp0, 1
+ %tmp2 = add i32 %tmp0, 2
+ %tmp3 = add i32 %tmp0, 3
+ %vec0 = insertelement <4 x i32> undef, i32 %tmp0, i32 0
+ %vec1 = insertelement <4 x i32> %vec0, i32 %tmp1, i32 1
+ %vec2 = insertelement <4 x i32> %vec1, i32 %tmp2, i32 2
+ %vec3 = insertelement <4 x i32> %vec2, i32 %tmp3, i32 3
+ %tmp4 = xor <4 x i32> <i32 100, i32 100, i32 100, i32 100>, %vec3
+ store <4 x i32> %tmp4, <4 x i32> addrspace(1)* %out
+ ret void
+}
+
+declare i32 @llvm.r600.read.tidig.x() #0
+attributes #0 = { readnone }
diff --git a/test/CodeGen/R600/operand-spacing.ll b/test/CodeGen/R600/operand-spacing.ll
index f0d228d..20420a8 100644
--- a/test/CodeGen/R600/operand-spacing.ll
+++ b/test/CodeGen/R600/operand-spacing.ll
@@ -1,13 +1,16 @@
-; RUN: llc -march=r600 -mcpu=SI -verify-machineinstrs < %s | FileCheck -strict-whitespace -check-prefix=SI %s
+; RUN: llc -march=amdgcn -mcpu=SI -verify-machineinstrs < %s | FileCheck -strict-whitespace -check-prefix=SI -check-prefix=GCN %s
+; RUN: llc -march=amdgcn -mcpu=tonga -verify-machineinstrs < %s | FileCheck -strict-whitespace -check-prefix=VI -check-prefix=GCN %s
; Make sure there isn't an extra space between the instruction name and first operands.
-; SI-LABEL: {{^}}add_f32:
+; GCN-LABEL: {{^}}add_f32:
; SI-DAG: s_load_dword [[SREGA:s[0-9]+]], s{{\[[0-9]+:[0-9]+\]}}, 0xb
; SI-DAG: s_load_dword [[SREGB:s[0-9]+]], s{{\[[0-9]+:[0-9]+\]}}, 0xc
-; SI: v_mov_b32_e32 [[VREGB:v[0-9]+]], [[SREGB]]
-; SI: v_add_f32_e32 [[RESULT:v[0-9]+]], [[SREGA]], [[VREGB]]
-; SI: buffer_store_dword [[RESULT]],
+; VI-DAG: s_load_dword [[SREGA:s[0-9]+]], s{{\[[0-9]+:[0-9]+\]}}, 0x2c
+; VI-DAG: s_load_dword [[SREGB:s[0-9]+]], s{{\[[0-9]+:[0-9]+\]}}, 0x30
+; GCN: v_mov_b32_e32 [[VREGB:v[0-9]+]], [[SREGB]]
+; GCN: v_add_f32_e32 [[RESULT:v[0-9]+]], [[SREGA]], [[VREGB]]
+; GCN: buffer_store_dword [[RESULT]],
define void @add_f32(float addrspace(1)* %out, float %a, float %b) {
%result = fadd float %a, %b
store float %result, float addrspace(1)* %out
diff --git a/test/CodeGen/R600/or.ll b/test/CodeGen/R600/or.ll
index b7493d3..78879a8 100644
--- a/test/CodeGen/R600/or.ll
+++ b/test/CodeGen/R600/or.ll
@@ -1,14 +1,14 @@
-;RUN: llc < %s -march=r600 -mcpu=redwood | FileCheck --check-prefix=EG %s
-;RUN: llc < %s -march=r600 -mcpu=verde -verify-machineinstrs | FileCheck --check-prefix=SI %s
+; RUN: llc -march=amdgcn -mcpu=tonga -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
+; RUN: llc -march=amdgcn -mcpu=verde -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
+; RUN: llc -march=r600 -mcpu=redwood < %s | FileCheck -check-prefix=EG -check-prefix=FUNC %s
-; EG-LABEL: {{^}}or_v2i32:
+
+; FUNC-LABEL: {{^}}or_v2i32:
; EG: OR_INT {{\*? *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
; EG: OR_INT {{\*? *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
-; SI-LABEL: {{^}}or_v2i32:
; SI: v_or_b32_e32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
; SI: v_or_b32_e32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
-
define void @or_v2i32(<2 x i32> addrspace(1)* %out, <2 x i32> addrspace(1)* %in) {
%b_ptr = getelementptr <2 x i32> addrspace(1)* %in, i32 1
%a = load <2 x i32> addrspace(1) * %in
@@ -18,18 +18,16 @@ define void @or_v2i32(<2 x i32> addrspace(1)* %out, <2 x i32> addrspace(1)* %in)
ret void
}
-; EG-LABEL: {{^}}or_v4i32:
+; FUNC-LABEL: {{^}}or_v4i32:
; EG: OR_INT {{\*? *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
; EG: OR_INT {{\*? *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
; EG: OR_INT {{\*? *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
; EG: OR_INT {{\*? *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
-; SI-LABEL: {{^}}or_v4i32:
; SI: v_or_b32_e32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
; SI: v_or_b32_e32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
; SI: v_or_b32_e32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
; SI: v_or_b32_e32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
-
define void @or_v4i32(<4 x i32> addrspace(1)* %out, <4 x i32> addrspace(1)* %in) {
%b_ptr = getelementptr <4 x i32> addrspace(1)* %in, i32 1
%a = load <4 x i32> addrspace(1) * %in
@@ -39,7 +37,7 @@ define void @or_v4i32(<4 x i32> addrspace(1)* %out, <4 x i32> addrspace(1)* %in)
ret void
}
-; SI-LABEL: {{^}}scalar_or_i32:
+; FUNC-LABEL: {{^}}scalar_or_i32:
; SI: s_or_b32
define void @scalar_or_i32(i32 addrspace(1)* %out, i32 %a, i32 %b) {
%or = or i32 %a, %b
@@ -47,7 +45,7 @@ define void @scalar_or_i32(i32 addrspace(1)* %out, i32 %a, i32 %b) {
ret void
}
-; SI-LABEL: {{^}}vector_or_i32:
+; FUNC-LABEL: {{^}}vector_or_i32:
; SI: v_or_b32_e32 v{{[0-9]}}
define void @vector_or_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %a, i32 %b) {
%loada = load i32 addrspace(1)* %a
@@ -56,7 +54,7 @@ define void @vector_or_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %a, i32 %b)
ret void
}
-; SI-LABEL: {{^}}scalar_or_literal_i32:
+; FUNC-LABEL: {{^}}scalar_or_literal_i32:
; SI: s_or_b32 s{{[0-9]+}}, s{{[0-9]+}}, 0x1869f
define void @scalar_or_literal_i32(i32 addrspace(1)* %out, i32 %a) {
%or = or i32 %a, 99999
@@ -64,7 +62,7 @@ define void @scalar_or_literal_i32(i32 addrspace(1)* %out, i32 %a) {
ret void
}
-; SI-LABEL: {{^}}vector_or_literal_i32:
+; FUNC-LABEL: {{^}}vector_or_literal_i32:
; SI: v_or_b32_e32 v{{[0-9]+}}, 0xffff, v{{[0-9]+}}
define void @vector_or_literal_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %a, i32 addrspace(1)* %b) {
%loada = load i32 addrspace(1)* %a, align 4
@@ -73,7 +71,7 @@ define void @vector_or_literal_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %a,
ret void
}
-; SI-LABEL: {{^}}vector_or_inline_immediate_i32:
+; FUNC-LABEL: {{^}}vector_or_inline_immediate_i32:
; SI: v_or_b32_e32 v{{[0-9]+}}, 4, v{{[0-9]+}}
define void @vector_or_inline_immediate_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %a, i32 addrspace(1)* %b) {
%loada = load i32 addrspace(1)* %a, align 4
@@ -82,10 +80,10 @@ define void @vector_or_inline_immediate_i32(i32 addrspace(1)* %out, i32 addrspac
ret void
}
-; EG-LABEL: {{^}}scalar_or_i64:
+; FUNC-LABEL: {{^}}scalar_or_i64:
; EG-DAG: OR_INT * T{{[0-9]\.[XYZW]}}, KC0[2].W, KC0[3].Y
; EG-DAG: OR_INT * T{{[0-9]\.[XYZW]}}, KC0[3].X, KC0[3].Z
-; SI-LABEL: {{^}}scalar_or_i64:
+
; SI: s_or_b64
define void @scalar_or_i64(i64 addrspace(1)* %out, i64 %a, i64 %b) {
%or = or i64 %a, %b
@@ -93,7 +91,7 @@ define void @scalar_or_i64(i64 addrspace(1)* %out, i64 %a, i64 %b) {
ret void
}
-; SI-LABEL: {{^}}vector_or_i64:
+; FUNC-LABEL: {{^}}vector_or_i64:
; SI: v_or_b32_e32 v{{[0-9]}}
; SI: v_or_b32_e32 v{{[0-9]}}
define void @vector_or_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %a, i64 addrspace(1)* %b) {
@@ -104,7 +102,7 @@ define void @vector_or_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %a, i64 add
ret void
}
-; SI-LABEL: {{^}}scalar_vector_or_i64:
+; FUNC-LABEL: {{^}}scalar_vector_or_i64:
; SI: v_or_b32_e32 v{{[0-9]}}
; SI: v_or_b32_e32 v{{[0-9]}}
define void @scalar_vector_or_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %a, i64 %b) {
@@ -114,7 +112,7 @@ define void @scalar_vector_or_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %a,
ret void
}
-; SI-LABEL: {{^}}vector_or_i64_loadimm:
+; FUNC-LABEL: {{^}}vector_or_i64_loadimm:
; SI-DAG: s_mov_b32 [[LO_S_IMM:s[0-9]+]], 0xdf77987f
; SI-DAG: s_movk_i32 [[HI_S_IMM:s[0-9]+]], 0x146f
; SI-DAG: buffer_load_dwordx2 v{{\[}}[[LO_VREG:[0-9]+]]:[[HI_VREG:[0-9]+]]{{\]}},
@@ -129,7 +127,7 @@ define void @vector_or_i64_loadimm(i64 addrspace(1)* %out, i64 addrspace(1)* %a,
}
; FIXME: The or 0 should really be removed.
-; SI-LABEL: {{^}}vector_or_i64_imm:
+; FUNC-LABEL: {{^}}vector_or_i64_imm:
; SI: buffer_load_dwordx2 v{{\[}}[[LO_VREG:[0-9]+]]:[[HI_VREG:[0-9]+]]{{\]}},
; SI: v_or_b32_e32 {{v[0-9]+}}, 8, v[[LO_VREG]]
; SI: v_or_b32_e32 {{v[0-9]+}}, 0, {{.*}}
@@ -141,7 +139,7 @@ define void @vector_or_i64_imm(i64 addrspace(1)* %out, i64 addrspace(1)* %a, i64
ret void
}
-; SI-LABEL: {{^}}trunc_i64_or_to_i32:
+; FUNC-LABEL: {{^}}trunc_i64_or_to_i32:
; SI: s_load_dword s[[SREG0:[0-9]+]]
; SI: s_load_dword s[[SREG1:[0-9]+]]
; SI: s_or_b32 s[[SRESULT:[0-9]+]], s[[SREG1]], s[[SREG0]]
@@ -154,14 +152,13 @@ define void @trunc_i64_or_to_i32(i32 addrspace(1)* %out, i64 %a, i64 %b) {
ret void
}
-; EG-CHECK: {{^}}or_i1:
-; EG-CHECK: OR_INT {{\** *}}T{{[0-9]+\.[XYZW], PV\.[XYZW], PS}}
+; FUNC-LABEL: {{^}}or_i1:
+; EG: OR_INT {{\** *}}T{{[0-9]+\.[XYZW], PV\.[XYZW], PS}}
-; SI-CHECK: {{^}}or_i1:
-; SI-CHECK: s_or_b64 s[{{[0-9]+:[0-9]+}}], s[{{[0-9]+:[0-9]+}}], s[{{[0-9]+:[0-9]+}}]
+; SI: s_or_b64 s[{{[0-9]+:[0-9]+}}], s[{{[0-9]+:[0-9]+}}], s[{{[0-9]+:[0-9]+}}]
define void @or_i1(float addrspace(1)* %out, float addrspace(1)* %in0, float addrspace(1)* %in1) {
- %a = load float addrspace(1) * %in0
- %b = load float addrspace(1) * %in1
+ %a = load float addrspace(1)* %in0
+ %b = load float addrspace(1)* %in1
%acmp = fcmp oge float %a, 0.000000e+00
%bcmp = fcmp oge float %b, 0.000000e+00
%or = or i1 %acmp, %bcmp
@@ -169,3 +166,13 @@ define void @or_i1(float addrspace(1)* %out, float addrspace(1)* %in0, float add
store float %result, float addrspace(1)* %out
ret void
}
+
+; FUNC-LABEL: {{^}}s_or_i1:
+; SI: s_or_b64 s[{{[0-9]+:[0-9]+}}], vcc, s[{{[0-9]+:[0-9]+}}]
+define void @s_or_i1(i1 addrspace(1)* %out, i32 %a, i32 %b, i32 %c, i32 %d) {
+ %cmp0 = icmp eq i32 %a, %b
+ %cmp1 = icmp eq i32 %c, %d
+ %or = or i1 %cmp0, %cmp1
+ store i1 %or, i1 addrspace(1)* %out
+ ret void
+}
diff --git a/test/CodeGen/R600/private-memory-atomics.ll b/test/CodeGen/R600/private-memory-atomics.ll
index def4f9d..3ceb0c0 100644
--- a/test/CodeGen/R600/private-memory-atomics.ll
+++ b/test/CodeGen/R600/private-memory-atomics.ll
@@ -1,4 +1,5 @@
-; RUN: llc -verify-machineinstrs -march=r600 -mcpu=SI < %s
+; RUN: llc -verify-machineinstrs -march=amdgcn -mcpu=SI < %s
+; RUN: llc -verify-machineinstrs -march=amdgcn -mcpu=tonga < %s
; This works because promote allocas pass replaces these with LDS atomics.
diff --git a/test/CodeGen/R600/private-memory-broken.ll b/test/CodeGen/R600/private-memory-broken.ll
index 4086085..10590a9 100644
--- a/test/CodeGen/R600/private-memory-broken.ll
+++ b/test/CodeGen/R600/private-memory-broken.ll
@@ -1,4 +1,5 @@
-; RUN: not llc -verify-machineinstrs -march=r600 -mcpu=SI %s -o /dev/null 2>&1 | FileCheck %s
+; RUN: not llc -verify-machineinstrs -march=amdgcn -mcpu=SI %s -o /dev/null 2>&1 | FileCheck %s
+; RUN: not llc -verify-machineinstrs -march=amdgcn -mcpu=tonga %s -o /dev/null 2>&1 | FileCheck %s
; Make sure promote alloca pass doesn't crash
diff --git a/test/CodeGen/R600/private-memory.ll b/test/CodeGen/R600/private-memory.ll
index bfb4a6a..b03029c 100644
--- a/test/CodeGen/R600/private-memory.ll
+++ b/test/CodeGen/R600/private-memory.ll
@@ -1,6 +1,8 @@
; RUN: llc -march=r600 -mcpu=redwood < %s | FileCheck %s -check-prefix=R600 -check-prefix=FUNC
-; RUN: llc -show-mc-encoding -mattr=+promote-alloca -verify-machineinstrs -march=r600 -mcpu=SI < %s | FileCheck %s -check-prefix=SI-PROMOTE -check-prefix=SI -check-prefix=FUNC
-; RUN: llc -show-mc-encoding -mattr=-promote-alloca -verify-machineinstrs -march=r600 -mcpu=SI < %s | FileCheck %s -check-prefix=SI-ALLOCA -check-prefix=SI -check-prefix=FUNC
+; RUN: llc -show-mc-encoding -mattr=+promote-alloca -verify-machineinstrs -march=amdgcn -mcpu=SI < %s | FileCheck %s -check-prefix=SI-PROMOTE -check-prefix=SI -check-prefix=FUNC
+; RUN: llc -show-mc-encoding -mattr=-promote-alloca -verify-machineinstrs -march=amdgcn -mcpu=SI < %s | FileCheck %s -check-prefix=SI-ALLOCA -check-prefix=SI -check-prefix=FUNC
+; RUN: llc -show-mc-encoding -mattr=+promote-alloca -verify-machineinstrs -march=amdgcn -mcpu=tonga < %s | FileCheck %s -check-prefix=SI-PROMOTE -check-prefix=SI -check-prefix=FUNC
+; RUN: llc -show-mc-encoding -mattr=-promote-alloca -verify-machineinstrs -march=amdgcn -mcpu=tonga < %s | FileCheck %s -check-prefix=SI-ALLOCA -check-prefix=SI -check-prefix=FUNC
declare i32 @llvm.r600.read.tidig.x() nounwind readnone
@@ -117,7 +119,7 @@ for.end:
; R600: MOVA_INT
; SI-PROMOTE-DAG: buffer_store_short v{{[0-9]+}}, v{{[0-9]+}}, s[{{[0-9]+:[0-9]+}}], s{{[0-9]+}} offen ; encoding: [0x00,0x10,0x68,0xe0
-; SI-PROMOTE-DAG: buffer_store_short v{{[0-9]+}}, v{{[0-9]+}}, s[{{[0-9]+:[0-9]+}}], s{{[0-9]+}} offen offset:0x2 ; encoding: [0x02,0x10,0x68,0xe0
+; SI-PROMOTE-DAG: buffer_store_short v{{[0-9]+}}, v{{[0-9]+}}, s[{{[0-9]+:[0-9]+}}], s{{[0-9]+}} offen offset:2 ; encoding: [0x02,0x10,0x68,0xe0
; SI-PROMOTE: buffer_load_sshort v{{[0-9]+}}, v{{[0-9]+}}, s[{{[0-9]+:[0-9]+}}], s{{[0-9]+}}
define void @short_array(i32 addrspace(1)* %out, i32 %index) {
entry:
@@ -138,7 +140,7 @@ entry:
; R600: MOVA_INT
; SI-DAG: buffer_store_byte v{{[0-9]+}}, v{{[0-9]+}}, s[{{[0-9]+:[0-9]+}}], s{{[0-9]+}} offen ; encoding: [0x00,0x10,0x60,0xe0
-; SI-DAG: buffer_store_byte v{{[0-9]+}}, v{{[0-9]+}}, s[{{[0-9]+:[0-9]+}}], s{{[0-9]+}} offen offset:0x1 ; encoding: [0x01,0x10,0x60,0xe0
+; SI-DAG: buffer_store_byte v{{[0-9]+}}, v{{[0-9]+}}, s[{{[0-9]+:[0-9]+}}], s{{[0-9]+}} offen offset:1 ; encoding: [0x01,0x10,0x60,0xe0
define void @char_array(i32 addrspace(1)* %out, i32 %index) {
entry:
%0 = alloca [2 x i8]
@@ -296,7 +298,7 @@ entry:
; FUNC-LABEL: ptrtoint:
; SI-NOT: ds_write
; SI: buffer_store_dword v{{[0-9]+}}, v{{[0-9]+}}, s[{{[0-9]+:[0-9]+}}], s{{[0-9]+}} offen
-; SI: buffer_load_dword v{{[0-9]+}}, v{{[0-9]+}}, s[{{[0-9]+:[0-9]+}}], s{{[0-9]+}} offen offset:0x5
+; SI: buffer_load_dword v{{[0-9]+}}, v{{[0-9]+}}, s[{{[0-9]+:[0-9]+}}], s{{[0-9]+}} offen offset:5
define void @ptrtoint(i32 addrspace(1)* %out, i32 %a, i32 %b) {
%alloca = alloca [16 x i32]
%tmp0 = getelementptr [16 x i32]* %alloca, i32 0, i32 %a
diff --git a/test/CodeGen/R600/r600-encoding.ll b/test/CodeGen/R600/r600-encoding.ll
index 112cdac..3a82ee3 100644
--- a/test/CodeGen/R600/r600-encoding.ll
+++ b/test/CodeGen/R600/r600-encoding.ll
@@ -1,14 +1,14 @@
-; RUN: llc < %s -march=r600 -show-mc-encoding -mcpu=redwood | FileCheck --check-prefix=EG-CHECK %s
-; RUN: llc < %s -march=r600 -show-mc-encoding -mcpu=rs880 | FileCheck --check-prefix=R600-CHECK %s
+; RUN: llc < %s -march=r600 -show-mc-encoding -mcpu=redwood | FileCheck --check-prefix=EG %s
+; RUN: llc < %s -march=r600 -show-mc-encoding -mcpu=rs880 | FileCheck --check-prefix=R600 %s
; The earliest R600 GPUs have a slightly different encoding than the rest of
; the VLIW4/5 GPUs.
-; EG-CHECK: {{^}}test:
-; EG-CHECK: MUL_IEEE {{[ *TXYZWPVxyzw.,0-9]+}} ; encoding: [{{0x[0-9a-f]+,0x[0-9a-f]+,0x[0-9a-f]+,0x[0-9a-f]+,0x10,0x01,0x[0-9a-f]+,0x[0-9a-f]+}}]
+; EG: {{^}}test:
+; EG: MUL_IEEE {{[ *TXYZWPVxyzw.,0-9]+}} ; encoding: [{{0x[0-9a-f]+,0x[0-9a-f]+,0x[0-9a-f]+,0x[0-9a-f]+,0x10,0x01,0x[0-9a-f]+,0x[0-9a-f]+}}]
-; R600-CHECK: {{^}}test:
-; R600-CHECK: MUL_IEEE {{[ *TXYZWPVxyzw.,0-9]+}} ; encoding: [{{0x[0-9a-f]+,0x[0-9a-f]+,0x[0-9a-f]+,0x[0-9a-f]+,0x10,0x02,0x[0-9a-f]+,0x[0-9a-f]+}}]
+; R600: {{^}}test:
+; R600: MUL_IEEE {{[ *TXYZWPVxyzw.,0-9]+}} ; encoding: [{{0x[0-9a-f]+,0x[0-9a-f]+,0x[0-9a-f]+,0x[0-9a-f]+,0x10,0x02,0x[0-9a-f]+,0x[0-9a-f]+}}]
define void @test(<4 x float> inreg %reg0) #0 {
entry:
diff --git a/test/CodeGen/R600/register-count-comments.ll b/test/CodeGen/R600/register-count-comments.ll
index 61d1b5e..2b49f97 100644
--- a/test/CodeGen/R600/register-count-comments.ll
+++ b/test/CodeGen/R600/register-count-comments.ll
@@ -1,4 +1,4 @@
-; RUN: llc -march=r600 -mcpu=SI -verify-machineinstrs -asm-verbose < %s | FileCheck -check-prefix=SI %s
+; RUN: llc -march=amdgcn -mcpu=SI -verify-machineinstrs -asm-verbose < %s | FileCheck -check-prefix=SI %s
declare i32 @llvm.SI.tid() nounwind readnone
diff --git a/test/CodeGen/R600/reorder-stores.ll b/test/CodeGen/R600/reorder-stores.ll
index 30c0171..ea50d5e 100644
--- a/test/CodeGen/R600/reorder-stores.ll
+++ b/test/CodeGen/R600/reorder-stores.ll
@@ -1,4 +1,5 @@
-; RUN: llc -march=r600 -mcpu=SI < %s | FileCheck -check-prefix=SI %s
+; RUN: llc -march=amdgcn -mcpu=SI < %s | FileCheck -check-prefix=SI %s
+; RUN: llc -march=amdgcn -mcpu=tonga < %s | FileCheck -check-prefix=SI %s
; SI-LABEL: {{^}}no_reorder_v2f64_global_load_store:
; SI: buffer_load_dwordx2
diff --git a/test/CodeGen/R600/rotl.i64.ll b/test/CodeGen/R600/rotl.i64.ll
index 84a35b6..6da17a4 100644
--- a/test/CodeGen/R600/rotl.i64.ll
+++ b/test/CodeGen/R600/rotl.i64.ll
@@ -1,11 +1,12 @@
-; RUN: llc -march=r600 -mcpu=SI -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
+; RUN: llc -march=amdgcn -mcpu=SI -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=BOTH %s
+; RUN: llc -march=amdgcn -mcpu=tonga -verify-machineinstrs < %s | FileCheck -check-prefix=VI -check-prefix=BOTH %s
-; FUNC-LABEL: {{^}}s_rotl_i64:
-; SI-DAG: s_lshl_b64
-; SI-DAG: s_sub_i32
-; SI-DAG: s_lshr_b64
-; SI: s_or_b64
-; SI: s_endpgm
+; BOTH-LABEL: {{^}}s_rotl_i64:
+; BOTH-DAG: s_lshl_b64
+; BOTH-DAG: s_sub_i32
+; BOTH-DAG: s_lshr_b64
+; BOTH: s_or_b64
+; BOTH: s_endpgm
define void @s_rotl_i64(i64 addrspace(1)* %in, i64 %x, i64 %y) {
entry:
%0 = shl i64 %x, %y
@@ -16,13 +17,15 @@ entry:
ret void
}
-; FUNC-LABEL: {{^}}v_rotl_i64:
+; BOTH-LABEL: {{^}}v_rotl_i64:
; SI-DAG: v_lshl_b64
-; SI-DAG: v_sub_i32
+; VI-DAG: v_lshlrev_b64
+; BOTH-DAG: v_sub_i32
; SI: v_lshr_b64
-; SI: v_or_b32
-; SI: v_or_b32
-; SI: s_endpgm
+; VI: v_lshrrev_b64
+; BOTH: v_or_b32
+; BOTH: v_or_b32
+; BOTH: s_endpgm
define void @v_rotl_i64(i64 addrspace(1)* %in, i64 addrspace(1)* %xptr, i64 addrspace(1)* %yptr) {
entry:
%x = load i64 addrspace(1)* %xptr, align 8
diff --git a/test/CodeGen/R600/rotl.ll b/test/CodeGen/R600/rotl.ll
index 6c8e503..6c144cd 100644
--- a/test/CodeGen/R600/rotl.ll
+++ b/test/CodeGen/R600/rotl.ll
@@ -1,5 +1,6 @@
; RUN: llc -march=r600 -mcpu=redwood < %s | FileCheck --check-prefix=R600 -check-prefix=FUNC %s
-; RUN: llc -march=r600 -mcpu=SI -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
+; RUN: llc -march=amdgcn -mcpu=SI -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
+; RUN: llc -march=amdgcn -mcpu=tonga -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
; FUNC-LABEL: {{^}}rotl_i32:
; R600: SUB_INT {{\** T[0-9]+\.[XYZW]}}, literal.x
diff --git a/test/CodeGen/R600/rotr.i64.ll b/test/CodeGen/R600/rotr.i64.ll
index 9e14570..f1d1d26 100644
--- a/test/CodeGen/R600/rotr.i64.ll
+++ b/test/CodeGen/R600/rotr.i64.ll
@@ -1,10 +1,11 @@
-; RUN: llc -march=r600 -mcpu=SI -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
+; RUN: llc -march=amdgcn -mcpu=SI -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=BOTH %s
+; RUN: llc -march=amdgcn -mcpu=tonga -verify-machineinstrs < %s | FileCheck -check-prefix=VI -check-prefix=BOTH %s
-; FUNC-LABEL: {{^}}s_rotr_i64:
-; SI-DAG: s_sub_i32
-; SI-DAG: s_lshr_b64
-; SI-DAG: s_lshl_b64
-; SI: s_or_b64
+; BOTH-LABEL: {{^}}s_rotr_i64:
+; BOTH-DAG: s_sub_i32
+; BOTH-DAG: s_lshr_b64
+; BOTH-DAG: s_lshl_b64
+; BOTH: s_or_b64
define void @s_rotr_i64(i64 addrspace(1)* %in, i64 %x, i64 %y) {
entry:
%tmp0 = sub i64 64, %y
@@ -15,12 +16,14 @@ entry:
ret void
}
-; FUNC-LABEL: {{^}}v_rotr_i64:
-; SI-DAG: v_sub_i32
+; BOTH-LABEL: {{^}}v_rotr_i64:
+; BOTH-DAG: v_sub_i32
; SI-DAG: v_lshr_b64
; SI-DAG: v_lshl_b64
-; SI: v_or_b32
-; SI: v_or_b32
+; VI-DAG: v_lshrrev_b64
+; VI-DAG: v_lshlrev_b64
+; BOTH: v_or_b32
+; BOTH: v_or_b32
define void @v_rotr_i64(i64 addrspace(1)* %in, i64 addrspace(1)* %xptr, i64 addrspace(1)* %yptr) {
entry:
%x = load i64 addrspace(1)* %xptr, align 8
@@ -33,7 +36,7 @@ entry:
ret void
}
-; FUNC-LABEL: {{^}}s_rotr_v2i64:
+; BOTH-LABEL: {{^}}s_rotr_v2i64:
define void @s_rotr_v2i64(<2 x i64> addrspace(1)* %in, <2 x i64> %x, <2 x i64> %y) {
entry:
%tmp0 = sub <2 x i64> <i64 64, i64 64>, %y
@@ -44,7 +47,7 @@ entry:
ret void
}
-; FUNC-LABEL: {{^}}v_rotr_v2i64:
+; BOTH-LABEL: {{^}}v_rotr_v2i64:
define void @v_rotr_v2i64(<2 x i64> addrspace(1)* %in, <2 x i64> addrspace(1)* %xptr, <2 x i64> addrspace(1)* %yptr) {
entry:
%x = load <2 x i64> addrspace(1)* %xptr, align 8
diff --git a/test/CodeGen/R600/rotr.ll b/test/CodeGen/R600/rotr.ll
index a1add11..044f9ff 100644
--- a/test/CodeGen/R600/rotr.ll
+++ b/test/CodeGen/R600/rotr.ll
@@ -1,5 +1,6 @@
; RUN: llc -march=r600 -mcpu=redwood < %s | FileCheck --check-prefix=R600 -check-prefix=FUNC %s
-; RUN: llc -march=r600 -mcpu=SI -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
+; RUN: llc -march=amdgcn -mcpu=SI -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
+; RUN: llc -march=amdgcn -mcpu=tonga -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
; FUNC-LABEL: {{^}}rotr_i32:
; R600: BIT_ALIGN_INT
diff --git a/test/CodeGen/R600/rsq.ll b/test/CodeGen/R600/rsq.ll
index d792c9f..b8a23df 100644
--- a/test/CodeGen/R600/rsq.ll
+++ b/test/CodeGen/R600/rsq.ll
@@ -1,6 +1,7 @@
-; RUN: llc -march=r600 -mcpu=SI -mattr=-fp32-denormals -verify-machineinstrs -enable-unsafe-fp-math < %s | FileCheck -check-prefix=SI-UNSAFE -check-prefix=SI %s
-; RUN: llc -march=r600 -mcpu=SI -mattr=-fp32-denormals -verify-machineinstrs < %s | FileCheck -check-prefix=SI-SAFE -check-prefix=SI %s
+; RUN: llc -march=amdgcn -mcpu=SI -mattr=-fp32-denormals -verify-machineinstrs -enable-unsafe-fp-math < %s | FileCheck -check-prefix=SI-UNSAFE -check-prefix=SI %s
+; RUN: llc -march=amdgcn -mcpu=SI -mattr=-fp32-denormals -verify-machineinstrs < %s | FileCheck -check-prefix=SI-SAFE -check-prefix=SI %s
+declare i32 @llvm.r600.read.tidig.x() nounwind readnone
declare float @llvm.sqrt.f32(float) nounwind readnone
declare double @llvm.sqrt.f64(double) nounwind readnone
@@ -36,3 +37,38 @@ define void @rsq_f32_sgpr(float addrspace(1)* noalias %out, float %val) nounwind
store float %div, float addrspace(1)* %out, align 4
ret void
}
+
+; Recognize that this is rsqrt(a) * rcp(b) * c,
+; not 1 / (1 / sqrt(a)) * rcp(b) * c.
+
+; SI-LABEL: @rsqrt_fmul
+; SI-DAG: buffer_load_dword [[A:v[0-9]+]], {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, 0 addr64{{$}}
+; SI-DAG: buffer_load_dword [[B:v[0-9]+]], {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, 0 addr64 offset:4
+; SI-DAG: buffer_load_dword [[C:v[0-9]+]], {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, 0 addr64 offset:8
+
+; SI-UNSAFE-DAG: v_rsq_f32_e32 [[RSQA:v[0-9]+]], [[A]]
+; SI-UNSAFE-DAG: v_rcp_f32_e32 [[RCPB:v[0-9]+]], [[B]]
+; SI-UNSAFE-DAG: v_mul_f32_e32 [[TMP:v[0-9]+]], [[RCPB]], [[RSQA]]
+; SI-UNSAFE: v_mul_f32_e32 [[RESULT:v[0-9]+]], [[TMP]], [[C]]
+; SI-UNSAFE: buffer_store_dword [[RESULT]]
+
+; SI-SAFE-NOT: v_rsq_f32
+
+; SI: s_endpgm
+define void @rsqrt_fmul(float addrspace(1)* %out, float addrspace(1)* %in) {
+ %tid = call i32 @llvm.r600.read.tidig.x() nounwind readnone
+ %out.gep = getelementptr float addrspace(1)* %out, i32 %tid
+ %gep.0 = getelementptr float addrspace(1)* %in, i32 %tid
+ %gep.1 = getelementptr float addrspace(1)* %gep.0, i32 1
+ %gep.2 = getelementptr float addrspace(1)* %gep.0, i32 2
+
+ %a = load float addrspace(1)* %gep.0
+ %b = load float addrspace(1)* %gep.1
+ %c = load float addrspace(1)* %gep.2
+
+ %x = call float @llvm.sqrt.f32(float %a)
+ %y = fmul float %x, %b
+ %z = fdiv float %c, %y
+ store float %z, float addrspace(1)* %out.gep
+ ret void
+}
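
In other words, with -enable-unsafe-fp-math the combine is expected to keep
the reciprocal square root as-is instead of materializing sqrt and dividing.
A sketch of the intended rewrite (not part of the test):

;   %z = fdiv %c, (fmul (sqrt %a), %b)
;     ==> %z = fmul (fmul (rsq %a), (rcp %b)), %c
; hence SI-UNSAFE checks v_rsq_f32 + v_rcp_f32 + two v_mul_f32, while
; SI-SAFE must not emit v_rsq_f32 at all.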
diff --git a/test/CodeGen/R600/s_movk_i32.ll b/test/CodeGen/R600/s_movk_i32.ll
index 71f9a41..8be2d1d 100644
--- a/test/CodeGen/R600/s_movk_i32.ll
+++ b/test/CodeGen/R600/s_movk_i32.ll
@@ -1,4 +1,5 @@
-; RUN: llc -march=r600 -mcpu=SI -verify-machineinstrs < %s | FileCheck -check-prefix=SI %s
+; RUN: llc -march=amdgcn -mcpu=SI -verify-machineinstrs < %s | FileCheck -check-prefix=SI %s
+; RUN: llc -march=amdgcn -mcpu=tonga -verify-machineinstrs < %s | FileCheck -check-prefix=SI %s
; SI-LABEL: {{^}}s_movk_i32_k0:
; SI-DAG: s_mov_b32 [[LO_S_IMM:s[0-9]+]], 0xffff{{$}}
diff --git a/test/CodeGen/R600/saddo.ll b/test/CodeGen/R600/saddo.ll
index 654967c..8e625c1 100644
--- a/test/CodeGen/R600/saddo.ll
+++ b/test/CodeGen/R600/saddo.ll
@@ -1,4 +1,5 @@
-; RUN: llc -march=r600 -mcpu=SI -verify-machineinstrs< %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
+; RUN: llc -march=amdgcn -mcpu=SI -verify-machineinstrs< %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
+; RUN: llc -march=amdgcn -mcpu=tonga -verify-machineinstrs< %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
; RUN: llc -march=r600 -mcpu=cypress -verify-machineinstrs< %s
declare { i32, i1 } @llvm.sadd.with.overflow.i32(i32, i32) nounwind readnone
diff --git a/test/CodeGen/R600/salu-to-valu.ll b/test/CodeGen/R600/salu-to-valu.ll
index 23af3e4..dfb181d 100644
--- a/test/CodeGen/R600/salu-to-valu.ll
+++ b/test/CodeGen/R600/salu-to-valu.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=r600 -mcpu=SI -verify-machineinstrs | FileCheck %s
+; RUN: llc < %s -march=amdgcn -mcpu=SI -verify-machineinstrs | FileCheck %s
; In this test both the pointer and the offset operands to the
; BUFFER_LOAD instructions end up being stored in vgprs. This
diff --git a/test/CodeGen/R600/scalar_to_vector.ll b/test/CodeGen/R600/scalar_to_vector.ll
index dc9ebe0..b82e552 100644
--- a/test/CodeGen/R600/scalar_to_vector.ll
+++ b/test/CodeGen/R600/scalar_to_vector.ll
@@ -1,4 +1,5 @@
-; RUN: llc -march=r600 -mcpu=SI -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
+; RUN: llc -march=amdgcn -mcpu=SI -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
+; RUN: llc -march=amdgcn -mcpu=tonga -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
; FUNC-LABEL: {{^}}scalar_to_vector_v2i32:
diff --git a/test/CodeGen/R600/schedule-global-loads.ll b/test/CodeGen/R600/schedule-global-loads.ll
index 5422ca7..b6437d2 100644
--- a/test/CodeGen/R600/schedule-global-loads.ll
+++ b/test/CodeGen/R600/schedule-global-loads.ll
@@ -1,4 +1,4 @@
-; RUN: llc -march=r600 -mcpu=SI -verify-machineinstrs < %s | FileCheck -check-prefix=FUNC -check-prefix=SI %s
+; RUN: llc -march=amdgcn -mcpu=SI -verify-machineinstrs < %s | FileCheck -check-prefix=FUNC -check-prefix=SI %s
declare i32 @llvm.r600.read.tidig.x() #1
@@ -10,7 +10,7 @@ declare i32 @llvm.r600.read.tidig.x() #1
; FUNC-LABEL: {{^}}cluster_global_arg_loads:
; SI-DAG: buffer_load_dword [[REG0:v[0-9]+]], s{{\[[0-9]+:[0-9]+\]}}, 0{{$}}
-; SI-DAG: buffer_load_dword [[REG1:v[0-9]+]], s{{\[[0-9]+:[0-9]+\]}}, 0 offset:0x4
+; SI-DAG: buffer_load_dword [[REG1:v[0-9]+]], s{{\[[0-9]+:[0-9]+\]}}, 0 offset:4
; SI: buffer_store_dword [[REG0]]
; SI: buffer_store_dword [[REG1]]
define void @cluster_global_arg_loads(i32 addrspace(1)* %out0, i32 addrspace(1)* %out1, i32 addrspace(1)* %ptr) #0 {
diff --git a/test/CodeGen/R600/schedule-kernel-arg-loads.ll b/test/CodeGen/R600/schedule-kernel-arg-loads.ll
index e774157..f9641fa 100644
--- a/test/CodeGen/R600/schedule-kernel-arg-loads.ll
+++ b/test/CodeGen/R600/schedule-kernel-arg-loads.ll
@@ -1,10 +1,18 @@
-; RUN: llc -march=r600 -mcpu=SI -verify-machineinstrs < %s | FileCheck -check-prefix=FUNC -check-prefix=SI %s
+; RUN: llc -march=amdgcn -mcpu=SI -verify-machineinstrs < %s | FileCheck -check-prefix=FUNC -check-prefix=SI %s
+; RUN: llc -march=amdgcn -mcpu=tonga -verify-machineinstrs < %s | FileCheck -check-prefix=FUNC -check-prefix=VI %s
; FUNC-LABEL: {{^}}cluster_arg_loads:
; SI: s_load_dwordx2 s{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, 0x9
; SI-NEXT: s_load_dwordx2 s{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, 0xb
; SI-NEXT: s_load_dword s{{[0-9]+}}, s{{\[[0-9]+:[0-9]+\]}}, 0xd
; SI-NEXT: s_load_dword s{{[0-9]+}}, s{{\[[0-9]+:[0-9]+\]}}, 0xe
+; VI: s_load_dwordx2 s{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, 0x24
+; VI-NEXT: s_nop 0
+; VI-NEXT: s_load_dwordx2 s{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, 0x2c
+; VI-NEXT: s_nop 0
+; VI-NEXT: s_load_dword s{{[0-9]+}}, s{{\[[0-9]+:[0-9]+\]}}, 0x34
+; VI-NEXT: s_nop 0
+; VI-NEXT: s_load_dword s{{[0-9]+}}, s{{\[[0-9]+:[0-9]+\]}}, 0x38
define void @cluster_arg_loads(i32 addrspace(1)* %out0, i32 addrspace(1)* %out1, i32 %x, i32 %y) nounwind {
store i32 %x, i32 addrspace(1)* %out0, align 4
store i32 %y, i32 addrspace(1)* %out1, align 4
diff --git a/test/CodeGen/R600/schedule-vs-if-nested-loop-failure.ll b/test/CodeGen/R600/schedule-vs-if-nested-loop-failure.ll
index baac5b5..76b655d 100644
--- a/test/CodeGen/R600/schedule-vs-if-nested-loop-failure.ll
+++ b/test/CodeGen/R600/schedule-vs-if-nested-loop-failure.ll
@@ -1,6 +1,7 @@
; XFAIL: *
; REQUIRES: asserts
-; RUN: llc -O0 -march=r600 -mcpu=SI -verify-machineinstrs< %s | FileCheck %s -check-prefix=SI
+; RUN: llc -O0 -march=amdgcn -mcpu=SI -verify-machineinstrs< %s | FileCheck %s -check-prefix=SI
+; RUN: llc -O0 -march=amdgcn -mcpu=tonga -verify-machineinstrs< %s | FileCheck %s -check-prefix=SI
declare void @llvm.AMDGPU.barrier.local() nounwind noduplicate
diff --git a/test/CodeGen/R600/scratch-buffer.ll b/test/CodeGen/R600/scratch-buffer.ll
new file mode 100644
index 0000000..8c5a990
--- /dev/null
+++ b/test/CodeGen/R600/scratch-buffer.ll
@@ -0,0 +1,84 @@
+; RUN: llc -verify-machineinstrs -march=amdgcn -mcpu=SI < %s | FileCheck %s
+; RUN: llc -verify-machineinstrs -march=amdgcn -mcpu=tonga < %s | FileCheck %s
+
+; When a frame index offset is more than 12 bits, make sure we don't store
+; it in mubuf's offset field.
+
+; Also, make sure we use the same register for storing the scratch buffer address
+; for both stores. This register is allocated by the register scavenger, so we
+; should be able to reuse the same register for each scratch buffer access.
+
+; CHECK-LABEL: {{^}}legal_offset_fi:
+; CHECK: v_mov_b32_e32 [[OFFSET:v[0-9]+]], 0{{$}}
+; CHECK: buffer_store_dword v{{[0-9]+}}, [[OFFSET]], s[{{[0-9]+}}:{{[0-9]+}}], s{{[0-9]+}} offen
+; CHECK: v_mov_b32_e32 [[OFFSET]], 0x8000
+; CHECK: buffer_store_dword v{{[0-9]+}}, [[OFFSET]], s[{{[0-9]+}}:{{[0-9]+}}], s{{[0-9]+}} offen{{$}}
+
+define void @legal_offset_fi(i32 addrspace(1)* %out, i32 %cond, i32 %if_offset, i32 %else_offset) {
+entry:
+ %scratch0 = alloca [8192 x i32]
+ %scratch1 = alloca [8192 x i32]
+
+ %scratchptr0 = getelementptr [8192 x i32]* %scratch0, i32 0, i32 0
+ store i32 1, i32* %scratchptr0
+
+ %scratchptr1 = getelementptr [8192 x i32]* %scratch1, i32 0, i32 0
+ store i32 2, i32* %scratchptr1
+
+ %cmp = icmp eq i32 %cond, 0
+ br i1 %cmp, label %if, label %else
+
+if:
+ %if_ptr = getelementptr [8192 x i32]* %scratch0, i32 0, i32 %if_offset
+ %if_value = load i32* %if_ptr
+ br label %done
+
+else:
+ %else_ptr = getelementptr [8192 x i32]* %scratch1, i32 0, i32 %else_offset
+ %else_value = load i32* %else_ptr
+ br label %done
+
+done:
+ %value = phi i32 [%if_value, %if], [%else_value, %else]
+ store i32 %value, i32 addrspace(1)* %out
+ ret void
+}
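
The 0x8000 above is not arbitrary: each alloca is 8192 x i32 = 32768 bytes, so
the second object begins at byte offset 0x8000, which exceeds mubuf's 12-bit
immediate offset field and therefore has to be materialized in a VGPR:

;   8192 * 4 = 32768 = 0x8000 > 0xfff  =>  v_mov_b32 [[OFFSET]], 0x8000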
+
+; CHECK-LABEL: {{^}}legal_offset_fi_offset
+; CHECK: buffer_store_dword v{{[0-9]+}}, v{{[0-9]+}}, s[{{[0-9]+}}:{{[0-9]+}}], s{{[0-9]+}} offen
+; CHECK: v_add_i32_e32 [[OFFSET:v[0-9]+]], 0x8000
+; CHECK: buffer_store_dword v{{[0-9]+}}, [[OFFSET]], s[{{[0-9]+}}:{{[0-9]+}}], s{{[0-9]+}} offen{{$}}
+
+define void @legal_offset_fi_offset(i32 addrspace(1)* %out, i32 %cond, i32 addrspace(1)* %offsets, i32 %if_offset, i32 %else_offset) {
+entry:
+ %scratch0 = alloca [8192 x i32]
+ %scratch1 = alloca [8192 x i32]
+
+ %offset0 = load i32 addrspace(1)* %offsets
+ %scratchptr0 = getelementptr [8192 x i32]* %scratch0, i32 0, i32 %offset0
+ store i32 %offset0, i32* %scratchptr0
+
+ %offsetptr1 = getelementptr i32 addrspace(1)* %offsets, i32 1
+ %offset1 = load i32 addrspace(1)* %offsetptr1
+ %scratchptr1 = getelementptr [8192 x i32]* %scratch1, i32 0, i32 %offset1
+ store i32 %offset1, i32* %scratchptr1
+
+ %cmp = icmp eq i32 %cond, 0
+ br i1 %cmp, label %if, label %else
+
+if:
+ %if_ptr = getelementptr [8192 x i32]* %scratch0, i32 0, i32 %if_offset
+ %if_value = load i32* %if_ptr
+ br label %done
+
+else:
+ %else_ptr = getelementptr [8192 x i32]* %scratch1, i32 0, i32 %else_offset
+ %else_value = load i32* %else_ptr
+ br label %done
+
+done:
+ %value = phi i32 [%if_value, %if], [%else_value, %else]
+ store i32 %value, i32 addrspace(1)* %out
+ ret void
+}
+
diff --git a/test/CodeGen/R600/sdiv.ll b/test/CodeGen/R600/sdiv.ll
index 16853e0..07bb417 100644
--- a/test/CodeGen/R600/sdiv.ll
+++ b/test/CodeGen/R600/sdiv.ll
@@ -1,4 +1,5 @@
-; RUN: llc -march=r600 -mcpu=SI < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
+; RUN: llc -march=amdgcn -mcpu=SI < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
+; RUN: llc -march=amdgcn -mcpu=tonga < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
; RUN: llc -march=r600 -mcpu=redwood < %s | FileCheck -check-prefix=EG -check-prefix=FUNC %s
; The code generated by sdiv is long and complex and may frequently change.
@@ -35,7 +36,7 @@ define void @sdiv_i32_4(i32 addrspace(1)* %out, i32 addrspace(1)* %in) {
; FUNC-LABEL: {{^}}slow_sdiv_i32_3435:
; SI: buffer_load_dword [[VAL:v[0-9]+]],
; SI: v_mov_b32_e32 [[MAGIC:v[0-9]+]], 0x98a1930b
-; SI: v_mul_hi_i32 [[TMP:v[0-9]+]], [[VAL]], [[MAGIC]]
+; SI: v_mul_hi_i32 [[TMP:v[0-9]+]], [[MAGIC]], [[VAL]]
; SI: v_add_i32
; SI: v_lshrrev_b32
; SI: v_ashrrev_i32
diff --git a/test/CodeGen/R600/sdivrem24.ll b/test/CodeGen/R600/sdivrem24.ll
index 228cf76..e8c5c25 100644
--- a/test/CodeGen/R600/sdivrem24.ll
+++ b/test/CodeGen/R600/sdivrem24.ll
@@ -1,4 +1,5 @@
-; RUN: llc -march=r600 -mcpu=SI < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
+; RUN: llc -march=amdgcn -mcpu=SI < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
+; RUN: llc -march=amdgcn -mcpu=tonga < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
; RUN: llc -march=r600 -mcpu=redwood < %s | FileCheck -check-prefix=EG -check-prefix=FUNC %s
; FUNC-LABEL: {{^}}sdiv24_i8:
diff --git a/test/CodeGen/R600/sdivrem64.ll b/test/CodeGen/R600/sdivrem64.ll
new file mode 100644
index 0000000..a9b2b7f
--- /dev/null
+++ b/test/CodeGen/R600/sdivrem64.ll
@@ -0,0 +1,225 @@
+;RUN: llc -march=amdgcn -mcpu=SI -verify-machineinstrs < %s | FileCheck --check-prefix=SI --check-prefix=GCN --check-prefix=FUNC %s
+;RUN: llc -march=amdgcn -mcpu=tonga -verify-machineinstrs < %s | FileCheck --check-prefix=VI --check-prefix=GCN --check-prefix=FUNC %s
+;RUN: llc -march=r600 -mcpu=redwood < %s | FileCheck --check-prefix=EG --check-prefix=FUNC %s
+
+;FUNC-LABEL: {{^}}test_sdiv:
+;EG: RECIP_UINT
+;EG: LSHL {{.*}}, 1,
+;EG: BFE_UINT
+;EG: BFE_UINT
+;EG: BFE_UINT
+;EG: BFE_UINT
+;EG: BFE_UINT
+;EG: BFE_UINT
+;EG: BFE_UINT
+;EG: BFE_UINT
+;EG: BFE_UINT
+;EG: BFE_UINT
+;EG: BFE_UINT
+;EG: BFE_UINT
+;EG: BFE_UINT
+;EG: BFE_UINT
+;EG: BFE_UINT
+;EG: BFE_UINT
+;EG: BFE_UINT
+;EG: BFE_UINT
+;EG: BFE_UINT
+;EG: BFE_UINT
+;EG: BFE_UINT
+;EG: BFE_UINT
+;EG: BFE_UINT
+;EG: BFE_UINT
+;EG: BFE_UINT
+;EG: BFE_UINT
+;EG: BFE_UINT
+;EG: BFE_UINT
+;EG: BFE_UINT
+;EG: BFE_UINT
+
+;GCN: v_bfe_u32
+;GCN: v_bfe_u32
+;GCN: v_bfe_u32
+;GCN: v_bfe_u32
+;GCN: v_bfe_u32
+;GCN: v_bfe_u32
+;GCN: v_bfe_u32
+;GCN: v_bfe_u32
+;GCN: v_bfe_u32
+;GCN: v_bfe_u32
+;GCN: v_bfe_u32
+;GCN: v_bfe_u32
+;GCN: v_bfe_u32
+;GCN: v_bfe_u32
+;GCN: v_bfe_u32
+;GCN: v_bfe_u32
+;GCN: v_bfe_u32
+;GCN: v_bfe_u32
+;GCN: v_bfe_u32
+;GCN: v_bfe_u32
+;GCN: v_bfe_u32
+;GCN: v_bfe_u32
+;GCN: v_bfe_u32
+;GCN: v_bfe_u32
+;GCN: v_bfe_u32
+;GCN: v_bfe_u32
+;GCN: v_bfe_u32
+;GCN: v_bfe_u32
+;GCN: v_bfe_u32
+;GCN: v_bfe_u32
+;GCN-NOT: v_mad_f32
+;SI-NOT: v_lshr_b64
+;VI-NOT: v_lshrrev_b64
+;GCN: s_endpgm
+define void @test_sdiv(i64 addrspace(1)* %out, i64 %x, i64 %y) {
+ %result = sdiv i64 %x, %y
+ store i64 %result, i64 addrspace(1)* %out
+ ret void
+}
+
+;FUNC-LABEL: {{^}}test_srem:
+;EG: RECIP_UINT
+;EG: BFE_UINT
+;EG: BFE_UINT
+;EG: BFE_UINT
+;EG: BFE_UINT
+;EG: BFE_UINT
+;EG: BFE_UINT
+;EG: BFE_UINT
+;EG: BFE_UINT
+;EG: BFE_UINT
+;EG: BFE_UINT
+;EG: BFE_UINT
+;EG: BFE_UINT
+;EG: BFE_UINT
+;EG: BFE_UINT
+;EG: BFE_UINT
+;EG: BFE_UINT
+;EG: BFE_UINT
+;EG: BFE_UINT
+;EG: BFE_UINT
+;EG: BFE_UINT
+;EG: BFE_UINT
+;EG: BFE_UINT
+;EG: BFE_UINT
+;EG: BFE_UINT
+;EG: BFE_UINT
+;EG: BFE_UINT
+;EG: BFE_UINT
+;EG: BFE_UINT
+;EG: BFE_UINT
+;EG: BFE_UINT
+;EG: AND_INT {{.*}}, 1,
+
+;GCN: s_bfe_u32
+;GCN: s_bfe_u32
+;GCN: s_bfe_u32
+;GCN: s_bfe_u32
+;GCN: s_bfe_u32
+;GCN: s_bfe_u32
+;GCN: s_bfe_u32
+;GCN: s_bfe_u32
+;GCN: s_bfe_u32
+;GCN: s_bfe_u32
+;GCN: s_bfe_u32
+;GCN: s_bfe_u32
+;GCN: s_bfe_u32
+;GCN: s_bfe_u32
+;GCN: s_bfe_u32
+;GCN: s_bfe_u32
+;GCN: s_bfe_u32
+;GCN: s_bfe_u32
+;GCN: s_bfe_u32
+;GCN: s_bfe_u32
+;GCN: s_bfe_u32
+;GCN: s_bfe_u32
+;GCN: s_bfe_u32
+;GCN: s_bfe_u32
+;GCN: s_bfe_u32
+;GCN: s_bfe_u32
+;GCN: s_bfe_u32
+;GCN: s_bfe_u32
+;GCN: s_bfe_u32
+;GCN: s_bfe_u32
+;GCN-NOT: v_mad_f32
+;SI-NOT: v_lshr_b64
+;VI-NOT: v_lshrrev_b64
+;GCN: s_endpgm
+define void @test_srem(i64 addrspace(1)* %out, i64 %x, i64 %y) {
+ %result = srem i64 %x, %y
+ store i64 %result, i64 addrspace(1)* %out
+ ret void
+}
+
+;FUNC-LABEL: {{^}}test_sdiv3264:
+;EG: RECIP_UINT
+;EG-NOT: BFE_UINT
+
+;GCN-NOT: s_bfe_u32
+;GCN-NOT: v_mad_f32
+;SI-NOT: v_lshr_b64
+;VI-NOT: v_lshrrev_b64
+;GCN: s_endpgm
+define void @test_sdiv3264(i64 addrspace(1)* %out, i64 %x, i64 %y) {
+ %1 = ashr i64 %x, 33
+ %2 = ashr i64 %y, 33
+ %result = sdiv i64 %1, %2
+ store i64 %result, i64 addrspace(1)* %out
+ ret void
+}
+
+;FUNC-LABEL: {{^}}test_srem3264:
+;EG: RECIP_UINT
+;EG-NOT: BFE_UINT
+
+;GCN-NOT: s_bfe_u32
+;GCN-NOT: v_mad_f32
+;SI-NOT: v_lshr_b64
+;VI-NOT: v_lshrrev_b64
+;GCN: s_endpgm
+define void @test_srem3264(i64 addrspace(1)* %out, i64 %x, i64 %y) {
+ %1 = ashr i64 %x, 33
+ %2 = ashr i64 %y, 33
+ %result = srem i64 %1, %2
+ store i64 %result, i64 addrspace(1)* %out
+ ret void
+}
+
+;FUNC-LABEL: {{^}}test_sdiv2464:
+;EG: INT_TO_FLT
+;EG: INT_TO_FLT
+;EG: FLT_TO_INT
+;EG-NOT: RECIP_UINT
+;EG-NOT: BFE_UINT
+
+;GCN-NOT: s_bfe_u32
+;GCN: v_mad_f32
+;SI-NOT: v_lshr_b64
+;VI-NOT: v_lshrrev_b64
+;GCN: s_endpgm
+define void @test_sdiv2464(i64 addrspace(1)* %out, i64 %x, i64 %y) {
+ %1 = ashr i64 %x, 40
+ %2 = ashr i64 %y, 40
+ %result = sdiv i64 %1, %2
+ store i64 %result, i64 addrspace(1)* %out
+ ret void
+}
+
+;FUNC-LABEL: {{^}}test_srem2464:
+;EG: INT_TO_FLT
+;EG: INT_TO_FLT
+;EG: FLT_TO_INT
+;EG-NOT: RECIP_UINT
+;EG-NOT: BFE_UINT
+
+;GCN-NOT: s_bfe_u32
+;GCN: v_mad_f32
+;SI-NOT: v_lshr_b64
+;VI-NOT: v_lshrrev_b64
+;GCN: s_endpgm
+define void @test_srem2464(i64 addrspace(1)* %out, i64 %x, i64 %y) {
+ %1 = ashr i64 %x, 40
+ %2 = ashr i64 %y, 40
+ %result = srem i64 %1, %2
+ store i64 %result, i64 addrspace(1)* %out
+ ret void
+}
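
The *3264 and *2464 cases rely on both operands being sign-extended from
narrow values: after ashr i64 %x, 33 (or 40) each operand fits in 32
(respectively 24) bits, so the full 64-bit shift sequence is unnecessary. A
sketch of the narrowing the backend is expected to perform (hypothetical IR,
for illustration only):

define i64 @narrowed_sdiv(i64 %x.wide, i64 %y.wide) {
  ; both inputs are known to fit in i32 here
  %x = trunc i64 %x.wide to i32
  %y = trunc i64 %y.wide to i32
  %q32 = sdiv i32 %x, %y
  %q = sext i32 %q32 to i64
  ret i64 %q
}

The 24-bit case can additionally go through the float path, which is why the
*2464 tests check for v_mad_f32 while the others forbid it.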
diff --git a/test/CodeGen/R600/select-i1.ll b/test/CodeGen/R600/select-i1.ll
index 2e2d0e4..6735394 100644
--- a/test/CodeGen/R600/select-i1.ll
+++ b/test/CodeGen/R600/select-i1.ll
@@ -1,4 +1,5 @@
-; RUN: llc -march=r600 -mcpu=SI -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
+; RUN: llc -march=amdgcn -mcpu=SI -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
+; RUN: llc -march=amdgcn -mcpu=tonga -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
; FIXME: This should go in existing select.ll test, except the current testcase there is broken on SI
diff --git a/test/CodeGen/R600/select-vectors.ll b/test/CodeGen/R600/select-vectors.ll
index 7d8df2e..59082c6 100644
--- a/test/CodeGen/R600/select-vectors.ll
+++ b/test/CodeGen/R600/select-vectors.ll
@@ -1,4 +1,5 @@
-; RUN: llc -verify-machineinstrs -march=r600 -mcpu=SI < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
+; RUN: llc -verify-machineinstrs -march=amdgcn -mcpu=SI < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
+; RUN: llc -verify-machineinstrs -march=amdgcn -mcpu=tonga < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
; Test expansion of scalar selects on vectors.
; Evergreen not enabled since it seems to be having problems with doubles.
diff --git a/test/CodeGen/R600/select64.ll b/test/CodeGen/R600/select64.ll
index 8de34d5..0245dae 100644
--- a/test/CodeGen/R600/select64.ll
+++ b/test/CodeGen/R600/select64.ll
@@ -1,4 +1,5 @@
-; RUN: llc < %s -march=r600 -mcpu=SI -verify-machineinstrs | FileCheck %s
+; RUN: llc < %s -march=amdgcn -mcpu=SI -verify-machineinstrs | FileCheck %s
+; RUN: llc < %s -march=amdgcn -mcpu=tonga -verify-machineinstrs | FileCheck %s
; CHECK-LABEL: {{^}}select0:
; i64 select should be split into two i32 selects, and we shouldn't need
@@ -48,3 +49,20 @@ define void @v_select_trunc_i64_2(i32 addrspace(1)* %out, i32 %cond, i64 addrspa
store i32 %trunc, i32 addrspace(1)* %out, align 4
ret void
}
+
+; CHECK-LABEL: {{^}}v_select_i64_split_imm:
+; CHECK: s_mov_b32 [[SHI:s[0-9]+]], 63
+; CHECK: s_mov_b32 [[SLO:s[0-9]+]], 0
+; CHECK-DAG: v_mov_b32_e32 [[VHI:v[0-9]+]], [[SHI]]
+; CHECK-DAG: v_mov_b32_e32 [[VLO:v[0-9]+]], [[SLO]]
+; CHECK-DAG: v_cndmask_b32_e64 {{v[0-9]+}}, [[VLO]], {{v[0-9]+}}
+; CHECK-DAG: v_cndmask_b32_e64 {{v[0-9]+}}, [[VHI]], {{v[0-9]+}}
+; CHECK: s_endpgm
+define void @v_select_i64_split_imm(i64 addrspace(1)* %out, i32 %cond, i64 addrspace(1)* %aptr, i64 addrspace(1)* %bptr) nounwind {
+ %cmp = icmp ugt i32 %cond, 5
+ %a = load i64 addrspace(1)* %aptr, align 8
+ %b = load i64 addrspace(1)* %bptr, align 8
+ %sel = select i1 %cmp, i64 %a, i64 270582939648 ; 63 << 32
+ store i64 %sel, i64 addrspace(1)* %out, align 8
+ ret void
+}
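
Worked split of the immediate: 270582939648 = 63 * 2^32 = 0x0000003f00000000,
so the halves are lo = 0 and hi = 63, and the i64 select legalizes into two
i32 v_cndmask selects, one per half, exactly as the CHECK lines encode.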
diff --git a/test/CodeGen/R600/selectcc-opt.ll b/test/CodeGen/R600/selectcc-opt.ll
index 82577bb..7780371 100644
--- a/test/CodeGen/R600/selectcc-opt.ll
+++ b/test/CodeGen/R600/selectcc-opt.ll
@@ -1,4 +1,5 @@
-; RUN: llc -march=r600 -mcpu=SI < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
+; RUN: llc -march=amdgcn -mcpu=SI < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
+; RUN: llc -march=amdgcn -mcpu=tonga < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
; RUN: llc -march=r600 -mcpu=redwood < %s | FileCheck -check-prefix=EG -check-prefix=FUNC %s
diff --git a/test/CodeGen/R600/selectcc.ll b/test/CodeGen/R600/selectcc.ll
index 5a09b5c..f378e15 100644
--- a/test/CodeGen/R600/selectcc.ll
+++ b/test/CodeGen/R600/selectcc.ll
@@ -1,5 +1,6 @@
; RUN: llc -verify-machineinstrs -march=r600 -mcpu=redwood < %s | FileCheck -check-prefix=EG -check-prefix=FUNC %s
-; RUN: llc -verify-machineinstrs -march=r600 -mcpu=SI < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
+; RUN: llc -verify-machineinstrs -march=amdgcn -mcpu=SI < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
+; RUN: llc -verify-machineinstrs -march=amdgcn -mcpu=tonga < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
; FUNC-LABEL: {{^}}selectcc_i64:
; EG: XOR_INT
diff --git a/test/CodeGen/R600/setcc-opt.ll b/test/CodeGen/R600/setcc-opt.ll
index af48df8..93860f5 100644
--- a/test/CodeGen/R600/setcc-opt.ll
+++ b/test/CodeGen/R600/setcc-opt.ll
@@ -1,15 +1,236 @@
-; RUN: llc -march=r600 -mcpu=SI -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
+; RUN: llc -march=amdgcn -mcpu=SI -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=GCN -check-prefix=FUNC %s
+; RUN: llc -march=amdgcn -mcpu=tonga -verify-machineinstrs < %s | FileCheck -check-prefix=VI -check-prefix=GCN -check-prefix=FUNC %s
+; RUN: llc -march=r600 -mcpu=cypress -verify-machineinstrs < %s | FileCheck -check-prefix=EG -check-prefix=FUNC %s
-; SI-LABEL: {{^}}sext_bool_icmp_ne:
-; SI: v_cmp_ne_i32
-; SI-NEXT: v_cndmask_b32
-; SI-NOT: v_cmp_ne_i32
-; SI-NOT: v_cndmask_b32
-; SI: s_endpgm
-define void @sext_bool_icmp_ne(i1 addrspace(1)* %out, i32 %a, i32 %b) nounwind {
+; FUNC-LABEL: {{^}}sext_bool_icmp_eq_0:
+; GCN-NOT: v_cmp
+; GCN: v_cmp_ne_i32_e32 vcc,
+; GCN-NEXT: v_cndmask_b32_e64 [[RESULT:v[0-9]+]], 0, 1, vcc
+; GCN-NEXT: buffer_store_byte [[RESULT]]
+; GCN-NEXT: s_endpgm
+
+; EG: SETNE_INT * [[CMP:T[0-9]+]].[[CMPCHAN:[XYZW]]], KC0[2].Z, KC0[2].W
+; EG: AND_INT T{{[0-9]+.[XYZW]}}, PS, 1
+define void @sext_bool_icmp_eq_0(i1 addrspace(1)* %out, i32 %a, i32 %b) nounwind {
+ %icmp0 = icmp eq i32 %a, %b
+ %ext = sext i1 %icmp0 to i32
+ %icmp1 = icmp eq i32 %ext, 0
+ store i1 %icmp1, i1 addrspace(1)* %out
+ ret void
+}
+
+; FUNC-LABEL: {{^}}sext_bool_icmp_ne_0:
+; GCN-NOT: v_cmp
+; GCN: v_cmp_ne_i32_e32 vcc,
+; GCN-NEXT: v_cndmask_b32_e64 [[RESULT:v[0-9]+]], 0, 1, vcc
+; GCN-NEXT: buffer_store_byte [[RESULT]]
+; GCN-NEXT: s_endpgm
+
+; EG: SETNE_INT * [[CMP:T[0-9]+]].[[CMPCHAN:[XYZW]]], KC0[2].Z, KC0[2].W
+; EG: AND_INT T{{[0-9]+.[XYZW]}}, PS, 1
+define void @sext_bool_icmp_ne_0(i1 addrspace(1)* %out, i32 %a, i32 %b) nounwind {
%icmp0 = icmp ne i32 %a, %b
%ext = sext i1 %icmp0 to i32
%icmp1 = icmp ne i32 %ext, 0
store i1 %icmp1, i1 addrspace(1)* %out
ret void
}
+
+; This really folds away to false
+; FUNC-LABEL: {{^}}sext_bool_icmp_eq_1:
+; GCN: v_cmp_eq_i32_e32 vcc,
+; GCN-NEXT: v_cndmask_b32_e64 [[TMP:v[0-9]+]], 0, -1, vcc
+; GCN-NEXT: v_cmp_eq_i32_e64 {{s\[[0-9]+:[0-9]+\]}}, [[TMP]], 1{{$}}
+; GCN-NEXT: v_cndmask_b32_e64 [[TMP:v[0-9]+]], 0, 1,
+; GCN-NEXT: buffer_store_byte [[TMP]]
+; GCN-NEXT: s_endpgm
+define void @sext_bool_icmp_eq_1(i1 addrspace(1)* %out, i32 %a, i32 %b) nounwind {
+ %icmp0 = icmp eq i32 %a, %b
+ %ext = sext i1 %icmp0 to i32
+ %icmp1 = icmp eq i32 %ext, 1
+ store i1 %icmp1, i1 addrspace(1)* %out
+ ret void
+}
+
+; This really folds away to true
+; FUNC-LABEL: {{^}}sext_bool_icmp_ne_1:
+; GCN: v_cmp_ne_i32_e32 vcc,
+; GCN-NEXT: v_cndmask_b32_e64 [[TMP:v[0-9]+]], 0, -1, vcc
+; GCN-NEXT: v_cmp_ne_i32_e64 {{s\[[0-9]+:[0-9]+\]}}, [[TMP]], 1{{$}}
+; GCN-NEXT: v_cndmask_b32_e64 [[TMP:v[0-9]+]], 0, 1,
+; GCN-NEXT: buffer_store_byte [[TMP]]
+; GCN-NEXT: s_endpgm
+define void @sext_bool_icmp_ne_1(i1 addrspace(1)* %out, i32 %a, i32 %b) nounwind {
+ %icmp0 = icmp ne i32 %a, %b
+ %ext = sext i1 %icmp0 to i32
+ %icmp1 = icmp ne i32 %ext, 1
+ store i1 %icmp1, i1 addrspace(1)* %out
+ ret void
+}
+
+; FUNC-LABEL: {{^}}zext_bool_icmp_eq_0:
+; GCN-NOT: v_cmp
+; GCN: v_cmp_ne_i32_e32 vcc,
+; GCN-NEXT: v_cndmask_b32_e64 [[RESULT:v[0-9]+]], 0, 1, vcc
+; GCN-NEXT: buffer_store_byte [[RESULT]]
+; GCN-NEXT: s_endpgm
+define void @zext_bool_icmp_eq_0(i1 addrspace(1)* %out, i32 %a, i32 %b) nounwind {
+ %icmp0 = icmp eq i32 %a, %b
+ %ext = zext i1 %icmp0 to i32
+ %icmp1 = icmp eq i32 %ext, 0
+ store i1 %icmp1, i1 addrspace(1)* %out
+ ret void
+}
+
+; FUNC-LABEL: {{^}}zext_bool_icmp_ne_0:
+; GCN-NOT: v_cmp
+; GCN: v_cmp_ne_i32_e32 vcc,
+; GCN-NEXT: v_cndmask_b32_e64 [[RESULT:v[0-9]+]], 0, 1, vcc
+; GCN-NEXT: buffer_store_byte [[RESULT]]
+; GCN-NEXT: s_endpgm
+define void @zext_bool_icmp_ne_0(i1 addrspace(1)* %out, i32 %a, i32 %b) nounwind {
+ %icmp0 = icmp ne i32 %a, %b
+ %ext = zext i1 %icmp0 to i32
+ %icmp1 = icmp ne i32 %ext, 0
+ store i1 %icmp1, i1 addrspace(1)* %out
+ ret void
+}
+
+; FUNC-LABEL: {{^}}zext_bool_icmp_eq_1:
+; GCN-NOT: v_cmp
+; GCN: v_cmp_eq_i32_e32 vcc,
+; GCN-NEXT: v_cndmask_b32_e64 [[RESULT:v[0-9]+]], 0, 1, vcc
+; GCN-NEXT: buffer_store_byte [[RESULT]]
+; GCN-NEXT: s_endpgm
+define void @zext_bool_icmp_eq_1(i1 addrspace(1)* %out, i32 %a, i32 %b) nounwind {
+ %icmp0 = icmp eq i32 %a, %b
+ %ext = zext i1 %icmp0 to i32
+ %icmp1 = icmp eq i32 %ext, 1
+ store i1 %icmp1, i1 addrspace(1)* %out
+ ret void
+}
+
+; FUNC-LABEL: {{^}}zext_bool_icmp_ne_1:
+; GCN-NOT: v_cmp
+; GCN: v_cmp_eq_i32_e32 vcc,
+; GCN-NEXT: v_cndmask_b32_e64 [[RESULT:v[0-9]+]], 0, 1, vcc
+; GCN-NEXT: buffer_store_byte [[RESULT]]
+define void @zext_bool_icmp_ne_1(i1 addrspace(1)* %out, i32 %a, i32 %b) nounwind {
+ %icmp0 = icmp ne i32 %a, %b
+ %ext = zext i1 %icmp0 to i32
+ %icmp1 = icmp ne i32 %ext, 1
+ store i1 %icmp1, i1 addrspace(1)* %out
+ ret void
+}
+
+; FUNC-LABEL: {{^}}sext_bool_icmp_ne_k:
+; SI-DAG: s_load_dword [[A:s[0-9]+]], s{{\[[0-9]+:[0-9]+\]}}, 0xb
+; SI-DAG: s_load_dword [[B:s[0-9]+]], s{{\[[0-9]+:[0-9]+\]}}, 0xc
+; VI-DAG: s_load_dword [[A:s[0-9]+]], s{{\[[0-9]+:[0-9]+\]}}, 0x2c
+; VI-DAG: s_load_dword [[B:s[0-9]+]], s{{\[[0-9]+:[0-9]+\]}}, 0x30
+; GCN: v_mov_b32_e32 [[VB:v[0-9]+]], [[B]]
+; GCN: v_cmp_ne_i32_e64 [[CMP:s\[[0-9]+:[0-9]+\]]], [[VB]], 2{{$}}
+; GCN: v_cndmask_b32_e64 [[RESULT:v[0-9]+]], 0, 1, [[CMP]]
+; GCN: buffer_store_byte
+; GCN: s_endpgm
+define void @sext_bool_icmp_ne_k(i1 addrspace(1)* %out, i32 %a, i32 %b) nounwind {
+ %icmp0 = icmp ne i32 %a, %b
+ %ext = sext i1 %icmp0 to i32
+ %icmp1 = icmp ne i32 %ext, 2
+ store i1 %icmp1, i1 addrspace(1)* %out
+ ret void
+}
+
+; FUNC-LABEL: {{^}}cmp_zext_k_i8max:
+; GCN: buffer_load_ubyte [[B:v[0-9]+]], s{{\[[0-9]+:[0-9]+\]}}, 0 offset:44
+; GCN: v_mov_b32_e32 [[K255:v[0-9]+]], 0xff{{$}}
+; GCN: v_cmp_ne_i32_e32 vcc, [[B]], [[K255]]
+; GCN-NEXT: v_cndmask_b32_e64 [[RESULT:v[0-9]+]], 0, 1, vcc
+; GCN-NEXT: buffer_store_byte [[RESULT]]
+; GCN: s_endpgm
+define void @cmp_zext_k_i8max(i1 addrspace(1)* %out, i8 %b) nounwind {
+ %b.ext = zext i8 %b to i32
+ %icmp0 = icmp ne i32 %b.ext, 255
+ store i1 %icmp0, i1 addrspace(1)* %out
+ ret void
+}
+
+; FUNC-LABEL: {{^}}cmp_sext_k_neg1:
+; GCN: buffer_load_sbyte [[B:v[0-9]+]]
+; GCN: v_cmp_ne_i32_e64 [[CMP:s\[[0-9]+:[0-9]+\]]], [[B]], -1{{$}}
+; GCN-NEXT: v_cndmask_b32_e64 [[RESULT:v[0-9]+]], 0, 1, [[CMP]]
+; GCN-NEXT: buffer_store_byte [[RESULT]]
+; GCN: s_endpgm
+define void @cmp_sext_k_neg1(i1 addrspace(1)* %out, i8 addrspace(1)* %b.ptr) nounwind {
+ %b = load i8 addrspace(1)* %b.ptr
+ %b.ext = sext i8 %b to i32
+ %icmp0 = icmp ne i32 %b.ext, -1
+ store i1 %icmp0, i1 addrspace(1)* %out
+ ret void
+}
+
+; FUNC-LABEL: {{^}}cmp_sext_k_neg1_i8_sext_arg:
+; GCN: s_load_dword [[B:s[0-9]+]]
+; GCN: v_cmp_ne_i32_e64 [[CMP:s\[[0-9]+:[0-9]+\]]], [[B]], -1{{$}}
+; GCN-NEXT: v_cndmask_b32_e64 [[RESULT:v[0-9]+]], 0, 1, [[CMP]]
+; GCN-NEXT: buffer_store_byte [[RESULT]]
+; GCN: s_endpgm
+define void @cmp_sext_k_neg1_i8_sext_arg(i1 addrspace(1)* %out, i8 signext %b) nounwind {
+ %b.ext = sext i8 %b to i32
+ %icmp0 = icmp ne i32 %b.ext, -1
+ store i1 %icmp0, i1 addrspace(1)* %out
+ ret void
+}
+
+; FIXME: This ends up doing a buffer_load_ubyte and a compare with
+; 255. Seems to be due to ordering problems when load widths are not allowed to be reduced.
+; Should do a buffer_load_sbyte and compare with -1.
+
+; FUNC-LABEL: {{^}}cmp_sext_k_neg1_i8_arg:
+; GCN-DAG: buffer_load_ubyte [[B:v[0-9]+]]
+; GCN-DAG: v_mov_b32_e32 [[K:v[0-9]+]], 0xff{{$}}
+; GCN: v_cmp_ne_i32_e32 vcc, [[B]], [[K]]{{$}}
+; GCN-NEXT: v_cndmask_b32_e64 [[RESULT:v[0-9]+]], 0, 1, vcc
+; GCN-NEXT: buffer_store_byte [[RESULT]]
+; GCN: s_endpgm
+define void @cmp_sext_k_neg1_i8_arg(i1 addrspace(1)* %out, i8 %b) nounwind {
+ %b.ext = sext i8 %b to i32
+ %icmp0 = icmp ne i32 %b.ext, -1
+ store i1 %icmp0, i1 addrspace(1)* %out
+ ret void
+}
+
+; FUNC-LABEL: {{^}}cmp_zext_k_neg1:
+; GCN: v_mov_b32_e32 [[RESULT:v[0-9]+]], 1{{$}}
+; GCN: buffer_store_byte [[RESULT]]
+; GCN: s_endpgm
+define void @cmp_zext_k_neg1(i1 addrspace(1)* %out, i8 %b) nounwind {
+ %b.ext = zext i8 %b to i32
+ %icmp0 = icmp ne i32 %b.ext, -1
+ store i1 %icmp0, i1 addrspace(1)* %out
+ ret void
+}
+
+; FUNC-LABEL: {{^}}zext_bool_icmp_ne_k:
+; GCN: v_mov_b32_e32 [[RESULT:v[0-9]+]], 1{{$}}
+; GCN: buffer_store_byte [[RESULT]]
+; GCN-NEXT: s_endpgm
+define void @zext_bool_icmp_ne_k(i1 addrspace(1)* %out, i32 %a, i32 %b) nounwind {
+ %icmp0 = icmp ne i32 %a, %b
+ %ext = zext i1 %icmp0 to i32
+ %icmp1 = icmp ne i32 %ext, 2
+ store i1 %icmp1, i1 addrspace(1)* %out
+ ret void
+}
+
+; FUNC-LABEL: {{^}}zext_bool_icmp_eq_k:
+; GCN: v_mov_b32_e32 [[RESULT:v[0-9]+]], 0{{$}}
+; GCN: buffer_store_byte [[RESULT]]
+; GCN-NEXT: s_endpgm
+define void @zext_bool_icmp_eq_k(i1 addrspace(1)* %out, i32 %a, i32 %b) nounwind {
+ %icmp0 = icmp ne i32 %a, %b
+ %ext = zext i1 %icmp0 to i32
+ %icmp1 = icmp eq i32 %ext, 2
+ store i1 %icmp1, i1 addrspace(1)* %out
+ ret void
+}
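
The pattern behind the folding cases above, sketched for reference (not part
of the test): sext i1 yields only 0 or -1 and zext i1 only 0 or 1, so a
compare against a constant outside that range is decided statically, and a
compare against 0 collapses back to the original boolean:

;   %z = zext i1 %c to i32        ; %z is 0 or 1, never 2
;   icmp ne i32 %z, 2  -> true    (zext_bool_icmp_ne_k stores constant 1)
;   icmp eq i32 %z, 2  -> false   (zext_bool_icmp_eq_k stores constant 0)
;   %s = sext i1 %c to i32        ; %s is 0 or -1
;   icmp ne i32 %s, 0  -> %c      (no second v_cmp in sext_bool_icmp_ne_0)
; likewise zext i8 is never -1, so cmp_zext_k_neg1 stores constant 1.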
diff --git a/test/CodeGen/R600/setcc.ll b/test/CodeGen/R600/setcc.ll
index 8dd2ce4..f9c7e4f 100644
--- a/test/CodeGen/R600/setcc.ll
+++ b/test/CodeGen/R600/setcc.ll
@@ -1,5 +1,7 @@
-;RUN: llc < %s -march=r600 -mcpu=redwood | FileCheck --check-prefix=R600 --check-prefix=FUNC %s
-;RUN: llc < %s -march=r600 -mcpu=SI -verify-machineinstrs| FileCheck --check-prefix=SI --check-prefix=FUNC %s
+; RUN: llc < %s -march=r600 -mcpu=redwood | FileCheck --check-prefix=R600 --check-prefix=FUNC %s
+; RUN: llc < %s -march=amdgcn -mcpu=SI -verify-machineinstrs | FileCheck -check-prefix=SI -check-prefix=FUNC %s
+
+declare i32 @llvm.r600.read.tidig.x() nounwind readnone
; FUNC-LABEL: {{^}}setcc_v2i32:
; R600-DAG: SETE_INT * T{{[0-9]+\.[XYZW]}}, KC0[3].X, KC0[3].Z
@@ -94,11 +96,9 @@ entry:
; R600-DAG: SETNE_DX10
; R600-DAG: AND_INT
; R600-DAG: SETNE_INT
-; SI: v_cmp_o_f32
-; SI: v_cmp_neq_f32
-; SI: v_cndmask_b32_e64
-; SI: v_cndmask_b32_e64
-; SI: v_and_b32_e32
+
+; SI: v_cmp_lg_f32_e32 vcc
+; SI-NEXT: v_cndmask_b32_e64 {{v[0-9]+}}, 0, -1, vcc
define void @f32_one(i32 addrspace(1)* %out, float %a, float %b) {
entry:
%0 = fcmp one float %a, %b
@@ -128,11 +128,9 @@ entry:
; R600-DAG: SETE_DX10
; R600-DAG: OR_INT
; R600-DAG: SETNE_INT
-; SI: v_cmp_u_f32
-; SI: v_cmp_eq_f32
-; SI: v_cndmask_b32_e64
-; SI: v_cndmask_b32_e64
-; SI: v_or_b32_e32
+
+; SI: v_cmp_nlg_f32_e32 vcc
+; SI-NEXT: v_cndmask_b32_e64 {{v[0-9]+}}, 0, -1, vcc
define void @f32_ueq(i32 addrspace(1)* %out, float %a, float %b) {
entry:
%0 = fcmp ueq float %a, %b
@@ -144,11 +142,8 @@ entry:
; FUNC-LABEL: {{^}}f32_ugt:
; R600: SETGE
; R600: SETE_DX10
-; SI: v_cmp_u_f32
-; SI: v_cmp_gt_f32
-; SI: v_cndmask_b32_e64
-; SI: v_cndmask_b32_e64
-; SI: v_or_b32_e32
+; SI: v_cmp_nle_f32_e32 vcc
+; SI-NEXT: v_cndmask_b32_e64 {{v[0-9]+}}, 0, -1, vcc
define void @f32_ugt(i32 addrspace(1)* %out, float %a, float %b) {
entry:
%0 = fcmp ugt float %a, %b
@@ -160,11 +155,9 @@ entry:
; FUNC-LABEL: {{^}}f32_uge:
; R600: SETGT
; R600: SETE_DX10
-; SI: v_cmp_u_f32
-; SI: v_cmp_ge_f32
-; SI: v_cndmask_b32_e64
-; SI: v_cndmask_b32_e64
-; SI: v_or_b32_e32
+
+; SI: v_cmp_nlt_f32_e32 vcc
+; SI-NEXT: v_cndmask_b32_e64 {{v[0-9]+}}, 0, -1, vcc
define void @f32_uge(i32 addrspace(1)* %out, float %a, float %b) {
entry:
%0 = fcmp uge float %a, %b
@@ -176,11 +169,9 @@ entry:
; FUNC-LABEL: {{^}}f32_ult:
; R600: SETGE
; R600: SETE_DX10
-; SI: v_cmp_u_f32
-; SI: v_cmp_lt_f32
-; SI: v_cndmask_b32_e64
-; SI: v_cndmask_b32_e64
-; SI: v_or_b32_e32
+
+; SI: v_cmp_nge_f32_e32 vcc
+; SI-NEXT: v_cndmask_b32_e64 {{v[0-9]+}}, 0, -1, vcc
define void @f32_ult(i32 addrspace(1)* %out, float %a, float %b) {
entry:
%0 = fcmp ult float %a, %b
@@ -192,11 +183,9 @@ entry:
; FUNC-LABEL: {{^}}f32_ule:
; R600: SETGT
; R600: SETE_DX10
-; SI: v_cmp_u_f32
-; SI: v_cmp_le_f32
-; SI: v_cndmask_b32_e64
-; SI: v_cndmask_b32_e64
-; SI: v_or_b32_e32
+
+; SI: v_cmp_ngt_f32_e32 vcc
+; SI-NEXT: v_cndmask_b32_e64 {{v[0-9]+}}, 0, -1, vcc
define void @f32_ule(i32 addrspace(1)* %out, float %a, float %b) {
entry:
%0 = fcmp ule float %a, %b
@@ -343,3 +332,46 @@ entry:
store i32 %1, i32 addrspace(1)* %out
ret void
}
+
+; FIXME: This does 4 compares
+; FUNC-LABEL: {{^}}v3i32_eq:
+; SI-DAG: v_cmp_eq_i32
+; SI-DAG: v_cndmask_b32_e64 {{v[0-9]+}}, 0, -1,
+; SI-DAG: v_cmp_eq_i32
+; SI-DAG: v_cndmask_b32_e64 {{v[0-9]+}}, 0, -1,
+; SI-DAG: v_cmp_eq_i32
+; SI-DAG: v_cndmask_b32_e64 {{v[0-9]+}}, 0, -1,
+; SI: s_endpgm
+define void @v3i32_eq(<3 x i32> addrspace(1)* %out, <3 x i32> addrspace(1)* %ptra, <3 x i32> addrspace(1)* %ptrb) {
+ %tid = call i32 @llvm.r600.read.tidig.x() nounwind readnone
+ %gep.a = getelementptr <3 x i32> addrspace(1)* %ptra, i32 %tid
+ %gep.b = getelementptr <3 x i32> addrspace(1)* %ptrb, i32 %tid
+ %gep.out = getelementptr <3 x i32> addrspace(1)* %out, i32 %tid
+ %a = load <3 x i32> addrspace(1)* %gep.a
+ %b = load <3 x i32> addrspace(1)* %gep.b
+ %cmp = icmp eq <3 x i32> %a, %b
+ %ext = sext <3 x i1> %cmp to <3 x i32>
+ store <3 x i32> %ext, <3 x i32> addrspace(1)* %gep.out
+ ret void
+}
+
+; FUNC-LABEL: {{^}}v3i8_eq:
+; SI-DAG: v_cmp_eq_i32
+; SI-DAG: v_cndmask_b32_e64 {{v[0-9]+}}, 0, -1,
+; SI-DAG: v_cmp_eq_i32
+; SI-DAG: v_cndmask_b32_e64 {{v[0-9]+}}, 0, -1,
+; SI-DAG: v_cmp_eq_i32
+; SI-DAG: v_cndmask_b32_e64 {{v[0-9]+}}, 0, -1,
+; SI: s_endpgm
+define void @v3i8_eq(<3 x i8> addrspace(1)* %out, <3 x i8> addrspace(1)* %ptra, <3 x i8> addrspace(1)* %ptrb) {
+ %tid = call i32 @llvm.r600.read.tidig.x() nounwind readnone
+ %gep.a = getelementptr <3 x i8> addrspace(1)* %ptra, i32 %tid
+ %gep.b = getelementptr <3 x i8> addrspace(1)* %ptrb, i32 %tid
+ %gep.out = getelementptr <3 x i8> addrspace(1)* %out, i32 %tid
+ %a = load <3 x i8> addrspace(1)* %gep.a
+ %b = load <3 x i8> addrspace(1)* %gep.b
+ %cmp = icmp eq <3 x i8> %a, %b
+ %ext = sext <3 x i1> %cmp to <3 x i8>
+ store <3 x i8> %ext, <3 x i8> addrspace(1)* %gep.out
+ ret void
+}
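
The "4 compares" FIXME on v3i32_eq is presumably type legalization at work:
<3 x i32> is not a legal type, so it is widened to <4 x i32> and the compare
runs on all four lanes even though only three results are ever stored:

;   <3 x i32>  --widen-->  <4 x i32>  =>  4 x v_cmp_eq_i32, one of them dead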
diff --git a/test/CodeGen/R600/setcc64.ll b/test/CodeGen/R600/setcc64.ll
index 6e43172..231be7a 100644
--- a/test/CodeGen/R600/setcc64.ll
+++ b/test/CodeGen/R600/setcc64.ll
@@ -1,4 +1,5 @@
-;RUN: llc < %s -march=r600 -mcpu=SI -verify-machineinstrs| FileCheck --check-prefix=SI --check-prefix=FUNC %s
+;RUN: llc < %s -march=amdgcn -mcpu=SI -verify-machineinstrs| FileCheck --check-prefix=SI --check-prefix=FUNC %s
+;RUN: llc < %s -march=amdgcn -mcpu=tonga -verify-machineinstrs| FileCheck --check-prefix=SI --check-prefix=FUNC %s
; XXX: Merge this into setcc, once R600 supports 64-bit operations
@@ -57,11 +58,8 @@ entry:
}
; FUNC-LABEL: {{^}}f64_one:
-; SI: v_cmp_o_f64
-; SI: v_cmp_neq_f64
-; SI: v_cndmask_b32_e64
-; SI: v_cndmask_b32_e64
-; SI: v_and_b32_e32
+; SI: v_cmp_lg_f64_e32 vcc
+; SI-NEXT: v_cndmask_b32_e64 {{v[0-9]+}}, 0, -1, vcc
define void @f64_one(i32 addrspace(1)* %out, double %a, double %b) {
entry:
%0 = fcmp one double %a, %b
@@ -81,11 +79,8 @@ entry:
}
; FUNC-LABEL: {{^}}f64_ueq:
-; SI: v_cmp_u_f64
-; SI: v_cmp_eq_f64
-; SI: v_cndmask_b32_e64
-; SI: v_cndmask_b32_e64
-; SI: v_or_b32_e32
+; SI: v_cmp_nlg_f64_e32 vcc
+; SI-NEXT: v_cndmask_b32_e64 {{v[0-9]+}}, 0, -1, vcc
define void @f64_ueq(i32 addrspace(1)* %out, double %a, double %b) {
entry:
%0 = fcmp ueq double %a, %b
@@ -95,11 +90,9 @@ entry:
}
; FUNC-LABEL: {{^}}f64_ugt:
-; SI: v_cmp_u_f64
-; SI: v_cmp_gt_f64
-; SI: v_cndmask_b32_e64
-; SI: v_cndmask_b32_e64
-; SI: v_or_b32_e32
+
+; SI: v_cmp_nle_f64_e32 vcc
+; SI-NEXT: v_cndmask_b32_e64 {{v[0-9]+}}, 0, -1, vcc
define void @f64_ugt(i32 addrspace(1)* %out, double %a, double %b) {
entry:
%0 = fcmp ugt double %a, %b
@@ -109,11 +102,8 @@ entry:
}
; FUNC-LABEL: {{^}}f64_uge:
-; SI: v_cmp_u_f64
-; SI: v_cmp_ge_f64
-; SI: v_cndmask_b32_e64
-; SI: v_cndmask_b32_e64
-; SI: v_or_b32_e32
+; SI: v_cmp_nlt_f64_e32 vcc
+; SI-NEXT: v_cndmask_b32_e64 {{v[0-9]+}}, 0, -1, vcc
define void @f64_uge(i32 addrspace(1)* %out, double %a, double %b) {
entry:
%0 = fcmp uge double %a, %b
@@ -123,11 +113,8 @@ entry:
}
; FUNC-LABEL: {{^}}f64_ult:
-; SI: v_cmp_u_f64
-; SI: v_cmp_lt_f64
-; SI: v_cndmask_b32_e64
-; SI: v_cndmask_b32_e64
-; SI: v_or_b32_e32
+; SI: v_cmp_nge_f64_e32 vcc
+; SI-NEXT: v_cndmask_b32_e64 {{v[0-9]+}}, 0, -1, vcc
define void @f64_ult(i32 addrspace(1)* %out, double %a, double %b) {
entry:
%0 = fcmp ult double %a, %b
@@ -137,11 +124,8 @@ entry:
}
; FUNC-LABEL: {{^}}f64_ule:
-; SI: v_cmp_u_f64
-; SI: v_cmp_le_f64
-; SI: v_cndmask_b32_e64
-; SI: v_cndmask_b32_e64
-; SI: v_or_b32_e32
+; SI: v_cmp_ngt_f64_e32 vcc
+; SI-NEXT: v_cndmask_b32_e64 {{v[0-9]+}}, 0, -1, vcc
define void @f64_ule(i32 addrspace(1)* %out, double %a, double %b) {
entry:
%0 = fcmp ule double %a, %b
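
All the rewrites in this file follow one identity: an unordered predicate is
the negation of the complementary ordered one, so the old v_cmp_u + ordered
compare + v_cndmask/v_or sequence collapses into a single negated compare:

;   ueq(a,b) == !(a <> b)  ->  v_cmp_nlg_f64
;   ugt(a,b) == !(a <= b)  ->  v_cmp_nle_f64
;   uge(a,b) == !(a <  b)  ->  v_cmp_nlt_f64
;   ult(a,b) == !(a >= b)  ->  v_cmp_nge_f64
;   ule(a,b) == !(a >  b)  ->  v_cmp_ngt_f64
; and one(a,b), ordered-and-unequal, maps directly to v_cmp_lg_f64.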
diff --git a/test/CodeGen/R600/seto.ll b/test/CodeGen/R600/seto.ll
index 5fe6ff6..9b5d6b5 100644
--- a/test/CodeGen/R600/seto.ll
+++ b/test/CodeGen/R600/seto.ll
@@ -1,4 +1,5 @@
-; RUN: llc -march=r600 -mcpu=verde -verify-machineinstrs < %s | FileCheck %s
+; RUN: llc -march=amdgcn -mcpu=verde -verify-machineinstrs < %s | FileCheck %s
+; RUN: llc -march=amdgcn -mcpu=tonga -verify-machineinstrs < %s | FileCheck %s
; CHECK-LABEL: {{^}}main:
; CHECK: v_cmp_o_f32_e64 [[CMP:s\[[0-9]+:[0-9]+\]]], [[SREG:s[0-9]+]], [[SREG]]
diff --git a/test/CodeGen/R600/setuo.ll b/test/CodeGen/R600/setuo.ll
index a391177..76346c4 100644
--- a/test/CodeGen/R600/setuo.ll
+++ b/test/CodeGen/R600/setuo.ll
@@ -1,4 +1,5 @@
-; RUN: llc -march=r600 -mcpu=verde -verify-machineinstrs < %s | FileCheck %s
+; RUN: llc -march=amdgcn -mcpu=verde -verify-machineinstrs < %s | FileCheck %s
+; RUN: llc -march=amdgcn -mcpu=tonga -verify-machineinstrs < %s | FileCheck %s
; CHECK-LABEL: {{^}}main:
; CHECK: v_cmp_u_f32_e64 [[CMP:s\[[0-9]+:[0-9]+\]]], [[SREG:s[0-9]+]], [[SREG]]
diff --git a/test/CodeGen/R600/sext-in-reg.ll b/test/CodeGen/R600/sext-in-reg.ll
index d364e6b..3260179 100644
--- a/test/CodeGen/R600/sext-in-reg.ll
+++ b/test/CodeGen/R600/sext-in-reg.ll
@@ -1,4 +1,4 @@
-; RUN: llc -march=r600 -mcpu=SI -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
+; RUN: llc -march=amdgcn -mcpu=SI -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
; RUN: llc -march=r600 -mcpu=cypress -verify-machineinstrs < %s | FileCheck -check-prefix=EG -check-prefix=FUNC %s
declare i32 @llvm.AMDGPU.imax(i32, i32) nounwind readnone
diff --git a/test/CodeGen/R600/sgpr-control-flow.ll b/test/CodeGen/R600/sgpr-control-flow.ll
index d8b8dff..f0236ac 100644
--- a/test/CodeGen/R600/sgpr-control-flow.ll
+++ b/test/CodeGen/R600/sgpr-control-flow.ll
@@ -1,4 +1,4 @@
-; RUN: llc -march=r600 -mcpu=SI -verify-machineinstrs< %s | FileCheck -check-prefix=SI %s
+; RUN: llc -march=amdgcn -mcpu=SI -verify-machineinstrs< %s | FileCheck -check-prefix=SI %s
;
;
; Most SALU instructions ignore control flow, so we need to make sure
@@ -59,6 +59,47 @@ endif:
ret void
}
+; FIXME: Should write to different SGPR pairs instead of copying to
+; VALU for i1 phi.
+
+; SI-LABEL: {{^}}sgpr_if_else_valu_cmp_phi_br:
+; SI: buffer_load_dword [[AVAL:v[0-9]+]]
+; SI: v_cmp_lt_i32_e64 [[CMP_IF:s\[[0-9]+:[0-9]+\]]], [[AVAL]], 0
+; SI: v_cndmask_b32_e64 [[V_CMP:v[0-9]+]], 0, -1, [[CMP_IF]]
+
+; SI: BB2_1:
+; SI: buffer_load_dword [[AVAL:v[0-9]+]]
+; SI: v_cmp_eq_i32_e64 [[CMP_ELSE:s\[[0-9]+:[0-9]+\]]], [[AVAL]], 0
+; SI: v_cndmask_b32_e64 [[V_CMP]], 0, -1, [[CMP_ELSE]]
+
+; SI: v_cmp_ne_i32_e64 [[CMP_CMP:s\[[0-9]+:[0-9]+\]]], [[V_CMP]], 0
+; SI: v_cndmask_b32_e64 [[RESULT:v[0-9]+]], 0, -1, [[CMP_CMP]]
+; SI: buffer_store_dword [[RESULT]]
+define void @sgpr_if_else_valu_cmp_phi_br(i32 addrspace(1)* %out, i32 addrspace(1)* %a, i32 addrspace(1)* %b) {
+entry:
+ %tid = call i32 @llvm.r600.read.tidig.x() #0
+ %tmp1 = icmp eq i32 %tid, 0
+ br i1 %tmp1, label %if, label %else
+
+if:
+ %gep.if = getelementptr i32 addrspace(1)* %a, i32 %tid
+ %a.val = load i32 addrspace(1)* %gep.if
+ %cmp.if = icmp eq i32 %a.val, 0
+ br label %endif
+
+else:
+ %gep.else = getelementptr i32 addrspace(1)* %b, i32 %tid
+ %b.val = load i32 addrspace(1)* %gep.else
+ %cmp.else = icmp slt i32 %b.val, 0
+ br label %endif
+
+endif:
+ %tmp4 = phi i1 [%cmp.if, %if], [%cmp.else, %else]
+ %ext = sext i1 %tmp4 to i32
+ store i32 %ext, i32 addrspace(1)* %out
+ ret void
+}
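
Reading the checks together with the FIXME: because the branch condition is
divergent, the i1 phi cannot simply live in an SGPR pair, so each predecessor
materializes its compare as a 0/-1 per-lane mask in the same VGPR ([[V_CMP]])
via v_cndmask, and the join re-tests that mask with one more
v_cmp_ne/v_cndmask pair before the store.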
+
declare i32 @llvm.r600.read.tidig.x() #0
attributes #0 = { readnone }
diff --git a/test/CodeGen/R600/sgpr-copy-duplicate-operand.ll b/test/CodeGen/R600/sgpr-copy-duplicate-operand.ll
index aa97fbf..893f5a3 100644
--- a/test/CodeGen/R600/sgpr-copy-duplicate-operand.ll
+++ b/test/CodeGen/R600/sgpr-copy-duplicate-operand.ll
@@ -1,4 +1,5 @@
-; RUN: llc -march=r600 -mcpu=SI -verify-machineinstrs< %s | FileCheck -check-prefix=SI %s
+; RUN: llc -march=amdgcn -mcpu=SI -verify-machineinstrs< %s | FileCheck -check-prefix=SI %s
+; RUN: llc -march=amdgcn -mcpu=tonga -verify-machineinstrs< %s | FileCheck -check-prefix=SI %s
; Copy VGPR -> SGPR used twice as an instruction operand, which is then
; used in an REG_SEQUENCE that also needs to be handled.
diff --git a/test/CodeGen/R600/sgpr-copy.ll b/test/CodeGen/R600/sgpr-copy.ll
index 8daf753..57cbadd 100644
--- a/test/CodeGen/R600/sgpr-copy.ll
+++ b/test/CodeGen/R600/sgpr-copy.ll
@@ -1,9 +1,10 @@
-; RUN: llc < %s -march=r600 -mcpu=SI -verify-machineinstrs | FileCheck %s
+; RUN: llc < %s -march=amdgcn -mcpu=SI -verify-machineinstrs | FileCheck %s
+; RUN: llc < %s -march=amdgcn -mcpu=tonga -verify-machineinstrs | FileCheck %s
; This test checks that no VGPR to SGPR copies are created by the register
; allocator.
; CHECK-LABEL: {{^}}phi1:
-; CHECK: s_buffer_load_dword [[DST:s[0-9]]], {{s\[[0-9]+:[0-9]+\]}}, 0
+; CHECK: s_buffer_load_dword [[DST:s[0-9]]], {{s\[[0-9]+:[0-9]+\]}}, 0x0
; CHECK: v_mov_b32_e32 v{{[0-9]}}, [[DST]]
define void @phi1(<16 x i8> addrspace(2)* inreg, <16 x i8> addrspace(2)* inreg, <32 x i8> addrspace(2)* inreg, i32 inreg, <2 x i32>, <2 x i32>, <2 x i32>, <3 x i32>, <2 x i32>, <2 x i32>, <2 x i32>, float, float, float, float, float, float, float, float, float) #0 {
@@ -202,8 +203,8 @@ attributes #2 = { readonly }
attributes #3 = { readnone }
attributes #4 = { nounwind readonly }
-!0 = metadata !{metadata !"const", null}
-!1 = metadata !{metadata !0, metadata !0, i64 0, i32 1}
+!0 = !{!"const", null}
+!1 = !{!0, !0, i64 0, i32 1}
; Function Attrs: nounwind readnone
declare float @llvm.SI.fs.interp(i32, i32, i32, <2 x i32>) #1
@@ -267,7 +268,7 @@ endif:
ret void
}
-!2 = metadata !{metadata !"const", null, i32 1}
+!2 = !{!"const", null, i32 1}
; CHECK-LABEL: {{^}}copy1:
; CHECK: buffer_load_dword
diff --git a/test/CodeGen/R600/shl.ll b/test/CodeGen/R600/shl.ll
index 71c9fc4..f89353b 100644
--- a/test/CodeGen/R600/shl.ll
+++ b/test/CodeGen/R600/shl.ll
@@ -1,13 +1,18 @@
-;RUN: llc < %s -march=r600 -mcpu=redwood | FileCheck --check-prefix=EG-CHECK %s
-;RUN: llc < %s -march=r600 -mcpu=verde -verify-machineinstrs | FileCheck --check-prefix=SI-CHECK %s
+;RUN: llc < %s -march=r600 -mcpu=redwood | FileCheck --check-prefix=EG %s
+;RUN: llc < %s -march=amdgcn -mcpu=verde -verify-machineinstrs | FileCheck --check-prefix=SI %s
+;RUN: llc < %s -march=amdgcn -mcpu=tonga -verify-machineinstrs | FileCheck --check-prefix=VI %s
-;EG-CHECK: {{^}}shl_v2i32:
-;EG-CHECK: LSHL {{\*? *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
-;EG-CHECK: LSHL {{\*? *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
+;EG: {{^}}shl_v2i32:
+;EG: LSHL {{\*? *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
+;EG: LSHL {{\*? *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
-;SI-CHECK: {{^}}shl_v2i32:
-;SI-CHECK: v_lshl_b32_e32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
-;SI-CHECK: v_lshl_b32_e32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
+;SI: {{^}}shl_v2i32:
+;SI: v_lshl_b32_e32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
+;SI: v_lshl_b32_e32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
+
+;VI: {{^}}shl_v2i32:
+;VI: v_lshlrev_b32_e32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
+;VI: v_lshlrev_b32_e32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
define void @shl_v2i32(<2 x i32> addrspace(1)* %out, <2 x i32> addrspace(1)* %in) {
%b_ptr = getelementptr <2 x i32> addrspace(1)* %in, i32 1
@@ -18,17 +23,23 @@ define void @shl_v2i32(<2 x i32> addrspace(1)* %out, <2 x i32> addrspace(1)* %in
ret void
}
-;EG-CHECK: {{^}}shl_v4i32:
-;EG-CHECK: LSHL {{\*? *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
-;EG-CHECK: LSHL {{\*? *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
-;EG-CHECK: LSHL {{\*? *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
-;EG-CHECK: LSHL {{\*? *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
+;EG: {{^}}shl_v4i32:
+;EG: LSHL {{\*? *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
+;EG: LSHL {{\*? *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
+;EG: LSHL {{\*? *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
+;EG: LSHL {{\*? *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
+
+;SI: {{^}}shl_v4i32:
+;SI: v_lshl_b32_e32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
+;SI: v_lshl_b32_e32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
+;SI: v_lshl_b32_e32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
+;SI: v_lshl_b32_e32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
-;SI-CHECK: {{^}}shl_v4i32:
-;SI-CHECK: v_lshl_b32_e32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
-;SI-CHECK: v_lshl_b32_e32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
-;SI-CHECK: v_lshl_b32_e32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
-;SI-CHECK: v_lshl_b32_e32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
+;VI: {{^}}shl_v4i32:
+;VI: v_lshlrev_b32_e32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
+;VI: v_lshlrev_b32_e32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
+;VI: v_lshlrev_b32_e32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
+;VI: v_lshlrev_b32_e32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
define void @shl_v4i32(<4 x i32> addrspace(1)* %out, <4 x i32> addrspace(1)* %in) {
%b_ptr = getelementptr <4 x i32> addrspace(1)* %in, i32 1
@@ -39,20 +50,23 @@ define void @shl_v4i32(<4 x i32> addrspace(1)* %out, <4 x i32> addrspace(1)* %in
ret void
}
-;EG-CHECK: {{^}}shl_i64:
-;EG-CHECK: SUB_INT {{\*? *}}[[COMPSH:T[0-9]+\.[XYZW]]], {{literal.[xy]}}, [[SHIFT:T[0-9]+\.[XYZW]]]
-;EG-CHECK: LSHR {{\* *}}[[TEMP:T[0-9]+\.[XYZW]]], [[OPLO:T[0-9]+\.[XYZW]]], {{[[COMPSH]]|PV.[XYZW]}}
-;EG-CHECK: LSHR {{\*? *}}[[OVERF:T[0-9]+\.[XYZW]]], {{[[TEMP]]|PV.[XYZW]}}, 1
+;EG: {{^}}shl_i64:
+;EG: SUB_INT {{\*? *}}[[COMPSH:T[0-9]+\.[XYZW]]], {{literal.[xy]}}, [[SHIFT:T[0-9]+\.[XYZW]]]
+;EG: LSHR {{\* *}}[[TEMP:T[0-9]+\.[XYZW]]], [[OPLO:T[0-9]+\.[XYZW]]], {{[[COMPSH]]|PV.[XYZW]}}
+;EG: LSHR {{\*? *}}[[OVERF:T[0-9]+\.[XYZW]]], {{[[TEMP]]|PV.[XYZW]}}, 1
;EG-DAG: ADD_INT {{\*? *}}[[BIGSH:T[0-9]+\.[XYZW]]], [[SHIFT]], literal
-;EG-CHECK-DAG: LSHL {{\*? *}}[[HISMTMP:T[0-9]+\.[XYZW]]], [[OPHI:T[0-9]+\.[XYZW]]], [[SHIFT]]
-;EG-CHECK-DAG: OR_INT {{\*? *}}[[HISM:T[0-9]+\.[XYZW]]], {{[[HISMTMP]]|PV.[XYZW]}}, {{[[OVERF]]|PV.[XYZW]}}
-;EG-CHECK-DAG: LSHL {{\*? *}}[[LOSM:T[0-9]+\.[XYZW]]], [[OPLO]], {{PS|[[SHIFT]]}}
-;EG-CHECK-DAG: SETGT_UINT {{\*? *}}[[RESC:T[0-9]+\.[XYZW]]], [[SHIFT]], literal
-;EG-CHECK-DAG: CNDE_INT {{\*? *}}[[RESLO:T[0-9]+\.[XYZW]]], {{T[0-9]+\.[XYZW]}}
-;EG-CHECK-DAG: CNDE_INT {{\*? *}}[[RESHI:T[0-9]+\.[XYZW]]], {{T[0-9]+\.[XYZW], .*}}, 0.0
+;EG-DAG: LSHL {{\*? *}}[[HISMTMP:T[0-9]+\.[XYZW]]], [[OPHI:T[0-9]+\.[XYZW]]], [[SHIFT]]
+;EG-DAG: OR_INT {{\*? *}}[[HISM:T[0-9]+\.[XYZW]]], {{[[HISMTMP]]|PV.[XYZW]}}, {{[[OVERF]]|PV.[XYZW]}}
+;EG-DAG: LSHL {{\*? *}}[[LOSM:T[0-9]+\.[XYZW]]], [[OPLO]], {{PS|[[SHIFT]]}}
+;EG-DAG: SETGT_UINT {{\*? *}}[[RESC:T[0-9]+\.[XYZW]]], [[SHIFT]], literal
+;EG-DAG: CNDE_INT {{\*? *}}[[RESLO:T[0-9]+\.[XYZW]]], {{T[0-9]+\.[XYZW]}}
+;EG-DAG: CNDE_INT {{\*? *}}[[RESHI:T[0-9]+\.[XYZW]]], {{T[0-9]+\.[XYZW], .*}}, 0.0
-;SI-CHECK: {{^}}shl_i64:
-;SI-CHECK: v_lshl_b64 {{v\[[0-9]+:[0-9]+\], v\[[0-9]+:[0-9]+\], v[0-9]+}}
+;SI: {{^}}shl_i64:
+;SI: v_lshl_b64 {{v\[[0-9]+:[0-9]+\], v\[[0-9]+:[0-9]+\], v[0-9]+}}
+
+;VI: {{^}}shl_i64:
+;VI: v_lshlrev_b64 {{v\[[0-9]+:[0-9]+\], v[0-9]+, v\[[0-9]+:[0-9]+\]}}
define void @shl_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %in) {
%b_ptr = getelementptr i64 addrspace(1)* %in, i64 1
@@ -63,31 +77,35 @@ define void @shl_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %in) {
ret void
}
-;EG-CHECK: {{^}}shl_v2i64:
-;EG-CHECK-DAG: SUB_INT {{\*? *}}[[COMPSHA:T[0-9]+\.[XYZW]]], {{literal.[xy]}}, [[SHA:T[0-9]+\.[XYZW]]]
-;EG-CHECK-DAG: SUB_INT {{\*? *}}[[COMPSHB:T[0-9]+\.[XYZW]]], {{literal.[xy]}}, [[SHB:T[0-9]+\.[XYZW]]]
-;EG-CHECK-DAG: LSHR {{\*? *}}[[COMPSHA]]
-;EG-CHECK-DAG: LSHR {{\*? *}}[[COMPSHB]]
-;EG-CHECK-DAG: LSHR {{.*}}, 1
-;EG-CHECK-DAG: LSHR {{.*}}, 1
-;EG-CHECK-DAG: ADD_INT {{\*? *}}[[BIGSHA:T[0-9]+\.[XYZW]]]{{.*}}, literal
-;EG-CHECK-DAG: ADD_INT {{\*? *}}[[BIGSHB:T[0-9]+\.[XYZW]]]{{.*}}, literal
-;EG-CHECK-DAG: LSHL {{.*}}, [[SHA]]
-;EG-CHECK-DAG: LSHL {{.*}}, [[SHB]]
-;EG-CHECK-DAG: LSHL {{.*}}, [[SHA]]
-;EG-CHECK-DAG: LSHL {{.*}}, [[SHB]]
-;EG-CHECK-DAG: LSHL
-;EG-CHECK-DAG: LSHL
-;EG-CHECK-DAG: SETGT_UINT {{\*? *T[0-9]\.[XYZW]}}, [[SHA]], literal
-;EG-CHECK-DAG: SETGT_UINT {{\*? *T[0-9]\.[XYZW]}}, [[SHB]], literal
-;EG-CHECK-DAG: CNDE_INT {{.*}}, 0.0
-;EG-CHECK-DAG: CNDE_INT {{.*}}, 0.0
-;EG-CHECK-DAG: CNDE_INT
-;EG-CHECK-DAG: CNDE_INT
-
-;SI-CHECK: {{^}}shl_v2i64:
-;SI-CHECK: v_lshl_b64 {{v\[[0-9]+:[0-9]+\], v\[[0-9]+:[0-9]+\], v[0-9]+}}
-;SI-CHECK: v_lshl_b64 {{v\[[0-9]+:[0-9]+\], v\[[0-9]+:[0-9]+\], v[0-9]+}}
+;EG: {{^}}shl_v2i64:
+;EG-DAG: SUB_INT {{\*? *}}[[COMPSHA:T[0-9]+\.[XYZW]]], {{literal.[xy]}}, [[SHA:T[0-9]+\.[XYZW]]]
+;EG-DAG: SUB_INT {{\*? *}}[[COMPSHB:T[0-9]+\.[XYZW]]], {{literal.[xy]}}, [[SHB:T[0-9]+\.[XYZW]]]
+;EG-DAG: LSHR {{\*? *}}[[COMPSHA]]
+;EG-DAG: LSHR {{\*? *}}[[COMPSHB]]
+;EG-DAG: LSHR {{.*}}, 1
+;EG-DAG: LSHR {{.*}}, 1
+;EG-DAG: ADD_INT {{\*? *}}[[BIGSHA:T[0-9]+\.[XYZW]]]{{.*}}, literal
+;EG-DAG: ADD_INT {{\*? *}}[[BIGSHB:T[0-9]+\.[XYZW]]]{{.*}}, literal
+;EG-DAG: LSHL {{.*}}, [[SHA]]
+;EG-DAG: LSHL {{.*}}, [[SHB]]
+;EG-DAG: LSHL {{.*}}, [[SHA]]
+;EG-DAG: LSHL {{.*}}, [[SHB]]
+;EG-DAG: LSHL
+;EG-DAG: LSHL
+;EG-DAG: SETGT_UINT {{\*? *T[0-9]\.[XYZW]}}, [[SHA]], literal
+;EG-DAG: SETGT_UINT {{\*? *T[0-9]\.[XYZW]}}, [[SHB]], literal
+;EG-DAG: CNDE_INT {{.*}}, 0.0
+;EG-DAG: CNDE_INT {{.*}}, 0.0
+;EG-DAG: CNDE_INT
+;EG-DAG: CNDE_INT
+
+;SI: {{^}}shl_v2i64:
+;SI: v_lshl_b64 {{v\[[0-9]+:[0-9]+\], v\[[0-9]+:[0-9]+\], v[0-9]+}}
+;SI: v_lshl_b64 {{v\[[0-9]+:[0-9]+\], v\[[0-9]+:[0-9]+\], v[0-9]+}}
+
+;VI: {{^}}shl_v2i64:
+;VI: v_lshlrev_b64 {{v\[[0-9]+:[0-9]+\], v[0-9]+, v\[[0-9]+:[0-9]+\]}}
+;VI: v_lshlrev_b64 {{v\[[0-9]+:[0-9]+\], v[0-9]+, v\[[0-9]+:[0-9]+\]}}
define void @shl_v2i64(<2 x i64> addrspace(1)* %out, <2 x i64> addrspace(1)* %in) {
%b_ptr = getelementptr <2 x i64> addrspace(1)* %in, i64 1
@@ -98,53 +116,59 @@ define void @shl_v2i64(<2 x i64> addrspace(1)* %out, <2 x i64> addrspace(1)* %in
ret void
}
-;EG-CHECK: {{^}}shl_v4i64:
-;EG-CHECK-DAG: SUB_INT {{\*? *}}[[COMPSHA:T[0-9]+\.[XYZW]]], {{literal.[xy]}}, [[SHA:T[0-9]+\.[XYZW]]]
-;EG-CHECK-DAG: SUB_INT {{\*? *}}[[COMPSHB:T[0-9]+\.[XYZW]]], {{literal.[xy]}}, [[SHB:T[0-9]+\.[XYZW]]]
-;EG-CHECK-DAG: SUB_INT {{\*? *}}[[COMPSHC:T[0-9]+\.[XYZW]]], {{literal.[xy]}}, [[SHC:T[0-9]+\.[XYZW]]]
-;EG-CHECK-DAG: SUB_INT {{\*? *}}[[COMPSHD:T[0-9]+\.[XYZW]]], {{literal.[xy]}}, [[SHD:T[0-9]+\.[XYZW]]]
-;EG-CHECK-DAG: LSHR {{\*? *}}[[COMPSHA]]
-;EG-CHECK-DAG: LSHR {{\*? *}}[[COMPSHB]]
-;EG-CHECK-DAG: LSHR {{\*? *}}[[COMPSHC]]
-;EG-CHECK-DAG: LSHR {{\*? *}}[[COMPSHD]]
-;EG-CHECK-DAG: LSHR {{.*}}, 1
-;EG-CHECK-DAG: LSHR {{.*}}, 1
-;EG-CHECK-DAG: LSHR {{.*}}, 1
-;EG-CHECK-DAG: LSHR {{.*}}, 1
-;EG-CHECK-DAG: ADD_INT {{\*? *}}[[BIGSHA:T[0-9]+\.[XYZW]]]{{.*}}, literal
-;EG-CHECK-DAG: ADD_INT {{\*? *}}[[BIGSHB:T[0-9]+\.[XYZW]]]{{.*}}, literal
-;EG-CHECK-DAG: ADD_INT {{\*? *}}[[BIGSHC:T[0-9]+\.[XYZW]]]{{.*}}, literal
-;EG-CHECK-DAG: ADD_INT {{\*? *}}[[BIGSHD:T[0-9]+\.[XYZW]]]{{.*}}, literal
-;EG-CHECK-DAG: LSHL {{.*}}, [[SHA]]
-;EG-CHECK-DAG: LSHL {{.*}}, [[SHB]]
-;EG-CHECK-DAG: LSHL {{.*}}, [[SHC]]
-;EG-CHECK-DAG: LSHL {{.*}}, [[SHD]]
-;EG-CHECK-DAG: LSHL {{.*}}, [[SHA]]
-;EG-CHECK-DAG: LSHL {{.*}}, [[SHB]]
-;EG-CHECK-DAG: LSHL {{.*}}, [[SHC]]
-;EG-CHECK-DAG: LSHL {{.*}}, [[SHD]]
-;EG-CHECK-DAG: LSHL
-;EG-CHECK-DAG: LSHL
-;EG-CHECK-DAG: LSHL
-;EG-CHECK-DAG: LSHL
-;EG-CHECK-DAG: SETGT_UINT {{\*? *T[0-9]\.[XYZW]}}, [[SHA]], literal
-;EG-CHECK-DAG: SETGT_UINT {{\*? *T[0-9]\.[XYZW]}}, [[SHB]], literal
-;EG-CHECK-DAG: SETGT_UINT {{\*? *T[0-9]\.[XYZW]}}, [[SHC]], literal
-;EG-CHECK-DAG: SETGT_UINT {{\*? *T[0-9]\.[XYZW]}}, [[SHD]], literal
-;EG-CHECK-DAG: CNDE_INT {{.*}}, 0.0
-;EG-CHECK-DAG: CNDE_INT {{.*}}, 0.0
-;EG-CHECK-DAG: CNDE_INT {{.*}}, 0.0
-;EG-CHECK-DAG: CNDE_INT {{.*}}, 0.0
-;EG-CHECK-DAG: CNDE_INT
-;EG-CHECK-DAG: CNDE_INT
-;EG-CHECK-DAG: CNDE_INT
-;EG-CHECK-DAG: CNDE_INT
-
-;SI-CHECK: {{^}}shl_v4i64:
-;SI-CHECK: v_lshl_b64 {{v\[[0-9]+:[0-9]+\], v\[[0-9]+:[0-9]+\], v[0-9]+}}
-;SI-CHECK: v_lshl_b64 {{v\[[0-9]+:[0-9]+\], v\[[0-9]+:[0-9]+\], v[0-9]+}}
-;SI-CHECK: v_lshl_b64 {{v\[[0-9]+:[0-9]+\], v\[[0-9]+:[0-9]+\], v[0-9]+}}
-;SI-CHECK: v_lshl_b64 {{v\[[0-9]+:[0-9]+\], v\[[0-9]+:[0-9]+\], v[0-9]+}}
+;EG: {{^}}shl_v4i64:
+;EG-DAG: SUB_INT {{\*? *}}[[COMPSHA:T[0-9]+\.[XYZW]]], {{literal.[xy]}}, [[SHA:T[0-9]+\.[XYZW]]]
+;EG-DAG: SUB_INT {{\*? *}}[[COMPSHB:T[0-9]+\.[XYZW]]], {{literal.[xy]}}, [[SHB:T[0-9]+\.[XYZW]]]
+;EG-DAG: SUB_INT {{\*? *}}[[COMPSHC:T[0-9]+\.[XYZW]]], {{literal.[xy]}}, [[SHC:T[0-9]+\.[XYZW]]]
+;EG-DAG: SUB_INT {{\*? *}}[[COMPSHD:T[0-9]+\.[XYZW]]], {{literal.[xy]}}, [[SHD:T[0-9]+\.[XYZW]]]
+;EG-DAG: LSHR {{\*? *}}[[COMPSHA]]
+;EG-DAG: LSHR {{\*? *}}[[COMPSHB]]
+;EG-DAG: LSHR {{\*? *}}[[COMPSHC]]
+;EG-DAG: LSHR {{\*? *}}[[COMPSHD]]
+;EG-DAG: LSHR {{.*}}, 1
+;EG-DAG: LSHR {{.*}}, 1
+;EG-DAG: LSHR {{.*}}, 1
+;EG-DAG: LSHR {{.*}}, 1
+;EG-DAG: ADD_INT {{\*? *}}[[BIGSHA:T[0-9]+\.[XYZW]]]{{.*}}, literal
+;EG-DAG: ADD_INT {{\*? *}}[[BIGSHB:T[0-9]+\.[XYZW]]]{{.*}}, literal
+;EG-DAG: ADD_INT {{\*? *}}[[BIGSHC:T[0-9]+\.[XYZW]]]{{.*}}, literal
+;EG-DAG: ADD_INT {{\*? *}}[[BIGSHD:T[0-9]+\.[XYZW]]]{{.*}}, literal
+;EG-DAG: LSHL {{.*}}, [[SHA]]
+;EG-DAG: LSHL {{.*}}, [[SHB]]
+;EG-DAG: LSHL {{.*}}, [[SHC]]
+;EG-DAG: LSHL {{.*}}, [[SHD]]
+;EG-DAG: LSHL {{.*}}, [[SHA]]
+;EG-DAG: LSHL {{.*}}, [[SHB]]
+;EG-DAG: LSHL {{.*}}, [[SHC]]
+;EG-DAG: LSHL {{.*}}, [[SHD]]
+;EG-DAG: LSHL
+;EG-DAG: LSHL
+;EG-DAG: LSHL
+;EG-DAG: LSHL
+;EG-DAG: SETGT_UINT {{\*? *T[0-9]\.[XYZW]}}, [[SHA]], literal
+;EG-DAG: SETGT_UINT {{\*? *T[0-9]\.[XYZW]}}, [[SHB]], literal
+;EG-DAG: SETGT_UINT {{\*? *T[0-9]\.[XYZW]}}, [[SHC]], literal
+;EG-DAG: SETGT_UINT {{\*? *T[0-9]\.[XYZW]}}, [[SHD]], literal
+;EG-DAG: CNDE_INT {{.*}}, 0.0
+;EG-DAG: CNDE_INT {{.*}}, 0.0
+;EG-DAG: CNDE_INT {{.*}}, 0.0
+;EG-DAG: CNDE_INT {{.*}}, 0.0
+;EG-DAG: CNDE_INT
+;EG-DAG: CNDE_INT
+;EG-DAG: CNDE_INT
+;EG-DAG: CNDE_INT
+
+;SI: {{^}}shl_v4i64:
+;SI: v_lshl_b64 {{v\[[0-9]+:[0-9]+\], v\[[0-9]+:[0-9]+\], v[0-9]+}}
+;SI: v_lshl_b64 {{v\[[0-9]+:[0-9]+\], v\[[0-9]+:[0-9]+\], v[0-9]+}}
+;SI: v_lshl_b64 {{v\[[0-9]+:[0-9]+\], v\[[0-9]+:[0-9]+\], v[0-9]+}}
+;SI: v_lshl_b64 {{v\[[0-9]+:[0-9]+\], v\[[0-9]+:[0-9]+\], v[0-9]+}}
+
+;VI: {{^}}shl_v4i64:
+;VI: v_lshlrev_b64 {{v\[[0-9]+:[0-9]+\], v[0-9]+, v\[[0-9]+:[0-9]+\]}}
+;VI: v_lshlrev_b64 {{v\[[0-9]+:[0-9]+\], v[0-9]+, v\[[0-9]+:[0-9]+\]}}
+;VI: v_lshlrev_b64 {{v\[[0-9]+:[0-9]+\], v[0-9]+, v\[[0-9]+:[0-9]+\]}}
+;VI: v_lshlrev_b64 {{v\[[0-9]+:[0-9]+\], v[0-9]+, v\[[0-9]+:[0-9]+\]}}
define void @shl_v4i64(<4 x i64> addrspace(1)* %out, <4 x i64> addrspace(1)* %in) {
%b_ptr = getelementptr <4 x i64> addrspace(1)* %in, i64 1
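The VI lines above differ from the SI ones only in operand order: VI carries these shifts in *rev form, with the shift amount as the first source operand. Illustrative encodings (register numbers assumed):

v_lshl_b64    v[0:1], v[2:3], v4   ; SI: source pair first, shift amount last
v_lshlrev_b64 v[0:1], v4, v[2:3]   ; VI: shift amount first, source pair last

The same swap accounts for the v_ashrrev_i64 and v_lshrrev_b64 checks in the sra.ll and srl.ll diffs below.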
diff --git a/test/CodeGen/R600/shl_add_constant.ll b/test/CodeGen/R600/shl_add_constant.ll
index 801f77d..6915495 100644
--- a/test/CodeGen/R600/shl_add_constant.ll
+++ b/test/CodeGen/R600/shl_add_constant.ll
@@ -1,4 +1,4 @@
-; RUN: llc -march=r600 -mcpu=SI -verify-machineinstrs < %s | FileCheck -check-prefix=SI %s
+; RUN: llc -march=amdgcn -mcpu=SI -verify-machineinstrs < %s | FileCheck -check-prefix=SI %s
declare i32 @llvm.r600.read.tidig.x() #1
diff --git a/test/CodeGen/R600/shl_add_ptr.ll b/test/CodeGen/R600/shl_add_ptr.ll
index 047cf25..d423153 100644
--- a/test/CodeGen/R600/shl_add_ptr.ll
+++ b/test/CodeGen/R600/shl_add_ptr.ll
@@ -1,4 +1,5 @@
-; RUN: llc -march=r600 -mcpu=bonaire -verify-machineinstrs -mattr=+load-store-opt -enable-misched < %s | FileCheck -check-prefix=SI %s
+; RUN: llc -march=amdgcn -mcpu=bonaire -verify-machineinstrs -mattr=+load-store-opt -enable-misched < %s | FileCheck -check-prefix=SI %s
+; RUN: llc -march=amdgcn -mcpu=tonga -verify-machineinstrs -mattr=+load-store-opt -enable-misched < %s | FileCheck -check-prefix=SI %s
; Test that shifting a pointer with a constant add is folded into the
; constant offset addressing mode even if the add has
@@ -16,7 +17,7 @@ declare i32 @llvm.r600.read.tidig.x() #1
; SI-LABEL: {{^}}load_shl_base_lds_0:
; SI: v_lshlrev_b32_e32 [[PTR:v[0-9]+]], 2, {{v[0-9]+}}
-; SI: ds_read_b32 {{v[0-9]+}}, [[PTR]] offset:8 [M0]
+; SI: ds_read_b32 {{v[0-9]+}}, [[PTR]] offset:8
; SI: s_endpgm
define void @load_shl_base_lds_0(float addrspace(1)* %out, i32 addrspace(1)* %add_use) #0 {
%tid.x = tail call i32 @llvm.r600.read.tidig.x() #1
@@ -33,7 +34,7 @@ define void @load_shl_base_lds_0(float addrspace(1)* %out, i32 addrspace(1)* %ad
; SI-LABEL: {{^}}load_shl_base_lds_1:
; SI: v_lshlrev_b32_e32 [[PTR:v[0-9]+]], 2, {{v[0-9]+}}
-; SI: ds_read_b32 [[RESULT:v[0-9]+]], [[PTR]] offset:8 [M0]
+; SI: ds_read_b32 [[RESULT:v[0-9]+]], [[PTR]] offset:8
; SI: v_add_i32_e32 [[ADDUSE:v[0-9]+]], 8, v{{[0-9]+}}
; SI-DAG: buffer_store_dword [[RESULT]]
; SI-DAG: buffer_store_dword [[ADDUSE]]
@@ -68,8 +69,9 @@ define void @load_shl_base_lds_max_offset(i8 addrspace(1)* %out, i8 addrspace(3)
; pointer can be used with an offset into the second one.
; SI-LABEL: {{^}}load_shl_base_lds_2:
-; SI: v_lshlrev_b32_e32 [[PTR:v[0-9]+]], 2, {{v[0-9]+}}
-; SI-NEXT: ds_read2st64_b32 {{v\[[0-9]+:[0-9]+\]}}, [[PTR]] offset0:1 offset1:9 [M0]
+; SI: s_mov_b32 m0, -1
+; SI-NEXT: v_lshlrev_b32_e32 [[PTR:v[0-9]+]], 2, {{v[0-9]+}}
+; SI-NEXT: ds_read2st64_b32 {{v\[[0-9]+:[0-9]+\]}}, [[PTR]] offset0:1 offset1:9
; SI: s_endpgm
define void @load_shl_base_lds_2(float addrspace(1)* %out) #0 {
%tid.x = tail call i32 @llvm.r600.read.tidig.x() #1
@@ -85,7 +87,7 @@ define void @load_shl_base_lds_2(float addrspace(1)* %out) #0 {
; SI-LABEL: {{^}}store_shl_base_lds_0:
; SI: v_lshlrev_b32_e32 [[PTR:v[0-9]+]], 2, {{v[0-9]+}}
-; SI: ds_write_b32 [[PTR]], {{v[0-9]+}} offset:8 [M0]
+; SI: ds_write_b32 [[PTR]], {{v[0-9]+}} offset:8
; SI: s_endpgm
define void @store_shl_base_lds_0(float addrspace(1)* %out, i32 addrspace(1)* %add_use) #0 {
%tid.x = tail call i32 @llvm.r600.read.tidig.x() #1
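These LDS tests check two things at once: that a constant add feeding a shifted pointer folds into the ds instruction's immediate offset (offset:8 for an add of 2 i32 elements), and, in load_shl_base_lds_2, that m0, which on SI bounds LDS addressing, is initialized with s_mov_b32 m0, -1 before the first ds instruction. A minimal sketch of the folding pattern, reusing the same intrinsic; the global's name and size are illustrative:

declare i32 @llvm.r600.read.tidig.x() nounwind readnone

@lds.sketch = internal addrspace(3) global [512 x float] undef, align 4

define void @fold_ds_offset_sketch(float addrspace(1)* %out) nounwind {
  %tid.x = tail call i32 @llvm.r600.read.tidig.x()
  %idx = add i32 %tid.x, 2                        ; +2 floats = +8 bytes
  %ptr = getelementptr [512 x float] addrspace(3)* @lds.sketch, i32 0, i32 %idx
  %val = load float addrspace(3)* %ptr, align 4   ; expect ds_read_b32 ... offset:8
  store float %val, float addrspace(1)* %out, align 4
  ret void
}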
diff --git a/test/CodeGen/R600/si-annotate-cf-assertion.ll b/test/CodeGen/R600/si-annotate-cf-assertion.ll
index 6d60b0a..69d7193 100644
--- a/test/CodeGen/R600/si-annotate-cf-assertion.ll
+++ b/test/CodeGen/R600/si-annotate-cf-assertion.ll
@@ -1,6 +1,7 @@
; REQUIRES: asserts
; XFAIL: *
-; RUN: llc -march=r600 -mcpu=SI -verify-machineinstrs-asm-verbose=false < %s | FileCheck %s
+; RUN: llc -march=amdgcn -mcpu=SI -verify-machineinstrs -asm-verbose=false < %s | FileCheck %s
+; RUN: llc -march=amdgcn -mcpu=tonga -verify-machineinstrs -asm-verbose=false < %s | FileCheck %s
define void @test(i32 addrspace(1)* %g, i8 addrspace(3)* %l, i32 %x) nounwind {
diff --git a/test/CodeGen/R600/si-lod-bias.ll b/test/CodeGen/R600/si-lod-bias.ll
index 60277d6..d6cbd0f 100644
--- a/test/CodeGen/R600/si-lod-bias.ll
+++ b/test/CodeGen/R600/si-lod-bias.ll
@@ -1,4 +1,5 @@
-;RUN: llc < %s -march=r600 -mcpu=verde -verify-machineinstrs | FileCheck %s
+;RUN: llc < %s -march=amdgcn -mcpu=verde -verify-machineinstrs | FileCheck %s
+;RUN: llc < %s -march=amdgcn -mcpu=tonga -verify-machineinstrs | FileCheck %s
; This shader has the potential to generate illegal VGPR to SGPR copies if
; the wrong register class is used for the REG_SEQUENCE instructions.
@@ -47,5 +48,5 @@ declare void @llvm.SI.export(i32, i32, i32, i32, i32, float, float, float, float
attributes #0 = { "ShaderType"="0" }
attributes #1 = { nounwind readnone }
-!0 = metadata !{metadata !"const", null}
-!1 = metadata !{metadata !0, metadata !0, i64 0, i32 1}
+!0 = !{!"const", null}
+!1 = !{!0, !0, i64 0, i32 1}
diff --git a/test/CodeGen/R600/si-sgpr-spill.ll b/test/CodeGen/R600/si-sgpr-spill.ll
index 439d8e2..18fda20 100644
--- a/test/CodeGen/R600/si-sgpr-spill.ll
+++ b/test/CodeGen/R600/si-sgpr-spill.ll
@@ -1,9 +1,11 @@
-; RUN: llc -march=r600 -mcpu=SI < %s | FileCheck %s
+; RUN: llc -march=amdgcn -mcpu=SI < %s | FileCheck %s
+; RUN: llc -march=amdgcn -mcpu=tonga < %s | FileCheck %s
; These tests check that the compiler won't crash when it needs to spill
; SGPRs.
; CHECK-LABEL: {{^}}main:
+; CHECK: s_wqm
; Writing to M0 from an SMRD instruction will hang the GPU.
; CHECK-NOT: s_buffer_load_dword m0
; CHECK: s_endpgm
@@ -686,7 +688,7 @@ attributes #2 = { readnone }
attributes #3 = { readonly }
attributes #4 = { nounwind readonly }
-!0 = metadata !{metadata !"const", null, i32 1}
+!0 = !{!"const", null, i32 1}
; CHECK-LABEL: {{^}}main1:
; CHECK: s_endpgm
diff --git a/test/CodeGen/R600/si-triv-disjoint-mem-access.ll b/test/CodeGen/R600/si-triv-disjoint-mem-access.ll
index 2c146eb..a4475c0 100644
--- a/test/CodeGen/R600/si-triv-disjoint-mem-access.ll
+++ b/test/CodeGen/R600/si-triv-disjoint-mem-access.ll
@@ -1,4 +1,4 @@
-; RUN: llc -march=r600 -mcpu=bonaire -verify-machineinstrs -enable-misched -enable-aa-sched-mi < %s | FileCheck -check-prefix=FUNC -check-prefix=CI %s
+; RUN: llc -march=amdgcn -mcpu=bonaire -verify-machineinstrs -enable-misched -enable-aa-sched-mi < %s | FileCheck -check-prefix=FUNC -check-prefix=CI %s
declare void @llvm.SI.tbuffer.store.i32(<16 x i8>, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32)
declare void @llvm.SI.tbuffer.store.v4i32(<16 x i8>, <4 x i32>, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32)
@@ -51,8 +51,8 @@ define void @no_reorder_local_load_volatile_global_store_local_load(i32 addrspac
; FUNC-LABEL: @no_reorder_barrier_local_load_global_store_local_load
; CI: ds_read_b32 {{v[0-9]+}}, {{v[0-9]+}} offset:4
-; CI: buffer_store_dword
; CI: ds_read_b32 {{v[0-9]+}}, {{v[0-9]+}} offset:8
+; CI: buffer_store_dword
define void @no_reorder_barrier_local_load_global_store_local_load(i32 addrspace(1)* %out, i32 addrspace(1)* %gptr) #0 {
%ptr0 = load i32 addrspace(3)* addrspace(3)* @stored_lds_ptr, align 4
@@ -94,12 +94,10 @@ define void @no_reorder_constant_load_global_store_constant_load(i32 addrspace(1
ret void
}
-; XXX: Should be able to reorder this, but the laods count as ordered
-
; FUNC-LABEL: @reorder_constant_load_local_store_constant_load
; CI: buffer_load_dword
-; CI: ds_write_b32
; CI: buffer_load_dword
+; CI: ds_write_b32
; CI: buffer_store_dword
define void @reorder_constant_load_local_store_constant_load(i32 addrspace(1)* %out, i32 addrspace(3)* %lptr) #0 {
%ptr0 = load i32 addrspace(2)* addrspace(3)* @stored_constant_ptr, align 8
@@ -183,11 +181,11 @@ define void @reorder_local_offsets(i32 addrspace(1)* nocapture %out, i32 addrspa
}
; FUNC-LABEL: @reorder_global_offsets
-; CI: buffer_store_dword {{v[0-9]+}}, {{s\[[0-9]+:[0-9]+\]}}, 0 offset:0xc
-; CI: buffer_load_dword {{v[0-9]+}}, {{s\[[0-9]+:[0-9]+\]}}, 0 offset:0x190
-; CI: buffer_load_dword {{v[0-9]+}}, {{s\[[0-9]+:[0-9]+\]}}, 0 offset:0x194
-; CI: buffer_store_dword {{v[0-9]+}}, {{s\[[0-9]+:[0-9]+\]}}, 0 offset:0x190
-; CI: buffer_store_dword {{v[0-9]+}}, {{s\[[0-9]+:[0-9]+\]}}, 0 offset:0x194
+; CI: buffer_store_dword {{v[0-9]+}}, {{s\[[0-9]+:[0-9]+\]}}, 0 offset:12
+; CI: buffer_load_dword {{v[0-9]+}}, {{s\[[0-9]+:[0-9]+\]}}, 0 offset:400
+; CI: buffer_load_dword {{v[0-9]+}}, {{s\[[0-9]+:[0-9]+\]}}, 0 offset:404
+; CI: buffer_store_dword {{v[0-9]+}}, {{s\[[0-9]+:[0-9]+\]}}, 0 offset:400
+; CI: buffer_store_dword {{v[0-9]+}}, {{s\[[0-9]+:[0-9]+\]}}, 0 offset:404
; CI: buffer_store_dword
; CI: s_endpgm
define void @reorder_global_offsets(i32 addrspace(1)* nocapture %out, i32 addrspace(1)* noalias nocapture readnone %gptr, i32 addrspace(1)* noalias nocapture %ptr0) #0 {
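These reorder tests lean on what -enable-aa-sched-mi lets the scheduler prove: accesses in different address spaces (constant, local, global) trivially cannot alias, so the second buffer_load may be hoisted above the ds_write, and the updated check order reflects that. A minimal sketch of the no-alias case; names are illustrative:

define void @reorder_sketch(i32 addrspace(1)* %out, i32 addrspace(2)* %cptr, i32 addrspace(3)* %lptr) nounwind {
  %v0 = load i32 addrspace(2)* %cptr, align 4
  store i32 99, i32 addrspace(3)* %lptr, align 4    ; LDS store: cannot alias the constant loads
  %cptr1 = getelementptr i32 addrspace(2)* %cptr, i64 1
  %v1 = load i32 addrspace(2)* %cptr1, align 4      ; free to move above the ds_write
  %sum = add i32 %v0, %v1
  store i32 %sum, i32 addrspace(1)* %out, align 4
  ret void
}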
diff --git a/test/CodeGen/R600/si-vector-hang.ll b/test/CodeGen/R600/si-vector-hang.ll
index 6f91c71..61812c6 100644
--- a/test/CodeGen/R600/si-vector-hang.ll
+++ b/test/CodeGen/R600/si-vector-hang.ll
@@ -1,4 +1,5 @@
-; RUN: llc < %s -march=r600 -mcpu=verde -verify-machineinstrs | FileCheck %s
+; RUN: llc < %s -march=amdgcn -mcpu=verde -verify-machineinstrs | FileCheck %s
+; RUN: llc < %s -march=amdgcn -mcpu=tonga -verify-machineinstrs | FileCheck %s
; CHECK: {{^}}test_8_min_char:
; CHECK: buffer_store_byte
@@ -96,12 +97,12 @@ attributes #0 = { nounwind "less-precise-fpmad"="false" "no-frame-pointer-elim"=
!opencl.kernels = !{!0, !1, !2, !3, !4, !5, !6, !7, !8}
-!0 = metadata !{null}
-!1 = metadata !{null}
-!2 = metadata !{null}
-!3 = metadata !{void (i8 addrspace(1)*, i8 addrspace(1)*, i8 addrspace(1)*)* @test_8_min_char}
-!4 = metadata !{null}
-!5 = metadata !{null}
-!6 = metadata !{null}
-!7 = metadata !{null}
-!8 = metadata !{null}
+!0 = !{null}
+!1 = !{null}
+!2 = !{null}
+!3 = !{void (i8 addrspace(1)*, i8 addrspace(1)*, i8 addrspace(1)*)* @test_8_min_char}
+!4 = !{null}
+!5 = !{null}
+!6 = !{null}
+!7 = !{null}
+!8 = !{null}
diff --git a/test/CodeGen/R600/sign_extend.ll b/test/CodeGen/R600/sign_extend.ll
index 94f4c46..f194759 100644
--- a/test/CodeGen/R600/sign_extend.ll
+++ b/test/CodeGen/R600/sign_extend.ll
@@ -1,4 +1,5 @@
-; RUN: llc -march=r600 -mcpu=SI -verify-machineinstrs < %s | FileCheck -check-prefix=SI %s
+; RUN: llc -march=amdgcn -mcpu=SI -verify-machineinstrs < %s | FileCheck -check-prefix=SI %s
+; RUN: llc -march=amdgcn -mcpu=tonga -verify-machineinstrs < %s | FileCheck -check-prefix=SI %s
; SI-LABEL: {{^}}s_sext_i1_to_i32:
; SI: v_cndmask_b32_e64
@@ -23,8 +24,9 @@ entry:
}
; SI-LABEL: {{^}}s_sext_i1_to_i64:
-; SI: v_cndmask_b32_e64
-; SI: v_cndmask_b32_e64
+; SI: v_cndmask_b32_e64 v[[LOREG:[0-9]+]], 0, -1, vcc
+; SI: v_mov_b32_e32 v[[HIREG:[0-9]+]], v[[LOREG]]
+; SI: buffer_store_dwordx2 v{{\[}}[[LOREG]]:[[HIREG]]{{\]}}
; SI: s_endpgm
define void @s_sext_i1_to_i64(i64 addrspace(1)* %out, i32 %a, i32 %b) nounwind {
%cmp = icmp eq i32 %a, %b
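The strengthened checks use the value range of sext i1, which admits only two results:

  sext i1 0 to i64 = 0x0000000000000000   (lo = hi = 0x00000000)
  sext i1 1 to i64 = 0xffffffffffffffff   (lo = hi = 0xffffffff)

Since the halves are always equal, one v_cndmask_b32 materializes the low register and a plain v_mov_b32 copies it into the high register; the second cndmask the old checks tolerated is redundant.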
diff --git a/test/CodeGen/R600/simplify-demanded-bits-build-pair.ll b/test/CodeGen/R600/simplify-demanded-bits-build-pair.ll
index 8d9ee42..28a413c 100644
--- a/test/CodeGen/R600/simplify-demanded-bits-build-pair.ll
+++ b/test/CodeGen/R600/simplify-demanded-bits-build-pair.ll
@@ -1,5 +1,6 @@
; XFAIL: *
-; RUN: llc -verify-machineinstrs -march=r600 -mcpu=SI -mattr=-promote-alloca < %s | FileCheck -check-prefix=SI %s
+; RUN: llc -verify-machineinstrs -march=amdgcn -mcpu=SI -mattr=-promote-alloca < %s | FileCheck -check-prefix=SI %s
+; RUN: llc -verify-machineinstrs -march=amdgcn -mcpu=tonga -mattr=-promote-alloca < %s | FileCheck -check-prefix=SI %s
; 64-bit select was originally lowered with a build_pair, and this
; could be simplified to 1 cndmask instead of 2, but that broke when
diff --git a/test/CodeGen/R600/sint_to_fp.f64.ll b/test/CodeGen/R600/sint_to_fp.f64.ll
index 6e4f87c..893cfb3 100644
--- a/test/CodeGen/R600/sint_to_fp.f64.ll
+++ b/test/CodeGen/R600/sint_to_fp.f64.ll
@@ -1,4 +1,4 @@
-; RUN: llc -march=r600 -mcpu=SI -verify-machineinstrs < %s | FileCheck -check-prefix=SI %s
+; RUN: llc -march=amdgcn -mcpu=SI -verify-machineinstrs < %s | FileCheck -check-prefix=SI %s
declare i32 @llvm.r600.read.tidig.x() nounwind readnone
@@ -10,12 +10,13 @@ define void @sint_to_fp_i32_to_f64(double addrspace(1)* %out, i32 %in) {
ret void
}
+; FIXME: select on 0, 0
; SI-LABEL: {{^}}sint_to_fp_i1_f64:
; SI: v_cmp_eq_i32_e64 [[CMP:s\[[0-9]+:[0-9]\]]],
-; FIXME: We should the VGPR sources for V_CNDMASK are copied from SGPRs,
-; we should be able to fold the SGPRs into the V_CNDMASK instructions.
-; SI: v_cndmask_b32_e64 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}, [[CMP]]
-; SI: v_cndmask_b32_e64 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}, [[CMP]]
+; We can't fold the SGPRs into v_cndmask_b32_e64, because it already
+; uses an SGPR for [[CMP]].
+; SI: v_cndmask_b32_e64 v{{[0-9]+}}, 0, v{{[0-9]+}}, [[CMP]]
+; SI: v_cndmask_b32_e64 v{{[0-9]+}}, 0, 0, [[CMP]]
; SI: buffer_store_dwordx2
; SI: s_endpgm
define void @sint_to_fp_i1_f64(double addrspace(1)* %out, i32 %in) {
@@ -45,9 +46,9 @@ define void @s_sint_to_fp_i64_to_f64(double addrspace(1)* %out, i64 %in) {
; SI-LABEL: @v_sint_to_fp_i64_to_f64
; SI: buffer_load_dwordx2 v{{\[}}[[LO:[0-9]+]]:[[HI:[0-9]+]]{{\]}}
-; SI-DAG: v_cvt_f64_u32_e32 [[LO_CONV:v\[[0-9]+:[0-9]+\]]], v[[LO]]
-; SI-DAG: v_cvt_f64_i32_e32 [[HI_CONV:v\[[0-9]+:[0-9]+\]]], v[[HI]]
+; SI: v_cvt_f64_i32_e32 [[HI_CONV:v\[[0-9]+:[0-9]+\]]], v[[HI]]
; SI: v_ldexp_f64 [[LDEXP:v\[[0-9]+:[0-9]+\]]], [[HI_CONV]], 32
+; SI: v_cvt_f64_u32_e32 [[LO_CONV:v\[[0-9]+:[0-9]+\]]], v[[LO]]
; SI: v_add_f64 [[RESULT:v\[[0-9]+:[0-9]+\]]], [[LDEXP]], [[LO_CONV]]
; SI: buffer_store_dwordx2 [[RESULT]]
define void @v_sint_to_fp_i64_to_f64(double addrspace(1)* %out, i64 addrspace(1)* %in) {
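The reordered checks track a scheduling change only; the conversion itself still splits the i64 as x = hi * 2^32 + lo, with hi signed and lo unsigned, giving

  f64(x) = ldexp(f64_signed(hi), 32) + f64_unsigned(lo)

which is exactly the matched v_cvt_f64_i32 (hi), v_ldexp_f64 by 32, v_cvt_f64_u32 (lo), v_add_f64 sequence.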
diff --git a/test/CodeGen/R600/sint_to_fp.ll b/test/CodeGen/R600/sint_to_fp.ll
index 7b6ce43..6a291cf 100644
--- a/test/CodeGen/R600/sint_to_fp.ll
+++ b/test/CodeGen/R600/sint_to_fp.ll
@@ -1,4 +1,5 @@
-; RUN: llc -march=r600 -mcpu=SI -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
+; RUN: llc -march=amdgcn -mcpu=SI -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
+; RUN: llc -march=amdgcn -mcpu=tonga -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
; RUN: llc -march=r600 -mcpu=redwood < %s | FileCheck -check-prefix=R600 -check-prefix=FUNC %s
diff --git a/test/CodeGen/R600/smrd.ll b/test/CodeGen/R600/smrd.ll
index 1c7df16..bad1668 100644
--- a/test/CodeGen/R600/smrd.ll
+++ b/test/CodeGen/R600/smrd.ll
@@ -1,8 +1,10 @@
-; RUN: llc < %s -march=r600 -mcpu=SI -show-mc-encoding -verify-machineinstrs | FileCheck %s
+; RUN: llc < %s -march=amdgcn -mcpu=SI -show-mc-encoding -verify-machineinstrs | FileCheck --check-prefix=SI --check-prefix=GCN %s
+; RUN: llc < %s -march=amdgcn -mcpu=tonga -show-mc-encoding -verify-machineinstrs | FileCheck --check-prefix=VI --check-prefix=GCN %s
; SMRD load with an immediate offset.
-; CHECK-LABEL: {{^}}smrd0:
-; CHECK: s_load_dword s{{[0-9]}}, s[{{[0-9]:[0-9]}}], 0x1 ; encoding: [0x01
+; GCN-LABEL: {{^}}smrd0:
+; SI: s_load_dword s{{[0-9]}}, s[{{[0-9]:[0-9]}}], 0x1 ; encoding: [0x01
+; VI: s_load_dword s{{[0-9]}}, s[{{[0-9]:[0-9]}}], 0x4
define void @smrd0(i32 addrspace(1)* %out, i32 addrspace(2)* %ptr) {
entry:
%0 = getelementptr i32 addrspace(2)* %ptr, i64 1
@@ -12,8 +14,9 @@ entry:
}
; SMRD load with the largest possible immediate offset.
-; CHECK-LABEL: {{^}}smrd1:
-; CHECK: s_load_dword s{{[0-9]}}, s[{{[0-9]:[0-9]}}], 0xff ; encoding: [0xff
+; GCN-LABEL: {{^}}smrd1:
+; SI: s_load_dword s{{[0-9]}}, s[{{[0-9]:[0-9]}}], 0xff ; encoding: [0xff
+; VI: s_load_dword s{{[0-9]}}, s[{{[0-9]:[0-9]}}], 0x3fc
define void @smrd1(i32 addrspace(1)* %out, i32 addrspace(2)* %ptr) {
entry:
%0 = getelementptr i32 addrspace(2)* %ptr, i64 255
@@ -23,10 +26,11 @@ entry:
}
; SMRD load with an offset greater than the largest possible immediate.
-; CHECK-LABEL: {{^}}smrd2:
-; CHECK: s_movk_i32 s[[OFFSET:[0-9]]], 0x400
-; CHECK: s_load_dword s{{[0-9]}}, s[{{[0-9]:[0-9]}}], s[[OFFSET]] ; encoding: [0x0[[OFFSET]]
-; CHECK: s_endpgm
+; GCN-LABEL: {{^}}smrd2:
+; SI: s_movk_i32 s[[OFFSET:[0-9]]], 0x400
+; SI: s_load_dword s{{[0-9]}}, s[{{[0-9]:[0-9]}}], s[[OFFSET]] ; encoding: [0x0[[OFFSET]]
+; VI: s_load_dword s{{[0-9]}}, s[{{[0-9]:[0-9]}}], 0x400
+; GCN: s_endpgm
define void @smrd2(i32 addrspace(1)* %out, i32 addrspace(2)* %ptr) {
entry:
%0 = getelementptr i32 addrspace(2)* %ptr, i64 256
@@ -36,15 +40,18 @@ entry:
}
; SMRD load with a 64-bit offset
-; CHECK-LABEL: {{^}}smrd3:
-; CHECK-DAG: s_mov_b32 s[[SHI:[0-9]+]], 4
-; CHECK-DAG: s_mov_b32 s[[SLO:[0-9]+]], 0 ;
-; FIXME: We don't need to copy these values to VGPRs
-; CHECK-DAG: v_mov_b32_e32 v[[VLO:[0-9]+]], s[[SLO]]
-; CHECK-DAG: v_mov_b32_e32 v[[VHI:[0-9]+]], s[[SHI]]
+; GCN-LABEL: {{^}}smrd3:
+; FIXME: There are too many copies here because we don't fold immediates
+; through REG_SEQUENCE
+; SI: s_mov_b32 s[[SLO:[0-9]+]], 0 ;
+; SI: s_mov_b32 s[[SHI:[0-9]+]], 4
+; SI: s_mov_b32 s[[SSLO:[0-9]+]], s[[SLO]]
+; SI-DAG: v_mov_b32_e32 v[[VLO:[0-9]+]], s[[SSLO]]
+; SI-DAG: v_mov_b32_e32 v[[VHI:[0-9]+]], s[[SHI]]
; FIXME: We should be able to use s_load_dword here
-; CHECK: buffer_load_dword v{{[0-9]+}}, v{{\[}}[[VLO]]:[[VHI]]{{\]}}, s[{{[0-9]+:[0-9]+}}], 0 addr64
-; CHECK: s_endpgm
+; SI: buffer_load_dword v{{[0-9]+}}, v{{\[}}[[VLO]]:[[VHI]]{{\]}}, s[{{[0-9]+:[0-9]+}}], 0 addr64
+; TODO: Add VI checks
+; GCN: s_endpgm
define void @smrd3(i32 addrspace(1)* %out, i32 addrspace(2)* %ptr) {
entry:
%0 = getelementptr i32 addrspace(2)* %ptr, i64 4294967296 ; 2 ^ 32
@@ -54,8 +61,9 @@ entry:
}
; SMRD load using the load.const intrinsic with an immediate offset
-; CHECK-LABEL: {{^}}smrd_load_const0:
-; CHECK: s_buffer_load_dword s{{[0-9]}}, s[{{[0-9]:[0-9]}}], 0x4 ; encoding: [0x04
+; GCN-LABEL: {{^}}smrd_load_const0:
+; SI: s_buffer_load_dword s{{[0-9]}}, s[{{[0-9]:[0-9]}}], 0x4 ; encoding: [0x04
+; VI: s_buffer_load_dword s{{[0-9]}}, s[{{[0-9]:[0-9]}}], 0x10
define void @smrd_load_const0(<16 x i8> addrspace(2)* inreg, <16 x i8> addrspace(2)* inreg, <32 x i8> addrspace(2)* inreg, i32 inreg, <2 x i32>, <2 x i32>, <2 x i32>, <3 x i32>, <2 x i32>, <2 x i32>, <2 x i32>, float, float, float, float, float, float, float, float, float) #0 {
main_body:
%20 = getelementptr <16 x i8> addrspace(2)* %0, i32 0
@@ -67,8 +75,9 @@ main_body:
; SMRD load using the load.const intrinsic with the largest possible immediate
; offset.
-; CHECK-LABEL: {{^}}smrd_load_const1:
-; CHECK: s_buffer_load_dword s{{[0-9]}}, s[{{[0-9]:[0-9]}}], 0xff ; encoding: [0xff
+; GCN-LABEL: {{^}}smrd_load_const1:
+; SI: s_buffer_load_dword s{{[0-9]}}, s[{{[0-9]:[0-9]}}], 0xff ; encoding: [0xff
+; VI: s_buffer_load_dword s{{[0-9]}}, s[{{[0-9]:[0-9]}}], 0x3fc
define void @smrd_load_const1(<16 x i8> addrspace(2)* inreg, <16 x i8> addrspace(2)* inreg, <32 x i8> addrspace(2)* inreg, i32 inreg, <2 x i32>, <2 x i32>, <2 x i32>, <3 x i32>, <2 x i32>, <2 x i32>, <2 x i32>, float, float, float, float, float, float, float, float, float) #0 {
main_body:
%20 = getelementptr <16 x i8> addrspace(2)* %0, i32 0
@@ -80,9 +89,10 @@ main_body:
; SMRD load using the load.const intrinsic with an offset greater than the
; largest possible immediate offset.
-; CHECK-LABEL: {{^}}smrd_load_const2:
-; CHECK: s_movk_i32 s[[OFFSET:[0-9]]], 0x400
-; CHECK: s_buffer_load_dword s{{[0-9]}}, s[{{[0-9]:[0-9]}}], s[[OFFSET]] ; encoding: [0x0[[OFFSET]]
+; GCN-LABEL: {{^}}smrd_load_const2:
+; SI: s_movk_i32 s[[OFFSET:[0-9]]], 0x400
+; SI: s_buffer_load_dword s{{[0-9]}}, s[{{[0-9]:[0-9]}}], s[[OFFSET]] ; encoding: [0x0[[OFFSET]]
+; VI: s_buffer_load_dword s{{[0-9]}}, s[{{[0-9]:[0-9]}}], 0x400
define void @smrd_load_const2(<16 x i8> addrspace(2)* inreg, <16 x i8> addrspace(2)* inreg, <32 x i8> addrspace(2)* inreg, i32 inreg, <2 x i32>, <2 x i32>, <2 x i32>, <3 x i32>, <2 x i32>, <2 x i32>, <2 x i32>, float, float, float, float, float, float, float, float, float) #0 {
main_body:
%20 = getelementptr <16 x i8> addrspace(2)* %0, i32 0
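The SI/VI forks in this file come from the SMRD offset encoding: SI takes the immediate in dwords, VI in bytes. So a load 1 dword past the base matches 0x1 on SI but 0x4 on VI, and SI's largest immediate, 255 dwords, is 255 * 4 = 1020 = 0x3fc bytes on VI. At 256 dwords SI has to materialize the offset in an SGPR with s_movk_i32, while 0x400 still fits VI's byte immediate.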
diff --git a/test/CodeGen/R600/split-scalar-i64-add.ll b/test/CodeGen/R600/split-scalar-i64-add.ll
index e3448dc..ec50fd9 100644
--- a/test/CodeGen/R600/split-scalar-i64-add.ll
+++ b/test/CodeGen/R600/split-scalar-i64-add.ll
@@ -1,4 +1,4 @@
-; RUN: llc -march=r600 -mcpu=SI -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
+; RUN: llc -march=amdgcn -mcpu=SI -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
declare i32 @llvm.r600.read.tidig.x() readnone
diff --git a/test/CodeGen/R600/sra.ll b/test/CodeGen/R600/sra.ll
index 8ba9daa..d6c6ccd 100644
--- a/test/CodeGen/R600/sra.ll
+++ b/test/CodeGen/R600/sra.ll
@@ -1,13 +1,18 @@
-;RUN: llc < %s -march=r600 -mcpu=redwood | FileCheck --check-prefix=EG-CHECK %s
-;RUN: llc < %s -march=r600 -mcpu=verde -verify-machineinstrs | FileCheck --check-prefix=SI-CHECK %s
+;RUN: llc < %s -march=r600 -mcpu=redwood | FileCheck --check-prefix=EG %s
+;RUN: llc < %s -march=amdgcn -mcpu=verde -verify-machineinstrs | FileCheck --check-prefix=SI %s
+;RUN: llc < %s -march=amdgcn -mcpu=tonga -verify-machineinstrs | FileCheck --check-prefix=VI %s
-;EG-CHECK-LABEL: {{^}}ashr_v2i32:
-;EG-CHECK: ASHR {{\*? *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
-;EG-CHECK: ASHR {{\*? *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
+;EG-LABEL: {{^}}ashr_v2i32:
+;EG: ASHR {{\*? *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
+;EG: ASHR {{\*? *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
-;SI-CHECK-LABEL: {{^}}ashr_v2i32:
-;SI-CHECK: v_ashr_i32_e32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
-;SI-CHECK: v_ashr_i32_e32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
+;SI-LABEL: {{^}}ashr_v2i32:
+;SI: v_ashr_i32_e32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
+;SI: v_ashr_i32_e32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
+
+;VI-LABEL: {{^}}ashr_v2i32:
+;VI: v_ashrrev_i32_e32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
+;VI: v_ashrrev_i32_e32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
define void @ashr_v2i32(<2 x i32> addrspace(1)* %out, <2 x i32> addrspace(1)* %in) {
%b_ptr = getelementptr <2 x i32> addrspace(1)* %in, i32 1
@@ -18,17 +23,23 @@ define void @ashr_v2i32(<2 x i32> addrspace(1)* %out, <2 x i32> addrspace(1)* %i
ret void
}
-;EG-CHECK-LABEL: {{^}}ashr_v4i32:
-;EG-CHECK: ASHR {{\*? *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
-;EG-CHECK: ASHR {{\*? *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
-;EG-CHECK: ASHR {{\*? *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
-;EG-CHECK: ASHR {{\*? *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
+;EG-LABEL: {{^}}ashr_v4i32:
+;EG: ASHR {{\*? *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
+;EG: ASHR {{\*? *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
+;EG: ASHR {{\*? *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
+;EG: ASHR {{\*? *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
+
+;SI-LABEL: {{^}}ashr_v4i32:
+;SI: v_ashr_i32_e32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
+;SI: v_ashr_i32_e32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
+;SI: v_ashr_i32_e32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
+;SI: v_ashr_i32_e32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
-;SI-CHECK-LABEL: {{^}}ashr_v4i32:
-;SI-CHECK: v_ashr_i32_e32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
-;SI-CHECK: v_ashr_i32_e32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
-;SI-CHECK: v_ashr_i32_e32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
-;SI-CHECK: v_ashr_i32_e32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
+;VI-LABEL: {{^}}ashr_v4i32:
+;VI: v_ashrrev_i32_e32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
+;VI: v_ashrrev_i32_e32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
+;VI: v_ashrrev_i32_e32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
+;VI: v_ashrrev_i32_e32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
define void @ashr_v4i32(<4 x i32> addrspace(1)* %out, <4 x i32> addrspace(1)* %in) {
%b_ptr = getelementptr <4 x i32> addrspace(1)* %in, i32 1
@@ -39,11 +50,15 @@ define void @ashr_v4i32(<4 x i32> addrspace(1)* %out, <4 x i32> addrspace(1)* %i
ret void
}
-;EG-CHECK-LABEL: {{^}}ashr_i64:
-;EG-CHECK: ASHR
+;EG-LABEL: {{^}}ashr_i64:
+;EG: ASHR
+
+;SI-LABEL: {{^}}ashr_i64:
+;SI: s_ashr_i64 s[{{[0-9]}}:{{[0-9]}}], s[{{[0-9]}}:{{[0-9]}}], 8
+
+;VI-LABEL: {{^}}ashr_i64:
+;VI: s_ashr_i64 s[{{[0-9]}}:{{[0-9]}}], s[{{[0-9]}}:{{[0-9]}}], 8
-;SI-CHECK-LABEL: {{^}}ashr_i64:
-;SI-CHECK: s_ashr_i64 s[{{[0-9]}}:{{[0-9]}}], s[{{[0-9]}}:{{[0-9]}}], 8
define void @ashr_i64(i64 addrspace(1)* %out, i32 %in) {
entry:
%0 = sext i32 %in to i64
@@ -52,22 +67,26 @@ entry:
ret void
}
-;EG-CHECK-LABEL: {{^}}ashr_i64_2:
-;EG-CHECK: SUB_INT {{\*? *}}[[COMPSH:T[0-9]+\.[XYZW]]], {{literal.[xy]}}, [[SHIFT:T[0-9]+\.[XYZW]]]
-;EG-CHECK: LSHL {{\* *}}[[TEMP:T[0-9]+\.[XYZW]]], [[OPHI:T[0-9]+\.[XYZW]]], {{[[COMPSH]]|PV.[XYZW]}}
-;EG-CHECK: LSHL {{\*? *}}[[OVERF:T[0-9]+\.[XYZW]]], {{[[TEMP]]|PV.[XYZW]}}, 1
+;EG-LABEL: {{^}}ashr_i64_2:
+;EG: SUB_INT {{\*? *}}[[COMPSH:T[0-9]+\.[XYZW]]], {{literal.[xy]}}, [[SHIFT:T[0-9]+\.[XYZW]]]
+;EG: LSHL {{\* *}}[[TEMP:T[0-9]+\.[XYZW]]], [[OPHI:T[0-9]+\.[XYZW]]], {{[[COMPSH]]|PV.[XYZW]}}
+;EG: LSHL {{\*? *}}[[OVERF:T[0-9]+\.[XYZW]]], {{[[TEMP]]|PV.[XYZW]}}, 1
;EG-DAG: ADD_INT {{\*? *}}[[BIGSH:T[0-9]+\.[XYZW]]], [[SHIFT]], literal
-;EG-CHECK-DAG: LSHR {{\*? *}}[[LOSMTMP:T[0-9]+\.[XYZW]]], [[OPLO:T[0-9]+\.[XYZW]]], [[SHIFT]]
-;EG-CHECK-DAG: OR_INT {{\*? *}}[[LOSM:T[0-9]+\.[XYZW]]], {{[[LOSMTMP]]|PV.[XYZW]}}, {{[[OVERF]]|PV.[XYZW]}}
-;EG-CHECK-DAG: ASHR {{\*? *}}[[HISM:T[0-9]+\.[XYZW]]], [[OPHI]], {{PS|[[SHIFT]]}}
-;EG-CHECK-DAG: ASHR {{\*? *}}[[LOBIG:T[0-9]+\.[XYZW]]], [[OPHI]], literal
-;EG-CHECK-DAG: ASHR {{\*? *}}[[HIBIG:T[0-9]+\.[XYZW]]], [[OPHI]], literal
-;EG-CHECK-DAG: SETGT_UINT {{\*? *}}[[RESC:T[0-9]+\.[XYZW]]], [[SHIFT]], literal
-;EG-CHECK-DAG: CNDE_INT {{\*? *}}[[RESLO:T[0-9]+\.[XYZW]]], {{T[0-9]+\.[XYZW]}}
-;EG-CHECK-DAG: CNDE_INT {{\*? *}}[[RESHI:T[0-9]+\.[XYZW]]], {{T[0-9]+\.[XYZW]}}
-
-;SI-CHECK-LABEL: {{^}}ashr_i64_2:
-;SI-CHECK: v_ashr_i64 {{v\[[0-9]+:[0-9]+\], v\[[0-9]+:[0-9]+\], v[0-9]+}}
+;EG-DAG: LSHR {{\*? *}}[[LOSMTMP:T[0-9]+\.[XYZW]]], [[OPLO:T[0-9]+\.[XYZW]]], [[SHIFT]]
+;EG-DAG: OR_INT {{\*? *}}[[LOSM:T[0-9]+\.[XYZW]]], {{[[LOSMTMP]]|PV.[XYZW]}}, {{[[OVERF]]|PV.[XYZW]}}
+;EG-DAG: ASHR {{\*? *}}[[HISM:T[0-9]+\.[XYZW]]], [[OPHI]], {{PS|[[SHIFT]]}}
+;EG-DAG: ASHR {{\*? *}}[[LOBIG:T[0-9]+\.[XYZW]]], [[OPHI]], literal
+;EG-DAG: ASHR {{\*? *}}[[HIBIG:T[0-9]+\.[XYZW]]], [[OPHI]], literal
+;EG-DAG: SETGT_UINT {{\*? *}}[[RESC:T[0-9]+\.[XYZW]]], [[SHIFT]], literal
+;EG-DAG: CNDE_INT {{\*? *}}[[RESLO:T[0-9]+\.[XYZW]]], {{T[0-9]+\.[XYZW]}}
+;EG-DAG: CNDE_INT {{\*? *}}[[RESHI:T[0-9]+\.[XYZW]]], {{T[0-9]+\.[XYZW]}}
+
+;SI-LABEL: {{^}}ashr_i64_2:
+;SI: v_ashr_i64 {{v\[[0-9]+:[0-9]+\], v\[[0-9]+:[0-9]+\], v[0-9]+}}
+
+;VI-LABEL: {{^}}ashr_i64_2:
+;VI: v_ashrrev_i64 {{v\[[0-9]+:[0-9]+\], v[0-9]+, v\[[0-9]+:[0-9]+\]}}
+
define void @ashr_i64_2(i64 addrspace(1)* %out, i64 addrspace(1)* %in) {
entry:
%b_ptr = getelementptr i64 addrspace(1)* %in, i64 1
@@ -78,35 +97,39 @@ entry:
ret void
}
-;EG-CHECK-LABEL: {{^}}ashr_v2i64:
-;EG-CHECK-DAG: SUB_INT {{\*? *}}[[COMPSHA:T[0-9]+\.[XYZW]]], {{literal.[xy]}}, [[SHA:T[0-9]+\.[XYZW]]]
-;EG-CHECK-DAG: SUB_INT {{\*? *}}[[COMPSHB:T[0-9]+\.[XYZW]]], {{literal.[xy]}}, [[SHB:T[0-9]+\.[XYZW]]]
-;EG-CHECK-DAG: LSHL {{\*? *}}[[COMPSHA]]
-;EG-CHECK-DAG: LSHL {{\*? *}}[[COMPSHB]]
-;EG-CHECK-DAG: LSHL {{.*}}, 1
-;EG-CHECK-DAG: LSHL {{.*}}, 1
-;EG-CHECK-DAG: ASHR {{.*}}, [[SHA]]
-;EG-CHECK-DAG: ASHR {{.*}}, [[SHB]]
-;EG-CHECK-DAG: LSHR {{.*}}, [[SHA]]
-;EG-CHECK-DAG: LSHR {{.*}}, [[SHB]]
-;EG-CHECK-DAG: OR_INT
-;EG-CHECK-DAG: OR_INT
-;EG-CHECK-DAG: ADD_INT {{\*? *}}[[BIGSHA:T[0-9]+\.[XYZW]]]{{.*}}, literal
-;EG-CHECK-DAG: ADD_INT {{\*? *}}[[BIGSHB:T[0-9]+\.[XYZW]]]{{.*}}, literal
-;EG-CHECK-DAG: ASHR
-;EG-CHECK-DAG: ASHR
-;EG-CHECK-DAG: ASHR {{.*}}, literal
-;EG-CHECK-DAG: ASHR {{.*}}, literal
-;EG-CHECK-DAG: SETGT_UINT {{\*? *T[0-9]\.[XYZW]}}, [[SHA]], literal
-;EG-CHECK-DAG: SETGT_UINT {{\*? *T[0-9]\.[XYZW]}}, [[SHB]], literal
-;EG-CHECK-DAG: CNDE_INT
-;EG-CHECK-DAG: CNDE_INT
-;EG-CHECK-DAG: CNDE_INT
-;EG-CHECK-DAG: CNDE_INT
-
-;SI-CHECK-LABEL: {{^}}ashr_v2i64:
-;SI-CHECK: v_ashr_i64 {{v\[[0-9]+:[0-9]+\], v\[[0-9]+:[0-9]+\], v[0-9]+}}
-;SI-CHECK: v_ashr_i64 {{v\[[0-9]+:[0-9]+\], v\[[0-9]+:[0-9]+\], v[0-9]+}}
+;EG-LABEL: {{^}}ashr_v2i64:
+;EG-DAG: SUB_INT {{\*? *}}[[COMPSHA:T[0-9]+\.[XYZW]]], {{literal.[xy]}}, [[SHA:T[0-9]+\.[XYZW]]]
+;EG-DAG: SUB_INT {{\*? *}}[[COMPSHB:T[0-9]+\.[XYZW]]], {{literal.[xy]}}, [[SHB:T[0-9]+\.[XYZW]]]
+;EG-DAG: LSHL {{\*? *}}[[COMPSHA]]
+;EG-DAG: LSHL {{\*? *}}[[COMPSHB]]
+;EG-DAG: LSHL {{.*}}, 1
+;EG-DAG: LSHL {{.*}}, 1
+;EG-DAG: ASHR {{.*}}, [[SHA]]
+;EG-DAG: ASHR {{.*}}, [[SHB]]
+;EG-DAG: LSHR {{.*}}, [[SHA]]
+;EG-DAG: LSHR {{.*}}, [[SHB]]
+;EG-DAG: OR_INT
+;EG-DAG: OR_INT
+;EG-DAG: ADD_INT {{\*? *}}[[BIGSHA:T[0-9]+\.[XYZW]]]{{.*}}, literal
+;EG-DAG: ADD_INT {{\*? *}}[[BIGSHB:T[0-9]+\.[XYZW]]]{{.*}}, literal
+;EG-DAG: ASHR
+;EG-DAG: ASHR
+;EG-DAG: ASHR {{.*}}, literal
+;EG-DAG: ASHR {{.*}}, literal
+;EG-DAG: SETGT_UINT {{\*? *T[0-9]\.[XYZW]}}, [[SHA]], literal
+;EG-DAG: SETGT_UINT {{\*? *T[0-9]\.[XYZW]}}, [[SHB]], literal
+;EG-DAG: CNDE_INT
+;EG-DAG: CNDE_INT
+;EG-DAG: CNDE_INT
+;EG-DAG: CNDE_INT
+
+;SI-LABEL: {{^}}ashr_v2i64:
+;SI: v_ashr_i64 {{v\[[0-9]+:[0-9]+\], v\[[0-9]+:[0-9]+\], v[0-9]+}}
+;SI: v_ashr_i64 {{v\[[0-9]+:[0-9]+\], v\[[0-9]+:[0-9]+\], v[0-9]+}}
+
+;VI-LABEL: {{^}}ashr_v2i64:
+;VI: v_ashrrev_i64 {{v\[[0-9]+:[0-9]+\], v[0-9]+, v\[[0-9]+:[0-9]+\]}}
+;VI: v_ashrrev_i64 {{v\[[0-9]+:[0-9]+\], v[0-9]+, v\[[0-9]+:[0-9]+\]}}
define void @ashr_v2i64(<2 x i64> addrspace(1)* %out, <2 x i64> addrspace(1)* %in) {
%b_ptr = getelementptr <2 x i64> addrspace(1)* %in, i64 1
@@ -117,61 +140,67 @@ define void @ashr_v2i64(<2 x i64> addrspace(1)* %out, <2 x i64> addrspace(1)* %i
ret void
}
-;EG-CHECK-LABEL: {{^}}ashr_v4i64:
-;EG-CHECK-DAG: SUB_INT {{\*? *}}[[COMPSHA:T[0-9]+\.[XYZW]]], {{literal.[xy]}}, [[SHA:T[0-9]+\.[XYZW]]]
-;EG-CHECK-DAG: SUB_INT {{\*? *}}[[COMPSHB:T[0-9]+\.[XYZW]]], {{literal.[xy]}}, [[SHB:T[0-9]+\.[XYZW]]]
-;EG-CHECK-DAG: SUB_INT {{\*? *}}[[COMPSHC:T[0-9]+\.[XYZW]]], {{literal.[xy]}}, [[SHC:T[0-9]+\.[XYZW]]]
-;EG-CHECK-DAG: SUB_INT {{\*? *}}[[COMPSHD:T[0-9]+\.[XYZW]]], {{literal.[xy]}}, [[SHD:T[0-9]+\.[XYZW]]]
-;EG-CHECK-DAG: LSHL {{\*? *}}[[COMPSHA]]
-;EG-CHECK-DAG: LSHL {{\*? *}}[[COMPSHB]]
-;EG-CHECK-DAG: LSHL {{\*? *}}[[COMPSHC]]
-;EG-CHECK-DAG: LSHL {{\*? *}}[[COMPSHD]]
-;EG-CHECK-DAG: LSHL {{.*}}, 1
-;EG-CHECK-DAG: LSHL {{.*}}, 1
-;EG-CHECK-DAG: LSHL {{.*}}, 1
-;EG-CHECK-DAG: LSHL {{.*}}, 1
-;EG-CHECK-DAG: ASHR {{.*}}, [[SHA]]
-;EG-CHECK-DAG: ASHR {{.*}}, [[SHB]]
-;EG-CHECK-DAG: ASHR {{.*}}, [[SHC]]
-;EG-CHECK-DAG: ASHR {{.*}}, [[SHD]]
-;EG-CHECK-DAG: LSHR {{.*}}, [[SHA]]
-;EG-CHECK-DAG: LSHR {{.*}}, [[SHB]]
-;EG-CHECK-DAG: LSHR {{.*}}, [[SHA]]
-;EG-CHECK-DAG: LSHR {{.*}}, [[SHB]]
-;EG-CHECK-DAG: OR_INT
-;EG-CHECK-DAG: OR_INT
-;EG-CHECK-DAG: OR_INT
-;EG-CHECK-DAG: OR_INT
-;EG-CHECK-DAG: ADD_INT {{\*? *}}[[BIGSHA:T[0-9]+\.[XYZW]]]{{.*}}, literal
-;EG-CHECK-DAG: ADD_INT {{\*? *}}[[BIGSHB:T[0-9]+\.[XYZW]]]{{.*}}, literal
-;EG-CHECK-DAG: ADD_INT {{\*? *}}[[BIGSHC:T[0-9]+\.[XYZW]]]{{.*}}, literal
-;EG-CHECK-DAG: ADD_INT {{\*? *}}[[BIGSHD:T[0-9]+\.[XYZW]]]{{.*}}, literal
-;EG-CHECK-DAG: ASHR
-;EG-CHECK-DAG: ASHR
-;EG-CHECK-DAG: ASHR
-;EG-CHECK-DAG: ASHR
-;EG-CHECK-DAG: ASHR {{.*}}, literal
-;EG-CHECK-DAG: ASHR {{.*}}, literal
-;EG-CHECK-DAG: ASHR {{.*}}, literal
-;EG-CHECK-DAG: ASHR {{.*}}, literal
-;EG-CHECK-DAG: SETGT_UINT {{\*? *T[0-9]\.[XYZW]}}, [[SHA]], literal
-;EG-CHECK-DAG: SETGT_UINT {{\*? *T[0-9]\.[XYZW]}}, [[SHB]], literal
-;EG-CHECK-DAG: SETGT_UINT {{\*? *T[0-9]\.[XYZW]}}, [[SHC]], literal
-;EG-CHECK-DAG: SETGT_UINT {{\*? *T[0-9]\.[XYZW]}}, [[SHD]], literal
-;EG-CHECK-DAG: CNDE_INT
-;EG-CHECK-DAG: CNDE_INT
-;EG-CHECK-DAG: CNDE_INT
-;EG-CHECK-DAG: CNDE_INT
-;EG-CHECK-DAG: CNDE_INT
-;EG-CHECK-DAG: CNDE_INT
-;EG-CHECK-DAG: CNDE_INT
-;EG-CHECK-DAG: CNDE_INT
-
-;SI-CHECK-LABEL: {{^}}ashr_v4i64:
-;SI-CHECK: v_ashr_i64 {{v\[[0-9]+:[0-9]+\], v\[[0-9]+:[0-9]+\], v[0-9]+}}
-;SI-CHECK: v_ashr_i64 {{v\[[0-9]+:[0-9]+\], v\[[0-9]+:[0-9]+\], v[0-9]+}}
-;SI-CHECK: v_ashr_i64 {{v\[[0-9]+:[0-9]+\], v\[[0-9]+:[0-9]+\], v[0-9]+}}
-;SI-CHECK: v_ashr_i64 {{v\[[0-9]+:[0-9]+\], v\[[0-9]+:[0-9]+\], v[0-9]+}}
+;EG-LABEL: {{^}}ashr_v4i64:
+;EG-DAG: SUB_INT {{\*? *}}[[COMPSHA:T[0-9]+\.[XYZW]]], {{literal.[xy]}}, [[SHA:T[0-9]+\.[XYZW]]]
+;EG-DAG: SUB_INT {{\*? *}}[[COMPSHB:T[0-9]+\.[XYZW]]], {{literal.[xy]}}, [[SHB:T[0-9]+\.[XYZW]]]
+;EG-DAG: SUB_INT {{\*? *}}[[COMPSHC:T[0-9]+\.[XYZW]]], {{literal.[xy]}}, [[SHC:T[0-9]+\.[XYZW]]]
+;EG-DAG: SUB_INT {{\*? *}}[[COMPSHD:T[0-9]+\.[XYZW]]], {{literal.[xy]}}, [[SHD:T[0-9]+\.[XYZW]]]
+;EG-DAG: LSHL {{\*? *}}[[COMPSHA]]
+;EG-DAG: LSHL {{\*? *}}[[COMPSHB]]
+;EG-DAG: LSHL {{\*? *}}[[COMPSHC]]
+;EG-DAG: LSHL {{\*? *}}[[COMPSHD]]
+;EG-DAG: LSHL {{.*}}, 1
+;EG-DAG: LSHL {{.*}}, 1
+;EG-DAG: LSHL {{.*}}, 1
+;EG-DAG: LSHL {{.*}}, 1
+;EG-DAG: ASHR {{.*}}, [[SHA]]
+;EG-DAG: ASHR {{.*}}, [[SHB]]
+;EG-DAG: ASHR {{.*}}, [[SHC]]
+;EG-DAG: ASHR {{.*}}, [[SHD]]
+;EG-DAG: LSHR {{.*}}, [[SHA]]
+;EG-DAG: LSHR {{.*}}, [[SHB]]
+;EG-DAG: LSHR {{.*}}, [[SHA]]
+;EG-DAG: LSHR {{.*}}, [[SHB]]
+;EG-DAG: OR_INT
+;EG-DAG: OR_INT
+;EG-DAG: OR_INT
+;EG-DAG: OR_INT
+;EG-DAG: ADD_INT {{\*? *}}[[BIGSHA:T[0-9]+\.[XYZW]]]{{.*}}, literal
+;EG-DAG: ADD_INT {{\*? *}}[[BIGSHB:T[0-9]+\.[XYZW]]]{{.*}}, literal
+;EG-DAG: ADD_INT {{\*? *}}[[BIGSHC:T[0-9]+\.[XYZW]]]{{.*}}, literal
+;EG-DAG: ADD_INT {{\*? *}}[[BIGSHD:T[0-9]+\.[XYZW]]]{{.*}}, literal
+;EG-DAG: ASHR
+;EG-DAG: ASHR
+;EG-DAG: ASHR
+;EG-DAG: ASHR
+;EG-DAG: ASHR {{.*}}, literal
+;EG-DAG: ASHR {{.*}}, literal
+;EG-DAG: ASHR {{.*}}, literal
+;EG-DAG: ASHR {{.*}}, literal
+;EG-DAG: SETGT_UINT {{\*? *T[0-9]\.[XYZW]}}, [[SHA]], literal
+;EG-DAG: SETGT_UINT {{\*? *T[0-9]\.[XYZW]}}, [[SHB]], literal
+;EG-DAG: SETGT_UINT {{\*? *T[0-9]\.[XYZW]}}, [[SHC]], literal
+;EG-DAG: SETGT_UINT {{\*? *T[0-9]\.[XYZW]}}, [[SHD]], literal
+;EG-DAG: CNDE_INT
+;EG-DAG: CNDE_INT
+;EG-DAG: CNDE_INT
+;EG-DAG: CNDE_INT
+;EG-DAG: CNDE_INT
+;EG-DAG: CNDE_INT
+;EG-DAG: CNDE_INT
+;EG-DAG: CNDE_INT
+
+;SI-LABEL: {{^}}ashr_v4i64:
+;SI: v_ashr_i64 {{v\[[0-9]+:[0-9]+\], v\[[0-9]+:[0-9]+\], v[0-9]+}}
+;SI: v_ashr_i64 {{v\[[0-9]+:[0-9]+\], v\[[0-9]+:[0-9]+\], v[0-9]+}}
+;SI: v_ashr_i64 {{v\[[0-9]+:[0-9]+\], v\[[0-9]+:[0-9]+\], v[0-9]+}}
+;SI: v_ashr_i64 {{v\[[0-9]+:[0-9]+\], v\[[0-9]+:[0-9]+\], v[0-9]+}}
+
+;VI-LABEL: {{^}}ashr_v4i64:
+;VI: v_ashrrev_i64 {{v\[[0-9]+:[0-9]+\], v[0-9]+, v\[[0-9]+:[0-9]+\]}}
+;VI: v_ashrrev_i64 {{v\[[0-9]+:[0-9]+\], v[0-9]+, v\[[0-9]+:[0-9]+\]}}
+;VI: v_ashrrev_i64 {{v\[[0-9]+:[0-9]+\], v[0-9]+, v\[[0-9]+:[0-9]+\]}}
+;VI: v_ashrrev_i64 {{v\[[0-9]+:[0-9]+\], v[0-9]+, v\[[0-9]+:[0-9]+\]}}
define void @ashr_v4i64(<4 x i64> addrspace(1)* %out, <4 x i64> addrspace(1)* %in) {
%b_ptr = getelementptr <4 x i64> addrspace(1)* %in, i64 1
diff --git a/test/CodeGen/R600/srem.ll b/test/CodeGen/R600/srem.ll
index 65e3395..510db0e 100644
--- a/test/CodeGen/R600/srem.ll
+++ b/test/CodeGen/R600/srem.ll
@@ -1,4 +1,5 @@
-; RUN: llc -march=r600 -mcpu=SI < %s
+; RUN: llc -march=amdgcn -mcpu=SI < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
+; RUN: llc -march=amdgcn -mcpu=tonga < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
; RUN: llc -march=r600 -mcpu=redwood < %s
define void @srem_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %in) {
@@ -17,6 +18,19 @@ define void @srem_i32_4(i32 addrspace(1)* %out, i32 addrspace(1)* %in) {
ret void
}
+; FUNC-LABEL: {{^}}srem_i32_7:
+; SI: v_mov_b32_e32 [[MAGIC:v[0-9]+]], 0x92492493
+; SI: v_mul_hi_i32 {{v[0-9]+}}, [[MAGIC]],
+; SI: v_mul_lo_i32
+; SI: v_sub_i32
+; SI: s_endpgm
+define void @srem_i32_7(i32 addrspace(1)* %out, i32 addrspace(1)* %in) {
+ %num = load i32 addrspace(1) * %in
+ %result = srem i32 %num, 7
+ store i32 %result, i32 addrspace(1)* %out
+ ret void
+}
+
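The new srem_i32_7 checks pin down magic-number division for a constant divisor. 0x92492493 is the standard signed reciprocal for 7 (Hacker's Delight, shift 2), so in sketch form the expansion is:

  q   = (mulhi_s32(n, 0x92492493) + n) >> 2   ; the v_mul_hi_i32 against [[MAGIC]]
  q   = q + (q >>u 31)                        ; correction for negative n
  rem = n - q * 7                             ; the v_mul_lo_i32 / v_sub_i32 pair

Only the multiply-high, multiply-low, and subtract are checked, which is enough to tell the magic expansion apart from an actual division.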
define void @srem_v2i32(<2 x i32> addrspace(1)* %out, <2 x i32> addrspace(1)* %in) {
%den_ptr = getelementptr <2 x i32> addrspace(1)* %in, i32 1
%num = load <2 x i32> addrspace(1) * %in
@@ -48,3 +62,51 @@ define void @srem_v4i32_4(<4 x i32> addrspace(1)* %out, <4 x i32> addrspace(1)*
store <4 x i32> %result, <4 x i32> addrspace(1)* %out
ret void
}
+
+define void @srem_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %in) {
+ %den_ptr = getelementptr i64 addrspace(1)* %in, i64 1
+ %num = load i64 addrspace(1) * %in
+ %den = load i64 addrspace(1) * %den_ptr
+ %result = srem i64 %num, %den
+ store i64 %result, i64 addrspace(1)* %out
+ ret void
+}
+
+define void @srem_i64_4(i64 addrspace(1)* %out, i64 addrspace(1)* %in) {
+ %num = load i64 addrspace(1) * %in
+ %result = srem i64 %num, 4
+ store i64 %result, i64 addrspace(1)* %out
+ ret void
+}
+
+define void @srem_v2i64(<2 x i64> addrspace(1)* %out, <2 x i64> addrspace(1)* %in) {
+ %den_ptr = getelementptr <2 x i64> addrspace(1)* %in, i64 1
+ %num = load <2 x i64> addrspace(1) * %in
+ %den = load <2 x i64> addrspace(1) * %den_ptr
+ %result = srem <2 x i64> %num, %den
+ store <2 x i64> %result, <2 x i64> addrspace(1)* %out
+ ret void
+}
+
+define void @srem_v2i64_4(<2 x i64> addrspace(1)* %out, <2 x i64> addrspace(1)* %in) {
+ %num = load <2 x i64> addrspace(1) * %in
+ %result = srem <2 x i64> %num, <i64 4, i64 4>
+ store <2 x i64> %result, <2 x i64> addrspace(1)* %out
+ ret void
+}
+
+define void @srem_v4i64(<4 x i64> addrspace(1)* %out, <4 x i64> addrspace(1)* %in) {
+ %den_ptr = getelementptr <4 x i64> addrspace(1)* %in, i64 1
+ %num = load <4 x i64> addrspace(1) * %in
+ %den = load <4 x i64> addrspace(1) * %den_ptr
+ %result = srem <4 x i64> %num, %den
+ store <4 x i64> %result, <4 x i64> addrspace(1)* %out
+ ret void
+}
+
+define void @srem_v4i64_4(<4 x i64> addrspace(1)* %out, <4 x i64> addrspace(1)* %in) {
+ %num = load <4 x i64> addrspace(1) * %in
+ %result = srem <4 x i64> %num, <i64 4, i64 4, i64 4, i64 4>
+ store <4 x i64> %result, <4 x i64> addrspace(1)* %out
+ ret void
+}
diff --git a/test/CodeGen/R600/srl.ll b/test/CodeGen/R600/srl.ll
index 8c5daf6..1f9b620 100644
--- a/test/CodeGen/R600/srl.ll
+++ b/test/CodeGen/R600/srl.ll
@@ -1,166 +1,185 @@
-;RUN: llc < %s -march=r600 -mcpu=redwood | FileCheck --check-prefix=EG-CHECK %s
-;RUN: llc < %s -march=r600 -mcpu=verde -verify-machineinstrs | FileCheck --check-prefix=SI-CHECK %s
+; RUN: llc -march=amdgcn -mcpu=verde -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
+; RUN: llc -march=amdgcn -mcpu=tonga -verify-machineinstrs < %s | FileCheck -check-prefix=VI -check-prefix=FUNC %s
+; RUN: llc -march=r600 -mcpu=redwood < %s | FileCheck -check-prefix=EG -check-prefix=FUNC %s
+
+; FUNC-LABEL: {{^}}lshr_i32:
+; SI: v_lshrrev_b32_e32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
+; VI: v_lshrrev_b32_e32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
+; EG: LSHR {{\*? *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
+define void @lshr_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %in) {
+ %b_ptr = getelementptr i32 addrspace(1)* %in, i32 1
+ %a = load i32 addrspace(1)* %in
+ %b = load i32 addrspace(1)* %b_ptr
+ %result = lshr i32 %a, %b
+ store i32 %result, i32 addrspace(1)* %out
+ ret void
+}
-;EG-CHECK: {{^}}lshr_v2i32:
-;EG-CHECK: LSHR {{\*? *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
-;EG-CHECK: LSHR {{\*? *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
+; FUNC-LABEL: {{^}}lshr_v2i32:
+; SI: v_lshr_b32_e32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
+; SI: v_lshr_b32_e32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
-;SI-CHECK: {{^}}lshr_v2i32:
-;SI-CHECK: v_lshr_b32_e32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
-;SI-CHECK: v_lshr_b32_e32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
+; VI: v_lshrrev_b32_e32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
+; VI: v_lshrrev_b32_e32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
+; EG: LSHR {{\*? *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
+; EG: LSHR {{\*? *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
define void @lshr_v2i32(<2 x i32> addrspace(1)* %out, <2 x i32> addrspace(1)* %in) {
%b_ptr = getelementptr <2 x i32> addrspace(1)* %in, i32 1
- %a = load <2 x i32> addrspace(1) * %in
- %b = load <2 x i32> addrspace(1) * %b_ptr
+ %a = load <2 x i32> addrspace(1)* %in
+ %b = load <2 x i32> addrspace(1)* %b_ptr
%result = lshr <2 x i32> %a, %b
store <2 x i32> %result, <2 x i32> addrspace(1)* %out
ret void
}
-
-;EG-CHECK: {{^}}lshr_v4i32:
-;EG-CHECK: LSHR {{\*? *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
-;EG-CHECK: LSHR {{\*? *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
-;EG-CHECK: LSHR {{\*? *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
-;EG-CHECK: LSHR {{\*? *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
-
-;SI-CHECK: {{^}}lshr_v4i32:
-;SI-CHECK: v_lshr_b32_e32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
-;SI-CHECK: v_lshr_b32_e32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
-;SI-CHECK: v_lshr_b32_e32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
-;SI-CHECK: v_lshr_b32_e32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
-
+; FUNC-LABEL: {{^}}lshr_v4i32:
+; SI: v_lshr_b32_e32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
+; SI: v_lshr_b32_e32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
+; SI: v_lshr_b32_e32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
+; SI: v_lshr_b32_e32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
+
+; VI: v_lshrrev_b32_e32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
+; VI: v_lshrrev_b32_e32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
+; VI: v_lshrrev_b32_e32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
+; VI: v_lshrrev_b32_e32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
+
+; EG: LSHR {{\*? *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
+; EG: LSHR {{\*? *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
+; EG: LSHR {{\*? *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
+; EG: LSHR {{\*? *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
define void @lshr_v4i32(<4 x i32> addrspace(1)* %out, <4 x i32> addrspace(1)* %in) {
%b_ptr = getelementptr <4 x i32> addrspace(1)* %in, i32 1
- %a = load <4 x i32> addrspace(1) * %in
- %b = load <4 x i32> addrspace(1) * %b_ptr
+ %a = load <4 x i32> addrspace(1)* %in
+ %b = load <4 x i32> addrspace(1)* %b_ptr
%result = lshr <4 x i32> %a, %b
store <4 x i32> %result, <4 x i32> addrspace(1)* %out
ret void
}
-;EG-CHECK: {{^}}lshr_i64:
-;EG-CHECK: SUB_INT {{\*? *}}[[COMPSH:T[0-9]+\.[XYZW]]], {{literal.[xy]}}, [[SHIFT:T[0-9]+\.[XYZW]]]
-;EG-CHECK: LSHL {{\* *}}[[TEMP:T[0-9]+\.[XYZW]]], [[OPHI:T[0-9]+\.[XYZW]]], {{[[COMPSH]]|PV.[XYZW]}}
-;EG-CHECK: LSHL {{\*? *}}[[OVERF:T[0-9]+\.[XYZW]]], {{[[TEMP]]|PV.[XYZW]}}, 1
-;EG_CHECK-DAG: ADD_INT {{\*? *}}[[BIGSH:T[0-9]+\.[XYZW]]], [[SHIFT]], literal
-;EG-CHECK-DAG: LSHR {{\*? *}}[[LOSMTMP:T[0-9]+\.[XYZW]]], [[OPLO:T[0-9]+\.[XYZW]]], [[SHIFT]]
-;EG-CHECK-DAG: OR_INT {{\*? *}}[[LOSM:T[0-9]+\.[XYZW]]], {{[[LOSMTMP]]|PV.[XYZW]}}, {{[[OVERF]]|PV.[XYZW]}}
-;EG-CHECK-DAG: LSHR {{\*? *}}[[HISM:T[0-9]+\.[XYZW]]], [[OPHI]], {{PS|[[SHIFT]]}}
-;EG-CHECK-DAG: LSHR {{\*? *}}[[LOBIG:T[0-9]+\.[XYZW]]], [[OPHI]], {{PS|[[SHIFT]]}}
-;EG-CHECK-DAG: SETGT_UINT {{\*? *}}[[RESC:T[0-9]+\.[XYZW]]], [[SHIFT]], literal
-;EG-CHECK-DAG: CNDE_INT {{\*? *}}[[RESLO:T[0-9]+\.[XYZW]]], {{T[0-9]+\.[XYZW]}}
-;EG-CHECK-DAG: CNDE_INT {{\*? *}}[[RESHI:T[0-9]+\.[XYZW]]], {{T[0-9]+\.[XYZW], .*}}, 0.0
-
-;SI-CHECK: {{^}}lshr_i64:
-;SI-CHECK: v_lshr_b64 {{v\[[0-9]+:[0-9]+\], v\[[0-9]+:[0-9]+\], v[0-9]+}}
-
+; FUNC-LABEL: {{^}}lshr_i64:
+; SI: v_lshr_b64 {{v\[[0-9]+:[0-9]+\], v\[[0-9]+:[0-9]+\], v[0-9]+}}
+; VI: v_lshrrev_b64 {{v\[[0-9]+:[0-9]+\], v[0-9]+, v\[[0-9]+:[0-9]+\]}}
+
+; EG: SUB_INT {{\*? *}}[[COMPSH:T[0-9]+\.[XYZW]]], {{literal.[xy]}}, [[SHIFT:T[0-9]+\.[XYZW]]]
+; EG: LSHL {{\* *}}[[TEMP:T[0-9]+\.[XYZW]]], [[OPHI:T[0-9]+\.[XYZW]]], {{[[COMPSH]]|PV.[XYZW]}}
+; EG: LSHL {{\*? *}}[[OVERF:T[0-9]+\.[XYZW]]], {{[[TEMP]]|PV.[XYZW]}}, 1
+; EG-DAG: ADD_INT {{\*? *}}[[BIGSH:T[0-9]+\.[XYZW]]], [[SHIFT]], literal
+; EG-DAG: LSHR {{\*? *}}[[LOSMTMP:T[0-9]+\.[XYZW]]], [[OPLO:T[0-9]+\.[XYZW]]], [[SHIFT]]
+; EG-DAG: OR_INT {{\*? *}}[[LOSM:T[0-9]+\.[XYZW]]], {{[[LOSMTMP]]|PV.[XYZW]}}, {{[[OVERF]]|PV.[XYZW]}}
+; EG-DAG: LSHR {{\*? *}}[[HISM:T[0-9]+\.[XYZW]]], [[OPHI]], {{PS|[[SHIFT]]}}
+; EG-DAG: LSHR {{\*? *}}[[LOBIG:T[0-9]+\.[XYZW]]], [[OPHI]], {{PS|[[SHIFT]]}}
+; EG-DAG: SETGT_UINT {{\*? *}}[[RESC:T[0-9]+\.[XYZW]]], [[SHIFT]], literal
+; EG-DAG: CNDE_INT {{\*? *}}[[RESLO:T[0-9]+\.[XYZW]]], {{T[0-9]+\.[XYZW]}}
+; EG-DAG: CNDE_INT {{\*? *}}[[RESHI:T[0-9]+\.[XYZW]]], {{T[0-9]+\.[XYZW], .*}}, 0.0
define void @lshr_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %in) {
%b_ptr = getelementptr i64 addrspace(1)* %in, i64 1
- %a = load i64 addrspace(1) * %in
- %b = load i64 addrspace(1) * %b_ptr
+ %a = load i64 addrspace(1)* %in
+ %b = load i64 addrspace(1)* %b_ptr
%result = lshr i64 %a, %b
store i64 %result, i64 addrspace(1)* %out
ret void
}
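The EG check pattern above is the generic 32-bit expansion of a variable 64-bit shift. For s < 32 the funnel is

  lo' = (lo >>u s) | (hi << (32 - s)),  hi' = hi >>u s

where the hi << (32 - s) term is built as hi << (31 - s) followed by the extra LSHL ..., 1, so no single shift amount can reach the undefined value 32. For s >= 32 the ADD_INT ..., literal forms s - 32 and

  lo' = hi >>u (s - 32),  hi' = 0

SETGT_UINT computes the s > 31 predicate and the CNDE_INT pair selects between the two cases; the operand written as ..., 0.0 is the zeroed high half.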
-;EG-CHECK: {{^}}lshr_v2i64:
-;EG-CHECK-DAG: SUB_INT {{\*? *}}[[COMPSHA:T[0-9]+\.[XYZW]]], {{literal.[xy]}}, [[SHA:T[0-9]+\.[XYZW]]]
-;EG-CHECK-DAG: SUB_INT {{\*? *}}[[COMPSHB:T[0-9]+\.[XYZW]]], {{literal.[xy]}}, [[SHB:T[0-9]+\.[XYZW]]]
-;EG-CHECK-DAG: LSHL {{\*? *}}[[COMPSHA]]
-;EG-CHECK-DAG: LSHL {{\*? *}}[[COMPSHB]]
-;EG-CHECK-DAG: LSHL {{.*}}, 1
-;EG-CHECK-DAG: LSHL {{.*}}, 1
-;EG-CHECK-DAG: LSHR {{.*}}, [[SHA]]
-;EG-CHECK-DAG: LSHR {{.*}}, [[SHB]]
-;EG-CHECK-DAG: LSHR {{.*}}, [[SHA]]
-;EG-CHECK-DAG: LSHR {{.*}}, [[SHB]]
-;EG-CHECK-DAG: OR_INT
-;EG-CHECK-DAG: OR_INT
-;EG-CHECK-DAG: ADD_INT {{\*? *}}[[BIGSHA:T[0-9]+\.[XYZW]]]{{.*}}, literal
-;EG-CHECK-DAG: ADD_INT {{\*? *}}[[BIGSHB:T[0-9]+\.[XYZW]]]{{.*}}, literal
-;EG-CHECK-DAG: LSHR
-;EG-CHECK-DAG: LSHR
-;EG-CHECK-DAG: SETGT_UINT {{\*? *T[0-9]\.[XYZW]}}, [[SHA]], literal
-;EG-CHECK-DAG: SETGT_UINT {{\*? *T[0-9]\.[XYZW]}}, [[SHB]], literal
-;EG-CHECK-DAG: CNDE_INT {{.*}}, 0.0
-;EG-CHECK-DAG: CNDE_INT {{.*}}, 0.0
-;EG-CHECK-DAG: CNDE_INT
-;EG-CHECK-DAG: CNDE_INT
-
-;SI-CHECK: {{^}}lshr_v2i64:
-;SI-CHECK: v_lshr_b64 {{v\[[0-9]+:[0-9]+\], v\[[0-9]+:[0-9]+\], v[0-9]+}}
-;SI-CHECK: v_lshr_b64 {{v\[[0-9]+:[0-9]+\], v\[[0-9]+:[0-9]+\], v[0-9]+}}
-
+; FUNC-LABEL: {{^}}lshr_v2i64:
+; SI: v_lshr_b64 {{v\[[0-9]+:[0-9]+\], v\[[0-9]+:[0-9]+\], v[0-9]+}}
+; SI: v_lshr_b64 {{v\[[0-9]+:[0-9]+\], v\[[0-9]+:[0-9]+\], v[0-9]+}}
+
+; VI: v_lshrrev_b64 {{v\[[0-9]+:[0-9]+\], v[0-9]+, v\[[0-9]+:[0-9]+\]}}
+; VI: v_lshrrev_b64 {{v\[[0-9]+:[0-9]+\], v[0-9]+, v\[[0-9]+:[0-9]+\]}}
+
+; EG-DAG: SUB_INT {{\*? *}}[[COMPSHA:T[0-9]+\.[XYZW]]], {{literal.[xy]}}, [[SHA:T[0-9]+\.[XYZW]]]
+; EG-DAG: SUB_INT {{\*? *}}[[COMPSHB:T[0-9]+\.[XYZW]]], {{literal.[xy]}}, [[SHB:T[0-9]+\.[XYZW]]]
+; EG-DAG: LSHL {{\*? *}}[[COMPSHA]]
+; EG-DAG: LSHL {{\*? *}}[[COMPSHB]]
+; EG-DAG: LSHL {{.*}}, 1
+; EG-DAG: LSHL {{.*}}, 1
+; EG-DAG: LSHR {{.*}}, [[SHA]]
+; EG-DAG: LSHR {{.*}}, [[SHB]]
+; EG-DAG: LSHR {{.*}}, [[SHA]]
+; EG-DAG: LSHR {{.*}}, [[SHB]]
+; EG-DAG: OR_INT
+; EG-DAG: OR_INT
+; EG-DAG: ADD_INT {{\*? *}}[[BIGSHA:T[0-9]+\.[XYZW]]]{{.*}}, literal
+; EG-DAG: ADD_INT {{\*? *}}[[BIGSHB:T[0-9]+\.[XYZW]]]{{.*}}, literal
+; EG-DAG: LSHR
+; EG-DAG: LSHR
+; EG-DAG: SETGT_UINT {{\*? *T[0-9]\.[XYZW]}}, [[SHA]], literal
+; EG-DAG: SETGT_UINT {{\*? *T[0-9]\.[XYZW]}}, [[SHB]], literal
+; EG-DAG: CNDE_INT {{.*}}, 0.0
+; EG-DAG: CNDE_INT {{.*}}, 0.0
+; EG-DAG: CNDE_INT
+; EG-DAG: CNDE_INT
define void @lshr_v2i64(<2 x i64> addrspace(1)* %out, <2 x i64> addrspace(1)* %in) {
%b_ptr = getelementptr <2 x i64> addrspace(1)* %in, i64 1
- %a = load <2 x i64> addrspace(1) * %in
- %b = load <2 x i64> addrspace(1) * %b_ptr
+ %a = load <2 x i64> addrspace(1)* %in
+ %b = load <2 x i64> addrspace(1)* %b_ptr
%result = lshr <2 x i64> %a, %b
store <2 x i64> %result, <2 x i64> addrspace(1)* %out
ret void
}
-
-;EG-CHECK: {{^}}lshr_v4i64:
-;EG-CHECK-DAG: SUB_INT {{\*? *}}[[COMPSHA:T[0-9]+\.[XYZW]]], {{literal.[xy]}}, [[SHA:T[0-9]+\.[XYZW]]]
-;EG-CHECK-DAG: SUB_INT {{\*? *}}[[COMPSHB:T[0-9]+\.[XYZW]]], {{literal.[xy]}}, [[SHB:T[0-9]+\.[XYZW]]]
-;EG-CHECK-DAG: SUB_INT {{\*? *}}[[COMPSHC:T[0-9]+\.[XYZW]]], {{literal.[xy]}}, [[SHC:T[0-9]+\.[XYZW]]]
-;EG-CHECK-DAG: SUB_INT {{\*? *}}[[COMPSHD:T[0-9]+\.[XYZW]]], {{literal.[xy]}}, [[SHD:T[0-9]+\.[XYZW]]]
-;EG-CHECK-DAG: LSHL {{\*? *}}[[COMPSHA]]
-;EG-CHECK-DAG: LSHL {{\*? *}}[[COMPSHB]]
-;EG-CHECK-DAG: LSHL {{\*? *}}[[COMPSHC]]
-;EG-CHECK-DAG: LSHL {{\*? *}}[[COMPSHD]]
-;EG-CHECK-DAG: LSHL {{.*}}, 1
-;EG-CHECK-DAG: LSHL {{.*}}, 1
-;EG-CHECK-DAG: LSHL {{.*}}, 1
-;EG-CHECK-DAG: LSHL {{.*}}, 1
-;EG-CHECK-DAG: LSHR {{.*}}, [[SHA]]
-;EG-CHECK-DAG: LSHR {{.*}}, [[SHB]]
-;EG-CHECK-DAG: LSHR {{.*}}, [[SHC]]
-;EG-CHECK-DAG: LSHR {{.*}}, [[SHD]]
-;EG-CHECK-DAG: LSHR {{.*}}, [[SHA]]
-;EG-CHECK-DAG: LSHR {{.*}}, [[SHB]]
-;EG-CHECK-DAG: LSHR {{.*}}, [[SHC]]
-;EG-CHECK-DAG: LSHR {{.*}}, [[SHD]]
-;EG-CHECK-DAG: OR_INT
-;EG-CHECK-DAG: OR_INT
-;EG-CHECK-DAG: OR_INT
-;EG-CHECK-DAG: OR_INT
-;EG-CHECK-DAG: ADD_INT {{\*? *}}[[BIGSHA:T[0-9]+\.[XYZW]]]{{.*}}, literal
-;EG-CHECK-DAG: ADD_INT {{\*? *}}[[BIGSHB:T[0-9]+\.[XYZW]]]{{.*}}, literal
-;EG-CHECK-DAG: ADD_INT {{\*? *}}[[BIGSHC:T[0-9]+\.[XYZW]]]{{.*}}, literal
-;EG-CHECK-DAG: ADD_INT {{\*? *}}[[BIGSHD:T[0-9]+\.[XYZW]]]{{.*}}, literal
-;EG-CHECK-DAG: LSHR
-;EG-CHECK-DAG: LSHR
-;EG-CHECK-DAG: LSHR
-;EG-CHECK-DAG: LSHR
-;EG-CHECK-DAG: LSHR
-;EG-CHECK-DAG: LSHR
-;EG-CHECK-DAG: SETGT_UINT {{\*? *T[0-9]\.[XYZW]}}, [[SHA]], literal
-;EG-CHECK-DAG: SETGT_UINT {{\*? *T[0-9]\.[XYZW]}}, [[SHB]], literal
-;EG-CHECK-DAG: SETGT_UINT {{\*? *T[0-9]\.[XYZW]}}, [[SHC]], literal
-;EG-CHECK-DAG: SETGT_UINT {{\*? *T[0-9]\.[XYZW]}}, [[SHD]], literal
-;EG-CHECK-DAG: CNDE_INT {{.*}}, 0.0
-;EG-CHECK-DAG: CNDE_INT {{.*}}, 0.0
-;EG-CHECK-DAG: CNDE_INT {{.*}}, 0.0
-;EG-CHECK-DAG: CNDE_INT {{.*}}, 0.0
-;EG-CHECK-DAG: CNDE_INT
-;EG-CHECK-DAG: CNDE_INT
-;EG-CHECK-DAG: CNDE_INT
-;EG-CHECK-DAG: CNDE_INT
-
-;SI-CHECK: {{^}}lshr_v4i64:
-;SI-CHECK: v_lshr_b64 {{v\[[0-9]+:[0-9]+\], v\[[0-9]+:[0-9]+\], v[0-9]+}}
-;SI-CHECK: v_lshr_b64 {{v\[[0-9]+:[0-9]+\], v\[[0-9]+:[0-9]+\], v[0-9]+}}
-;SI-CHECK: v_lshr_b64 {{v\[[0-9]+:[0-9]+\], v\[[0-9]+:[0-9]+\], v[0-9]+}}
-;SI-CHECK: v_lshr_b64 {{v\[[0-9]+:[0-9]+\], v\[[0-9]+:[0-9]+\], v[0-9]+}}
-
+; FUNC-LABEL: {{^}}lshr_v4i64:
+; SI: v_lshr_b64 {{v\[[0-9]+:[0-9]+\], v\[[0-9]+:[0-9]+\], v[0-9]+}}
+; SI: v_lshr_b64 {{v\[[0-9]+:[0-9]+\], v\[[0-9]+:[0-9]+\], v[0-9]+}}
+; SI: v_lshr_b64 {{v\[[0-9]+:[0-9]+\], v\[[0-9]+:[0-9]+\], v[0-9]+}}
+; SI: v_lshr_b64 {{v\[[0-9]+:[0-9]+\], v\[[0-9]+:[0-9]+\], v[0-9]+}}
+
+; VI: v_lshrrev_b64 {{v\[[0-9]+:[0-9]+\], v[0-9]+, v\[[0-9]+:[0-9]+\]}}
+; VI: v_lshrrev_b64 {{v\[[0-9]+:[0-9]+\], v[0-9]+, v\[[0-9]+:[0-9]+\]}}
+; VI: v_lshrrev_b64 {{v\[[0-9]+:[0-9]+\], v[0-9]+, v\[[0-9]+:[0-9]+\]}}
+; VI: v_lshrrev_b64 {{v\[[0-9]+:[0-9]+\], v[0-9]+, v\[[0-9]+:[0-9]+\]}}
+
+; EG-DAG: SUB_INT {{\*? *}}[[COMPSHA:T[0-9]+\.[XYZW]]], {{literal.[xy]}}, [[SHA:T[0-9]+\.[XYZW]]]
+; EG-DAG: SUB_INT {{\*? *}}[[COMPSHB:T[0-9]+\.[XYZW]]], {{literal.[xy]}}, [[SHB:T[0-9]+\.[XYZW]]]
+; EG-DAG: SUB_INT {{\*? *}}[[COMPSHC:T[0-9]+\.[XYZW]]], {{literal.[xy]}}, [[SHC:T[0-9]+\.[XYZW]]]
+; EG-DAG: SUB_INT {{\*? *}}[[COMPSHD:T[0-9]+\.[XYZW]]], {{literal.[xy]}}, [[SHD:T[0-9]+\.[XYZW]]]
+; EG-DAG: LSHL {{\*? *}}[[COMPSHA]]
+; EG-DAG: LSHL {{\*? *}}[[COMPSHB]]
+; EG-DAG: LSHL {{\*? *}}[[COMPSHC]]
+; EG-DAG: LSHL {{\*? *}}[[COMPSHD]]
+; EG-DAG: LSHL {{.*}}, 1
+; EG-DAG: LSHL {{.*}}, 1
+; EG-DAG: LSHL {{.*}}, 1
+; EG-DAG: LSHL {{.*}}, 1
+; EG-DAG: LSHR {{.*}}, [[SHA]]
+; EG-DAG: LSHR {{.*}}, [[SHB]]
+; EG-DAG: LSHR {{.*}}, [[SHC]]
+; EG-DAG: LSHR {{.*}}, [[SHD]]
+; EG-DAG: LSHR {{.*}}, [[SHA]]
+; EG-DAG: LSHR {{.*}}, [[SHB]]
+; EG-DAG: LSHR {{.*}}, [[SHC]]
+; EG-DAG: LSHR {{.*}}, [[SHD]]
+; EG-DAG: OR_INT
+; EG-DAG: OR_INT
+; EG-DAG: OR_INT
+; EG-DAG: OR_INT
+; EG-DAG: ADD_INT {{\*? *}}[[BIGSHA:T[0-9]+\.[XYZW]]]{{.*}}, literal
+; EG-DAG: ADD_INT {{\*? *}}[[BIGSHB:T[0-9]+\.[XYZW]]]{{.*}}, literal
+; EG-DAG: ADD_INT {{\*? *}}[[BIGSHC:T[0-9]+\.[XYZW]]]{{.*}}, literal
+; EG-DAG: ADD_INT {{\*? *}}[[BIGSHD:T[0-9]+\.[XYZW]]]{{.*}}, literal
+; EG-DAG: LSHR
+; EG-DAG: LSHR
+; EG-DAG: LSHR
+; EG-DAG: LSHR
+; EG-DAG: LSHR
+; EG-DAG: LSHR
+; EG-DAG: SETGT_UINT {{\*? *T[0-9]\.[XYZW]}}, [[SHA]], literal
+; EG-DAG: SETGT_UINT {{\*? *T[0-9]\.[XYZW]}}, [[SHB]], literal
+; EG-DAG: SETGT_UINT {{\*? *T[0-9]\.[XYZW]}}, [[SHC]], literal
+; EG-DAG: SETGT_UINT {{\*? *T[0-9]\.[XYZW]}}, [[SHD]], literal
+; EG-DAG: CNDE_INT {{.*}}, 0.0
+; EG-DAG: CNDE_INT {{.*}}, 0.0
+; EG-DAG: CNDE_INT {{.*}}, 0.0
+; EG-DAG: CNDE_INT {{.*}}, 0.0
+; EG-DAG: CNDE_INT
+; EG-DAG: CNDE_INT
+; EG-DAG: CNDE_INT
+; EG-DAG: CNDE_INT
define void @lshr_v4i64(<4 x i64> addrspace(1)* %out, <4 x i64> addrspace(1)* %in) {
%b_ptr = getelementptr <4 x i64> addrspace(1)* %in, i64 1
- %a = load <4 x i64> addrspace(1) * %in
- %b = load <4 x i64> addrspace(1) * %b_ptr
+ %a = load <4 x i64> addrspace(1)* %in
+ %b = load <4 x i64> addrspace(1)* %b_ptr
%result = lshr <4 x i64> %a, %b
store <4 x i64> %result, <4 x i64> addrspace(1)* %out
ret void
diff --git a/test/CodeGen/R600/ssubo.ll b/test/CodeGen/R600/ssubo.ll
index 8031c6f..09d3959 100644
--- a/test/CodeGen/R600/ssubo.ll
+++ b/test/CodeGen/R600/ssubo.ll
@@ -1,4 +1,5 @@
-; RUN: llc -march=r600 -mcpu=SI -verify-machineinstrs< %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
+; RUN: llc -march=amdgcn -mcpu=SI -verify-machineinstrs< %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
+; RUN: llc -march=amdgcn -mcpu=tonga -verify-machineinstrs< %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
; RUN: llc -march=r600 -mcpu=cypress -verify-machineinstrs< %s
declare { i32, i1 } @llvm.ssub.with.overflow.i32(i32, i32) nounwind readnone
diff --git a/test/CodeGen/R600/store-barrier.ll b/test/CodeGen/R600/store-barrier.ll
index 350b006..ea65bb0 100644
--- a/test/CodeGen/R600/store-barrier.ll
+++ b/test/CodeGen/R600/store-barrier.ll
@@ -1,5 +1,5 @@
-; RUN: llc -march=r600 -mcpu=SI -verify-machineinstrs -mattr=+load-store-opt -enable-misched < %s | FileCheck --check-prefix=CHECK %s
-; RUN: llc -march=r600 -mcpu=bonaire -verify-machineinstrs -mattr=+load-store-opt -enable-misched < %s | FileCheck --check-prefix=CHECK %s
+; RUN: llc -march=amdgcn -mcpu=SI -verify-machineinstrs -mattr=+load-store-opt -enable-misched < %s | FileCheck --check-prefix=CHECK %s
+; RUN: llc -march=amdgcn -mcpu=bonaire -verify-machineinstrs -mattr=+load-store-opt -enable-misched < %s | FileCheck --check-prefix=CHECK %s
; This test is for a bug in the machine scheduler where stores without
; an underlying object would be moved across the barrier. In this
diff --git a/test/CodeGen/R600/store-v3i32.ll b/test/CodeGen/R600/store-v3i32.ll
index 0f28f33..33617b5 100644
--- a/test/CodeGen/R600/store-v3i32.ll
+++ b/test/CodeGen/R600/store-v3i32.ll
@@ -1,5 +1,6 @@
; XFAIL: *
-; RUN: llc -verify-machineinstrs -march=r600 -mcpu=SI < %s | FileCheck -check-prefix=SI %s
+; RUN: llc -verify-machineinstrs -march=amdgcn -mcpu=SI < %s | FileCheck -check-prefix=SI %s
+; RUN: llc -verify-machineinstrs -march=amdgcn -mcpu=tonga < %s | FileCheck -check-prefix=SI %s
; 3-element vectors have the same size and alignment as 4-element vectors,
; so this should be done in a single store.
diff --git a/test/CodeGen/R600/store-v3i64.ll b/test/CodeGen/R600/store-v3i64.ll
index 247a561..e0c554a 100644
--- a/test/CodeGen/R600/store-v3i64.ll
+++ b/test/CodeGen/R600/store-v3i64.ll
@@ -1,5 +1,6 @@
; XFAIL: *
-; RUN: llc -march=r600 -mcpu=SI -verify-machineinstrs< %s | FileCheck -check-prefix=SI
+; RUN: llc -march=amdgcn -mcpu=SI -verify-machineinstrs< %s | FileCheck -check-prefix=SI %s
+; RUN: llc -march=amdgcn -mcpu=tonga -verify-machineinstrs< %s | FileCheck -check-prefix=SI %s
; SI-LABEL: {{^}}global_store_v3i64:
; SI: buffer_store_dwordx4
diff --git a/test/CodeGen/R600/store-vector-ptrs.ll b/test/CodeGen/R600/store-vector-ptrs.ll
index aee639b..ba4d94f 100644
--- a/test/CodeGen/R600/store-vector-ptrs.ll
+++ b/test/CodeGen/R600/store-vector-ptrs.ll
@@ -1,4 +1,5 @@
-; RUN: llc -march=r600 -mcpu=SI -verify-machineinstrs< %s
+; RUN: llc -march=amdgcn -mcpu=SI -verify-machineinstrs< %s
+; RUN: llc -march=amdgcn -mcpu=tonga -verify-machineinstrs< %s
; This tests for a bug that caused a crash in
; AMDGPUDAGToDAGISel::SelectMUBUFScratch() which is used for selecting
diff --git a/test/CodeGen/R600/store.ll b/test/CodeGen/R600/store.ll
index 713ecd6..e4cb313 100644
--- a/test/CodeGen/R600/store.ll
+++ b/test/CodeGen/R600/store.ll
@@ -1,13 +1,14 @@
-; RUN: llc < %s -march=r600 -mcpu=redwood | FileCheck --check-prefix=EG-CHECK --check-prefix=FUNC %s
-; RUN: llc < %s -march=r600 -mcpu=cayman | FileCheck --check-prefix=CM-CHECK --check-prefix=FUNC %s
-; RUN: llc < %s -march=r600 -mcpu=verde -verify-machineinstrs | FileCheck --check-prefix=SI-CHECK --check-prefix=FUNC %s
+; RUN: llc -march=amdgcn -mcpu=verde -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
+; RUN: llc -march=amdgcn -mcpu=tonga -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
+; RUN: llc -march=r600 -mcpu=redwood < %s | FileCheck -check-prefix=EG -check-prefix=FUNC %s
+; RUN: llc -march=r600 -mcpu=cayman < %s | FileCheck -check-prefix=CM -check-prefix=FUNC %s
;===------------------------------------------------------------------------===;
; Global Address Space
;===------------------------------------------------------------------------===;
; FUNC-LABEL: {{^}}store_i1:
-; EG-CHECK: MEM_RAT MSKOR
-; SI-CHECK: buffer_store_byte
+; EG: MEM_RAT MSKOR
+; SI: buffer_store_byte
define void @store_i1(i1 addrspace(1)* %out) {
entry:
store i1 true, i1 addrspace(1)* %out
@@ -15,27 +16,29 @@ entry:
}
; i8 store
-; EG-CHECK-LABEL: {{^}}store_i8:
-; EG-CHECK: MEM_RAT MSKOR T[[RW_GPR:[0-9]]].XW, T{{[0-9]}}.X
-; EG-CHECK: VTX_READ_8 [[VAL:T[0-9]\.X]], [[VAL]]
+; EG-LABEL: {{^}}store_i8:
+; EG: MEM_RAT MSKOR T[[RW_GPR:[0-9]]].XW, T{{[0-9]}}.X
+
; IG 0: Get the byte index and truncate the value
-; EG-CHECK: AND_INT T{{[0-9]}}.[[BI_CHAN:[XYZW]]], KC0[2].Y, literal.x
-; EG-CHECK-NEXT: AND_INT * T{{[0-9]}}.[[TRUNC_CHAN:[XYZW]]], [[VAL]], literal.y
-; EG-CHECK-NEXT: 3(4.203895e-45), 255(3.573311e-43)
+; EG: AND_INT * T{{[0-9]}}.[[BI_CHAN:[XYZW]]], KC0[2].Y, literal.x
+; EG: LSHL T{{[0-9]}}.[[SHIFT_CHAN:[XYZW]]], PV.[[BI_CHAN]], literal.x
+; EG: AND_INT * T{{[0-9]}}.[[TRUNC_CHAN:[XYZW]]], KC0[2].Z, literal.y
+; EG-NEXT: 3(4.203895e-45), 255(3.573311e-43)
+
+
; IG 1: Truncate the calculated shift amount for the mask
-; EG-CHECK: LSHL * T{{[0-9]}}.[[SHIFT_CHAN:[XYZW]]], PV.[[BI_CHAN]], literal.x
-; EG-CHECK-NEXT: 3
+
; IG 2: Shift the value and the mask
-; EG-CHECK: LSHL T[[RW_GPR]].X, T{{[0-9]}}.[[TRUNC_CHAN]], PV.[[SHIFT_CHAN]]
-; EG-CHECK: LSHL * T[[RW_GPR]].W, literal.x, PV.[[SHIFT_CHAN]]
-; EG-CHECK-NEXT: 255
+; EG: LSHL T[[RW_GPR]].X, PS, PV.[[SHIFT_CHAN]]
+; EG: LSHL * T[[RW_GPR]].W, literal.x, PV.[[SHIFT_CHAN]]
+; EG-NEXT: 255
; IG 3: Initialize the Y and Z channels to zero
; XXX: An optimal scheduler should merge this into one of the previous IGs.
-; EG-CHECK: MOV T[[RW_GPR]].Y, 0.0
-; EG-CHECK: MOV * T[[RW_GPR]].Z, 0.0
+; EG: MOV T[[RW_GPR]].Y, 0.0
+; EG: MOV * T[[RW_GPR]].Z, 0.0
-; SI-CHECK-LABEL: {{^}}store_i8:
-; SI-CHECK: buffer_store_byte
+; SI-LABEL: {{^}}store_i8:
+; SI: buffer_store_byte
define void @store_i8(i8 addrspace(1)* %out, i8 %in) {
entry:
@@ -44,39 +47,44 @@ entry:
}
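; A worked sketch of the MSKOR math the EG checks above encode (editorial,
; not part of this patch); the names are illustrative:
;   idx   = addr & 3          ; AND_INT with literal 3: byte index in the dword
;   shift = idx << 3          ; LSHL by literal 3: multiply by 8
;   val   = (in & 255) << shift
;   mask  = 255 << shift      ; MSKOR merges: (old & ~mask) | val
; For example, addr = 6 gives idx = 2, shift = 16, mask = 0x00FF0000.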
; i16 store
-; EG-CHECK-LABEL: {{^}}store_i16:
-; EG-CHECK: MEM_RAT MSKOR T[[RW_GPR:[0-9]]].XW, T{{[0-9]}}.X
-; EG-CHECK: VTX_READ_16 [[VAL:T[0-9]\.X]], [[VAL]]
+; EG-LABEL: {{^}}store_i16:
+; EG: MEM_RAT MSKOR T[[RW_GPR:[0-9]]].XW, T{{[0-9]}}.X
+
; IG 0: Get the byte index and truncate the value
-; EG-CHECK: AND_INT T{{[0-9]}}.[[BI_CHAN:[XYZW]]], KC0[2].Y, literal.x
-; EG-CHECK: AND_INT * T{{[0-9]}}.[[TRUNC_CHAN:[XYZW]]], [[VAL]], literal.y
-; EG-CHECK-NEXT: 3(4.203895e-45), 65535(9.183409e-41)
+
+
+; EG: AND_INT * T{{[0-9]}}.[[BI_CHAN:[XYZW]]], KC0[2].Y, literal.x
+; EG-NEXT: 3(4.203895e-45),
+
+; EG: LSHL T{{[0-9]}}.[[SHIFT_CHAN:[XYZW]]], PV.[[BI_CHAN]], literal.x
+; EG: AND_INT * T{{[0-9]}}.[[TRUNC_CHAN:[XYZW]]], KC0[2].Z, literal.y
+
+; EG-NEXT: 3(4.203895e-45), 65535(9.183409e-41)
; IG 1: Truncate the calculated shift amount for the mask
-; EG-CHECK: LSHL * T{{[0-9]}}.[[SHIFT_CHAN:[XYZW]]], PV.[[BI_CHAN]], literal.x
-; EG-CHECK: 3
+
; IG 2: Shift the value and the mask
-; EG-CHECK: LSHL T[[RW_GPR]].X, T{{[0-9]}}.[[TRUNC_CHAN]], PV.[[SHIFT_CHAN]]
-; EG-CHECK: LSHL * T[[RW_GPR]].W, literal.x, PV.[[SHIFT_CHAN]]
-; EG-CHECK-NEXT: 65535
+; EG: LSHL T[[RW_GPR]].X, PS, PV.[[SHIFT_CHAN]]
+; EG: LSHL * T[[RW_GPR]].W, literal.x, PV.[[SHIFT_CHAN]]
+; EG-NEXT: 65535
; IG 3: Initialize the Y and Z channels to zero
; XXX: An optimal scheduler should merge this into one of the previous IGs.
-; EG-CHECK: MOV T[[RW_GPR]].Y, 0.0
-; EG-CHECK: MOV * T[[RW_GPR]].Z, 0.0
+; EG: MOV T[[RW_GPR]].Y, 0.0
+; EG: MOV * T[[RW_GPR]].Z, 0.0
-; SI-CHECK-LABEL: {{^}}store_i16:
-; SI-CHECK: buffer_store_short
+; SI-LABEL: {{^}}store_i16:
+; SI: buffer_store_short
define void @store_i16(i16 addrspace(1)* %out, i16 %in) {
entry:
store i16 %in, i16 addrspace(1)* %out
ret void
}
-; EG-CHECK-LABEL: {{^}}store_v2i8:
-; EG-CHECK: MEM_RAT MSKOR
-; EG-CHECK-NOT: MEM_RAT MSKOR
-; SI-CHECK-LABEL: {{^}}store_v2i8:
-; SI-CHECK: buffer_store_byte
-; SI-CHECK: buffer_store_byte
+; EG-LABEL: {{^}}store_v2i8:
+; EG: MEM_RAT MSKOR
+; EG-NOT: MEM_RAT MSKOR
+; SI-LABEL: {{^}}store_v2i8:
+; SI: buffer_store_byte
+; SI: buffer_store_byte
define void @store_v2i8(<2 x i8> addrspace(1)* %out, <2 x i32> %in) {
entry:
%0 = trunc <2 x i32> %in to <2 x i8>
@@ -85,13 +93,13 @@ entry:
}
-; EG-CHECK-LABEL: {{^}}store_v2i16:
-; EG-CHECK: MEM_RAT_CACHELESS STORE_RAW
-; CM-CHECK-LABEL: {{^}}store_v2i16:
-; CM-CHECK: MEM_RAT_CACHELESS STORE_DWORD
-; SI-CHECK-LABEL: {{^}}store_v2i16:
-; SI-CHECK: buffer_store_short
-; SI-CHECK: buffer_store_short
+; EG-LABEL: {{^}}store_v2i16:
+; EG: MEM_RAT_CACHELESS STORE_RAW
+; CM-LABEL: {{^}}store_v2i16:
+; CM: MEM_RAT_CACHELESS STORE_DWORD
+; SI-LABEL: {{^}}store_v2i16:
+; SI: buffer_store_short
+; SI: buffer_store_short
define void @store_v2i16(<2 x i16> addrspace(1)* %out, <2 x i32> %in) {
entry:
%0 = trunc <2 x i32> %in to <2 x i16>
@@ -99,15 +107,15 @@ entry:
ret void
}
-; EG-CHECK-LABEL: {{^}}store_v4i8:
-; EG-CHECK: MEM_RAT_CACHELESS STORE_RAW
-; CM-CHECK-LABEL: {{^}}store_v4i8:
-; CM-CHECK: MEM_RAT_CACHELESS STORE_DWORD
-; SI-CHECK-LABEL: {{^}}store_v4i8:
-; SI-CHECK: buffer_store_byte
-; SI-CHECK: buffer_store_byte
-; SI-CHECK: buffer_store_byte
-; SI-CHECK: buffer_store_byte
+; EG-LABEL: {{^}}store_v4i8:
+; EG: MEM_RAT_CACHELESS STORE_RAW
+; CM-LABEL: {{^}}store_v4i8:
+; CM: MEM_RAT_CACHELESS STORE_DWORD
+; SI-LABEL: {{^}}store_v4i8:
+; SI: buffer_store_byte
+; SI: buffer_store_byte
+; SI: buffer_store_byte
+; SI: buffer_store_byte
define void @store_v4i8(<4 x i8> addrspace(1)* %out, <4 x i32> %in) {
entry:
%0 = trunc <4 x i32> %in to <4 x i8>
@@ -116,30 +124,30 @@ entry:
}
; floating-point store
-; EG-CHECK-LABEL: {{^}}store_f32:
-; EG-CHECK: MEM_RAT_CACHELESS STORE_RAW T{{[0-9]+\.X, T[0-9]+\.X}}, 1
-; CM-CHECK-LABEL: {{^}}store_f32:
-; CM-CHECK: MEM_RAT_CACHELESS STORE_DWORD T{{[0-9]+\.X, T[0-9]+\.X}}
-; SI-CHECK-LABEL: {{^}}store_f32:
-; SI-CHECK: buffer_store_dword
+; EG-LABEL: {{^}}store_f32:
+; EG: MEM_RAT_CACHELESS STORE_RAW T{{[0-9]+\.X, T[0-9]+\.X}}, 1
+; CM-LABEL: {{^}}store_f32:
+; CM: MEM_RAT_CACHELESS STORE_DWORD T{{[0-9]+\.X, T[0-9]+\.X}}
+; SI-LABEL: {{^}}store_f32:
+; SI: buffer_store_dword
define void @store_f32(float addrspace(1)* %out, float %in) {
store float %in, float addrspace(1)* %out
ret void
}
-; EG-CHECK-LABEL: {{^}}store_v4i16:
-; EG-CHECK: MEM_RAT MSKOR
-; EG-CHECK: MEM_RAT MSKOR
-; EG-CHECK: MEM_RAT MSKOR
-; EG-CHECK: MEM_RAT MSKOR
-; EG-CHECK-NOT: MEM_RAT MSKOR
-; SI-CHECK-LABEL: {{^}}store_v4i16:
-; SI-CHECK: buffer_store_short
-; SI-CHECK: buffer_store_short
-; SI-CHECK: buffer_store_short
-; SI-CHECK: buffer_store_short
-; SI-CHECK-NOT: buffer_store_byte
+; EG-LABEL: {{^}}store_v4i16:
+; EG: MEM_RAT MSKOR
+; EG: MEM_RAT MSKOR
+; EG: MEM_RAT MSKOR
+; EG: MEM_RAT MSKOR
+; EG-NOT: MEM_RAT MSKOR
+; SI-LABEL: {{^}}store_v4i16:
+; SI: buffer_store_short
+; SI: buffer_store_short
+; SI: buffer_store_short
+; SI: buffer_store_short
+; SI-NOT: buffer_store_byte
define void @store_v4i16(<4 x i16> addrspace(1)* %out, <4 x i32> %in) {
entry:
%0 = trunc <4 x i32> %in to <4 x i16>
@@ -148,12 +156,12 @@ entry:
}
; vec2 floating-point stores
-; EG-CHECK-LABEL: {{^}}store_v2f32:
-; EG-CHECK: MEM_RAT_CACHELESS STORE_RAW
-; CM-CHECK-LABEL: {{^}}store_v2f32:
-; CM-CHECK: MEM_RAT_CACHELESS STORE_DWORD
-; SI-CHECK-LABEL: {{^}}store_v2f32:
-; SI-CHECK: buffer_store_dwordx2
+; EG-LABEL: {{^}}store_v2f32:
+; EG: MEM_RAT_CACHELESS STORE_RAW
+; CM-LABEL: {{^}}store_v2f32:
+; CM: MEM_RAT_CACHELESS STORE_DWORD
+; SI-LABEL: {{^}}store_v2f32:
+; SI: buffer_store_dwordx2
define void @store_v2f32(<2 x float> addrspace(1)* %out, float %a, float %b) {
entry:
@@ -163,14 +171,14 @@ entry:
ret void
}
-; EG-CHECK-LABEL: {{^}}store_v4i32:
-; EG-CHECK: MEM_RAT_CACHELESS STORE_RAW
-; EG-CHECK-NOT: MEM_RAT_CACHELESS STORE_RAW
-; CM-CHECK-LABEL: {{^}}store_v4i32:
-; CM-CHECK: MEM_RAT_CACHELESS STORE_DWORD
-; CM-CHECK-NOT: MEM_RAT_CACHELESS STORE_DWORD
-; SI-CHECK-LABEL: {{^}}store_v4i32:
-; SI-CHECK: buffer_store_dwordx4
+; EG-LABEL: {{^}}store_v4i32:
+; EG: MEM_RAT_CACHELESS STORE_RAW
+; EG-NOT: MEM_RAT_CACHELESS STORE_RAW
+; CM-LABEL: {{^}}store_v4i32:
+; CM: MEM_RAT_CACHELESS STORE_DWORD
+; CM-NOT: MEM_RAT_CACHELESS STORE_DWORD
+; SI-LABEL: {{^}}store_v4i32:
+; SI: buffer_store_dwordx4
define void @store_v4i32(<4 x i32> addrspace(1)* %out, <4 x i32> %in) {
entry:
store <4 x i32> %in, <4 x i32> addrspace(1)* %out
@@ -178,8 +186,8 @@ entry:
}
; FUNC-LABEL: {{^}}store_i64_i8:
-; EG-CHECK: MEM_RAT MSKOR
-; SI-CHECK: buffer_store_byte
+; EG: MEM_RAT MSKOR
+; SI: buffer_store_byte
define void @store_i64_i8(i8 addrspace(1)* %out, i64 %in) {
entry:
%0 = trunc i64 %in to i8
@@ -188,8 +196,8 @@ entry:
}
; FUNC-LABEL: {{^}}store_i64_i16:
-; EG-CHECK: MEM_RAT MSKOR
-; SI-CHECK: buffer_store_short
+; EG: MEM_RAT MSKOR
+; SI: buffer_store_short
define void @store_i64_i16(i16 addrspace(1)* %out, i64 %in) {
entry:
%0 = trunc i64 %in to i16
@@ -202,89 +210,89 @@ entry:
;===------------------------------------------------------------------------===;
; FUNC-LABEL: {{^}}store_local_i1:
-; EG-CHECK: LDS_BYTE_WRITE
-; SI-CHECK: ds_write_b8
+; EG: LDS_BYTE_WRITE
+; SI: ds_write_b8
define void @store_local_i1(i1 addrspace(3)* %out) {
entry:
store i1 true, i1 addrspace(3)* %out
ret void
}
-; EG-CHECK-LABEL: {{^}}store_local_i8:
-; EG-CHECK: LDS_BYTE_WRITE
-; SI-CHECK-LABEL: {{^}}store_local_i8:
-; SI-CHECK: ds_write_b8
+; EG-LABEL: {{^}}store_local_i8:
+; EG: LDS_BYTE_WRITE
+; SI-LABEL: {{^}}store_local_i8:
+; SI: ds_write_b8
define void @store_local_i8(i8 addrspace(3)* %out, i8 %in) {
store i8 %in, i8 addrspace(3)* %out
ret void
}
-; EG-CHECK-LABEL: {{^}}store_local_i16:
-; EG-CHECK: LDS_SHORT_WRITE
-; SI-CHECK-LABEL: {{^}}store_local_i16:
-; SI-CHECK: ds_write_b16
+; EG-LABEL: {{^}}store_local_i16:
+; EG: LDS_SHORT_WRITE
+; SI-LABEL: {{^}}store_local_i16:
+; SI: ds_write_b16
define void @store_local_i16(i16 addrspace(3)* %out, i16 %in) {
store i16 %in, i16 addrspace(3)* %out
ret void
}
-; EG-CHECK-LABEL: {{^}}store_local_v2i16:
-; EG-CHECK: LDS_WRITE
-; CM-CHECK-LABEL: {{^}}store_local_v2i16:
-; CM-CHECK: LDS_WRITE
-; SI-CHECK-LABEL: {{^}}store_local_v2i16:
-; SI-CHECK: ds_write_b16
-; SI-CHECK: ds_write_b16
+; EG-LABEL: {{^}}store_local_v2i16:
+; EG: LDS_WRITE
+; CM-LABEL: {{^}}store_local_v2i16:
+; CM: LDS_WRITE
+; SI-LABEL: {{^}}store_local_v2i16:
+; SI: ds_write_b16
+; SI: ds_write_b16
define void @store_local_v2i16(<2 x i16> addrspace(3)* %out, <2 x i16> %in) {
entry:
store <2 x i16> %in, <2 x i16> addrspace(3)* %out
ret void
}
-; EG-CHECK-LABEL: {{^}}store_local_v4i8:
-; EG-CHECK: LDS_WRITE
-; CM-CHECK-LABEL: {{^}}store_local_v4i8:
-; CM-CHECK: LDS_WRITE
-; SI-CHECK-LABEL: {{^}}store_local_v4i8:
-; SI-CHECK: ds_write_b8
-; SI-CHECK: ds_write_b8
-; SI-CHECK: ds_write_b8
-; SI-CHECK: ds_write_b8
+; EG-LABEL: {{^}}store_local_v4i8:
+; EG: LDS_WRITE
+; CM-LABEL: {{^}}store_local_v4i8:
+; CM: LDS_WRITE
+; SI-LABEL: {{^}}store_local_v4i8:
+; SI: ds_write_b8
+; SI: ds_write_b8
+; SI: ds_write_b8
+; SI: ds_write_b8
define void @store_local_v4i8(<4 x i8> addrspace(3)* %out, <4 x i8> %in) {
entry:
store <4 x i8> %in, <4 x i8> addrspace(3)* %out
ret void
}
-; EG-CHECK-LABEL: {{^}}store_local_v2i32:
-; EG-CHECK: LDS_WRITE
-; EG-CHECK: LDS_WRITE
-; CM-CHECK-LABEL: {{^}}store_local_v2i32:
-; CM-CHECK: LDS_WRITE
-; CM-CHECK: LDS_WRITE
-; SI-CHECK-LABEL: {{^}}store_local_v2i32:
-; SI-CHECK: ds_write_b64
+; EG-LABEL: {{^}}store_local_v2i32:
+; EG: LDS_WRITE
+; EG: LDS_WRITE
+; CM-LABEL: {{^}}store_local_v2i32:
+; CM: LDS_WRITE
+; CM: LDS_WRITE
+; SI-LABEL: {{^}}store_local_v2i32:
+; SI: ds_write_b64
define void @store_local_v2i32(<2 x i32> addrspace(3)* %out, <2 x i32> %in) {
entry:
store <2 x i32> %in, <2 x i32> addrspace(3)* %out
ret void
}
-; EG-CHECK-LABEL: {{^}}store_local_v4i32:
-; EG-CHECK: LDS_WRITE
-; EG-CHECK: LDS_WRITE
-; EG-CHECK: LDS_WRITE
-; EG-CHECK: LDS_WRITE
-; CM-CHECK-LABEL: {{^}}store_local_v4i32:
-; CM-CHECK: LDS_WRITE
-; CM-CHECK: LDS_WRITE
-; CM-CHECK: LDS_WRITE
-; CM-CHECK: LDS_WRITE
-; SI-CHECK-LABEL: {{^}}store_local_v4i32:
-; SI-CHECK: ds_write_b32
-; SI-CHECK: ds_write_b32
-; SI-CHECK: ds_write_b32
-; SI-CHECK: ds_write_b32
+; EG-LABEL: {{^}}store_local_v4i32:
+; EG: LDS_WRITE
+; EG: LDS_WRITE
+; EG: LDS_WRITE
+; EG: LDS_WRITE
+; CM-LABEL: {{^}}store_local_v4i32:
+; CM: LDS_WRITE
+; CM: LDS_WRITE
+; CM: LDS_WRITE
+; CM: LDS_WRITE
+; SI-LABEL: {{^}}store_local_v4i32:
+; SI: ds_write_b32
+; SI: ds_write_b32
+; SI: ds_write_b32
+; SI: ds_write_b32
define void @store_local_v4i32(<4 x i32> addrspace(3)* %out, <4 x i32> %in) {
entry:
store <4 x i32> %in, <4 x i32> addrspace(3)* %out
@@ -292,8 +300,8 @@ entry:
}
; FUNC-LABEL: {{^}}store_local_i64_i8:
-; EG-CHECK: LDS_BYTE_WRITE
-; SI-CHECK: ds_write_b8
+; EG: LDS_BYTE_WRITE
+; SI: ds_write_b8
define void @store_local_i64_i8(i8 addrspace(3)* %out, i64 %in) {
entry:
%0 = trunc i64 %in to i8
@@ -302,8 +310,8 @@ entry:
}
; FUNC-LABEL: {{^}}store_local_i64_i16:
-; EG-CHECK: LDS_SHORT_WRITE
-; SI-CHECK: ds_write_b16
+; EG: LDS_SHORT_WRITE
+; SI: ds_write_b16
define void @store_local_i64_i16(i16 addrspace(3)* %out, i64 %in) {
entry:
%0 = trunc i64 %in to i16
@@ -318,12 +326,12 @@ entry:
; Evergreen / Northern Islands don't support 64-bit stores yet, so there should
; be two 32-bit stores.
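; A minimal illustration (editorial, not part of this patch) of the split the
; comment above describes, in this tree's typed-pointer IR:
;   define void @store_i64(i64 addrspace(1)* %out, i64 %in) {
;     store i64 %in, i64 addrspace(1)* %out   ; EG/NI: two 32-bit RAT stores
;     ret void
;   }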
-; EG-CHECK-LABEL: {{^}}vecload2:
-; EG-CHECK: MEM_RAT_CACHELESS STORE_RAW
-; CM-CHECK-LABEL: {{^}}vecload2:
-; CM-CHECK: MEM_RAT_CACHELESS STORE_DWORD
-; SI-CHECK-LABEL: {{^}}vecload2:
-; SI-CHECK: buffer_store_dwordx2
+; EG-LABEL: {{^}}vecload2:
+; EG: MEM_RAT_CACHELESS STORE_RAW
+; CM-LABEL: {{^}}vecload2:
+; CM: MEM_RAT_CACHELESS STORE_DWORD
+; SI-LABEL: {{^}}vecload2:
+; SI: buffer_store_dwordx2
define void @vecload2(i32 addrspace(1)* nocapture %out, i32 addrspace(2)* nocapture %mem) #0 {
entry:
%0 = load i32 addrspace(2)* %mem, align 4
@@ -341,14 +349,14 @@ attributes #0 = { nounwind "less-precise-fpmad"="false" "no-frame-pointer-elim"=
; FUNC-LABEL: {{^}}"i128-const-store":
; FIXME: We should be able to do this with one store instruction
-; EG-CHECK: STORE_RAW
-; EG-CHECK: STORE_RAW
-; EG-CHECK: STORE_RAW
-; EG-CHECK: STORE_RAW
-; CM-CHECK: STORE_DWORD
-; CM-CHECK: STORE_DWORD
-; CM-CHECK: STORE_DWORD
-; CM-CHECK: STORE_DWORD
+; EG: STORE_RAW
+; EG: STORE_RAW
+; EG: STORE_RAW
+; EG: STORE_RAW
+; CM: STORE_DWORD
+; CM: STORE_DWORD
+; CM: STORE_DWORD
+; CM: STORE_DWORD
; SI: buffer_store_dwordx2
; SI: buffer_store_dwordx2
define void @i128-const-store(i32 addrspace(1)* %out) {
diff --git a/test/CodeGen/R600/store.r600.ll b/test/CodeGen/R600/store.r600.ll
index 3df30d4..2197260 100644
--- a/test/CodeGen/R600/store.r600.ll
+++ b/test/CodeGen/R600/store.r600.ll
@@ -1,10 +1,10 @@
-; RUN: llc < %s -march=r600 -mcpu=redwood | FileCheck --check-prefix=EG-CHECK %s
+; RUN: llc < %s -march=r600 -mcpu=redwood | FileCheck --check-prefix=EG %s
; XXX: Merge this test into store.ll once it is supported on SI
; v4i32 store
-; EG-CHECK: {{^}}store_v4i32:
-; EG-CHECK: MEM_RAT_CACHELESS STORE_RAW T{{[0-9]+\.XYZW, T[0-9]+\.X}}, 1
+; EG: {{^}}store_v4i32:
+; EG: MEM_RAT_CACHELESS STORE_RAW T{{[0-9]+\.XYZW, T[0-9]+\.X}}, 1
define void @store_v4i32(<4 x i32> addrspace(1)* %out, <4 x i32> addrspace(1)* %in) {
%1 = load <4 x i32> addrspace(1) * %in
@@ -13,8 +13,8 @@ define void @store_v4i32(<4 x i32> addrspace(1)* %out, <4 x i32> addrspace(1)* %
}
; v4f32 store
-; EG-CHECK: {{^}}store_v4f32:
-; EG-CHECK: MEM_RAT_CACHELESS STORE_RAW T{{[0-9]+\.XYZW, T[0-9]+\.X}}, 1
+; EG: {{^}}store_v4f32:
+; EG: MEM_RAT_CACHELESS STORE_RAW T{{[0-9]+\.XYZW, T[0-9]+\.X}}, 1
define void @store_v4f32(<4 x float> addrspace(1)* %out, <4 x float> addrspace(1)* %in) {
%1 = load <4 x float> addrspace(1) * %in
store <4 x float> %1, <4 x float> addrspace(1)* %out
diff --git a/test/CodeGen/R600/sub.ll b/test/CodeGen/R600/sub.ll
index 2bbc0cf..be48e18 100644
--- a/test/CodeGen/R600/sub.ll
+++ b/test/CodeGen/R600/sub.ll
@@ -1,16 +1,31 @@
-;RUN: llc -march=r600 -mcpu=redwood < %s | FileCheck -check-prefix=EG -check-prefix=FUNC %s
-;RUN: llc -march=r600 -mcpu=verde -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
+; RUN: llc -march=amdgcn -mcpu=verde -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
+; RUN: llc -march=r600 -mcpu=redwood < %s | FileCheck -check-prefix=EG -check-prefix=FUNC %s
+
declare i32 @llvm.r600.read.tidig.x() readnone
-;FUNC-LABEL: {{^}}test2:
-;EG: SUB_INT {{\** *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
-;EG: SUB_INT {{\** *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
+; FUNC-LABEL: {{^}}test_sub_i32:
+; EG: SUB_INT {{\** *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
+
+; SI: v_subrev_i32_e32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
+define void @test_sub_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %in) {
+ %b_ptr = getelementptr i32 addrspace(1)* %in, i32 1
+ %a = load i32 addrspace(1)* %in
+ %b = load i32 addrspace(1)* %b_ptr
+ %result = sub i32 %a, %b
+ store i32 %result, i32 addrspace(1)* %out
+ ret void
+}
-;SI: v_sub_i32_e32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
-;SI: v_sub_i32_e32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
-define void @test2(<2 x i32> addrspace(1)* %out, <2 x i32> addrspace(1)* %in) {
+; FUNC-LABEL: {{^}}test_sub_v2i32:
+; EG: SUB_INT {{\** *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
+; EG: SUB_INT {{\** *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
+
+; SI: v_sub_i32_e32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
+; SI: v_sub_i32_e32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
+
+define void @test_sub_v2i32(<2 x i32> addrspace(1)* %out, <2 x i32> addrspace(1)* %in) {
%b_ptr = getelementptr <2 x i32> addrspace(1)* %in, i32 1
%a = load <2 x i32> addrspace(1) * %in
%b = load <2 x i32> addrspace(1) * %b_ptr
@@ -19,18 +34,18 @@ define void @test2(<2 x i32> addrspace(1)* %out, <2 x i32> addrspace(1)* %in) {
ret void
}
-;FUNC-LABEL: {{^}}test4:
-;EG: SUB_INT {{\** *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
-;EG: SUB_INT {{\** *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
-;EG: SUB_INT {{\** *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
-;EG: SUB_INT {{\** *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
+; FUNC-LABEL: {{^}}test_sub_v4i32:
+; EG: SUB_INT {{\** *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
+; EG: SUB_INT {{\** *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
+; EG: SUB_INT {{\** *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
+; EG: SUB_INT {{\** *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
-;SI: v_sub_i32_e32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
-;SI: v_sub_i32_e32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
-;SI: v_sub_i32_e32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
-;SI: v_sub_i32_e32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
+; SI: v_sub_i32_e32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
+; SI: v_sub_i32_e32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
+; SI: v_sub_i32_e32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
+; SI: v_sub_i32_e32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
-define void @test4(<4 x i32> addrspace(1)* %out, <4 x i32> addrspace(1)* %in) {
+define void @test_sub_v4i32(<4 x i32> addrspace(1)* %out, <4 x i32> addrspace(1)* %in) {
%b_ptr = getelementptr <4 x i32> addrspace(1)* %in, i32 1
%a = load <4 x i32> addrspace(1) * %in
%b = load <4 x i32> addrspace(1) * %b_ptr
@@ -73,3 +88,39 @@ define void @v_sub_i64(i64 addrspace(1)* noalias %out, i64 addrspace(1)* noalias
store i64 %result, i64 addrspace(1)* %out, align 8
ret void
}
+
+; FUNC-LABEL: {{^}}v_test_sub_v2i64:
+; SI: v_sub_i32_e32
+; SI: v_subb_u32_e32
+; SI: v_sub_i32_e32
+; SI: v_subb_u32_e32
+define void @v_test_sub_v2i64(<2 x i64> addrspace(1)* %out, <2 x i64> addrspace(1)* noalias %inA, <2 x i64> addrspace(1)* noalias %inB) {
+ %tid = call i32 @llvm.r600.read.tidig.x() readnone
+ %a_ptr = getelementptr <2 x i64> addrspace(1)* %inA, i32 %tid
+ %b_ptr = getelementptr <2 x i64> addrspace(1)* %inB, i32 %tid
+ %a = load <2 x i64> addrspace(1)* %a_ptr
+ %b = load <2 x i64> addrspace(1)* %b_ptr
+ %result = sub <2 x i64> %a, %b
+ store <2 x i64> %result, <2 x i64> addrspace(1)* %out
+ ret void
+}
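+; Editorial note: each i64 lane lowers to a low subtract that produces a
+; borrow (v_sub_i32) plus a high subtract-with-borrow (v_subb_u32), so v2i64
+; yields the two pairs checked above and v4i64 the four pairs checked below.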
+
+; FUNC-LABEL: {{^}}v_test_sub_v4i64:
+; SI: v_sub_i32_e32
+; SI: v_subb_u32_e32
+; SI: v_sub_i32_e32
+; SI: v_subb_u32_e32
+; SI: v_sub_i32_e32
+; SI: v_subb_u32_e32
+; SI: v_sub_i32_e32
+; SI: v_subb_u32_e32
+define void @v_test_sub_v4i64(<4 x i64> addrspace(1)* %out, <4 x i64> addrspace(1)* noalias %inA, <4 x i64> addrspace(1)* noalias %inB) {
+ %tid = call i32 @llvm.r600.read.tidig.x() readnone
+ %a_ptr = getelementptr <4 x i64> addrspace(1)* %inA, i32 %tid
+ %b_ptr = getelementptr <4 x i64> addrspace(1)* %inB, i32 %tid
+ %a = load <4 x i64> addrspace(1)* %a_ptr
+ %b = load <4 x i64> addrspace(1)* %b_ptr
+ %result = sub <4 x i64> %a, %b
+ store <4 x i64> %result, <4 x i64> addrspace(1)* %out
+ ret void
+}
diff --git a/test/CodeGen/R600/subreg-coalescer-crash.ll b/test/CodeGen/R600/subreg-coalescer-crash.ll
new file mode 100644
index 0000000..c4dae47
--- /dev/null
+++ b/test/CodeGen/R600/subreg-coalescer-crash.ll
@@ -0,0 +1,109 @@
+; RUN: llc -march=amdgcn -mcpu=SI -verify-machineinstrs -o - %s
+; RUN: llc -march=amdgcn -mcpu=tonga -verify-machineinstrs -o - %s
+
+; SI-LABEL:{{^}}row_filter_C1_D0:
+; SI: s_endpgm
+; Function Attrs: nounwind
+define void @row_filter_C1_D0() {
+entry:
+ br i1 undef, label %for.inc.1, label %do.body.preheader
+
+do.body.preheader: ; preds = %entry
+ %0 = insertelement <4 x i32> zeroinitializer, i32 undef, i32 1
+ br i1 undef, label %do.body56.1, label %do.body90
+
+do.body90: ; preds = %do.body56.2, %do.body56.1, %do.body.preheader
+ %1 = phi <4 x i32> [ %6, %do.body56.2 ], [ %5, %do.body56.1 ], [ %0, %do.body.preheader ]
+ %2 = insertelement <4 x i32> %1, i32 undef, i32 2
+ %3 = insertelement <4 x i32> %2, i32 undef, i32 3
+ br i1 undef, label %do.body124.1, label %do.body.1562.preheader
+
+do.body.1562.preheader: ; preds = %do.body124.1, %do.body90
+ %storemerge = phi <4 x i32> [ %3, %do.body90 ], [ %7, %do.body124.1 ]
+ %4 = insertelement <4 x i32> undef, i32 undef, i32 1
+ br label %for.inc.1
+
+do.body56.1: ; preds = %do.body.preheader
+ %5 = insertelement <4 x i32> %0, i32 undef, i32 1
+ %or.cond472.1 = or i1 undef, undef
+ br i1 %or.cond472.1, label %do.body56.2, label %do.body90
+
+do.body56.2: ; preds = %do.body56.1
+ %6 = insertelement <4 x i32> %5, i32 undef, i32 1
+ br label %do.body90
+
+do.body124.1: ; preds = %do.body90
+ %7 = insertelement <4 x i32> %3, i32 undef, i32 3
+ br label %do.body.1562.preheader
+
+for.inc.1: ; preds = %do.body.1562.preheader, %entry
+ %storemerge591 = phi <4 x i32> [ zeroinitializer, %entry ], [ %storemerge, %do.body.1562.preheader ]
+ %add.i495 = add <4 x i32> %storemerge591, undef
+ unreachable
+}
+
+; SI-LABEL: {{^}}foo:
+; SI: s_endpgm
+define void @foo() #0 {
+bb:
+ br i1 undef, label %bb2, label %bb1
+
+bb1: ; preds = %bb
+ br i1 undef, label %bb4, label %bb6
+
+bb2: ; preds = %bb4, %bb
+ %tmp = phi float [ %tmp5, %bb4 ], [ 0.000000e+00, %bb ]
+ br i1 undef, label %bb9, label %bb13
+
+bb4: ; preds = %bb7, %bb6, %bb1
+ %tmp5 = phi float [ undef, %bb1 ], [ undef, %bb6 ], [ %tmp8, %bb7 ]
+ br label %bb2
+
+bb6: ; preds = %bb1
+ br i1 undef, label %bb7, label %bb4
+
+bb7: ; preds = %bb6
+ %tmp8 = fmul float undef, undef
+ br label %bb4
+
+bb9: ; preds = %bb2
+ %tmp10 = call <4 x float> @llvm.SI.sample.v2i32(<2 x i32> undef, <32 x i8> undef, <16 x i8> undef, i32 2)
+ %tmp11 = extractelement <4 x float> %tmp10, i32 1
+ %tmp12 = extractelement <4 x float> %tmp10, i32 3
+ br label %bb14
+
+bb13: ; preds = %bb2
+ br i1 undef, label %bb23, label %bb24
+
+bb14: ; preds = %bb27, %bb24, %bb9
+ %tmp15 = phi float [ %tmp12, %bb9 ], [ undef, %bb27 ], [ 0.000000e+00, %bb24 ]
+ %tmp16 = phi float [ %tmp11, %bb9 ], [ undef, %bb27 ], [ %tmp25, %bb24 ]
+ %tmp17 = fmul float 10.5, %tmp16
+ %tmp18 = fmul float 11.5, %tmp15
+ call void @llvm.SI.export(i32 15, i32 1, i32 1, i32 0, i32 1, float %tmp18, float %tmp17, float %tmp17, float %tmp17)
+ ret void
+
+bb23: ; preds = %bb13
+ br i1 undef, label %bb24, label %bb26
+
+bb24: ; preds = %bb26, %bb23, %bb13
+ %tmp25 = phi float [ %tmp, %bb13 ], [ %tmp, %bb26 ], [ 0.000000e+00, %bb23 ]
+ br i1 undef, label %bb27, label %bb14
+
+bb26: ; preds = %bb23
+ br label %bb24
+
+bb27: ; preds = %bb24
+ br label %bb14
+}
+
+; Function Attrs: nounwind readnone
+declare <4 x float> @llvm.SI.sample.v2i32(<2 x i32>, <32 x i8>, <16 x i8>, i32) #1
+
+; Function Attrs: nounwind readnone
+declare i32 @llvm.SI.packf16(float, float) #1
+
+declare void @llvm.SI.export(i32, i32, i32, i32, i32, float, float, float, float)
+
+attributes #0 = { "ShaderType"="0" "enable-no-nans-fp-math"="true" "unsafe-fp-math"="true" }
+attributes #1 = { nounwind readnone }
diff --git a/test/CodeGen/R600/swizzle-export.ll b/test/CodeGen/R600/swizzle-export.ll
index 3e6f7a7..5eaca76 100644
--- a/test/CodeGen/R600/swizzle-export.ll
+++ b/test/CodeGen/R600/swizzle-export.ll
@@ -1,10 +1,10 @@
-; RUN: llc < %s -march=r600 -mcpu=redwood | FileCheck --check-prefix=EG-CHECK %s
+; RUN: llc < %s -march=r600 -mcpu=redwood | FileCheck --check-prefix=EG %s
-;EG-CHECK: {{^}}main:
-;EG-CHECK: EXPORT T{{[0-9]+}}.XYXX
-;EG-CHECK: EXPORT T{{[0-9]+}}.ZXXX
-;EG-CHECK: EXPORT T{{[0-9]+}}.XXWX
-;EG-CHECK: EXPORT T{{[0-9]+}}.XXXW
+;EG: {{^}}main:
+;EG: EXPORT T{{[0-9]+}}.XYXX
+;EG: EXPORT T{{[0-9]+}}.ZXXX
+;EG: EXPORT T{{[0-9]+}}.XXWX
+;EG: EXPORT T{{[0-9]+}}.XXXW
define void @main(<4 x float> inreg %reg0, <4 x float> inreg %reg1) #0 {
main_body:
@@ -92,9 +92,9 @@ main_body:
ret void
}
-; EG-CHECK: {{^}}main2:
-; EG-CHECK: T{{[0-9]+}}.XY__
-; EG-CHECK: T{{[0-9]+}}.ZXY0
+; EG: {{^}}main2:
+; EG: T{{[0-9]+}}.XY__
+; EG: T{{[0-9]+}}.ZXY0
define void @main2(<4 x float> inreg %reg0, <4 x float> inreg %reg1) #0 {
main_body:
diff --git a/test/CodeGen/R600/trunc-cmp-constant.ll b/test/CodeGen/R600/trunc-cmp-constant.ll
new file mode 100644
index 0000000..a097ab0
--- /dev/null
+++ b/test/CodeGen/R600/trunc-cmp-constant.ll
@@ -0,0 +1,170 @@
+; RUN: llc -march=amdgcn -mcpu=SI -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
+; RUN: llc -march=amdgcn -mcpu=tonga -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
+
+; FUNC-LABEL: {{^}}sextload_i1_to_i32_trunc_cmp_eq_0:
+; SI: buffer_load_ubyte [[LOAD:v[0-9]+]]
+; SI: v_and_b32_e32 [[TMP:v[0-9]+]], 1, [[LOAD]]
+; SI: v_cmp_eq_i32_e64 [[CMP:s\[[0-9]+:[0-9]+\]]], [[TMP]], 1{{$}}
+; SI: s_xor_b64 s{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, -1{{$}}
+; SI: v_cndmask_b32_e64
+; SI: buffer_store_byte
+define void @sextload_i1_to_i32_trunc_cmp_eq_0(i1 addrspace(1)* %out, i1 addrspace(1)* %in) nounwind {
+ %load = load i1 addrspace(1)* %in
+ %ext = sext i1 %load to i32
+ %cmp = icmp eq i32 %ext, 0
+ store i1 %cmp, i1 addrspace(1)* %out
+ ret void
+}
+
+; FIXME: The negate should be inverting the compare.
+; FUNC-LABEL: {{^}}zextload_i1_to_i32_trunc_cmp_eq_0:
+; SI: buffer_load_ubyte [[LOAD:v[0-9]+]]
+; SI: v_and_b32_e32 [[TMP:v[0-9]+]], 1, [[LOAD]]
+; SI: v_cmp_eq_i32_e64 [[CMP0:s\[[0-9]+:[0-9]+\]]], [[TMP]], 1{{$}}
+; SI-NEXT: s_xor_b64 [[NEG:s\[[0-9]+:[0-9]+\]]], [[CMP0]], -1
+; SI-NEXT: v_cndmask_b32_e64 [[RESULT:v[0-9]+]], 0, 1, [[NEG]]
+; SI-NEXT: buffer_store_byte [[RESULT]]
+define void @zextload_i1_to_i32_trunc_cmp_eq_0(i1 addrspace(1)* %out, i1 addrspace(1)* %in) nounwind {
+ %load = load i1 addrspace(1)* %in
+ %ext = zext i1 %load to i32
+ %cmp = icmp eq i32 %ext, 0
+ store i1 %cmp, i1 addrspace(1)* %out
+ ret void
+}
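+; Editorial note: the s_xor with -1 above inverts the condition mask produced
+; by the v_cmp_eq; the FIXME asks for that inversion to be folded into the
+; compare itself (e.g. a v_cmp_ne) instead of a separate xor.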
+
+; FUNC-LABEL: {{^}}sextload_i1_to_i32_trunc_cmp_eq_1:
+; SI: v_mov_b32_e32 [[RESULT:v[0-9]+]], 0{{$}}
+; SI: buffer_store_byte [[RESULT]]
+define void @sextload_i1_to_i32_trunc_cmp_eq_1(i1 addrspace(1)* %out, i1 addrspace(1)* %in) nounwind {
+ %load = load i1 addrspace(1)* %in
+ %ext = sext i1 %load to i32
+ %cmp = icmp eq i32 %ext, 1
+ store i1 %cmp, i1 addrspace(1)* %out
+ ret void
+}
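+; Editorial note: sext i1 yields only 0 or -1, so "icmp eq (sext i1 x), 1"
+; folds to the constant false and the backend simply stores 0, which is why
+; the checks above expect only a v_mov of 0. The zext cases compared against
+; -1 fold away the same way, since zext i1 yields only 0 or 1.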
+
+; FUNC-LABEL: {{^}}zextload_i1_to_i32_trunc_cmp_eq_1:
+; SI: buffer_load_ubyte [[LOAD:v[0-9]+]]
+; SI: v_and_b32_e32 [[RESULT:v[0-9]+]], 1, [[LOAD]]
+; SI-NEXT: buffer_store_byte [[RESULT]]
+define void @zextload_i1_to_i32_trunc_cmp_eq_1(i1 addrspace(1)* %out, i1 addrspace(1)* %in) nounwind {
+ %load = load i1 addrspace(1)* %in
+ %ext = zext i1 %load to i32
+ %cmp = icmp eq i32 %ext, 1
+ store i1 %cmp, i1 addrspace(1)* %out
+ ret void
+}
+
+; FUNC-LABEL: {{^}}sextload_i1_to_i32_trunc_cmp_eq_neg1:
+; SI: buffer_load_ubyte [[LOAD:v[0-9]+]]
+; SI: v_and_b32_e32 [[RESULT:v[0-9]+]], 1, [[LOAD]]
+; SI-NEXT: buffer_store_byte [[RESULT]]
+define void @sextload_i1_to_i32_trunc_cmp_eq_neg1(i1 addrspace(1)* %out, i1 addrspace(1)* %in) nounwind {
+ %load = load i1 addrspace(1)* %in
+ %ext = sext i1 %load to i32
+ %cmp = icmp eq i32 %ext, -1
+ store i1 %cmp, i1 addrspace(1)* %out
+ ret void
+}
+
+; FUNC-LABEL: {{^}}zextload_i1_to_i32_trunc_cmp_eq_neg1:
+; SI: v_mov_b32_e32 [[RESULT:v[0-9]+]], 0{{$}}
+; SI: buffer_store_byte [[RESULT]]
+define void @zextload_i1_to_i32_trunc_cmp_eq_neg1(i1 addrspace(1)* %out, i1 addrspace(1)* %in) nounwind {
+ %load = load i1 addrspace(1)* %in
+ %ext = zext i1 %load to i32
+ %cmp = icmp eq i32 %ext, -1
+ store i1 %cmp, i1 addrspace(1)* %out
+ ret void
+}
+
+
+; FUNC-LABEL: {{^}}sextload_i1_to_i32_trunc_cmp_ne_0:
+; SI: buffer_load_ubyte [[LOAD:v[0-9]+]]
+; SI: v_and_b32_e32 [[TMP:v[0-9]+]], 1, [[LOAD]]
+; SI-NEXT: buffer_store_byte [[TMP]]
+define void @sextload_i1_to_i32_trunc_cmp_ne_0(i1 addrspace(1)* %out, i1 addrspace(1)* %in) nounwind {
+ %load = load i1 addrspace(1)* %in
+ %ext = sext i1 %load to i32
+ %cmp = icmp ne i32 %ext, 0
+ store i1 %cmp, i1 addrspace(1)* %out
+ ret void
+}
+
+; FUNC-LABEL: {{^}}zextload_i1_to_i32_trunc_cmp_ne_0:
+; SI: buffer_load_ubyte [[LOAD:v[0-9]+]]
+; SI: v_and_b32_e32 [[TMP:v[0-9]+]], 1, [[LOAD]]
+; SI-NEXT: buffer_store_byte [[TMP]]
+define void @zextload_i1_to_i32_trunc_cmp_ne_0(i1 addrspace(1)* %out, i1 addrspace(1)* %in) nounwind {
+ %load = load i1 addrspace(1)* %in
+ %ext = zext i1 %load to i32
+ %cmp = icmp ne i32 %ext, 0
+ store i1 %cmp, i1 addrspace(1)* %out
+ ret void
+}
+
+; FUNC-LABEL: {{^}}sextload_i1_to_i32_trunc_cmp_ne_1:
+; SI: v_mov_b32_e32 [[RESULT:v[0-9]+]], 1{{$}}
+; SI: buffer_store_byte [[RESULT]]
+define void @sextload_i1_to_i32_trunc_cmp_ne_1(i1 addrspace(1)* %out, i1 addrspace(1)* %in) nounwind {
+ %load = load i1 addrspace(1)* %in
+ %ext = sext i1 %load to i32
+ %cmp = icmp ne i32 %ext, 1
+ store i1 %cmp, i1 addrspace(1)* %out
+ ret void
+}
+
+; FUNC-LABEL: {{^}}zextload_i1_to_i32_trunc_cmp_ne_1:
+; SI: buffer_load_ubyte [[LOAD:v[0-9]+]]
+; SI: v_and_b32_e32 [[TMP:v[0-9]+]], 1, [[LOAD]]
+; SI: v_cmp_eq_i32_e64 [[CMP0:s\[[0-9]+:[0-9]+\]]], [[TMP]], 1{{$}}
+; SI-NEXT: s_xor_b64 [[NEG:s\[[0-9]+:[0-9]+\]]], [[CMP0]], -1
+; SI-NEXT: v_cndmask_b32_e64 [[RESULT:v[0-9]+]], 0, 1, [[NEG]]
+; SI-NEXT: buffer_store_byte [[RESULT]]
+define void @zextload_i1_to_i32_trunc_cmp_ne_1(i1 addrspace(1)* %out, i1 addrspace(1)* %in) nounwind {
+ %load = load i1 addrspace(1)* %in
+ %ext = zext i1 %load to i32
+ %cmp = icmp ne i32 %ext, 1
+ store i1 %cmp, i1 addrspace(1)* %out
+ ret void
+}
+
+; FIXME: This should be one compare.
+; FUNC-LABEL: {{^}}sextload_i1_to_i32_trunc_cmp_ne_neg1:
+; XSI: buffer_load_ubyte [[LOAD:v[0-9]+]]
+; XSI: v_and_b32_e32 [[TMP:v[0-9]+]], 1, [[LOAD]]
+; XSI: v_cmp_eq_i32_e64 [[CMP0:s\[[0-9]+:[0-9]+\]]], [[TMP]], 0{{$}}
+; XSI-NEXT: v_cndmask_b32_e64 [[RESULT:v[0-9]+]], 0, 1, [[CMP0]]
+; XSI-NEXT: buffer_store_byte [[RESULT]]
+define void @sextload_i1_to_i32_trunc_cmp_ne_neg1(i1 addrspace(1)* %out, i1 addrspace(1)* %in) nounwind {
+ %load = load i1 addrspace(1)* %in
+ %ext = sext i1 %load to i32
+ %cmp = icmp ne i32 %ext, -1
+ store i1 %cmp, i1 addrspace(1)* %out
+ ret void
+}
+
+; FUNC-LABEL: {{^}}zextload_i1_to_i32_trunc_cmp_ne_neg1:
+; SI: v_mov_b32_e32 [[RESULT:v[0-9]+]], 1{{$}}
+; SI: buffer_store_byte [[RESULT]]
+define void @zextload_i1_to_i32_trunc_cmp_ne_neg1(i1 addrspace(1)* %out, i1 addrspace(1)* %in) nounwind {
+ %load = load i1 addrspace(1)* %in
+ %ext = zext i1 %load to i32
+ %cmp = icmp ne i32 %ext, -1
+ store i1 %cmp, i1 addrspace(1)* %out
+ ret void
+}
+
+; FUNC-LABEL: {{^}}masked_load_i1_to_i32_trunc_cmp_ne_neg1:
+; SI: buffer_load_sbyte [[LOAD:v[0-9]+]]
+; SI: v_cmp_ne_i32_e64 {{s\[[0-9]+:[0-9]+\]}}, [[LOAD]], -1{{$}}
+; SI-NEXT: v_cndmask_b32_e64
+; SI-NEXT: buffer_store_byte
+define void @masked_load_i1_to_i32_trunc_cmp_ne_neg1(i1 addrspace(1)* %out, i8 addrspace(1)* %in) nounwind {
+ %load = load i8 addrspace(1)* %in
+ %masked = and i8 %load, 255
+ %ext = sext i8 %masked to i32
+ %cmp = icmp ne i32 %ext, -1
+ store i1 %cmp, i1 addrspace(1)* %out
+ ret void
+}
diff --git a/test/CodeGen/R600/trunc-store-i1.ll b/test/CodeGen/R600/trunc-store-i1.ll
index 3c1b19f..b71a838 100644
--- a/test/CodeGen/R600/trunc-store-i1.ll
+++ b/test/CodeGen/R600/trunc-store-i1.ll
@@ -1,4 +1,5 @@
-; RUN: llc -march=r600 -mcpu=SI -verify-machineinstrs< %s | FileCheck -check-prefix=SI %s
+; RUN: llc -march=amdgcn -mcpu=SI -verify-machineinstrs< %s | FileCheck -check-prefix=SI %s
+; RUN: llc -march=amdgcn -mcpu=tonga -verify-machineinstrs< %s | FileCheck -check-prefix=SI %s
; SI-LABEL: {{^}}global_truncstore_i32_to_i1:
diff --git a/test/CodeGen/R600/trunc.ll b/test/CodeGen/R600/trunc.ll
index 7519d10..fa44264 100644
--- a/test/CodeGen/R600/trunc.ll
+++ b/test/CodeGen/R600/trunc.ll
@@ -1,6 +1,8 @@
-; RUN: llc -march=r600 -mcpu=SI -verify-machineinstrs< %s | FileCheck -check-prefix=SI %s
+; RUN: llc -march=amdgcn -mcpu=SI -verify-machineinstrs< %s | FileCheck -check-prefix=SI %s
; RUN: llc -march=r600 -mcpu=cypress < %s | FileCheck -check-prefix=EG %s
+declare i32 @llvm.r600.read.tidig.x() nounwind readnone
+
define void @trunc_i64_to_i32_store(i32 addrspace(1)* %out, i64 %in) {
; SI-LABEL: {{^}}trunc_i64_to_i32_store:
; SI: s_load_dword [[SLOAD:s[0-9]+]], s[0:1], 0xb
@@ -34,6 +36,8 @@ define void @trunc_load_shl_i64(i32 addrspace(1)* %out, i64 %a) {
; SI: s_lshl_b64 s{{\[}}[[LO_SHL:[0-9]+]]:{{[0-9]+\]}}, s{{\[}}[[LO_SREG]]:{{[0-9]+\]}}, 2
; SI: s_add_u32 s[[LO_SREG2:[0-9]+]], s[[LO_SHL]],
; SI: s_addc_u32
+; SI: v_mov_b32_e32
+; SI: v_mov_b32_e32
; SI: v_mov_b32_e32 v[[LO_VREG:[0-9]+]], s[[LO_SREG2]]
; SI: buffer_store_dword v[[LO_VREG]],
define void @trunc_shl_i64(i64 addrspace(1)* %out2, i32 addrspace(1)* %out, i64 %a) {
@@ -65,3 +69,32 @@ define void @sgpr_trunc_i32_to_i1(i32 addrspace(1)* %out, i32 %a) {
store i32 %result, i32 addrspace(1)* %out, align 4
ret void
}
+
+; SI-LABEL: {{^}}s_trunc_i64_to_i1:
+; SI: s_load_dwordx2 s{{\[}}[[SLO:[0-9]+]]:{{[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, 0xb
+; SI: v_and_b32_e64 [[MASKED:v[0-9]+]], 1, s[[SLO]]
+; SI: v_cmp_eq_i32_e64 [[CMP:s\[[0-9]+:[0-9]+\]]], [[MASKED]], 1
+; SI: v_cndmask_b32_e64 {{v[0-9]+}}, -12, 63, [[CMP]]
+define void @s_trunc_i64_to_i1(i32 addrspace(1)* %out, i64 %x) {
+ %trunc = trunc i64 %x to i1
+ %sel = select i1 %trunc, i32 63, i32 -12
+ store i32 %sel, i32 addrspace(1)* %out
+ ret void
+}
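+; Editorial note: trunc to i1 keeps only bit 0, so the backend masks with 1
+; (v_and_b32) and compares the result against 1 to form the condition that
+; the select (v_cndmask_b32 between -12 and 63) consumes; the VGPR variant
+; below does the same with a loaded value.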
+
+; SI-LABEL: {{^}}v_trunc_i64_to_i1:
+; SI: buffer_load_dwordx2 v{{\[}}[[VLO:[0-9]+]]:{{[0-9]+\]}}
+; SI: v_and_b32_e32 [[MASKED:v[0-9]+]], 1, v[[VLO]]
+; SI: v_cmp_eq_i32_e64 [[CMP:s\[[0-9]+:[0-9]+\]]], [[MASKED]], 1
+; SI: v_cndmask_b32_e64 {{v[0-9]+}}, -12, 63, [[CMP]]
+define void @v_trunc_i64_to_i1(i32 addrspace(1)* %out, i64 addrspace(1)* %in) {
+ %tid = call i32 @llvm.r600.read.tidig.x() nounwind readnone
+ %gep = getelementptr i64 addrspace(1)* %in, i32 %tid
+ %out.gep = getelementptr i32 addrspace(1)* %out, i32 %tid
+ %x = load i64 addrspace(1)* %gep
+
+ %trunc = trunc i64 %x to i1
+ %sel = select i1 %trunc, i32 63, i32 -12
+ store i32 %sel, i32 addrspace(1)* %out.gep
+ ret void
+}
diff --git a/test/CodeGen/R600/tti-unroll-prefs.ll b/test/CodeGen/R600/tti-unroll-prefs.ll
new file mode 100644
index 0000000..0009c42
--- /dev/null
+++ b/test/CodeGen/R600/tti-unroll-prefs.ll
@@ -0,0 +1,58 @@
+; RUN: opt -loop-unroll -S -mtriple=amdgcn-- -mcpu=SI %s | FileCheck %s
+
+; This IR comes from this OpenCL C code:
+;
+; if (b + 4 > a) {
+; for (int i = 0; i < 4; i++, b++) {
+; if (b + 1 <= a)
+; *(dst + c + b) = 0;
+; else
+; break;
+; }
+; }
+;
+; This test is meant to check that this loop isn't unrolled into more than
+; four iterations. The loop unrolling preferences we currently use cause this
+; loop to not be unrolled at all, but that may change in the future.
+
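+; How the checks below verify this (editorial note): a single "store i8 0"
+; in the unrolled IR shows the loop body was not duplicated; the CHECK
+; followed by CHECK-NOT for the same store enforces exactly one occurrence
+; before the final ret.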
+; CHECK-LABEL: @test
+; CHECK: store i8 0, i8 addrspace(1)*
+; CHECK-NOT: store i8 0, i8 addrspace(1)*
+; CHECK: ret void
+define void @test(i8 addrspace(1)* nocapture %dst, i32 %a, i32 %b, i32 %c) {
+entry:
+ %add = add nsw i32 %b, 4
+ %cmp = icmp sgt i32 %add, %a
+ br i1 %cmp, label %for.cond.preheader, label %if.end7
+
+for.cond.preheader: ; preds = %entry
+ %cmp313 = icmp slt i32 %b, %a
+ br i1 %cmp313, label %if.then4.lr.ph, label %if.end7.loopexit
+
+if.then4.lr.ph: ; preds = %for.cond.preheader
+ %0 = sext i32 %c to i64
+ br label %if.then4
+
+if.then4: ; preds = %if.then4.lr.ph, %if.then4
+ %i.015 = phi i32 [ 0, %if.then4.lr.ph ], [ %inc, %if.then4 ]
+ %b.addr.014 = phi i32 [ %b, %if.then4.lr.ph ], [ %add2, %if.then4 ]
+ %add2 = add nsw i32 %b.addr.014, 1
+ %1 = sext i32 %b.addr.014 to i64
+ %add.ptr.sum = add nsw i64 %1, %0
+ %add.ptr5 = getelementptr inbounds i8 addrspace(1)* %dst, i64 %add.ptr.sum
+ store i8 0, i8 addrspace(1)* %add.ptr5, align 1
+ %inc = add nsw i32 %i.015, 1
+ %cmp1 = icmp slt i32 %inc, 4
+ %cmp3 = icmp slt i32 %add2, %a
+ %or.cond = and i1 %cmp3, %cmp1
+ br i1 %or.cond, label %if.then4, label %for.cond.if.end7.loopexit_crit_edge
+
+for.cond.if.end7.loopexit_crit_edge: ; preds = %if.then4
+ br label %if.end7.loopexit
+
+if.end7.loopexit: ; preds = %for.cond.if.end7.loopexit_crit_edge, %for.cond.preheader
+ br label %if.end7
+
+if.end7: ; preds = %if.end7.loopexit, %entry
+ ret void
+}
diff --git a/test/CodeGen/R600/uaddo.ll b/test/CodeGen/R600/uaddo.ll
index eb242c1..57d7835 100644
--- a/test/CodeGen/R600/uaddo.ll
+++ b/test/CodeGen/R600/uaddo.ll
@@ -1,4 +1,5 @@
-; RUN: llc -march=r600 -mcpu=SI -verify-machineinstrs< %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
+; RUN: llc -march=amdgcn -mcpu=SI -verify-machineinstrs< %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
+; RUN: llc -march=amdgcn -mcpu=tonga -verify-machineinstrs< %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
; RUN: llc -march=r600 -mcpu=cypress -verify-machineinstrs< %s
declare { i32, i1 } @llvm.uadd.with.overflow.i32(i32, i32) nounwind readnone
diff --git a/test/CodeGen/R600/udiv.ll b/test/CodeGen/R600/udiv.ll
index 59e91f8..0c2c65b 100644
--- a/test/CodeGen/R600/udiv.ll
+++ b/test/CodeGen/R600/udiv.ll
@@ -1,9 +1,10 @@
-;RUN: llc < %s -march=r600 -mcpu=redwood | FileCheck --check-prefix=EG-CHECK %s
-;RUN: llc < %s -march=r600 -mcpu=verde -verify-machineinstrs | FileCheck --check-prefix=SI-CHECK %s
+;RUN: llc < %s -march=r600 -mcpu=redwood | FileCheck --check-prefix=EG %s
+;RUN: llc < %s -march=amdgcn -mcpu=verde -verify-machineinstrs | FileCheck --check-prefix=SI %s
+;RUN: llc < %s -march=amdgcn -mcpu=tonga -verify-machineinstrs | FileCheck --check-prefix=SI %s
-;EG-CHECK-LABEL: {{^}}test:
-;EG-CHECK-NOT: SETGE_INT
-;EG-CHECK: CF_END
+;EG-LABEL: {{^}}test:
+;EG-NOT: SETGE_INT
+;EG: CF_END
define void @test(i32 addrspace(1)* %out, i32 addrspace(1)* %in) {
%b_ptr = getelementptr i32 addrspace(1)* %in, i32 1
@@ -18,10 +19,10 @@ define void @test(i32 addrspace(1)* %out, i32 addrspace(1)* %in) {
;The goal of this test is to make sure the ISel doesn't fail when it gets
;a vector udiv (v2i32 here, v4i32 in test4 below)
-;EG-CHECK-LABEL: {{^}}test2:
-;EG-CHECK: CF_END
-;SI-CHECK-LABEL: {{^}}test2:
-;SI-CHECK: s_endpgm
+;EG-LABEL: {{^}}test2:
+;EG: CF_END
+;SI-LABEL: {{^}}test2:
+;SI: s_endpgm
define void @test2(<2 x i32> addrspace(1)* %out, <2 x i32> addrspace(1)* %in) {
%b_ptr = getelementptr <2 x i32> addrspace(1)* %in, i32 1
@@ -32,10 +33,10 @@ define void @test2(<2 x i32> addrspace(1)* %out, <2 x i32> addrspace(1)* %in) {
ret void
}
-;EG-CHECK-LABEL: {{^}}test4:
-;EG-CHECK: CF_END
-;SI-CHECK-LABEL: {{^}}test4:
-;SI-CHECK: s_endpgm
+;EG-LABEL: {{^}}test4:
+;EG: CF_END
+;SI-LABEL: {{^}}test4:
+;SI: s_endpgm
define void @test4(<4 x i32> addrspace(1)* %out, <4 x i32> addrspace(1)* %in) {
%b_ptr = getelementptr <4 x i32> addrspace(1)* %in, i32 1
diff --git a/test/CodeGen/R600/udivrem.ll b/test/CodeGen/R600/udivrem.ll
index f20705b..b3837f2 100644
--- a/test/CodeGen/R600/udivrem.ll
+++ b/test/CodeGen/R600/udivrem.ll
@@ -1,4 +1,5 @@
-; RUN: llc -march=r600 -mcpu=SI -verify-machineinstrs < %s | FileCheck --check-prefix=SI --check-prefix=FUNC %s
+; RUN: llc -march=amdgcn -mcpu=SI -verify-machineinstrs < %s | FileCheck --check-prefix=SI --check-prefix=FUNC %s
+; RUN: llc -march=amdgcn -mcpu=tonga -verify-machineinstrs < %s | FileCheck --check-prefix=SI --check-prefix=FUNC %s
; RUN: llc -march=r600 -mcpu=redwood < %s | FileCheck --check-prefix=EG --check-prefix=FUNC %s
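; Editorial note on the check updates below: v_subrev_i32_e32 computes
; src1 - src0, so swapping the captured operands when a check switches from
; v_sub to v_subrev keeps the expected arithmetic unchanged.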
; FUNC-LABEL: {{^}}test_udivrem:
@@ -32,8 +33,8 @@
; SI-DAG: v_sub_i32_e32 [[NEG_RCP_LO:v[0-9]+]], 0, [[RCP_LO]]
; SI: v_cndmask_b32_e64
; SI: v_mul_hi_u32 [[E:v[0-9]+]], {{v[0-9]+}}, [[RCP]]
-; SI-DAG: v_add_i32_e32 [[RCP_A_E:v[0-9]+]], [[RCP]], [[E]]
-; SI-DAG: v_sub_i32_e32 [[RCP_S_E:v[0-9]+]], [[RCP]], [[E]]
+; SI-DAG: v_add_i32_e32 [[RCP_A_E:v[0-9]+]], [[E]], [[RCP]]
+; SI-DAG: v_subrev_i32_e32 [[RCP_S_E:v[0-9]+]], [[E]], [[RCP]]
; SI: v_cndmask_b32_e64
; SI: v_mul_hi_u32 [[Quotient:v[0-9]+]]
; SI: v_mul_lo_i32 [[Num_S_Remainder:v[0-9]+]]
@@ -112,12 +113,12 @@ define void @test_udivrem(i32 addrspace(1)* %out, i32 %x, i32 %y) {
; SI-DAG: v_sub_i32_e32 [[FIRST_NEG_RCP_LO:v[0-9]+]], 0, [[FIRST_RCP_LO]]
; SI-DAG: v_cndmask_b32_e64
; SI-DAG: v_mul_hi_u32 [[FIRST_E:v[0-9]+]], {{v[0-9]+}}, [[FIRST_RCP]]
-; SI-DAG: v_add_i32_e32 [[FIRST_RCP_A_E:v[0-9]+]], [[FIRST_RCP]], [[FIRST_E]]
-; SI-DAG: v_sub_i32_e32 [[FIRST_RCP_S_E:v[0-9]+]], [[FIRST_RCP]], [[FIRST_E]]
+; SI-DAG: v_add_i32_e32 [[FIRST_RCP_A_E:v[0-9]+]], [[FIRST_E]], [[FIRST_RCP]]
+; SI-DAG: v_subrev_i32_e32 [[FIRST_RCP_S_E:v[0-9]+]], [[FIRST_E]], [[FIRST_RCP]]
; SI-DAG: v_cndmask_b32_e64
; SI-DAG: v_mul_hi_u32 [[FIRST_Quotient:v[0-9]+]]
; SI-DAG: v_mul_lo_i32 [[FIRST_Num_S_Remainder:v[0-9]+]]
-; SI-DAG: v_sub_i32_e32 [[FIRST_Remainder:v[0-9]+]], {{[vs][0-9]+}}, [[FIRST_Num_S_Remainder]]
+; SI-DAG: v_subrev_i32_e32 [[FIRST_Remainder:v[0-9]+]], [[FIRST_Num_S_Remainder]], v{{[0-9]+}}
; SI-DAG: v_cndmask_b32_e64
; SI-DAG: v_cndmask_b32_e64
; SI-DAG: v_and_b32_e32 [[FIRST_Tmp1:v[0-9]+]]
@@ -135,12 +136,12 @@ define void @test_udivrem(i32 addrspace(1)* %out, i32 %x, i32 %y) {
; SI-DAG: v_sub_i32_e32 [[SECOND_NEG_RCP_LO:v[0-9]+]], 0, [[SECOND_RCP_LO]]
; SI-DAG: v_cndmask_b32_e64
; SI-DAG: v_mul_hi_u32 [[SECOND_E:v[0-9]+]], {{v[0-9]+}}, [[SECOND_RCP]]
-; SI-DAG: v_add_i32_e32 [[SECOND_RCP_A_E:v[0-9]+]], [[SECOND_RCP]], [[SECOND_E]]
-; SI-DAG: v_sub_i32_e32 [[SECOND_RCP_S_E:v[0-9]+]], [[SECOND_RCP]], [[SECOND_E]]
+; SI-DAG: v_add_i32_e32 [[SECOND_RCP_A_E:v[0-9]+]], [[SECOND_E]], [[SECOND_RCP]]
+; SI-DAG: v_subrev_i32_e32 [[SECOND_RCP_S_E:v[0-9]+]], [[SECOND_E]], [[SECOND_RCP]]
; SI-DAG: v_cndmask_b32_e64
; SI-DAG: v_mul_hi_u32 [[SECOND_Quotient:v[0-9]+]]
; SI-DAG: v_mul_lo_i32 [[SECOND_Num_S_Remainder:v[0-9]+]]
-; SI-DAG: v_sub_i32_e32 [[SECOND_Remainder:v[0-9]+]], {{[vs][0-9]+}}, [[SECOND_Num_S_Remainder]]
+; SI-DAG: v_subrev_i32_e32 [[SECOND_Remainder:v[0-9]+]], [[SECOND_Num_S_Remainder]], v{{[0-9]+}}
; SI-DAG: v_cndmask_b32_e64
; SI-DAG: v_cndmask_b32_e64
; SI-DAG: v_and_b32_e32 [[SECOND_Tmp1:v[0-9]+]]
@@ -262,12 +263,12 @@ define void @test_udivrem_v2(<2 x i32> addrspace(1)* %out, <2 x i32> %x, <2 x i3
; SI-DAG: v_sub_i32_e32 [[FIRST_NEG_RCP_LO:v[0-9]+]], 0, [[FIRST_RCP_LO]]
; SI-DAG: v_cndmask_b32_e64
; SI-DAG: v_mul_hi_u32 [[FIRST_E:v[0-9]+]], {{v[0-9]+}}, [[FIRST_RCP]]
-; SI-DAG: v_add_i32_e32 [[FIRST_RCP_A_E:v[0-9]+]], [[FIRST_RCP]], [[FIRST_E]]
-; SI-DAG: v_sub_i32_e32 [[FIRST_RCP_S_E:v[0-9]+]], [[FIRST_RCP]], [[FIRST_E]]
+; SI-DAG: v_add_i32_e32 [[FIRST_RCP_A_E:v[0-9]+]], [[FIRST_E]], [[FIRST_RCP]]
+; SI-DAG: v_subrev_i32_e32 [[FIRST_RCP_S_E:v[0-9]+]], [[FIRST_E]], [[FIRST_RCP]]
; SI-DAG: v_cndmask_b32_e64
; SI-DAG: v_mul_hi_u32 [[FIRST_Quotient:v[0-9]+]]
; SI-DAG: v_mul_lo_i32 [[FIRST_Num_S_Remainder:v[0-9]+]]
-; SI-DAG: v_sub_i32_e32 [[FIRST_Remainder:v[0-9]+]], {{[vs][0-9]+}}, [[FIRST_Num_S_Remainder]]
+; SI-DAG: v_subrev_i32_e32 [[FIRST_Remainder:v[0-9]+]], [[FIRST_Num_S_Remainder]], v{{[0-9]+}}
; SI-DAG: v_cndmask_b32_e64
; SI-DAG: v_cndmask_b32_e64
; SI-DAG: v_and_b32_e32 [[FIRST_Tmp1:v[0-9]+]]
@@ -285,12 +286,12 @@ define void @test_udivrem_v2(<2 x i32> addrspace(1)* %out, <2 x i32> %x, <2 x i3
; SI-DAG: v_sub_i32_e32 [[SECOND_NEG_RCP_LO:v[0-9]+]], 0, [[SECOND_RCP_LO]]
; SI-DAG: v_cndmask_b32_e64
; SI-DAG: v_mul_hi_u32 [[SECOND_E:v[0-9]+]], {{v[0-9]+}}, [[SECOND_RCP]]
-; SI-DAG: v_add_i32_e32 [[SECOND_RCP_A_E:v[0-9]+]], [[SECOND_RCP]], [[SECOND_E]]
-; SI-DAG: v_sub_i32_e32 [[SECOND_RCP_S_E:v[0-9]+]], [[SECOND_RCP]], [[SECOND_E]]
+; SI-DAG: v_add_i32_e32 [[SECOND_RCP_A_E:v[0-9]+]], [[SECOND_E]], [[SECOND_RCP]]
+; SI-DAG: v_subrev_i32_e32 [[SECOND_RCP_S_E:v[0-9]+]], [[SECOND_E]], [[SECOND_RCP]]
; SI-DAG: v_cndmask_b32_e64
; SI-DAG: v_mul_hi_u32 [[SECOND_Quotient:v[0-9]+]]
; SI-DAG: v_mul_lo_i32 [[SECOND_Num_S_Remainder:v[0-9]+]]
-; SI-DAG: v_sub_i32_e32 [[SECOND_Remainder:v[0-9]+]], {{[vs][0-9]+}}, [[SECOND_Num_S_Remainder]]
+; SI-DAG: v_subrev_i32_e32 [[SECOND_Remainder:v[0-9]+]], [[SECOND_Num_S_Remainder]], v{{[0-9]+}}
; SI-DAG: v_cndmask_b32_e64
; SI-DAG: v_cndmask_b32_e64
; SI-DAG: v_and_b32_e32 [[SECOND_Tmp1:v[0-9]+]]
@@ -308,12 +309,12 @@ define void @test_udivrem_v2(<2 x i32> addrspace(1)* %out, <2 x i32> %x, <2 x i3
; SI-DAG: v_sub_i32_e32 [[THIRD_NEG_RCP_LO:v[0-9]+]], 0, [[THIRD_RCP_LO]]
; SI-DAG: v_cndmask_b32_e64
; SI-DAG: v_mul_hi_u32 [[THIRD_E:v[0-9]+]], {{v[0-9]+}}, [[THIRD_RCP]]
-; SI-DAG: v_add_i32_e32 [[THIRD_RCP_A_E:v[0-9]+]], [[THIRD_RCP]], [[THIRD_E]]
-; SI-DAG: v_sub_i32_e32 [[THIRD_RCP_S_E:v[0-9]+]], [[THIRD_RCP]], [[THIRD_E]]
+; SI-DAG: v_add_i32_e32 [[THIRD_RCP_A_E:v[0-9]+]], [[THIRD_E]], [[THIRD_RCP]]
+; SI-DAG: v_subrev_i32_e32 [[THIRD_RCP_S_E:v[0-9]+]], [[THIRD_E]], [[THIRD_RCP]]
; SI-DAG: v_cndmask_b32_e64
; SI-DAG: v_mul_hi_u32 [[THIRD_Quotient:v[0-9]+]]
; SI-DAG: v_mul_lo_i32 [[THIRD_Num_S_Remainder:v[0-9]+]]
-; SI-DAG: v_sub_i32_e32 [[THIRD_Remainder:v[0-9]+]], {{[vs][0-9]+}}, [[THIRD_Num_S_Remainder]]
+; SI-DAG: v_subrev_i32_e32 [[THIRD_Remainder:v[0-9]+]], [[THIRD_Num_S_Remainder]], {{v[0-9]+}}
; SI-DAG: v_cndmask_b32_e64
; SI-DAG: v_cndmask_b32_e64
; SI-DAG: v_and_b32_e32 [[THIRD_Tmp1:v[0-9]+]]
@@ -331,22 +332,8 @@ define void @test_udivrem_v2(<2 x i32> addrspace(1)* %out, <2 x i32> %x, <2 x i3
; SI-DAG: v_sub_i32_e32 [[FOURTH_NEG_RCP_LO:v[0-9]+]], 0, [[FOURTH_RCP_LO]]
; SI-DAG: v_cndmask_b32_e64
; SI-DAG: v_mul_hi_u32 [[FOURTH_E:v[0-9]+]], {{v[0-9]+}}, [[FOURTH_RCP]]
-; SI-DAG: v_add_i32_e32 [[FOURTH_RCP_A_E:v[0-9]+]], [[FOURTH_RCP]], [[FOURTH_E]]
-; SI-DAG: v_sub_i32_e32 [[FOURTH_RCP_S_E:v[0-9]+]], [[FOURTH_RCP]], [[FOURTH_E]]
-; SI-DAG: v_cndmask_b32_e64
-; SI-DAG: v_mul_hi_u32 [[FOURTH_Quotient:v[0-9]+]]
-; SI-DAG: v_mul_lo_i32 [[FOURTH_Num_S_Remainder:v[0-9]+]]
-; SI-DAG: v_sub_i32_e32 [[FOURTH_Remainder:v[0-9]+]], {{[vs][0-9]+}}, [[FOURTH_Num_S_Remainder]]
-; SI-DAG: v_cndmask_b32_e64
-; SI-DAG: v_cndmask_b32_e64
-; SI-DAG: v_and_b32_e32 [[FOURTH_Tmp1:v[0-9]+]]
-; SI-DAG: v_add_i32_e32 [[FOURTH_Quotient_A_One:v[0-9]+]], {{.*}}, [[FOURTH_Quotient]]
-; SI-DAG: v_subrev_i32_e32 [[FOURTH_Quotient_S_One:v[0-9]+]],
-; SI-DAG: v_cndmask_b32_e64
-; SI-DAG: v_cndmask_b32_e64
-; SI-DAG: v_add_i32_e32 [[FOURTH_Remainder_A_Den:v[0-9]+]],
-; SI-DAG: v_subrev_i32_e32 [[FOURTH_Remainder_S_Den:v[0-9]+]],
-; SI-DAG: v_cndmask_b32_e64
+; SI-DAG: v_add_i32_e32 [[FOURTH_RCP_A_E:v[0-9]+]], [[FOURTH_E]], [[FOURTH_RCP]]
+; SI-DAG: v_subrev_i32_e32 [[FOURTH_RCP_S_E:v[0-9]+]], [[FOURTH_E]], [[FOURTH_RCP]]
; SI-DAG: v_cndmask_b32_e64
; SI: s_endpgm
define void @test_udivrem_v4(<4 x i32> addrspace(1)* %out, <4 x i32> %x, <4 x i32> %y) {
diff --git a/test/CodeGen/R600/udivrem24.ll b/test/CodeGen/R600/udivrem24.ll
index defb3c0..4b98ac6 100644
--- a/test/CodeGen/R600/udivrem24.ll
+++ b/test/CodeGen/R600/udivrem24.ll
@@ -1,4 +1,5 @@
-; RUN: llc -march=r600 -mcpu=SI < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
+; RUN: llc -march=amdgcn -mcpu=SI < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
+; RUN: llc -march=amdgcn -mcpu=tonga < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
; RUN: llc -march=r600 -mcpu=redwood < %s | FileCheck -check-prefix=EG -check-prefix=FUNC %s
; FUNC-LABEL: {{^}}udiv24_i8:
diff --git a/test/CodeGen/R600/udivrem64.ll b/test/CodeGen/R600/udivrem64.ll
index 8864c83..9f3069b 100644
--- a/test/CodeGen/R600/udivrem64.ll
+++ b/test/CodeGen/R600/udivrem64.ll
@@ -1,5 +1,6 @@
-;XUN: llc < %s -march=r600 -mcpu=SI -verify-machineinstrs| FileCheck --check-prefix=SI --check-prefix=FUNC %s
-;RUN: llc < %s -march=r600 -mcpu=redwood | FileCheck --check-prefix=EG --check-prefix=FUNC %s
+;RUN: llc -march=amdgcn -mcpu=SI -verify-machineinstrs < %s | FileCheck --check-prefix=SI --check-prefix=GCN --check-prefix=FUNC %s
+;RUN: llc -march=amdgcn -mcpu=tonga -verify-machineinstrs < %s | FileCheck --check-prefix=VI --check-prefix=GCN --check-prefix=FUNC %s
+;RUN: llc -march=r600 -mcpu=redwood < %s | FileCheck --check-prefix=EG --check-prefix=FUNC %s
;FUNC-LABEL: {{^}}test_udiv:
;EG: RECIP_UINT
@@ -34,7 +35,41 @@
;EG: BFE_UINT
;EG: BFE_UINT
;EG: BFE_UINT
-;SI: s_endpgm
+
+;GCN: s_bfe_u32
+;GCN: s_bfe_u32
+;GCN: s_bfe_u32
+;GCN: s_bfe_u32
+;GCN: s_bfe_u32
+;GCN: s_bfe_u32
+;GCN: s_bfe_u32
+;GCN: s_bfe_u32
+;GCN: s_bfe_u32
+;GCN: s_bfe_u32
+;GCN: s_bfe_u32
+;GCN: s_bfe_u32
+;GCN: s_bfe_u32
+;GCN: s_bfe_u32
+;GCN: s_bfe_u32
+;GCN: s_bfe_u32
+;GCN: s_bfe_u32
+;GCN: s_bfe_u32
+;GCN: s_bfe_u32
+;GCN: s_bfe_u32
+;GCN: s_bfe_u32
+;GCN: s_bfe_u32
+;GCN: s_bfe_u32
+;GCN: s_bfe_u32
+;GCN: s_bfe_u32
+;GCN: s_bfe_u32
+;GCN: s_bfe_u32
+;GCN: s_bfe_u32
+;GCN: s_bfe_u32
+;GCN: s_bfe_u32
+;GCN-NOT: v_mad_f32
+;SI-NOT: v_lshr_b64
+;VI-NOT: v_lshrrev_b64
+;GCN: s_endpgm
define void @test_udiv(i64 addrspace(1)* %out, i64 %x, i64 %y) {
%result = udiv i64 %x, %y
store i64 %result, i64 addrspace(1)* %out
@@ -74,9 +109,115 @@ define void @test_udiv(i64 addrspace(1)* %out, i64 %x, i64 %y) {
;EG: BFE_UINT
;EG: BFE_UINT
;EG: AND_INT {{.*}}, 1,
-;SI: s_endpgm
+
+;GCN: s_bfe_u32
+;GCN: s_bfe_u32
+;GCN: s_bfe_u32
+;GCN: s_bfe_u32
+;GCN: s_bfe_u32
+;GCN: s_bfe_u32
+;GCN: s_bfe_u32
+;GCN: s_bfe_u32
+;GCN: s_bfe_u32
+;GCN: s_bfe_u32
+;GCN: s_bfe_u32
+;GCN: s_bfe_u32
+;GCN: s_bfe_u32
+;GCN: s_bfe_u32
+;GCN: s_bfe_u32
+;GCN: s_bfe_u32
+;GCN: s_bfe_u32
+;GCN: s_bfe_u32
+;GCN: s_bfe_u32
+;GCN: s_bfe_u32
+;GCN: s_bfe_u32
+;GCN: s_bfe_u32
+;GCN: s_bfe_u32
+;GCN: s_bfe_u32
+;GCN: s_bfe_u32
+;GCN: s_bfe_u32
+;GCN: s_bfe_u32
+;GCN: s_bfe_u32
+;GCN: s_bfe_u32
+;GCN: s_bfe_u32
+;GCN-NOT: v_mad_f32
+;SI-NOT: v_lshr_b64
+;VI-NOT: v_lshrrev_b64
+;GCN: s_endpgm
define void @test_urem(i64 addrspace(1)* %out, i64 %x, i64 %y) {
%result = urem i64 %x, %y
store i64 %result, i64 addrspace(1)* %out
ret void
}
+
+;FUNC-LABEL: {{^}}test_udiv3264:
+;EG: RECIP_UINT
+;EG-NOT: BFE_UINT
+
+;GCN-NOT: s_bfe_u32
+;GCN-NOT: v_mad_f32
+;SI-NOT: v_lshr_b64
+;VI-NOT: v_lshrrev_b64
+;GCN: s_endpgm
+define void @test_udiv3264(i64 addrspace(1)* %out, i64 %x, i64 %y) {
+ %1 = lshr i64 %x, 33
+ %2 = lshr i64 %y, 33
+ %result = udiv i64 %1, %2
+ store i64 %result, i64 addrspace(1)* %out
+ ret void
+}
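+; Editorial note: lshr by 33 leaves at most 31 significant bits, so the
+; division can be done with 32-bit arithmetic and none of the s_bfe_u32
+; expansion checked above is needed. The 2464 variants below shift by 40,
+; leaving 24 bits, narrow enough for the f32 fast path (v_mad_f32).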
+
+;FUNC-LABEL: {{^}}test_urem3264:
+;EG: RECIP_UINT
+;EG-NOT: BFE_UINT
+
+;GCN-NOT: s_bfe_u32
+;GCN-NOT: v_mad_f32
+;SI-NOT: v_lshr_b64
+;VI-NOT: v_lshrrev_b64
+;GCN: s_endpgm
+define void @test_urem3264(i64 addrspace(1)* %out, i64 %x, i64 %y) {
+ %1 = lshr i64 %x, 33
+ %2 = lshr i64 %y, 33
+ %result = urem i64 %1, %2
+ store i64 %result, i64 addrspace(1)* %out
+ ret void
+}
+
+;FUNC-LABEL: {{^}}test_udiv2464:
+;EG: UINT_TO_FLT
+;EG: UINT_TO_FLT
+;EG: FLT_TO_UINT
+;EG-NOT: RECIP_UINT
+;EG-NOT: BFE_UINT
+
+;SI-NOT: v_lshr_b64
+;VI-NOT: v_lshrrev_b64
+;GCN: v_mad_f32
+;GCN: s_endpgm
+define void @test_udiv2464(i64 addrspace(1)* %out, i64 %x, i64 %y) {
+ %1 = lshr i64 %x, 40
+ %2 = lshr i64 %y, 40
+ %result = udiv i64 %1, %2
+ store i64 %result, i64 addrspace(1)* %out
+ ret void
+}
+
+;FUNC-LABEL: {{^}}test_urem2464:
+;EG: UINT_TO_FLT
+;EG: UINT_TO_FLT
+;EG: FLT_TO_UINT
+;EG-NOT: RECIP_UINT
+;EG-NOT: BFE_UINT
+
+;SI-NOT: v_lshr_b64
+;VI-NOT: v_lshrrev_b64
+;GCN: v_mad_f32
+;GCN: s_endpgm
+define void @test_urem2464(i64 addrspace(1)* %out, i64 %x, i64 %y) {
+ %1 = lshr i64 %x, 40
+ %2 = lshr i64 %y, 40
+ %result = urem i64 %1, %2
+ store i64 %result, i64 addrspace(1)* %out
+ ret void
+}
diff --git a/test/CodeGen/R600/uint_to_fp.f64.ll b/test/CodeGen/R600/uint_to_fp.f64.ll
index bddf700..f715243 100644
--- a/test/CodeGen/R600/uint_to_fp.f64.ll
+++ b/test/CodeGen/R600/uint_to_fp.f64.ll
@@ -1,47 +1,12 @@
-; RUN: llc -march=r600 -mcpu=SI -verify-machineinstrs < %s | FileCheck -check-prefix=SI %s
+; RUN: llc -march=amdgcn -mcpu=SI -verify-machineinstrs < %s | FileCheck -check-prefix=SI %s
declare i32 @llvm.r600.read.tidig.x() nounwind readnone
-; SI-LABEL: {{^}}uint_to_fp_f64_i32
-; SI: v_cvt_f64_u32_e32
-; SI: s_endpgm
-define void @uint_to_fp_f64_i32(double addrspace(1)* %out, i32 %in) {
- %cast = uitofp i32 %in to double
- store double %cast, double addrspace(1)* %out, align 8
- ret void
-}
-
-; SI-LABEL: {{^}}uint_to_fp_i1_f64:
-; SI: v_cmp_eq_i32_e64 [[CMP:s\[[0-9]+:[0-9]\]]],
-; FIXME: We should the VGPR sources for V_CNDMASK are copied from SGPRs,
-; we should be able to fold the SGPRs into the V_CNDMASK instructions.
-; SI: v_cndmask_b32_e64 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}, [[CMP]]
-; SI: v_cndmask_b32_e64 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}, [[CMP]]
-; SI: buffer_store_dwordx2
-; SI: s_endpgm
-define void @uint_to_fp_i1_f64(double addrspace(1)* %out, i32 %in) {
- %cmp = icmp eq i32 %in, 0
- %fp = uitofp i1 %cmp to double
- store double %fp, double addrspace(1)* %out, align 4
- ret void
-}
-
-; SI-LABEL: {{^}}uint_to_fp_i1_f64_load:
-; SI: v_cndmask_b32_e64 [[IRESULT:v[0-9]]], 0, 1
-; SI-NEXT: v_cvt_f64_u32_e32 [[RESULT:v\[[0-9]+:[0-9]\]]], [[IRESULT]]
-; SI: buffer_store_dwordx2 [[RESULT]]
-; SI: s_endpgm
-define void @uint_to_fp_i1_f64_load(double addrspace(1)* %out, i1 %in) {
- %fp = uitofp i1 %in to double
- store double %fp, double addrspace(1)* %out, align 8
- ret void
-}
-
; SI-LABEL: {{^}}v_uint_to_fp_i64_to_f64
; SI: buffer_load_dwordx2 v{{\[}}[[LO:[0-9]+]]:[[HI:[0-9]+]]{{\]}}
-; SI-DAG: v_cvt_f64_u32_e32 [[LO_CONV:v\[[0-9]+:[0-9]+\]]], v[[LO]]
-; SI-DAG: v_cvt_f64_u32_e32 [[HI_CONV:v\[[0-9]+:[0-9]+\]]], v[[HI]]
+; SI: v_cvt_f64_u32_e32 [[HI_CONV:v\[[0-9]+:[0-9]+\]]], v[[HI]]
; SI: v_ldexp_f64 [[LDEXP:v\[[0-9]+:[0-9]+\]]], [[HI_CONV]], 32
+; SI: v_cvt_f64_u32_e32 [[LO_CONV:v\[[0-9]+:[0-9]+\]]], v[[LO]]
; SI: v_add_f64 [[RESULT:v\[[0-9]+:[0-9]+\]]], [[LDEXP]], [[LO_CONV]]
; SI: buffer_store_dwordx2 [[RESULT]]
define void @v_uint_to_fp_i64_to_f64(double addrspace(1)* %out, i64 addrspace(1)* %in) {
@@ -53,23 +18,81 @@ define void @v_uint_to_fp_i64_to_f64(double addrspace(1)* %out, i64 addrspace(1)
ret void
}
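; Editorial note: for x = hi * 2^32 + lo, the sequence above computes
; uitofp(x) as ldexp(double(hi), 32) + double(lo); each 32-bit half converts
; to f64 exactly, leaving a single rounding in the final add.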
-; SI-LABEL: {{^}}s_uint_to_fp_f64_i64
-define void @s_uint_to_fp_f64_i64(double addrspace(1)* %out, i64 %in) {
+; SI-LABEL: {{^}}s_uint_to_fp_i64_to_f64
+define void @s_uint_to_fp_i64_to_f64(double addrspace(1)* %out, i64 %in) {
%cast = uitofp i64 %in to double
store double %cast, double addrspace(1)* %out, align 8
ret void
}
-; SI-LABEL: {{^}}s_uint_to_fp_v2f64_v2i64
-define void @s_uint_to_fp_v2f64_v2i64(<2 x double> addrspace(1)* %out, <2 x i64> %in) {
+; SI-LABEL: {{^}}s_uint_to_fp_v2i64_to_v2f64
+define void @s_uint_to_fp_v2i64_to_v2f64(<2 x double> addrspace(1)* %out, <2 x i64> %in) {
%cast = uitofp <2 x i64> %in to <2 x double>
store <2 x double> %cast, <2 x double> addrspace(1)* %out, align 16
ret void
}
-; SI-LABEL: {{^}}s_uint_to_fp_v4f64_v4i64
-define void @s_uint_to_fp_v4f64_v4i64(<4 x double> addrspace(1)* %out, <4 x i64> %in) {
+; SI-LABEL: {{^}}s_uint_to_fp_v4i64_to_v4f64
+define void @s_uint_to_fp_v4i64_to_v4f64(<4 x double> addrspace(1)* %out, <4 x i64> %in) {
%cast = uitofp <4 x i64> %in to <4 x double>
store <4 x double> %cast, <4 x double> addrspace(1)* %out, align 16
ret void
}
+
+; SI-LABEL: {{^}}s_uint_to_fp_i32_to_f64
+; SI: v_cvt_f64_u32_e32
+; SI: s_endpgm
+define void @s_uint_to_fp_i32_to_f64(double addrspace(1)* %out, i32 %in) {
+ %cast = uitofp i32 %in to double
+ store double %cast, double addrspace(1)* %out, align 8
+ ret void
+}
+
+; SI-LABEL: {{^}}s_uint_to_fp_v2i32_to_v2f64
+; SI: v_cvt_f64_u32_e32
+; SI: v_cvt_f64_u32_e32
+; SI: s_endpgm
+define void @s_uint_to_fp_v2i32_to_v2f64(<2 x double> addrspace(1)* %out, <2 x i32> %in) {
+ %cast = uitofp <2 x i32> %in to <2 x double>
+ store <2 x double> %cast, <2 x double> addrspace(1)* %out, align 16
+ ret void
+}
+
+; SI-LABEL: {{^}}s_uint_to_fp_v4i32_to_v4f64
+; SI: v_cvt_f64_u32_e32
+; SI: v_cvt_f64_u32_e32
+; SI: v_cvt_f64_u32_e32
+; SI: v_cvt_f64_u32_e32
+; SI: s_endpgm
+define void @s_uint_to_fp_v4i32_to_v4f64(<4 x double> addrspace(1)* %out, <4 x i32> %in) {
+ %cast = uitofp <4 x i32> %in to <4 x double>
+ store <4 x double> %cast, <4 x double> addrspace(1)* %out, align 16
+ ret void
+}
+
+; FIXME: select on 0, 0
+; SI-LABEL: {{^}}uint_to_fp_i1_to_f64:
+; SI: v_cmp_eq_i32_e64 [[CMP:s\[[0-9]+:[0-9]\]]],
+; We can't fold the SGPRs into v_cndmask_b32_e64, because it already
+; uses an SGPR for [[CMP]]
+; SI: v_cndmask_b32_e64 v{{[0-9]+}}, 0, v{{[0-9]+}}, [[CMP]]
+; SI: v_cndmask_b32_e64 v{{[0-9]+}}, 0, 0, [[CMP]]
+; SI: buffer_store_dwordx2
+; SI: s_endpgm
+define void @uint_to_fp_i1_to_f64(double addrspace(1)* %out, i32 %in) {
+ %cmp = icmp eq i32 %in, 0
+ %fp = uitofp i1 %cmp to double
+ store double %fp, double addrspace(1)* %out, align 4
+ ret void
+}
+
+; SI-LABEL: {{^}}uint_to_fp_i1_to_f64_load:
+; SI: v_cndmask_b32_e64 [[IRESULT:v[0-9]]], 0, 1
+; SI-NEXT: v_cvt_f64_u32_e32 [[RESULT:v\[[0-9]+:[0-9]\]]], [[IRESULT]]
+; SI: buffer_store_dwordx2 [[RESULT]]
+; SI: s_endpgm
+define void @uint_to_fp_i1_to_f64_load(double addrspace(1)* %out, i1 %in) {
+ %fp = uitofp i1 %in to double
+ store double %fp, double addrspace(1)* %out, align 8
+ ret void
+}
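
; A minimal standalone sketch of the split conversion the checks on
; v_uint_to_fp_i64_to_f64 above expect: convert each 32-bit half, scale the
; high half by 2^32 (the ldexp by 32), and add. The function and value names
; here are illustrative only and not part of the test suite.
define void @sketch_uint_to_fp_i64_to_f64(double addrspace(1)* %out, i64 %x) {
  %lo = trunc i64 %x to i32
  %hi.shifted = lshr i64 %x, 32
  %hi = trunc i64 %hi.shifted to i32
  %lo.f64 = uitofp i32 %lo to double
  %hi.f64 = uitofp i32 %hi to double
  %hi.scaled = fmul double %hi.f64, 0x41F0000000000000 ; 2^32
  %result = fadd double %hi.scaled, %lo.f64
  store double %result, double addrspace(1)* %out, align 8
  ret void
}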
diff --git a/test/CodeGen/R600/uint_to_fp.ll b/test/CodeGen/R600/uint_to_fp.ll
index f58f10b..1c8a175 100644
--- a/test/CodeGen/R600/uint_to_fp.ll
+++ b/test/CodeGen/R600/uint_to_fp.ll
@@ -1,20 +1,32 @@
-; RUN: llc -march=r600 -mcpu=SI -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
+; RUN: llc -march=amdgcn -mcpu=SI -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
+; RUN: llc -march=amdgcn -mcpu=tonga -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
; RUN: llc -march=r600 -mcpu=redwood < %s | FileCheck -check-prefix=R600 -check-prefix=FUNC %s
-; FUNC-LABEL: {{^}}uint_to_fp_v2i32:
+; FUNC-LABEL: {{^}}uint_to_fp_i32_to_f32:
+; R600-DAG: UINT_TO_FLT * T{{[0-9]+\.[XYZW]}}, KC0[2].Z
+
+; SI: v_cvt_f32_u32_e32
+; SI: s_endpgm
+define void @uint_to_fp_i32_to_f32(float addrspace(1)* %out, i32 %in) {
+ %result = uitofp i32 %in to float
+ store float %result, float addrspace(1)* %out
+ ret void
+}
+
+; FUNC-LABEL: {{^}}uint_to_fp_v2i32_to_v2f32:
; R600-DAG: UINT_TO_FLT * T{{[0-9]+\.[XYZW]}}, KC0[2].W
; R600-DAG: UINT_TO_FLT * T{{[0-9]+\.[XYZW]}}, KC0[3].X
; SI: v_cvt_f32_u32_e32
; SI: v_cvt_f32_u32_e32
; SI: s_endpgm
-define void @uint_to_fp_v2i32(<2 x float> addrspace(1)* %out, <2 x i32> %in) {
+define void @uint_to_fp_v2i32_to_v2f32(<2 x float> addrspace(1)* %out, <2 x i32> %in) {
%result = uitofp <2 x i32> %in to <2 x float>
store <2 x float> %result, <2 x float> addrspace(1)* %out
ret void
}
-; FUNC-LABEL: {{^}}uint_to_fp_v4i32:
+; FUNC-LABEL: {{^}}uint_to_fp_v4i32_to_v4f32:
; R600: UINT_TO_FLT * T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
; R600: UINT_TO_FLT * T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
; R600: UINT_TO_FLT * T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
@@ -25,45 +37,45 @@ define void @uint_to_fp_v2i32(<2 x float> addrspace(1)* %out, <2 x i32> %in) {
; SI: v_cvt_f32_u32_e32
; SI: v_cvt_f32_u32_e32
; SI: s_endpgm
-define void @uint_to_fp_v4i32(<4 x float> addrspace(1)* %out, <4 x i32> addrspace(1)* %in) {
+define void @uint_to_fp_v4i32_to_v4f32(<4 x float> addrspace(1)* %out, <4 x i32> addrspace(1)* %in) {
%value = load <4 x i32> addrspace(1) * %in
%result = uitofp <4 x i32> %value to <4 x float>
store <4 x float> %result, <4 x float> addrspace(1)* %out
ret void
}
-; FUNC-LABEL: {{^}}uint_to_fp_i64_f32:
+; FUNC-LABEL: {{^}}uint_to_fp_i64_to_f32:
; R600: UINT_TO_FLT
; R600: UINT_TO_FLT
; R600: MULADD_IEEE
; SI: v_cvt_f32_u32_e32
; SI: v_cvt_f32_u32_e32
-; SI: v_mad_f32
+; SI: v_madmk_f32 {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, 0x4f800000
; SI: s_endpgm
-define void @uint_to_fp_i64_f32(float addrspace(1)* %out, i64 %in) {
+define void @uint_to_fp_i64_to_f32(float addrspace(1)* %out, i64 %in) {
entry:
%0 = uitofp i64 %in to float
store float %0, float addrspace(1)* %out
ret void
}
-; FUNC-LABEL: {{^}}uint_to_fp_i1_f32:
+; FUNC-LABEL: {{^}}uint_to_fp_i1_to_f32:
; SI: v_cmp_eq_i32_e64 [[CMP:s\[[0-9]+:[0-9]\]]],
; SI-NEXT: v_cndmask_b32_e64 [[RESULT:v[0-9]+]], 0, 1.0, [[CMP]]
; SI: buffer_store_dword [[RESULT]],
; SI: s_endpgm
-define void @uint_to_fp_i1_f32(float addrspace(1)* %out, i32 %in) {
+define void @uint_to_fp_i1_to_f32(float addrspace(1)* %out, i32 %in) {
%cmp = icmp eq i32 %in, 0
%fp = uitofp i1 %cmp to float
store float %fp, float addrspace(1)* %out, align 4
ret void
}
-; FUNC-LABEL: {{^}}uint_to_fp_i1_f32_load:
+; FUNC-LABEL: {{^}}uint_to_fp_i1_to_f32_load:
; SI: v_cndmask_b32_e64 [[RESULT:v[0-9]+]], 0, 1.0
; SI: buffer_store_dword [[RESULT]],
; SI: s_endpgm
-define void @uint_to_fp_i1_f32_load(float addrspace(1)* %out, i1 %in) {
+define void @uint_to_fp_i1_to_f32_load(float addrspace(1)* %out, i1 %in) {
%fp = uitofp i1 %in to float
store float %fp, float addrspace(1)* %out, align 4
ret void
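
; A hedged sketch of what the v_madmk_f32 check on uint_to_fp_i64_to_f32
; above encodes: the inline literal 0x4f800000 is 2^32 as an f32, so the mad
; computes hi * 2^32 + lo. Names are illustrative, not from the test file.
declare float @llvm.fmuladd.f32(float, float, float)

define float @sketch_uint_to_fp_i64_to_f32(i64 %x) {
  %lo = trunc i64 %x to i32
  %hi.shifted = lshr i64 %x, 32
  %hi = trunc i64 %hi.shifted to i32
  %lo.f = uitofp i32 %lo to float
  %hi.f = uitofp i32 %hi to float
  ; 0x41F0000000000000 is 2^32, the same value as the 0x4f800000 f32 literal
  %result = call float @llvm.fmuladd.f32(float %hi.f, float 0x41F0000000000000, float %lo.f)
  ret float %result
}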
diff --git a/test/CodeGen/R600/unaligned-load-store.ll b/test/CodeGen/R600/unaligned-load-store.ll
index f8737e6..665dc37 100644
--- a/test/CodeGen/R600/unaligned-load-store.ll
+++ b/test/CodeGen/R600/unaligned-load-store.ll
@@ -1,37 +1,179 @@
-; RUN: llc -march=r600 -mcpu=SI -verify-machineinstrs< %s | FileCheck -check-prefix=SI %s
+; RUN: llc -march=amdgcn -mcpu=SI -verify-machineinstrs< %s | FileCheck -check-prefix=SI %s
+; RUN: llc -march=amdgcn -mcpu=tonga -verify-machineinstrs< %s | FileCheck -check-prefix=SI %s
-; FIXME: This is probably wrong. This probably needs to expand to 8-bit reads and writes.
-; SI-LABEL: {{^}}unaligned_load_store_i32:
-; SI: ds_read_u16
-; SI: ds_read_u16
-; SI: ds_write_b32
+; SI-LABEL: {{^}}unaligned_load_store_i16_local:
+; SI: ds_read_u8
+; SI: ds_read_u8
+; SI: ds_write_b8
+; SI: ds_write_b8
; SI: s_endpgm
-define void @unaligned_load_store_i32(i32 addrspace(3)* %p, i32 addrspace(3)* %r) nounwind {
+define void @unaligned_load_store_i16_local(i16 addrspace(3)* %p, i16 addrspace(3)* %r) nounwind {
+ %v = load i16 addrspace(3)* %p, align 1
+ store i16 %v, i16 addrspace(3)* %r, align 1
+ ret void
+}
+
+; SI-LABEL: {{^}}unaligned_load_store_i16_global:
+; SI: buffer_load_ubyte
+; SI: buffer_load_ubyte
+; SI: buffer_store_byte
+; SI: buffer_store_byte
+; SI: s_endpgm
+define void @unaligned_load_store_i16_global(i16 addrspace(1)* %p, i16 addrspace(1)* %r) nounwind {
+ %v = load i16 addrspace(1)* %p, align 1
+ store i16 %v, i16 addrspace(1)* %r, align 1
+ ret void
+}
+
+; SI-LABEL: {{^}}unaligned_load_store_i32_local:
+; SI: ds_read_u8
+; SI: ds_read_u8
+; SI: ds_read_u8
+; SI: ds_read_u8
+; SI: ds_write_b8
+; SI: ds_write_b8
+; SI: ds_write_b8
+; SI: ds_write_b8
+; SI: s_endpgm
+define void @unaligned_load_store_i32_local(i32 addrspace(3)* %p, i32 addrspace(3)* %r) nounwind {
%v = load i32 addrspace(3)* %p, align 1
store i32 %v, i32 addrspace(3)* %r, align 1
ret void
}
-; SI-LABEL: {{^}}unaligned_load_store_v4i32:
-; SI: ds_read_u16
-; SI: ds_read_u16
-; SI: ds_read_u16
-; SI: ds_read_u16
-; SI: ds_read_u16
-; SI: ds_read_u16
-; SI: ds_read_u16
-; SI: ds_read_u16
-; SI: ds_write_b32
-; SI: ds_write_b32
-; SI: ds_write_b32
-; SI: ds_write_b32
+; SI-LABEL: {{^}}unaligned_load_store_i32_global:
+; SI: buffer_load_ubyte
+; SI: buffer_load_ubyte
+; SI: buffer_load_ubyte
+; SI: buffer_load_ubyte
+; SI: buffer_store_byte
+; SI: buffer_store_byte
+; SI: buffer_store_byte
+; SI: buffer_store_byte
+define void @unaligned_load_store_i32_global(i32 addrspace(1)* %p, i32 addrspace(1)* %r) nounwind {
+ %v = load i32 addrspace(1)* %p, align 1
+ store i32 %v, i32 addrspace(1)* %r, align 1
+ ret void
+}
+
+; SI-LABEL: {{^}}unaligned_load_store_i64_local:
+; SI: ds_read_u8
+; SI: ds_read_u8
+; SI: ds_read_u8
+; SI: ds_read_u8
+; SI: ds_read_u8
+; SI: ds_read_u8
+; SI: ds_read_u8
+; SI: ds_read_u8
+; SI: ds_write_b8
+; SI: ds_write_b8
+; SI: ds_write_b8
+; SI: ds_write_b8
+; SI: ds_write_b8
+; SI: ds_write_b8
+; SI: ds_write_b8
+; SI: ds_write_b8
; SI: s_endpgm
-define void @unaligned_load_store_v4i32(<4 x i32> addrspace(3)* %p, <4 x i32> addrspace(3)* %r) nounwind {
+define void @unaligned_load_store_i64_local(i64 addrspace(3)* %p, i64 addrspace(3)* %r) {
+ %v = load i64 addrspace(3)* %p, align 1
+ store i64 %v, i64 addrspace(3)* %r, align 1
+ ret void
+}
+
+; SI-LABEL: {{^}}unaligned_load_store_i64_global:
+; SI: buffer_load_ubyte
+; SI: buffer_load_ubyte
+; SI: buffer_load_ubyte
+; SI: buffer_load_ubyte
+; SI: buffer_load_ubyte
+; SI: buffer_load_ubyte
+; SI: buffer_load_ubyte
+; SI: buffer_load_ubyte
+; SI: buffer_store_byte
+; SI: buffer_store_byte
+; SI: buffer_store_byte
+; SI: buffer_store_byte
+; SI: buffer_store_byte
+; SI: buffer_store_byte
+; SI: buffer_store_byte
+; SI: buffer_store_byte
+define void @unaligned_load_store_i64_global(i64 addrspace(1)* %p, i64 addrspace(1)* %r) {
+ %v = load i64 addrspace(1)* %p, align 1
+ store i64 %v, i64 addrspace(1)* %r, align 1
+ ret void
+}
+
+; SI-LABEL: {{^}}unaligned_load_store_v4i32_local:
+; SI: ds_read_u8
+; SI: ds_read_u8
+; SI: ds_read_u8
+; SI: ds_read_u8
+
+; SI: ds_read_u8
+; SI: ds_read_u8
+; SI: ds_read_u8
+; SI: ds_read_u8
+
+; SI: ds_read_u8
+; SI: ds_read_u8
+; SI: ds_read_u8
+; SI: ds_read_u8
+
+; SI: ds_read_u8
+; SI: ds_read_u8
+; SI: ds_read_u8
+; SI: ds_read_u8
+
+; SI: ds_write_b8
+; SI: ds_write_b8
+; SI: ds_write_b8
+; SI: ds_write_b8
+
+; SI: ds_write_b8
+; SI: ds_write_b8
+; SI: ds_write_b8
+; SI: ds_write_b8
+
+; SI: ds_write_b8
+; SI: ds_write_b8
+; SI: ds_write_b8
+; SI: ds_write_b8
+
+; SI: ds_write_b8
+; SI: ds_write_b8
+; SI: ds_write_b8
+; SI: ds_write_b8
+; SI: s_endpgm
+define void @unaligned_load_store_v4i32_local(<4 x i32> addrspace(3)* %p, <4 x i32> addrspace(3)* %r) nounwind {
%v = load <4 x i32> addrspace(3)* %p, align 1
store <4 x i32> %v, <4 x i32> addrspace(3)* %r, align 1
ret void
}
+; FIXME: We mark v4i32 as custom, so misaligned loads are never expanded.
+; FIXME-SI-LABEL: {{^}}unaligned_load_store_v4i32_global
+; FIXME-SI: buffer_load_ubyte
+; FIXME-SI: buffer_load_ubyte
+; FIXME-SI: buffer_load_ubyte
+; FIXME-SI: buffer_load_ubyte
+; FIXME-SI: buffer_load_ubyte
+; FIXME-SI: buffer_load_ubyte
+; FIXME-SI: buffer_load_ubyte
+; FIXME-SI: buffer_load_ubyte
+; FIXME-SI: buffer_load_ubyte
+; FIXME-SI: buffer_load_ubyte
+; FIXME-SI: buffer_load_ubyte
+; FIXME-SI: buffer_load_ubyte
+; FIXME-SI: buffer_load_ubyte
+; FIXME-SI: buffer_load_ubyte
+; FIXME-SI: buffer_load_ubyte
+; FIXME-SI: buffer_load_ubyte
+define void @unaligned_load_store_v4i32_global(<4 x i32> addrspace(1)* %p, <4 x i32> addrspace(1)* %r) nounwind {
+ %v = load <4 x i32> addrspace(1)* %p, align 1
+ store <4 x i32> %v, <4 x i32> addrspace(1)* %r, align 1
+ ret void
+}
+
; SI-LABEL: {{^}}load_lds_i64_align_4:
; SI: ds_read2_b32
; SI: s_endpgm
@@ -64,12 +206,23 @@ define void @load_lds_i64_align_4_with_split_offset(i64 addrspace(1)* nocapture
ret void
}
-; FIXME: Need to fix this case.
-; define void @load_lds_i64_align_1(i64 addrspace(1)* nocapture %out, i64 addrspace(3)* %in) #0 {
-; %val = load i64 addrspace(3)* %in, align 1
-; store i64 %val, i64 addrspace(1)* %out, align 8
-; ret void
-; }
+; SI-LABEL: {{^}}load_lds_i64_align_1:
+; SI: ds_read_u8
+; SI: ds_read_u8
+; SI: ds_read_u8
+; SI: ds_read_u8
+; SI: ds_read_u8
+; SI: ds_read_u8
+; SI: ds_read_u8
+; SI: ds_read_u8
+; SI: buffer_store_dwordx2
+; SI: s_endpgm
+
+define void @load_lds_i64_align_1(i64 addrspace(1)* nocapture %out, i64 addrspace(3)* %in) #0 {
+ %val = load i64 addrspace(3)* %in, align 1
+ store i64 %val, i64 addrspace(1)* %out, align 8
+ ret void
+}
; SI-LABEL: {{^}}store_lds_i64_align_4:
; SI: ds_write2_b32
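
; A rough IR-level sketch of the byte expansion the ds_read_u8 / ds_write_b8
; checks above describe, assuming little-endian byte order; the function name
; and structure are illustrative, not part of the test file. Stores expand
; the same way, one b8 write per byte.
define i32 @sketch_unaligned_load_i32_local(i8 addrspace(3)* %p) {
  %p1 = getelementptr i8 addrspace(3)* %p, i32 1
  %p2 = getelementptr i8 addrspace(3)* %p, i32 2
  %p3 = getelementptr i8 addrspace(3)* %p, i32 3
  %b0 = load i8 addrspace(3)* %p, align 1
  %b1 = load i8 addrspace(3)* %p1, align 1
  %b2 = load i8 addrspace(3)* %p2, align 1
  %b3 = load i8 addrspace(3)* %p3, align 1
  %z0 = zext i8 %b0 to i32
  %z1 = zext i8 %b1 to i32
  %z2 = zext i8 %b2 to i32
  %z3 = zext i8 %b3 to i32
  %s1 = shl i32 %z1, 8
  %s2 = shl i32 %z2, 16
  %s3 = shl i32 %z3, 24
  %or01 = or i32 %z0, %s1
  %or012 = or i32 %or01, %s2
  %val = or i32 %or012, %s3
  ret i32 %val
}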
diff --git a/test/CodeGen/R600/unhandled-loop-condition-assertion.ll b/test/CodeGen/R600/unhandled-loop-condition-assertion.ll
index ff01a1e..c615f0b 100644
--- a/test/CodeGen/R600/unhandled-loop-condition-assertion.ll
+++ b/test/CodeGen/R600/unhandled-loop-condition-assertion.ll
@@ -1,6 +1,7 @@
; REQUIRES: asserts
; XFAIL: *
-; RUN: llc -O0 -verify-machineinstrs -asm-verbose=0 -march=r600 -mcpu=SI < %s | FileCheck -check-prefix=SI -check-prefix=COMMON %s
+; RUN: llc -O0 -verify-machineinstrs -asm-verbose=0 -march=amdgcn -mcpu=SI < %s | FileCheck -check-prefix=SI -check-prefix=COMMON %s
+; RUN: llc -O0 -verify-machineinstrs -asm-verbose=0 -march=amdgcn -mcpu=tonga < %s | FileCheck -check-prefix=SI -check-prefix=COMMON %s
; RUN: llc -O0 -verify-machineinstrs -asm-verbose=0 -march=r600 -mcpu=redwood < %s | FileCheck -check-prefix=EG -check-prefix=COMMON %s
; SI hits an assertion at -O0, and evergreen hits a "not implemented" unreachable.
diff --git a/test/CodeGen/R600/urecip.ll b/test/CodeGen/R600/urecip.ll
index 4d953b5..daacc77 100644
--- a/test/CodeGen/R600/urecip.ll
+++ b/test/CodeGen/R600/urecip.ll
@@ -1,4 +1,5 @@
-;RUN: llc < %s -march=r600 -mcpu=verde -verify-machineinstrs | FileCheck %s
+;RUN: llc < %s -march=amdgcn -mcpu=verde -verify-machineinstrs | FileCheck %s
+;RUN: llc < %s -march=amdgcn -mcpu=tonga -verify-machineinstrs | FileCheck %s
;CHECK: v_rcp_iflag_f32_e32
diff --git a/test/CodeGen/R600/urem.ll b/test/CodeGen/R600/urem.ll
index 914f5d0..aa2a3eb 100644
--- a/test/CodeGen/R600/urem.ll
+++ b/test/CodeGen/R600/urem.ll
@@ -1,34 +1,94 @@
-;RUN: llc < %s -march=r600 -mcpu=redwood | FileCheck --check-prefix=EG-CHECK %s
-;RUN: llc < %s -march=r600 -mcpu=verde -verify-machineinstrs | FileCheck --check-prefix=SI-CHECK %s
+; RUN: llc -march=amdgcn -mcpu=verde -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
+; RUN: llc -march=amdgcn -mcpu=tonga -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
+; RUN: llc -march=r600 -mcpu=redwood < %s | FileCheck -check-prefix=EG -check-prefix=FUNC %s
-;The code generated by urem is long and complex and may frequently change.
-;The goal of this test is to make sure the ISel doesn't fail when it gets
-;a v2i32/v4i32 urem
+; The code generated by urem is long and complex and may frequently
+; change. The goal of this test is to make sure the ISel doesn't fail
+; when it gets a v2i32/v4i32 urem.
-;EG-CHECK: {{^}}test2:
-;EG-CHECK: CF_END
-;SI-CHECK: {{^}}test2:
-;SI-CHECK: s_endpgm
+; FUNC-LABEL: {{^}}test_urem_i32:
+; SI: s_endpgm
+; EG: CF_END
+define void @test_urem_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %in) {
+ %b_ptr = getelementptr i32 addrspace(1)* %in, i32 1
+ %a = load i32 addrspace(1)* %in
+ %b = load i32 addrspace(1)* %b_ptr
+ %result = urem i32 %a, %b
+ store i32 %result, i32 addrspace(1)* %out
+ ret void
+}
+
+; FUNC-LABEL: {{^}}test_urem_i32_7:
+; SI: v_mov_b32_e32 [[MAGIC:v[0-9]+]], 0x24924925
+; SI: v_mul_hi_u32 {{v[0-9]+}}, [[MAGIC]]
+; SI: v_subrev_i32
+; SI: v_mul_lo_i32
+; SI: v_sub_i32
+; SI: buffer_store_dword
+; SI: s_endpgm
+define void @test_urem_i32_7(i32 addrspace(1)* %out, i32 addrspace(1)* %in) {
+ %num = load i32 addrspace(1) * %in
+ %result = urem i32 %num, 7
+ store i32 %result, i32 addrspace(1)* %out
+ ret void
+}
-define void @test2(<2 x i32> addrspace(1)* %out, <2 x i32> addrspace(1)* %in) {
+; FUNC-LABEL: {{^}}test_urem_v2i32:
+; SI: s_endpgm
+; EG: CF_END
+define void @test_urem_v2i32(<2 x i32> addrspace(1)* %out, <2 x i32> addrspace(1)* %in) {
%b_ptr = getelementptr <2 x i32> addrspace(1)* %in, i32 1
- %a = load <2 x i32> addrspace(1) * %in
- %b = load <2 x i32> addrspace(1) * %b_ptr
+ %a = load <2 x i32> addrspace(1)* %in
+ %b = load <2 x i32> addrspace(1)* %b_ptr
%result = urem <2 x i32> %a, %b
store <2 x i32> %result, <2 x i32> addrspace(1)* %out
ret void
}
-;EG-CHECK: {{^}}test4:
-;EG-CHECK: CF_END
-;SI-CHECK: {{^}}test4:
-;SI-CHECK: s_endpgm
-
-define void @test4(<4 x i32> addrspace(1)* %out, <4 x i32> addrspace(1)* %in) {
+; FUNC-LABEL: {{^}}test_urem_v4i32:
+; SI: s_endpgm
+; EG: CF_END
+define void @test_urem_v4i32(<4 x i32> addrspace(1)* %out, <4 x i32> addrspace(1)* %in) {
%b_ptr = getelementptr <4 x i32> addrspace(1)* %in, i32 1
- %a = load <4 x i32> addrspace(1) * %in
- %b = load <4 x i32> addrspace(1) * %b_ptr
+ %a = load <4 x i32> addrspace(1)* %in
+ %b = load <4 x i32> addrspace(1)* %b_ptr
%result = urem <4 x i32> %a, %b
store <4 x i32> %result, <4 x i32> addrspace(1)* %out
ret void
}
+
+; FUNC-LABEL: {{^}}test_urem_i64:
+; SI: s_endpgm
+; EG: CF_END
+define void @test_urem_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %in) {
+ %b_ptr = getelementptr i64 addrspace(1)* %in, i64 1
+ %a = load i64 addrspace(1)* %in
+ %b = load i64 addrspace(1)* %b_ptr
+ %result = urem i64 %a, %b
+ store i64 %result, i64 addrspace(1)* %out
+ ret void
+}
+
+; FUNC-LABEL: {{^}}test_urem_v2i64:
+; SI: s_endpgm
+; EG: CF_END
+define void @test_urem_v2i64(<2 x i64> addrspace(1)* %out, <2 x i64> addrspace(1)* %in) {
+ %b_ptr = getelementptr <2 x i64> addrspace(1)* %in, i64 1
+ %a = load <2 x i64> addrspace(1)* %in
+ %b = load <2 x i64> addrspace(1)* %b_ptr
+ %result = urem <2 x i64> %a, %b
+ store <2 x i64> %result, <2 x i64> addrspace(1)* %out
+ ret void
+}
+
+; FUNC-LABEL: {{^}}test_urem_v4i64:
+; SI: s_endpgm
+; EG: CF_END
+define void @test_urem_v4i64(<4 x i64> addrspace(1)* %out, <4 x i64> addrspace(1)* %in) {
+ %b_ptr = getelementptr <4 x i64> addrspace(1)* %in, i64 1
+ %a = load <4 x i64> addrspace(1)* %in
+ %b = load <4 x i64> addrspace(1)* %b_ptr
+ %result = urem <4 x i64> %a, %b
+ store <4 x i64> %result, <4 x i64> addrspace(1)* %out
+ ret void
+}
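
; A hedged sketch of the magic-number expansion behind test_urem_i32_7:
; 0x24924925 is the standard unsigned magic for division by 7 (add indicator
; set, shift 3, in the Hacker's Delight sense). The exact shift sequence
; below is an assumption for illustration, not copied from the backend.
define i32 @sketch_urem_i32_7(i32 %n) {
  %n.wide = zext i32 %n to i64
  %mul = mul i64 %n.wide, 613566757 ; 0x24924925
  %hi.shifted = lshr i64 %mul, 32
  %t = trunc i64 %hi.shifted to i32 ; t = mulhu(n, magic)
  %diff = sub i32 %n, %t
  %half = lshr i32 %diff, 1
  %sum = add i32 %half, %t
  %q = lshr i32 %sum, 2             ; q = n / 7
  %q7 = mul i32 %q, 7
  %r = sub i32 %n, %q7              ; r = n % 7
  ret i32 %r
}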
diff --git a/test/CodeGen/R600/use-sgpr-multiple-times.ll b/test/CodeGen/R600/use-sgpr-multiple-times.ll
index aa94a0e..f26f300 100644
--- a/test/CodeGen/R600/use-sgpr-multiple-times.ll
+++ b/test/CodeGen/R600/use-sgpr-multiple-times.ll
@@ -1,80 +1,87 @@
-; RUN: llc -march=r600 -mcpu=SI -verify-machineinstrs < %s | FileCheck -check-prefix=SI %s
+; RUN: llc -march=amdgcn -mcpu=SI -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=GCN %s
+; RUN: llc -march=amdgcn -mcpu=tonga -verify-machineinstrs < %s | FileCheck -check-prefix=VI -check-prefix=GCN %s
declare float @llvm.fma.f32(float, float, float) #1
declare float @llvm.fmuladd.f32(float, float, float) #1
declare i32 @llvm.AMDGPU.imad24(i32, i32, i32) #1
-; SI-LABEL: {{^}}test_sgpr_use_twice_binop:
-; SI: s_load_dword [[SGPR:s[0-9]+]],
-; SI: v_add_f32_e64 [[RESULT:v[0-9]+]], [[SGPR]], [[SGPR]]
-; SI: buffer_store_dword [[RESULT]]
+; GCN-LABEL: {{^}}test_sgpr_use_twice_binop:
+; GCN: s_load_dword [[SGPR:s[0-9]+]],
+; GCN: v_add_f32_e64 [[RESULT:v[0-9]+]], [[SGPR]], [[SGPR]]
+; GCN: buffer_store_dword [[RESULT]]
define void @test_sgpr_use_twice_binop(float addrspace(1)* %out, float %a) #0 {
%dbl = fadd float %a, %a
store float %dbl, float addrspace(1)* %out, align 4
ret void
}
-; SI-LABEL: {{^}}test_sgpr_use_three_ternary_op:
-; SI: s_load_dword [[SGPR:s[0-9]+]],
-; SI: v_fma_f32 [[RESULT:v[0-9]+]], [[SGPR]], [[SGPR]], [[SGPR]]
-; SI: buffer_store_dword [[RESULT]]
+; GCN-LABEL: {{^}}test_sgpr_use_three_ternary_op:
+; GCN: s_load_dword [[SGPR:s[0-9]+]],
+; GCN: v_fma_f32 [[RESULT:v[0-9]+]], [[SGPR]], [[SGPR]], [[SGPR]]
+; GCN: buffer_store_dword [[RESULT]]
define void @test_sgpr_use_three_ternary_op(float addrspace(1)* %out, float %a) #0 {
%fma = call float @llvm.fma.f32(float %a, float %a, float %a) #1
store float %fma, float addrspace(1)* %out, align 4
ret void
}
-; SI-LABEL: {{^}}test_sgpr_use_twice_ternary_op_a_a_b:
+; GCN-LABEL: {{^}}test_sgpr_use_twice_ternary_op_a_a_b:
; SI: s_load_dword [[SGPR0:s[0-9]+]], s{{\[[0-9]+:[0-9]+\]}}, 0xb
; SI: s_load_dword [[SGPR1:s[0-9]+]], s{{\[[0-9]+:[0-9]+\]}}, 0xc
-; SI: v_mov_b32_e32 [[VGPR1:v[0-9]+]], [[SGPR1]]
-; SI: v_fma_f32 [[RESULT:v[0-9]+]], [[SGPR0]], [[SGPR0]], [[VGPR1]]
-; SI: buffer_store_dword [[RESULT]]
+; VI: s_load_dword [[SGPR0:s[0-9]+]], s{{\[[0-9]+:[0-9]+\]}}, 0x2c
+; VI: s_load_dword [[SGPR1:s[0-9]+]], s{{\[[0-9]+:[0-9]+\]}}, 0x30
+; GCN: v_mov_b32_e32 [[VGPR1:v[0-9]+]], [[SGPR1]]
+; GCN: v_fma_f32 [[RESULT:v[0-9]+]], [[SGPR0]], [[SGPR0]], [[VGPR1]]
+; GCN: buffer_store_dword [[RESULT]]
define void @test_sgpr_use_twice_ternary_op_a_a_b(float addrspace(1)* %out, float %a, float %b) #0 {
%fma = call float @llvm.fma.f32(float %a, float %a, float %b) #1
store float %fma, float addrspace(1)* %out, align 4
ret void
}
-; SI-LABEL: {{^}}test_sgpr_use_twice_ternary_op_a_b_a:
+; GCN-LABEL: {{^}}test_sgpr_use_twice_ternary_op_a_b_a:
; SI: s_load_dword [[SGPR0:s[0-9]+]], s{{\[[0-9]+:[0-9]+\]}}, 0xb
; SI: s_load_dword [[SGPR1:s[0-9]+]], s{{\[[0-9]+:[0-9]+\]}}, 0xc
-; SI: v_mov_b32_e32 [[VGPR1:v[0-9]+]], [[SGPR1]]
-; SI: v_fma_f32 [[RESULT:v[0-9]+]], [[SGPR0]], [[VGPR1]], [[SGPR0]]
-; SI: buffer_store_dword [[RESULT]]
+; VI: s_load_dword [[SGPR0:s[0-9]+]], s{{\[[0-9]+:[0-9]+\]}}, 0x2c
+; VI: s_load_dword [[SGPR1:s[0-9]+]], s{{\[[0-9]+:[0-9]+\]}}, 0x30
+; GCN: v_mov_b32_e32 [[VGPR1:v[0-9]+]], [[SGPR1]]
+; GCN: v_fma_f32 [[RESULT:v[0-9]+]], [[VGPR1]], [[SGPR0]], [[SGPR0]]
+; GCN: buffer_store_dword [[RESULT]]
define void @test_sgpr_use_twice_ternary_op_a_b_a(float addrspace(1)* %out, float %a, float %b) #0 {
%fma = call float @llvm.fma.f32(float %a, float %b, float %a) #1
store float %fma, float addrspace(1)* %out, align 4
ret void
}
-; SI-LABEL: {{^}}test_sgpr_use_twice_ternary_op_b_a_a:
+; GCN-LABEL: {{^}}test_sgpr_use_twice_ternary_op_b_a_a:
; SI: s_load_dword [[SGPR0:s[0-9]+]], s{{\[[0-9]+:[0-9]+\]}}, 0xb
; SI: s_load_dword [[SGPR1:s[0-9]+]], s{{\[[0-9]+:[0-9]+\]}}, 0xc
-; SI: v_mov_b32_e32 [[VGPR1:v[0-9]+]], [[SGPR1]]
-; SI: v_fma_f32 [[RESULT:v[0-9]+]], [[VGPR1]], [[SGPR0]], [[SGPR0]]
-; SI: buffer_store_dword [[RESULT]]
+; VI: s_load_dword [[SGPR0:s[0-9]+]], s{{\[[0-9]+:[0-9]+\]}}, 0x2c
+; VI: s_load_dword [[SGPR1:s[0-9]+]], s{{\[[0-9]+:[0-9]+\]}}, 0x30
+; GCN: v_mov_b32_e32 [[VGPR1:v[0-9]+]], [[SGPR1]]
+; GCN: v_fma_f32 [[RESULT:v[0-9]+]], [[SGPR0]], [[VGPR1]], [[SGPR0]]
+; GCN: buffer_store_dword [[RESULT]]
define void @test_sgpr_use_twice_ternary_op_b_a_a(float addrspace(1)* %out, float %a, float %b) #0 {
%fma = call float @llvm.fma.f32(float %b, float %a, float %a) #1
store float %fma, float addrspace(1)* %out, align 4
ret void
}
-; SI-LABEL: {{^}}test_sgpr_use_twice_ternary_op_a_a_imm:
-; SI: s_load_dword [[SGPR:s[0-9]+]]
-; SI: v_fma_f32 [[RESULT:v[0-9]+]], [[SGPR]], [[SGPR]], 2.0
-; SI: buffer_store_dword [[RESULT]]
+; GCN-LABEL: {{^}}test_sgpr_use_twice_ternary_op_a_a_imm:
+; GCN: s_load_dword [[SGPR:s[0-9]+]]
+; GCN: v_fma_f32 [[RESULT:v[0-9]+]], [[SGPR]], [[SGPR]], 2.0
+; GCN: buffer_store_dword [[RESULT]]
define void @test_sgpr_use_twice_ternary_op_a_a_imm(float addrspace(1)* %out, float %a) #0 {
%fma = call float @llvm.fma.f32(float %a, float %a, float 2.0) #1
store float %fma, float addrspace(1)* %out, align 4
ret void
}
-; SI-LABEL: {{^}}test_sgpr_use_twice_ternary_op_a_imm_a:
-; SI: s_load_dword [[SGPR:s[0-9]+]]
-; SI: v_fma_f32 [[RESULT:v[0-9]+]], 2.0, [[SGPR]], [[SGPR]]
-; SI: buffer_store_dword [[RESULT]]
+; GCN-LABEL: {{^}}test_sgpr_use_twice_ternary_op_a_imm_a:
+; GCN: s_load_dword [[SGPR:s[0-9]+]]
+; GCN: v_fma_f32 [[RESULT:v[0-9]+]], 2.0, [[SGPR]], [[SGPR]]
+; GCN: buffer_store_dword [[RESULT]]
define void @test_sgpr_use_twice_ternary_op_a_imm_a(float addrspace(1)* %out, float %a) #0 {
%fma = call float @llvm.fma.f32(float %a, float 2.0, float %a) #1
store float %fma, float addrspace(1)* %out, align 4
@@ -82,10 +89,10 @@ define void @test_sgpr_use_twice_ternary_op_a_imm_a(float addrspace(1)* %out, fl
}
; Don't use fma since fma c, x, y is canonicalized to fma x, c, y
-; SI-LABEL: {{^}}test_sgpr_use_twice_ternary_op_imm_a_a:
-; SI: s_load_dword [[SGPR:s[0-9]+]]
-; SI: v_mad_i32_i24 [[RESULT:v[0-9]+]], 2, [[SGPR]], [[SGPR]]
-; SI: buffer_store_dword [[RESULT]]
+; GCN-LABEL: {{^}}test_sgpr_use_twice_ternary_op_imm_a_a:
+; GCN: s_load_dword [[SGPR:s[0-9]+]]
+; GCN: v_mad_i32_i24 [[RESULT:v[0-9]+]], 2, [[SGPR]], [[SGPR]]
+; GCN: buffer_store_dword [[RESULT]]
define void @test_sgpr_use_twice_ternary_op_imm_a_a(i32 addrspace(1)* %out, i32 %a) #0 {
%fma = call i32 @llvm.AMDGPU.imad24(i32 2, i32 %a, i32 %a) #1
store i32 %fma, i32 addrspace(1)* %out, align 4
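
; A small illustration (hedged, not taken from this commit) of the
; canonicalization the comment above refers to: a constant in the first
; multiplicand slot is commuted into the second, so a test written as
;   %f = call float @llvm.fma.f32(float 2.0, float %a, float %a)
; reaches codegen as
;   %f = call float @llvm.fma.f32(float %a, float 2.0, float %a)
; which is why the imm_a_a case uses @llvm.AMDGPU.imad24 instead of fma.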
diff --git a/test/CodeGen/R600/usubo.ll b/test/CodeGen/R600/usubo.ll
index abc5bd2..be1e666 100644
--- a/test/CodeGen/R600/usubo.ll
+++ b/test/CodeGen/R600/usubo.ll
@@ -1,4 +1,5 @@
-; RUN: llc -march=r600 -mcpu=SI -verify-machineinstrs< %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
+; RUN: llc -march=amdgcn -mcpu=SI -verify-machineinstrs< %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
+; RUN: llc -march=amdgcn -mcpu=tonga -verify-machineinstrs< %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
; RUN: llc -march=r600 -mcpu=cypress -verify-machineinstrs< %s
declare { i32, i1 } @llvm.usub.with.overflow.i32(i32, i32) nounwind readnone
@@ -27,7 +28,7 @@ define void @s_usubo_i32(i32 addrspace(1)* %out, i1 addrspace(1)* %carryout, i32
}
; FUNC-LABEL: {{^}}v_usubo_i32:
-; SI: v_sub_i32_e32
+; SI: v_subrev_i32_e32
define void @v_usubo_i32(i32 addrspace(1)* %out, i1 addrspace(1)* %carryout, i32 addrspace(1)* %aptr, i32 addrspace(1)* %bptr) nounwind {
%a = load i32 addrspace(1)* %aptr, align 4
%b = load i32 addrspace(1)* %bptr, align 4
diff --git a/test/CodeGen/R600/v_cndmask.ll b/test/CodeGen/R600/v_cndmask.ll
index a24dcc7..85936ec 100644
--- a/test/CodeGen/R600/v_cndmask.ll
+++ b/test/CodeGen/R600/v_cndmask.ll
@@ -1,4 +1,5 @@
-; RUN: llc -march=r600 -mcpu=SI -verify-machineinstrs < %s | FileCheck -check-prefix=SI %s
+; RUN: llc -march=amdgcn -mcpu=SI -verify-machineinstrs < %s | FileCheck -check-prefix=SI %s
+; RUN: llc -march=amdgcn -mcpu=tonga -verify-machineinstrs < %s | FileCheck -check-prefix=SI %s
declare i32 @llvm.r600.read.tidig.x() #1
diff --git a/test/CodeGen/R600/valu-i1.ll b/test/CodeGen/R600/valu-i1.ll
index 2c209fc..5a3c2ec 100644
--- a/test/CodeGen/R600/valu-i1.ll
+++ b/test/CodeGen/R600/valu-i1.ll
@@ -1,10 +1,13 @@
-; RUN: llc < %s -march=r600 -mcpu=SI | FileCheck --check-prefix=SI %s
+; RUN: llc -march=amdgcn -mcpu=SI -verify-machineinstrs -enable-misched -asm-verbose < %s | FileCheck -check-prefix=SI %s
+declare i32 @llvm.r600.read.tidig.x() nounwind readnone
+
+; SI-LABEL: @test_if
; Make sure the i1 values created by the cfg structurizer pass are
; moved using VALU instructions
; SI-NOT: s_mov_b64 s[{{[0-9]:[0-9]}}], -1
; SI: v_mov_b32_e32 v{{[0-9]}}, -1
-define void @test_if(i32 %a, i32 %b, i32 addrspace(1)* %src, i32 addrspace(1)* %dst) {
+define void @test_if(i32 %a, i32 %b, i32 addrspace(1)* %src, i32 addrspace(1)* %dst) #1 {
entry:
switch i32 %a, label %default [
i32 0, label %case0
@@ -37,3 +40,149 @@ else:
end:
ret void
}
+
+; SI-LABEL: @simple_test_v_if
+; SI: v_cmp_ne_i32_e64 [[BR_SREG:s\[[0-9]+:[0-9]+\]]], v{{[0-9]+}}, 0
+; SI: s_and_saveexec_b64 [[BR_SREG]], [[BR_SREG]]
+; SI: s_xor_b64 [[BR_SREG]], exec, [[BR_SREG]]
+
+; SI: ; BB#1
+; SI: buffer_store_dword
+; SI: s_endpgm
+
+; SI: BB1_2:
+; SI: s_or_b64 exec, exec, [[BR_SREG]]
+; SI: s_endpgm
+define void @simple_test_v_if(i32 addrspace(1)* %dst, i32 addrspace(1)* %src) #1 {
+ %tid = call i32 @llvm.r600.read.tidig.x() nounwind readnone
+ %is.0 = icmp ne i32 %tid, 0
+ br i1 %is.0, label %store, label %exit
+
+store:
+ %gep = getelementptr i32 addrspace(1)* %dst, i32 %tid
+ store i32 999, i32 addrspace(1)* %gep
+ ret void
+
+exit:
+ ret void
+}
+
+; SI-LABEL: @simple_test_v_loop
+; SI: v_cmp_ne_i32_e64 [[BR_SREG:s\[[0-9]+:[0-9]+\]]], v{{[0-9]+}}, 0
+; SI: s_and_saveexec_b64 [[BR_SREG]], [[BR_SREG]]
+; SI: s_xor_b64 [[BR_SREG]], exec, [[BR_SREG]]
+; SI: s_cbranch_execz BB2_2
+
+; SI: ; BB#1:
+; SI: s_mov_b64 {{s\[[0-9]+:[0-9]+\]}}, 0{{$}}
+
+; SI: BB2_3:
+; SI: buffer_load_dword
+; SI: buffer_store_dword
+; SI: v_cmp_eq_i32_e32 vcc,
+; SI: s_or_b64 [[OR_SREG:s\[[0-9]+:[0-9]+\]]]
+; SI: s_andn2_b64 exec, exec, [[OR_SREG]]
+; SI: s_cbranch_execnz BB2_3
+
+define void @simple_test_v_loop(i32 addrspace(1)* %dst, i32 addrspace(1)* %src) #1 {
+entry:
+ %tid = call i32 @llvm.r600.read.tidig.x() nounwind readnone
+ %is.0 = icmp ne i32 %tid, 0
+ %limit = add i32 %tid, 64
+ br i1 %is.0, label %loop, label %exit
+
+loop:
+ %i = phi i32 [%tid, %entry], [%i.inc, %loop]
+ %gep.src = getelementptr i32 addrspace(1)* %src, i32 %i
+ %gep.dst = getelementptr i32 addrspace(1)* %dst, i32 %i
+ %load = load i32 addrspace(1)* %src
+ store i32 %load, i32 addrspace(1)* %gep.dst
+ %i.inc = add nsw i32 %i, 1
+ %cmp = icmp eq i32 %limit, %i.inc
+ br i1 %cmp, label %exit, label %loop
+
+exit:
+ ret void
+}
+
+; SI-LABEL: @multi_vcond_loop
+
+; Load loop limit from buffer
+; Branch to exit if uniformly not taken
+; SI: ; BB#0:
+; SI: buffer_load_dword [[VBOUND:v[0-9]+]]
+; SI: v_cmp_gt_i32_e64 [[OUTER_CMP_SREG:s\[[0-9]+:[0-9]+\]]]
+; SI: s_and_saveexec_b64 [[OUTER_CMP_SREG]], [[OUTER_CMP_SREG]]
+; SI: s_xor_b64 [[OUTER_CMP_SREG]], exec, [[OUTER_CMP_SREG]]
+; SI: s_cbranch_execz BB3_2
+
+; Initialize inner condition to false
+; SI: ; BB#1:
+; SI: s_mov_b64 [[ZERO:s\[[0-9]+:[0-9]+\]]], 0{{$}}
+; SI: s_mov_b64 [[COND_STATE:s\[[0-9]+:[0-9]+\]]], [[ZERO]]
+
+; Clear exec bits for workitems that load -1s
+; SI: BB3_3:
+; SI: buffer_load_dword [[B:v[0-9]+]]
+; SI: buffer_load_dword [[A:v[0-9]+]]
+; SI-DAG: v_cmp_ne_i32_e64 [[NEG1_CHECK_0:s\[[0-9]+:[0-9]+\]]], [[A]], -1
+; SI-DAG: v_cmp_ne_i32_e64 [[NEG1_CHECK_1:s\[[0-9]+:[0-9]+\]]], [[B]], -1
+; SI: s_and_b64 [[ORNEG1:s\[[0-9]+:[0-9]+\]]], [[NEG1_CHECK_1]], [[NEG1_CHECK_0]]
+; SI: s_and_saveexec_b64 [[ORNEG1]], [[ORNEG1]]
+; SI: s_xor_b64 [[ORNEG1]], exec, [[ORNEG1]]
+; SI: s_cbranch_execz BB3_5
+
+; SI: BB#4:
+; SI: buffer_store_dword
+; SI: v_cmp_ge_i64_e32 vcc
+; SI: s_or_b64 [[COND_STATE]], vcc, [[COND_STATE]]
+
+; SI: BB3_5:
+; SI: s_or_b64 exec, exec, [[ORNEG1]]
+; SI: s_or_b64 [[COND_STATE]], [[ORNEG1]], [[COND_STATE]]
+; SI: s_andn2_b64 exec, exec, [[COND_STATE]]
+; SI: s_cbranch_execnz BB3_3
+
+; SI: BB#6
+; SI: s_or_b64 exec, exec, [[COND_STATE]]
+
+; SI: BB3_2:
+; SI-NOT: [[COND_STATE]]
+; SI: s_endpgm
+
+define void @multi_vcond_loop(i32 addrspace(1)* noalias nocapture %arg, i32 addrspace(1)* noalias nocapture readonly %arg1, i32 addrspace(1)* noalias nocapture readonly %arg2, i32 addrspace(1)* noalias nocapture readonly %arg3) #1 {
+bb:
+ %tmp = tail call i32 @llvm.r600.read.tidig.x() #0
+ %tmp4 = sext i32 %tmp to i64
+ %tmp5 = getelementptr inbounds i32 addrspace(1)* %arg3, i64 %tmp4
+ %tmp6 = load i32 addrspace(1)* %tmp5, align 4
+ %tmp7 = icmp sgt i32 %tmp6, 0
+ %tmp8 = sext i32 %tmp6 to i64
+ br i1 %tmp7, label %bb10, label %bb26
+
+bb10: ; preds = %bb, %bb20
+ %tmp11 = phi i64 [ %tmp23, %bb20 ], [ 0, %bb ]
+ %tmp12 = add nsw i64 %tmp11, %tmp4
+ %tmp13 = getelementptr inbounds i32 addrspace(1)* %arg1, i64 %tmp12
+ %tmp14 = load i32 addrspace(1)* %tmp13, align 4
+ %tmp15 = getelementptr inbounds i32 addrspace(1)* %arg2, i64 %tmp12
+ %tmp16 = load i32 addrspace(1)* %tmp15, align 4
+ %tmp17 = icmp ne i32 %tmp14, -1
+ %tmp18 = icmp ne i32 %tmp16, -1
+ %tmp19 = and i1 %tmp17, %tmp18
+ br i1 %tmp19, label %bb20, label %bb26
+
+bb20: ; preds = %bb10
+ %tmp21 = add nsw i32 %tmp16, %tmp14
+ %tmp22 = getelementptr inbounds i32 addrspace(1)* %arg, i64 %tmp12
+ store i32 %tmp21, i32 addrspace(1)* %tmp22, align 4
+ %tmp23 = add nuw nsw i64 %tmp11, 1
+ %tmp24 = icmp slt i64 %tmp23, %tmp8
+ br i1 %tmp24, label %bb10, label %bb26
+
+bb26: ; preds = %bb10, %bb20, %bb
+ ret void
+}
+
+attributes #0 = { nounwind readnone }
+attributes #1 = { nounwind }
diff --git a/test/CodeGen/R600/vector-alloca.ll b/test/CodeGen/R600/vector-alloca.ll
index 0b457a8..228868a 100644
--- a/test/CodeGen/R600/vector-alloca.ll
+++ b/test/CodeGen/R600/vector-alloca.ll
@@ -1,6 +1,8 @@
; RUN: llc -march=r600 -mcpu=redwood < %s | FileCheck --check-prefix=EG -check-prefix=FUNC %s
-; RUN: llc -march=r600 -mcpu=verde -mattr=-promote-alloca -verify-machineinstrs < %s | FileCheck -check-prefix=SI-ALLOCA -check-prefix=SI -check-prefix=FUNC %s
-; RUN: llc -march=r600 -mcpu=verde -mattr=+promote-alloca -verify-machineinstrs < %s | FileCheck -check-prefix=SI-PROMOTE -check-prefix=SI -check-prefix=FUNC %s
+; RUN: llc -march=amdgcn -mcpu=verde -mattr=-promote-alloca -verify-machineinstrs < %s | FileCheck -check-prefix=SI-ALLOCA -check-prefix=SI -check-prefix=FUNC %s
+; RUN: llc -march=amdgcn -mcpu=verde -mattr=+promote-alloca -verify-machineinstrs < %s | FileCheck -check-prefix=SI-PROMOTE -check-prefix=SI -check-prefix=FUNC %s
+; RUN: llc -march=amdgcn -mcpu=tonga -mattr=-promote-alloca -verify-machineinstrs < %s | FileCheck -check-prefix=SI-ALLOCA -check-prefix=SI -check-prefix=FUNC %s
+; RUN: llc -march=amdgcn -mcpu=tonga -mattr=+promote-alloca -verify-machineinstrs < %s | FileCheck -check-prefix=SI-PROMOTE -check-prefix=SI -check-prefix=FUNC %s
; FUNC-LABEL: {{^}}vector_read:
; EG: MOV
diff --git a/test/CodeGen/R600/vertex-fetch-encoding.ll b/test/CodeGen/R600/vertex-fetch-encoding.ll
index e24744e..e4d117f 100644
--- a/test/CodeGen/R600/vertex-fetch-encoding.ll
+++ b/test/CodeGen/R600/vertex-fetch-encoding.ll
@@ -1,10 +1,10 @@
-; RUN: llc < %s -march=r600 -show-mc-encoding -mcpu=barts | FileCheck --check-prefix=NI-CHECK %s
-; RUN: llc < %s -march=r600 -show-mc-encoding -mcpu=cayman | FileCheck --check-prefix=CM-CHECK %s
+; RUN: llc < %s -march=r600 -show-mc-encoding -mcpu=barts | FileCheck --check-prefix=NI %s
+; RUN: llc < %s -march=r600 -show-mc-encoding -mcpu=cayman | FileCheck --check-prefix=CM %s
-; NI-CHECK: {{^}}vtx_fetch32:
-; NI-CHECK: VTX_READ_32 T[[GPR:[0-9]]].X, T[[GPR]].X, 0 ; encoding: [0x40,0x01,0x0[[GPR]],0x10,0x0[[GPR]],0xf0,0x5f,0x13,0x00,0x00,0x08,0x00
-; CM-CHECK: {{^}}vtx_fetch32:
-; CM-CHECK: VTX_READ_32 T[[GPR:[0-9]]].X, T[[GPR]].X, 0 ; encoding: [0x40,0x01,0x0[[GPR]],0x00,0x0[[GPR]],0xf0,0x5f,0x13,0x00,0x00,0x00,0x00
+; NI: {{^}}vtx_fetch32:
+; NI: VTX_READ_32 T[[GPR:[0-9]]].X, T[[GPR]].X, 0 ; encoding: [0x40,0x01,0x0[[GPR]],0x10,0x0[[GPR]],0xf0,0x5f,0x13,0x00,0x00,0x08,0x00
+; CM: {{^}}vtx_fetch32:
+; CM: VTX_READ_32 T[[GPR:[0-9]]].X, T[[GPR]].X, 0 ; encoding: [0x40,0x01,0x0[[GPR]],0x00,0x0[[GPR]],0xf0,0x5f,0x13,0x00,0x00,0x00,0x00
define void @vtx_fetch32(i32 addrspace(1)* %out, i32 addrspace(1)* %in) {
entry:
@@ -13,8 +13,8 @@ entry:
ret void
}
-; NI-CHECK: {{^}}vtx_fetch128:
-; NI-CHECK: VTX_READ_128 T[[DST:[0-9]]].XYZW, T[[SRC:[0-9]]].X, 0 ; encoding: [0x40,0x01,0x0[[SRC]],0x40,0x0[[DST]],0x10,0x8d,0x18,0x00,0x00,0x08,0x00
+; NI: {{^}}vtx_fetch128:
+; NI: VTX_READ_128 T[[DST:[0-9]]].XYZW, T[[SRC:[0-9]]].X, 0 ; encoding: [0x40,0x01,0x0[[SRC]],0x40,0x0[[DST]],0x10,0x8d,0x18,0x00,0x00,0x08,0x00
; XXX: Add a case for Cayman when v4i32 stores are supported.
define void @vtx_fetch128(<4 x i32> addrspace(1)* %out, <4 x i32> addrspace(1)* %in) {
diff --git a/test/CodeGen/R600/vop-shrink.ll b/test/CodeGen/R600/vop-shrink.ll
index e7f0288..d5a46e3 100644
--- a/test/CodeGen/R600/vop-shrink.ll
+++ b/test/CodeGen/R600/vop-shrink.ll
@@ -1,4 +1,5 @@
-; RUN: llc -march=r600 -mcpu=SI -verify-machineinstrs< %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
+; RUN: llc -march=amdgcn -mcpu=SI -verify-machineinstrs< %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
+; RUN: llc -march=amdgcn -mcpu=tonga -verify-machineinstrs< %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
; Test that we correctly commute a sub instruction
; FUNC-LABEL: {{^}}sub_rev:
diff --git a/test/CodeGen/R600/vselect.ll b/test/CodeGen/R600/vselect.ll
index e84b8f7..a6152f7 100644
--- a/test/CodeGen/R600/vselect.ll
+++ b/test/CodeGen/R600/vselect.ll
@@ -1,13 +1,14 @@
-;RUN: llc < %s -march=r600 -mcpu=redwood | FileCheck --check-prefix=EG-CHECK %s
-;RUN: llc < %s -march=r600 -mcpu=SI -verify-machineinstrs | FileCheck --check-prefix=SI-CHECK %s
+;RUN: llc < %s -march=r600 -mcpu=redwood | FileCheck --check-prefix=EG %s
+;RUN: llc < %s -march=amdgcn -mcpu=SI -verify-machineinstrs | FileCheck --check-prefix=SI %s
+;RUN: llc < %s -march=amdgcn -mcpu=tonga -verify-machineinstrs | FileCheck --check-prefix=SI %s
-;EG-CHECK: {{^}}test_select_v2i32:
-;EG-CHECK: CNDE_INT {{\** *}}T{{[0-9]+\.[XYZW], PV\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
-;EG-CHECK: CNDE_INT {{\** *}}T{{[0-9]+\.[XYZW], PV\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
+;EG: {{^}}test_select_v2i32:
+;EG: CNDE_INT {{\** *}}T{{[0-9]+\.[XYZW], PV\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
+;EG: CNDE_INT {{\** *}}T{{[0-9]+\.[XYZW], PV\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
-;SI-CHECK: {{^}}test_select_v2i32:
-;SI-CHECK: v_cndmask_b32_e64
-;SI-CHECK: v_cndmask_b32_e64
+;SI: {{^}}test_select_v2i32:
+;SI: v_cndmask_b32_e64
+;SI: v_cndmask_b32_e64
define void @test_select_v2i32(<2 x i32> addrspace(1)* %out, <2 x i32> addrspace(1)* %in0, <2 x i32> addrspace(1)* %in1) {
entry:
@@ -19,13 +20,13 @@ entry:
ret void
}
-;EG-CHECK: {{^}}test_select_v2f32:
-;EG-CHECK: CNDE_INT {{\** *}}T{{[0-9]+\.[XYZW], PV\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
-;EG-CHECK: CNDE_INT {{\** *}}T{{[0-9]+\.[XYZW], PV\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
+;EG: {{^}}test_select_v2f32:
+;EG: CNDE_INT {{\** *}}T{{[0-9]+\.[XYZW], PV\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
+;EG: CNDE_INT {{\** *}}T{{[0-9]+\.[XYZW], PV\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
-;SI-CHECK: {{^}}test_select_v2f32:
-;SI-CHECK: v_cndmask_b32_e64
-;SI-CHECK: v_cndmask_b32_e64
+;SI: {{^}}test_select_v2f32:
+;SI: v_cndmask_b32_e64
+;SI: v_cndmask_b32_e64
define void @test_select_v2f32(<2 x float> addrspace(1)* %out, <2 x float> addrspace(1)* %in0, <2 x float> addrspace(1)* %in1) {
entry:
@@ -37,17 +38,17 @@ entry:
ret void
}
-;EG-CHECK: {{^}}test_select_v4i32:
-;EG-CHECK: CNDE_INT {{\** *}}T{{[0-9]+\.[XYZW], PV\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
-;EG-CHECK: CNDE_INT {{\** *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
-;EG-CHECK: CNDE_INT {{\** *}}T{{[0-9]+\.[XYZW], PV\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
-;EG-CHECK: CNDE_INT {{\** *}}T{{[0-9]+\.[XYZW], PV\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
+;EG: {{^}}test_select_v4i32:
+;EG: CNDE_INT {{\** *}}T{{[0-9]+\.[XYZW], PV\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
+;EG: CNDE_INT {{\** *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
+;EG: CNDE_INT {{\** *}}T{{[0-9]+\.[XYZW], PV\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
+;EG: CNDE_INT {{\** *}}T{{[0-9]+\.[XYZW], PV\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
-;SI-CHECK: {{^}}test_select_v4i32:
-;SI-CHECK: v_cndmask_b32_e64
-;SI-CHECK: v_cndmask_b32_e64
-;SI-CHECK: v_cndmask_b32_e64
-;SI-CHECK: v_cndmask_b32_e64
+;SI: {{^}}test_select_v4i32:
+;SI: v_cndmask_b32_e64
+;SI: v_cndmask_b32_e64
+;SI: v_cndmask_b32_e64
+;SI: v_cndmask_b32_e64
define void @test_select_v4i32(<4 x i32> addrspace(1)* %out, <4 x i32> addrspace(1)* %in0, <4 x i32> addrspace(1)* %in1) {
entry:
@@ -59,11 +60,11 @@ entry:
ret void
}
-;EG-CHECK: {{^}}test_select_v4f32:
-;EG-CHECK: CNDE_INT {{\** *}}T{{[0-9]+\.[XYZW], PV\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
-;EG-CHECK: CNDE_INT {{\** *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
-;EG-CHECK: CNDE_INT {{\** *}}T{{[0-9]+\.[XYZW], PV\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
-;EG-CHECK: CNDE_INT {{\** *}}T{{[0-9]+\.[XYZW], PV\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
+;EG: {{^}}test_select_v4f32:
+;EG: CNDE_INT {{\** *}}T{{[0-9]+\.[XYZW], PV\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
+;EG: CNDE_INT {{\** *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
+;EG: CNDE_INT {{\** *}}T{{[0-9]+\.[XYZW], PV\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
+;EG: CNDE_INT {{\** *}}T{{[0-9]+\.[XYZW], PV\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
define void @test_select_v4f32(<4 x float> addrspace(1)* %out, <4 x float> addrspace(1)* %in0, <4 x float> addrspace(1)* %in1) {
entry:
diff --git a/test/CodeGen/R600/wait.ll b/test/CodeGen/R600/wait.ll
index 735eabd..43561aa 100644
--- a/test/CodeGen/R600/wait.ll
+++ b/test/CodeGen/R600/wait.ll
@@ -1,11 +1,11 @@
-; RUN: llc -march=r600 -mcpu=SI -verify-machineinstrs < %s | FileCheck -strict-whitespace %s
+; RUN: llc -march=amdgcn -mcpu=SI -verify-machineinstrs < %s | FileCheck -strict-whitespace %s
+; RUN: llc -march=amdgcn -mcpu=tonga -verify-machineinstrs < %s | FileCheck -strict-whitespace %s
; CHECK-LABEL: {{^}}main:
; CHECK: s_load_dwordx4
; CHECK: s_load_dwordx4
-; CHECK: s_waitcnt lgkmcnt(0){{$}}
-; CHECK: s_waitcnt vmcnt(0){{$}}
-; CHECK: s_waitcnt expcnt(0) lgkmcnt(0){{$}}
+; CHECK: s_waitcnt vmcnt(0) lgkmcnt(0){{$}}
+; CHECK: s_endpgm
define void @main(<16 x i8> addrspace(2)* inreg %arg, <16 x i8> addrspace(2)* inreg %arg1, <32 x i8> addrspace(2)* inreg %arg2, <16 x i8> addrspace(2)* inreg %arg3, <16 x i8> addrspace(2)* inreg %arg4, i32 inreg %arg5, i32 %arg6, i32 %arg7, i32 %arg8, i32 %arg9, float addrspace(2)* inreg %constptr) #0 {
main_body:
%tmp = getelementptr <16 x i8> addrspace(2)* %arg3, i32 0
@@ -41,5 +41,5 @@ attributes #0 = { "ShaderType"="1" }
attributes #1 = { noduplicate nounwind }
attributes #2 = { nounwind readnone }
-!0 = metadata !{metadata !1, metadata !1, i64 0, i32 1}
-!1 = metadata !{metadata !"const", null}
+!0 = !{!1, !1, i64 0, i32 1}
+!1 = !{!"const", null}
diff --git a/test/CodeGen/R600/work-item-intrinsics.ll b/test/CodeGen/R600/work-item-intrinsics.ll
index 47f65f5..4328e96 100644
--- a/test/CodeGen/R600/work-item-intrinsics.ll
+++ b/test/CodeGen/R600/work-item-intrinsics.ll
@@ -1,14 +1,15 @@
+; RUN: llc -march=amdgcn -mcpu=SI -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=GCN -check-prefix=FUNC %s
+; RUN: llc -march=amdgcn -mcpu=tonga -verify-machineinstrs < %s | FileCheck -check-prefix=VI -check-prefix=GCN -check-prefix=FUNC %s
; RUN: llc -march=r600 -mcpu=redwood < %s | FileCheck -check-prefix=EG -check-prefix=FUNC %s
-; RUN: llc -march=r600 -mcpu=SI -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
; FUNC-LABEL: {{^}}ngroups_x:
; EG: MEM_RAT_CACHELESS STORE_RAW [[VAL:T[0-9]+\.X]]
; EG: MOV [[VAL]], KC0[0].X
-; SI: s_load_dword [[VAL:s[0-9]+]], s[0:1], 0
-; SI: v_mov_b32_e32 [[VVAL:v[0-9]+]], [[VAL]]
-; SI: buffer_store_dword [[VVAL]]
+; GCN: s_load_dword [[VAL:s[0-9]+]], s[0:1], 0
+; GCN: v_mov_b32_e32 [[VVAL:v[0-9]+]], [[VAL]]
+; GCN: buffer_store_dword [[VVAL]]
define void @ngroups_x (i32 addrspace(1)* %out) {
entry:
%0 = call i32 @llvm.r600.read.ngroups.x() #0
@@ -21,8 +22,9 @@ entry:
; EG: MOV [[VAL]], KC0[0].Y
; SI: s_load_dword [[VAL:s[0-9]+]], s[0:1], 0x1
-; SI: v_mov_b32_e32 [[VVAL:v[0-9]+]], [[VAL]]
-; SI: buffer_store_dword [[VVAL]]
+; VI: s_load_dword [[VAL:s[0-9]+]], s[0:1], 0x4
+; GCN: v_mov_b32_e32 [[VVAL:v[0-9]+]], [[VAL]]
+; GCN: buffer_store_dword [[VVAL]]
define void @ngroups_y (i32 addrspace(1)* %out) {
entry:
%0 = call i32 @llvm.r600.read.ngroups.y() #0
@@ -35,8 +37,9 @@ entry:
; EG: MOV [[VAL]], KC0[0].Z
; SI: s_load_dword [[VAL:s[0-9]+]], s[0:1], 0x2
-; SI: v_mov_b32_e32 [[VVAL:v[0-9]+]], [[VAL]]
-; SI: buffer_store_dword [[VVAL]]
+; VI: s_load_dword [[VAL:s[0-9]+]], s[0:1], 0x8
+; GCN: v_mov_b32_e32 [[VVAL:v[0-9]+]], [[VAL]]
+; GCN: buffer_store_dword [[VVAL]]
define void @ngroups_z (i32 addrspace(1)* %out) {
entry:
%0 = call i32 @llvm.r600.read.ngroups.z() #0
@@ -49,8 +52,9 @@ entry:
; EG: MOV [[VAL]], KC0[0].W
; SI: s_load_dword [[VAL:s[0-9]+]], s[0:1], 0x3
-; SI: v_mov_b32_e32 [[VVAL:v[0-9]+]], [[VAL]]
-; SI: buffer_store_dword [[VVAL]]
+; VI: s_load_dword [[VAL:s[0-9]+]], s[0:1], 0xc
+; GCN: v_mov_b32_e32 [[VVAL:v[0-9]+]], [[VAL]]
+; GCN: buffer_store_dword [[VVAL]]
define void @global_size_x (i32 addrspace(1)* %out) {
entry:
%0 = call i32 @llvm.r600.read.global.size.x() #0
@@ -63,8 +67,9 @@ entry:
; EG: MOV [[VAL]], KC0[1].X
; SI: s_load_dword [[VAL:s[0-9]+]], s[0:1], 0x4
-; SI: v_mov_b32_e32 [[VVAL:v[0-9]+]], [[VAL]]
-; SI: buffer_store_dword [[VVAL]]
+; VI: s_load_dword [[VAL:s[0-9]+]], s[0:1], 0x10
+; GCN: v_mov_b32_e32 [[VVAL:v[0-9]+]], [[VAL]]
+; GCN: buffer_store_dword [[VVAL]]
define void @global_size_y (i32 addrspace(1)* %out) {
entry:
%0 = call i32 @llvm.r600.read.global.size.y() #0
@@ -77,8 +82,9 @@ entry:
; EG: MOV [[VAL]], KC0[1].Y
; SI: s_load_dword [[VAL:s[0-9]+]], s[0:1], 0x5
-; SI: v_mov_b32_e32 [[VVAL:v[0-9]+]], [[VAL]]
-; SI: buffer_store_dword [[VVAL]]
+; VI: s_load_dword [[VAL:s[0-9]+]], s[0:1], 0x14
+; GCN: v_mov_b32_e32 [[VVAL:v[0-9]+]], [[VAL]]
+; GCN: buffer_store_dword [[VVAL]]
define void @global_size_z (i32 addrspace(1)* %out) {
entry:
%0 = call i32 @llvm.r600.read.global.size.z() #0
@@ -91,8 +97,9 @@ entry:
; EG: MOV [[VAL]], KC0[1].Z
; SI: s_load_dword [[VAL:s[0-9]+]], s[0:1], 0x6
-; SI: v_mov_b32_e32 [[VVAL:v[0-9]+]], [[VAL]]
-; SI: buffer_store_dword [[VVAL]]
+; VI: s_load_dword [[VAL:s[0-9]+]], s[0:1], 0x18
+; GCN: v_mov_b32_e32 [[VVAL:v[0-9]+]], [[VAL]]
+; GCN: buffer_store_dword [[VVAL]]
define void @local_size_x (i32 addrspace(1)* %out) {
entry:
%0 = call i32 @llvm.r600.read.local.size.x() #0
@@ -105,8 +112,9 @@ entry:
; EG: MOV [[VAL]], KC0[1].W
; SI: s_load_dword [[VAL:s[0-9]+]], s[0:1], 0x7
-; SI: v_mov_b32_e32 [[VVAL:v[0-9]+]], [[VAL]]
-; SI: buffer_store_dword [[VVAL]]
+; VI: s_load_dword [[VAL:s[0-9]+]], s[0:1], 0x1c
+; GCN: v_mov_b32_e32 [[VVAL:v[0-9]+]], [[VAL]]
+; GCN: buffer_store_dword [[VVAL]]
define void @local_size_y (i32 addrspace(1)* %out) {
entry:
%0 = call i32 @llvm.r600.read.local.size.y() #0
@@ -119,8 +127,9 @@ entry:
; EG: MOV [[VAL]], KC0[2].X
; SI: s_load_dword [[VAL:s[0-9]+]], s[0:1], 0x8
-; SI: v_mov_b32_e32 [[VVAL:v[0-9]+]], [[VAL]]
-; SI: buffer_store_dword [[VVAL]]
+; VI: s_load_dword [[VAL:s[0-9]+]], s[0:1], 0x20
+; GCN: v_mov_b32_e32 [[VVAL:v[0-9]+]], [[VAL]]
+; GCN: buffer_store_dword [[VVAL]]
define void @local_size_z (i32 addrspace(1)* %out) {
entry:
%0 = call i32 @llvm.r600.read.local.size.z() #0
@@ -133,8 +142,9 @@ entry:
; EG: MOV [[VAL]], KC0[2].Z
; SI: s_load_dword [[VAL:s[0-9]+]], s[0:1], 0xb
-; SI: v_mov_b32_e32 [[VVAL:v[0-9]+]], [[VAL]]
-; SI: buffer_store_dword [[VVAL]]
+; VI: s_load_dword [[VAL:s[0-9]+]], s[0:1], 0x2c
+; GCN: v_mov_b32_e32 [[VVAL:v[0-9]+]], [[VAL]]
+; GCN: buffer_store_dword [[VVAL]]
define void @get_work_dim (i32 addrspace(1)* %out) {
entry:
%0 = call i32 @llvm.AMDGPU.read.workdim() #0
@@ -147,8 +157,8 @@ entry:
; kernel arguments, but this may change in the future.
; FUNC-LABEL: {{^}}tgid_x:
-; SI: v_mov_b32_e32 [[VVAL:v[0-9]+]], s4
-; SI: buffer_store_dword [[VVAL]]
+; GCN: v_mov_b32_e32 [[VVAL:v[0-9]+]], s4
+; GCN: buffer_store_dword [[VVAL]]
define void @tgid_x (i32 addrspace(1)* %out) {
entry:
%0 = call i32 @llvm.r600.read.tgid.x() #0
@@ -157,8 +167,8 @@ entry:
}
; FUNC-LABEL: {{^}}tgid_y:
-; SI: v_mov_b32_e32 [[VVAL:v[0-9]+]], s5
-; SI: buffer_store_dword [[VVAL]]
+; GCN: v_mov_b32_e32 [[VVAL:v[0-9]+]], s5
+; GCN: buffer_store_dword [[VVAL]]
define void @tgid_y (i32 addrspace(1)* %out) {
entry:
%0 = call i32 @llvm.r600.read.tgid.y() #0
@@ -167,8 +177,8 @@ entry:
}
; FUNC-LABEL: {{^}}tgid_z:
-; SI: v_mov_b32_e32 [[VVAL:v[0-9]+]], s6
-; SI: buffer_store_dword [[VVAL]]
+; GCN: v_mov_b32_e32 [[VVAL:v[0-9]+]], s6
+; GCN: buffer_store_dword [[VVAL]]
define void @tgid_z (i32 addrspace(1)* %out) {
entry:
%0 = call i32 @llvm.r600.read.tgid.z() #0
@@ -177,7 +187,7 @@ entry:
}
; FUNC-LABEL: {{^}}tidig_x:
-; SI: buffer_store_dword v0
+; GCN: buffer_store_dword v0
define void @tidig_x (i32 addrspace(1)* %out) {
entry:
%0 = call i32 @llvm.r600.read.tidig.x() #0
@@ -186,7 +196,7 @@ entry:
}
; FUNC-LABEL: {{^}}tidig_y:
-; SI: buffer_store_dword v1
+; GCN: buffer_store_dword v1
define void @tidig_y (i32 addrspace(1)* %out) {
entry:
%0 = call i32 @llvm.r600.read.tidig.y() #0
@@ -195,7 +205,7 @@ entry:
}
; FUNC-LABEL: {{^}}tidig_z:
-; SI: buffer_store_dword v2
+; GCN: buffer_store_dword v2
define void @tidig_z (i32 addrspace(1)* %out) {
entry:
%0 = call i32 @llvm.r600.read.tidig.z() #0
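
; A worked note on the SI/VI offset pairs above (an observation from the
; check lines, not a statement from this commit): SI s_load_dword immediates
; count dwords while VI immediates count bytes, so each VI offset is the SI
; offset times 4, e.g. local.size.z at 0x8 dwords = 0x20 bytes, and
; read.workdim at 0xb dwords = 0x2c bytes.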
diff --git a/test/CodeGen/R600/wrong-transalu-pos-fix.ll b/test/CodeGen/R600/wrong-transalu-pos-fix.ll
index d652d2d..4e77c07 100644
--- a/test/CodeGen/R600/wrong-transalu-pos-fix.ll
+++ b/test/CodeGen/R600/wrong-transalu-pos-fix.ll
@@ -81,6 +81,6 @@ attributes #1 = { nounwind readnone }
!opencl.kernels = !{!0, !1, !2}
-!0 = metadata !{null}
-!1 = metadata !{null}
-!2 = metadata !{void (i32 addrspace(1)*)* @fill3d}
+!0 = !{null}
+!1 = !{null}
+!2 = !{void (i32 addrspace(1)*)* @fill3d}
diff --git a/test/CodeGen/R600/xor.ll b/test/CodeGen/R600/xor.ll
index fa54e38..1526e28 100644
--- a/test/CodeGen/R600/xor.ll
+++ b/test/CodeGen/R600/xor.ll
@@ -1,14 +1,14 @@
-;RUN: llc < %s -march=r600 -mcpu=redwood | FileCheck --check-prefix=EG-CHECK %s
-;RUN: llc < %s -march=r600 -mcpu=verde -verify-machineinstrs | FileCheck --check-prefix=SI-CHECK %s
+; RUN: llc -march=amdgcn -mcpu=verde -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
+; RUN: llc -march=amdgcn -mcpu=tonga -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
+; RUN: llc -march=r600 -mcpu=redwood < %s | FileCheck -check-prefix=EG -check-prefix=FUNC %s
-;EG-CHECK: {{^}}xor_v2i32:
-;EG-CHECK: XOR_INT {{\** *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
-;EG-CHECK: XOR_INT {{\** *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
-;SI-CHECK: {{^}}xor_v2i32:
-;SI-CHECK: v_xor_b32_e32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
-;SI-CHECK: v_xor_b32_e32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
+; FUNC-LABEL: {{^}}xor_v2i32:
+; EG: XOR_INT {{\** *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
+; EG: XOR_INT {{\** *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
+; SI: v_xor_b32_e32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
+; SI: v_xor_b32_e32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
define void @xor_v2i32(<2 x i32> addrspace(1)* %out, <2 x i32> addrspace(1)* %in0, <2 x i32> addrspace(1)* %in1) {
%a = load <2 x i32> addrspace(1) * %in0
@@ -18,17 +18,16 @@ define void @xor_v2i32(<2 x i32> addrspace(1)* %out, <2 x i32> addrspace(1)* %in
ret void
}
-;EG-CHECK: {{^}}xor_v4i32:
-;EG-CHECK: XOR_INT {{\** *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
-;EG-CHECK: XOR_INT {{\** *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
-;EG-CHECK: XOR_INT {{\** *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
-;EG-CHECK: XOR_INT {{\** *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
+; FUNC-LABEL: {{^}}xor_v4i32:
+; EG: XOR_INT {{\** *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
+; EG: XOR_INT {{\** *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
+; EG: XOR_INT {{\** *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
+; EG: XOR_INT {{\** *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
-;SI-CHECK: {{^}}xor_v4i32:
-;SI-CHECK: v_xor_b32_e32 {{v[0-9]+, v[0-9]+, v[0-9]+}}
-;SI-CHECK: v_xor_b32_e32 {{v[0-9]+, v[0-9]+, v[0-9]+}}
-;SI-CHECK: v_xor_b32_e32 {{v[0-9]+, v[0-9]+, v[0-9]+}}
-;SI-CHECK: v_xor_b32_e32 {{v[0-9]+, v[0-9]+, v[0-9]+}}
+; SI: v_xor_b32_e32 {{v[0-9]+, v[0-9]+, v[0-9]+}}
+; SI: v_xor_b32_e32 {{v[0-9]+, v[0-9]+, v[0-9]+}}
+; SI: v_xor_b32_e32 {{v[0-9]+, v[0-9]+, v[0-9]+}}
+; SI: v_xor_b32_e32 {{v[0-9]+, v[0-9]+, v[0-9]+}}
define void @xor_v4i32(<4 x i32> addrspace(1)* %out, <4 x i32> addrspace(1)* %in0, <4 x i32> addrspace(1)* %in1) {
%a = load <4 x i32> addrspace(1) * %in0
@@ -38,25 +37,42 @@ define void @xor_v4i32(<4 x i32> addrspace(1)* %out, <4 x i32> addrspace(1)* %in
ret void
}
-;EG-CHECK: {{^}}xor_i1:
-;EG-CHECK: XOR_INT {{\** *}}T{{[0-9]+\.[XYZW], PV\.[XYZW], PS}}
-
-;SI-CHECK: {{^}}xor_i1:
-;SI-CHECK: v_xor_b32_e32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
+; FUNC-LABEL: {{^}}xor_i1:
+; EG: XOR_INT {{\** *}}T{{[0-9]+\.[XYZW], PV\.[XYZW], PS}}
+; SI-DAG: v_cmp_ge_f32_e64 [[CMP0:s\[[0-9]+:[0-9]+\]]], {{v[0-9]+}}, 0
+; SI-DAG: v_cmp_ge_f32_e64 [[CMP1:s\[[0-9]+:[0-9]+\]]], {{v[0-9]+}}, 1.0
+; SI: s_xor_b64 [[XOR:s\[[0-9]+:[0-9]+\]]], [[CMP0]], [[CMP1]]
+; SI: v_cndmask_b32_e64 [[RESULT:v[0-9]+]], {{v[0-9]+}}, {{v[0-9]+}}, [[XOR]]
+; SI: buffer_store_dword [[RESULT]]
+; SI: s_endpgm
define void @xor_i1(float addrspace(1)* %out, float addrspace(1)* %in0, float addrspace(1)* %in1) {
%a = load float addrspace(1) * %in0
%b = load float addrspace(1) * %in1
%acmp = fcmp oge float %a, 0.000000e+00
- %bcmp = fcmp oge float %b, 0.000000e+00
+ %bcmp = fcmp oge float %b, 1.000000e+00
%xor = xor i1 %acmp, %bcmp
%result = select i1 %xor, float %a, float %b
store float %result, float addrspace(1)* %out
ret void
}
-; SI-CHECK-LABEL: {{^}}vector_xor_i32:
-; SI-CHECK: v_xor_b32_e32
+; FUNC-LABEL: {{^}}v_xor_i1:
+; SI: buffer_load_ubyte [[B:v[0-9]+]]
+; SI: buffer_load_ubyte [[A:v[0-9]+]]
+; SI: v_xor_b32_e32 [[XOR:v[0-9]+]], [[A]], [[B]]
+; SI: v_and_b32_e32 [[RESULT:v[0-9]+]], 1, [[XOR]]
+; SI: buffer_store_byte [[RESULT]]
+define void @v_xor_i1(i1 addrspace(1)* %out, i1 addrspace(1)* %in0, i1 addrspace(1)* %in1) {
+ %a = load i1 addrspace(1)* %in0
+ %b = load i1 addrspace(1)* %in1
+ %xor = xor i1 %a, %b
+ store i1 %xor, i1 addrspace(1)* %out
+ ret void
+}
+
+; FUNC-LABEL: {{^}}vector_xor_i32:
+; SI: v_xor_b32_e32
define void @vector_xor_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %in0, i32 addrspace(1)* %in1) {
%a = load i32 addrspace(1)* %in0
%b = load i32 addrspace(1)* %in1
@@ -65,24 +81,24 @@ define void @vector_xor_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %in0, i32
ret void
}
-; SI-CHECK-LABEL: {{^}}scalar_xor_i32:
-; SI-CHECK: s_xor_b32
+; FUNC-LABEL: {{^}}scalar_xor_i32:
+; SI: s_xor_b32
define void @scalar_xor_i32(i32 addrspace(1)* %out, i32 %a, i32 %b) {
%result = xor i32 %a, %b
store i32 %result, i32 addrspace(1)* %out
ret void
}
-; SI-CHECK-LABEL: {{^}}scalar_not_i32:
-; SI-CHECK: s_not_b32
+; FUNC-LABEL: {{^}}scalar_not_i32:
+; SI: s_not_b32
define void @scalar_not_i32(i32 addrspace(1)* %out, i32 %a) {
%result = xor i32 %a, -1
store i32 %result, i32 addrspace(1)* %out
ret void
}
-; SI-CHECK-LABEL: {{^}}vector_not_i32:
-; SI-CHECK: v_not_b32
+; FUNC-LABEL: {{^}}vector_not_i32:
+; SI: v_not_b32
define void @vector_not_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %in0, i32 addrspace(1)* %in1) {
%a = load i32 addrspace(1)* %in0
%b = load i32 addrspace(1)* %in1
@@ -91,10 +107,10 @@ define void @vector_not_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %in0, i32
ret void
}
-; SI-CHECK-LABEL: {{^}}vector_xor_i64:
-; SI-CHECK: v_xor_b32_e32
-; SI-CHECK: v_xor_b32_e32
-; SI-CHECK: s_endpgm
+; FUNC-LABEL: {{^}}vector_xor_i64:
+; SI: v_xor_b32_e32
+; SI: v_xor_b32_e32
+; SI: s_endpgm
define void @vector_xor_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %in0, i64 addrspace(1)* %in1) {
%a = load i64 addrspace(1)* %in0
%b = load i64 addrspace(1)* %in1
@@ -103,26 +119,26 @@ define void @vector_xor_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %in0, i64
ret void
}
-; SI-CHECK-LABEL: {{^}}scalar_xor_i64:
-; SI-CHECK: s_xor_b64
-; SI-CHECK: s_endpgm
+; FUNC-LABEL: {{^}}scalar_xor_i64:
+; SI: s_xor_b64
+; SI: s_endpgm
define void @scalar_xor_i64(i64 addrspace(1)* %out, i64 %a, i64 %b) {
%result = xor i64 %a, %b
store i64 %result, i64 addrspace(1)* %out
ret void
}
-; SI-CHECK-LABEL: {{^}}scalar_not_i64:
-; SI-CHECK: s_not_b64
+; FUNC-LABEL: {{^}}scalar_not_i64:
+; SI: s_not_b64
define void @scalar_not_i64(i64 addrspace(1)* %out, i64 %a) {
%result = xor i64 %a, -1
store i64 %result, i64 addrspace(1)* %out
ret void
}
-; SI-CHECK-LABEL: {{^}}vector_not_i64:
-; SI-CHECK: v_not_b32
-; SI-CHECK: v_not_b32
+; FUNC-LABEL: {{^}}vector_not_i64:
+; SI: v_not_b32
+; SI: v_not_b32
define void @vector_not_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %in0, i64 addrspace(1)* %in1) {
%a = load i64 addrspace(1)* %in0
%b = load i64 addrspace(1)* %in1
@@ -135,8 +151,8 @@ define void @vector_not_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %in0, i64
; Note that in the future the backend may be smart enough to
; use an SALU instruction for this.
-; SI-CHECK-LABEL: {{^}}xor_cf:
-; SI-CHECK: s_xor_b64
+; FUNC-LABEL: {{^}}xor_cf:
+; SI: s_xor_b64
define void @xor_cf(i64 addrspace(1)* %out, i64 addrspace(1)* %in, i64 %a, i64 %b) {
entry:
%0 = icmp eq i64 %a, 0
diff --git a/test/CodeGen/R600/zero_extend.ll b/test/CodeGen/R600/zero_extend.ll
index 0fe1f15..033055d 100644
--- a/test/CodeGen/R600/zero_extend.ll
+++ b/test/CodeGen/R600/zero_extend.ll
@@ -1,14 +1,15 @@
-; RUN: llc < %s -march=r600 -mcpu=redwood | FileCheck %s --check-prefix=R600-CHECK
-; RUN: llc < %s -march=r600 -mcpu=SI -verify-machineinstrs | FileCheck %s --check-prefix=SI-CHECK
+; RUN: llc < %s -march=r600 -mcpu=redwood | FileCheck %s --check-prefix=R600
+; RUN: llc < %s -march=amdgcn -mcpu=SI -verify-machineinstrs | FileCheck %s --check-prefix=SI
+; RUN: llc < %s -march=amdgcn -mcpu=tonga -verify-machineinstrs | FileCheck %s --check-prefix=SI
-; R600-CHECK: {{^}}test:
-; R600-CHECK: MEM_RAT_CACHELESS STORE_RAW
-; R600-CHECK: MEM_RAT_CACHELESS STORE_RAW
+; R600: {{^}}test:
+; R600: MEM_RAT_CACHELESS STORE_RAW
+; R600: MEM_RAT_CACHELESS STORE_RAW
-; SI-CHECK: {{^}}test:
-; SI-CHECK: s_mov_b32 [[ZERO:s[0-9]]], 0{{$}}
-; SI-CHECK: v_mov_b32_e32 v[[V_ZERO:[0-9]]], [[ZERO]]
-; SI-CHECK: buffer_store_dwordx2 v[0:[[V_ZERO]]{{\]}}
+; SI: {{^}}test:
+; SI: s_mov_b32 [[ZERO:s[0-9]]], 0{{$}}
+; SI: v_mov_b32_e32 v[[V_ZERO:[0-9]]], [[ZERO]]
+; SI: buffer_store_dwordx2 v[0:[[V_ZERO]]{{\]}}
define void @test(i64 addrspace(1)* %out, i32 %a, i32 %b, i32 %c) {
entry:
%0 = mul i32 %a, %b
@@ -18,8 +19,8 @@ entry:
ret void
}
-; SI-CHECK-LABEL: {{^}}testi1toi32:
-; SI-CHECK: v_cndmask_b32
+; SI-LABEL: {{^}}testi1toi32:
+; SI: v_cndmask_b32
define void @testi1toi32(i32 addrspace(1)* %out, i32 %a, i32 %b) {
entry:
%0 = icmp eq i32 %a, %b
@@ -28,10 +29,10 @@ entry:
ret void
}
-; SI-CHECK-LABEL: {{^}}zext_i1_to_i64:
-; SI-CHECK: v_cmp_eq_i32
-; SI-CHECK: v_cndmask_b32
-; SI-CHECK: s_mov_b32 s{{[0-9]+}}, 0
+; SI-LABEL: {{^}}zext_i1_to_i64:
+; SI: s_mov_b32 s{{[0-9]+}}, 0
+; SI: v_cmp_eq_i32
+; SI: v_cndmask_b32
define void @zext_i1_to_i64(i64 addrspace(1)* %out, i32 %a, i32 %b) nounwind {
%cmp = icmp eq i32 %a, %b
%ext = zext i1 %cmp to i64
diff --git a/test/CodeGen/SPARC/2008-10-10-InlineAsmMemoryOperand.ll b/test/CodeGen/SPARC/2008-10-10-InlineAsmMemoryOperand.ll
index e8315f1..373a196 100644
--- a/test/CodeGen/SPARC/2008-10-10-InlineAsmMemoryOperand.ll
+++ b/test/CodeGen/SPARC/2008-10-10-InlineAsmMemoryOperand.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=sparc
+; RUN: llc < %s -march=sparc -no-integrated-as
; PR 1557
target datalayout = "E-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-f128:128:128"
diff --git a/test/CodeGen/SPARC/float.ll b/test/CodeGen/SPARC/float.ll
index 6636704..d7a79cb 100644
--- a/test/CodeGen/SPARC/float.ll
+++ b/test/CodeGen/SPARC/float.ll
@@ -154,11 +154,11 @@ entry:
; SPARC64: fitod
; SPARC64: fdtoi
-define void @test_itod_dtoi(i32 %a, i32* %ptr0, double* %ptr1) {
+define void @test_itod_dtoi(i32 %a, double %b, i32* %ptr0, double* %ptr1) {
entry:
%0 = sitofp i32 %a to double
store double %0, double* %ptr1, align 8
- %1 = fptosi double %0 to i32
+ %1 = fptosi double %b to i32
store i32 %1, i32* %ptr0, align 8
ret void
}
diff --git a/test/CodeGen/SPARC/fp128.ll b/test/CodeGen/SPARC/fp128.ll
index abd89bf..a06112a 100644
--- a/test/CodeGen/SPARC/fp128.ll
+++ b/test/CodeGen/SPARC/fp128.ll
@@ -182,26 +182,28 @@ entry:
}
; HARD-LABEL: test_itoq_qtoi
-; HARD: call _Q_lltoq
-; HARD: call _Q_qtoll
-; HARD: fitoq
-; HARD: fqtoi
+; HARD-DAG: call _Q_lltoq
+; HARD-DAG: call _Q_qtoll
+; HARD-DAG: fitoq
+; HARD-DAG: fqtoi
; SOFT-LABEL: test_itoq_qtoi
-; SOFT: call _Q_lltoq
-; SOFT: call _Q_qtoll
-; SOFT: call _Q_itoq
-; SOFT: call _Q_qtoi
+; SOFT-DAG: call _Q_lltoq
+; SOFT-DAG: call _Q_qtoll
+; SOFT-DAG: call _Q_itoq
+; SOFT-DAG: call _Q_qtoi
-define void @test_itoq_qtoi(i64 %a, i32 %b, i64* %ptr0, fp128* %ptr1) {
+define void @test_itoq_qtoi(i64 %a, i32 %b, fp128* %c, fp128* %d, i64* %ptr0, fp128* %ptr1) {
entry:
%0 = sitofp i64 %a to fp128
store fp128 %0, fp128* %ptr1, align 8
- %1 = fptosi fp128 %0 to i64
+ %cval = load fp128* %c, align 8
+ %1 = fptosi fp128 %cval to i64
store i64 %1, i64* %ptr0, align 8
%2 = sitofp i32 %b to fp128
store fp128 %2, fp128* %ptr1, align 8
- %3 = fptosi fp128 %2 to i32
+ %dval = load fp128* %d, align 8
+ %3 = fptosi fp128 %dval to i32
%4 = bitcast i64* %ptr0 to i32*
store i32 %3, i32* %4, align 8
ret void
@@ -219,15 +221,17 @@ entry:
; SOFT-DAG: call _Q_utoq
; SOFT-DAG: call _Q_qtou
-define void @test_utoq_qtou(i64 %a, i32 %b, i64* %ptr0, fp128* %ptr1) {
+define void @test_utoq_qtou(i64 %a, i32 %b, fp128* %c, fp128* %d, i64* %ptr0, fp128* %ptr1) {
entry:
%0 = uitofp i64 %a to fp128
store fp128 %0, fp128* %ptr1, align 8
- %1 = fptoui fp128 %0 to i64
+ %cval = load fp128* %c, align 8
+ %1 = fptoui fp128 %cval to i64
store i64 %1, i64* %ptr0, align 8
%2 = uitofp i32 %b to fp128
store fp128 %2, fp128* %ptr1, align 8
- %3 = fptoui fp128 %2 to i32
+ %dval = load fp128* %d, align 8
+ %3 = fptoui fp128 %dval to i32
%4 = bitcast i64* %ptr0 to i32*
store i32 %3, i32* %4, align 8
ret void
diff --git a/test/CodeGen/SPARC/inlineasm.ll b/test/CodeGen/SPARC/inlineasm.ll
index 2650533..526cde8 100644
--- a/test/CodeGen/SPARC/inlineasm.ll
+++ b/test/CodeGen/SPARC/inlineasm.ll
@@ -1,4 +1,4 @@
-; RUN: llc -march=sparc <%s | FileCheck %s
+; RUN: llc -march=sparc -no-integrated-as <%s | FileCheck %s
; CHECK-LABEL: test_constraint_r
; CHECK: add %o1, %o0, %o0
diff --git a/test/CodeGen/SPARC/mult-alt-generic-sparc.ll b/test/CodeGen/SPARC/mult-alt-generic-sparc.ll
index 6013b17..6a67616 100644
--- a/test/CodeGen/SPARC/mult-alt-generic-sparc.ll
+++ b/test/CodeGen/SPARC/mult-alt-generic-sparc.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=sparc
+; RUN: llc < %s -march=sparc -no-integrated-as
; ModuleID = 'mult-alt-generic.c'
target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-n32"
target triple = "sparc"
diff --git a/test/CodeGen/SPARC/setjmp.ll b/test/CodeGen/SPARC/setjmp.ll
index a31cd70..17afb36 100644
--- a/test/CodeGen/SPARC/setjmp.ll
+++ b/test/CodeGen/SPARC/setjmp.ll
@@ -65,8 +65,8 @@ attributes #0 = { nounwind }
attributes #1 = { noreturn nounwind }
attributes #2 = { nounwind returns_twice }
-!0 = metadata !{metadata !"alias set 6: struct.jmpbuf_env*", metadata !1}
-!1 = metadata !{metadata !1}
-!2 = metadata !{metadata !"alias set 3: int", metadata !1}
-!3 = metadata !{metadata !0, metadata !0, i64 0}
-!4 = metadata !{metadata !2, metadata !2, i64 0}
+!0 = !{!"alias set 6: struct.jmpbuf_env*", !1}
+!1 = !{!1}
+!2 = !{!"alias set 3: int", !1}
+!3 = !{!0, !0, i64 0}
+!4 = !{!2, !2, i64 0}
diff --git a/test/CodeGen/SystemZ/alias-01.ll b/test/CodeGen/SystemZ/alias-01.ll
index 8839aad..89a7318 100644
--- a/test/CodeGen/SystemZ/alias-01.ll
+++ b/test/CodeGen/SystemZ/alias-01.ll
@@ -14,6 +14,6 @@ define void @f1(<16 x i32> *%src1, <16 x float> *%dest) {
ret void
}
-!0 = metadata !{ metadata !"root" }
-!1 = metadata !{ metadata !"set1", metadata !0 }
-!2 = metadata !{ metadata !"set2", metadata !0 }
+!0 = !{ !"root" }
+!1 = !{ !"set1", !0 }
+!2 = !{ !"set2", !0 }
diff --git a/test/CodeGen/SystemZ/and-08.ll b/test/CodeGen/SystemZ/and-08.ll
index 7ded115..a328c4e 100644
--- a/test/CodeGen/SystemZ/and-08.ll
+++ b/test/CodeGen/SystemZ/and-08.ll
@@ -371,8 +371,8 @@ define void @f26(i64 *%ptr1, i64 *%ptr2) {
ret void
}
-!0 = metadata !{ metadata !"root" }
-!1 = metadata !{ metadata !"set1", metadata !0 }
-!2 = metadata !{ metadata !"set2", metadata !0 }
-!3 = metadata !{ metadata !1, metadata !1, i64 0}
-!4 = metadata !{ metadata !2, metadata !2, i64 0}
+!0 = !{ !"root" }
+!1 = !{ !"set1", !0 }
+!2 = !{ !"set2", !0 }
+!3 = !{ !1, !1, i64 0}
+!4 = !{ !2, !2, i64 0}
diff --git a/test/CodeGen/SystemZ/asm-01.ll b/test/CodeGen/SystemZ/asm-01.ll
index 801378c..3dbc8ac 100644
--- a/test/CodeGen/SystemZ/asm-01.ll
+++ b/test/CodeGen/SystemZ/asm-01.ll
@@ -1,7 +1,7 @@
; Test the "Q" asm constraint, which accepts addresses that have a base
; and a 12-bit displacement.
;
-; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s
+; RUN: llc < %s -mtriple=s390x-linux-gnu -no-integrated-as | FileCheck %s
; Check the lowest range.
define void @f1(i64 %base) {
diff --git a/test/CodeGen/SystemZ/asm-02.ll b/test/CodeGen/SystemZ/asm-02.ll
index ad1e35b..458bfeb 100644
--- a/test/CodeGen/SystemZ/asm-02.ll
+++ b/test/CodeGen/SystemZ/asm-02.ll
@@ -1,7 +1,7 @@
; Test the "R" asm constraint, which accepts addresses that have a base,
; an index and a 12-bit displacement.
;
-; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s
+; RUN: llc < %s -mtriple=s390x-linux-gnu -no-integrated-as | FileCheck %s
; Check the lowest range.
define void @f1(i64 %base) {
diff --git a/test/CodeGen/SystemZ/asm-03.ll b/test/CodeGen/SystemZ/asm-03.ll
index fa3e1a7..2e60ad6 100644
--- a/test/CodeGen/SystemZ/asm-03.ll
+++ b/test/CodeGen/SystemZ/asm-03.ll
@@ -1,7 +1,7 @@
; Test the "S" asm constraint, which accepts addresses that have a base
; and a 20-bit displacement.
;
-; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s
+; RUN: llc < %s -mtriple=s390x-linux-gnu -no-integrated-as | FileCheck %s
define void @f1(i64 %base) {
; CHECK-LABEL: f1:
diff --git a/test/CodeGen/SystemZ/asm-04.ll b/test/CodeGen/SystemZ/asm-04.ll
index af7ea9f..b212253 100644
--- a/test/CodeGen/SystemZ/asm-04.ll
+++ b/test/CodeGen/SystemZ/asm-04.ll
@@ -1,7 +1,7 @@
; Test the "T" asm constraint, which accepts addresses that have a base,
; an index and a 20-bit displacement.
;
-; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s
+; RUN: llc < %s -mtriple=s390x-linux-gnu -no-integrated-as | FileCheck %s
define void @f1(i64 %base) {
; CHECK-LABEL: f1:
diff --git a/test/CodeGen/SystemZ/asm-05.ll b/test/CodeGen/SystemZ/asm-05.ll
index e18cb75..db99b10 100644
--- a/test/CodeGen/SystemZ/asm-05.ll
+++ b/test/CodeGen/SystemZ/asm-05.ll
@@ -1,6 +1,6 @@
; Test the "m" asm constraint, which is equivalent to "T".
;
-; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s
+; RUN: llc < %s -mtriple=s390x-linux-gnu -no-integrated-as | FileCheck %s
define void @f1(i64 %base) {
; CHECK-LABEL: f1:
diff --git a/test/CodeGen/SystemZ/asm-06.ll b/test/CodeGen/SystemZ/asm-06.ll
index f9848a2..73c938f 100644
--- a/test/CodeGen/SystemZ/asm-06.ll
+++ b/test/CodeGen/SystemZ/asm-06.ll
@@ -1,6 +1,6 @@
; Test the GPR constraint "a", which forbids %r0.
;
-; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s
+; RUN: llc < %s -mtriple=s390x-linux-gnu -no-integrated-as | FileCheck %s
define i64 @f1() {
; CHECK-LABEL: f1:
diff --git a/test/CodeGen/SystemZ/asm-07.ll b/test/CodeGen/SystemZ/asm-07.ll
index bf63150..42b89e6 100644
--- a/test/CodeGen/SystemZ/asm-07.ll
+++ b/test/CodeGen/SystemZ/asm-07.ll
@@ -1,6 +1,6 @@
; Test the GPR constraint "r".
;
-; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s
+; RUN: llc < %s -mtriple=s390x-linux-gnu -no-integrated-as | FileCheck %s
define i64 @f1() {
; CHECK-LABEL: f1:
diff --git a/test/CodeGen/SystemZ/asm-08.ll b/test/CodeGen/SystemZ/asm-08.ll
index 1662337..4185108 100644
--- a/test/CodeGen/SystemZ/asm-08.ll
+++ b/test/CodeGen/SystemZ/asm-08.ll
@@ -1,6 +1,6 @@
; Test the GPR constraint "d", which is equivalent to "r".
;
-; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s
+; RUN: llc < %s -mtriple=s390x-linux-gnu -no-integrated-as | FileCheck %s
define i64 @f1() {
; CHECK-LABEL: f1:
diff --git a/test/CodeGen/SystemZ/asm-09.ll b/test/CodeGen/SystemZ/asm-09.ll
index 5cd7efb..b9d86cf 100644
--- a/test/CodeGen/SystemZ/asm-09.ll
+++ b/test/CodeGen/SystemZ/asm-09.ll
@@ -1,6 +1,6 @@
; Test matching operands with the GPR constraint "r".
;
-; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s
+; RUN: llc < %s -mtriple=s390x-linux-gnu -no-integrated-as | FileCheck %s
define void @f1(i32 *%dst) {
; CHECK-LABEL: f1:
diff --git a/test/CodeGen/SystemZ/asm-10.ll b/test/CodeGen/SystemZ/asm-10.ll
index 0eccc19..b71db83 100644
--- a/test/CodeGen/SystemZ/asm-10.ll
+++ b/test/CodeGen/SystemZ/asm-10.ll
@@ -1,6 +1,6 @@
; Test the FPR constraint "f".
;
-; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s
+; RUN: llc < %s -mtriple=s390x-linux-gnu -no-integrated-as | FileCheck %s
define float @f1() {
; CHECK-LABEL: f1:
diff --git a/test/CodeGen/SystemZ/asm-11.ll b/test/CodeGen/SystemZ/asm-11.ll
index 8aeb784..8a4cdbb 100644
--- a/test/CodeGen/SystemZ/asm-11.ll
+++ b/test/CodeGen/SystemZ/asm-11.ll
@@ -1,6 +1,6 @@
; Test the "I" constraint (8-bit unsigned constants).
;
-; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s
+; RUN: llc < %s -mtriple=s390x-linux-gnu -no-integrated-as | FileCheck %s
; Test 1 below the first valid value.
define i32 @f1() {
diff --git a/test/CodeGen/SystemZ/asm-12.ll b/test/CodeGen/SystemZ/asm-12.ll
index feecbac..115092c 100644
--- a/test/CodeGen/SystemZ/asm-12.ll
+++ b/test/CodeGen/SystemZ/asm-12.ll
@@ -1,6 +1,6 @@
; Test the "J" constraint (12-bit unsigned constants).
;
-; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s
+; RUN: llc < %s -mtriple=s390x-linux-gnu -no-integrated-as | FileCheck %s
; Test 1 below the first valid value.
define i32 @f1() {
diff --git a/test/CodeGen/SystemZ/asm-13.ll b/test/CodeGen/SystemZ/asm-13.ll
index b881700..83454ea 100644
--- a/test/CodeGen/SystemZ/asm-13.ll
+++ b/test/CodeGen/SystemZ/asm-13.ll
@@ -1,6 +1,6 @@
; Test the "K" constraint (16-bit signed constants).
;
-; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s
+; RUN: llc < %s -mtriple=s390x-linux-gnu -no-integrated-as | FileCheck %s
; Test 1 below the first valid value.
define i32 @f1() {
diff --git a/test/CodeGen/SystemZ/asm-14.ll b/test/CodeGen/SystemZ/asm-14.ll
index bcd8b1e..41b8f40 100644
--- a/test/CodeGen/SystemZ/asm-14.ll
+++ b/test/CodeGen/SystemZ/asm-14.ll
@@ -1,6 +1,6 @@
; Test the "L" constraint (20-bit signed constants).
;
-; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s
+; RUN: llc < %s -mtriple=s390x-linux-gnu -no-integrated-as | FileCheck %s
; Test 1 below the first valid value.
define i32 @f1() {
diff --git a/test/CodeGen/SystemZ/asm-15.ll b/test/CodeGen/SystemZ/asm-15.ll
index 886ee0e..8361b68 100644
--- a/test/CodeGen/SystemZ/asm-15.ll
+++ b/test/CodeGen/SystemZ/asm-15.ll
@@ -1,6 +1,6 @@
; Test the "M" constraint (0x7fffffff)
;
-; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s
+; RUN: llc < %s -mtriple=s390x-linux-gnu -no-integrated-as | FileCheck %s
; Test 1 below the valid value.
define i32 @f1() {
diff --git a/test/CodeGen/SystemZ/asm-16.ll b/test/CodeGen/SystemZ/asm-16.ll
index 886ee0e..8361b68 100644
--- a/test/CodeGen/SystemZ/asm-16.ll
+++ b/test/CodeGen/SystemZ/asm-16.ll
@@ -1,6 +1,6 @@
; Test the "M" constraint (0x7fffffff)
;
-; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s
+; RUN: llc < %s -mtriple=s390x-linux-gnu -no-integrated-as | FileCheck %s
; Test 1 below the valid value.
define i32 @f1() {
diff --git a/test/CodeGen/SystemZ/asm-17.ll b/test/CodeGen/SystemZ/asm-17.ll
index 7bc9da3..533b5e9 100644
--- a/test/CodeGen/SystemZ/asm-17.ll
+++ b/test/CodeGen/SystemZ/asm-17.ll
@@ -1,6 +1,6 @@
; Test explicit register names.
;
-; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s
+; RUN: llc < %s -mtriple=s390x-linux-gnu -no-integrated-as | FileCheck %s
; Test i32 GPRs.
define i32 @f1() {
diff --git a/test/CodeGen/SystemZ/asm-18.ll b/test/CodeGen/SystemZ/asm-18.ll
index d60654b..71e145a 100644
--- a/test/CodeGen/SystemZ/asm-18.ll
+++ b/test/CodeGen/SystemZ/asm-18.ll
@@ -1,7 +1,7 @@
; Test high-word operations, using "h" constraints to force a high
; register and "r" constraints to force a low register.
;
-; RUN: llc < %s -mtriple=s390x-linux-gnu -mcpu=z196 | FileCheck %s
+; RUN: llc < %s -mtriple=s390x-linux-gnu -mcpu=z196 -no-integrated-as | FileCheck %s
; Test loads and stores involving mixtures of high and low registers.
define void @f1(i32 *%ptr1, i32 *%ptr2) {
diff --git a/test/CodeGen/SystemZ/fp-cmp-04.ll b/test/CodeGen/SystemZ/fp-cmp-04.ll
index 781a3be..1637ccb 100644
--- a/test/CodeGen/SystemZ/fp-cmp-04.ll
+++ b/test/CodeGen/SystemZ/fp-cmp-04.ll
@@ -1,7 +1,7 @@
; Test that floating-point compares are omitted if CC already has the
; right value.
;
-; RUN: llc < %s -mtriple=s390x-linux-gnu -mcpu=z10 | FileCheck %s
+; RUN: llc < %s -mtriple=s390x-linux-gnu -mcpu=z10 -no-integrated-as | FileCheck %s
declare float @llvm.fabs.f32(float %f)
diff --git a/test/CodeGen/SystemZ/int-cmp-44.ll b/test/CodeGen/SystemZ/int-cmp-44.ll
index f065e64..30c1c4f 100644
--- a/test/CodeGen/SystemZ/int-cmp-44.ll
+++ b/test/CodeGen/SystemZ/int-cmp-44.ll
@@ -1,7 +1,7 @@
; Test that compares are omitted if CC already has the right value
; (z10 version).
;
-; RUN: llc < %s -mtriple=s390x-linux-gnu -mcpu=z10 | FileCheck %s
+; RUN: llc < %s -mtriple=s390x-linux-gnu -mcpu=z10 -no-integrated-as | FileCheck %s
declare void @foo()
diff --git a/test/CodeGen/SystemZ/int-cmp-45.ll b/test/CodeGen/SystemZ/int-cmp-45.ll
index 9c9c49c..c9affa6 100644
--- a/test/CodeGen/SystemZ/int-cmp-45.ll
+++ b/test/CodeGen/SystemZ/int-cmp-45.ll
@@ -1,7 +1,7 @@
; Test that compares are omitted if CC already has the right value
; (z196 version).
;
-; RUN: llc < %s -mtriple=s390x-linux-gnu -mcpu=z196 | FileCheck %s
+; RUN: llc < %s -mtriple=s390x-linux-gnu -mcpu=z196 -no-integrated-as | FileCheck %s
; Addition provides enough for equality comparisons with zero. First test
; the EQ case with LOC.
diff --git a/test/CodeGen/SystemZ/memchr-02.ll b/test/CodeGen/SystemZ/memchr-02.ll
index 982b396..8986627 100644
--- a/test/CodeGen/SystemZ/memchr-02.ll
+++ b/test/CodeGen/SystemZ/memchr-02.ll
@@ -1,6 +1,6 @@
; Test memchr using SRST, with the correct prototype.
;
-; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s
+; RUN: llc < %s -mtriple=s390x-linux-gnu -no-integrated-as | FileCheck %s
declare i8 *@memchr(i8 *%src, i32 %char, i64 %len)
diff --git a/test/CodeGen/SystemZ/memcpy-02.ll b/test/CodeGen/SystemZ/memcpy-02.ll
index 2b01091..776cfee 100644
--- a/test/CodeGen/SystemZ/memcpy-02.ll
+++ b/test/CodeGen/SystemZ/memcpy-02.ll
@@ -385,8 +385,8 @@ define void @f32(i64 *%ptr1, i64 *%ptr2) {
ret void
}
-!0 = metadata !{ metadata !"root" }
-!1 = metadata !{ metadata !3, metadata !3, i64 0 }
-!2 = metadata !{ metadata !4, metadata !4, i64 0 }
-!3 = metadata !{ metadata !"set1", metadata !0 }
-!4 = metadata !{ metadata !"set2", metadata !0 }
+!0 = !{ !"root" }
+!1 = !{ !3, !3, i64 0 }
+!2 = !{ !4, !4, i64 0 }
+!3 = !{ !"set1", !0 }
+!4 = !{ !"set2", !0 }
diff --git a/test/CodeGen/SystemZ/tls-01.ll b/test/CodeGen/SystemZ/tls-01.ll
index 16bc8f6..da7176c 100644
--- a/test/CodeGen/SystemZ/tls-01.ll
+++ b/test/CodeGen/SystemZ/tls-01.ll
@@ -1,7 +1,7 @@
-; Test initial-exec TLS accesses.
+; Test local-exec TLS accesses.
;
-; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s -check-prefix=CHECK-MAIN
-; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s -check-prefix=CHECK-CP
+; RUN: llc < %s -mcpu=z10 -mtriple=s390x-linux-gnu | FileCheck %s -check-prefix=CHECK-MAIN
+; RUN: llc < %s -mcpu=z10 -mtriple=s390x-linux-gnu | FileCheck %s -check-prefix=CHECK-CP
@x = thread_local global i32 0
diff --git a/test/CodeGen/SystemZ/tls-02.ll b/test/CodeGen/SystemZ/tls-02.ll
new file mode 100644
index 0000000..15918d0
--- /dev/null
+++ b/test/CodeGen/SystemZ/tls-02.ll
@@ -0,0 +1,18 @@
+; Test initial-exec TLS accesses.
+;
+; RUN: llc < %s -mcpu=z10 -mtriple=s390x-linux-gnu -relocation-model=pic | FileCheck %s -check-prefix=CHECK-MAIN
+
+@x = thread_local(initialexec) global i32 0
+
+; The offset must be loaded from the GOT. This TLS access model does
+; not use literal pool constants.
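+; (On s390x the thread pointer lives in the access registers %a0/%a1, so the
+; ear/sllg/ear sequence below reassembles it in a GPR before the add.)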
+define i32 *@foo() {
+; CHECK-MAIN-LABEL: foo:
+; CHECK-MAIN: ear [[HIGH:%r[0-5]]], %a0
+; CHECK-MAIN: sllg %r2, [[HIGH]], 32
+; CHECK-MAIN: ear %r2, %a1
+; CHECK-MAIN: larl %r1, x@INDNTPOFF
+; CHECK-MAIN: ag %r2, 0(%r1)
+; CHECK-MAIN: br %r14
+ ret i32 *@x
+}
diff --git a/test/CodeGen/SystemZ/tls-03.ll b/test/CodeGen/SystemZ/tls-03.ll
new file mode 100644
index 0000000..c9f7bd6
--- /dev/null
+++ b/test/CodeGen/SystemZ/tls-03.ll
@@ -0,0 +1,23 @@
+; Test general-dynamic TLS accesses.
+;
+; RUN: llc < %s -mcpu=z10 -mtriple=s390x-linux-gnu -relocation-model=pic | FileCheck %s -check-prefix=CHECK-MAIN
+; RUN: llc < %s -mcpu=z10 -mtriple=s390x-linux-gnu -relocation-model=pic | FileCheck %s -check-prefix=CHECK-CP
+
+@x = thread_local global i32 0
+
+; Call __tls_get_offset to retrieve the symbol's TLS offset.
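+; (The call takes the GOT address in %r12 and the TLSGD literal-pool entry
+; in %r2; the DAG checks below allow either to be set up first.)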
+define i32 *@foo() {
+; CHECK-CP: .LCP{{.*}}:
+; CHECK-CP: .quad x@TLSGD
+;
+; CHECK-MAIN-LABEL: foo:
+; CHECK-MAIN-DAG: larl %r12, _GLOBAL_OFFSET_TABLE_
+; CHECK-MAIN-DAG: lgrl %r2, .LCP{{.*}}
+; CHECK-MAIN: brasl %r14, __tls_get_offset@PLT:tls_gdcall:x
+; CHECK-MAIN: ear [[HIGH:%r[0-5]]], %a0
+; CHECK-MAIN: sllg [[TP:%r[0-5]]], [[HIGH]], 32
+; CHECK-MAIN: ear [[TP]], %a1
+; CHECK-MAIN: agr %r2, [[TP]]
+; CHECK-MAIN: br %r14
+ ret i32 *@x
+}
diff --git a/test/CodeGen/SystemZ/tls-04.ll b/test/CodeGen/SystemZ/tls-04.ll
new file mode 100644
index 0000000..dcb210a
--- /dev/null
+++ b/test/CodeGen/SystemZ/tls-04.ll
@@ -0,0 +1,28 @@
+; Test local-dynamic TLS accesses.
+;
+; RUN: llc < %s -mcpu=z10 -mtriple=s390x-linux-gnu -relocation-model=pic | FileCheck %s -check-prefix=CHECK-MAIN
+; RUN: llc < %s -mcpu=z10 -mtriple=s390x-linux-gnu -relocation-model=pic | FileCheck %s -check-prefix=CHECK-CP
+
+@x = thread_local(localdynamic) global i32 0
+
+; Call __tls_get_offset to retrieve the module's TLS base offset.
+; Add the per-symbol offset and the thread pointer.
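+; (The literal pool provides both the module ID for the TLSLDM call and the
+; symbol's DTPOFF offset that is added to the returned base.)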
+define i32 *@foo() {
+; CHECK-CP: .LCP{{.*}}_0:
+; CHECK-CP: .quad x@TLSLDM
+; CHECK-CP: .LCP{{.*}}_1:
+; CHECK-CP: .quad x@DTPOFF
+;
+; CHECK-MAIN-LABEL: foo:
+; CHECK-MAIN-DAG: larl %r12, _GLOBAL_OFFSET_TABLE_
+; CHECK-MAIN-DAG: lgrl %r2, .LCP{{.*}}_0
+; CHECK-MAIN: brasl %r14, __tls_get_offset@PLT:tls_ldcall:x
+; CHECK-MAIN: larl %r1, .LCP{{.*}}_1
+; CHECK-MAIN: ag %r2, 0(%r1)
+; CHECK-MAIN: ear [[HIGH:%r[0-5]]], %a0
+; CHECK-MAIN: sllg [[TP:%r[0-5]]], [[HIGH]], 32
+; CHECK-MAIN: ear [[TP]], %a1
+; CHECK-MAIN: agr %r2, [[TP]]
+; CHECK-MAIN: br %r14
+ ret i32 *@x
+}
diff --git a/test/CodeGen/SystemZ/tls-05.ll b/test/CodeGen/SystemZ/tls-05.ll
new file mode 100644
index 0000000..385208d
--- /dev/null
+++ b/test/CodeGen/SystemZ/tls-05.ll
@@ -0,0 +1,15 @@
+; Test general-dynamic TLS access optimizations.
+;
+; If we access the same TLS variable twice, there should only be
+; a single call to __tls_get_offset.
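+; (The load and the store of @x below are expected to share one such call.)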
+;
+; RUN: llc < %s -mcpu=z10 -mtriple=s390x-linux-gnu -relocation-model=pic | grep "__tls_get_offset" | count 1
+
+@x = thread_local global i32 0
+
+define i32 @foo() {
+ %val = load i32* @x
+ %inc = add nsw i32 %val, 1
+ store i32 %inc, i32* @x
+ ret i32 %val
+}
diff --git a/test/CodeGen/SystemZ/tls-06.ll b/test/CodeGen/SystemZ/tls-06.ll
new file mode 100644
index 0000000..fcd8614
--- /dev/null
+++ b/test/CodeGen/SystemZ/tls-06.ll
@@ -0,0 +1,17 @@
+; Test general-dynamic TLS access optimizations.
+;
+; If we access two different TLS variables, we need two calls to
+; __tls_get_offset, but should load _GLOBAL_OFFSET_TABLE_ only once.
+;
+; RUN: llc < %s -mcpu=z10 -mtriple=s390x-linux-gnu -relocation-model=pic | grep "__tls_get_offset" | count 2
+; RUN: llc < %s -mcpu=z10 -mtriple=s390x-linux-gnu -relocation-model=pic | grep "_GLOBAL_OFFSET_TABLE_" | count 1
+
+@x = thread_local global i32 0
+@y = thread_local global i32 0
+
+define i32 @foo() {
+ %valx = load i32* @x
+ %valy = load i32* @y
+ %add = add nsw i32 %valx, %valy
+ ret i32 %add
+}
diff --git a/test/CodeGen/SystemZ/tls-07.ll b/test/CodeGen/SystemZ/tls-07.ll
new file mode 100644
index 0000000..6547515
--- /dev/null
+++ b/test/CodeGen/SystemZ/tls-07.ll
@@ -0,0 +1,16 @@
+; Test local-dynamic TLS access optimizations.
+;
+; If we access two different local-dynamic TLS variables, we only
+; need a single call to __tls_get_offset.
+;
+; RUN: llc < %s -mcpu=z10 -mtriple=s390x-linux-gnu -relocation-model=pic | grep "__tls_get_offset" | count 1
+
+@x = thread_local(localdynamic) global i32 0
+@y = thread_local(localdynamic) global i32 0
+
+define i32 @foo() {
+ %valx = load i32* @x
+ %valy = load i32* @y
+ %add = add nsw i32 %valx, %valy
+ ret i32 %add
+}
diff --git a/test/CodeGen/Thumb/2010-07-15-debugOrdering.ll b/test/CodeGen/Thumb/2010-07-15-debugOrdering.ll
index d31a84b..622f55d 100644
--- a/test/CodeGen/Thumb/2010-07-15-debugOrdering.ll
+++ b/test/CodeGen/Thumb/2010-07-15-debugOrdering.ll
@@ -25,7 +25,7 @@ define void @_Z19getClosestDiagonal3ii(%0* noalias sret, i32, i32) nounwind {
%storemerge = phi double [ -1.000000e+00, %4 ], [ 1.000000e+00, %3 ], [ 1.000000e+00, %3 ] ; <double> [#uses=1]
%v_6 = icmp slt i32 %1, 2 ; <i1> [#uses=1]
%storemerge1 = select i1 %v_6, double 1.000000e+00, double -1.000000e+00 ; <double> [#uses=3]
- call void @llvm.dbg.value(metadata !{double %storemerge}, i64 0, metadata !91, metadata !{metadata !"0x102"}), !dbg !0
+ call void @llvm.dbg.value(metadata double %storemerge, i64 0, metadata !91, metadata !{!"0x102"}), !dbg !0
%v_7 = icmp eq i32 %2, 1, !dbg !92 ; <i1> [#uses=1]
%storemerge2 = select i1 %v_7, double 1.000000e+00, double -1.000000e+00 ; <double> [#uses=3]
%v_8 = getelementptr inbounds %0* %0, i32 0, i32 0, i32 0 ; <double*> [#uses=1]
@@ -48,108 +48,108 @@ declare void @llvm.dbg.value(metadata, i64, metadata, metadata) nounwind readnon
!llvm.dbg.cu = !{!5}
!llvm.module.flags = !{!104}
-!0 = metadata !{i32 46, i32 0, metadata !1, null}
-!1 = metadata !{metadata !"0xb\0044\000\000", metadata !101, metadata !2} ; [ DW_TAG_lexical_block ]
-!2 = metadata !{metadata !"0xb\0044\000\000", metadata !101, metadata !3} ; [ DW_TAG_lexical_block ]
-!3 = metadata !{metadata !"0x2e\00getClosestDiagonal3\00getClosestDiagonal3\00_Z19getClosestDiagonal3ii\0044\000\001\000\006\000\000\000", metadata !101, null, metadata !6, null, null, null, null, null} ; [ DW_TAG_subprogram ]
-!4 = metadata !{metadata !"0x29", metadata !101} ; [ DW_TAG_file_type ]
-!5 = metadata !{metadata !"0x11\004\004.2.1 (Based on Apple Inc. build 5658) (LLVM build 00)\001\00\000\00\000", metadata !101, metadata !102, metadata !102, metadata !103, null, null} ; [ DW_TAG_compile_unit ]
-!6 = metadata !{metadata !"0x15\00\000\000\000\000\000\000", metadata !101, metadata !4, null, metadata !7, null, null, null} ; [ DW_TAG_subroutine_type ] [line 0, size 0, align 0, offset 0] [from ]
-!7 = metadata !{metadata !8, metadata !22, metadata !22}
-!8 = metadata !{metadata !"0x13\00ggVector3\0066\00192\0032\000\000\000", metadata !99, null, null, metadata !10, null, null, null} ; [ DW_TAG_structure_type ] [ggVector3] [line 66, size 192, align 32, offset 0] [def] [from ]
-!9 = metadata !{metadata !"0x29", metadata !"ggVector3.h", metadata !"/Volumes/Home/grosbaj/sources/llvm-externals/speccpu2000/benchspec/CINT2000/252.eon/src", metadata !5} ; [ DW_TAG_file_type ]
-!99 = metadata !{metadata !"ggVector3.h", metadata !"/Volumes/Home/grosbaj/sources/llvm-externals/speccpu2000/benchspec/CINT2000/252.eon/src"}
-!10 = metadata !{metadata !11, metadata !16, metadata !23, metadata !26, metadata !29, metadata !30, metadata !35, metadata !36, metadata !37, metadata !41, metadata !42, metadata !43, metadata !46, metadata !47, metadata !48, metadata !52, metadata !53, metadata !54, metadata !57, metadata !60, metadata !63, metadata !66, metadata !70, metadata !71, metadata !74, metadata !75, metadata !76, metadata !77, metadata !78, metadata !81, metadata !82, metadata !83, metadata !84, metadata !85, metadata !88, metadata !89, metadata !90}
-!11 = metadata !{metadata !"0xd\00e\00160\00192\0032\000\000", metadata !99, metadata !8, metadata !12} ; [ DW_TAG_member ]
-!12 = metadata !{metadata !"0x1\00\000\00192\0032\000\000", metadata !101, metadata !4, metadata !13, metadata !14, i32 0, null, null, null} ; [ DW_TAG_array_type ] [line 0, size 192, align 32, offset 0] [from double]
-!13 = metadata !{metadata !"0x24\00double\000\0064\0032\000\000\004", metadata !101, metadata !4} ; [ DW_TAG_base_type ]
-!14 = metadata !{metadata !15}
-!15 = metadata !{metadata !"0x21\000\003"} ; [ DW_TAG_subrange_type ]
-!16 = metadata !{metadata !"0x2e\00ggVector3\00ggVector3\00\0072\000\000\000\006\000\000\000", metadata !9, metadata !8, metadata !17, null, null, null, null, null} ; [ DW_TAG_subprogram ]
-!17 = metadata !{metadata !"0x15\00\000\000\000\000\000\000", metadata !101, metadata !4, null, metadata !18, null, null, null} ; [ DW_TAG_subroutine_type ] [line 0, size 0, align 0, offset 0] [from ]
-!18 = metadata !{null, metadata !19, metadata !20}
-!19 = metadata !{metadata !"0xf\00\000\0032\0032\000\0064", metadata !101, metadata !4, metadata !8} ; [ DW_TAG_pointer_type ]
-!20 = metadata !{metadata !"0x16\00ggBoolean\00478\000\000\000\000", metadata !100, null, metadata !22} ; [ DW_TAG_typedef ]
-!21 = metadata !{metadata !"0x29", metadata !"math.h", metadata !"/Developer/Platforms/iPhoneOS.platform/Developer/SDKs/iPhoneOS4.2.Internal.sdk/usr/include/architecture/arm", metadata !5} ; [ DW_TAG_file_type ]
-!100 = metadata !{metadata !"math.h", metadata !"/Developer/Platforms/iPhoneOS.platform/Developer/SDKs/iPhoneOS4.2.Internal.sdk/usr/include/architecture/arm"}
-!22 = metadata !{metadata !"0x24\00int\000\0032\0032\000\000\005", metadata !101, metadata !4} ; [ DW_TAG_base_type ]
-!23 = metadata !{metadata !"0x2e\00ggVector3\00ggVector3\00\0073\000\000\000\006\000\000\000", metadata !9, metadata !8, metadata !24, null, null, null, null, null} ; [ DW_TAG_subprogram ]
-!24 = metadata !{metadata !"0x15\00\000\000\000\000\000\000", metadata !101, metadata !4, null, metadata !25, null, null, null} ; [ DW_TAG_subroutine_type ] [line 0, size 0, align 0, offset 0] [from ]
-!25 = metadata !{null, metadata !19}
-!26 = metadata !{metadata !"0x2e\00ggVector3\00ggVector3\00\0074\000\000\000\006\000\000\000", metadata !9, metadata !8, metadata !27, null, null, null, null, null} ; [ DW_TAG_subprogram ]
-!27 = metadata !{metadata !"0x15\00\000\000\000\000\000\000", metadata !101, metadata !4, null, metadata !28, null, null, null} ; [ DW_TAG_subroutine_type ] [line 0, size 0, align 0, offset 0] [from ]
-!28 = metadata !{null, metadata !19, metadata !13, metadata !13, metadata !13}
-!29 = metadata !{metadata !"0x2e\00Set\00Set\00_ZN9ggVector33SetEddd\0081\000\000\000\006\000\000\000", metadata !9, metadata !8, metadata !27, null, null, null, null, null} ; [ DW_TAG_subprogram ]
-!30 = metadata !{metadata !"0x2e\00x\00x\00_ZNK9ggVector31xEv\0082\000\000\000\006\000\000\000", metadata !9, metadata !8, metadata !31, null, null, null, null, null} ; [ DW_TAG_subprogram ]
-!31 = metadata !{metadata !"0x15\00\000\000\000\000\000\000", metadata !101, metadata !4, null, metadata !32, null, null, null} ; [ DW_TAG_subroutine_type ] [line 0, size 0, align 0, offset 0] [from ]
-!32 = metadata !{metadata !13, metadata !33}
-!33 = metadata !{metadata !"0xf\00\000\0032\0032\000\0064", metadata !101, metadata !4, metadata !34} ; [ DW_TAG_pointer_type ]
-!34 = metadata !{metadata !"0x26\00\000\00192\0032\000\000", metadata !101, metadata !4, metadata !8} ; [ DW_TAG_const_type ]
-!35 = metadata !{metadata !"0x2e\00y\00y\00_ZNK9ggVector31yEv\0083\000\000\000\006\000\000\000", metadata !9, metadata !8, metadata !31, null, null, null, null, null} ; [ DW_TAG_subprogram ]
-!36 = metadata !{metadata !"0x2e\00z\00z\00_ZNK9ggVector31zEv\0084\000\000\000\006\000\000\000", metadata !9, metadata !8, metadata !31, null, null, null, null, null} ; [ DW_TAG_subprogram ]
-!37 = metadata !{metadata !"0x2e\00x\00x\00_ZN9ggVector31xEv\0085\000\001\000\006\000\000\000", metadata !9, metadata !8, metadata !38, null, null, null, null, null} ; [ DW_TAG_subprogram ]
-!38 = metadata !{metadata !"0x15\00\000\000\000\000\000\000", metadata !101, metadata !4, null, metadata !39, null, null, null} ; [ DW_TAG_subroutine_type ] [line 0, size 0, align 0, offset 0] [from ]
-!39 = metadata !{metadata !40, metadata !19}
-!40 = metadata !{metadata !"0x10\00double\000\0032\0032\000\000", metadata !101, metadata !4, metadata !13} ; [ DW_TAG_reference_type ]
-!41 = metadata !{metadata !"0x2e\00y\00y\00_ZN9ggVector31yEv\0086\000\001\000\006\000\000\000", metadata !9, metadata !8, metadata !38, null, null, null, null, null} ; [ DW_TAG_subprogram ]
-!42 = metadata !{metadata !"0x2e\00z\00z\00_ZN9ggVector31zEv\0087\000\001\000\006\000\000\000", metadata !9, metadata !8, metadata !38, null, null, null, null, null} ; [ DW_TAG_subprogram ]
-!43 = metadata !{metadata !"0x2e\00SetX\00SetX\00_ZN9ggVector34SetXEd\0088\000\000\000\006\000\000\000", metadata !9, metadata !8, metadata !44, null, null, null, null, null} ; [ DW_TAG_subprogram ]
-!44 = metadata !{metadata !"0x15\00\000\000\000\000\000\000", metadata !101, metadata !4, null, metadata !45, null, null, null} ; [ DW_TAG_subroutine_type ] [line 0, size 0, align 0, offset 0] [from ]
-!45 = metadata !{null, metadata !19, metadata !13}
-!46 = metadata !{metadata !"0x2e\00SetY\00SetY\00_ZN9ggVector34SetYEd\0089\000\000\000\006\000\000\000", metadata !9, metadata !8, metadata !44, null, null, null, null, null} ; [ DW_TAG_subprogram ]
-!47 = metadata !{metadata !"0x2e\00SetZ\00SetZ\00_ZN9ggVector34SetZEd\0090\000\000\000\006\000\000\000", metadata !9, metadata !8, metadata !44, null, null, null, null, null} ; [ DW_TAG_subprogram ]
-!48 = metadata !{metadata !"0x2e\00ggVector3\00ggVector3\00\0092\000\000\000\006\000\000\000", metadata !9, metadata !8, metadata !49, null, null, null, null, null} ; [ DW_TAG_subprogram ]
-!49 = metadata !{metadata !"0x15\00\000\000\000\000\000\000", metadata !101, metadata !4, null, metadata !50, null, null, null} ; [ DW_TAG_subroutine_type ] [line 0, size 0, align 0, offset 0] [from ]
-!50 = metadata !{null, metadata !19, metadata !51}
-!51 = metadata !{metadata !"0x10\00\000\0032\0032\000\000", metadata !101, metadata !4, metadata !34} ; [ DW_TAG_reference_type ]
-!52 = metadata !{metadata !"0x2e\00tolerance\00tolerance\00_ZNK9ggVector39toleranceEv\00100\000\000\000\006\000\000\000", metadata !9, metadata !8, metadata !31, null, null, null, null, null} ; [ DW_TAG_subprogram ]
-!53 = metadata !{metadata !"0x2e\00tolerance\00tolerance\00_ZN9ggVector39toleranceEv\00101\000\000\000\006\000\000\000", metadata !9, metadata !8, metadata !38, null, null, null, null, null} ; [ DW_TAG_subprogram ]
-!54 = metadata !{metadata !"0x2e\00operator+\00operator+\00_ZNK9ggVector3psEv\00107\000\000\000\006\000\000\000", metadata !9, metadata !8, metadata !55, null, null, null, null, null} ; [ DW_TAG_subprogram ]
-!55 = metadata !{metadata !"0x15\00\000\000\000\000\000\000", metadata !101, metadata !4, null, metadata !56, null, null, null} ; [ DW_TAG_subroutine_type ] [line 0, size 0, align 0, offset 0] [from ]
-!56 = metadata !{metadata !51, metadata !33}
-!57 = metadata !{metadata !"0x2e\00operator-\00operator-\00_ZNK9ggVector3ngEv\00108\000\000\000\006\000\000\000", metadata !9, metadata !8, metadata !58, null, null, null, null, null} ; [ DW_TAG_subprogram ]
-!58 = metadata !{metadata !"0x15\00\000\000\000\000\000\000", metadata !101, metadata !4, null, metadata !59, null, null, null} ; [ DW_TAG_subroutine_type ] [line 0, size 0, align 0, offset 0] [from ]
-!59 = metadata !{metadata !8, metadata !33}
-!60 = metadata !{metadata !"0x2e\00operator[]\00operator[]\00_ZNK9ggVector3ixEi\00290\000\000\000\006\000\000\000", metadata !9, metadata !8, metadata !61, null, null, null, null, null} ; [ DW_TAG_subprogram ]
-!61 = metadata !{metadata !"0x15\00\000\000\000\000\000\000", metadata !101, metadata !4, null, metadata !62, null, null, null} ; [ DW_TAG_subroutine_type ] [line 0, size 0, align 0, offset 0] [from ]
-!62 = metadata !{metadata !13, metadata !33, metadata !22}
-!63 = metadata !{metadata !"0x2e\00operator[]\00operator[]\00_ZN9ggVector3ixEi\00278\000\000\000\006\000\000\000", metadata !9, metadata !8, metadata !64, null, null, null, null, null} ; [ DW_TAG_subprogram ]
-!64 = metadata !{metadata !"0x15\00\000\000\000\000\000\000", metadata !101, metadata !4, null, metadata !65, null, null, null} ; [ DW_TAG_subroutine_type ] [line 0, size 0, align 0, offset 0] [from ]
-!65 = metadata !{metadata !40, metadata !19, metadata !22}
-!66 = metadata !{metadata !"0x2e\00operator+=\00operator+=\00_ZN9ggVector3pLERKS_\00303\000\000\000\006\000\000\000", metadata !9, metadata !8, metadata !67, null, null, null, null, null} ; [ DW_TAG_subprogram ]
-!67 = metadata !{metadata !"0x15\00\000\000\000\000\000\000", metadata !101, metadata !4, null, metadata !68, null, null, null} ; [ DW_TAG_subroutine_type ] [line 0, size 0, align 0, offset 0] [from ]
-!68 = metadata !{metadata !69, metadata !19, metadata !51}
-!69 = metadata !{metadata !"0x10\00ggVector3\000\0032\0032\000\000", metadata !101, metadata !4, metadata !8} ; [ DW_TAG_reference_type ]
-!70 = metadata !{metadata !"0x2e\00operator-=\00operator-=\00_ZN9ggVector3mIERKS_\00310\000\000\000\006\000\000\000", metadata !9, metadata !8, metadata !67, null, null, null, null, null} ; [ DW_TAG_subprogram ]
-!71 = metadata !{metadata !"0x2e\00operator*=\00operator*=\00_ZN9ggVector3mLEd\00317\000\000\000\006\000\000\000", metadata !9, metadata !8, metadata !72, null, null, null, null, null} ; [ DW_TAG_subprogram ]
-!72 = metadata !{metadata !"0x15\00\000\000\000\000\000\000", metadata !101, metadata !4, null, metadata !73, null, null, null} ; [ DW_TAG_subroutine_type ] [line 0, size 0, align 0, offset 0] [from ]
-!73 = metadata !{metadata !69, metadata !19, metadata !13}
-!74 = metadata !{metadata !"0x2e\00operator/=\00operator/=\00_ZN9ggVector3dVEd\00324\000\000\000\006\000\000\000", metadata !9, metadata !8, metadata !72, null, null, null, null, null} ; [ DW_TAG_subprogram ]
-!75 = metadata !{metadata !"0x2e\00length\00length\00_ZNK9ggVector36lengthEv\00121\000\000\000\006\000\000\000", metadata !9, metadata !8, metadata !31, null, null, null, null, null} ; [ DW_TAG_subprogram ]
-!76 = metadata !{metadata !"0x2e\00squaredLength\00squaredLength\00_ZNK9ggVector313squaredLengthEv\00122\000\000\000\006\000\000\000", metadata !9, metadata !8, metadata !31, null, null, null, null, null} ; [ DW_TAG_subprogram ]
-!77 = metadata !{metadata !"0x2e\00MakeUnitVector\00MakeUnitVector\00_ZN9ggVector314MakeUnitVectorEv\00217\000\001\000\006\000\000\000", metadata !9, metadata !8, metadata !24, null, null, null, null, null} ; [ DW_TAG_subprogram ]
-!78 = metadata !{metadata !"0x2e\00Perturb\00Perturb\00_ZNK9ggVector37PerturbEdd\00126\000\000\000\006\000\000\000", metadata !9, metadata !8, metadata !79, null, null, null, null, null} ; [ DW_TAG_subprogram ]
-!79 = metadata !{metadata !"0x15\00\000\000\000\000\000\000", metadata !101, metadata !4, null, metadata !80, null, null, null} ; [ DW_TAG_subroutine_type ] [line 0, size 0, align 0, offset 0] [from ]
-!80 = metadata !{metadata !8, metadata !33, metadata !13, metadata !13}
-!81 = metadata !{metadata !"0x2e\00maxComponent\00maxComponent\00_ZNK9ggVector312maxComponentEv\00128\000\000\000\006\000\000\000", metadata !9, metadata !8, metadata !31, null, null, null, null, null} ; [ DW_TAG_subprogram ]
-!82 = metadata !{metadata !"0x2e\00minComponent\00minComponent\00_ZNK9ggVector312minComponentEv\00129\000\000\000\006\000\000\000", metadata !9, metadata !8, metadata !31, null, null, null, null, null} ; [ DW_TAG_subprogram ]
-!83 = metadata !{metadata !"0x2e\00maxAbsComponent\00maxAbsComponent\00_ZNK9ggVector315maxAbsComponentEv\00131\000\000\000\006\000\000\000", metadata !9, metadata !8, metadata !31, null, null, null, null, null} ; [ DW_TAG_subprogram ]
-!84 = metadata !{metadata !"0x2e\00minAbsComponent\00minAbsComponent\00_ZNK9ggVector315minAbsComponentEv\00132\000\000\000\006\000\000\000", metadata !9, metadata !8, metadata !31, null, null, null, null, null} ; [ DW_TAG_subprogram ]
-!85 = metadata !{metadata !"0x2e\00indexOfMinComponent\00indexOfMinComponent\00_ZNK9ggVector319indexOfMinComponentEv\00133\000\000\000\006\000\000\000", metadata !9, metadata !8, metadata !86, null, null, null, null, null} ; [ DW_TAG_subprogram ]
-!86 = metadata !{metadata !"0x15\00\000\000\000\000\000\000", metadata !101, metadata !4, null, metadata !87, null, null, null} ; [ DW_TAG_subroutine_type ] [line 0, size 0, align 0, offset 0] [from ]
-!87 = metadata !{metadata !22, metadata !33}
-!88 = metadata !{metadata !"0x2e\00indexOfMinAbsComponent\00indexOfMinAbsComponent\00_ZNK9ggVector322indexOfMinAbsComponentEv\00137\000\000\000\006\000\000\000", metadata !9, metadata !8, metadata !86, null, null, null, null, null} ; [ DW_TAG_subprogram ]
-!89 = metadata !{metadata !"0x2e\00indexOfMaxComponent\00indexOfMaxComponent\00_ZNK9ggVector319indexOfMaxComponentEv\00146\000\000\000\006\000\000\000", metadata !9, metadata !8, metadata !86, null, null, null, null, null} ; [ DW_TAG_subprogram ]
-!90 = metadata !{metadata !"0x2e\00indexOfMaxAbsComponent\00indexOfMaxAbsComponent\00_ZNK9ggVector322indexOfMaxAbsComponentEv\00150\000\000\000\006\000\000\000", metadata !9, metadata !8, metadata !86, null, null, null, null, null} ; [ DW_TAG_subprogram ]
-!91 = metadata !{metadata !"0x100\00vx\0046\000", metadata !1, metadata !4, metadata !13} ; [ DW_TAG_auto_variable ]
-!92 = metadata !{i32 48, i32 0, metadata !1, null}
-!93 = metadata !{i32 218, i32 0, metadata !94, metadata !96}
-!94 = metadata !{metadata !"0xb\00217\000\000", metadata !101, metadata !95} ; [ DW_TAG_lexical_block ]
-!95 = metadata !{metadata !"0xb\00217\000\000", metadata !101, metadata !77} ; [ DW_TAG_lexical_block ]
-!96 = metadata !{i32 51, i32 0, metadata !1, null}
-!97 = metadata !{i32 227, i32 0, metadata !94, metadata !96}
-!98 = metadata !{i32 52, i32 0, metadata !1, null}
-!101 = metadata !{metadata !"ggEdgeDiscrepancy.cc", metadata !"/Volumes/Home/grosbaj/sources/llvm-externals/speccpu2000/benchspec/CINT2000/252.eon/src"}
-!102 = metadata !{i32 0}
-!103 = metadata !{metadata !3, metadata !77}
-!104 = metadata !{i32 1, metadata !"Debug Info Version", i32 2}
+!0 = !MDLocation(line: 46, scope: !1)
+!1 = !{!"0xb\0044\000\000", !101, !2} ; [ DW_TAG_lexical_block ]
+!2 = !{!"0xb\0044\000\000", !101, !3} ; [ DW_TAG_lexical_block ]
+!3 = !{!"0x2e\00getClosestDiagonal3\00getClosestDiagonal3\00_Z19getClosestDiagonal3ii\0044\000\001\000\006\000\000\000", !101, null, !6, null, null, null, null, null} ; [ DW_TAG_subprogram ]
+!4 = !{!"0x29", !101} ; [ DW_TAG_file_type ]
+!5 = !{!"0x11\004\004.2.1 (Based on Apple Inc. build 5658) (LLVM build 00)\001\00\000\00\000", !101, !102, !102, !103, null, null} ; [ DW_TAG_compile_unit ]
+!6 = !{!"0x15\00\000\000\000\000\000\000", !101, !4, null, !7, null, null, null} ; [ DW_TAG_subroutine_type ] [line 0, size 0, align 0, offset 0] [from ]
+!7 = !{!8, !22, !22}
+!8 = !{!"0x13\00ggVector3\0066\00192\0032\000\000\000", !99, null, null, !10, null, null, null} ; [ DW_TAG_structure_type ] [ggVector3] [line 66, size 192, align 32, offset 0] [def] [from ]
+!9 = !{!"0x29", !"ggVector3.h", !"/Volumes/Home/grosbaj/sources/llvm-externals/speccpu2000/benchspec/CINT2000/252.eon/src", !5} ; [ DW_TAG_file_type ]
+!99 = !{!"ggVector3.h", !"/Volumes/Home/grosbaj/sources/llvm-externals/speccpu2000/benchspec/CINT2000/252.eon/src"}
+!10 = !{!11, !16, !23, !26, !29, !30, !35, !36, !37, !41, !42, !43, !46, !47, !48, !52, !53, !54, !57, !60, !63, !66, !70, !71, !74, !75, !76, !77, !78, !81, !82, !83, !84, !85, !88, !89, !90}
+!11 = !{!"0xd\00e\00160\00192\0032\000\000", !99, !8, !12} ; [ DW_TAG_member ]
+!12 = !{!"0x1\00\000\00192\0032\000\000", !101, !4, !13, !14, i32 0, null, null, null} ; [ DW_TAG_array_type ] [line 0, size 192, align 32, offset 0] [from double]
+!13 = !{!"0x24\00double\000\0064\0032\000\000\004", !101, !4} ; [ DW_TAG_base_type ]
+!14 = !{!15}
+!15 = !{!"0x21\000\003"} ; [ DW_TAG_subrange_type ]
+!16 = !{!"0x2e\00ggVector3\00ggVector3\00\0072\000\000\000\006\000\000\000", !9, !8, !17, null, null, null, null, null} ; [ DW_TAG_subprogram ]
+!17 = !{!"0x15\00\000\000\000\000\000\000", !101, !4, null, !18, null, null, null} ; [ DW_TAG_subroutine_type ] [line 0, size 0, align 0, offset 0] [from ]
+!18 = !{null, !19, !20}
+!19 = !{!"0xf\00\000\0032\0032\000\0064", !101, !4, !8} ; [ DW_TAG_pointer_type ]
+!20 = !{!"0x16\00ggBoolean\00478\000\000\000\000", !100, null, !22} ; [ DW_TAG_typedef ]
+!21 = !{!"0x29", !"math.h", !"/Developer/Platforms/iPhoneOS.platform/Developer/SDKs/iPhoneOS4.2.Internal.sdk/usr/include/architecture/arm", !5} ; [ DW_TAG_file_type ]
+!100 = !{!"math.h", !"/Developer/Platforms/iPhoneOS.platform/Developer/SDKs/iPhoneOS4.2.Internal.sdk/usr/include/architecture/arm"}
+!22 = !{!"0x24\00int\000\0032\0032\000\000\005", !101, !4} ; [ DW_TAG_base_type ]
+!23 = !{!"0x2e\00ggVector3\00ggVector3\00\0073\000\000\000\006\000\000\000", !9, !8, !24, null, null, null, null, null} ; [ DW_TAG_subprogram ]
+!24 = !{!"0x15\00\000\000\000\000\000\000", !101, !4, null, !25, null, null, null} ; [ DW_TAG_subroutine_type ] [line 0, size 0, align 0, offset 0] [from ]
+!25 = !{null, !19}
+!26 = !{!"0x2e\00ggVector3\00ggVector3\00\0074\000\000\000\006\000\000\000", !9, !8, !27, null, null, null, null, null} ; [ DW_TAG_subprogram ]
+!27 = !{!"0x15\00\000\000\000\000\000\000", !101, !4, null, !28, null, null, null} ; [ DW_TAG_subroutine_type ] [line 0, size 0, align 0, offset 0] [from ]
+!28 = !{null, !19, !13, !13, !13}
+!29 = !{!"0x2e\00Set\00Set\00_ZN9ggVector33SetEddd\0081\000\000\000\006\000\000\000", !9, !8, !27, null, null, null, null, null} ; [ DW_TAG_subprogram ]
+!30 = !{!"0x2e\00x\00x\00_ZNK9ggVector31xEv\0082\000\000\000\006\000\000\000", !9, !8, !31, null, null, null, null, null} ; [ DW_TAG_subprogram ]
+!31 = !{!"0x15\00\000\000\000\000\000\000", !101, !4, null, !32, null, null, null} ; [ DW_TAG_subroutine_type ] [line 0, size 0, align 0, offset 0] [from ]
+!32 = !{!13, !33}
+!33 = !{!"0xf\00\000\0032\0032\000\0064", !101, !4, !34} ; [ DW_TAG_pointer_type ]
+!34 = !{!"0x26\00\000\00192\0032\000\000", !101, !4, !8} ; [ DW_TAG_const_type ]
+!35 = !{!"0x2e\00y\00y\00_ZNK9ggVector31yEv\0083\000\000\000\006\000\000\000", !9, !8, !31, null, null, null, null, null} ; [ DW_TAG_subprogram ]
+!36 = !{!"0x2e\00z\00z\00_ZNK9ggVector31zEv\0084\000\000\000\006\000\000\000", !9, !8, !31, null, null, null, null, null} ; [ DW_TAG_subprogram ]
+!37 = !{!"0x2e\00x\00x\00_ZN9ggVector31xEv\0085\000\001\000\006\000\000\000", !9, !8, !38, null, null, null, null, null} ; [ DW_TAG_subprogram ]
+!38 = !{!"0x15\00\000\000\000\000\000\000", !101, !4, null, !39, null, null, null} ; [ DW_TAG_subroutine_type ] [line 0, size 0, align 0, offset 0] [from ]
+!39 = !{!40, !19}
+!40 = !{!"0x10\00double\000\0032\0032\000\000", !101, !4, !13} ; [ DW_TAG_reference_type ]
+!41 = !{!"0x2e\00y\00y\00_ZN9ggVector31yEv\0086\000\001\000\006\000\000\000", !9, !8, !38, null, null, null, null, null} ; [ DW_TAG_subprogram ]
+!42 = !{!"0x2e\00z\00z\00_ZN9ggVector31zEv\0087\000\001\000\006\000\000\000", !9, !8, !38, null, null, null, null, null} ; [ DW_TAG_subprogram ]
+!43 = !{!"0x2e\00SetX\00SetX\00_ZN9ggVector34SetXEd\0088\000\000\000\006\000\000\000", !9, !8, !44, null, null, null, null, null} ; [ DW_TAG_subprogram ]
+!44 = !{!"0x15\00\000\000\000\000\000\000", !101, !4, null, !45, null, null, null} ; [ DW_TAG_subroutine_type ] [line 0, size 0, align 0, offset 0] [from ]
+!45 = !{null, !19, !13}
+!46 = !{!"0x2e\00SetY\00SetY\00_ZN9ggVector34SetYEd\0089\000\000\000\006\000\000\000", !9, !8, !44, null, null, null, null, null} ; [ DW_TAG_subprogram ]
+!47 = !{!"0x2e\00SetZ\00SetZ\00_ZN9ggVector34SetZEd\0090\000\000\000\006\000\000\000", !9, !8, !44, null, null, null, null, null} ; [ DW_TAG_subprogram ]
+!48 = !{!"0x2e\00ggVector3\00ggVector3\00\0092\000\000\000\006\000\000\000", !9, !8, !49, null, null, null, null, null} ; [ DW_TAG_subprogram ]
+!49 = !{!"0x15\00\000\000\000\000\000\000", !101, !4, null, !50, null, null, null} ; [ DW_TAG_subroutine_type ] [line 0, size 0, align 0, offset 0] [from ]
+!50 = !{null, !19, !51}
+!51 = !{!"0x10\00\000\0032\0032\000\000", !101, !4, !34} ; [ DW_TAG_reference_type ]
+!52 = !{!"0x2e\00tolerance\00tolerance\00_ZNK9ggVector39toleranceEv\00100\000\000\000\006\000\000\000", !9, !8, !31, null, null, null, null, null} ; [ DW_TAG_subprogram ]
+!53 = !{!"0x2e\00tolerance\00tolerance\00_ZN9ggVector39toleranceEv\00101\000\000\000\006\000\000\000", !9, !8, !38, null, null, null, null, null} ; [ DW_TAG_subprogram ]
+!54 = !{!"0x2e\00operator+\00operator+\00_ZNK9ggVector3psEv\00107\000\000\000\006\000\000\000", !9, !8, !55, null, null, null, null, null} ; [ DW_TAG_subprogram ]
+!55 = !{!"0x15\00\000\000\000\000\000\000", !101, !4, null, !56, null, null, null} ; [ DW_TAG_subroutine_type ] [line 0, size 0, align 0, offset 0] [from ]
+!56 = !{!51, !33}
+!57 = !{!"0x2e\00operator-\00operator-\00_ZNK9ggVector3ngEv\00108\000\000\000\006\000\000\000", !9, !8, !58, null, null, null, null, null} ; [ DW_TAG_subprogram ]
+!58 = !{!"0x15\00\000\000\000\000\000\000", !101, !4, null, !59, null, null, null} ; [ DW_TAG_subroutine_type ] [line 0, size 0, align 0, offset 0] [from ]
+!59 = !{!8, !33}
+!60 = !{!"0x2e\00operator[]\00operator[]\00_ZNK9ggVector3ixEi\00290\000\000\000\006\000\000\000", !9, !8, !61, null, null, null, null, null} ; [ DW_TAG_subprogram ]
+!61 = !{!"0x15\00\000\000\000\000\000\000", !101, !4, null, !62, null, null, null} ; [ DW_TAG_subroutine_type ] [line 0, size 0, align 0, offset 0] [from ]
+!62 = !{!13, !33, !22}
+!63 = !{!"0x2e\00operator[]\00operator[]\00_ZN9ggVector3ixEi\00278\000\000\000\006\000\000\000", !9, !8, !64, null, null, null, null, null} ; [ DW_TAG_subprogram ]
+!64 = !{!"0x15\00\000\000\000\000\000\000", !101, !4, null, !65, null, null, null} ; [ DW_TAG_subroutine_type ] [line 0, size 0, align 0, offset 0] [from ]
+!65 = !{!40, !19, !22}
+!66 = !{!"0x2e\00operator+=\00operator+=\00_ZN9ggVector3pLERKS_\00303\000\000\000\006\000\000\000", !9, !8, !67, null, null, null, null, null} ; [ DW_TAG_subprogram ]
+!67 = !{!"0x15\00\000\000\000\000\000\000", !101, !4, null, !68, null, null, null} ; [ DW_TAG_subroutine_type ] [line 0, size 0, align 0, offset 0] [from ]
+!68 = !{!69, !19, !51}
+!69 = !{!"0x10\00ggVector3\000\0032\0032\000\000", !101, !4, !8} ; [ DW_TAG_reference_type ]
+!70 = !{!"0x2e\00operator-=\00operator-=\00_ZN9ggVector3mIERKS_\00310\000\000\000\006\000\000\000", !9, !8, !67, null, null, null, null, null} ; [ DW_TAG_subprogram ]
+!71 = !{!"0x2e\00operator*=\00operator*=\00_ZN9ggVector3mLEd\00317\000\000\000\006\000\000\000", !9, !8, !72, null, null, null, null, null} ; [ DW_TAG_subprogram ]
+!72 = !{!"0x15\00\000\000\000\000\000\000", !101, !4, null, !73, null, null, null} ; [ DW_TAG_subroutine_type ] [line 0, size 0, align 0, offset 0] [from ]
+!73 = !{!69, !19, !13}
+!74 = !{!"0x2e\00operator/=\00operator/=\00_ZN9ggVector3dVEd\00324\000\000\000\006\000\000\000", !9, !8, !72, null, null, null, null, null} ; [ DW_TAG_subprogram ]
+!75 = !{!"0x2e\00length\00length\00_ZNK9ggVector36lengthEv\00121\000\000\000\006\000\000\000", !9, !8, !31, null, null, null, null, null} ; [ DW_TAG_subprogram ]
+!76 = !{!"0x2e\00squaredLength\00squaredLength\00_ZNK9ggVector313squaredLengthEv\00122\000\000\000\006\000\000\000", !9, !8, !31, null, null, null, null, null} ; [ DW_TAG_subprogram ]
+!77 = !{!"0x2e\00MakeUnitVector\00MakeUnitVector\00_ZN9ggVector314MakeUnitVectorEv\00217\000\001\000\006\000\000\000", !9, !8, !24, null, null, null, null, null} ; [ DW_TAG_subprogram ]
+!78 = !{!"0x2e\00Perturb\00Perturb\00_ZNK9ggVector37PerturbEdd\00126\000\000\000\006\000\000\000", !9, !8, !79, null, null, null, null, null} ; [ DW_TAG_subprogram ]
+!79 = !{!"0x15\00\000\000\000\000\000\000", !101, !4, null, !80, null, null, null} ; [ DW_TAG_subroutine_type ] [line 0, size 0, align 0, offset 0] [from ]
+!80 = !{!8, !33, !13, !13}
+!81 = !{!"0x2e\00maxComponent\00maxComponent\00_ZNK9ggVector312maxComponentEv\00128\000\000\000\006\000\000\000", !9, !8, !31, null, null, null, null, null} ; [ DW_TAG_subprogram ]
+!82 = !{!"0x2e\00minComponent\00minComponent\00_ZNK9ggVector312minComponentEv\00129\000\000\000\006\000\000\000", !9, !8, !31, null, null, null, null, null} ; [ DW_TAG_subprogram ]
+!83 = !{!"0x2e\00maxAbsComponent\00maxAbsComponent\00_ZNK9ggVector315maxAbsComponentEv\00131\000\000\000\006\000\000\000", !9, !8, !31, null, null, null, null, null} ; [ DW_TAG_subprogram ]
+!84 = !{!"0x2e\00minAbsComponent\00minAbsComponent\00_ZNK9ggVector315minAbsComponentEv\00132\000\000\000\006\000\000\000", !9, !8, !31, null, null, null, null, null} ; [ DW_TAG_subprogram ]
+!85 = !{!"0x2e\00indexOfMinComponent\00indexOfMinComponent\00_ZNK9ggVector319indexOfMinComponentEv\00133\000\000\000\006\000\000\000", !9, !8, !86, null, null, null, null, null} ; [ DW_TAG_subprogram ]
+!86 = !{!"0x15\00\000\000\000\000\000\000", !101, !4, null, !87, null, null, null} ; [ DW_TAG_subroutine_type ] [line 0, size 0, align 0, offset 0] [from ]
+!87 = !{!22, !33}
+!88 = !{!"0x2e\00indexOfMinAbsComponent\00indexOfMinAbsComponent\00_ZNK9ggVector322indexOfMinAbsComponentEv\00137\000\000\000\006\000\000\000", !9, !8, !86, null, null, null, null, null} ; [ DW_TAG_subprogram ]
+!89 = !{!"0x2e\00indexOfMaxComponent\00indexOfMaxComponent\00_ZNK9ggVector319indexOfMaxComponentEv\00146\000\000\000\006\000\000\000", !9, !8, !86, null, null, null, null, null} ; [ DW_TAG_subprogram ]
+!90 = !{!"0x2e\00indexOfMaxAbsComponent\00indexOfMaxAbsComponent\00_ZNK9ggVector322indexOfMaxAbsComponentEv\00150\000\000\000\006\000\000\000", !9, !8, !86, null, null, null, null, null} ; [ DW_TAG_subprogram ]
+!91 = !{!"0x100\00vx\0046\000", !1, !4, !13} ; [ DW_TAG_auto_variable ]
+!92 = !MDLocation(line: 48, scope: !1)
+!93 = !MDLocation(line: 218, scope: !94, inlinedAt: !96)
+!94 = !{!"0xb\00217\000\000", !101, !95} ; [ DW_TAG_lexical_block ]
+!95 = !{!"0xb\00217\000\000", !101, !77} ; [ DW_TAG_lexical_block ]
+!96 = !MDLocation(line: 51, scope: !1)
+!97 = !MDLocation(line: 227, scope: !94, inlinedAt: !96)
+!98 = !MDLocation(line: 52, scope: !1)
+!101 = !{!"ggEdgeDiscrepancy.cc", !"/Volumes/Home/grosbaj/sources/llvm-externals/speccpu2000/benchspec/CINT2000/252.eon/src"}
+!102 = !{i32 0}
+!103 = !{!3, !77}
+!104 = !{i32 1, !"Debug Info Version", i32 2}
diff --git a/test/CodeGen/Thumb/fastcc.ll b/test/CodeGen/Thumb/fastcc.ll
index 98ff684..1a01246 100644
--- a/test/CodeGen/Thumb/fastcc.ll
+++ b/test/CodeGen/Thumb/fastcc.ll
@@ -33,4 +33,4 @@ attributes #0 = { optsize "less-precise-fpmad"="false" "no-frame-pointer-elim"="
!llvm.ident = !{!0}
-!0 = metadata !{metadata !"clang version 3.5.0 "}
+!0 = !{!"clang version 3.5.0 "}
diff --git a/test/CodeGen/Thumb/iabs.ll b/test/CodeGen/Thumb/iabs.ll
index 76224bc..ecd4a6b 100644
--- a/test/CodeGen/Thumb/iabs.ll
+++ b/test/CodeGen/Thumb/iabs.ll
@@ -1,5 +1,5 @@
; RUN: llc < %s -mtriple=thumb-unknown-unknown -filetype=obj -o %t.o
-; RUN: llvm-objdump -disassemble -arch=thumb %t.o | FileCheck %s
+; RUN: llvm-objdump -disassemble -arch-name=thumb %t.o | FileCheck %s
define i32 @test(i32 %a) {
%tmp1neg = sub i32 0, %a
diff --git a/test/CodeGen/Thumb/stack-access.ll b/test/CodeGen/Thumb/stack-access.ll
new file mode 100644
index 0000000..bcffda2
--- /dev/null
+++ b/test/CodeGen/Thumb/stack-access.ll
@@ -0,0 +1,74 @@
+; RUN: llc -mtriple=thumb-eabi < %s -o - | FileCheck %s
+
+; Check that stack addresses are generated using a single ADD
+define void @test1(i8** %p) {
+ %x = alloca i8, align 1
+ %y = alloca i8, align 1
+ %z = alloca i8, align 1
+; CHECK: add r1, sp, #8
+; CHECK: str r1, [r0]
+ store i8* %x, i8** %p, align 4
+; CHECK: add r1, sp, #4
+; CHECK: str r1, [r0]
+ store i8* %y, i8** %p, align 4
+; CHECK: mov r1, sp
+; CHECK: str r1, [r0]
+ store i8* %z, i8** %p, align 4
+ ret void
+}
+
+; Stack offsets larger than 1020 still need two ADDs
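+; (Thumb1's add rd, sp, #imm encodes an 8-bit immediate scaled by 4, so a
+; single instruction reaches at most 1020 bytes.)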
+define void @test2([1024 x i8]** %p) {
+ %arr1 = alloca [1024 x i8], align 1
+ %arr2 = alloca [1024 x i8], align 1
+; CHECK: add r1, sp, #1020
+; CHECK: adds r1, #4
+; CHECK: str r1, [r0]
+ store [1024 x i8]* %arr1, [1024 x i8]** %p, align 4
+; CHECK: mov r1, sp
+; CHECK: str r1, [r0]
+ store [1024 x i8]* %arr2, [1024 x i8]** %p, align 4
+ ret void
+}
+
+; If possible, stack-based ldrb/ldrh are widened to use SP-based addressing
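+; (Thumb1 has SP-relative immediate addressing only for word-sized ldr/str,
+; so widening avoids materialising the slot address in a register first.)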
+define i32 @test3() #0 {
+ %x = alloca i8, align 1
+ %y = alloca i8, align 1
+; CHECK: ldr r0, [sp]
+ %1 = load i8* %x, align 1
+; CHECK: ldr r1, [sp, #4]
+ %2 = load i8* %y, align 1
+ %3 = add nsw i8 %1, %2
+ %4 = zext i8 %3 to i32
+ ret i32 %4
+}
+
+define i32 @test4() #0 {
+ %x = alloca i16, align 2
+ %y = alloca i16, align 2
+; CHECK: ldr r0, [sp]
+ %1 = load i16* %x, align 2
+; CHECK: ldr r1, [sp, #4]
+ %2 = load i16* %y, align 2
+ %3 = add nsw i16 %1, %2
+ %4 = zext i16 %3 to i32
+ ret i32 %4
+}
+
+; Don't widen if the value needs to be zero-extended
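+; (A widened ldr would bring in neighbouring stack bytes, which would break
+; the zeroext guarantee on the return value.)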
+define zeroext i8 @test5() {
+ %x = alloca i8, align 1
+; CHECK: mov r0, sp
+; CHECK: ldrb r0, [r0]
+ %1 = load i8* %x, align 1
+ ret i8 %1
+}
+
+define zeroext i16 @test6() {
+ %x = alloca i16, align 2
+; CHECK: mov r0, sp
+; CHECK: ldrh r0, [r0]
+ %1 = load i16* %x, align 2
+ ret i16 %1
+}
diff --git a/test/CodeGen/Thumb/stm-merge.ll b/test/CodeGen/Thumb/stm-merge.ll
index 76e71f4..d4b4cd2 100644
--- a/test/CodeGen/Thumb/stm-merge.ll
+++ b/test/CodeGen/Thumb/stm-merge.ll
@@ -7,16 +7,17 @@ target triple = "thumbv6m--linux-gnueabi"
@e = internal unnamed_addr global i32* null, align 4
; Function Attrs: nounwind optsize
-define void @fn1() #0 {
+define void @fn1(i32 %x, i32 %y, i32 %z) #0 {
entry:
; CHECK-LABEL: fn1:
; CHECK: stm r[[BASE:[0-9]]]!, {{.*}}
; CHECK-NOT: {{.*}} r[[BASE]]
-; CHECK: ldr r[[BASE]], {{.*}}
%g = alloca i32, align 4
%h = alloca i32, align 4
- store i32 1, i32* %g, align 4
- store i32 0, i32* %h, align 4
+ %i = alloca i32, align 4
+ store i32 %x, i32* %i, align 4
+ store i32 %y, i32* %h, align 4
+ store i32 %z, i32* %g, align 4
%.pr = load i32* @d, align 4
%cmp11 = icmp slt i32 %.pr, 1
br i1 %cmp11, label %for.inc.lr.ph, label %for.body5
diff --git a/test/CodeGen/Thumb/vargs.ll b/test/CodeGen/Thumb/vargs.ll
index 4078b01..71e8afa 100644
--- a/test/CodeGen/Thumb/vargs.ll
+++ b/test/CodeGen/Thumb/vargs.ll
@@ -6,6 +6,10 @@
define void @f(i32 %a, ...) {
entry:
+; Check that space is reserved above the pushed lr for variadic argument
+; registers to be stored in.
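+; (Under AAPCS the first arguments arrive in r0-r3; a variadic function
+; spills them here so va_arg can walk all arguments contiguously.)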
+; CHECK: sub sp, #[[IMM:[0-9]+]]
+; CHECK: push
%va = alloca i8*, align 4 ; <i8**> [#uses=4]
%va.upgrd.1 = bitcast i8** %va to i8* ; <i8*> [#uses=1]
call void @llvm.va_start( i8* %va.upgrd.1 )
@@ -27,6 +31,13 @@ bb7: ; preds = %bb
%va.upgrd.4 = bitcast i8** %va to i8* ; <i8*> [#uses=1]
call void @llvm.va_end( i8* %va.upgrd.4 )
ret void
+
+; The return sequence should pop the saved lr into r3, reclaim the stack space
+; used to store the variadic argument registers, and then return via r3. A pop
+; may precede this, but only if the function happened to use callee-saved
+; registers.
+; CHECK: pop {r3}
+; CHECK: add sp, #[[IMM]]
+; CHECK: bx r3
}
declare void @llvm.va_start(i8*)
@@ -34,8 +45,3 @@ declare void @llvm.va_start(i8*)
declare i32 @printf(i8*, ...)
declare void @llvm.va_end(i8*)
-
-; CHECK: pop
-; CHECK: pop
-; CHECK-NOT: pop
-
diff --git a/test/CodeGen/Thumb2/aligned-spill.ll b/test/CodeGen/Thumb2/aligned-spill.ll
index 3a2803f..4ef294b 100644
--- a/test/CodeGen/Thumb2/aligned-spill.ll
+++ b/test/CodeGen/Thumb2/aligned-spill.ll
@@ -9,7 +9,7 @@ target triple = "thumbv7-apple-ios"
;
; The callee-saved r4 is used as a scratch register for stack realignment.
; CHECK: push {r4, r7, lr}
-; CHECK: bic r4, r4, #7
+; CHECK: bfc r4, #0, #3
; CHECK: mov sp, r4
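; (Annotation: "bfc r4, #0, #3" clears bits [2:0] and has the same effect as
; the old "bic r4, r4, #7", e.g. 0x...f5 -> 0x...f0, rounding the scratch
; register down to an 8-byte boundary before it becomes the new sp.)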
define void @f(double* nocapture %p) nounwind ssp {
entry:
@@ -23,7 +23,7 @@ entry:
; NEON: f
; NEON: push {r4, r7, lr}
; NEON: sub.w r4, sp, #64
-; NEON: bic r4, r4, #15
+; NEON: bfc r4, #0, #4
; Stack pointer must be updated before the spills.
; NEON: mov sp, r4
; NEON: vst1.64 {d8, d9, d10, d11}, [r4:128]!
@@ -54,7 +54,7 @@ entry:
; NEON: f7
; NEON: push {r4, r7, lr}
; NEON: sub.w r4, sp, #56
-; NEON: bic r4, r4, #15
+; NEON: bfc r4, #0, #4
; Stack pointer must be updated before the spills.
; NEON: mov sp, r4
; NEON: vst1.64 {d8, d9, d10, d11}, [r4:128]!
@@ -81,7 +81,7 @@ entry:
; NEON: push {r4, r7, lr}
; NEON: vpush {d12, d13, d14, d15}
; NEON: sub.w r4, sp, #24
-; NEON: bic r4, r4, #15
+; NEON: bfc r4, #0, #4
; Stack pointer must be updated before the spills.
; NEON: mov sp, r4
; NEON: vst1.64 {d8, d9}, [r4:128]
diff --git a/test/CodeGen/Thumb2/constant-islands-jump-table.ll b/test/CodeGen/Thumb2/constant-islands-jump-table.ll
new file mode 100644
index 0000000..0dd7092
--- /dev/null
+++ b/test/CodeGen/Thumb2/constant-islands-jump-table.ll
@@ -0,0 +1,47 @@
+; RUN: llc -mtriple=thumbv7-linux-gnueabihf -O1 %s -o - | FileCheck %s
+
+; CHECK-LABEL: test_jump_table:
+; CHECK: b .LBB
+; CHECK-NOT: tbh
+
+define i32 @test_jump_table(i32 %x, float %in) {
+
+h1:
+
+ %b0 = fadd float %in, 1234.5
+ %b1 = fptoui float %b0 to i32
+
+ switch i32 %x, label %h2 [
+ i32 0, label %h3
+ i32 2, label %h4
+ i32 4, label %h5
+ i32 6, label %h6
+ ]
+
+h2:
+ %a0 = add i32 %x, 5
+ br label %h3
+
+h3:
+ %d2 = phi i32 [%b1, %h1], [%a0, %h2]
+ %d3 = add i32 %d2, 3
+ br label %h4
+
+h4:
+ %c2 = phi i32 [%b1, %h1], [%d3, %h3]
+ %c3 = add i32 %c2, 5
+ br label %h5
+
+h5:
+ %a2 = phi i32 [%b1, %h1], [%c3, %h4]
+ %a3 = add i32 %a2, 6
+ br label %h6
+
+h6:
+ %y = phi i32 [0, %h1], [%a3, %h5]
+ call i32 @llvm.arm.space(i32 2000, i32 undef)
+ ret i32 %y
+
+}
+
+declare i32 @llvm.arm.space(i32, i32)
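+
+; Note (annotation, not part of the original test): llvm.arm.space reserves
+; the given number of bytes of code space. The 2000-byte block above is
+; presumably there to make the function too large for a compact TBB/TBH jump
+; table to reach its targets, which is why the test expects a plain branch to
+; the jump table and no tbh.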
diff --git a/test/CodeGen/Thumb2/constant-islands-new-island-padding.ll b/test/CodeGen/Thumb2/constant-islands-new-island-padding.ll
new file mode 100644
index 0000000..991b043
--- /dev/null
+++ b/test/CodeGen/Thumb2/constant-islands-new-island-padding.ll
@@ -0,0 +1,42 @@
+; RUN: llc -mtriple=thumbv7-apple-ios %s -o - | FileCheck %s
+
+@g0 = common global i32 0, align 4
+@d0 = common global double 0.000000e+00, align 8
+@f0 = common global float 0.000000e+00, align 4
+@g1 = common global i32 0, align 4
+
+declare i32 @llvm.arm.space(i32, i32)
+
+; Check that the constant island pass moves the float constant pool entry inside
+; the function.
+
+; CHECK: .long 1067320814 @ float 1.23455596
+; CHECK: {{.*}} %do.end
+
+define i32 @testpadding(i32 %a) {
+entry:
+ %0 = load i32* @g0, align 4
+ %add = add nsw i32 %0, 12
+ store i32 %add, i32* @g0, align 4
+ %1 = load double* @d0, align 8
+ %add1 = fadd double %1, 0x3FF3C0B8ED46EACB
+ store double %add1, double* @d0, align 8
+ %tmpcall11 = call i32 @llvm.arm.space(i32 28, i32 undef)
+ call void @foo20(i32 191)
+ %2 = load float* @f0, align 4
+ %add2 = fadd float %2, 0x3FF3C0BDC0000000
+ store float %add2, float* @f0, align 4
+ br label %do.body
+
+do.body: ; preds = %do.body, %entry
+ tail call void @foo20(i32 19)
+ %3 = load i32* @g1, align 4
+ %tobool = icmp eq i32 %3, 0
+ br i1 %tobool, label %do.end, label %do.body
+
+do.end: ; preds = %do.body
+ %tmpcall111 = call i32 @llvm.arm.space(i32 954, i32 undef)
+ ret i32 10
+}
+
+declare void @foo20(i32)
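+
+; Note (annotation, not part of the original test): 1067320814 is 0x3F9E05EE,
+; the IEEE-754 single-precision bit pattern for 1.23455596, i.e. the constant
+; 0x3FF3C0BDC0000000 used in the float add above narrowed to float; the first
+; CHECK line therefore pins down exactly the constant-pool entry whose
+; placement this test is about.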
diff --git a/test/CodeGen/Thumb2/ifcvt-neon.ll b/test/CodeGen/Thumb2/ifcvt-neon.ll
index 501b0b6..00f3399 100644
--- a/test/CodeGen/Thumb2/ifcvt-neon.ll
+++ b/test/CodeGen/Thumb2/ifcvt-neon.ll
@@ -12,9 +12,9 @@ entry:
br i1 %0, label %bb, label %bb1
bb: ; preds = %entry
-; CHECK: ite lt
-; CHECK: vsublt.f32
-; CHECK-NEXT: vaddge.f32
+; CHECK: vsub.f32
+; CHECK-NEXT: vadd.f32
+; CHECK: it gt
%3 = fadd float %1, %2 ; <float> [#uses=1]
br label %bb2
diff --git a/test/CodeGen/Thumb2/thumb2-cmn.ll b/test/CodeGen/Thumb2/thumb2-cmn.ll
index efa1505..0f361d7 100644
--- a/test/CodeGen/Thumb2/thumb2-cmn.ll
+++ b/test/CodeGen/Thumb2/thumb2-cmn.ll
@@ -79,7 +79,7 @@ define void @f9(i32 %a, i32 %b) nounwind optsize {
ret void
}
-!0 = metadata !{i32 81}
+!0 = !{i32 81}
; CHECK-LABEL: f9:
; CHECK: cmn.w r0, r1
diff --git a/test/CodeGen/Thumb2/thumb2-spill-q.ll b/test/CodeGen/Thumb2/thumb2-spill-q.ll
index 94f4725..d1deb46 100644
--- a/test/CodeGen/Thumb2/thumb2-spill-q.ll
+++ b/test/CodeGen/Thumb2/thumb2-spill-q.ll
@@ -11,7 +11,7 @@ declare <4 x float> @llvm.arm.neon.vld1.v4f32(i8*, i32) nounwind readonly
define void @aaa(%quuz* %this, i8* %block) {
; CHECK-LABEL: aaa:
-; CHECK: bic r4, r4, #15
+; CHECK: bfc r4, #0, #4
; CHECK: vst1.64 {{.*}}[{{.*}}:128]
; CHECK: vld1.64 {{.*}}[{{.*}}:128]
entry:
diff --git a/test/CodeGen/X86/2006-05-22-FPSetEQ.ll b/test/CodeGen/X86/2006-05-22-FPSetEQ.ll
index 6c5a4fb..3be77f5 100644
--- a/test/CodeGen/X86/2006-05-22-FPSetEQ.ll
+++ b/test/CodeGen/X86/2006-05-22-FPSetEQ.ll
@@ -1,7 +1,10 @@
-; RUN: llc < %s -march=x86 -mattr=-sse | grep setnp
-; RUN: llc < %s -march=x86 -mattr=-sse -enable-unsafe-fp-math -enable-no-nans-fp-math | \
-; RUN: not grep setnp
+; RUN: llc < %s -march=x86 -mattr=-sse | FileCheck %s -check-prefix=WITHNANS
+; RUN: llc < %s -march=x86 -mattr=-sse -enable-unsafe-fp-math -enable-no-nans-fp-math | FileCheck %s -check-prefix=NONANS
+; WITHNANS-LABEL: test:
+; WITHNANS: setnp
+; NONANS-LABEL: test:
+; NONANS-NOT: setnp
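+; (Annotation: without SSE the compare goes through x87, where an unordered
+; result sets the parity flag, so an ordered-equal needs both sete and setnp.
+; Under -enable-no-nans-fp-math the unordered case is assumed impossible and
+; the setnp can be dropped, which is what NONANS verifies.)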
define i32 @test(float %f) {
%tmp = fcmp oeq float %f, 0.000000e+00 ; <i1> [#uses=1]
%tmp.upgrd.1 = zext i1 %tmp to i32 ; <i32> [#uses=1]
diff --git a/test/CodeGen/X86/2006-10-07-ScalarSSEMiscompile.ll b/test/CodeGen/X86/2006-10-07-ScalarSSEMiscompile.ll
deleted file mode 100644
index d09d061..0000000
--- a/test/CodeGen/X86/2006-10-07-ScalarSSEMiscompile.ll
+++ /dev/null
@@ -1,15 +0,0 @@
-; RUN: llc < %s -march=x86 -mattr=sse | grep movaps
-; Test that the load is NOT folded into the intrinsic, which would zero the top
-; elts of the loaded vector.
-
-target datalayout = "e-p:32:32"
-target triple = "i686-apple-darwin8.7.2"
-
-define <4 x float> @test(<4 x float> %A, <4 x float>* %B) nounwind {
- %BV = load <4 x float>* %B ; <<4 x float>> [#uses=1]
- %tmp28 = tail call <4 x float> @llvm.x86.sse.sub.ss( <4 x float> %A, <4 x float> %BV ) ; <<4 x float>> [#uses=1]
- ret <4 x float> %tmp28
-}
-
-declare <4 x float> @llvm.x86.sse.sub.ss(<4 x float>, <4 x float>)
-
diff --git a/test/CodeGen/X86/2007-04-25-MMX-PADDQ.ll b/test/CodeGen/X86/2007-04-25-MMX-PADDQ.ll
deleted file mode 100644
index 11c0bf9..0000000
--- a/test/CodeGen/X86/2007-04-25-MMX-PADDQ.ll
+++ /dev/null
@@ -1,64 +0,0 @@
-; RUN: llc < %s -o - -march=x86 -mattr=+mmx | FileCheck %s
-; There are no MMX instructions here. We use add+adcl for the adds.
-
-define <1 x i64> @unsigned_add3(<1 x i64>* %a, <1 x i64>* %b, i32 %count) nounwind {
-entry:
- %tmp2942 = icmp eq i32 %count, 0 ; <i1> [#uses=1]
- br i1 %tmp2942, label %bb31, label %bb26
-
-bb26: ; preds = %bb26, %entry
-
-; CHECK: addl
-; CHECK: adcl
-
- %i.037.0 = phi i32 [ 0, %entry ], [ %tmp25, %bb26 ] ; <i32> [#uses=3]
- %sum.035.0 = phi <1 x i64> [ zeroinitializer, %entry ], [ %tmp22, %bb26 ] ; <<1 x i64>> [#uses=1]
- %tmp13 = getelementptr <1 x i64>* %b, i32 %i.037.0 ; <<1 x i64>*> [#uses=1]
- %tmp14 = load <1 x i64>* %tmp13 ; <<1 x i64>> [#uses=1]
- %tmp18 = getelementptr <1 x i64>* %a, i32 %i.037.0 ; <<1 x i64>*> [#uses=1]
- %tmp19 = load <1 x i64>* %tmp18 ; <<1 x i64>> [#uses=1]
- %tmp21 = add <1 x i64> %tmp19, %tmp14 ; <<1 x i64>> [#uses=1]
- %tmp22 = add <1 x i64> %tmp21, %sum.035.0 ; <<1 x i64>> [#uses=2]
- %tmp25 = add i32 %i.037.0, 1 ; <i32> [#uses=2]
- %tmp29 = icmp ult i32 %tmp25, %count ; <i1> [#uses=1]
- br i1 %tmp29, label %bb26, label %bb31
-
-bb31: ; preds = %bb26, %entry
- %sum.035.1 = phi <1 x i64> [ zeroinitializer, %entry ], [ %tmp22, %bb26 ] ; <<1 x i64>> [#uses=1]
- ret <1 x i64> %sum.035.1
-}
-
-
-; This is the original test converted to use MMX intrinsics.
-
-define <1 x i64> @unsigned_add3a(x86_mmx* %a, x86_mmx* %b, i32 %count) nounwind {
-entry:
- %tmp2943 = bitcast <1 x i64><i64 0> to x86_mmx
- %tmp2942 = icmp eq i32 %count, 0 ; <i1> [#uses=1]
- br i1 %tmp2942, label %bb31, label %bb26
-
-bb26: ; preds = %bb26, %entry
-
-; CHECK: movq ({{.*}},8), %mm
-; CHECK: paddq ({{.*}},8), %mm
-; CHECK: paddq %mm{{[0-7]}}, %mm
-
- %i.037.0 = phi i32 [ 0, %entry ], [ %tmp25, %bb26 ] ; <i32> [#uses=3]
- %sum.035.0 = phi x86_mmx [ %tmp2943, %entry ], [ %tmp22, %bb26 ] ; <x86_mmx> [#uses=1]
- %tmp13 = getelementptr x86_mmx* %b, i32 %i.037.0 ; <x86_mmx*> [#uses=1]
- %tmp14 = load x86_mmx* %tmp13 ; <x86_mmx> [#uses=1]
- %tmp18 = getelementptr x86_mmx* %a, i32 %i.037.0 ; <x86_mmx*> [#uses=1]
- %tmp19 = load x86_mmx* %tmp18 ; <x86_mmx> [#uses=1]
- %tmp21 = call x86_mmx @llvm.x86.mmx.padd.q (x86_mmx %tmp19, x86_mmx %tmp14) ; <x86_mmx> [#uses=1]
- %tmp22 = call x86_mmx @llvm.x86.mmx.padd.q (x86_mmx %tmp21, x86_mmx %sum.035.0) ; <x86_mmx> [#uses=2]
- %tmp25 = add i32 %i.037.0, 1 ; <i32> [#uses=2]
- %tmp29 = icmp ult i32 %tmp25, %count ; <i1> [#uses=1]
- br i1 %tmp29, label %bb26, label %bb31
-
-bb31: ; preds = %bb26, %entry
- %sum.035.1 = phi x86_mmx [ %tmp2943, %entry ], [ %tmp22, %bb26 ] ; <x86_mmx> [#uses=1]
- %t = bitcast x86_mmx %sum.035.1 to <1 x i64>
- ret <1 x i64> %t
-}
-
-declare x86_mmx @llvm.x86.mmx.padd.q(x86_mmx, x86_mmx)
diff --git a/test/CodeGen/X86/2007-06-15-IntToMMX.ll b/test/CodeGen/X86/2007-06-15-IntToMMX.ll
deleted file mode 100644
index 5612d9e..0000000
--- a/test/CodeGen/X86/2007-06-15-IntToMMX.ll
+++ /dev/null
@@ -1,19 +0,0 @@
-; RUN: llc < %s -march=x86-64 -mattr=+mmx | FileCheck %s
-
-; CHECK: paddusw
-
-@R = external global x86_mmx ; <x86_mmx*> [#uses=1]
-
-define void @foo(<1 x i64> %A, <1 x i64> %B) {
-entry:
- %tmp2 = bitcast <1 x i64> %A to x86_mmx
- %tmp3 = bitcast <1 x i64> %B to x86_mmx
- %tmp7 = tail call x86_mmx @llvm.x86.mmx.paddus.w( x86_mmx %tmp2, x86_mmx %tmp3 ) ; <x86_mmx> [#uses=1]
- store x86_mmx %tmp7, x86_mmx* @R
- tail call void @llvm.x86.mmx.emms( )
- ret void
-}
-
-declare x86_mmx @llvm.x86.mmx.paddus.w(x86_mmx, x86_mmx)
-
-declare void @llvm.x86.mmx.emms()
diff --git a/test/CodeGen/X86/2008-10-06-MMXISelBug.ll b/test/CodeGen/X86/2008-10-06-MMXISelBug.ll
deleted file mode 100644
index 7f7b1a4..0000000
--- a/test/CodeGen/X86/2008-10-06-MMXISelBug.ll
+++ /dev/null
@@ -1,12 +0,0 @@
-; RUN: llc < %s -march=x86 -mattr=+mmx,+sse2
-; PR2850
-
-@tmp_V2i = common global <2 x i32> zeroinitializer ; <<2 x i32>*> [#uses=2]
-
-define void @f0() nounwind {
-entry:
- %0 = load <2 x i32>* @tmp_V2i, align 8 ; <<2 x i32>> [#uses=1]
- %1 = shufflevector <2 x i32> %0, <2 x i32> undef, <2 x i32> zeroinitializer ; <<2 x i32>> [#uses=1]
- store <2 x i32> %1, <2 x i32>* @tmp_V2i, align 8
- ret void
-}
diff --git a/test/CodeGen/X86/2009-01-25-NoSSE.ll b/test/CodeGen/X86/2009-01-25-NoSSE.ll
index 8406c4a..c655f2c 100644
--- a/test/CodeGen/X86/2009-01-25-NoSSE.ll
+++ b/test/CodeGen/X86/2009-01-25-NoSSE.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=x86-64 -mattr=-sse,-sse2 | not grep xmm
+; RUN: llc < %s -march=x86-64 -mattr=-sse,-sse2 | FileCheck %s
; PR3402
target datalayout =
"e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128"
@@ -6,6 +6,8 @@ target triple = "x86_64-unknown-linux-gnu"
%struct.ktermios = type { i32, i32, i32, i32, i8, [19 x i8], i32, i32 }
+; CHECK-NOT: xmm
+; CHECK-NOT: ymm
define void @foo() nounwind {
entry:
%termios = alloca %struct.ktermios, align 8
diff --git a/test/CodeGen/X86/2009-02-12-DebugInfoVLA.ll b/test/CodeGen/X86/2009-02-12-DebugInfoVLA.ll
index 207d122..e6202f9 100644
--- a/test/CodeGen/X86/2009-02-12-DebugInfoVLA.ll
+++ b/test/CodeGen/X86/2009-02-12-DebugInfoVLA.ll
@@ -1,9 +1,19 @@
; RUN: llc < %s
-; RUN: llc < %s -march=x86-64
+; RUN: llc < %s -march=x86-64 -verify-machineinstrs | FileCheck %s
; PR3538
target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:128:128"
target triple = "i386-apple-darwin9"
define signext i8 @foo(i8* %s1) nounwind ssp {
+
+; Make sure we generate:
+; movq -40(%rbp), %rsp
+; Instead of:
+; movq -40(%rbp), %rax
+; movq %rax, %rsp
+
+; CHECK-LABEL: @foo
+; CHECK: movq -40(%rbp), %rsp
+
entry:
%s1_addr = alloca i8* ; <i8**> [#uses=2]
%retval = alloca i32 ; <i32*> [#uses=2]
@@ -14,9 +24,9 @@ entry:
%2 = alloca i64 ; <i64*> [#uses=1]
%3 = alloca i64 ; <i64*> [#uses=6]
%"alloca point" = bitcast i32 0 to i32 ; <i32> [#uses=0]
- call void @llvm.dbg.declare(metadata !{i8** %s1_addr}, metadata !0, metadata !{metadata !"0x102"}), !dbg !7
+ call void @llvm.dbg.declare(metadata i8** %s1_addr, metadata !0, metadata !{!"0x102"}), !dbg !7
store i8* %s1, i8** %s1_addr
- call void @llvm.dbg.declare(metadata !{[0 x i8]** %str.0}, metadata !8, metadata !{metadata !"0x102"}), !dbg !7
+ call void @llvm.dbg.declare(metadata [0 x i8]** %str.0, metadata !8, metadata !{!"0x102"}), !dbg !7
%4 = call i8* @llvm.stacksave(), !dbg !7 ; <i8*> [#uses=1]
store i8* %4, i8** %saved_stack.1, align 8, !dbg !7
%5 = load i8** %s1_addr, align 8, !dbg !13 ; <i8*> [#uses=1]
@@ -66,22 +76,22 @@ declare i64 @strlen(i8*) nounwind readonly
declare void @llvm.stackrestore(i8*) nounwind
-!0 = metadata !{metadata !"0x101\00s1\002\000", metadata !1, metadata !2, metadata !6} ; [ DW_TAG_arg_variable ]
-!1 = metadata !{metadata !"0x2e\00foo\00foo\00foo\002\000\001\000\006\000\000\000", i32 0, metadata !2, metadata !3, null, null, null, null, null} ; [ DW_TAG_subprogram ]
-!2 = metadata !{metadata !"0x11\001\004.2.1 (Based on Apple Inc. build 5658) (LLVM build)\001\00\000\00\000", metadata !17, metadata !18, metadata !18, null, null, null} ; [ DW_TAG_compile_unit ]
-!3 = metadata !{metadata !"0x15\00\000\000\000\000\000\000", null, metadata !2, null, metadata !4, null, null, null} ; [ DW_TAG_subroutine_type ] [line 0, size 0, align 0, offset 0] [from ]
-!4 = metadata !{metadata !5, metadata !6}
-!5 = metadata !{metadata !"0x24\00char\000\008\008\000\000\006", null, metadata !2} ; [ DW_TAG_base_type ]
-!6 = metadata !{metadata !"0xf\00\000\0064\0064\000\000", null, metadata !2, metadata !5} ; [ DW_TAG_pointer_type ]
-!7 = metadata !{i32 2, i32 0, metadata !1, null}
-!8 = metadata !{metadata !"0x100\00str.0\003\000", metadata !1, metadata !2, metadata !9} ; [ DW_TAG_auto_variable ]
-!9 = metadata !{metadata !"0xf\00\000\0064\0064\000\0064", null, metadata !2, metadata !10} ; [ DW_TAG_pointer_type ]
-!10 = metadata !{metadata !"0x1\00\000\008\008\000\000", null, metadata !2, metadata !5, metadata !11, i32 0, null, null, null} ; [ DW_TAG_array_type ] [line 0, size 8, align 8, offset 0] [from char]
-!11 = metadata !{metadata !12}
-!12 = metadata !{metadata !"0x21\000\001"} ; [ DW_TAG_subrange_type ]
-!13 = metadata !{i32 3, i32 0, metadata !14, null}
-!14 = metadata !{metadata !"0xb\000\000\000", metadata !17, metadata !1} ; [ DW_TAG_lexical_block ]
-!15 = metadata !{i32 4, i32 0, metadata !14, null}
-!16 = metadata !{i32 5, i32 0, metadata !14, null}
-!17 = metadata !{metadata !"vla.c", metadata !"/tmp/"}
-!18 = metadata !{i32 0}
+!0 = !{!"0x101\00s1\002\000", !1, !2, !6} ; [ DW_TAG_arg_variable ]
+!1 = !{!"0x2e\00foo\00foo\00foo\002\000\001\000\006\000\000\000", i32 0, !2, !3, null, null, null, null, null} ; [ DW_TAG_subprogram ]
+!2 = !{!"0x11\001\004.2.1 (Based on Apple Inc. build 5658) (LLVM build)\001\00\000\00\000", !17, !18, !18, null, null, null} ; [ DW_TAG_compile_unit ]
+!3 = !{!"0x15\00\000\000\000\000\000\000", null, !2, null, !4, null, null, null} ; [ DW_TAG_subroutine_type ] [line 0, size 0, align 0, offset 0] [from ]
+!4 = !{!5, !6}
+!5 = !{!"0x24\00char\000\008\008\000\000\006", null, !2} ; [ DW_TAG_base_type ]
+!6 = !{!"0xf\00\000\0064\0064\000\000", null, !2, !5} ; [ DW_TAG_pointer_type ]
+!7 = !MDLocation(line: 2, scope: !1)
+!8 = !{!"0x100\00str.0\003\000", !1, !2, !9} ; [ DW_TAG_auto_variable ]
+!9 = !{!"0xf\00\000\0064\0064\000\0064", null, !2, !10} ; [ DW_TAG_pointer_type ]
+!10 = !{!"0x1\00\000\008\008\000\000", null, !2, !5, !11, i32 0, null, null, null} ; [ DW_TAG_array_type ] [line 0, size 8, align 8, offset 0] [from char]
+!11 = !{!12}
+!12 = !{!"0x21\000\001"} ; [ DW_TAG_subrange_type ]
+!13 = !MDLocation(line: 3, scope: !14)
+!14 = !{!"0xb\000\000\000", !17, !1} ; [ DW_TAG_lexical_block ]
+!15 = !MDLocation(line: 4, scope: !14)
+!16 = !MDLocation(line: 5, scope: !14)
+!17 = !{!"vla.c", !"/tmp/"}
+!18 = !{i32 0}
diff --git a/test/CodeGen/X86/2009-06-05-ScalarToVectorByteMMX.ll b/test/CodeGen/X86/2009-06-05-ScalarToVectorByteMMX.ll
deleted file mode 100644
index 3061dc2..0000000
--- a/test/CodeGen/X86/2009-06-05-ScalarToVectorByteMMX.ll
+++ /dev/null
@@ -1,9 +0,0 @@
-; RUN: llc < %s -march=x86 -mtriple=i386-linux-gnu -mattr=+mmx,+sse2 | FileCheck %s
-
-; CHECK-NOT: movl
-
-define <8 x i8> @a(i8 zeroext %x) nounwind {
- %r = insertelement <8 x i8> undef, i8 %x, i32 0
- ret <8 x i8> %r
-}
-
diff --git a/test/CodeGen/X86/2009-06-07-ExpandMMXBitcast.ll b/test/CodeGen/X86/2009-06-07-ExpandMMXBitcast.ll
deleted file mode 100644
index 66caedf..0000000
--- a/test/CodeGen/X86/2009-06-07-ExpandMMXBitcast.ll
+++ /dev/null
@@ -1,10 +0,0 @@
-; RUN: llc < %s -mtriple=x86_64-linux -mcpu=corei7 -mattr=+mmx | grep movd | count 2
-
-define i64 @a(i32 %a, i32 %b) nounwind readnone {
-entry:
- %0 = insertelement <2 x i32> undef, i32 %a, i32 0 ; <<2 x i32>> [#uses=1]
- %1 = insertelement <2 x i32> %0, i32 %b, i32 1 ; <<2 x i32>> [#uses=1]
- %conv = bitcast <2 x i32> %1 to i64 ; <i64> [#uses=1]
- ret i64 %conv
-}
-
diff --git a/test/CodeGen/X86/2009-06-18-movlp-shuffle-register.ll b/test/CodeGen/X86/2009-06-18-movlp-shuffle-register.ll
index 8ea70b4..4c4552d 100644
--- a/test/CodeGen/X86/2009-06-18-movlp-shuffle-register.ll
+++ b/test/CodeGen/X86/2009-06-18-movlp-shuffle-register.ll
@@ -3,7 +3,7 @@
define <4 x float> @f4523(<4 x float> %a,<4 x float> %b) nounwind {
entry:
-; CHECK: shufps $-28, %xmm
+; CHECK: shufps $228, %xmm
%shuffle = shufflevector <4 x float> %a, <4 x float> %b, <4 x i32> <i32 4,i32
5,i32 2,i32 3>
ret <4 x float> %shuffle
diff --git a/test/CodeGen/X86/2009-08-02-mmx-scalar-to-vector.ll b/test/CodeGen/X86/2009-08-02-mmx-scalar-to-vector.ll
deleted file mode 100644
index 288eef4..0000000
--- a/test/CodeGen/X86/2009-08-02-mmx-scalar-to-vector.ll
+++ /dev/null
@@ -1,12 +0,0 @@
-; RUN: llc < %s -march=x86-64
-; PR4669
-declare x86_mmx @llvm.x86.mmx.pslli.q(x86_mmx, i32)
-
-define <1 x i64> @test(i64 %t) {
-entry:
- %t1 = insertelement <1 x i64> undef, i64 %t, i32 0
- %t0 = bitcast <1 x i64> %t1 to x86_mmx
- %t2 = tail call x86_mmx @llvm.x86.mmx.pslli.q(x86_mmx %t0, i32 48)
- %t3 = bitcast x86_mmx %t2 to <1 x i64>
- ret <1 x i64> %t3
-}
diff --git a/test/CodeGen/X86/2009-10-16-Scope.ll b/test/CodeGen/X86/2009-10-16-Scope.ll
index 6fe2ee4..e75d594 100644
--- a/test/CodeGen/X86/2009-10-16-Scope.ll
+++ b/test/CodeGen/X86/2009-10-16-Scope.ll
@@ -9,7 +9,7 @@ entry:
br label %do.body, !dbg !0
do.body: ; preds = %entry
- call void @llvm.dbg.declare(metadata !{i32* %count_}, metadata !4, metadata !{metadata !"0x102"})
+ call void @llvm.dbg.declare(metadata i32* %count_, metadata !4, metadata !{!"0x102"})
%conv = ptrtoint i32* %count_ to i32, !dbg !0 ; <i32> [#uses=1]
%call = call i32 @foo(i32 %conv) ssp, !dbg !0 ; <i32> [#uses=0]
br label %do.end, !dbg !0
@@ -22,13 +22,13 @@ declare void @llvm.dbg.declare(metadata, metadata, metadata) nounwind readnone
declare i32 @foo(i32) ssp
-!0 = metadata !{i32 5, i32 2, metadata !1, null}
-!1 = metadata !{metadata !"0xb\001\001\000", null, metadata !2}; [DW_TAG_lexical_block ]
-!2 = metadata !{metadata !"0x2e\00bar\00bar\00bar\004\000\001\000\006\000\000\000", i32 0, metadata !3, null, null, null, null, null, null} ; [ DW_TAG_subprogram ]
-!3 = metadata !{metadata !"0x11\0012\00clang 1.1\001\00\000\00\000", metadata !8, null, metadata !9, null, null, null}; [DW_TAG_compile_unit ]
-!4 = metadata !{metadata !"0x100\00count_\005\000", metadata !5, metadata !3, metadata !6}; [ DW_TAG_auto_variable ]
-!5 = metadata !{metadata !"0xb\001\001\000", null, metadata !1}; [DW_TAG_lexical_block ]
-!6 = metadata !{metadata !"0x24\00int\000\0032\0032\000\000\005", null, metadata !3}; [DW_TAG_base_type ]
-!7 = metadata !{i32 6, i32 1, metadata !2, null}
-!8 = metadata !{metadata !"genmodes.i", metadata !"/Users/yash/Downloads"}
-!9 = metadata !{i32 0}
+!0 = !MDLocation(line: 5, column: 2, scope: !1)
+!1 = !{!"0xb\001\001\000", null, !2}; [DW_TAG_lexical_block ]
+!2 = !{!"0x2e\00bar\00bar\00bar\004\000\001\000\006\000\000\000", i32 0, !3, null, null, null, null, null, null} ; [ DW_TAG_subprogram ]
+!3 = !{!"0x11\0012\00clang 1.1\001\00\000\00\000", !8, null, !9, null, null, null}; [DW_TAG_compile_unit ]
+!4 = !{!"0x100\00count_\005\000", !5, !3, !6}; [ DW_TAG_auto_variable ]
+!5 = !{!"0xb\001\001\000", null, !1}; [DW_TAG_lexical_block ]
+!6 = !{!"0x24\00int\000\0032\0032\000\000\005", null, !3}; [DW_TAG_base_type ]
+!7 = !MDLocation(line: 6, column: 1, scope: !2)
+!8 = !{!"genmodes.i", !"/Users/yash/Downloads"}
+!9 = !{i32 0}
diff --git a/test/CodeGen/X86/2010-01-18-DbgValue.ll b/test/CodeGen/X86/2010-01-18-DbgValue.ll
index 0e2ed9d..b21846d 100644
--- a/test/CodeGen/X86/2010-01-18-DbgValue.ll
+++ b/test/CodeGen/X86/2010-01-18-DbgValue.ll
@@ -12,7 +12,7 @@ entry:
%retval = alloca double ; <double*> [#uses=2]
%0 = alloca double ; <double*> [#uses=2]
%"alloca point" = bitcast i32 0 to i32 ; <i32> [#uses=0]
- call void @llvm.dbg.declare(metadata !{%struct.Rect* %my_r0}, metadata !0, metadata !{metadata !"0x102"}), !dbg !15
+ call void @llvm.dbg.declare(metadata %struct.Rect* %my_r0, metadata !0, metadata !{!"0x102"}), !dbg !15
%1 = getelementptr inbounds %struct.Rect* %my_r0, i32 0, i32 0, !dbg !16 ; <%struct.Pt*> [#uses=1]
%2 = getelementptr inbounds %struct.Pt* %1, i32 0, i32 0, !dbg !16 ; <double*> [#uses=1]
%3 = load double* %2, align 8, !dbg !16 ; <double> [#uses=1]
@@ -31,25 +31,25 @@ declare void @llvm.dbg.declare(metadata, metadata, metadata) nounwind readnone
!llvm.dbg.cu = !{!3}
!llvm.module.flags = !{!21}
-!0 = metadata !{metadata !"0x101\00my_r0\0011\000", metadata !1, metadata !2, metadata !7} ; [ DW_TAG_arg_variable ]
-!1 = metadata !{metadata !"0x2e\00foo\00foo\00foo\0011\000\001\000\006\000\000\0011", metadata !19, metadata !2, metadata !4, null, double (%struct.Rect*)* @foo, null, null, null} ; [ DW_TAG_subprogram ]
-!2 = metadata !{metadata !"0x29", metadata !19} ; [ DW_TAG_file_type ]
-!3 = metadata !{metadata !"0x11\001\004.2.1 (Based on Apple Inc. build 5658) (LLVM build)\000\00\000\00\000", metadata !19, metadata !20, metadata !20, metadata !18, null, null} ; [ DW_TAG_compile_unit ]
-!4 = metadata !{metadata !"0x15\00\000\000\000\000\000\000", metadata !19, metadata !2, null, metadata !5, null, null, null} ; [ DW_TAG_subroutine_type ] [line 0, size 0, align 0, offset 0] [from ]
-!5 = metadata !{metadata !6, metadata !7}
-!6 = metadata !{metadata !"0x24\00double\000\0064\0064\000\000\004", metadata !19, metadata !2} ; [ DW_TAG_base_type ]
-!7 = metadata !{metadata !"0x13\00Rect\006\00256\0064\000\000\000", metadata !19, metadata !2, null, metadata !8, null, null, null} ; [ DW_TAG_structure_type ] [Rect] [line 6, size 256, align 64, offset 0] [def] [from ]
-!8 = metadata !{metadata !9, metadata !14}
-!9 = metadata !{metadata !"0xd\00P1\007\00128\0064\000\000", metadata !19, metadata !7, metadata !10} ; [ DW_TAG_member ]
-!10 = metadata !{metadata !"0x13\00Pt\001\00128\0064\000\000\000", metadata !19, metadata !2, null, metadata !11, null, null, null} ; [ DW_TAG_structure_type ] [Pt] [line 1, size 128, align 64, offset 0] [def] [from ]
-!11 = metadata !{metadata !12, metadata !13}
-!12 = metadata !{metadata !"0xd\00x\002\0064\0064\000\000", metadata !19, metadata !10, metadata !6} ; [ DW_TAG_member ]
-!13 = metadata !{metadata !"0xd\00y\003\0064\0064\0064\000", metadata !19, metadata !10, metadata !6} ; [ DW_TAG_member ]
-!14 = metadata !{metadata !"0xd\00P2\008\00128\0064\00128\000", metadata !19, metadata !7, metadata !10} ; [ DW_TAG_member ]
-!15 = metadata !{i32 11, i32 0, metadata !1, null}
-!16 = metadata !{i32 12, i32 0, metadata !17, null}
-!17 = metadata !{metadata !"0xb\0011\000\000", metadata !19, metadata !1} ; [ DW_TAG_lexical_block ]
-!18 = metadata !{metadata !1}
-!19 = metadata !{metadata !"b2.c", metadata !"/tmp/"}
-!20 = metadata !{i32 0}
-!21 = metadata !{i32 1, metadata !"Debug Info Version", i32 2}
+!0 = !{!"0x101\00my_r0\0011\000", !1, !2, !7} ; [ DW_TAG_arg_variable ]
+!1 = !{!"0x2e\00foo\00foo\00foo\0011\000\001\000\006\000\000\0011", !19, !2, !4, null, double (%struct.Rect*)* @foo, null, null, null} ; [ DW_TAG_subprogram ]
+!2 = !{!"0x29", !19} ; [ DW_TAG_file_type ]
+!3 = !{!"0x11\001\004.2.1 (Based on Apple Inc. build 5658) (LLVM build)\000\00\000\00\000", !19, !20, !20, !18, null, null} ; [ DW_TAG_compile_unit ]
+!4 = !{!"0x15\00\000\000\000\000\000\000", !19, !2, null, !5, null, null, null} ; [ DW_TAG_subroutine_type ] [line 0, size 0, align 0, offset 0] [from ]
+!5 = !{!6, !7}
+!6 = !{!"0x24\00double\000\0064\0064\000\000\004", !19, !2} ; [ DW_TAG_base_type ]
+!7 = !{!"0x13\00Rect\006\00256\0064\000\000\000", !19, !2, null, !8, null, null, null} ; [ DW_TAG_structure_type ] [Rect] [line 6, size 256, align 64, offset 0] [def] [from ]
+!8 = !{!9, !14}
+!9 = !{!"0xd\00P1\007\00128\0064\000\000", !19, !7, !10} ; [ DW_TAG_member ]
+!10 = !{!"0x13\00Pt\001\00128\0064\000\000\000", !19, !2, null, !11, null, null, null} ; [ DW_TAG_structure_type ] [Pt] [line 1, size 128, align 64, offset 0] [def] [from ]
+!11 = !{!12, !13}
+!12 = !{!"0xd\00x\002\0064\0064\000\000", !19, !10, !6} ; [ DW_TAG_member ]
+!13 = !{!"0xd\00y\003\0064\0064\0064\000", !19, !10, !6} ; [ DW_TAG_member ]
+!14 = !{!"0xd\00P2\008\00128\0064\00128\000", !19, !7, !10} ; [ DW_TAG_member ]
+!15 = !MDLocation(line: 11, scope: !1)
+!16 = !MDLocation(line: 12, scope: !17)
+!17 = !{!"0xb\0011\000\000", !19, !1} ; [ DW_TAG_lexical_block ]
+!18 = !{!1}
+!19 = !{!"b2.c", !"/tmp/"}
+!20 = !{i32 0}
+!21 = !{i32 1, !"Debug Info Version", i32 2}
diff --git a/test/CodeGen/X86/2010-02-01-DbgValueCrash.ll b/test/CodeGen/X86/2010-02-01-DbgValueCrash.ll
index a35efdc..b85f1af 100644
--- a/test/CodeGen/X86/2010-02-01-DbgValueCrash.ll
+++ b/test/CodeGen/X86/2010-02-01-DbgValueCrash.ll
@@ -8,7 +8,7 @@
define i32 @"main(tart.core.String[])->int32"(i32 %args) {
entry:
- tail call void @llvm.dbg.value(metadata !14, i64 0, metadata !8, metadata !{metadata !"0x102"})
+ tail call void @llvm.dbg.value(metadata %tart.reflect.ComplexType* @.type.SwitchStmtTest, i64 0, metadata !8, metadata !{!"0x102"})
tail call void @"tart.reflect.ComplexType.create->tart.core.Object"(%tart.reflect.ComplexType* @.type.SwitchStmtTest) ; <%tart.core.Object*> [#uses=2]
ret i32 3
}
@@ -16,20 +16,20 @@ entry:
declare void @llvm.dbg.value(metadata, i64, metadata, metadata) nounwind readnone
declare void @"tart.reflect.ComplexType.create->tart.core.Object"(%tart.reflect.ComplexType*) nounwind readnone
-!0 = metadata !{metadata !"0x11\001\004.2.1 (Based on Apple Inc. build 5658) (LLVM build)\001\00\000\00\000", metadata !15, metadata !16, metadata !16, null, null, null} ; [ DW_TAG_compile_unit ]
-!1 = metadata !{metadata !"0x26\00\000\00192\0064\000\000", metadata !15, metadata !0, metadata !2} ; [ DW_TAG_const_type ]
-!2 = metadata !{metadata !"0x13\00C\001\00192\0064\000\000\000", metadata !15, metadata !0, null, metadata !3, null, null, null} ; [ DW_TAG_structure_type ] [C] [line 1, size 192, align 64, offset 0] [def] [from ]
-!3 = metadata !{metadata !4, metadata !6, metadata !7}
-!4 = metadata !{metadata !"0xd\00x\001\0064\0064\000\000", metadata !15, metadata !2, metadata !5} ; [ DW_TAG_member ]
-!5 = metadata !{metadata !"0x24\00double\000\0064\0064\000\000\004", metadata !15, metadata !0} ; [ DW_TAG_base_type ]
-!6 = metadata !{metadata !"0xd\00y\001\0064\0064\0064\000", metadata !15, metadata !2, metadata !5} ; [ DW_TAG_member ]
-!7 = metadata !{metadata !"0xd\00z\001\0064\0064\00128\000", metadata !15, metadata !2, metadata !5} ; [ DW_TAG_member ]
-!8 = metadata !{metadata !"0x100\00t\005\000", metadata !9, metadata !0, metadata !2} ; [ DW_TAG_auto_variable ]
-!9 = metadata !{metadata !"0xb\000\000\000", null, metadata !10} ; [ DW_TAG_lexical_block ]
-!10 = metadata !{metadata !"0x2e\00foo\00foo\00foo\004\000\001\000\006\000\000\000", i32 0, metadata !0, metadata !11, null, null, null, null, null} ; [ DW_TAG_subprogram ]
-!11 = metadata !{metadata !"0x15\00\000\000\000\000\000\000", metadata !15, metadata !0, null, metadata !12, null, null, null} ; [ DW_TAG_subroutine_type ] [line 0, size 0, align 0, offset 0] [from ]
-!12 = metadata !{metadata !13}
-!13 = metadata !{metadata !"0x24\00int\000\0032\0032\000\000\005", metadata !15, metadata !0} ; [ DW_TAG_base_type ]
-!14 = metadata !{%tart.reflect.ComplexType* @.type.SwitchStmtTest}
-!15 = metadata !{metadata !"sm.c", metadata !""}
-!16 = metadata !{i32 0}
+!0 = !{!"0x11\001\004.2.1 (Based on Apple Inc. build 5658) (LLVM build)\001\00\000\00\000", !15, !16, !16, null, null, null} ; [ DW_TAG_compile_unit ]
+!1 = !{!"0x26\00\000\00192\0064\000\000", !15, !0, !2} ; [ DW_TAG_const_type ]
+!2 = !{!"0x13\00C\001\00192\0064\000\000\000", !15, !0, null, !3, null, null, null} ; [ DW_TAG_structure_type ] [C] [line 1, size 192, align 64, offset 0] [def] [from ]
+!3 = !{!4, !6, !7}
+!4 = !{!"0xd\00x\001\0064\0064\000\000", !15, !2, !5} ; [ DW_TAG_member ]
+!5 = !{!"0x24\00double\000\0064\0064\000\000\004", !15, !0} ; [ DW_TAG_base_type ]
+!6 = !{!"0xd\00y\001\0064\0064\0064\000", !15, !2, !5} ; [ DW_TAG_member ]
+!7 = !{!"0xd\00z\001\0064\0064\00128\000", !15, !2, !5} ; [ DW_TAG_member ]
+!8 = !{!"0x100\00t\005\000", !9, !0, !2} ; [ DW_TAG_auto_variable ]
+!9 = !{!"0xb\000\000\000", null, !10} ; [ DW_TAG_lexical_block ]
+!10 = !{!"0x2e\00foo\00foo\00foo\004\000\001\000\006\000\000\000", i32 0, !0, !11, null, null, null, null, null} ; [ DW_TAG_subprogram ]
+!11 = !{!"0x15\00\000\000\000\000\000\000", !15, !0, null, !12, null, null, null} ; [ DW_TAG_subroutine_type ] [line 0, size 0, align 0, offset 0] [from ]
+!12 = !{!13}
+!13 = !{!"0x24\00int\000\0032\0032\000\000\005", !15, !0} ; [ DW_TAG_base_type ]
+!14 = !{%tart.reflect.ComplexType* @.type.SwitchStmtTest}
+!15 = !{!"sm.c", !""}
+!16 = !{i32 0}
diff --git a/test/CodeGen/X86/2010-02-11-NonTemporal.ll b/test/CodeGen/X86/2010-02-11-NonTemporal.ll
index 5789a0b..f9cca8c 100644
--- a/test/CodeGen/X86/2010-02-11-NonTemporal.ll
+++ b/test/CodeGen/X86/2010-02-11-NonTemporal.ll
@@ -3,7 +3,7 @@
target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128"
target triple = "x86_64-unknown-linux-gnu"
-!0 = metadata !{ i32 1 }
+!0 = !{ i32 1 }
define void @sub_(i32* noalias %n) {
"file movnt.f90, line 2, bb1":
diff --git a/test/CodeGen/X86/2010-02-19-TailCallRetAddrBug.ll b/test/CodeGen/X86/2010-02-19-TailCallRetAddrBug.ll
index 060c535..2c6d113 100644
--- a/test/CodeGen/X86/2010-02-19-TailCallRetAddrBug.ll
+++ b/test/CodeGen/X86/2010-02-19-TailCallRetAddrBug.ll
@@ -15,30 +15,30 @@
; Move return address from temporary register (%ebp) to new stack location (60(%esp))
; CHECK: movl [[REGISTER]], 60(%esp)
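; (Annotation: in a fastcc tail call that needs more argument stack than the
; caller owns, the return address presumably has to be relocated so it ends up
; directly below the callee's arguments; the CHECK above pins down the store
; half of that move.)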
-%tupl_p = type [9 x i32]*
+%tupl = type [9 x i32]
declare fastcc void @l297(i32 %r10, i32 %r9, i32 %r8, i32 %r7, i32 %r6, i32 %r5, i32 %r3, i32 %r2) noreturn nounwind
declare fastcc void @l298(i32 %r10, i32 %r9, i32 %r4) noreturn nounwind
-define fastcc void @l186(%tupl_p %r1) noreturn nounwind {
+define fastcc void @l186(%tupl* %r1) noreturn nounwind {
entry:
- %ptr1 = getelementptr %tupl_p %r1, i32 0, i32 0
+ %ptr1 = getelementptr %tupl* %r1, i32 0, i32 0
%r2 = load i32* %ptr1
- %ptr3 = getelementptr %tupl_p %r1, i32 0, i32 1
+ %ptr3 = getelementptr %tupl* %r1, i32 0, i32 1
%r3 = load i32* %ptr3
- %ptr5 = getelementptr %tupl_p %r1, i32 0, i32 2
+ %ptr5 = getelementptr %tupl* %r1, i32 0, i32 2
%r4 = load i32* %ptr5
- %ptr7 = getelementptr %tupl_p %r1, i32 0, i32 3
+ %ptr7 = getelementptr %tupl* %r1, i32 0, i32 3
%r5 = load i32* %ptr7
- %ptr9 = getelementptr %tupl_p %r1, i32 0, i32 4
+ %ptr9 = getelementptr %tupl* %r1, i32 0, i32 4
%r6 = load i32* %ptr9
- %ptr11 = getelementptr %tupl_p %r1, i32 0, i32 5
+ %ptr11 = getelementptr %tupl* %r1, i32 0, i32 5
%r7 = load i32* %ptr11
- %ptr13 = getelementptr %tupl_p %r1, i32 0, i32 6
+ %ptr13 = getelementptr %tupl* %r1, i32 0, i32 6
%r8 = load i32* %ptr13
- %ptr15 = getelementptr %tupl_p %r1, i32 0, i32 7
+ %ptr15 = getelementptr %tupl* %r1, i32 0, i32 7
%r9 = load i32* %ptr15
- %ptr17 = getelementptr %tupl_p %r1, i32 0, i32 8
+ %ptr17 = getelementptr %tupl* %r1, i32 0, i32 8
%r10 = load i32* %ptr17
%cond = icmp eq i32 %r10, 3
br i1 %cond, label %true, label %false
diff --git a/test/CodeGen/X86/2010-04-23-mmx-movdq2q.ll b/test/CodeGen/X86/2010-04-23-mmx-movdq2q.ll
deleted file mode 100644
index 60025bf..0000000
--- a/test/CodeGen/X86/2010-04-23-mmx-movdq2q.ll
+++ /dev/null
@@ -1,100 +0,0 @@
-; RUN: llc < %s -mtriple=x86_64-apple-darwin -mattr=+mmx,+sse2 | FileCheck %s
-; There are no MMX operations here, so we use XMM or i64.
-
-; CHECK: ti8
-define void @ti8(double %a, double %b) nounwind {
-entry:
- %tmp1 = bitcast double %a to <8 x i8>
- %tmp2 = bitcast double %b to <8 x i8>
- %tmp3 = add <8 x i8> %tmp1, %tmp2
-; CHECK: paddb
- store <8 x i8> %tmp3, <8 x i8>* null
- ret void
-}
-
-; CHECK: ti16
-define void @ti16(double %a, double %b) nounwind {
-entry:
- %tmp1 = bitcast double %a to <4 x i16>
- %tmp2 = bitcast double %b to <4 x i16>
- %tmp3 = add <4 x i16> %tmp1, %tmp2
-; CHECK: paddw
- store <4 x i16> %tmp3, <4 x i16>* null
- ret void
-}
-
-; CHECK: ti32
-define void @ti32(double %a, double %b) nounwind {
-entry:
- %tmp1 = bitcast double %a to <2 x i32>
- %tmp2 = bitcast double %b to <2 x i32>
- %tmp3 = add <2 x i32> %tmp1, %tmp2
-; CHECK: paddd
- store <2 x i32> %tmp3, <2 x i32>* null
- ret void
-}
-
-; CHECK: ti64
-define void @ti64(double %a, double %b) nounwind {
-entry:
- %tmp1 = bitcast double %a to <1 x i64>
- %tmp2 = bitcast double %b to <1 x i64>
- %tmp3 = add <1 x i64> %tmp1, %tmp2
-; CHECK: addq
- store <1 x i64> %tmp3, <1 x i64>* null
- ret void
-}
-
-; MMX intrinsics calls get us MMX instructions.
-; CHECK: ti8a
-define void @ti8a(double %a, double %b) nounwind {
-entry:
- %tmp1 = bitcast double %a to x86_mmx
-; CHECK: movdq2q
- %tmp2 = bitcast double %b to x86_mmx
-; CHECK: movdq2q
- %tmp3 = tail call x86_mmx @llvm.x86.mmx.padd.b(x86_mmx %tmp1, x86_mmx %tmp2)
- store x86_mmx %tmp3, x86_mmx* null
- ret void
-}
-
-; CHECK: ti16a
-define void @ti16a(double %a, double %b) nounwind {
-entry:
- %tmp1 = bitcast double %a to x86_mmx
-; CHECK: movdq2q
- %tmp2 = bitcast double %b to x86_mmx
-; CHECK: movdq2q
- %tmp3 = tail call x86_mmx @llvm.x86.mmx.padd.w(x86_mmx %tmp1, x86_mmx %tmp2)
- store x86_mmx %tmp3, x86_mmx* null
- ret void
-}
-
-; CHECK: ti32a
-define void @ti32a(double %a, double %b) nounwind {
-entry:
- %tmp1 = bitcast double %a to x86_mmx
-; CHECK: movdq2q
- %tmp2 = bitcast double %b to x86_mmx
-; CHECK: movdq2q
- %tmp3 = tail call x86_mmx @llvm.x86.mmx.padd.d(x86_mmx %tmp1, x86_mmx %tmp2)
- store x86_mmx %tmp3, x86_mmx* null
- ret void
-}
-
-; CHECK: ti64a
-define void @ti64a(double %a, double %b) nounwind {
-entry:
- %tmp1 = bitcast double %a to x86_mmx
-; CHECK: movdq2q
- %tmp2 = bitcast double %b to x86_mmx
-; CHECK: movdq2q
- %tmp3 = tail call x86_mmx @llvm.x86.mmx.padd.q(x86_mmx %tmp1, x86_mmx %tmp2)
- store x86_mmx %tmp3, x86_mmx* null
- ret void
-}
-
-declare x86_mmx @llvm.x86.mmx.padd.b(x86_mmx, x86_mmx)
-declare x86_mmx @llvm.x86.mmx.padd.w(x86_mmx, x86_mmx)
-declare x86_mmx @llvm.x86.mmx.padd.d(x86_mmx, x86_mmx)
-declare x86_mmx @llvm.x86.mmx.padd.q(x86_mmx, x86_mmx)
diff --git a/test/CodeGen/X86/2010-05-05-LocalAllocEarlyClobber.ll b/test/CodeGen/X86/2010-05-05-LocalAllocEarlyClobber.ll
index fc8c895..86be390 100644
--- a/test/CodeGen/X86/2010-05-05-LocalAllocEarlyClobber.ll
+++ b/test/CodeGen/X86/2010-05-05-LocalAllocEarlyClobber.ll
@@ -29,4 +29,4 @@ entry:
ret i8* %1
}
-!0 = metadata !{i32 79}
+!0 = !{i32 79}
diff --git a/test/CodeGen/X86/2010-05-25-DotDebugLoc.ll b/test/CodeGen/X86/2010-05-25-DotDebugLoc.ll
index 1998011..0d30a3f 100644
--- a/test/CodeGen/X86/2010-05-25-DotDebugLoc.ll
+++ b/test/CodeGen/X86/2010-05-25-DotDebugLoc.ll
@@ -10,10 +10,10 @@
define hidden %0 @__divsc3(float %a, float %b, float %c, float %d) nounwind readnone {
entry:
- tail call void @llvm.dbg.value(metadata !{float %a}, i64 0, metadata !0, metadata !{metadata !"0x102"})
- tail call void @llvm.dbg.value(metadata !{float %b}, i64 0, metadata !11, metadata !{metadata !"0x102"})
- tail call void @llvm.dbg.value(metadata !{float %c}, i64 0, metadata !12, metadata !{metadata !"0x102"})
- tail call void @llvm.dbg.value(metadata !{float %d}, i64 0, metadata !13, metadata !{metadata !"0x102"})
+ tail call void @llvm.dbg.value(metadata float %a, i64 0, metadata !0, metadata !{!"0x102"})
+ tail call void @llvm.dbg.value(metadata float %b, i64 0, metadata !11, metadata !{!"0x102"})
+ tail call void @llvm.dbg.value(metadata float %c, i64 0, metadata !12, metadata !{!"0x102"})
+ tail call void @llvm.dbg.value(metadata float %d, i64 0, metadata !13, metadata !{!"0x102"})
%0 = tail call float @fabsf(float %c) nounwind readnone, !dbg !19 ; <float> [#uses=1]
%1 = tail call float @fabsf(float %d) nounwind readnone, !dbg !19 ; <float> [#uses=1]
%2 = fcmp olt float %0, %1, !dbg !19 ; <i1> [#uses=1]
@@ -21,34 +21,34 @@ entry:
bb: ; preds = %entry
%3 = fdiv float %c, %d, !dbg !20 ; <float> [#uses=3]
- tail call void @llvm.dbg.value(metadata !{float %3}, i64 0, metadata !16, metadata !{metadata !"0x102"}), !dbg !20
+ tail call void @llvm.dbg.value(metadata float %3, i64 0, metadata !16, metadata !{!"0x102"}), !dbg !20
%4 = fmul float %3, %c, !dbg !21 ; <float> [#uses=1]
%5 = fadd float %4, %d, !dbg !21 ; <float> [#uses=2]
- tail call void @llvm.dbg.value(metadata !{float %5}, i64 0, metadata !14, metadata !{metadata !"0x102"}), !dbg !21
+ tail call void @llvm.dbg.value(metadata float %5, i64 0, metadata !14, metadata !{!"0x102"}), !dbg !21
%6 = fmul float %3, %a, !dbg !22 ; <float> [#uses=1]
%7 = fadd float %6, %b, !dbg !22 ; <float> [#uses=1]
%8 = fdiv float %7, %5, !dbg !22 ; <float> [#uses=1]
- tail call void @llvm.dbg.value(metadata !{float %8}, i64 0, metadata !17, metadata !{metadata !"0x102"}), !dbg !22
+ tail call void @llvm.dbg.value(metadata float %8, i64 0, metadata !17, metadata !{!"0x102"}), !dbg !22
%9 = fmul float %3, %b, !dbg !23 ; <float> [#uses=1]
%10 = fsub float %9, %a, !dbg !23 ; <float> [#uses=1]
%11 = fdiv float %10, %5, !dbg !23 ; <float> [#uses=1]
- tail call void @llvm.dbg.value(metadata !{float %11}, i64 0, metadata !18, metadata !{metadata !"0x102"}), !dbg !23
+ tail call void @llvm.dbg.value(metadata float %11, i64 0, metadata !18, metadata !{!"0x102"}), !dbg !23
br label %bb2, !dbg !23
bb1: ; preds = %entry
%12 = fdiv float %d, %c, !dbg !24 ; <float> [#uses=3]
- tail call void @llvm.dbg.value(metadata !{float %12}, i64 0, metadata !16, metadata !{metadata !"0x102"}), !dbg !24
+ tail call void @llvm.dbg.value(metadata float %12, i64 0, metadata !16, metadata !{!"0x102"}), !dbg !24
%13 = fmul float %12, %d, !dbg !25 ; <float> [#uses=1]
%14 = fadd float %13, %c, !dbg !25 ; <float> [#uses=2]
- tail call void @llvm.dbg.value(metadata !{float %14}, i64 0, metadata !14, metadata !{metadata !"0x102"}), !dbg !25
+ tail call void @llvm.dbg.value(metadata float %14, i64 0, metadata !14, metadata !{!"0x102"}), !dbg !25
%15 = fmul float %12, %b, !dbg !26 ; <float> [#uses=1]
%16 = fadd float %15, %a, !dbg !26 ; <float> [#uses=1]
%17 = fdiv float %16, %14, !dbg !26 ; <float> [#uses=1]
- tail call void @llvm.dbg.value(metadata !{float %17}, i64 0, metadata !17, metadata !{metadata !"0x102"}), !dbg !26
+ tail call void @llvm.dbg.value(metadata float %17, i64 0, metadata !17, metadata !{!"0x102"}), !dbg !26
%18 = fmul float %12, %a, !dbg !27 ; <float> [#uses=1]
%19 = fsub float %b, %18, !dbg !27 ; <float> [#uses=1]
%20 = fdiv float %19, %14, !dbg !27 ; <float> [#uses=1]
- tail call void @llvm.dbg.value(metadata !{float %20}, i64 0, metadata !18, metadata !{metadata !"0x102"}), !dbg !27
+ tail call void @llvm.dbg.value(metadata float %20, i64 0, metadata !18, metadata !{!"0x102"}), !dbg !27
br label %bb2, !dbg !27
bb2: ; preds = %bb1, %bb
@@ -74,9 +74,9 @@ bb6: ; preds = %bb4
bb8: ; preds = %bb6
%27 = tail call float @copysignf(float 0x7FF0000000000000, float %c) nounwind readnone, !dbg !30 ; <float> [#uses=2]
%28 = fmul float %27, %a, !dbg !30 ; <float> [#uses=1]
- tail call void @llvm.dbg.value(metadata !{float %28}, i64 0, metadata !17, metadata !{metadata !"0x102"}), !dbg !30
+ tail call void @llvm.dbg.value(metadata float %28, i64 0, metadata !17, metadata !{!"0x102"}), !dbg !30
%29 = fmul float %27, %b, !dbg !31 ; <float> [#uses=1]
- tail call void @llvm.dbg.value(metadata !{float %29}, i64 0, metadata !18, metadata !{metadata !"0x102"}), !dbg !31
+ tail call void @llvm.dbg.value(metadata float %29, i64 0, metadata !18, metadata !{!"0x102"}), !dbg !31
br label %bb46, !dbg !31
bb9: ; preds = %bb6, %bb4
@@ -106,24 +106,24 @@ bb15: ; preds = %bb14
bb16: ; preds = %bb15
%iftmp.0.0 = select i1 %33, float 1.000000e+00, float 0.000000e+00 ; <float> [#uses=1]
%42 = tail call float @copysignf(float %iftmp.0.0, float %a) nounwind readnone, !dbg !33 ; <float> [#uses=2]
- tail call void @llvm.dbg.value(metadata !{float %42}, i64 0, metadata !0, metadata !{metadata !"0x102"}), !dbg !33
+ tail call void @llvm.dbg.value(metadata float %42, i64 0, metadata !0, metadata !{!"0x102"}), !dbg !33
%43 = fcmp ord float %b, 0.000000e+00 ; <i1> [#uses=1]
%44 = fsub float %b, %b, !dbg !34 ; <float> [#uses=1]
%45 = fcmp uno float %44, 0.000000e+00 ; <i1> [#uses=1]
%46 = and i1 %43, %45, !dbg !34 ; <i1> [#uses=1]
%iftmp.1.0 = select i1 %46, float 1.000000e+00, float 0.000000e+00 ; <float> [#uses=1]
%47 = tail call float @copysignf(float %iftmp.1.0, float %b) nounwind readnone, !dbg !34 ; <float> [#uses=2]
- tail call void @llvm.dbg.value(metadata !{float %47}, i64 0, metadata !11, metadata !{metadata !"0x102"}), !dbg !34
+ tail call void @llvm.dbg.value(metadata float %47, i64 0, metadata !11, metadata !{!"0x102"}), !dbg !34
%48 = fmul float %42, %c, !dbg !35 ; <float> [#uses=1]
%49 = fmul float %47, %d, !dbg !35 ; <float> [#uses=1]
%50 = fadd float %48, %49, !dbg !35 ; <float> [#uses=1]
%51 = fmul float %50, 0x7FF0000000000000, !dbg !35 ; <float> [#uses=1]
- tail call void @llvm.dbg.value(metadata !{float %51}, i64 0, metadata !17, metadata !{metadata !"0x102"}), !dbg !35
+ tail call void @llvm.dbg.value(metadata float %51, i64 0, metadata !17, metadata !{!"0x102"}), !dbg !35
%52 = fmul float %47, %c, !dbg !36 ; <float> [#uses=1]
%53 = fmul float %42, %d, !dbg !36 ; <float> [#uses=1]
%54 = fsub float %52, %53, !dbg !36 ; <float> [#uses=1]
%55 = fmul float %54, 0x7FF0000000000000, !dbg !36 ; <float> [#uses=1]
- tail call void @llvm.dbg.value(metadata !{float %55}, i64 0, metadata !18, metadata !{metadata !"0x102"}), !dbg !36
+ tail call void @llvm.dbg.value(metadata float %55, i64 0, metadata !18, metadata !{!"0x102"}), !dbg !36
br label %bb46, !dbg !36
bb27: ; preds = %bb15, %bb14, %bb11
@@ -154,24 +154,24 @@ bb34: ; preds = %bb33, %bb30
bb35: ; preds = %bb34
%iftmp.2.0 = select i1 %59, float 1.000000e+00, float 0.000000e+00 ; <float> [#uses=1]
%67 = tail call float @copysignf(float %iftmp.2.0, float %c) nounwind readnone, !dbg !38 ; <float> [#uses=2]
- tail call void @llvm.dbg.value(metadata !{float %67}, i64 0, metadata !12, metadata !{metadata !"0x102"}), !dbg !38
+ tail call void @llvm.dbg.value(metadata float %67, i64 0, metadata !12, metadata !{!"0x102"}), !dbg !38
%68 = fcmp ord float %d, 0.000000e+00 ; <i1> [#uses=1]
%69 = fsub float %d, %d, !dbg !39 ; <float> [#uses=1]
%70 = fcmp uno float %69, 0.000000e+00 ; <i1> [#uses=1]
%71 = and i1 %68, %70, !dbg !39 ; <i1> [#uses=1]
%iftmp.3.0 = select i1 %71, float 1.000000e+00, float 0.000000e+00 ; <float> [#uses=1]
%72 = tail call float @copysignf(float %iftmp.3.0, float %d) nounwind readnone, !dbg !39 ; <float> [#uses=2]
- tail call void @llvm.dbg.value(metadata !{float %72}, i64 0, metadata !13, metadata !{metadata !"0x102"}), !dbg !39
+ tail call void @llvm.dbg.value(metadata float %72, i64 0, metadata !13, metadata !{!"0x102"}), !dbg !39
%73 = fmul float %67, %a, !dbg !40 ; <float> [#uses=1]
%74 = fmul float %72, %b, !dbg !40 ; <float> [#uses=1]
%75 = fadd float %73, %74, !dbg !40 ; <float> [#uses=1]
%76 = fmul float %75, 0.000000e+00, !dbg !40 ; <float> [#uses=1]
- tail call void @llvm.dbg.value(metadata !{float %76}, i64 0, metadata !17, metadata !{metadata !"0x102"}), !dbg !40
+ tail call void @llvm.dbg.value(metadata float %76, i64 0, metadata !17, metadata !{!"0x102"}), !dbg !40
%77 = fmul float %67, %b, !dbg !41 ; <float> [#uses=1]
%78 = fmul float %72, %a, !dbg !41 ; <float> [#uses=1]
%79 = fsub float %77, %78, !dbg !41 ; <float> [#uses=1]
%80 = fmul float %79, 0.000000e+00, !dbg !41 ; <float> [#uses=1]
- tail call void @llvm.dbg.value(metadata !{float %80}, i64 0, metadata !18, metadata !{metadata !"0x102"}), !dbg !41
+ tail call void @llvm.dbg.value(metadata float %80, i64 0, metadata !18, metadata !{!"0x102"}), !dbg !41
br label %bb46, !dbg !41
bb46: ; preds = %bb35, %bb34, %bb33, %bb30, %bb16, %bb8, %bb2
@@ -200,52 +200,52 @@ declare void @llvm.dbg.value(metadata, i64, metadata, metadata) nounwind readnon
!llvm.dbg.cu = !{!3}
!llvm.module.flags = !{!48}
-!0 = metadata !{metadata !"0x101\00a\001921\000", metadata !1, metadata !2, metadata !9} ; [ DW_TAG_arg_variable ]
-!1 = metadata !{metadata !"0x2e\00__divsc3\00__divsc3\00__divsc3\001922\000\001\000\006\000\001\001922", metadata !45, metadata !2, metadata !4, null, %0 (float, float, float, float)* @__divsc3, null, null, metadata !43} ; [ DW_TAG_subprogram ]
-!2 = metadata !{metadata !"0x29", metadata !45} ; [ DW_TAG_file_type ]
-!3 = metadata !{metadata !"0x11\001\004.2.1 (Based on Apple Inc. build 5658) (LLVM build)\001\00\000\00\001", metadata !45, metadata !47, metadata !47, metadata !44, null, null} ; [ DW_TAG_compile_unit ]
-!4 = metadata !{metadata !"0x15\00\000\000\000\000\000\000", metadata !45, metadata !2, null, metadata !5, null, null, null} ; [ DW_TAG_subroutine_type ] [line 0, size 0, align 0, offset 0] [from ]
-!5 = metadata !{metadata !6, metadata !9, metadata !9, metadata !9, metadata !9}
-!6 = metadata !{metadata !"0x16\00SCtype\00170\000\000\000\000", metadata !46, metadata !7, metadata !8} ; [ DW_TAG_typedef ]
-!7 = metadata !{metadata !"0x29", metadata !46} ; [ DW_TAG_file_type ]
-!8 = metadata !{metadata !"0x24\00complex float\000\0064\0032\000\000\003", metadata !45, metadata !2} ; [ DW_TAG_base_type ]
-!9 = metadata !{metadata !"0x16\00SFtype\00167\000\000\000\000", metadata !46, metadata !7, metadata !10} ; [ DW_TAG_typedef ]
-!10 = metadata !{metadata !"0x24\00float\000\0032\0032\000\000\004", metadata !45, metadata !2} ; [ DW_TAG_base_type ]
-!11 = metadata !{metadata !"0x101\00b\001921\000", metadata !1, metadata !2, metadata !9} ; [ DW_TAG_arg_variable ]
-!12 = metadata !{metadata !"0x101\00c\001921\000", metadata !1, metadata !2, metadata !9} ; [ DW_TAG_arg_variable ]
-!13 = metadata !{metadata !"0x101\00d\001921\000", metadata !1, metadata !2, metadata !9} ; [ DW_TAG_arg_variable ]
-!14 = metadata !{metadata !"0x100\00denom\001923\000", metadata !15, metadata !2, metadata !9} ; [ DW_TAG_auto_variable ]
-!15 = metadata !{metadata !"0xb\001922\000\000", metadata !45, metadata !1} ; [ DW_TAG_lexical_block ]
-!16 = metadata !{metadata !"0x100\00ratio\001923\000", metadata !15, metadata !2, metadata !9} ; [ DW_TAG_auto_variable ]
-!17 = metadata !{metadata !"0x100\00x\001923\000", metadata !15, metadata !2, metadata !9} ; [ DW_TAG_auto_variable ]
-!18 = metadata !{metadata !"0x100\00y\001923\000", metadata !15, metadata !2, metadata !9} ; [ DW_TAG_auto_variable ]
-!19 = metadata !{i32 1929, i32 0, metadata !15, null}
-!20 = metadata !{i32 1931, i32 0, metadata !15, null}
-!21 = metadata !{i32 1932, i32 0, metadata !15, null}
-!22 = metadata !{i32 1933, i32 0, metadata !15, null}
-!23 = metadata !{i32 1934, i32 0, metadata !15, null}
-!24 = metadata !{i32 1938, i32 0, metadata !15, null}
-!25 = metadata !{i32 1939, i32 0, metadata !15, null}
-!26 = metadata !{i32 1940, i32 0, metadata !15, null}
-!27 = metadata !{i32 1941, i32 0, metadata !15, null}
-!28 = metadata !{i32 1946, i32 0, metadata !15, null}
-!29 = metadata !{i32 1948, i32 0, metadata !15, null}
-!30 = metadata !{i32 1950, i32 0, metadata !15, null}
-!31 = metadata !{i32 1951, i32 0, metadata !15, null}
-!32 = metadata !{i32 1953, i32 0, metadata !15, null}
-!33 = metadata !{i32 1955, i32 0, metadata !15, null}
-!34 = metadata !{i32 1956, i32 0, metadata !15, null}
-!35 = metadata !{i32 1957, i32 0, metadata !15, null}
-!36 = metadata !{i32 1958, i32 0, metadata !15, null}
-!37 = metadata !{i32 1960, i32 0, metadata !15, null}
-!38 = metadata !{i32 1962, i32 0, metadata !15, null}
-!39 = metadata !{i32 1963, i32 0, metadata !15, null}
-!40 = metadata !{i32 1964, i32 0, metadata !15, null}
-!41 = metadata !{i32 1965, i32 0, metadata !15, null}
-!42 = metadata !{i32 1969, i32 0, metadata !15, null}
-!43 = metadata !{metadata !0, metadata !11, metadata !12, metadata !13, metadata !14, metadata !16, metadata !17, metadata !18}
-!44 = metadata !{metadata !1}
-!45 = metadata !{metadata !"libgcc2.c", metadata !"/Users/yash/clean/LG.D/gcc/../../llvmgcc/gcc"}
-!46 = metadata !{metadata !"libgcc2.h", metadata !"/Users/yash/clean/LG.D/gcc/../../llvmgcc/gcc"}
-!47 = metadata !{i32 0}
-!48 = metadata !{i32 1, metadata !"Debug Info Version", i32 2}
+!0 = !{!"0x101\00a\001921\000", !1, !2, !9} ; [ DW_TAG_arg_variable ]
+!1 = !{!"0x2e\00__divsc3\00__divsc3\00__divsc3\001922\000\001\000\006\000\001\001922", !45, !2, !4, null, %0 (float, float, float, float)* @__divsc3, null, null, !43} ; [ DW_TAG_subprogram ]
+!2 = !{!"0x29", !45} ; [ DW_TAG_file_type ]
+!3 = !{!"0x11\001\004.2.1 (Based on Apple Inc. build 5658) (LLVM build)\001\00\000\00\001", !45, !47, !47, !44, null, null} ; [ DW_TAG_compile_unit ]
+!4 = !{!"0x15\00\000\000\000\000\000\000", !45, !2, null, !5, null, null, null} ; [ DW_TAG_subroutine_type ] [line 0, size 0, align 0, offset 0] [from ]
+!5 = !{!6, !9, !9, !9, !9}
+!6 = !{!"0x16\00SCtype\00170\000\000\000\000", !46, !7, !8} ; [ DW_TAG_typedef ]
+!7 = !{!"0x29", !46} ; [ DW_TAG_file_type ]
+!8 = !{!"0x24\00complex float\000\0064\0032\000\000\003", !45, !2} ; [ DW_TAG_base_type ]
+!9 = !{!"0x16\00SFtype\00167\000\000\000\000", !46, !7, !10} ; [ DW_TAG_typedef ]
+!10 = !{!"0x24\00float\000\0032\0032\000\000\004", !45, !2} ; [ DW_TAG_base_type ]
+!11 = !{!"0x101\00b\001921\000", !1, !2, !9} ; [ DW_TAG_arg_variable ]
+!12 = !{!"0x101\00c\001921\000", !1, !2, !9} ; [ DW_TAG_arg_variable ]
+!13 = !{!"0x101\00d\001921\000", !1, !2, !9} ; [ DW_TAG_arg_variable ]
+!14 = !{!"0x100\00denom\001923\000", !15, !2, !9} ; [ DW_TAG_auto_variable ]
+!15 = !{!"0xb\001922\000\000", !45, !1} ; [ DW_TAG_lexical_block ]
+!16 = !{!"0x100\00ratio\001923\000", !15, !2, !9} ; [ DW_TAG_auto_variable ]
+!17 = !{!"0x100\00x\001923\000", !15, !2, !9} ; [ DW_TAG_auto_variable ]
+!18 = !{!"0x100\00y\001923\000", !15, !2, !9} ; [ DW_TAG_auto_variable ]
+!19 = !MDLocation(line: 1929, scope: !15)
+!20 = !MDLocation(line: 1931, scope: !15)
+!21 = !MDLocation(line: 1932, scope: !15)
+!22 = !MDLocation(line: 1933, scope: !15)
+!23 = !MDLocation(line: 1934, scope: !15)
+!24 = !MDLocation(line: 1938, scope: !15)
+!25 = !MDLocation(line: 1939, scope: !15)
+!26 = !MDLocation(line: 1940, scope: !15)
+!27 = !MDLocation(line: 1941, scope: !15)
+!28 = !MDLocation(line: 1946, scope: !15)
+!29 = !MDLocation(line: 1948, scope: !15)
+!30 = !MDLocation(line: 1950, scope: !15)
+!31 = !MDLocation(line: 1951, scope: !15)
+!32 = !MDLocation(line: 1953, scope: !15)
+!33 = !MDLocation(line: 1955, scope: !15)
+!34 = !MDLocation(line: 1956, scope: !15)
+!35 = !MDLocation(line: 1957, scope: !15)
+!36 = !MDLocation(line: 1958, scope: !15)
+!37 = !MDLocation(line: 1960, scope: !15)
+!38 = !MDLocation(line: 1962, scope: !15)
+!39 = !MDLocation(line: 1963, scope: !15)
+!40 = !MDLocation(line: 1964, scope: !15)
+!41 = !MDLocation(line: 1965, scope: !15)
+!42 = !MDLocation(line: 1969, scope: !15)
+!43 = !{!0, !11, !12, !13, !14, !16, !17, !18}
+!44 = !{!1}
+!45 = !{!"libgcc2.c", !"/Users/yash/clean/LG.D/gcc/../../llvmgcc/gcc"}
+!46 = !{!"libgcc2.h", !"/Users/yash/clean/LG.D/gcc/../../llvmgcc/gcc"}
+!47 = !{i32 0}
+!48 = !{i32 1, !"Debug Info Version", i32 2}
diff --git a/test/CodeGen/X86/2010-05-26-DotDebugLoc.ll b/test/CodeGen/X86/2010-05-26-DotDebugLoc.ll
index 09120a1..9915a70 100644
--- a/test/CodeGen/X86/2010-05-26-DotDebugLoc.ll
+++ b/test/CodeGen/X86/2010-05-26-DotDebugLoc.ll
@@ -9,7 +9,7 @@ target triple = "x86_64-apple-darwin10"
define i8* @bar(%struct.a* %myvar) nounwind optsize noinline ssp {
entry:
- tail call void @llvm.dbg.value(metadata !{%struct.a* %myvar}, i64 0, metadata !8, metadata !{metadata !"0x102"})
+ tail call void @llvm.dbg.value(metadata %struct.a* %myvar, i64 0, metadata !8, metadata !{!"0x102"})
%0 = getelementptr inbounds %struct.a* %myvar, i64 0, i32 0, !dbg !28 ; <i32*> [#uses=1]
%1 = load i32* %0, align 8, !dbg !28 ; <i32> [#uses=1]
tail call void @foo(i32 %1) nounwind optsize noinline ssp, !dbg !28
@@ -24,44 +24,44 @@ declare void @llvm.dbg.value(metadata, i64, metadata, metadata) nounwind readnon
!llvm.dbg.cu = !{!2}
!llvm.module.flags = !{!38}
-!0 = metadata !{metadata !"0x34\00ret\00ret\00\007\000\001", metadata !1, metadata !1, metadata !3, null, null} ; [ DW_TAG_variable ]
-!1 = metadata !{metadata !"0x29", metadata !36} ; [ DW_TAG_file_type ]
-!2 = metadata !{metadata !"0x11\001\004.2.1 (Based on Apple Inc. build 5658) (LLVM build)\001\00\000\00\001", metadata !36, metadata !37, metadata !37, metadata !32, metadata !31, metadata !37} ; [ DW_TAG_compile_unit ]
-!3 = metadata !{metadata !"0x24\00int\000\0032\0032\000\000\005", metadata !36, metadata !1} ; [ DW_TAG_base_type ]
-!4 = metadata !{metadata !"0x101\00x\0012\000", metadata !5, metadata !1, metadata !3} ; [ DW_TAG_arg_variable ]
-!5 = metadata !{metadata !"0x2e\00foo\00foo\00foo\0013\000\001\000\006\000\001\0013", metadata !36, metadata !1, metadata !6, null, void (i32)* @foo, null, null, metadata !33} ; [ DW_TAG_subprogram ]
-!6 = metadata !{metadata !"0x15\00\000\000\000\000\000\000", metadata !36, metadata !1, null, metadata !7, null, null, null} ; [ DW_TAG_subroutine_type ] [line 0, size 0, align 0, offset 0] [from ]
-!7 = metadata !{null, metadata !3}
-!8 = metadata !{metadata !"0x101\00myvar\0017\000", metadata !9, metadata !1, metadata !13} ; [ DW_TAG_arg_variable ]
-!9 = metadata !{metadata !"0x2e\00bar\00bar\00bar\0017\000\001\000\006\000\001\0017", metadata !36, metadata !1, metadata !10, null, i8* (%struct.a*)* @bar, null, null, metadata !34} ; [ DW_TAG_subprogram ]
-!10 = metadata !{metadata !"0x15\00\000\000\000\000\000\000", metadata !36, metadata !1, null, metadata !11, null, null, null} ; [ DW_TAG_subroutine_type ] [line 0, size 0, align 0, offset 0] [from ]
-!11 = metadata !{metadata !12, metadata !13}
-!12 = metadata !{metadata !"0xf\00\000\0064\0064\000\000", metadata !36, metadata !1, null} ; [ DW_TAG_pointer_type ]
-!13 = metadata !{metadata !"0xf\00\000\0064\0064\000\000", metadata !36, metadata !1, metadata !14} ; [ DW_TAG_pointer_type ]
-!14 = metadata !{metadata !"0x13\00a\002\00128\0064\000\000\000", metadata !36, metadata !1, null, metadata !15, null, null, null} ; [ DW_TAG_structure_type ] [a] [line 2, size 128, align 64, offset 0] [def] [from ]
-!15 = metadata !{metadata !16, metadata !17}
-!16 = metadata !{metadata !"0xd\00c\003\0032\0032\000\000", metadata !36, metadata !14, metadata !3} ; [ DW_TAG_member ]
-!17 = metadata !{metadata !"0xd\00d\004\0064\0064\0064\000", metadata !36, metadata !14, metadata !13} ; [ DW_TAG_member ]
-!18 = metadata !{metadata !"0x101\00argc\0022\000", metadata !19, metadata !1, metadata !3} ; [ DW_TAG_arg_variable ]
-!19 = metadata !{metadata !"0x2e\00main\00main\00main\0022\000\001\000\006\000\001\0022", metadata !36, metadata !1, metadata !20, null, null, null, null, metadata !35} ; [ DW_TAG_subprogram ]
-!20 = metadata !{metadata !"0x15\00\000\000\000\000\000\000", metadata !36, metadata !1, null, metadata !21, null, null, null} ; [ DW_TAG_subroutine_type ] [line 0, size 0, align 0, offset 0] [from ]
-!21 = metadata !{metadata !3, metadata !3, metadata !22}
-!22 = metadata !{metadata !"0xf\00\000\0064\0064\000\000", metadata !36, metadata !1, metadata !23} ; [ DW_TAG_pointer_type ]
-!23 = metadata !{metadata !"0xf\00\000\0064\0064\000\000", metadata !36, metadata !1, metadata !24} ; [ DW_TAG_pointer_type ]
-!24 = metadata !{metadata !"0x24\00char\000\008\008\000\000\006", metadata !36, metadata !1} ; [ DW_TAG_base_type ]
-!25 = metadata !{metadata !"0x101\00argv\0022\000", metadata !19, metadata !1, metadata !22} ; [ DW_TAG_arg_variable ]
-!26 = metadata !{metadata !"0x100\00e\0023\000", metadata !27, metadata !1, metadata !14} ; [ DW_TAG_auto_variable ]
-!27 = metadata !{metadata !"0xb\0022\000\000", metadata !36, metadata !19} ; [ DW_TAG_lexical_block ]
-!28 = metadata !{i32 18, i32 0, metadata !29, null}
-!29 = metadata !{metadata !"0xb\0017\000\001", metadata !36, metadata !9} ; [ DW_TAG_lexical_block ]
-!30 = metadata !{i32 19, i32 0, metadata !29, null}
-!31 = metadata !{metadata !0}
-!32 = metadata !{metadata !5, metadata !9, metadata !19}
-!33 = metadata !{metadata !4}
-!34 = metadata !{metadata !8}
-!35 = metadata !{metadata !18, metadata !25, metadata !26}
-!36 = metadata !{metadata !"foo.c", metadata !"/tmp/"}
-!37 = metadata !{}
+!0 = !{!"0x34\00ret\00ret\00\007\000\001", !1, !1, !3, null, null} ; [ DW_TAG_variable ]
+!1 = !{!"0x29", !36} ; [ DW_TAG_file_type ]
+!2 = !{!"0x11\001\004.2.1 (Based on Apple Inc. build 5658) (LLVM build)\001\00\000\00\001", !36, !37, !37, !32, !31, !37} ; [ DW_TAG_compile_unit ]
+!3 = !{!"0x24\00int\000\0032\0032\000\000\005", !36, !1} ; [ DW_TAG_base_type ]
+!4 = !{!"0x101\00x\0012\000", !5, !1, !3} ; [ DW_TAG_arg_variable ]
+!5 = !{!"0x2e\00foo\00foo\00foo\0013\000\001\000\006\000\001\0013", !36, !1, !6, null, void (i32)* @foo, null, null, !33} ; [ DW_TAG_subprogram ]
+!6 = !{!"0x15\00\000\000\000\000\000\000", !36, !1, null, !7, null, null, null} ; [ DW_TAG_subroutine_type ] [line 0, size 0, align 0, offset 0] [from ]
+!7 = !{null, !3}
+!8 = !{!"0x101\00myvar\0017\000", !9, !1, !13} ; [ DW_TAG_arg_variable ]
+!9 = !{!"0x2e\00bar\00bar\00bar\0017\000\001\000\006\000\001\0017", !36, !1, !10, null, i8* (%struct.a*)* @bar, null, null, !34} ; [ DW_TAG_subprogram ]
+!10 = !{!"0x15\00\000\000\000\000\000\000", !36, !1, null, !11, null, null, null} ; [ DW_TAG_subroutine_type ] [line 0, size 0, align 0, offset 0] [from ]
+!11 = !{!12, !13}
+!12 = !{!"0xf\00\000\0064\0064\000\000", !36, !1, null} ; [ DW_TAG_pointer_type ]
+!13 = !{!"0xf\00\000\0064\0064\000\000", !36, !1, !14} ; [ DW_TAG_pointer_type ]
+!14 = !{!"0x13\00a\002\00128\0064\000\000\000", !36, !1, null, !15, null, null, null} ; [ DW_TAG_structure_type ] [a] [line 2, size 128, align 64, offset 0] [def] [from ]
+!15 = !{!16, !17}
+!16 = !{!"0xd\00c\003\0032\0032\000\000", !36, !14, !3} ; [ DW_TAG_member ]
+!17 = !{!"0xd\00d\004\0064\0064\0064\000", !36, !14, !13} ; [ DW_TAG_member ]
+!18 = !{!"0x101\00argc\0022\000", !19, !1, !3} ; [ DW_TAG_arg_variable ]
+!19 = !{!"0x2e\00main\00main\00main\0022\000\001\000\006\000\001\0022", !36, !1, !20, null, null, null, null, !35} ; [ DW_TAG_subprogram ]
+!20 = !{!"0x15\00\000\000\000\000\000\000", !36, !1, null, !21, null, null, null} ; [ DW_TAG_subroutine_type ] [line 0, size 0, align 0, offset 0] [from ]
+!21 = !{!3, !3, !22}
+!22 = !{!"0xf\00\000\0064\0064\000\000", !36, !1, !23} ; [ DW_TAG_pointer_type ]
+!23 = !{!"0xf\00\000\0064\0064\000\000", !36, !1, !24} ; [ DW_TAG_pointer_type ]
+!24 = !{!"0x24\00char\000\008\008\000\000\006", !36, !1} ; [ DW_TAG_base_type ]
+!25 = !{!"0x101\00argv\0022\000", !19, !1, !22} ; [ DW_TAG_arg_variable ]
+!26 = !{!"0x100\00e\0023\000", !27, !1, !14} ; [ DW_TAG_auto_variable ]
+!27 = !{!"0xb\0022\000\000", !36, !19} ; [ DW_TAG_lexical_block ]
+!28 = !MDLocation(line: 18, scope: !29)
+!29 = !{!"0xb\0017\000\001", !36, !9} ; [ DW_TAG_lexical_block ]
+!30 = !MDLocation(line: 19, scope: !29)
+!31 = !{!0}
+!32 = !{!5, !9, !19}
+!33 = !{!4}
+!34 = !{!8}
+!35 = !{!18, !25, !26}
+!36 = !{!"foo.c", !"/tmp/"}
+!37 = !{}
; The variable bar:myvar changes registers after the first movq.
; It is clobbered by popq %rbx.
@@ -91,4 +91,4 @@ declare void @llvm.dbg.value(metadata, i64, metadata, metadata) nounwind readnon
; CHECK-NEXT: Ltmp{{.*}}:
; CHECK-NEXT: .byte 83
; CHECK-NEXT: Ltmp{{.*}}:
-!38 = metadata !{i32 1, metadata !"Debug Info Version", i32 2}
+!38 = !{i32 1, !"Debug Info Version", i32 2}
diff --git a/test/CodeGen/X86/2010-05-28-Crash.ll b/test/CodeGen/X86/2010-05-28-Crash.ll
index b0a4e8d..7adacf5 100644
--- a/test/CodeGen/X86/2010-05-28-Crash.ll
+++ b/test/CodeGen/X86/2010-05-28-Crash.ll
@@ -4,7 +4,7 @@
define i32 @foo(i32 %y) nounwind optsize ssp {
entry:
- tail call void @llvm.dbg.value(metadata !{i32 %y}, i64 0, metadata !0, metadata !{metadata !"0x102"})
+ tail call void @llvm.dbg.value(metadata i32 %y, i64 0, metadata !0, metadata !{!"0x102"})
%0 = tail call i32 (...)* @zoo(i32 %y) nounwind, !dbg !9 ; <i32> [#uses=1]
ret i32 %0, !dbg !9
}
@@ -15,8 +15,8 @@ declare void @llvm.dbg.value(metadata, i64, metadata, metadata) nounwind readnon
define i32 @bar(i32 %x) nounwind optsize ssp {
entry:
- tail call void @llvm.dbg.value(metadata !{i32 %x}, i64 0, metadata !7, metadata !{metadata !"0x102"})
- tail call void @llvm.dbg.value(metadata !11, i64 0, metadata !0, metadata !{metadata !"0x102"}) nounwind
+ tail call void @llvm.dbg.value(metadata i32 %x, i64 0, metadata !7, metadata !{!"0x102"})
+ tail call void @llvm.dbg.value(metadata i32 1, i64 0, metadata !0, metadata !{!"0x102"}) nounwind
%0 = tail call i32 (...)* @zoo(i32 1) nounwind, !dbg !12 ; <i32> [#uses=1]
%1 = add nsw i32 %0, %x, !dbg !13 ; <i32> [#uses=1]
ret i32 %1, !dbg !13
@@ -25,28 +25,28 @@ entry:
!llvm.dbg.cu = !{!3}
!llvm.module.flags = !{!20}
-!0 = metadata !{metadata !"0x101\00y\002\000", metadata !1, metadata !2, metadata !6} ; [ DW_TAG_arg_variable ]
-!1 = metadata !{metadata !"0x2e\00foo\00foo\00foo\002\000\001\000\006\000\001\002", metadata !18, metadata !2, metadata !4, null, i32 (i32)* @foo, null, null, metadata !15} ; [ DW_TAG_subprogram ]
-!2 = metadata !{metadata !"0x29", metadata !18} ; [ DW_TAG_file_type ]
-!3 = metadata !{metadata !"0x11\001\004.2.1 (Based on Apple Inc. build 5658) (LLVM build)\001\00\000\00\001", metadata !18, metadata !19, metadata !19, metadata !17, null, null} ; [ DW_TAG_compile_unit ]
-!4 = metadata !{metadata !"0x15\00\000\000\000\000\000\000", metadata !18, metadata !2, null, metadata !5, null, null, null} ; [ DW_TAG_subroutine_type ] [line 0, size 0, align 0, offset 0] [from ]
-!5 = metadata !{metadata !6, metadata !6}
-!6 = metadata !{metadata !"0x24\00int\000\0032\0032\000\000\005", metadata !18, metadata !2} ; [ DW_TAG_base_type ]
-!7 = metadata !{metadata !"0x101\00x\006\000", metadata !8, metadata !2, metadata !6} ; [ DW_TAG_arg_variable ]
-!8 = metadata !{metadata !"0x2e\00bar\00bar\00bar\006\000\001\000\006\000\001\006", metadata !18, metadata !2, metadata !4, null, i32 (i32)* @bar, null, null, metadata !16} ; [ DW_TAG_subprogram ]
-!9 = metadata !{i32 3, i32 0, metadata !10, null}
-!10 = metadata !{metadata !"0xb\002\000\000", metadata !18, metadata !1} ; [ DW_TAG_lexical_block ]
-!11 = metadata !{i32 1}
-!12 = metadata !{i32 3, i32 0, metadata !10, metadata !13}
-!13 = metadata !{i32 7, i32 0, metadata !14, null}
-!14 = metadata !{metadata !"0xb\006\000\000", metadata !18, metadata !8} ; [ DW_TAG_lexical_block ]
-!15 = metadata !{metadata !0}
-!16 = metadata !{metadata !7}
-!17 = metadata !{metadata !1, metadata !8}
-!18 = metadata !{metadata !"f.c", metadata !"/tmp"}
-!19 = metadata !{i32 0}
+!0 = !{!"0x101\00y\002\000", !1, !2, !6} ; [ DW_TAG_arg_variable ]
+!1 = !{!"0x2e\00foo\00foo\00foo\002\000\001\000\006\000\001\002", !18, !2, !4, null, i32 (i32)* @foo, null, null, !15} ; [ DW_TAG_subprogram ]
+!2 = !{!"0x29", !18} ; [ DW_TAG_file_type ]
+!3 = !{!"0x11\001\004.2.1 (Based on Apple Inc. build 5658) (LLVM build)\001\00\000\00\001", !18, !19, !19, !17, null, null} ; [ DW_TAG_compile_unit ]
+!4 = !{!"0x15\00\000\000\000\000\000\000", !18, !2, null, !5, null, null, null} ; [ DW_TAG_subroutine_type ] [line 0, size 0, align 0, offset 0] [from ]
+!5 = !{!6, !6}
+!6 = !{!"0x24\00int\000\0032\0032\000\000\005", !18, !2} ; [ DW_TAG_base_type ]
+!7 = !{!"0x101\00x\006\000", !8, !2, !6} ; [ DW_TAG_arg_variable ]
+!8 = !{!"0x2e\00bar\00bar\00bar\006\000\001\000\006\000\001\006", !18, !2, !4, null, i32 (i32)* @bar, null, null, !16} ; [ DW_TAG_subprogram ]
+!9 = !MDLocation(line: 3, scope: !10)
+!10 = !{!"0xb\002\000\000", !18, !1} ; [ DW_TAG_lexical_block ]
+!11 = !{i32 1}
+!12 = !MDLocation(line: 3, scope: !10, inlinedAt: !13)
+!13 = !MDLocation(line: 7, scope: !14)
+!14 = !{!"0xb\006\000\000", !18, !8} ; [ DW_TAG_lexical_block ]
+!15 = !{!0}
+!16 = !{!7}
+!17 = !{!1, !8}
+!18 = !{!"f.c", !"/tmp"}
+!19 = !{i32 0}
;CHECK: DEBUG_VALUE: bar:x <- E
;CHECK: Ltmp
;CHECK: DEBUG_VALUE: foo:y <- 1{{$}}
-!20 = metadata !{i32 1, metadata !"Debug Info Version", i32 2}
+!20 = !{i32 1, !"Debug Info Version", i32 2}
diff --git a/test/CodeGen/X86/2010-06-01-DeadArg-DbgInfo.ll b/test/CodeGen/X86/2010-06-01-DeadArg-DbgInfo.ll
index dea9162..3687b82 100644
--- a/test/CodeGen/X86/2010-06-01-DeadArg-DbgInfo.ll
+++ b/test/CodeGen/X86/2010-06-01-DeadArg-DbgInfo.ll
@@ -10,8 +10,8 @@ target triple = "x86_64-apple-darwin10.2"
define i32 @_ZN3foo3bazEi(%struct.foo* nocapture %this, i32 %x) nounwind readnone optsize noinline ssp align 2 {
;CHECK: DEBUG_VALUE: baz:this <- RDI{{$}}
entry:
- tail call void @llvm.dbg.value(metadata !{%struct.foo* %this}, i64 0, metadata !15, metadata !{metadata !"0x102"})
- tail call void @llvm.dbg.value(metadata !{i32 %x}, i64 0, metadata !16, metadata !{metadata !"0x102"})
+ tail call void @llvm.dbg.value(metadata %struct.foo* %this, i64 0, metadata !15, metadata !{!"0x102"})
+ tail call void @llvm.dbg.value(metadata i32 %x, i64 0, metadata !16, metadata !{!"0x102"})
%0 = mul nsw i32 %x, 7, !dbg !29 ; <i32> [#uses=1]
%1 = add nsw i32 %0, 1, !dbg !29 ; <i32> [#uses=1]
ret i32 %1, !dbg !29
@@ -23,38 +23,38 @@ declare void @llvm.dbg.value(metadata, i64, metadata, metadata) nounwind readnon
!llvm.module.flags = !{!34}
!llvm.dbg.lv = !{!0, !14, !15, !16, !17, !24, !25, !28}
-!0 = metadata !{metadata !"0x101\00this\0011\000", metadata !1, metadata !3, metadata !12} ; [ DW_TAG_arg_variable ]
-!1 = metadata !{metadata !"0x2e\00bar\00bar\00_ZN3foo3barEi\0011\000\001\000\006\000\001\0011", metadata !31, metadata !2, metadata !9, null, i32 (%struct.foo*, i32)* null, null, null, null} ; [ DW_TAG_subprogram ]
-!2 = metadata !{metadata !"0x13\00foo\003\0032\0032\000\000\000", metadata !31, metadata !3, null, metadata !5, null, null, null} ; [ DW_TAG_structure_type ] [foo] [line 3, size 32, align 32, offset 0] [def] [from ]
-!3 = metadata !{metadata !"0x29", metadata !31} ; [ DW_TAG_file_type ]
-!4 = metadata !{metadata !"0x11\004\004.2.1 LLVM build\001\00\000\00\000", metadata !31, metadata !32, metadata !32, metadata !33, null, null} ; [ DW_TAG_compile_unit ]
-!5 = metadata !{metadata !6, metadata !1, metadata !8}
-!6 = metadata !{metadata !"0xd\00y\008\0032\0032\000\000", metadata !31, metadata !2, metadata !7} ; [ DW_TAG_member ]
-!7 = metadata !{metadata !"0x24\00int\000\0032\0032\000\000\005", metadata !31, metadata !3} ; [ DW_TAG_base_type ]
-!8 = metadata !{metadata !"0x2e\00baz\00baz\00_ZN3foo3bazEi\0015\000\001\000\006\000\001\0015", metadata !31, metadata !2, metadata !9, null, i32 (%struct.foo*, i32)* @_ZN3foo3bazEi, null, null, null} ; [ DW_TAG_subprogram ]
-!9 = metadata !{metadata !"0x15\00\000\000\000\000\000\000", metadata !31, metadata !3, null, metadata !10, null, null, null} ; [ DW_TAG_subroutine_type ] [line 0, size 0, align 0, offset 0] [from ]
-!10 = metadata !{metadata !7, metadata !11, metadata !7}
-!11 = metadata !{metadata !"0xf\00\000\0064\0064\000\0064", metadata !31, metadata !3, metadata !2} ; [ DW_TAG_pointer_type ]
-!12 = metadata !{metadata !"0x26\00\000\0064\0064\000\0064", metadata !31, metadata !3, metadata !13} ; [ DW_TAG_const_type ]
-!13 = metadata !{metadata !"0xf\00\000\0064\0064\000\000", metadata !31, metadata !3, metadata !2} ; [ DW_TAG_pointer_type ]
-!14 = metadata !{metadata !"0x101\00x\0011\000", metadata !1, metadata !3, metadata !7} ; [ DW_TAG_arg_variable ]
-!15 = metadata !{metadata !"0x101\00this\0015\000", metadata !8, metadata !3, metadata !12} ; [ DW_TAG_arg_variable ]
-!16 = metadata !{metadata !"0x101\00x\0015\000", metadata !8, metadata !3, metadata !7} ; [ DW_TAG_arg_variable ]
-!17 = metadata !{metadata !"0x101\00argc\0019\000", metadata !18, metadata !3, metadata !7} ; [ DW_TAG_arg_variable ]
-!18 = metadata !{metadata !"0x2e\00main\00main\00main\0019\000\001\000\006\000\001\0019", metadata !31, metadata !3, metadata !19, null, null, null, null, null} ; [ DW_TAG_subprogram ]
-!19 = metadata !{metadata !"0x15\00\000\000\000\000\000\000", metadata !31, metadata !3, null, metadata !20, null, null, null} ; [ DW_TAG_subroutine_type ] [line 0, size 0, align 0, offset 0] [from ]
-!20 = metadata !{metadata !7, metadata !7, metadata !21}
-!21 = metadata !{metadata !"0xf\00\000\0064\0064\000\000", metadata !31, metadata !3, metadata !22} ; [ DW_TAG_pointer_type ]
-!22 = metadata !{metadata !"0xf\00\000\0064\0064\000\000", metadata !31, metadata !3, metadata !23} ; [ DW_TAG_pointer_type ]
-!23 = metadata !{metadata !"0x24\00char\000\008\008\000\000\006", metadata !31, metadata !3} ; [ DW_TAG_base_type ]
-!24 = metadata !{metadata !"0x101\00argv\0019\000", metadata !18, metadata !3, metadata !21} ; [ DW_TAG_arg_variable ]
-!25 = metadata !{metadata !"0x100\00a\0020\000", metadata !26, metadata !3, metadata !2} ; [ DW_TAG_auto_variable ]
-!26 = metadata !{metadata !"0xb\0019\000\000", metadata !31, metadata !27} ; [ DW_TAG_lexical_block ]
-!27 = metadata !{metadata !"0xb\0019\000\000", metadata !31, metadata !18} ; [ DW_TAG_lexical_block ]
-!28 = metadata !{metadata !"0x100\00b\0021\000", metadata !26, metadata !3, metadata !7} ; [ DW_TAG_auto_variable ]
-!29 = metadata !{i32 16, i32 0, metadata !30, null}
-!30 = metadata !{metadata !"0xb\0015\000\000", metadata !31, metadata !8} ; [ DW_TAG_lexical_block ]
-!31 = metadata !{metadata !"foo.cp", metadata !"/tmp/"}
-!32 = metadata !{i32 0}
-!33 = metadata !{metadata !1, metadata !8, metadata !18}
-!34 = metadata !{i32 1, metadata !"Debug Info Version", i32 2}
+!0 = !{!"0x101\00this\0011\000", !1, !3, !12} ; [ DW_TAG_arg_variable ]
+!1 = !{!"0x2e\00bar\00bar\00_ZN3foo3barEi\0011\000\001\000\006\000\001\0011", !31, !2, !9, null, i32 (%struct.foo*, i32)* null, null, null, null} ; [ DW_TAG_subprogram ]
+!2 = !{!"0x13\00foo\003\0032\0032\000\000\000", !31, !3, null, !5, null, null, null} ; [ DW_TAG_structure_type ] [foo] [line 3, size 32, align 32, offset 0] [def] [from ]
+!3 = !{!"0x29", !31} ; [ DW_TAG_file_type ]
+!4 = !{!"0x11\004\004.2.1 LLVM build\001\00\000\00\000", !31, !32, !32, !33, null, null} ; [ DW_TAG_compile_unit ]
+!5 = !{!6, !1, !8}
+!6 = !{!"0xd\00y\008\0032\0032\000\000", !31, !2, !7} ; [ DW_TAG_member ]
+!7 = !{!"0x24\00int\000\0032\0032\000\000\005", !31, !3} ; [ DW_TAG_base_type ]
+!8 = !{!"0x2e\00baz\00baz\00_ZN3foo3bazEi\0015\000\001\000\006\000\001\0015", !31, !2, !9, null, i32 (%struct.foo*, i32)* @_ZN3foo3bazEi, null, null, null} ; [ DW_TAG_subprogram ]
+!9 = !{!"0x15\00\000\000\000\000\000\000", !31, !3, null, !10, null, null, null} ; [ DW_TAG_subroutine_type ] [line 0, size 0, align 0, offset 0] [from ]
+!10 = !{!7, !11, !7}
+!11 = !{!"0xf\00\000\0064\0064\000\0064", !31, !3, !2} ; [ DW_TAG_pointer_type ]
+!12 = !{!"0x26\00\000\0064\0064\000\0064", !31, !3, !13} ; [ DW_TAG_const_type ]
+!13 = !{!"0xf\00\000\0064\0064\000\000", !31, !3, !2} ; [ DW_TAG_pointer_type ]
+!14 = !{!"0x101\00x\0011\000", !1, !3, !7} ; [ DW_TAG_arg_variable ]
+!15 = !{!"0x101\00this\0015\000", !8, !3, !12} ; [ DW_TAG_arg_variable ]
+!16 = !{!"0x101\00x\0015\000", !8, !3, !7} ; [ DW_TAG_arg_variable ]
+!17 = !{!"0x101\00argc\0019\000", !18, !3, !7} ; [ DW_TAG_arg_variable ]
+!18 = !{!"0x2e\00main\00main\00main\0019\000\001\000\006\000\001\0019", !31, !3, !19, null, null, null, null, null} ; [ DW_TAG_subprogram ]
+!19 = !{!"0x15\00\000\000\000\000\000\000", !31, !3, null, !20, null, null, null} ; [ DW_TAG_subroutine_type ] [line 0, size 0, align 0, offset 0] [from ]
+!20 = !{!7, !7, !21}
+!21 = !{!"0xf\00\000\0064\0064\000\000", !31, !3, !22} ; [ DW_TAG_pointer_type ]
+!22 = !{!"0xf\00\000\0064\0064\000\000", !31, !3, !23} ; [ DW_TAG_pointer_type ]
+!23 = !{!"0x24\00char\000\008\008\000\000\006", !31, !3} ; [ DW_TAG_base_type ]
+!24 = !{!"0x101\00argv\0019\000", !18, !3, !21} ; [ DW_TAG_arg_variable ]
+!25 = !{!"0x100\00a\0020\000", !26, !3, !2} ; [ DW_TAG_auto_variable ]
+!26 = !{!"0xb\0019\000\000", !31, !27} ; [ DW_TAG_lexical_block ]
+!27 = !{!"0xb\0019\000\000", !31, !18} ; [ DW_TAG_lexical_block ]
+!28 = !{!"0x100\00b\0021\000", !26, !3, !7} ; [ DW_TAG_auto_variable ]
+!29 = !MDLocation(line: 16, scope: !30)
+!30 = !{!"0xb\0015\000\000", !31, !8} ; [ DW_TAG_lexical_block ]
+!31 = !{!"foo.cp", !"/tmp/"}
+!32 = !{i32 0}
+!33 = !{!1, !8, !18}
+!34 = !{i32 1, !"Debug Info Version", i32 2}
diff --git a/test/CodeGen/X86/2010-06-15-FastAllocEarlyCLobber.ll b/test/CodeGen/X86/2010-06-15-FastAllocEarlyCLobber.ll
index 0f8855d..74a7610 100644
--- a/test/CodeGen/X86/2010-06-15-FastAllocEarlyCLobber.ll
+++ b/test/CodeGen/X86/2010-06-15-FastAllocEarlyCLobber.ll
@@ -26,4 +26,4 @@ entry:
declare i32 @printf(i8*, ...)
-!0 = metadata !{i32 191}
+!0 = !{i32 191}
diff --git a/test/CodeGen/X86/2010-06-25-asm-RA-crash.ll b/test/CodeGen/X86/2010-06-25-asm-RA-crash.ll
index 0df9dc1..3470a06 100644
--- a/test/CodeGen/X86/2010-06-25-asm-RA-crash.ll
+++ b/test/CodeGen/X86/2010-06-25-asm-RA-crash.ll
@@ -16,4 +16,4 @@ entry:
declare x86_stdcallcc void @RtlUnwind(...)
-!0 = metadata !{i32 215}
+!0 = !{i32 215}
diff --git a/test/CodeGen/X86/2010-06-28-FastAllocTiedOperand.ll b/test/CodeGen/X86/2010-06-28-FastAllocTiedOperand.ll
index d7bc21f..7cffdc5 100644
--- a/test/CodeGen/X86/2010-06-28-FastAllocTiedOperand.ll
+++ b/test/CodeGen/X86/2010-06-28-FastAllocTiedOperand.ll
@@ -19,4 +19,4 @@ entry:
ret i32 %asmresult
}
-!0 = metadata !{i32 108}
+!0 = !{i32 108}
diff --git a/test/CodeGen/X86/2010-07-06-DbgCrash.ll b/test/CodeGen/X86/2010-07-06-DbgCrash.ll
index 9d65dc1..457c498 100644
--- a/test/CodeGen/X86/2010-07-06-DbgCrash.ll
+++ b/test/CodeGen/X86/2010-07-06-DbgCrash.ll
@@ -3,27 +3,27 @@
@.str = private constant [4 x i8] c"one\00", align 1 ; <[4 x i8]*> [#uses=1]
@.str1 = private constant [4 x i8] c"two\00", align 1 ; <[4 x i8]*> [#uses=1]
@C.9.2167 = internal constant [2 x i8*] [i8* getelementptr inbounds ([4 x i8]* @.str, i64 0, i64 0), i8* getelementptr inbounds ([4 x i8]* @.str1, i64 0, i64 0)]
-!38 = metadata !{metadata !"0x29", metadata !109} ; [ DW_TAG_file_type ]
-!39 = metadata !{metadata !"0x11\001\004.2.1 (Based on Apple Inc. build 5658) (LLVM build 9999)\001\00\000\00\000", metadata !109, metadata !108, metadata !108, null, null, null} ; [ DW_TAG_compile_unit ]
-!46 = metadata !{metadata !"0xf\00\000\0064\0064\000\000", metadata !109, null, metadata !47} ; [ DW_TAG_pointer_type ]
-!47 = metadata !{metadata !"0x24\00char\000\008\008\000\000\006", metadata !109, null} ; [ DW_TAG_base_type ]
-!97 = metadata !{metadata !"0x2e\00main\00main\00main\0073\000\001\000\006\000\000\000", i32 0, metadata !39, metadata !98, null, null, null, null, null} ; [ DW_TAG_subprogram ]
-!98 = metadata !{metadata !"0x15\00\000\000\000\000\000\000", metadata !109, null, null, metadata !99, null, null, null} ; [ DW_TAG_subroutine_type ] [line 0, size 0, align 0, offset 0] [from ]
-!99 = metadata !{metadata !100}
-!100 = metadata !{metadata !"0x24\00int\000\0032\0032\000\000\005", metadata !109, null} ; [ DW_TAG_base_type ]
-!101 = metadata !{[2 x i8*]* @C.9.2167}
-!102 = metadata !{metadata !"0x100\00find_strings\0075\000", metadata !103, metadata !38, metadata !104} ; [ DW_TAG_auto_variable ]
-!103 = metadata !{metadata !"0xb\0073\000\000", null, metadata !97} ; [ DW_TAG_lexical_block ]
-!104 = metadata !{metadata !"0x1\00\000\0085312\0064\000\000", metadata !109, null, metadata !46, metadata !105, i32 0, null, null, null} ; [ DW_TAG_array_type ] [line 0, size 85312, align 64, offset 0] [from ]
-!105 = metadata !{metadata !106}
-!106 = metadata !{metadata !"0x21\000\001333"} ; [ DW_TAG_subrange_type ]
-!107 = metadata !{i32 73, i32 0, metadata !103, null}
-!108 = metadata !{i32 0}
-!109 = metadata !{metadata !"pbmsrch.c", metadata !"/Users/grawp/LLVM/test-suite/MultiSource/Benchmarks/MiBench/office-stringsearch"}
+!38 = !{!"0x29", !109} ; [ DW_TAG_file_type ]
+!39 = !{!"0x11\001\004.2.1 (Based on Apple Inc. build 5658) (LLVM build 9999)\001\00\000\00\000", !109, !108, !108, null, null, null} ; [ DW_TAG_compile_unit ]
+!46 = !{!"0xf\00\000\0064\0064\000\000", !109, null, !47} ; [ DW_TAG_pointer_type ]
+!47 = !{!"0x24\00char\000\008\008\000\000\006", !109, null} ; [ DW_TAG_base_type ]
+!97 = !{!"0x2e\00main\00main\00main\0073\000\001\000\006\000\000\000", i32 0, !39, !98, null, null, null, null, null} ; [ DW_TAG_subprogram ]
+!98 = !{!"0x15\00\000\000\000\000\000\000", !109, null, null, !99, null, null, null} ; [ DW_TAG_subroutine_type ] [line 0, size 0, align 0, offset 0] [from ]
+!99 = !{!100}
+!100 = !{!"0x24\00int\000\0032\0032\000\000\005", !109, null} ; [ DW_TAG_base_type ]
+!101 = !{[2 x i8*]* @C.9.2167}
+!102 = !{!"0x100\00find_strings\0075\000", !103, !38, !104} ; [ DW_TAG_auto_variable ]
+!103 = !{!"0xb\0073\000\000", null, !97} ; [ DW_TAG_lexical_block ]
+!104 = !{!"0x1\00\000\0085312\0064\000\000", !109, null, !46, !105, i32 0, null, null, null} ; [ DW_TAG_array_type ] [line 0, size 85312, align 64, offset 0] [from ]
+!105 = !{!106}
+!106 = !{!"0x21\000\001333"} ; [ DW_TAG_subrange_type ]
+!107 = !MDLocation(line: 73, scope: !103)
+!108 = !{i32 0}
+!109 = !{!"pbmsrch.c", !"/Users/grawp/LLVM/test-suite/MultiSource/Benchmarks/MiBench/office-stringsearch"}
define i32 @main() nounwind ssp {
bb.nph:
- tail call void @llvm.dbg.declare(metadata !101, metadata !102, metadata !{metadata !"0x102"}), !dbg !107
+ tail call void @llvm.dbg.declare(metadata [2 x i8*]* @C.9.2167, metadata !102, metadata !{!"0x102"}), !dbg !107
ret i32 0, !dbg !107
}
diff --git a/test/CodeGen/X86/2010-08-04-StackVariable.ll b/test/CodeGen/X86/2010-08-04-StackVariable.ll
index a613939..e3decf0 100644
--- a/test/CodeGen/X86/2010-08-04-StackVariable.ll
+++ b/test/CodeGen/X86/2010-08-04-StackVariable.ll
@@ -6,8 +6,8 @@
define i32 @_Z3fooi4SVal(i32 %i, %struct.SVal* noalias %location) nounwind ssp {
entry:
%"alloca point" = bitcast i32 0 to i32 ; <i32> [#uses=0]
- call void @llvm.dbg.value(metadata !{i32 %i}, i64 0, metadata !23, metadata !{metadata !"0x102"}), !dbg !24
- call void @llvm.dbg.value(metadata !{%struct.SVal* %location}, i64 0, metadata !25, metadata !{metadata !"0x102"}), !dbg !24
+ call void @llvm.dbg.value(metadata i32 %i, i64 0, metadata !23, metadata !{!"0x102"}), !dbg !24
+ call void @llvm.dbg.value(metadata %struct.SVal* %location, i64 0, metadata !25, metadata !{!"0x102"}), !dbg !24
%0 = icmp ne i32 %i, 0, !dbg !27 ; <i1> [#uses=1]
br i1 %0, label %bb, label %bb1, !dbg !27
@@ -34,7 +34,7 @@ return: ; preds = %bb2
define linkonce_odr void @_ZN4SValC1Ev(%struct.SVal* %this) nounwind ssp align 2 {
entry:
%"alloca point" = bitcast i32 0 to i32 ; <i32> [#uses=0]
- call void @llvm.dbg.value(metadata !{%struct.SVal* %this}, i64 0, metadata !31, metadata !{metadata !"0x102"}), !dbg !34
+ call void @llvm.dbg.value(metadata %struct.SVal* %this, i64 0, metadata !31, metadata !{!"0x102"}), !dbg !34
%0 = getelementptr inbounds %struct.SVal* %this, i32 0, i32 0, !dbg !34 ; <i8**> [#uses=1]
store i8* null, i8** %0, align 8, !dbg !34
%1 = getelementptr inbounds %struct.SVal* %this, i32 0, i32 1, !dbg !34 ; <i32*> [#uses=1]
@@ -52,7 +52,7 @@ entry:
%0 = alloca %struct.SVal ; <%struct.SVal*> [#uses=3]
%v = alloca %struct.SVal ; <%struct.SVal*> [#uses=4]
%"alloca point" = bitcast i32 0 to i32 ; <i32> [#uses=0]
- call void @llvm.dbg.declare(metadata !{%struct.SVal* %v}, metadata !38, metadata !{metadata !"0x102"}), !dbg !41
+ call void @llvm.dbg.declare(metadata %struct.SVal* %v, metadata !38, metadata !{!"0x102"}), !dbg !41
call void @_ZN4SValC1Ev(%struct.SVal* %v) nounwind, !dbg !41
%1 = getelementptr inbounds %struct.SVal* %v, i32 0, i32 1, !dbg !42 ; <i32*> [#uses=1]
store i32 1, i32* %1, align 8, !dbg !42
@@ -65,7 +65,7 @@ entry:
%7 = load i32* %6, align 8, !dbg !43 ; <i32> [#uses=1]
store i32 %7, i32* %5, align 8, !dbg !43
%8 = call i32 @_Z3fooi4SVal(i32 2, %struct.SVal* noalias %0) nounwind, !dbg !43 ; <i32> [#uses=0]
- call void @llvm.dbg.value(metadata !{i32 %8}, i64 0, metadata !44, metadata !{metadata !"0x102"}), !dbg !43
+ call void @llvm.dbg.value(metadata i32 %8, i64 0, metadata !44, metadata !{!"0x102"}), !dbg !43
br label %return, !dbg !45
return: ; preds = %entry
@@ -76,54 +76,54 @@ declare void @llvm.dbg.value(metadata, i64, metadata, metadata) nounwind readnon
!llvm.dbg.cu = !{!3}
!llvm.module.flags = !{!49}
-!46 = metadata !{metadata !16, metadata !17, metadata !20}
+!46 = !{!16, !17, !20}
-!0 = metadata !{metadata !"0x2e\00SVal\00SVal\00\0011\000\000\000\006\000\000\0011", metadata !47, metadata !1, metadata !14, null, null, null, null, null} ; [ DW_TAG_subprogram ]
-!1 = metadata !{metadata !"0x13\00SVal\001\00128\0064\000\000\000", metadata !47, metadata !2, null, metadata !4, null, null, null} ; [ DW_TAG_structure_type ] [SVal] [line 1, size 128, align 64, offset 0] [def] [from ]
-!2 = metadata !{metadata !"0x29", metadata !47} ; [ DW_TAG_file_type ]
-!3 = metadata !{metadata !"0x11\004\004.2.1 (Based on Apple Inc. build 5658) (LLVM build)\000\00\000\00\001", metadata !47, metadata !48, metadata !48, metadata !46, null, null} ; [ DW_TAG_compile_unit ]
-!4 = metadata !{metadata !5, metadata !7, metadata !0, metadata !9}
-!5 = metadata !{metadata !"0xd\00Data\007\0064\0064\000\000", metadata !47, metadata !1, metadata !6} ; [ DW_TAG_member ]
-!6 = metadata !{metadata !"0xf\00\000\0064\0064\000\000", metadata !47, metadata !2, null} ; [ DW_TAG_pointer_type ]
-!7 = metadata !{metadata !"0xd\00Kind\008\0032\0032\0064\000", metadata !47, metadata !1, metadata !8} ; [ DW_TAG_member ]
-!8 = metadata !{metadata !"0x24\00unsigned int\000\0032\0032\000\000\007", metadata !47, metadata !2} ; [ DW_TAG_base_type ]
-!9 = metadata !{metadata !"0x2e\00~SVal\00~SVal\00\0012\000\000\000\006\000\000\0012", metadata !47, metadata !1, metadata !10, null, null, null, null, null} ; [ DW_TAG_subprogram ]
-!10 = metadata !{metadata !"0x15\00\000\000\000\000\000\000", metadata !47, metadata !2, null, metadata !11, null, null, null} ; [ DW_TAG_subroutine_type ] [line 0, size 0, align 0, offset 0] [from ]
-!11 = metadata !{null, metadata !12, metadata !13}
-!12 = metadata !{metadata !"0xf\00\000\0064\0064\000\0064", metadata !47, metadata !2, metadata !1} ; [ DW_TAG_pointer_type ]
-!13 = metadata !{metadata !"0x24\00int\000\0032\0032\000\000\005", metadata !47, metadata !2} ; [ DW_TAG_base_type ]
-!14 = metadata !{metadata !"0x15\00\000\000\000\000\000\000", metadata !47, metadata !2, null, metadata !15, null, null, null} ; [ DW_TAG_subroutine_type ] [line 0, size 0, align 0, offset 0] [from ]
-!15 = metadata !{null, metadata !12}
-!16 = metadata !{metadata !"0x2e\00SVal\00SVal\00_ZN4SValC1Ev\0011\000\001\000\006\000\000\0011", metadata !47, metadata !1, metadata !14, null, void (%struct.SVal*)* @_ZN4SValC1Ev, null, null, null} ; [ DW_TAG_subprogram ]
-!17 = metadata !{metadata !"0x2e\00foo\00foo\00_Z3fooi4SVal\0016\000\001\000\006\000\000\0016", metadata !47, metadata !2, metadata !18, null, i32 (i32, %struct.SVal*)* @_Z3fooi4SVal, null, null, null} ; [ DW_TAG_subprogram ]
-!18 = metadata !{metadata !"0x15\00\000\000\000\000\000\000", metadata !47, metadata !2, null, metadata !19, null, null, null} ; [ DW_TAG_subroutine_type ] [line 0, size 0, align 0, offset 0] [from ]
-!19 = metadata !{metadata !13, metadata !13, metadata !1}
-!20 = metadata !{metadata !"0x2e\00main\00main\00main\0023\000\001\000\006\000\000\0023", metadata !47, metadata !2, metadata !21, null, i32 ()* @main, null, null, null} ; [ DW_TAG_subprogram ]
-!21 = metadata !{metadata !"0x15\00\000\000\000\000\000\000", metadata !47, metadata !2, null, metadata !22, null, null, null} ; [ DW_TAG_subroutine_type ] [line 0, size 0, align 0, offset 0] [from ]
-!22 = metadata !{metadata !13}
-!23 = metadata !{metadata !"0x101\00i\0016\000", metadata !17, metadata !2, metadata !13} ; [ DW_TAG_arg_variable ]
-!24 = metadata !{i32 16, i32 0, metadata !17, null}
-!25 = metadata !{metadata !"0x101\00location\0016\000", metadata !17, metadata !2, metadata !26} ; [ DW_TAG_arg_variable ]
-!26 = metadata !{metadata !"0x10\00SVal\000\0064\0064\000\000", metadata !47, metadata !2, metadata !1} ; [ DW_TAG_reference_type ]
-!27 = metadata !{i32 17, i32 0, metadata !28, null}
-!28 = metadata !{metadata !"0xb\0016\000\002", metadata !47, metadata !17} ; [ DW_TAG_lexical_block ]
-!29 = metadata !{i32 18, i32 0, metadata !28, null}
-!30 = metadata !{i32 20, i32 0, metadata !28, null}
-!31 = metadata !{metadata !"0x101\00this\0011\000", metadata !16, metadata !2, metadata !32} ; [ DW_TAG_arg_variable ]
-!32 = metadata !{metadata !"0x26\00\000\0064\0064\000\0064", metadata !47, metadata !2, metadata !33} ; [ DW_TAG_const_type ]
-!33 = metadata !{metadata !"0xf\00\000\0064\0064\000\000", metadata !47, metadata !2, metadata !1} ; [ DW_TAG_pointer_type ]
-!34 = metadata !{i32 11, i32 0, metadata !16, null}
-!35 = metadata !{i32 11, i32 0, metadata !36, null}
-!36 = metadata !{metadata !"0xb\0011\000\001", metadata !47, metadata !37} ; [ DW_TAG_lexical_block ]
-!37 = metadata !{metadata !"0xb\0011\000\000", metadata !47, metadata !16} ; [ DW_TAG_lexical_block ]
-!38 = metadata !{metadata !"0x100\00v\0024\000", metadata !39, metadata !2, metadata !1} ; [ DW_TAG_auto_variable ]
-!39 = metadata !{metadata !"0xb\0023\000\004", metadata !47, metadata !40} ; [ DW_TAG_lexical_block ]
-!40 = metadata !{metadata !"0xb\0023\000\003", metadata !47, metadata !20} ; [ DW_TAG_lexical_block ]
-!41 = metadata !{i32 24, i32 0, metadata !39, null}
-!42 = metadata !{i32 25, i32 0, metadata !39, null}
-!43 = metadata !{i32 26, i32 0, metadata !39, null}
-!44 = metadata !{metadata !"0x100\00k\0026\000", metadata !39, metadata !2, metadata !13} ; [ DW_TAG_auto_variable ]
-!45 = metadata !{i32 27, i32 0, metadata !39, null}
-!47 = metadata !{metadata !"small.cc", metadata !"/Users/manav/R8248330"}
-!48 = metadata !{i32 0}
-!49 = metadata !{i32 1, metadata !"Debug Info Version", i32 2}
+!0 = !{!"0x2e\00SVal\00SVal\00\0011\000\000\000\006\000\000\0011", !47, !1, !14, null, null, null, null, null} ; [ DW_TAG_subprogram ]
+!1 = !{!"0x13\00SVal\001\00128\0064\000\000\000", !47, !2, null, !4, null, null, null} ; [ DW_TAG_structure_type ] [SVal] [line 1, size 128, align 64, offset 0] [def] [from ]
+!2 = !{!"0x29", !47} ; [ DW_TAG_file_type ]
+!3 = !{!"0x11\004\004.2.1 (Based on Apple Inc. build 5658) (LLVM build)\000\00\000\00\001", !47, !48, !48, !46, null, null} ; [ DW_TAG_compile_unit ]
+!4 = !{!5, !7, !0, !9}
+!5 = !{!"0xd\00Data\007\0064\0064\000\000", !47, !1, !6} ; [ DW_TAG_member ]
+!6 = !{!"0xf\00\000\0064\0064\000\000", !47, !2, null} ; [ DW_TAG_pointer_type ]
+!7 = !{!"0xd\00Kind\008\0032\0032\0064\000", !47, !1, !8} ; [ DW_TAG_member ]
+!8 = !{!"0x24\00unsigned int\000\0032\0032\000\000\007", !47, !2} ; [ DW_TAG_base_type ]
+!9 = !{!"0x2e\00~SVal\00~SVal\00\0012\000\000\000\006\000\000\0012", !47, !1, !10, null, null, null, null, null} ; [ DW_TAG_subprogram ]
+!10 = !{!"0x15\00\000\000\000\000\000\000", !47, !2, null, !11, null, null, null} ; [ DW_TAG_subroutine_type ] [line 0, size 0, align 0, offset 0] [from ]
+!11 = !{null, !12, !13}
+!12 = !{!"0xf\00\000\0064\0064\000\0064", !47, !2, !1} ; [ DW_TAG_pointer_type ]
+!13 = !{!"0x24\00int\000\0032\0032\000\000\005", !47, !2} ; [ DW_TAG_base_type ]
+!14 = !{!"0x15\00\000\000\000\000\000\000", !47, !2, null, !15, null, null, null} ; [ DW_TAG_subroutine_type ] [line 0, size 0, align 0, offset 0] [from ]
+!15 = !{null, !12}
+!16 = !{!"0x2e\00SVal\00SVal\00_ZN4SValC1Ev\0011\000\001\000\006\000\000\0011", !47, !1, !14, null, void (%struct.SVal*)* @_ZN4SValC1Ev, null, null, null} ; [ DW_TAG_subprogram ]
+!17 = !{!"0x2e\00foo\00foo\00_Z3fooi4SVal\0016\000\001\000\006\000\000\0016", !47, !2, !18, null, i32 (i32, %struct.SVal*)* @_Z3fooi4SVal, null, null, null} ; [ DW_TAG_subprogram ]
+!18 = !{!"0x15\00\000\000\000\000\000\000", !47, !2, null, !19, null, null, null} ; [ DW_TAG_subroutine_type ] [line 0, size 0, align 0, offset 0] [from ]
+!19 = !{!13, !13, !1}
+!20 = !{!"0x2e\00main\00main\00main\0023\000\001\000\006\000\000\0023", !47, !2, !21, null, i32 ()* @main, null, null, null} ; [ DW_TAG_subprogram ]
+!21 = !{!"0x15\00\000\000\000\000\000\000", !47, !2, null, !22, null, null, null} ; [ DW_TAG_subroutine_type ] [line 0, size 0, align 0, offset 0] [from ]
+!22 = !{!13}
+!23 = !{!"0x101\00i\0016\000", !17, !2, !13} ; [ DW_TAG_arg_variable ]
+!24 = !MDLocation(line: 16, scope: !17)
+!25 = !{!"0x101\00location\0016\000", !17, !2, !26} ; [ DW_TAG_arg_variable ]
+!26 = !{!"0x10\00SVal\000\0064\0064\000\000", !47, !2, !1} ; [ DW_TAG_reference_type ]
+!27 = !MDLocation(line: 17, scope: !28)
+!28 = !{!"0xb\0016\000\002", !47, !17} ; [ DW_TAG_lexical_block ]
+!29 = !MDLocation(line: 18, scope: !28)
+!30 = !MDLocation(line: 20, scope: !28)
+!31 = !{!"0x101\00this\0011\000", !16, !2, !32} ; [ DW_TAG_arg_variable ]
+!32 = !{!"0x26\00\000\0064\0064\000\0064", !47, !2, !33} ; [ DW_TAG_const_type ]
+!33 = !{!"0xf\00\000\0064\0064\000\000", !47, !2, !1} ; [ DW_TAG_pointer_type ]
+!34 = !MDLocation(line: 11, scope: !16)
+!35 = !MDLocation(line: 11, scope: !36)
+!36 = !{!"0xb\0011\000\001", !47, !37} ; [ DW_TAG_lexical_block ]
+!37 = !{!"0xb\0011\000\000", !47, !16} ; [ DW_TAG_lexical_block ]
+!38 = !{!"0x100\00v\0024\000", !39, !2, !1} ; [ DW_TAG_auto_variable ]
+!39 = !{!"0xb\0023\000\004", !47, !40} ; [ DW_TAG_lexical_block ]
+!40 = !{!"0xb\0023\000\003", !47, !20} ; [ DW_TAG_lexical_block ]
+!41 = !MDLocation(line: 24, scope: !39)
+!42 = !MDLocation(line: 25, scope: !39)
+!43 = !MDLocation(line: 26, scope: !39)
+!44 = !{!"0x100\00k\0026\000", !39, !2, !13} ; [ DW_TAG_auto_variable ]
+!45 = !MDLocation(line: 27, scope: !39)
+!47 = !{!"small.cc", !"/Users/manav/R8248330"}
+!48 = !{i32 0}
+!49 = !{i32 1, !"Debug Info Version", i32 2}
diff --git a/test/CodeGen/X86/2010-09-16-EmptyFilename.ll b/test/CodeGen/X86/2010-09-16-EmptyFilename.ll
index f52e922..cf9897a 100644
--- a/test/CodeGen/X86/2010-09-16-EmptyFilename.ll
+++ b/test/CodeGen/X86/2010-09-16-EmptyFilename.ll
@@ -15,21 +15,21 @@ entry:
!llvm.dbg.cu = !{!2}
!llvm.module.flags = !{!17}
-!0 = metadata !{metadata !"0x2e\00foo\00foo\00foo\0053\000\001\000\006\000\000\000", metadata !14, metadata !1, metadata !3, null, i32 ()* @foo, null, null, null} ; [ DW_TAG_subprogram ]
-!1 = metadata !{metadata !"0x29", metadata !14} ; [ DW_TAG_file_type ]
-!2 = metadata !{metadata !"0x11\0012\00clang version 2.9 (trunk 114084)\000\00\000\00\000", metadata !15, metadata !16, metadata !16, metadata !13, null, null} ; [ DW_TAG_compile_unit ]
-!3 = metadata !{metadata !"0x15\00\000\000\000\000\000\000", metadata !14, metadata !1, null, metadata !4, null, null, null} ; [ DW_TAG_subroutine_type ] [line 0, size 0, align 0, offset 0] [from ]
-!4 = metadata !{metadata !5}
-!5 = metadata !{metadata !"0x24\00int\000\0032\0032\000\000\005", metadata !14, metadata !1} ; [ DW_TAG_base_type ]
-!6 = metadata !{metadata !"0x2e\00bar\00bar\00bar\004\000\001\000\006\000\000\000", metadata !15, metadata !7, metadata !3, null, i32 ()* @bar, null, null, null} ; [ DW_TAG_subprogram ]
-!7 = metadata !{metadata !"0x29", metadata !15} ; [ DW_TAG_file_type ]
-!8 = metadata !{i32 53, i32 13, metadata !9, null}
-!9 = metadata !{metadata !"0xb\0053\0011\000", metadata !14, metadata !0} ; [ DW_TAG_lexical_block ]
-!10 = metadata !{i32 4, i32 13, metadata !11, null}
-!11 = metadata !{metadata !"0xb\004\0013\002", metadata !15, metadata !12} ; [ DW_TAG_lexical_block ]
-!12 = metadata !{metadata !"0xb\004\0011\001", metadata !15, metadata !6} ; [ DW_TAG_lexical_block ]
-!13 = metadata !{metadata !0, metadata !6}
-!14 = metadata !{metadata !"", metadata !"/private/tmp"}
-!15 = metadata !{metadata !"bug.c", metadata !"/private/tmp"}
-!16 = metadata !{i32 0}
-!17 = metadata !{i32 1, metadata !"Debug Info Version", i32 2}
+!0 = !{!"0x2e\00foo\00foo\00foo\0053\000\001\000\006\000\000\000", !14, !1, !3, null, i32 ()* @foo, null, null, null} ; [ DW_TAG_subprogram ]
+!1 = !{!"0x29", !14} ; [ DW_TAG_file_type ]
+!2 = !{!"0x11\0012\00clang version 2.9 (trunk 114084)\000\00\000\00\000", !15, !16, !16, !13, null, null} ; [ DW_TAG_compile_unit ]
+!3 = !{!"0x15\00\000\000\000\000\000\000", !14, !1, null, !4, null, null, null} ; [ DW_TAG_subroutine_type ] [line 0, size 0, align 0, offset 0] [from ]
+!4 = !{!5}
+!5 = !{!"0x24\00int\000\0032\0032\000\000\005", !14, !1} ; [ DW_TAG_base_type ]
+!6 = !{!"0x2e\00bar\00bar\00bar\004\000\001\000\006\000\000\000", !15, !7, !3, null, i32 ()* @bar, null, null, null} ; [ DW_TAG_subprogram ]
+!7 = !{!"0x29", !15} ; [ DW_TAG_file_type ]
+!8 = !MDLocation(line: 53, column: 13, scope: !9)
+!9 = !{!"0xb\0053\0011\000", !14, !0} ; [ DW_TAG_lexical_block ]
+!10 = !MDLocation(line: 4, column: 13, scope: !11)
+!11 = !{!"0xb\004\0013\002", !15, !12} ; [ DW_TAG_lexical_block ]
+!12 = !{!"0xb\004\0011\001", !15, !6} ; [ DW_TAG_lexical_block ]
+!13 = !{!0, !6}
+!14 = !{!"", !"/private/tmp"}
+!15 = !{!"bug.c", !"/private/tmp"}
+!16 = !{i32 0}
+!17 = !{i32 1, !"Debug Info Version", i32 2}
diff --git a/test/CodeGen/X86/2010-09-16-asmcrash.ll b/test/CodeGen/X86/2010-09-16-asmcrash.ll
index 9bbd691..7aa9f32 100644
--- a/test/CodeGen/X86/2010-09-16-asmcrash.ll
+++ b/test/CodeGen/X86/2010-09-16-asmcrash.ll
@@ -53,4 +53,4 @@ return: ; preds = %while.end, %while.b
ret void
}
-!0 = metadata !{i32 158484}
+!0 = !{i32 158484}
diff --git a/test/CodeGen/X86/2010-11-02-DbgParameter.ll b/test/CodeGen/X86/2010-11-02-DbgParameter.ll
index 53fb0af..df3aa1f 100644
--- a/test/CodeGen/X86/2010-11-02-DbgParameter.ll
+++ b/test/CodeGen/X86/2010-11-02-DbgParameter.ll
@@ -9,7 +9,7 @@ target triple = "i386-apple-darwin11.0.0"
define i32 @foo(%struct.bar* nocapture %i) nounwind readnone optsize noinline ssp {
; CHECK: TAG_formal_parameter
entry:
- tail call void @llvm.dbg.value(metadata !{%struct.bar* %i}, i64 0, metadata !6, metadata !{metadata !"0x102"}), !dbg !12
+ tail call void @llvm.dbg.value(metadata %struct.bar* %i, i64 0, metadata !6, metadata !{!"0x102"}), !dbg !12
ret i32 1, !dbg !13
}
@@ -18,23 +18,23 @@ declare void @llvm.dbg.value(metadata, i64, metadata, metadata) nounwind readnon
!llvm.dbg.cu = !{!2}
!llvm.module.flags = !{!19}
-!0 = metadata !{metadata !"0x2e\00foo\00foo\00\003\000\001\000\006\00256\001\003", metadata !17, metadata !1, metadata !3, null, i32 (%struct.bar*)* @foo, null, null, metadata !16} ; [ DW_TAG_subprogram ]
-!1 = metadata !{metadata !"0x29", metadata !17} ; [ DW_TAG_file_type ]
-!2 = metadata !{metadata !"0x11\0012\00clang version 2.9 (trunk 117922)\001\00\000\00\000", metadata !17, metadata !18, metadata !18, metadata !15, null, null} ; [ DW_TAG_compile_unit ]
-!3 = metadata !{metadata !"0x15\00\000\000\000\000\000\000", metadata !17, metadata !1, null, metadata !4, null, null, null} ; [ DW_TAG_subroutine_type ] [line 0, size 0, align 0, offset 0] [from ]
-!4 = metadata !{metadata !5}
-!5 = metadata !{metadata !"0x24\00int\000\0032\0032\000\000\005", metadata !17, metadata !2} ; [ DW_TAG_base_type ]
-!6 = metadata !{metadata !"0x101\00i\003\000", metadata !0, metadata !1, metadata !7} ; [ DW_TAG_arg_variable ]
-!7 = metadata !{metadata !"0xf\00\000\0032\0032\000\000", metadata !17, metadata !1, metadata !8} ; [ DW_TAG_pointer_type ]
-!8 = metadata !{metadata !"0x13\00bar\002\0064\0032\000\000\000", metadata !17, metadata !1, null, metadata !9, null, null, null} ; [ DW_TAG_structure_type ] [bar] [line 2, size 64, align 32, offset 0] [def] [from ]
-!9 = metadata !{metadata !10, metadata !11}
-!10 = metadata !{metadata !"0xd\00x\002\0032\0032\000\000", metadata !17, metadata !1, metadata !5} ; [ DW_TAG_member ]
-!11 = metadata !{metadata !"0xd\00y\002\0032\0032\0032\000", metadata !17, metadata !1, metadata !5} ; [ DW_TAG_member ]
-!12 = metadata !{i32 3, i32 47, metadata !0, null}
-!13 = metadata !{i32 4, i32 2, metadata !14, null}
-!14 = metadata !{metadata !"0xb\003\0050\000", metadata !17, metadata !0} ; [ DW_TAG_lexical_block ]
-!15 = metadata !{metadata !0}
-!16 = metadata !{metadata !6}
-!17 = metadata !{metadata !"one.c", metadata !"/private/tmp"}
-!18 = metadata !{i32 0}
-!19 = metadata !{i32 1, metadata !"Debug Info Version", i32 2}
+!0 = !{!"0x2e\00foo\00foo\00\003\000\001\000\006\00256\001\003", !17, !1, !3, null, i32 (%struct.bar*)* @foo, null, null, !16} ; [ DW_TAG_subprogram ]
+!1 = !{!"0x29", !17} ; [ DW_TAG_file_type ]
+!2 = !{!"0x11\0012\00clang version 2.9 (trunk 117922)\001\00\000\00\000", !17, !18, !18, !15, null, null} ; [ DW_TAG_compile_unit ]
+!3 = !{!"0x15\00\000\000\000\000\000\000", !17, !1, null, !4, null, null, null} ; [ DW_TAG_subroutine_type ] [line 0, size 0, align 0, offset 0] [from ]
+!4 = !{!5}
+!5 = !{!"0x24\00int\000\0032\0032\000\000\005", !17, !2} ; [ DW_TAG_base_type ]
+!6 = !{!"0x101\00i\003\000", !0, !1, !7} ; [ DW_TAG_arg_variable ]
+!7 = !{!"0xf\00\000\0032\0032\000\000", !17, !1, !8} ; [ DW_TAG_pointer_type ]
+!8 = !{!"0x13\00bar\002\0064\0032\000\000\000", !17, !1, null, !9, null, null, null} ; [ DW_TAG_structure_type ] [bar] [line 2, size 64, align 32, offset 0] [def] [from ]
+!9 = !{!10, !11}
+!10 = !{!"0xd\00x\002\0032\0032\000\000", !17, !1, !5} ; [ DW_TAG_member ]
+!11 = !{!"0xd\00y\002\0032\0032\0032\000", !17, !1, !5} ; [ DW_TAG_member ]
+!12 = !MDLocation(line: 3, column: 47, scope: !0)
+!13 = !MDLocation(line: 4, column: 2, scope: !14)
+!14 = !{!"0xb\003\0050\000", !17, !0} ; [ DW_TAG_lexical_block ]
+!15 = !{!0}
+!16 = !{!6}
+!17 = !{!"one.c", !"/private/tmp"}
+!18 = !{i32 0}
+!19 = !{i32 1, !"Debug Info Version", i32 2}
diff --git a/test/CodeGen/X86/2011-01-24-DbgValue-Before-Use.ll b/test/CodeGen/X86/2011-01-24-DbgValue-Before-Use.ll
index ac7fbf2..8404020 100644
--- a/test/CodeGen/X86/2011-01-24-DbgValue-Before-Use.ll
+++ b/test/CodeGen/X86/2011-01-24-DbgValue-Before-Use.ll
@@ -22,8 +22,8 @@ target triple = "x86_64-apple-darwin10.0.0"
define i64 @gcd(i64 %a, i64 %b) nounwind readnone optsize noinline ssp {
entry:
- tail call void @llvm.dbg.value(metadata !{i64 %a}, i64 0, metadata !10, metadata !{metadata !"0x102"}), !dbg !18
- tail call void @llvm.dbg.value(metadata !{i64 %b}, i64 0, metadata !11, metadata !{metadata !"0x102"}), !dbg !19
+ tail call void @llvm.dbg.value(metadata i64 %a, i64 0, metadata !10, metadata !{!"0x102"}), !dbg !18
+ tail call void @llvm.dbg.value(metadata i64 %b, i64 0, metadata !11, metadata !{!"0x102"}), !dbg !19
br label %while.body, !dbg !20
while.body: ; preds = %while.body, %entry
@@ -34,14 +34,14 @@ while.body: ; preds = %while.body, %entry
br i1 %cmp, label %if.then, label %while.body, !dbg !23
if.then: ; preds = %while.body
- tail call void @llvm.dbg.value(metadata !{i64 %rem}, i64 0, metadata !12, metadata !{metadata !"0x102"}), !dbg !21
+ tail call void @llvm.dbg.value(metadata i64 %rem, i64 0, metadata !12, metadata !{!"0x102"}), !dbg !21
ret i64 %b.addr.0, !dbg !23
}
define i32 @main() nounwind optsize ssp {
entry:
%call = tail call i32 @rand() nounwind optsize, !dbg !24
- tail call void @llvm.dbg.value(metadata !{i32 %call}, i64 0, metadata !14, metadata !{metadata !"0x102"}), !dbg !24
+ tail call void @llvm.dbg.value(metadata i32 %call, i64 0, metadata !14, metadata !{!"0x102"}), !dbg !24
%cmp = icmp ugt i32 %call, 21, !dbg !25
br i1 %cmp, label %cond.true, label %cond.end, !dbg !25
@@ -51,7 +51,7 @@ cond.true: ; preds = %entry
cond.end: ; preds = %entry, %cond.true
%cond = phi i32 [ %call1, %cond.true ], [ %call, %entry ], !dbg !25
- tail call void @llvm.dbg.value(metadata !{i32 %cond}, i64 0, metadata !17, metadata !{metadata !"0x102"}), !dbg !25
+ tail call void @llvm.dbg.value(metadata i32 %cond, i64 0, metadata !17, metadata !{!"0x102"}), !dbg !25
%conv = sext i32 %cond to i64, !dbg !26
%conv5 = zext i32 %call to i64, !dbg !26
%call6 = tail call i64 @gcd(i64 %conv, i64 %conv5) optsize, !dbg !26
@@ -78,37 +78,37 @@ declare i32 @puts(i8* nocapture) nounwind
!llvm.dbg.cu = !{!2}
!llvm.module.flags = !{!33}
-!0 = metadata !{metadata !"0x2e\00gcd\00gcd\00\005\000\001\000\006\00256\001\000", metadata !31, metadata !1, metadata !3, null, i64 (i64, i64)* @gcd, null, null, metadata !29} ; [ DW_TAG_subprogram ] [line 5] [def] [scope 0] [gcd]
-!1 = metadata !{metadata !"0x29", metadata !31} ; [ DW_TAG_file_type ]
-!2 = metadata !{metadata !"0x11\0012\00clang version 2.9 (trunk 124117)\001\00\000\00\001", metadata !31, metadata !32, metadata !32, metadata !28, null, null} ; [ DW_TAG_compile_unit ]
-!3 = metadata !{metadata !"0x15\00\000\000\000\000\000\000", metadata !31, metadata !1, null, metadata !4, null, null, null} ; [ DW_TAG_subroutine_type ] [line 0, size 0, align 0, offset 0] [from ]
-!4 = metadata !{metadata !5}
-!5 = metadata !{metadata !"0x24\00long int\000\0064\0064\000\000\005", null, metadata !2} ; [ DW_TAG_base_type ]
-!6 = metadata !{metadata !"0x2e\00main\00main\00\0025\000\001\000\006\000\001\000", metadata !31, metadata !1, metadata !7, null, i32 ()* @main, null, null, metadata !30} ; [ DW_TAG_subprogram ] [line 25] [def] [scope 0] [main]
-!7 = metadata !{metadata !"0x15\00\000\000\000\000\000\000", metadata !31, metadata !1, null, metadata !8, null, null, null} ; [ DW_TAG_subroutine_type ] [line 0, size 0, align 0, offset 0] [from ]
-!8 = metadata !{metadata !9}
-!9 = metadata !{metadata !"0x24\00int\000\0032\0032\000\000\005", null, metadata !2} ; [ DW_TAG_base_type ]
-!10 = metadata !{metadata !"0x101\00a\005\000", metadata !0, metadata !1, metadata !5} ; [ DW_TAG_arg_variable ]
-!11 = metadata !{metadata !"0x101\00b\005\000", metadata !0, metadata !1, metadata !5} ; [ DW_TAG_arg_variable ]
-!12 = metadata !{metadata !"0x100\00c\006\000", metadata !13, metadata !1, metadata !5} ; [ DW_TAG_auto_variable ]
-!13 = metadata !{metadata !"0xb\005\0052\000", metadata !31, metadata !0} ; [ DW_TAG_lexical_block ]
-!14 = metadata !{metadata !"0x100\00m\0026\000", metadata !15, metadata !1, metadata !16} ; [ DW_TAG_auto_variable ]
-!15 = metadata !{metadata !"0xb\0025\0012\002", metadata !31, metadata !6} ; [ DW_TAG_lexical_block ]
-!16 = metadata !{metadata !"0x24\00unsigned int\000\0032\0032\000\000\007", null, metadata !2} ; [ DW_TAG_base_type ]
-!17 = metadata !{metadata !"0x100\00z_s\0027\000", metadata !15, metadata !1, metadata !9} ; [ DW_TAG_auto_variable ]
-!18 = metadata !{i32 5, i32 41, metadata !0, null}
-!19 = metadata !{i32 5, i32 49, metadata !0, null}
-!20 = metadata !{i32 7, i32 5, metadata !13, null}
-!21 = metadata !{i32 8, i32 9, metadata !22, null}
-!22 = metadata !{metadata !"0xb\007\0014\001", metadata !31, metadata !13} ; [ DW_TAG_lexical_block ]
-!23 = metadata !{i32 9, i32 9, metadata !22, null}
-!24 = metadata !{i32 26, i32 38, metadata !15, null}
-!25 = metadata !{i32 27, i32 38, metadata !15, null}
-!26 = metadata !{i32 28, i32 9, metadata !15, null}
-!27 = metadata !{i32 30, i32 1, metadata !15, null}
-!28 = metadata !{metadata !0, metadata !6}
-!29 = metadata !{metadata !10, metadata !11, metadata !12}
-!30 = metadata !{metadata !14, metadata !17}
-!31 = metadata !{metadata !"rem_small.c", metadata !"/private/tmp"}
-!32 = metadata !{i32 0}
-!33 = metadata !{i32 1, metadata !"Debug Info Version", i32 2}
+!0 = !{!"0x2e\00gcd\00gcd\00\005\000\001\000\006\00256\001\000", !31, !1, !3, null, i64 (i64, i64)* @gcd, null, null, !29} ; [ DW_TAG_subprogram ] [line 5] [def] [scope 0] [gcd]
+!1 = !{!"0x29", !31} ; [ DW_TAG_file_type ]
+!2 = !{!"0x11\0012\00clang version 2.9 (trunk 124117)\001\00\000\00\001", !31, !32, !32, !28, null, null} ; [ DW_TAG_compile_unit ]
+!3 = !{!"0x15\00\000\000\000\000\000\000", !31, !1, null, !4, null, null, null} ; [ DW_TAG_subroutine_type ] [line 0, size 0, align 0, offset 0] [from ]
+!4 = !{!5}
+!5 = !{!"0x24\00long int\000\0064\0064\000\000\005", null, !2} ; [ DW_TAG_base_type ]
+!6 = !{!"0x2e\00main\00main\00\0025\000\001\000\006\000\001\000", !31, !1, !7, null, i32 ()* @main, null, null, !30} ; [ DW_TAG_subprogram ] [line 25] [def] [scope 0] [main]
+!7 = !{!"0x15\00\000\000\000\000\000\000", !31, !1, null, !8, null, null, null} ; [ DW_TAG_subroutine_type ] [line 0, size 0, align 0, offset 0] [from ]
+!8 = !{!9}
+!9 = !{!"0x24\00int\000\0032\0032\000\000\005", null, !2} ; [ DW_TAG_base_type ]
+!10 = !{!"0x101\00a\005\000", !0, !1, !5} ; [ DW_TAG_arg_variable ]
+!11 = !{!"0x101\00b\005\000", !0, !1, !5} ; [ DW_TAG_arg_variable ]
+!12 = !{!"0x100\00c\006\000", !13, !1, !5} ; [ DW_TAG_auto_variable ]
+!13 = !{!"0xb\005\0052\000", !31, !0} ; [ DW_TAG_lexical_block ]
+!14 = !{!"0x100\00m\0026\000", !15, !1, !16} ; [ DW_TAG_auto_variable ]
+!15 = !{!"0xb\0025\0012\002", !31, !6} ; [ DW_TAG_lexical_block ]
+!16 = !{!"0x24\00unsigned int\000\0032\0032\000\000\007", null, !2} ; [ DW_TAG_base_type ]
+!17 = !{!"0x100\00z_s\0027\000", !15, !1, !9} ; [ DW_TAG_auto_variable ]
+!18 = !MDLocation(line: 5, column: 41, scope: !0)
+!19 = !MDLocation(line: 5, column: 49, scope: !0)
+!20 = !MDLocation(line: 7, column: 5, scope: !13)
+!21 = !MDLocation(line: 8, column: 9, scope: !22)
+!22 = !{!"0xb\007\0014\001", !31, !13} ; [ DW_TAG_lexical_block ]
+!23 = !MDLocation(line: 9, column: 9, scope: !22)
+!24 = !MDLocation(line: 26, column: 38, scope: !15)
+!25 = !MDLocation(line: 27, column: 38, scope: !15)
+!26 = !MDLocation(line: 28, column: 9, scope: !15)
+!27 = !MDLocation(line: 30, column: 1, scope: !15)
+!28 = !{!0, !6}
+!29 = !{!10, !11, !12}
+!30 = !{!14, !17}
+!31 = !{!"rem_small.c", !"/private/tmp"}
+!32 = !{i32 0}
+!33 = !{i32 1, !"Debug Info Version", i32 2}
diff --git a/test/CodeGen/X86/2011-06-14-mmx-inlineasm.ll b/test/CodeGen/X86/2011-06-14-mmx-inlineasm.ll
index 445fc01..b764da1 100644
--- a/test/CodeGen/X86/2011-06-14-mmx-inlineasm.ll
+++ b/test/CodeGen/X86/2011-06-14-mmx-inlineasm.ll
@@ -41,5 +41,5 @@ entry:
declare void @llvm.x86.mmx.emms() nounwind
-!0 = metadata !{i32 888, i32 917, i32 945, i32 973, i32 1001, i32 1029, i32 1057}
-!1 = metadata !{i32 1390, i32 1430, i32 1469, i32 1508, i32 1547, i32 1586, i32 1625, i32 1664}
+!0 = !{i32 888, i32 917, i32 945, i32 973, i32 1001, i32 1029, i32 1057}
+!1 = !{i32 1390, i32 1430, i32 1469, i32 1508, i32 1547, i32 1586, i32 1625, i32 1664}
diff --git a/test/CodeGen/X86/2011-10-19-widen_vselect.ll b/test/CodeGen/X86/2011-10-19-widen_vselect.ll
index 222068d..7eaa5bb 100644
--- a/test/CodeGen/X86/2011-10-19-widen_vselect.ll
+++ b/test/CodeGen/X86/2011-10-19-widen_vselect.ll
@@ -26,7 +26,7 @@ entry:
}
; CHECK-LABEL: zero_test
-; CHECK: xorps %xmm0, %xmm0
+; CHECK: pxor %xmm0, %xmm0
; CHECK: ret
define void @zero_test() {
diff --git a/test/CodeGen/X86/2011-11-30-or.ll b/test/CodeGen/X86/2011-11-30-or.ll
index 8ac4632..4260e81 100644
--- a/test/CodeGen/X86/2011-11-30-or.ll
+++ b/test/CodeGen/X86/2011-11-30-or.ll
@@ -2,13 +2,13 @@
target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:128:128-n8:16:32"
target triple = "x86_64-apple-macosx10.6.6"
-
-; Test that the order of operands is correct
-; CHECK: select_func
-; CHECK: pblendvb %xmm1, %xmm2
-; CHECK: ret
-
-define void @select_func(<8 x i16> %in) {
+
+; Test that the order of operands is correct
+; CHECK: select_func
+; CHECK: pblendvb {{LCPI0_[0-9]*}}(%rip), %xmm1
+; CHECK: ret
+
+define void @select_func(<8 x i16> %in) {
entry:
%c.lobit.i.i.i = ashr <8 x i16> %in, <i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15>
%and.i56.i.i.i = and <8 x i16> %c.lobit.i.i.i, <i16 25, i16 8, i16 65, i16 25, i16 8, i16 95, i16 15, i16 45>
diff --git a/test/CodeGen/X86/2012-01-16-mfence-nosse-flags.ll b/test/CodeGen/X86/2012-01-16-mfence-nosse-flags.ll
index cd8a16f..b78c13f 100644
--- a/test/CodeGen/X86/2012-01-16-mfence-nosse-flags.ll
+++ b/test/CodeGen/X86/2012-01-16-mfence-nosse-flags.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -mtriple=i686-linux -mattr=-sse | FileCheck %s
+; RUN: llc < %s -verify-machineinstrs -mtriple=i686-linux -mattr=-sse | FileCheck %s
; PR11768
@ptr = external global i8*
diff --git a/test/CodeGen/X86/2012-05-19-avx2-store.ll b/test/CodeGen/X86/2012-05-19-avx2-store.ll
deleted file mode 100644
index 1c1e8e2..0000000
--- a/test/CodeGen/X86/2012-05-19-avx2-store.ll
+++ /dev/null
@@ -1,13 +0,0 @@
-; RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=corei7-avx -mattr=+avx2 | FileCheck %s
-
-define void @double_save(<4 x i32>* %Ap, <4 x i32>* %Bp, <8 x i32>* %P) nounwind ssp {
-entry:
- ; CHECK: vmovaps
- ; CHECK: vinsertf128 $1, ([[A0:%rdi|%rsi]]),
- ; CHECK: vmovups
- %A = load <4 x i32>* %Ap
- %B = load <4 x i32>* %Bp
- %Z = shufflevector <4 x i32>%A, <4 x i32>%B, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
- store <8 x i32> %Z, <8 x i32>* %P, align 16
- ret void
-}
diff --git a/test/CodeGen/X86/2012-07-15-broadcastfold.ll b/test/CodeGen/X86/2012-07-15-broadcastfold.ll
index 519c7ca..1c39c74 100644
--- a/test/CodeGen/X86/2012-07-15-broadcastfold.ll
+++ b/test/CodeGen/X86/2012-07-15-broadcastfold.ll
@@ -1,5 +1,4 @@
; RUN: llc < %s -march=x86 -mcpu=corei7 -mattr=+avx2 | FileCheck %s
-; RUN: llc < %s -march=x86 -mcpu=corei7 -mattr=+avx2 -x86-experimental-vector-shuffle-lowering | FileCheck %s
declare x86_fastcallcc i64 @barrier()
diff --git a/test/CodeGen/X86/2012-11-30-handlemove-dbg.ll b/test/CodeGen/X86/2012-11-30-handlemove-dbg.ll
index 1a5efda..c33b48d 100644
--- a/test/CodeGen/X86/2012-11-30-handlemove-dbg.ll
+++ b/test/CodeGen/X86/2012-11-30-handlemove-dbg.ll
@@ -16,7 +16,7 @@ declare void @llvm.dbg.declare(metadata, metadata, metadata) nounwind readnone
define signext i16 @subdivp(%struct.node.0.27* nocapture %p, double %dsq, double %tolsq, %struct.hgstruct.2.29* nocapture byval align 8 %hg) nounwind uwtable readonly ssp {
entry:
- call void @llvm.dbg.declare(metadata !{%struct.hgstruct.2.29* %hg}, metadata !4, metadata !{metadata !"0x102"})
+ call void @llvm.dbg.declare(metadata %struct.hgstruct.2.29* %hg, metadata !4, metadata !{!"0x102"})
%type = getelementptr inbounds %struct.node.0.27* %p, i64 0, i32 0
%0 = load i16* %type, align 2
%cmp = icmp eq i16 %0, 1
@@ -38,15 +38,15 @@ declare void @llvm.dbg.value(metadata, i64, metadata, metadata) nounwind readnon
!llvm.dbg.cu = !{!0}
!llvm.module.flags = !{!12}
-!0 = metadata !{metadata !"0x11\0012\00clang version 3.3 (trunk 168918) (llvm/trunk 168920)\001\00\000\00\000", metadata !11, metadata !2, metadata !2, metadata !13, metadata !2, null} ; [ DW_TAG_compile_unit ] [MultiSource/Benchmarks/Olden/bh/newbh.c] [DW_LANG_C99]
-!2 = metadata !{}
-!4 = metadata !{metadata !"0x101\00hg\0067109589\000", null, metadata !5, metadata !6} ; [ DW_TAG_arg_variable ] [hg] [line 725]
-!5 = metadata !{metadata !"0x29", metadata !11} ; [ DW_TAG_file_type ]
-!6 = metadata !{metadata !"0x16\00hgstruct\00492\000\000\000\000", metadata !11, null, metadata !7} ; [ DW_TAG_typedef ] [hgstruct] [line 492, size 0, align 0, offset 0] [from ]
-!7 = metadata !{metadata !"0x13\00\00487\00512\0064\000\000\000", metadata !11, null, null, null, null, i32 0, null} ; [ DW_TAG_structure_type ] [line 487, size 512, align 64, offset 0] [def] [from ]
-!11 = metadata !{metadata !"MultiSource/Benchmarks/Olden/bh/newbh.c", metadata !"MultiSource/Benchmarks/Olden/bh"}
-!12 = metadata !{i32 1, metadata !"Debug Info Version", i32 2}
-!13 = metadata !{metadata !14}
-!14 = metadata !{metadata !"0x2e\00subdivp\00subdivp\00\000\000\001\000\006\00256\001\001", metadata !11, metadata !5, metadata !15, null, i16 (%struct.node.0.27*, double, double, %struct.hgstruct.2.29* )* @subdivp, null, null, null} ; [ DW_TAG_subprogram ] [def] [subdivp]
-!15 = metadata !{metadata !"0x15\00\000\000\000\000\000\000", i32 0, null, null, metadata !16, null, null, null} ; [ DW_TAG_subroutine_type ] [line 0, size 0, align 0, offset 0] [from ]
-!16 = metadata !{null}
+!0 = !{!"0x11\0012\00clang version 3.3 (trunk 168918) (llvm/trunk 168920)\001\00\000\00\000", !11, !2, !2, !13, !2, null} ; [ DW_TAG_compile_unit ] [MultiSource/Benchmarks/Olden/bh/newbh.c] [DW_LANG_C99]
+!2 = !{}
+!4 = !{!"0x101\00hg\0067109589\000", null, !5, !6} ; [ DW_TAG_arg_variable ] [hg] [line 725]
+!5 = !{!"0x29", !11} ; [ DW_TAG_file_type ]
+!6 = !{!"0x16\00hgstruct\00492\000\000\000\000", !11, null, !7} ; [ DW_TAG_typedef ] [hgstruct] [line 492, size 0, align 0, offset 0] [from ]
+!7 = !{!"0x13\00\00487\00512\0064\000\000\000", !11, null, null, null, null, i32 0, null} ; [ DW_TAG_structure_type ] [line 487, size 512, align 64, offset 0] [def] [from ]
+!11 = !{!"MultiSource/Benchmarks/Olden/bh/newbh.c", !"MultiSource/Benchmarks/Olden/bh"}
+!12 = !{i32 1, !"Debug Info Version", i32 2}
+!13 = !{!14}
+!14 = !{!"0x2e\00subdivp\00subdivp\00\000\000\001\000\006\00256\001\001", !11, !5, !15, null, i16 (%struct.node.0.27*, double, double, %struct.hgstruct.2.29* )* @subdivp, null, null, null} ; [ DW_TAG_subprogram ] [def] [subdivp]
+!15 = !{!"0x15\00\000\000\000\000\000\000", i32 0, null, null, !16, null, null, null} ; [ DW_TAG_subroutine_type ] [line 0, size 0, align 0, offset 0] [from ]
+!16 = !{null}
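
NOTE: the hunks above, and most of the metadata churn through the rest of this patch, are the mechanical update for LLVM's first-class metadata syntax: node references drop the "metadata" keyword (metadata !{metadata !"..."} becomes !{!"..."}), intrinsic operands wrap SSA values directly instead of in a node, and plain debug locations move to the specialized !MDLocation form. A before/after sketch drawn from these tests:

  ; old, node-wrapped form
  call void @llvm.dbg.declare(metadata !{%struct.hgstruct.2.29* %hg}, metadata !4, metadata !{metadata !"0x102"})
  !12 = metadata !{i32 2, i32 13, metadata !1, null}

  ; new, first-class form
  call void @llvm.dbg.declare(metadata %struct.hgstruct.2.29* %hg, metadata !4, metadata !{!"0x102"})
  !12 = !MDLocation(line: 2, column: 13, scope: !1)
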
diff --git a/test/CodeGen/X86/2012-11-30-misched-dbg.ll b/test/CodeGen/X86/2012-11-30-misched-dbg.ll
index 083aacd..28ceb2f 100644
--- a/test/CodeGen/X86/2012-11-30-misched-dbg.ll
+++ b/test/CodeGen/X86/2012-11-30-misched-dbg.ll
@@ -43,7 +43,7 @@ if.then3344:
br label %if.then4073
if.then4073: ; preds = %if.then3344
- call void @llvm.dbg.declare(metadata !{[20 x i8]* %num14075}, metadata !4, metadata !{metadata !"0x102"})
+ call void @llvm.dbg.declare(metadata [20 x i8]* %num14075, metadata !4, metadata !{!"0x102"})
%arraydecay4078 = getelementptr inbounds [20 x i8]* %num14075, i64 0, i64 0
%0 = load i32* undef, align 4
%add4093 = add nsw i32 %0, 0
@@ -65,30 +65,30 @@ declare i32 @__sprintf_chk(i8*, i32, i64, i8*, ...)
!llvm.dbg.cu = !{!0}
!llvm.module.flags = !{!35}
-!0 = metadata !{metadata !"0x11\0012\00clang version 3.3 (trunk 168918) (llvm/trunk 168920)\001\00\000\00\000", metadata !19, metadata !2, metadata !2, metadata !20, metadata !2, null} ; [ DW_TAG_compile_unit ] [MultiSource/Benchmarks/MiBench/consumer-typeset/MultiSource/Benchmarks/MiBench/consumer-typeset/z19.c] [DW_LANG_C99]
-!1 = metadata !{metadata !2}
-!2 = metadata !{}
-!4 = metadata !{metadata !"0x100\00num1\00815\000", metadata !5, metadata !14, metadata !15} ; [ DW_TAG_auto_variable ] [num1] [line 815]
-!5 = metadata !{metadata !"0xb\00815\000\00177", metadata !14, metadata !6} ; [ DW_TAG_lexical_block ] [MultiSource/Benchmarks/MiBench/consumer-typeset/z19.c]
-!6 = metadata !{metadata !"0xb\00812\000\00176", metadata !14, metadata !7} ; [ DW_TAG_lexical_block ] [MultiSource/Benchmarks/MiBench/consumer-typeset/z19.c]
-!7 = metadata !{metadata !"0xb\00807\000\00175", metadata !14, metadata !8} ; [ DW_TAG_lexical_block ] [MultiSource/Benchmarks/MiBench/consumer-typeset/z19.c]
-!8 = metadata !{metadata !"0xb\00440\000\0094", metadata !14, metadata !9} ; [ DW_TAG_lexical_block ] [MultiSource/Benchmarks/MiBench/consumer-typeset/z19.c]
-!9 = metadata !{metadata !"0xb\00435\000\0091", metadata !14, metadata !10} ; [ DW_TAG_lexical_block ] [MultiSource/Benchmarks/MiBench/consumer-typeset/z19.c]
-!10 = metadata !{metadata !"0xb\00434\000\0090", metadata !14, metadata !11} ; [ DW_TAG_lexical_block ] [MultiSource/Benchmarks/MiBench/consumer-typeset/z19.c]
-!11 = metadata !{metadata !"0xb\00250\000\0024", metadata !14, metadata !12} ; [ DW_TAG_lexical_block ] [MultiSource/Benchmarks/MiBench/consumer-typeset/z19.c]
-!12 = metadata !{metadata !"0xb\00249\000\0023", metadata !14, metadata !13} ; [ DW_TAG_lexical_block ] [MultiSource/Benchmarks/MiBench/consumer-typeset/z19.c]
-!13 = metadata !{metadata !"0xb\00221\000\0019", metadata !14, metadata !2} ; [ DW_TAG_lexical_block ] [MultiSource/Benchmarks/MiBench/consumer-typeset/z19.c]
-!14 = metadata !{metadata !"0x29", metadata !19} ; [ DW_TAG_file_type ]
-!15 = metadata !{metadata !"0x1\00\000\00160\008\000\000", null, null, metadata !16, metadata !17, i32 0, null, null, null} ; [ DW_TAG_array_type ] [line 0, size 160, align 8, offset 0] [from char]
-!16 = metadata !{metadata !"0x24\00char\000\008\008\000\000\006", null, null} ; [ DW_TAG_base_type ] [char] [line 0, size 8, align 8, offset 0, enc DW_ATE_signed_char]
-!17 = metadata !{metadata !18}
-!18 = metadata !{metadata !"0x21\000\0020"} ; [ DW_TAG_subrange_type ] [0, 19]
-!19 = metadata !{metadata !"MultiSource/Benchmarks/MiBench/consumer-typeset/z19.c", metadata !"MultiSource/Benchmarks/MiBench/consumer-typeset"}
-
-!20 = metadata !{metadata !21}
-!21 = metadata !{metadata !"0x2e\00AttachGalley\00AttachGalley\00\000\000\001\000\006\00256\001\001", metadata !19, metadata !14, metadata !22, null, i32 (%union.rec**)* @AttachGalley, null, null, null} ; [ DW_TAG_subprogram ] [def] [AttachGalley]
-!22 = metadata !{metadata !"0x15\00\000\000\000\000\000\000", i32 0, null, null, metadata !23, null, null, null} ; [ DW_TAG_subroutine_type ] [line 0, size 0, align 0, offset 0] [from ]
-!23 = metadata !{null}
+!0 = !{!"0x11\0012\00clang version 3.3 (trunk 168918) (llvm/trunk 168920)\001\00\000\00\000", !19, !2, !2, !20, !2, null} ; [ DW_TAG_compile_unit ] [MultiSource/Benchmarks/MiBench/consumer-typeset/MultiSource/Benchmarks/MiBench/consumer-typeset/z19.c] [DW_LANG_C99]
+!1 = !{!2}
+!2 = !{}
+!4 = !{!"0x100\00num1\00815\000", !5, !14, !15} ; [ DW_TAG_auto_variable ] [num1] [line 815]
+!5 = !{!"0xb\00815\000\00177", !14, !6} ; [ DW_TAG_lexical_block ] [MultiSource/Benchmarks/MiBench/consumer-typeset/z19.c]
+!6 = !{!"0xb\00812\000\00176", !14, !7} ; [ DW_TAG_lexical_block ] [MultiSource/Benchmarks/MiBench/consumer-typeset/z19.c]
+!7 = !{!"0xb\00807\000\00175", !14, !8} ; [ DW_TAG_lexical_block ] [MultiSource/Benchmarks/MiBench/consumer-typeset/z19.c]
+!8 = !{!"0xb\00440\000\0094", !14, !9} ; [ DW_TAG_lexical_block ] [MultiSource/Benchmarks/MiBench/consumer-typeset/z19.c]
+!9 = !{!"0xb\00435\000\0091", !14, !10} ; [ DW_TAG_lexical_block ] [MultiSource/Benchmarks/MiBench/consumer-typeset/z19.c]
+!10 = !{!"0xb\00434\000\0090", !14, !11} ; [ DW_TAG_lexical_block ] [MultiSource/Benchmarks/MiBench/consumer-typeset/z19.c]
+!11 = !{!"0xb\00250\000\0024", !14, !12} ; [ DW_TAG_lexical_block ] [MultiSource/Benchmarks/MiBench/consumer-typeset/z19.c]
+!12 = !{!"0xb\00249\000\0023", !14, !13} ; [ DW_TAG_lexical_block ] [MultiSource/Benchmarks/MiBench/consumer-typeset/z19.c]
+!13 = !{!"0xb\00221\000\0019", !14, !2} ; [ DW_TAG_lexical_block ] [MultiSource/Benchmarks/MiBench/consumer-typeset/z19.c]
+!14 = !{!"0x29", !19} ; [ DW_TAG_file_type ]
+!15 = !{!"0x1\00\000\00160\008\000\000", null, null, !16, !17, i32 0, null, null, null} ; [ DW_TAG_array_type ] [line 0, size 160, align 8, offset 0] [from char]
+!16 = !{!"0x24\00char\000\008\008\000\000\006", null, null} ; [ DW_TAG_base_type ] [char] [line 0, size 8, align 8, offset 0, enc DW_ATE_signed_char]
+!17 = !{!18}
+!18 = !{!"0x21\000\0020"} ; [ DW_TAG_subrange_type ] [0, 19]
+!19 = !{!"MultiSource/Benchmarks/MiBench/consumer-typeset/z19.c", !"MultiSource/Benchmarks/MiBench/consumer-typeset"}
+
+!20 = !{!21}
+!21 = !{!"0x2e\00AttachGalley\00AttachGalley\00\000\000\001\000\006\00256\001\001", !19, !14, !22, null, i32 (%union.rec**)* @AttachGalley, null, null, null} ; [ DW_TAG_subprogram ] [def] [AttachGalley]
+!22 = !{!"0x15\00\000\000\000\000\000\000", i32 0, null, null, !23, null, null, null} ; [ DW_TAG_subroutine_type ] [line 0, size 0, align 0, offset 0] [from ]
+!23 = !{null}
; Test DebugValue uses visited by RegisterPressureTracker findUseBetween().
;
@@ -108,7 +108,7 @@ cond.true: ; preds = %entry
unreachable
cond.end: ; preds = %entry
- call void @llvm.dbg.declare(metadata !{%"class.__gnu_cxx::hash_map"* %X}, metadata !31, metadata !{metadata !"0x102"})
+ call void @llvm.dbg.declare(metadata %"class.__gnu_cxx::hash_map"* %X, metadata !31, metadata !{!"0x102"})
%_M_num_elements.i.i.i.i = getelementptr inbounds %"class.__gnu_cxx::hash_map"* %X, i64 0, i32 0, i32 5
invoke void @_Znwm()
to label %exit.i unwind label %lpad2.i.i.i.i
@@ -134,11 +134,11 @@ declare void @_Znwm()
!llvm.dbg.cu = !{!30}
-!30 = metadata !{metadata !"0x11\004\00clang version 3.3 (trunk 169129) (llvm/trunk 169135)\001\00\000\00\000", metadata !34, metadata !2, metadata !2, metadata !36, null, null} ; [ DW_TAG_compile_unit ] [SingleSource/Benchmarks/Shootout-C++/hash.cpp] [DW_LANG_C_plus_plus]
-!31 = metadata !{metadata !"0x100\00X\0029\000", null, null, metadata !32} ; [ DW_TAG_auto_variable ] [X] [line 29]
-!32 = metadata !{metadata !"0x16\00HM\0028\000\000\000\000", metadata !34, null, null} ; [ DW_TAG_typedef ] [HM] [line 28, size 0, align 0, offset 0] [from ]
-!33 = metadata !{metadata !"0x29", metadata !34} ; [ DW_TAG_file_type ]
-!34 = metadata !{metadata !"SingleSource/Benchmarks/Shootout-C++/hash.cpp", metadata !"SingleSource/Benchmarks/Shootout-C++"}
-!35 = metadata !{i32 1, metadata !"Debug Info Version", i32 2}
-!36 = metadata !{metadata !37}
-!37 = metadata !{metadata !"0x2e\00main\00main\00\000\000\001\000\006\00256\001\001", metadata !19, metadata !14, metadata !22, null, void ()* @main, null, null, null} ; [ DW_TAG_subprogram ] [def] [main]
+!30 = !{!"0x11\004\00clang version 3.3 (trunk 169129) (llvm/trunk 169135)\001\00\000\00\000", !34, !2, !2, !36, null, null} ; [ DW_TAG_compile_unit ] [SingleSource/Benchmarks/Shootout-C++/hash.cpp] [DW_LANG_C_plus_plus]
+!31 = !{!"0x100\00X\0029\000", null, null, !32} ; [ DW_TAG_auto_variable ] [X] [line 29]
+!32 = !{!"0x16\00HM\0028\000\000\000\000", !34, null, null} ; [ DW_TAG_typedef ] [HM] [line 28, size 0, align 0, offset 0] [from ]
+!33 = !{!"0x29", !34} ; [ DW_TAG_file_type ]
+!34 = !{!"SingleSource/Benchmarks/Shootout-C++/hash.cpp", !"SingleSource/Benchmarks/Shootout-C++"}
+!35 = !{i32 1, !"Debug Info Version", i32 2}
+!36 = !{!37}
+!37 = !{!"0x2e\00main\00main\00\000\000\001\000\006\00256\001\001", !19, !14, !22, null, void ()* @main, null, null, null} ; [ DW_TAG_subprogram ] [def] [main]
diff --git a/test/CodeGen/X86/2012-11-30-regpres-dbg.ll b/test/CodeGen/X86/2012-11-30-regpres-dbg.ll
index 458ce4f..04b3174 100644
--- a/test/CodeGen/X86/2012-11-30-regpres-dbg.ll
+++ b/test/CodeGen/X86/2012-11-30-regpres-dbg.ll
@@ -20,7 +20,7 @@ if.then: ; preds = %entry
unreachable
if.end: ; preds = %entry
- call void @llvm.dbg.declare(metadata !{%struct.btCompoundLeafCallback* %callback}, metadata !3, metadata !{metadata !"0x102"})
+ call void @llvm.dbg.declare(metadata %struct.btCompoundLeafCallback* %callback, metadata !3, metadata !{!"0x102"})
%m = getelementptr inbounds %struct.btCompoundLeafCallback* %callback, i64 0, i32 1
store i32 0, i32* undef, align 8
%cmp12447 = icmp sgt i32 undef, 0
@@ -36,13 +36,13 @@ invoke.cont44: ; preds = %if.end
!llvm.dbg.cu = !{!0}
!llvm.module.flags = !{!8}
-!0 = metadata !{metadata !"0x11\004\00clang version 3.3 (trunk 168984) (llvm/trunk 168983)\001\00\000\00\000", metadata !6, null, null, metadata !1, null, null} ; [ DW_TAG_compile_unit ] [MultiSource/Benchmarks/Bullet/MultiSource/Benchmarks/Bullet/btCompoundCollisionAlgorithm.cpp] [DW_LANG_C_plus_plus]
-!1 = metadata !{metadata !2}
-!2 = metadata !{metadata !"0x2e\00test\00test\00\000\000\001\000\006\00256\001\001", metadata !6, metadata !5, metadata !7, null, void ()* @test, null, null, null} ; [ DW_TAG_subprogram ] [def] [test]
-!3 = metadata !{metadata !"0x100\00callback\00214\000", null, null, metadata !4} ; [ DW_TAG_auto_variable ] [callback] [line 214]
-!4 = metadata !{metadata !"0x13\00btCompoundLeafCallback\0090\00512\0064\000\000\000", metadata !6, null, null, null, null, null, null} ; [ DW_TAG_structure_type ] [btCompoundLeafCallback] [line 90, size 512, align 64, offset 0] [def] [from ]
-!5 = metadata !{metadata !"0x29", metadata !6} ; [ DW_TAG_file_type ]
-!6 = metadata !{metadata !"MultiSource/Benchmarks/Bullet/btCompoundCollisionAlgorithm.cpp", metadata !"MultiSource/Benchmarks/Bullet"}
-!7 = metadata !{metadata !"0x15\00\000\000\000\000\000\000", i32 0, null, null, metadata !9, null, null, null} ; [ DW_TAG_subroutine_type ] [line 0, size 0, align 0, offset 0] [from ]
-!8 = metadata !{i32 1, metadata !"Debug Info Version", i32 2}
-!9 = metadata !{null}
+!0 = !{!"0x11\004\00clang version 3.3 (trunk 168984) (llvm/trunk 168983)\001\00\000\00\000", !6, null, null, !1, null, null} ; [ DW_TAG_compile_unit ] [MultiSource/Benchmarks/Bullet/MultiSource/Benchmarks/Bullet/btCompoundCollisionAlgorithm.cpp] [DW_LANG_C_plus_plus]
+!1 = !{!2}
+!2 = !{!"0x2e\00test\00test\00\000\000\001\000\006\00256\001\001", !6, !5, !7, null, void ()* @test, null, null, null} ; [ DW_TAG_subprogram ] [def] [test]
+!3 = !{!"0x100\00callback\00214\000", null, null, !4} ; [ DW_TAG_auto_variable ] [callback] [line 214]
+!4 = !{!"0x13\00btCompoundLeafCallback\0090\00512\0064\000\000\000", !6, null, null, null, null, null, null} ; [ DW_TAG_structure_type ] [btCompoundLeafCallback] [line 90, size 512, align 64, offset 0] [def] [from ]
+!5 = !{!"0x29", !6} ; [ DW_TAG_file_type ]
+!6 = !{!"MultiSource/Benchmarks/Bullet/btCompoundCollisionAlgorithm.cpp", !"MultiSource/Benchmarks/Bullet"}
+!7 = !{!"0x15\00\000\000\000\000\000\000", i32 0, null, null, !9, null, null, null} ; [ DW_TAG_subroutine_type ] [line 0, size 0, align 0, offset 0] [from ]
+!8 = !{i32 1, !"Debug Info Version", i32 2}
+!9 = !{null}
diff --git a/test/CodeGen/X86/2013-10-14-FastISel-incorrect-vreg.ll b/test/CodeGen/X86/2013-10-14-FastISel-incorrect-vreg.ll
index 10dc927..9cd150a 100644
--- a/test/CodeGen/X86/2013-10-14-FastISel-incorrect-vreg.ll
+++ b/test/CodeGen/X86/2013-10-14-FastISel-incorrect-vreg.ll
@@ -41,7 +41,7 @@ entry:
i1 false, label %label_end
]
default:
- unreachable
+ br label %label_end
label_true:
br label %label_end
@@ -80,7 +80,7 @@ entry:
i1 false, label %label_end
]
default:
- unreachable
+ br label %label_end
label_true:
br label %label_end
@@ -119,7 +119,7 @@ entry:
i1 false, label %label_end
]
default:
- unreachable
+ br label %label_end
label_true:
br label %label_end
diff --git a/test/CodeGen/X86/MachineBranchProb.ll b/test/CodeGen/X86/MachineBranchProb.ll
index a893152..cf41ef2 100644
--- a/test/CodeGen/X86/MachineBranchProb.ll
+++ b/test/CodeGen/X86/MachineBranchProb.ll
@@ -31,4 +31,4 @@ for.inc20: ; preds = %for.cond2
ret void
}
-!0 = metadata !{metadata !"branch_weights", i32 112017436, i32 -735157296}
+!0 = !{!"branch_weights", i32 112017436, i32 -735157296}
diff --git a/test/CodeGen/X86/MachineSink-DbgValue.ll b/test/CodeGen/X86/MachineSink-DbgValue.ll
index 54d8f65..3a2c58f 100644
--- a/test/CodeGen/X86/MachineSink-DbgValue.ll
+++ b/test/CodeGen/X86/MachineSink-DbgValue.ll
@@ -4,10 +4,10 @@ target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f3
target triple = "x86_64-apple-macosx10.7.0"
define i32 @foo(i32 %i, i32* nocapture %c) nounwind uwtable readonly ssp {
- tail call void @llvm.dbg.value(metadata !{i32 %i}, i64 0, metadata !6, metadata !{metadata !"0x102"}), !dbg !12
+ tail call void @llvm.dbg.value(metadata i32 %i, i64 0, metadata !6, metadata !{!"0x102"}), !dbg !12
%ab = load i32* %c, align 1, !dbg !14
- tail call void @llvm.dbg.value(metadata !{i32* %c}, i64 0, metadata !7, metadata !{metadata !"0x102"}), !dbg !13
- tail call void @llvm.dbg.value(metadata !{i32 %ab}, i64 0, metadata !10, metadata !{metadata !"0x102"}), !dbg !14
+ tail call void @llvm.dbg.value(metadata i32* %c, i64 0, metadata !7, metadata !{!"0x102"}), !dbg !13
+ tail call void @llvm.dbg.value(metadata i32 %ab, i64 0, metadata !10, metadata !{!"0x102"}), !dbg !14
%cd = icmp eq i32 %i, 42, !dbg !15
br i1 %cd, label %bb1, label %bb2, !dbg !15
@@ -28,26 +28,26 @@ declare void @llvm.dbg.value(metadata, i64, metadata, metadata) nounwind readnon
!llvm.dbg.cu = !{!0}
!llvm.module.flags = !{!22}
-!0 = metadata !{metadata !"0x11\0012\00Apple clang version 3.0 (tags/Apple/clang-211.10.1) (based on LLVM 3.0svn)\001\00\000\00\001", metadata !20, metadata !21, metadata !21, metadata !18, null, null} ; [ DW_TAG_compile_unit ]
-!1 = metadata !{metadata !"0x2e\00foo\00foo\00\002\000\001\000\006\00256\001\000", metadata !20, metadata !2, metadata !3, null, i32 (i32, i32*)* @foo, null, null, metadata !19} ; [ DW_TAG_subprogram ] [line 2] [def] [scope 0] [foo]
-!2 = metadata !{metadata !"0x29", metadata !20} ; [ DW_TAG_file_type ]
-!3 = metadata !{metadata !"0x15\00\000\000\000\000\000\000", metadata !20, metadata !2, null, metadata !4, null, null, null} ; [ DW_TAG_subroutine_type ] [line 0, size 0, align 0, offset 0] [from ]
-!4 = metadata !{metadata !5}
-!5 = metadata !{metadata !"0x24\00int\000\0032\0032\000\000\005", null, metadata !0} ; [ DW_TAG_base_type ]
-!6 = metadata !{metadata !"0x101\00i\0016777218\000", metadata !1, metadata !2, metadata !5} ; [ DW_TAG_arg_variable ]
-!7 = metadata !{metadata !"0x101\00c\0033554434\000", metadata !1, metadata !2, metadata !8} ; [ DW_TAG_arg_variable ]
-!8 = metadata !{metadata !"0xf\00\000\0064\0064\000\000", null, metadata !0, metadata !9} ; [ DW_TAG_pointer_type ]
-!9 = metadata !{metadata !"0x24\00char\000\008\008\000\000\006", null, metadata !0} ; [ DW_TAG_base_type ]
-!10 = metadata !{metadata !"0x100\00a\003\000", metadata !11, metadata !2, metadata !9} ; [ DW_TAG_auto_variable ]
-!11 = metadata !{metadata !"0xb\002\0025\000", metadata !20, metadata !1} ; [ DW_TAG_lexical_block ]
-!12 = metadata !{i32 2, i32 13, metadata !1, null}
-!13 = metadata !{i32 2, i32 22, metadata !1, null}
-!14 = metadata !{i32 3, i32 14, metadata !11, null}
-!15 = metadata !{i32 4, i32 3, metadata !11, null}
-!16 = metadata !{i32 5, i32 5, metadata !11, null}
-!17 = metadata !{i32 7, i32 1, metadata !11, null}
-!18 = metadata !{metadata !1}
-!19 = metadata !{metadata !6, metadata !7, metadata !10}
-!20 = metadata !{metadata !"a.c", metadata !"/private/tmp"}
-!21 = metadata !{i32 0}
-!22 = metadata !{i32 1, metadata !"Debug Info Version", i32 2}
+!0 = !{!"0x11\0012\00Apple clang version 3.0 (tags/Apple/clang-211.10.1) (based on LLVM 3.0svn)\001\00\000\00\001", !20, !21, !21, !18, null, null} ; [ DW_TAG_compile_unit ]
+!1 = !{!"0x2e\00foo\00foo\00\002\000\001\000\006\00256\001\000", !20, !2, !3, null, i32 (i32, i32*)* @foo, null, null, !19} ; [ DW_TAG_subprogram ] [line 2] [def] [scope 0] [foo]
+!2 = !{!"0x29", !20} ; [ DW_TAG_file_type ]
+!3 = !{!"0x15\00\000\000\000\000\000\000", !20, !2, null, !4, null, null, null} ; [ DW_TAG_subroutine_type ] [line 0, size 0, align 0, offset 0] [from ]
+!4 = !{!5}
+!5 = !{!"0x24\00int\000\0032\0032\000\000\005", null, !0} ; [ DW_TAG_base_type ]
+!6 = !{!"0x101\00i\0016777218\000", !1, !2, !5} ; [ DW_TAG_arg_variable ]
+!7 = !{!"0x101\00c\0033554434\000", !1, !2, !8} ; [ DW_TAG_arg_variable ]
+!8 = !{!"0xf\00\000\0064\0064\000\000", null, !0, !9} ; [ DW_TAG_pointer_type ]
+!9 = !{!"0x24\00char\000\008\008\000\000\006", null, !0} ; [ DW_TAG_base_type ]
+!10 = !{!"0x100\00a\003\000", !11, !2, !9} ; [ DW_TAG_auto_variable ]
+!11 = !{!"0xb\002\0025\000", !20, !1} ; [ DW_TAG_lexical_block ]
+!12 = !MDLocation(line: 2, column: 13, scope: !1)
+!13 = !MDLocation(line: 2, column: 22, scope: !1)
+!14 = !MDLocation(line: 3, column: 14, scope: !11)
+!15 = !MDLocation(line: 4, column: 3, scope: !11)
+!16 = !MDLocation(line: 5, column: 5, scope: !11)
+!17 = !MDLocation(line: 7, column: 1, scope: !11)
+!18 = !{!1}
+!19 = !{!6, !7, !10}
+!20 = !{!"a.c", !"/private/tmp"}
+!21 = !{i32 0}
+!22 = !{i32 1, !"Debug Info Version", i32 2}
diff --git a/test/CodeGen/X86/MergeConsecutiveStores.ll b/test/CodeGen/X86/MergeConsecutiveStores.ll
index f6d6852..f396e88 100644
--- a/test/CodeGen/X86/MergeConsecutiveStores.ll
+++ b/test/CodeGen/X86/MergeConsecutiveStores.ll
@@ -148,12 +148,12 @@ define void @merge_nonconst_store(i32 %count, i8 %zz, %struct.A* nocapture %p) n
}
-;CHECK-LABEL: merge_loads_i16:
-; load:
-;CHECK: movw
-; store:
-;CHECK: movw
-;CHECK: ret
+; CHECK-LABEL: merge_loads_i16:
+; load:
+; CHECK: movw
+; store:
+; CHECK: movw
+; CHECK: ret
define void @merge_loads_i16(i32 %count, %struct.A* noalias nocapture %q, %struct.A* noalias nocapture %p) nounwind uwtable noinline ssp {
%1 = icmp sgt i32 %count, 0
br i1 %1, label %.lr.ph, label %._crit_edge
@@ -181,13 +181,13 @@ define void @merge_loads_i16(i32 %count, %struct.A* noalias nocapture %q, %struc
ret void
}
-; The loads and the stores are interleved. Can't merge them.
-;CHECK-LABEL: no_merge_loads:
-;CHECK: movb
-;CHECK: movb
-;CHECK: movb
-;CHECK: movb
-;CHECK: ret
+; The loads and the stores are interleaved. Can't merge them.
+; CHECK-LABEL: no_merge_loads:
+; CHECK: movb
+; CHECK: movb
+; CHECK: movb
+; CHECK: movb
+; CHECK: ret
define void @no_merge_loads(i32 %count, %struct.A* noalias nocapture %q, %struct.A* noalias nocapture %p) nounwind uwtable noinline ssp {
%1 = icmp sgt i32 %count, 0
br i1 %1, label %.lr.ph, label %._crit_edge
@@ -216,12 +216,12 @@ a4: ; preds = %4, %.lr.ph
}
-;CHECK-LABEL: merge_loads_integer:
-; load:
-;CHECK: movq
-; store:
-;CHECK: movq
-;CHECK: ret
+; CHECK-LABEL: merge_loads_integer:
+; load:
+; CHECK: movq
+; store:
+; CHECK: movq
+; CHECK: ret
define void @merge_loads_integer(i32 %count, %struct.B* noalias nocapture %q, %struct.B* noalias nocapture %p) nounwind uwtable noinline ssp {
%1 = icmp sgt i32 %count, 0
br i1 %1, label %.lr.ph, label %._crit_edge
@@ -250,12 +250,12 @@ define void @merge_loads_integer(i32 %count, %struct.B* noalias nocapture %q, %s
}
-;CHECK-LABEL: merge_loads_vector:
-; load:
-;CHECK: movups
-; store:
-;CHECK: movups
-;CHECK: ret
+; CHECK-LABEL: merge_loads_vector:
+; load:
+; CHECK: movups
+; store:
+; CHECK: movups
+; CHECK: ret
define void @merge_loads_vector(i32 %count, %struct.B* noalias nocapture %q, %struct.B* noalias nocapture %p) nounwind uwtable noinline ssp {
%a1 = icmp sgt i32 %count, 0
br i1 %a1, label %.lr.ph, label %._crit_edge
@@ -291,18 +291,18 @@ block4: ; preds = %4, %.lr.ph
ret void
}
-;CHECK-LABEL: merge_loads_no_align:
-; load:
-;CHECK: movl
-;CHECK: movl
-;CHECK: movl
-;CHECK: movl
-; store:
-;CHECK: movl
-;CHECK: movl
-;CHECK: movl
-;CHECK: movl
-;CHECK: ret
+; CHECK-LABEL: merge_loads_no_align:
+; load:
+; CHECK: movl
+; CHECK: movl
+; CHECK: movl
+; CHECK: movl
+; store:
+; CHECK: movl
+; CHECK: movl
+; CHECK: movl
+; CHECK: movl
+; CHECK: ret
define void @merge_loads_no_align(i32 %count, %struct.B* noalias nocapture %q, %struct.B* noalias nocapture %p) nounwind uwtable noinline ssp {
%a1 = icmp sgt i32 %count, 0
br i1 %a1, label %.lr.ph, label %._crit_edge
@@ -434,3 +434,62 @@ define void @loadStoreBaseIndexOffsetSextNoSex(i8* %a, i8* %b, i8* %c, i32 %n) {
; <label>:14
ret void
}
+
+; PR21711 ( http://llvm.org/bugs/show_bug.cgi?id=21711 )
+define void @merge_vec_element_store(<8 x float> %v, float* %ptr) {
+ %vecext0 = extractelement <8 x float> %v, i32 0
+ %vecext1 = extractelement <8 x float> %v, i32 1
+ %vecext2 = extractelement <8 x float> %v, i32 2
+ %vecext3 = extractelement <8 x float> %v, i32 3
+ %vecext4 = extractelement <8 x float> %v, i32 4
+ %vecext5 = extractelement <8 x float> %v, i32 5
+ %vecext6 = extractelement <8 x float> %v, i32 6
+ %vecext7 = extractelement <8 x float> %v, i32 7
+ %arrayidx1 = getelementptr inbounds float* %ptr, i64 1
+ %arrayidx2 = getelementptr inbounds float* %ptr, i64 2
+ %arrayidx3 = getelementptr inbounds float* %ptr, i64 3
+ %arrayidx4 = getelementptr inbounds float* %ptr, i64 4
+ %arrayidx5 = getelementptr inbounds float* %ptr, i64 5
+ %arrayidx6 = getelementptr inbounds float* %ptr, i64 6
+ %arrayidx7 = getelementptr inbounds float* %ptr, i64 7
+ store float %vecext0, float* %ptr, align 4
+ store float %vecext1, float* %arrayidx1, align 4
+ store float %vecext2, float* %arrayidx2, align 4
+ store float %vecext3, float* %arrayidx3, align 4
+ store float %vecext4, float* %arrayidx4, align 4
+ store float %vecext5, float* %arrayidx5, align 4
+ store float %vecext6, float* %arrayidx6, align 4
+ store float %vecext7, float* %arrayidx7, align 4
+ ret void
+
+; CHECK-LABEL: merge_vec_element_store
+; CHECK: vmovups
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+}
+
+; This is a minimized test based on real code that was failing.
+; We could merge stores (and loads) like this...
+
+define void @merge_vec_element_and_scalar_load([6 x i64]* %array) {
+ %idx0 = getelementptr inbounds [6 x i64]* %array, i64 0, i64 0
+ %idx1 = getelementptr inbounds [6 x i64]* %array, i64 0, i64 1
+ %idx4 = getelementptr inbounds [6 x i64]* %array, i64 0, i64 4
+ %idx5 = getelementptr inbounds [6 x i64]* %array, i64 0, i64 5
+
+ %a0 = load i64* %idx0, align 8
+ store i64 %a0, i64* %idx4, align 8
+
+ %b = bitcast i64* %idx1 to <2 x i64>*
+ %v = load <2 x i64>* %b, align 8
+ %a1 = extractelement <2 x i64> %v, i32 0
+ store i64 %a1, i64* %idx5, align 8
+ ret void
+
+; CHECK-LABEL: merge_vec_element_and_scalar_load
+; CHECK: movq (%rdi), %rax
+; CHECK-NEXT: movq %rax, 32(%rdi)
+; CHECK-NEXT: movq 8(%rdi), %rax
+; CHECK-NEXT: movq %rax, 40(%rdi)
+; CHECK-NEXT: retq
+}
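
NOTE: the two tests added above exercise the consecutive-store merger on stores fed by extractelement: per the CHECK lines, eight scalar lane stores of an <8 x float> are expected to collapse into a single vmovups, and an adjacent scalar/vector load-store pair into two 64-bit moves. A reduced sketch of the same pattern (function name hypothetical), which the combiner would analogously be expected to merge into one 8-byte store:

  define void @merge_two_lanes(<2 x float> %v, float* %p) {
    %e0 = extractelement <2 x float> %v, i32 0
    %e1 = extractelement <2 x float> %v, i32 1
    %p1 = getelementptr inbounds float* %p, i64 1
    ; two adjacent 4-byte stores of consecutive lanes
    store float %e0, float* %p, align 4
    store float %e1, float* %p1, align 4
    ret void
  }
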
diff --git a/test/CodeGen/X86/StackColoring-dbg.ll b/test/CodeGen/X86/StackColoring-dbg.ll
index 6865873..498ad7e 100644
--- a/test/CodeGen/X86/StackColoring-dbg.ll
+++ b/test/CodeGen/X86/StackColoring-dbg.ll
@@ -17,7 +17,7 @@ entry:
for.body:
call void @llvm.lifetime.end(i64 -1, i8* %0) nounwind
call void @llvm.lifetime.start(i64 -1, i8* %x.i) nounwind
- call void @llvm.dbg.declare(metadata !{i8* %x.i}, metadata !22, metadata !{metadata !"0x102"}) nounwind
+ call void @llvm.dbg.declare(metadata i8* %x.i, metadata !22, metadata !{!"0x102"}) nounwind
br label %for.body
}
@@ -27,9 +27,9 @@ declare void @llvm.lifetime.end(i64, i8* nocapture) nounwind
!llvm.dbg.cu = !{!0}
!llvm.module.flags = !{!23}
-!0 = metadata !{metadata !"0x11\001\00clang\001\00\000\00\000", metadata !1, metadata !2, metadata !2, null, null, null} ; [ DW_TAG_compile_unit ]
-!1 = metadata !{metadata !"t.c", metadata !""}
-!16 = metadata !{metadata !"0x24\00char\000\008\008\000\000\006", null, null} ; [ DW_TAG_base_type ]
-!2 = metadata !{i32 0}
-!22 = metadata !{metadata !"0x100\00x\0016\000", null, metadata !2, metadata !16} ; [ DW_TAG_auto_variable ]
-!23 = metadata !{i32 1, metadata !"Debug Info Version", i32 2}
+!0 = !{!"0x11\001\00clang\001\00\000\00\000", !1, !2, !2, null, null, null} ; [ DW_TAG_compile_unit ]
+!1 = !{!"t.c", !""}
+!16 = !{!"0x24\00char\000\008\008\000\000\006", null, null} ; [ DW_TAG_base_type ]
+!2 = !{i32 0}
+!22 = !{!"0x100\00x\0016\000", null, !2, !16} ; [ DW_TAG_auto_variable ]
+!23 = !{i32 1, !"Debug Info Version", i32 2}
diff --git a/test/CodeGen/X86/SwizzleShuff.ll b/test/CodeGen/X86/SwizzleShuff.ll
index a435272..d387850 100644
--- a/test/CodeGen/X86/SwizzleShuff.ll
+++ b/test/CodeGen/X86/SwizzleShuff.ll
@@ -14,11 +14,12 @@ define void @pull_bitcast (<4 x i8>* %pA, <4 x i8>* %pB) {
}
; CHECK: multi_use_swizzle
-; CHECK: mov
-; CHECK-NEXT: shuf
-; CHECK-NEXT: shuf
-; CHECK-NEXT: shuf
-; CHECK-NEXT: xor
+; CHECK: pshufd
+; CHECK-NEXT: pshufd
+; CHECK-NEXT: pblendw
+; CHECK-NEXT: pshufd
+; CHECK-NEXT: pshufd
+; CHECK-NEXT: pxor
; CHECK-NEXT: ret
define <4 x i32> @multi_use_swizzle (<4 x i32>* %pA, <4 x i32>* %pB) {
%A = load <4 x i32>* %pA
@@ -45,7 +46,7 @@ define <4 x i8> @pull_bitcast2 (<4 x i8>* %pA, <4 x i8>* %pB, <4 x i8>* %pC) {
; CHECK: reverse_1
-; CHECK-NOT: shuf
+; CHECK-NOT: pshufd
; CHECK: ret
define <4 x i32> @reverse_1 (<4 x i32>* %pA, <4 x i32>* %pB) {
%A = load <4 x i32>* %pA
@@ -57,7 +58,7 @@ define <4 x i32> @reverse_1 (<4 x i32>* %pA, <4 x i32>* %pB) {
; CHECK: no_reverse_shuff
-; CHECK: shuf
+; CHECK: pshufd
; CHECK: ret
define <4 x i32> @no_reverse_shuff (<4 x i32>* %pA, <4 x i32>* %pB) {
%A = load <4 x i32>* %pA
diff --git a/test/CodeGen/X86/asm-label.ll b/test/CodeGen/X86/asm-label.ll
index 1fc6e2e..1da66e7 100644
--- a/test/CodeGen/X86/asm-label.ll
+++ b/test/CodeGen/X86/asm-label.ll
@@ -24,7 +24,7 @@ if.end: ; preds = %if.then
br label %cleanup
cleanup: ; preds = %if.end, %if.then9
- switch i32 undef, label %unreachable [
+ switch i32 undef, label %default [
i32 0, label %cleanup.cont
i32 1, label %if.end11
]
@@ -35,6 +35,6 @@ cleanup.cont: ; preds = %cleanup
if.end11: ; preds = %cleanup.cont, %cleanup, %land.lhs.true, %entry
ret void
-unreachable: ; preds = %cleanup
- unreachable
+default: ; preds = %cleanup
+ br label %if.end11
}
diff --git a/test/CodeGen/X86/atomic16.ll b/test/CodeGen/X86/atomic16.ll
index faaa4c4..f6892de 100644
--- a/test/CodeGen/X86/atomic16.ll
+++ b/test/CodeGen/X86/atomic16.ll
@@ -15,17 +15,17 @@ entry:
; X32: incw
%t2 = atomicrmw add i16* @sc16, i16 3 acquire
; X64: lock
-; X64: addw $3, {{.*}} # encoding: [0xf0,0x66
+; X64: addw $3, {{.*}} # encoding: [0x66,0xf0
; X32: lock
; X32: addw $3
%t3 = atomicrmw add i16* @sc16, i16 5 acquire
; X64: lock
-; X64: xaddw {{.*}} # encoding: [0xf0,0x66
+; X64: xaddw {{.*}} # encoding: [0x66,0xf0
; X32: lock
; X32: xaddw
%t4 = atomicrmw add i16* @sc16, i16 %t3 acquire
; X64: lock
-; X64: addw {{.*}} # encoding: [0xf0,0x66
+; X64: addw {{.*}} # encoding: [0x66,0xf0
; X32: lock
; X32: addw
ret void
@@ -43,17 +43,17 @@ define void @atomic_fetch_sub16() nounwind {
; X32: decw
%t2 = atomicrmw sub i16* @sc16, i16 3 acquire
; X64: lock
-; X64: subw $3, {{.*}} # encoding: [0xf0,0x66
+; X64: subw $3, {{.*}} # encoding: [0x66,0xf0
; X32: lock
; X32: subw $3
%t3 = atomicrmw sub i16* @sc16, i16 5 acquire
; X64: lock
-; X64: xaddw {{.*}} # encoding: [0xf0,0x66
+; X64: xaddw {{.*}} # encoding: [0x66,0xf0
; X32: lock
; X32: xaddw
%t4 = atomicrmw sub i16* @sc16, i16 %t3 acquire
; X64: lock
-; X64: subw {{.*}} # encoding: [0xf0,0x66
+; X64: subw {{.*}} # encoding: [0x66,0xf0
; X32: lock
; X32: subw
ret void
@@ -66,7 +66,7 @@ define void @atomic_fetch_and16() nounwind {
; X32-LABEL: atomic_fetch_and16
%t1 = atomicrmw and i16* @sc16, i16 3 acquire
; X64: lock
-; X64: andw $3, {{.*}} # encoding: [0xf0,0x66
+; X64: andw $3, {{.*}} # encoding: [0x66,0xf0
; X32: lock
; X32: andw $3
%t2 = atomicrmw and i16* @sc16, i16 5 acquire
@@ -78,7 +78,7 @@ define void @atomic_fetch_and16() nounwind {
; X32: cmpxchgw
%t3 = atomicrmw and i16* @sc16, i16 %t2 acquire
; X64: lock
-; X64: andw {{.*}} # encoding: [0xf0,0x66
+; X64: andw {{.*}} # encoding: [0x66,0xf0
; X32: lock
; X32: andw
ret void
@@ -91,7 +91,7 @@ define void @atomic_fetch_or16() nounwind {
; X32-LABEL: atomic_fetch_or16
%t1 = atomicrmw or i16* @sc16, i16 3 acquire
; X64: lock
-; X64: orw $3, {{.*}} # encoding: [0xf0,0x66
+; X64: orw $3, {{.*}} # encoding: [0x66,0xf0
; X32: lock
; X32: orw $3
%t2 = atomicrmw or i16* @sc16, i16 5 acquire
@@ -103,7 +103,7 @@ define void @atomic_fetch_or16() nounwind {
; X32: cmpxchgw
%t3 = atomicrmw or i16* @sc16, i16 %t2 acquire
; X64: lock
-; X64: orw {{.*}} # encoding: [0xf0,0x66
+; X64: orw {{.*}} # encoding: [0x66,0xf0
; X32: lock
; X32: orw
ret void
@@ -116,7 +116,7 @@ define void @atomic_fetch_xor16() nounwind {
; X32-LABEL: atomic_fetch_xor16
%t1 = atomicrmw xor i16* @sc16, i16 3 acquire
; X64: lock
-; X64: xorw $3, {{.*}} # encoding: [0xf0,0x66
+; X64: xorw $3, {{.*}} # encoding: [0x66,0xf0
; X32: lock
; X32: xorw $3
%t2 = atomicrmw xor i16* @sc16, i16 5 acquire
@@ -128,7 +128,7 @@ define void @atomic_fetch_xor16() nounwind {
; X32: cmpxchgw
%t3 = atomicrmw xor i16* @sc16, i16 %t2 acquire
; X64: lock
-; X64: xorw {{.*}} # encoding: [0xf0,0x66
+; X64: xorw {{.*}} # encoding: [0x66,0xf0
; X32: lock
; X32: xorw
ret void
diff --git a/test/CodeGen/X86/avx-cvt.ll b/test/CodeGen/X86/avx-cvt.ll
index 22fad7c..10ab971 100644
--- a/test/CodeGen/X86/avx-cvt.ll
+++ b/test/CodeGen/X86/avx-cvt.ll
@@ -87,3 +87,20 @@ entry:
ret void
}
+define double @nearbyint_f64(double %a) {
+; CHECK-LABEL: nearbyint_f64
+; CHECK: vroundsd $12
+ %res = call double @llvm.nearbyint.f64(double %a)
+ ret double %res
+}
+declare double @llvm.nearbyint.f64(double %p)
+
+define float @floor_f32(float %a) {
+; CHECK-LABEL: floor_f32
+; CHECK: vroundss $1
+ %res = call float @llvm.floor.f32(float %a)
+ ret float %res
+}
+declare float @llvm.floor.f32(float %p)
+
+
diff --git a/test/CodeGen/X86/avx-intrinsics-x86-upgrade.ll b/test/CodeGen/X86/avx-intrinsics-x86-upgrade.ll
index d2b44cd..c65b021 100644
--- a/test/CodeGen/X86/avx-intrinsics-x86-upgrade.ll
+++ b/test/CodeGen/X86/avx-intrinsics-x86-upgrade.ll
@@ -24,3 +24,17 @@ define <8 x float> @test_x86_avx_dp_ps_256(<8 x float> %a0, <8 x float> %a1) {
declare <8 x float> @llvm.x86.avx.dp.ps.256(<8 x float>, <8 x float>, i32) nounwind readnone
+define <2 x i64> @test_x86_sse2_psll_dq(<2 x i64> %a0) {
+ ; CHECK: vpslldq {{.*#+}} xmm0 = zero,xmm0[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14]
+ %res = call <2 x i64> @llvm.x86.sse2.psll.dq(<2 x i64> %a0, i32 8) ; <<2 x i64>> [#uses=1]
+ ret <2 x i64> %res
+}
+declare <2 x i64> @llvm.x86.sse2.psll.dq(<2 x i64>, i32) nounwind readnone
+
+
+define <2 x i64> @test_x86_sse2_psrl_dq(<2 x i64> %a0) {
+ ; CHECK: vpsrldq {{.*#+}} xmm0 = xmm0[1,2,3,4,5,6,7,8,9,10,11,12,13,14,15],zero
+ %res = call <2 x i64> @llvm.x86.sse2.psrl.dq(<2 x i64> %a0, i32 8) ; <<2 x i64>> [#uses=1]
+ ret <2 x i64> %res
+}
+declare <2 x i64> @llvm.x86.sse2.psrl.dq(<2 x i64>, i32) nounwind readnone
diff --git a/test/CodeGen/X86/avx-intrinsics-x86.ll b/test/CodeGen/X86/avx-intrinsics-x86.ll
index ef3e83f..3ecf709 100644
--- a/test/CodeGen/X86/avx-intrinsics-x86.ll
+++ b/test/CodeGen/X86/avx-intrinsics-x86.ll
@@ -455,22 +455,6 @@ define <4 x i32> @test_x86_sse2_psll_d(<4 x i32> %a0, <4 x i32> %a1) {
ret <4 x i32> %res
}
declare <4 x i32> @llvm.x86.sse2.psll.d(<4 x i32>, <4 x i32>) nounwind readnone
-
-
-define <2 x i64> @test_x86_sse2_psll_dq(<2 x i64> %a0) {
- ; CHECK: vpslldq {{.*#+}} xmm0 = xmm0[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15]
- %res = call <2 x i64> @llvm.x86.sse2.psll.dq(<2 x i64> %a0, i32 7) ; <<2 x i64>> [#uses=1]
- ret <2 x i64> %res
-}
-declare <2 x i64> @llvm.x86.sse2.psll.dq(<2 x i64>, i32) nounwind readnone
-
-
-define <2 x i64> @test_x86_sse2_psll_dq_bs(<2 x i64> %a0) {
- ; CHECK: vpslldq {{.*#+}} xmm0 = zero,zero,zero,zero,zero,zero,zero,xmm0[0,1,2,3,4,5,6,7,8]
- %res = call <2 x i64> @llvm.x86.sse2.psll.dq.bs(<2 x i64> %a0, i32 7) ; <<2 x i64>> [#uses=1]
- ret <2 x i64> %res
-}
-declare <2 x i64> @llvm.x86.sse2.psll.dq.bs(<2 x i64>, i32) nounwind readnone
define <2 x i64> @test_x86_sse2_psll_q(<2 x i64> %a0, <2 x i64> %a1) {
@@ -551,22 +535,6 @@ define <4 x i32> @test_x86_sse2_psrl_d(<4 x i32> %a0, <4 x i32> %a1) {
ret <4 x i32> %res
}
declare <4 x i32> @llvm.x86.sse2.psrl.d(<4 x i32>, <4 x i32>) nounwind readnone
-
-
-define <2 x i64> @test_x86_sse2_psrl_dq(<2 x i64> %a0) {
- ; CHECK: vpsrldq {{.*#+}} xmm0 = xmm0[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15]
- %res = call <2 x i64> @llvm.x86.sse2.psrl.dq(<2 x i64> %a0, i32 7) ; <<2 x i64>> [#uses=1]
- ret <2 x i64> %res
-}
-declare <2 x i64> @llvm.x86.sse2.psrl.dq(<2 x i64>, i32) nounwind readnone
-
-
-define <2 x i64> @test_x86_sse2_psrl_dq_bs(<2 x i64> %a0) {
- ; CHECK: vpsrldq {{.*#+}} xmm0 = xmm0[7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero
- %res = call <2 x i64> @llvm.x86.sse2.psrl.dq.bs(<2 x i64> %a0, i32 7) ; <<2 x i64>> [#uses=1]
- ret <2 x i64> %res
-}
-declare <2 x i64> @llvm.x86.sse2.psrl.dq.bs(<2 x i64>, i32) nounwind readnone
define <2 x i64> @test_x86_sse2_psrl_q(<2 x i64> %a0, <2 x i64> %a1) {
diff --git a/test/CodeGen/X86/avx-splat.ll b/test/CodeGen/X86/avx-splat.ll
index 98c1645..c7e8b3b 100644
--- a/test/CodeGen/X86/avx-splat.ll
+++ b/test/CodeGen/X86/avx-splat.ll
@@ -18,7 +18,7 @@ entry:
}
; CHECK: vmovq
-; CHECK-NEXT: vunpcklpd %xmm
+; CHECK-NEXT: vmovddup %xmm
; CHECK-NEXT: vinsertf128 $1
define <4 x i64> @funcC(i64 %q) nounwind uwtable readnone ssp {
entry:
@@ -29,7 +29,7 @@ entry:
ret <4 x i64> %vecinit6.i
}
-; CHECK: vunpcklpd %xmm
+; CHECK: vmovddup %xmm
; CHECK-NEXT: vinsertf128 $1
define <4 x double> @funcD(double %q) nounwind uwtable readnone ssp {
entry:
@@ -42,7 +42,7 @@ entry:
; Test this turns into a broadcast:
; shuffle (scalar_to_vector (load (ptr + 4))), undef, <0, 0, 0, 0>
-;
+;
; CHECK: vbroadcastss
define <8 x float> @funcE() nounwind {
allocas:
diff --git a/test/CodeGen/X86/avx-trunc.ll b/test/CodeGen/X86/avx-trunc.ll
index bf8d9a7..27be9fd 100644
--- a/test/CodeGen/X86/avx-trunc.ll
+++ b/test/CodeGen/X86/avx-trunc.ll
@@ -2,9 +2,9 @@
define <4 x i32> @trunc_64_32(<4 x i64> %A) nounwind uwtable readnone ssp{
; CHECK-LABEL: trunc_64_32
-; CHECK: shufps
-; CHECK-NOT: pshufd
-; CHECK-NOT: movlhps
+; CHECK: pshufd
+; CHECK: pshufd
+; CHECK: pblendw
%B = trunc <4 x i64> %A to <4 x i32>
ret <4 x i32>%B
}
diff --git a/test/CodeGen/X86/avx-vperm2x128.ll b/test/CodeGen/X86/avx-vperm2x128.ll
index a103405..43303ca 100644
--- a/test/CodeGen/X86/avx-vperm2x128.ll
+++ b/test/CodeGen/X86/avx-vperm2x128.ll
@@ -182,20 +182,11 @@ entry:
;;;; Cases we must not select vperm2f128
define <8 x float> @G(<8 x float> %a, <8 x float> %b) nounwind uwtable readnone ssp {
-; AVX1-LABEL: G:
-; AVX1: ## BB#0: ## %entry
-; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
-; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm1
-; AVX1-NEXT: vpermilps {{.*#+}} xmm1 = xmm1[0,0,2,3]
-; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
-; AVX1-NEXT: retq
-;
-; AVX2-LABEL: G:
-; AVX2: ## BB#0: ## %entry
-; AVX2-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[0,1,0,1]
-; AVX2-NEXT: vpermilps {{.*#+}} ymm1 = ymm1[0,0,2,3,4,4,6,7]
-; AVX2-NEXT: vblendpd {{.*#+}} ymm0 = ymm1[0],ymm0[1],ymm1[2,3]
-; AVX2-NEXT: retq
+; ALL-LABEL: G:
+; ALL: ## BB#0: ## %entry
+; ALL-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[2,3],ymm1[2,3]
+; ALL-NEXT: vpermilps {{.*#+}} ymm0 = ymm0[0,0,2,3,4,4,6,7]
+; ALL-NEXT: retq
entry:
%shuffle = shufflevector <8 x float> %a, <8 x float> %b, <8 x i32> <i32 undef, i32 undef, i32 6, i32 7, i32 undef, i32 12, i32 undef, i32 15>
ret <8 x float> %shuffle
diff --git a/test/CodeGen/X86/avx.ll b/test/CodeGen/X86/avx.ll
index cba6d98..6069c14 100644
--- a/test/CodeGen/X86/avx.ll
+++ b/test/CodeGen/X86/avx.ll
@@ -60,7 +60,7 @@ define <4 x float> @insertps_from_vector_load_offset_2(<4 x float> %a, <4 x floa
; X32: movl 8(%esp), %ecx
; CHECK-NOT: mov
;; Try to match a bit more of the instr, since we need the load's offset.
-; CHECK: vinsertps $-64, 12(%{{...}},%{{...}}), %
+; CHECK: vinsertps $192, 12(%{{...}},%{{...}}), %
; CHECK-NEXT: ret
%1 = getelementptr inbounds <4 x float>* %pb, i64 %index
%2 = load <4 x float>* %1, align 16
diff --git a/test/CodeGen/X86/avx1-stack-reload-folding.ll b/test/CodeGen/X86/avx1-stack-reload-folding.ll
deleted file mode 100644
index 2e669b0..0000000
--- a/test/CodeGen/X86/avx1-stack-reload-folding.ll
+++ /dev/null
@@ -1,68 +0,0 @@
-; RUN: llc -O3 -disable-peephole -mcpu=corei7-avx -mattr=+avx < %s | FileCheck %s
-
-target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
-target triple = "x86_64-unknown-unknown"
-
-; Stack reload folding tests - we use the 'big vectors' pattern to guarantee spilling to stack.
-;
-; Many of these tests are primarily to check memory folding with specific instructions. Using a basic
-; load/cvt/store pattern to test for this would mean that it wouldn't be the memory folding code thats
-; being tested - the load-execute version of the instruction from the tables would be matched instead.
-
-define void @stack_fold_vmulpd(<64 x double>* %a, <64 x double>* %b, <64 x double>* %c) {
- ;CHECK-LABEL: stack_fold_vmulpd
- ;CHECK: vmulpd {{[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
-
- %1 = load <64 x double>* %a
- %2 = load <64 x double>* %b
- %3 = fadd <64 x double> %1, %2
- %4 = fsub <64 x double> %1, %2
- %5 = fmul <64 x double> %3, %4
- store <64 x double> %5, <64 x double>* %c
- ret void
-}
-
-define void @stack_fold_cvtdq2ps(<128 x i32>* %a, <128 x i32>* %b, <128 x float>* %c) {
- ;CHECK-LABEL: stack_fold_cvtdq2ps
- ;CHECK: vcvtdq2ps {{[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
-
- %1 = load <128 x i32>* %a
- %2 = load <128 x i32>* %b
- %3 = and <128 x i32> %1, %2
- %4 = xor <128 x i32> %1, %2
- %5 = sitofp <128 x i32> %3 to <128 x float>
- %6 = sitofp <128 x i32> %4 to <128 x float>
- %7 = fadd <128 x float> %5, %6
- store <128 x float> %7, <128 x float>* %c
- ret void
-}
-
-define void @stack_fold_cvttpd2dq(<64 x double>* %a, <64 x double>* %b, <64 x i32>* %c) #0 {
- ;CHECK-LABEL: stack_fold_cvttpd2dq
- ;CHECK: vcvttpd2dqy {{[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
-
- %1 = load <64 x double>* %a
- %2 = load <64 x double>* %b
- %3 = fadd <64 x double> %1, %2
- %4 = fsub <64 x double> %1, %2
- %5 = fptosi <64 x double> %3 to <64 x i32>
- %6 = fptosi <64 x double> %4 to <64 x i32>
- %7 = or <64 x i32> %5, %6
- store <64 x i32> %7, <64 x i32>* %c
- ret void
-}
-
-define void @stack_fold_cvttps2dq(<128 x float>* %a, <128 x float>* %b, <128 x i32>* %c) #0 {
- ;CHECK-LABEL: stack_fold_cvttps2dq
- ;CHECK: vcvttps2dq {{[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
-
- %1 = load <128 x float>* %a
- %2 = load <128 x float>* %b
- %3 = fadd <128 x float> %1, %2
- %4 = fsub <128 x float> %1, %2
- %5 = fptosi <128 x float> %3 to <128 x i32>
- %6 = fptosi <128 x float> %4 to <128 x i32>
- %7 = or <128 x i32> %5, %6
- store <128 x i32> %7, <128 x i32>* %c
- ret void
-}
diff --git a/test/CodeGen/X86/avx2-conversions.ll b/test/CodeGen/X86/avx2-conversions.ll
index f49718e..5f17f1b 100644
--- a/test/CodeGen/X86/avx2-conversions.ll
+++ b/test/CodeGen/X86/avx2-conversions.ll
@@ -84,7 +84,7 @@ define <16 x i16> @sext_16i8_16i16(<16 x i8> %z) {
; CHECK-LABEL: trunc_16i16_16i8:
; CHECK: vpshufb
; CHECK: vpshufb
-; CHECK: vpor
+; CHECK: vpunpcklqdq
; CHECK: ret
define <16 x i8> @trunc_16i16_16i8(<16 x i16> %z) {
%t = trunc <16 x i16> %z to <16 x i8>
diff --git a/test/CodeGen/X86/avx2-gather.ll b/test/CodeGen/X86/avx2-gather.ll
index a9ac025..91fa20b 100644
--- a/test/CodeGen/X86/avx2-gather.ll
+++ b/test/CodeGen/X86/avx2-gather.ll
@@ -32,3 +32,30 @@ define <2 x double> @test_x86_avx2_gather_d_pd(i8* %a1,
; CHECK: vgatherdpd
; CHECK: vmovapd
; CHECK: ret
+
+declare <8 x float> @llvm.x86.avx2.gather.d.ps.256(<8 x float>, i8*,
+ <8 x i32>, <8 x float>, i8) nounwind readonly
+
+define <8 x float> @test_x86_avx2_gather_d_ps_256(i8* %a1,
+ <8 x i32> %idx, <8 x float> %mask) {
+ %res = call <8 x float> @llvm.x86.avx2.gather.d.ps.256(<8 x float> undef,
+ i8* %a1, <8 x i32> %idx, <8 x float> %mask, i8 4) ;
+ ret <8 x float> %res
+}
+; CHECK-LABEL: @test_x86_avx2_gather_d_ps_256
+; CHECK: vgatherdps %ymm
+; CHECK: ret
+
+declare <4 x double> @llvm.x86.avx2.gather.d.pd.256(<4 x double>, i8*,
+ <4 x i32>, <4 x double>, i8) nounwind readonly
+
+define <4 x double> @test_x86_avx2_gather_d_pd_256(i8* %a1,
+ <4 x i32> %idx, <4 x double> %mask) {
+ %res = call <4 x double> @llvm.x86.avx2.gather.d.pd.256(<4 x double> undef,
+ i8* %a1, <4 x i32> %idx, <4 x double> %mask, i8 8) ;
+ ret <4 x double> %res
+}
+
+; CHECK-LABEL: test_x86_avx2_gather_d_pd_256
+; CHECK: vgatherdpd %ymm
+; CHECK: ret
diff --git a/test/CodeGen/X86/avx2-intrinsics-x86-upgrade.ll b/test/CodeGen/X86/avx2-intrinsics-x86-upgrade.ll
index ac2c73b..acc3098 100644
--- a/test/CodeGen/X86/avx2-intrinsics-x86-upgrade.ll
+++ b/test/CodeGen/X86/avx2-intrinsics-x86-upgrade.ll
@@ -31,3 +31,34 @@ define <16 x i16> @test_x86_avx2_mpsadbw(<32 x i8> %a0, <32 x i8> %a1) {
}
declare <16 x i16> @llvm.x86.avx2.mpsadbw(<32 x i8>, <32 x i8>, i32) nounwind readnone
+
+define <4 x i64> @test_x86_avx2_psll_dq_bs(<4 x i64> %a0) {
+ ; CHECK: vpslldq {{.*#+}} ymm0 = zero,zero,zero,zero,zero,zero,zero,ymm0[0,1,2,3,4,5,6,7,8],zero,zero,zero,zero,zero,zero,zero,ymm0[16,17,18,19,20,21,22,23,24]
+ %res = call <4 x i64> @llvm.x86.avx2.psll.dq.bs(<4 x i64> %a0, i32 7) ; <<4 x i64>> [#uses=1]
+ ret <4 x i64> %res
+}
+declare <4 x i64> @llvm.x86.avx2.psll.dq.bs(<4 x i64>, i32) nounwind readnone
+
+
+define <4 x i64> @test_x86_avx2_psrl_dq_bs(<4 x i64> %a0) {
+ ; CHECK: vpsrldq {{.*#+}} ymm0 = ymm0[7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,ymm0[23,24,25,26,27,28,29,30,31],zero,zero,zero,zero,zero,zero,zero
+ %res = call <4 x i64> @llvm.x86.avx2.psrl.dq.bs(<4 x i64> %a0, i32 7) ; <<4 x i64>> [#uses=1]
+ ret <4 x i64> %res
+}
+declare <4 x i64> @llvm.x86.avx2.psrl.dq.bs(<4 x i64>, i32) nounwind readnone
+
+
+define <4 x i64> @test_x86_avx2_psll_dq(<4 x i64> %a0) {
+ ; CHECK: vpslldq {{.*#+}} ymm0 = zero,ymm0[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14],zero,ymm0[16,17,18,19,20,21,22,23,24,25,26,27,28,29,30]
+ %res = call <4 x i64> @llvm.x86.avx2.psll.dq(<4 x i64> %a0, i32 8) ; <<4 x i64>> [#uses=1]
+ ret <4 x i64> %res
+}
+declare <4 x i64> @llvm.x86.avx2.psll.dq(<4 x i64>, i32) nounwind readnone
+
+
+define <4 x i64> @test_x86_avx2_psrl_dq(<4 x i64> %a0) {
+ ; CHECK: vpsrldq {{.*#+}} ymm0 = ymm0[1,2,3,4,5,6,7,8,9,10,11,12,13,14,15],zero,ymm0[17,18,19,20,21,22,23,24,25,26,27,28,29,30,31],zero
+ %res = call <4 x i64> @llvm.x86.avx2.psrl.dq(<4 x i64> %a0, i32 8) ; <<4 x i64>> [#uses=1]
+ ret <4 x i64> %res
+}
+declare <4 x i64> @llvm.x86.avx2.psrl.dq(<4 x i64>, i32) nounwind readnone
diff --git a/test/CodeGen/X86/avx2-intrinsics-x86.ll b/test/CodeGen/X86/avx2-intrinsics-x86.ll
index 84b22b7..da0f17a 100644
--- a/test/CodeGen/X86/avx2-intrinsics-x86.ll
+++ b/test/CodeGen/X86/avx2-intrinsics-x86.ll
@@ -158,22 +158,6 @@ define <8 x i32> @test_x86_avx2_psll_d(<8 x i32> %a0, <4 x i32> %a1) {
ret <8 x i32> %res
}
declare <8 x i32> @llvm.x86.avx2.psll.d(<8 x i32>, <4 x i32>) nounwind readnone
-
-
-define <4 x i64> @test_x86_avx2_psll_dq(<4 x i64> %a0) {
- ; CHECK: vpslldq {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31]
- %res = call <4 x i64> @llvm.x86.avx2.psll.dq(<4 x i64> %a0, i32 7) ; <<4 x i64>> [#uses=1]
- ret <4 x i64> %res
-}
-declare <4 x i64> @llvm.x86.avx2.psll.dq(<4 x i64>, i32) nounwind readnone
-
-
-define <4 x i64> @test_x86_avx2_psll_dq_bs(<4 x i64> %a0) {
- ; CHECK: vpslldq {{.*#+}} ymm0 = zero,zero,zero,zero,zero,zero,zero,ymm0[0,1,2,3,4,5,6,7,8],zero,zero,zero,zero,zero,zero,zero,ymm0[16,17,18,19,20,21,22,23,24]
- %res = call <4 x i64> @llvm.x86.avx2.psll.dq.bs(<4 x i64> %a0, i32 7) ; <<4 x i64>> [#uses=1]
- ret <4 x i64> %res
-}
-declare <4 x i64> @llvm.x86.avx2.psll.dq.bs(<4 x i64>, i32) nounwind readnone
define <4 x i64> @test_x86_avx2_psll_q(<4 x i64> %a0, <2 x i64> %a1) {
@@ -254,22 +238,6 @@ define <8 x i32> @test_x86_avx2_psrl_d(<8 x i32> %a0, <4 x i32> %a1) {
ret <8 x i32> %res
}
declare <8 x i32> @llvm.x86.avx2.psrl.d(<8 x i32>, <4 x i32>) nounwind readnone
-
-
-define <4 x i64> @test_x86_avx2_psrl_dq(<4 x i64> %a0) {
- ; CHECK: vpsrldq {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31]
- %res = call <4 x i64> @llvm.x86.avx2.psrl.dq(<4 x i64> %a0, i32 7) ; <<4 x i64>> [#uses=1]
- ret <4 x i64> %res
-}
-declare <4 x i64> @llvm.x86.avx2.psrl.dq(<4 x i64>, i32) nounwind readnone
-
-
-define <4 x i64> @test_x86_avx2_psrl_dq_bs(<4 x i64> %a0) {
- ; CHECK: vpsrldq {{.*#+}} ymm0 = ymm0[7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,ymm0[23,24,25,26,27,28,29,30,31],zero,zero,zero,zero,zero,zero,zero
- %res = call <4 x i64> @llvm.x86.avx2.psrl.dq.bs(<4 x i64> %a0, i32 7) ; <<4 x i64>> [#uses=1]
- ret <4 x i64> %res
-}
-declare <4 x i64> @llvm.x86.avx2.psrl.dq.bs(<4 x i64>, i32) nounwind readnone
define <4 x i64> @test_x86_avx2_psrl_q(<4 x i64> %a0, <2 x i64> %a1) {
diff --git a/test/CodeGen/X86/avx2-nontemporal.ll b/test/CodeGen/X86/avx2-nontemporal.ll
index 0768aae..4d28a97 100644
--- a/test/CodeGen/X86/avx2-nontemporal.ll
+++ b/test/CodeGen/X86/avx2-nontemporal.ll
@@ -19,4 +19,4 @@ define void @f(<8 x float> %A, i8* %B, <4 x double> %C, i32 %D, <4 x i64> %E) {
ret void
}
-!0 = metadata !{i32 1}
+!0 = !{i32 1}
diff --git a/test/CodeGen/X86/avx2-pmovxrm-intrinsics.ll b/test/CodeGen/X86/avx2-pmovxrm-intrinsics.ll
new file mode 100644
index 0000000..7301b7c
--- /dev/null
+++ b/test/CodeGen/X86/avx2-pmovxrm-intrinsics.ll
@@ -0,0 +1,110 @@
+; RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=x86-64 -mattr=+avx2 | FileCheck %s
+
+define <16 x i16> @test_llvm_x86_avx2_pmovsxbw(<16 x i8>* %a) {
+; CHECK-LABEL: test_llvm_x86_avx2_pmovsxbw
+; CHECK: vpmovsxbw (%rdi), %ymm0
+ %1 = load <16 x i8>* %a, align 1
+ %2 = call <16 x i16> @llvm.x86.avx2.pmovsxbw(<16 x i8> %1)
+ ret <16 x i16> %2
+}
+
+define <8 x i32> @test_llvm_x86_avx2_pmovsxbd(<16 x i8>* %a) {
+; CHECK-LABEL: test_llvm_x86_avx2_pmovsxbd
+; CHECK: vpmovsxbd (%rdi), %ymm0
+ %1 = load <16 x i8>* %a, align 1
+ %2 = call <8 x i32> @llvm.x86.avx2.pmovsxbd(<16 x i8> %1)
+ ret <8 x i32> %2
+}
+
+define <4 x i64> @test_llvm_x86_avx2_pmovsxbq(<16 x i8>* %a) {
+; CHECK-LABEL: test_llvm_x86_avx2_pmovsxbq
+; CHECK: vpmovsxbq (%rdi), %ymm0
+ %1 = load <16 x i8>* %a, align 1
+ %2 = call <4 x i64> @llvm.x86.avx2.pmovsxbq(<16 x i8> %1)
+ ret <4 x i64> %2
+}
+
+define <8 x i32> @test_llvm_x86_avx2_pmovsxwd(<8 x i16>* %a) {
+; CHECK-LABEL: test_llvm_x86_avx2_pmovsxwd
+; CHECK: vpmovsxwd (%rdi), %ymm0
+ %1 = load <8 x i16>* %a, align 1
+ %2 = call <8 x i32> @llvm.x86.avx2.pmovsxwd(<8 x i16> %1)
+ ret <8 x i32> %2
+}
+
+define <4 x i64> @test_llvm_x86_avx2_pmovsxwq(<8 x i16>* %a) {
+; CHECK-LABEL: test_llvm_x86_avx2_pmovsxwq
+; CHECK: vpmovsxwq (%rdi), %ymm0
+ %1 = load <8 x i16>* %a, align 1
+ %2 = call <4 x i64> @llvm.x86.avx2.pmovsxwq(<8 x i16> %1)
+ ret <4 x i64> %2
+}
+
+define <4 x i64> @test_llvm_x86_avx2_pmovsxdq(<4 x i32>* %a) {
+; CHECK-LABEL: test_llvm_x86_avx2_pmovsxdq
+; CHECK: vpmovsxdq (%rdi), %ymm0
+ %1 = load <4 x i32>* %a, align 1
+ %2 = call <4 x i64> @llvm.x86.avx2.pmovsxdq(<4 x i32> %1)
+ ret <4 x i64> %2
+}
+
+define <16 x i16> @test_llvm_x86_avx2_pmovzxbw(<16 x i8>* %a) {
+; CHECK-LABEL: test_llvm_x86_avx2_pmovzxbw
+; CHECK: vpmovzxbw (%rdi), %ymm0
+ %1 = load <16 x i8>* %a, align 1
+ %2 = call <16 x i16> @llvm.x86.avx2.pmovzxbw(<16 x i8> %1)
+ ret <16 x i16> %2
+}
+
+define <8 x i32> @test_llvm_x86_avx2_pmovzxbd(<16 x i8>* %a) {
+; CHECK-LABEL: test_llvm_x86_avx2_pmovzxbd
+; CHECK: vpmovzxbd (%rdi), %ymm0
+ %1 = load <16 x i8>* %a, align 1
+ %2 = call <8 x i32> @llvm.x86.avx2.pmovzxbd(<16 x i8> %1)
+ ret <8 x i32> %2
+}
+
+define <4 x i64> @test_llvm_x86_avx2_pmovzxbq(<16 x i8>* %a) {
+; CHECK-LABEL: test_llvm_x86_avx2_pmovzxbq
+; CHECK: vpmovzxbq (%rdi), %ymm0
+ %1 = load <16 x i8>* %a, align 1
+ %2 = call <4 x i64> @llvm.x86.avx2.pmovzxbq(<16 x i8> %1)
+ ret <4 x i64> %2
+}
+
+define <8 x i32> @test_llvm_x86_avx2_pmovzxwd(<8 x i16>* %a) {
+; CHECK-LABEL: test_llvm_x86_avx2_pmovzxwd
+; CHECK: vpmovzxwd (%rdi), %ymm0
+ %1 = load <8 x i16>* %a, align 1
+ %2 = call <8 x i32> @llvm.x86.avx2.pmovzxwd(<8 x i16> %1)
+ ret <8 x i32> %2
+}
+
+define <4 x i64> @test_llvm_x86_avx2_pmovzxwq(<8 x i16>* %a) {
+; CHECK-LABEL: test_llvm_x86_avx2_pmovzxwq
+; CHECK: vpmovzxwq (%rdi), %ymm0
+ %1 = load <8 x i16>* %a, align 1
+ %2 = call <4 x i64> @llvm.x86.avx2.pmovzxwq(<8 x i16> %1)
+ ret <4 x i64> %2
+}
+
+define <4 x i64> @test_llvm_x86_avx2_pmovzxdq(<4 x i32>* %a) {
+; CHECK-LABEL: test_llvm_x86_avx2_pmovzxdq
+; CHECK: vpmovzxdq (%rdi), %ymm0
+ %1 = load <4 x i32>* %a, align 1
+ %2 = call <4 x i64> @llvm.x86.avx2.pmovzxdq(<4 x i32> %1)
+ ret <4 x i64> %2
+}
+
+declare <4 x i64> @llvm.x86.avx2.pmovzxdq(<4 x i32>)
+declare <4 x i64> @llvm.x86.avx2.pmovzxwq(<8 x i16>)
+declare <8 x i32> @llvm.x86.avx2.pmovzxwd(<8 x i16>)
+declare <4 x i64> @llvm.x86.avx2.pmovzxbq(<16 x i8>)
+declare <8 x i32> @llvm.x86.avx2.pmovzxbd(<16 x i8>)
+declare <16 x i16> @llvm.x86.avx2.pmovzxbw(<16 x i8>)
+declare <4 x i64> @llvm.x86.avx2.pmovsxdq(<4 x i32>)
+declare <4 x i64> @llvm.x86.avx2.pmovsxwq(<8 x i16>)
+declare <8 x i32> @llvm.x86.avx2.pmovsxwd(<8 x i16>)
+declare <4 x i64> @llvm.x86.avx2.pmovsxbq(<16 x i8>)
+declare <8 x i32> @llvm.x86.avx2.pmovsxbd(<16 x i8>)
+declare <16 x i16> @llvm.x86.avx2.pmovsxbw(<16 x i8>)
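
NOTE: every test in this new file loads with align 1 and feeds the result straight into a pmovsx/pmovzx intrinsic; the CHECK lines require the load to be folded into the extending move's memory operand. Without folding, llc would roughly materialize the load separately (registers illustrative):

  ; unfolded form the tests guard against
  vmovdqu (%rdi), %xmm0
  vpmovsxbw %xmm0, %ymm0
  ; folded form the CHECK lines demand
  vpmovsxbw (%rdi), %ymm0
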
diff --git a/test/CodeGen/X86/avx2-vbroadcast.ll b/test/CodeGen/X86/avx2-vbroadcast.ll
index 924c06e..83100a8 100644
--- a/test/CodeGen/X86/avx2-vbroadcast.ll
+++ b/test/CodeGen/X86/avx2-vbroadcast.ll
@@ -317,7 +317,7 @@ define <4 x double> @_inreg4xdouble(<4 x double> %a) {
}
;CHECK-LABEL: _inreg2xdouble:
-;CHECK: vunpcklpd
+;CHECK: vmovddup
;CHECK: ret
define <2 x double> @_inreg2xdouble(<2 x double> %a) {
%b = shufflevector <2 x double> %a, <2 x double> undef, <2 x i32> zeroinitializer
diff --git a/test/CodeGen/X86/avx512-arith.ll b/test/CodeGen/X86/avx512-arith.ll
index c43da9c..94b0821 100644
--- a/test/CodeGen/X86/avx512-arith.ll
+++ b/test/CodeGen/X86/avx512-arith.ll
@@ -462,3 +462,193 @@ entry:
%d = and <8 x i64> %p1, %c
ret <8 x i64>%d
}
+
+; CHECK-LABEL: test_mask_vaddps
+; CHECK: vaddps {{%zmm[0-9]{1,2}, %zmm[0-9]{1,2}, %zmm[0-9]{1,2} {%k[1-7]}}}
+; CHECK: ret
+define <16 x float> @test_mask_vaddps(<16 x float> %dst, <16 x float> %i,
+ <16 x float> %j, <16 x i32> %mask1)
+ nounwind readnone {
+ %mask = icmp ne <16 x i32> %mask1, zeroinitializer
+ %x = fadd <16 x float> %i, %j
+ %r = select <16 x i1> %mask, <16 x float> %x, <16 x float> %dst
+ ret <16 x float> %r
+}
+
+; CHECK-LABEL: test_mask_vmulps
+; CHECK: vmulps {{%zmm[0-9]{1,2}, %zmm[0-9]{1,2}, %zmm[0-9]{1,2} {%k[1-7]}}}
+; CHECK: ret
+define <16 x float> @test_mask_vmulps(<16 x float> %dst, <16 x float> %i,
+ <16 x float> %j, <16 x i32> %mask1)
+ nounwind readnone {
+ %mask = icmp ne <16 x i32> %mask1, zeroinitializer
+ %x = fmul <16 x float> %i, %j
+ %r = select <16 x i1> %mask, <16 x float> %x, <16 x float> %dst
+ ret <16 x float> %r
+}
+
+; CHECK-LABEL: test_mask_vminps
+; CHECK: vminps {{%zmm[0-9]{1,2}, %zmm[0-9]{1,2}, %zmm[0-9]{1,2} {%k[1-7]}}}
+; CHECK: ret
+define <16 x float> @test_mask_vminps(<16 x float> %dst, <16 x float> %i,
+ <16 x float> %j, <16 x i32> %mask1)
+ nounwind readnone {
+ %mask = icmp ne <16 x i32> %mask1, zeroinitializer
+ %cmp_res = fcmp olt <16 x float> %i, %j
+ %min = select <16 x i1> %cmp_res, <16 x float> %i, <16 x float> %j
+ %r = select <16 x i1> %mask, <16 x float> %min, <16 x float> %dst
+ ret <16 x float> %r
+}
+
+; CHECK-LABEL: test_mask_vminpd
+; CHECK: vminpd {{%zmm[0-9]{1,2}, %zmm[0-9]{1,2}, %zmm[0-9]{1,2} {%k[1-7]}}}
+; CHECK: ret
+define <8 x double> @test_mask_vminpd(<8 x double> %dst, <8 x double> %i,
+ <8 x double> %j, <8 x i32> %mask1)
+ nounwind readnone {
+ %mask = icmp ne <8 x i32> %mask1, zeroinitializer
+ %cmp_res = fcmp olt <8 x double> %i, %j
+ %min = select <8 x i1> %cmp_res, <8 x double> %i, <8 x double> %j
+ %r = select <8 x i1> %mask, <8 x double> %min, <8 x double> %dst
+ ret <8 x double> %r
+}
+
+; CHECK-LABEL: test_mask_vmaxps
+; CHECK: vmaxps {{%zmm[0-9]{1,2}, %zmm[0-9]{1,2}, %zmm[0-9]{1,2} {%k[1-7]}}}
+; CHECK: ret
+define <16 x float> @test_mask_vmaxps(<16 x float> %dst, <16 x float> %i,
+ <16 x float> %j, <16 x i32> %mask1)
+ nounwind readnone {
+ %mask = icmp ne <16 x i32> %mask1, zeroinitializer
+ %cmp_res = fcmp ogt <16 x float> %i, %j
+ %max = select <16 x i1> %cmp_res, <16 x float> %i, <16 x float> %j
+ %r = select <16 x i1> %mask, <16 x float> %max, <16 x float> %dst
+ ret <16 x float> %r
+}
+
+; CHECK-LABEL: test_mask_vmaxpd
+; CHECK: vmaxpd {{%zmm[0-9]{1,2}, %zmm[0-9]{1,2}, %zmm[0-9]{1,2} {%k[1-7]}}}
+; CHECK: ret
+define <8 x double> @test_mask_vmaxpd(<8 x double> %dst, <8 x double> %i,
+ <8 x double> %j, <8 x i32> %mask1)
+ nounwind readnone {
+ %mask = icmp ne <8 x i32> %mask1, zeroinitializer
+ %cmp_res = fcmp ogt <8 x double> %i, %j
+ %max = select <8 x i1> %cmp_res, <8 x double> %i, <8 x double> %j
+ %r = select <8 x i1> %mask, <8 x double> %max, <8 x double> %dst
+ ret <8 x double> %r
+}
+
+; CHECK-LABEL: test_mask_vsubps
+; CHECK: vsubps {{%zmm[0-9]{1,2}, %zmm[0-9]{1,2}, %zmm[0-9]{1,2} {%k[1-7]}}}
+; CHECK: ret
+define <16 x float> @test_mask_vsubps(<16 x float> %dst, <16 x float> %i,
+ <16 x float> %j, <16 x i32> %mask1)
+ nounwind readnone {
+ %mask = icmp ne <16 x i32> %mask1, zeroinitializer
+ %x = fsub <16 x float> %i, %j
+ %r = select <16 x i1> %mask, <16 x float> %x, <16 x float> %dst
+ ret <16 x float> %r
+}
+
+; CHECK-LABEL: test_mask_vdivps
+; CHECK: vdivps {{%zmm[0-9]{1,2}, %zmm[0-9]{1,2}, %zmm[0-9]{1,2} {%k[1-7]}}}
+; CHECK: ret
+define <16 x float> @test_mask_vdivps(<16 x float> %dst, <16 x float> %i,
+ <16 x float> %j, <16 x i32> %mask1)
+ nounwind readnone {
+ %mask = icmp ne <16 x i32> %mask1, zeroinitializer
+ %x = fdiv <16 x float> %i, %j
+ %r = select <16 x i1> %mask, <16 x float> %x, <16 x float> %dst
+ ret <16 x float> %r
+}
+
+; CHECK-LABEL: test_mask_vaddpd
+; CHECK: vaddpd {{%zmm[0-9]{1,2}, %zmm[0-9]{1,2}, %zmm[0-9]{1,2} {%k[1-7]}}}
+; CHECK: ret
+define <8 x double> @test_mask_vaddpd(<8 x double> %dst, <8 x double> %i,
+ <8 x double> %j, <8 x i64> %mask1)
+ nounwind readnone {
+ %mask = icmp ne <8 x i64> %mask1, zeroinitializer
+ %x = fadd <8 x double> %i, %j
+ %r = select <8 x i1> %mask, <8 x double> %x, <8 x double> %dst
+ ret <8 x double> %r
+}
+
+; CHECK-LABEL: test_maskz_vaddpd
+; CHECK: vaddpd {{%zmm[0-9]{1,2}, %zmm[0-9]{1,2}, %zmm[0-9]{1,2} {%k[1-7]} {z}}}
+; CHECK: ret
+define <8 x double> @test_maskz_vaddpd(<8 x double> %i, <8 x double> %j,
+ <8 x i64> %mask1) nounwind readnone {
+ %mask = icmp ne <8 x i64> %mask1, zeroinitializer
+ %x = fadd <8 x double> %i, %j
+ %r = select <8 x i1> %mask, <8 x double> %x, <8 x double> zeroinitializer
+ ret <8 x double> %r
+}
+
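+; The "fold" variants below check that the load from %j is folded into the
+; arithmetic instruction as a memory operand, with the write-mask still applied.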
+; CHECK-LABEL: test_mask_fold_vaddpd
+; CHECK: vaddpd (%rdi), {{.*%zmm[0-9]{1,2}, %zmm[0-9]{1,2} {%k[1-7]}.*}}
+; CHECK: ret
+define <8 x double> @test_mask_fold_vaddpd(<8 x double> %dst, <8 x double> %i,
+ <8 x double>* %j, <8 x i64> %mask1)
+ nounwind {
+ %mask = icmp ne <8 x i64> %mask1, zeroinitializer
+ %tmp = load <8 x double>* %j, align 8
+ %x = fadd <8 x double> %i, %tmp
+ %r = select <8 x i1> %mask, <8 x double> %x, <8 x double> %dst
+ ret <8 x double> %r
+}
+
+; CHECK-LABEL: test_maskz_fold_vaddpd
+; CHECK: vaddpd (%rdi), {{.*%zmm[0-9]{1,2}, %zmm[0-9]{1,2} {%k[1-7]} {z}.*}}
+; CHECK: ret
+define <8 x double> @test_maskz_fold_vaddpd(<8 x double> %i, <8 x double>* %j,
+ <8 x i64> %mask1) nounwind {
+ %mask = icmp ne <8 x i64> %mask1, zeroinitializer
+ %tmp = load <8 x double>* %j, align 8
+ %x = fadd <8 x double> %i, %tmp
+ %r = select <8 x i1> %mask, <8 x double> %x, <8 x double> zeroinitializer
+ ret <8 x double> %r
+}
+
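+; The broadcast variants check that a splatted scalar load is folded into an
+; embedded-broadcast memory operand, (%rdi){1to8}.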
+; CHECK-LABEL: test_broadcast_vaddpd
+; CHECK: vaddpd (%rdi){1to8}, %zmm{{.*}}
+; CHECK: ret
+define <8 x double> @test_broadcast_vaddpd(<8 x double> %i, double* %j) nounwind {
+ %tmp = load double* %j
+ %b = insertelement <8 x double> undef, double %tmp, i32 0
+ %c = shufflevector <8 x double> %b, <8 x double> undef,
+ <8 x i32> zeroinitializer
+ %x = fadd <8 x double> %c, %i
+ ret <8 x double> %x
+}
+
+; CHECK-LABEL: test_mask_broadcast_vaddpd
+; CHECK: vaddpd (%rdi){1to8}, %zmm{{.*{%k[1-7]}.*}}
+; CHECK: ret
+define <8 x double> @test_mask_broadcast_vaddpd(<8 x double> %dst, <8 x double> %i,
+ double* %j, <8 x i64> %mask1) nounwind {
+ %mask = icmp ne <8 x i64> %mask1, zeroinitializer
+ %tmp = load double* %j
+ %b = insertelement <8 x double> undef, double %tmp, i32 0
+ %c = shufflevector <8 x double> %b, <8 x double> undef,
+ <8 x i32> zeroinitializer
+ %x = fadd <8 x double> %c, %i
+ %r = select <8 x i1> %mask, <8 x double> %x, <8 x double> %i
+ ret <8 x double> %r
+}
+
+; CHECK-LABEL: test_maskz_broadcast_vaddpd
+; CHECK: vaddpd (%rdi){1to8}, %zmm{{.*{%k[1-7]} {z}.*}}
+; CHECK: ret
+define <8 x double> @test_maskz_broadcast_vaddpd(<8 x double> %i, double* %j,
+ <8 x i64> %mask1) nounwind {
+ %mask = icmp ne <8 x i64> %mask1, zeroinitializer
+ %tmp = load double* %j
+ %b = insertelement <8 x double> undef, double %tmp, i32 0
+ %c = shufflevector <8 x double> %b, <8 x double> undef,
+ <8 x i32> zeroinitializer
+ %x = fadd <8 x double> %c, %i
+ %r = select <8 x i1> %mask, <8 x double> %x, <8 x double> zeroinitializer
+ ret <8 x double> %r
+}
diff --git a/test/CodeGen/X86/avx512-fma-intrinsics.ll b/test/CodeGen/X86/avx512-fma-intrinsics.ll
index 366d324..9b82c88 100644
--- a/test/CodeGen/X86/avx512-fma-intrinsics.ll
+++ b/test/CodeGen/X86/avx512-fma-intrinsics.ll
@@ -8,6 +8,13 @@ define <16 x float> @test_x86_vfmadd_ps_z(<16 x float> %a0, <16 x float> %a1, <1
}
declare <16 x float> @llvm.x86.fma.mask.vfmadd.ps.512(<16 x float>, <16 x float>, <16 x float>, i16, i32) nounwind readnone
+define <16 x float> @test_mask_vfmadd_ps(<16 x float> %a0, <16 x float> %a1, <16 x float> %a2, i16 %mask) {
+ ; CHECK-LABEL: test_mask_vfmadd_ps
+ ; CHECK: vfmadd213ps %zmm
+ %res = call <16 x float> @llvm.x86.fma.mask.vfmadd.ps.512(<16 x float> %a0, <16 x float> %a1, <16 x float> %a2, i16 %mask, i32 4) nounwind
+ ret <16 x float> %res
+}
+
define <8 x double> @test_x86_vfmadd_pd_z(<8 x double> %a0, <8 x double> %a1, <8 x double> %a2) {
; CHECK-LABEL: test_x86_vfmadd_pd_z
; CHECK: vfmadd213pd %zmm
@@ -32,6 +39,13 @@ define <16 x float> @test_x86_vfmsubps_z(<16 x float> %a0, <16 x float> %a1, <16
}
declare <16 x float> @llvm.x86.fma.mask.vfmsub.ps.512(<16 x float>, <16 x float>, <16 x float>, i16, i32) nounwind readnone
+define <16 x float> @test_mask_vfmsub_ps(<16 x float> %a0, <16 x float> %a1, <16 x float> %a2, i16 %mask) {
+ ; CHECK-LABEL: test_mask_vfmsub_ps
+ ; CHECK: vfmsub213ps %zmm
+ %res = call <16 x float> @llvm.x86.fma.mask.vfmsub.ps.512(<16 x float> %a0, <16 x float> %a1, <16 x float> %a2, i16 %mask, i32 4) nounwind
+ ret <16 x float> %res
+}
+
define <8 x double> @test_x86_vfmsubpd_z(<8 x double> %a0, <8 x double> %a1, <8 x double> %a2) {
; CHECK-LABEL: test_x86_vfmsubpd_z
; CHECK: vfmsub213pd %zmm
@@ -40,6 +54,13 @@ define <8 x double> @test_x86_vfmsubpd_z(<8 x double> %a0, <8 x double> %a1, <8
}
declare <8 x double> @llvm.x86.fma.mask.vfmsub.pd.512(<8 x double>, <8 x double>, <8 x double>, i8, i32) nounwind readnone
+define <8 x double> @test_mask_vfmsub_pd(<8 x double> %a0, <8 x double> %a1, <8 x double> %a2, i8 %mask) {
+ ; CHECK-LABEL: test_mask_vfmsub_pd
+ ; CHECK: vfmsub213pd %zmm
+ %res = call <8 x double> @llvm.x86.fma.mask.vfmsub.pd.512(<8 x double> %a0, <8 x double> %a1, <8 x double> %a2, i8 %mask, i32 4) nounwind
+ ret <8 x double> %res
+}
+
define <16 x float> @test_x86_vfnmadd_ps_z(<16 x float> %a0, <16 x float> %a1, <16 x float> %a2) {
; CHECK-LABEL: test_x86_vfnmadd_ps_z
; CHECK: vfnmadd213ps %zmm
@@ -48,6 +69,13 @@ define <16 x float> @test_x86_vfnmadd_ps_z(<16 x float> %a0, <16 x float> %a1, <
}
declare <16 x float> @llvm.x86.fma.mask.vfnmadd.ps.512(<16 x float>, <16 x float>, <16 x float>, i16, i32) nounwind readnone
+define <16 x float> @test_mask_vfnmadd_ps(<16 x float> %a0, <16 x float> %a1, <16 x float> %a2, i16 %mask) {
+ ; CHECK-LABEL: test_mask_vfnmadd_ps
+ ; CHECK: vfnmadd213ps %zmm
+ %res = call <16 x float> @llvm.x86.fma.mask.vfnmadd.ps.512(<16 x float> %a0, <16 x float> %a1, <16 x float> %a2, i16 %mask, i32 4) nounwind
+ ret <16 x float> %res
+}
+
define <8 x double> @test_x86_vfnmadd_pd_z(<8 x double> %a0, <8 x double> %a1, <8 x double> %a2) {
; CHECK-LABEL: test_x86_vfnmadd_pd_z
; CHECK: vfnmadd213pd %zmm
@@ -56,6 +84,13 @@ define <8 x double> @test_x86_vfnmadd_pd_z(<8 x double> %a0, <8 x double> %a1, <
}
declare <8 x double> @llvm.x86.fma.mask.vfnmadd.pd.512(<8 x double>, <8 x double>, <8 x double>, i8, i32) nounwind readnone
+define <8 x double> @test_mask_vfnmadd_pd(<8 x double> %a0, <8 x double> %a1, <8 x double> %a2, i8 %mask) {
+ ; CHECK-LABEL: test_mask_vfnmadd_pd
+ ; CHECK: vfnmadd213pd %zmm
+ %res = call <8 x double> @llvm.x86.fma.mask.vfnmadd.pd.512(<8 x double> %a0, <8 x double> %a1, <8 x double> %a2, i8 %mask, i32 4) nounwind
+ ret <8 x double> %res
+}
+
define <16 x float> @test_x86_vfnmsubps_z(<16 x float> %a0, <16 x float> %a1, <16 x float> %a2) {
; CHECK-LABEL: test_x86_vfnmsubps_z
; CHECK: vfnmsub213ps %zmm
@@ -64,6 +99,13 @@ define <16 x float> @test_x86_vfnmsubps_z(<16 x float> %a0, <16 x float> %a1, <1
}
declare <16 x float> @llvm.x86.fma.mask.vfnmsub.ps.512(<16 x float>, <16 x float>, <16 x float>, i16, i32) nounwind readnone
+define <16 x float> @test_mask_vfnmsub_ps(<16 x float> %a0, <16 x float> %a1, <16 x float> %a2, i16 %mask) {
+ ; CHECK-LABEL: test_mask_vfnmsub_ps
+ ; CHECK: vfnmsub213ps %zmm
+ %res = call <16 x float> @llvm.x86.fma.mask.vfnmsub.ps.512(<16 x float> %a0, <16 x float> %a1, <16 x float> %a2, i16 %mask, i32 4) nounwind
+ ret <16 x float> %res
+}
+
define <8 x double> @test_x86_vfnmsubpd_z(<8 x double> %a0, <8 x double> %a1, <8 x double> %a2) {
; CHECK-LABEL: test_x86_vfnmsubpd_z
; CHECK: vfnmsub213pd %zmm
@@ -72,6 +114,13 @@ define <8 x double> @test_x86_vfnmsubpd_z(<8 x double> %a0, <8 x double> %a1, <8
}
declare <8 x double> @llvm.x86.fma.mask.vfnmsub.pd.512(<8 x double>, <8 x double>, <8 x double>, i8, i32) nounwind readnone
+define <8 x double> @test_mask_vfnmsub_pd(<8 x double> %a0, <8 x double> %a1, <8 x double> %a2, i8 %mask) {
+ ; CHECK-LABEL: test_mask_vfnmsub_pd
+ ; CHECK: vfnmsub213pd %zmm
+ %res = call <8 x double> @llvm.x86.fma.mask.vfnmsub.pd.512(<8 x double> %a0, <8 x double> %a1, <8 x double> %a2, i8 %mask, i32 4) nounwind
+ ret <8 x double> %res
+}
+
define <16 x float> @test_x86_vfmaddsubps_z(<16 x float> %a0, <16 x float> %a1, <16 x float> %a2) {
; CHECK-LABEL: test_x86_vfmaddsubps_z
; CHECK: vfmaddsub213ps %zmm
@@ -96,6 +145,13 @@ define <8 x double> @test_x86_vfmaddsubpd_z(<8 x double> %a0, <8 x double> %a1,
}
declare <8 x double> @llvm.x86.fma.mask.vfmaddsub.pd.512(<8 x double>, <8 x double>, <8 x double>, i8, i32) nounwind readnone
+define <8 x double> @test_mask_vfmaddsub_pd(<8 x double> %a0, <8 x double> %a1, <8 x double> %a2, i8 %mask) {
+ ; CHECK-LABEL: test_mask_vfmaddsub_pd
+ ; CHECK: vfmaddsub213pd %zmm
+ %res = call <8 x double> @llvm.x86.fma.mask.vfmaddsub.pd.512(<8 x double> %a0, <8 x double> %a1, <8 x double> %a2, i8 %mask, i32 4) nounwind
+ ret <8 x double> %res
+}
+
define <16 x float> @test_x86_vfmsubaddps_z(<16 x float> %a0, <16 x float> %a1, <16 x float> %a2) {
; CHECK-LABEL: test_x86_vfmsubaddps_z
; CHECK: vfmsubadd213ps %zmm
@@ -104,6 +160,13 @@ define <16 x float> @test_x86_vfmsubaddps_z(<16 x float> %a0, <16 x float> %a1,
}
declare <16 x float> @llvm.x86.fma.mask.vfmsubadd.ps.512(<16 x float>, <16 x float>, <16 x float>, i16, i32) nounwind readnone
+define <16 x float> @test_mask_vfmsubadd_ps(<16 x float> %a0, <16 x float> %a1, <16 x float> %a2, i16 %mask) {
+ ; CHECK-LABEL: test_mask_vfmsubadd_ps
+ ; CHECK: vfmsubadd213ps %zmm
+ %res = call <16 x float> @llvm.x86.fma.mask.vfmsubadd.ps.512(<16 x float> %a0, <16 x float> %a1, <16 x float> %a2, i16 %mask, i32 4) nounwind
+ ret <16 x float> %res
+}
+
define <8 x double> @test_x86_vfmsubaddpd_z(<8 x double> %a0, <8 x double> %a1, <8 x double> %a2) {
; CHECK-LABEL: test_x86_vfmsubaddpd_z
; CHECK: vfmsubadd213pd %zmm
@@ -111,3 +174,291 @@ define <8 x double> @test_x86_vfmsubaddpd_z(<8 x double> %a0, <8 x double> %a1,
ret <8 x double> %res
}
declare <8 x double> @llvm.x86.fma.mask.vfmsubadd.pd.512(<8 x double>, <8 x double>, <8 x double>, i8, i32) nounwind readnone
+
+define <8 x double> @test_mask_vfmsubadd_pd(<8 x double> %a0, <8 x double> %a1, <8 x double> %a2, i8 %mask) {
+ ; CHECK-LABEL: test_mask_vfmsubadd_pd
+ ; CHECK: vfmsubadd213pd %zmm
+ %res = call <8 x double> @llvm.x86.fma.mask.vfmsubadd.pd.512(<8 x double> %a0, <8 x double> %a1, <8 x double> %a2, i8 %mask, i32 4) nounwind
+ ret <8 x double> %res
+}
+
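+; In the rounding-mode tests below, the trailing i32 operand selects the mode:
+; 0 = {rn-sae}, 1 = {rd-sae}, 2 = {ru-sae}, 3 = {rz-sae}, 4 = current (MXCSR).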
+define <16 x float> @test_mask_round_vfmadd512_ps_rrb_rne(<16 x float> %a0, <16 x float> %a1, <16 x float> %a2, i16 %mask) {
+ ; CHECK-LABEL: test_mask_round_vfmadd512_ps_rrb_rne
+ ; CHECK: vfmadd213ps {rn-sae}, %zmm2, %zmm1, %zmm0 {%k1} ## encoding: [0x62,0xf2,0x75,0x19,0xa8,0xc2]
+ %res = call <16 x float> @llvm.x86.fma.mask.vfmadd.ps.512(<16 x float> %a0, <16 x float> %a1, <16 x float> %a2, i16 %mask, i32 0) nounwind
+ ret <16 x float> %res
+}
+
+define <16 x float> @test_mask_round_vfmadd512_ps_rrb_rtn(<16 x float> %a0, <16 x float> %a1, <16 x float> %a2, i16 %mask) {
+ ; CHECK-LABEL: test_mask_round_vfmadd512_ps_rrb_rtn
+ ; CHECK: vfmadd213ps {rd-sae}, %zmm2, %zmm1, %zmm0 {%k1} ## encoding: [0x62,0xf2,0x75,0x39,0xa8,0xc2]
+ %res = call <16 x float> @llvm.x86.fma.mask.vfmadd.ps.512(<16 x float> %a0, <16 x float> %a1, <16 x float> %a2, i16 %mask, i32 1) nounwind
+ ret <16 x float> %res
+}
+
+define <16 x float> @test_mask_round_vfmadd512_ps_rrb_rtp(<16 x float> %a0, <16 x float> %a1, <16 x float> %a2, i16 %mask) {
+ ; CHECK-LABEL: test_mask_round_vfmadd512_ps_rrb_rtp
+ ; CHECK: vfmadd213ps {ru-sae}, %zmm2, %zmm1, %zmm0 {%k1} ## encoding: [0x62,0xf2,0x75,0x59,0xa8,0xc2]
+ %res = call <16 x float> @llvm.x86.fma.mask.vfmadd.ps.512(<16 x float> %a0, <16 x float> %a1, <16 x float> %a2, i16 %mask, i32 2) nounwind
+ ret <16 x float> %res
+}
+
+define <16 x float> @test_mask_round_vfmadd512_ps_rrb_rtz(<16 x float> %a0, <16 x float> %a1, <16 x float> %a2, i16 %mask) {
+ ; CHECK-LABEL: test_mask_round_vfmadd512_ps_rrb_rtz
+ ; CHECK: vfmadd213ps {rz-sae}, %zmm2, %zmm1, %zmm0 {%k1} ## encoding: [0x62,0xf2,0x75,0x79,0xa8,0xc2]
+ %res = call <16 x float> @llvm.x86.fma.mask.vfmadd.ps.512(<16 x float> %a0, <16 x float> %a1, <16 x float> %a2, i16 %mask, i32 3) nounwind
+ ret <16 x float> %res
+}
+
+define <16 x float> @test_mask_round_vfmadd512_ps_rrb_current(<16 x float> %a0, <16 x float> %a1, <16 x float> %a2, i16 %mask) {
+ ; CHECK-LABEL: test_mask_round_vfmadd512_ps_rrb_current
+ ; CHECK: vfmadd213ps %zmm2, %zmm1, %zmm0 {%k1} ## encoding: [0x62,0xf2,0x75,0x49,0xa8,0xc2]
+ %res = call <16 x float> @llvm.x86.fma.mask.vfmadd.ps.512(<16 x float> %a0, <16 x float> %a1, <16 x float> %a2, i16 %mask, i32 4) nounwind
+ ret <16 x float> %res
+}
+
+define <16 x float> @test_mask_round_vfmadd512_ps_rrbz_rne(<16 x float> %a0, <16 x float> %a1, <16 x float> %a2) {
+ ; CHECK-LABEL: test_mask_round_vfmadd512_ps_rrbz_rne
+ ; CHECK: vfmadd213ps {rn-sae}, %zmm2, %zmm1, %zmm0 ## encoding: [0x62,0xf2,0x75,0x18,0xa8,0xc2]
+ %res = call <16 x float> @llvm.x86.fma.mask.vfmadd.ps.512(<16 x float> %a0, <16 x float> %a1, <16 x float> %a2, i16 -1, i32 0) nounwind
+ ret <16 x float> %res
+}
+
+define <16 x float> @test_mask_round_vfmadd512_ps_rrbz_rtn(<16 x float> %a0, <16 x float> %a1, <16 x float> %a2) {
+ ; CHECK-LABEL: test_mask_round_vfmadd512_ps_rrbz_rtn
+ ; CHECK: vfmadd213ps {rd-sae}, %zmm2, %zmm1, %zmm0 ## encoding: [0x62,0xf2,0x75,0x38,0xa8,0xc2]
+ %res = call <16 x float> @llvm.x86.fma.mask.vfmadd.ps.512(<16 x float> %a0, <16 x float> %a1, <16 x float> %a2, i16 -1, i32 1) nounwind
+ ret <16 x float> %res
+}
+
+define <16 x float> @test_mask_round_vfmadd512_ps_rrbz_rtp(<16 x float> %a0, <16 x float> %a1, <16 x float> %a2) {
+ ; CHECK-LABEL: test_mask_round_vfmadd512_ps_rrbz_rtp
+ ; CHECK: vfmadd213ps {ru-sae}, %zmm2, %zmm1, %zmm0 ## encoding: [0x62,0xf2,0x75,0x58,0xa8,0xc2]
+ %res = call <16 x float> @llvm.x86.fma.mask.vfmadd.ps.512(<16 x float> %a0, <16 x float> %a1, <16 x float> %a2, i16 -1, i32 2) nounwind
+ ret <16 x float> %res
+}
+
+define <16 x float> @test_mask_round_vfmadd512_ps_rrbz_rtz(<16 x float> %a0, <16 x float> %a1, <16 x float> %a2) {
+ ; CHECK-LABEL: test_mask_round_vfmadd512_ps_rrbz_rtz
+ ; CHECK: vfmadd213ps {rz-sae}, %zmm2, %zmm1, %zmm0 ## encoding: [0x62,0xf2,0x75,0x78,0xa8,0xc2]
+ %res = call <16 x float> @llvm.x86.fma.mask.vfmadd.ps.512(<16 x float> %a0, <16 x float> %a1, <16 x float> %a2, i16 -1, i32 3) nounwind
+ ret <16 x float> %res
+}
+
+define <16 x float> @test_mask_round_vfmadd512_ps_rrbz_current(<16 x float> %a0, <16 x float> %a1, <16 x float> %a2) {
+ ; CHECK-LABEL: test_mask_round_vfmadd512_ps_rrbz_current
+ ; CHECK: vfmadd213ps %zmm2, %zmm1, %zmm0 ## encoding: [0x62,0xf2,0x75,0x48,0xa8,0xc2]
+ %res = call <16 x float> @llvm.x86.fma.mask.vfmadd.ps.512(<16 x float> %a0, <16 x float> %a1, <16 x float> %a2, i16 -1, i32 4) nounwind
+ ret <16 x float> %res
+}
+
+define <16 x float> @test_mask_round_vfmsub512_ps_rrb_rne(<16 x float> %a0, <16 x float> %a1, <16 x float> %a2, i16 %mask) {
+ ; CHECK-LABEL: test_mask_round_vfmsub512_ps_rrb_rne
+ ; CHECK: vfmsub213ps {rn-sae}, %zmm2, %zmm1, %zmm0 {%k1} ## encoding: [0x62,0xf2,0x75,0x19,0xaa,0xc2]
+ %res = call <16 x float> @llvm.x86.fma.mask.vfmsub.ps.512(<16 x float> %a0, <16 x float> %a1, <16 x float> %a2, i16 %mask, i32 0) nounwind
+ ret <16 x float> %res
+}
+
+define <16 x float> @test_mask_round_vfmsub512_ps_rrb_rtn(<16 x float> %a0, <16 x float> %a1, <16 x float> %a2, i16 %mask) {
+ ; CHECK-LABEL: test_mask_round_vfmsub512_ps_rrb_rtn
+ ; CHECK: vfmsub213ps {rd-sae}, %zmm2, %zmm1, %zmm0 {%k1} ## encoding: [0x62,0xf2,0x75,0x39,0xaa,0xc2]
+ %res = call <16 x float> @llvm.x86.fma.mask.vfmsub.ps.512(<16 x float> %a0, <16 x float> %a1, <16 x float> %a2, i16 %mask, i32 1) nounwind
+ ret <16 x float> %res
+}
+
+define <16 x float> @test_mask_round_vfmsub512_ps_rrb_rtp(<16 x float> %a0, <16 x float> %a1, <16 x float> %a2, i16 %mask) {
+ ; CHECK-LABEL: test_mask_round_vfmsub512_ps_rrb_rtp
+ ; CHECK: vfmsub213ps {ru-sae}, %zmm2, %zmm1, %zmm0 {%k1} ## encoding: [0x62,0xf2,0x75,0x59,0xaa,0xc2]
+ %res = call <16 x float> @llvm.x86.fma.mask.vfmsub.ps.512(<16 x float> %a0, <16 x float> %a1, <16 x float> %a2, i16 %mask, i32 2) nounwind
+ ret <16 x float> %res
+}
+
+define <16 x float> @test_mask_round_vfmsub512_ps_rrb_rtz(<16 x float> %a0, <16 x float> %a1, <16 x float> %a2, i16 %mask) {
+ ; CHECK-LABEL: test_mask_round_vfmsub512_ps_rrb_rtz
+ ; CHECK: vfmsub213ps {rz-sae}, %zmm2, %zmm1, %zmm0 {%k1} ## encoding: [0x62,0xf2,0x75,0x79,0xaa,0xc2]
+ %res = call <16 x float> @llvm.x86.fma.mask.vfmsub.ps.512(<16 x float> %a0, <16 x float> %a1, <16 x float> %a2, i16 %mask, i32 3) nounwind
+ ret <16 x float> %res
+}
+
+define <16 x float> @test_mask_round_vfmsub512_ps_rrb_current(<16 x float> %a0, <16 x float> %a1, <16 x float> %a2, i16 %mask) {
+ ; CHECK-LABEL: test_mask_round_vfmsub512_ps_rrb_current
+ ; CHECK: vfmsub213ps %zmm2, %zmm1, %zmm0 {%k1} ## encoding: [0x62,0xf2,0x75,0x49,0xaa,0xc2]
+ %res = call <16 x float> @llvm.x86.fma.mask.vfmsub.ps.512(<16 x float> %a0, <16 x float> %a1, <16 x float> %a2, i16 %mask, i32 4) nounwind
+ ret <16 x float> %res
+}
+
+define <16 x float> @test_mask_round_vfmsub512_ps_rrbz_rne(<16 x float> %a0, <16 x float> %a1, <16 x float> %a2) {
+ ; CHECK-LABEL: test_mask_round_vfmsub512_ps_rrbz_rne
+ ; CHECK: vfmsub213ps {rn-sae}, %zmm2, %zmm1, %zmm0 ## encoding: [0x62,0xf2,0x75,0x18,0xaa,0xc2]
+ %res = call <16 x float> @llvm.x86.fma.mask.vfmsub.ps.512(<16 x float> %a0, <16 x float> %a1, <16 x float> %a2, i16 -1, i32 0) nounwind
+ ret <16 x float> %res
+}
+
+define <16 x float> @test_mask_round_vfmsub512_ps_rrbz_rtn(<16 x float> %a0, <16 x float> %a1, <16 x float> %a2) {
+ ; CHECK-LABEL: test_mask_round_vfmsub512_ps_rrbz_rtn
+ ; CHECK: vfmsub213ps {rd-sae}, %zmm2, %zmm1, %zmm0 ## encoding: [0x62,0xf2,0x75,0x38,0xaa,0xc2]
+ %res = call <16 x float> @llvm.x86.fma.mask.vfmsub.ps.512(<16 x float> %a0, <16 x float> %a1, <16 x float> %a2, i16 -1, i32 1) nounwind
+ ret <16 x float> %res
+}
+
+define <16 x float> @test_mask_round_vfmsub512_ps_rrbz_rtp(<16 x float> %a0, <16 x float> %a1, <16 x float> %a2) {
+ ; CHECK-LABEL: test_mask_round_vfmsub512_ps_rrbz_rtp
+ ; CHECK: vfmsub213ps {ru-sae}, %zmm2, %zmm1, %zmm0 ## encoding: [0x62,0xf2,0x75,0x58,0xaa,0xc2]
+ %res = call <16 x float> @llvm.x86.fma.mask.vfmsub.ps.512(<16 x float> %a0, <16 x float> %a1, <16 x float> %a2, i16 -1, i32 2) nounwind
+ ret <16 x float> %res
+}
+
+define <16 x float> @test_mask_round_vfmsub512_ps_rrbz_rtz(<16 x float> %a0, <16 x float> %a1, <16 x float> %a2) {
+ ; CHECK-LABEL: test_mask_round_vfmsub512_ps_rrbz_rtz
+ ; CHECK: vfmsub213ps {rz-sae}, %zmm2, %zmm1, %zmm0 ## encoding: [0x62,0xf2,0x75,0x78,0xaa,0xc2]
+ %res = call <16 x float> @llvm.x86.fma.mask.vfmsub.ps.512(<16 x float> %a0, <16 x float> %a1, <16 x float> %a2, i16 -1, i32 3) nounwind
+ ret <16 x float> %res
+}
+
+define <16 x float> @test_mask_round_vfmsub512_ps_rrbz_current(<16 x float> %a0, <16 x float> %a1, <16 x float> %a2) {
+ ; CHECK-LABEL: test_mask_round_vfmsub512_ps_rrbz_current
+ ; CHECK: vfmsub213ps %zmm2, %zmm1, %zmm0 ## encoding: [0x62,0xf2,0x75,0x48,0xaa,0xc2]
+ %res = call <16 x float> @llvm.x86.fma.mask.vfmsub.ps.512(<16 x float> %a0, <16 x float> %a1, <16 x float> %a2, i16 -1, i32 4) nounwind
+ ret <16 x float> %res
+}
+
+define <8 x double> @test_mask_round_vfmadd512_pd_rrb_rne(<8 x double> %a0, <8 x double> %a1, <8 x double> %a2, i8 %mask) {
+ ; CHECK-LABEL: test_mask_round_vfmadd512_pd_rrb_rne
+ ; CHECK: vfmadd213pd {rn-sae}, %zmm2, %zmm1, %zmm0 {%k1} ## encoding: [0x62,0xf2,0xf5,0x19,0xa8,0xc2]
+ %res = call <8 x double> @llvm.x86.fma.mask.vfmadd.pd.512(<8 x double> %a0, <8 x double> %a1, <8 x double> %a2, i8 %mask, i32 0) nounwind
+ ret <8 x double> %res
+}
+
+define <8 x double> @test_mask_round_vfmadd512_pd_rrb_rtn(<8 x double> %a0, <8 x double> %a1, <8 x double> %a2, i8 %mask) {
+ ; CHECK-LABEL: test_mask_round_vfmadd512_pd_rrb_rtn
+ ; CHECK: vfmadd213pd {rd-sae}, %zmm2, %zmm1, %zmm0 {%k1} ## encoding: [0x62,0xf2,0xf5,0x39,0xa8,0xc2]
+ %res = call <8 x double> @llvm.x86.fma.mask.vfmadd.pd.512(<8 x double> %a0, <8 x double> %a1, <8 x double> %a2, i8 %mask, i32 1) nounwind
+ ret <8 x double> %res
+}
+
+define <8 x double> @test_mask_round_vfmadd512_pd_rrb_rtp(<8 x double> %a0, <8 x double> %a1, <8 x double> %a2, i8 %mask) {
+ ; CHECK-LABEL: test_mask_round_vfmadd512_pd_rrb_rtp
+ ; CHECK: vfmadd213pd {ru-sae}, %zmm2, %zmm1, %zmm0 {%k1} ## encoding: [0x62,0xf2,0xf5,0x59,0xa8,0xc2]
+ %res = call <8 x double> @llvm.x86.fma.mask.vfmadd.pd.512(<8 x double> %a0, <8 x double> %a1, <8 x double> %a2, i8 %mask, i32 2) nounwind
+ ret <8 x double> %res
+}
+
+define <8 x double> @test_mask_round_vfmadd512_pd_rrb_rtz(<8 x double> %a0, <8 x double> %a1, <8 x double> %a2, i8 %mask) {
+ ; CHECK-LABEL: test_mask_round_vfmadd512_pd_rrb_rtz
+ ; CHECK: vfmadd213pd {rz-sae}, %zmm2, %zmm1, %zmm0 {%k1} ## encoding: [0x62,0xf2,0xf5,0x79,0xa8,0xc2]
+ %res = call <8 x double> @llvm.x86.fma.mask.vfmadd.pd.512(<8 x double> %a0, <8 x double> %a1, <8 x double> %a2, i8 %mask, i32 3) nounwind
+ ret <8 x double> %res
+}
+
+define <8 x double> @test_mask_round_vfmadd512_pd_rrb_current(<8 x double> %a0, <8 x double> %a1, <8 x double> %a2, i8 %mask) {
+ ; CHECK-LABEL: test_mask_round_vfmadd512_pd_rrb_current
+ ; CHECK: vfmadd213pd %zmm2, %zmm1, %zmm0 {%k1} ## encoding: [0x62,0xf2,0xf5,0x49,0xa8,0xc2]
+ %res = call <8 x double> @llvm.x86.fma.mask.vfmadd.pd.512(<8 x double> %a0, <8 x double> %a1, <8 x double> %a2, i8 %mask, i32 4) nounwind
+ ret <8 x double> %res
+}
+
+define <8 x double> @test_mask_round_vfmadd512_pd_rrbz_rne(<8 x double> %a0, <8 x double> %a1, <8 x double> %a2) {
+ ; CHECK-LABEL: test_mask_round_vfmadd512_pd_rrbz_rne
+ ; CHECK: vfmadd213pd {rn-sae}, %zmm2, %zmm1, %zmm0 ## encoding: [0x62,0xf2,0xf5,0x18,0xa8,0xc2]
+ %res = call <8 x double> @llvm.x86.fma.mask.vfmadd.pd.512(<8 x double> %a0, <8 x double> %a1, <8 x double> %a2, i8 -1, i32 0) nounwind
+ ret <8 x double> %res
+}
+
+define <8 x double> @test_mask_round_vfmadd512_pd_rrbz_rtn(<8 x double> %a0, <8 x double> %a1, <8 x double> %a2) {
+ ; CHECK-LABEL: test_mask_round_vfmadd512_pd_rrbz_rtn
+ ; CHECK: vfmadd213pd {rd-sae}, %zmm2, %zmm1, %zmm0 ## encoding: [0x62,0xf2,0xf5,0x38,0xa8,0xc2]
+ %res = call <8 x double> @llvm.x86.fma.mask.vfmadd.pd.512(<8 x double> %a0, <8 x double> %a1, <8 x double> %a2, i8 -1, i32 1) nounwind
+ ret <8 x double> %res
+}
+
+define <8 x double> @test_mask_round_vfmadd512_pd_rrbz_rtp(<8 x double> %a0, <8 x double> %a1, <8 x double> %a2) {
+ ; CHECK-LABEL: test_mask_round_vfmadd512_pd_rrbz_rtp
+ ; CHECK: vfmadd213pd {ru-sae}, %zmm2, %zmm1, %zmm0 ## encoding: [0x62,0xf2,0xf5,0x58,0xa8,0xc2]
+ %res = call <8 x double> @llvm.x86.fma.mask.vfmadd.pd.512(<8 x double> %a0, <8 x double> %a1, <8 x double> %a2, i8 -1, i32 2) nounwind
+ ret <8 x double> %res
+}
+
+define <8 x double> @test_mask_round_vfmadd512_pd_rrbz_rtz(<8 x double> %a0, <8 x double> %a1, <8 x double> %a2) {
+ ; CHECK-LABEL: test_mask_round_vfmadd512_pd_rrbz_rtz
+ ; CHECK: vfmadd213pd {rz-sae}, %zmm2, %zmm1, %zmm0 ## encoding: [0x62,0xf2,0xf5,0x78,0xa8,0xc2]
+ %res = call <8 x double> @llvm.x86.fma.mask.vfmadd.pd.512(<8 x double> %a0, <8 x double> %a1, <8 x double> %a2, i8 -1, i32 3) nounwind
+ ret <8 x double> %res
+}
+
+define <8 x double> @test_mask_round_vfmadd512_pd_rrbz_current(<8 x double> %a0, <8 x double> %a1, <8 x double> %a2) {
+ ; CHECK-LABEL: test_mask_round_vfmadd512_pd_rrbz_current
+ ; CHECK: vfmadd213pd %zmm2, %zmm1, %zmm0 ## encoding: [0x62,0xf2,0xf5,0x48,0xa8,0xc2]
+ %res = call <8 x double> @llvm.x86.fma.mask.vfmadd.pd.512(<8 x double> %a0, <8 x double> %a1, <8 x double> %a2, i8 -1, i32 4) nounwind
+ ret <8 x double> %res
+}
+
+
+define <8 x double> @test_mask_round_vfnmsub512_pd_rrb_rne(<8 x double> %a0, <8 x double> %a1, <8 x double> %a2, i8 %mask) {
+ ; CHECK-LABEL: test_mask_round_vfnmsub512_pd_rrb_rne
+ ; CHECK: vfnmsub213pd {rn-sae}, %zmm2, %zmm1, %zmm0 {%k1} ## encoding: [0x62,0xf2,0xf5,0x19,0xae,0xc2]
+ %res = call <8 x double> @llvm.x86.fma.mask.vfnmsub.pd.512(<8 x double> %a0, <8 x double> %a1, <8 x double> %a2, i8 %mask, i32 0) nounwind
+ ret <8 x double> %res
+}
+
+define <8 x double> @test_mask_round_vfnmsub512_pd_rrb_rtn(<8 x double> %a0, <8 x double> %a1, <8 x double> %a2, i8 %mask) {
+ ; CHECK-LABEL: test_mask_round_vfnmsub512_pd_rrb_rtn
+ ; CHECK: vfnmsub213pd {rd-sae}, %zmm2, %zmm1, %zmm0 {%k1} ## encoding: [0x62,0xf2,0xf5,0x39,0xae,0xc2]
+ %res = call <8 x double> @llvm.x86.fma.mask.vfnmsub.pd.512(<8 x double> %a0, <8 x double> %a1, <8 x double> %a2, i8 %mask, i32 1) nounwind
+ ret <8 x double> %res
+}
+
+define <8 x double> @test_mask_round_vfnmsub512_pd_rrb_rtp(<8 x double> %a0, <8 x double> %a1, <8 x double> %a2, i8 %mask) {
+ ; CHECK-LABEL: test_mask_round_vfnmsub512_pd_rrb_rtp
+ ; CHECK: vfnmsub213pd {ru-sae}, %zmm2, %zmm1, %zmm0 {%k1} ## encoding: [0x62,0xf2,0xf5,0x59,0xae,0xc2]
+ %res = call <8 x double> @llvm.x86.fma.mask.vfnmsub.pd.512(<8 x double> %a0, <8 x double> %a1, <8 x double> %a2, i8 %mask, i32 2) nounwind
+ ret <8 x double> %res
+}
+
+define <8 x double> @test_mask_round_vfnmsub512_pd_rrb_rtz(<8 x double> %a0, <8 x double> %a1, <8 x double> %a2, i8 %mask) {
+ ; CHECK-LABEL: test_mask_round_vfnmsub512_pd_rrb_rtz
+ ; CHECK: vfnmsub213pd {rz-sae}, %zmm2, %zmm1, %zmm0 {%k1} ## encoding: [0x62,0xf2,0xf5,0x79,0xae,0xc2]
+ %res = call <8 x double> @llvm.x86.fma.mask.vfnmsub.pd.512(<8 x double> %a0, <8 x double> %a1, <8 x double> %a2, i8 %mask, i32 3) nounwind
+ ret <8 x double> %res
+}
+
+define <8 x double> @test_mask_round_vfnmsub512_pd_rrb_current(<8 x double> %a0, <8 x double> %a1, <8 x double> %a2, i8 %mask) {
+ ; CHECK-LABEL: test_mask_round_vfnmsub512_pd_rrb_current
+ ; CHECK: vfnmsub213pd %zmm2, %zmm1, %zmm0 {%k1} ## encoding: [0x62,0xf2,0xf5,0x49,0xae,0xc2]
+ %res = call <8 x double> @llvm.x86.fma.mask.vfnmsub.pd.512(<8 x double> %a0, <8 x double> %a1, <8 x double> %a2, i8 %mask, i32 4) nounwind
+ ret <8 x double> %res
+}
+
+define <8 x double> @test_mask_round_vfnmsub512_pd_rrbz_rne(<8 x double> %a0, <8 x double> %a1, <8 x double> %a2) {
+ ; CHECK-LABEL: test_mask_round_vfnmsub512_pd_rrbz_rne
+ ; CHECK: vfnmsub213pd {rn-sae}, %zmm2, %zmm1, %zmm0 ## encoding: [0x62,0xf2,0xf5,0x18,0xae,0xc2]
+ %res = call <8 x double> @llvm.x86.fma.mask.vfnmsub.pd.512(<8 x double> %a0, <8 x double> %a1, <8 x double> %a2, i8 -1, i32 0) nounwind
+ ret <8 x double> %res
+}
+
+define <8 x double> @test_mask_round_vfnmsub512_pd_rrbz_rtn(<8 x double> %a0, <8 x double> %a1, <8 x double> %a2) {
+ ; CHECK-LABEL: test_mask_round_vfnmsub512_pd_rrbz_rtn
+ ; CHECK: vfnmsub213pd {rd-sae}, %zmm2, %zmm1, %zmm0 ## encoding: [0x62,0xf2,0xf5,0x38,0xae,0xc2]
+ %res = call <8 x double> @llvm.x86.fma.mask.vfnmsub.pd.512(<8 x double> %a0, <8 x double> %a1, <8 x double> %a2, i8 -1, i32 1) nounwind
+ ret <8 x double> %res
+}
+
+define <8 x double> @test_mask_round_vfnmsub512_pd_rrbz_rtp(<8 x double> %a0, <8 x double> %a1, <8 x double> %a2) {
+ ; CHECK-LABEL: test_mask_round_vfnmsub512_pd_rrbz_rtp
+ ; CHECK: vfnmsub213pd {ru-sae}, %zmm2, %zmm1, %zmm0 ## encoding: [0x62,0xf2,0xf5,0x58,0xae,0xc2]
+ %res = call <8 x double> @llvm.x86.fma.mask.vfnmsub.pd.512(<8 x double> %a0, <8 x double> %a1, <8 x double> %a2, i8 -1, i32 2) nounwind
+ ret <8 x double> %res
+}
+
+define <8 x double> @test_mask_round_vfnmsub512_pd_rrbz_rtz(<8 x double> %a0, <8 x double> %a1, <8 x double> %a2) {
+ ; CHECK-LABEL: test_mask_round_vfnmsub512_pd_rrbz_rtz
+ ; CHECK: vfnmsub213pd {rz-sae}, %zmm2, %zmm1, %zmm0 ## encoding: [0x62,0xf2,0xf5,0x78,0xae,0xc2]
+ %res = call <8 x double> @llvm.x86.fma.mask.vfnmsub.pd.512(<8 x double> %a0, <8 x double> %a1, <8 x double> %a2, i8 -1, i32 3) nounwind
+ ret <8 x double> %res
+}
+
+define <8 x double> @test_mask_round_vfnmsub512_pd_rrbz_current(<8 x double> %a0, <8 x double> %a1, <8 x double> %a2) {
+ ; CHECK-LABEL: test_mask_round_vfnmsub512_pd_rrbz_current
+ ; CHECK: vfnmsub213pd %zmm2, %zmm1, %zmm0 ## encoding: [0x62,0xf2,0xf5,0x48,0xae,0xc2]
+ %res = call <8 x double> @llvm.x86.fma.mask.vfnmsub.pd.512(<8 x double> %a0, <8 x double> %a1, <8 x double> %a2, i8 -1, i32 4) nounwind
+ ret <8 x double> %res
+}
diff --git a/test/CodeGen/X86/avx512-i1test.ll b/test/CodeGen/X86/avx512-i1test.ll
new file mode 100755
index 0000000..a237738
--- /dev/null
+++ b/test/CodeGen/X86/avx512-i1test.ll
@@ -0,0 +1,45 @@
+; RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=knl | FileCheck %s
+
+; ModuleID = 'bugpoint-reduced-simplified.bc'
+target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
+target triple = "x86_64-unknown-linux-gnu"
+
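+; Bugpoint-reduced CFG: checks that the i1 phi/select values below are lowered
+; to scalar code (testb) rather than AVX-512 mask-register operations.
+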
+; CHECK-LABEL: func
+; CHECK: testb
+; CHECK: testb
+define void @func() {
+bb1:
+ br i1 undef, label %L_10, label %L_10
+
+L_10: ; preds = %bb1, %bb1
+ br i1 undef, label %L_30, label %bb56
+
+bb56: ; preds = %L_10
+ br label %bb33
+
+bb33: ; preds = %bb51, %bb56
+ %r111 = load i64* undef, align 8
+ br i1 undef, label %bb51, label %bb35
+
+bb35: ; preds = %bb33
+ br i1 undef, label %L_19, label %bb37
+
+bb37: ; preds = %bb35
+ %r128 = and i64 %r111, 576460752303423488
+ %phitmp = icmp eq i64 %r128, 0
+ br label %L_19
+
+L_19: ; preds = %bb37, %bb35
+ %"$V_S25.0" = phi i1 [ %phitmp, %bb37 ], [ true, %bb35 ]
+ br i1 undef, label %bb51, label %bb42
+
+bb42: ; preds = %L_19
+ %r136 = select i1 %"$V_S25.0", i32* undef, i32* undef
+ br label %bb51
+
+bb51: ; preds = %bb42, %L_19, %bb33
+ br i1 false, label %L_30, label %bb33
+
+L_30: ; preds = %bb51, %L_10
+ ret void
+}
diff --git a/test/CodeGen/X86/avx512-insert-extract.ll b/test/CodeGen/X86/avx512-insert-extract.ll
index eba895e..d6b887e 100644
--- a/test/CodeGen/X86/avx512-insert-extract.ll
+++ b/test/CodeGen/X86/avx512-insert-extract.ll
@@ -106,7 +106,7 @@ define i32 @test10(<16 x i32> %x, i32 %ind) nounwind {
;CHECK: vpcmpltud
;CHECK: kshiftlw $11
;CHECK: kshiftrw $15
-;CHECK: kortestw
+;CHECK: testb
;CHECK: je
;CHECK: ret
;CHECK: ret
@@ -125,7 +125,7 @@ define <16 x i32> @test11(<16 x i32>%a, <16 x i32>%b) {
;CHECK: vpcmpgtq
;CHECK: kshiftlw $15
;CHECK: kshiftrw $15
-;CHECK: kortestw
+;CHECK: testb
;CHECK: ret
define i64 @test12(<16 x i64>%a, <16 x i64>%b, i64 %a1, i64 %b1) {
@@ -150,9 +150,12 @@ define i16 @test13(i32 %a, i32 %b) {
;CHECK-LABEL: test14
;CHECK: vpcmpgtq
-;CHECK: kshiftlw $11
-;CHECK: kshiftrw $15
-;CHECK: kortestw
+;KNL: kshiftlw $11
+;KNL: kshiftrw $15
+;KNL: testb
+;SKX: kshiftlb $3
+;SKX: kshiftrb $7
+;SKX: testb
;CHECK: ret
define i64 @test14(<8 x i64>%a, <8 x i64>%b, i64 %a1, i64 %b1) {
@@ -188,9 +191,11 @@ define i16 @test16(i1 *%addr, i16 %a) {
}
;CHECK-LABEL: test17
-;CHECK: kshiftlw
-;CHECK: kshiftrw
+;KNL: kshiftlw
+;KNL: kshiftrw
;KNL: korw
+;SKX: kshiftlb
+;SKX: kshiftrb
;SKX: korb
;CHECK: ret
define i8 @test17(i1 *%addr, i8 %a) {
diff --git a/test/CodeGen/X86/avx512-intel-ocl.ll b/test/CodeGen/X86/avx512-intel-ocl.ll
new file mode 100644
index 0000000..3f2691b
--- /dev/null
+++ b/test/CodeGen/X86/avx512-intel-ocl.ll
@@ -0,0 +1,105 @@
+; RUN: llc < %s -mtriple=i686-apple-darwin -mcpu=knl | FileCheck -check-prefix=X32 %s
+; RUN: llc < %s -mtriple=i386-pc-win32 -mcpu=knl | FileCheck -check-prefix=X32 %s
+; RUN: llc < %s -mtriple=x86_64-win32 -mcpu=knl | FileCheck -check-prefix=WIN64 %s
+; RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=knl | FileCheck -check-prefix=X64 %s
+
+declare <16 x float> @func_float16_ptr(<16 x float>, <16 x float> *)
+declare <16 x float> @func_float16(<16 x float>, <16 x float>)
+declare i32 @func_int(i32, i32)
+
+; WIN64-LABEL: testf16_inp
+; WIN64: vaddps {{.*}}, {{%zmm[0-1]}}
+; WIN64: leaq {{.*}}(%rsp), %rcx
+; WIN64: call
+; WIN64: ret
+
+; X32-LABEL: testf16_inp
+; X32: vaddps {{.*}}, {{%zmm[0-1]}}
+; X32: movl %eax, (%esp)
+; X32: call
+; X32: ret
+
+; X64-LABEL: testf16_inp
+; X64: vaddps {{.*}}, {{%zmm[0-1]}}
+; X64: leaq {{.*}}(%rsp), %rdi
+; X64: call
+; X64: ret
+
+; test calling conventions - input parameters
+define <16 x float> @testf16_inp(<16 x float> %a, <16 x float> %b) nounwind {
+ %y = alloca <16 x float>, align 16
+ %x = fadd <16 x float> %a, %b
+ %1 = call intel_ocl_bicc <16 x float> @func_float16_ptr(<16 x float> %x, <16 x float>* %y)
+ %2 = load <16 x float>* %y, align 16
+ %3 = fadd <16 x float> %2, %1
+ ret <16 x float> %3
+}
+
+; test calling conventions - preserved registers
+
+; zmm16 is preserved across the call
+; WIN64-LABEL: testf16_regs
+; WIN64: call
+; WIN64: vaddps %zmm16, %zmm0, %zmm0
+; WIN64: ret
+
+; zmm16 is preserved across the call
+; X64-LABEL: testf16_regs
+; X64: call
+; X64: vaddps %zmm16, %zmm0, %zmm0
+; X64: ret
+
+define <16 x float> @testf16_regs(<16 x float> %a, <16 x float> %b) nounwind {
+ %y = alloca <16 x float>, align 16
+ %x = fadd <16 x float> %a, %b
+ %1 = call intel_ocl_bicc <16 x float> @func_float16_ptr(<16 x float> %x, <16 x float>* %y)
+ %2 = load <16 x float>* %y, align 16
+ %3 = fadd <16 x float> %1, %b
+ %4 = fadd <16 x float> %2, %3
+ ret <16 x float> %4
+}
+
+; test calling conventions - prolog and epilog
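+; Under intel_ocl_bicc, zmm16-zmm31 (zmm6-zmm21 on Win64) and k4-k7 are
+; callee-saved, so they are spilled in the prolog and reloaded in the epilog.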
+; WIN64-LABEL: test_prolog_epilog
+; WIN64: vmovups %zmm21, {{.*(%rbp).*}} # 64-byte Spill
+; WIN64: vmovups %zmm6, {{.*(%rbp).*}} # 64-byte Spill
+; WIN64: call
+; WIN64: vmovups {{.*(%rbp).*}}, %zmm6 # 64-byte Reload
+; WIN64: vmovups {{.*(%rbp).*}}, %zmm21 # 64-byte Reload
+
+; X64-LABEL: test_prolog_epilog
+; X64: kmovw %k7, {{.*}}(%rsp) ## 8-byte Folded Spill
+; X64: kmovw %k6, {{.*}}(%rsp) ## 8-byte Folded Spill
+; X64: kmovw %k5, {{.*}}(%rsp) ## 8-byte Folded Spill
+; X64: kmovw %k4, {{.*}}(%rsp) ## 8-byte Folded Spill
+; X64: vmovups %zmm31, {{.*}}(%rsp) ## 64-byte Spill
+; X64: vmovups %zmm16, {{.*}}(%rsp) ## 64-byte Spill
+; X64: call
+; X64: vmovups {{.*}}(%rsp), %zmm16 ## 64-byte Reload
+; X64: vmovups {{.*}}(%rsp), %zmm31 ## 64-byte Reload
+define intel_ocl_bicc <16 x float> @test_prolog_epilog(<16 x float> %a, <16 x float> %b) nounwind {
+ %c = call <16 x float> @func_float16(<16 x float> %a, <16 x float> %b)
+ ret <16 x float> %c
+}
+
+
+declare <16 x float> @func_float16_mask(<16 x float>, <16 x i1>)
+
+; X64-LABEL: testf16_inp_mask
+; X64: kmovw %edi, %k1
+; X64: call
+define <16 x float> @testf16_inp_mask(<16 x float> %a, i16 %mask) {
+ %imask = bitcast i16 %mask to <16 x i1>
+ %1 = call intel_ocl_bicc <16 x float> @func_float16_mask(<16 x float> %a, <16 x i1> %imask)
+ ret <16 x float> %1
+}
+
+; X64-LABEL: test_prolog_epilog_with_mask
+; X64: kxorw %k{{.*}}, %k{{.*}}, %k1
+; X64: call
+define intel_ocl_bicc <16 x float> @test_prolog_epilog_with_mask(<16 x float> %a, <16 x i32> %x1, <16 x i32>%x2, <16 x i1> %mask) nounwind {
+ %cmp_res = icmp eq <16 x i32>%x1, %x2
+ %mask1 = xor <16 x i1> %cmp_res, %mask
+ %c = call intel_ocl_bicc <16 x float> @func_float16_mask(<16 x float> %a, <16 x i1>%mask1)
+ ret <16 x float> %c
+}
\ No newline at end of file
diff --git a/test/CodeGen/X86/avx512-intrinsics.ll b/test/CodeGen/X86/avx512-intrinsics.ll
index 691d1fb..b6375c1 100644
--- a/test/CodeGen/X86/avx512-intrinsics.ll
+++ b/test/CodeGen/X86/avx512-intrinsics.ll
@@ -5,7 +5,7 @@ declare i32 @llvm.x86.avx512.kortestz.w(i16, i16) nounwind readnone
; CHECK: kortestw
; CHECK: sete
define i32 @test_kortestz(i16 %a0, i16 %a1) {
- %res = call i32 @llvm.x86.avx512.kortestz.w(i16 %a0, i16 %a1)
+ %res = call i32 @llvm.x86.avx512.kortestz.w(i16 %a0, i16 %a1)
ret i32 %res
}
@@ -14,7 +14,7 @@ declare i32 @llvm.x86.avx512.kortestc.w(i16, i16) nounwind readnone
; CHECK: kortestw
; CHECK: sbbl
define i32 @test_kortestc(i16 %a0, i16 %a1) {
- %res = call i32 @llvm.x86.avx512.kortestc.w(i16 %a0, i16 %a1)
+ %res = call i32 @llvm.x86.avx512.kortestc.w(i16 %a0, i16 %a1)
ret i32 %res
}
@@ -277,7 +277,7 @@ define <8 x i64> @test_conflict_q(<8 x i64> %a) {
declare <8 x i64> @llvm.x86.avx512.mask.conflict.q.512(<8 x i64>, <8 x i64>, i8) nounwind readonly
define <16 x i32> @test_maskz_conflict_d(<16 x i32> %a, i16 %mask) {
- ; CHECK: vpconflictd
+ ; CHECK: vpconflictd
%res = call <16 x i32> @llvm.x86.avx512.mask.conflict.d.512(<16 x i32> %a, <16 x i32> zeroinitializer, i16 %mask)
ret <16 x i32> %res
}
@@ -340,7 +340,7 @@ define <8 x i64> @test_ctlz_q(<8 x i64> %a) {
declare <8 x i64> @llvm.ctlz.v8i64(<8 x i64>, i1) nounwind readonly
define <16 x float> @test_x86_mask_blend_ps_512(i16 %a0, <16 x float> %a1, <16 x float> %a2) {
- ; CHECK: vblendmps
+ ; CHECK: vblendmps %zmm1, %zmm0
%res = call <16 x float> @llvm.x86.avx512.mask.blend.ps.512(<16 x float> %a1, <16 x float> %a2, i16 %a0) ; <<16 x float>> [#uses=1]
ret <16 x float> %res
}
@@ -348,7 +348,7 @@ define <16 x float> @test_x86_mask_blend_ps_512(i16 %a0, <16 x float> %a1, <16 x
declare <16 x float> @llvm.x86.avx512.mask.blend.ps.512(<16 x float>, <16 x float>, i16) nounwind readonly
define <8 x double> @test_x86_mask_blend_pd_512(i8 %a0, <8 x double> %a1, <8 x double> %a2) {
- ; CHECK: vblendmpd
+ ; CHECK: vblendmpd %zmm1, %zmm0
%res = call <8 x double> @llvm.x86.avx512.mask.blend.pd.512(<8 x double> %a1, <8 x double> %a2, i8 %a0) ; <<8 x double>> [#uses=1]
ret <8 x double> %res
}
@@ -382,7 +382,7 @@ declare <8 x i64> @llvm.x86.avx512.mask.blend.q.512(<8 x i64>, <8 x i64>, i8) no
ret <8 x i32>%res
}
declare <8 x i32> @llvm.x86.avx512.mask.cvtpd2udq.512(<8 x double>, <8 x i32>, i8, i32)
-
+
define <16 x i32> @test_cvtps2udq(<16 x float> %a) {
;CHECK: vcvtps2udq {rd-sae}{{.*}}encoding: [0x62,0xf1,0x7c,0x38,0x79,0xc0]
%res = call <16 x i32> @llvm.x86.avx512.mask.cvtps2udq.512(<16 x float> %a, <16 x i32>zeroinitializer, i16 -1, i32 1)
@@ -392,17 +392,17 @@ declare <8 x i64> @llvm.x86.avx512.mask.blend.q.512(<8 x i64>, <8 x i64>, i8) no
define i16 @test_cmpps(<16 x float> %a, <16 x float> %b) {
;CHECK: vcmpleps {sae}{{.*}}encoding: [0x62,0xf1,0x7c,0x18,0xc2,0xc1,0x02]
- %res = call i16 @llvm.x86.avx512.mask.cmp.ps.512(<16 x float> %a, <16 x float> %b, i32 2, i16 -1, i32 8)
+ %res = call i16 @llvm.x86.avx512.mask.cmp.ps.512(<16 x float> %a, <16 x float> %b, i8 2, i16 -1, i32 8)
ret i16 %res
}
- declare i16 @llvm.x86.avx512.mask.cmp.ps.512(<16 x float> , <16 x float> , i32, i16, i32)
+ declare i16 @llvm.x86.avx512.mask.cmp.ps.512(<16 x float> , <16 x float> , i8, i16, i32)
define i8 @test_cmppd(<8 x double> %a, <8 x double> %b) {
;CHECK: vcmpneqpd %zmm{{.*}}encoding: [0x62,0xf1,0xfd,0x48,0xc2,0xc1,0x04]
- %res = call i8 @llvm.x86.avx512.mask.cmp.pd.512(<8 x double> %a, <8 x double> %b, i32 4, i8 -1, i32 4)
+ %res = call i8 @llvm.x86.avx512.mask.cmp.pd.512(<8 x double> %a, <8 x double> %b, i8 4, i8 -1, i32 4)
ret i8 %res
}
- declare i8 @llvm.x86.avx512.mask.cmp.pd.512(<8 x double> , <8 x double> , i32, i8, i32)
+ declare i8 @llvm.x86.avx512.mask.cmp.pd.512(<8 x double> , <8 x double> , i8, i8, i32)
; cvt intrinsics
define <16 x float> @test_cvtdq2ps(<16 x i32> %a) {
@@ -551,7 +551,73 @@ define void @test_store2(<8 x double> %data, i8* %ptr, i8 %mask) {
ret void
}
-declare void @llvm.x86.avx512.mask.storeu.pd.512(i8*, <8 x double>, i8 )
+declare void @llvm.x86.avx512.mask.storeu.pd.512(i8*, <8 x double>, i8)
+
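+; Aligned masked load/store tests: vmovaps/vmovapd take a {%k1} write-mask
+; ({z} for zero-masking); an all-ones mask reduces to the unmasked move.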
+define void @test_mask_store_aligned_ps(<16 x float> %data, i8* %ptr, i16 %mask) {
+; CHECK-LABEL: test_mask_store_aligned_ps:
+; CHECK: ## BB#0:
+; CHECK-NEXT: kmovw %esi, %k1
+; CHECK-NEXT: vmovaps %zmm0, (%rdi) {%k1}
+; CHECK-NEXT: retq
+ call void @llvm.x86.avx512.mask.store.ps.512(i8* %ptr, <16 x float> %data, i16 %mask)
+ ret void
+}
+
+declare void @llvm.x86.avx512.mask.store.ps.512(i8*, <16 x float>, i16 )
+
+define void @test_mask_store_aligned_pd(<8 x double> %data, i8* %ptr, i8 %mask) {
+; CHECK-LABEL: test_mask_store_aligned_pd:
+; CHECK: ## BB#0:
+; CHECK-NEXT: kmovw %esi, %k1
+; CHECK-NEXT: vmovapd %zmm0, (%rdi) {%k1}
+; CHECK-NEXT: retq
+ call void @llvm.x86.avx512.mask.store.pd.512(i8* %ptr, <8 x double> %data, i8 %mask)
+ ret void
+}
+
+declare void @llvm.x86.avx512.mask.store.pd.512(i8*, <8 x double>, i8)
+
+define <16 x float> @test_maskz_load_aligned_ps(<16 x float> %data, i8* %ptr, i16 %mask) {
+; CHECK-LABEL: test_maskz_load_aligned_ps:
+; CHECK: ## BB#0:
+; CHECK-NEXT: kmovw %esi, %k1
+; CHECK-NEXT: vmovaps (%rdi), %zmm0 {%k1} {z}
+; CHECK-NEXT: retq
+ %res = call <16 x float> @llvm.x86.avx512.mask.load.ps.512(i8* %ptr, <16 x float> zeroinitializer, i16 %mask)
+ ret <16 x float> %res
+}
+
+declare <16 x float> @llvm.x86.avx512.mask.load.ps.512(i8*, <16 x float>, i16)
+
+define <8 x double> @test_maskz_load_aligned_pd(<8 x double> %data, i8* %ptr, i8 %mask) {
+; CHECK-LABEL: test_maskz_load_aligned_pd:
+; CHECK: ## BB#0:
+; CHECK-NEXT: kmovw %esi, %k1
+; CHECK-NEXT: vmovapd (%rdi), %zmm0 {%k1} {z}
+; CHECK-NEXT: retq
+ %res = call <8 x double> @llvm.x86.avx512.mask.load.pd.512(i8* %ptr, <8 x double> zeroinitializer, i8 %mask)
+ ret <8 x double> %res
+}
+
+declare <8 x double> @llvm.x86.avx512.mask.load.pd.512(i8*, <8 x double>, i8)
+
+define <16 x float> @test_load_aligned_ps(<16 x float> %data, i8* %ptr, i16 %mask) {
+; CHECK-LABEL: test_load_aligned_ps:
+; CHECK: ## BB#0:
+; CHECK-NEXT: vmovaps (%rdi), %zmm0
+; CHECK-NEXT: retq
+ %res = call <16 x float> @llvm.x86.avx512.mask.load.ps.512(i8* %ptr, <16 x float> zeroinitializer, i16 -1)
+ ret <16 x float> %res
+}
+
+define <8 x double> @test_load_aligned_pd(<8 x double> %data, i8* %ptr, i8 %mask) {
+; CHECK-LABEL: test_load_aligned_pd:
+; CHECK: ## BB#0:
+; CHECK-NEXT: vmovapd (%rdi), %zmm0
+; CHECK-NEXT: retq
+ %res = call <8 x double> @llvm.x86.avx512.mask.load.pd.512(i8* %ptr, <8 x double> zeroinitializer, i8 -1)
+ ret <8 x double> %res
+}
define <16 x float> @test_vpermt2ps(<16 x float>%x, <16 x float>%y, <16 x i32>%perm) {
; CHECK: vpermt2ps {{.*}}encoding: [0x62,0xf2,0x6d,0x48,0x7f,0xc1]
@@ -678,28 +744,28 @@ declare i8 @llvm.x86.avx512.mask.pcmpgt.q.512(<8 x i64>, <8 x i64>, i8)
define <8 x i16> @test_cmp_d_512(<16 x i32> %a0, <16 x i32> %a1) {
; CHECK-LABEL: test_cmp_d_512
; CHECK: vpcmpeqd %zmm1, %zmm0, %k0 ##
- %res0 = call i16 @llvm.x86.avx512.mask.cmp.d.512(<16 x i32> %a0, <16 x i32> %a1, i32 0, i16 -1)
+ %res0 = call i16 @llvm.x86.avx512.mask.cmp.d.512(<16 x i32> %a0, <16 x i32> %a1, i8 0, i16 -1)
%vec0 = insertelement <8 x i16> undef, i16 %res0, i32 0
; CHECK: vpcmpltd %zmm1, %zmm0, %k0 ##
- %res1 = call i16 @llvm.x86.avx512.mask.cmp.d.512(<16 x i32> %a0, <16 x i32> %a1, i32 1, i16 -1)
+ %res1 = call i16 @llvm.x86.avx512.mask.cmp.d.512(<16 x i32> %a0, <16 x i32> %a1, i8 1, i16 -1)
%vec1 = insertelement <8 x i16> %vec0, i16 %res1, i32 1
; CHECK: vpcmpled %zmm1, %zmm0, %k0 ##
- %res2 = call i16 @llvm.x86.avx512.mask.cmp.d.512(<16 x i32> %a0, <16 x i32> %a1, i32 2, i16 -1)
+ %res2 = call i16 @llvm.x86.avx512.mask.cmp.d.512(<16 x i32> %a0, <16 x i32> %a1, i8 2, i16 -1)
%vec2 = insertelement <8 x i16> %vec1, i16 %res2, i32 2
; CHECK: vpcmpunordd %zmm1, %zmm0, %k0 ##
- %res3 = call i16 @llvm.x86.avx512.mask.cmp.d.512(<16 x i32> %a0, <16 x i32> %a1, i32 3, i16 -1)
+ %res3 = call i16 @llvm.x86.avx512.mask.cmp.d.512(<16 x i32> %a0, <16 x i32> %a1, i8 3, i16 -1)
%vec3 = insertelement <8 x i16> %vec2, i16 %res3, i32 3
; CHECK: vpcmpneqd %zmm1, %zmm0, %k0 ##
- %res4 = call i16 @llvm.x86.avx512.mask.cmp.d.512(<16 x i32> %a0, <16 x i32> %a1, i32 4, i16 -1)
+ %res4 = call i16 @llvm.x86.avx512.mask.cmp.d.512(<16 x i32> %a0, <16 x i32> %a1, i8 4, i16 -1)
%vec4 = insertelement <8 x i16> %vec3, i16 %res4, i32 4
; CHECK: vpcmpnltd %zmm1, %zmm0, %k0 ##
- %res5 = call i16 @llvm.x86.avx512.mask.cmp.d.512(<16 x i32> %a0, <16 x i32> %a1, i32 5, i16 -1)
+ %res5 = call i16 @llvm.x86.avx512.mask.cmp.d.512(<16 x i32> %a0, <16 x i32> %a1, i8 5, i16 -1)
%vec5 = insertelement <8 x i16> %vec4, i16 %res5, i32 5
; CHECK: vpcmpnled %zmm1, %zmm0, %k0 ##
- %res6 = call i16 @llvm.x86.avx512.mask.cmp.d.512(<16 x i32> %a0, <16 x i32> %a1, i32 6, i16 -1)
+ %res6 = call i16 @llvm.x86.avx512.mask.cmp.d.512(<16 x i32> %a0, <16 x i32> %a1, i8 6, i16 -1)
%vec6 = insertelement <8 x i16> %vec5, i16 %res6, i32 6
; CHECK: vpcmpordd %zmm1, %zmm0, %k0 ##
- %res7 = call i16 @llvm.x86.avx512.mask.cmp.d.512(<16 x i32> %a0, <16 x i32> %a1, i32 7, i16 -1)
+ %res7 = call i16 @llvm.x86.avx512.mask.cmp.d.512(<16 x i32> %a0, <16 x i32> %a1, i8 7, i16 -1)
%vec7 = insertelement <8 x i16> %vec6, i16 %res7, i32 7
ret <8 x i16> %vec7
}
@@ -707,59 +773,59 @@ define <8 x i16> @test_cmp_d_512(<16 x i32> %a0, <16 x i32> %a1) {
define <8 x i16> @test_mask_cmp_d_512(<16 x i32> %a0, <16 x i32> %a1, i16 %mask) {
; CHECK-LABEL: test_mask_cmp_d_512
; CHECK: vpcmpeqd %zmm1, %zmm0, %k0 {%k1} ##
- %res0 = call i16 @llvm.x86.avx512.mask.cmp.d.512(<16 x i32> %a0, <16 x i32> %a1, i32 0, i16 %mask)
+ %res0 = call i16 @llvm.x86.avx512.mask.cmp.d.512(<16 x i32> %a0, <16 x i32> %a1, i8 0, i16 %mask)
%vec0 = insertelement <8 x i16> undef, i16 %res0, i32 0
; CHECK: vpcmpltd %zmm1, %zmm0, %k0 {%k1} ##
- %res1 = call i16 @llvm.x86.avx512.mask.cmp.d.512(<16 x i32> %a0, <16 x i32> %a1, i32 1, i16 %mask)
+ %res1 = call i16 @llvm.x86.avx512.mask.cmp.d.512(<16 x i32> %a0, <16 x i32> %a1, i8 1, i16 %mask)
%vec1 = insertelement <8 x i16> %vec0, i16 %res1, i32 1
; CHECK: vpcmpled %zmm1, %zmm0, %k0 {%k1} ##
- %res2 = call i16 @llvm.x86.avx512.mask.cmp.d.512(<16 x i32> %a0, <16 x i32> %a1, i32 2, i16 %mask)
+ %res2 = call i16 @llvm.x86.avx512.mask.cmp.d.512(<16 x i32> %a0, <16 x i32> %a1, i8 2, i16 %mask)
%vec2 = insertelement <8 x i16> %vec1, i16 %res2, i32 2
; CHECK: vpcmpunordd %zmm1, %zmm0, %k0 {%k1} ##
- %res3 = call i16 @llvm.x86.avx512.mask.cmp.d.512(<16 x i32> %a0, <16 x i32> %a1, i32 3, i16 %mask)
+ %res3 = call i16 @llvm.x86.avx512.mask.cmp.d.512(<16 x i32> %a0, <16 x i32> %a1, i8 3, i16 %mask)
%vec3 = insertelement <8 x i16> %vec2, i16 %res3, i32 3
; CHECK: vpcmpneqd %zmm1, %zmm0, %k0 {%k1} ##
- %res4 = call i16 @llvm.x86.avx512.mask.cmp.d.512(<16 x i32> %a0, <16 x i32> %a1, i32 4, i16 %mask)
+ %res4 = call i16 @llvm.x86.avx512.mask.cmp.d.512(<16 x i32> %a0, <16 x i32> %a1, i8 4, i16 %mask)
%vec4 = insertelement <8 x i16> %vec3, i16 %res4, i32 4
; CHECK: vpcmpnltd %zmm1, %zmm0, %k0 {%k1} ##
- %res5 = call i16 @llvm.x86.avx512.mask.cmp.d.512(<16 x i32> %a0, <16 x i32> %a1, i32 5, i16 %mask)
+ %res5 = call i16 @llvm.x86.avx512.mask.cmp.d.512(<16 x i32> %a0, <16 x i32> %a1, i8 5, i16 %mask)
%vec5 = insertelement <8 x i16> %vec4, i16 %res5, i32 5
; CHECK: vpcmpnled %zmm1, %zmm0, %k0 {%k1} ##
- %res6 = call i16 @llvm.x86.avx512.mask.cmp.d.512(<16 x i32> %a0, <16 x i32> %a1, i32 6, i16 %mask)
+ %res6 = call i16 @llvm.x86.avx512.mask.cmp.d.512(<16 x i32> %a0, <16 x i32> %a1, i8 6, i16 %mask)
%vec6 = insertelement <8 x i16> %vec5, i16 %res6, i32 6
; CHECK: vpcmpordd %zmm1, %zmm0, %k0 {%k1} ##
- %res7 = call i16 @llvm.x86.avx512.mask.cmp.d.512(<16 x i32> %a0, <16 x i32> %a1, i32 7, i16 %mask)
+ %res7 = call i16 @llvm.x86.avx512.mask.cmp.d.512(<16 x i32> %a0, <16 x i32> %a1, i8 7, i16 %mask)
%vec7 = insertelement <8 x i16> %vec6, i16 %res7, i32 7
ret <8 x i16> %vec7
}
-declare i16 @llvm.x86.avx512.mask.cmp.d.512(<16 x i32>, <16 x i32>, i32, i16) nounwind readnone
+declare i16 @llvm.x86.avx512.mask.cmp.d.512(<16 x i32>, <16 x i32>, i8, i16) nounwind readnone
define <8 x i16> @test_ucmp_d_512(<16 x i32> %a0, <16 x i32> %a1) {
; CHECK-LABEL: test_ucmp_d_512
; CHECK: vpcmpequd %zmm1, %zmm0, %k0 ##
- %res0 = call i16 @llvm.x86.avx512.mask.ucmp.d.512(<16 x i32> %a0, <16 x i32> %a1, i32 0, i16 -1)
+ %res0 = call i16 @llvm.x86.avx512.mask.ucmp.d.512(<16 x i32> %a0, <16 x i32> %a1, i8 0, i16 -1)
%vec0 = insertelement <8 x i16> undef, i16 %res0, i32 0
; CHECK: vpcmpltud %zmm1, %zmm0, %k0 ##
- %res1 = call i16 @llvm.x86.avx512.mask.ucmp.d.512(<16 x i32> %a0, <16 x i32> %a1, i32 1, i16 -1)
+ %res1 = call i16 @llvm.x86.avx512.mask.ucmp.d.512(<16 x i32> %a0, <16 x i32> %a1, i8 1, i16 -1)
%vec1 = insertelement <8 x i16> %vec0, i16 %res1, i32 1
; CHECK: vpcmpleud %zmm1, %zmm0, %k0 ##
- %res2 = call i16 @llvm.x86.avx512.mask.ucmp.d.512(<16 x i32> %a0, <16 x i32> %a1, i32 2, i16 -1)
+ %res2 = call i16 @llvm.x86.avx512.mask.ucmp.d.512(<16 x i32> %a0, <16 x i32> %a1, i8 2, i16 -1)
%vec2 = insertelement <8 x i16> %vec1, i16 %res2, i32 2
; CHECK: vpcmpunordud %zmm1, %zmm0, %k0 ##
- %res3 = call i16 @llvm.x86.avx512.mask.ucmp.d.512(<16 x i32> %a0, <16 x i32> %a1, i32 3, i16 -1)
+ %res3 = call i16 @llvm.x86.avx512.mask.ucmp.d.512(<16 x i32> %a0, <16 x i32> %a1, i8 3, i16 -1)
%vec3 = insertelement <8 x i16> %vec2, i16 %res3, i32 3
; CHECK: vpcmpnequd %zmm1, %zmm0, %k0 ##
- %res4 = call i16 @llvm.x86.avx512.mask.ucmp.d.512(<16 x i32> %a0, <16 x i32> %a1, i32 4, i16 -1)
+ %res4 = call i16 @llvm.x86.avx512.mask.ucmp.d.512(<16 x i32> %a0, <16 x i32> %a1, i8 4, i16 -1)
%vec4 = insertelement <8 x i16> %vec3, i16 %res4, i32 4
; CHECK: vpcmpnltud %zmm1, %zmm0, %k0 ##
- %res5 = call i16 @llvm.x86.avx512.mask.ucmp.d.512(<16 x i32> %a0, <16 x i32> %a1, i32 5, i16 -1)
+ %res5 = call i16 @llvm.x86.avx512.mask.ucmp.d.512(<16 x i32> %a0, <16 x i32> %a1, i8 5, i16 -1)
%vec5 = insertelement <8 x i16> %vec4, i16 %res5, i32 5
; CHECK: vpcmpnleud %zmm1, %zmm0, %k0 ##
- %res6 = call i16 @llvm.x86.avx512.mask.ucmp.d.512(<16 x i32> %a0, <16 x i32> %a1, i32 6, i16 -1)
+ %res6 = call i16 @llvm.x86.avx512.mask.ucmp.d.512(<16 x i32> %a0, <16 x i32> %a1, i8 6, i16 -1)
%vec6 = insertelement <8 x i16> %vec5, i16 %res6, i32 6
; CHECK: vpcmpordud %zmm1, %zmm0, %k0 ##
- %res7 = call i16 @llvm.x86.avx512.mask.ucmp.d.512(<16 x i32> %a0, <16 x i32> %a1, i32 7, i16 -1)
+ %res7 = call i16 @llvm.x86.avx512.mask.ucmp.d.512(<16 x i32> %a0, <16 x i32> %a1, i8 7, i16 -1)
%vec7 = insertelement <8 x i16> %vec6, i16 %res7, i32 7
ret <8 x i16> %vec7
}
@@ -767,59 +833,59 @@ define <8 x i16> @test_ucmp_d_512(<16 x i32> %a0, <16 x i32> %a1) {
define <8 x i16> @test_mask_ucmp_d_512(<16 x i32> %a0, <16 x i32> %a1, i16 %mask) {
; CHECK-LABEL: test_mask_ucmp_d_512
; CHECK: vpcmpequd %zmm1, %zmm0, %k0 {%k1} ##
- %res0 = call i16 @llvm.x86.avx512.mask.ucmp.d.512(<16 x i32> %a0, <16 x i32> %a1, i32 0, i16 %mask)
+ %res0 = call i16 @llvm.x86.avx512.mask.ucmp.d.512(<16 x i32> %a0, <16 x i32> %a1, i8 0, i16 %mask)
%vec0 = insertelement <8 x i16> undef, i16 %res0, i32 0
; CHECK: vpcmpltud %zmm1, %zmm0, %k0 {%k1} ##
- %res1 = call i16 @llvm.x86.avx512.mask.ucmp.d.512(<16 x i32> %a0, <16 x i32> %a1, i32 1, i16 %mask)
+ %res1 = call i16 @llvm.x86.avx512.mask.ucmp.d.512(<16 x i32> %a0, <16 x i32> %a1, i8 1, i16 %mask)
%vec1 = insertelement <8 x i16> %vec0, i16 %res1, i32 1
; CHECK: vpcmpleud %zmm1, %zmm0, %k0 {%k1} ##
- %res2 = call i16 @llvm.x86.avx512.mask.ucmp.d.512(<16 x i32> %a0, <16 x i32> %a1, i32 2, i16 %mask)
+ %res2 = call i16 @llvm.x86.avx512.mask.ucmp.d.512(<16 x i32> %a0, <16 x i32> %a1, i8 2, i16 %mask)
%vec2 = insertelement <8 x i16> %vec1, i16 %res2, i32 2
; CHECK: vpcmpunordud %zmm1, %zmm0, %k0 {%k1} ##
- %res3 = call i16 @llvm.x86.avx512.mask.ucmp.d.512(<16 x i32> %a0, <16 x i32> %a1, i32 3, i16 %mask)
+ %res3 = call i16 @llvm.x86.avx512.mask.ucmp.d.512(<16 x i32> %a0, <16 x i32> %a1, i8 3, i16 %mask)
%vec3 = insertelement <8 x i16> %vec2, i16 %res3, i32 3
; CHECK: vpcmpnequd %zmm1, %zmm0, %k0 {%k1} ##
- %res4 = call i16 @llvm.x86.avx512.mask.ucmp.d.512(<16 x i32> %a0, <16 x i32> %a1, i32 4, i16 %mask)
+ %res4 = call i16 @llvm.x86.avx512.mask.ucmp.d.512(<16 x i32> %a0, <16 x i32> %a1, i8 4, i16 %mask)
%vec4 = insertelement <8 x i16> %vec3, i16 %res4, i32 4
; CHECK: vpcmpnltud %zmm1, %zmm0, %k0 {%k1} ##
- %res5 = call i16 @llvm.x86.avx512.mask.ucmp.d.512(<16 x i32> %a0, <16 x i32> %a1, i32 5, i16 %mask)
+ %res5 = call i16 @llvm.x86.avx512.mask.ucmp.d.512(<16 x i32> %a0, <16 x i32> %a1, i8 5, i16 %mask)
%vec5 = insertelement <8 x i16> %vec4, i16 %res5, i32 5
; CHECK: vpcmpnleud %zmm1, %zmm0, %k0 {%k1} ##
- %res6 = call i16 @llvm.x86.avx512.mask.ucmp.d.512(<16 x i32> %a0, <16 x i32> %a1, i32 6, i16 %mask)
+ %res6 = call i16 @llvm.x86.avx512.mask.ucmp.d.512(<16 x i32> %a0, <16 x i32> %a1, i8 6, i16 %mask)
%vec6 = insertelement <8 x i16> %vec5, i16 %res6, i32 6
; CHECK: vpcmpordud %zmm1, %zmm0, %k0 {%k1} ##
- %res7 = call i16 @llvm.x86.avx512.mask.ucmp.d.512(<16 x i32> %a0, <16 x i32> %a1, i32 7, i16 %mask)
+ %res7 = call i16 @llvm.x86.avx512.mask.ucmp.d.512(<16 x i32> %a0, <16 x i32> %a1, i8 7, i16 %mask)
%vec7 = insertelement <8 x i16> %vec6, i16 %res7, i32 7
ret <8 x i16> %vec7
}
-declare i16 @llvm.x86.avx512.mask.ucmp.d.512(<16 x i32>, <16 x i32>, i32, i16) nounwind readnone
+declare i16 @llvm.x86.avx512.mask.ucmp.d.512(<16 x i32>, <16 x i32>, i8, i16) nounwind readnone
define <8 x i8> @test_cmp_q_512(<8 x i64> %a0, <8 x i64> %a1) {
; CHECK-LABEL: test_cmp_q_512
; CHECK: vpcmpeqq %zmm1, %zmm0, %k0 ##
- %res0 = call i8 @llvm.x86.avx512.mask.cmp.q.512(<8 x i64> %a0, <8 x i64> %a1, i32 0, i8 -1)
+ %res0 = call i8 @llvm.x86.avx512.mask.cmp.q.512(<8 x i64> %a0, <8 x i64> %a1, i8 0, i8 -1)
%vec0 = insertelement <8 x i8> undef, i8 %res0, i32 0
; CHECK: vpcmpltq %zmm1, %zmm0, %k0 ##
- %res1 = call i8 @llvm.x86.avx512.mask.cmp.q.512(<8 x i64> %a0, <8 x i64> %a1, i32 1, i8 -1)
+ %res1 = call i8 @llvm.x86.avx512.mask.cmp.q.512(<8 x i64> %a0, <8 x i64> %a1, i8 1, i8 -1)
%vec1 = insertelement <8 x i8> %vec0, i8 %res1, i32 1
; CHECK: vpcmpleq %zmm1, %zmm0, %k0 ##
- %res2 = call i8 @llvm.x86.avx512.mask.cmp.q.512(<8 x i64> %a0, <8 x i64> %a1, i32 2, i8 -1)
+ %res2 = call i8 @llvm.x86.avx512.mask.cmp.q.512(<8 x i64> %a0, <8 x i64> %a1, i8 2, i8 -1)
%vec2 = insertelement <8 x i8> %vec1, i8 %res2, i32 2
; CHECK: vpcmpunordq %zmm1, %zmm0, %k0 ##
- %res3 = call i8 @llvm.x86.avx512.mask.cmp.q.512(<8 x i64> %a0, <8 x i64> %a1, i32 3, i8 -1)
+ %res3 = call i8 @llvm.x86.avx512.mask.cmp.q.512(<8 x i64> %a0, <8 x i64> %a1, i8 3, i8 -1)
%vec3 = insertelement <8 x i8> %vec2, i8 %res3, i32 3
; CHECK: vpcmpneqq %zmm1, %zmm0, %k0 ##
- %res4 = call i8 @llvm.x86.avx512.mask.cmp.q.512(<8 x i64> %a0, <8 x i64> %a1, i32 4, i8 -1)
+ %res4 = call i8 @llvm.x86.avx512.mask.cmp.q.512(<8 x i64> %a0, <8 x i64> %a1, i8 4, i8 -1)
%vec4 = insertelement <8 x i8> %vec3, i8 %res4, i32 4
; CHECK: vpcmpnltq %zmm1, %zmm0, %k0 ##
- %res5 = call i8 @llvm.x86.avx512.mask.cmp.q.512(<8 x i64> %a0, <8 x i64> %a1, i32 5, i8 -1)
+ %res5 = call i8 @llvm.x86.avx512.mask.cmp.q.512(<8 x i64> %a0, <8 x i64> %a1, i8 5, i8 -1)
%vec5 = insertelement <8 x i8> %vec4, i8 %res5, i32 5
; CHECK: vpcmpnleq %zmm1, %zmm0, %k0 ##
- %res6 = call i8 @llvm.x86.avx512.mask.cmp.q.512(<8 x i64> %a0, <8 x i64> %a1, i32 6, i8 -1)
+ %res6 = call i8 @llvm.x86.avx512.mask.cmp.q.512(<8 x i64> %a0, <8 x i64> %a1, i8 6, i8 -1)
%vec6 = insertelement <8 x i8> %vec5, i8 %res6, i32 6
; CHECK: vpcmpordq %zmm1, %zmm0, %k0 ##
- %res7 = call i8 @llvm.x86.avx512.mask.cmp.q.512(<8 x i64> %a0, <8 x i64> %a1, i32 7, i8 -1)
+ %res7 = call i8 @llvm.x86.avx512.mask.cmp.q.512(<8 x i64> %a0, <8 x i64> %a1, i8 7, i8 -1)
%vec7 = insertelement <8 x i8> %vec6, i8 %res7, i32 7
ret <8 x i8> %vec7
}
@@ -827,59 +893,59 @@ define <8 x i8> @test_cmp_q_512(<8 x i64> %a0, <8 x i64> %a1) {
define <8 x i8> @test_mask_cmp_q_512(<8 x i64> %a0, <8 x i64> %a1, i8 %mask) {
; CHECK-LABEL: test_mask_cmp_q_512
; CHECK: vpcmpeqq %zmm1, %zmm0, %k0 {%k1} ##
- %res0 = call i8 @llvm.x86.avx512.mask.cmp.q.512(<8 x i64> %a0, <8 x i64> %a1, i32 0, i8 %mask)
+ %res0 = call i8 @llvm.x86.avx512.mask.cmp.q.512(<8 x i64> %a0, <8 x i64> %a1, i8 0, i8 %mask)
%vec0 = insertelement <8 x i8> undef, i8 %res0, i32 0
; CHECK: vpcmpltq %zmm1, %zmm0, %k0 {%k1} ##
- %res1 = call i8 @llvm.x86.avx512.mask.cmp.q.512(<8 x i64> %a0, <8 x i64> %a1, i32 1, i8 %mask)
+ %res1 = call i8 @llvm.x86.avx512.mask.cmp.q.512(<8 x i64> %a0, <8 x i64> %a1, i8 1, i8 %mask)
%vec1 = insertelement <8 x i8> %vec0, i8 %res1, i32 1
; CHECK: vpcmpleq %zmm1, %zmm0, %k0 {%k1} ##
- %res2 = call i8 @llvm.x86.avx512.mask.cmp.q.512(<8 x i64> %a0, <8 x i64> %a1, i32 2, i8 %mask)
+ %res2 = call i8 @llvm.x86.avx512.mask.cmp.q.512(<8 x i64> %a0, <8 x i64> %a1, i8 2, i8 %mask)
%vec2 = insertelement <8 x i8> %vec1, i8 %res2, i32 2
; CHECK: vpcmpunordq %zmm1, %zmm0, %k0 {%k1} ##
- %res3 = call i8 @llvm.x86.avx512.mask.cmp.q.512(<8 x i64> %a0, <8 x i64> %a1, i32 3, i8 %mask)
+ %res3 = call i8 @llvm.x86.avx512.mask.cmp.q.512(<8 x i64> %a0, <8 x i64> %a1, i8 3, i8 %mask)
%vec3 = insertelement <8 x i8> %vec2, i8 %res3, i32 3
; CHECK: vpcmpneqq %zmm1, %zmm0, %k0 {%k1} ##
- %res4 = call i8 @llvm.x86.avx512.mask.cmp.q.512(<8 x i64> %a0, <8 x i64> %a1, i32 4, i8 %mask)
+ %res4 = call i8 @llvm.x86.avx512.mask.cmp.q.512(<8 x i64> %a0, <8 x i64> %a1, i8 4, i8 %mask)
%vec4 = insertelement <8 x i8> %vec3, i8 %res4, i32 4
; CHECK: vpcmpnltq %zmm1, %zmm0, %k0 {%k1} ##
- %res5 = call i8 @llvm.x86.avx512.mask.cmp.q.512(<8 x i64> %a0, <8 x i64> %a1, i32 5, i8 %mask)
+ %res5 = call i8 @llvm.x86.avx512.mask.cmp.q.512(<8 x i64> %a0, <8 x i64> %a1, i8 5, i8 %mask)
%vec5 = insertelement <8 x i8> %vec4, i8 %res5, i32 5
; CHECK: vpcmpnleq %zmm1, %zmm0, %k0 {%k1} ##
- %res6 = call i8 @llvm.x86.avx512.mask.cmp.q.512(<8 x i64> %a0, <8 x i64> %a1, i32 6, i8 %mask)
+ %res6 = call i8 @llvm.x86.avx512.mask.cmp.q.512(<8 x i64> %a0, <8 x i64> %a1, i8 6, i8 %mask)
%vec6 = insertelement <8 x i8> %vec5, i8 %res6, i32 6
; CHECK: vpcmpordq %zmm1, %zmm0, %k0 {%k1} ##
- %res7 = call i8 @llvm.x86.avx512.mask.cmp.q.512(<8 x i64> %a0, <8 x i64> %a1, i32 7, i8 %mask)
+ %res7 = call i8 @llvm.x86.avx512.mask.cmp.q.512(<8 x i64> %a0, <8 x i64> %a1, i8 7, i8 %mask)
%vec7 = insertelement <8 x i8> %vec6, i8 %res7, i32 7
ret <8 x i8> %vec7
}
-declare i8 @llvm.x86.avx512.mask.cmp.q.512(<8 x i64>, <8 x i64>, i32, i8) nounwind readnone
+declare i8 @llvm.x86.avx512.mask.cmp.q.512(<8 x i64>, <8 x i64>, i8, i8) nounwind readnone
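+
+; The i8 immediate is the predicate selector (0 = eq through 7 = ord), shared
+; by the signed cmp and the unsigned ucmp intrinsic families below.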
define <8 x i8> @test_ucmp_q_512(<8 x i64> %a0, <8 x i64> %a1) {
; CHECK-LABEL: test_ucmp_q_512
; CHECK: vpcmpequq %zmm1, %zmm0, %k0 ##
- %res0 = call i8 @llvm.x86.avx512.mask.ucmp.q.512(<8 x i64> %a0, <8 x i64> %a1, i32 0, i8 -1)
+ %res0 = call i8 @llvm.x86.avx512.mask.ucmp.q.512(<8 x i64> %a0, <8 x i64> %a1, i8 0, i8 -1)
%vec0 = insertelement <8 x i8> undef, i8 %res0, i32 0
; CHECK: vpcmpltuq %zmm1, %zmm0, %k0 ##
- %res1 = call i8 @llvm.x86.avx512.mask.ucmp.q.512(<8 x i64> %a0, <8 x i64> %a1, i32 1, i8 -1)
+ %res1 = call i8 @llvm.x86.avx512.mask.ucmp.q.512(<8 x i64> %a0, <8 x i64> %a1, i8 1, i8 -1)
%vec1 = insertelement <8 x i8> %vec0, i8 %res1, i32 1
; CHECK: vpcmpleuq %zmm1, %zmm0, %k0 ##
- %res2 = call i8 @llvm.x86.avx512.mask.ucmp.q.512(<8 x i64> %a0, <8 x i64> %a1, i32 2, i8 -1)
+ %res2 = call i8 @llvm.x86.avx512.mask.ucmp.q.512(<8 x i64> %a0, <8 x i64> %a1, i8 2, i8 -1)
%vec2 = insertelement <8 x i8> %vec1, i8 %res2, i32 2
; CHECK: vpcmpunorduq %zmm1, %zmm0, %k0 ##
- %res3 = call i8 @llvm.x86.avx512.mask.ucmp.q.512(<8 x i64> %a0, <8 x i64> %a1, i32 3, i8 -1)
+ %res3 = call i8 @llvm.x86.avx512.mask.ucmp.q.512(<8 x i64> %a0, <8 x i64> %a1, i8 3, i8 -1)
%vec3 = insertelement <8 x i8> %vec2, i8 %res3, i32 3
; CHECK: vpcmpnequq %zmm1, %zmm0, %k0 ##
- %res4 = call i8 @llvm.x86.avx512.mask.ucmp.q.512(<8 x i64> %a0, <8 x i64> %a1, i32 4, i8 -1)
+ %res4 = call i8 @llvm.x86.avx512.mask.ucmp.q.512(<8 x i64> %a0, <8 x i64> %a1, i8 4, i8 -1)
%vec4 = insertelement <8 x i8> %vec3, i8 %res4, i32 4
; CHECK: vpcmpnltuq %zmm1, %zmm0, %k0 ##
- %res5 = call i8 @llvm.x86.avx512.mask.ucmp.q.512(<8 x i64> %a0, <8 x i64> %a1, i32 5, i8 -1)
+ %res5 = call i8 @llvm.x86.avx512.mask.ucmp.q.512(<8 x i64> %a0, <8 x i64> %a1, i8 5, i8 -1)
%vec5 = insertelement <8 x i8> %vec4, i8 %res5, i32 5
; CHECK: vpcmpnleuq %zmm1, %zmm0, %k0 ##
- %res6 = call i8 @llvm.x86.avx512.mask.ucmp.q.512(<8 x i64> %a0, <8 x i64> %a1, i32 6, i8 -1)
+ %res6 = call i8 @llvm.x86.avx512.mask.ucmp.q.512(<8 x i64> %a0, <8 x i64> %a1, i8 6, i8 -1)
%vec6 = insertelement <8 x i8> %vec5, i8 %res6, i32 6
; CHECK: vpcmporduq %zmm1, %zmm0, %k0 ##
- %res7 = call i8 @llvm.x86.avx512.mask.ucmp.q.512(<8 x i64> %a0, <8 x i64> %a1, i32 7, i8 -1)
+ %res7 = call i8 @llvm.x86.avx512.mask.ucmp.q.512(<8 x i64> %a0, <8 x i64> %a1, i8 7, i8 -1)
%vec7 = insertelement <8 x i8> %vec6, i8 %res7, i32 7
ret <8 x i8> %vec7
}
@@ -887,33 +953,33 @@ define <8 x i8> @test_ucmp_q_512(<8 x i64> %a0, <8 x i64> %a1) {
define <8 x i8> @test_mask_ucmp_q_512(<8 x i64> %a0, <8 x i64> %a1, i8 %mask) {
; CHECK-LABEL: test_mask_ucmp_q_512
; CHECK: vpcmpequq %zmm1, %zmm0, %k0 {%k1} ##
- %res0 = call i8 @llvm.x86.avx512.mask.ucmp.q.512(<8 x i64> %a0, <8 x i64> %a1, i32 0, i8 %mask)
+ %res0 = call i8 @llvm.x86.avx512.mask.ucmp.q.512(<8 x i64> %a0, <8 x i64> %a1, i8 0, i8 %mask)
%vec0 = insertelement <8 x i8> undef, i8 %res0, i32 0
; CHECK: vpcmpltuq %zmm1, %zmm0, %k0 {%k1} ##
- %res1 = call i8 @llvm.x86.avx512.mask.ucmp.q.512(<8 x i64> %a0, <8 x i64> %a1, i32 1, i8 %mask)
+ %res1 = call i8 @llvm.x86.avx512.mask.ucmp.q.512(<8 x i64> %a0, <8 x i64> %a1, i8 1, i8 %mask)
%vec1 = insertelement <8 x i8> %vec0, i8 %res1, i32 1
; CHECK: vpcmpleuq %zmm1, %zmm0, %k0 {%k1} ##
- %res2 = call i8 @llvm.x86.avx512.mask.ucmp.q.512(<8 x i64> %a0, <8 x i64> %a1, i32 2, i8 %mask)
+ %res2 = call i8 @llvm.x86.avx512.mask.ucmp.q.512(<8 x i64> %a0, <8 x i64> %a1, i8 2, i8 %mask)
%vec2 = insertelement <8 x i8> %vec1, i8 %res2, i32 2
; CHECK: vpcmpunorduq %zmm1, %zmm0, %k0 {%k1} ##
- %res3 = call i8 @llvm.x86.avx512.mask.ucmp.q.512(<8 x i64> %a0, <8 x i64> %a1, i32 3, i8 %mask)
+ %res3 = call i8 @llvm.x86.avx512.mask.ucmp.q.512(<8 x i64> %a0, <8 x i64> %a1, i8 3, i8 %mask)
%vec3 = insertelement <8 x i8> %vec2, i8 %res3, i32 3
; CHECK: vpcmpnequq %zmm1, %zmm0, %k0 {%k1} ##
- %res4 = call i8 @llvm.x86.avx512.mask.ucmp.q.512(<8 x i64> %a0, <8 x i64> %a1, i32 4, i8 %mask)
+ %res4 = call i8 @llvm.x86.avx512.mask.ucmp.q.512(<8 x i64> %a0, <8 x i64> %a1, i8 4, i8 %mask)
%vec4 = insertelement <8 x i8> %vec3, i8 %res4, i32 4
; CHECK: vpcmpnltuq %zmm1, %zmm0, %k0 {%k1} ##
- %res5 = call i8 @llvm.x86.avx512.mask.ucmp.q.512(<8 x i64> %a0, <8 x i64> %a1, i32 5, i8 %mask)
+ %res5 = call i8 @llvm.x86.avx512.mask.ucmp.q.512(<8 x i64> %a0, <8 x i64> %a1, i8 5, i8 %mask)
%vec5 = insertelement <8 x i8> %vec4, i8 %res5, i32 5
; CHECK: vpcmpnleuq %zmm1, %zmm0, %k0 {%k1} ##
- %res6 = call i8 @llvm.x86.avx512.mask.ucmp.q.512(<8 x i64> %a0, <8 x i64> %a1, i32 6, i8 %mask)
+ %res6 = call i8 @llvm.x86.avx512.mask.ucmp.q.512(<8 x i64> %a0, <8 x i64> %a1, i8 6, i8 %mask)
%vec6 = insertelement <8 x i8> %vec5, i8 %res6, i32 6
; CHECK: vpcmporduq %zmm1, %zmm0, %k0 {%k1} ##
- %res7 = call i8 @llvm.x86.avx512.mask.ucmp.q.512(<8 x i64> %a0, <8 x i64> %a1, i32 7, i8 %mask)
+ %res7 = call i8 @llvm.x86.avx512.mask.ucmp.q.512(<8 x i64> %a0, <8 x i64> %a1, i8 7, i8 %mask)
%vec7 = insertelement <8 x i8> %vec6, i8 %res7, i32 7
ret <8 x i8> %vec7
}
-declare i8 @llvm.x86.avx512.mask.ucmp.q.512(<8 x i64>, <8 x i64>, i32, i8) nounwind readnone
+declare i8 @llvm.x86.avx512.mask.ucmp.q.512(<8 x i64>, <8 x i64>, i8, i8) nounwind readnone
define <4 x float> @test_mask_vextractf32x4(<4 x float> %b, <16 x float> %a, i8 %mask) {
; CHECK-LABEL: test_mask_vextractf32x4:
@@ -959,8 +1025,8 @@ define <16 x i32> @test_x86_avx512_pslli_d(<16 x i32> %a0) {
}
define <16 x i32> @test_x86_avx512_mask_pslli_d(<16 x i32> %a0, <16 x i32> %a1, i16 %mask) {
- ; CHECK-LABEL: test_x86_avx512_mask_pslli_d
- ; CHECK: vpslld $7, %zmm0, %zmm1 {%k1}
+ ; CHECK-LABEL: test_x86_avx512_mask_pslli_d
+ ; CHECK: vpslld $7, %zmm0, %zmm1 {%k1}
%res = call <16 x i32> @llvm.x86.avx512.mask.pslli.d(<16 x i32> %a0, i32 7, <16 x i32> %a1, i16 %mask)
ret <16 x i32> %res
}
@@ -983,14 +1049,14 @@ define <8 x i64> @test_x86_avx512_pslli_q(<8 x i64> %a0) {
define <8 x i64> @test_x86_avx512_mask_pslli_q(<8 x i64> %a0, <8 x i64> %a1, i8 %mask) {
; CHECK-LABEL: test_x86_avx512_mask_pslli_q
- ; CHECK: vpsllq $7, %zmm0, %zmm1 {%k1}
+ ; CHECK: vpsllq $7, %zmm0, %zmm1 {%k1}
%res = call <8 x i64> @llvm.x86.avx512.mask.pslli.q(<8 x i64> %a0, i32 7, <8 x i64> %a1, i8 %mask)
ret <8 x i64> %res
}
define <8 x i64> @test_x86_avx512_maskz_pslli_q(<8 x i64> %a0, i8 %mask) {
; CHECK-LABEL: test_x86_avx512_maskz_pslli_q
- ; CHECK: vpsllq $7, %zmm0, %zmm0 {%k1} {z}
+ ; CHECK: vpsllq $7, %zmm0, %zmm0 {%k1} {z}
%res = call <8 x i64> @llvm.x86.avx512.mask.pslli.q(<8 x i64> %a0, i32 7, <8 x i64> zeroinitializer, i8 %mask)
ret <8 x i64> %res
}
@@ -1006,7 +1072,7 @@ define <16 x i32> @test_x86_avx512_psrli_d(<16 x i32> %a0) {
define <16 x i32> @test_x86_avx512_mask_psrli_d(<16 x i32> %a0, <16 x i32> %a1, i16 %mask) {
; CHECK-LABEL: test_x86_avx512_mask_psrli_d
- ; CHECK: vpsrld $7, %zmm0, %zmm1 {%k1}
+ ; CHECK: vpsrld $7, %zmm0, %zmm1 {%k1}
%res = call <16 x i32> @llvm.x86.avx512.mask.psrli.d(<16 x i32> %a0, i32 7, <16 x i32> %a1, i16 %mask)
ret <16 x i32> %res
}
@@ -1029,7 +1095,7 @@ define <8 x i64> @test_x86_avx512_psrli_q(<8 x i64> %a0) {
define <8 x i64> @test_x86_avx512_mask_psrli_q(<8 x i64> %a0, <8 x i64> %a1, i8 %mask) {
; CHECK-LABEL: test_x86_avx512_mask_psrli_q
- ; CHECK: vpsrlq $7, %zmm0, %zmm1 {%k1}
+ ; CHECK: vpsrlq $7, %zmm0, %zmm1 {%k1}
%res = call <8 x i64> @llvm.x86.avx512.mask.psrli.q(<8 x i64> %a0, i32 7, <8 x i64> %a1, i8 %mask)
ret <8 x i64> %res
}
@@ -1052,7 +1118,7 @@ define <16 x i32> @test_x86_avx512_psrai_d(<16 x i32> %a0) {
define <16 x i32> @test_x86_avx512_mask_psrai_d(<16 x i32> %a0, <16 x i32> %a1, i16 %mask) {
; CHECK-LABEL: test_x86_avx512_mask_psrai_d
- ; CHECK: vpsrad $7, %zmm0, %zmm1 {%k1}
+ ; CHECK: vpsrad $7, %zmm0, %zmm1 {%k1}
%res = call <16 x i32> @llvm.x86.avx512.mask.psrai.d(<16 x i32> %a0, i32 7, <16 x i32> %a1, i16 %mask)
ret <16 x i32> %res
}
@@ -1075,7 +1141,7 @@ define <8 x i64> @test_x86_avx512_psrai_q(<8 x i64> %a0) {
define <8 x i64> @test_x86_avx512_mask_psrai_q(<8 x i64> %a0, <8 x i64> %a1, i8 %mask) {
; CHECK-LABEL: test_x86_avx512_mask_psrai_q
- ; CHECK: vpsraq $7, %zmm0, %zmm1 {%k1}
+ ; CHECK: vpsraq $7, %zmm0, %zmm1 {%k1}
%res = call <8 x i64> @llvm.x86.avx512.mask.psrai.q(<8 x i64> %a0, i32 7, <8 x i64> %a1, i8 %mask)
ret <8 x i64> %res
}
@@ -1088,3 +1154,455 @@ define <8 x i64> @test_x86_avx512_maskz_psrai_q(<8 x i64> %a0, i8 %mask) {
}
declare <8 x i64> @llvm.x86.avx512.mask.psrai.q(<8 x i64>, i32, <8 x i64>, i8) nounwind readnone
+
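+; The masked shift intrinsics below take (source, count, passthru, mask).
+; An all-ones mask selects the plain instruction, a live mask with a register
+; passthru selects the merging {%k1} form, and a zeroinitializer passthru with
+; a live mask selects the zeroing {%k1} {z} form checked in each maskz test.
+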
+define <16 x i32> @test_x86_avx512_psll_d(<16 x i32> %a0, <4 x i32> %a1) {
+ ; CHECK-LABEL: test_x86_avx512_psll_d
+ ; CHECK: vpslld
+ %res = call <16 x i32> @llvm.x86.avx512.mask.psll.d(<16 x i32> %a0, <4 x i32> %a1, <16 x i32> zeroinitializer, i16 -1)
+ ret <16 x i32> %res
+}
+
+define <16 x i32> @test_x86_avx512_mask_psll_d(<16 x i32> %a0, <4 x i32> %a1, <16 x i32> %a2, i16 %mask) {
+ ; CHECK-LABEL: test_x86_avx512_mask_psll_d
+ ; CHECK: vpslld %xmm1, %zmm0, %zmm2 {%k1}
+ %res = call <16 x i32> @llvm.x86.avx512.mask.psll.d(<16 x i32> %a0, <4 x i32> %a1, <16 x i32> %a2, i16 %mask)
+ ret <16 x i32> %res
+}
+
+define <16 x i32> @test_x86_avx512_maskz_psll_d(<16 x i32> %a0, <4 x i32> %a1, i16 %mask) {
+ ; CHECK-LABEL: test_x86_avx512_maskz_psll_d
+ ; CHECK: vpslld %xmm1, %zmm0, %zmm0 {%k1} {z}
+ %res = call <16 x i32> @llvm.x86.avx512.mask.psll.d(<16 x i32> %a0, <4 x i32> %a1, <16 x i32> zeroinitializer, i16 %mask)
+ ret <16 x i32> %res
+}
+
+declare <16 x i32> @llvm.x86.avx512.mask.psll.d(<16 x i32>, <4 x i32>, <16 x i32>, i16) nounwind readnone
+
+define <8 x i64> @test_x86_avx512_psll_q(<8 x i64> %a0, <2 x i64> %a1) {
+ ; CHECK-LABEL: test_x86_avx512_psll_q
+ ; CHECK: vpsllq
+ %res = call <8 x i64> @llvm.x86.avx512.mask.psll.q(<8 x i64> %a0, <2 x i64> %a1, <8 x i64> zeroinitializer, i8 -1)
+ ret <8 x i64> %res
+}
+
+define <8 x i64> @test_x86_avx512_mask_psll_q(<8 x i64> %a0, <2 x i64> %a1, <8 x i64> %a2, i8 %mask) {
+ ; CHECK-LABEL: test_x86_avx512_mask_psll_q
+ ; CHECK: vpsllq %xmm1, %zmm0, %zmm2 {%k1}
+ %res = call <8 x i64> @llvm.x86.avx512.mask.psll.q(<8 x i64> %a0, <2 x i64> %a1, <8 x i64> %a2, i8 %mask)
+ ret <8 x i64> %res
+}
+
+define <8 x i64> @test_x86_avx512_maskz_psll_q(<8 x i64> %a0, <2 x i64> %a1, i8 %mask) {
+ ; CHECK-LABEL: test_x86_avx512_maskz_psll_q
+ ; CHECK: vpsllq %xmm1, %zmm0, %zmm0 {%k1} {z}
+ %res = call <8 x i64> @llvm.x86.avx512.mask.psll.q(<8 x i64> %a0, <2 x i64> %a1, <8 x i64> zeroinitializer, i8 %mask)
+ ret <8 x i64> %res
+}
+
+declare <8 x i64> @llvm.x86.avx512.mask.psll.q(<8 x i64>, <2 x i64>, <8 x i64>, i8) nounwind readnone
+
+define <16 x i32> @test_x86_avx512_psrl_d(<16 x i32> %a0, <4 x i32> %a1) {
+ ; CHECK-LABEL: test_x86_avx512_psrl_d
+ ; CHECK: vpsrld
+ %res = call <16 x i32> @llvm.x86.avx512.mask.psrl.d(<16 x i32> %a0, <4 x i32> %a1, <16 x i32> zeroinitializer, i16 -1)
+ ret <16 x i32> %res
+}
+
+define <16 x i32> @test_x86_avx512_mask_psrl_d(<16 x i32> %a0, <4 x i32> %a1, <16 x i32> %a2, i16 %mask) {
+ ; CHECK-LABEL: test_x86_avx512_mask_psrl_d
+ ; CHECK: vpsrld %xmm1, %zmm0, %zmm2 {%k1}
+ %res = call <16 x i32> @llvm.x86.avx512.mask.psrl.d(<16 x i32> %a0, <4 x i32> %a1, <16 x i32> %a2, i16 %mask)
+ ret <16 x i32> %res
+}
+
+define <16 x i32> @test_x86_avx512_maskz_psrl_d(<16 x i32> %a0, <4 x i32> %a1, i16 %mask) {
+ ; CHECK-LABEL: test_x86_avx512_maskz_psrl_d
+ ; CHECK: vpsrld %xmm1, %zmm0, %zmm0 {%k1} {z}
+ %res = call <16 x i32> @llvm.x86.avx512.mask.psrl.d(<16 x i32> %a0, <4 x i32> %a1, <16 x i32> zeroinitializer, i16 %mask)
+ ret <16 x i32> %res
+}
+
+declare <16 x i32> @llvm.x86.avx512.mask.psrl.d(<16 x i32>, <4 x i32>, <16 x i32>, i16) nounwind readnone
+
+define <8 x i64> @test_x86_avx512_psrl_q(<8 x i64> %a0, <2 x i64> %a1) {
+ ; CHECK-LABEL: test_x86_avx512_psrl_q
+ ; CHECK: vpsrlq
+ %res = call <8 x i64> @llvm.x86.avx512.mask.psrl.q(<8 x i64> %a0, <2 x i64> %a1, <8 x i64> zeroinitializer, i8 -1)
+ ret <8 x i64> %res
+}
+
+define <8 x i64> @test_x86_avx512_mask_psrl_q(<8 x i64> %a0, <2 x i64> %a1, <8 x i64> %a2, i8 %mask) {
+ ; CHECK-LABEL: test_x86_avx512_mask_psrl_q
+ ; CHECK: vpsrlq %xmm1, %zmm0, %zmm2 {%k1}
+ %res = call <8 x i64> @llvm.x86.avx512.mask.psrl.q(<8 x i64> %a0, <2 x i64> %a1, <8 x i64> %a2, i8 %mask)
+ ret <8 x i64> %res
+}
+
+define <8 x i64> @test_x86_avx512_maskz_psrl_q(<8 x i64> %a0, <2 x i64> %a1, i8 %mask) {
+ ; CHECK-LABEL: test_x86_avx512_maskz_psrl_q
+ ; CHECK: vpsrlq %xmm1, %zmm0, %zmm0 {%k1} {z}
+ %res = call <8 x i64> @llvm.x86.avx512.mask.psrl.q(<8 x i64> %a0, <2 x i64> %a1, <8 x i64> zeroinitializer, i8 %mask)
+ ret <8 x i64> %res
+}
+
+declare <8 x i64> @llvm.x86.avx512.mask.psrl.q(<8 x i64>, <2 x i64>, <8 x i64>, i8) nounwind readnone
+
+define <16 x i32> @test_x86_avx512_psra_d(<16 x i32> %a0, <4 x i32> %a1) {
+ ; CHECK-LABEL: test_x86_avx512_psra_d
+ ; CHECK: vpsrad
+ %res = call <16 x i32> @llvm.x86.avx512.mask.psra.d(<16 x i32> %a0, <4 x i32> %a1, <16 x i32> zeroinitializer, i16 -1)
+ ret <16 x i32> %res
+}
+
+define <16 x i32> @test_x86_avx512_mask_psra_d(<16 x i32> %a0, <4 x i32> %a1, <16 x i32> %a2, i16 %mask) {
+ ; CHECK-LABEL: test_x86_avx512_mask_psra_d
+ ; CHECK: vpsrad %xmm1, %zmm0, %zmm2 {%k1}
+ %res = call <16 x i32> @llvm.x86.avx512.mask.psra.d(<16 x i32> %a0, <4 x i32> %a1, <16 x i32> %a2, i16 %mask)
+ ret <16 x i32> %res
+}
+
+define <16 x i32> @test_x86_avx512_maskz_psra_d(<16 x i32> %a0, <4 x i32> %a1, i16 %mask) {
+ ; CHECK-LABEL: test_x86_avx512_maskz_psra_d
+ ; CHECK: vpsrad %xmm1, %zmm0, %zmm0 {%k1} {z}
+ %res = call <16 x i32> @llvm.x86.avx512.mask.psra.d(<16 x i32> %a0, <4 x i32> %a1, <16 x i32> zeroinitializer, i16 %mask)
+ ret <16 x i32> %res
+}
+
+declare <16 x i32> @llvm.x86.avx512.mask.psra.d(<16 x i32>, <4 x i32>, <16 x i32>, i16) nounwind readnone
+
+define <8 x i64> @test_x86_avx512_psra_q(<8 x i64> %a0, <2 x i64> %a1) {
+ ; CHECK-LABEL: test_x86_avx512_psra_q
+ ; CHECK: vpsraq
+ %res = call <8 x i64> @llvm.x86.avx512.mask.psra.q(<8 x i64> %a0, <2 x i64> %a1, <8 x i64> zeroinitializer, i8 -1)
+ ret <8 x i64> %res
+}
+
+define <8 x i64> @test_x86_avx512_mask_psra_q(<8 x i64> %a0, <2 x i64> %a1, <8 x i64> %a2, i8 %mask) {
+ ; CHECK-LABEL: test_x86_avx512_mask_psra_q
+ ; CHECK: vpsraq %xmm1, %zmm0, %zmm2 {%k1}
+ %res = call <8 x i64> @llvm.x86.avx512.mask.psra.q(<8 x i64> %a0, <2 x i64> %a1, <8 x i64> %a2, i8 %mask)
+ ret <8 x i64> %res
+}
+
+define <8 x i64> @test_x86_avx512_maskz_psra_q(<8 x i64> %a0, <2 x i64> %a1, i8 %mask) {
+ ; CHECK-LABEL: test_x86_avx512_maskz_psra_q
+ ; CHECK: vpsraq %xmm1, %zmm0, %zmm0 {%k1} {z}
+ %res = call <8 x i64> @llvm.x86.avx512.mask.psra.q(<8 x i64> %a0, <2 x i64> %a1, <8 x i64> zeroinitializer, i8 %mask)
+ ret <8 x i64> %res
+}
+
+declare <8 x i64> @llvm.x86.avx512.mask.psra.q(<8 x i64>, <2 x i64>, <8 x i64>, i8) nounwind readnone
+
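+; Unlike the psll/psrl/psra forms above, which shift every element by the
+; scalar count held in an xmm register, the psllv/psrlv/psrav variants below
+; take a full per-element shift-count vector.
+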
+define <16 x i32> @test_x86_avx512_psllv_d(<16 x i32> %a0, <16 x i32> %a1) {
+ ; CHECK-LABEL: test_x86_avx512_psllv_d
+ ; CHECK: vpsllvd
+ %res = call <16 x i32> @llvm.x86.avx512.mask.psllv.d(<16 x i32> %a0, <16 x i32> %a1, <16 x i32> zeroinitializer, i16 -1)
+ ret <16 x i32> %res
+}
+
+define <16 x i32> @test_x86_avx512_mask_psllv_d(<16 x i32> %a0, <16 x i32> %a1, <16 x i32> %a2, i16 %mask) {
+ ; CHECK-LABEL: test_x86_avx512_mask_psllv_d
+ ; CHECK: vpsllvd %zmm1, %zmm0, %zmm2 {%k1}
+ %res = call <16 x i32> @llvm.x86.avx512.mask.psllv.d(<16 x i32> %a0, <16 x i32> %a1, <16 x i32> %a2, i16 %mask)
+ ret <16 x i32> %res
+}
+
+define <16 x i32> @test_x86_avx512_maskz_psllv_d(<16 x i32> %a0, <16 x i32> %a1, i16 %mask) {
+ ; CHECK-LABEL: test_x86_avx512_maskz_psllv_d
+ ; CHECK: vpsllvd %zmm1, %zmm0, %zmm0 {%k1} {z}
+ %res = call <16 x i32> @llvm.x86.avx512.mask.psllv.d(<16 x i32> %a0, <16 x i32> %a1, <16 x i32> zeroinitializer, i16 %mask)
+ ret <16 x i32> %res
+}
+
+declare <16 x i32> @llvm.x86.avx512.mask.psllv.d(<16 x i32>, <16 x i32>, <16 x i32>, i16) nounwind readnone
+
+define <8 x i64> @test_x86_avx512_psllv_q(<8 x i64> %a0, <8 x i64> %a1) {
+ ; CHECK-LABEL: test_x86_avx512_psllv_q
+ ; CHECK: vpsllvq
+ %res = call <8 x i64> @llvm.x86.avx512.mask.psllv.q(<8 x i64> %a0, <8 x i64> %a1, <8 x i64> zeroinitializer, i8 -1)
+ ret <8 x i64> %res
+}
+
+define <8 x i64> @test_x86_avx512_mask_psllv_q(<8 x i64> %a0, <8 x i64> %a1, <8 x i64> %a2, i8 %mask) {
+ ; CHECK-LABEL: test_x86_avx512_mask_psllv_q
+ ; CHECK: vpsllvq %zmm1, %zmm0, %zmm2 {%k1}
+ %res = call <8 x i64> @llvm.x86.avx512.mask.psllv.q(<8 x i64> %a0, <8 x i64> %a1, <8 x i64> %a2, i8 %mask)
+ ret <8 x i64> %res
+}
+
+define <8 x i64> @test_x86_avx512_maskz_psllv_q(<8 x i64> %a0, <8 x i64> %a1, i8 %mask) {
+ ; CHECK-LABEL: test_x86_avx512_maskz_psllv_q
+ ; CHECK: vpsllvq %zmm1, %zmm0, %zmm0 {%k1} {z}
+ %res = call <8 x i64> @llvm.x86.avx512.mask.psllv.q(<8 x i64> %a0, <8 x i64> %a1, <8 x i64> zeroinitializer, i8 %mask)
+ ret <8 x i64> %res
+}
+
+declare <8 x i64> @llvm.x86.avx512.mask.psllv.q(<8 x i64>, <8 x i64>, <8 x i64>, i8) nounwind readnone
+
+
+define <16 x i32> @test_x86_avx512_psrav_d(<16 x i32> %a0, <16 x i32> %a1) {
+ ; CHECK-LABEL: test_x86_avx512_psrav_d
+ ; CHECK: vpsravd
+ %res = call <16 x i32> @llvm.x86.avx512.mask.psrav.d(<16 x i32> %a0, <16 x i32> %a1, <16 x i32> zeroinitializer, i16 -1)
+ ret <16 x i32> %res
+}
+
+define <16 x i32> @test_x86_avx512_mask_psrav_d(<16 x i32> %a0, <16 x i32> %a1, <16 x i32> %a2, i16 %mask) {
+ ; CHECK-LABEL: test_x86_avx512_mask_psrav_d
+ ; CHECK: vpsravd %zmm1, %zmm0, %zmm2 {%k1}
+ %res = call <16 x i32> @llvm.x86.avx512.mask.psrav.d(<16 x i32> %a0, <16 x i32> %a1, <16 x i32> %a2, i16 %mask)
+ ret <16 x i32> %res
+}
+
+define <16 x i32> @test_x86_avx512_maskz_psrav_d(<16 x i32> %a0, <16 x i32> %a1, i16 %mask) {
+ ; CHECK-LABEL: test_x86_avx512_maskz_psrav_d
+ ; CHECK: vpsravd %zmm1, %zmm0, %zmm0 {%k1} {z}
+ %res = call <16 x i32> @llvm.x86.avx512.mask.psrav.d(<16 x i32> %a0, <16 x i32> %a1, <16 x i32> zeroinitializer, i16 %mask)
+ ret <16 x i32> %res
+}
+
+declare <16 x i32> @llvm.x86.avx512.mask.psrav.d(<16 x i32>, <16 x i32>, <16 x i32>, i16) nounwind readnone
+
+define <8 x i64> @test_x86_avx512_psrav_q(<8 x i64> %a0, <8 x i64> %a1) {
+ ; CHECK-LABEL: test_x86_avx512_psrav_q
+ ; CHECK: vpsravq
+ %res = call <8 x i64> @llvm.x86.avx512.mask.psrav.q(<8 x i64> %a0, <8 x i64> %a1, <8 x i64> zeroinitializer, i8 -1)
+ ret <8 x i64> %res
+}
+
+define <8 x i64> @test_x86_avx512_mask_psrav_q(<8 x i64> %a0, <8 x i64> %a1, <8 x i64> %a2, i8 %mask) {
+ ; CHECK-LABEL: test_x86_avx512_mask_psrav_q
+ ; CHECK: vpsravq %zmm1, %zmm0, %zmm2 {%k1}
+ %res = call <8 x i64> @llvm.x86.avx512.mask.psrav.q(<8 x i64> %a0, <8 x i64> %a1, <8 x i64> %a2, i8 %mask)
+ ret <8 x i64> %res
+}
+
+define <8 x i64> @test_x86_avx512_maskz_psrav_q(<8 x i64> %a0, <8 x i64> %a1, i8 %mask) {
+ ; CHECK-LABEL: test_x86_avx512_maskz_psrav_q
+ ; CHECK: vpsravq %zmm1, %zmm0, %zmm0 {%k1} {z}
+ %res = call <8 x i64> @llvm.x86.avx512.mask.psrav.q(<8 x i64> %a0, <8 x i64> %a1, <8 x i64> zeroinitializer, i8 %mask)
+ ret <8 x i64> %res
+}
+
+declare <8 x i64> @llvm.x86.avx512.mask.psrav.q(<8 x i64>, <8 x i64>, <8 x i64>, i8) nounwind readnone
+
+define <16 x i32> @test_x86_avx512_psrlv_d(<16 x i32> %a0, <16 x i32> %a1) {
+ ; CHECK-LABEL: test_x86_avx512_psrlv_d
+ ; CHECK: vpsrlvd
+ %res = call <16 x i32> @llvm.x86.avx512.mask.psrlv.d(<16 x i32> %a0, <16 x i32> %a1, <16 x i32> zeroinitializer, i16 -1)
+ ret <16 x i32> %res
+}
+
+define <16 x i32> @test_x86_avx512_mask_psrlv_d(<16 x i32> %a0, <16 x i32> %a1, <16 x i32> %a2, i16 %mask) {
+ ; CHECK-LABEL: test_x86_avx512_mask_psrlv_d
+ ; CHECK: vpsrlvd %zmm1, %zmm0, %zmm2 {%k1}
+ %res = call <16 x i32> @llvm.x86.avx512.mask.psrlv.d(<16 x i32> %a0, <16 x i32> %a1, <16 x i32> %a2, i16 %mask)
+ ret <16 x i32> %res
+}
+
+define <16 x i32> @test_x86_avx512_maskz_psrlv_d(<16 x i32> %a0, <16 x i32> %a1, i16 %mask) {
+ ; CHECK-LABEL: test_x86_avx512_maskz_psrlv_d
+ ; CHECK: vpsrlvd %zmm1, %zmm0, %zmm0 {%k1} {z}
+ %res = call <16 x i32> @llvm.x86.avx512.mask.psrlv.d(<16 x i32> %a0, <16 x i32> %a1, <16 x i32> zeroinitializer, i16 %mask)
+ ret <16 x i32> %res
+}
+
+declare <16 x i32> @llvm.x86.avx512.mask.psrlv.d(<16 x i32>, <16 x i32>, <16 x i32>, i16) nounwind readnone
+
+define <8 x i64> @test_x86_avx512_psrlv_q(<8 x i64> %a0, <8 x i64> %a1) {
+ ; CHECK-LABEL: test_x86_avx512_psrlv_q
+ ; CHECK: vpsrlvq
+ %res = call <8 x i64> @llvm.x86.avx512.mask.psrlv.q(<8 x i64> %a0, <8 x i64> %a1, <8 x i64> zeroinitializer, i8 -1)
+ ret <8 x i64> %res
+}
+
+define <8 x i64> @test_x86_avx512_mask_psrlv_q(<8 x i64> %a0, <8 x i64> %a1, <8 x i64> %a2, i8 %mask) {
+ ; CHECK-LABEL: test_x86_avx512_mask_psrlv_q
+ ; CHECK: vpsrlvq %zmm1, %zmm0, %zmm2 {%k1}
+ %res = call <8 x i64> @llvm.x86.avx512.mask.psrlv.q(<8 x i64> %a0, <8 x i64> %a1, <8 x i64> %a2, i8 %mask)
+ ret <8 x i64> %res
+}
+
+define <8 x i64> @test_x86_avx512_maskz_psrlv_q(<8 x i64> %a0, <8 x i64> %a1, i8 %mask) {
+ ; CHECK-LABEL: test_x86_avx512_maskz_psrlv_q
+ ; CHECK: vpsrlvq %zmm1, %zmm0, %zmm0 {%k1} {z}
+ %res = call <8 x i64> @llvm.x86.avx512.mask.psrlv.q(<8 x i64> %a0, <8 x i64> %a1, <8 x i64> zeroinitializer, i8 %mask)
+ ret <8 x i64> %res
+}
+
+declare <8 x i64> @llvm.x86.avx512.mask.psrlv.q(<8 x i64>, <8 x i64>, <8 x i64>, i8) nounwind readnone
+
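+; A shift-count vector loaded from memory should fold into the instruction's
+; memory operand rather than being materialized with a separate load.
+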
+define <8 x i64> @test_x86_avx512_psrlv_q_memop(<8 x i64> %a0, <8 x i64>* %ptr) {
+ ; CHECK-LABEL: test_x86_avx512_psrlv_q_memop
+ ; CHECK: vpsrlvq (%
+ %b = load <8 x i64>* %ptr
+ %res = call <8 x i64> @llvm.x86.avx512.mask.psrlv.q(<8 x i64> %a0, <8 x i64> %b, <8 x i64> zeroinitializer, i8 -1)
+ ret <8 x i64> %res
+}
+
+declare <16 x float> @llvm.x86.avx512.mask.sub.ps.512(<16 x float>, <16 x float>, <16 x float>, i16, i32)
+declare <16 x float> @llvm.x86.avx512.mask.mul.ps.512(<16 x float>, <16 x float>, <16 x float>, i16, i32)
+declare <8 x double> @llvm.x86.avx512.mask.mul.pd.512(<8 x double>, <8 x double>, <8 x double>, i8, i32)
+
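+; The trailing i32 operand of these arithmetic intrinsics selects the static
+; rounding mode and maps onto the suffixes checked below:
+; 0 = {rn-sae}, 1 = {rd-sae}, 2 = {ru-sae}, 3 = {rz-sae}.
+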
+define <16 x float> @test_vsubps_rn(<16 x float> %a0, <16 x float> %a1) {
+ ; CHECK-LABEL: test_vsubps_rn
+ ; CHECK: vsubps {rn-sae}{{.*}} ## encoding: [0x62,0xf1,0x7c,0x18,0x5c,0xc1]
+ %res = call <16 x float> @llvm.x86.avx512.mask.sub.ps.512(<16 x float> %a0, <16 x float> %a1,
+ <16 x float> zeroinitializer, i16 -1, i32 0)
+ ret <16 x float> %res
+}
+
+define <16 x float> @test_vsubps_rd(<16 x float> %a0, <16 x float> %a1) {
+ ; CHECK-LABEL: test_vsubps_rd
+ ; CHECK: vsubps {rd-sae}{{.*}} ## encoding: [0x62,0xf1,0x7c,0x38,0x5c,0xc1]
+ %res = call <16 x float> @llvm.x86.avx512.mask.sub.ps.512(<16 x float> %a0, <16 x float> %a1,
+ <16 x float> zeroinitializer, i16 -1, i32 1)
+ ret <16 x float> %res
+}
+
+define <16 x float> @test_vsubps_ru(<16 x float> %a0, <16 x float> %a1) {
+ ; CHECK-LABEL: test_vsubps_ru
+ ; CHECK: vsubps {ru-sae}{{.*}} ## encoding: [0x62,0xf1,0x7c,0x58,0x5c,0xc1]
+ %res = call <16 x float> @llvm.x86.avx512.mask.sub.ps.512(<16 x float> %a0, <16 x float> %a1,
+ <16 x float> zeroinitializer, i16 -1, i32 2)
+ ret <16 x float> %res
+}
+
+define <16 x float> @test_vsubps_rz(<16 x float> %a0, <16 x float> %a1) {
+ ; CHECK-LABEL: test_vsubps_rz
+ ; CHECK: vsubps {rz-sae}{{.*}} ## encoding: [0x62,0xf1,0x7c,0x78,0x5c,0xc1]
+ %res = call <16 x float> @llvm.x86.avx512.mask.sub.ps.512(<16 x float> %a0, <16 x float> %a1,
+ <16 x float> zeroinitializer, i16 -1, i32 3)
+ ret <16 x float> %res
+}
+
+define <16 x float> @test_vmulps_rn(<16 x float> %a0, <16 x float> %a1) {
+ ; CHECK-LABEL: test_vmulps_rn
+ ; CHECK: vmulps {rn-sae}{{.*}} ## encoding: [0x62,0xf1,0x7c,0x18,0x59,0xc1]
+ %res = call <16 x float> @llvm.x86.avx512.mask.mul.ps.512(<16 x float> %a0, <16 x float> %a1,
+ <16 x float> zeroinitializer, i16 -1, i32 0)
+ ret <16 x float> %res
+}
+
+define <16 x float> @test_vmulps_rd(<16 x float> %a0, <16 x float> %a1) {
+ ; CHECK-LABEL: test_vmulps_rd
+ ; CHECK: vmulps {rd-sae}{{.*}} ## encoding: [0x62,0xf1,0x7c,0x38,0x59,0xc1]
+ %res = call <16 x float> @llvm.x86.avx512.mask.mul.ps.512(<16 x float> %a0, <16 x float> %a1,
+ <16 x float> zeroinitializer, i16 -1, i32 1)
+ ret <16 x float> %res
+}
+
+define <16 x float> @test_vmulps_ru(<16 x float> %a0, <16 x float> %a1) {
+ ; CHECK-LABEL: test_vmulps_ru
+ ; CHECK: vmulps {ru-sae}{{.*}} ## encoding: [0x62,0xf1,0x7c,0x58,0x59,0xc1]
+ %res = call <16 x float> @llvm.x86.avx512.mask.mul.ps.512(<16 x float> %a0, <16 x float> %a1,
+ <16 x float> zeroinitializer, i16 -1, i32 2)
+ ret <16 x float> %res
+}
+
+define <16 x float> @test_vmulps_rz(<16 x float> %a0, <16 x float> %a1) {
+ ; CHECK-LABEL: test_vmulps_rz
+ ; CHECK: vmulps {rz-sae}{{.*}} ## encoding: [0x62,0xf1,0x7c,0x78,0x59,0xc1]
+ %res = call <16 x float> @llvm.x86.avx512.mask.mul.ps.512(<16 x float> %a0, <16 x float> %a1,
+ <16 x float> zeroinitializer, i16 -1, i32 3)
+ ret <16 x float> %res
+}
+
+;; mask float
+define <16 x float> @test_vmulps_mask_rn(<16 x float> %a0, <16 x float> %a1, i16 %mask) {
+ ; CHECK-LABEL: test_vmulps_mask_rn
+ ; CHECK: vmulps {rn-sae}{{.*}}{%k1} {z} ## encoding: [0x62,0xf1,0x7c,0x99,0x59,0xc1]
+ %res = call <16 x float> @llvm.x86.avx512.mask.mul.ps.512(<16 x float> %a0, <16 x float> %a1,
+ <16 x float> zeroinitializer, i16 %mask, i32 0)
+ ret <16 x float> %res
+}
+
+define <16 x float> @test_vmulps_mask_rd(<16 x float> %a0, <16 x float> %a1, i16 %mask) {
+ ; CHECK-LABEL: test_vmulps_mask_rd
+ ; CHECK: vmulps {rd-sae}{{.*}}{%k1} {z} ## encoding: [0x62,0xf1,0x7c,0xb9,0x59,0xc1]
+ %res = call <16 x float> @llvm.x86.avx512.mask.mul.ps.512(<16 x float> %a0, <16 x float> %a1,
+ <16 x float> zeroinitializer, i16 %mask, i32 1)
+ ret <16 x float> %res
+}
+
+define <16 x float> @test_vmulps_mask_ru(<16 x float> %a0, <16 x float> %a1, i16 %mask) {
+ ; CHECK-LABEL: test_vmulps_mask_ru
+ ; CHECK: vmulps {ru-sae}{{.*}}{%k1} {z} ## encoding: [0x62,0xf1,0x7c,0xd9,0x59,0xc1]
+ %res = call <16 x float> @llvm.x86.avx512.mask.mul.ps.512(<16 x float> %a0, <16 x float> %a1,
+ <16 x float> zeroinitializer, i16 %mask, i32 2)
+ ret <16 x float> %res
+}
+
+define <16 x float> @test_vmulps_mask_rz(<16 x float> %a0, <16 x float> %a1, i16 %mask) {
+ ; CHECK-LABEL: test_vmulps_mask_rz
+ ; CHECK: vmulps {rz-sae}{{.*}}{%k1} {z} ## encoding: [0x62,0xf1,0x7c,0xf9,0x59,0xc1]
+ %res = call <16 x float> @llvm.x86.avx512.mask.mul.ps.512(<16 x float> %a0, <16 x float> %a1,
+ <16 x float> zeroinitializer, i16 %mask, i32 3)
+ ret <16 x float> %res
+}
+
+;; With Passthru value
+define <16 x float> @test_vmulps_mask_passthru_rn(<16 x float> %a0, <16 x float> %a1, <16 x float> %passthru, i16 %mask) {
+ ; CHECK-LABEL: test_vmulps_mask_passthru_rn
+ ; CHECK: vmulps {rn-sae}{{.*}}{%k1} ## encoding: [0x62,0xf1,0x7c,0x19,0x59,0xd1]
+ %res = call <16 x float> @llvm.x86.avx512.mask.mul.ps.512(<16 x float> %a0, <16 x float> %a1,
+ <16 x float> %passthru, i16 %mask, i32 0)
+ ret <16 x float> %res
+}
+
+define <16 x float> @test_vmulps_mask_passthru_rd(<16 x float> %a0, <16 x float> %a1, <16 x float> %passthru, i16 %mask) {
+ ; CHECK-LABEL: test_vmulps_mask_passthru_rd
+ ; CHECK: vmulps {rd-sae}{{.*}}{%k1} ## encoding: [0x62,0xf1,0x7c,0x39,0x59,0xd1]
+ %res = call <16 x float> @llvm.x86.avx512.mask.mul.ps.512(<16 x float> %a0, <16 x float> %a1,
+ <16 x float> %passthru, i16 %mask, i32 1)
+ ret <16 x float> %res
+}
+
+define <16 x float> @test_vmulps_mask_passthru_ru(<16 x float> %a0, <16 x float> %a1, <16 x float> %passthru, i16 %mask) {
+ ; CHECK-LABEL: test_vmulps_mask_passthru_ru
+ ; CHECK: vmulps {ru-sae}{{.*}}{%k1} ## encoding: [0x62,0xf1,0x7c,0x59,0x59,0xd1]
+ %res = call <16 x float> @llvm.x86.avx512.mask.mul.ps.512(<16 x float> %a0, <16 x float> %a1,
+ <16 x float> %passthru, i16 %mask, i32 2)
+ ret <16 x float> %res
+}
+
+define <16 x float> @test_vmulps_mask_passthru_rz(<16 x float> %a0, <16 x float> %a1, <16 x float> %passthru, i16 %mask) {
+ ; CHECK-LABEL: test_vmulps_mask_passthru_rz
+ ; CHECK: vmulps {rz-sae}{{.*}}{%k1} ## encoding: [0x62,0xf1,0x7c,0x79,0x59,0xd1]
+ %res = call <16 x float> @llvm.x86.avx512.mask.mul.ps.512(<16 x float> %a0, <16 x float> %a1,
+ <16 x float> %passthru, i16 %mask, i32 3)
+ ret <16 x float> %res
+}
+
+;; mask double
+define <8 x double> @test_vmulpd_mask_rn(<8 x double> %a0, <8 x double> %a1, i8 %mask) {
+ ; CHECK-LABEL: test_vmulpd_mask_rn
+ ; CHECK: vmulpd {rn-sae}{{.*}}{%k1} {z} ## encoding: [0x62,0xf1,0xfd,0x99,0x59,0xc1]
+ %res = call <8 x double> @llvm.x86.avx512.mask.mul.pd.512(<8 x double> %a0, <8 x double> %a1,
+ <8 x double> zeroinitializer, i8 %mask, i32 0)
+ ret <8 x double> %res
+}
+
+define <8 x double> @test_vmulpd_mask_rd(<8 x double> %a0, <8 x double> %a1, i8 %mask) {
+ ; CHECK-LABEL: test_vmulpd_mask_rd
+ ; CHECK: vmulpd {rd-sae}{{.*}}{%k1} {z} ## encoding: [0x62,0xf1,0xfd,0xb9,0x59,0xc1]
+ %res = call <8 x double> @llvm.x86.avx512.mask.mul.pd.512(<8 x double> %a0, <8 x double> %a1,
+ <8 x double> zeroinitializer, i8 %mask, i32 1)
+ ret <8 x double> %res
+}
+
+define <8 x double> @test_vmulpd_mask_ru(<8 x double> %a0, <8 x double> %a1, i8 %mask) {
+ ; CHECK-LABEL: test_vmulpd_mask_ru
+ ; CHECK: vmulpd {ru-sae}{{.*}}{%k1} {z} ## encoding: [0x62,0xf1,0xfd,0xd9,0x59,0xc1]
+ %res = call <8 x double> @llvm.x86.avx512.mask.mul.pd.512(<8 x double> %a0, <8 x double> %a1,
+ <8 x double> zeroinitializer, i8 %mask, i32 2)
+ ret <8 x double> %res
+}
+
+define <8 x double> @test_vmulpd_mask_rz(<8 x double> %a0, <8 x double> %a1, i8 %mask) {
+ ; CHECK-LABEL: test_vmulpd_mask_rz
+ ; CHECK: vmulpd {rz-sae}{{.*}}{%k1} {z} ## encoding: [0x62,0xf1,0xfd,0xf9,0x59,0xc1]
+ %res = call <8 x double> @llvm.x86.avx512.mask.mul.pd.512(<8 x double> %a0, <8 x double> %a1,
+ <8 x double> zeroinitializer, i8 %mask, i32 3)
+ ret <8 x double> %res
+}
diff --git a/test/CodeGen/X86/avx512-logic.ll b/test/CodeGen/X86/avx512-logic.ll
new file mode 100644
index 0000000..bee4f52
--- /dev/null
+++ b/test/CodeGen/X86/avx512-logic.ll
@@ -0,0 +1,101 @@
+; RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=knl | FileCheck %s
+
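+; Bitwise and/or/xor selection on 512-bit integer vectors, including the
+; broadcast ({1to8}) and memory-folded operand forms.
+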
+; CHECK-LABEL: vpandd
+; CHECK: vpandd %zmm
+; CHECK: ret
+define <16 x i32> @vpandd(<16 x i32> %a, <16 x i32> %b) nounwind uwtable readnone ssp {
+entry:
+ ; Force the execution domain with an add.
+ %a2 = add <16 x i32> %a, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1,
+ i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
+ %x = and <16 x i32> %a2, %b
+ ret <16 x i32> %x
+}
+
+; CHECK-LABEL: vpord
+; CHECK: vpord %zmm
+; CHECK: ret
+define <16 x i32> @vpord(<16 x i32> %a, <16 x i32> %b) nounwind uwtable readnone ssp {
+entry:
+ ; Force the execution domain with an add.
+ %a2 = add <16 x i32> %a, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1,
+ i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
+ %x = or <16 x i32> %a2, %b
+ ret <16 x i32> %x
+}
+
+; CHECK-LABEL: vpxord
+; CHECK: vpxord %zmm
+; CHECK: ret
+define <16 x i32> @vpxord(<16 x i32> %a, <16 x i32> %b) nounwind uwtable readnone ssp {
+entry:
+ ; Force the execution domain with an add.
+ %a2 = add <16 x i32> %a, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1,
+ i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
+ %x = xor <16 x i32> %a2, %b
+ ret <16 x i32> %x
+}
+
+; CHECK-LABEL: vpandq
+; CHECK: vpandq %zmm
+; CHECK: ret
+define <8 x i64> @vpandq(<8 x i64> %a, <8 x i64> %b) nounwind uwtable readnone ssp {
+entry:
+ ; Force the execution domain with an add.
+ %a2 = add <8 x i64> %a, <i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1>
+ %x = and <8 x i64> %a2, %b
+ ret <8 x i64> %x
+}
+
+; CHECK-LABEL: vporq
+; CHECK: vporq %zmm
+; CHECK: ret
+define <8 x i64> @vporq(<8 x i64> %a, <8 x i64> %b) nounwind uwtable readnone ssp {
+entry:
+ ; Force the execution domain with an add.
+ %a2 = add <8 x i64> %a, <i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1>
+ %x = or <8 x i64> %a2, %b
+ ret <8 x i64> %x
+}
+
+; CHECK-LABEL: vpxorq
+; CHECK: vpxorq %zmm
+; CHECK: ret
+define <8 x i64> @vpxorq(<8 x i64> %a, <8 x i64> %b) nounwind uwtable readnone ssp {
+entry:
+ ; Force the execution domain with an add.
+ %a2 = add <8 x i64> %a, <i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1>
+ %x = xor <8 x i64> %a2, %b
+ ret <8 x i64> %x
+}
+
+
+; CHECK-LABEL: orq_broadcast
+; CHECK: vporq LCP{{.*}}(%rip){1to8}, %zmm0, %zmm0
+; CHECK: ret
+define <8 x i64> @orq_broadcast(<8 x i64> %a) nounwind {
+ %b = or <8 x i64> %a, <i64 2, i64 2, i64 2, i64 2, i64 2, i64 2, i64 2, i64 2>
+ ret <8 x i64> %b
+}
+
+; CHECK-LABEL: andd512fold
+; CHECK: vpandd (%
+; CHECK: ret
+define <16 x i32> @andd512fold(<16 x i32> %y, <16 x i32>* %x) {
+entry:
+ %a = load <16 x i32>* %x, align 4
+ %b = and <16 x i32> %y, %a
+ ret <16 x i32> %b
+}
+
+; CHECK-LABEL: andqbrst
+; CHECK: vpandq (%rdi){1to8}, %zmm
+; CHECK: ret
+define <8 x i64> @andqbrst(<8 x i64> %p1, i64* %ap) {
+entry:
+ %a = load i64* %ap, align 8
+ %b = insertelement <8 x i64> undef, i64 %a, i32 0
+ %c = shufflevector <8 x i64> %b, <8 x i64> undef, <8 x i32> zeroinitializer
+ %d = and <8 x i64> %p1, %c
+ ret <8 x i64>%d
+}
diff --git a/test/CodeGen/X86/avx512-mask-op.ll b/test/CodeGen/X86/avx512-mask-op.ll
index 35d3348..264d915 100644
--- a/test/CodeGen/X86/avx512-mask-op.ll
+++ b/test/CodeGen/X86/avx512-mask-op.ll
@@ -1,28 +1,37 @@
-; RUN: llc < %s -march=x86-64 -mtriple=x86_64-apple-darwin -mcpu=knl | FileCheck %s
+; RUN: llc < %s -march=x86-64 -mtriple=x86_64-apple-darwin -mcpu=knl | FileCheck %s --check-prefix=KNL --check-prefix=CHECK
+; RUN: llc < %s -march=x86-64 -mtriple=x86_64-apple-darwin -mcpu=skx | FileCheck %s --check-prefix=SKX --check-prefix=CHECK
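+
+; Shared behavior is checked under the common CHECK prefix; the KNL and SKX
+; prefixes capture where Skylake-AVX512's byte-granular mask instructions
+; (kmovb, knotb) replace the word-sized kmovw/knotw forms used on Knights
+; Landing.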
+; CHECK-LABEL: mask16
+; CHECK: kmovw
+; CHECK-NEXT: knotw
+; CHECK-NEXT: kmovw
define i16 @mask16(i16 %x) {
%m0 = bitcast i16 %x to <16 x i1>
%m1 = xor <16 x i1> %m0, <i1 -1, i1 -1, i1 -1, i1 -1, i1 -1, i1 -1, i1 -1, i1 -1, i1 -1, i1 -1, i1 -1, i1 -1, i1 -1, i1 -1, i1 -1, i1 -1>
%ret = bitcast <16 x i1> %m1 to i16
ret i16 %ret
-; CHECK-LABEL: mask16
-; CHECK: kmovw
-; CHECK-NEXT: knotw
-; CHECK-NEXT: kmovw
-; CHECK: ret
}
+; CHECK-LABEL: mask8
+; KNL: kmovw
+; KNL-NEXT: knotw
+; KNL-NEXT: kmovw
+; SKX: kmovb
+; SKX-NEXT: knotb
+; SKX-NEXT: kmovb
+
define i8 @mask8(i8 %x) {
%m0 = bitcast i8 %x to <8 x i1>
%m1 = xor <8 x i1> %m0, <i1 -1, i1 -1, i1 -1, i1 -1, i1 -1, i1 -1, i1 -1, i1 -1>
%ret = bitcast <8 x i1> %m1 to i8
ret i8 %ret
-; CHECK-LABEL: mask8
-; CHECK: kmovw
+}
+
+; CHECK-LABEL: mask16_mem
+; CHECK: kmovw ([[ARG1:%rdi|%rcx]]), %k{{[0-7]}}
; CHECK-NEXT: knotw
-; CHECK-NEXT: kmovw
+; CHECK-NEXT: kmovw %k{{[0-7]}}, ([[ARG1]])
; CHECK: ret
-}
define void @mask16_mem(i16* %ptr) {
%x = load i16* %ptr, align 4
@@ -31,13 +40,16 @@ define void @mask16_mem(i16* %ptr) {
%ret = bitcast <16 x i1> %m1 to i16
store i16 %ret, i16* %ptr, align 4
ret void
-; CHECK-LABEL: mask16_mem
-; CHECK: kmovw ([[ARG1:%rdi|%rcx]]), %k{{[0-7]}}
-; CHECK-NEXT: knotw
-; CHECK-NEXT: kmovw %k{{[0-7]}}, ([[ARG1]])
-; CHECK: ret
}
+; CHECK-LABEL: mask8_mem
+; KNL: kmovw ([[ARG1]]), %k{{[0-7]}}
+; KNL-NEXT: knotw
+; KNL-NEXT: kmovw %k{{[0-7]}}, ([[ARG1]])
+; SKX: kmovb ([[ARG1]]), %k{{[0-7]}}
+; SKX-NEXT: knotb
+; SKX-NEXT: kmovb %k{{[0-7]}}, ([[ARG1]])
+
define void @mask8_mem(i8* %ptr) {
%x = load i8* %ptr, align 4
%m0 = bitcast i8 %x to <8 x i1>
@@ -45,13 +57,12 @@ define void @mask8_mem(i8* %ptr) {
%ret = bitcast <8 x i1> %m1 to i8
store i8 %ret, i8* %ptr, align 4
ret void
-; CHECK-LABEL: mask8_mem
-; CHECK: kmovw ([[ARG1]]), %k{{[0-7]}}
-; CHECK-NEXT: knotw
-; CHECK-NEXT: kmovw %k{{[0-7]}}, ([[ARG1]])
-; CHECK: ret
}
+; CHECK-LABEL: mand16
+; CHECK: kandw
+; CHECK: kxorw
+; CHECK: korw
define i16 @mand16(i16 %x, i16 %y) {
%ma = bitcast i16 %x to <16 x i1>
%mb = bitcast i16 %y to <16 x i1>
@@ -59,15 +70,11 @@ define i16 @mand16(i16 %x, i16 %y) {
%md = xor <16 x i1> %ma, %mb
%me = or <16 x i1> %mc, %md
%ret = bitcast <16 x i1> %me to i16
-; CHECK: kandw
-; CHECK: kxorw
-; CHECK: korw
ret i16 %ret
}
-; CHECK: shuf_test1
+; CHECK-LABEL: shuf_test1
; CHECK: kshiftrw $8
-; CHECK:ret
define i8 @shuf_test1(i16 %v) nounwind {
%v1 = bitcast i16 %v to <16 x i1>
%mask = shufflevector <16 x i1> %v1, <16 x i1> undef, <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
@@ -75,11 +82,11 @@ define i8 @shuf_test1(i16 %v) nounwind {
ret i8 %mask1
}
-; CHECK: zext_test1
+; CHECK-LABEL: zext_test1
; CHECK: kshiftlw
; CHECK: kshiftrw
; CHECK: kmovw
-; CHECK:ret
+
define i32 @zext_test1(<16 x i32> %a, <16 x i32> %b) {
%cmp_res = icmp ugt <16 x i32> %a, %b
%cmp_res.i1 = extractelement <16 x i1> %cmp_res, i32 5
@@ -87,11 +94,11 @@ define i32 @zext_test1(<16 x i32> %a, <16 x i32> %b) {
ret i32 %res
}
-; CHECK: zext_test2
+; CHECK-LABEL: zext_test2
; CHECK: kshiftlw
; CHECK: kshiftrw
; CHECK: kmovw
-; CHECK:ret
+
define i16 @zext_test2(<16 x i32> %a, <16 x i32> %b) {
%cmp_res = icmp ugt <16 x i32> %a, %b
%cmp_res.i1 = extractelement <16 x i1> %cmp_res, i32 5
@@ -99,14 +106,29 @@ define i16 @zext_test2(<16 x i32> %a, <16 x i32> %b) {
ret i16 %res
}
-; CHECK: zext_test3
+; CHECK-LABEL: zext_test3
; CHECK: kshiftlw
; CHECK: kshiftrw
; CHECK: kmovw
-; CHECK:ret
+
define i8 @zext_test3(<16 x i32> %a, <16 x i32> %b) {
%cmp_res = icmp ugt <16 x i32> %a, %b
%cmp_res.i1 = extractelement <16 x i1> %cmp_res, i32 5
%res = zext i1 %cmp_res.i1 to i8
ret i8 %res
}
+
+; CHECK-LABEL: conv1
+; KNL: kmovw %k0, %eax
+; KNL: movb %al, (%rdi)
+; SKX: kmovb %k0, (%rdi)
+define i8 @conv1(<8 x i1>* %R) {
+entry:
+ store <8 x i1> <i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1>, <8 x i1>* %R
+
+ %maskPtr = alloca <8 x i1>
+ store <8 x i1> <i1 0, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1>, <8 x i1>* %maskPtr
+ %mask = load <8 x i1>* %maskPtr
+ %mask_convert = bitcast <8 x i1> %mask to i8
+ ret i8 %mask_convert
+}
\ No newline at end of file
diff --git a/test/CodeGen/X86/avx512-nontemporal.ll b/test/CodeGen/X86/avx512-nontemporal.ll
index ef50cdb..bf57d02 100644
--- a/test/CodeGen/X86/avx512-nontemporal.ll
+++ b/test/CodeGen/X86/avx512-nontemporal.ll
@@ -16,4 +16,4 @@ define void @f(<16 x float> %A, <16 x float> %AA, i8* %B, <8 x double> %C, <8 x
ret void
}
-!0 = metadata !{i32 1}
+!0 = !{i32 1}
diff --git a/test/CodeGen/X86/avx512-round.ll b/test/CodeGen/X86/avx512-round.ll
new file mode 100644
index 0000000..ffeb2a8
--- /dev/null
+++ b/test/CodeGen/X86/avx512-round.ll
@@ -0,0 +1,106 @@
+; RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=knl --show-mc-encoding | FileCheck %s
+
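+; llvm.floor/ceil/trunc/rint/nearbyint all lower to vrndscale*; the immediate
+; encodes the rounding control: 1 = round down (floor), 2 = round up (ceil),
+; 3 = truncate, 4 = use the current MXCSR mode (rint), and 12 additionally
+; sets the suppress-precision-exception bit (nearbyint).
+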
+define <16 x float> @floor_v16f32(<16 x float> %a) {
+; CHECK-LABEL: floor_v16f32
+; CHECK: vrndscaleps $1, {{.*}}encoding: [0x62,0xf3,0x7d,0x48,0x08,0xc0,0x01]
+ %res = call <16 x float> @llvm.floor.v16f32(<16 x float> %a)
+ ret <16 x float> %res
+}
+declare <16 x float> @llvm.floor.v16f32(<16 x float> %p)
+
+define <8 x double> @floor_v8f64(<8 x double> %a) {
+; CHECK-LABEL: floor_v8f64
+; CHECK: vrndscalepd $1, {{.*}}encoding: [0x62,0xf3,0xfd,0x48,0x09,0xc0,0x01]
+ %res = call <8 x double> @llvm.floor.v8f64(<8 x double> %a)
+ ret <8 x double> %res
+}
+declare <8 x double> @llvm.floor.v8f64(<8 x double> %p)
+
+define <16 x float> @ceil_v16f32(<16 x float> %a) {
+; CHECK-LABEL: ceil_v16f32
+; CHECK: vrndscaleps $2, {{.*}}encoding: [0x62,0xf3,0x7d,0x48,0x08,0xc0,0x02]
+ %res = call <16 x float> @llvm.ceil.v16f32(<16 x float> %a)
+ ret <16 x float> %res
+}
+declare <16 x float> @llvm.ceil.v16f32(<16 x float> %p)
+
+define <8 x double> @ceil_v8f64(<8 x double> %a) {
+; CHECK-LABEL: ceil_v8f64
+; CHECK: vrndscalepd $2, {{.*}}encoding: [0x62,0xf3,0xfd,0x48,0x09,0xc0,0x02]
+ %res = call <8 x double> @llvm.ceil.v8f64(<8 x double> %a)
+ ret <8 x double> %res
+}
+declare <8 x double> @llvm.ceil.v8f64(<8 x double> %p)
+
+define <16 x float> @trunc_v16f32(<16 x float> %a) {
+; CHECK-LABEL: trunc_v16f32
+; CHECK: vrndscaleps $3, {{.*}}encoding: [0x62,0xf3,0x7d,0x48,0x08,0xc0,0x03]
+ %res = call <16 x float> @llvm.trunc.v16f32(<16 x float> %a)
+ ret <16 x float> %res
+}
+declare <16 x float> @llvm.trunc.v16f32(<16 x float> %p)
+
+define <8 x double> @trunc_v8f64(<8 x double> %a) {
+; CHECK-LABEL: trunc_v8f64
+; CHECK: vrndscalepd $3, {{.*}}encoding: [0x62,0xf3,0xfd,0x48,0x09,0xc0,0x03]
+ %res = call <8 x double> @llvm.trunc.v8f64(<8 x double> %a)
+ ret <8 x double> %res
+}
+declare <8 x double> @llvm.trunc.v8f64(<8 x double> %p)
+
+define <16 x float> @rint_v16f32(<16 x float> %a) {
+; CHECK-LABEL: rint_v16f32
+; CHECK: vrndscaleps $4, {{.*}}encoding: [0x62,0xf3,0x7d,0x48,0x08,0xc0,0x04]
+ %res = call <16 x float> @llvm.rint.v16f32(<16 x float> %a)
+ ret <16 x float> %res
+}
+declare <16 x float> @llvm.rint.v16f32(<16 x float> %p)
+
+define <8 x double> @rint_v8f64(<8 x double> %a) {
+; CHECK-LABEL: rint_v8f64
+; CHECK: vrndscalepd $4, {{.*}}encoding: [0x62,0xf3,0xfd,0x48,0x09,0xc0,0x04]
+ %res = call <8 x double> @llvm.rint.v8f64(<8 x double> %a)
+ ret <8 x double> %res
+}
+declare <8 x double> @llvm.rint.v8f64(<8 x double> %p)
+
+define <16 x float> @nearbyint_v16f32(<16 x float> %a) {
+; CHECK-LABEL: nearbyint_v16f32
+; CHECK: vrndscaleps $12, {{.*}}encoding: [0x62,0xf3,0x7d,0x48,0x08,0xc0,0x0c]
+ %res = call <16 x float> @llvm.nearbyint.v16f32(<16 x float> %a)
+ ret <16 x float> %res
+}
+declare <16 x float> @llvm.nearbyint.v16f32(<16 x float> %p)
+
+define <8 x double> @nearbyint_v8f64(<8 x double> %a) {
+; CHECK-LABEL: nearbyint_v8f64
+; CHECK: vrndscalepd $12, {{.*}}encoding: [0x62,0xf3,0xfd,0x48,0x09,0xc0,0x0c]
+ %res = call <8 x double> @llvm.nearbyint.v8f64(<8 x double> %a)
+ ret <8 x double> %res
+}
+declare <8 x double> @llvm.nearbyint.v8f64(<8 x double> %p)
+
+define double @nearbyint_f64(double %a) {
+; CHECK-LABEL: nearbyint_f64
+; CHECK: vrndscalesd $12, {{.*}}encoding: [0x62,0xf3,0xfd,0x08,0x0b,0xc0,0x0c]
+ %res = call double @llvm.nearbyint.f64(double %a)
+ ret double %res
+}
+declare double @llvm.nearbyint.f64(double %p)
+
+define float @floor_f32(float %a) {
+; CHECK-LABEL: floor_f32
+; CHECK: vrndscaless $1, {{.*}}encoding: [0x62,0xf3,0x7d,0x08,0x0a,0xc0,0x01]
+ %res = call float @llvm.floor.f32(float %a)
+ ret float %res
+}
+declare float @llvm.floor.f32(float %p)
+
+define float @floor_f32m(float* %aptr) {
+; CHECK-LABEL: floor_f32m
+; CHECK: vrndscaless $1, (%rdi), {{.*}}encoding: [0x62,0xf3,0x7d,0x08,0x0a,0x07,0x01]
+ %a = load float* %aptr, align 4
+ %res = call float @llvm.floor.f32(float %a)
+ ret float %res
+}
+
diff --git a/test/CodeGen/X86/avx512-vbroadcast.ll b/test/CodeGen/X86/avx512-vbroadcast.ll
index 0b0e0fc..5bb8233 100644
--- a/test/CodeGen/X86/avx512-vbroadcast.ll
+++ b/test/CodeGen/X86/avx512-vbroadcast.ll
@@ -20,6 +20,14 @@ define <8 x i64> @_inreg8xi64(i64 %a) {
ret <8 x i64> %c
}
+;CHECK-LABEL: _ss16xfloat_v4
+;CHECK: vbroadcastss %xmm0, %zmm0
+;CHECK: ret
+define <16 x float> @_ss16xfloat_v4(<4 x float> %a) {
+ %b = shufflevector <4 x float> %a, <4 x float> undef, <16 x i32> zeroinitializer
+ ret <16 x float> %b
+}
+
define <16 x float> @_inreg16xfloat(float %a) {
; CHECK-LABEL: _inreg16xfloat:
; CHECK: ## BB#0:
@@ -30,6 +38,62 @@ define <16 x float> @_inreg16xfloat(float %a) {
ret <16 x float> %c
}
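+
+; The insertelement-into-undef plus all-zero shufflevector sequence below is
+; the canonical IR splat; combined with a select on a live mask it should
+; fold into a single masked vbroadcastss/vbroadcastsd.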
+;CHECK-LABEL: _ss16xfloat_mask:
+;CHECK: vbroadcastss %xmm0, %zmm1 {%k1}
+;CHECK: ret
+define <16 x float> @_ss16xfloat_mask(float %a, <16 x float> %i, <16 x i32> %mask1) {
+ %mask = icmp ne <16 x i32> %mask1, zeroinitializer
+ %b = insertelement <16 x float> undef, float %a, i32 0
+ %c = shufflevector <16 x float> %b, <16 x float> undef, <16 x i32> zeroinitializer
+ %r = select <16 x i1> %mask, <16 x float> %c, <16 x float> %i
+ ret <16 x float> %r
+}
+
+;CHECK-LABEL: _ss16xfloat_maskz:
+;CHECK: vbroadcastss %xmm0, %zmm0 {%k1} {z}
+;CHECK: ret
+define <16 x float> @_ss16xfloat_maskz(float %a, <16 x i32> %mask1) {
+ %mask = icmp ne <16 x i32> %mask1, zeroinitializer
+ %b = insertelement <16 x float> undef, float %a, i32 0
+ %c = shufflevector <16 x float> %b, <16 x float> undef, <16 x i32> zeroinitializer
+ %r = select <16 x i1> %mask, <16 x float> %c, <16 x float> zeroinitializer
+ ret <16 x float> %r
+}
+
+;CHECK-LABEL: _ss16xfloat_load:
+;CHECK: vbroadcastss (%{{.*}}, %zmm
+;CHECK: ret
+define <16 x float> @_ss16xfloat_load(float* %a.ptr) {
+ %a = load float* %a.ptr
+ %b = insertelement <16 x float> undef, float %a, i32 0
+ %c = shufflevector <16 x float> %b, <16 x float> undef, <16 x i32> zeroinitializer
+ ret <16 x float> %c
+}
+
+;CHECK-LABEL: _ss16xfloat_mask_load:
+;CHECK: vbroadcastss (%rdi), %zmm0 {%k1}
+;CHECK: ret
+define <16 x float> @_ss16xfloat_mask_load(float* %a.ptr, <16 x float> %i, <16 x i32> %mask1) {
+ %a = load float* %a.ptr
+ %mask = icmp ne <16 x i32> %mask1, zeroinitializer
+ %b = insertelement <16 x float> undef, float %a, i32 0
+ %c = shufflevector <16 x float> %b, <16 x float> undef, <16 x i32> zeroinitializer
+ %r = select <16 x i1> %mask, <16 x float> %c, <16 x float> %i
+ ret <16 x float> %r
+}
+
+;CHECK-LABEL: _ss16xfloat_maskz_load:
+;CHECK: vbroadcastss (%rdi), %zmm0 {%k1} {z}
+;CHECK: ret
+define <16 x float> @_ss16xfloat_maskz_load(float* %a.ptr, <16 x i32> %mask1) {
+ %a = load float* %a.ptr
+ %mask = icmp ne <16 x i32> %mask1, zeroinitializer
+ %b = insertelement <16 x float> undef, float %a, i32 0
+ %c = shufflevector <16 x float> %b, <16 x float> undef, <16 x i32> zeroinitializer
+ %r = select <16 x i1> %mask, <16 x float> %c, <16 x float> zeroinitializer
+ ret <16 x float> %r
+}
+
define <8 x double> @_inreg8xdouble(double %a) {
; CHECK-LABEL: _inreg8xdouble:
; CHECK: ## BB#0:
@@ -40,6 +104,62 @@ define <8 x double> @_inreg8xdouble(double %a) {
ret <8 x double> %c
}
+;CHECK-LABEL: _sd8xdouble_mask:
+;CHECK: vbroadcastsd %xmm0, %zmm1 {%k1}
+;CHECK: ret
+define <8 x double> @_sd8xdouble_mask(double %a, <8 x double> %i, <8 x i32> %mask1) {
+ %mask = icmp ne <8 x i32> %mask1, zeroinitializer
+ %b = insertelement <8 x double> undef, double %a, i32 0
+ %c = shufflevector <8 x double> %b, <8 x double> undef, <8 x i32> zeroinitializer
+ %r = select <8 x i1> %mask, <8 x double> %c, <8 x double> %i
+ ret <8 x double> %r
+}
+
+;CHECK-LABEL: _sd8xdouble_maskz:
+;CHECK: vbroadcastsd %xmm0, %zmm0 {%k1} {z}
+;CHECK: ret
+define <8 x double> @_sd8xdouble_maskz(double %a, <8 x i32> %mask1) {
+ %mask = icmp ne <8 x i32> %mask1, zeroinitializer
+ %b = insertelement <8 x double> undef, double %a, i32 0
+ %c = shufflevector <8 x double> %b, <8 x double> undef, <8 x i32> zeroinitializer
+ %r = select <8 x i1> %mask, <8 x double> %c, <8 x double> zeroinitializer
+ ret <8 x double> %r
+}
+
+;CHECK-LABEL: _sd8xdouble_load:
+;CHECK: vbroadcastsd (%rdi), %zmm
+;CHECK: ret
+define <8 x double> @_sd8xdouble_load(double* %a.ptr) {
+ %a = load double* %a.ptr
+ %b = insertelement <8 x double> undef, double %a, i32 0
+ %c = shufflevector <8 x double> %b, <8 x double> undef, <8 x i32> zeroinitializer
+ ret <8 x double> %c
+}
+
+;CHECK-LABEL: _sd8xdouble_mask_load:
+;CHECK: vbroadcastsd (%rdi), %zmm0 {%k1}
+;CHECK: ret
+define <8 x double> @_sd8xdouble_mask_load(double* %a.ptr, <8 x double> %i, <8 x i32> %mask1) {
+ %a = load double* %a.ptr
+ %mask = icmp ne <8 x i32> %mask1, zeroinitializer
+ %b = insertelement <8 x double> undef, double %a, i32 0
+ %c = shufflevector <8 x double> %b, <8 x double> undef, <8 x i32> zeroinitializer
+ %r = select <8 x i1> %mask, <8 x double> %c, <8 x double> %i
+ ret <8 x double> %r
+}
+
+define <8 x double> @_sd8xdouble_maskz_load(double* %a.ptr, <8 x i32> %mask1) {
+; CHECK-LABEL: _sd8xdouble_maskz_load:
+; CHECK: vbroadcastsd (%rdi), %zmm0 {%k1} {z}
+; CHECK: ret
+ %a = load double* %a.ptr
+ %mask = icmp ne <8 x i32> %mask1, zeroinitializer
+ %b = insertelement <8 x double> undef, double %a, i32 0
+ %c = shufflevector <8 x double> %b, <8 x double> undef, <8 x i32> zeroinitializer
+ %r = select <8 x i1> %mask, <8 x double> %c, <8 x double> zeroinitializer
+ ret <8 x double> %r
+}
+
define <16 x i32> @_xmm16xi32(<16 x i32> %a) {
; CHECK-LABEL: _xmm16xi32:
; CHECK: ## BB#0:
diff --git a/test/CodeGen/X86/avx512-vec-cmp.ll b/test/CodeGen/X86/avx512-vec-cmp.ll
index c71e60e..b16f5c9 100644
--- a/test/CodeGen/X86/avx512-vec-cmp.ll
+++ b/test/CodeGen/X86/avx512-vec-cmp.ll
@@ -37,15 +37,15 @@ define <16 x i32> @test3(<16 x i32> %x, <16 x i32> %x1, <16 x i32>* %yp) nounwin
ret <16 x i32> %max
}
-define <16 x i32> @test4_unsigned(<16 x i32> %x, <16 x i32> %y) nounwind {
+define <16 x i32> @test4_unsigned(<16 x i32> %x, <16 x i32> %y, <16 x i32> %x1) nounwind {
; CHECK-LABEL: test4_unsigned:
; CHECK: ## BB#0:
; CHECK-NEXT: vpcmpnltud %zmm1, %zmm0, %k1
-; CHECK-NEXT: vmovdqa32 %zmm0, %zmm1 {%k1}
+; CHECK-NEXT: vmovdqa32 %zmm2, %zmm1 {%k1}
; CHECK-NEXT: vmovaps %zmm1, %zmm0
; CHECK-NEXT: retq
%mask = icmp uge <16 x i32> %x, %y
- %max = select <16 x i1> %mask, <16 x i32> %x, <16 x i32> %y
+ %max = select <16 x i1> %mask, <16 x i32> %x1, <16 x i32> %y
ret <16 x i32> %max
}
@@ -61,15 +61,15 @@ define <8 x i64> @test5(<8 x i64> %x, <8 x i64> %y) nounwind {
ret <8 x i64> %max
}
-define <8 x i64> @test6_unsigned(<8 x i64> %x, <8 x i64> %y) nounwind {
+define <8 x i64> @test6_unsigned(<8 x i64> %x, <8 x i64> %y, <8 x i64> %x1) nounwind {
; CHECK-LABEL: test6_unsigned:
; CHECK: ## BB#0:
; CHECK-NEXT: vpcmpnleuq %zmm1, %zmm0, %k1
-; CHECK-NEXT: vmovdqa64 %zmm0, %zmm1 {%k1}
+; CHECK-NEXT: vmovdqa64 %zmm2, %zmm1 {%k1}
; CHECK-NEXT: vmovaps %zmm1, %zmm0
; CHECK-NEXT: retq
%mask = icmp ugt <8 x i64> %x, %y
- %max = select <8 x i1> %mask, <8 x i64> %x, <8 x i64> %y
+ %max = select <8 x i1> %mask, <8 x i64> %x1, <8 x i64> %y
ret <8 x i64> %max
}
@@ -196,15 +196,15 @@ define <8 x i64> @test15(<8 x i64>%a, <8 x i64>%b) {
ret <8 x i64>%res
}
-define <16 x i32> @test16(<16 x i32> %x, <16 x i32> %y) nounwind {
+define <16 x i32> @test16(<16 x i32> %x, <16 x i32> %y, <16 x i32> %x1) nounwind {
; CHECK-LABEL: test16:
; CHECK: ## BB#0:
; CHECK-NEXT: vpcmpled %zmm0, %zmm1, %k1
-; CHECK-NEXT: vmovdqa32 %zmm0, %zmm1 {%k1}
+; CHECK-NEXT: vmovdqa32 %zmm2, %zmm1 {%k1}
; CHECK-NEXT: vmovaps %zmm1, %zmm0
; CHECK-NEXT: retq
%mask = icmp sge <16 x i32> %x, %y
- %max = select <16 x i1> %mask, <16 x i32> %x, <16 x i32> %y
+ %max = select <16 x i1> %mask, <16 x i32> %x1, <16 x i32> %y
ret <16 x i32> %max
}
diff --git a/test/CodeGen/X86/avx512bw-arith.ll b/test/CodeGen/X86/avx512bw-arith.ll
new file mode 100644
index 0000000..94f68a2
--- /dev/null
+++ b/test/CodeGen/X86/avx512bw-arith.ll
@@ -0,0 +1,102 @@
+; RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=knl -mattr=+avx512bw | FileCheck %s
+
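+; AVX-512BW extends byte and word integer arithmetic to the full 512-bit
+; registers; masked ({%k1}), zeroing ({z}), and memory-folded forms are
+; covered alongside the plain register-register variants.
+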
+; CHECK-LABEL: vpaddb512_test
+; CHECK: vpaddb %zmm{{.*}}
+; CHECK: ret
+define <64 x i8> @vpaddb512_test(<64 x i8> %i, <64 x i8> %j) nounwind readnone {
+ %x = add <64 x i8> %i, %j
+ ret <64 x i8> %x
+}
+
+; CHECK-LABEL: vpaddb512_fold_test
+; CHECK: vpaddb (%rdi), %zmm{{.*}}
+; CHECK: ret
+define <64 x i8> @vpaddb512_fold_test(<64 x i8> %i, <64 x i8>* %j) nounwind {
+ %tmp = load <64 x i8>* %j, align 4
+ %x = add <64 x i8> %i, %tmp
+ ret <64 x i8> %x
+}
+
+; CHECK-LABEL: vpaddw512_test
+; CHECK: vpaddw %zmm{{.*}}
+; CHECK: ret
+define <32 x i16> @vpaddw512_test(<32 x i16> %i, <32 x i16> %j) nounwind readnone {
+ %x = add <32 x i16> %i, %j
+ ret <32 x i16> %x
+}
+
+; CHECK-LABEL: vpaddw512_fold_test
+; CHECK: vpaddw (%rdi), %zmm{{.*}}
+; CHECK: ret
+define <32 x i16> @vpaddw512_fold_test(<32 x i16> %i, <32 x i16>* %j) nounwind {
+ %tmp = load <32 x i16>* %j, align 4
+ %x = add <32 x i16> %i, %tmp
+ ret <32 x i16> %x
+}
+
+; CHECK-LABEL: vpaddw512_mask_test
+; CHECK: vpaddw %zmm{{.*%k[1-7].*}}
+; CHECK: ret
+define <32 x i16> @vpaddw512_mask_test(<32 x i16> %i, <32 x i16> %j, <32 x i16> %mask1) nounwind readnone {
+ %mask = icmp ne <32 x i16> %mask1, zeroinitializer
+ %x = add <32 x i16> %i, %j
+ %r = select <32 x i1> %mask, <32 x i16> %x, <32 x i16> %i
+ ret <32 x i16> %r
+}
+
+; CHECK-LABEL: vpaddw512_maskz_test
+; CHECK: vpaddw %zmm{{.*{%k[1-7]} {z}.*}}
+; CHECK: ret
+define <32 x i16> @vpaddw512_maskz_test(<32 x i16> %i, <32 x i16> %j, <32 x i16> %mask1) nounwind readnone {
+ %mask = icmp ne <32 x i16> %mask1, zeroinitializer
+ %x = add <32 x i16> %i, %j
+ %r = select <32 x i1> %mask, <32 x i16> %x, <32 x i16> zeroinitializer
+ ret <32 x i16> %r
+}
+
+; CHECK-LABEL: vpaddw512_mask_fold_test
+; CHECK: vpaddw (%rdi), %zmm{{.*%k[1-7]}}
+; CHECK: ret
+define <32 x i16> @vpaddw512_mask_fold_test(<32 x i16> %i, <32 x i16>* %j.ptr, <32 x i16> %mask1) nounwind readnone {
+ %mask = icmp ne <32 x i16> %mask1, zeroinitializer
+ %j = load <32 x i16>* %j.ptr
+ %x = add <32 x i16> %i, %j
+ %r = select <32 x i1> %mask, <32 x i16> %x, <32 x i16> %i
+ ret <32 x i16> %r
+}
+
+; CHECK-LABEL: vpaddw512_maskz_fold_test
+; CHECK: vpaddw (%rdi), %zmm{{.*{%k[1-7]} {z}}}
+; CHECK: ret
+define <32 x i16> @vpaddw512_maskz_fold_test(<32 x i16> %i, <32 x i16>* %j.ptr, <32 x i16> %mask1) nounwind readnone {
+ %mask = icmp ne <32 x i16> %mask1, zeroinitializer
+ %j = load <32 x i16>* %j.ptr
+ %x = add <32 x i16> %i, %j
+ %r = select <32 x i1> %mask, <32 x i16> %x, <32 x i16> zeroinitializer
+ ret <32 x i16> %r
+}
+
+; CHECK-LABEL: vpsubb512_test
+; CHECK: vpsubb %zmm{{.*}}
+; CHECK: ret
+define <64 x i8> @vpsubb512_test(<64 x i8> %i, <64 x i8> %j) nounwind readnone {
+ %x = sub <64 x i8> %i, %j
+ ret <64 x i8> %x
+}
+
+; CHECK-LABEL: vpsubw512_test
+; CHECK: vpsubw %zmm{{.*}}
+; CHECK: ret
+define <32 x i16> @vpsubw512_test(<32 x i16> %i, <32 x i16> %j) nounwind readnone {
+ %x = sub <32 x i16> %i, %j
+ ret <32 x i16> %x
+}
+
+; CHECK-LABEL: vpmullw512_test
+; CHECK: vpmullw %zmm{{.*}}
+; CHECK: ret
+define <32 x i16> @vpmullw512_test(<32 x i16> %i, <32 x i16> %j) {
+ %x = mul <32 x i16> %i, %j
+ ret <32 x i16> %x
+}
+
diff --git a/test/CodeGen/X86/avx512bw-intrinsics.ll b/test/CodeGen/X86/avx512bw-intrinsics.ll
index bbc418c..308de16 100644
--- a/test/CodeGen/X86/avx512bw-intrinsics.ll
+++ b/test/CodeGen/X86/avx512bw-intrinsics.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=knl -mattr=+avx512bw --show-mc-encoding| FileCheck %s
+; RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=skx --show-mc-encoding | FileCheck %s
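+
+; The i8 immediate of the cmp/ucmp intrinsics selects the predicate: 0 = eq,
+; 1 = lt, 2 = le, 3 = unord, 4 = neq, 5 = nlt, 6 = nle, 7 = ord, matching the
+; vpcmp* spellings checked below.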
define i64 @test_pcmpeq_b(<64 x i8> %a, <64 x i8> %b) {
; CHECK-LABEL: test_pcmpeq_b
@@ -67,28 +67,28 @@ declare i32 @llvm.x86.avx512.mask.pcmpgt.w.512(<32 x i16>, <32 x i16>, i32)
define <8 x i64> @test_cmp_b_512(<64 x i8> %a0, <64 x i8> %a1) {
; CHECK-LABEL: test_cmp_b_512
; CHECK: vpcmpeqb %zmm1, %zmm0, %k0 ##
- %res0 = call i64 @llvm.x86.avx512.mask.cmp.b.512(<64 x i8> %a0, <64 x i8> %a1, i32 0, i64 -1)
+ %res0 = call i64 @llvm.x86.avx512.mask.cmp.b.512(<64 x i8> %a0, <64 x i8> %a1, i8 0, i64 -1)
%vec0 = insertelement <8 x i64> undef, i64 %res0, i32 0
; CHECK: vpcmpltb %zmm1, %zmm0, %k0 ##
- %res1 = call i64 @llvm.x86.avx512.mask.cmp.b.512(<64 x i8> %a0, <64 x i8> %a1, i32 1, i64 -1)
+ %res1 = call i64 @llvm.x86.avx512.mask.cmp.b.512(<64 x i8> %a0, <64 x i8> %a1, i8 1, i64 -1)
%vec1 = insertelement <8 x i64> %vec0, i64 %res1, i32 1
; CHECK: vpcmpleb %zmm1, %zmm0, %k0 ##
- %res2 = call i64 @llvm.x86.avx512.mask.cmp.b.512(<64 x i8> %a0, <64 x i8> %a1, i32 2, i64 -1)
+ %res2 = call i64 @llvm.x86.avx512.mask.cmp.b.512(<64 x i8> %a0, <64 x i8> %a1, i8 2, i64 -1)
%vec2 = insertelement <8 x i64> %vec1, i64 %res2, i32 2
; CHECK: vpcmpunordb %zmm1, %zmm0, %k0 ##
- %res3 = call i64 @llvm.x86.avx512.mask.cmp.b.512(<64 x i8> %a0, <64 x i8> %a1, i32 3, i64 -1)
+ %res3 = call i64 @llvm.x86.avx512.mask.cmp.b.512(<64 x i8> %a0, <64 x i8> %a1, i8 3, i64 -1)
%vec3 = insertelement <8 x i64> %vec2, i64 %res3, i32 3
; CHECK: vpcmpneqb %zmm1, %zmm0, %k0 ##
- %res4 = call i64 @llvm.x86.avx512.mask.cmp.b.512(<64 x i8> %a0, <64 x i8> %a1, i32 4, i64 -1)
+ %res4 = call i64 @llvm.x86.avx512.mask.cmp.b.512(<64 x i8> %a0, <64 x i8> %a1, i8 4, i64 -1)
%vec4 = insertelement <8 x i64> %vec3, i64 %res4, i32 4
; CHECK: vpcmpnltb %zmm1, %zmm0, %k0 ##
- %res5 = call i64 @llvm.x86.avx512.mask.cmp.b.512(<64 x i8> %a0, <64 x i8> %a1, i32 5, i64 -1)
+ %res5 = call i64 @llvm.x86.avx512.mask.cmp.b.512(<64 x i8> %a0, <64 x i8> %a1, i8 5, i64 -1)
%vec5 = insertelement <8 x i64> %vec4, i64 %res5, i32 5
; CHECK: vpcmpnleb %zmm1, %zmm0, %k0 ##
- %res6 = call i64 @llvm.x86.avx512.mask.cmp.b.512(<64 x i8> %a0, <64 x i8> %a1, i32 6, i64 -1)
+ %res6 = call i64 @llvm.x86.avx512.mask.cmp.b.512(<64 x i8> %a0, <64 x i8> %a1, i8 6, i64 -1)
%vec6 = insertelement <8 x i64> %vec5, i64 %res6, i32 6
; CHECK: vpcmpordb %zmm1, %zmm0, %k0 ##
- %res7 = call i64 @llvm.x86.avx512.mask.cmp.b.512(<64 x i8> %a0, <64 x i8> %a1, i32 7, i64 -1)
+ %res7 = call i64 @llvm.x86.avx512.mask.cmp.b.512(<64 x i8> %a0, <64 x i8> %a1, i8 7, i64 -1)
%vec7 = insertelement <8 x i64> %vec6, i64 %res7, i32 7
ret <8 x i64> %vec7
}
@@ -96,59 +96,59 @@ define <8 x i64> @test_cmp_b_512(<64 x i8> %a0, <64 x i8> %a1) {
define <8 x i64> @test_mask_cmp_b_512(<64 x i8> %a0, <64 x i8> %a1, i64 %mask) {
; CHECK-LABEL: test_mask_cmp_b_512
; CHECK: vpcmpeqb %zmm1, %zmm0, %k0 {%k1} ##
- %res0 = call i64 @llvm.x86.avx512.mask.cmp.b.512(<64 x i8> %a0, <64 x i8> %a1, i32 0, i64 %mask)
+ %res0 = call i64 @llvm.x86.avx512.mask.cmp.b.512(<64 x i8> %a0, <64 x i8> %a1, i8 0, i64 %mask)
%vec0 = insertelement <8 x i64> undef, i64 %res0, i32 0
; CHECK: vpcmpltb %zmm1, %zmm0, %k0 {%k1} ##
- %res1 = call i64 @llvm.x86.avx512.mask.cmp.b.512(<64 x i8> %a0, <64 x i8> %a1, i32 1, i64 %mask)
+ %res1 = call i64 @llvm.x86.avx512.mask.cmp.b.512(<64 x i8> %a0, <64 x i8> %a1, i8 1, i64 %mask)
%vec1 = insertelement <8 x i64> %vec0, i64 %res1, i32 1
; CHECK: vpcmpleb %zmm1, %zmm0, %k0 {%k1} ##
- %res2 = call i64 @llvm.x86.avx512.mask.cmp.b.512(<64 x i8> %a0, <64 x i8> %a1, i32 2, i64 %mask)
+ %res2 = call i64 @llvm.x86.avx512.mask.cmp.b.512(<64 x i8> %a0, <64 x i8> %a1, i8 2, i64 %mask)
%vec2 = insertelement <8 x i64> %vec1, i64 %res2, i32 2
; CHECK: vpcmpunordb %zmm1, %zmm0, %k0 {%k1} ##
- %res3 = call i64 @llvm.x86.avx512.mask.cmp.b.512(<64 x i8> %a0, <64 x i8> %a1, i32 3, i64 %mask)
+ %res3 = call i64 @llvm.x86.avx512.mask.cmp.b.512(<64 x i8> %a0, <64 x i8> %a1, i8 3, i64 %mask)
%vec3 = insertelement <8 x i64> %vec2, i64 %res3, i32 3
; CHECK: vpcmpneqb %zmm1, %zmm0, %k0 {%k1} ##
- %res4 = call i64 @llvm.x86.avx512.mask.cmp.b.512(<64 x i8> %a0, <64 x i8> %a1, i32 4, i64 %mask)
+ %res4 = call i64 @llvm.x86.avx512.mask.cmp.b.512(<64 x i8> %a0, <64 x i8> %a1, i8 4, i64 %mask)
%vec4 = insertelement <8 x i64> %vec3, i64 %res4, i32 4
; CHECK: vpcmpnltb %zmm1, %zmm0, %k0 {%k1} ##
- %res5 = call i64 @llvm.x86.avx512.mask.cmp.b.512(<64 x i8> %a0, <64 x i8> %a1, i32 5, i64 %mask)
+ %res5 = call i64 @llvm.x86.avx512.mask.cmp.b.512(<64 x i8> %a0, <64 x i8> %a1, i8 5, i64 %mask)
%vec5 = insertelement <8 x i64> %vec4, i64 %res5, i32 5
; CHECK: vpcmpnleb %zmm1, %zmm0, %k0 {%k1} ##
- %res6 = call i64 @llvm.x86.avx512.mask.cmp.b.512(<64 x i8> %a0, <64 x i8> %a1, i32 6, i64 %mask)
+ %res6 = call i64 @llvm.x86.avx512.mask.cmp.b.512(<64 x i8> %a0, <64 x i8> %a1, i8 6, i64 %mask)
%vec6 = insertelement <8 x i64> %vec5, i64 %res6, i32 6
; CHECK: vpcmpordb %zmm1, %zmm0, %k0 {%k1} ##
- %res7 = call i64 @llvm.x86.avx512.mask.cmp.b.512(<64 x i8> %a0, <64 x i8> %a1, i32 7, i64 %mask)
+ %res7 = call i64 @llvm.x86.avx512.mask.cmp.b.512(<64 x i8> %a0, <64 x i8> %a1, i8 7, i64 %mask)
%vec7 = insertelement <8 x i64> %vec6, i64 %res7, i32 7
ret <8 x i64> %vec7
}
-declare i64 @llvm.x86.avx512.mask.cmp.b.512(<64 x i8>, <64 x i8>, i32, i64) nounwind readnone
+declare i64 @llvm.x86.avx512.mask.cmp.b.512(<64 x i8>, <64 x i8>, i8, i64) nounwind readnone
define <8 x i64> @test_ucmp_b_512(<64 x i8> %a0, <64 x i8> %a1) {
; CHECK-LABEL: test_ucmp_b_512
; CHECK: vpcmpequb %zmm1, %zmm0, %k0 ##
- %res0 = call i64 @llvm.x86.avx512.mask.ucmp.b.512(<64 x i8> %a0, <64 x i8> %a1, i32 0, i64 -1)
+ %res0 = call i64 @llvm.x86.avx512.mask.ucmp.b.512(<64 x i8> %a0, <64 x i8> %a1, i8 0, i64 -1)
%vec0 = insertelement <8 x i64> undef, i64 %res0, i32 0
; CHECK: vpcmpltub %zmm1, %zmm0, %k0 ##
- %res1 = call i64 @llvm.x86.avx512.mask.ucmp.b.512(<64 x i8> %a0, <64 x i8> %a1, i32 1, i64 -1)
+ %res1 = call i64 @llvm.x86.avx512.mask.ucmp.b.512(<64 x i8> %a0, <64 x i8> %a1, i8 1, i64 -1)
%vec1 = insertelement <8 x i64> %vec0, i64 %res1, i32 1
; CHECK: vpcmpleub %zmm1, %zmm0, %k0 ##
- %res2 = call i64 @llvm.x86.avx512.mask.ucmp.b.512(<64 x i8> %a0, <64 x i8> %a1, i32 2, i64 -1)
+ %res2 = call i64 @llvm.x86.avx512.mask.ucmp.b.512(<64 x i8> %a0, <64 x i8> %a1, i8 2, i64 -1)
%vec2 = insertelement <8 x i64> %vec1, i64 %res2, i32 2
; CHECK: vpcmpunordub %zmm1, %zmm0, %k0 ##
- %res3 = call i64 @llvm.x86.avx512.mask.ucmp.b.512(<64 x i8> %a0, <64 x i8> %a1, i32 3, i64 -1)
+ %res3 = call i64 @llvm.x86.avx512.mask.ucmp.b.512(<64 x i8> %a0, <64 x i8> %a1, i8 3, i64 -1)
%vec3 = insertelement <8 x i64> %vec2, i64 %res3, i32 3
; CHECK: vpcmpnequb %zmm1, %zmm0, %k0 ##
- %res4 = call i64 @llvm.x86.avx512.mask.ucmp.b.512(<64 x i8> %a0, <64 x i8> %a1, i32 4, i64 -1)
+ %res4 = call i64 @llvm.x86.avx512.mask.ucmp.b.512(<64 x i8> %a0, <64 x i8> %a1, i8 4, i64 -1)
%vec4 = insertelement <8 x i64> %vec3, i64 %res4, i32 4
; CHECK: vpcmpnltub %zmm1, %zmm0, %k0 ##
- %res5 = call i64 @llvm.x86.avx512.mask.ucmp.b.512(<64 x i8> %a0, <64 x i8> %a1, i32 5, i64 -1)
+ %res5 = call i64 @llvm.x86.avx512.mask.ucmp.b.512(<64 x i8> %a0, <64 x i8> %a1, i8 5, i64 -1)
%vec5 = insertelement <8 x i64> %vec4, i64 %res5, i32 5
; CHECK: vpcmpnleub %zmm1, %zmm0, %k0 ##
- %res6 = call i64 @llvm.x86.avx512.mask.ucmp.b.512(<64 x i8> %a0, <64 x i8> %a1, i32 6, i64 -1)
+ %res6 = call i64 @llvm.x86.avx512.mask.ucmp.b.512(<64 x i8> %a0, <64 x i8> %a1, i8 6, i64 -1)
%vec6 = insertelement <8 x i64> %vec5, i64 %res6, i32 6
; CHECK: vpcmpordub %zmm1, %zmm0, %k0 ##
- %res7 = call i64 @llvm.x86.avx512.mask.ucmp.b.512(<64 x i8> %a0, <64 x i8> %a1, i32 7, i64 -1)
+ %res7 = call i64 @llvm.x86.avx512.mask.ucmp.b.512(<64 x i8> %a0, <64 x i8> %a1, i8 7, i64 -1)
%vec7 = insertelement <8 x i64> %vec6, i64 %res7, i32 7
ret <8 x i64> %vec7
}
@@ -156,59 +156,59 @@ define <8 x i64> @test_ucmp_b_512(<64 x i8> %a0, <64 x i8> %a1) {
define <8 x i64> @test_mask_x86_avx512_ucmp_b_512(<64 x i8> %a0, <64 x i8> %a1, i64 %mask) {
; CHECK-LABEL: test_mask_x86_avx512_ucmp_b_512
; CHECK: vpcmpequb %zmm1, %zmm0, %k0 {%k1} ##
- %res0 = call i64 @llvm.x86.avx512.mask.ucmp.b.512(<64 x i8> %a0, <64 x i8> %a1, i32 0, i64 %mask)
+ %res0 = call i64 @llvm.x86.avx512.mask.ucmp.b.512(<64 x i8> %a0, <64 x i8> %a1, i8 0, i64 %mask)
%vec0 = insertelement <8 x i64> undef, i64 %res0, i32 0
; CHECK: vpcmpltub %zmm1, %zmm0, %k0 {%k1} ##
- %res1 = call i64 @llvm.x86.avx512.mask.ucmp.b.512(<64 x i8> %a0, <64 x i8> %a1, i32 1, i64 %mask)
+ %res1 = call i64 @llvm.x86.avx512.mask.ucmp.b.512(<64 x i8> %a0, <64 x i8> %a1, i8 1, i64 %mask)
%vec1 = insertelement <8 x i64> %vec0, i64 %res1, i32 1
; CHECK: vpcmpleub %zmm1, %zmm0, %k0 {%k1} ##
- %res2 = call i64 @llvm.x86.avx512.mask.ucmp.b.512(<64 x i8> %a0, <64 x i8> %a1, i32 2, i64 %mask)
+ %res2 = call i64 @llvm.x86.avx512.mask.ucmp.b.512(<64 x i8> %a0, <64 x i8> %a1, i8 2, i64 %mask)
%vec2 = insertelement <8 x i64> %vec1, i64 %res2, i32 2
; CHECK: vpcmpunordub %zmm1, %zmm0, %k0 {%k1} ##
- %res3 = call i64 @llvm.x86.avx512.mask.ucmp.b.512(<64 x i8> %a0, <64 x i8> %a1, i32 3, i64 %mask)
+ %res3 = call i64 @llvm.x86.avx512.mask.ucmp.b.512(<64 x i8> %a0, <64 x i8> %a1, i8 3, i64 %mask)
%vec3 = insertelement <8 x i64> %vec2, i64 %res3, i32 3
; CHECK: vpcmpnequb %zmm1, %zmm0, %k0 {%k1} ##
- %res4 = call i64 @llvm.x86.avx512.mask.ucmp.b.512(<64 x i8> %a0, <64 x i8> %a1, i32 4, i64 %mask)
+ %res4 = call i64 @llvm.x86.avx512.mask.ucmp.b.512(<64 x i8> %a0, <64 x i8> %a1, i8 4, i64 %mask)
%vec4 = insertelement <8 x i64> %vec3, i64 %res4, i32 4
; CHECK: vpcmpnltub %zmm1, %zmm0, %k0 {%k1} ##
- %res5 = call i64 @llvm.x86.avx512.mask.ucmp.b.512(<64 x i8> %a0, <64 x i8> %a1, i32 5, i64 %mask)
+ %res5 = call i64 @llvm.x86.avx512.mask.ucmp.b.512(<64 x i8> %a0, <64 x i8> %a1, i8 5, i64 %mask)
%vec5 = insertelement <8 x i64> %vec4, i64 %res5, i32 5
; CHECK: vpcmpnleub %zmm1, %zmm0, %k0 {%k1} ##
- %res6 = call i64 @llvm.x86.avx512.mask.ucmp.b.512(<64 x i8> %a0, <64 x i8> %a1, i32 6, i64 %mask)
+ %res6 = call i64 @llvm.x86.avx512.mask.ucmp.b.512(<64 x i8> %a0, <64 x i8> %a1, i8 6, i64 %mask)
%vec6 = insertelement <8 x i64> %vec5, i64 %res6, i32 6
; CHECK: vpcmpordub %zmm1, %zmm0, %k0 {%k1} ##
- %res7 = call i64 @llvm.x86.avx512.mask.ucmp.b.512(<64 x i8> %a0, <64 x i8> %a1, i32 7, i64 %mask)
+ %res7 = call i64 @llvm.x86.avx512.mask.ucmp.b.512(<64 x i8> %a0, <64 x i8> %a1, i8 7, i64 %mask)
%vec7 = insertelement <8 x i64> %vec6, i64 %res7, i32 7
ret <8 x i64> %vec7
}
-declare i64 @llvm.x86.avx512.mask.ucmp.b.512(<64 x i8>, <64 x i8>, i32, i64) nounwind readnone
+declare i64 @llvm.x86.avx512.mask.ucmp.b.512(<64 x i8>, <64 x i8>, i8, i64) nounwind readnone
define <8 x i32> @test_cmp_w_512(<32 x i16> %a0, <32 x i16> %a1) {
; CHECK-LABEL: test_cmp_w_512
; CHECK: vpcmpeqw %zmm1, %zmm0, %k0 ##
- %res0 = call i32 @llvm.x86.avx512.mask.cmp.w.512(<32 x i16> %a0, <32 x i16> %a1, i32 0, i32 -1)
+ %res0 = call i32 @llvm.x86.avx512.mask.cmp.w.512(<32 x i16> %a0, <32 x i16> %a1, i8 0, i32 -1)
%vec0 = insertelement <8 x i32> undef, i32 %res0, i32 0
; CHECK: vpcmpltw %zmm1, %zmm0, %k0 ##
- %res1 = call i32 @llvm.x86.avx512.mask.cmp.w.512(<32 x i16> %a0, <32 x i16> %a1, i32 1, i32 -1)
+ %res1 = call i32 @llvm.x86.avx512.mask.cmp.w.512(<32 x i16> %a0, <32 x i16> %a1, i8 1, i32 -1)
%vec1 = insertelement <8 x i32> %vec0, i32 %res1, i32 1
; CHECK: vpcmplew %zmm1, %zmm0, %k0 ##
- %res2 = call i32 @llvm.x86.avx512.mask.cmp.w.512(<32 x i16> %a0, <32 x i16> %a1, i32 2, i32 -1)
+ %res2 = call i32 @llvm.x86.avx512.mask.cmp.w.512(<32 x i16> %a0, <32 x i16> %a1, i8 2, i32 -1)
%vec2 = insertelement <8 x i32> %vec1, i32 %res2, i32 2
; CHECK: vpcmpunordw %zmm1, %zmm0, %k0 ##
- %res3 = call i32 @llvm.x86.avx512.mask.cmp.w.512(<32 x i16> %a0, <32 x i16> %a1, i32 3, i32 -1)
+ %res3 = call i32 @llvm.x86.avx512.mask.cmp.w.512(<32 x i16> %a0, <32 x i16> %a1, i8 3, i32 -1)
%vec3 = insertelement <8 x i32> %vec2, i32 %res3, i32 3
; CHECK: vpcmpneqw %zmm1, %zmm0, %k0 ##
- %res4 = call i32 @llvm.x86.avx512.mask.cmp.w.512(<32 x i16> %a0, <32 x i16> %a1, i32 4, i32 -1)
+ %res4 = call i32 @llvm.x86.avx512.mask.cmp.w.512(<32 x i16> %a0, <32 x i16> %a1, i8 4, i32 -1)
%vec4 = insertelement <8 x i32> %vec3, i32 %res4, i32 4
; CHECK: vpcmpnltw %zmm1, %zmm0, %k0 ##
- %res5 = call i32 @llvm.x86.avx512.mask.cmp.w.512(<32 x i16> %a0, <32 x i16> %a1, i32 5, i32 -1)
+ %res5 = call i32 @llvm.x86.avx512.mask.cmp.w.512(<32 x i16> %a0, <32 x i16> %a1, i8 5, i32 -1)
%vec5 = insertelement <8 x i32> %vec4, i32 %res5, i32 5
; CHECK: vpcmpnlew %zmm1, %zmm0, %k0 ##
- %res6 = call i32 @llvm.x86.avx512.mask.cmp.w.512(<32 x i16> %a0, <32 x i16> %a1, i32 6, i32 -1)
+ %res6 = call i32 @llvm.x86.avx512.mask.cmp.w.512(<32 x i16> %a0, <32 x i16> %a1, i8 6, i32 -1)
%vec6 = insertelement <8 x i32> %vec5, i32 %res6, i32 6
; CHECK: vpcmpordw %zmm1, %zmm0, %k0 ##
- %res7 = call i32 @llvm.x86.avx512.mask.cmp.w.512(<32 x i16> %a0, <32 x i16> %a1, i32 7, i32 -1)
+ %res7 = call i32 @llvm.x86.avx512.mask.cmp.w.512(<32 x i16> %a0, <32 x i16> %a1, i8 7, i32 -1)
%vec7 = insertelement <8 x i32> %vec6, i32 %res7, i32 7
ret <8 x i32> %vec7
}
@@ -216,59 +216,59 @@ define <8 x i32> @test_cmp_w_512(<32 x i16> %a0, <32 x i16> %a1) {
define <8 x i32> @test_mask_cmp_w_512(<32 x i16> %a0, <32 x i16> %a1, i32 %mask) {
; CHECK-LABEL: test_mask_cmp_w_512
; CHECK: vpcmpeqw %zmm1, %zmm0, %k0 {%k1} ##
- %res0 = call i32 @llvm.x86.avx512.mask.cmp.w.512(<32 x i16> %a0, <32 x i16> %a1, i32 0, i32 %mask)
+ %res0 = call i32 @llvm.x86.avx512.mask.cmp.w.512(<32 x i16> %a0, <32 x i16> %a1, i8 0, i32 %mask)
%vec0 = insertelement <8 x i32> undef, i32 %res0, i32 0
; CHECK: vpcmpltw %zmm1, %zmm0, %k0 {%k1} ##
- %res1 = call i32 @llvm.x86.avx512.mask.cmp.w.512(<32 x i16> %a0, <32 x i16> %a1, i32 1, i32 %mask)
+ %res1 = call i32 @llvm.x86.avx512.mask.cmp.w.512(<32 x i16> %a0, <32 x i16> %a1, i8 1, i32 %mask)
%vec1 = insertelement <8 x i32> %vec0, i32 %res1, i32 1
; CHECK: vpcmplew %zmm1, %zmm0, %k0 {%k1} ##
- %res2 = call i32 @llvm.x86.avx512.mask.cmp.w.512(<32 x i16> %a0, <32 x i16> %a1, i32 2, i32 %mask)
+ %res2 = call i32 @llvm.x86.avx512.mask.cmp.w.512(<32 x i16> %a0, <32 x i16> %a1, i8 2, i32 %mask)
%vec2 = insertelement <8 x i32> %vec1, i32 %res2, i32 2
; CHECK: vpcmpunordw %zmm1, %zmm0, %k0 {%k1} ##
- %res3 = call i32 @llvm.x86.avx512.mask.cmp.w.512(<32 x i16> %a0, <32 x i16> %a1, i32 3, i32 %mask)
+ %res3 = call i32 @llvm.x86.avx512.mask.cmp.w.512(<32 x i16> %a0, <32 x i16> %a1, i8 3, i32 %mask)
%vec3 = insertelement <8 x i32> %vec2, i32 %res3, i32 3
; CHECK: vpcmpneqw %zmm1, %zmm0, %k0 {%k1} ##
- %res4 = call i32 @llvm.x86.avx512.mask.cmp.w.512(<32 x i16> %a0, <32 x i16> %a1, i32 4, i32 %mask)
+ %res4 = call i32 @llvm.x86.avx512.mask.cmp.w.512(<32 x i16> %a0, <32 x i16> %a1, i8 4, i32 %mask)
%vec4 = insertelement <8 x i32> %vec3, i32 %res4, i32 4
; CHECK: vpcmpnltw %zmm1, %zmm0, %k0 {%k1} ##
- %res5 = call i32 @llvm.x86.avx512.mask.cmp.w.512(<32 x i16> %a0, <32 x i16> %a1, i32 5, i32 %mask)
+ %res5 = call i32 @llvm.x86.avx512.mask.cmp.w.512(<32 x i16> %a0, <32 x i16> %a1, i8 5, i32 %mask)
%vec5 = insertelement <8 x i32> %vec4, i32 %res5, i32 5
; CHECK: vpcmpnlew %zmm1, %zmm0, %k0 {%k1} ##
- %res6 = call i32 @llvm.x86.avx512.mask.cmp.w.512(<32 x i16> %a0, <32 x i16> %a1, i32 6, i32 %mask)
+ %res6 = call i32 @llvm.x86.avx512.mask.cmp.w.512(<32 x i16> %a0, <32 x i16> %a1, i8 6, i32 %mask)
%vec6 = insertelement <8 x i32> %vec5, i32 %res6, i32 6
; CHECK: vpcmpordw %zmm1, %zmm0, %k0 {%k1} ##
- %res7 = call i32 @llvm.x86.avx512.mask.cmp.w.512(<32 x i16> %a0, <32 x i16> %a1, i32 7, i32 %mask)
+ %res7 = call i32 @llvm.x86.avx512.mask.cmp.w.512(<32 x i16> %a0, <32 x i16> %a1, i8 7, i32 %mask)
%vec7 = insertelement <8 x i32> %vec6, i32 %res7, i32 7
ret <8 x i32> %vec7
}
-declare i32 @llvm.x86.avx512.mask.cmp.w.512(<32 x i16>, <32 x i16>, i32, i32) nounwind readnone
+declare i32 @llvm.x86.avx512.mask.cmp.w.512(<32 x i16>, <32 x i16>, i8, i32) nounwind readnone
define <8 x i32> @test_ucmp_w_512(<32 x i16> %a0, <32 x i16> %a1) {
; CHECK-LABEL: test_ucmp_w_512
; CHECK: vpcmpequw %zmm1, %zmm0, %k0 ##
- %res0 = call i32 @llvm.x86.avx512.mask.ucmp.w.512(<32 x i16> %a0, <32 x i16> %a1, i32 0, i32 -1)
+ %res0 = call i32 @llvm.x86.avx512.mask.ucmp.w.512(<32 x i16> %a0, <32 x i16> %a1, i8 0, i32 -1)
%vec0 = insertelement <8 x i32> undef, i32 %res0, i32 0
; CHECK: vpcmpltuw %zmm1, %zmm0, %k0 ##
- %res1 = call i32 @llvm.x86.avx512.mask.ucmp.w.512(<32 x i16> %a0, <32 x i16> %a1, i32 1, i32 -1)
+ %res1 = call i32 @llvm.x86.avx512.mask.ucmp.w.512(<32 x i16> %a0, <32 x i16> %a1, i8 1, i32 -1)
%vec1 = insertelement <8 x i32> %vec0, i32 %res1, i32 1
; CHECK: vpcmpleuw %zmm1, %zmm0, %k0 ##
- %res2 = call i32 @llvm.x86.avx512.mask.ucmp.w.512(<32 x i16> %a0, <32 x i16> %a1, i32 2, i32 -1)
+ %res2 = call i32 @llvm.x86.avx512.mask.ucmp.w.512(<32 x i16> %a0, <32 x i16> %a1, i8 2, i32 -1)
%vec2 = insertelement <8 x i32> %vec1, i32 %res2, i32 2
; CHECK: vpcmpunorduw %zmm1, %zmm0, %k0 ##
- %res3 = call i32 @llvm.x86.avx512.mask.ucmp.w.512(<32 x i16> %a0, <32 x i16> %a1, i32 3, i32 -1)
+ %res3 = call i32 @llvm.x86.avx512.mask.ucmp.w.512(<32 x i16> %a0, <32 x i16> %a1, i8 3, i32 -1)
%vec3 = insertelement <8 x i32> %vec2, i32 %res3, i32 3
; CHECK: vpcmpnequw %zmm1, %zmm0, %k0 ##
- %res4 = call i32 @llvm.x86.avx512.mask.ucmp.w.512(<32 x i16> %a0, <32 x i16> %a1, i32 4, i32 -1)
+ %res4 = call i32 @llvm.x86.avx512.mask.ucmp.w.512(<32 x i16> %a0, <32 x i16> %a1, i8 4, i32 -1)
%vec4 = insertelement <8 x i32> %vec3, i32 %res4, i32 4
; CHECK: vpcmpnltuw %zmm1, %zmm0, %k0 ##
- %res5 = call i32 @llvm.x86.avx512.mask.ucmp.w.512(<32 x i16> %a0, <32 x i16> %a1, i32 5, i32 -1)
+ %res5 = call i32 @llvm.x86.avx512.mask.ucmp.w.512(<32 x i16> %a0, <32 x i16> %a1, i8 5, i32 -1)
%vec5 = insertelement <8 x i32> %vec4, i32 %res5, i32 5
; CHECK: vpcmpnleuw %zmm1, %zmm0, %k0 ##
- %res6 = call i32 @llvm.x86.avx512.mask.ucmp.w.512(<32 x i16> %a0, <32 x i16> %a1, i32 6, i32 -1)
+ %res6 = call i32 @llvm.x86.avx512.mask.ucmp.w.512(<32 x i16> %a0, <32 x i16> %a1, i8 6, i32 -1)
%vec6 = insertelement <8 x i32> %vec5, i32 %res6, i32 6
; CHECK: vpcmporduw %zmm1, %zmm0, %k0 ##
- %res7 = call i32 @llvm.x86.avx512.mask.ucmp.w.512(<32 x i16> %a0, <32 x i16> %a1, i32 7, i32 -1)
+ %res7 = call i32 @llvm.x86.avx512.mask.ucmp.w.512(<32 x i16> %a0, <32 x i16> %a1, i8 7, i32 -1)
%vec7 = insertelement <8 x i32> %vec6, i32 %res7, i32 7
ret <8 x i32> %vec7
}
@@ -276,30 +276,78 @@ define <8 x i32> @test_ucmp_w_512(<32 x i16> %a0, <32 x i16> %a1) {
define <8 x i32> @test_mask_ucmp_w_512(<32 x i16> %a0, <32 x i16> %a1, i32 %mask) {
; CHECK-LABEL: test_mask_ucmp_w_512
; CHECK: vpcmpequw %zmm1, %zmm0, %k0 {%k1} ##
- %res0 = call i32 @llvm.x86.avx512.mask.ucmp.w.512(<32 x i16> %a0, <32 x i16> %a1, i32 0, i32 %mask)
+ %res0 = call i32 @llvm.x86.avx512.mask.ucmp.w.512(<32 x i16> %a0, <32 x i16> %a1, i8 0, i32 %mask)
%vec0 = insertelement <8 x i32> undef, i32 %res0, i32 0
; CHECK: vpcmpltuw %zmm1, %zmm0, %k0 {%k1} ##
- %res1 = call i32 @llvm.x86.avx512.mask.ucmp.w.512(<32 x i16> %a0, <32 x i16> %a1, i32 1, i32 %mask)
+ %res1 = call i32 @llvm.x86.avx512.mask.ucmp.w.512(<32 x i16> %a0, <32 x i16> %a1, i8 1, i32 %mask)
%vec1 = insertelement <8 x i32> %vec0, i32 %res1, i32 1
; CHECK: vpcmpleuw %zmm1, %zmm0, %k0 {%k1} ##
- %res2 = call i32 @llvm.x86.avx512.mask.ucmp.w.512(<32 x i16> %a0, <32 x i16> %a1, i32 2, i32 %mask)
+ %res2 = call i32 @llvm.x86.avx512.mask.ucmp.w.512(<32 x i16> %a0, <32 x i16> %a1, i8 2, i32 %mask)
%vec2 = insertelement <8 x i32> %vec1, i32 %res2, i32 2
; CHECK: vpcmpunorduw %zmm1, %zmm0, %k0 {%k1} ##
- %res3 = call i32 @llvm.x86.avx512.mask.ucmp.w.512(<32 x i16> %a0, <32 x i16> %a1, i32 3, i32 %mask)
+ %res3 = call i32 @llvm.x86.avx512.mask.ucmp.w.512(<32 x i16> %a0, <32 x i16> %a1, i8 3, i32 %mask)
%vec3 = insertelement <8 x i32> %vec2, i32 %res3, i32 3
; CHECK: vpcmpnequw %zmm1, %zmm0, %k0 {%k1} ##
- %res4 = call i32 @llvm.x86.avx512.mask.ucmp.w.512(<32 x i16> %a0, <32 x i16> %a1, i32 4, i32 %mask)
+ %res4 = call i32 @llvm.x86.avx512.mask.ucmp.w.512(<32 x i16> %a0, <32 x i16> %a1, i8 4, i32 %mask)
%vec4 = insertelement <8 x i32> %vec3, i32 %res4, i32 4
; CHECK: vpcmpnltuw %zmm1, %zmm0, %k0 {%k1} ##
- %res5 = call i32 @llvm.x86.avx512.mask.ucmp.w.512(<32 x i16> %a0, <32 x i16> %a1, i32 5, i32 %mask)
+ %res5 = call i32 @llvm.x86.avx512.mask.ucmp.w.512(<32 x i16> %a0, <32 x i16> %a1, i8 5, i32 %mask)
%vec5 = insertelement <8 x i32> %vec4, i32 %res5, i32 5
; CHECK: vpcmpnleuw %zmm1, %zmm0, %k0 {%k1} ##
- %res6 = call i32 @llvm.x86.avx512.mask.ucmp.w.512(<32 x i16> %a0, <32 x i16> %a1, i32 6, i32 %mask)
+ %res6 = call i32 @llvm.x86.avx512.mask.ucmp.w.512(<32 x i16> %a0, <32 x i16> %a1, i8 6, i32 %mask)
%vec6 = insertelement <8 x i32> %vec5, i32 %res6, i32 6
; CHECK: vpcmporduw %zmm1, %zmm0, %k0 {%k1} ##
- %res7 = call i32 @llvm.x86.avx512.mask.ucmp.w.512(<32 x i16> %a0, <32 x i16> %a1, i32 7, i32 %mask)
+ %res7 = call i32 @llvm.x86.avx512.mask.ucmp.w.512(<32 x i16> %a0, <32 x i16> %a1, i8 7, i32 %mask)
%vec7 = insertelement <8 x i32> %vec6, i32 %res7, i32 7
ret <8 x i32> %vec7
}
-declare i32 @llvm.x86.avx512.mask.ucmp.w.512(<32 x i16>, <32 x i16>, i32, i32) nounwind readnone
+declare i32 @llvm.x86.avx512.mask.ucmp.w.512(<32 x i16>, <32 x i16>, i8, i32) nounwind readnone
+
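+; Blend tests: the scalar mask selects per element between the two vector
+; operands, so the mask width matches the element count (i16 for 16 x i8, etc.).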
+; CHECK-LABEL: test_x86_mask_blend_b_256
+; CHECK: vpblendmb
+define <32 x i8> @test_x86_mask_blend_b_256(i32 %a0, <32 x i8> %a1, <32 x i8> %a2) {
+ %res = call <32 x i8> @llvm.x86.avx512.mask.blend.b.256(<32 x i8> %a1, <32 x i8> %a2, i32 %a0) ; <<32 x i8>> [#uses=1]
+ ret <32 x i8> %res
+}
+declare <32 x i8> @llvm.x86.avx512.mask.blend.b.256(<32 x i8>, <32 x i8>, i32) nounwind readonly
+
+; CHECK-LABEL: test_x86_mask_blend_w_256
+define <16 x i16> @test_x86_mask_blend_w_256(i16 %mask, <16 x i16> %a1, <16 x i16> %a2) {
+ ; CHECK: vpblendmw
+ %res = call <16 x i16> @llvm.x86.avx512.mask.blend.w.256(<16 x i16> %a1, <16 x i16> %a2, i16 %mask) ; <<16 x i16>> [#uses=1]
+ ret <16 x i16> %res
+}
+declare <16 x i16> @llvm.x86.avx512.mask.blend.w.256(<16 x i16>, <16 x i16>, i16) nounwind readonly
+
+; CHECK-LABEL: test_x86_mask_blend_b_512
+; CHECK: vpblendmb
+define <64 x i8> @test_x86_mask_blend_b_512(i64 %a0, <64 x i8> %a1, <64 x i8> %a2) {
+ %res = call <64 x i8> @llvm.x86.avx512.mask.blend.b.512(<64 x i8> %a1, <64 x i8> %a2, i64 %a0) ; <<64 x i8>> [#uses=1]
+ ret <64 x i8> %res
+}
+declare <64 x i8> @llvm.x86.avx512.mask.blend.b.512(<64 x i8>, <64 x i8>, i64) nounwind readonly
+
+; CHECK-LABEL: test_x86_mask_blend_w_512
+define <32 x i16> @test_x86_mask_blend_w_512(i32 %mask, <32 x i16> %a1, <32 x i16> %a2) {
+ ; CHECK: vpblendmw
+ %res = call <32 x i16> @llvm.x86.avx512.mask.blend.w.512(<32 x i16> %a1, <32 x i16> %a2, i32 %mask) ; <<32 x i16>> [#uses=1]
+ ret <32 x i16> %res
+}
+declare <32 x i16> @llvm.x86.avx512.mask.blend.w.512(<32 x i16>, <32 x i16>, i32) nounwind readonly
+
+; CHECK-LABEL: test_x86_mask_blend_b_128
+; CHECK: vpblendmb
+define <16 x i8> @test_x86_mask_blend_b_128(i16 %a0, <16 x i8> %a1, <16 x i8> %a2) {
+ %res = call <16 x i8> @llvm.x86.avx512.mask.blend.b.128(<16 x i8> %a1, <16 x i8> %a2, i16 %a0) ; <<16 x i8>> [#uses=1]
+ ret <16 x i8> %res
+}
+declare <16 x i8> @llvm.x86.avx512.mask.blend.b.128(<16 x i8>, <16 x i8>, i16) nounwind readonly
+
+; CHECK-LABEL: test_x86_mask_blend_w_128
+define <8 x i16> @test_x86_mask_blend_w_128(i8 %mask, <8 x i16> %a1, <8 x i16> %a2) {
+ ; CHECK: vpblendmw
+ %res = call <8 x i16> @llvm.x86.avx512.mask.blend.w.128(<8 x i16> %a1, <8 x i16> %a2, i8 %mask) ; <<8 x i16>> [#uses=1]
+ ret <8 x i16> %res
+}
+declare <8 x i16> @llvm.x86.avx512.mask.blend.w.128(<8 x i16>, <8 x i16>, i8) nounwind readonly
diff --git a/test/CodeGen/X86/avx512bw-vec-cmp.ll b/test/CodeGen/X86/avx512bw-vec-cmp.ll
index d2b1724..6ba4db6 100644
--- a/test/CodeGen/X86/avx512bw-vec-cmp.ll
+++ b/test/CodeGen/X86/avx512bw-vec-cmp.ll
@@ -14,9 +14,9 @@ define <64 x i8> @test1(<64 x i8> %x, <64 x i8> %y) nounwind {
; CHECK: vpcmpgtb {{.*%k[0-7]}}
; CHECK: vmovdqu8 {{.*}}%k1
; CHECK: ret
-define <64 x i8> @test2(<64 x i8> %x, <64 x i8> %y) nounwind {
+define <64 x i8> @test2(<64 x i8> %x, <64 x i8> %y, <64 x i8> %x1) nounwind {
%mask = icmp sgt <64 x i8> %x, %y
- %max = select <64 x i1> %mask, <64 x i8> %x, <64 x i8> %y
+ %max = select <64 x i1> %mask, <64 x i8> %x1, <64 x i8> %y
ret <64 x i8> %max
}
@@ -34,9 +34,9 @@ define <32 x i16> @test3(<32 x i16> %x, <32 x i16> %y, <32 x i16> %x1) nounwind
; CHECK: vpcmpnleub {{.*%k[0-7]}}
; CHECK: vmovdqu8 {{.*}}%k1
; CHECK: ret
-define <64 x i8> @test4(<64 x i8> %x, <64 x i8> %y) nounwind {
+define <64 x i8> @test4(<64 x i8> %x, <64 x i8> %y, <64 x i8> %x1) nounwind {
%mask = icmp ugt <64 x i8> %x, %y
- %max = select <64 x i1> %mask, <64 x i8> %x, <64 x i8> %y
+ %max = select <64 x i1> %mask, <64 x i8> %x1, <64 x i8> %y
ret <64 x i8> %max
}
diff --git a/test/CodeGen/X86/avx512bwvl-arith.ll b/test/CodeGen/X86/avx512bwvl-arith.ll
new file mode 100644
index 0000000..96f0140
--- /dev/null
+++ b/test/CodeGen/X86/avx512bwvl-arith.ll
@@ -0,0 +1,206 @@
+; RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=knl -mattr=+avx512bw -mattr=+avx512vl| FileCheck %s
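+; Covers add, sub, and mul on byte/word vectors at 256 and 128 bits,
+; including masked, zero-masked, and load-folded forms.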
+
+; 256-bit
+
+; CHECK-LABEL: vpaddb256_test
+; CHECK: vpaddb %ymm{{.*}}
+; CHECK: ret
+define <32 x i8> @vpaddb256_test(<32 x i8> %i, <32 x i8> %j) nounwind readnone {
+ %x = add <32 x i8> %i, %j
+ ret <32 x i8> %x
+}
+
+; CHECK-LABEL: vpaddb256_fold_test
+; CHECK: vpaddb (%rdi), %ymm{{.*}}
+; CHECK: ret
+define <32 x i8> @vpaddb256_fold_test(<32 x i8> %i, <32 x i8>* %j) nounwind {
+ %tmp = load <32 x i8>* %j, align 4
+ %x = add <32 x i8> %i, %tmp
+ ret <32 x i8> %x
+}
+
+; CHECK-LABEL: vpaddw256_test
+; CHECK: vpaddw %ymm{{.*}}
+; CHECK: ret
+define <16 x i16> @vpaddw256_test(<16 x i16> %i, <16 x i16> %j) nounwind readnone {
+ %x = add <16 x i16> %i, %j
+ ret <16 x i16> %x
+}
+
+; CHECK-LABEL: vpaddw256_fold_test
+; CHECK: vpaddw (%rdi), %ymm{{.*}}
+; CHECK: ret
+define <16 x i16> @vpaddw256_fold_test(<16 x i16> %i, <16 x i16>* %j) nounwind {
+ %tmp = load <16 x i16>* %j, align 4
+ %x = add <16 x i16> %i, %tmp
+ ret <16 x i16> %x
+}
+
+; CHECK-LABEL: vpaddw256_mask_test
+; CHECK: vpaddw %ymm{{.*%k[1-7].*}}
+; CHECK: ret
+define <16 x i16> @vpaddw256_mask_test(<16 x i16> %i, <16 x i16> %j, <16 x i16> %mask1) nounwind readnone {
+ %mask = icmp ne <16 x i16> %mask1, zeroinitializer
+ %x = add <16 x i16> %i, %j
+ %r = select <16 x i1> %mask, <16 x i16> %x, <16 x i16> %i
+ ret <16 x i16> %r
+}
+
+; CHECK-LABEL: vpaddw256_maskz_test
+; CHECK: vpaddw %ymm{{.*{%k[1-7]} {z}.*}}
+; CHECK: ret
+define <16 x i16> @vpaddw256_maskz_test(<16 x i16> %i, <16 x i16> %j, <16 x i16> %mask1) nounwind readnone {
+ %mask = icmp ne <16 x i16> %mask1, zeroinitializer
+ %x = add <16 x i16> %i, %j
+ %r = select <16 x i1> %mask, <16 x i16> %x, <16 x i16> zeroinitializer
+ ret <16 x i16> %r
+}
+
+; CHECK-LABEL: vpaddw256_mask_fold_test
+; CHECK: vpaddw (%rdi), %ymm{{.*%k[1-7]}}
+; CHECK: ret
+define <16 x i16> @vpaddw256_mask_fold_test(<16 x i16> %i, <16 x i16>* %j.ptr, <16 x i16> %mask1) nounwind readnone {
+ %mask = icmp ne <16 x i16> %mask1, zeroinitializer
+ %j = load <16 x i16>* %j.ptr
+ %x = add <16 x i16> %i, %j
+ %r = select <16 x i1> %mask, <16 x i16> %x, <16 x i16> %i
+ ret <16 x i16> %r
+}
+
+; CHECK-LABEL: vpaddw256_maskz_fold_test
+; CHECK: vpaddw (%rdi), %ymm{{.*{%k[1-7]} {z}}}
+; CHECK: ret
+define <16 x i16> @vpaddw256_maskz_fold_test(<16 x i16> %i, <16 x i16>* %j.ptr, <16 x i16> %mask1) nounwind readnone {
+ %mask = icmp ne <16 x i16> %mask1, zeroinitializer
+ %j = load <16 x i16>* %j.ptr
+ %x = add <16 x i16> %i, %j
+ %r = select <16 x i1> %mask, <16 x i16> %x, <16 x i16> zeroinitializer
+ ret <16 x i16> %r
+}
+
+; CHECK-LABEL: vpsubb256_test
+; CHECK: vpsubb %ymm{{.*}}
+; CHECK: ret
+define <32 x i8> @vpsubb256_test(<32 x i8> %i, <32 x i8> %j) nounwind readnone {
+ %x = sub <32 x i8> %i, %j
+ ret <32 x i8> %x
+}
+
+; CHECK-LABEL: vpsubw256_test
+; CHECK: vpsubw %ymm{{.*}}
+; CHECK: ret
+define <16 x i16> @vpsubw256_test(<16 x i16> %i, <16 x i16> %j) nounwind readnone {
+ %x = sub <16 x i16> %i, %j
+ ret <16 x i16> %x
+}
+
+; CHECK-LABEL: vpmullw256_test
+; CHECK: vpmullw %ymm{{.*}}
+; CHECK: ret
+define <16 x i16> @vpmullw256_test(<16 x i16> %i, <16 x i16> %j) {
+ %x = mul <16 x i16> %i, %j
+ ret <16 x i16> %x
+}
+
+; 128-bit
+
+; CHECK-LABEL: vpaddb128_test
+; CHECK: vpaddb %xmm{{.*}}
+; CHECK: ret
+define <16 x i8> @vpaddb128_test(<16 x i8> %i, <16 x i8> %j) nounwind readnone {
+ %x = add <16 x i8> %i, %j
+ ret <16 x i8> %x
+}
+
+; CHECK-LABEL: vpaddb128_fold_test
+; CHECK: vpaddb (%rdi), %xmm{{.*}}
+; CHECK: ret
+define <16 x i8> @vpaddb128_fold_test(<16 x i8> %i, <16 x i8>* %j) nounwind {
+ %tmp = load <16 x i8>* %j, align 4
+ %x = add <16 x i8> %i, %tmp
+ ret <16 x i8> %x
+}
+
+; CHECK-LABEL: vpaddw128_test
+; CHECK: vpaddw %xmm{{.*}}
+; CHECK: ret
+define <8 x i16> @vpaddw128_test(<8 x i16> %i, <8 x i16> %j) nounwind readnone {
+ %x = add <8 x i16> %i, %j
+ ret <8 x i16> %x
+}
+
+; CHECK-LABEL: vpaddw128_fold_test
+; CHECK: vpaddw (%rdi), %xmm{{.*}}
+; CHECK: ret
+define <8 x i16> @vpaddw128_fold_test(<8 x i16> %i, <8 x i16>* %j) nounwind {
+ %tmp = load <8 x i16>* %j, align 4
+ %x = add <8 x i16> %i, %tmp
+ ret <8 x i16> %x
+}
+
+; CHECK-LABEL: vpaddw128_mask_test
+; CHECK: vpaddw %xmm{{.*%k[1-7].*}}
+; CHECK: ret
+define <8 x i16> @vpaddw128_mask_test(<8 x i16> %i, <8 x i16> %j, <8 x i16> %mask1) nounwind readnone {
+ %mask = icmp ne <8 x i16> %mask1, zeroinitializer
+ %x = add <8 x i16> %i, %j
+ %r = select <8 x i1> %mask, <8 x i16> %x, <8 x i16> %i
+ ret <8 x i16> %r
+}
+
+; CHECK-LABEL: vpaddw128_maskz_test
+; CHECK: vpaddw %xmm{{.*{%k[1-7]} {z}.*}}
+; CHECK: ret
+define <8 x i16> @vpaddw128_maskz_test(<8 x i16> %i, <8 x i16> %j, <8 x i16> %mask1) nounwind readnone {
+ %mask = icmp ne <8 x i16> %mask1, zeroinitializer
+ %x = add <8 x i16> %i, %j
+ %r = select <8 x i1> %mask, <8 x i16> %x, <8 x i16> zeroinitializer
+ ret <8 x i16> %r
+}
+
+; CHECK-LABEL: vpaddw128_mask_fold_test
+; CHECK: vpaddw (%rdi), %xmm{{.*%k[1-7]}}
+; CHECK: ret
+define <8 x i16> @vpaddw128_mask_fold_test(<8 x i16> %i, <8 x i16>* %j.ptr, <8 x i16> %mask1) nounwind readnone {
+ %mask = icmp ne <8 x i16> %mask1, zeroinitializer
+ %j = load <8 x i16>* %j.ptr
+ %x = add <8 x i16> %i, %j
+ %r = select <8 x i1> %mask, <8 x i16> %x, <8 x i16> %i
+ ret <8 x i16> %r
+}
+
+; CHECK-LABEL: vpaddw128_maskz_fold_test
+; CHECK: vpaddw (%rdi), %xmm{{.*{%k[1-7]} {z}}}
+; CHECK: ret
+define <8 x i16> @vpaddw128_maskz_fold_test(<8 x i16> %i, <8 x i16>* %j.ptr, <8 x i16> %mask1) nounwind readnone {
+ %mask = icmp ne <8 x i16> %mask1, zeroinitializer
+ %j = load <8 x i16>* %j.ptr
+ %x = add <8 x i16> %i, %j
+ %r = select <8 x i1> %mask, <8 x i16> %x, <8 x i16> zeroinitializer
+ ret <8 x i16> %r
+}
+
+; CHECK-LABEL: vpsubb128_test
+; CHECK: vpsubb %xmm{{.*}}
+; CHECK: ret
+define <16 x i8> @vpsubb128_test(<16 x i8> %i, <16 x i8> %j) nounwind readnone {
+ %x = sub <16 x i8> %i, %j
+ ret <16 x i8> %x
+}
+
+; CHECK-LABEL: vpsubw128_test
+; CHECK: vpsubw %xmm{{.*}}
+; CHECK: ret
+define <8 x i16> @vpsubw128_test(<8 x i16> %i, <8 x i16> %j) nounwind readnone {
+ %x = sub <8 x i16> %i, %j
+ ret <8 x i16> %x
+}
+
+; CHECK-LABEL: vpmullw128_test
+; CHECK: vpmullw %xmm{{.*}}
+; CHECK: ret
+define <8 x i16> @vpmullw128_test(<8 x i16> %i, <8 x i16> %j) {
+ %x = mul <8 x i16> %i, %j
+ ret <8 x i16> %x
+}
+
diff --git a/test/CodeGen/X86/avx512bwvl-intrinsics.ll b/test/CodeGen/X86/avx512bwvl-intrinsics.ll
index 45f8d6d..dbb9117 100644
--- a/test/CodeGen/X86/avx512bwvl-intrinsics.ll
+++ b/test/CodeGen/X86/avx512bwvl-intrinsics.ll
@@ -69,28 +69,28 @@ declare i16 @llvm.x86.avx512.mask.pcmpgt.w.256(<16 x i16>, <16 x i16>, i16)
define <8 x i32> @test_cmp_b_256(<32 x i8> %a0, <32 x i8> %a1) {
; CHECK-LABEL: test_cmp_b_256
; CHECK: vpcmpeqb %ymm1, %ymm0, %k0 ##
- %res0 = call i32 @llvm.x86.avx512.mask.cmp.b.256(<32 x i8> %a0, <32 x i8> %a1, i32 0, i32 -1)
+ %res0 = call i32 @llvm.x86.avx512.mask.cmp.b.256(<32 x i8> %a0, <32 x i8> %a1, i8 0, i32 -1)
%vec0 = insertelement <8 x i32> undef, i32 %res0, i32 0
; CHECK: vpcmpltb %ymm1, %ymm0, %k0 ##
- %res1 = call i32 @llvm.x86.avx512.mask.cmp.b.256(<32 x i8> %a0, <32 x i8> %a1, i32 1, i32 -1)
+ %res1 = call i32 @llvm.x86.avx512.mask.cmp.b.256(<32 x i8> %a0, <32 x i8> %a1, i8 1, i32 -1)
%vec1 = insertelement <8 x i32> %vec0, i32 %res1, i32 1
; CHECK: vpcmpleb %ymm1, %ymm0, %k0 ##
- %res2 = call i32 @llvm.x86.avx512.mask.cmp.b.256(<32 x i8> %a0, <32 x i8> %a1, i32 2, i32 -1)
+ %res2 = call i32 @llvm.x86.avx512.mask.cmp.b.256(<32 x i8> %a0, <32 x i8> %a1, i8 2, i32 -1)
%vec2 = insertelement <8 x i32> %vec1, i32 %res2, i32 2
; CHECK: vpcmpunordb %ymm1, %ymm0, %k0 ##
- %res3 = call i32 @llvm.x86.avx512.mask.cmp.b.256(<32 x i8> %a0, <32 x i8> %a1, i32 3, i32 -1)
+ %res3 = call i32 @llvm.x86.avx512.mask.cmp.b.256(<32 x i8> %a0, <32 x i8> %a1, i8 3, i32 -1)
%vec3 = insertelement <8 x i32> %vec2, i32 %res3, i32 3
; CHECK: vpcmpneqb %ymm1, %ymm0, %k0 ##
- %res4 = call i32 @llvm.x86.avx512.mask.cmp.b.256(<32 x i8> %a0, <32 x i8> %a1, i32 4, i32 -1)
+ %res4 = call i32 @llvm.x86.avx512.mask.cmp.b.256(<32 x i8> %a0, <32 x i8> %a1, i8 4, i32 -1)
%vec4 = insertelement <8 x i32> %vec3, i32 %res4, i32 4
; CHECK: vpcmpnltb %ymm1, %ymm0, %k0 ##
- %res5 = call i32 @llvm.x86.avx512.mask.cmp.b.256(<32 x i8> %a0, <32 x i8> %a1, i32 5, i32 -1)
+ %res5 = call i32 @llvm.x86.avx512.mask.cmp.b.256(<32 x i8> %a0, <32 x i8> %a1, i8 5, i32 -1)
%vec5 = insertelement <8 x i32> %vec4, i32 %res5, i32 5
; CHECK: vpcmpnleb %ymm1, %ymm0, %k0 ##
- %res6 = call i32 @llvm.x86.avx512.mask.cmp.b.256(<32 x i8> %a0, <32 x i8> %a1, i32 6, i32 -1)
+ %res6 = call i32 @llvm.x86.avx512.mask.cmp.b.256(<32 x i8> %a0, <32 x i8> %a1, i8 6, i32 -1)
%vec6 = insertelement <8 x i32> %vec5, i32 %res6, i32 6
; CHECK: vpcmpordb %ymm1, %ymm0, %k0 ##
- %res7 = call i32 @llvm.x86.avx512.mask.cmp.b.256(<32 x i8> %a0, <32 x i8> %a1, i32 7, i32 -1)
+ %res7 = call i32 @llvm.x86.avx512.mask.cmp.b.256(<32 x i8> %a0, <32 x i8> %a1, i8 7, i32 -1)
%vec7 = insertelement <8 x i32> %vec6, i32 %res7, i32 7
ret <8 x i32> %vec7
}
@@ -98,59 +98,59 @@ define <8 x i32> @test_cmp_b_256(<32 x i8> %a0, <32 x i8> %a1) {
define <8 x i32> @test_mask_cmp_b_256(<32 x i8> %a0, <32 x i8> %a1, i32 %mask) {
; CHECK-LABEL: test_mask_cmp_b_256
; CHECK: vpcmpeqb %ymm1, %ymm0, %k0 {%k1} ##
- %res0 = call i32 @llvm.x86.avx512.mask.cmp.b.256(<32 x i8> %a0, <32 x i8> %a1, i32 0, i32 %mask)
+ %res0 = call i32 @llvm.x86.avx512.mask.cmp.b.256(<32 x i8> %a0, <32 x i8> %a1, i8 0, i32 %mask)
%vec0 = insertelement <8 x i32> undef, i32 %res0, i32 0
; CHECK: vpcmpltb %ymm1, %ymm0, %k0 {%k1} ##
- %res1 = call i32 @llvm.x86.avx512.mask.cmp.b.256(<32 x i8> %a0, <32 x i8> %a1, i32 1, i32 %mask)
+ %res1 = call i32 @llvm.x86.avx512.mask.cmp.b.256(<32 x i8> %a0, <32 x i8> %a1, i8 1, i32 %mask)
%vec1 = insertelement <8 x i32> %vec0, i32 %res1, i32 1
; CHECK: vpcmpleb %ymm1, %ymm0, %k0 {%k1} ##
- %res2 = call i32 @llvm.x86.avx512.mask.cmp.b.256(<32 x i8> %a0, <32 x i8> %a1, i32 2, i32 %mask)
+ %res2 = call i32 @llvm.x86.avx512.mask.cmp.b.256(<32 x i8> %a0, <32 x i8> %a1, i8 2, i32 %mask)
%vec2 = insertelement <8 x i32> %vec1, i32 %res2, i32 2
; CHECK: vpcmpunordb %ymm1, %ymm0, %k0 {%k1} ##
- %res3 = call i32 @llvm.x86.avx512.mask.cmp.b.256(<32 x i8> %a0, <32 x i8> %a1, i32 3, i32 %mask)
+ %res3 = call i32 @llvm.x86.avx512.mask.cmp.b.256(<32 x i8> %a0, <32 x i8> %a1, i8 3, i32 %mask)
%vec3 = insertelement <8 x i32> %vec2, i32 %res3, i32 3
; CHECK: vpcmpneqb %ymm1, %ymm0, %k0 {%k1} ##
- %res4 = call i32 @llvm.x86.avx512.mask.cmp.b.256(<32 x i8> %a0, <32 x i8> %a1, i32 4, i32 %mask)
+ %res4 = call i32 @llvm.x86.avx512.mask.cmp.b.256(<32 x i8> %a0, <32 x i8> %a1, i8 4, i32 %mask)
%vec4 = insertelement <8 x i32> %vec3, i32 %res4, i32 4
; CHECK: vpcmpnltb %ymm1, %ymm0, %k0 {%k1} ##
- %res5 = call i32 @llvm.x86.avx512.mask.cmp.b.256(<32 x i8> %a0, <32 x i8> %a1, i32 5, i32 %mask)
+ %res5 = call i32 @llvm.x86.avx512.mask.cmp.b.256(<32 x i8> %a0, <32 x i8> %a1, i8 5, i32 %mask)
%vec5 = insertelement <8 x i32> %vec4, i32 %res5, i32 5
; CHECK: vpcmpnleb %ymm1, %ymm0, %k0 {%k1} ##
- %res6 = call i32 @llvm.x86.avx512.mask.cmp.b.256(<32 x i8> %a0, <32 x i8> %a1, i32 6, i32 %mask)
+ %res6 = call i32 @llvm.x86.avx512.mask.cmp.b.256(<32 x i8> %a0, <32 x i8> %a1, i8 6, i32 %mask)
%vec6 = insertelement <8 x i32> %vec5, i32 %res6, i32 6
; CHECK: vpcmpordb %ymm1, %ymm0, %k0 {%k1} ##
- %res7 = call i32 @llvm.x86.avx512.mask.cmp.b.256(<32 x i8> %a0, <32 x i8> %a1, i32 7, i32 %mask)
+ %res7 = call i32 @llvm.x86.avx512.mask.cmp.b.256(<32 x i8> %a0, <32 x i8> %a1, i8 7, i32 %mask)
%vec7 = insertelement <8 x i32> %vec6, i32 %res7, i32 7
ret <8 x i32> %vec7
}
-declare i32 @llvm.x86.avx512.mask.cmp.b.256(<32 x i8>, <32 x i8>, i32, i32) nounwind readnone
+declare i32 @llvm.x86.avx512.mask.cmp.b.256(<32 x i8>, <32 x i8>, i8, i32) nounwind readnone
define <8 x i32> @test_ucmp_b_256(<32 x i8> %a0, <32 x i8> %a1) {
; CHECK-LABEL: test_ucmp_b_256
; CHECK: vpcmpequb %ymm1, %ymm0, %k0 ##
- %res0 = call i32 @llvm.x86.avx512.mask.ucmp.b.256(<32 x i8> %a0, <32 x i8> %a1, i32 0, i32 -1)
+ %res0 = call i32 @llvm.x86.avx512.mask.ucmp.b.256(<32 x i8> %a0, <32 x i8> %a1, i8 0, i32 -1)
%vec0 = insertelement <8 x i32> undef, i32 %res0, i32 0
; CHECK: vpcmpltub %ymm1, %ymm0, %k0 ##
- %res1 = call i32 @llvm.x86.avx512.mask.ucmp.b.256(<32 x i8> %a0, <32 x i8> %a1, i32 1, i32 -1)
+ %res1 = call i32 @llvm.x86.avx512.mask.ucmp.b.256(<32 x i8> %a0, <32 x i8> %a1, i8 1, i32 -1)
%vec1 = insertelement <8 x i32> %vec0, i32 %res1, i32 1
; CHECK: vpcmpleub %ymm1, %ymm0, %k0 ##
- %res2 = call i32 @llvm.x86.avx512.mask.ucmp.b.256(<32 x i8> %a0, <32 x i8> %a1, i32 2, i32 -1)
+ %res2 = call i32 @llvm.x86.avx512.mask.ucmp.b.256(<32 x i8> %a0, <32 x i8> %a1, i8 2, i32 -1)
%vec2 = insertelement <8 x i32> %vec1, i32 %res2, i32 2
; CHECK: vpcmpunordub %ymm1, %ymm0, %k0 ##
- %res3 = call i32 @llvm.x86.avx512.mask.ucmp.b.256(<32 x i8> %a0, <32 x i8> %a1, i32 3, i32 -1)
+ %res3 = call i32 @llvm.x86.avx512.mask.ucmp.b.256(<32 x i8> %a0, <32 x i8> %a1, i8 3, i32 -1)
%vec3 = insertelement <8 x i32> %vec2, i32 %res3, i32 3
; CHECK: vpcmpnequb %ymm1, %ymm0, %k0 ##
- %res4 = call i32 @llvm.x86.avx512.mask.ucmp.b.256(<32 x i8> %a0, <32 x i8> %a1, i32 4, i32 -1)
+ %res4 = call i32 @llvm.x86.avx512.mask.ucmp.b.256(<32 x i8> %a0, <32 x i8> %a1, i8 4, i32 -1)
%vec4 = insertelement <8 x i32> %vec3, i32 %res4, i32 4
; CHECK: vpcmpnltub %ymm1, %ymm0, %k0 ##
- %res5 = call i32 @llvm.x86.avx512.mask.ucmp.b.256(<32 x i8> %a0, <32 x i8> %a1, i32 5, i32 -1)
+ %res5 = call i32 @llvm.x86.avx512.mask.ucmp.b.256(<32 x i8> %a0, <32 x i8> %a1, i8 5, i32 -1)
%vec5 = insertelement <8 x i32> %vec4, i32 %res5, i32 5
; CHECK: vpcmpnleub %ymm1, %ymm0, %k0 ##
- %res6 = call i32 @llvm.x86.avx512.mask.ucmp.b.256(<32 x i8> %a0, <32 x i8> %a1, i32 6, i32 -1)
+ %res6 = call i32 @llvm.x86.avx512.mask.ucmp.b.256(<32 x i8> %a0, <32 x i8> %a1, i8 6, i32 -1)
%vec6 = insertelement <8 x i32> %vec5, i32 %res6, i32 6
; CHECK: vpcmpordub %ymm1, %ymm0, %k0 ##
- %res7 = call i32 @llvm.x86.avx512.mask.ucmp.b.256(<32 x i8> %a0, <32 x i8> %a1, i32 7, i32 -1)
+ %res7 = call i32 @llvm.x86.avx512.mask.ucmp.b.256(<32 x i8> %a0, <32 x i8> %a1, i8 7, i32 -1)
%vec7 = insertelement <8 x i32> %vec6, i32 %res7, i32 7
ret <8 x i32> %vec7
}
@@ -158,59 +158,59 @@ define <8 x i32> @test_ucmp_b_256(<32 x i8> %a0, <32 x i8> %a1) {
define <8 x i32> @test_mask_ucmp_b_256(<32 x i8> %a0, <32 x i8> %a1, i32 %mask) {
; CHECK-LABEL: test_mask_ucmp_b_256
; CHECK: vpcmpequb %ymm1, %ymm0, %k0 {%k1} ##
- %res0 = call i32 @llvm.x86.avx512.mask.ucmp.b.256(<32 x i8> %a0, <32 x i8> %a1, i32 0, i32 %mask)
+ %res0 = call i32 @llvm.x86.avx512.mask.ucmp.b.256(<32 x i8> %a0, <32 x i8> %a1, i8 0, i32 %mask)
%vec0 = insertelement <8 x i32> undef, i32 %res0, i32 0
; CHECK: vpcmpltub %ymm1, %ymm0, %k0 {%k1} ##
- %res1 = call i32 @llvm.x86.avx512.mask.ucmp.b.256(<32 x i8> %a0, <32 x i8> %a1, i32 1, i32 %mask)
+ %res1 = call i32 @llvm.x86.avx512.mask.ucmp.b.256(<32 x i8> %a0, <32 x i8> %a1, i8 1, i32 %mask)
%vec1 = insertelement <8 x i32> %vec0, i32 %res1, i32 1
; CHECK: vpcmpleub %ymm1, %ymm0, %k0 {%k1} ##
- %res2 = call i32 @llvm.x86.avx512.mask.ucmp.b.256(<32 x i8> %a0, <32 x i8> %a1, i32 2, i32 %mask)
+ %res2 = call i32 @llvm.x86.avx512.mask.ucmp.b.256(<32 x i8> %a0, <32 x i8> %a1, i8 2, i32 %mask)
%vec2 = insertelement <8 x i32> %vec1, i32 %res2, i32 2
; CHECK: vpcmpunordub %ymm1, %ymm0, %k0 {%k1} ##
- %res3 = call i32 @llvm.x86.avx512.mask.ucmp.b.256(<32 x i8> %a0, <32 x i8> %a1, i32 3, i32 %mask)
+ %res3 = call i32 @llvm.x86.avx512.mask.ucmp.b.256(<32 x i8> %a0, <32 x i8> %a1, i8 3, i32 %mask)
%vec3 = insertelement <8 x i32> %vec2, i32 %res3, i32 3
; CHECK: vpcmpnequb %ymm1, %ymm0, %k0 {%k1} ##
- %res4 = call i32 @llvm.x86.avx512.mask.ucmp.b.256(<32 x i8> %a0, <32 x i8> %a1, i32 4, i32 %mask)
+ %res4 = call i32 @llvm.x86.avx512.mask.ucmp.b.256(<32 x i8> %a0, <32 x i8> %a1, i8 4, i32 %mask)
%vec4 = insertelement <8 x i32> %vec3, i32 %res4, i32 4
; CHECK: vpcmpnltub %ymm1, %ymm0, %k0 {%k1} ##
- %res5 = call i32 @llvm.x86.avx512.mask.ucmp.b.256(<32 x i8> %a0, <32 x i8> %a1, i32 5, i32 %mask)
+ %res5 = call i32 @llvm.x86.avx512.mask.ucmp.b.256(<32 x i8> %a0, <32 x i8> %a1, i8 5, i32 %mask)
%vec5 = insertelement <8 x i32> %vec4, i32 %res5, i32 5
; CHECK: vpcmpnleub %ymm1, %ymm0, %k0 {%k1} ##
- %res6 = call i32 @llvm.x86.avx512.mask.ucmp.b.256(<32 x i8> %a0, <32 x i8> %a1, i32 6, i32 %mask)
+ %res6 = call i32 @llvm.x86.avx512.mask.ucmp.b.256(<32 x i8> %a0, <32 x i8> %a1, i8 6, i32 %mask)
%vec6 = insertelement <8 x i32> %vec5, i32 %res6, i32 6
; CHECK: vpcmpordub %ymm1, %ymm0, %k0 {%k1} ##
- %res7 = call i32 @llvm.x86.avx512.mask.ucmp.b.256(<32 x i8> %a0, <32 x i8> %a1, i32 7, i32 %mask)
+ %res7 = call i32 @llvm.x86.avx512.mask.ucmp.b.256(<32 x i8> %a0, <32 x i8> %a1, i8 7, i32 %mask)
%vec7 = insertelement <8 x i32> %vec6, i32 %res7, i32 7
ret <8 x i32> %vec7
}
-declare i32 @llvm.x86.avx512.mask.ucmp.b.256(<32 x i8>, <32 x i8>, i32, i32) nounwind readnone
+declare i32 @llvm.x86.avx512.mask.ucmp.b.256(<32 x i8>, <32 x i8>, i8, i32) nounwind readnone
define <8 x i16> @test_cmp_w_256(<16 x i16> %a0, <16 x i16> %a1) {
; CHECK-LABEL: test_cmp_w_256
; CHECK: vpcmpeqw %ymm1, %ymm0, %k0 ##
- %res0 = call i16 @llvm.x86.avx512.mask.cmp.w.256(<16 x i16> %a0, <16 x i16> %a1, i32 0, i16 -1)
+ %res0 = call i16 @llvm.x86.avx512.mask.cmp.w.256(<16 x i16> %a0, <16 x i16> %a1, i8 0, i16 -1)
%vec0 = insertelement <8 x i16> undef, i16 %res0, i32 0
; CHECK: vpcmpltw %ymm1, %ymm0, %k0 ##
- %res1 = call i16 @llvm.x86.avx512.mask.cmp.w.256(<16 x i16> %a0, <16 x i16> %a1, i32 1, i16 -1)
+ %res1 = call i16 @llvm.x86.avx512.mask.cmp.w.256(<16 x i16> %a0, <16 x i16> %a1, i8 1, i16 -1)
%vec1 = insertelement <8 x i16> %vec0, i16 %res1, i32 1
; CHECK: vpcmplew %ymm1, %ymm0, %k0 ##
- %res2 = call i16 @llvm.x86.avx512.mask.cmp.w.256(<16 x i16> %a0, <16 x i16> %a1, i32 2, i16 -1)
+ %res2 = call i16 @llvm.x86.avx512.mask.cmp.w.256(<16 x i16> %a0, <16 x i16> %a1, i8 2, i16 -1)
%vec2 = insertelement <8 x i16> %vec1, i16 %res2, i32 2
; CHECK: vpcmpunordw %ymm1, %ymm0, %k0 ##
- %res3 = call i16 @llvm.x86.avx512.mask.cmp.w.256(<16 x i16> %a0, <16 x i16> %a1, i32 3, i16 -1)
+ %res3 = call i16 @llvm.x86.avx512.mask.cmp.w.256(<16 x i16> %a0, <16 x i16> %a1, i8 3, i16 -1)
%vec3 = insertelement <8 x i16> %vec2, i16 %res3, i32 3
; CHECK: vpcmpneqw %ymm1, %ymm0, %k0 ##
- %res4 = call i16 @llvm.x86.avx512.mask.cmp.w.256(<16 x i16> %a0, <16 x i16> %a1, i32 4, i16 -1)
+ %res4 = call i16 @llvm.x86.avx512.mask.cmp.w.256(<16 x i16> %a0, <16 x i16> %a1, i8 4, i16 -1)
%vec4 = insertelement <8 x i16> %vec3, i16 %res4, i32 4
; CHECK: vpcmpnltw %ymm1, %ymm0, %k0 ##
- %res5 = call i16 @llvm.x86.avx512.mask.cmp.w.256(<16 x i16> %a0, <16 x i16> %a1, i32 5, i16 -1)
+ %res5 = call i16 @llvm.x86.avx512.mask.cmp.w.256(<16 x i16> %a0, <16 x i16> %a1, i8 5, i16 -1)
%vec5 = insertelement <8 x i16> %vec4, i16 %res5, i32 5
; CHECK: vpcmpnlew %ymm1, %ymm0, %k0 ##
- %res6 = call i16 @llvm.x86.avx512.mask.cmp.w.256(<16 x i16> %a0, <16 x i16> %a1, i32 6, i16 -1)
+ %res6 = call i16 @llvm.x86.avx512.mask.cmp.w.256(<16 x i16> %a0, <16 x i16> %a1, i8 6, i16 -1)
%vec6 = insertelement <8 x i16> %vec5, i16 %res6, i32 6
; CHECK: vpcmpordw %ymm1, %ymm0, %k0 ##
- %res7 = call i16 @llvm.x86.avx512.mask.cmp.w.256(<16 x i16> %a0, <16 x i16> %a1, i32 7, i16 -1)
+ %res7 = call i16 @llvm.x86.avx512.mask.cmp.w.256(<16 x i16> %a0, <16 x i16> %a1, i8 7, i16 -1)
%vec7 = insertelement <8 x i16> %vec6, i16 %res7, i32 7
ret <8 x i16> %vec7
}
@@ -218,59 +218,59 @@ define <8 x i16> @test_cmp_w_256(<16 x i16> %a0, <16 x i16> %a1) {
define <8 x i16> @test_mask_cmp_w_256(<16 x i16> %a0, <16 x i16> %a1, i16 %mask) {
; CHECK-LABEL: test_mask_cmp_w_256
; CHECK: vpcmpeqw %ymm1, %ymm0, %k0 {%k1} ##
- %res0 = call i16 @llvm.x86.avx512.mask.cmp.w.256(<16 x i16> %a0, <16 x i16> %a1, i32 0, i16 %mask)
+ %res0 = call i16 @llvm.x86.avx512.mask.cmp.w.256(<16 x i16> %a0, <16 x i16> %a1, i8 0, i16 %mask)
%vec0 = insertelement <8 x i16> undef, i16 %res0, i32 0
; CHECK: vpcmpltw %ymm1, %ymm0, %k0 {%k1} ##
- %res1 = call i16 @llvm.x86.avx512.mask.cmp.w.256(<16 x i16> %a0, <16 x i16> %a1, i32 1, i16 %mask)
+ %res1 = call i16 @llvm.x86.avx512.mask.cmp.w.256(<16 x i16> %a0, <16 x i16> %a1, i8 1, i16 %mask)
%vec1 = insertelement <8 x i16> %vec0, i16 %res1, i32 1
; CHECK: vpcmplew %ymm1, %ymm0, %k0 {%k1} ##
- %res2 = call i16 @llvm.x86.avx512.mask.cmp.w.256(<16 x i16> %a0, <16 x i16> %a1, i32 2, i16 %mask)
+ %res2 = call i16 @llvm.x86.avx512.mask.cmp.w.256(<16 x i16> %a0, <16 x i16> %a1, i8 2, i16 %mask)
%vec2 = insertelement <8 x i16> %vec1, i16 %res2, i32 2
; CHECK: vpcmpunordw %ymm1, %ymm0, %k0 {%k1} ##
- %res3 = call i16 @llvm.x86.avx512.mask.cmp.w.256(<16 x i16> %a0, <16 x i16> %a1, i32 3, i16 %mask)
+ %res3 = call i16 @llvm.x86.avx512.mask.cmp.w.256(<16 x i16> %a0, <16 x i16> %a1, i8 3, i16 %mask)
%vec3 = insertelement <8 x i16> %vec2, i16 %res3, i32 3
; CHECK: vpcmpneqw %ymm1, %ymm0, %k0 {%k1} ##
- %res4 = call i16 @llvm.x86.avx512.mask.cmp.w.256(<16 x i16> %a0, <16 x i16> %a1, i32 4, i16 %mask)
+ %res4 = call i16 @llvm.x86.avx512.mask.cmp.w.256(<16 x i16> %a0, <16 x i16> %a1, i8 4, i16 %mask)
%vec4 = insertelement <8 x i16> %vec3, i16 %res4, i32 4
; CHECK: vpcmpnltw %ymm1, %ymm0, %k0 {%k1} ##
- %res5 = call i16 @llvm.x86.avx512.mask.cmp.w.256(<16 x i16> %a0, <16 x i16> %a1, i32 5, i16 %mask)
+ %res5 = call i16 @llvm.x86.avx512.mask.cmp.w.256(<16 x i16> %a0, <16 x i16> %a1, i8 5, i16 %mask)
%vec5 = insertelement <8 x i16> %vec4, i16 %res5, i32 5
; CHECK: vpcmpnlew %ymm1, %ymm0, %k0 {%k1} ##
- %res6 = call i16 @llvm.x86.avx512.mask.cmp.w.256(<16 x i16> %a0, <16 x i16> %a1, i32 6, i16 %mask)
+ %res6 = call i16 @llvm.x86.avx512.mask.cmp.w.256(<16 x i16> %a0, <16 x i16> %a1, i8 6, i16 %mask)
%vec6 = insertelement <8 x i16> %vec5, i16 %res6, i32 6
; CHECK: vpcmpordw %ymm1, %ymm0, %k0 {%k1} ##
- %res7 = call i16 @llvm.x86.avx512.mask.cmp.w.256(<16 x i16> %a0, <16 x i16> %a1, i32 7, i16 %mask)
+ %res7 = call i16 @llvm.x86.avx512.mask.cmp.w.256(<16 x i16> %a0, <16 x i16> %a1, i8 7, i16 %mask)
%vec7 = insertelement <8 x i16> %vec6, i16 %res7, i32 7
ret <8 x i16> %vec7
}
-declare i16 @llvm.x86.avx512.mask.cmp.w.256(<16 x i16>, <16 x i16>, i32, i16) nounwind readnone
+declare i16 @llvm.x86.avx512.mask.cmp.w.256(<16 x i16>, <16 x i16>, i8, i16) nounwind readnone
define <8 x i16> @test_ucmp_w_256(<16 x i16> %a0, <16 x i16> %a1) {
; CHECK-LABEL: test_ucmp_w_256
; CHECK: vpcmpequw %ymm1, %ymm0, %k0 ##
- %res0 = call i16 @llvm.x86.avx512.mask.ucmp.w.256(<16 x i16> %a0, <16 x i16> %a1, i32 0, i16 -1)
+ %res0 = call i16 @llvm.x86.avx512.mask.ucmp.w.256(<16 x i16> %a0, <16 x i16> %a1, i8 0, i16 -1)
%vec0 = insertelement <8 x i16> undef, i16 %res0, i32 0
; CHECK: vpcmpltuw %ymm1, %ymm0, %k0 ##
- %res1 = call i16 @llvm.x86.avx512.mask.ucmp.w.256(<16 x i16> %a0, <16 x i16> %a1, i32 1, i16 -1)
+ %res1 = call i16 @llvm.x86.avx512.mask.ucmp.w.256(<16 x i16> %a0, <16 x i16> %a1, i8 1, i16 -1)
%vec1 = insertelement <8 x i16> %vec0, i16 %res1, i32 1
; CHECK: vpcmpleuw %ymm1, %ymm0, %k0 ##
- %res2 = call i16 @llvm.x86.avx512.mask.ucmp.w.256(<16 x i16> %a0, <16 x i16> %a1, i32 2, i16 -1)
+ %res2 = call i16 @llvm.x86.avx512.mask.ucmp.w.256(<16 x i16> %a0, <16 x i16> %a1, i8 2, i16 -1)
%vec2 = insertelement <8 x i16> %vec1, i16 %res2, i32 2
; CHECK: vpcmpunorduw %ymm1, %ymm0, %k0 ##
- %res3 = call i16 @llvm.x86.avx512.mask.ucmp.w.256(<16 x i16> %a0, <16 x i16> %a1, i32 3, i16 -1)
+ %res3 = call i16 @llvm.x86.avx512.mask.ucmp.w.256(<16 x i16> %a0, <16 x i16> %a1, i8 3, i16 -1)
%vec3 = insertelement <8 x i16> %vec2, i16 %res3, i32 3
; CHECK: vpcmpnequw %ymm1, %ymm0, %k0 ##
- %res4 = call i16 @llvm.x86.avx512.mask.ucmp.w.256(<16 x i16> %a0, <16 x i16> %a1, i32 4, i16 -1)
+ %res4 = call i16 @llvm.x86.avx512.mask.ucmp.w.256(<16 x i16> %a0, <16 x i16> %a1, i8 4, i16 -1)
%vec4 = insertelement <8 x i16> %vec3, i16 %res4, i32 4
; CHECK: vpcmpnltuw %ymm1, %ymm0, %k0 ##
- %res5 = call i16 @llvm.x86.avx512.mask.ucmp.w.256(<16 x i16> %a0, <16 x i16> %a1, i32 5, i16 -1)
+ %res5 = call i16 @llvm.x86.avx512.mask.ucmp.w.256(<16 x i16> %a0, <16 x i16> %a1, i8 5, i16 -1)
%vec5 = insertelement <8 x i16> %vec4, i16 %res5, i32 5
; CHECK: vpcmpnleuw %ymm1, %ymm0, %k0 ##
- %res6 = call i16 @llvm.x86.avx512.mask.ucmp.w.256(<16 x i16> %a0, <16 x i16> %a1, i32 6, i16 -1)
+ %res6 = call i16 @llvm.x86.avx512.mask.ucmp.w.256(<16 x i16> %a0, <16 x i16> %a1, i8 6, i16 -1)
%vec6 = insertelement <8 x i16> %vec5, i16 %res6, i32 6
; CHECK: vpcmporduw %ymm1, %ymm0, %k0 ##
- %res7 = call i16 @llvm.x86.avx512.mask.ucmp.w.256(<16 x i16> %a0, <16 x i16> %a1, i32 7, i16 -1)
+ %res7 = call i16 @llvm.x86.avx512.mask.ucmp.w.256(<16 x i16> %a0, <16 x i16> %a1, i8 7, i16 -1)
%vec7 = insertelement <8 x i16> %vec6, i16 %res7, i32 7
ret <8 x i16> %vec7
}
@@ -278,33 +278,33 @@ define <8 x i16> @test_ucmp_w_256(<16 x i16> %a0, <16 x i16> %a1) {
define <8 x i16> @test_mask_ucmp_w_256(<16 x i16> %a0, <16 x i16> %a1, i16 %mask) {
; CHECK-LABEL: test_mask_ucmp_w_256
; CHECK: vpcmpequw %ymm1, %ymm0, %k0 {%k1} ##
- %res0 = call i16 @llvm.x86.avx512.mask.ucmp.w.256(<16 x i16> %a0, <16 x i16> %a1, i32 0, i16 %mask)
+ %res0 = call i16 @llvm.x86.avx512.mask.ucmp.w.256(<16 x i16> %a0, <16 x i16> %a1, i8 0, i16 %mask)
%vec0 = insertelement <8 x i16> undef, i16 %res0, i32 0
; CHECK: vpcmpltuw %ymm1, %ymm0, %k0 {%k1} ##
- %res1 = call i16 @llvm.x86.avx512.mask.ucmp.w.256(<16 x i16> %a0, <16 x i16> %a1, i32 1, i16 %mask)
+ %res1 = call i16 @llvm.x86.avx512.mask.ucmp.w.256(<16 x i16> %a0, <16 x i16> %a1, i8 1, i16 %mask)
%vec1 = insertelement <8 x i16> %vec0, i16 %res1, i32 1
; CHECK: vpcmpleuw %ymm1, %ymm0, %k0 {%k1} ##
- %res2 = call i16 @llvm.x86.avx512.mask.ucmp.w.256(<16 x i16> %a0, <16 x i16> %a1, i32 2, i16 %mask)
+ %res2 = call i16 @llvm.x86.avx512.mask.ucmp.w.256(<16 x i16> %a0, <16 x i16> %a1, i8 2, i16 %mask)
%vec2 = insertelement <8 x i16> %vec1, i16 %res2, i32 2
; CHECK: vpcmpunorduw %ymm1, %ymm0, %k0 {%k1} ##
- %res3 = call i16 @llvm.x86.avx512.mask.ucmp.w.256(<16 x i16> %a0, <16 x i16> %a1, i32 3, i16 %mask)
+ %res3 = call i16 @llvm.x86.avx512.mask.ucmp.w.256(<16 x i16> %a0, <16 x i16> %a1, i8 3, i16 %mask)
%vec3 = insertelement <8 x i16> %vec2, i16 %res3, i32 3
; CHECK: vpcmpnequw %ymm1, %ymm0, %k0 {%k1} ##
- %res4 = call i16 @llvm.x86.avx512.mask.ucmp.w.256(<16 x i16> %a0, <16 x i16> %a1, i32 4, i16 %mask)
+ %res4 = call i16 @llvm.x86.avx512.mask.ucmp.w.256(<16 x i16> %a0, <16 x i16> %a1, i8 4, i16 %mask)
%vec4 = insertelement <8 x i16> %vec3, i16 %res4, i32 4
; CHECK: vpcmpnltuw %ymm1, %ymm0, %k0 {%k1} ##
- %res5 = call i16 @llvm.x86.avx512.mask.ucmp.w.256(<16 x i16> %a0, <16 x i16> %a1, i32 5, i16 %mask)
+ %res5 = call i16 @llvm.x86.avx512.mask.ucmp.w.256(<16 x i16> %a0, <16 x i16> %a1, i8 5, i16 %mask)
%vec5 = insertelement <8 x i16> %vec4, i16 %res5, i32 5
; CHECK: vpcmpnleuw %ymm1, %ymm0, %k0 {%k1} ##
- %res6 = call i16 @llvm.x86.avx512.mask.ucmp.w.256(<16 x i16> %a0, <16 x i16> %a1, i32 6, i16 %mask)
+ %res6 = call i16 @llvm.x86.avx512.mask.ucmp.w.256(<16 x i16> %a0, <16 x i16> %a1, i8 6, i16 %mask)
%vec6 = insertelement <8 x i16> %vec5, i16 %res6, i32 6
; CHECK: vpcmporduw %ymm1, %ymm0, %k0 {%k1} ##
- %res7 = call i16 @llvm.x86.avx512.mask.ucmp.w.256(<16 x i16> %a0, <16 x i16> %a1, i32 7, i16 %mask)
+ %res7 = call i16 @llvm.x86.avx512.mask.ucmp.w.256(<16 x i16> %a0, <16 x i16> %a1, i8 7, i16 %mask)
%vec7 = insertelement <8 x i16> %vec6, i16 %res7, i32 7
ret <8 x i16> %vec7
}
-declare i16 @llvm.x86.avx512.mask.ucmp.w.256(<16 x i16>, <16 x i16>, i32, i16) nounwind readnone
+declare i16 @llvm.x86.avx512.mask.ucmp.w.256(<16 x i16>, <16 x i16>, i8, i16) nounwind readnone
; 128-bit
@@ -375,28 +375,28 @@ declare i8 @llvm.x86.avx512.mask.pcmpgt.w.128(<8 x i16>, <8 x i16>, i8)
define <8 x i16> @test_cmp_b_128(<16 x i8> %a0, <16 x i8> %a1) {
; CHECK-LABEL: test_cmp_b_128
; CHECK: vpcmpeqb %xmm1, %xmm0, %k0 ##
- %res0 = call i16 @llvm.x86.avx512.mask.cmp.b.128(<16 x i8> %a0, <16 x i8> %a1, i32 0, i16 -1)
+ %res0 = call i16 @llvm.x86.avx512.mask.cmp.b.128(<16 x i8> %a0, <16 x i8> %a1, i8 0, i16 -1)
%vec0 = insertelement <8 x i16> undef, i16 %res0, i32 0
; CHECK: vpcmpltb %xmm1, %xmm0, %k0 ##
- %res1 = call i16 @llvm.x86.avx512.mask.cmp.b.128(<16 x i8> %a0, <16 x i8> %a1, i32 1, i16 -1)
+ %res1 = call i16 @llvm.x86.avx512.mask.cmp.b.128(<16 x i8> %a0, <16 x i8> %a1, i8 1, i16 -1)
%vec1 = insertelement <8 x i16> %vec0, i16 %res1, i32 1
; CHECK: vpcmpleb %xmm1, %xmm0, %k0 ##
- %res2 = call i16 @llvm.x86.avx512.mask.cmp.b.128(<16 x i8> %a0, <16 x i8> %a1, i32 2, i16 -1)
+ %res2 = call i16 @llvm.x86.avx512.mask.cmp.b.128(<16 x i8> %a0, <16 x i8> %a1, i8 2, i16 -1)
%vec2 = insertelement <8 x i16> %vec1, i16 %res2, i32 2
; CHECK: vpcmpunordb %xmm1, %xmm0, %k0 ##
- %res3 = call i16 @llvm.x86.avx512.mask.cmp.b.128(<16 x i8> %a0, <16 x i8> %a1, i32 3, i16 -1)
+ %res3 = call i16 @llvm.x86.avx512.mask.cmp.b.128(<16 x i8> %a0, <16 x i8> %a1, i8 3, i16 -1)
%vec3 = insertelement <8 x i16> %vec2, i16 %res3, i32 3
; CHECK: vpcmpneqb %xmm1, %xmm0, %k0 ##
- %res4 = call i16 @llvm.x86.avx512.mask.cmp.b.128(<16 x i8> %a0, <16 x i8> %a1, i32 4, i16 -1)
+ %res4 = call i16 @llvm.x86.avx512.mask.cmp.b.128(<16 x i8> %a0, <16 x i8> %a1, i8 4, i16 -1)
%vec4 = insertelement <8 x i16> %vec3, i16 %res4, i32 4
; CHECK: vpcmpnltb %xmm1, %xmm0, %k0 ##
- %res5 = call i16 @llvm.x86.avx512.mask.cmp.b.128(<16 x i8> %a0, <16 x i8> %a1, i32 5, i16 -1)
+ %res5 = call i16 @llvm.x86.avx512.mask.cmp.b.128(<16 x i8> %a0, <16 x i8> %a1, i8 5, i16 -1)
%vec5 = insertelement <8 x i16> %vec4, i16 %res5, i32 5
; CHECK: vpcmpnleb %xmm1, %xmm0, %k0 ##
- %res6 = call i16 @llvm.x86.avx512.mask.cmp.b.128(<16 x i8> %a0, <16 x i8> %a1, i32 6, i16 -1)
+ %res6 = call i16 @llvm.x86.avx512.mask.cmp.b.128(<16 x i8> %a0, <16 x i8> %a1, i8 6, i16 -1)
%vec6 = insertelement <8 x i16> %vec5, i16 %res6, i32 6
; CHECK: vpcmpordb %xmm1, %xmm0, %k0 ##
- %res7 = call i16 @llvm.x86.avx512.mask.cmp.b.128(<16 x i8> %a0, <16 x i8> %a1, i32 7, i16 -1)
+ %res7 = call i16 @llvm.x86.avx512.mask.cmp.b.128(<16 x i8> %a0, <16 x i8> %a1, i8 7, i16 -1)
%vec7 = insertelement <8 x i16> %vec6, i16 %res7, i32 7
ret <8 x i16> %vec7
}
@@ -404,59 +404,59 @@ define <8 x i16> @test_cmp_b_128(<16 x i8> %a0, <16 x i8> %a1) {
define <8 x i16> @test_mask_cmp_b_128(<16 x i8> %a0, <16 x i8> %a1, i16 %mask) {
; CHECK-LABEL: test_mask_cmp_b_128
; CHECK: vpcmpeqb %xmm1, %xmm0, %k0 {%k1} ##
- %res0 = call i16 @llvm.x86.avx512.mask.cmp.b.128(<16 x i8> %a0, <16 x i8> %a1, i32 0, i16 %mask)
+ %res0 = call i16 @llvm.x86.avx512.mask.cmp.b.128(<16 x i8> %a0, <16 x i8> %a1, i8 0, i16 %mask)
%vec0 = insertelement <8 x i16> undef, i16 %res0, i32 0
; CHECK: vpcmpltb %xmm1, %xmm0, %k0 {%k1} ##
- %res1 = call i16 @llvm.x86.avx512.mask.cmp.b.128(<16 x i8> %a0, <16 x i8> %a1, i32 1, i16 %mask)
+ %res1 = call i16 @llvm.x86.avx512.mask.cmp.b.128(<16 x i8> %a0, <16 x i8> %a1, i8 1, i16 %mask)
%vec1 = insertelement <8 x i16> %vec0, i16 %res1, i32 1
; CHECK: vpcmpleb %xmm1, %xmm0, %k0 {%k1} ##
- %res2 = call i16 @llvm.x86.avx512.mask.cmp.b.128(<16 x i8> %a0, <16 x i8> %a1, i32 2, i16 %mask)
+ %res2 = call i16 @llvm.x86.avx512.mask.cmp.b.128(<16 x i8> %a0, <16 x i8> %a1, i8 2, i16 %mask)
%vec2 = insertelement <8 x i16> %vec1, i16 %res2, i32 2
; CHECK: vpcmpunordb %xmm1, %xmm0, %k0 {%k1} ##
- %res3 = call i16 @llvm.x86.avx512.mask.cmp.b.128(<16 x i8> %a0, <16 x i8> %a1, i32 3, i16 %mask)
+ %res3 = call i16 @llvm.x86.avx512.mask.cmp.b.128(<16 x i8> %a0, <16 x i8> %a1, i8 3, i16 %mask)
%vec3 = insertelement <8 x i16> %vec2, i16 %res3, i32 3
; CHECK: vpcmpneqb %xmm1, %xmm0, %k0 {%k1} ##
- %res4 = call i16 @llvm.x86.avx512.mask.cmp.b.128(<16 x i8> %a0, <16 x i8> %a1, i32 4, i16 %mask)
+ %res4 = call i16 @llvm.x86.avx512.mask.cmp.b.128(<16 x i8> %a0, <16 x i8> %a1, i8 4, i16 %mask)
%vec4 = insertelement <8 x i16> %vec3, i16 %res4, i32 4
; CHECK: vpcmpnltb %xmm1, %xmm0, %k0 {%k1} ##
- %res5 = call i16 @llvm.x86.avx512.mask.cmp.b.128(<16 x i8> %a0, <16 x i8> %a1, i32 5, i16 %mask)
+ %res5 = call i16 @llvm.x86.avx512.mask.cmp.b.128(<16 x i8> %a0, <16 x i8> %a1, i8 5, i16 %mask)
%vec5 = insertelement <8 x i16> %vec4, i16 %res5, i32 5
; CHECK: vpcmpnleb %xmm1, %xmm0, %k0 {%k1} ##
- %res6 = call i16 @llvm.x86.avx512.mask.cmp.b.128(<16 x i8> %a0, <16 x i8> %a1, i32 6, i16 %mask)
+ %res6 = call i16 @llvm.x86.avx512.mask.cmp.b.128(<16 x i8> %a0, <16 x i8> %a1, i8 6, i16 %mask)
%vec6 = insertelement <8 x i16> %vec5, i16 %res6, i32 6
; CHECK: vpcmpordb %xmm1, %xmm0, %k0 {%k1} ##
- %res7 = call i16 @llvm.x86.avx512.mask.cmp.b.128(<16 x i8> %a0, <16 x i8> %a1, i32 7, i16 %mask)
+ %res7 = call i16 @llvm.x86.avx512.mask.cmp.b.128(<16 x i8> %a0, <16 x i8> %a1, i8 7, i16 %mask)
%vec7 = insertelement <8 x i16> %vec6, i16 %res7, i32 7
ret <8 x i16> %vec7
}
-declare i16 @llvm.x86.avx512.mask.cmp.b.128(<16 x i8>, <16 x i8>, i32, i16) nounwind readnone
+declare i16 @llvm.x86.avx512.mask.cmp.b.128(<16 x i8>, <16 x i8>, i8, i16) nounwind readnone
define <8 x i16> @test_ucmp_b_128(<16 x i8> %a0, <16 x i8> %a1) {
; CHECK-LABEL: test_ucmp_b_128
; CHECK: vpcmpequb %xmm1, %xmm0, %k0 ##
- %res0 = call i16 @llvm.x86.avx512.mask.ucmp.b.128(<16 x i8> %a0, <16 x i8> %a1, i32 0, i16 -1)
+ %res0 = call i16 @llvm.x86.avx512.mask.ucmp.b.128(<16 x i8> %a0, <16 x i8> %a1, i8 0, i16 -1)
%vec0 = insertelement <8 x i16> undef, i16 %res0, i32 0
; CHECK: vpcmpltub %xmm1, %xmm0, %k0 ##
- %res1 = call i16 @llvm.x86.avx512.mask.ucmp.b.128(<16 x i8> %a0, <16 x i8> %a1, i32 1, i16 -1)
+ %res1 = call i16 @llvm.x86.avx512.mask.ucmp.b.128(<16 x i8> %a0, <16 x i8> %a1, i8 1, i16 -1)
%vec1 = insertelement <8 x i16> %vec0, i16 %res1, i32 1
; CHECK: vpcmpleub %xmm1, %xmm0, %k0 ##
- %res2 = call i16 @llvm.x86.avx512.mask.ucmp.b.128(<16 x i8> %a0, <16 x i8> %a1, i32 2, i16 -1)
+ %res2 = call i16 @llvm.x86.avx512.mask.ucmp.b.128(<16 x i8> %a0, <16 x i8> %a1, i8 2, i16 -1)
%vec2 = insertelement <8 x i16> %vec1, i16 %res2, i32 2
; CHECK: vpcmpunordub %xmm1, %xmm0, %k0 ##
- %res3 = call i16 @llvm.x86.avx512.mask.ucmp.b.128(<16 x i8> %a0, <16 x i8> %a1, i32 3, i16 -1)
+ %res3 = call i16 @llvm.x86.avx512.mask.ucmp.b.128(<16 x i8> %a0, <16 x i8> %a1, i8 3, i16 -1)
%vec3 = insertelement <8 x i16> %vec2, i16 %res3, i32 3
; CHECK: vpcmpnequb %xmm1, %xmm0, %k0 ##
- %res4 = call i16 @llvm.x86.avx512.mask.ucmp.b.128(<16 x i8> %a0, <16 x i8> %a1, i32 4, i16 -1)
+ %res4 = call i16 @llvm.x86.avx512.mask.ucmp.b.128(<16 x i8> %a0, <16 x i8> %a1, i8 4, i16 -1)
%vec4 = insertelement <8 x i16> %vec3, i16 %res4, i32 4
; CHECK: vpcmpnltub %xmm1, %xmm0, %k0 ##
- %res5 = call i16 @llvm.x86.avx512.mask.ucmp.b.128(<16 x i8> %a0, <16 x i8> %a1, i32 5, i16 -1)
+ %res5 = call i16 @llvm.x86.avx512.mask.ucmp.b.128(<16 x i8> %a0, <16 x i8> %a1, i8 5, i16 -1)
%vec5 = insertelement <8 x i16> %vec4, i16 %res5, i32 5
; CHECK: vpcmpnleub %xmm1, %xmm0, %k0 ##
- %res6 = call i16 @llvm.x86.avx512.mask.ucmp.b.128(<16 x i8> %a0, <16 x i8> %a1, i32 6, i16 -1)
+ %res6 = call i16 @llvm.x86.avx512.mask.ucmp.b.128(<16 x i8> %a0, <16 x i8> %a1, i8 6, i16 -1)
%vec6 = insertelement <8 x i16> %vec5, i16 %res6, i32 6
; CHECK: vpcmpordub %xmm1, %xmm0, %k0 ##
- %res7 = call i16 @llvm.x86.avx512.mask.ucmp.b.128(<16 x i8> %a0, <16 x i8> %a1, i32 7, i16 -1)
+ %res7 = call i16 @llvm.x86.avx512.mask.ucmp.b.128(<16 x i8> %a0, <16 x i8> %a1, i8 7, i16 -1)
%vec7 = insertelement <8 x i16> %vec6, i16 %res7, i32 7
ret <8 x i16> %vec7
}
@@ -464,59 +464,59 @@ define <8 x i16> @test_ucmp_b_128(<16 x i8> %a0, <16 x i8> %a1) {
define <8 x i16> @test_mask_ucmp_b_128(<16 x i8> %a0, <16 x i8> %a1, i16 %mask) {
; CHECK-LABEL: test_mask_ucmp_b_128
; CHECK: vpcmpequb %xmm1, %xmm0, %k0 {%k1} ##
- %res0 = call i16 @llvm.x86.avx512.mask.ucmp.b.128(<16 x i8> %a0, <16 x i8> %a1, i32 0, i16 %mask)
+ %res0 = call i16 @llvm.x86.avx512.mask.ucmp.b.128(<16 x i8> %a0, <16 x i8> %a1, i8 0, i16 %mask)
%vec0 = insertelement <8 x i16> undef, i16 %res0, i32 0
; CHECK: vpcmpltub %xmm1, %xmm0, %k0 {%k1} ##
- %res1 = call i16 @llvm.x86.avx512.mask.ucmp.b.128(<16 x i8> %a0, <16 x i8> %a1, i32 1, i16 %mask)
+ %res1 = call i16 @llvm.x86.avx512.mask.ucmp.b.128(<16 x i8> %a0, <16 x i8> %a1, i8 1, i16 %mask)
%vec1 = insertelement <8 x i16> %vec0, i16 %res1, i32 1
; CHECK: vpcmpleub %xmm1, %xmm0, %k0 {%k1} ##
- %res2 = call i16 @llvm.x86.avx512.mask.ucmp.b.128(<16 x i8> %a0, <16 x i8> %a1, i32 2, i16 %mask)
+ %res2 = call i16 @llvm.x86.avx512.mask.ucmp.b.128(<16 x i8> %a0, <16 x i8> %a1, i8 2, i16 %mask)
%vec2 = insertelement <8 x i16> %vec1, i16 %res2, i32 2
; CHECK: vpcmpunordub %xmm1, %xmm0, %k0 {%k1} ##
- %res3 = call i16 @llvm.x86.avx512.mask.ucmp.b.128(<16 x i8> %a0, <16 x i8> %a1, i32 3, i16 %mask)
+ %res3 = call i16 @llvm.x86.avx512.mask.ucmp.b.128(<16 x i8> %a0, <16 x i8> %a1, i8 3, i16 %mask)
%vec3 = insertelement <8 x i16> %vec2, i16 %res3, i32 3
; CHECK: vpcmpnequb %xmm1, %xmm0, %k0 {%k1} ##
- %res4 = call i16 @llvm.x86.avx512.mask.ucmp.b.128(<16 x i8> %a0, <16 x i8> %a1, i32 4, i16 %mask)
+ %res4 = call i16 @llvm.x86.avx512.mask.ucmp.b.128(<16 x i8> %a0, <16 x i8> %a1, i8 4, i16 %mask)
%vec4 = insertelement <8 x i16> %vec3, i16 %res4, i32 4
; CHECK: vpcmpnltub %xmm1, %xmm0, %k0 {%k1} ##
- %res5 = call i16 @llvm.x86.avx512.mask.ucmp.b.128(<16 x i8> %a0, <16 x i8> %a1, i32 5, i16 %mask)
+ %res5 = call i16 @llvm.x86.avx512.mask.ucmp.b.128(<16 x i8> %a0, <16 x i8> %a1, i8 5, i16 %mask)
%vec5 = insertelement <8 x i16> %vec4, i16 %res5, i32 5
; CHECK: vpcmpnleub %xmm1, %xmm0, %k0 {%k1} ##
- %res6 = call i16 @llvm.x86.avx512.mask.ucmp.b.128(<16 x i8> %a0, <16 x i8> %a1, i32 6, i16 %mask)
+ %res6 = call i16 @llvm.x86.avx512.mask.ucmp.b.128(<16 x i8> %a0, <16 x i8> %a1, i8 6, i16 %mask)
%vec6 = insertelement <8 x i16> %vec5, i16 %res6, i32 6
; CHECK: vpcmpordub %xmm1, %xmm0, %k0 {%k1} ##
- %res7 = call i16 @llvm.x86.avx512.mask.ucmp.b.128(<16 x i8> %a0, <16 x i8> %a1, i32 7, i16 %mask)
+ %res7 = call i16 @llvm.x86.avx512.mask.ucmp.b.128(<16 x i8> %a0, <16 x i8> %a1, i8 7, i16 %mask)
%vec7 = insertelement <8 x i16> %vec6, i16 %res7, i32 7
ret <8 x i16> %vec7
}
-declare i16 @llvm.x86.avx512.mask.ucmp.b.128(<16 x i8>, <16 x i8>, i32, i16) nounwind readnone
+declare i16 @llvm.x86.avx512.mask.ucmp.b.128(<16 x i8>, <16 x i8>, i8, i16) nounwind readnone
define <8 x i8> @test_cmp_w_128(<8 x i16> %a0, <8 x i16> %a1) {
; CHECK-LABEL: test_cmp_w_128
; CHECK: vpcmpeqw %xmm1, %xmm0, %k0 ##
- %res0 = call i8 @llvm.x86.avx512.mask.cmp.w.128(<8 x i16> %a0, <8 x i16> %a1, i32 0, i8 -1)
+ %res0 = call i8 @llvm.x86.avx512.mask.cmp.w.128(<8 x i16> %a0, <8 x i16> %a1, i8 0, i8 -1)
%vec0 = insertelement <8 x i8> undef, i8 %res0, i32 0
; CHECK: vpcmpltw %xmm1, %xmm0, %k0 ##
- %res1 = call i8 @llvm.x86.avx512.mask.cmp.w.128(<8 x i16> %a0, <8 x i16> %a1, i32 1, i8 -1)
+ %res1 = call i8 @llvm.x86.avx512.mask.cmp.w.128(<8 x i16> %a0, <8 x i16> %a1, i8 1, i8 -1)
%vec1 = insertelement <8 x i8> %vec0, i8 %res1, i32 1
; CHECK: vpcmplew %xmm1, %xmm0, %k0 ##
- %res2 = call i8 @llvm.x86.avx512.mask.cmp.w.128(<8 x i16> %a0, <8 x i16> %a1, i32 2, i8 -1)
+ %res2 = call i8 @llvm.x86.avx512.mask.cmp.w.128(<8 x i16> %a0, <8 x i16> %a1, i8 2, i8 -1)
%vec2 = insertelement <8 x i8> %vec1, i8 %res2, i32 2
; CHECK: vpcmpunordw %xmm1, %xmm0, %k0 ##
- %res3 = call i8 @llvm.x86.avx512.mask.cmp.w.128(<8 x i16> %a0, <8 x i16> %a1, i32 3, i8 -1)
+ %res3 = call i8 @llvm.x86.avx512.mask.cmp.w.128(<8 x i16> %a0, <8 x i16> %a1, i8 3, i8 -1)
%vec3 = insertelement <8 x i8> %vec2, i8 %res3, i32 3
; CHECK: vpcmpneqw %xmm1, %xmm0, %k0 ##
- %res4 = call i8 @llvm.x86.avx512.mask.cmp.w.128(<8 x i16> %a0, <8 x i16> %a1, i32 4, i8 -1)
+ %res4 = call i8 @llvm.x86.avx512.mask.cmp.w.128(<8 x i16> %a0, <8 x i16> %a1, i8 4, i8 -1)
%vec4 = insertelement <8 x i8> %vec3, i8 %res4, i32 4
; CHECK: vpcmpnltw %xmm1, %xmm0, %k0 ##
- %res5 = call i8 @llvm.x86.avx512.mask.cmp.w.128(<8 x i16> %a0, <8 x i16> %a1, i32 5, i8 -1)
+ %res5 = call i8 @llvm.x86.avx512.mask.cmp.w.128(<8 x i16> %a0, <8 x i16> %a1, i8 5, i8 -1)
%vec5 = insertelement <8 x i8> %vec4, i8 %res5, i32 5
; CHECK: vpcmpnlew %xmm1, %xmm0, %k0 ##
- %res6 = call i8 @llvm.x86.avx512.mask.cmp.w.128(<8 x i16> %a0, <8 x i16> %a1, i32 6, i8 -1)
+ %res6 = call i8 @llvm.x86.avx512.mask.cmp.w.128(<8 x i16> %a0, <8 x i16> %a1, i8 6, i8 -1)
%vec6 = insertelement <8 x i8> %vec5, i8 %res6, i32 6
; CHECK: vpcmpordw %xmm1, %xmm0, %k0 ##
- %res7 = call i8 @llvm.x86.avx512.mask.cmp.w.128(<8 x i16> %a0, <8 x i16> %a1, i32 7, i8 -1)
+ %res7 = call i8 @llvm.x86.avx512.mask.cmp.w.128(<8 x i16> %a0, <8 x i16> %a1, i8 7, i8 -1)
%vec7 = insertelement <8 x i8> %vec6, i8 %res7, i32 7
ret <8 x i8> %vec7
}
@@ -524,59 +524,59 @@ define <8 x i8> @test_cmp_w_128(<8 x i16> %a0, <8 x i16> %a1) {
define <8 x i8> @test_mask_cmp_w_128(<8 x i16> %a0, <8 x i16> %a1, i8 %mask) {
; CHECK-LABEL: test_mask_cmp_w_128
; CHECK: vpcmpeqw %xmm1, %xmm0, %k0 {%k1} ##
- %res0 = call i8 @llvm.x86.avx512.mask.cmp.w.128(<8 x i16> %a0, <8 x i16> %a1, i32 0, i8 %mask)
+ %res0 = call i8 @llvm.x86.avx512.mask.cmp.w.128(<8 x i16> %a0, <8 x i16> %a1, i8 0, i8 %mask)
%vec0 = insertelement <8 x i8> undef, i8 %res0, i32 0
; CHECK: vpcmpltw %xmm1, %xmm0, %k0 {%k1} ##
- %res1 = call i8 @llvm.x86.avx512.mask.cmp.w.128(<8 x i16> %a0, <8 x i16> %a1, i32 1, i8 %mask)
+ %res1 = call i8 @llvm.x86.avx512.mask.cmp.w.128(<8 x i16> %a0, <8 x i16> %a1, i8 1, i8 %mask)
%vec1 = insertelement <8 x i8> %vec0, i8 %res1, i32 1
; CHECK: vpcmplew %xmm1, %xmm0, %k0 {%k1} ##
- %res2 = call i8 @llvm.x86.avx512.mask.cmp.w.128(<8 x i16> %a0, <8 x i16> %a1, i32 2, i8 %mask)
+ %res2 = call i8 @llvm.x86.avx512.mask.cmp.w.128(<8 x i16> %a0, <8 x i16> %a1, i8 2, i8 %mask)
%vec2 = insertelement <8 x i8> %vec1, i8 %res2, i32 2
; CHECK: vpcmpunordw %xmm1, %xmm0, %k0 {%k1} ##
- %res3 = call i8 @llvm.x86.avx512.mask.cmp.w.128(<8 x i16> %a0, <8 x i16> %a1, i32 3, i8 %mask)
+ %res3 = call i8 @llvm.x86.avx512.mask.cmp.w.128(<8 x i16> %a0, <8 x i16> %a1, i8 3, i8 %mask)
%vec3 = insertelement <8 x i8> %vec2, i8 %res3, i32 3
; CHECK: vpcmpneqw %xmm1, %xmm0, %k0 {%k1} ##
- %res4 = call i8 @llvm.x86.avx512.mask.cmp.w.128(<8 x i16> %a0, <8 x i16> %a1, i32 4, i8 %mask)
+ %res4 = call i8 @llvm.x86.avx512.mask.cmp.w.128(<8 x i16> %a0, <8 x i16> %a1, i8 4, i8 %mask)
%vec4 = insertelement <8 x i8> %vec3, i8 %res4, i32 4
; CHECK: vpcmpnltw %xmm1, %xmm0, %k0 {%k1} ##
- %res5 = call i8 @llvm.x86.avx512.mask.cmp.w.128(<8 x i16> %a0, <8 x i16> %a1, i32 5, i8 %mask)
+ %res5 = call i8 @llvm.x86.avx512.mask.cmp.w.128(<8 x i16> %a0, <8 x i16> %a1, i8 5, i8 %mask)
%vec5 = insertelement <8 x i8> %vec4, i8 %res5, i32 5
; CHECK: vpcmpnlew %xmm1, %xmm0, %k0 {%k1} ##
- %res6 = call i8 @llvm.x86.avx512.mask.cmp.w.128(<8 x i16> %a0, <8 x i16> %a1, i32 6, i8 %mask)
+ %res6 = call i8 @llvm.x86.avx512.mask.cmp.w.128(<8 x i16> %a0, <8 x i16> %a1, i8 6, i8 %mask)
%vec6 = insertelement <8 x i8> %vec5, i8 %res6, i32 6
; CHECK: vpcmpordw %xmm1, %xmm0, %k0 {%k1} ##
- %res7 = call i8 @llvm.x86.avx512.mask.cmp.w.128(<8 x i16> %a0, <8 x i16> %a1, i32 7, i8 %mask)
+ %res7 = call i8 @llvm.x86.avx512.mask.cmp.w.128(<8 x i16> %a0, <8 x i16> %a1, i8 7, i8 %mask)
%vec7 = insertelement <8 x i8> %vec6, i8 %res7, i32 7
ret <8 x i8> %vec7
}
-declare i8 @llvm.x86.avx512.mask.cmp.w.128(<8 x i16>, <8 x i16>, i32, i8) nounwind readnone
+declare i8 @llvm.x86.avx512.mask.cmp.w.128(<8 x i16>, <8 x i16>, i8, i8) nounwind readnone
define <8 x i8> @test_ucmp_w_128(<8 x i16> %a0, <8 x i16> %a1) {
; CHECK-LABEL: test_ucmp_w_128
; CHECK: vpcmpequw %xmm1, %xmm0, %k0 ##
- %res0 = call i8 @llvm.x86.avx512.mask.ucmp.w.128(<8 x i16> %a0, <8 x i16> %a1, i32 0, i8 -1)
+ %res0 = call i8 @llvm.x86.avx512.mask.ucmp.w.128(<8 x i16> %a0, <8 x i16> %a1, i8 0, i8 -1)
%vec0 = insertelement <8 x i8> undef, i8 %res0, i32 0
; CHECK: vpcmpltuw %xmm1, %xmm0, %k0 ##
- %res1 = call i8 @llvm.x86.avx512.mask.ucmp.w.128(<8 x i16> %a0, <8 x i16> %a1, i32 1, i8 -1)
+ %res1 = call i8 @llvm.x86.avx512.mask.ucmp.w.128(<8 x i16> %a0, <8 x i16> %a1, i8 1, i8 -1)
%vec1 = insertelement <8 x i8> %vec0, i8 %res1, i32 1
; CHECK: vpcmpleuw %xmm1, %xmm0, %k0 ##
- %res2 = call i8 @llvm.x86.avx512.mask.ucmp.w.128(<8 x i16> %a0, <8 x i16> %a1, i32 2, i8 -1)
+ %res2 = call i8 @llvm.x86.avx512.mask.ucmp.w.128(<8 x i16> %a0, <8 x i16> %a1, i8 2, i8 -1)
%vec2 = insertelement <8 x i8> %vec1, i8 %res2, i32 2
; CHECK: vpcmpunorduw %xmm1, %xmm0, %k0 ##
- %res3 = call i8 @llvm.x86.avx512.mask.ucmp.w.128(<8 x i16> %a0, <8 x i16> %a1, i32 3, i8 -1)
+ %res3 = call i8 @llvm.x86.avx512.mask.ucmp.w.128(<8 x i16> %a0, <8 x i16> %a1, i8 3, i8 -1)
%vec3 = insertelement <8 x i8> %vec2, i8 %res3, i32 3
; CHECK: vpcmpnequw %xmm1, %xmm0, %k0 ##
- %res4 = call i8 @llvm.x86.avx512.mask.ucmp.w.128(<8 x i16> %a0, <8 x i16> %a1, i32 4, i8 -1)
+ %res4 = call i8 @llvm.x86.avx512.mask.ucmp.w.128(<8 x i16> %a0, <8 x i16> %a1, i8 4, i8 -1)
%vec4 = insertelement <8 x i8> %vec3, i8 %res4, i32 4
; CHECK: vpcmpnltuw %xmm1, %xmm0, %k0 ##
- %res5 = call i8 @llvm.x86.avx512.mask.ucmp.w.128(<8 x i16> %a0, <8 x i16> %a1, i32 5, i8 -1)
+ %res5 = call i8 @llvm.x86.avx512.mask.ucmp.w.128(<8 x i16> %a0, <8 x i16> %a1, i8 5, i8 -1)
%vec5 = insertelement <8 x i8> %vec4, i8 %res5, i32 5
; CHECK: vpcmpnleuw %xmm1, %xmm0, %k0 ##
- %res6 = call i8 @llvm.x86.avx512.mask.ucmp.w.128(<8 x i16> %a0, <8 x i16> %a1, i32 6, i8 -1)
+ %res6 = call i8 @llvm.x86.avx512.mask.ucmp.w.128(<8 x i16> %a0, <8 x i16> %a1, i8 6, i8 -1)
%vec6 = insertelement <8 x i8> %vec5, i8 %res6, i32 6
; CHECK: vpcmporduw %xmm1, %xmm0, %k0 ##
- %res7 = call i8 @llvm.x86.avx512.mask.ucmp.w.128(<8 x i16> %a0, <8 x i16> %a1, i32 7, i8 -1)
+ %res7 = call i8 @llvm.x86.avx512.mask.ucmp.w.128(<8 x i16> %a0, <8 x i16> %a1, i8 7, i8 -1)
%vec7 = insertelement <8 x i8> %vec6, i8 %res7, i32 7
ret <8 x i8> %vec7
}
@@ -584,30 +584,415 @@ define <8 x i8> @test_ucmp_w_128(<8 x i16> %a0, <8 x i16> %a1) {
define <8 x i8> @test_mask_ucmp_w_128(<8 x i16> %a0, <8 x i16> %a1, i8 %mask) {
; CHECK-LABEL: test_mask_ucmp_w_128
; CHECK: vpcmpequw %xmm1, %xmm0, %k0 {%k1} ##
- %res0 = call i8 @llvm.x86.avx512.mask.ucmp.w.128(<8 x i16> %a0, <8 x i16> %a1, i32 0, i8 %mask)
+ %res0 = call i8 @llvm.x86.avx512.mask.ucmp.w.128(<8 x i16> %a0, <8 x i16> %a1, i8 0, i8 %mask)
%vec0 = insertelement <8 x i8> undef, i8 %res0, i32 0
; CHECK: vpcmpltuw %xmm1, %xmm0, %k0 {%k1} ##
- %res1 = call i8 @llvm.x86.avx512.mask.ucmp.w.128(<8 x i16> %a0, <8 x i16> %a1, i32 1, i8 %mask)
+ %res1 = call i8 @llvm.x86.avx512.mask.ucmp.w.128(<8 x i16> %a0, <8 x i16> %a1, i8 1, i8 %mask)
%vec1 = insertelement <8 x i8> %vec0, i8 %res1, i32 1
; CHECK: vpcmpleuw %xmm1, %xmm0, %k0 {%k1} ##
- %res2 = call i8 @llvm.x86.avx512.mask.ucmp.w.128(<8 x i16> %a0, <8 x i16> %a1, i32 2, i8 %mask)
+ %res2 = call i8 @llvm.x86.avx512.mask.ucmp.w.128(<8 x i16> %a0, <8 x i16> %a1, i8 2, i8 %mask)
%vec2 = insertelement <8 x i8> %vec1, i8 %res2, i32 2
; CHECK: vpcmpunorduw %xmm1, %xmm0, %k0 {%k1} ##
- %res3 = call i8 @llvm.x86.avx512.mask.ucmp.w.128(<8 x i16> %a0, <8 x i16> %a1, i32 3, i8 %mask)
+ %res3 = call i8 @llvm.x86.avx512.mask.ucmp.w.128(<8 x i16> %a0, <8 x i16> %a1, i8 3, i8 %mask)
%vec3 = insertelement <8 x i8> %vec2, i8 %res3, i32 3
; CHECK: vpcmpnequw %xmm1, %xmm0, %k0 {%k1} ##
- %res4 = call i8 @llvm.x86.avx512.mask.ucmp.w.128(<8 x i16> %a0, <8 x i16> %a1, i32 4, i8 %mask)
+ %res4 = call i8 @llvm.x86.avx512.mask.ucmp.w.128(<8 x i16> %a0, <8 x i16> %a1, i8 4, i8 %mask)
%vec4 = insertelement <8 x i8> %vec3, i8 %res4, i32 4
; CHECK: vpcmpnltuw %xmm1, %xmm0, %k0 {%k1} ##
- %res5 = call i8 @llvm.x86.avx512.mask.ucmp.w.128(<8 x i16> %a0, <8 x i16> %a1, i32 5, i8 %mask)
+ %res5 = call i8 @llvm.x86.avx512.mask.ucmp.w.128(<8 x i16> %a0, <8 x i16> %a1, i8 5, i8 %mask)
%vec5 = insertelement <8 x i8> %vec4, i8 %res5, i32 5
; CHECK: vpcmpnleuw %xmm1, %xmm0, %k0 {%k1} ##
- %res6 = call i8 @llvm.x86.avx512.mask.ucmp.w.128(<8 x i16> %a0, <8 x i16> %a1, i32 6, i8 %mask)
+ %res6 = call i8 @llvm.x86.avx512.mask.ucmp.w.128(<8 x i16> %a0, <8 x i16> %a1, i8 6, i8 %mask)
%vec6 = insertelement <8 x i8> %vec5, i8 %res6, i32 6
; CHECK: vpcmporduw %xmm1, %xmm0, %k0 {%k1} ##
- %res7 = call i8 @llvm.x86.avx512.mask.ucmp.w.128(<8 x i16> %a0, <8 x i16> %a1, i32 7, i8 %mask)
+ %res7 = call i8 @llvm.x86.avx512.mask.ucmp.w.128(<8 x i16> %a0, <8 x i16> %a1, i8 7, i8 %mask)
%vec7 = insertelement <8 x i8> %vec6, i8 %res7, i32 7
ret <8 x i8> %vec7
}
-declare i8 @llvm.x86.avx512.mask.ucmp.w.128(<8 x i16>, <8 x i16>, i32, i8) nounwind readnone
+declare i8 @llvm.x86.avx512.mask.ucmp.w.128(<8 x i16>, <8 x i16>, i8, i8) nounwind readnone
+
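+; The tests below cover the masked FMA intrinsics (fmadd, fmsub, fnmadd,
+; fnmsub, fmaddsub, fmsubadd) on 128- and 256-bit vectors. Each CHECK line
+; pins both the expected instruction form and its EVEX encoding, so a
+; regression in either instruction selection or encoding will be caught.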
+declare <8 x float> @llvm.x86.fma.mask.vfmadd.ps.256(<8 x float>, <8 x float>, <8 x float>, i8) nounwind readnone
+
+define <8 x float> @test_mask_vfmadd256_ps(<8 x float> %a0, <8 x float> %a1, <8 x float> %a2, i8 %mask) {
+ ; CHECK-LABEL: test_mask_vfmadd256_ps
+ ; CHECK: vfmadd213ps %ymm2, %ymm1, %ymm0 {%k1} ## encoding: [0x62,0xf2,0x75,0x29,0xa8,0xc2]
+ %res = call <8 x float> @llvm.x86.fma.mask.vfmadd.ps.256(<8 x float> %a0, <8 x float> %a1, <8 x float> %a2, i8 %mask) nounwind
+ ret <8 x float> %res
+}
+
+declare <4 x float> @llvm.x86.fma.mask.vfmadd.ps.128(<4 x float>, <4 x float>, <4 x float>, i8) nounwind readnone
+
+define <4 x float> @test_mask_vfmadd128_ps(<4 x float> %a0, <4 x float> %a1, <4 x float> %a2, i8 %mask) {
+ ; CHECK-LABEL: test_mask_vfmadd128_ps
+ ; CHECK: vfmadd213ps %xmm2, %xmm1, %xmm0 {%k1} ## encoding: [0x62,0xf2,0x75,0x09,0xa8,0xc2]
+ %res = call <4 x float> @llvm.x86.fma.mask.vfmadd.ps.128(<4 x float> %a0, <4 x float> %a1, <4 x float> %a2, i8 %mask) nounwind
+ ret <4 x float> %res
+}
+
+declare <4 x double> @llvm.x86.fma.mask.vfmadd.pd.256(<4 x double>, <4 x double>, <4 x double>, i8)
+
+define <4 x double> @test_mask_fmadd256_pd(<4 x double> %a, <4 x double> %b, <4 x double> %c, i8 %mask) {
+; CHECK-LABEL: test_mask_fmadd256_pd:
+; CHECK: vfmadd213pd %ymm2, %ymm1, %ymm0 {%k1} ## encoding: [0x62,0xf2,0xf5,0x29,0xa8,0xc2]
+ %res = call <4 x double> @llvm.x86.fma.mask.vfmadd.pd.256(<4 x double> %a, <4 x double> %b, <4 x double> %c, i8 %mask)
+ ret <4 x double> %res
+}
+
+declare <2 x double> @llvm.x86.fma.mask.vfmadd.pd.128(<2 x double>, <2 x double>, <2 x double>, i8)
+
+define <2 x double> @test_mask_fmadd128_pd(<2 x double> %a, <2 x double> %b, <2 x double> %c, i8 %mask) {
+; CHECK-LABEL: test_mask_fmadd128_pd:
+; CHECK: vfmadd213pd %xmm2, %xmm1, %xmm0 {%k1} ## encoding: [0x62,0xf2,0xf5,0x09,0xa8,0xc2]
+ %res = call <2 x double> @llvm.x86.fma.mask.vfmadd.pd.128(<2 x double> %a, <2 x double> %b, <2 x double> %c, i8 %mask)
+ ret <2 x double> %res
+}
+
+declare <8 x float> @llvm.x86.fma.mask.vfmsub.ps.256(<8 x float>, <8 x float>, <8 x float>, i8) nounwind readnone
+
+define <8 x float> @test_mask_vfmsub256_ps(<8 x float> %a0, <8 x float> %a1, <8 x float> %a2, i8 %mask) {
+ ; CHECK-LABEL: test_mask_vfmsub256_ps
+ ; CHECK: vfmsub213ps %ymm2, %ymm1, %ymm0 {%k1} ## encoding: [0x62,0xf2,0x75,0x29,0xaa,0xc2]
+ %res = call <8 x float> @llvm.x86.fma.mask.vfmsub.ps.256(<8 x float> %a0, <8 x float> %a1, <8 x float> %a2, i8 %mask) nounwind
+ ret <8 x float> %res
+}
+
+declare <4 x float> @llvm.x86.fma.mask.vfmsub.ps.128(<4 x float>, <4 x float>, <4 x float>, i8) nounwind readnone
+
+define <4 x float> @test_mask_vfmsub128_ps(<4 x float> %a0, <4 x float> %a1, <4 x float> %a2, i8 %mask) {
+ ; CHECK-LABEL: test_mask_vfmsub128_ps
+ ; CHECK: vfmsub213ps %xmm2, %xmm1, %xmm0 {%k1} ## encoding: [0x62,0xf2,0x75,0x09,0xaa,0xc2]
+ %res = call <4 x float> @llvm.x86.fma.mask.vfmsub.ps.128(<4 x float> %a0, <4 x float> %a1, <4 x float> %a2, i8 %mask) nounwind
+ ret <4 x float> %res
+}
+
+declare <4 x double> @llvm.x86.fma.mask.vfmsub.pd.256(<4 x double>, <4 x double>, <4 x double>, i8) nounwind readnone
+
+define <4 x double> @test_mask_vfmsub256_pd(<4 x double> %a0, <4 x double> %a1, <4 x double> %a2, i8 %mask) {
+ ; CHECK-LABEL: test_mask_vfmsub256_pd
+ ; CHECK: vfmsub213pd %ymm2, %ymm1, %ymm0 {%k1} ## encoding: [0x62,0xf2,0xf5,0x29,0xaa,0xc2]
+ %res = call <4 x double> @llvm.x86.fma.mask.vfmsub.pd.256(<4 x double> %a0, <4 x double> %a1, <4 x double> %a2, i8 %mask) nounwind
+ ret <4 x double> %res
+}
+
+declare <2 x double> @llvm.x86.fma.mask.vfmsub.pd.128(<2 x double>, <2 x double>, <2 x double>, i8) nounwind readnone
+
+define <2 x double> @test_mask_vfmsub128_pd(<2 x double> %a0, <2 x double> %a1, <2 x double> %a2, i8 %mask) {
+ ; CHECK-LABEL: test_mask_vfmsub128_pd
+ ; CHECK: vfmsub213pd %xmm2, %xmm1, %xmm0 {%k1} ## encoding: [0x62,0xf2,0xf5,0x09,0xaa,0xc2]
+ %res = call <2 x double> @llvm.x86.fma.mask.vfmsub.pd.128(<2 x double> %a0, <2 x double> %a1, <2 x double> %a2, i8 %mask) nounwind
+ ret <2 x double> %res
+}
+
+declare <8 x float> @llvm.x86.fma.mask.vfnmadd.ps.256(<8 x float>, <8 x float>, <8 x float>, i8) nounwind readnone
+
+define <8 x float> @test_mask_vfnmadd256_ps(<8 x float> %a0, <8 x float> %a1, <8 x float> %a2, i8 %mask) {
+ ; CHECK-LABEL: test_mask_vfnmadd256_ps
+ ; CHECK: vfnmadd213ps %ymm2, %ymm1, %ymm0 {%k1} ## encoding: [0x62,0xf2,0x75,0x29,0xac,0xc2]
+ %res = call <8 x float> @llvm.x86.fma.mask.vfnmadd.ps.256(<8 x float> %a0, <8 x float> %a1, <8 x float> %a2, i8 %mask) nounwind
+ ret <8 x float> %res
+}
+
+declare <4 x float> @llvm.x86.fma.mask.vfnmadd.ps.128(<4 x float>, <4 x float>, <4 x float>, i8) nounwind readnone
+
+define <4 x float> @test_mask_vfnmadd128_ps(<4 x float> %a0, <4 x float> %a1, <4 x float> %a2, i8 %mask) {
+ ; CHECK-LABEL: test_mask_vfnmadd128_ps
+ ; CHECK: vfnmadd213ps %xmm2, %xmm1, %xmm0 {%k1} ## encoding: [0x62,0xf2,0x75,0x09,0xac,0xc2]
+ %res = call <4 x float> @llvm.x86.fma.mask.vfnmadd.ps.128(<4 x float> %a0, <4 x float> %a1, <4 x float> %a2, i8 %mask) nounwind
+ ret <4 x float> %res
+}
+
+declare <4 x double> @llvm.x86.fma.mask.vfnmadd.pd.256(<4 x double>, <4 x double>, <4 x double>, i8) nounwind readnone
+
+define <4 x double> @test_mask_vfnmadd256_pd(<4 x double> %a0, <4 x double> %a1, <4 x double> %a2, i8 %mask) {
+ ; CHECK-LABEL: test_mask_vfnmadd256_pd
+ ; CHECK: vfnmadd213pd %ymm2, %ymm1, %ymm0 {%k1} ## encoding: [0x62,0xf2,0xf5,0x29,0xac,0xc2]
+ %res = call <4 x double> @llvm.x86.fma.mask.vfnmadd.pd.256(<4 x double> %a0, <4 x double> %a1, <4 x double> %a2, i8 %mask) nounwind
+ ret <4 x double> %res
+}
+
+declare <2 x double> @llvm.x86.fma.mask.vfnmadd.pd.128(<2 x double>, <2 x double>, <2 x double>, i8) nounwind readnone
+
+define <2 x double> @test_mask_vfnmadd128_pd(<2 x double> %a0, <2 x double> %a1, <2 x double> %a2, i8 %mask) {
+ ; CHECK-LABEL: test_mask_vfnmadd128_pd
+ ; CHECK: vfnmadd213pd %xmm2, %xmm1, %xmm0 {%k1} ## encoding: [0x62,0xf2,0xf5,0x09,0xac,0xc2]
+ %res = call <2 x double> @llvm.x86.fma.mask.vfnmadd.pd.128(<2 x double> %a0, <2 x double> %a1, <2 x double> %a2, i8 %mask) nounwind
+ ret <2 x double> %res
+}
+
+declare <8 x float> @llvm.x86.fma.mask.vfnmsub.ps.256(<8 x float>, <8 x float>, <8 x float>, i8) nounwind readnone
+
+define <8 x float> @test_mask_vfnmsub256_ps(<8 x float> %a0, <8 x float> %a1, <8 x float> %a2, i8 %mask) {
+ ; CHECK-LABEL: test_mask_vfnmsub256_ps
+ ; CHECK: vfnmsub213ps %ymm2, %ymm1, %ymm0 {%k1} ## encoding: [0x62,0xf2,0x75,0x29,0xae,0xc2]
+ %res = call <8 x float> @llvm.x86.fma.mask.vfnmsub.ps.256(<8 x float> %a0, <8 x float> %a1, <8 x float> %a2, i8 %mask) nounwind
+ ret <8 x float> %res
+}
+
+declare <4 x float> @llvm.x86.fma.mask.vfnmsub.ps.128(<4 x float>, <4 x float>, <4 x float>, i8) nounwind readnone
+
+define <4 x float> @test_mask_vfnmsub128_ps(<4 x float> %a0, <4 x float> %a1, <4 x float> %a2, i8 %mask) {
+ ; CHECK-LABEL: test_mask_vfnmsub128_ps
+ ; CHECK: vfnmsub213ps %xmm2, %xmm1, %xmm0 {%k1} ## encoding: [0x62,0xf2,0x75,0x09,0xae,0xc2]
+ %res = call <4 x float> @llvm.x86.fma.mask.vfnmsub.ps.128(<4 x float> %a0, <4 x float> %a1, <4 x float> %a2, i8 %mask) nounwind
+ ret <4 x float> %res
+}
+
+declare <4 x double> @llvm.x86.fma.mask.vfnmsub.pd.256(<4 x double>, <4 x double>, <4 x double>, i8) nounwind readnone
+
+define <4 x double> @test_mask_vfnmsub256_pd(<4 x double> %a0, <4 x double> %a1, <4 x double> %a2, i8 %mask) {
+ ; CHECK-LABEL: test_mask_vfnmsub256_pd
+ ; CHECK: vfnmsub213pd %ymm2, %ymm1, %ymm0 {%k1} ## encoding: [0x62,0xf2,0xf5,0x29,0xae,0xc2]
+ %res = call <4 x double> @llvm.x86.fma.mask.vfnmsub.pd.256(<4 x double> %a0, <4 x double> %a1, <4 x double> %a2, i8 %mask) nounwind
+ ret <4 x double> %res
+}
+
+declare <2 x double> @llvm.x86.fma.mask.vfnmsub.pd.128(<2 x double>, <2 x double>, <2 x double>, i8) nounwind readnone
+
+define <2 x double> @test_mask_vfnmsub128_pd(<2 x double> %a0, <2 x double> %a1, <2 x double> %a2, i8 %mask) {
+ ; CHECK-LABEL: test_mask_vfnmsub128_pd
+ ; CHECK: vfnmsub213pd %xmm2, %xmm1, %xmm0 {%k1} ## encoding: [0x62,0xf2,0xf5,0x09,0xae,0xc2]
+ %res = call <2 x double> @llvm.x86.fma.mask.vfnmsub.pd.128(<2 x double> %a0, <2 x double> %a1, <2 x double> %a2, i8 %mask) nounwind
+ ret <2 x double> %res
+}
+
+declare <8 x float> @llvm.x86.fma.mask.vfmaddsub.ps.256(<8 x float>, <8 x float>, <8 x float>, i8) nounwind readnone
+
+define <8 x float> @test_mask_fmaddsub256_ps(<8 x float> %a, <8 x float> %b, <8 x float> %c, i8 %mask) {
+; CHECK-LABEL: test_mask_fmaddsub256_ps:
+; CHECK: vfmaddsub213ps %ymm2, %ymm1, %ymm0 {%k1} ## encoding: [0x62,0xf2,0x75,0x29,0xa6,0xc2]
+ %res = call <8 x float> @llvm.x86.fma.mask.vfmaddsub.ps.256(<8 x float> %a, <8 x float> %b, <8 x float> %c, i8 %mask)
+ ret <8 x float> %res
+}
+
+declare <4 x float> @llvm.x86.fma.mask.vfmaddsub.ps.128(<4 x float>, <4 x float>, <4 x float>, i8) nounwind readnone
+
+define <4 x float> @test_mask_fmaddsub128_ps(<4 x float> %a, <4 x float> %b, <4 x float> %c, i8 %mask) {
+; CHECK-LABEL: test_mask_fmaddsub128_ps:
+; CHECK: vfmaddsub213ps %xmm2, %xmm1, %xmm0 {%k1} ## encoding: [0x62,0xf2,0x75,0x09,0xa6,0xc2]
+ %res = call <4 x float> @llvm.x86.fma.mask.vfmaddsub.ps.128(<4 x float> %a, <4 x float> %b, <4 x float> %c, i8 %mask)
+ ret <4 x float> %res
+}
+
+declare <4 x double> @llvm.x86.fma.mask.vfmaddsub.pd.256(<4 x double>, <4 x double>, <4 x double>, i8) nounwind readnone
+
+define <4 x double> @test_mask_vfmaddsub256_pd(<4 x double> %a0, <4 x double> %a1, <4 x double> %a2, i8 %mask) {
+ ; CHECK-LABEL: test_mask_vfmaddsub256_pd
+ ; CHECK: vfmaddsub213pd %ymm2, %ymm1, %ymm0 {%k1} ## encoding: [0x62,0xf2,0xf5,0x29,0xa6,0xc2]
+ %res = call <4 x double> @llvm.x86.fma.mask.vfmaddsub.pd.256(<4 x double> %a0, <4 x double> %a1, <4 x double> %a2, i8 %mask) nounwind
+ ret <4 x double> %res
+}
+
+declare <2 x double> @llvm.x86.fma.mask.vfmaddsub.pd.128(<2 x double>, <2 x double>, <2 x double>, i8) nounwind readnone
+
+define <2 x double> @test_mask_vfmaddsub128_pd(<2 x double> %a0, <2 x double> %a1, <2 x double> %a2, i8 %mask) {
+ ; CHECK-LABEL: test_mask_vfmaddsub128_pd
+ ; CHECK: vfmaddsub213pd %xmm2, %xmm1, %xmm0 {%k1} ## encoding: [0x62,0xf2,0xf5,0x09,0xa6,0xc2]
+ %res = call <2 x double> @llvm.x86.fma.mask.vfmaddsub.pd.128(<2 x double> %a0, <2 x double> %a1, <2 x double> %a2, i8 %mask) nounwind
+ ret <2 x double> %res
+}
+
+declare <8 x float> @llvm.x86.fma.mask.vfmsubadd.ps.256(<8 x float>, <8 x float>, <8 x float>, i8) nounwind readnone
+
+define <8 x float> @test_mask_vfmsubadd256_ps(<8 x float> %a0, <8 x float> %a1, <8 x float> %a2, i8 %mask) {
+ ; CHECK-LABEL: test_mask_vfmsubadd256_ps
+ ; CHECK: vfmsubadd213ps %ymm2, %ymm1, %ymm0 {%k1} ## encoding: [0x62,0xf2,0x75,0x29,0xa7,0xc2]
+ %res = call <8 x float> @llvm.x86.fma.mask.vfmsubadd.ps.256(<8 x float> %a0, <8 x float> %a1, <8 x float> %a2, i8 %mask) nounwind
+ ret <8 x float> %res
+}
+
+declare <4 x float> @llvm.x86.fma.mask.vfmsubadd.ps.128(<4 x float>, <4 x float>, <4 x float>, i8) nounwind readnone
+
+define <4 x float> @test_mask_vfmsubadd128_ps(<4 x float> %a0, <4 x float> %a1, <4 x float> %a2, i8 %mask) {
+ ; CHECK-LABEL: test_mask_vfmsubadd128_ps
+ ; CHECK: vfmsubadd213ps %xmm2, %xmm1, %xmm0 {%k1} ## encoding: [0x62,0xf2,0x75,0x09,0xa7,0xc2]
+ %res = call <4 x float> @llvm.x86.fma.mask.vfmsubadd.ps.128(<4 x float> %a0, <4 x float> %a1, <4 x float> %a2, i8 %mask) nounwind
+ ret <4 x float> %res
+}
+
+declare <4 x double> @llvm.x86.fma.mask.vfmsubadd.pd.256(<4 x double>, <4 x double>, <4 x double>, i8) nounwind readnone
+
+define <4 x double> @test_mask_vfmsubadd256_pd(<4 x double> %a0, <4 x double> %a1, <4 x double> %a2, i8 %mask) {
+ ; CHECK-LABEL: test_mask_vfmsubadd256_pd
+ ; CHECK: vfmsubadd213pd %ymm2, %ymm1, %ymm0 {%k1} ## encoding: [0x62,0xf2,0xf5,0x29,0xa7,0xc2]
+ %res = call <4 x double> @llvm.x86.fma.mask.vfmsubadd.pd.256(<4 x double> %a0, <4 x double> %a1, <4 x double> %a2, i8 %mask) nounwind
+ ret <4 x double> %res
+}
+declare <2 x double> @llvm.x86.fma.mask.vfmsubadd.pd.128(<2 x double>, <2 x double>, <2 x double>, i8) nounwind readnone
+
+define <2 x double> @test_mask_vfmsubadd128_pd(<2 x double> %a0, <2 x double> %a1, <2 x double> %a2, i8 %mask) {
+ ; CHECK-LABEL: test_mask_vfmsubadd128_pd
+ ; CHECK: vfmsubadd213pd %xmm2, %xmm1, %xmm0 {%k1} ## encoding: [0x62,0xf2,0xf5,0x09,0xa7,0xc2]
+ %res = call <2 x double> @llvm.x86.fma.mask.vfmsubadd.pd.128(<2 x double> %a0, <2 x double> %a1, <2 x double> %a2, i8 %mask) nounwind
+ ret <2 x double> %res
+}
+
+define <2 x double> @test_mask_vfmsubadd128rm_pd(<2 x double> %a0, <2 x double> %a1, <2 x double>* %ptr_a2, i8 %mask) {
+ ; CHECK-LABEL: test_mask_vfmsubadd128rm_pd
+ ; CHECK: vfmsubadd213pd (%rdi), %xmm1, %xmm0 {%k1} ## encoding: [0x62,0xf2,0xf5,0x09,0xa7,0x07]
+ %a2 = load <2 x double>* %ptr_a2
+ %res = call <2 x double> @llvm.x86.fma.mask.vfmsubadd.pd.128(<2 x double> %a0, <2 x double> %a1, <2 x double> %a2, i8 %mask) nounwind
+ ret <2 x double> %res
+}
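+
+; Unlike the 128/256-bit forms above, the 512-bit vfmsubadd variant takes an
+; extra i32 rounding-mode operand; the i32 4 passed below selects the current
+; rounding mode (no static rounding).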
+declare <8 x double> @llvm.x86.fma.mask.vfmsubadd.pd.512(<8 x double>, <8 x double>, <8 x double>, i8, i32) nounwind readnone
+define <8 x double> @test_mask_vfmsubaddrm_pd(<8 x double> %a0, <8 x double> %a1, <8 x double>* %ptr_a2, i8 %mask) {
+ ; CHECK-LABEL: test_mask_vfmsubaddrm_pd
+ ; CHECK: vfmsubadd213pd (%rdi), %zmm1, %zmm0 {%k1} ## encoding: [0x62,0xf2,0xf5,0x49,0xa7,0x07]
+ %a2 = load <8 x double>* %ptr_a2, align 8
+ %res = call <8 x double> @llvm.x86.fma.mask.vfmsubadd.pd.512(<8 x double> %a0, <8 x double> %a1, <8 x double> %a2, i8 %mask, i32 4) nounwind
+ ret <8 x double> %res
+}
+
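+; The suffixes on the tests below denote the operand forms being checked:
+; _r register operands with mask, _rz register operands with an all-ones
+; (no-op) mask, _rmk/_rmkz a memory operand with/without mask, _rmb a
+; broadcast from a scalar load, and a trailing 'a' for variants that spell
+; out the load alignment explicitly.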
+define <4 x float> @test_mask_vfmadd128_ps_r(<4 x float> %a0, <4 x float> %a1, <4 x float> %a2, i8 %mask) {
+ ; CHECK-LABEL: test_mask_vfmadd128_ps_r
+ ; CHECK: vfmadd213ps %xmm2, %xmm1, %xmm0 {%k1} ## encoding: [0x62,0xf2,0x75,0x09,0xa8,0xc2]
+ %res = call <4 x float> @llvm.x86.fma.mask.vfmadd.ps.128(<4 x float> %a0, <4 x float> %a1, <4 x float> %a2, i8 %mask) nounwind
+ ret <4 x float> %res
+}
+
+define <4 x float> @test_mask_vfmadd128_ps_rz(<4 x float> %a0, <4 x float> %a1, <4 x float> %a2) {
+ ; CHECK-LABEL: test_mask_vfmadd128_ps_rz
+ ; CHECK: vfmadd213ps %xmm2, %xmm1, %xmm0 ## encoding: [0x62,0xf2,0x75,0x08,0xa8,0xc2]
+ %res = call <4 x float> @llvm.x86.fma.mask.vfmadd.ps.128(<4 x float> %a0, <4 x float> %a1, <4 x float> %a2, i8 -1) nounwind
+ ret <4 x float> %res
+}
+
+define <4 x float> @test_mask_vfmadd128_ps_rmk(<4 x float> %a0, <4 x float> %a1, <4 x float>* %ptr_a2, i8 %mask) {
+ ; CHECK-LABEL: test_mask_vfmadd128_ps_rmk
+ ; CHECK: vfmadd213ps (%rdi), %xmm1, %xmm0 {%k1} ## encoding: [0x62,0xf2,0x75,0x09,0xa8,0x07]
+ %a2 = load <4 x float>* %ptr_a2
+ %res = call <4 x float> @llvm.x86.fma.mask.vfmadd.ps.128(<4 x float> %a0, <4 x float> %a1, <4 x float> %a2, i8 %mask) nounwind
+ ret <4 x float> %res
+}
+
+define <4 x float> @test_mask_vfmadd128_ps_rmka(<4 x float> %a0, <4 x float> %a1, <4 x float>* %ptr_a2, i8 %mask) {
+ ; CHECK-LABEL: test_mask_vfmadd128_ps_rmka
+ ; CHECK: vfmadd213ps (%rdi), %xmm1, %xmm0 {%k1} ## encoding: [0x62,0xf2,0x75,0x09,0xa8,0x07]
+ %a2 = load <4 x float>* %ptr_a2, align 8
+ %res = call <4 x float> @llvm.x86.fma.mask.vfmadd.ps.128(<4 x float> %a0, <4 x float> %a1, <4 x float> %a2, i8 %mask) nounwind
+ ret <4 x float> %res
+}
+
+define <4 x float> @test_mask_vfmadd128_ps_rmkz(<4 x float> %a0, <4 x float> %a1, <4 x float>* %ptr_a2) {
+ ; CHECK-LABEL: test_mask_vfmadd128_ps_rmkz
+ ; CHECK: vfmadd213ps (%rdi), %xmm1, %xmm0 ## encoding: [0xc4,0xe2,0x71,0xa8,0x07]
+ %a2 = load <4 x float>* %ptr_a2
+ %res = call <4 x float> @llvm.x86.fma.mask.vfmadd.ps.128(<4 x float> %a0, <4 x float> %a1, <4 x float> %a2, i8 -1) nounwind
+ ret <4 x float> %res
+}
+
+define <4 x float> @test_mask_vfmadd128_ps_rmkza(<4 x float> %a0, <4 x float> %a1, <4 x float>* %ptr_a2) {
+ ; CHECK-LABEL: test_mask_vfmadd128_ps_rmkza
+ ; CHECK: vfmadd213ps (%rdi), %xmm1, %xmm0 ## encoding: [0xc4,0xe2,0x71,0xa8,0x07]
+ %a2 = load <4 x float>* %ptr_a2, align 4
+ %res = call <4 x float> @llvm.x86.fma.mask.vfmadd.ps.128(<4 x float> %a0, <4 x float> %a1, <4 x float> %a2, i8 -1) nounwind
+ ret <4 x float> %res
+}
+
+define <4 x float> @test_mask_vfmadd128_ps_rmb(<4 x float> %a0, <4 x float> %a1, float* %ptr_a2, i8 %mask) {
+ ; CHECK-LABEL: test_mask_vfmadd128_ps_rmb
+ ; CHECK: vfmadd213ps (%rdi){1to4}, %xmm1, %xmm0 {%k1} ## encoding: [0x62,0xf2,0x75,0x19,0xa8,0x07]
+ %q = load float* %ptr_a2
+ %vecinit.i = insertelement <4 x float> undef, float %q, i32 0
+ %vecinit2.i = insertelement <4 x float> %vecinit.i, float %q, i32 1
+ %vecinit4.i = insertelement <4 x float> %vecinit2.i, float %q, i32 2
+ %vecinit6.i = insertelement <4 x float> %vecinit4.i, float %q, i32 3
+ %res = call <4 x float> @llvm.x86.fma.mask.vfmadd.ps.128(<4 x float> %a0, <4 x float> %a1, <4 x float> %vecinit6.i, i8 %mask) nounwind
+ ret <4 x float> %res
+}
+
+define <4 x float> @test_mask_vfmadd128_ps_rmba(<4 x float> %a0, <4 x float> %a1, float* %ptr_a2, i8 %mask) {
+ ; CHECK-LABEL: test_mask_vfmadd128_ps_rmba
+ ; CHECK: vfmadd213ps (%rdi){1to4}, %xmm1, %xmm0 {%k1} ## encoding: [0x62,0xf2,0x75,0x19,0xa8,0x07]
+ %q = load float* %ptr_a2, align 4
+ %vecinit.i = insertelement <4 x float> undef, float %q, i32 0
+ %vecinit2.i = insertelement <4 x float> %vecinit.i, float %q, i32 1
+ %vecinit4.i = insertelement <4 x float> %vecinit2.i, float %q, i32 2
+ %vecinit6.i = insertelement <4 x float> %vecinit4.i, float %q, i32 3
+ %res = call <4 x float> @llvm.x86.fma.mask.vfmadd.ps.128(<4 x float> %a0, <4 x float> %a1, <4 x float> %vecinit6.i, i8 %mask) nounwind
+ ret <4 x float> %res
+}
+
+define <4 x float> @test_mask_vfmadd128_ps_rmbz(<4 x float> %a0, <4 x float> %a1, float* %ptr_a2) {
+ ; CHECK-LABEL: test_mask_vfmadd128_ps_rmbz
+ ; CHECK: vfmadd213ps (%rdi){1to4}, %xmm1, %xmm0 ## encoding: [0x62,0xf2,0x75,0x18,0xa8,0x07]
+ %q = load float* %ptr_a2
+ %vecinit.i = insertelement <4 x float> undef, float %q, i32 0
+ %vecinit2.i = insertelement <4 x float> %vecinit.i, float %q, i32 1
+ %vecinit4.i = insertelement <4 x float> %vecinit2.i, float %q, i32 2
+ %vecinit6.i = insertelement <4 x float> %vecinit4.i, float %q, i32 3
+ %res = call <4 x float> @llvm.x86.fma.mask.vfmadd.ps.128(<4 x float> %a0, <4 x float> %a1, <4 x float> %vecinit6.i, i8 -1) nounwind
+ ret <4 x float> %res
+}
+
+define <4 x float> @test_mask_vfmadd128_ps_rmbza(<4 x float> %a0, <4 x float> %a1, float* %ptr_a2) {
+ ; CHECK-LABEL: test_mask_vfmadd128_ps_rmbza
+ ; CHECK: vfmadd213ps (%rdi){1to4}, %xmm1, %xmm0 ## encoding: [0x62,0xf2,0x75,0x18,0xa8,0x07]
+ %q = load float* %ptr_a2, align 4
+ %vecinit.i = insertelement <4 x float> undef, float %q, i32 0
+ %vecinit2.i = insertelement <4 x float> %vecinit.i, float %q, i32 1
+ %vecinit4.i = insertelement <4 x float> %vecinit2.i, float %q, i32 2
+ %vecinit6.i = insertelement <4 x float> %vecinit4.i, float %q, i32 3
+ %res = call <4 x float> @llvm.x86.fma.mask.vfmadd.ps.128(<4 x float> %a0, <4 x float> %a1, <4 x float> %vecinit6.i, i8 -1) nounwind
+ ret <4 x float> %res
+}
+
+define <2 x double> @test_mask_vfmadd128_pd_r(<2 x double> %a0, <2 x double> %a1, <2 x double> %a2, i8 %mask) {
+ ; CHECK-LABEL: test_mask_vfmadd128_pd_r
+ ; CHECK: vfmadd213pd %xmm2, %xmm1, %xmm0 {%k1} ## encoding: [0x62,0xf2,0xf5,0x09,0xa8,0xc2]
+ %res = call <2 x double> @llvm.x86.fma.mask.vfmadd.pd.128(<2 x double> %a0, <2 x double> %a1, <2 x double> %a2, i8 %mask) nounwind
+ ret <2 x double> %res
+}
+
+define <2 x double> @test_mask_vfmadd128_pd_rz(<2 x double> %a0, <2 x double> %a1, <2 x double> %a2) {
+ ; CHECK-LABEL: test_mask_vfmadd128_pd_rz
+ ; CHECK: vfmadd213pd %xmm2, %xmm1, %xmm0 ## encoding: [0x62,0xf2,0xf5,0x08,0xa8,0xc2]
+ %res = call <2 x double> @llvm.x86.fma.mask.vfmadd.pd.128(<2 x double> %a0, <2 x double> %a1, <2 x double> %a2, i8 -1) nounwind
+ ret <2 x double> %res
+}
+
+define <2 x double> @test_mask_vfmadd128_pd_rmk(<2 x double> %a0, <2 x double> %a1, <2 x double>* %ptr_a2, i8 %mask) {
+ ; CHECK-LABEL: test_mask_vfmadd128_pd_rmk
+ ; CHECK: vfmadd213pd (%rdi), %xmm1, %xmm0 {%k1} ## encoding: [0x62,0xf2,0xf5,0x09,0xa8,0x07]
+ %a2 = load <2 x double>* %ptr_a2
+ %res = call <2 x double> @llvm.x86.fma.mask.vfmadd.pd.128(<2 x double> %a0, <2 x double> %a1, <2 x double> %a2, i8 %mask) nounwind
+ ret <2 x double> %res
+}
+
+define <2 x double> @test_mask_vfmadd128_pd_rmkz(<2 x double> %a0, <2 x double> %a1, <2 x double>* %ptr_a2) {
+ ; CHECK-LABEL: test_mask_vfmadd128_pd_rmkz
+ ; CHECK: vfmadd213pd (%rdi), %xmm1, %xmm0 ## encoding: [0xc4,0xe2,0xf1,0xa8,0x07]
+ %a2 = load <2 x double>* %ptr_a2
+ %res = call <2 x double> @llvm.x86.fma.mask.vfmadd.pd.128(<2 x double> %a0, <2 x double> %a1, <2 x double> %a2, i8 -1) nounwind
+ ret <2 x double> %res
+}
+
+define <4 x double> @test_mask_vfmadd256_pd_r(<4 x double> %a0, <4 x double> %a1, <4 x double> %a2, i8 %mask) {
+ ; CHECK-LABEL: test_mask_vfmadd256_pd_r
+ ; CHECK: vfmadd213pd %ymm2, %ymm1, %ymm0 {%k1} ## encoding: [0x62,0xf2,0xf5,0x29,0xa8,0xc2]
+ %res = call <4 x double> @llvm.x86.fma.mask.vfmadd.pd.256(<4 x double> %a0, <4 x double> %a1, <4 x double> %a2, i8 %mask) nounwind
+ ret <4 x double> %res
+}
+
+define <4 x double> @test_mask_vfmadd256_pd_rz(<4 x double> %a0, <4 x double> %a1, <4 x double> %a2) {
+ ; CHECK-LABEL: test_mask_vfmadd256_pd_rz
+ ; CHECK: vfmadd213pd %ymm2, %ymm1, %ymm0 ## encoding: [0x62,0xf2,0xf5,0x28,0xa8,0xc2]
+ %res = call <4 x double> @llvm.x86.fma.mask.vfmadd.pd.256(<4 x double> %a0, <4 x double> %a1, <4 x double> %a2, i8 -1) nounwind
+ ret <4 x double> %res
+}
+
+define <4 x double> @test_mask_vfmadd256_pd_rmk(<4 x double> %a0, <4 x double> %a1, <4 x double>* %ptr_a2, i8 %mask) {
+ ; CHECK-LABEL: test_mask_vfmadd256_pd_rmk
+ ; CHECK: vfmadd213pd (%rdi), %ymm1, %ymm0 {%k1} ## encoding: [0x62,0xf2,0xf5,0x29,0xa8,0x07]
+ %a2 = load <4 x double>* %ptr_a2
+ %res = call <4 x double> @llvm.x86.fma.mask.vfmadd.pd.256(<4 x double> %a0, <4 x double> %a1, <4 x double> %a2, i8 %mask) nounwind
+ ret <4 x double> %res
+}
+
+define <4 x double> @test_mask_vfmadd256_pd_rmkz(<4 x double> %a0, <4 x double> %a1, <4 x double>* %ptr_a2) {
+ ; CHECK-LABEL: test_mask_vfmadd256_pd_rmkz
+ ; CHECK: vfmadd213pd (%rdi), %ymm1, %ymm0 ## encoding: [0xc4,0xe2,0xf5,0xa8,0x07]
+ %a2 = load <4 x double>* %ptr_a2
+ %res = call <4 x double> @llvm.x86.fma.mask.vfmadd.pd.256(<4 x double> %a0, <4 x double> %a1, <4 x double> %a2, i8 -1) nounwind
+ ret <4 x double> %res
+}
diff --git a/test/CodeGen/X86/avx512er-intrinsics.ll b/test/CodeGen/X86/avx512er-intrinsics.ll
index 0000ece..fa4352e 100644
--- a/test/CodeGen/X86/avx512er-intrinsics.ll
+++ b/test/CodeGen/X86/avx512er-intrinsics.ll
@@ -64,16 +64,53 @@ define <8 x double> @test_exp2_pd_512(<8 x double> %a0) {
declare <8 x double> @llvm.x86.avx512.exp2.pd(<8 x double>, <8 x double>, i8, i32) nounwind readnone
define <4 x float> @test_rsqrt28_ss(<4 x float> %a0) {
- ; CHECK: vrsqrt28ss {sae}, {{.*}}encoding: [0x62,0xf2,0x7d,0x18,0xcd,0xc0]
+ ; CHECK: vrsqrt28ss %xmm0, %xmm0, %xmm0 {sae} # encoding: [0x62,0xf2,0x7d,0x18,0xcd,0xc0]
%res = call <4 x float> @llvm.x86.avx512.rsqrt28.ss(<4 x float> %a0, <4 x float> %a0, <4 x float> zeroinitializer, i8 -1, i32 8) ; <<4 x float>> [#uses=1]
ret <4 x float> %res
}
declare <4 x float> @llvm.x86.avx512.rsqrt28.ss(<4 x float>, <4 x float>, <4 x float>, i8, i32) nounwind readnone
define <4 x float> @test_rcp28_ss(<4 x float> %a0) {
- ; CHECK: vrcp28ss {sae}, {{.*}}encoding: [0x62,0xf2,0x7d,0x18,0xcb,0xc0]
+ ; CHECK: vrcp28ss %xmm0, %xmm0, %xmm0 {sae} # encoding: [0x62,0xf2,0x7d,0x18,0xcb,0xc0]
%res = call <4 x float> @llvm.x86.avx512.rcp28.ss(<4 x float> %a0, <4 x float> %a0, <4 x float> zeroinitializer, i8 -1, i32 8) ; <<4 x float>> [#uses=1]
ret <4 x float> %res
}
declare <4 x float> @llvm.x86.avx512.rcp28.ss(<4 x float>, <4 x float>, <4 x float>, i8, i32) nounwind readnone
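+
+; The tests below cover the masked and zero-masked forms of vrsqrt28ss/sd,
+; including a plain memory operand and a displaced memory operand, again
+; pinning the expected EVEX encodings.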
+define <4 x float> @test_rsqrt28_ss_maskz(<4 x float> %a0) {
+ ; CHECK: vrsqrt28ss %xmm0, %xmm0, %xmm0 {%k1} {z}{sae} # encoding: [0x62,0xf2,0x7d,0x99,0xcd,0xc0]
+ %res = call <4 x float> @llvm.x86.avx512.rsqrt28.ss(<4 x float> %a0, <4 x float> %a0, <4 x float> zeroinitializer, i8 7, i32 8)
+ ret <4 x float> %res
+}
+
+define <4 x float> @test_rsqrt28_ss_mask(<4 x float> %a0, <4 x float> %b0, <4 x float> %c0) {
+ ; CHECK: vrsqrt28ss %xmm1, %xmm0, %xmm2 {%k1}{sae} # encoding: [0x62,0xf2,0x7d,0x19,0xcd,0xd1]
+ %res = call <4 x float> @llvm.x86.avx512.rsqrt28.ss(<4 x float> %a0, <4 x float> %b0, <4 x float> %c0, i8 7, i32 8)
+ ret <4 x float> %res
+}
+
+define <2 x double> @test_rsqrt28_sd_maskz(<2 x double> %a0) {
+ ; CHECK: vrsqrt28sd %xmm0, %xmm0, %xmm0 {%k1} {z}{sae} # encoding: [0x62,0xf2,0xfd,0x99,0xcd,0xc0]
+ %res = call <2 x double> @llvm.x86.avx512.rsqrt28.sd(<2 x double> %a0, <2 x double> %a0, <2 x double> zeroinitializer, i8 7, i32 8)
+ ret <2 x double> %res
+}
+
+declare <2 x double> @llvm.x86.avx512.rsqrt28.sd(<2 x double>, <2 x double>, <2 x double>, i8, i32) nounwind readnone
+
+define <2 x double> @test_rsqrt28_sd_maskz_mem(<2 x double> %a0, double* %ptr) {
+ ; CHECK: vrsqrt28sd (%rdi), %xmm0, %xmm0 {%k1} {z} # encoding: [0x62,0xf2,0xfd,0x89,0xcd,0x07]
+ %mem = load double* %ptr, align 8
+ %mem_v = insertelement <2 x double> undef, double %mem, i32 0
+ %res = call <2 x double> @llvm.x86.avx512.rsqrt28.sd(<2 x double> %a0, <2 x double> %mem_v, <2 x double> zeroinitializer, i8 7, i32 4)
+ ret <2 x double> %res
+}
+
+define <2 x double> @test_rsqrt28_sd_maskz_mem_offset(<2 x double> %a0, double* %ptr) {
+ ; CHECK: vrsqrt28sd 144(%rdi), %xmm0, %xmm0 {%k1} {z} # encoding: [0x62,0xf2,0xfd,0x89,0xcd,0x47,0x12]
+ %ptr1 = getelementptr double* %ptr, i32 18
+ %mem = load double* %ptr1, align 8
+ %mem_v = insertelement <2 x double> undef, double %mem, i32 0
+ %res = call <2 x double> @llvm.x86.avx512.rsqrt28.sd(<2 x double> %a0, <2 x double> %mem_v, <2 x double> zeroinitializer, i8 7, i32 4)
+ ret <2 x double> %res
+}
+
diff --git a/test/CodeGen/X86/avx512vl-arith.ll b/test/CodeGen/X86/avx512vl-arith.ll
new file mode 100644
index 0000000..1f7da78
--- /dev/null
+++ b/test/CodeGen/X86/avx512vl-arith.ll
@@ -0,0 +1,794 @@
+; RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=knl -mattr=+avx512vl | FileCheck %s
+
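+; This file covers basic integer and floating-point arithmetic on 128- and
+; 256-bit vectors under AVX-512VL: plain, masked, zero-masked, folded-load,
+; and embedded-broadcast operand forms.
+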
+; 256-bit
+
+; CHECK-LABEL: vpaddq256_test
+; CHECK: vpaddq %ymm{{.*}}
+; CHECK: ret
+define <4 x i64> @vpaddq256_test(<4 x i64> %i, <4 x i64> %j) nounwind readnone {
+ %x = add <4 x i64> %i, %j
+ ret <4 x i64> %x
+}
+
+; CHECK-LABEL: vpaddq256_fold_test
+; CHECK: vpaddq (%rdi), %ymm{{.*}}
+; CHECK: ret
+define <4 x i64> @vpaddq256_fold_test(<4 x i64> %i, <4 x i64>* %j) nounwind {
+ %tmp = load <4 x i64>* %j, align 4
+ %x = add <4 x i64> %i, %tmp
+ ret <4 x i64> %x
+}
+
+; CHECK-LABEL: vpaddq256_broadcast_test
+; CHECK: vpaddq LCP{{.*}}(%rip){1to4}, %ymm{{.*}}
+; CHECK: ret
+define <4 x i64> @vpaddq256_broadcast_test(<4 x i64> %i) nounwind {
+ %x = add <4 x i64> %i, <i64 1, i64 1, i64 1, i64 1>
+ ret <4 x i64> %x
+}
+
+; CHECK-LABEL: vpaddq256_broadcast2_test
+; CHECK: vpaddq (%rdi){1to4}, %ymm{{.*}}
+; CHECK: ret
+define <4 x i64> @vpaddq256_broadcast2_test(<4 x i64> %i, i64* %j.ptr) nounwind {
+ %j = load i64* %j.ptr
+ %j.0 = insertelement <4 x i64> undef, i64 %j, i32 0
+ %j.v = shufflevector <4 x i64> %j.0, <4 x i64> undef, <4 x i32> zeroinitializer
+ %x = add <4 x i64> %i, %j.v
+ ret <4 x i64> %x
+}
+
+; CHECK-LABEL: vpaddd256_test
+; CHECK: vpaddd %ymm{{.*}}
+; CHECK: ret
+define <8 x i32> @vpaddd256_test(<8 x i32> %i, <8 x i32> %j) nounwind readnone {
+ %x = add <8 x i32> %i, %j
+ ret <8 x i32> %x
+}
+
+; CHECK-LABEL: vpaddd256_fold_test
+; CHECK: vpaddd (%rdi), %ymm{{.*}}
+; CHECK: ret
+define <8 x i32> @vpaddd256_fold_test(<8 x i32> %i, <8 x i32>* %j) nounwind {
+ %tmp = load <8 x i32>* %j, align 4
+ %x = add <8 x i32> %i, %tmp
+ ret <8 x i32> %x
+}
+
+; CHECK-LABEL: vpaddd256_broadcast_test
+; CHECK: vpaddd LCP{{.*}}(%rip){1to8}, %ymm{{.*}}
+; CHECK: ret
+define <8 x i32> @vpaddd256_broadcast_test(<8 x i32> %i) nounwind {
+ %x = add <8 x i32> %i, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
+ ret <8 x i32> %x
+}
+
+; CHECK-LABEL: vpaddd256_mask_test
+; CHECK: vpaddd %ymm{{.*%k[1-7].*}}
+; CHECK: ret
+define <8 x i32> @vpaddd256_mask_test(<8 x i32> %i, <8 x i32> %j, <8 x i32> %mask1) nounwind readnone {
+ %mask = icmp ne <8 x i32> %mask1, zeroinitializer
+ %x = add <8 x i32> %i, %j
+ %r = select <8 x i1> %mask, <8 x i32> %x, <8 x i32> %i
+ ret <8 x i32> %r
+}
+
+; CHECK-LABEL: vpaddd256_maskz_test
+; CHECK: vpaddd %ymm{{.*{%k[1-7]} {z}.*}}
+; CHECK: ret
+define <8 x i32> @vpaddd256_maskz_test(<8 x i32> %i, <8 x i32> %j, <8 x i32> %mask1) nounwind readnone {
+ %mask = icmp ne <8 x i32> %mask1, zeroinitializer
+ %x = add <8 x i32> %i, %j
+ %r = select <8 x i1> %mask, <8 x i32> %x, <8 x i32> zeroinitializer
+ ret <8 x i32> %r
+}
+
+; CHECK-LABEL: vpaddd256_mask_fold_test
+; CHECK: vpaddd (%rdi), %ymm{{.*%k[1-7]}}
+; CHECK: ret
+define <8 x i32> @vpaddd256_mask_fold_test(<8 x i32> %i, <8 x i32>* %j.ptr, <8 x i32> %mask1) nounwind readnone {
+ %mask = icmp ne <8 x i32> %mask1, zeroinitializer
+ %j = load <8 x i32>* %j.ptr
+ %x = add <8 x i32> %i, %j
+ %r = select <8 x i1> %mask, <8 x i32> %x, <8 x i32> %i
+ ret <8 x i32> %r
+}
+
+; CHECK-LABEL: vpaddd256_mask_broadcast_test
+; CHECK: vpaddd LCP{{.*}}(%rip){1to8}, %ymm{{.*{%k[1-7]}}}
+; CHECK: ret
+define <8 x i32> @vpaddd256_mask_broadcast_test(<8 x i32> %i, <8 x i32> %mask1) nounwind readnone {
+ %mask = icmp ne <8 x i32> %mask1, zeroinitializer
+ %x = add <8 x i32> %i, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
+ %r = select <8 x i1> %mask, <8 x i32> %x, <8 x i32> %i
+ ret <8 x i32> %r
+}
+
+; CHECK-LABEL: vpaddd256_maskz_fold_test
+; CHECK: vpaddd (%rdi), %ymm{{.*{%k[1-7]} {z}}}
+; CHECK: ret
+define <8 x i32> @vpaddd256_maskz_fold_test(<8 x i32> %i, <8 x i32>* %j.ptr, <8 x i32> %mask1) nounwind readnone {
+ %mask = icmp ne <8 x i32> %mask1, zeroinitializer
+ %j = load <8 x i32>* %j.ptr
+ %x = add <8 x i32> %i, %j
+ %r = select <8 x i1> %mask, <8 x i32> %x, <8 x i32> zeroinitializer
+ ret <8 x i32> %r
+}
+
+; CHECK-LABEL: vpaddd256_maskz_broadcast_test
+; CHECK: vpaddd LCP{{.*}}(%rip){1to8}, %ymm{{.*{%k[1-7]} {z}}}
+; CHECK: ret
+define <8 x i32> @vpaddd256_maskz_broadcast_test(<8 x i32> %i, <8 x i32> %mask1) nounwind readnone {
+ %mask = icmp ne <8 x i32> %mask1, zeroinitializer
+ %x = add <8 x i32> %i, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
+ %r = select <8 x i1> %mask, <8 x i32> %x, <8 x i32> zeroinitializer
+ ret <8 x i32> %r
+}
+
+; CHECK-LABEL: vpsubq256_test
+; CHECK: vpsubq %ymm{{.*}}
+; CHECK: ret
+define <4 x i64> @vpsubq256_test(<4 x i64> %i, <4 x i64> %j) nounwind readnone {
+ %x = sub <4 x i64> %i, %j
+ ret <4 x i64> %x
+}
+
+; CHECK-LABEL: vpsubd256_test
+; CHECK: vpsubd %ymm{{.*}}
+; CHECK: ret
+define <8 x i32> @vpsubd256_test(<8 x i32> %i, <8 x i32> %j) nounwind readnone {
+ %x = sub <8 x i32> %i, %j
+ ret <8 x i32> %x
+}
+
+; CHECK-LABEL: vpmulld256_test
+; CHECK: vpmulld %ymm{{.*}}
+; CHECK: ret
+define <8 x i32> @vpmulld256_test(<8 x i32> %i, <8 x i32> %j) {
+ %x = mul <8 x i32> %i, %j
+ ret <8 x i32> %x
+}
+
+; CHECK-LABEL: test_vaddpd_256
+; CHECK: vaddpd{{.*}}
+; CHECK: ret
+define <4 x double> @test_vaddpd_256(<4 x double> %y, <4 x double> %x) {
+entry:
+ %add.i = fadd <4 x double> %x, %y
+ ret <4 x double> %add.i
+}
+
+; CHECK-LABEL: test_fold_vaddpd_256
+; CHECK: vaddpd LCP{{.*}}(%rip){{.*}}
+; CHECK: ret
+define <4 x double> @test_fold_vaddpd_256(<4 x double> %y) {
+entry:
+ %add.i = fadd <4 x double> %y, <double 4.500000e+00, double 3.400000e+00, double 4.500000e+00, double 5.600000e+00>
+ ret <4 x double> %add.i
+}
+
+; CHECK-LABEL: test_broadcast_vaddpd_256
+; CHECK: LCP{{.*}}(%rip){1to8}, %ymm0, %ymm0
+; CHECK: ret
+define <8 x float> @test_broadcast_vaddpd_256(<8 x float> %a) nounwind {
+ %b = fadd <8 x float> %a, <float 0x3FB99999A0000000, float 0x3FB99999A0000000, float 0x3FB99999A0000000, float 0x3FB99999A0000000, float 0x3FB99999A0000000, float 0x3FB99999A0000000, float 0x3FB99999A0000000, float 0x3FB99999A0000000>
+ ret <8 x float> %b
+}
+
+; CHECK-LABEL: test_mask_vaddps_256
+; CHECK: vaddps {{%ymm[0-9]{1,2}, %ymm[0-9]{1,2}, %ymm[0-9]{1,2} {%k[1-7]}}}
+; CHECK: ret
+define <8 x float> @test_mask_vaddps_256(<8 x float> %dst, <8 x float> %i,
+ <8 x float> %j, <8 x i32> %mask1)
+ nounwind readnone {
+ %mask = icmp ne <8 x i32> %mask1, zeroinitializer
+ %x = fadd <8 x float> %i, %j
+ %r = select <8 x i1> %mask, <8 x float> %x, <8 x float> %dst
+ ret <8 x float> %r
+}
+
+; CHECK-LABEL: test_mask_vmulps_256
+; CHECK: vmulps {{%ymm[0-9]{1,2}, %ymm[0-9]{1,2}, %ymm[0-9]{1,2} {%k[1-7]}}}
+; CHECK: ret
+define <8 x float> @test_mask_vmulps_256(<8 x float> %dst, <8 x float> %i,
+ <8 x float> %j, <8 x i32> %mask1)
+ nounwind readnone {
+ %mask = icmp ne <8 x i32> %mask1, zeroinitializer
+ %x = fmul <8 x float> %i, %j
+ %r = select <8 x i1> %mask, <8 x float> %x, <8 x float> %dst
+ ret <8 x float> %r
+}
+
+; CHECK-LABEL: test_mask_vminps_256
+; CHECK: vminps {{%ymm[0-9]{1,2}, %ymm[0-9]{1,2}, %ymm[0-9]{1,2} {%k[1-7]}}}
+; CHECK: ret
+define <8 x float> @test_mask_vminps_256(<8 x float> %dst, <8 x float> %i,
+ <8 x float> %j, <8 x i32> %mask1)
+ nounwind readnone {
+ %mask = icmp ne <8 x i32> %mask1, zeroinitializer
+ %cmp_res = fcmp olt <8 x float> %i, %j
+ %min = select <8 x i1> %cmp_res, <8 x float> %i, <8 x float> %j
+ %r = select <8 x i1> %mask, <8 x float> %min, <8 x float> %dst
+ ret <8 x float> %r
+}
+
+; CHECK-LABEL: test_mask_vmaxps_256
+; CHECK: vmaxps {{%ymm[0-9]{1,2}, %ymm[0-9]{1,2}, %ymm[0-9]{1,2} {%k[1-7]}}}
+; CHECK: ret
+define <8 x float> @test_mask_vmaxps_256(<8 x float> %dst, <8 x float> %i,
+ <8 x float> %j, <8 x i32> %mask1)
+ nounwind readnone {
+ %mask = icmp ne <8 x i32> %mask1, zeroinitializer
+ %cmp_res = fcmp ogt <8 x float> %i, %j
+ %max = select <8 x i1> %cmp_res, <8 x float> %i, <8 x float> %j
+ %r = select <8 x i1> %mask, <8 x float> %max, <8 x float> %dst
+ ret <8 x float> %r
+}
+
+; CHECK-LABEL: test_mask_vsubps_256
+; CHECK: vsubps {{%ymm[0-9]{1,2}, %ymm[0-9]{1,2}, %ymm[0-9]{1,2} {%k[1-7]}}}
+; CHECK: ret
+define <8 x float> @test_mask_vsubps_256(<8 x float> %dst, <8 x float> %i,
+ <8 x float> %j, <8 x i32> %mask1)
+ nounwind readnone {
+ %mask = icmp ne <8 x i32> %mask1, zeroinitializer
+ %x = fsub <8 x float> %i, %j
+ %r = select <8 x i1> %mask, <8 x float> %x, <8 x float> %dst
+ ret <8 x float> %r
+}
+
+; CHECK-LABEL: test_mask_vdivps_256
+; CHECK: vdivps {{%ymm[0-9]{1,2}, %ymm[0-9]{1,2}, %ymm[0-9]{1,2} {%k[1-7]}}}
+; CHECK: ret
+define <8 x float> @test_mask_vdivps_256(<8 x float> %dst, <8 x float> %i,
+ <8 x float> %j, <8 x i32> %mask1)
+ nounwind readnone {
+ %mask = icmp ne <8 x i32> %mask1, zeroinitializer
+ %x = fdiv <8 x float> %i, %j
+ %r = select <8 x i1> %mask, <8 x float> %x, <8 x float> %dst
+ ret <8 x float> %r
+}
+
+; CHECK-LABEL: test_mask_vmulpd_256
+; CHECK: vmulpd {{%ymm[0-9]{1,2}, %ymm[0-9]{1,2}, %ymm[0-9]{1,2} {%k[1-7]}}}
+; CHECK: ret
+define <4 x double> @test_mask_vmulpd_256(<4 x double> %dst, <4 x double> %i,
+ <4 x double> %j, <4 x i64> %mask1)
+ nounwind readnone {
+ %mask = icmp ne <4 x i64> %mask1, zeroinitializer
+ %x = fmul <4 x double> %i, %j
+ %r = select <4 x i1> %mask, <4 x double> %x, <4 x double> %dst
+ ret <4 x double> %r
+}
+
+; CHECK-LABEL: test_mask_vminpd_256
+; CHECK: vminpd {{%ymm[0-9]{1,2}, %ymm[0-9]{1,2}, %ymm[0-9]{1,2} {%k[1-7]}}}
+; CHECK: ret
+define <4 x double> @test_mask_vminpd_256(<4 x double> %dst, <4 x double> %i,
+ <4 x double> %j, <4 x i64> %mask1)
+ nounwind readnone {
+ %mask = icmp ne <4 x i64> %mask1, zeroinitializer
+ %cmp_res = fcmp olt <4 x double> %i, %j
+ %min = select <4 x i1> %cmp_res, <4 x double> %i, <4 x double> %j
+ %r = select <4 x i1> %mask, <4 x double> %min, <4 x double> %dst
+ ret <4 x double> %r
+}
+
+; CHECK-LABEL: test_mask_vmaxpd_256
+; CHECK: vmaxpd {{%ymm[0-9]{1,2}, %ymm[0-9]{1,2}, %ymm[0-9]{1,2} {%k[1-7]}}}
+; CHECK: ret
+define <4 x double> @test_mask_vmaxpd_256(<4 x double> %dst, <4 x double> %i,
+ <4 x double> %j, <4 x i64> %mask1)
+ nounwind readnone {
+ %mask = icmp ne <4 x i64> %mask1, zeroinitializer
+ %cmp_res = fcmp ogt <4 x double> %i, %j
+ %max = select <4 x i1> %cmp_res, <4 x double> %i, <4 x double> %j
+ %r = select <4 x i1> %mask, <4 x double> %max, <4 x double> %dst
+ ret <4 x double> %r
+}
+
+; CHECK-LABEL: test_mask_vsubpd_256
+; CHECK: vsubpd {{%ymm[0-9]{1,2}, %ymm[0-9]{1,2}, %ymm[0-9]{1,2} {%k[1-7]}}}
+; CHECK: ret
+define <4 x double> @test_mask_vsubpd_256(<4 x double> %dst, <4 x double> %i,
+ <4 x double> %j, <4 x i64> %mask1)
+ nounwind readnone {
+ %mask = icmp ne <4 x i64> %mask1, zeroinitializer
+ %x = fsub <4 x double> %i, %j
+ %r = select <4 x i1> %mask, <4 x double> %x, <4 x double> %dst
+ ret <4 x double> %r
+}
+
+; CHECK-LABEL: test_mask_vdivpd_256
+; CHECK: vdivpd {{%ymm[0-9]{1,2}, %ymm[0-9]{1,2}, %ymm[0-9]{1,2} {%k[1-7]}}}
+; CHECK: ret
+define <4 x double> @test_mask_vdivpd_256(<4 x double> %dst, <4 x double> %i,
+ <4 x double> %j, <4 x i64> %mask1)
+ nounwind readnone {
+ %mask = icmp ne <4 x i64> %mask1, zeroinitializer
+ %x = fdiv <4 x double> %i, %j
+ %r = select <4 x i1> %mask, <4 x double> %x, <4 x double> %dst
+ ret <4 x double> %r
+}
+
+; CHECK-LABEL: test_mask_vaddpd_256
+; CHECK: vaddpd {{%ymm[0-9]{1,2}, %ymm[0-9]{1,2}, %ymm[0-9]{1,2} {%k[1-7]}}}
+; CHECK: ret
+define <4 x double> @test_mask_vaddpd_256(<4 x double> %dst, <4 x double> %i,
+ <4 x double> %j, <4 x i64> %mask1)
+ nounwind readnone {
+ %mask = icmp ne <4 x i64> %mask1, zeroinitializer
+ %x = fadd <4 x double> %i, %j
+ %r = select <4 x i1> %mask, <4 x double> %x, <4 x double> %dst
+ ret <4 x double> %r
+}
+
+; CHECK-LABEL: test_maskz_vaddpd_256
+; CHECK: vaddpd {{%ymm[0-9]{1,2}, %ymm[0-9]{1,2}, %ymm[0-9]{1,2} {%k[1-7]} {z}}}
+; CHECK: ret
+define <4 x double> @test_maskz_vaddpd_256(<4 x double> %i, <4 x double> %j,
+ <4 x i64> %mask1) nounwind readnone {
+ %mask = icmp ne <4 x i64> %mask1, zeroinitializer
+ %x = fadd <4 x double> %i, %j
+ %r = select <4 x i1> %mask, <4 x double> %x, <4 x double> zeroinitializer
+ ret <4 x double> %r
+}
+
+; CHECK-LABEL: test_mask_fold_vaddpd_256
+; CHECK: vaddpd (%rdi), {{.*%ymm[0-9]{1,2}, %ymm[0-9]{1,2} {%k[1-7]}.*}}
+; CHECK: ret
+define <4 x double> @test_mask_fold_vaddpd_256(<4 x double> %dst, <4 x double> %i,
+ <4 x double>* %j, <4 x i64> %mask1)
+ nounwind {
+ %mask = icmp ne <4 x i64> %mask1, zeroinitializer
+ %tmp = load <4 x double>* %j
+ %x = fadd <4 x double> %i, %tmp
+ %r = select <4 x i1> %mask, <4 x double> %x, <4 x double> %dst
+ ret <4 x double> %r
+}
+
+; CHECK-LABEL: test_maskz_fold_vaddpd_256
+; CHECK: vaddpd (%rdi), {{.*%ymm[0-9]{1,2}, %ymm[0-9]{1,2} {%k[1-7]} {z}.*}}
+; CHECK: ret
+define <4 x double> @test_maskz_fold_vaddpd_256(<4 x double> %i, <4 x double>* %j,
+ <4 x i64> %mask1) nounwind {
+ %mask = icmp ne <4 x i64> %mask1, zeroinitializer
+ %tmp = load <4 x double>* %j
+ %x = fadd <4 x double> %i, %tmp
+ %r = select <4 x i1> %mask, <4 x double> %x, <4 x double> zeroinitializer
+ ret <4 x double> %r
+}
+
+; CHECK-LABEL: test_broadcast2_vaddpd_256
+; CHECK: vaddpd (%rdi){1to4}, %ymm{{.*}}
+; CHECK: ret
+define <4 x double> @test_broadcast2_vaddpd_256(<4 x double> %i, double* %j) nounwind {
+ %tmp = load double* %j
+ %b = insertelement <4 x double> undef, double %tmp, i32 0
+ %c = shufflevector <4 x double> %b, <4 x double> undef,
+ <4 x i32> zeroinitializer
+ %x = fadd <4 x double> %c, %i
+ ret <4 x double> %x
+}
+
+; CHECK-LABEL: test_mask_broadcast_vaddpd_256
+; CHECK: vaddpd (%rdi){1to4}, %ymm{{.*{%k[1-7]}.*}}
+; CHECK: ret
+define <4 x double> @test_mask_broadcast_vaddpd_256(<4 x double> %dst, <4 x double> %i,
+ double* %j, <4 x i64> %mask1) nounwind {
+ %mask = icmp ne <4 x i64> %mask1, zeroinitializer
+ %tmp = load double* %j
+ %b = insertelement <4 x double> undef, double %tmp, i32 0
+ %c = shufflevector <4 x double> %b, <4 x double> undef,
+ <4 x i32> zeroinitializer
+ %x = fadd <4 x double> %c, %i
+ %r = select <4 x i1> %mask, <4 x double> %x, <4 x double> %i
+ ret <4 x double> %r
+}
+
+; CHECK-LABEL: test_maskz_broadcast_vaddpd_256
+; CHECK: vaddpd (%rdi){1to4}, %ymm{{.*{%k[1-7]} {z}.*}}
+; CHECK: ret
+define <4 x double> @test_maskz_broadcast_vaddpd_256(<4 x double> %i, double* %j,
+ <4 x i64> %mask1) nounwind {
+ %mask = icmp ne <4 x i64> %mask1, zeroinitializer
+ %tmp = load double* %j
+ %b = insertelement <4 x double> undef, double %tmp, i32 0
+ %c = shufflevector <4 x double> %b, <4 x double> undef,
+ <4 x i32> zeroinitializer
+ %x = fadd <4 x double> %c, %i
+ %r = select <4 x i1> %mask, <4 x double> %x, <4 x double> zeroinitializer
+ ret <4 x double> %r
+}
+
+; 128-bit
+
+; CHECK-LABEL: vpaddq128_test
+; CHECK: vpaddq %xmm{{.*}}
+; CHECK: ret
+define <2 x i64> @vpaddq128_test(<2 x i64> %i, <2 x i64> %j) nounwind readnone {
+ %x = add <2 x i64> %i, %j
+ ret <2 x i64> %x
+}
+
+; CHECK-LABEL: vpaddq128_fold_test
+; CHECK: vpaddq (%rdi), %xmm{{.*}}
+; CHECK: ret
+define <2 x i64> @vpaddq128_fold_test(<2 x i64> %i, <2 x i64>* %j) nounwind {
+ %tmp = load <2 x i64>* %j, align 4
+ %x = add <2 x i64> %i, %tmp
+ ret <2 x i64> %x
+}
+
+; CHECK-LABEL: vpaddq128_broadcast2_test
+; CHECK: vpaddq (%rdi){1to2}, %xmm{{.*}}
+; CHECK: ret
+define <2 x i64> @vpaddq128_broadcast2_test(<2 x i64> %i, i64* %j) nounwind {
+ %tmp = load i64* %j
+ %j.0 = insertelement <2 x i64> undef, i64 %tmp, i32 0
+ %j.1 = insertelement <2 x i64> %j.0, i64 %tmp, i32 1
+ %x = add <2 x i64> %i, %j.1
+ ret <2 x i64> %x
+}
+
+; CHECK-LABEL: vpaddd128_test
+; CHECK: vpaddd %xmm{{.*}}
+; CHECK: ret
+define <4 x i32> @vpaddd128_test(<4 x i32> %i, <4 x i32> %j) nounwind readnone {
+ %x = add <4 x i32> %i, %j
+ ret <4 x i32> %x
+}
+
+; CHECK-LABEL: vpaddd128_fold_test
+; CHECK: vpaddd (%rdi), %xmm{{.*}}
+; CHECK: ret
+define <4 x i32> @vpaddd128_fold_test(<4 x i32> %i, <4 x i32>* %j) nounwind {
+ %tmp = load <4 x i32>* %j, align 4
+ %x = add <4 x i32> %i, %tmp
+ ret <4 x i32> %x
+}
+
+; CHECK-LABEL: vpaddd128_broadcast_test
+; CHECK: vpaddd LCP{{.*}}(%rip){1to4}, %xmm{{.*}}
+; CHECK: ret
+define <4 x i32> @vpaddd128_broadcast_test(<4 x i32> %i) nounwind {
+ %x = add <4 x i32> %i, <i32 1, i32 1, i32 1, i32 1>
+ ret <4 x i32> %x
+}
+
+; CHECK-LABEL: vpaddd128_mask_test
+; CHECK: vpaddd %xmm{{.*%k[1-7].*}}
+; CHECK: ret
+define <4 x i32> @vpaddd128_mask_test(<4 x i32> %i, <4 x i32> %j, <4 x i32> %mask1) nounwind readnone {
+ %mask = icmp ne <4 x i32> %mask1, zeroinitializer
+ %x = add <4 x i32> %i, %j
+ %r = select <4 x i1> %mask, <4 x i32> %x, <4 x i32> %i
+ ret <4 x i32> %r
+}
+
+; CHECK-LABEL: vpaddd128_maskz_test
+; CHECK: vpaddd %xmm{{.*{%k[1-7]} {z}.*}}
+; CHECK: ret
+define <4 x i32> @vpaddd128_maskz_test(<4 x i32> %i, <4 x i32> %j, <4 x i32> %mask1) nounwind readnone {
+ %mask = icmp ne <4 x i32> %mask1, zeroinitializer
+ %x = add <4 x i32> %i, %j
+ %r = select <4 x i1> %mask, <4 x i32> %x, <4 x i32> zeroinitializer
+ ret <4 x i32> %r
+}
+
+; CHECK-LABEL: vpaddd128_mask_fold_test
+; CHECK: vpaddd (%rdi), %xmm{{.*%k[1-7]}}
+; CHECK: ret
+define <4 x i32> @vpaddd128_mask_fold_test(<4 x i32> %i, <4 x i32>* %j.ptr, <4 x i32> %mask1) nounwind readnone {
+ %mask = icmp ne <4 x i32> %mask1, zeroinitializer
+ %j = load <4 x i32>* %j.ptr
+ %x = add <4 x i32> %i, %j
+ %r = select <4 x i1> %mask, <4 x i32> %x, <4 x i32> %i
+ ret <4 x i32> %r
+}
+
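+; The mask_fold/maskz_fold tests combine both rewrites: the loaded operand is
+; expected to fold into the memory form of the instruction while the select
+; still folds into the writemask, e.g. vpaddd (%rdi), ... {%k[1-7]}.
+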
+; CHECK-LABEL: vpaddd128_mask_broadcast_test
+; CHECK: vpaddd LCP{{.*}}(%rip){1to4}, %xmm{{.*{%k[1-7]}}}
+; CHECK: ret
+define <4 x i32> @vpaddd128_mask_broadcast_test(<4 x i32> %i, <4 x i32> %mask1) nounwind readnone {
+ %mask = icmp ne <4 x i32> %mask1, zeroinitializer
+ %x = add <4 x i32> %i, <i32 1, i32 1, i32 1, i32 1>
+ %r = select <4 x i1> %mask, <4 x i32> %x, <4 x i32> %i
+ ret <4 x i32> %r
+}
+
+; CHECK-LABEL: vpaddd128_maskz_fold_test
+; CHECK: vpaddd (%rdi), %xmm{{.*{%k[1-7]} {z}}}
+; CHECK: ret
+define <4 x i32> @vpaddd128_maskz_fold_test(<4 x i32> %i, <4 x i32>* %j.ptr, <4 x i32> %mask1) nounwind readnone {
+ %mask = icmp ne <4 x i32> %mask1, zeroinitializer
+ %j = load <4 x i32>* %j.ptr
+ %x = add <4 x i32> %i, %j
+ %r = select <4 x i1> %mask, <4 x i32> %x, <4 x i32> zeroinitializer
+ ret <4 x i32> %r
+}
+
+; CHECK-LABEL: vpaddd128_maskz_broadcast_test
+; CHECK: vpaddd LCP{{.*}}(%rip){1to4}, %xmm{{.*{%k[1-7]} {z}}}
+; CHECK: ret
+define <4 x i32> @vpaddd128_maskz_broadcast_test(<4 x i32> %i, <4 x i32> %mask1) nounwind readnone {
+ %mask = icmp ne <4 x i32> %mask1, zeroinitializer
+ %x = add <4 x i32> %i, <i32 1, i32 1, i32 1, i32 1>
+ %r = select <4 x i1> %mask, <4 x i32> %x, <4 x i32> zeroinitializer
+ ret <4 x i32> %r
+}
+
+; CHECK-LABEL: vpsubq128_test
+; CHECK: vpsubq %xmm{{.*}}
+; CHECK: ret
+define <2 x i64> @vpsubq128_test(<2 x i64> %i, <2 x i64> %j) nounwind readnone {
+ %x = sub <2 x i64> %i, %j
+ ret <2 x i64> %x
+}
+
+; CHECK-LABEL: vpsubd128_test
+; CHECK: vpsubd %xmm{{.*}}
+; CHECK: ret
+define <4 x i32> @vpsubd128_test(<4 x i32> %i, <4 x i32> %j) nounwind readnone {
+ %x = sub <4 x i32> %i, %j
+ ret <4 x i32> %x
+}
+
+; CHECK-LABEL: vpmulld128_test
+; CHECK: vpmulld %xmm{{.*}}
+; CHECK: ret
+define <4 x i32> @vpmulld128_test(<4 x i32> %i, <4 x i32> %j) {
+ %x = mul <4 x i32> %i, %j
+ ret <4 x i32> %x
+}
+
+; CHECK-LABEL: test_vaddpd_128
+; CHECK: vaddpd{{.*}}
+; CHECK: ret
+define <2 x double> @test_vaddpd_128(<2 x double> %y, <2 x double> %x) {
+entry:
+ %add.i = fadd <2 x double> %x, %y
+ ret <2 x double> %add.i
+}
+
+; CHECK-LABEL: test_fold_vaddpd_128
+; CHECK: vaddpd LCP{{.*}}(%rip){{.*}}
+; CHECK: ret
+define <2 x double> @test_fold_vaddpd_128(<2 x double> %y) {
+entry:
+ %add.i = fadd <2 x double> %y, <double 4.500000e+00, double 3.400000e+00>
+ ret <2 x double> %add.i
+}
+
+; CHECK-LABEL: test_broadcast_vaddps_128
+; CHECK: vaddps LCP{{.*}}(%rip){1to4}, %xmm0, %xmm0
+; CHECK: ret
+define <4 x float> @test_broadcast_vaddps_128(<4 x float> %a) nounwind {
+ %b = fadd <4 x float> %a, <float 0x3FB99999A0000000, float 0x3FB99999A0000000, float 0x3FB99999A0000000, float 0x3FB99999A0000000>
+ ret <4 x float> %b
+}
+
+; CHECK-LABEL: test_mask_vaddps_128
+; CHECK: vaddps {{%xmm[0-9]{1,2}, %xmm[0-9]{1,2}, %xmm[0-9]{1,2} {%k[1-7]}}}
+; CHECK: ret
+define <4 x float> @test_mask_vaddps_128(<4 x float> %dst, <4 x float> %i,
+ <4 x float> %j, <4 x i32> %mask1)
+ nounwind readnone {
+ %mask = icmp ne <4 x i32> %mask1, zeroinitializer
+ %x = fadd <4 x float> %i, %j
+ %r = select <4 x i1> %mask, <4 x float> %x, <4 x float> %dst
+ ret <4 x float> %r
+}
+
+; CHECK-LABEL: test_mask_vmulps_128
+; CHECK: vmulps {{%xmm[0-9]{1,2}, %xmm[0-9]{1,2}, %xmm[0-9]{1,2} {%k[1-7]}}}
+; CHECK: ret
+define <4 x float> @test_mask_vmulps_128(<4 x float> %dst, <4 x float> %i,
+ <4 x float> %j, <4 x i32> %mask1)
+ nounwind readnone {
+ %mask = icmp ne <4 x i32> %mask1, zeroinitializer
+ %x = fmul <4 x float> %i, %j
+ %r = select <4 x i1> %mask, <4 x float> %x, <4 x float> %dst
+ ret <4 x float> %r
+}
+
+; CHECK-LABEL: test_mask_vminps_128
+; CHECK: vminps {{%xmm[0-9]{1,2}, %xmm[0-9]{1,2}, %xmm[0-9]{1,2} {%k[1-7]}}}
+; CHECK: ret
+define <4 x float> @test_mask_vminps_128(<4 x float> %dst, <4 x float> %i,
+ <4 x float> %j, <4 x i32> %mask1)
+ nounwind readnone {
+ %mask = icmp ne <4 x i32> %mask1, zeroinitializer
+ %cmp_res = fcmp olt <4 x float> %i, %j
+ %min = select <4 x i1> %cmp_res, <4 x float> %i, <4 x float> %j
+ %r = select <4 x i1> %mask, <4 x float> %min, <4 x float> %dst
+ ret <4 x float> %r
+}
+
+; CHECK-LABEL: test_mask_vmaxps_128
+; CHECK: vmaxps {{%xmm[0-9]{1,2}, %xmm[0-9]{1,2}, %xmm[0-9]{1,2} {%k[1-7]}}}
+; CHECK: ret
+define <4 x float> @test_mask_vmaxps_128(<4 x float> %dst, <4 x float> %i,
+ <4 x float> %j, <4 x i32> %mask1)
+ nounwind readnone {
+ %mask = icmp ne <4 x i32> %mask1, zeroinitializer
+ %cmp_res = fcmp ogt <4 x float> %i, %j
+ %max = select <4 x i1> %cmp_res, <4 x float> %i, <4 x float> %j
+ %r = select <4 x i1> %mask, <4 x float> %max, <4 x float> %dst
+ ret <4 x float> %r
+}
+
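+; vminps/vmaxps have no direct IR instruction here; the tests express them as
+; an fcmp olt/ogt followed by a select of the two operands, which instruction
+; selection is expected to match to the min/max instructions before the
+; writemask select is applied.
+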
+; CHECK-LABEL: test_mask_vsubps_128
+; CHECK: vsubps {{%xmm[0-9]{1,2}, %xmm[0-9]{1,2}, %xmm[0-9]{1,2} {%k[1-7]}}}
+; CHECK: ret
+define <4 x float> @test_mask_vsubps_128(<4 x float> %dst, <4 x float> %i,
+ <4 x float> %j, <4 x i32> %mask1)
+ nounwind readnone {
+ %mask = icmp ne <4 x i32> %mask1, zeroinitializer
+ %x = fsub <4 x float> %i, %j
+ %r = select <4 x i1> %mask, <4 x float> %x, <4 x float> %dst
+ ret <4 x float> %r
+}
+
+; CHECK-LABEL: test_mask_vdivps_128
+; CHECK: vdivps {{%xmm[0-9]{1,2}, %xmm[0-9]{1,2}, %xmm[0-9]{1,2} {%k[1-7]}}}
+; CHECK: ret
+define <4 x float> @test_mask_vdivps_128(<4 x float> %dst, <4 x float> %i,
+ <4 x float> %j, <4 x i32> %mask1)
+ nounwind readnone {
+ %mask = icmp ne <4 x i32> %mask1, zeroinitializer
+ %x = fdiv <4 x float> %i, %j
+ %r = select <4 x i1> %mask, <4 x float> %x, <4 x float> %dst
+ ret <4 x float> %r
+}
+
+; CHECK-LABEL: test_mask_vmulpd_128
+; CHECK: vmulpd {{%xmm[0-9]{1,2}, %xmm[0-9]{1,2}, %xmm[0-9]{1,2} {%k[1-7]}}}
+; CHECK: ret
+define <2 x double> @test_mask_vmulpd_128(<2 x double> %dst, <2 x double> %i,
+ <2 x double> %j, <2 x i64> %mask1)
+ nounwind readnone {
+ %mask = icmp ne <2 x i64> %mask1, zeroinitializer
+ %x = fmul <2 x double> %i, %j
+ %r = select <2 x i1> %mask, <2 x double> %x, <2 x double> %dst
+ ret <2 x double> %r
+}
+
+; CHECK-LABEL: test_mask_vminpd_128
+; CHECK: vminpd {{%xmm[0-9]{1,2}, %xmm[0-9]{1,2}, %xmm[0-9]{1,2} {%k[1-7]}}}
+; CHECK: ret
+define <2 x double> @test_mask_vminpd_128(<2 x double> %dst, <2 x double> %i,
+ <2 x double> %j, <2 x i64> %mask1)
+ nounwind readnone {
+ %mask = icmp ne <2 x i64> %mask1, zeroinitializer
+ %cmp_res = fcmp olt <2 x double> %i, %j
+ %min = select <2 x i1> %cmp_res, <2 x double> %i, <2 x double> %j
+ %r = select <2 x i1> %mask, <2 x double> %min, <2 x double> %dst
+ ret <2 x double> %r
+}
+
+; CHECK-LABEL: test_mask_vmaxpd_128
+; CHECK: vmaxpd {{%xmm[0-9]{1,2}, %xmm[0-9]{1,2}, %xmm[0-9]{1,2} {%k[1-7]}}}
+; CHECK: ret
+define <2 x double> @test_mask_vmaxpd_128(<2 x double> %dst, <2 x double> %i,
+ <2 x double> %j, <2 x i64> %mask1)
+ nounwind readnone {
+ %mask = icmp ne <2 x i64> %mask1, zeroinitializer
+ %cmp_res = fcmp ogt <2 x double> %i, %j
+ %max = select <2 x i1> %cmp_res, <2 x double> %i, <2 x double> %j
+ %r = select <2 x i1> %mask, <2 x double> %max, <2 x double> %dst
+ ret <2 x double> %r
+}
+
+; CHECK-LABEL: test_mask_vsubpd_128
+; CHECK: vsubpd {{%xmm[0-9]{1,2}, %xmm[0-9]{1,2}, %xmm[0-9]{1,2} {%k[1-7]}}}
+; CHECK: ret
+define <2 x double> @test_mask_vsubpd_128(<2 x double> %dst, <2 x double> %i,
+ <2 x double> %j, <2 x i64> %mask1)
+ nounwind readnone {
+ %mask = icmp ne <2 x i64> %mask1, zeroinitializer
+ %x = fsub <2 x double> %i, %j
+ %r = select <2 x i1> %mask, <2 x double> %x, <2 x double> %dst
+ ret <2 x double> %r
+}
+
+; CHECK-LABEL: test_mask_vdivpd_128
+; CHECK: vdivpd {{%xmm[0-9]{1,2}, %xmm[0-9]{1,2}, %xmm[0-9]{1,2} {%k[1-7]}}}
+; CHECK: ret
+define <2 x double> @test_mask_vdivpd_128(<2 x double> %dst, <2 x double> %i,
+ <2 x double> %j, <2 x i64> %mask1)
+ nounwind readnone {
+ %mask = icmp ne <2 x i64> %mask1, zeroinitializer
+ %x = fdiv <2 x double> %i, %j
+ %r = select <2 x i1> %mask, <2 x double> %x, <2 x double> %dst
+ ret <2 x double> %r
+}
+
+; CHECK-LABEL: test_mask_vaddpd_128
+; CHECK: vaddpd {{%xmm[0-9]{1,2}, %xmm[0-9]{1,2}, %xmm[0-9]{1,2} {%k[1-7]}}}
+; CHECK: ret
+define <2 x double> @test_mask_vaddpd_128(<2 x double> %dst, <2 x double> %i,
+ <2 x double> %j, <2 x i64> %mask1)
+ nounwind readnone {
+ %mask = icmp ne <2 x i64> %mask1, zeroinitializer
+ %x = fadd <2 x double> %i, %j
+ %r = select <2 x i1> %mask, <2 x double> %x, <2 x double> %dst
+ ret <2 x double> %r
+}
+
+; CHECK-LABEL: test_maskz_vaddpd_128
+; CHECK: vaddpd {{%xmm[0-9]{1,2}, %xmm[0-9]{1,2}, %xmm[0-9]{1,2} {%k[1-7]} {z}}}
+; CHECK: ret
+define <2 x double> @test_maskz_vaddpd_128(<2 x double> %i, <2 x double> %j,
+ <2 x i64> %mask1) nounwind readnone {
+ %mask = icmp ne <2 x i64> %mask1, zeroinitializer
+ %x = fadd <2 x double> %i, %j
+ %r = select <2 x i1> %mask, <2 x double> %x, <2 x double> zeroinitializer
+ ret <2 x double> %r
+}
+
+; CHECK-LABEL: test_mask_fold_vaddpd_128
+; CHECK: vaddpd (%rdi), {{.*%xmm[0-9]{1,2}, %xmm[0-9]{1,2} {%k[1-7]}.*}}
+; CHECK: ret
+define <2 x double> @test_mask_fold_vaddpd_128(<2 x double> %dst, <2 x double> %i,
+ <2 x double>* %j, <2 x i64> %mask1)
+ nounwind {
+ %mask = icmp ne <2 x i64> %mask1, zeroinitializer
+ %tmp = load <2 x double>* %j
+ %x = fadd <2 x double> %i, %tmp
+ %r = select <2 x i1> %mask, <2 x double> %x, <2 x double> %dst
+ ret <2 x double> %r
+}
+
+; CHECK-LABEL: test_maskz_fold_vaddpd_128
+; CHECK: vaddpd (%rdi), {{.*%xmm[0-9]{1,2}, %xmm[0-9]{1,2} {%k[1-7]} {z}.*}}
+; CHECK: ret
+define <2 x double> @test_maskz_fold_vaddpd_128(<2 x double> %i, <2 x double>* %j,
+ <2 x i64> %mask1) nounwind {
+ %mask = icmp ne <2 x i64> %mask1, zeroinitializer
+ %tmp = load <2 x double>* %j
+ %x = fadd <2 x double> %i, %tmp
+ %r = select <2 x i1> %mask, <2 x double> %x, <2 x double> zeroinitializer
+ ret <2 x double> %r
+}
+
+; CHECK-LABEL: test_broadcast2_vaddpd_128
+; CHECK: vaddpd (%rdi){1to2}, %xmm{{.*}}
+; CHECK: ret
+define <2 x double> @test_broadcast2_vaddpd_128(<2 x double> %i, double* %j) nounwind {
+ %tmp = load double* %j
+ %j.0 = insertelement <2 x double> undef, double %tmp, i64 0
+ %j.1 = insertelement <2 x double> %j.0, double %tmp, i64 1
+ %x = fadd <2 x double> %j.1, %i
+ ret <2 x double> %x
+}
+
+; CHECK-LABEL: test_mask_broadcast_vaddpd_128
+; CHECK: vaddpd (%rdi){1to2}, %xmm{{.*{%k[1-7]}.*}}
+; CHECK: ret
+define <2 x double> @test_mask_broadcast_vaddpd_128(<2 x double> %dst, <2 x double> %i,
+ double* %j, <2 x i64> %mask1)
+ nounwind {
+ %mask = icmp ne <2 x i64> %mask1, zeroinitializer
+ %tmp = load double* %j
+ %j.0 = insertelement <2 x double> undef, double %tmp, i64 0
+ %j.1 = insertelement <2 x double> %j.0, double %tmp, i64 1
+ %x = fadd <2 x double> %j.1, %i
+ %r = select <2 x i1> %mask, <2 x double> %x, <2 x double> %i
+ ret <2 x double> %r
+}
+
+; CHECK-LABEL: test_maskz_broadcast_vaddpd_128
+; CHECK: vaddpd (%rdi){1to2}, %xmm{{.*{%k[1-7]} {z}.*}}
+; CHECK: ret
+define <2 x double> @test_maskz_broadcast_vaddpd_128(<2 x double> %i, double* %j,
+ <2 x i64> %mask1) nounwind {
+ %mask = icmp ne <2 x i64> %mask1, zeroinitializer
+ %tmp = load double* %j
+ %j.0 = insertelement <2 x double> undef, double %tmp, i64 0
+ %j.1 = insertelement <2 x double> %j.0, double %tmp, i64 1
+ %x = fadd <2 x double> %j.1, %i
+ %r = select <2 x i1> %mask, <2 x double> %x, <2 x double> zeroinitializer
+ ret <2 x double> %r
+}
diff --git a/test/CodeGen/X86/avx512vl-intrinsics.ll b/test/CodeGen/X86/avx512vl-intrinsics.ll
index fa19084..fe347bd 100644
--- a/test/CodeGen/X86/avx512vl-intrinsics.ll
+++ b/test/CodeGen/X86/avx512vl-intrinsics.ll
@@ -67,244 +67,244 @@ define i8 @test_mask_pcmpgt_q_256(<4 x i64> %a, <4 x i64> %b, i8 %mask) {
declare i8 @llvm.x86.avx512.mask.pcmpgt.q.256(<4 x i64>, <4 x i64>, i8)
define <8 x i8> @test_cmp_d_256(<8 x i32> %a0, <8 x i32> %a1) {
-; CHECK_LABEL: test_cmp_d_256
+; CHECK-LABEL: test_cmp_d_256
; CHECK: vpcmpeqd %ymm1, %ymm0, %k0 ##
- %res0 = call i8 @llvm.x86.avx512.mask.cmp.d.256(<8 x i32> %a0, <8 x i32> %a1, i32 0, i8 -1)
+ %res0 = call i8 @llvm.x86.avx512.mask.cmp.d.256(<8 x i32> %a0, <8 x i32> %a1, i8 0, i8 -1)
%vec0 = insertelement <8 x i8> undef, i8 %res0, i32 0
; CHECK: vpcmpltd %ymm1, %ymm0, %k0 ##
- %res1 = call i8 @llvm.x86.avx512.mask.cmp.d.256(<8 x i32> %a0, <8 x i32> %a1, i32 1, i8 -1)
+ %res1 = call i8 @llvm.x86.avx512.mask.cmp.d.256(<8 x i32> %a0, <8 x i32> %a1, i8 1, i8 -1)
%vec1 = insertelement <8 x i8> %vec0, i8 %res1, i32 1
; CHECK: vpcmpled %ymm1, %ymm0, %k0 ##
- %res2 = call i8 @llvm.x86.avx512.mask.cmp.d.256(<8 x i32> %a0, <8 x i32> %a1, i32 2, i8 -1)
+ %res2 = call i8 @llvm.x86.avx512.mask.cmp.d.256(<8 x i32> %a0, <8 x i32> %a1, i8 2, i8 -1)
%vec2 = insertelement <8 x i8> %vec1, i8 %res2, i32 2
; CHECK: vpcmpunordd %ymm1, %ymm0, %k0 ##
- %res3 = call i8 @llvm.x86.avx512.mask.cmp.d.256(<8 x i32> %a0, <8 x i32> %a1, i32 3, i8 -1)
+ %res3 = call i8 @llvm.x86.avx512.mask.cmp.d.256(<8 x i32> %a0, <8 x i32> %a1, i8 3, i8 -1)
%vec3 = insertelement <8 x i8> %vec2, i8 %res3, i32 3
; CHECK: vpcmpneqd %ymm1, %ymm0, %k0 ##
- %res4 = call i8 @llvm.x86.avx512.mask.cmp.d.256(<8 x i32> %a0, <8 x i32> %a1, i32 4, i8 -1)
+ %res4 = call i8 @llvm.x86.avx512.mask.cmp.d.256(<8 x i32> %a0, <8 x i32> %a1, i8 4, i8 -1)
%vec4 = insertelement <8 x i8> %vec3, i8 %res4, i32 4
; CHECK: vpcmpnltd %ymm1, %ymm0, %k0 ##
- %res5 = call i8 @llvm.x86.avx512.mask.cmp.d.256(<8 x i32> %a0, <8 x i32> %a1, i32 5, i8 -1)
+ %res5 = call i8 @llvm.x86.avx512.mask.cmp.d.256(<8 x i32> %a0, <8 x i32> %a1, i8 5, i8 -1)
%vec5 = insertelement <8 x i8> %vec4, i8 %res5, i32 5
; CHECK: vpcmpnled %ymm1, %ymm0, %k0 ##
- %res6 = call i8 @llvm.x86.avx512.mask.cmp.d.256(<8 x i32> %a0, <8 x i32> %a1, i32 6, i8 -1)
+ %res6 = call i8 @llvm.x86.avx512.mask.cmp.d.256(<8 x i32> %a0, <8 x i32> %a1, i8 6, i8 -1)
%vec6 = insertelement <8 x i8> %vec5, i8 %res6, i32 6
; CHECK: vpcmpordd %ymm1, %ymm0, %k0 ##
- %res7 = call i8 @llvm.x86.avx512.mask.cmp.d.256(<8 x i32> %a0, <8 x i32> %a1, i32 7, i8 -1)
+ %res7 = call i8 @llvm.x86.avx512.mask.cmp.d.256(<8 x i32> %a0, <8 x i32> %a1, i8 7, i8 -1)
%vec7 = insertelement <8 x i8> %vec6, i8 %res7, i32 7
ret <8 x i8> %vec7
}
define <8 x i8> @test_mask_cmp_d_256(<8 x i32> %a0, <8 x i32> %a1, i8 %mask) {
-; CHECK_LABEL: test_mask_cmp_d_256
+; CHECK-LABEL: test_mask_cmp_d_256
; CHECK: vpcmpeqd %ymm1, %ymm0, %k0 {%k1} ##
- %res0 = call i8 @llvm.x86.avx512.mask.cmp.d.256(<8 x i32> %a0, <8 x i32> %a1, i32 0, i8 %mask)
+ %res0 = call i8 @llvm.x86.avx512.mask.cmp.d.256(<8 x i32> %a0, <8 x i32> %a1, i8 0, i8 %mask)
%vec0 = insertelement <8 x i8> undef, i8 %res0, i32 0
; CHECK: vpcmpltd %ymm1, %ymm0, %k0 {%k1} ##
- %res1 = call i8 @llvm.x86.avx512.mask.cmp.d.256(<8 x i32> %a0, <8 x i32> %a1, i32 1, i8 %mask)
+ %res1 = call i8 @llvm.x86.avx512.mask.cmp.d.256(<8 x i32> %a0, <8 x i32> %a1, i8 1, i8 %mask)
%vec1 = insertelement <8 x i8> %vec0, i8 %res1, i32 1
; CHECK: vpcmpled %ymm1, %ymm0, %k0 {%k1} ##
- %res2 = call i8 @llvm.x86.avx512.mask.cmp.d.256(<8 x i32> %a0, <8 x i32> %a1, i32 2, i8 %mask)
+ %res2 = call i8 @llvm.x86.avx512.mask.cmp.d.256(<8 x i32> %a0, <8 x i32> %a1, i8 2, i8 %mask)
%vec2 = insertelement <8 x i8> %vec1, i8 %res2, i32 2
; CHECK: vpcmpunordd %ymm1, %ymm0, %k0 {%k1} ##
- %res3 = call i8 @llvm.x86.avx512.mask.cmp.d.256(<8 x i32> %a0, <8 x i32> %a1, i32 3, i8 %mask)
+ %res3 = call i8 @llvm.x86.avx512.mask.cmp.d.256(<8 x i32> %a0, <8 x i32> %a1, i8 3, i8 %mask)
%vec3 = insertelement <8 x i8> %vec2, i8 %res3, i32 3
; CHECK: vpcmpneqd %ymm1, %ymm0, %k0 {%k1} ##
- %res4 = call i8 @llvm.x86.avx512.mask.cmp.d.256(<8 x i32> %a0, <8 x i32> %a1, i32 4, i8 %mask)
+ %res4 = call i8 @llvm.x86.avx512.mask.cmp.d.256(<8 x i32> %a0, <8 x i32> %a1, i8 4, i8 %mask)
%vec4 = insertelement <8 x i8> %vec3, i8 %res4, i32 4
; CHECK: vpcmpnltd %ymm1, %ymm0, %k0 {%k1} ##
- %res5 = call i8 @llvm.x86.avx512.mask.cmp.d.256(<8 x i32> %a0, <8 x i32> %a1, i32 5, i8 %mask)
+ %res5 = call i8 @llvm.x86.avx512.mask.cmp.d.256(<8 x i32> %a0, <8 x i32> %a1, i8 5, i8 %mask)
%vec5 = insertelement <8 x i8> %vec4, i8 %res5, i32 5
; CHECK: vpcmpnled %ymm1, %ymm0, %k0 {%k1} ##
- %res6 = call i8 @llvm.x86.avx512.mask.cmp.d.256(<8 x i32> %a0, <8 x i32> %a1, i32 6, i8 %mask)
+ %res6 = call i8 @llvm.x86.avx512.mask.cmp.d.256(<8 x i32> %a0, <8 x i32> %a1, i8 6, i8 %mask)
%vec6 = insertelement <8 x i8> %vec5, i8 %res6, i32 6
; CHECK: vpcmpordd %ymm1, %ymm0, %k0 {%k1} ##
- %res7 = call i8 @llvm.x86.avx512.mask.cmp.d.256(<8 x i32> %a0, <8 x i32> %a1, i32 7, i8 %mask)
+ %res7 = call i8 @llvm.x86.avx512.mask.cmp.d.256(<8 x i32> %a0, <8 x i32> %a1, i8 7, i8 %mask)
%vec7 = insertelement <8 x i8> %vec6, i8 %res7, i32 7
ret <8 x i8> %vec7
}
-declare i8 @llvm.x86.avx512.mask.cmp.d.256(<8 x i32>, <8 x i32>, i32, i8) nounwind readnone
+declare i8 @llvm.x86.avx512.mask.cmp.d.256(<8 x i32>, <8 x i32>, i8, i8) nounwind readnone
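+; The third operand of the cmp/ucmp intrinsics is the comparison predicate
+; immediate; this change narrows it from i32 to i8 to match the instruction
+; encoding. The mnemonics printed in the CHECK lines above (eq, lt, le,
+; unord, neq, nlt, nle, ord) correspond to predicate values 0 through 7,
+; in that order.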
define <8 x i8> @test_ucmp_d_256(<8 x i32> %a0, <8 x i32> %a1) {
-; CHECK_LABEL: test_ucmp_d_256
+; CHECK-LABEL: test_ucmp_d_256
; CHECK: vpcmpequd %ymm1, %ymm0, %k0 ##
- %res0 = call i8 @llvm.x86.avx512.mask.ucmp.d.256(<8 x i32> %a0, <8 x i32> %a1, i32 0, i8 -1)
+ %res0 = call i8 @llvm.x86.avx512.mask.ucmp.d.256(<8 x i32> %a0, <8 x i32> %a1, i8 0, i8 -1)
%vec0 = insertelement <8 x i8> undef, i8 %res0, i32 0
; CHECK: vpcmpltud %ymm1, %ymm0, %k0 ##
- %res1 = call i8 @llvm.x86.avx512.mask.ucmp.d.256(<8 x i32> %a0, <8 x i32> %a1, i32 1, i8 -1)
+ %res1 = call i8 @llvm.x86.avx512.mask.ucmp.d.256(<8 x i32> %a0, <8 x i32> %a1, i8 1, i8 -1)
%vec1 = insertelement <8 x i8> %vec0, i8 %res1, i32 1
; CHECK: vpcmpleud %ymm1, %ymm0, %k0 ##
- %res2 = call i8 @llvm.x86.avx512.mask.ucmp.d.256(<8 x i32> %a0, <8 x i32> %a1, i32 2, i8 -1)
+ %res2 = call i8 @llvm.x86.avx512.mask.ucmp.d.256(<8 x i32> %a0, <8 x i32> %a1, i8 2, i8 -1)
%vec2 = insertelement <8 x i8> %vec1, i8 %res2, i32 2
; CHECK: vpcmpunordud %ymm1, %ymm0, %k0 ##
- %res3 = call i8 @llvm.x86.avx512.mask.ucmp.d.256(<8 x i32> %a0, <8 x i32> %a1, i32 3, i8 -1)
+ %res3 = call i8 @llvm.x86.avx512.mask.ucmp.d.256(<8 x i32> %a0, <8 x i32> %a1, i8 3, i8 -1)
%vec3 = insertelement <8 x i8> %vec2, i8 %res3, i32 3
; CHECK: vpcmpnequd %ymm1, %ymm0, %k0 ##
- %res4 = call i8 @llvm.x86.avx512.mask.ucmp.d.256(<8 x i32> %a0, <8 x i32> %a1, i32 4, i8 -1)
+ %res4 = call i8 @llvm.x86.avx512.mask.ucmp.d.256(<8 x i32> %a0, <8 x i32> %a1, i8 4, i8 -1)
%vec4 = insertelement <8 x i8> %vec3, i8 %res4, i32 4
; CHECK: vpcmpnltud %ymm1, %ymm0, %k0 ##
- %res5 = call i8 @llvm.x86.avx512.mask.ucmp.d.256(<8 x i32> %a0, <8 x i32> %a1, i32 5, i8 -1)
+ %res5 = call i8 @llvm.x86.avx512.mask.ucmp.d.256(<8 x i32> %a0, <8 x i32> %a1, i8 5, i8 -1)
%vec5 = insertelement <8 x i8> %vec4, i8 %res5, i32 5
; CHECK: vpcmpnleud %ymm1, %ymm0, %k0 ##
- %res6 = call i8 @llvm.x86.avx512.mask.ucmp.d.256(<8 x i32> %a0, <8 x i32> %a1, i32 6, i8 -1)
+ %res6 = call i8 @llvm.x86.avx512.mask.ucmp.d.256(<8 x i32> %a0, <8 x i32> %a1, i8 6, i8 -1)
%vec6 = insertelement <8 x i8> %vec5, i8 %res6, i32 6
; CHECK: vpcmpordud %ymm1, %ymm0, %k0 ##
- %res7 = call i8 @llvm.x86.avx512.mask.ucmp.d.256(<8 x i32> %a0, <8 x i32> %a1, i32 7, i8 -1)
+ %res7 = call i8 @llvm.x86.avx512.mask.ucmp.d.256(<8 x i32> %a0, <8 x i32> %a1, i8 7, i8 -1)
%vec7 = insertelement <8 x i8> %vec6, i8 %res7, i32 7
ret <8 x i8> %vec7
}
define <8 x i8> @test_mask_ucmp_d_256(<8 x i32> %a0, <8 x i32> %a1, i8 %mask) {
-; CHECK_LABEL: test_mask_ucmp_d_256
+; CHECK-LABEL: test_mask_ucmp_d_256
; CHECK: vpcmpequd %ymm1, %ymm0, %k0 {%k1} ##
- %res0 = call i8 @llvm.x86.avx512.mask.ucmp.d.256(<8 x i32> %a0, <8 x i32> %a1, i32 0, i8 %mask)
+ %res0 = call i8 @llvm.x86.avx512.mask.ucmp.d.256(<8 x i32> %a0, <8 x i32> %a1, i8 0, i8 %mask)
%vec0 = insertelement <8 x i8> undef, i8 %res0, i32 0
; CHECK: vpcmpltud %ymm1, %ymm0, %k0 {%k1} ##
- %res1 = call i8 @llvm.x86.avx512.mask.ucmp.d.256(<8 x i32> %a0, <8 x i32> %a1, i32 1, i8 %mask)
+ %res1 = call i8 @llvm.x86.avx512.mask.ucmp.d.256(<8 x i32> %a0, <8 x i32> %a1, i8 1, i8 %mask)
%vec1 = insertelement <8 x i8> %vec0, i8 %res1, i32 1
; CHECK: vpcmpleud %ymm1, %ymm0, %k0 {%k1} ##
- %res2 = call i8 @llvm.x86.avx512.mask.ucmp.d.256(<8 x i32> %a0, <8 x i32> %a1, i32 2, i8 %mask)
+ %res2 = call i8 @llvm.x86.avx512.mask.ucmp.d.256(<8 x i32> %a0, <8 x i32> %a1, i8 2, i8 %mask)
%vec2 = insertelement <8 x i8> %vec1, i8 %res2, i32 2
; CHECK: vpcmpunordud %ymm1, %ymm0, %k0 {%k1} ##
- %res3 = call i8 @llvm.x86.avx512.mask.ucmp.d.256(<8 x i32> %a0, <8 x i32> %a1, i32 3, i8 %mask)
+ %res3 = call i8 @llvm.x86.avx512.mask.ucmp.d.256(<8 x i32> %a0, <8 x i32> %a1, i8 3, i8 %mask)
%vec3 = insertelement <8 x i8> %vec2, i8 %res3, i32 3
; CHECK: vpcmpnequd %ymm1, %ymm0, %k0 {%k1} ##
- %res4 = call i8 @llvm.x86.avx512.mask.ucmp.d.256(<8 x i32> %a0, <8 x i32> %a1, i32 4, i8 %mask)
+ %res4 = call i8 @llvm.x86.avx512.mask.ucmp.d.256(<8 x i32> %a0, <8 x i32> %a1, i8 4, i8 %mask)
%vec4 = insertelement <8 x i8> %vec3, i8 %res4, i32 4
; CHECK: vpcmpnltud %ymm1, %ymm0, %k0 {%k1} ##
- %res5 = call i8 @llvm.x86.avx512.mask.ucmp.d.256(<8 x i32> %a0, <8 x i32> %a1, i32 5, i8 %mask)
+ %res5 = call i8 @llvm.x86.avx512.mask.ucmp.d.256(<8 x i32> %a0, <8 x i32> %a1, i8 5, i8 %mask)
%vec5 = insertelement <8 x i8> %vec4, i8 %res5, i32 5
; CHECK: vpcmpnleud %ymm1, %ymm0, %k0 {%k1} ##
- %res6 = call i8 @llvm.x86.avx512.mask.ucmp.d.256(<8 x i32> %a0, <8 x i32> %a1, i32 6, i8 %mask)
+ %res6 = call i8 @llvm.x86.avx512.mask.ucmp.d.256(<8 x i32> %a0, <8 x i32> %a1, i8 6, i8 %mask)
%vec6 = insertelement <8 x i8> %vec5, i8 %res6, i32 6
; CHECK: vpcmpordud %ymm1, %ymm0, %k0 {%k1} ##
- %res7 = call i8 @llvm.x86.avx512.mask.ucmp.d.256(<8 x i32> %a0, <8 x i32> %a1, i32 7, i8 %mask)
+ %res7 = call i8 @llvm.x86.avx512.mask.ucmp.d.256(<8 x i32> %a0, <8 x i32> %a1, i8 7, i8 %mask)
%vec7 = insertelement <8 x i8> %vec6, i8 %res7, i32 7
ret <8 x i8> %vec7
}
-declare i8 @llvm.x86.avx512.mask.ucmp.d.256(<8 x i32>, <8 x i32>, i32, i8) nounwind readnone
+declare i8 @llvm.x86.avx512.mask.ucmp.d.256(<8 x i32>, <8 x i32>, i8, i8) nounwind readnone
define <8 x i8> @test_cmp_q_256(<4 x i64> %a0, <4 x i64> %a1) {
-; CHECK_LABEL: test_cmp_q_256
+; CHECK-LABEL: test_cmp_q_256
; CHECK: vpcmpeqq %ymm1, %ymm0, %k0 ##
- %res0 = call i8 @llvm.x86.avx512.mask.cmp.q.256(<4 x i64> %a0, <4 x i64> %a1, i32 0, i8 -1)
+ %res0 = call i8 @llvm.x86.avx512.mask.cmp.q.256(<4 x i64> %a0, <4 x i64> %a1, i8 0, i8 -1)
%vec0 = insertelement <8 x i8> undef, i8 %res0, i32 0
; CHECK: vpcmpltq %ymm1, %ymm0, %k0 ##
- %res1 = call i8 @llvm.x86.avx512.mask.cmp.q.256(<4 x i64> %a0, <4 x i64> %a1, i32 1, i8 -1)
+ %res1 = call i8 @llvm.x86.avx512.mask.cmp.q.256(<4 x i64> %a0, <4 x i64> %a1, i8 1, i8 -1)
%vec1 = insertelement <8 x i8> %vec0, i8 %res1, i32 1
; CHECK: vpcmpleq %ymm1, %ymm0, %k0 ##
- %res2 = call i8 @llvm.x86.avx512.mask.cmp.q.256(<4 x i64> %a0, <4 x i64> %a1, i32 2, i8 -1)
+ %res2 = call i8 @llvm.x86.avx512.mask.cmp.q.256(<4 x i64> %a0, <4 x i64> %a1, i8 2, i8 -1)
%vec2 = insertelement <8 x i8> %vec1, i8 %res2, i32 2
; CHECK: vpcmpunordq %ymm1, %ymm0, %k0 ##
- %res3 = call i8 @llvm.x86.avx512.mask.cmp.q.256(<4 x i64> %a0, <4 x i64> %a1, i32 3, i8 -1)
+ %res3 = call i8 @llvm.x86.avx512.mask.cmp.q.256(<4 x i64> %a0, <4 x i64> %a1, i8 3, i8 -1)
%vec3 = insertelement <8 x i8> %vec2, i8 %res3, i32 3
; CHECK: vpcmpneqq %ymm1, %ymm0, %k0 ##
- %res4 = call i8 @llvm.x86.avx512.mask.cmp.q.256(<4 x i64> %a0, <4 x i64> %a1, i32 4, i8 -1)
+ %res4 = call i8 @llvm.x86.avx512.mask.cmp.q.256(<4 x i64> %a0, <4 x i64> %a1, i8 4, i8 -1)
%vec4 = insertelement <8 x i8> %vec3, i8 %res4, i32 4
; CHECK: vpcmpnltq %ymm1, %ymm0, %k0 ##
- %res5 = call i8 @llvm.x86.avx512.mask.cmp.q.256(<4 x i64> %a0, <4 x i64> %a1, i32 5, i8 -1)
+ %res5 = call i8 @llvm.x86.avx512.mask.cmp.q.256(<4 x i64> %a0, <4 x i64> %a1, i8 5, i8 -1)
%vec5 = insertelement <8 x i8> %vec4, i8 %res5, i32 5
; CHECK: vpcmpnleq %ymm1, %ymm0, %k0 ##
- %res6 = call i8 @llvm.x86.avx512.mask.cmp.q.256(<4 x i64> %a0, <4 x i64> %a1, i32 6, i8 -1)
+ %res6 = call i8 @llvm.x86.avx512.mask.cmp.q.256(<4 x i64> %a0, <4 x i64> %a1, i8 6, i8 -1)
%vec6 = insertelement <8 x i8> %vec5, i8 %res6, i32 6
; CHECK: vpcmpordq %ymm1, %ymm0, %k0 ##
- %res7 = call i8 @llvm.x86.avx512.mask.cmp.q.256(<4 x i64> %a0, <4 x i64> %a1, i32 7, i8 -1)
+ %res7 = call i8 @llvm.x86.avx512.mask.cmp.q.256(<4 x i64> %a0, <4 x i64> %a1, i8 7, i8 -1)
%vec7 = insertelement <8 x i8> %vec6, i8 %res7, i32 7
ret <8 x i8> %vec7
}
define <8 x i8> @test_mask_cmp_q_256(<4 x i64> %a0, <4 x i64> %a1, i8 %mask) {
-; CHECK_LABEL: test_mask_cmp_q_256
+; CHECK-LABEL: test_mask_cmp_q_256
; CHECK: vpcmpeqq %ymm1, %ymm0, %k0 {%k1} ##
- %res0 = call i8 @llvm.x86.avx512.mask.cmp.q.256(<4 x i64> %a0, <4 x i64> %a1, i32 0, i8 %mask)
+ %res0 = call i8 @llvm.x86.avx512.mask.cmp.q.256(<4 x i64> %a0, <4 x i64> %a1, i8 0, i8 %mask)
%vec0 = insertelement <8 x i8> undef, i8 %res0, i32 0
; CHECK: vpcmpltq %ymm1, %ymm0, %k0 {%k1} ##
- %res1 = call i8 @llvm.x86.avx512.mask.cmp.q.256(<4 x i64> %a0, <4 x i64> %a1, i32 1, i8 %mask)
+ %res1 = call i8 @llvm.x86.avx512.mask.cmp.q.256(<4 x i64> %a0, <4 x i64> %a1, i8 1, i8 %mask)
%vec1 = insertelement <8 x i8> %vec0, i8 %res1, i32 1
; CHECK: vpcmpleq %ymm1, %ymm0, %k0 {%k1} ##
- %res2 = call i8 @llvm.x86.avx512.mask.cmp.q.256(<4 x i64> %a0, <4 x i64> %a1, i32 2, i8 %mask)
+ %res2 = call i8 @llvm.x86.avx512.mask.cmp.q.256(<4 x i64> %a0, <4 x i64> %a1, i8 2, i8 %mask)
%vec2 = insertelement <8 x i8> %vec1, i8 %res2, i32 2
; CHECK: vpcmpunordq %ymm1, %ymm0, %k0 {%k1} ##
- %res3 = call i8 @llvm.x86.avx512.mask.cmp.q.256(<4 x i64> %a0, <4 x i64> %a1, i32 3, i8 %mask)
+ %res3 = call i8 @llvm.x86.avx512.mask.cmp.q.256(<4 x i64> %a0, <4 x i64> %a1, i8 3, i8 %mask)
%vec3 = insertelement <8 x i8> %vec2, i8 %res3, i32 3
; CHECK: vpcmpneqq %ymm1, %ymm0, %k0 {%k1} ##
- %res4 = call i8 @llvm.x86.avx512.mask.cmp.q.256(<4 x i64> %a0, <4 x i64> %a1, i32 4, i8 %mask)
+ %res4 = call i8 @llvm.x86.avx512.mask.cmp.q.256(<4 x i64> %a0, <4 x i64> %a1, i8 4, i8 %mask)
%vec4 = insertelement <8 x i8> %vec3, i8 %res4, i32 4
; CHECK: vpcmpnltq %ymm1, %ymm0, %k0 {%k1} ##
- %res5 = call i8 @llvm.x86.avx512.mask.cmp.q.256(<4 x i64> %a0, <4 x i64> %a1, i32 5, i8 %mask)
+ %res5 = call i8 @llvm.x86.avx512.mask.cmp.q.256(<4 x i64> %a0, <4 x i64> %a1, i8 5, i8 %mask)
%vec5 = insertelement <8 x i8> %vec4, i8 %res5, i32 5
; CHECK: vpcmpnleq %ymm1, %ymm0, %k0 {%k1} ##
- %res6 = call i8 @llvm.x86.avx512.mask.cmp.q.256(<4 x i64> %a0, <4 x i64> %a1, i32 6, i8 %mask)
+ %res6 = call i8 @llvm.x86.avx512.mask.cmp.q.256(<4 x i64> %a0, <4 x i64> %a1, i8 6, i8 %mask)
%vec6 = insertelement <8 x i8> %vec5, i8 %res6, i32 6
; CHECK: vpcmpordq %ymm1, %ymm0, %k0 {%k1} ##
- %res7 = call i8 @llvm.x86.avx512.mask.cmp.q.256(<4 x i64> %a0, <4 x i64> %a1, i32 7, i8 %mask)
+ %res7 = call i8 @llvm.x86.avx512.mask.cmp.q.256(<4 x i64> %a0, <4 x i64> %a1, i8 7, i8 %mask)
%vec7 = insertelement <8 x i8> %vec6, i8 %res7, i32 7
ret <8 x i8> %vec7
}
-declare i8 @llvm.x86.avx512.mask.cmp.q.256(<4 x i64>, <4 x i64>, i32, i8) nounwind readnone
+declare i8 @llvm.x86.avx512.mask.cmp.q.256(<4 x i64>, <4 x i64>, i8, i8) nounwind readnone
define <8 x i8> @test_ucmp_q_256(<4 x i64> %a0, <4 x i64> %a1) {
-; CHECK_LABEL: test_ucmp_q_256
+; CHECK-LABEL: test_ucmp_q_256
; CHECK: vpcmpequq %ymm1, %ymm0, %k0 ##
- %res0 = call i8 @llvm.x86.avx512.mask.ucmp.q.256(<4 x i64> %a0, <4 x i64> %a1, i32 0, i8 -1)
+ %res0 = call i8 @llvm.x86.avx512.mask.ucmp.q.256(<4 x i64> %a0, <4 x i64> %a1, i8 0, i8 -1)
%vec0 = insertelement <8 x i8> undef, i8 %res0, i32 0
; CHECK: vpcmpltuq %ymm1, %ymm0, %k0 ##
- %res1 = call i8 @llvm.x86.avx512.mask.ucmp.q.256(<4 x i64> %a0, <4 x i64> %a1, i32 1, i8 -1)
+ %res1 = call i8 @llvm.x86.avx512.mask.ucmp.q.256(<4 x i64> %a0, <4 x i64> %a1, i8 1, i8 -1)
%vec1 = insertelement <8 x i8> %vec0, i8 %res1, i32 1
; CHECK: vpcmpleuq %ymm1, %ymm0, %k0 ##
- %res2 = call i8 @llvm.x86.avx512.mask.ucmp.q.256(<4 x i64> %a0, <4 x i64> %a1, i32 2, i8 -1)
+ %res2 = call i8 @llvm.x86.avx512.mask.ucmp.q.256(<4 x i64> %a0, <4 x i64> %a1, i8 2, i8 -1)
%vec2 = insertelement <8 x i8> %vec1, i8 %res2, i32 2
; CHECK: vpcmpunorduq %ymm1, %ymm0, %k0 ##
- %res3 = call i8 @llvm.x86.avx512.mask.ucmp.q.256(<4 x i64> %a0, <4 x i64> %a1, i32 3, i8 -1)
+ %res3 = call i8 @llvm.x86.avx512.mask.ucmp.q.256(<4 x i64> %a0, <4 x i64> %a1, i8 3, i8 -1)
%vec3 = insertelement <8 x i8> %vec2, i8 %res3, i32 3
; CHECK: vpcmpnequq %ymm1, %ymm0, %k0 ##
- %res4 = call i8 @llvm.x86.avx512.mask.ucmp.q.256(<4 x i64> %a0, <4 x i64> %a1, i32 4, i8 -1)
+ %res4 = call i8 @llvm.x86.avx512.mask.ucmp.q.256(<4 x i64> %a0, <4 x i64> %a1, i8 4, i8 -1)
%vec4 = insertelement <8 x i8> %vec3, i8 %res4, i32 4
; CHECK: vpcmpnltuq %ymm1, %ymm0, %k0 ##
- %res5 = call i8 @llvm.x86.avx512.mask.ucmp.q.256(<4 x i64> %a0, <4 x i64> %a1, i32 5, i8 -1)
+ %res5 = call i8 @llvm.x86.avx512.mask.ucmp.q.256(<4 x i64> %a0, <4 x i64> %a1, i8 5, i8 -1)
%vec5 = insertelement <8 x i8> %vec4, i8 %res5, i32 5
; CHECK: vpcmpnleuq %ymm1, %ymm0, %k0 ##
- %res6 = call i8 @llvm.x86.avx512.mask.ucmp.q.256(<4 x i64> %a0, <4 x i64> %a1, i32 6, i8 -1)
+ %res6 = call i8 @llvm.x86.avx512.mask.ucmp.q.256(<4 x i64> %a0, <4 x i64> %a1, i8 6, i8 -1)
%vec6 = insertelement <8 x i8> %vec5, i8 %res6, i32 6
; CHECK: vpcmporduq %ymm1, %ymm0, %k0 ##
- %res7 = call i8 @llvm.x86.avx512.mask.ucmp.q.256(<4 x i64> %a0, <4 x i64> %a1, i32 7, i8 -1)
+ %res7 = call i8 @llvm.x86.avx512.mask.ucmp.q.256(<4 x i64> %a0, <4 x i64> %a1, i8 7, i8 -1)
%vec7 = insertelement <8 x i8> %vec6, i8 %res7, i32 7
ret <8 x i8> %vec7
}
define <8 x i8> @test_mask_ucmp_q_256(<4 x i64> %a0, <4 x i64> %a1, i8 %mask) {
-; CHECK_LABEL: test_mask_ucmp_q_256
+; CHECK-LABEL: test_mask_ucmp_q_256
; CHECK: vpcmpequq %ymm1, %ymm0, %k0 {%k1} ##
- %res0 = call i8 @llvm.x86.avx512.mask.ucmp.q.256(<4 x i64> %a0, <4 x i64> %a1, i32 0, i8 %mask)
+ %res0 = call i8 @llvm.x86.avx512.mask.ucmp.q.256(<4 x i64> %a0, <4 x i64> %a1, i8 0, i8 %mask)
%vec0 = insertelement <8 x i8> undef, i8 %res0, i32 0
; CHECK: vpcmpltuq %ymm1, %ymm0, %k0 {%k1} ##
- %res1 = call i8 @llvm.x86.avx512.mask.ucmp.q.256(<4 x i64> %a0, <4 x i64> %a1, i32 1, i8 %mask)
+ %res1 = call i8 @llvm.x86.avx512.mask.ucmp.q.256(<4 x i64> %a0, <4 x i64> %a1, i8 1, i8 %mask)
%vec1 = insertelement <8 x i8> %vec0, i8 %res1, i32 1
; CHECK: vpcmpleuq %ymm1, %ymm0, %k0 {%k1} ##
- %res2 = call i8 @llvm.x86.avx512.mask.ucmp.q.256(<4 x i64> %a0, <4 x i64> %a1, i32 2, i8 %mask)
+ %res2 = call i8 @llvm.x86.avx512.mask.ucmp.q.256(<4 x i64> %a0, <4 x i64> %a1, i8 2, i8 %mask)
%vec2 = insertelement <8 x i8> %vec1, i8 %res2, i32 2
; CHECK: vpcmpunorduq %ymm1, %ymm0, %k0 {%k1} ##
- %res3 = call i8 @llvm.x86.avx512.mask.ucmp.q.256(<4 x i64> %a0, <4 x i64> %a1, i32 3, i8 %mask)
+ %res3 = call i8 @llvm.x86.avx512.mask.ucmp.q.256(<4 x i64> %a0, <4 x i64> %a1, i8 3, i8 %mask)
%vec3 = insertelement <8 x i8> %vec2, i8 %res3, i32 3
; CHECK: vpcmpnequq %ymm1, %ymm0, %k0 {%k1} ##
- %res4 = call i8 @llvm.x86.avx512.mask.ucmp.q.256(<4 x i64> %a0, <4 x i64> %a1, i32 4, i8 %mask)
+ %res4 = call i8 @llvm.x86.avx512.mask.ucmp.q.256(<4 x i64> %a0, <4 x i64> %a1, i8 4, i8 %mask)
%vec4 = insertelement <8 x i8> %vec3, i8 %res4, i32 4
; CHECK: vpcmpnltuq %ymm1, %ymm0, %k0 {%k1} ##
- %res5 = call i8 @llvm.x86.avx512.mask.ucmp.q.256(<4 x i64> %a0, <4 x i64> %a1, i32 5, i8 %mask)
+ %res5 = call i8 @llvm.x86.avx512.mask.ucmp.q.256(<4 x i64> %a0, <4 x i64> %a1, i8 5, i8 %mask)
%vec5 = insertelement <8 x i8> %vec4, i8 %res5, i32 5
; CHECK: vpcmpnleuq %ymm1, %ymm0, %k0 {%k1} ##
- %res6 = call i8 @llvm.x86.avx512.mask.ucmp.q.256(<4 x i64> %a0, <4 x i64> %a1, i32 6, i8 %mask)
+ %res6 = call i8 @llvm.x86.avx512.mask.ucmp.q.256(<4 x i64> %a0, <4 x i64> %a1, i8 6, i8 %mask)
%vec6 = insertelement <8 x i8> %vec5, i8 %res6, i32 6
; CHECK: vpcmporduq %ymm1, %ymm0, %k0 {%k1} ##
- %res7 = call i8 @llvm.x86.avx512.mask.ucmp.q.256(<4 x i64> %a0, <4 x i64> %a1, i32 7, i8 %mask)
+ %res7 = call i8 @llvm.x86.avx512.mask.ucmp.q.256(<4 x i64> %a0, <4 x i64> %a1, i8 7, i8 %mask)
%vec7 = insertelement <8 x i8> %vec6, i8 %res7, i32 7
ret <8 x i8> %vec7
}
-declare i8 @llvm.x86.avx512.mask.ucmp.q.256(<4 x i64>, <4 x i64>, i32, i8) nounwind readnone
+declare i8 @llvm.x86.avx512.mask.ucmp.q.256(<4 x i64>, <4 x i64>, i8, i8) nounwind readnone
; 128-bit
@@ -373,241 +373,492 @@ define i8 @test_mask_pcmpgt_q_128(<2 x i64> %a, <2 x i64> %b, i8 %mask) {
declare i8 @llvm.x86.avx512.mask.pcmpgt.q.128(<2 x i64>, <2 x i64>, i8)
define <8 x i8> @test_cmp_d_128(<4 x i32> %a0, <4 x i32> %a1) {
-; CHECK_LABEL: test_cmp_d_128
+; CHECK-LABEL: test_cmp_d_128
; CHECK: vpcmpeqd %xmm1, %xmm0, %k0 ##
- %res0 = call i8 @llvm.x86.avx512.mask.cmp.d.128(<4 x i32> %a0, <4 x i32> %a1, i32 0, i8 -1)
+ %res0 = call i8 @llvm.x86.avx512.mask.cmp.d.128(<4 x i32> %a0, <4 x i32> %a1, i8 0, i8 -1)
%vec0 = insertelement <8 x i8> undef, i8 %res0, i32 0
; CHECK: vpcmpltd %xmm1, %xmm0, %k0 ##
- %res1 = call i8 @llvm.x86.avx512.mask.cmp.d.128(<4 x i32> %a0, <4 x i32> %a1, i32 1, i8 -1)
+ %res1 = call i8 @llvm.x86.avx512.mask.cmp.d.128(<4 x i32> %a0, <4 x i32> %a1, i8 1, i8 -1)
%vec1 = insertelement <8 x i8> %vec0, i8 %res1, i32 1
; CHECK: vpcmpled %xmm1, %xmm0, %k0 ##
- %res2 = call i8 @llvm.x86.avx512.mask.cmp.d.128(<4 x i32> %a0, <4 x i32> %a1, i32 2, i8 -1)
+ %res2 = call i8 @llvm.x86.avx512.mask.cmp.d.128(<4 x i32> %a0, <4 x i32> %a1, i8 2, i8 -1)
%vec2 = insertelement <8 x i8> %vec1, i8 %res2, i32 2
; CHECK: vpcmpunordd %xmm1, %xmm0, %k0 ##
- %res3 = call i8 @llvm.x86.avx512.mask.cmp.d.128(<4 x i32> %a0, <4 x i32> %a1, i32 3, i8 -1)
+ %res3 = call i8 @llvm.x86.avx512.mask.cmp.d.128(<4 x i32> %a0, <4 x i32> %a1, i8 3, i8 -1)
%vec3 = insertelement <8 x i8> %vec2, i8 %res3, i32 3
; CHECK: vpcmpneqd %xmm1, %xmm0, %k0 ##
- %res4 = call i8 @llvm.x86.avx512.mask.cmp.d.128(<4 x i32> %a0, <4 x i32> %a1, i32 4, i8 -1)
+ %res4 = call i8 @llvm.x86.avx512.mask.cmp.d.128(<4 x i32> %a0, <4 x i32> %a1, i8 4, i8 -1)
%vec4 = insertelement <8 x i8> %vec3, i8 %res4, i32 4
; CHECK: vpcmpnltd %xmm1, %xmm0, %k0 ##
- %res5 = call i8 @llvm.x86.avx512.mask.cmp.d.128(<4 x i32> %a0, <4 x i32> %a1, i32 5, i8 -1)
+ %res5 = call i8 @llvm.x86.avx512.mask.cmp.d.128(<4 x i32> %a0, <4 x i32> %a1, i8 5, i8 -1)
%vec5 = insertelement <8 x i8> %vec4, i8 %res5, i32 5
; CHECK: vpcmpnled %xmm1, %xmm0, %k0 ##
- %res6 = call i8 @llvm.x86.avx512.mask.cmp.d.128(<4 x i32> %a0, <4 x i32> %a1, i32 6, i8 -1)
+ %res6 = call i8 @llvm.x86.avx512.mask.cmp.d.128(<4 x i32> %a0, <4 x i32> %a1, i8 6, i8 -1)
%vec6 = insertelement <8 x i8> %vec5, i8 %res6, i32 6
; CHECK: vpcmpordd %xmm1, %xmm0, %k0 ##
- %res7 = call i8 @llvm.x86.avx512.mask.cmp.d.128(<4 x i32> %a0, <4 x i32> %a1, i32 7, i8 -1)
+ %res7 = call i8 @llvm.x86.avx512.mask.cmp.d.128(<4 x i32> %a0, <4 x i32> %a1, i8 7, i8 -1)
%vec7 = insertelement <8 x i8> %vec6, i8 %res7, i32 7
ret <8 x i8> %vec7
}
define <8 x i8> @test_mask_cmp_d_128(<4 x i32> %a0, <4 x i32> %a1, i8 %mask) {
-; CHECK_LABEL: test_mask_cmp_d_128
+; CHECK-LABEL: test_mask_cmp_d_128
; CHECK: vpcmpeqd %xmm1, %xmm0, %k0 {%k1} ##
- %res0 = call i8 @llvm.x86.avx512.mask.cmp.d.128(<4 x i32> %a0, <4 x i32> %a1, i32 0, i8 %mask)
+ %res0 = call i8 @llvm.x86.avx512.mask.cmp.d.128(<4 x i32> %a0, <4 x i32> %a1, i8 0, i8 %mask)
%vec0 = insertelement <8 x i8> undef, i8 %res0, i32 0
; CHECK: vpcmpltd %xmm1, %xmm0, %k0 {%k1} ##
- %res1 = call i8 @llvm.x86.avx512.mask.cmp.d.128(<4 x i32> %a0, <4 x i32> %a1, i32 1, i8 %mask)
+ %res1 = call i8 @llvm.x86.avx512.mask.cmp.d.128(<4 x i32> %a0, <4 x i32> %a1, i8 1, i8 %mask)
%vec1 = insertelement <8 x i8> %vec0, i8 %res1, i32 1
; CHECK: vpcmpled %xmm1, %xmm0, %k0 {%k1} ##
- %res2 = call i8 @llvm.x86.avx512.mask.cmp.d.128(<4 x i32> %a0, <4 x i32> %a1, i32 2, i8 %mask)
+ %res2 = call i8 @llvm.x86.avx512.mask.cmp.d.128(<4 x i32> %a0, <4 x i32> %a1, i8 2, i8 %mask)
%vec2 = insertelement <8 x i8> %vec1, i8 %res2, i32 2
; CHECK: vpcmpunordd %xmm1, %xmm0, %k0 {%k1} ##
- %res3 = call i8 @llvm.x86.avx512.mask.cmp.d.128(<4 x i32> %a0, <4 x i32> %a1, i32 3, i8 %mask)
+ %res3 = call i8 @llvm.x86.avx512.mask.cmp.d.128(<4 x i32> %a0, <4 x i32> %a1, i8 3, i8 %mask)
%vec3 = insertelement <8 x i8> %vec2, i8 %res3, i32 3
; CHECK: vpcmpneqd %xmm1, %xmm0, %k0 {%k1} ##
- %res4 = call i8 @llvm.x86.avx512.mask.cmp.d.128(<4 x i32> %a0, <4 x i32> %a1, i32 4, i8 %mask)
+ %res4 = call i8 @llvm.x86.avx512.mask.cmp.d.128(<4 x i32> %a0, <4 x i32> %a1, i8 4, i8 %mask)
%vec4 = insertelement <8 x i8> %vec3, i8 %res4, i32 4
; CHECK: vpcmpnltd %xmm1, %xmm0, %k0 {%k1} ##
- %res5 = call i8 @llvm.x86.avx512.mask.cmp.d.128(<4 x i32> %a0, <4 x i32> %a1, i32 5, i8 %mask)
+ %res5 = call i8 @llvm.x86.avx512.mask.cmp.d.128(<4 x i32> %a0, <4 x i32> %a1, i8 5, i8 %mask)
%vec5 = insertelement <8 x i8> %vec4, i8 %res5, i32 5
; CHECK: vpcmpnled %xmm1, %xmm0, %k0 {%k1} ##
- %res6 = call i8 @llvm.x86.avx512.mask.cmp.d.128(<4 x i32> %a0, <4 x i32> %a1, i32 6, i8 %mask)
+ %res6 = call i8 @llvm.x86.avx512.mask.cmp.d.128(<4 x i32> %a0, <4 x i32> %a1, i8 6, i8 %mask)
%vec6 = insertelement <8 x i8> %vec5, i8 %res6, i32 6
; CHECK: vpcmpordd %xmm1, %xmm0, %k0 {%k1} ##
- %res7 = call i8 @llvm.x86.avx512.mask.cmp.d.128(<4 x i32> %a0, <4 x i32> %a1, i32 7, i8 %mask)
+ %res7 = call i8 @llvm.x86.avx512.mask.cmp.d.128(<4 x i32> %a0, <4 x i32> %a1, i8 7, i8 %mask)
%vec7 = insertelement <8 x i8> %vec6, i8 %res7, i32 7
ret <8 x i8> %vec7
}
-declare i8 @llvm.x86.avx512.mask.cmp.d.128(<4 x i32>, <4 x i32>, i32, i8) nounwind readnone
+declare i8 @llvm.x86.avx512.mask.cmp.d.128(<4 x i32>, <4 x i32>, i8, i8) nounwind readnone
define <8 x i8> @test_ucmp_d_128(<4 x i32> %a0, <4 x i32> %a1) {
-; CHECK_LABEL: test_ucmp_d_128
+; CHECK-LABEL: test_ucmp_d_128
; CHECK: vpcmpequd %xmm1, %xmm0, %k0 ##
- %res0 = call i8 @llvm.x86.avx512.mask.ucmp.d.128(<4 x i32> %a0, <4 x i32> %a1, i32 0, i8 -1)
+ %res0 = call i8 @llvm.x86.avx512.mask.ucmp.d.128(<4 x i32> %a0, <4 x i32> %a1, i8 0, i8 -1)
%vec0 = insertelement <8 x i8> undef, i8 %res0, i32 0
; CHECK: vpcmpltud %xmm1, %xmm0, %k0 ##
- %res1 = call i8 @llvm.x86.avx512.mask.ucmp.d.128(<4 x i32> %a0, <4 x i32> %a1, i32 1, i8 -1)
+ %res1 = call i8 @llvm.x86.avx512.mask.ucmp.d.128(<4 x i32> %a0, <4 x i32> %a1, i8 1, i8 -1)
%vec1 = insertelement <8 x i8> %vec0, i8 %res1, i32 1
; CHECK: vpcmpleud %xmm1, %xmm0, %k0 ##
- %res2 = call i8 @llvm.x86.avx512.mask.ucmp.d.128(<4 x i32> %a0, <4 x i32> %a1, i32 2, i8 -1)
+ %res2 = call i8 @llvm.x86.avx512.mask.ucmp.d.128(<4 x i32> %a0, <4 x i32> %a1, i8 2, i8 -1)
%vec2 = insertelement <8 x i8> %vec1, i8 %res2, i32 2
; CHECK: vpcmpunordud %xmm1, %xmm0, %k0 ##
- %res3 = call i8 @llvm.x86.avx512.mask.ucmp.d.128(<4 x i32> %a0, <4 x i32> %a1, i32 3, i8 -1)
+ %res3 = call i8 @llvm.x86.avx512.mask.ucmp.d.128(<4 x i32> %a0, <4 x i32> %a1, i8 3, i8 -1)
%vec3 = insertelement <8 x i8> %vec2, i8 %res3, i32 3
; CHECK: vpcmpnequd %xmm1, %xmm0, %k0 ##
- %res4 = call i8 @llvm.x86.avx512.mask.ucmp.d.128(<4 x i32> %a0, <4 x i32> %a1, i32 4, i8 -1)
+ %res4 = call i8 @llvm.x86.avx512.mask.ucmp.d.128(<4 x i32> %a0, <4 x i32> %a1, i8 4, i8 -1)
%vec4 = insertelement <8 x i8> %vec3, i8 %res4, i32 4
; CHECK: vpcmpnltud %xmm1, %xmm0, %k0 ##
- %res5 = call i8 @llvm.x86.avx512.mask.ucmp.d.128(<4 x i32> %a0, <4 x i32> %a1, i32 5, i8 -1)
+ %res5 = call i8 @llvm.x86.avx512.mask.ucmp.d.128(<4 x i32> %a0, <4 x i32> %a1, i8 5, i8 -1)
%vec5 = insertelement <8 x i8> %vec4, i8 %res5, i32 5
; CHECK: vpcmpnleud %xmm1, %xmm0, %k0 ##
- %res6 = call i8 @llvm.x86.avx512.mask.ucmp.d.128(<4 x i32> %a0, <4 x i32> %a1, i32 6, i8 -1)
+ %res6 = call i8 @llvm.x86.avx512.mask.ucmp.d.128(<4 x i32> %a0, <4 x i32> %a1, i8 6, i8 -1)
%vec6 = insertelement <8 x i8> %vec5, i8 %res6, i32 6
; CHECK: vpcmpordud %xmm1, %xmm0, %k0 ##
- %res7 = call i8 @llvm.x86.avx512.mask.ucmp.d.128(<4 x i32> %a0, <4 x i32> %a1, i32 7, i8 -1)
+ %res7 = call i8 @llvm.x86.avx512.mask.ucmp.d.128(<4 x i32> %a0, <4 x i32> %a1, i8 7, i8 -1)
%vec7 = insertelement <8 x i8> %vec6, i8 %res7, i32 7
ret <8 x i8> %vec7
}
define <8 x i8> @test_mask_ucmp_d_128(<4 x i32> %a0, <4 x i32> %a1, i8 %mask) {
-; CHECK_LABEL: test_mask_ucmp_d_128
+; CHECK-LABEL: test_mask_ucmp_d_128
; CHECK: vpcmpequd %xmm1, %xmm0, %k0 {%k1} ##
- %res0 = call i8 @llvm.x86.avx512.mask.ucmp.d.128(<4 x i32> %a0, <4 x i32> %a1, i32 0, i8 %mask)
+ %res0 = call i8 @llvm.x86.avx512.mask.ucmp.d.128(<4 x i32> %a0, <4 x i32> %a1, i8 0, i8 %mask)
%vec0 = insertelement <8 x i8> undef, i8 %res0, i32 0
; CHECK: vpcmpltud %xmm1, %xmm0, %k0 {%k1} ##
- %res1 = call i8 @llvm.x86.avx512.mask.ucmp.d.128(<4 x i32> %a0, <4 x i32> %a1, i32 1, i8 %mask)
+ %res1 = call i8 @llvm.x86.avx512.mask.ucmp.d.128(<4 x i32> %a0, <4 x i32> %a1, i8 1, i8 %mask)
%vec1 = insertelement <8 x i8> %vec0, i8 %res1, i32 1
; CHECK: vpcmpleud %xmm1, %xmm0, %k0 {%k1} ##
- %res2 = call i8 @llvm.x86.avx512.mask.ucmp.d.128(<4 x i32> %a0, <4 x i32> %a1, i32 2, i8 %mask)
+ %res2 = call i8 @llvm.x86.avx512.mask.ucmp.d.128(<4 x i32> %a0, <4 x i32> %a1, i8 2, i8 %mask)
%vec2 = insertelement <8 x i8> %vec1, i8 %res2, i32 2
; CHECK: vpcmpunordud %xmm1, %xmm0, %k0 {%k1} ##
- %res3 = call i8 @llvm.x86.avx512.mask.ucmp.d.128(<4 x i32> %a0, <4 x i32> %a1, i32 3, i8 %mask)
+ %res3 = call i8 @llvm.x86.avx512.mask.ucmp.d.128(<4 x i32> %a0, <4 x i32> %a1, i8 3, i8 %mask)
%vec3 = insertelement <8 x i8> %vec2, i8 %res3, i32 3
; CHECK: vpcmpnequd %xmm1, %xmm0, %k0 {%k1} ##
- %res4 = call i8 @llvm.x86.avx512.mask.ucmp.d.128(<4 x i32> %a0, <4 x i32> %a1, i32 4, i8 %mask)
+ %res4 = call i8 @llvm.x86.avx512.mask.ucmp.d.128(<4 x i32> %a0, <4 x i32> %a1, i8 4, i8 %mask)
%vec4 = insertelement <8 x i8> %vec3, i8 %res4, i32 4
; CHECK: vpcmpnltud %xmm1, %xmm0, %k0 {%k1} ##
- %res5 = call i8 @llvm.x86.avx512.mask.ucmp.d.128(<4 x i32> %a0, <4 x i32> %a1, i32 5, i8 %mask)
+ %res5 = call i8 @llvm.x86.avx512.mask.ucmp.d.128(<4 x i32> %a0, <4 x i32> %a1, i8 5, i8 %mask)
%vec5 = insertelement <8 x i8> %vec4, i8 %res5, i32 5
; CHECK: vpcmpnleud %xmm1, %xmm0, %k0 {%k1} ##
- %res6 = call i8 @llvm.x86.avx512.mask.ucmp.d.128(<4 x i32> %a0, <4 x i32> %a1, i32 6, i8 %mask)
+ %res6 = call i8 @llvm.x86.avx512.mask.ucmp.d.128(<4 x i32> %a0, <4 x i32> %a1, i8 6, i8 %mask)
%vec6 = insertelement <8 x i8> %vec5, i8 %res6, i32 6
; CHECK: vpcmpordud %xmm1, %xmm0, %k0 {%k1} ##
- %res7 = call i8 @llvm.x86.avx512.mask.ucmp.d.128(<4 x i32> %a0, <4 x i32> %a1, i32 7, i8 %mask)
+ %res7 = call i8 @llvm.x86.avx512.mask.ucmp.d.128(<4 x i32> %a0, <4 x i32> %a1, i8 7, i8 %mask)
%vec7 = insertelement <8 x i8> %vec6, i8 %res7, i32 7
ret <8 x i8> %vec7
}
-declare i8 @llvm.x86.avx512.mask.ucmp.d.128(<4 x i32>, <4 x i32>, i32, i8) nounwind readnone
+declare i8 @llvm.x86.avx512.mask.ucmp.d.128(<4 x i32>, <4 x i32>, i8, i8) nounwind readnone
define <8 x i8> @test_cmp_q_128(<2 x i64> %a0, <2 x i64> %a1) {
-; CHECK_LABEL: test_cmp_q_128
+; CHECK-LABEL: test_cmp_q_128
; CHECK: vpcmpeqq %xmm1, %xmm0, %k0 ##
- %res0 = call i8 @llvm.x86.avx512.mask.cmp.q.128(<2 x i64> %a0, <2 x i64> %a1, i32 0, i8 -1)
+ %res0 = call i8 @llvm.x86.avx512.mask.cmp.q.128(<2 x i64> %a0, <2 x i64> %a1, i8 0, i8 -1)
%vec0 = insertelement <8 x i8> undef, i8 %res0, i32 0
; CHECK: vpcmpltq %xmm1, %xmm0, %k0 ##
- %res1 = call i8 @llvm.x86.avx512.mask.cmp.q.128(<2 x i64> %a0, <2 x i64> %a1, i32 1, i8 -1)
+ %res1 = call i8 @llvm.x86.avx512.mask.cmp.q.128(<2 x i64> %a0, <2 x i64> %a1, i8 1, i8 -1)
%vec1 = insertelement <8 x i8> %vec0, i8 %res1, i32 1
; CHECK: vpcmpleq %xmm1, %xmm0, %k0 ##
- %res2 = call i8 @llvm.x86.avx512.mask.cmp.q.128(<2 x i64> %a0, <2 x i64> %a1, i32 2, i8 -1)
+ %res2 = call i8 @llvm.x86.avx512.mask.cmp.q.128(<2 x i64> %a0, <2 x i64> %a1, i8 2, i8 -1)
%vec2 = insertelement <8 x i8> %vec1, i8 %res2, i32 2
; CHECK: vpcmpunordq %xmm1, %xmm0, %k0 ##
- %res3 = call i8 @llvm.x86.avx512.mask.cmp.q.128(<2 x i64> %a0, <2 x i64> %a1, i32 3, i8 -1)
+ %res3 = call i8 @llvm.x86.avx512.mask.cmp.q.128(<2 x i64> %a0, <2 x i64> %a1, i8 3, i8 -1)
%vec3 = insertelement <8 x i8> %vec2, i8 %res3, i32 3
; CHECK: vpcmpneqq %xmm1, %xmm0, %k0 ##
- %res4 = call i8 @llvm.x86.avx512.mask.cmp.q.128(<2 x i64> %a0, <2 x i64> %a1, i32 4, i8 -1)
+ %res4 = call i8 @llvm.x86.avx512.mask.cmp.q.128(<2 x i64> %a0, <2 x i64> %a1, i8 4, i8 -1)
%vec4 = insertelement <8 x i8> %vec3, i8 %res4, i32 4
; CHECK: vpcmpnltq %xmm1, %xmm0, %k0 ##
- %res5 = call i8 @llvm.x86.avx512.mask.cmp.q.128(<2 x i64> %a0, <2 x i64> %a1, i32 5, i8 -1)
+ %res5 = call i8 @llvm.x86.avx512.mask.cmp.q.128(<2 x i64> %a0, <2 x i64> %a1, i8 5, i8 -1)
%vec5 = insertelement <8 x i8> %vec4, i8 %res5, i32 5
; CHECK: vpcmpnleq %xmm1, %xmm0, %k0 ##
- %res6 = call i8 @llvm.x86.avx512.mask.cmp.q.128(<2 x i64> %a0, <2 x i64> %a1, i32 6, i8 -1)
+ %res6 = call i8 @llvm.x86.avx512.mask.cmp.q.128(<2 x i64> %a0, <2 x i64> %a1, i8 6, i8 -1)
%vec6 = insertelement <8 x i8> %vec5, i8 %res6, i32 6
; CHECK: vpcmpordq %xmm1, %xmm0, %k0 ##
- %res7 = call i8 @llvm.x86.avx512.mask.cmp.q.128(<2 x i64> %a0, <2 x i64> %a1, i32 7, i8 -1)
+ %res7 = call i8 @llvm.x86.avx512.mask.cmp.q.128(<2 x i64> %a0, <2 x i64> %a1, i8 7, i8 -1)
%vec7 = insertelement <8 x i8> %vec6, i8 %res7, i32 7
ret <8 x i8> %vec7
}
define <8 x i8> @test_mask_cmp_q_128(<2 x i64> %a0, <2 x i64> %a1, i8 %mask) {
-; CHECK_LABEL: test_mask_cmp_q_128
+; CHECK-LABEL: test_mask_cmp_q_128
; CHECK: vpcmpeqq %xmm1, %xmm0, %k0 {%k1} ##
- %res0 = call i8 @llvm.x86.avx512.mask.cmp.q.128(<2 x i64> %a0, <2 x i64> %a1, i32 0, i8 %mask)
+ %res0 = call i8 @llvm.x86.avx512.mask.cmp.q.128(<2 x i64> %a0, <2 x i64> %a1, i8 0, i8 %mask)
%vec0 = insertelement <8 x i8> undef, i8 %res0, i32 0
; CHECK: vpcmpltq %xmm1, %xmm0, %k0 {%k1} ##
- %res1 = call i8 @llvm.x86.avx512.mask.cmp.q.128(<2 x i64> %a0, <2 x i64> %a1, i32 1, i8 %mask)
+ %res1 = call i8 @llvm.x86.avx512.mask.cmp.q.128(<2 x i64> %a0, <2 x i64> %a1, i8 1, i8 %mask)
%vec1 = insertelement <8 x i8> %vec0, i8 %res1, i32 1
; CHECK: vpcmpleq %xmm1, %xmm0, %k0 {%k1} ##
- %res2 = call i8 @llvm.x86.avx512.mask.cmp.q.128(<2 x i64> %a0, <2 x i64> %a1, i32 2, i8 %mask)
+ %res2 = call i8 @llvm.x86.avx512.mask.cmp.q.128(<2 x i64> %a0, <2 x i64> %a1, i8 2, i8 %mask)
%vec2 = insertelement <8 x i8> %vec1, i8 %res2, i32 2
; CHECK: vpcmpunordq %xmm1, %xmm0, %k0 {%k1} ##
- %res3 = call i8 @llvm.x86.avx512.mask.cmp.q.128(<2 x i64> %a0, <2 x i64> %a1, i32 3, i8 %mask)
+ %res3 = call i8 @llvm.x86.avx512.mask.cmp.q.128(<2 x i64> %a0, <2 x i64> %a1, i8 3, i8 %mask)
%vec3 = insertelement <8 x i8> %vec2, i8 %res3, i32 3
; CHECK: vpcmpneqq %xmm1, %xmm0, %k0 {%k1} ##
- %res4 = call i8 @llvm.x86.avx512.mask.cmp.q.128(<2 x i64> %a0, <2 x i64> %a1, i32 4, i8 %mask)
+ %res4 = call i8 @llvm.x86.avx512.mask.cmp.q.128(<2 x i64> %a0, <2 x i64> %a1, i8 4, i8 %mask)
%vec4 = insertelement <8 x i8> %vec3, i8 %res4, i32 4
; CHECK: vpcmpnltq %xmm1, %xmm0, %k0 {%k1} ##
- %res5 = call i8 @llvm.x86.avx512.mask.cmp.q.128(<2 x i64> %a0, <2 x i64> %a1, i32 5, i8 %mask)
+ %res5 = call i8 @llvm.x86.avx512.mask.cmp.q.128(<2 x i64> %a0, <2 x i64> %a1, i8 5, i8 %mask)
%vec5 = insertelement <8 x i8> %vec4, i8 %res5, i32 5
; CHECK: vpcmpnleq %xmm1, %xmm0, %k0 {%k1} ##
- %res6 = call i8 @llvm.x86.avx512.mask.cmp.q.128(<2 x i64> %a0, <2 x i64> %a1, i32 6, i8 %mask)
+ %res6 = call i8 @llvm.x86.avx512.mask.cmp.q.128(<2 x i64> %a0, <2 x i64> %a1, i8 6, i8 %mask)
%vec6 = insertelement <8 x i8> %vec5, i8 %res6, i32 6
; CHECK: vpcmpordq %xmm1, %xmm0, %k0 {%k1} ##
- %res7 = call i8 @llvm.x86.avx512.mask.cmp.q.128(<2 x i64> %a0, <2 x i64> %a1, i32 7, i8 %mask)
+ %res7 = call i8 @llvm.x86.avx512.mask.cmp.q.128(<2 x i64> %a0, <2 x i64> %a1, i8 7, i8 %mask)
%vec7 = insertelement <8 x i8> %vec6, i8 %res7, i32 7
ret <8 x i8> %vec7
}
-declare i8 @llvm.x86.avx512.mask.cmp.q.128(<2 x i64>, <2 x i64>, i32, i8) nounwind readnone
+declare i8 @llvm.x86.avx512.mask.cmp.q.128(<2 x i64>, <2 x i64>, i8, i8) nounwind readnone
define <8 x i8> @test_ucmp_q_128(<2 x i64> %a0, <2 x i64> %a1) {
-; CHECK_LABEL: test_ucmp_q_128
+; CHECK-LABEL: test_ucmp_q_128
; CHECK: vpcmpequq %xmm1, %xmm0, %k0 ##
- %res0 = call i8 @llvm.x86.avx512.mask.ucmp.q.128(<2 x i64> %a0, <2 x i64> %a1, i32 0, i8 -1)
+ %res0 = call i8 @llvm.x86.avx512.mask.ucmp.q.128(<2 x i64> %a0, <2 x i64> %a1, i8 0, i8 -1)
%vec0 = insertelement <8 x i8> undef, i8 %res0, i32 0
; CHECK: vpcmpltuq %xmm1, %xmm0, %k0 ##
- %res1 = call i8 @llvm.x86.avx512.mask.ucmp.q.128(<2 x i64> %a0, <2 x i64> %a1, i32 1, i8 -1)
+ %res1 = call i8 @llvm.x86.avx512.mask.ucmp.q.128(<2 x i64> %a0, <2 x i64> %a1, i8 1, i8 -1)
%vec1 = insertelement <8 x i8> %vec0, i8 %res1, i32 1
; CHECK: vpcmpleuq %xmm1, %xmm0, %k0 ##
- %res2 = call i8 @llvm.x86.avx512.mask.ucmp.q.128(<2 x i64> %a0, <2 x i64> %a1, i32 2, i8 -1)
+ %res2 = call i8 @llvm.x86.avx512.mask.ucmp.q.128(<2 x i64> %a0, <2 x i64> %a1, i8 2, i8 -1)
%vec2 = insertelement <8 x i8> %vec1, i8 %res2, i32 2
; CHECK: vpcmpunorduq %xmm1, %xmm0, %k0 ##
- %res3 = call i8 @llvm.x86.avx512.mask.ucmp.q.128(<2 x i64> %a0, <2 x i64> %a1, i32 3, i8 -1)
+ %res3 = call i8 @llvm.x86.avx512.mask.ucmp.q.128(<2 x i64> %a0, <2 x i64> %a1, i8 3, i8 -1)
%vec3 = insertelement <8 x i8> %vec2, i8 %res3, i32 3
; CHECK: vpcmpnequq %xmm1, %xmm0, %k0 ##
- %res4 = call i8 @llvm.x86.avx512.mask.ucmp.q.128(<2 x i64> %a0, <2 x i64> %a1, i32 4, i8 -1)
+ %res4 = call i8 @llvm.x86.avx512.mask.ucmp.q.128(<2 x i64> %a0, <2 x i64> %a1, i8 4, i8 -1)
%vec4 = insertelement <8 x i8> %vec3, i8 %res4, i32 4
; CHECK: vpcmpnltuq %xmm1, %xmm0, %k0 ##
- %res5 = call i8 @llvm.x86.avx512.mask.ucmp.q.128(<2 x i64> %a0, <2 x i64> %a1, i32 5, i8 -1)
+ %res5 = call i8 @llvm.x86.avx512.mask.ucmp.q.128(<2 x i64> %a0, <2 x i64> %a1, i8 5, i8 -1)
%vec5 = insertelement <8 x i8> %vec4, i8 %res5, i32 5
; CHECK: vpcmpnleuq %xmm1, %xmm0, %k0 ##
- %res6 = call i8 @llvm.x86.avx512.mask.ucmp.q.128(<2 x i64> %a0, <2 x i64> %a1, i32 6, i8 -1)
+ %res6 = call i8 @llvm.x86.avx512.mask.ucmp.q.128(<2 x i64> %a0, <2 x i64> %a1, i8 6, i8 -1)
%vec6 = insertelement <8 x i8> %vec5, i8 %res6, i32 6
; CHECK: vpcmporduq %xmm1, %xmm0, %k0 ##
- %res7 = call i8 @llvm.x86.avx512.mask.ucmp.q.128(<2 x i64> %a0, <2 x i64> %a1, i32 7, i8 -1)
+ %res7 = call i8 @llvm.x86.avx512.mask.ucmp.q.128(<2 x i64> %a0, <2 x i64> %a1, i8 7, i8 -1)
%vec7 = insertelement <8 x i8> %vec6, i8 %res7, i32 7
ret <8 x i8> %vec7
}
define <8 x i8> @test_mask_ucmp_q_128(<2 x i64> %a0, <2 x i64> %a1, i8 %mask) {
-; CHECK_LABEL: test_mask_ucmp_q_128
+; CHECK-LABEL: test_mask_ucmp_q_128
; CHECK: vpcmpequq %xmm1, %xmm0, %k0 {%k1} ##
- %res0 = call i8 @llvm.x86.avx512.mask.ucmp.q.128(<2 x i64> %a0, <2 x i64> %a1, i32 0, i8 %mask)
+ %res0 = call i8 @llvm.x86.avx512.mask.ucmp.q.128(<2 x i64> %a0, <2 x i64> %a1, i8 0, i8 %mask)
%vec0 = insertelement <8 x i8> undef, i8 %res0, i32 0
; CHECK: vpcmpltuq %xmm1, %xmm0, %k0 {%k1} ##
- %res1 = call i8 @llvm.x86.avx512.mask.ucmp.q.128(<2 x i64> %a0, <2 x i64> %a1, i32 1, i8 %mask)
+ %res1 = call i8 @llvm.x86.avx512.mask.ucmp.q.128(<2 x i64> %a0, <2 x i64> %a1, i8 1, i8 %mask)
%vec1 = insertelement <8 x i8> %vec0, i8 %res1, i32 1
; CHECK: vpcmpleuq %xmm1, %xmm0, %k0 {%k1} ##
- %res2 = call i8 @llvm.x86.avx512.mask.ucmp.q.128(<2 x i64> %a0, <2 x i64> %a1, i32 2, i8 %mask)
+ %res2 = call i8 @llvm.x86.avx512.mask.ucmp.q.128(<2 x i64> %a0, <2 x i64> %a1, i8 2, i8 %mask)
%vec2 = insertelement <8 x i8> %vec1, i8 %res2, i32 2
; CHECK: vpcmpunorduq %xmm1, %xmm0, %k0 {%k1} ##
- %res3 = call i8 @llvm.x86.avx512.mask.ucmp.q.128(<2 x i64> %a0, <2 x i64> %a1, i32 3, i8 %mask)
+ %res3 = call i8 @llvm.x86.avx512.mask.ucmp.q.128(<2 x i64> %a0, <2 x i64> %a1, i8 3, i8 %mask)
%vec3 = insertelement <8 x i8> %vec2, i8 %res3, i32 3
; CHECK: vpcmpnequq %xmm1, %xmm0, %k0 {%k1} ##
- %res4 = call i8 @llvm.x86.avx512.mask.ucmp.q.128(<2 x i64> %a0, <2 x i64> %a1, i32 4, i8 %mask)
+ %res4 = call i8 @llvm.x86.avx512.mask.ucmp.q.128(<2 x i64> %a0, <2 x i64> %a1, i8 4, i8 %mask)
%vec4 = insertelement <8 x i8> %vec3, i8 %res4, i32 4
; CHECK: vpcmpnltuq %xmm1, %xmm0, %k0 {%k1} ##
- %res5 = call i8 @llvm.x86.avx512.mask.ucmp.q.128(<2 x i64> %a0, <2 x i64> %a1, i32 5, i8 %mask)
+ %res5 = call i8 @llvm.x86.avx512.mask.ucmp.q.128(<2 x i64> %a0, <2 x i64> %a1, i8 5, i8 %mask)
%vec5 = insertelement <8 x i8> %vec4, i8 %res5, i32 5
; CHECK: vpcmpnleuq %xmm1, %xmm0, %k0 {%k1} ##
- %res6 = call i8 @llvm.x86.avx512.mask.ucmp.q.128(<2 x i64> %a0, <2 x i64> %a1, i32 6, i8 %mask)
+ %res6 = call i8 @llvm.x86.avx512.mask.ucmp.q.128(<2 x i64> %a0, <2 x i64> %a1, i8 6, i8 %mask)
%vec6 = insertelement <8 x i8> %vec5, i8 %res6, i32 6
; CHECK: vpcmporduq %xmm1, %xmm0, %k0 {%k1} ##
- %res7 = call i8 @llvm.x86.avx512.mask.ucmp.q.128(<2 x i64> %a0, <2 x i64> %a1, i32 7, i8 %mask)
+ %res7 = call i8 @llvm.x86.avx512.mask.ucmp.q.128(<2 x i64> %a0, <2 x i64> %a1, i8 7, i8 %mask)
%vec7 = insertelement <8 x i8> %vec6, i8 %res7, i32 7
ret <8 x i8> %vec7
}
-declare i8 @llvm.x86.avx512.mask.ucmp.q.128(<2 x i64>, <2 x i64>, i32, i8) nounwind readnone
+declare i8 @llvm.x86.avx512.mask.ucmp.q.128(<2 x i64>, <2 x i64>, i8, i8) nounwind readnone
+
+; CHECK-LABEL: compr1
+; CHECK: vcompresspd %zmm0
+define void @compr1(i8* %addr, <8 x double> %data, i8 %mask) {
+ call void @llvm.x86.avx512.mask.compress.store.pd.512(i8* %addr, <8 x double> %data, i8 %mask)
+ ret void
+}
+
+declare void @llvm.x86.avx512.mask.compress.store.pd.512(i8* %addr, <8 x double> %data, i8 %mask)
+
+; CHECK-LABEL: compr2
+; CHECK: vcompresspd %ymm0
+define void @compr2(i8* %addr, <4 x double> %data, i8 %mask) {
+ call void @llvm.x86.avx512.mask.compress.store.pd.256(i8* %addr, <4 x double> %data, i8 %mask)
+ ret void
+}
+
+declare void @llvm.x86.avx512.mask.compress.store.pd.256(i8* %addr, <4 x double> %data, i8 %mask)
+
+; CHECK-LABEL: compr3
+; CHECK: vcompressps %xmm0
+define void @compr3(i8* %addr, <4 x float> %data, i8 %mask) {
+ call void @llvm.x86.avx512.mask.compress.store.ps.128(i8* %addr, <4 x float> %data, i8 %mask)
+ ret void
+}
+
+declare void @llvm.x86.avx512.mask.compress.store.ps.128(i8* %addr, <4 x float> %data, i8 %mask)
+
+; CHECK-LABEL: compr4
+; CHECK: vcompresspd %zmm0, %zmm0 {%k1} {z} ## encoding: [0x62,0xf2,0xfd,0xc9,0x8a,0xc0]
+define <8 x double> @compr4(i8* %addr, <8 x double> %data, i8 %mask) {
+ %res = call <8 x double> @llvm.x86.avx512.mask.compress.pd.512(<8 x double> %data, <8 x double> zeroinitializer, i8 %mask)
+ ret <8 x double> %res
+}
+
+declare <8 x double> @llvm.x86.avx512.mask.compress.pd.512(<8 x double> %data, <8 x double> %src0, i8 %mask)
+
+; CHECK-LABEL: compr5
+; CHECK: vcompresspd %ymm0, %ymm1 {%k1} ## encoding: [0x62,0xf2,0xfd,0x29,0x8a,0xc1]
+define <4 x double> @compr5(<4 x double> %data, <4 x double> %src0, i8 %mask) {
+ %res = call <4 x double> @llvm.x86.avx512.mask.compress.pd.256(<4 x double> %data, <4 x double> %src0, i8 %mask)
+ ret <4 x double> %res
+}
+
+declare <4 x double> @llvm.x86.avx512.mask.compress.pd.256(<4 x double> %data, <4 x double> %src0, i8 %mask)
+
+; CHECK-LABEL: compr6
+; CHECK: vcompressps %xmm0
+define <4 x float> @compr6(<4 x float> %data, i8 %mask) {
+ %res = call <4 x float> @llvm.x86.avx512.mask.compress.ps.128(<4 x float> %data, <4 x float> zeroinitializer, i8 %mask)
+ ret <4 x float> %res
+}
+
+declare <4 x float> @llvm.x86.avx512.mask.compress.ps.128(<4 x float> %data, <4 x float> %src0, i8 %mask)
+
+; CHECK-LABEL: compr7
+; CHECK-NOT: vcompress
+; CHECK: vmovapd
+define void @compr7(i8* %addr, <8 x double> %data) {
+ call void @llvm.x86.avx512.mask.compress.store.pd.512(i8* %addr, <8 x double> %data, i8 -1)
+ ret void
+}
+
+; CHECK-LABEL: compr8
+; CHECK-NOT: vcompressps %xmm0
+define <4 x float> @compr8(<4 x float> %data) {
+ %res = call <4 x float> @llvm.x86.avx512.mask.compress.ps.128(<4 x float> %data, <4 x float> zeroinitializer, i8 -1)
+ ret <4 x float> %res
+}
+
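+; compr7/compr8 check the degenerate case: with an all-ones mask (i8 -1) the
+; compress is a no-op, so no vcompress* instruction should be emitted; the
+; store case is expected to lower to a plain vmovapd, and the register case
+; to fold away entirely.
+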
+; CHECK-LABEL: compr9
+; CHECK: vpcompressq %zmm0, (%rdi) {%k1} ## encoding: [0x62,0xf2,0xfd,0x49,0x8b,0x07]
+define void @compr9(i8* %addr, <8 x i64> %data, i8 %mask) {
+ call void @llvm.x86.avx512.mask.compress.store.q.512(i8* %addr, <8 x i64> %data, i8 %mask)
+ ret void
+}
+
+declare void @llvm.x86.avx512.mask.compress.store.q.512(i8* %addr, <8 x i64> %data, i8 %mask)
+
+; CHECK-LABEL: compr10
+; CHECK: vpcompressd %xmm0, %xmm0 {%k1} {z} ## encoding: [0x62,0xf2,0x7d,0x89,0x8b,0xc0]
+define <4 x i32> @compr10(<4 x i32> %data, i8 %mask) {
+ %res = call <4 x i32> @llvm.x86.avx512.mask.compress.d.128(<4 x i32> %data, <4 x i32> zeroinitializer, i8 %mask)
+ ret <4 x i32> %res
+}
+
+declare <4 x i32> @llvm.x86.avx512.mask.compress.d.128(<4 x i32> %data, <4 x i32> %src0, i8 %mask)
+
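+; The compress intrinsics above pack the elements selected by the mask into
+; contiguous low positions (to memory for the .store. variants, to a register
+; otherwise); expand, tested below, performs the inverse unpacking.
+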
+; Expand
+
+; CHECK-LABEL: expand1
+; CHECK: vexpandpd (%rdi), %zmm0 {%k1} ## encoding: [0x62,0xf2,0xfd,0x49,0x88,0x07]
+define <8 x double> @expand1(i8* %addr, <8 x double> %data, i8 %mask) {
+ %res = call <8 x double> @llvm.x86.avx512.mask.expand.load.pd.512(i8* %addr, <8 x double> %data, i8 %mask)
+ ret <8 x double> %res
+}
+
+declare <8 x double> @llvm.x86.avx512.mask.expand.load.pd.512(i8* %addr, <8 x double> %data, i8 %mask)
+
+; CHECK-LABEL: expand2
+; CHECK: vexpandpd (%rdi), %ymm0 {%k1} ## encoding: [0x62,0xf2,0xfd,0x29,0x88,0x07]
+define <4 x double> @expand2(i8* %addr, <4 x double> %data, i8 %mask) {
+ %res = call <4 x double> @llvm.x86.avx512.mask.expand.load.pd.256(i8* %addr, <4 x double> %data, i8 %mask)
+ ret <4 x double> %res
+}
+
+declare <4 x double> @llvm.x86.avx512.mask.expand.load.pd.256(i8* %addr, <4 x double> %data, i8 %mask)
+
+; CHECK-LABEL: expand3
+; CHECK: vexpandps (%rdi), %xmm0 {%k1} ## encoding: [0x62,0xf2,0x7d,0x09,0x88,0x07]
+define <4 x float> @expand3(i8* %addr, <4 x float> %data, i8 %mask) {
+ %res = call <4 x float> @llvm.x86.avx512.mask.expand.load.ps.128(i8* %addr, <4 x float> %data, i8 %mask)
+ ret <4 x float> %res
+}
+
+declare <4 x float> @llvm.x86.avx512.mask.expand.load.ps.128(i8* %addr, <4 x float> %data, i8 %mask)
+
+; CHECK-LABEL: expand4
+; CHECK: vexpandpd %zmm0, %zmm0 {%k1} {z} ## encoding: [0x62,0xf2,0xfd,0xc9,0x88,0xc0]
+define <8 x double> @expand4(i8* %addr, <8 x double> %data, i8 %mask) {
+ %res = call <8 x double> @llvm.x86.avx512.mask.expand.pd.512(<8 x double> %data, <8 x double> zeroinitializer, i8 %mask)
+ ret <8 x double> %res
+}
+
+declare <8 x double> @llvm.x86.avx512.mask.expand.pd.512(<8 x double> %data, <8 x double> %src0, i8 %mask)
+
+; CHECK-LABEL: expand5
+; CHECK: vexpandpd %ymm0, %ymm1 {%k1} ## encoding: [0x62,0xf2,0xfd,0x29,0x88,0xc8]
+define <4 x double> @expand5(<4 x double> %data, <4 x double> %src0, i8 %mask) {
+ %res = call <4 x double> @llvm.x86.avx512.mask.expand.pd.256( <4 x double> %data, <4 x double> %src0, i8 %mask)
+ ret <4 x double> %res
+}
+
+declare <4 x double> @llvm.x86.avx512.mask.expand.pd.256(<4 x double> %data, <4 x double> %src0, i8 %mask)
+
+; CHECK-LABEL: expand6
+; CHECK: vexpandps %xmm0
+define <4 x float> @expand6(<4 x float> %data, i8 %mask) {
+ %res = call <4 x float> @llvm.x86.avx512.mask.expand.ps.128(<4 x float> %data, <4 x float>zeroinitializer, i8 %mask)
+ ret <4 x float> %res
+}
+
+declare <4 x float> @llvm.x86.avx512.mask.expand.ps.128(<4 x float> %data, <4 x float> %src0, i8 %mask)
+
+; CHECK-LABEL: expand7
+; CHECK-NOT: vexpand
+; CHECK: vmovapd
+define <8 x double> @expand7(i8* %addr, <8 x double> %data) {
+ %res = call <8 x double> @llvm.x86.avx512.mask.expand.load.pd.512(i8* %addr, <8 x double> %data, i8 -1)
+ ret <8 x double> %res
+}
+
+; CHECK-LABEL: expand8
+; CHECK-NOT: vexpandps %xmm0
+define <4 x float> @expand8(<4 x float> %data) {
+ %res = call <4 x float> @llvm.x86.avx512.mask.expand.ps.128(<4 x float> %data, <4 x float>zeroinitializer, i8 -1)
+ ret <4 x float> %res
+}
+
+; CHECK-LABEL: expand9
+; CHECK: vpexpandq (%rdi), %zmm0 {%k1} ## encoding: [0x62,0xf2,0xfd,0x49,0x89,0x07]
+define <8 x i64> @expand9(i8* %addr, <8 x i64> %data, i8 %mask) {
+ %res = call <8 x i64> @llvm.x86.avx512.mask.expand.load.q.512(i8* %addr, <8 x i64> %data, i8 %mask)
+ ret <8 x i64> %res
+}
+
+declare <8 x i64> @llvm.x86.avx512.mask.expand.load.q.512(i8* %addr, <8 x i64> %data, i8 %mask)
+
+; CHECK-LABEL: expand10
+; CHECK: vpexpandd %xmm0, %xmm0 {%k1} {z} ## encoding: [0x62,0xf2,0x7d,0x89,0x89,0xc0]
+define <4 x i32> @expand10(<4 x i32> %data, i8 %mask) {
+ %res = call <4 x i32> @llvm.x86.avx512.mask.expand.d.128(<4 x i32> %data, <4 x i32>zeroinitializer, i8 %mask)
+ ret <4 x i32> %res
+}
+
+declare <4 x i32> @llvm.x86.avx512.mask.expand.d.128(<4 x i32> %data, <4 x i32> %src0, i8 %mask)
+
+define <8 x float> @test_x86_mask_blend_ps_256(i8 %a0, <8 x float> %a1, <8 x float> %a2) {
+ ; CHECK: vblendmps %ymm1, %ymm0
+ %res = call <8 x float> @llvm.x86.avx512.mask.blend.ps.256(<8 x float> %a1, <8 x float> %a2, i8 %a0) ; <<8 x float>> [#uses=1]
+ ret <8 x float> %res
+}
+
+declare <8 x float> @llvm.x86.avx512.mask.blend.ps.256(<8 x float>, <8 x float>, i8) nounwind readonly
+
+define <4 x double> @test_x86_mask_blend_pd_256(i8 %a0, <4 x double> %a1, <4 x double> %a2) {
+ ; CHECK: vblendmpd %ymm1, %ymm0
+ %res = call <4 x double> @llvm.x86.avx512.mask.blend.pd.256(<4 x double> %a1, <4 x double> %a2, i8 %a0) ; <<4 x double>> [#uses=1]
+ ret <4 x double> %res
+}
+
+define <4 x double> @test_x86_mask_blend_pd_256_memop(<4 x double> %a, <4 x double>* %ptr, i8 %mask) {
+ ; CHECK-LABEL: test_x86_mask_blend_pd_256_memop
+ ; CHECK: vblendmpd (%
+ %b = load <4 x double>* %ptr
+ %res = call <4 x double> @llvm.x86.avx512.mask.blend.pd.256(<4 x double> %a, <4 x double> %b, i8 %mask) ; <<4 x double>> [#uses=1]
+ ret <4 x double> %res
+}
+declare <4 x double> @llvm.x86.avx512.mask.blend.pd.256(<4 x double>, <4 x double>, i8) nounwind readonly
+
+; CHECK-LABEL: test_x86_mask_blend_d_256
+; CHECK: vpblendmd
+define <8 x i32> @test_x86_mask_blend_d_256(i8 %a0, <8 x i32> %a1, <8 x i32> %a2) {
+ %res = call <8 x i32> @llvm.x86.avx512.mask.blend.d.256(<8 x i32> %a1, <8 x i32> %a2, i8 %a0) ; <<8 x i32>> [#uses=1]
+ ret <8 x i32> %res
+}
+declare <8 x i32> @llvm.x86.avx512.mask.blend.d.256(<8 x i32>, <8 x i32>, i8) nounwind readonly
+
+define <4 x i64> @test_x86_mask_blend_q_256(i8 %a0, <4 x i64> %a1, <4 x i64> %a2) {
+ ; CHECK: vpblendmq
+ %res = call <4 x i64> @llvm.x86.avx512.mask.blend.q.256(<4 x i64> %a1, <4 x i64> %a2, i8 %a0) ; <<4 x i64>> [#uses=1]
+ ret <4 x i64> %res
+}
+declare <4 x i64> @llvm.x86.avx512.mask.blend.q.256(<4 x i64>, <4 x i64>, i8) nounwind readonly
+
+define <4 x float> @test_x86_mask_blend_ps_128(i8 %a0, <4 x float> %a1, <4 x float> %a2) {
+ ; CHECK: vblendmps %xmm1, %xmm0
+ %res = call <4 x float> @llvm.x86.avx512.mask.blend.ps.128(<4 x float> %a1, <4 x float> %a2, i8 %a0) ; <<4 x float>> [#uses=1]
+ ret <4 x float> %res
+}
+
+declare <4 x float> @llvm.x86.avx512.mask.blend.ps.128(<4 x float>, <4 x float>, i8) nounwind readonly
+
+define <2 x double> @test_x86_mask_blend_pd_128(i8 %a0, <2 x double> %a1, <2 x double> %a2) {
+ ; CHECK: vblendmpd %xmm1, %xmm0
+ %res = call <2 x double> @llvm.x86.avx512.mask.blend.pd.128(<2 x double> %a1, <2 x double> %a2, i8 %a0) ; <<2 x double>> [#uses=1]
+ ret <2 x double> %res
+}
+
+define <2 x double> @test_x86_mask_blend_pd_128_memop(<2 x double> %a, <2 x double>* %ptr, i8 %mask) {
+ ; CHECK-LABEL: test_x86_mask_blend_pd_128_memop
+ ; CHECK: vblendmpd (%
+ %b = load <2 x double>* %ptr
+ %res = call <2 x double> @llvm.x86.avx512.mask.blend.pd.128(<2 x double> %a, <2 x double> %b, i8 %mask) ; <<2 x double>> [#uses=1]
+ ret <2 x double> %res
+}
+declare <2 x double> @llvm.x86.avx512.mask.blend.pd.128(<2 x double>, <2 x double>, i8) nounwind readonly
+
+define <4 x i32> @test_x86_mask_blend_d_128(i8 %a0, <4 x i32> %a1, <4 x i32> %a2) {
+ ; CHECK: vpblendmd
+ %res = call <4 x i32> @llvm.x86.avx512.mask.blend.d.128(<4 x i32> %a1, <4 x i32> %a2, i8 %a0) ; <<4 x i32>> [#uses=1]
+ ret <4 x i32> %res
+}
+declare <4 x i32> @llvm.x86.avx512.mask.blend.d.128(<4 x i32>, <4 x i32>, i8) nounwind readonly
+
+define <2 x i64> @test_x86_mask_blend_q_128(i8 %a0, <2 x i64> %a1, <2 x i64> %a2) {
+ ; CHECK: vpblendmq
+ %res = call <2 x i64> @llvm.x86.avx512.mask.blend.q.128(<2 x i64> %a1, <2 x i64> %a2, i8 %a0) ; <<2 x i64>> [#uses=1]
+ ret <2 x i64> %res
+}
+declare <2 x i64> @llvm.x86.avx512.mask.blend.q.128(<2 x i64>, <2 x i64>, i8) nounwind readonly
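For readers coming to these intrinsics cold, the store/load semantics under test can be summarized in scalar C. This is a sketch of the behavior, not a real API; the function names are illustrative:

#include <stddef.h>

/* Sketch of a vcompresspd-style masked compress-store: lanes whose mask
   bit is set are packed contiguously into dst; disabled lanes are skipped. */
static void compress_store_pd(double *dst, const double *src,
                              unsigned char mask, size_t lanes) {
    size_t j = 0;
    for (size_t i = 0; i < lanes; ++i)
        if (mask & (1u << i))
            dst[j++] = src[i];
}

/* Sketch of a vexpandpd-style expand-load, the inverse operation:
   consecutive source elements are scattered to the enabled lanes. */
static void expand_load_pd(double *dst, const double *src,
                           unsigned char mask, size_t lanes) {
    size_t j = 0;
    for (size_t i = 0; i < lanes; ++i)
        if (mask & (1u << i))
            dst[i] = src[j++];
}

With an all-ones mask both operations degenerate into a plain move, which is why compr7/compr8 and expand7/expand8 above check that no vcompress/vexpand instruction is emitted at all.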
diff --git a/test/CodeGen/X86/avx512vl-logic.ll b/test/CodeGen/X86/avx512vl-logic.ll
new file mode 100644
index 0000000..02cb8f9
--- /dev/null
+++ b/test/CodeGen/X86/avx512vl-logic.ll
@@ -0,0 +1,137 @@
+; RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=knl -mattr=+avx512vl | FileCheck %s
+
+; 256-bit
+
+; CHECK-LABEL: vpandd256
+; CHECK: vpandd %ymm
+; CHECK: ret
+define <8 x i32> @vpandd256(<8 x i32> %a, <8 x i32> %b) nounwind uwtable readnone ssp {
+entry:
+ ; Force the execution domain with an add.
+ %a2 = add <8 x i32> %a, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
+ %x = and <8 x i32> %a2, %b
+ ret <8 x i32> %x
+}
+
+; CHECK-LABEL: vpord256
+; CHECK: vpord %ymm
+; CHECK: ret
+define <8 x i32> @vpord256(<8 x i32> %a, <8 x i32> %b) nounwind uwtable readnone ssp {
+entry:
+ ; Force the execution domain with an add.
+ %a2 = add <8 x i32> %a, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
+ %x = or <8 x i32> %a2, %b
+ ret <8 x i32> %x
+}
+
+; CHECK-LABEL: vpxord256
+; CHECK: vpxord %ymm
+; CHECK: ret
+define <8 x i32> @vpxord256(<8 x i32> %a, <8 x i32> %b) nounwind uwtable readnone ssp {
+entry:
+ ; Force the execution domain with an add.
+ %a2 = add <8 x i32> %a, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
+ %x = xor <8 x i32> %a2, %b
+ ret <8 x i32> %x
+}
+
+; CHECK-LABEL: vpandq256
+; CHECK: vpandq %ymm
+; CHECK: ret
+define <4 x i64> @vpandq256(<4 x i64> %a, <4 x i64> %b) nounwind uwtable readnone ssp {
+entry:
+ ; Force the execution domain with an add.
+ %a2 = add <4 x i64> %a, <i64 1, i64 1, i64 1, i64 1>
+ %x = and <4 x i64> %a2, %b
+ ret <4 x i64> %x
+}
+
+; CHECK-LABEL: vporq256
+; CHECK: vporq %ymm
+; CHECK: ret
+define <4 x i64> @vporq256(<4 x i64> %a, <4 x i64> %b) nounwind uwtable readnone ssp {
+entry:
+ ; Force the execution domain with an add.
+ %a2 = add <4 x i64> %a, <i64 1, i64 1, i64 1, i64 1>
+ %x = or <4 x i64> %a2, %b
+ ret <4 x i64> %x
+}
+
+; CHECK-LABEL: vpxorq256
+; CHECK: vpxorq %ymm
+; CHECK: ret
+define <4 x i64> @vpxorq256(<4 x i64> %a, <4 x i64> %b) nounwind uwtable readnone ssp {
+entry:
+ ; Force the execution domain with an add.
+ %a2 = add <4 x i64> %a, <i64 1, i64 1, i64 1, i64 1>
+ %x = xor <4 x i64> %a2, %b
+ ret <4 x i64> %x
+}
+
+; 128-bit
+
+; CHECK-LABEL: vpandd128
+; CHECK: vpandd %xmm
+; CHECK: ret
+define <4 x i32> @vpandd128(<4 x i32> %a, <4 x i32> %b) nounwind uwtable readnone ssp {
+entry:
+ ; Force the execution domain with an add.
+ %a2 = add <4 x i32> %a, <i32 1, i32 1, i32 1, i32 1>
+ %x = and <4 x i32> %a2, %b
+ ret <4 x i32> %x
+}
+
+; CHECK-LABEL: vpord128
+; CHECK: vpord %xmm
+; CHECK: ret
+define <4 x i32> @vpord128(<4 x i32> %a, <4 x i32> %b) nounwind uwtable readnone ssp {
+entry:
+ ; Force the execution domain with an add.
+ %a2 = add <4 x i32> %a, <i32 1, i32 1, i32 1, i32 1>
+ %x = or <4 x i32> %a2, %b
+ ret <4 x i32> %x
+}
+
+; CHECK-LABEL: vpxord128
+; CHECK: vpxord %xmm
+; CHECK: ret
+define <4 x i32> @vpxord128(<4 x i32> %a, <4 x i32> %b) nounwind uwtable readnone ssp {
+entry:
+ ; Force the execution domain with an add.
+ %a2 = add <4 x i32> %a, <i32 1, i32 1, i32 1, i32 1>
+ %x = xor <4 x i32> %a2, %b
+ ret <4 x i32> %x
+}
+
+; CHECK-LABEL: vpandq128
+; CHECK: vpandq %xmm
+; CHECK: ret
+define <2 x i64> @vpandq128(<2 x i64> %a, <2 x i64> %b) nounwind uwtable readnone ssp {
+entry:
+ ; Force the execution domain with an add.
+ %a2 = add <2 x i64> %a, <i64 1, i64 1>
+ %x = and <2 x i64> %a2, %b
+ ret <2 x i64> %x
+}
+
+; CHECK-LABEL: vporq128
+; CHECK: vporq %xmm
+; CHECK: ret
+define <2 x i64> @vporq128(<2 x i64> %a, <2 x i64> %b) nounwind uwtable readnone ssp {
+entry:
+ ; Force the execution domain with an add.
+ %a2 = add <2 x i64> %a, <i64 1, i64 1>
+ %x = or <2 x i64> %a2, %b
+ ret <2 x i64> %x
+}
+
+; CHECK-LABEL: vpxorq128
+; CHECK: vpxorq %xmm
+; CHECK: ret
+define <2 x i64> @vpxorq128(<2 x i64> %a, <2 x i64> %b) nounwind uwtable readnone ssp {
+entry:
+ ; Force the execution domain with an add.
+ %a2 = add <2 x i64> %a, <i64 1, i64 1>
+ %x = xor <2 x i64> %a2, %b
+ ret <2 x i64> %x
+}
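A note on the "force the execution domain" comments in this file: AVX-512VL can encode a bitwise operation in either the floating-point domain (vandps) or the integer domain (vpandd/vpandq), and the preceding integer add pins the operands to the integer domain so the test can reliably check for the vp* form. A rough C sketch of vpandd256 using GCC/Clang vector extensions (assuming compilation with -mavx512vl):

/* The add is an integer-domain op, so the following AND should be
   selected as vpandd rather than vandps. */
typedef int v8si __attribute__((vector_size(32)));

v8si vpandd256(v8si a, v8si b) {
    v8si a2 = a + 1;
    return a2 & b;
}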
diff --git a/test/CodeGen/X86/avx512vl-nontemporal.ll b/test/CodeGen/X86/avx512vl-nontemporal.ll
index 2ad9768..fdafb35 100644
--- a/test/CodeGen/X86/avx512vl-nontemporal.ll
+++ b/test/CodeGen/X86/avx512vl-nontemporal.ll
@@ -31,4 +31,4 @@ define void @f128(<4 x float> %A, <4 x float> %AA, i8* %B, <2 x double> %C, <2 x
store <2 x double> %C2, <2 x double>* %cast2, align 64, !nontemporal !0
ret void
}
-!0 = metadata !{i32 1}
+!0 = !{i32 1}
diff --git a/test/CodeGen/X86/avx512vl-vec-cmp.ll b/test/CodeGen/X86/avx512vl-vec-cmp.ll
index 9c64c03..b6b5085 100644
--- a/test/CodeGen/X86/avx512vl-vec-cmp.ll
+++ b/test/CodeGen/X86/avx512vl-vec-cmp.ll
@@ -14,9 +14,9 @@ define <4 x i64> @test256_1(<4 x i64> %x, <4 x i64> %y) nounwind {
; CHECK: vpcmpgtq {{.*%k[0-7]}}
; CHECK: vmovdqa64 {{.*}}%k1
; CHECK: ret
-define <4 x i64> @test256_2(<4 x i64> %x, <4 x i64> %y) nounwind {
+define <4 x i64> @test256_2(<4 x i64> %x, <4 x i64> %y, <4 x i64> %x1) nounwind {
%mask = icmp sgt <4 x i64> %x, %y
- %max = select <4 x i1> %mask, <4 x i64> %x, <4 x i64> %y
+ %max = select <4 x i1> %mask, <4 x i64> %x1, <4 x i64> %y
ret <4 x i64> %max
}
@@ -34,9 +34,9 @@ define <8 x i32> @test256_3(<8 x i32> %x, <8 x i32> %y, <8 x i32> %x1) nounwind
; CHECK: vpcmpnleuq {{.*%k[0-7]}}
; CHECK: vmovdqa64 {{.*}}%k1
; CHECK: ret
-define <4 x i64> @test256_4(<4 x i64> %x, <4 x i64> %y) nounwind {
+define <4 x i64> @test256_4(<4 x i64> %x, <4 x i64> %y, <4 x i64> %x1) nounwind {
%mask = icmp ugt <4 x i64> %x, %y
- %max = select <4 x i1> %mask, <4 x i64> %x, <4 x i64> %y
+ %max = select <4 x i1> %mask, <4 x i64> %x1, <4 x i64> %y
ret <4 x i64> %max
}
@@ -204,9 +204,9 @@ define <2 x i64> @test128_1(<2 x i64> %x, <2 x i64> %y) nounwind {
; CHECK: vpcmpgtq {{.*%k[0-7]}}
; CHECK: vmovdqa64 {{.*}}%k1
; CHECK: ret
-define <2 x i64> @test128_2(<2 x i64> %x, <2 x i64> %y) nounwind {
+define <2 x i64> @test128_2(<2 x i64> %x, <2 x i64> %y, <2 x i64> %x1) nounwind {
%mask = icmp sgt <2 x i64> %x, %y
- %max = select <2 x i1> %mask, <2 x i64> %x, <2 x i64> %y
+ %max = select <2 x i1> %mask, <2 x i64> %x1, <2 x i64> %y
ret <2 x i64> %max
}
@@ -224,9 +224,9 @@ define <4 x i32> @test128_3(<4 x i32> %x, <4 x i32> %y, <4 x i32> %x1) nounwind
; CHECK: vpcmpnleuq {{.*%k[0-7]}}
; CHECK: vmovdqa64 {{.*}}%k1
; CHECK: ret
-define <2 x i64> @test128_4(<2 x i64> %x, <2 x i64> %y) nounwind {
+define <2 x i64> @test128_4(<2 x i64> %x, <2 x i64> %y, <2 x i64> %x1) nounwind {
%mask = icmp ugt <2 x i64> %x, %y
- %max = select <2 x i1> %mask, <2 x i64> %x, <2 x i64> %y
+ %max = select <2 x i1> %mask, <2 x i64> %x1, <2 x i64> %y
ret <2 x i64> %max
}
diff --git a/test/CodeGen/X86/barrier.ll b/test/CodeGen/X86/barrier.ll
index 4769b39..1f60131 100644
--- a/test/CodeGen/X86/barrier.ll
+++ b/test/CodeGen/X86/barrier.ll
@@ -1,6 +1,7 @@
-; RUN: llc < %s -march=x86 -mattr=-sse2 | grep lock
+; RUN: llc < %s -march=x86 -mattr=-sse2 | FileCheck %s
define void @test() {
+; CHECK: lock
fence seq_cst
ret void
}
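The point of this test: with SSE2 disabled there is no mfence, so a seq_cst fence must be lowered to a lock-prefixed RMW (typically lock orl $0, (%esp)), which is what the new CHECK: lock pins down. The IR corresponds roughly to this C sketch:

#include <stdatomic.h>

/* On i386 without SSE2, a seq_cst fence cannot use mfence and is
   expected to become a lock-prefixed no-op RMW on the stack. */
void test(void) {
    atomic_thread_fence(memory_order_seq_cst);
}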
diff --git a/test/CodeGen/X86/bitcast-mmx.ll b/test/CodeGen/X86/bitcast-mmx.ll
new file mode 100644
index 0000000..de1cb5a
--- /dev/null
+++ b/test/CodeGen/X86/bitcast-mmx.ll
@@ -0,0 +1,77 @@
+; RUN: llc < %s -march=x86-64 -mattr=+mmx,+sse2 | FileCheck %s
+
+define i32 @t0(i64 %x) {
+; CHECK-LABEL: t0:
+; CHECK: # BB#0:{{.*}} %entry
+; CHECK: movd %[[REG1:[a-z]+]], %mm0
+; CHECK-NEXT: pshufw $238, %mm0, %mm0
+; CHECK-NEXT: movd %mm0, %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast i64 %x to <4 x i16>
+ %1 = bitcast <4 x i16> %0 to x86_mmx
+ %2 = tail call x86_mmx @llvm.x86.sse.pshuf.w(x86_mmx %1, i8 -18)
+ %3 = bitcast x86_mmx %2 to <4 x i16>
+ %4 = bitcast <4 x i16> %3 to <1 x i64>
+ %5 = extractelement <1 x i64> %4, i32 0
+ %6 = bitcast i64 %5 to <2 x i32>
+ %7 = extractelement <2 x i32> %6, i32 0
+ ret i32 %7
+}
+
+define i64 @t1(i64 %x, i32 %n) {
+; CHECK-LABEL: t1:
+; CHECK: # BB#0:{{.*}} %entry
+; CHECK: movd %[[REG2:[a-z]+]], %mm0
+; CHECK-NEXT: movd %[[REG1]], %mm1
+; CHECK-NEXT: psllq %mm0, %mm1
+; CHECK-NEXT: movd %mm1, %rax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast i64 %x to x86_mmx
+ %1 = tail call x86_mmx @llvm.x86.mmx.pslli.q(x86_mmx %0, i32 %n)
+ %2 = bitcast x86_mmx %1 to i64
+ ret i64 %2
+}
+
+define i64 @t2(i64 %x, i32 %n, i32 %w) {
+; CHECK-LABEL: t2:
+; CHECK: # BB#0:{{.*}} %entry
+; CHECK: movd %[[REG4:[a-z]+]], %mm0
+; CHECK-NEXT: movd %[[REG6:[a-z0-9]+]], %mm1
+; CHECK-NEXT: psllq %mm0, %mm1
+; CHECK-NEXT: movd %[[REG1]], %mm0
+; CHECK-NEXT: por %mm1, %mm0
+; CHECK-NEXT: movd %mm0, %rax
+; CHECK-NEXT: retq
+entry:
+ %0 = insertelement <2 x i32> undef, i32 %w, i32 0
+ %1 = insertelement <2 x i32> %0, i32 0, i32 1
+ %2 = bitcast <2 x i32> %1 to x86_mmx
+ %3 = tail call x86_mmx @llvm.x86.mmx.pslli.q(x86_mmx %2, i32 %n)
+ %4 = bitcast i64 %x to x86_mmx
+ %5 = tail call x86_mmx @llvm.x86.mmx.por(x86_mmx %4, x86_mmx %3)
+ %6 = bitcast x86_mmx %5 to i64
+ ret i64 %6
+}
+
+define i64 @t3(<1 x i64>* %y, i32* %n) {
+; CHECK-LABEL: t3:
+; CHECK: # BB#0:{{.*}} %entry
+; CHECK: movq (%[[REG1]]), %mm0
+; CHECK-NEXT: psllq (%[[REG3:[a-z]+]]), %mm0
+; CHECK-NEXT: movd %mm0, %rax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <1 x i64>* %y to x86_mmx*
+ %1 = load x86_mmx* %0, align 8
+ %2 = load i32* %n, align 4
+ %3 = tail call x86_mmx @llvm.x86.mmx.pslli.q(x86_mmx %1, i32 %2)
+ %4 = bitcast x86_mmx %3 to i64
+ ret i64 %4
+}
+
+declare x86_mmx @llvm.x86.sse.pshuf.w(x86_mmx, i8)
+declare x86_mmx @llvm.x86.mmx.pslli.q(x86_mmx, i32)
+declare x86_mmx @llvm.x86.mmx.por(x86_mmx, x86_mmx)
+
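The t0 case above, for example, corresponds to intrinsic-level C along these lines (a sketch; it assumes an x86-64 target where the 64-bit MMX conversion intrinsics are available):

#include <xmmintrin.h>   /* _mm_shuffle_pi16 (pshufw); pulls in mmintrin.h */

int t0(long long x) {
    __m64 v = _mm_cvtsi64_m64(x);         /* movd %rdi, %mm0  */
    __m64 s = _mm_shuffle_pi16(v, 0xEE);  /* pshufw $238      */
    return _mm_cvtsi64_si32(s);           /* movd %mm0, %eax  */
}

The test's point is that this whole round trip through x86_mmx stays in MMX registers instead of bouncing through memory.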
diff --git a/test/CodeGen/X86/block-placement.ll b/test/CodeGen/X86/block-placement.ll
index cc40bcf..e35be6a 100644
--- a/test/CodeGen/X86/block-placement.ll
+++ b/test/CodeGen/X86/block-placement.ll
@@ -124,7 +124,7 @@ exit:
ret i32 %sum
}
-!0 = metadata !{metadata !"branch_weights", i32 4, i32 64}
+!0 = !{!"branch_weights", i32 4, i32 64}
define i32 @test_loop_early_exits(i32 %i, i32* %a) {
; Check that we sink early exit blocks out of loop bodies.
@@ -506,7 +506,7 @@ if.end:
ret void
}
-!1 = metadata !{metadata !"branch_weights", i32 1000, i32 1}
+!1 = !{!"branch_weights", i32 1000, i32 1}
declare i32 @f()
declare i32 @g()
@@ -542,7 +542,7 @@ exit:
ret i32 %result
}
-!2 = metadata !{metadata !"branch_weights", i32 3, i32 1}
+!2 = !{!"branch_weights", i32 3, i32 1}
declare i32 @__gxx_personality_v0(...)
diff --git a/test/CodeGen/X86/break-avx-dep.ll b/test/CodeGen/X86/break-avx-dep.ll
deleted file mode 100644
index 210bda1..0000000
--- a/test/CodeGen/X86/break-avx-dep.ll
+++ /dev/null
@@ -1,29 +0,0 @@
-; RUN: llc < %s -march=x86-64 -mattr=+avx | FileCheck %s
-;
-; rdar:15221834 False AVX register dependencies cause 5x slowdown on
-; flops-6. Make sure the unused register read by vcvtsi2sdq is zeroed
-; to avoid cyclic dependence on a write to the same register in a
-; previous iteration.
-
-; CHECK-LABEL: t1:
-; CHECK-LABEL: %loop
-; CHECK: vxorps %[[REG:xmm.]], %{{xmm.}}, %{{xmm.}}
-; CHECK: vcvtsi2sdq %{{r[0-9a-x]+}}, %[[REG]], %{{xmm.}}
-define i64 @t1(i64* nocapture %x, double* nocapture %y) nounwind {
-entry:
- %vx = load i64* %x
- br label %loop
-loop:
- %i = phi i64 [ 1, %entry ], [ %inc, %loop ]
- %s1 = phi i64 [ %vx, %entry ], [ %s2, %loop ]
- %fi = sitofp i64 %i to double
- %vy = load double* %y
- %fipy = fadd double %fi, %vy
- %iipy = fptosi double %fipy to i64
- %s2 = add i64 %s1, %iipy
- %inc = add nsw i64 %i, 1
- %exitcond = icmp eq i64 %inc, 156250000
- br i1 %exitcond, label %ret, label %loop
-ret:
- ret i64 %s2
-}
diff --git a/test/CodeGen/X86/break-false-dep.ll b/test/CodeGen/X86/break-false-dep.ll
new file mode 100644
index 0000000..7034fae
--- /dev/null
+++ b/test/CodeGen/X86/break-false-dep.ll
@@ -0,0 +1,201 @@
+; RUN: llc < %s -mtriple=x86_64-linux -mattr=+sse2 -mcpu=nehalem | FileCheck %s --check-prefix=SSE
+; RUN: llc < %s -mtriple=x86_64-win32 -mattr=+sse2 -mcpu=nehalem | FileCheck %s --check-prefix=SSE
+; RUN: llc < %s -mtriple=x86_64-win32 -mattr=+avx -mcpu=corei7-avx | FileCheck %s --check-prefix=AVX
+
+define double @t1(float* nocapture %x) nounwind readonly ssp {
+entry:
+; SSE-LABEL: t1:
+; SSE: movss ([[A0:%rdi|%rcx]]), %xmm0
+; SSE: cvtss2sd %xmm0, %xmm0
+
+ %0 = load float* %x, align 4
+ %1 = fpext float %0 to double
+ ret double %1
+}
+
+define float @t2(double* nocapture %x) nounwind readonly ssp optsize {
+entry:
+; SSE-LABEL: t2:
+; SSE: cvtsd2ss ([[A0]]), %xmm0
+ %0 = load double* %x, align 8
+ %1 = fptrunc double %0 to float
+ ret float %1
+}
+
+define float @squirtf(float* %x) nounwind {
+entry:
+; SSE-LABEL: squirtf:
+; SSE: movss ([[A0]]), %xmm0
+; SSE: sqrtss %xmm0, %xmm0
+ %z = load float* %x
+ %t = call float @llvm.sqrt.f32(float %z)
+ ret float %t
+}
+
+define double @squirt(double* %x) nounwind {
+entry:
+; SSE-LABEL: squirt:
+; SSE: movsd ([[A0]]), %xmm0
+; SSE: sqrtsd %xmm0, %xmm0
+ %z = load double* %x
+ %t = call double @llvm.sqrt.f64(double %z)
+ ret double %t
+}
+
+define float @squirtf_size(float* %x) nounwind optsize {
+entry:
+; SSE-LABEL: squirtf_size:
+; SSE: sqrtss ([[A0]]), %xmm0
+ %z = load float* %x
+ %t = call float @llvm.sqrt.f32(float %z)
+ ret float %t
+}
+
+define double @squirt_size(double* %x) nounwind optsize {
+entry:
+; SSE-LABEL: squirt_size:
+; SSE: sqrtsd ([[A0]]), %xmm0
+ %z = load double* %x
+ %t = call double @llvm.sqrt.f64(double %z)
+ ret double %t
+}
+
+declare float @llvm.sqrt.f32(float)
+declare double @llvm.sqrt.f64(double)
+
+; SSE-LABEL: loopdep1
+; SSE: for.body
+;
+; This loop contains two cvtsi2ss instructions that update the same xmm
+; register. Verify that the execution dependency fix pass breaks those
+; dependencies by inserting xorps instructions.
+;
+; Even if the register allocator chooses different registers for the two
+; cvtsi2ss instructions, each one still carries a false dependence on the
+; previous value of its own destination.
+; SSE: xorps [[XMM1:%xmm[0-9]+]]
+; SSE: , [[XMM1]]
+; SSE: cvtsi2ssl %{{.*}}, [[XMM1]]
+; SSE: xorps [[XMM2:%xmm[0-9]+]]
+; SSE: , [[XMM2]]
+; SSE: cvtsi2ssl %{{.*}}, [[XMM2]]
+;
+define float @loopdep1(i32 %m) nounwind uwtable readnone ssp {
+entry:
+ %tobool3 = icmp eq i32 %m, 0
+ br i1 %tobool3, label %for.end, label %for.body
+
+for.body: ; preds = %entry, %for.body
+ %m.addr.07 = phi i32 [ %dec, %for.body ], [ %m, %entry ]
+ %s1.06 = phi float [ %add, %for.body ], [ 0.000000e+00, %entry ]
+ %s2.05 = phi float [ %add2, %for.body ], [ 0.000000e+00, %entry ]
+ %n.04 = phi i32 [ %inc, %for.body ], [ 1, %entry ]
+ %conv = sitofp i32 %n.04 to float
+ %add = fadd float %s1.06, %conv
+ %conv1 = sitofp i32 %m.addr.07 to float
+ %add2 = fadd float %s2.05, %conv1
+ %inc = add nsw i32 %n.04, 1
+ %dec = add nsw i32 %m.addr.07, -1
+ %tobool = icmp eq i32 %dec, 0
+ br i1 %tobool, label %for.end, label %for.body
+
+for.end: ; preds = %for.body, %entry
+ %s1.0.lcssa = phi float [ 0.000000e+00, %entry ], [ %add, %for.body ]
+ %s2.0.lcssa = phi float [ 0.000000e+00, %entry ], [ %add2, %for.body ]
+ %sub = fsub float %s1.0.lcssa, %s2.0.lcssa
+ ret float %sub
+}
+
+; rdar:15221834 False AVX register dependencies cause 5x slowdown on
+; flops-6. Make sure the unused register read by vcvtsi2sdq is zeroed
+; to avoid cyclic dependence on a write to the same register in a
+; previous iteration.
+
+; AVX-LABEL: loopdep2:
+; AVX-LABEL: %loop
+; AVX: vxorps %[[REG:xmm.]], %{{xmm.}}, %{{xmm.}}
+; AVX: vcvtsi2sdq %{{r[0-9a-x]+}}, %[[REG]], %{{xmm.}}
+; SSE-LABEL: loopdep2:
+; SSE-LABEL: %loop
+; SSE: xorps %[[REG:xmm.]], %[[REG]]
+; SSE: cvtsi2sdq %{{r[0-9a-x]+}}, %[[REG]]
+define i64 @loopdep2(i64* nocapture %x, double* nocapture %y) nounwind {
+entry:
+ %vx = load i64* %x
+ br label %loop
+loop:
+ %i = phi i64 [ 1, %entry ], [ %inc, %loop ]
+ %s1 = phi i64 [ %vx, %entry ], [ %s2, %loop ]
+ %fi = sitofp i64 %i to double
+ %vy = load double* %y
+ %fipy = fadd double %fi, %vy
+ %iipy = fptosi double %fipy to i64
+ %s2 = add i64 %s1, %iipy
+ %inc = add nsw i64 %i, 1
+ %exitcond = icmp eq i64 %inc, 156250000
+ br i1 %exitcond, label %ret, label %loop
+ret:
+ ret i64 %s2
+}
+
+; This loop contains a cvtsi2sd instruction that has a loop-carried
+; false dependency on an xmm register that is modified by other scalar
+; instructions that follow it in the loop. Additionally, the source of the
+; convert is a memory operand. Verify that the execution dependency fix
+; pass breaks this dependency by inserting an xor before the convert.
+@x = common global [1024 x double] zeroinitializer, align 16
+@y = common global [1024 x double] zeroinitializer, align 16
+@z = common global [1024 x double] zeroinitializer, align 16
+@w = common global [1024 x double] zeroinitializer, align 16
+@v = common global [1024 x i32] zeroinitializer, align 16
+
+define void @loopdep3() {
+entry:
+ br label %for.cond1.preheader
+
+for.cond1.preheader: ; preds = %for.inc14, %entry
+ %i.025 = phi i32 [ 0, %entry ], [ %inc15, %for.inc14 ]
+ br label %for.body3
+
+for.body3:
+ %indvars.iv = phi i64 [ 0, %for.cond1.preheader ], [ %indvars.iv.next, %for.body3 ]
+ %arrayidx = getelementptr inbounds [1024 x i32]* @v, i64 0, i64 %indvars.iv
+ %0 = load i32* %arrayidx, align 4
+ %conv = sitofp i32 %0 to double
+ %arrayidx5 = getelementptr inbounds [1024 x double]* @x, i64 0, i64 %indvars.iv
+ %1 = load double* %arrayidx5, align 8
+ %mul = fmul double %conv, %1
+ %arrayidx7 = getelementptr inbounds [1024 x double]* @y, i64 0, i64 %indvars.iv
+ %2 = load double* %arrayidx7, align 8
+ %mul8 = fmul double %mul, %2
+ %arrayidx10 = getelementptr inbounds [1024 x double]* @z, i64 0, i64 %indvars.iv
+ %3 = load double* %arrayidx10, align 8
+ %mul11 = fmul double %mul8, %3
+ %arrayidx13 = getelementptr inbounds [1024 x double]* @w, i64 0, i64 %indvars.iv
+ store double %mul11, double* %arrayidx13, align 8
+ %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
+ %exitcond = icmp eq i64 %indvars.iv.next, 1024
+ br i1 %exitcond, label %for.inc14, label %for.body3
+
+for.inc14: ; preds = %for.body3
+ %inc15 = add nsw i32 %i.025, 1
+ %exitcond26 = icmp eq i32 %inc15, 100000
+ br i1 %exitcond26, label %for.end16, label %for.cond1.preheader
+
+for.end16: ; preds = %for.inc14
+ ret void
+
+; SSE-LABEL: @loopdep3
+; SSE: xorps [[XMM0:%xmm[0-9]+]], [[XMM0]]
+; SSE-NEXT: cvtsi2sdl {{.*}}, [[XMM0]]
+; SSE-NEXT: mulsd {{.*}}, [[XMM0]]
+; SSE-NEXT: mulsd {{.*}}, [[XMM0]]
+; SSE-NEXT: mulsd {{.*}}, [[XMM0]]
+; SSE-NEXT: movsd [[XMM0]],
+; AVX-LABEL: @loopdep3
+; AVX: vxorps [[XMM0:%xmm[0-9]+]], [[XMM0]]
+; AVX-NEXT: vcvtsi2sdl {{.*}}, [[XMM0]], [[XMM0]]
+; AVX-NEXT: vmulsd {{.*}}, [[XMM0]], [[XMM0]]
+; AVX-NEXT: vmulsd {{.*}}, [[XMM0]], [[XMM0]]
+; AVX-NEXT: vmulsd {{.*}}, [[XMM0]], [[XMM0]]
+; AVX-NEXT: vmovsd [[XMM0]],
+}
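The loopdep2 kernel (moved here from the deleted break-avx-dep.ll) is the rdar 15221834 reproducer; in C it is roughly the sketch below. The cvtsi2sd writes only the low lane of its destination xmm, so without the inserted (v)xorps every iteration would carry a false dependence on the previous one:

long long loopdep2(long long *x, double *y) {
    long long s = *x;
    for (long long i = 1; i != 156250000; ++i)
        s += (long long)((double)i + *y);  /* cvtsi2sd feeds the add */
    return s;
}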
diff --git a/test/CodeGen/X86/break-sse-dep.ll b/test/CodeGen/X86/break-sse-dep.ll
deleted file mode 100644
index 8124d6f..0000000
--- a/test/CodeGen/X86/break-sse-dep.ll
+++ /dev/null
@@ -1,62 +0,0 @@
-; RUN: llc < %s -mtriple=x86_64-linux -mattr=+sse2 -mcpu=nehalem | FileCheck %s
-; RUN: llc < %s -mtriple=x86_64-win32 -mattr=+sse2 -mcpu=nehalem | FileCheck %s
-
-define double @t1(float* nocapture %x) nounwind readonly ssp {
-entry:
-; CHECK-LABEL: t1:
-; CHECK: movss ([[A0:%rdi|%rcx]]), %xmm0
-; CHECK: cvtss2sd %xmm0, %xmm0
-
- %0 = load float* %x, align 4
- %1 = fpext float %0 to double
- ret double %1
-}
-
-define float @t2(double* nocapture %x) nounwind readonly ssp optsize {
-entry:
-; CHECK-LABEL: t2:
-; CHECK: cvtsd2ss ([[A0]]), %xmm0
- %0 = load double* %x, align 8
- %1 = fptrunc double %0 to float
- ret float %1
-}
-
-define float @squirtf(float* %x) nounwind {
-entry:
-; CHECK-LABEL: squirtf:
-; CHECK: movss ([[A0]]), %xmm0
-; CHECK: sqrtss %xmm0, %xmm0
- %z = load float* %x
- %t = call float @llvm.sqrt.f32(float %z)
- ret float %t
-}
-
-define double @squirt(double* %x) nounwind {
-entry:
-; CHECK-LABEL: squirt:
-; CHECK: sqrtsd ([[A0]]), %xmm0
- %z = load double* %x
- %t = call double @llvm.sqrt.f64(double %z)
- ret double %t
-}
-
-define float @squirtf_size(float* %x) nounwind optsize {
-entry:
-; CHECK-LABEL: squirtf_size:
-; CHECK: sqrtss ([[A0]]), %xmm0
- %z = load float* %x
- %t = call float @llvm.sqrt.f32(float %z)
- ret float %t
-}
-
-define double @squirt_size(double* %x) nounwind optsize {
-entry:
-; CHECK-LABEL: squirt_size:
-; CHECK: sqrtsd ([[A0]]), %xmm0
- %z = load double* %x
- %t = call double @llvm.sqrt.f64(double %z)
- ret double %t
-}
-
-declare float @llvm.sqrt.f32(float)
-declare double @llvm.sqrt.f64(double)
diff --git a/test/CodeGen/X86/bswap-vector.ll b/test/CodeGen/X86/bswap-vector.ll
index 9dc960d..7d5f380 100644
--- a/test/CodeGen/X86/bswap-vector.ll
+++ b/test/CodeGen/X86/bswap-vector.ll
@@ -1,7 +1,8 @@
-; RUN: llc < %s -mcpu=x86-64 | FileCheck %s -check-prefix=CHECK-NOSSSE3
-; RUN: llc < %s -mcpu=core2 | FileCheck %s -check-prefix=CHECK-SSSE3
-; RUN: llc < %s -mcpu=core-avx2 | FileCheck %s -check-prefix=CHECK-AVX2
-; RUN: llc < %s -mcpu=core-avx2 -x86-experimental-vector-widening-legalization | FileCheck %s -check-prefix=CHECK-WIDE-AVX2
+; RUN: llc < %s -mcpu=x86-64 | FileCheck %s --check-prefix=CHECK-NOSSSE3
+; RUN: llc < %s -mcpu=core2 | FileCheck %s --check-prefix=CHECK-SSSE3
+; RUN: llc < %s -mcpu=core-avx2 | FileCheck %s --check-prefix=CHECK-AVX2
+; RUN: llc < %s -mcpu=core-avx2 -x86-experimental-vector-widening-legalization | FileCheck %s --check-prefix=CHECK-WIDE-AVX2
+
target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
target triple = "x86_64-unknown-linux-gnu"
@@ -9,165 +10,278 @@ declare <8 x i16> @llvm.bswap.v8i16(<8 x i16>)
declare <4 x i32> @llvm.bswap.v4i32(<4 x i32>)
declare <2 x i64> @llvm.bswap.v2i64(<2 x i64>)
-define <8 x i16> @test1(<8 x i16> %v) #0 {
+define <8 x i16> @test1(<8 x i16> %v) {
+; CHECK-NOSSSE3-LABEL: test1:
+; CHECK-NOSSSE3: # BB#0: # %entry
+; CHECK-NOSSSE3-NEXT: pxor %xmm1, %xmm1
+; CHECK-NOSSSE3-NEXT: movdqa %xmm0, %xmm2
+; CHECK-NOSSSE3-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm1[8],xmm2[9],xmm1[9],xmm2[10],xmm1[10],xmm2[11],xmm1[11],xmm2[12],xmm1[12],xmm2[13],xmm1[13],xmm2[14],xmm1[14],xmm2[15],xmm1[15]
+; CHECK-NOSSSE3-NEXT: pshuflw {{.*#+}} xmm2 = xmm2[1,0,3,2,4,5,6,7]
+; CHECK-NOSSSE3-NEXT: pshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,5,4,7,6]
+; CHECK-NOSSSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
+; CHECK-NOSSSE3-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[1,0,3,2,4,5,6,7]
+; CHECK-NOSSSE3-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,5,4,7,6]
+; CHECK-NOSSSE3-NEXT: packuswb %xmm2, %xmm0
+; CHECK-NOSSSE3-NEXT: retq
+;
+; CHECK-SSSE3-LABEL: test1:
+; CHECK-SSSE3: # BB#0: # %entry
+; CHECK-SSSE3-NEXT: pshufb {{.*#+}} xmm0 = xmm0[1,0,3,2,5,4,7,6,9,8,11,10,13,12,15,14]
+; CHECK-SSSE3-NEXT: retq
+;
+; CHECK-AVX2-LABEL: test1:
+; CHECK-AVX2: # BB#0: # %entry
+; CHECK-AVX2-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[1,0,3,2,5,4,7,6,9,8,11,10,13,12,15,14]
+; CHECK-AVX2-NEXT: retq
+;
+; CHECK-WIDE-AVX2-LABEL: test1:
+; CHECK-WIDE-AVX2: # BB#0: # %entry
+; CHECK-WIDE-AVX2-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[1,0,3,2,5,4,7,6,9,8,11,10,13,12,15,14]
+; CHECK-WIDE-AVX2-NEXT: retq
entry:
%r = call <8 x i16> @llvm.bswap.v8i16(<8 x i16> %v)
ret <8 x i16> %r
-
-; CHECK-NOSSSE3-LABEL: @test1
-; CHECK-NOSSSE3: rolw
-; CHECK-NOSSSE3: rolw
-; CHECK-NOSSSE3: rolw
-; CHECK-NOSSSE3: rolw
-; CHECK-NOSSSE3: rolw
-; CHECK-NOSSSE3: rolw
-; CHECK-NOSSSE3: rolw
-; CHECK-NOSSSE3: rolw
-; CHECK-NOSSSE3: retq
-
-; CHECK-SSSE3-LABEL: @test1
-; CHECK-SSSE3: pshufb
-; CHECK-SSSE3-NEXT: retq
-
-; CHECK-AVX2-LABEL: @test1
-; CHECK-AVX2: vpshufb
-; CHECK-AVX2-NEXT: retq
-
-; CHECK-WIDE-AVX2-LABEL: @test1
-; CHECK-WIDE-AVX2: vpshufb
-; CHECK-WIDE-AVX2-NEXT: retq
}
-define <4 x i32> @test2(<4 x i32> %v) #0 {
+define <4 x i32> @test2(<4 x i32> %v) {
+; CHECK-NOSSSE3-LABEL: test2:
+; CHECK-NOSSSE3: # BB#0: # %entry
+; CHECK-NOSSSE3-NEXT: pxor %xmm1, %xmm1
+; CHECK-NOSSSE3-NEXT: movdqa %xmm0, %xmm2
+; CHECK-NOSSSE3-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm1[8],xmm2[9],xmm1[9],xmm2[10],xmm1[10],xmm2[11],xmm1[11],xmm2[12],xmm1[12],xmm2[13],xmm1[13],xmm2[14],xmm1[14],xmm2[15],xmm1[15]
+; CHECK-NOSSSE3-NEXT: pshuflw {{.*#+}} xmm2 = xmm2[3,2,1,0,4,5,6,7]
+; CHECK-NOSSSE3-NEXT: pshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,7,6,5,4]
+; CHECK-NOSSSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
+; CHECK-NOSSSE3-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[3,2,1,0,4,5,6,7]
+; CHECK-NOSSSE3-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,7,6,5,4]
+; CHECK-NOSSSE3-NEXT: packuswb %xmm2, %xmm0
+; CHECK-NOSSSE3-NEXT: retq
+;
+; CHECK-SSSE3-LABEL: test2:
+; CHECK-SSSE3: # BB#0: # %entry
+; CHECK-SSSE3-NEXT: pshufb {{.*#+}} xmm0 = xmm0[3,2,1,0,7,6,5,4,11,10,9,8,15,14,13,12]
+; CHECK-SSSE3-NEXT: retq
+;
+; CHECK-AVX2-LABEL: test2:
+; CHECK-AVX2: # BB#0: # %entry
+; CHECK-AVX2-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[3,2,1,0,7,6,5,4,11,10,9,8,15,14,13,12]
+; CHECK-AVX2-NEXT: retq
+;
+; CHECK-WIDE-AVX2-LABEL: test2:
+; CHECK-WIDE-AVX2: # BB#0: # %entry
+; CHECK-WIDE-AVX2-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[3,2,1,0,7,6,5,4,11,10,9,8,15,14,13,12]
+; CHECK-WIDE-AVX2-NEXT: retq
entry:
%r = call <4 x i32> @llvm.bswap.v4i32(<4 x i32> %v)
ret <4 x i32> %r
-
-; CHECK-NOSSSE3-LABEL: @test2
-; CHECK-NOSSSE3: bswapl
-; CHECK-NOSSSE3: bswapl
-; CHECK-NOSSSE3: bswapl
-; CHECK-NOSSSE3: bswapl
-; CHECK-NOSSSE3: retq
-
-; CHECK-SSSE3-LABEL: @test2
-; CHECK-SSSE3: pshufb
-; CHECK-SSSE3-NEXT: retq
-
-; CHECK-AVX2-LABEL: @test2
-; CHECK-AVX2: vpshufb
-; CHECK-AVX2-NEXT: retq
-
-; CHECK-WIDE-AVX2-LABEL: @test2
-; CHECK-WIDE-AVX2: vpshufb
-; CHECK-WIDE-AVX2-NEXT: retq
}
-define <2 x i64> @test3(<2 x i64> %v) #0 {
+define <2 x i64> @test3(<2 x i64> %v) {
+; CHECK-NOSSSE3-LABEL: test3:
+; CHECK-NOSSSE3: # BB#0: # %entry
+; CHECK-NOSSSE3-NEXT: pxor %xmm1, %xmm1
+; CHECK-NOSSSE3-NEXT: movdqa %xmm0, %xmm2
+; CHECK-NOSSSE3-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm1[8],xmm2[9],xmm1[9],xmm2[10],xmm1[10],xmm2[11],xmm1[11],xmm2[12],xmm1[12],xmm2[13],xmm1[13],xmm2[14],xmm1[14],xmm2[15],xmm1[15]
+; CHECK-NOSSSE3-NEXT: pshufd {{.*#+}} xmm2 = xmm2[2,3,0,1]
+; CHECK-NOSSSE3-NEXT: pshuflw {{.*#+}} xmm2 = xmm2[3,2,1,0,4,5,6,7]
+; CHECK-NOSSSE3-NEXT: pshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,7,6,5,4]
+; CHECK-NOSSSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
+; CHECK-NOSSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
+; CHECK-NOSSSE3-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[3,2,1,0,4,5,6,7]
+; CHECK-NOSSSE3-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,7,6,5,4]
+; CHECK-NOSSSE3-NEXT: packuswb %xmm2, %xmm0
+; CHECK-NOSSSE3-NEXT: retq
+;
+; CHECK-SSSE3-LABEL: test3:
+; CHECK-SSSE3: # BB#0: # %entry
+; CHECK-SSSE3-NEXT: pshufb {{.*#+}} xmm0 = xmm0[7,6,5,4,3,2,1,0,15,14,13,12,11,10,9,8]
+; CHECK-SSSE3-NEXT: retq
+;
+; CHECK-AVX2-LABEL: test3:
+; CHECK-AVX2: # BB#0: # %entry
+; CHECK-AVX2-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[7,6,5,4,3,2,1,0,15,14,13,12,11,10,9,8]
+; CHECK-AVX2-NEXT: retq
+;
+; CHECK-WIDE-AVX2-LABEL: test3:
+; CHECK-WIDE-AVX2: # BB#0: # %entry
+; CHECK-WIDE-AVX2-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[7,6,5,4,3,2,1,0,15,14,13,12,11,10,9,8]
+; CHECK-WIDE-AVX2-NEXT: retq
entry:
%r = call <2 x i64> @llvm.bswap.v2i64(<2 x i64> %v)
ret <2 x i64> %r
-
-; CHECK-NOSSSE3-LABEL: @test3
-; CHECK-NOSSSE3: bswapq
-; CHECK-NOSSSE3: bswapq
-; CHECK-NOSSSE3: retq
-
-; CHECK-SSSE3-LABEL: @test3
-; CHECK-SSSE3: pshufb
-; CHECK-SSSE3-NEXT: retq
-
-; CHECK-AVX2-LABEL: @test3
-; CHECK-AVX2: vpshufb
-; CHECK-AVX2-NEXT: retq
-
-; CHECK-WIDE-AVX2-LABEL: @test3
-; CHECK-WIDE-AVX2: vpshufb
-; CHECK-WIDE-AVX2-NEXT: retq
}
declare <16 x i16> @llvm.bswap.v16i16(<16 x i16>)
declare <8 x i32> @llvm.bswap.v8i32(<8 x i32>)
declare <4 x i64> @llvm.bswap.v4i64(<4 x i64>)
-define <16 x i16> @test4(<16 x i16> %v) #0 {
+define <16 x i16> @test4(<16 x i16> %v) {
+; CHECK-NOSSSE3-LABEL: test4:
+; CHECK-NOSSSE3: # BB#0: # %entry
+; CHECK-NOSSSE3-NEXT: pxor %xmm2, %xmm2
+; CHECK-NOSSSE3-NEXT: movdqa %xmm0, %xmm3
+; CHECK-NOSSSE3-NEXT: punpckhbw {{.*#+}} xmm3 = xmm3[8],xmm2[8],xmm3[9],xmm2[9],xmm3[10],xmm2[10],xmm3[11],xmm2[11],xmm3[12],xmm2[12],xmm3[13],xmm2[13],xmm3[14],xmm2[14],xmm3[15],xmm2[15]
+; CHECK-NOSSSE3-NEXT: pshuflw {{.*#+}} xmm3 = xmm3[1,0,3,2,4,5,6,7]
+; CHECK-NOSSSE3-NEXT: pshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,5,4,7,6]
+; CHECK-NOSSSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3],xmm0[4],xmm2[4],xmm0[5],xmm2[5],xmm0[6],xmm2[6],xmm0[7],xmm2[7]
+; CHECK-NOSSSE3-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[1,0,3,2,4,5,6,7]
+; CHECK-NOSSSE3-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,5,4,7,6]
+; CHECK-NOSSSE3-NEXT: packuswb %xmm3, %xmm0
+; CHECK-NOSSSE3-NEXT: movdqa %xmm1, %xmm3
+; CHECK-NOSSSE3-NEXT: punpckhbw {{.*#+}} xmm3 = xmm3[8],xmm2[8],xmm3[9],xmm2[9],xmm3[10],xmm2[10],xmm3[11],xmm2[11],xmm3[12],xmm2[12],xmm3[13],xmm2[13],xmm3[14],xmm2[14],xmm3[15],xmm2[15]
+; CHECK-NOSSSE3-NEXT: pshuflw {{.*#+}} xmm3 = xmm3[1,0,3,2,4,5,6,7]
+; CHECK-NOSSSE3-NEXT: pshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,5,4,7,6]
+; CHECK-NOSSSE3-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3],xmm1[4],xmm2[4],xmm1[5],xmm2[5],xmm1[6],xmm2[6],xmm1[7],xmm2[7]
+; CHECK-NOSSSE3-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[1,0,3,2,4,5,6,7]
+; CHECK-NOSSSE3-NEXT: pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,5,4,7,6]
+; CHECK-NOSSSE3-NEXT: packuswb %xmm3, %xmm1
+; CHECK-NOSSSE3-NEXT: retq
+;
+; CHECK-SSSE3-LABEL: test4:
+; CHECK-SSSE3: # BB#0: # %entry
+; CHECK-SSSE3-NEXT: movdqa {{.*#+}} xmm2 = [1,0,3,2,5,4,7,6,9,8,11,10,13,12,15,14]
+; CHECK-SSSE3-NEXT: pshufb %xmm2, %xmm0
+; CHECK-SSSE3-NEXT: pshufb %xmm2, %xmm1
+; CHECK-SSSE3-NEXT: retq
+;
+; CHECK-AVX2-LABEL: test4:
+; CHECK-AVX2: # BB#0: # %entry
+; CHECK-AVX2-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[1,0,3,2,5,4,7,6,9,8,11,10,13,12,15,14,17,16,19,18,21,20,23,22,25,24,27,26,29,28,31,30]
+; CHECK-AVX2-NEXT: retq
+;
+; CHECK-WIDE-AVX2-LABEL: test4:
+; CHECK-WIDE-AVX2: # BB#0: # %entry
+; CHECK-WIDE-AVX2-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[1,0,3,2,5,4,7,6,9,8,11,10,13,12,15,14,17,16,19,18,21,20,23,22,25,24,27,26,29,28,31,30]
+; CHECK-WIDE-AVX2-NEXT: retq
entry:
%r = call <16 x i16> @llvm.bswap.v16i16(<16 x i16> %v)
ret <16 x i16> %r
-
-; CHECK-SSSE3-LABEL: @test4
-; CHECK-SSSE3: pshufb
-; CHECK-SSSE3: pshufb
-; CHECK-SSSE3-NEXT: retq
-
-; CHECK-AVX2-LABEL: @test4
-; CHECK-AVX2: vpshufb
-; CHECK-AVX2-NEXT: retq
-
-; CHECK-WIDE-AVX2-LABEL: @test4
-; CHECK-WIDE-AVX2: vpshufb
-; CHECK-WIDE-AVX2-NEXT: retq
}
-define <8 x i32> @test5(<8 x i32> %v) #0 {
+define <8 x i32> @test5(<8 x i32> %v) {
+; CHECK-NOSSSE3-LABEL: test5:
+; CHECK-NOSSSE3: # BB#0: # %entry
+; CHECK-NOSSSE3-NEXT: pxor %xmm2, %xmm2
+; CHECK-NOSSSE3-NEXT: movdqa %xmm0, %xmm3
+; CHECK-NOSSSE3-NEXT: punpckhbw {{.*#+}} xmm3 = xmm3[8],xmm2[8],xmm3[9],xmm2[9],xmm3[10],xmm2[10],xmm3[11],xmm2[11],xmm3[12],xmm2[12],xmm3[13],xmm2[13],xmm3[14],xmm2[14],xmm3[15],xmm2[15]
+; CHECK-NOSSSE3-NEXT: pshuflw {{.*#+}} xmm3 = xmm3[3,2,1,0,4,5,6,7]
+; CHECK-NOSSSE3-NEXT: pshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,7,6,5,4]
+; CHECK-NOSSSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3],xmm0[4],xmm2[4],xmm0[5],xmm2[5],xmm0[6],xmm2[6],xmm0[7],xmm2[7]
+; CHECK-NOSSSE3-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[3,2,1,0,4,5,6,7]
+; CHECK-NOSSSE3-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,7,6,5,4]
+; CHECK-NOSSSE3-NEXT: packuswb %xmm3, %xmm0
+; CHECK-NOSSSE3-NEXT: movdqa %xmm1, %xmm3
+; CHECK-NOSSSE3-NEXT: punpckhbw {{.*#+}} xmm3 = xmm3[8],xmm2[8],xmm3[9],xmm2[9],xmm3[10],xmm2[10],xmm3[11],xmm2[11],xmm3[12],xmm2[12],xmm3[13],xmm2[13],xmm3[14],xmm2[14],xmm3[15],xmm2[15]
+; CHECK-NOSSSE3-NEXT: pshuflw {{.*#+}} xmm3 = xmm3[3,2,1,0,4,5,6,7]
+; CHECK-NOSSSE3-NEXT: pshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,7,6,5,4]
+; CHECK-NOSSSE3-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3],xmm1[4],xmm2[4],xmm1[5],xmm2[5],xmm1[6],xmm2[6],xmm1[7],xmm2[7]
+; CHECK-NOSSSE3-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[3,2,1,0,4,5,6,7]
+; CHECK-NOSSSE3-NEXT: pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,7,6,5,4]
+; CHECK-NOSSSE3-NEXT: packuswb %xmm3, %xmm1
+; CHECK-NOSSSE3-NEXT: retq
+;
+; CHECK-SSSE3-LABEL: test5:
+; CHECK-SSSE3: # BB#0: # %entry
+; CHECK-SSSE3-NEXT: movdqa {{.*#+}} xmm2 = [3,2,1,0,7,6,5,4,11,10,9,8,15,14,13,12]
+; CHECK-SSSE3-NEXT: pshufb %xmm2, %xmm0
+; CHECK-SSSE3-NEXT: pshufb %xmm2, %xmm1
+; CHECK-SSSE3-NEXT: retq
+;
+; CHECK-AVX2-LABEL: test5:
+; CHECK-AVX2: # BB#0: # %entry
+; CHECK-AVX2-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[3,2,1,0,7,6,5,4,11,10,9,8,15,14,13,12,19,18,17,16,23,22,21,20,27,26,25,24,31,30,29,28]
+; CHECK-AVX2-NEXT: retq
+;
+; CHECK-WIDE-AVX2-LABEL: test5:
+; CHECK-WIDE-AVX2: # BB#0: # %entry
+; CHECK-WIDE-AVX2-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[3,2,1,0,7,6,5,4,11,10,9,8,15,14,13,12,19,18,17,16,23,22,21,20,27,26,25,24,31,30,29,28]
+; CHECK-WIDE-AVX2-NEXT: retq
entry:
%r = call <8 x i32> @llvm.bswap.v8i32(<8 x i32> %v)
ret <8 x i32> %r
-
-; CHECK-SSSE3-LABEL: @test5
-; CHECK-SSSE3: pshufb
-; CHECK-SSSE3: pshufb
-; CHECK-SSSE3-NEXT: retq
-
-; CHECK-AVX2-LABEL: @test5
-; CHECK-AVX2: vpshufb
-; CHECK-AVX2-NEXT: retq
-
-; CHECK-WIDE-AVX2-LABEL: @test5
-; CHECK-WIDE-AVX2: vpshufb
-; CHECK-WIDE-AVX2-NEXT: retq
}
-define <4 x i64> @test6(<4 x i64> %v) #0 {
+define <4 x i64> @test6(<4 x i64> %v) {
+; CHECK-NOSSSE3-LABEL: test6:
+; CHECK-NOSSSE3: # BB#0: # %entry
+; CHECK-NOSSSE3-NEXT: pxor %xmm2, %xmm2
+; CHECK-NOSSSE3-NEXT: movdqa %xmm0, %xmm3
+; CHECK-NOSSSE3-NEXT: punpckhbw {{.*#+}} xmm3 = xmm3[8],xmm2[8],xmm3[9],xmm2[9],xmm3[10],xmm2[10],xmm3[11],xmm2[11],xmm3[12],xmm2[12],xmm3[13],xmm2[13],xmm3[14],xmm2[14],xmm3[15],xmm2[15]
+; CHECK-NOSSSE3-NEXT: pshufd {{.*#+}} xmm3 = xmm3[2,3,0,1]
+; CHECK-NOSSSE3-NEXT: pshuflw {{.*#+}} xmm3 = xmm3[3,2,1,0,4,5,6,7]
+; CHECK-NOSSSE3-NEXT: pshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,7,6,5,4]
+; CHECK-NOSSSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3],xmm0[4],xmm2[4],xmm0[5],xmm2[5],xmm0[6],xmm2[6],xmm0[7],xmm2[7]
+; CHECK-NOSSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
+; CHECK-NOSSSE3-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[3,2,1,0,4,5,6,7]
+; CHECK-NOSSSE3-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,7,6,5,4]
+; CHECK-NOSSSE3-NEXT: packuswb %xmm3, %xmm0
+; CHECK-NOSSSE3-NEXT: movdqa %xmm1, %xmm3
+; CHECK-NOSSSE3-NEXT: punpckhbw {{.*#+}} xmm3 = xmm3[8],xmm2[8],xmm3[9],xmm2[9],xmm3[10],xmm2[10],xmm3[11],xmm2[11],xmm3[12],xmm2[12],xmm3[13],xmm2[13],xmm3[14],xmm2[14],xmm3[15],xmm2[15]
+; CHECK-NOSSSE3-NEXT: pshufd {{.*#+}} xmm3 = xmm3[2,3,0,1]
+; CHECK-NOSSSE3-NEXT: pshuflw {{.*#+}} xmm3 = xmm3[3,2,1,0,4,5,6,7]
+; CHECK-NOSSSE3-NEXT: pshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,7,6,5,4]
+; CHECK-NOSSSE3-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3],xmm1[4],xmm2[4],xmm1[5],xmm2[5],xmm1[6],xmm2[6],xmm1[7],xmm2[7]
+; CHECK-NOSSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm1[2,3,0,1]
+; CHECK-NOSSSE3-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[3,2,1,0,4,5,6,7]
+; CHECK-NOSSSE3-NEXT: pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,7,6,5,4]
+; CHECK-NOSSSE3-NEXT: packuswb %xmm3, %xmm1
+; CHECK-NOSSSE3-NEXT: retq
+;
+; CHECK-SSSE3-LABEL: test6:
+; CHECK-SSSE3: # BB#0: # %entry
+; CHECK-SSSE3-NEXT: movdqa {{.*#+}} xmm2 = [7,6,5,4,3,2,1,0,15,14,13,12,11,10,9,8]
+; CHECK-SSSE3-NEXT: pshufb %xmm2, %xmm0
+; CHECK-SSSE3-NEXT: pshufb %xmm2, %xmm1
+; CHECK-SSSE3-NEXT: retq
+;
+; CHECK-AVX2-LABEL: test6:
+; CHECK-AVX2: # BB#0: # %entry
+; CHECK-AVX2-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[7,6,5,4,3,2,1,0,15,14,13,12,11,10,9,8,23,22,21,20,19,18,17,16,31,30,29,28,27,26,25,24]
+; CHECK-AVX2-NEXT: retq
+;
+; CHECK-WIDE-AVX2-LABEL: test6:
+; CHECK-WIDE-AVX2: # BB#0: # %entry
+; CHECK-WIDE-AVX2-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[7,6,5,4,3,2,1,0,15,14,13,12,11,10,9,8,23,22,21,20,19,18,17,16,31,30,29,28,27,26,25,24]
+; CHECK-WIDE-AVX2-NEXT: retq
entry:
%r = call <4 x i64> @llvm.bswap.v4i64(<4 x i64> %v)
ret <4 x i64> %r
-
-; CHECK-SSSE3-LABEL: @test6
-; CHECK-SSSE3: pshufb
-; CHECK-SSSE3: pshufb
-; CHECK-SSSE3-NEXT: retq
-
-; CHECK-AVX2-LABEL: @test6
-; CHECK-AVX2: vpshufb
-; CHECK-AVX2-NEXT: retq
-
-; CHECK-WIDE-AVX2-LABEL: @test6
-; CHECK-WIDE-AVX2: vpshufb
-; CHECK-WIDE-AVX2-NEXT: retq
}
declare <4 x i16> @llvm.bswap.v4i16(<4 x i16>)
-define <4 x i16> @test7(<4 x i16> %v) #0 {
+define <4 x i16> @test7(<4 x i16> %v) {
+; CHECK-NOSSSE3-LABEL: test7:
+; CHECK-NOSSSE3: # BB#0: # %entry
+; CHECK-NOSSSE3-NEXT: pxor %xmm1, %xmm1
+; CHECK-NOSSSE3-NEXT: movdqa %xmm0, %xmm2
+; CHECK-NOSSSE3-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm1[8],xmm2[9],xmm1[9],xmm2[10],xmm1[10],xmm2[11],xmm1[11],xmm2[12],xmm1[12],xmm2[13],xmm1[13],xmm2[14],xmm1[14],xmm2[15],xmm1[15]
+; CHECK-NOSSSE3-NEXT: pshuflw {{.*#+}} xmm2 = xmm2[3,2,1,0,4,5,6,7]
+; CHECK-NOSSSE3-NEXT: pshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,7,6,5,4]
+; CHECK-NOSSSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
+; CHECK-NOSSSE3-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[3,2,1,0,4,5,6,7]
+; CHECK-NOSSSE3-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,7,6,5,4]
+; CHECK-NOSSSE3-NEXT: packuswb %xmm2, %xmm0
+; CHECK-NOSSSE3-NEXT: psrld $16, %xmm0
+; CHECK-NOSSSE3-NEXT: retq
+;
+; CHECK-SSSE3-LABEL: test7:
+; CHECK-SSSE3: # BB#0: # %entry
+; CHECK-SSSE3-NEXT: pshufb {{.*#+}} xmm0 = xmm0[3,2,1,0,7,6,5,4,11,10,9,8,15,14,13,12]
+; CHECK-SSSE3-NEXT: psrld $16, %xmm0
+; CHECK-SSSE3-NEXT: retq
+;
+; CHECK-AVX2-LABEL: test7:
+; CHECK-AVX2: # BB#0: # %entry
+; CHECK-AVX2-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[3,2,1,0,7,6,5,4,11,10,9,8,15,14,13,12]
+; CHECK-AVX2-NEXT: vpsrld $16, %xmm0, %xmm0
+; CHECK-AVX2-NEXT: retq
+;
+; CHECK-WIDE-AVX2-LABEL: test7:
+; CHECK-WIDE-AVX2: # BB#0: # %entry
+; CHECK-WIDE-AVX2-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[1,0,3,2,5,4,7,6,9,8,11,10,13,12,15,14]
+; CHECK-WIDE-AVX2-NEXT: retq
entry:
%r = call <4 x i16> @llvm.bswap.v4i16(<4 x i16> %v)
ret <4 x i16> %r
-
-; CHECK-SSSE3-LABEL: @test7
-; CHECK-SSSE3: pshufb
-; CHECK-SSSE3: psrld $16
-; CHECK-SSSE3-NEXT: retq
-
-; CHECK-AVX2-LABEL: @test7
-; CHECK-AVX2: vpshufb
-; CHECK-AVX2: vpsrld $16
-; CHECK-AVX2-NEXT: retq
-
-; CHECK-WIDE-AVX2-LABEL: @test7
-; CHECK-WIDE-AVX2: vpshufb
-; CHECK-WIDE-AVX2-NEXT: retq
}
-
-attributes #0 = { nounwind uwtable }
-
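The rewrite of this test replaces one-instruction pattern checks with full expected sequences. The lowering itself follows from a simple observation: per-lane byte swapping is a fixed byte permutation, so any SSSE3 target can handle an entire vector with a single pshufb, while pre-SSSE3 targets need the punpck/pshuflw/pshufhw sequences now spelled out above. Per 32-bit lane, the operation is ordinary scalar bswap (a C sketch):

#include <stdint.h>

/* Each output byte comes from a fixed input byte, which is why the
   <4 x i32> case uses the pshufb control
   [3,2,1,0,7,6,5,4,11,10,9,8,15,14,13,12]. */
static inline uint32_t bswap32(uint32_t x) {
    return (x >> 24)
         | ((x >> 8) & 0x0000ff00u)
         | ((x << 8) & 0x00ff0000u)
         | (x << 24);
}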
diff --git a/test/CodeGen/X86/chain_order.ll b/test/CodeGen/X86/chain_order.ll
index c88726e..72e6f78 100644
--- a/test/CodeGen/X86/chain_order.ll
+++ b/test/CodeGen/X86/chain_order.ll
@@ -1,13 +1,13 @@
; RUN: llc < %s -mcpu=corei7-avx -mtriple=x86_64-linux | FileCheck %s
-;CHECK-LABEL: cftx020:
-;CHECK: vmovsd (%rdi), %xmm{{.*}}
-;CHECK: vmovsd 16(%rdi), %xmm{{.*}}
-;CHECK: vmovsd 24(%rdi), %xmm{{.*}}
-;CHECK: vmovhpd 8(%rdi), %xmm{{.*}}
-;CHECK: vmovupd %xmm{{.*}}, (%rdi)
-;CHECK: vmovupd %xmm{{.*}}, 16(%rdi)
-;CHECK: ret
+; CHECK-LABEL: cftx020:
+; CHECK: vmovsd (%rdi), %xmm{{.*}}
+; CHECK-NEXT: vmovsd 16(%rdi), %xmm{{.*}}
+; CHECK-NEXT: vmovhpd 24(%rdi), %xmm{{.*}}
+; CHECK-NEXT: vmovhpd 8(%rdi), %xmm{{.*}}
+; CHECK: vmovupd %xmm{{.*}}, (%rdi)
+; CHECK-NEXT: vmovupd %xmm{{.*}}, 16(%rdi)
+; CHECK: ret
; A test from pifft (after SLP-vectorization) that fails when we drop the chain on newly merged loads.
define void @cftx020(double* nocapture %a) {
diff --git a/test/CodeGen/X86/clobber-fi0.ll b/test/CodeGen/X86/clobber-fi0.ll
index 38a42db..4876c35 100644
--- a/test/CodeGen/X86/clobber-fi0.ll
+++ b/test/CodeGen/X86/clobber-fi0.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -mcpu=generic -mtriple=x86_64-linux | FileCheck %s
+; RUN: llc < %s -verify-machineinstrs -mcpu=generic -mtriple=x86_64-linux | FileCheck %s
target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
target triple = "x86_64-apple-macosx10.7.0"
diff --git a/test/CodeGen/X86/cmov.ll b/test/CodeGen/X86/cmov.ll
index d38d2b4..355c6b4 100644
--- a/test/CodeGen/X86/cmov.ll
+++ b/test/CodeGen/X86/cmov.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -mtriple=x86_64-apple-darwin10 -disable-cgp-select2branch | FileCheck %s
+; RUN: llc < %s -verify-machineinstrs -mtriple=x86_64-apple-darwin10 -disable-cgp-select2branch | FileCheck %s
target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128"
define i32 @test1(i32 %x, i32 %n, i32 %w, i32* %vp) nounwind readnone {
diff --git a/test/CodeGen/X86/cmpxchg-clobber-flags.ll b/test/CodeGen/X86/cmpxchg-clobber-flags.ll
index 3cb8b97..b7995db 100644
--- a/test/CodeGen/X86/cmpxchg-clobber-flags.ll
+++ b/test/CodeGen/X86/cmpxchg-clobber-flags.ll
@@ -1,19 +1,21 @@
-; RUN: llc -mtriple=x86_64-linux-gnu %s -o - | FileCheck %s
-; RUN: llc -mtriple=x86_64-linux-gnu -pre-RA-sched=fast %s -o - | FileCheck %s
+; RUN: llc -verify-machineinstrs -mtriple=i386-linux-gnu %s -o - | FileCheck %s
+; RUN: llc -verify-machineinstrs -mtriple=i386-linux-gnu -pre-RA-sched=fast %s -o - | FileCheck %s
+; RUN: llc -verify-machineinstrs -mtriple=x86_64-linux-gnu %s -o - | FileCheck %s
+; RUN: llc -verify-machineinstrs -mtriple=x86_64-linux-gnu -pre-RA-sched=fast %s -o - | FileCheck %s
declare i32 @bar()
define i64 @test_intervening_call(i64* %foo, i64 %bar, i64 %baz) {
; CHECK-LABEL: test_intervening_call:
; CHECK: cmpxchg
-; CHECK: pushfq
-; CHECK: popq [[FLAGS:%.*]]
+; CHECK: pushf[[LQ:[lq]]]
+; CHECK-NEXT: pop[[LQ]] [[FLAGS:%.*]]
-; CHECK: callq bar
+; CHECK-NEXT: call[[LQ]] bar
-; CHECK: pushq [[FLAGS]]
-; CHECK: popfq
-; CHECK: jne
+; CHECK-NEXT: push[[LQ]] [[FLAGS]]
+; CHECK-NEXT: popf[[LQ]]
+; CHECK-NEXT: jne
%cx = cmpxchg i64* %foo, i64 %bar, i64 %baz seq_cst seq_cst
%p = extractvalue { i64, i1 } %cx, 1
call i32 @bar()
@@ -68,14 +70,13 @@ define i32 @test_feed_cmov(i32* %addr, i32 %desired, i32 %new) {
; CHECK-LABEL: test_feed_cmov:
; CHECK: cmpxchg
-; CHECK: pushfq
-; CHECK: popq [[FLAGS:%.*]]
-
-; CHECK: callq bar
+; CHECK: pushf[[LQ:[lq]]]
+; CHECK-NEXT: pop[[LQ]] [[FLAGS:%.*]]
-; CHECK: pushq [[FLAGS]]
-; CHECK: popfq
+; CHECK-NEXT: call[[LQ]] bar
+; CHECK-NEXT: push[[LQ]] [[FLAGS]]
+; CHECK-NEXT: popf[[LQ]]
%res = cmpxchg i32* %addr, i32 %desired, i32 %new seq_cst seq_cst
%success = extractvalue { i32, i1 } %res, 1
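These tests pin down EFLAGS preservation: the i1 success bit of a cmpxchg lives in ZF, so when a call intervenes before the bit is consumed, the backend must save and restore the flags with pushf/popf, now matched for both the 32-bit (l) and 64-bit (q) suffixes. The first test corresponds roughly to this C sketch (bar's signature taken from the declare above):

int bar(void);

long test_intervening_call(long *foo, long expected, long desired) {
    /* the success flag must survive the call to bar() */
    int ok = __atomic_compare_exchange_n(foo, &expected, desired,
                                         0, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
    bar();
    return ok;
}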
diff --git a/test/CodeGen/X86/coalesce_commute_subreg.ll b/test/CodeGen/X86/coalesce_commute_subreg.ll
new file mode 100644
index 0000000..8d0a20c
--- /dev/null
+++ b/test/CodeGen/X86/coalesce_commute_subreg.ll
@@ -0,0 +1,51 @@
+; RUN: llc -mtriple="x86_64-apple-darwin" -o - -verify-machineinstrs %s
+
+define void @make_wanted() #0 {
+entry:
+ br i1 undef, label %for.end20, label %for.cond1.preheader.lr.ph
+
+for.cond1.preheader.lr.ph:
+ br label %for.body3
+
+for.body3:
+ %cmp20.i = icmp eq i32 undef, 0
+ %.col.057 = select i1 %cmp20.i, i32 0, i32 undef
+ br i1 undef, label %while.cond.i, label %for.body5.lr.ph.i
+
+for.body5.lr.ph.i:
+ %0 = sext i32 %.col.057 to i64
+ %1 = sub i32 0, %.col.057
+ %2 = zext i32 %1 to i64
+ %3 = add nuw nsw i64 %2, 1
+ %n.vec110 = and i64 %3, 8589934588
+ %end.idx.rnd.down111 = add nsw i64 %n.vec110, %0
+ br i1 undef, label %middle.block105, label %vector.ph103
+
+vector.ph103:
+ br i1 undef, label %middle.block105, label %vector.body104
+
+vector.body104:
+ %4 = icmp eq i64 undef, %end.idx.rnd.down111
+ br i1 %4, label %middle.block105, label %vector.body104
+
+middle.block105:
+ %resume.val114 = phi i64 [ %0, %for.body5.lr.ph.i ], [ %end.idx.rnd.down111, %vector.body104 ], [ %end.idx.rnd.down111, %vector.ph103 ]
+ %cmp.n116 = icmp eq i64 undef, %resume.val114
+ br i1 %cmp.n116, label %while.cond.i, label %for.body5.i.preheader
+
+for.body5.i.preheader:
+ %lcmp.or182 = or i1 undef, undef
+ br i1 %lcmp.or182, label %for.body5.i.prol, label %while.cond.i
+
+for.body5.i.prol:
+ br i1 undef, label %for.body5.i.prol, label %while.cond.i
+
+while.cond.i:
+ br i1 undef, label %while.cond.i, label %if.then
+
+if.then:
+ br label %for.body3
+
+for.end20:
+ ret void
+}
diff --git a/test/CodeGen/X86/coalescer-dce.ll b/test/CodeGen/X86/coalescer-dce.ll
index 7f72e3d..208d706 100644
--- a/test/CodeGen/X86/coalescer-dce.ll
+++ b/test/CodeGen/X86/coalescer-dce.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -disable-fp-elim -disable-machine-dce -verify-coalescing
+; RUN: llc < %s -verify-machineinstrs -disable-fp-elim -disable-machine-dce -verify-coalescing
target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64"
target triple = "x86_64-apple-macosx10.7.0"
diff --git a/test/CodeGen/X86/codegen-prepare-extload.ll b/test/CodeGen/X86/codegen-prepare-extload.ll
index 9320706..9b27c33 100644
--- a/test/CodeGen/X86/codegen-prepare-extload.ll
+++ b/test/CodeGen/X86/codegen-prepare-extload.ll
@@ -1,12 +1,21 @@
; RUN: llc < %s -mtriple=x86_64-linux | FileCheck %s
; RUN: llc < %s -mtriple=x86_64-win64 | FileCheck %s
-; rdar://7304838
+; RUN: opt -codegenprepare < %s -mtriple=x86_64-apple-macosx -S | FileCheck %s --check-prefix=OPTALL --check-prefix=OPT --check-prefix=NONSTRESS
+; RUN: opt -codegenprepare < %s -mtriple=x86_64-apple-macosx -S -stress-cgp-ext-ld-promotion | FileCheck %s --check-prefix=OPTALL --check-prefix=OPT --check-prefix=STRESS
+; RUN: opt -codegenprepare < %s -mtriple=x86_64-apple-macosx -S -disable-cgp-ext-ld-promotion | FileCheck %s --check-prefix=OPTALL --check-prefix=DISABLE
+; rdar://7304838
; CodeGenPrepare should move the zext into the block with the load
; so that SelectionDAG can select it with the load.
-
+;
+; CHECK-LABEL: foo:
; CHECK: movsbl ({{%rdi|%rcx}}), %eax
-
+;
+; OPTALL-LABEL: @foo
+; OPTALL: [[LD:%[a-zA-Z_0-9-]+]] = load i8* %p
+; OPTALL-NEXT: [[ZEXT:%[a-zA-Z_0-9-]+]] = zext i8 [[LD]] to i32
+; OPTALL: store i32 [[ZEXT]], i32* %q
+; OPTALL: ret
define void @foo(i8* %p, i32* %q) {
entry:
%t = load i8* %p
@@ -19,3 +28,336 @@ true:
false:
ret void
}
+
+; Check that we manage to form a zextload when an operation with only one
+; argument to explicitly extend is in the way.
+; OPTALL-LABEL: @promoteOneArg
+; OPTALL: [[LD:%[a-zA-Z_0-9-]+]] = load i8* %p
+; OPT-NEXT: [[ZEXT:%[a-zA-Z_0-9-]+]] = zext i8 [[LD]] to i32
+; OPT-NEXT: [[RES:%[a-zA-Z_0-9-]+]] = add nuw i32 [[ZEXT]], 2
+; Make sure the operation is not promoted when the promotion pass is disabled.
+; DISABLE: [[ADD:%[a-zA-Z_0-9-]+]] = add nuw i8 [[LD]], 2
+; DISABLE: [[RES:%[a-zA-Z_0-9-]+]] = zext i8 [[ADD]] to i32
+; OPTALL: store i32 [[RES]], i32* %q
+; OPTALL: ret
+define void @promoteOneArg(i8* %p, i32* %q) {
+entry:
+ %t = load i8* %p
+ %add = add nuw i8 %t, 2
+ %a = icmp slt i8 %t, 20
+ br i1 %a, label %true, label %false
+true:
+ %s = zext i8 %add to i32
+ store i32 %s, i32* %q
+ ret void
+false:
+ ret void
+}
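+;
+; Concretely, with promotion enabled the pass rewrites
+;     %add = add nuw i8 %t, 2
+;     %s   = zext i8 %add to i32
+; into
+;     %z = zext i8 %t to i32
+;     %s = add nuw i32 %z, 2
+; so the remaining zext sits right after the load and SelectionDAG can
+; fold the pair into a single zero-extending load.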
+
+; Check that we manage to form a sextload when an operation with only one
+; argument to explicitly extend is in the way.
+; Version with sext.
+; OPTALL-LABEL: @promoteOneArgSExt
+; OPTALL: [[LD:%[a-zA-Z_0-9-]+]] = load i8* %p
+; OPT-NEXT: [[SEXT:%[a-zA-Z_0-9-]+]] = sext i8 [[LD]] to i32
+; OPT-NEXT: [[RES:%[a-zA-Z_0-9-]+]] = add nsw i32 [[SEXT]], 2
+; DISABLE: [[ADD:%[a-zA-Z_0-9-]+]] = add nsw i8 [[LD]], 2
+; DISABLE: [[RES:%[a-zA-Z_0-9-]+]] = sext i8 [[ADD]] to i32
+; OPTALL: store i32 [[RES]], i32* %q
+; OPTALL: ret
+define void @promoteOneArgSExt(i8* %p, i32* %q) {
+entry:
+ %t = load i8* %p
+ %add = add nsw i8 %t, 2
+ %a = icmp slt i8 %t, 20
+ br i1 %a, label %true, label %false
+true:
+ %s = sext i8 %add to i32
+ store i32 %s, i32* %q
+ ret void
+false:
+ ret void
+}
+
+; Check that we manage to form a zextload when an operation with two
+; arguments to explicitly extend is in the way.
+; Extending %add will create two extensions:
+; 1. One for %b.
+; 2. One for %t.
+; #1 will not be removed as we do not know anything about %b.
+; #2 may not be merged with the load because %t is used in a comparison.
+; Since two extensions may be emitted in the end instead of one before the
+; transformation, the regular heuristic does not apply the optimization.
+;
+; OPTALL-LABEL: @promoteTwoArgZext
+; OPTALL: [[LD:%[a-zA-Z_0-9-]+]] = load i8* %p
+;
+; STRESS-NEXT: [[ZEXTLD:%[a-zA-Z_0-9-]+]] = zext i8 [[LD]] to i32
+; STRESS-NEXT: [[ZEXTB:%[a-zA-Z_0-9-]+]] = zext i8 %b to i32
+; STRESS-NEXT: [[RES:%[a-zA-Z_0-9-]+]] = add nuw i32 [[ZEXTLD]], [[ZEXTB]]
+;
+; NONSTRESS: [[ADD:%[a-zA-Z_0-9-]+]] = add nuw i8 [[LD]], %b
+; NONSTRESS: [[RES:%[a-zA-Z_0-9-]+]] = zext i8 [[ADD]] to i32
+;
+; DISABLE: [[ADD:%[a-zA-Z_0-9-]+]] = add nuw i8 [[LD]], %b
+; DISABLE: [[RES:%[a-zA-Z_0-9-]+]] = zext i8 [[ADD]] to i32
+;
+; OPTALL: store i32 [[RES]], i32* %q
+; OPTALL: ret
+define void @promoteTwoArgZext(i8* %p, i32* %q, i8 %b) {
+entry:
+ %t = load i8* %p
+ %add = add nuw i8 %t, %b
+ %a = icmp slt i8 %t, 20
+ br i1 %a, label %true, label %false
+true:
+ %s = zext i8 %add to i32
+ store i32 %s, i32* %q
+ ret void
+false:
+ ret void
+}
+
+; Check that we manage to form a sextload when an operation with two
+; arguments to explicitly extend is in the way.
+; Version with sext.
+; OPTALL-LABEL: @promoteTwoArgSExt
+; OPTALL: [[LD:%[a-zA-Z_0-9-]+]] = load i8* %p
+;
+; STRESS-NEXT: [[SEXTLD:%[a-zA-Z_0-9-]+]] = sext i8 [[LD]] to i32
+; STRESS-NEXT: [[SEXTB:%[a-zA-Z_0-9-]+]] = sext i8 %b to i32
+; STRESS-NEXT: [[RES:%[a-zA-Z_0-9-]+]] = add nsw i32 [[SEXTLD]], [[SEXTB]]
+;
+; NONSTRESS: [[ADD:%[a-zA-Z_0-9-]+]] = add nsw i8 [[LD]], %b
+; NONSTRESS: [[RES:%[a-zA-Z_0-9-]+]] = sext i8 [[ADD]] to i32
+;
+; DISABLE: [[ADD:%[a-zA-Z_0-9-]+]] = add nsw i8 [[LD]], %b
+; DISABLE: [[RES:%[a-zA-Z_0-9-]+]] = sext i8 [[ADD]] to i32
+; OPTALL: store i32 [[RES]], i32* %q
+; OPTALL: ret
+define void @promoteTwoArgSExt(i8* %p, i32* %q, i8 %b) {
+entry:
+ %t = load i8* %p
+ %add = add nsw i8 %t, %b
+ %a = icmp slt i8 %t, 20
+ br i1 %a, label %true, label %false
+true:
+ %s = sext i8 %add to i32
+ store i32 %s, i32* %q
+ ret void
+false:
+ ret void
+}
+
+; Check that we do not form a zextload if we need to introduce more than
+; one additional extension.
+; OPTALL-LABEL: @promoteThreeArgZext
+; OPTALL: [[LD:%[a-zA-Z_0-9-]+]] = load i8* %p
+;
+; STRESS-NEXT: [[ZEXTLD:%[a-zA-Z_0-9-]+]] = zext i8 [[LD]] to i32
+; STRESS-NEXT: [[ZEXTB:%[a-zA-Z_0-9-]+]] = zext i8 %b to i32
+; STRESS-NEXT: [[TMP:%[a-zA-Z_0-9-]+]] = add nuw i32 [[ZEXTLD]], [[ZEXTB]]
+; STRESS-NEXT: [[ZEXTC:%[a-zA-Z_0-9-]+]] = zext i8 %c to i32
+; STRESS-NEXT: [[RES:%[a-zA-Z_0-9-]+]] = add nuw i32 [[TMP]], [[ZEXTC]]
+;
+; NONSTRESS-NEXT: [[TMP:%[a-zA-Z_0-9-]+]] = add nuw i8 [[LD]], %b
+; NONSTRESS-NEXT: [[ADD:%[a-zA-Z_0-9-]+]] = add nuw i8 [[TMP]], %c
+; NONSTRESS: [[RES:%[a-zA-Z_0-9-]+]] = zext i8 [[ADD]] to i32
+;
+; DISABLE: add nuw i8
+; DISABLE: [[ADD:%[a-zA-Z_0-9-]+]] = add nuw i8
+; DISABLE: [[RES:%[a-zA-Z_0-9-]+]] = zext i8 [[ADD]] to i32
+;
+; OPTALL: store i32 [[RES]], i32* %q
+; OPTALL: ret
+define void @promoteThreeArgZext(i8* %p, i32* %q, i8 %b, i8 %c) {
+entry:
+ %t = load i8* %p
+ %tmp = add nuw i8 %t, %b
+ %add = add nuw i8 %tmp, %c
+ %a = icmp slt i8 %t, 20
+ br i1 %a, label %true, label %false
+true:
+ %s = zext i8 %add to i32
+ store i32 %s, i32* %q
+ ret void
+false:
+ ret void
+}
+
+; Check that we manage to form a zextload after promoting and merging
+; two extensions.
+; OPTALL-LABEL: @promoteMergeExtArgZExt
+; OPTALL: [[LD:%[a-zA-Z_0-9-]+]] = load i8* %p
+;
+; STRESS-NEXT: [[ZEXTLD:%[a-zA-Z_0-9-]+]] = zext i8 [[LD]] to i32
+; STRESS-NEXT: [[ZEXTB:%[a-zA-Z_0-9-]+]] = zext i16 %b to i32
+; STRESS-NEXT: [[RES:%[a-zA-Z_0-9-]+]] = add nuw i32 [[ZEXTLD]], [[ZEXTB]]
+;
+; NONSTRESS: [[ZEXTLD:%[a-zA-Z_0-9-]+]] = zext i8 [[LD]] to i16
+; NONSTRESS: [[ADD:%[a-zA-Z_0-9-]+]] = add nuw i16 [[ZEXTLD]], %b
+; NONSTRESS: [[RES:%[a-zA-Z_0-9-]+]] = zext i16 [[ADD]] to i32
+;
+; DISABLE: [[ZEXTLD:%[a-zA-Z_0-9-]+]] = zext i8 [[LD]] to i16
+; DISABLE: [[ADD:%[a-zA-Z_0-9-]+]] = add nuw i16 [[ZEXTLD]], %b
+; DISABLE: [[RES:%[a-zA-Z_0-9-]+]] = zext i16 [[ADD]] to i32
+;
+; OPTALL: store i32 [[RES]], i32* %q
+; OPTALL: ret
+define void @promoteMergeExtArgZExt(i8* %p, i32* %q, i16 %b) {
+entry:
+ %t = load i8* %p
+ %ext = zext i8 %t to i16
+ %add = add nuw i16 %ext, %b
+ %a = icmp slt i8 %t, 20
+ br i1 %a, label %true, label %false
+true:
+ %s = zext i16 %add to i32
+ store i32 %s, i32* %q
+ ret void
+false:
+ ret void
+}
+
+; Check that we manage to form a sextload after promoting and merging
+; two extensions.
+; Version with sext.
+; OPTALL-LABEL: @promoteMergeExtArgSExt
+; OPTALL: [[LD:%[a-zA-Z_0-9-]+]] = load i8* %p
+;
+; STRESS-NEXT: [[ZEXTLD:%[a-zA-Z_0-9-]+]] = zext i8 [[LD]] to i32
+; STRESS-NEXT: [[ZEXTB:%[a-zA-Z_0-9-]+]] = sext i16 %b to i32
+; STRESS-NEXT: [[RES:%[a-zA-Z_0-9-]+]] = add nsw i32 [[ZEXTLD]], [[ZEXTB]]
+;
+; NONSTRESS: [[ZEXTLD:%[a-zA-Z_0-9-]+]] = zext i8 [[LD]] to i16
+; NONSTRESS: [[ADD:%[a-zA-Z_0-9-]+]] = add nsw i16 [[ZEXTLD]], %b
+; NONSTRESS: [[RES:%[a-zA-Z_0-9-]+]] = sext i16 [[ADD]] to i32
+;
+; DISABLE: [[ZEXTLD:%[a-zA-Z_0-9-]+]] = zext i8 [[LD]] to i16
+; DISABLE: [[ADD:%[a-zA-Z_0-9-]+]] = add nsw i16 [[ZEXTLD]], %b
+; DISABLE: [[RES:%[a-zA-Z_0-9-]+]] = sext i16 [[ADD]] to i32
+; OPTALL: store i32 [[RES]], i32* %q
+; OPTALL: ret
+define void @promoteMergeExtArgSExt(i8* %p, i32* %q, i16 %b) {
+entry:
+ %t = load i8* %p
+ %ext = zext i8 %t to i16
+ %add = add nsw i16 %ext, %b
+ %a = icmp slt i8 %t, 20
+ br i1 %a, label %true, label %false
+true:
+ %s = sext i16 %add to i32
+ store i32 %s, i32* %q
+ ret void
+false:
+ ret void
+}
+
+; Check that we manage to catch all the extload opportunities that are exposed
+; by the different iterations of codegen prepare.
+; Moreover, check that we do not promote more than we need to.
+; Here is what happens in this test (not necessarily in this order):
+; 1. We try to promote the operand of %sextadd.
+; a. This creates one sext of %ld2 and one of %zextld
+; b. The sext of %ld2 can be combined with %ld2, so we remove one sext but
+; introduce one. This is fine with the current heuristic: neutral.
+; => We have one zext of %zextld left and we created one sext of %ld2.
+; 2. We try to promote the operand of %sextaddza.
+; a. This creates one sext of %zexta and one of %zextld
+; b. The sext of %zexta does not lead to any load; it stays here, even
+; though it could have been combined with the zext of %a.
+; c. The sext of %zextld leads to %ld and can be combined with it. This is
+; done by promoting %zextld. This is fine with the current heuristic:
+; neutral.
+; => We have created a new zext of %ld and we created one sext of %zexta.
+; 3. We try to promote the operand of %sextaddb.
+; a. This creates one sext of %b and one of %zextld
+; b. The sext of %b is a dead-end, nothing to be done.
+; c. Same thing as 2.c. happens.
+; => We have created a new zext of %ld and we created one sext of %b.
+; 4. We try to promote the operand of the zext of %zextld introduced in #1.
+; a. Same thing as 2.c. happens.
+; b. %zextld does not have any other uses. It is dead-code eliminated.
+; => We have created a new zext of %ld and we removed a zext of %zextld and
+; a zext of %ld.
+; Currently we do not try to reuse existing extensions, so in the end we have
+; 3 identical zext of %ld. The extensions will be CSE'ed by SDag.
+;
+; OPTALL-LABEL: @severalPromotions
+; OPTALL: [[LD:%[a-zA-Z_0-9-]+]] = load i8* %addr1
+; OPT-NEXT: [[ZEXTLD1_1:%[a-zA-Z_0-9-]+]] = zext i8 [[LD]] to i64
+; OPT-NEXT: [[ZEXTLD1_2:%[a-zA-Z_0-9-]+]] = zext i8 [[LD]] to i64
+; OPT-NEXT: [[ZEXTLD1_3:%[a-zA-Z_0-9-]+]] = zext i8 [[LD]] to i64
+; OPT-NEXT: [[LD2:%[a-zA-Z_0-9-]+]] = load i32* %addr2
+; OPT-NEXT: [[SEXTLD2:%[a-zA-Z_0-9-]+]] = sext i32 [[LD2]] to i64
+; OPT-NEXT: [[RES:%[a-zA-Z_0-9-]+]] = add nsw i64 [[SEXTLD2]], [[ZEXTLD1_1]]
+; We do not combine this one: see 2.b.
+; OPT-NEXT: [[ZEXTA:%[a-zA-Z_0-9-]+]] = zext i8 %a to i32
+; OPT-NEXT: [[SEXTZEXTA:%[a-zA-Z_0-9-]+]] = sext i32 [[ZEXTA]] to i64
+; OPT-NEXT: [[RESZA:%[a-zA-Z_0-9-]+]] = add nsw i64 [[SEXTZEXTA]], [[ZEXTLD1_3]]
+; OPT-NEXT: [[SEXTB:%[a-zA-Z_0-9-]+]] = sext i32 %b to i64
+; OPT-NEXT: [[RESB:%[a-zA-Z_0-9-]+]] = add nsw i64 [[SEXTB]], [[ZEXTLD1_2]]
+;
+; DISABLE: [[ADD:%[a-zA-Z_0-9-]+]] = add nsw i32
+; DISABLE: [[RES:%[a-zA-Z_0-9-]+]] = sext i32 [[ADD]] to i64
+; DISABLE: [[ADDZA:%[a-zA-Z_0-9-]+]] = add nsw i32
+; DISABLE: [[RESZA:%[a-zA-Z_0-9-]+]] = sext i32 [[ADDZA]] to i64
+; DISABLE: [[ADDB:%[a-zA-Z_0-9-]+]] = add nsw i32
+; DISABLE: [[RESB:%[a-zA-Z_0-9-]+]] = sext i32 [[ADDB]] to i64
+;
+; OPTALL: call void @dummy(i64 [[RES]], i64 [[RESZA]], i64 [[RESB]])
+; OPTALL: ret
+define void @severalPromotions(i8* %addr1, i32* %addr2, i8 %a, i32 %b) {
+ %ld = load i8* %addr1
+ %zextld = zext i8 %ld to i32
+ %ld2 = load i32* %addr2
+ %add = add nsw i32 %ld2, %zextld
+ %sextadd = sext i32 %add to i64
+ %zexta = zext i8 %a to i32
+ %addza = add nsw i32 %zexta, %zextld
+ %sextaddza = sext i32 %addza to i64
+ %addb = add nsw i32 %b, %zextld
+ %sextaddb = sext i32 %addb to i64
+ call void @dummy(i64 %sextadd, i64 %sextaddza, i64 %sextaddb)
+ ret void
+}
+
+declare void @dummy(i64, i64, i64)
+
+; Make sure we do not try to promote vector types since the type promotion
+; helper does not support them for now.
+; OPTALL-LABEL: @vectorPromotion
+; OPTALL: [[SHL:%[a-zA-Z_0-9-]+]] = shl nuw nsw <2 x i32> zeroinitializer, <i32 8, i32 8>
+; OPTALL: [[ZEXT:%[a-zA-Z_0-9-]+]] = zext <2 x i32> [[SHL]] to <2 x i64>
+; OPTALL: ret
+define void @vectorPromotion() {
+entry:
+ %a = shl nuw nsw <2 x i32> zeroinitializer, <i32 8, i32 8>
+ %b = zext <2 x i32> %a to <2 x i64>
+ ret void
+}
+
+@a = common global i32 0, align 4
+@c = common global [2 x i32] zeroinitializer, align 4
+
+; PR21978.
+; Make sure we support promotion of operands that produce a Value as opposed
+; to an instruction, such as the 'zext' constant expression used below.
+; This used to cause a crash.
+; OPTALL-LABEL: @promotionOfArgEndsUpInValue
+; OPTALL: [[LD:%[a-zA-Z_0-9-]+]] = load i16* %addr
+
+; OPT-NEXT: [[SEXT:%[a-zA-Z_0-9-]+]] = sext i16 [[LD]] to i32
+; OPT-NEXT: [[RES:%[a-zA-Z_0-9-]+]] = add nuw nsw i32 [[SEXT]], zext (i1 icmp ne (i32* getelementptr inbounds ([2 x i32]* @c, i64 0, i64 1), i32* @a) to i32)
+;
+; DISABLE-NEXT: [[ADD:%[a-zA-Z_0-9-]+]] = add nuw nsw i16 [[LD]], zext (i1 icmp ne (i32* getelementptr inbounds ([2 x i32]* @c, i64 0, i64 1), i32* @a) to i16)
+; DISABLE-NEXT: [[RES:%[a-zA-Z_0-9-]+]] = sext i16 [[ADD]] to i32
+;
+; OPTALL-NEXT: ret i32 [[RES]]
+define i32 @promotionOfArgEndsUpInValue(i16* %addr) {
+entry:
+ %val = load i16* %addr
+ %add = add nuw nsw i16 %val, zext (i1 icmp ne (i32* getelementptr inbounds ([2 x i32]* @c, i64 0, i64 1), i32* @a) to i16)
+ %conv3 = sext i16 %add to i32
+ ret i32 %conv3
+}
diff --git a/test/CodeGen/X86/coff-comdat.ll b/test/CodeGen/X86/coff-comdat.ll
index ac4546d..44e1cb2 100644
--- a/test/CodeGen/X86/coff-comdat.ll
+++ b/test/CodeGen/X86/coff-comdat.ll
@@ -1,58 +1,58 @@
; RUN: llc -mtriple i386-pc-win32 < %s | FileCheck %s
$f1 = comdat any
-@v1 = global i32 0, comdat $f1
-define void @f1() comdat $f1 {
+@v1 = global i32 0, comdat($f1)
+define void @f1() comdat($f1) {
ret void
}
$f2 = comdat exactmatch
-@v2 = global i32 0, comdat $f2
-define void @f2() comdat $f2 {
+@v2 = global i32 0, comdat($f2)
+define void @f2() comdat($f2) {
ret void
}
$f3 = comdat largest
-@v3 = global i32 0, comdat $f3
-define void @f3() comdat $f3 {
+@v3 = global i32 0, comdat($f3)
+define void @f3() comdat($f3) {
ret void
}
$f4 = comdat noduplicates
-@v4 = global i32 0, comdat $f4
-define void @f4() comdat $f4 {
+@v4 = global i32 0, comdat($f4)
+define void @f4() comdat($f4) {
ret void
}
$f5 = comdat samesize
-@v5 = global i32 0, comdat $f5
-define void @f5() comdat $f5 {
+@v5 = global i32 0, comdat($f5)
+define void @f5() comdat($f5) {
ret void
}
$f6 = comdat samesize
-@v6 = global i32 0, comdat $f6
-@f6 = global i32 0, comdat $f6
+@v6 = global i32 0, comdat($f6)
+@f6 = global i32 0, comdat($f6)
$"\01@f7@0" = comdat any
-define x86_fastcallcc void @"\01@v7@0"() comdat $"\01@f7@0" {
+define x86_fastcallcc void @"\01@v7@0"() comdat($"\01@f7@0") {
ret void
}
-define x86_fastcallcc void @"\01@f7@0"() comdat $"\01@f7@0" {
+define x86_fastcallcc void @"\01@f7@0"() comdat($"\01@f7@0") {
ret void
}
$f8 = comdat any
-define x86_fastcallcc void @v8() comdat $f8 {
+define x86_fastcallcc void @v8() comdat($f8) {
ret void
}
-define x86_fastcallcc void @f8() comdat $f8 {
+define x86_fastcallcc void @f8() comdat($f8) {
ret void
}
$vftable = comdat largest
-@some_name = private unnamed_addr constant [2 x i8*] zeroinitializer, comdat $vftable
+@some_name = private unnamed_addr constant [2 x i8*] zeroinitializer, comdat($vftable)
@vftable = alias getelementptr([2 x i8*]* @some_name, i32 0, i32 1)
; CHECK: .section .text,"xr",discard,_f1
@@ -73,20 +73,20 @@ $vftable = comdat largest
; CHECK: .globl @v8@0
; CHECK: .section .text,"xr",discard,@f8@0
; CHECK: .globl @f8@0
-; CHECK: .section .bss,"wb",associative,_f1
+; CHECK: .section .bss,"bw",associative,_f1
; CHECK: .globl _v1
-; CHECK: .section .bss,"wb",associative,_f2
+; CHECK: .section .bss,"bw",associative,_f2
; CHECK: .globl _v2
-; CHECK: .section .bss,"wb",associative,_f3
+; CHECK: .section .bss,"bw",associative,_f3
; CHECK: .globl _v3
-; CHECK: .section .bss,"wb",associative,_f4
+; CHECK: .section .bss,"bw",associative,_f4
; CHECK: .globl _v4
-; CHECK: .section .bss,"wb",associative,_f5
+; CHECK: .section .bss,"bw",associative,_f5
; CHECK: .globl _v5
-; CHECK: .section .bss,"wb",associative,_f6
+; CHECK: .section .bss,"bw",associative,_f6
; CHECK: .globl _v6
-; CHECK: .section .bss,"wb",same_size,_f6
+; CHECK: .section .bss,"bw",same_size,_f6
; CHECK: .globl _f6
-; CHECK: .section .rdata,"rd",largest,_vftable
+; CHECK: .section .rdata,"dr",largest,_vftable
; CHECK: .globl _vftable
; CHECK: _vftable = L_some_name+4
diff --git a/test/CodeGen/X86/coff-comdat2.ll b/test/CodeGen/X86/coff-comdat2.ll
index 58bc04e..a417d09 100644
--- a/test/CodeGen/X86/coff-comdat2.ll
+++ b/test/CodeGen/X86/coff-comdat2.ll
@@ -5,5 +5,5 @@ target triple = "i686-pc-windows-msvc"
$foo = comdat largest
@foo = global i32 0
-@bar = global i32 0, comdat $foo
+@bar = global i32 0, comdat($foo)
; CHECK: Associative COMDAT symbol 'foo' is not a key for its COMDAT.
diff --git a/test/CodeGen/X86/coff-comdat3.ll b/test/CodeGen/X86/coff-comdat3.ll
index 76e464b..01651ce 100644
--- a/test/CodeGen/X86/coff-comdat3.ll
+++ b/test/CodeGen/X86/coff-comdat3.ll
@@ -4,5 +4,5 @@ target datalayout = "e-m:w-p:32:32-i64:64-f80:32-n8:16:32-S32"
target triple = "i686-pc-windows-msvc"
$foo = comdat largest
-@bar = global i32 0, comdat $foo
+@bar = global i32 0, comdat($foo)
; CHECK: Associative COMDAT symbol 'foo' does not exist.
diff --git a/test/CodeGen/X86/combine-and.ll b/test/CodeGen/X86/combine-and.ll
index 59a7a19..bb46ac5 100644
--- a/test/CodeGen/X86/combine-and.ll
+++ b/test/CodeGen/X86/combine-and.ll
@@ -6,159 +6,173 @@
define <4 x i32> @test1(<4 x i32> %A) {
+; CHECK-LABEL: test1:
+; CHECK: # BB#0:
+; CHECK-NEXT: pxor %xmm1, %xmm1
+; CHECK-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3,4,5,6,7]
+; CHECK-NEXT: retq
%1 = and <4 x i32> %A, <i32 -1, i32 0, i32 0, i32 0>
ret <4 x i32> %1
}
-; CHECK-LABEL: test1
-; CHECK: blendps {{.*#+}} xmm0 = xmm0[0],xmm1[1,2,3]
-; CHECK-NEXT: retq
-
define <4 x i32> @test2(<4 x i32> %A) {
+; CHECK-LABEL: test2:
+; CHECK: # BB#0:
+; CHECK-NEXT: pxor %xmm1, %xmm1
+; CHECK-NEXT: pblendw {{.*#+}} xmm0 = xmm1[0,1],xmm0[2,3],xmm1[4,5,6,7]
+; CHECK-NEXT: retq
%1 = and <4 x i32> %A, <i32 0, i32 -1, i32 0, i32 0>
ret <4 x i32> %1
}
-; CHECK-LABEL: test2
-; CHECK: pblendw {{.*#+}} xmm0 = xmm1[0,1],xmm0[2,3],xmm1[4,5,6,7]
-; CHECK-NEXT: retq
-
define <4 x i32> @test3(<4 x i32> %A) {
+; CHECK-LABEL: test3:
+; CHECK: # BB#0:
+; CHECK-NEXT: pxor %xmm1, %xmm1
+; CHECK-NEXT: pblendw {{.*#+}} xmm0 = xmm1[0,1,2,3],xmm0[4,5],xmm1[6,7]
+; CHECK-NEXT: retq
%1 = and <4 x i32> %A, <i32 0, i32 0, i32 -1, i32 0>
ret <4 x i32> %1
}
-; CHECK-LABEL: test3
-; CHECK: pblendw {{.*#+}} xmm0 = xmm1[0,1,2,3],xmm0[4,5],xmm1[6,7]
-; CHECK-NEXT: retq
-
define <4 x i32> @test4(<4 x i32> %A) {
+; CHECK-LABEL: test4:
+; CHECK: # BB#0:
+; CHECK-NEXT: pxor %xmm1, %xmm1
+; CHECK-NEXT: pblendw {{.*#+}} xmm0 = xmm1[0,1,2,3,4,5],xmm0[6,7]
+; CHECK-NEXT: retq
%1 = and <4 x i32> %A, <i32 0, i32 0, i32 0, i32 -1>
ret <4 x i32> %1
}
-; CHECK-LABEL: test4
-; CHECK: pblendw {{.*#+}} xmm0 = xmm1[0,1,2,3,4,5],xmm0[6,7]
-; CHECK-NEXT: retq
-
define <4 x i32> @test5(<4 x i32> %A) {
+; CHECK-LABEL: test5:
+; CHECK: # BB#0:
+; CHECK-NEXT: pxor %xmm1, %xmm1
+; CHECK-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3],xmm0[4,5],xmm1[6,7]
+; CHECK-NEXT: retq
%1 = and <4 x i32> %A, <i32 -1, i32 0, i32 -1, i32 0>
ret <4 x i32> %1
}
-; CHECK-LABEL: test5
-; CHECK: pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3],xmm0[4,5],xmm1[6,7]
-; CHECK-NEXT: retq
-
define <4 x i32> @test6(<4 x i32> %A) {
+; CHECK-LABEL: test6:
+; CHECK: # BB#0:
+; CHECK-NEXT: pxor %xmm1, %xmm1
+; CHECK-NEXT: pblendw {{.*#+}} xmm0 = xmm1[0,1],xmm0[2,3],xmm1[4,5],xmm0[6,7]
+; CHECK-NEXT: retq
%1 = and <4 x i32> %A, <i32 0, i32 -1, i32 0, i32 -1>
ret <4 x i32> %1
}
-; CHECK-LABEL: test6
-; CHECK: pblendw {{.*#+}} xmm0 = xmm1[0,1],xmm0[2,3],xmm1[4,5],xmm0[6,7]
-; CHECK-NEXT: retq
-
define <4 x i32> @test7(<4 x i32> %A) {
+; CHECK-LABEL: test7:
+; CHECK: # BB#0:
+; CHECK-NEXT: pxor %xmm1, %xmm1
+; CHECK-NEXT: pblendw {{.*#+}} xmm0 = xmm1[0,1,2,3],xmm0[4,5,6,7]
+; CHECK-NEXT: retq
%1 = and <4 x i32> %A, <i32 0, i32 0, i32 -1, i32 -1>
ret <4 x i32> %1
}
-; CHECK-LABEL: test7
-; CHECK: pblendw {{.*#+}} xmm0 = xmm1[0,1,2,3],xmm0[4,5,6,7]
-; CHECK-NEXT: retq
-
define <4 x i32> @test8(<4 x i32> %A) {
+; CHECK-LABEL: test8:
+; CHECK: # BB#0:
+; CHECK-NEXT: pxor %xmm1, %xmm1
+; CHECK-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3,4,5],xmm0[6,7]
+; CHECK-NEXT: retq
%1 = and <4 x i32> %A, <i32 -1, i32 0, i32 0, i32 -1>
ret <4 x i32> %1
}
-; CHECK-LABEL: test8
-; CHECK: pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3,4,5],xmm0[6,7]
-; CHECK-NEXT: retq
-
define <4 x i32> @test9(<4 x i32> %A) {
+; CHECK-LABEL: test9:
+; CHECK: # BB#0:
+; CHECK-NEXT: movq {{.*#+}} xmm0 = xmm0[0],zero
+; CHECK-NEXT: retq
%1 = and <4 x i32> %A, <i32 -1, i32 -1, i32 0, i32 0>
ret <4 x i32> %1
}
-; CHECK-LABEL: test9
-; CHECK: movq %xmm0, %xmm0
-; CHECK-NEXT: retq
-
define <4 x i32> @test10(<4 x i32> %A) {
+; CHECK-LABEL: test10:
+; CHECK: # BB#0:
+; CHECK-NEXT: pxor %xmm1, %xmm1
+; CHECK-NEXT: pblendw {{.*#+}} xmm0 = xmm1[0,1],xmm0[2,3,4,5],xmm1[6,7]
+; CHECK-NEXT: retq
%1 = and <4 x i32> %A, <i32 0, i32 -1, i32 -1, i32 0>
ret <4 x i32> %1
}
-; CHECK-LABEL: test10
-; CHECK: pblendw {{.*#+}} xmm0 = xmm1[0,1],xmm0[2,3,4,5],xmm1[6,7]
-; CHECK-NEXT: retq
-
define <4 x i32> @test11(<4 x i32> %A) {
+; CHECK-LABEL: test11:
+; CHECK: # BB#0:
+; CHECK-NEXT: pxor %xmm1, %xmm1
+; CHECK-NEXT: pblendw {{.*#+}} xmm0 = xmm1[0,1],xmm0[2,3,4,5,6,7]
+; CHECK-NEXT: retq
%1 = and <4 x i32> %A, <i32 0, i32 -1, i32 -1, i32 -1>
ret <4 x i32> %1
}
-; CHECK-LABEL: test11
-; CHECK: pblendw {{.*#+}} xmm0 = xmm1[0,1],xmm0[2,3,4,5,6,7]
-; CHECK-NEXT: retq
-
define <4 x i32> @test12(<4 x i32> %A) {
+; CHECK-LABEL: test12:
+; CHECK: # BB#0:
+; CHECK-NEXT: pxor %xmm1, %xmm1
+; CHECK-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,5],xmm1[6,7]
+; CHECK-NEXT: retq
%1 = and <4 x i32> %A, <i32 -1, i32 -1, i32 -1, i32 0>
ret <4 x i32> %1
}
-; CHECK-LABEL: test12
-; CHECK: pblendw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,5],xmm1[6,7]
-; CHECK-NEXT: retq
-
define <4 x i32> @test13(<4 x i32> %A) {
+; CHECK-LABEL: test13:
+; CHECK: # BB#0:
+; CHECK-NEXT: pxor %xmm1, %xmm1
+; CHECK-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm1[4,5],xmm0[6,7]
+; CHECK-NEXT: retq
%1 = and <4 x i32> %A, <i32 -1, i32 -1, i32 0, i32 -1>
ret <4 x i32> %1
}
-; CHECK-LABEL: test13
-; CHECK: pblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm1[4,5],xmm0[6,7]
-; CHECK-NEXT: retq
-
define <4 x i32> @test14(<4 x i32> %A) {
+; CHECK-LABEL: test14:
+; CHECK: # BB#0:
+; CHECK-NEXT: pxor %xmm1, %xmm1
+; CHECK-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3],xmm0[4,5,6,7]
+; CHECK-NEXT: retq
%1 = and <4 x i32> %A, <i32 -1, i32 0, i32 -1, i32 -1>
ret <4 x i32> %1
}
-; CHECK-LABEL: test14
-; CHECK: pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3],xmm0[4,5,6,7]
-; CHECK-NEXT: retq
-
define <4 x i32> @test15(<4 x i32> %A, <4 x i32> %B) {
+; CHECK-LABEL: test15:
+; CHECK: # BB#0:
+; CHECK-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3],xmm0[4,5,6,7]
+; CHECK-NEXT: retq
%1 = and <4 x i32> %A, <i32 -1, i32 0, i32 -1, i32 -1>
%2 = and <4 x i32> %B, <i32 0, i32 -1, i32 0, i32 0>
%3 = or <4 x i32> %1, %2
ret <4 x i32> %3
}
-; CHECK-LABEL: test15
-; CHECK: pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3],xmm0[4,5,6,7]
-; CHECK-NEXT: retq
-
define <4 x i32> @test16(<4 x i32> %A, <4 x i32> %B) {
+; CHECK-LABEL: test16:
+; CHECK: # BB#0:
+; CHECK-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3],xmm0[4,5],xmm1[6,7]
+; CHECK-NEXT: retq
%1 = and <4 x i32> %A, <i32 -1, i32 0, i32 -1, i32 0>
%2 = and <4 x i32> %B, <i32 0, i32 -1, i32 0, i32 -1>
%3 = or <4 x i32> %1, %2
ret <4 x i32> %3
}
-; CHECK-LABEL: test16
-; CHECK: pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3],xmm0[4,5],xmm1[6,7]
-; CHECK-NEXT: retq
-
define <4 x i32> @test17(<4 x i32> %A, <4 x i32> %B) {
+; CHECK-LABEL: test17:
+; CHECK: # BB#0:
+; CHECK-NEXT: pblendw {{.*#+}} xmm0 = xmm1[0,1],xmm0[2,3],xmm1[4,5],xmm0[6,7]
+; CHECK-NEXT: retq
%1 = and <4 x i32> %A, <i32 0, i32 -1, i32 0, i32 -1>
%2 = and <4 x i32> %B, <i32 -1, i32 0, i32 -1, i32 0>
%3 = or <4 x i32> %1, %2
ret <4 x i32> %3
}
-; CHECK-LABEL: test17
-; CHECK: pblendw {{.*#+}} xmm0 = xmm1[0,1],xmm0[2,3],xmm1[4,5],xmm0[6,7]
-; CHECK-NEXT: retq
diff --git a/test/CodeGen/X86/combine-or.ll b/test/CodeGen/X86/combine-or.ll
index 9539eae..8a0ffc1 100644
--- a/test/CodeGen/X86/combine-or.ll
+++ b/test/CodeGen/X86/combine-or.ll
@@ -153,7 +153,8 @@ define <4 x i32> @test12(<4 x i32> %a, <4 x i32> %b) {
define <4 x i32> @test13(<4 x i32> %a, <4 x i32> %b) {
; CHECK-LABEL: test13:
; CHECK: # BB#0:
-; CHECK-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,1],xmm1[2,3]
+; CHECK-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,2,3]
+; CHECK-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm1[4,5,6,7]
; CHECK-NEXT: retq
%shuf1 = shufflevector <4 x i32> %a, <4 x i32> zeroinitializer, <4 x i32><i32 1, i32 1, i32 4, i32 4>
%shuf2 = shufflevector <4 x i32> %b, <4 x i32> zeroinitializer, <4 x i32><i32 4, i32 4, i32 2, i32 3>
@@ -177,8 +178,9 @@ define <2 x i64> @test14(<2 x i64> %a, <2 x i64> %b) {
define <4 x i32> @test15(<4 x i32> %a, <4 x i32> %b) {
; CHECK-LABEL: test15:
; CHECK: # BB#0:
-; CHECK-NEXT: shufps {{.*#+}} xmm1 = xmm1[2,1],xmm0[2,1]
-; CHECK-NEXT: movaps %xmm1, %xmm0
+; CHECK-NEXT: pshufd {{.*#+}} xmm2 = xmm0[0,1,2,1]
+; CHECK-NEXT: pshufd {{.*#+}} xmm0 = xmm1[2,1,2,3]
+; CHECK-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm2[4,5,6,7]
; CHECK-NEXT: retq
%shuf1 = shufflevector <4 x i32> %a, <4 x i32> zeroinitializer, <4 x i32><i32 4, i32 4, i32 2, i32 1>
%shuf2 = shufflevector <4 x i32> %b, <4 x i32> zeroinitializer, <4 x i32><i32 2, i32 1, i32 4, i32 4>
@@ -206,12 +208,9 @@ define <2 x i64> @test16(<2 x i64> %a, <2 x i64> %b) {
define <4 x i32> @test17(<4 x i32> %a, <4 x i32> %b) {
; CHECK-LABEL: test17:
; CHECK: # BB#0:
-; CHECK-NEXT: xorps %xmm2, %xmm2
-; CHECK-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,1],xmm2[0,0]
-; CHECK-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,0],xmm0[0,2]
-; CHECK-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,2,1,3]
-; CHECK-NEXT: orps %xmm1, %xmm2
-; CHECK-NEXT: movaps %xmm2, %xmm0
+; CHECK-NEXT: psllq $32, %xmm0
+; CHECK-NEXT: movq {{.*#+}} xmm1 = xmm1[0],zero
+; CHECK-NEXT: por %xmm1, %xmm0
; CHECK-NEXT: retq
%shuf1 = shufflevector <4 x i32> %a, <4 x i32> zeroinitializer, <4 x i32><i32 4, i32 0, i32 4, i32 2>
%shuf2 = shufflevector <4 x i32> %b, <4 x i32> zeroinitializer, <4 x i32><i32 0, i32 1, i32 4, i32 4>
@@ -223,10 +222,10 @@ define <4 x i32> @test17(<4 x i32> %a, <4 x i32> %b) {
define <4 x i32> @test18(<4 x i32> %a, <4 x i32> %b) {
; CHECK-LABEL: test18:
; CHECK: # BB#0:
-; CHECK-NEXT: xorps %xmm2, %xmm2
-; CHECK-NEXT: blendps {{.*#+}} xmm0 = xmm0[0],xmm2[1,2,3]
+; CHECK-NEXT: pxor %xmm2, %xmm2
+; CHECK-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm2[2,3,4,5,6,7]
; CHECK-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,0,1,1]
-; CHECK-NEXT: blendps {{.*#+}} xmm1 = xmm1[0],xmm2[1,2,3]
+; CHECK-NEXT: pblendw {{.*#+}} xmm1 = xmm1[0,1],xmm2[2,3,4,5,6,7]
; CHECK-NEXT: por %xmm1, %xmm0
; CHECK-NEXT: retq
%shuf1 = shufflevector <4 x i32> %a, <4 x i32> zeroinitializer, <4 x i32><i32 4, i32 0, i32 4, i32 4>
@@ -239,14 +238,12 @@ define <4 x i32> @test18(<4 x i32> %a, <4 x i32> %b) {
define <4 x i32> @test19(<4 x i32> %a, <4 x i32> %b) {
; CHECK-LABEL: test19:
; CHECK: # BB#0:
-; CHECK-NEXT: xorps %xmm2, %xmm2
-; CHECK-NEXT: xorps %xmm3, %xmm3
-; CHECK-NEXT: shufps {{.*#+}} xmm3 = xmm3[0,0],xmm0[0,3]
-; CHECK-NEXT: shufps {{.*#+}} xmm3 = xmm3[0,2,1,3]
-; CHECK-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,0],xmm1[0,0]
-; CHECK-NEXT: shufps {{.*#+}} xmm2 = xmm2[2,0],xmm1[2,2]
-; CHECK-NEXT: orps %xmm3, %xmm2
-; CHECK-NEXT: movaps %xmm2, %xmm0
+; CHECK-NEXT: pshufd {{.*#+}} xmm2 = xmm0[0,0,2,3]
+; CHECK-NEXT: pxor %xmm3, %xmm3
+; CHECK-NEXT: pblendw {{.*#+}} xmm2 = xmm3[0,1],xmm2[2,3],xmm3[4,5],xmm2[6,7]
+; CHECK-NEXT: pshufd {{.*#+}} xmm0 = xmm1[0,1,2,2]
+; CHECK-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm3[2,3],xmm0[4,5,6,7]
+; CHECK-NEXT: por %xmm2, %xmm0
; CHECK-NEXT: retq
%shuf1 = shufflevector <4 x i32> %a, <4 x i32> zeroinitializer, <4 x i32><i32 4, i32 0, i32 4, i32 3>
%shuf2 = shufflevector <4 x i32> %b, <4 x i32> zeroinitializer, <4 x i32><i32 0, i32 4, i32 2, i32 2>
@@ -258,8 +255,8 @@ define <4 x i32> @test19(<4 x i32> %a, <4 x i32> %b) {
define <2 x i64> @test20(<2 x i64> %a, <2 x i64> %b) {
; CHECK-LABEL: test20:
; CHECK: # BB#0:
-; CHECK-NEXT: orps %xmm1, %xmm0
-; CHECK-NEXT: movq %xmm0, %xmm0
+; CHECK-NEXT: por %xmm1, %xmm0
+; CHECK-NEXT: movq {{.*#+}} xmm0 = xmm0[0],zero
; CHECK-NEXT: retq
%shuf1 = shufflevector <2 x i64> %a, <2 x i64> zeroinitializer, <2 x i32><i32 0, i32 2>
%shuf2 = shufflevector <2 x i64> %b, <2 x i64> zeroinitializer, <2 x i32><i32 0, i32 2>
@@ -271,9 +268,8 @@ define <2 x i64> @test20(<2 x i64> %a, <2 x i64> %b) {
define <2 x i64> @test21(<2 x i64> %a, <2 x i64> %b) {
; CHECK-LABEL: test21:
; CHECK: # BB#0:
-; CHECK-NEXT: orps %xmm1, %xmm0
-; CHECK-NEXT: movq %xmm0, %xmm0
-; CHECK-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
+; CHECK-NEXT: por %xmm1, %xmm0
+; CHECK-NEXT: pslldq {{.*#+}} xmm0 = zero,zero,zero,zero,zero,zero,zero,zero,xmm0[0,1,2,3,4,5,6,7]
; CHECK-NEXT: retq
%shuf1 = shufflevector <2 x i64> %a, <2 x i64> zeroinitializer, <2 x i32><i32 2, i32 0>
%shuf2 = shufflevector <2 x i64> %b, <2 x i64> zeroinitializer, <2 x i32><i32 2, i32 0>
diff --git a/test/CodeGen/X86/commute-clmul.ll b/test/CodeGen/X86/commute-clmul.ll
new file mode 100644
index 0000000..fe3e556
--- /dev/null
+++ b/test/CodeGen/X86/commute-clmul.ll
@@ -0,0 +1,60 @@
+; RUN: llc -O3 -mtriple=x86_64-unknown -mcpu=x86-64 -mattr=+sse2,+pclmul < %s | FileCheck %s --check-prefix=SSE
+; RUN: llc -O3 -mtriple=x86_64-unknown -mcpu=x86-64 -mattr=+avx2,+pclmul < %s | FileCheck %s --check-prefix=AVX
+
+declare <2 x i64> @llvm.x86.pclmulqdq(<2 x i64>, <2 x i64>, i8) nounwind readnone
+
+define <2 x i64> @commute_lq_lq(<2 x i64>* %a0, <2 x i64> %a1) #0 {
+ ;SSE-LABEL: commute_lq_lq
+ ;SSE: pclmulqdq $0, (%rdi), %xmm0
+ ;SSE-NEXT: retq
+
+ ;AVX-LABEL: commute_lq_lq
+ ;AVX: vpclmulqdq $0, (%rdi), %xmm0, %xmm0
+ ;AVX-NEXT: retq
+
+ %1 = load <2 x i64>* %a0
+ %2 = call <2 x i64> @llvm.x86.pclmulqdq(<2 x i64> %1, <2 x i64> %a1, i8 0)
+ ret <2 x i64> %2
+}
+
+define <2 x i64> @commute_lq_hq(<2 x i64>* %a0, <2 x i64> %a1) #0 {
+ ;SSE-LABEL: commute_lq_hq
+ ;SSE: pclmulqdq $1, (%rdi), %xmm0
+ ;SSE-NEXT: retq
+
+ ;AVX-LABEL: commute_lq_hq
+ ;AVX: vpclmulqdq $1, (%rdi), %xmm0, %xmm0
+ ;AVX-NEXT: retq
+
+ %1 = load <2 x i64>* %a0
+ %2 = call <2 x i64> @llvm.x86.pclmulqdq(<2 x i64> %1, <2 x i64> %a1, i8 16)
+ ret <2 x i64> %2
+}
+
+define <2 x i64> @commute_hq_lq(<2 x i64>* %a0, <2 x i64> %a1) #0 {
+ ;SSE-LABEL: commute_hq_lq
+ ;SSE: pclmulqdq $16, (%rdi), %xmm0
+ ;SSE-NEXT: retq
+
+ ;AVX-LABEL: commute_hq_lq
+ ;AVX: vpclmulqdq $16, (%rdi), %xmm0, %xmm0
+ ;AVX-NEXT: retq
+
+ %1 = load <2 x i64>* %a0
+ %2 = call <2 x i64> @llvm.x86.pclmulqdq(<2 x i64> %1, <2 x i64> %a1, i8 1)
+ ret <2 x i64> %2
+}
+
+define <2 x i64> @commute_hq_hq(<2 x i64>* %a0, <2 x i64> %a1) #0 {
+ ;SSE-LABEL: commute_hq_hq
+ ;SSE: pclmulqdq $17, (%rdi), %xmm0
+ ;SSE-NEXT: retq
+
+ ;AVX-LABEL: commute_hq_hq
+ ;AVX: vpclmulqdq $17, (%rdi), %xmm0, %xmm0
+ ;AVX-NEXT: retq
+
+ %1 = load <2 x i64>* %a0
+ %2 = call <2 x i64> @llvm.x86.pclmulqdq(<2 x i64> %1, <2 x i64> %a1, i8 17)
+ ret <2 x i64> %2
+}
diff --git a/test/CodeGen/X86/commute-fcmp.ll b/test/CodeGen/X86/commute-fcmp.ll
new file mode 100644
index 0000000..0d7f2af
--- /dev/null
+++ b/test/CodeGen/X86/commute-fcmp.ll
@@ -0,0 +1,340 @@
+; RUN: llc -O3 -mtriple=x86_64-unknown -mcpu=x86-64 -mattr=+sse2 < %s | FileCheck %s --check-prefix=SSE
+; RUN: llc -O3 -mtriple=x86_64-unknown -mcpu=x86-64 -mattr=+avx2 < %s | FileCheck %s --check-prefix=AVX
+
+;
+; Float Comparisons
+; Only equal/not-equal/ordered/unordered can be safely commuted
+;
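+; For illustration only (a hedged sketch of the expected codegen): a
+; symmetric predicate can fold the memory operand directly, e.g.
+;   cmpeqps (%rdi), %xmm0
+; while a one-sided predicate such as 'lt' cannot have its operands swapped
+; without changing the result, so the memory operand is loaded into a
+; register first, as the lt/le tests below check:
+;   movaps (%rdi), %xmm1
+;   cmpltps %xmm0, %xmm1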
+
+define <4 x i32> @commute_cmpps_eq(<4 x float>* %a0, <4 x float> %a1) #0 {
+ ;SSE-LABEL: commute_cmpps_eq
+ ;SSE: cmpeqps (%rdi), %xmm0
+ ;SSE-NEXT: retq
+
+ ;AVX-LABEL: commute_cmpps_eq
+ ;AVX: vcmpeqps (%rdi), %xmm0, %xmm0
+ ;AVX-NEXT: retq
+
+ %1 = load <4 x float>* %a0
+ %2 = fcmp oeq <4 x float> %1, %a1
+ %3 = sext <4 x i1> %2 to <4 x i32>
+ ret <4 x i32> %3
+}
+
+define <4 x i32> @commute_cmpps_ne(<4 x float>* %a0, <4 x float> %a1) #0 {
+ ;SSE-LABEL: commute_cmpps_ne
+ ;SSE: cmpneqps (%rdi), %xmm0
+ ;SSE-NEXT: retq
+
+ ;AVX-LABEL: commute_cmpps_ne
+ ;AVX: vcmpneqps (%rdi), %xmm0, %xmm0
+ ;AVX-NEXT: retq
+
+ %1 = load <4 x float>* %a0
+ %2 = fcmp une <4 x float> %1, %a1
+ %3 = sext <4 x i1> %2 to <4 x i32>
+ ret <4 x i32> %3
+}
+
+define <4 x i32> @commute_cmpps_ord(<4 x float>* %a0, <4 x float> %a1) #0 {
+ ;SSE-LABEL: commute_cmpps_ord
+ ;SSE: cmpordps (%rdi), %xmm0
+ ;SSE-NEXT: retq
+
+ ;AVX-LABEL: commute_cmpps_ord
+ ;AVX: vcmpordps (%rdi), %xmm0, %xmm0
+ ;AVX-NEXT: retq
+
+ %1 = load <4 x float>* %a0
+ %2 = fcmp ord <4 x float> %1, %a1
+ %3 = sext <4 x i1> %2 to <4 x i32>
+ ret <4 x i32> %3
+}
+
+define <4 x i32> @commute_cmpps_uno(<4 x float>* %a0, <4 x float> %a1) #0 {
+ ;SSE-LABEL: commute_cmpps_uno
+ ;SSE: cmpunordps (%rdi), %xmm0
+ ;SSE-NEXT: retq
+
+ ;AVX-LABEL: commute_cmpps_uno
+ ;AVX: vcmpunordps (%rdi), %xmm0, %xmm0
+ ;AVX-NEXT: retq
+
+ %1 = load <4 x float>* %a0
+ %2 = fcmp uno <4 x float> %1, %a1
+ %3 = sext <4 x i1> %2 to <4 x i32>
+ ret <4 x i32> %3
+}
+
+define <4 x i32> @commute_cmpps_lt(<4 x float>* %a0, <4 x float> %a1) #0 {
+ ;SSE-LABEL: commute_cmpps_lt
+ ;SSE: movaps (%rdi), %xmm1
+ ;SSE-NEXT: cmpltps %xmm0, %xmm1
+ ;SSE-NEXT: movaps %xmm1, %xmm0
+ ;SSE-NEXT: retq
+
+ ;AVX-LABEL: commute_cmpps_lt
+ ;AVX: vmovaps (%rdi), %xmm1
+ ;AVX-NEXT: vcmpltps %xmm0, %xmm1, %xmm0
+ ;AVX-NEXT: retq
+
+ %1 = load <4 x float>* %a0
+ %2 = fcmp olt <4 x float> %1, %a1
+ %3 = sext <4 x i1> %2 to <4 x i32>
+ ret <4 x i32> %3
+}
+
+define <4 x i32> @commute_cmpps_le(<4 x float>* %a0, <4 x float> %a1) #0 {
+ ;SSE-LABEL: commute_cmpps_le
+ ;SSE: movaps (%rdi), %xmm1
+ ;SSE-NEXT: cmpleps %xmm0, %xmm1
+ ;SSE-NEXT: movaps %xmm1, %xmm0
+ ;SSE-NEXT: retq
+
+ ;AVX-LABEL: commute_cmpps_le
+ ;AVX: vmovaps (%rdi), %xmm1
+ ;AVX-NEXT: vcmpleps %xmm0, %xmm1, %xmm0
+ ;AVX-NEXT: retq
+
+ %1 = load <4 x float>* %a0
+ %2 = fcmp ole <4 x float> %1, %a1
+ %3 = sext <4 x i1> %2 to <4 x i32>
+ ret <4 x i32> %3
+}
+
+define <8 x i32> @commute_cmpps_eq_ymm(<8 x float>* %a0, <8 x float> %a1) #0 {
+ ;AVX-LABEL: commute_cmpps_eq_ymm
+ ;AVX: vcmpeqps (%rdi), %ymm0, %ymm0
+ ;AVX-NEXT: retq
+
+ %1 = load <8 x float>* %a0
+ %2 = fcmp oeq <8 x float> %1, %a1
+ %3 = sext <8 x i1> %2 to <8 x i32>
+ ret <8 x i32> %3
+}
+
+define <8 x i32> @commute_cmpps_ne_ymm(<8 x float>* %a0, <8 x float> %a1) #0 {
+ ;AVX-LABEL: commute_cmpps_ne_ymm
+ ;AVX: vcmpneqps (%rdi), %ymm0, %ymm0
+ ;AVX-NEXT: retq
+
+ %1 = load <8 x float>* %a0
+ %2 = fcmp une <8 x float> %1, %a1
+ %3 = sext <8 x i1> %2 to <8 x i32>
+ ret <8 x i32> %3
+}
+
+define <8 x i32> @commute_cmpps_ord_ymm(<8 x float>* %a0, <8 x float> %a1) #0 {
+ ;AVX-LABEL: commute_cmpps_ord_ymm
+ ;AVX: vcmpordps (%rdi), %ymm0, %ymm0
+ ;AVX-NEXT: retq
+
+ %1 = load <8 x float>* %a0
+ %2 = fcmp ord <8 x float> %1, %a1
+ %3 = sext <8 x i1> %2 to <8 x i32>
+ ret <8 x i32> %3
+}
+
+define <8 x i32> @commute_cmpps_uno_ymm(<8 x float>* %a0, <8 x float> %a1) #0 {
+ ;AVX-LABEL: commute_cmpps_uno_ymm
+ ;AVX: vcmpunordps (%rdi), %ymm0, %ymm0
+ ;AVX-NEXT: retq
+
+ %1 = load <8 x float>* %a0
+ %2 = fcmp uno <8 x float> %1, %a1
+ %3 = sext <8 x i1> %2 to <8 x i32>
+ ret <8 x i32> %3
+}
+
+define <8 x i32> @commute_cmpps_lt_ymm(<8 x float>* %a0, <8 x float> %a1) #0 {
+ ;AVX-LABEL: commute_cmpps_lt_ymm
+ ;AVX: vmovaps (%rdi), %ymm1
+ ;AVX-NEXT: vcmpltps %ymm0, %ymm1, %ymm0
+ ;AVX-NEXT: retq
+
+ %1 = load <8 x float>* %a0
+ %2 = fcmp olt <8 x float> %1, %a1
+ %3 = sext <8 x i1> %2 to <8 x i32>
+ ret <8 x i32> %3
+}
+
+define <8 x i32> @commute_cmpps_le_ymm(<8 x float>* %a0, <8 x float> %a1) #0 {
+ ;AVX-LABEL: commute_cmpps_le_ymm
+ ;AVX: vmovaps (%rdi), %ymm1
+ ;AVX-NEXT: vcmpleps %ymm0, %ymm1, %ymm0
+ ;AVX-NEXT: retq
+
+ %1 = load <8 x float>* %a0
+ %2 = fcmp ole <8 x float> %1, %a1
+ %3 = sext <8 x i1> %2 to <8 x i32>
+ ret <8 x i32> %3
+}
+
+;
+; Double Comparisons
+; Only equal/not-equal/ordered/unordered can be safely commuted
+;
+
+define <2 x i64> @commute_cmppd_eq(<2 x double>* %a0, <2 x double> %a1) #0 {
+ ;SSE-LABEL: commute_cmppd_eq
+ ;SSE: cmpeqpd (%rdi), %xmm0
+ ;SSE-NEXT: retq
+
+ ;AVX-LABEL: commute_cmppd_eq
+ ;AVX: vcmpeqpd (%rdi), %xmm0, %xmm0
+ ;AVX-NEXT: retq
+
+ %1 = load <2 x double>* %a0
+ %2 = fcmp oeq <2 x double> %1, %a1
+ %3 = sext <2 x i1> %2 to <2 x i64>
+ ret <2 x i64> %3
+}
+
+define <2 x i64> @commute_cmppd_ne(<2 x double>* %a0, <2 x double> %a1) #0 {
+ ;SSE-LABEL: commute_cmppd_ne
+ ;SSE: cmpneqpd (%rdi), %xmm0
+ ;SSE-NEXT: retq
+
+ ;AVX-LABEL: commute_cmppd_ne
+ ;AVX: vcmpneqpd (%rdi), %xmm0, %xmm0
+ ;AVX-NEXT: retq
+
+ %1 = load <2 x double>* %a0
+ %2 = fcmp une <2 x double> %1, %a1
+ %3 = sext <2 x i1> %2 to <2 x i64>
+ ret <2 x i64> %3
+}
+
+define <2 x i64> @commute_cmppd_ord(<2 x double>* %a0, <2 x double> %a1) #0 {
+ ;SSE-LABEL: commute_cmppd_ord
+ ;SSE: cmpordpd (%rdi), %xmm0
+ ;SSE-NEXT: retq
+
+ ;AVX-LABEL: commute_cmppd_ord
+ ;AVX: vcmpordpd (%rdi), %xmm0, %xmm0
+ ;AVX-NEXT: retq
+
+ %1 = load <2 x double>* %a0
+ %2 = fcmp ord <2 x double> %1, %a1
+ %3 = sext <2 x i1> %2 to <2 x i64>
+ ret <2 x i64> %3
+}
+
+define <2 x i64> @commute_cmppd_uno(<2 x double>* %a0, <2 x double> %a1) #0 {
+ ;SSE-LABEL: commute_cmppd_uno
+ ;SSE: cmpunordpd (%rdi), %xmm0
+ ;SSE-NEXT: retq
+
+ ;AVX-LABEL: commute_cmppd_uno
+ ;AVX: vcmpunordpd (%rdi), %xmm0, %xmm0
+ ;AVX-NEXT: retq
+
+ %1 = load <2 x double>* %a0
+ %2 = fcmp uno <2 x double> %1, %a1
+ %3 = sext <2 x i1> %2 to <2 x i64>
+ ret <2 x i64> %3
+}
+
+define <2 x i64> @commute_cmppd_lt(<2 x double>* %a0, <2 x double> %a1) #0 {
+ ;SSE-LABEL: commute_cmppd_lt
+ ;SSE: movapd (%rdi), %xmm1
+ ;SSE-NEXT: cmpltpd %xmm0, %xmm1
+ ;SSE-NEXT: movapd %xmm1, %xmm0
+ ;SSE-NEXT: retq
+
+ ;AVX-LABEL: commute_cmppd_lt
+ ;AVX: vmovapd (%rdi), %xmm1
+ ;AVX-NEXT: vcmpltpd %xmm0, %xmm1, %xmm0
+ ;AVX-NEXT: retq
+
+ %1 = load <2 x double>* %a0
+ %2 = fcmp olt <2 x double> %1, %a1
+ %3 = sext <2 x i1> %2 to <2 x i64>
+ ret <2 x i64> %3
+}
+
+define <2 x i64> @commute_cmppd_le(<2 x double>* %a0, <2 x double> %a1) #0 {
+ ;SSE-LABEL: commute_cmppd_le
+ ;SSE: movapd (%rdi), %xmm1
+ ;SSE-NEXT: cmplepd %xmm0, %xmm1
+ ;SSE-NEXT: movapd %xmm1, %xmm0
+ ;SSE-NEXT: retq
+
+ ;AVX-LABEL: commute_cmppd_le
+ ;AVX: vmovapd (%rdi), %xmm1
+ ;AVX-NEXT: vcmplepd %xmm0, %xmm1, %xmm0
+ ;AVX-NEXT: retq
+
+ %1 = load <2 x double>* %a0
+ %2 = fcmp ole <2 x double> %1, %a1
+ %3 = sext <2 x i1> %2 to <2 x i64>
+ ret <2 x i64> %3
+}
+
+define <4 x i64> @commute_cmppd_eq_ymm(<4 x double>* %a0, <4 x double> %a1) #0 {
+  ;AVX-LABEL: commute_cmppd_eq_ymm
+ ;AVX: vcmpeqpd (%rdi), %ymm0, %ymm0
+ ;AVX-NEXT: retq
+
+ %1 = load <4 x double>* %a0
+ %2 = fcmp oeq <4 x double> %1, %a1
+ %3 = sext <4 x i1> %2 to <4 x i64>
+ ret <4 x i64> %3
+}
+
+define <4 x i64> @commute_cmppd_ne_ymm(<4 x double>* %a0, <4 x double> %a1) #0 {
+  ;AVX-LABEL: commute_cmppd_ne_ymm
+ ;AVX: vcmpneqpd (%rdi), %ymm0, %ymm0
+ ;AVX-NEXT: retq
+
+ %1 = load <4 x double>* %a0
+ %2 = fcmp une <4 x double> %1, %a1
+ %3 = sext <4 x i1> %2 to <4 x i64>
+ ret <4 x i64> %3
+}
+
+define <4 x i64> @commute_cmppd_ord_ymm(<4 x double>* %a0, <4 x double> %a1) #0 {
+  ;AVX-LABEL: commute_cmppd_ord_ymm
+ ;AVX: vcmpordpd (%rdi), %ymm0, %ymm0
+ ;AVX-NEXT: retq
+
+ %1 = load <4 x double>* %a0
+ %2 = fcmp ord <4 x double> %1, %a1
+ %3 = sext <4 x i1> %2 to <4 x i64>
+ ret <4 x i64> %3
+}
+
+define <4 x i64> @commute_cmppd_uno_ymm(<4 x double>* %a0, <4 x double> %a1) #0 {
+  ;AVX-LABEL: commute_cmppd_uno_ymm
+ ;AVX: vcmpunordpd (%rdi), %ymm0, %ymm0
+ ;AVX-NEXT: retq
+
+ %1 = load <4 x double>* %a0
+ %2 = fcmp uno <4 x double> %1, %a1
+ %3 = sext <4 x i1> %2 to <4 x i64>
+ ret <4 x i64> %3
+}
+
+define <4 x i64> @commute_cmppd_lt_ymm(<4 x double>* %a0, <4 x double> %a1) #0 {
+  ;AVX-LABEL: commute_cmppd_lt_ymm
+ ;AVX: vmovapd (%rdi), %ymm1
+ ;AVX-NEXT: vcmpltpd %ymm0, %ymm1, %ymm0
+ ;AVX-NEXT: retq
+
+ %1 = load <4 x double>* %a0
+ %2 = fcmp olt <4 x double> %1, %a1
+ %3 = sext <4 x i1> %2 to <4 x i64>
+ ret <4 x i64> %3
+}
+
+define <4 x i64> @commute_cmppd_le_ymm(<4 x double>* %a0, <4 x double> %a1) #0 {
+  ;AVX-LABEL: commute_cmppd_le_ymm
+ ;AVX: vmovapd (%rdi), %ymm1
+ ;AVX-NEXT: vcmplepd %ymm0, %ymm1, %ymm0
+ ;AVX-NEXT: retq
+
+ %1 = load <4 x double>* %a0
+ %2 = fcmp ole <4 x double> %1, %a1
+ %3 = sext <4 x i1> %2 to <4 x i64>
+ ret <4 x i64> %3
+}
diff --git a/test/CodeGen/X86/commute-xop.ll b/test/CodeGen/X86/commute-xop.ll
new file mode 100644
index 0000000..a3e14fe
--- /dev/null
+++ b/test/CodeGen/X86/commute-xop.ll
@@ -0,0 +1,184 @@
+; RUN: llc -O3 -mtriple=x86_64-unknown-unknown -mcpu=x86-64 -mattr=+avx,+xop < %s | FileCheck %s
+
+define <16 x i8> @commute_fold_vpcomb(<16 x i8>* %a0, <16 x i8> %a1) {
+ ;CHECK-LABEL: commute_fold_vpcomb
+ ;CHECK: vpcomgtb (%rdi), %xmm0, %xmm0
+ %1 = load <16 x i8>* %a0
+ %2 = call <16 x i8> @llvm.x86.xop.vpcomb(<16 x i8> %1, <16 x i8> %a1, i8 0) ; vpcomltb
+ ret <16 x i8> %2
+}
+declare <16 x i8> @llvm.x86.xop.vpcomb(<16 x i8>, <16 x i8>, i8) nounwind readnone
+
+define <4 x i32> @commute_fold_vpcomd(<4 x i32>* %a0, <4 x i32> %a1) {
+ ;CHECK-LABEL: commute_fold_vpcomd
+ ;CHECK: vpcomged (%rdi), %xmm0, %xmm0
+ %1 = load <4 x i32>* %a0
+ %2 = call <4 x i32> @llvm.x86.xop.vpcomd(<4 x i32> %1, <4 x i32> %a1, i8 1) ; vpcomled
+ ret <4 x i32> %2
+}
+declare <4 x i32> @llvm.x86.xop.vpcomd(<4 x i32>, <4 x i32>, i8) nounwind readnone
+
+define <2 x i64> @commute_fold_vpcomq(<2 x i64>* %a0, <2 x i64> %a1) {
+ ;CHECK-LABEL: commute_fold_vpcomq
+ ;CHECK: vpcomltq (%rdi), %xmm0, %xmm0
+ %1 = load <2 x i64>* %a0
+ %2 = call <2 x i64> @llvm.x86.xop.vpcomq(<2 x i64> %1, <2 x i64> %a1, i8 2) ; vpcomgtq
+ ret <2 x i64> %2
+}
+declare <2 x i64> @llvm.x86.xop.vpcomq(<2 x i64>, <2 x i64>, i8) nounwind readnone
+
+define <16 x i8> @commute_fold_vpcomub(<16 x i8>* %a0, <16 x i8> %a1) {
+ ;CHECK-LABEL: commute_fold_vpcomub
+ ;CHECK: vpcomleub (%rdi), %xmm0, %xmm0
+ %1 = load <16 x i8>* %a0
+ %2 = call <16 x i8> @llvm.x86.xop.vpcomub(<16 x i8> %1, <16 x i8> %a1, i8 3) ; vpcomgeub
+ ret <16 x i8> %2
+}
+declare <16 x i8> @llvm.x86.xop.vpcomub(<16 x i8>, <16 x i8>, i8) nounwind readnone
+
+define <4 x i32> @commute_fold_vpcomud(<4 x i32>* %a0, <4 x i32> %a1) {
+ ;CHECK-LABEL: commute_fold_vpcomud
+ ;CHECK: vpcomequd (%rdi), %xmm0, %xmm0
+ %1 = load <4 x i32>* %a0
+ %2 = call <4 x i32> @llvm.x86.xop.vpcomud(<4 x i32> %1, <4 x i32> %a1, i8 4) ; vpcomequd
+ ret <4 x i32> %2
+}
+declare <4 x i32> @llvm.x86.xop.vpcomud(<4 x i32>, <4 x i32>, i8) nounwind readnone
+
+define <2 x i64> @commute_fold_vpcomuq(<2 x i64>* %a0, <2 x i64> %a1) {
+ ;CHECK-LABEL: commute_fold_vpcomuq
+ ;CHECK: vpcomnequq (%rdi), %xmm0, %xmm0
+ %1 = load <2 x i64>* %a0
+ %2 = call <2 x i64> @llvm.x86.xop.vpcomuq(<2 x i64> %1, <2 x i64> %a1, i8 5) ; vpcomnequq
+ ret <2 x i64> %2
+}
+declare <2 x i64> @llvm.x86.xop.vpcomuq(<2 x i64>, <2 x i64>, i8) nounwind readnone
+
+define <8 x i16> @commute_fold_vpcomuw(<8 x i16>* %a0, <8 x i16> %a1) {
+ ;CHECK-LABEL: commute_fold_vpcomuw
+ ;CHECK: vpcomfalseuw (%rdi), %xmm0, %xmm0
+ %1 = load <8 x i16>* %a0
+ %2 = call <8 x i16> @llvm.x86.xop.vpcomuw(<8 x i16> %1, <8 x i16> %a1, i8 6) ; vpcomfalseuw
+ ret <8 x i16> %2
+}
+declare <8 x i16> @llvm.x86.xop.vpcomuw(<8 x i16>, <8 x i16>, i8) nounwind readnone
+
+define <8 x i16> @commute_fold_vpcomw(<8 x i16>* %a0, <8 x i16> %a1) {
+ ;CHECK-LABEL: commute_fold_vpcomw
+ ;CHECK: vpcomtruew (%rdi), %xmm0, %xmm0
+ %1 = load <8 x i16>* %a0
+ %2 = call <8 x i16> @llvm.x86.xop.vpcomw(<8 x i16> %1, <8 x i16> %a1, i8 7) ; vpcomtruew
+ ret <8 x i16> %2
+}
+declare <8 x i16> @llvm.x86.xop.vpcomw(<8 x i16>, <8 x i16>, i8) nounwind readnone
+
+define <4 x i32> @commute_fold_vpmacsdd(<4 x i32>* %a0, <4 x i32> %a1, <4 x i32> %a2) {
+ ;CHECK-LABEL: commute_fold_vpmacsdd
+ ;CHECK: vpmacsdd %xmm1, (%rdi), %xmm0, %xmm0
+ %1 = load <4 x i32>* %a0
+ %2 = call <4 x i32> @llvm.x86.xop.vpmacsdd(<4 x i32> %1, <4 x i32> %a1, <4 x i32> %a2)
+ ret <4 x i32> %2
+}
+declare <4 x i32> @llvm.x86.xop.vpmacsdd(<4 x i32>, <4 x i32>, <4 x i32>) nounwind readnone
+
+define <2 x i64> @commute_fold_vpmacsdqh(<4 x i32>* %a0, <4 x i32> %a1, <2 x i64> %a2) {
+ ;CHECK-LABEL: commute_fold_vpmacsdqh
+ ;CHECK: vpmacsdqh %xmm1, (%rdi), %xmm0, %xmm0
+ %1 = load <4 x i32>* %a0
+ %2 = call <2 x i64> @llvm.x86.xop.vpmacsdqh(<4 x i32> %1, <4 x i32> %a1, <2 x i64> %a2)
+ ret <2 x i64> %2
+}
+declare <2 x i64> @llvm.x86.xop.vpmacsdqh(<4 x i32>, <4 x i32>, <2 x i64>) nounwind readnone
+
+define <2 x i64> @commute_fold_vpmacsdql(<4 x i32>* %a0, <4 x i32> %a1, <2 x i64> %a2) {
+ ;CHECK-LABEL: commute_fold_vpmacsdql
+ ;CHECK: vpmacsdql %xmm1, (%rdi), %xmm0, %xmm0
+ %1 = load <4 x i32>* %a0
+ %2 = call <2 x i64> @llvm.x86.xop.vpmacsdql(<4 x i32> %1, <4 x i32> %a1, <2 x i64> %a2)
+ ret <2 x i64> %2
+}
+declare <2 x i64> @llvm.x86.xop.vpmacsdql(<4 x i32>, <4 x i32>, <2 x i64>) nounwind readnone
+
+define <4 x i32> @commute_fold_vpmacssdd(<4 x i32>* %a0, <4 x i32> %a1, <4 x i32> %a2) {
+ ;CHECK-LABEL: commute_fold_vpmacssdd
+ ;CHECK: vpmacssdd %xmm1, (%rdi), %xmm0, %xmm0
+ %1 = load <4 x i32>* %a0
+ %2 = call <4 x i32> @llvm.x86.xop.vpmacssdd(<4 x i32> %1, <4 x i32> %a1, <4 x i32> %a2)
+ ret <4 x i32> %2
+}
+declare <4 x i32> @llvm.x86.xop.vpmacssdd(<4 x i32>, <4 x i32>, <4 x i32>) nounwind readnone
+
+define <2 x i64> @commute_fold_vpmacssdqh(<4 x i32>* %a0, <4 x i32> %a1, <2 x i64> %a2) {
+ ;CHECK-LABEL: commute_fold_vpmacssdqh
+ ;CHECK: vpmacssdqh %xmm1, (%rdi), %xmm0, %xmm0
+ %1 = load <4 x i32>* %a0
+ %2 = call <2 x i64> @llvm.x86.xop.vpmacssdqh(<4 x i32> %1, <4 x i32> %a1, <2 x i64> %a2)
+ ret <2 x i64> %2
+}
+declare <2 x i64> @llvm.x86.xop.vpmacssdqh(<4 x i32>, <4 x i32>, <2 x i64>) nounwind readnone
+
+define <2 x i64> @commute_fold_vpmacssdql(<4 x i32>* %a0, <4 x i32> %a1, <2 x i64> %a2) {
+ ;CHECK-LABEL: commute_fold_vpmacssdql
+ ;CHECK: vpmacssdql %xmm1, (%rdi), %xmm0, %xmm0
+ %1 = load <4 x i32>* %a0
+ %2 = call <2 x i64> @llvm.x86.xop.vpmacssdql(<4 x i32> %1, <4 x i32> %a1, <2 x i64> %a2)
+ ret <2 x i64> %2
+}
+declare <2 x i64> @llvm.x86.xop.vpmacssdql(<4 x i32>, <4 x i32>, <2 x i64>) nounwind readnone
+
+define <4 x i32> @commute_fold_vpmacsswd(<8 x i16>* %a0, <8 x i16> %a1, <4 x i32> %a2) {
+ ;CHECK-LABEL: commute_fold_vpmacsswd
+ ;CHECK: vpmacsswd %xmm1, (%rdi), %xmm0, %xmm0
+ %1 = load <8 x i16>* %a0
+ %2 = call <4 x i32> @llvm.x86.xop.vpmacsswd(<8 x i16> %1, <8 x i16> %a1, <4 x i32> %a2)
+ ret <4 x i32> %2
+}
+declare <4 x i32> @llvm.x86.xop.vpmacsswd(<8 x i16>, <8 x i16>, <4 x i32>) nounwind readnone
+
+define <8 x i16> @commute_fold_vpmacssww(<8 x i16>* %a0, <8 x i16> %a1, <8 x i16> %a2) {
+ ;CHECK-LABEL: commute_fold_vpmacssww
+ ;CHECK: vpmacssww %xmm1, (%rdi), %xmm0, %xmm0
+ %1 = load <8 x i16>* %a0
+ %2 = call <8 x i16> @llvm.x86.xop.vpmacssww(<8 x i16> %1, <8 x i16> %a1, <8 x i16> %a2)
+ ret <8 x i16> %2
+}
+declare <8 x i16> @llvm.x86.xop.vpmacssww(<8 x i16>, <8 x i16>, <8 x i16>) nounwind readnone
+
+define <4 x i32> @commute_fold_vpmacswd(<8 x i16>* %a0, <8 x i16> %a1, <4 x i32> %a2) {
+ ;CHECK-LABEL: commute_fold_vpmacswd
+ ;CHECK: vpmacswd %xmm1, (%rdi), %xmm0, %xmm0
+ %1 = load <8 x i16>* %a0
+ %2 = call <4 x i32> @llvm.x86.xop.vpmacswd(<8 x i16> %1, <8 x i16> %a1, <4 x i32> %a2)
+ ret <4 x i32> %2
+}
+declare <4 x i32> @llvm.x86.xop.vpmacswd(<8 x i16>, <8 x i16>, <4 x i32>) nounwind readnone
+
+define <8 x i16> @commute_fold_vpmacsww(<8 x i16>* %a0, <8 x i16> %a1, <8 x i16> %a2) {
+ ;CHECK-LABEL: commute_fold_vpmacsww
+ ;CHECK: vpmacsww %xmm1, (%rdi), %xmm0, %xmm0
+ %1 = load <8 x i16>* %a0
+ %2 = call <8 x i16> @llvm.x86.xop.vpmacsww(<8 x i16> %1, <8 x i16> %a1, <8 x i16> %a2)
+ ret <8 x i16> %2
+}
+declare <8 x i16> @llvm.x86.xop.vpmacsww(<8 x i16>, <8 x i16>, <8 x i16>) nounwind readnone
+
+define <4 x i32> @commute_fold_vpmadcsswd(<8 x i16>* %a0, <8 x i16> %a1, <4 x i32> %a2) {
+ ;CHECK-LABEL: commute_fold_vpmadcsswd
+ ;CHECK: vpmadcsswd %xmm1, (%rdi), %xmm0, %xmm0
+ %1 = load <8 x i16>* %a0
+ %2 = call <4 x i32> @llvm.x86.xop.vpmadcsswd(<8 x i16> %1, <8 x i16> %a1, <4 x i32> %a2)
+ ret <4 x i32> %2
+}
+declare <4 x i32> @llvm.x86.xop.vpmadcsswd(<8 x i16>, <8 x i16>, <4 x i32>) nounwind readnone
+
+define <4 x i32> @commute_fold_vpmadcswd(<8 x i16>* %a0, <8 x i16> %a1, <4 x i32> %a2) {
+ ;CHECK-LABEL: commute_fold_vpmadcswd
+ ;CHECK: vpmadcswd %xmm1, (%rdi), %xmm0, %xmm0
+ %1 = load <8 x i16>* %a0
+ %2 = call <4 x i32> @llvm.x86.xop.vpmadcswd(<8 x i16> %1, <8 x i16> %a1, <4 x i32> %a2)
+ ret <4 x i32> %2
+}
+declare <4 x i32> @llvm.x86.xop.vpmadcswd(<8 x i16>, <8 x i16>, <4 x i32>) nounwind readnone
+
+
+
diff --git a/test/CodeGen/X86/compact-unwind.ll b/test/CodeGen/X86/compact-unwind.ll
index 9d3a125..d3b89a5 100644
--- a/test/CodeGen/X86/compact-unwind.ll
+++ b/test/CodeGen/X86/compact-unwind.ll
@@ -1,12 +1,20 @@
; RUN: llc < %s -disable-fp-elim -mtriple x86_64-apple-darwin11 -mcpu corei7 | FileCheck -check-prefix=ASM %s
; RUN: llc < %s -disable-fp-elim -mtriple x86_64-apple-darwin11 -mcpu corei7 -filetype=obj -o - \
-; RUN: | llvm-objdump -triple x86_64-apple-darwin11 -s - \
+; RUN: | llvm-objdump -triple x86_64-apple-darwin11 -unwind-info - \
; RUN: | FileCheck -check-prefix=CU %s
; RUN: llc < %s -disable-fp-elim -mtriple x86_64-apple-darwin11 -mcpu corei7 \
; RUN: | llvm-mc -triple x86_64-apple-darwin11 -filetype=obj -o - \
-; RUN: | llvm-objdump -triple x86_64-apple-darwin11 -s - \
+; RUN: | llvm-objdump -triple x86_64-apple-darwin11 -unwind-info - \
; RUN: | FileCheck -check-prefix=FROM-ASM %s
+; RUN: llc < %s -mtriple x86_64-apple-macosx10.8.0 -mcpu corei7 -filetype=obj -o - \
+; RUN: | llvm-objdump -triple x86_64-apple-macosx10.8.0 -unwind-info - \
+; RUN: | FileCheck -check-prefix=NOFP-CU %s
+; RUN: llc < %s -mtriple x86_64-apple-darwin11 -mcpu corei7 \
+; RUN: | llvm-mc -triple x86_64-apple-darwin11 -filetype=obj -o - \
+; RUN: | llvm-objdump -triple x86_64-apple-darwin11 -unwind-info - \
+; RUN: | FileCheck -check-prefix=NOFP-FROM-ASM %s
+
%ty = type { i8* }
@gv = external global i32
@@ -17,15 +25,19 @@
; Even though we can't encode %rax into the compact unwind, we still want to be
; able to generate a compact unwind encoding in this particular case.
-; CU: Contents of section __compact_unwind:
-; CU-NEXT: 0020 00000000 00000000 1e000000 01000101
-; CU-NEXT: 0030 00000000 00000000 00000000 00000000
+; CU: Contents of __compact_unwind section:
+; CU-NEXT: Entry at offset 0x0:
+; CU-NEXT: start: 0x0 _test0
+; CU-NEXT: length: 0x1e
+; CU-NEXT: compact encoding: 0x01010001
-; FROM-ASM: Contents of section __compact_unwind:
-; FROM-ASM-NEXT: 0020 00000000 00000000 1e000000 01000101
-; FROM-ASM-NEXT: 0030 00000000 00000000 00000000 00000000
+; FROM-ASM: Contents of __compact_unwind section:
+; FROM-ASM-NEXT: Entry at offset 0x0:
+; FROM-ASM-NEXT: start: 0x0 _test0
+; FROM-ASM-NEXT: length: 0x1e
+; FROM-ASM-NEXT: compact encoding: 0x01010001
-define i8* @foo(i64 %size) {
+define i8* @test0(i64 %size) {
%addr = alloca i64, align 8
%tmp20 = load i32* @gv, align 4
%tmp21 = call i32 @bar()
@@ -39,3 +51,61 @@ define i8* @foo(i64 %size) {
}
declare i32 @bar()
+
+%"struct.dyld::MappedRanges" = type { [400 x %struct.anon], %"struct.dyld::MappedRanges"* }
+%struct.anon = type { %class.ImageLoader*, i64, i64 }
+%class.ImageLoader = type { i32 (...)**, i8*, i8*, i32, i64, i64, i32, i32, %"struct.ImageLoader::recursive_lock"*, i16, i16, [4 x i8] }
+%"struct.ImageLoader::recursive_lock" = type { i32, i32 }
+
+@G1 = external hidden global %"struct.dyld::MappedRanges", align 8
+
+declare void @OSMemoryBarrier() optsize
+
+; Test that the code below uses the UNWIND_X86_64_MODE_STACK_IMMD compact
+; unwind encoding.
+
+; NOFP-CU: Entry at offset 0x20:
+; NOFP-CU-NEXT: start: 0x1d _test1
+; NOFP-CU-NEXT: length: 0x42
+; NOFP-CU-NEXT: compact encoding: 0x02040c0a
+
+; NOFP-FROM-ASM: Entry at offset 0x20:
+; NOFP-FROM-ASM-NEXT: start: 0x1d _test1
+; NOFP-FROM-ASM-NEXT: length: 0x42
+; NOFP-FROM-ASM-NEXT: compact encoding: 0x02040c0a
+
+define void @test1(%class.ImageLoader* %image) optsize ssp uwtable {
+entry:
+ br label %for.cond1.preheader
+
+for.cond1.preheader: ; preds = %for.inc10, %entry
+ %p.019 = phi %"struct.dyld::MappedRanges"* [ @G1, %entry ], [ %1, %for.inc10 ]
+ br label %for.body3
+
+for.body3: ; preds = %for.inc, %for.cond1.preheader
+ %indvars.iv = phi i64 [ 0, %for.cond1.preheader ], [ %indvars.iv.next, %for.inc ]
+ %image4 = getelementptr inbounds %"struct.dyld::MappedRanges"* %p.019, i64 0, i32 0, i64 %indvars.iv, i32 0
+ %0 = load %class.ImageLoader** %image4, align 8
+ %cmp5 = icmp eq %class.ImageLoader* %0, %image
+ br i1 %cmp5, label %if.then, label %for.inc
+
+if.then: ; preds = %for.body3
+ tail call void @OSMemoryBarrier() optsize
+ store %class.ImageLoader* null, %class.ImageLoader** %image4, align 8
+ br label %for.inc
+
+for.inc: ; preds = %if.then, %for.body3
+ %indvars.iv.next = add i64 %indvars.iv, 1
+ %lftr.wideiv = trunc i64 %indvars.iv.next to i32
+ %exitcond = icmp eq i32 %lftr.wideiv, 400
+ br i1 %exitcond, label %for.inc10, label %for.body3
+
+for.inc10: ; preds = %for.inc
+ %next = getelementptr inbounds %"struct.dyld::MappedRanges"* %p.019, i64 0, i32 1
+ %1 = load %"struct.dyld::MappedRanges"** %next, align 8
+ %cmp = icmp eq %"struct.dyld::MappedRanges"* %1, null
+ br i1 %cmp, label %for.end11, label %for.cond1.preheader
+
+for.end11: ; preds = %for.inc10
+ ret void
+}
diff --git a/test/CodeGen/X86/constant-combines.ll b/test/CodeGen/X86/constant-combines.ll
new file mode 100644
index 0000000..d2a6ef4
--- /dev/null
+++ b/test/CodeGen/X86/constant-combines.ll
@@ -0,0 +1,35 @@
+; RUN: llc < %s | FileCheck %s
+
+target datalayout = "e-m:o-i64:64-f80:128-n8:16:32:64-S128"
+target triple = "x86_64-unknown-unknown"
+
+define void @PR22524({ float, float }* %arg) {
+; Check that we can materialize the zero constants we store in two places here,
+; and at least form a legal store of the floating point value at the end.
+; The DAG combiner at one point contained bugs that, given enough permutations,
+; would incorrectly form an illegal operation for the last of these stores when
+; it folded it to a zero too late to legalize the zero store operation. If this
+; ever starts forming a zero store instead of movss, the test case has stopped
+; being useful.
+;
+; CHECK-LABEL: PR22524:
+entry:
+ %0 = getelementptr inbounds { float, float }* %arg, i32 0, i32 1
+ store float 0.000000e+00, float* %0, align 4
+; CHECK: movl $0, 4(%rdi)
+
+ %1 = getelementptr inbounds { float, float }* %arg, i64 0, i32 0
+ %2 = bitcast float* %1 to i64*
+ %3 = load i64* %2, align 8
+ %4 = trunc i64 %3 to i32
+ %5 = lshr i64 %3, 32
+ %6 = trunc i64 %5 to i32
+ %7 = bitcast i32 %6 to float
+ %8 = fmul float %7, 0.000000e+00
+ %9 = bitcast float* %1 to i32*
+ store i32 %6, i32* %9, align 4
+; CHECK: movl $0, (%rdi)
+ store float %8, float* %0, align 4
+; CHECK: movss %{{.*}}, 4(%rdi)
+ ret void
+}
diff --git a/test/CodeGen/X86/constant-hoisting-optnone.ll b/test/CodeGen/X86/constant-hoisting-optnone.ll
new file mode 100644
index 0000000..f61fe3f
--- /dev/null
+++ b/test/CodeGen/X86/constant-hoisting-optnone.ll
@@ -0,0 +1,21 @@
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=generic | FileCheck %s
+;
+; Verify that the 'Constant Hoisting' pass is not run on optnone functions.
+; Without optnone, 'Constant Hoisting' would first hoist
+; constant 0xBEEBEEBEC and then rebase the other constant
+; (i.e. constant 0xBEEBEEBF4) with respect to it.
+; With optnone, we check that the constants are not coalesced.
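+; For illustration only (hypothetical IR, not what this test checks): with
+; hoisting enabled, the two constants would be coalesced roughly as
+;   %base = bitcast i64 51250129900 to i64  ; 0xBEEBEEBEC materialized once
+;   %rebased = add i64 %base, 8             ; 0xBEEBEEBF4 as %base + 8
+; whereas with optnone each constant gets its own movabsq.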
+
+define i64 @constant_hoisting_optnone() #0 {
+; CHECK-LABEL: @constant_hoisting_optnone
+; CHECK-DAG: movabsq {{.*#+}} imm = 0xBEEBEEBF4
+; CHECK-DAG: movabsq {{.*#+}} imm = 0xBEEBEEBEC
+; CHECK: ret
+entry:
+ %0 = load i64* inttoptr (i64 51250129900 to i64*)
+ %1 = load i64* inttoptr (i64 51250129908 to i64*)
+ %2 = add i64 %0, %1
+ ret i64 %2
+}
+
+attributes #0 = { optnone noinline }
diff --git a/test/CodeGen/X86/copysign-constant-magnitude.ll b/test/CodeGen/X86/copysign-constant-magnitude.ll
new file mode 100644
index 0000000..537d629
--- /dev/null
+++ b/test/CodeGen/X86/copysign-constant-magnitude.ll
@@ -0,0 +1,105 @@
+; RUN: llc < %s | FileCheck %s
+
+target datalayout = "e-m:o-i64:64-f80:128-n8:16:32:64-S128"
+target triple = "x86_64-apple-macosx10.10.0"
+
+define void @test_copysign_const_magnitude_d(double %X) {
+; CHECK: [[SIGNMASK:L.+]]:
+; CHECK-NEXT: .quad -9223372036854775808 ## double -0.000000e+00
+; CHECK-NEXT: .quad 0 ## double 0.000000e+00
+; CHECK: [[ZERO:L.+]]:
+; CHECK-NEXT: .space 16
+; CHECK: [[ONE:L.+]]:
+; CHECK-NEXT: .quad 4607182418800017408 ## double 1.000000e+00
+; CHECK-NEXT: .quad 0 ## double 0.000000e+00
+; CHECK-LABEL: test_copysign_const_magnitude_d:
+
+; CHECK: id
+ %iX = call double @id_d(double %X)
+
+; CHECK-NEXT: andpd [[SIGNMASK]](%rip), %xmm0
+ %d0 = call double @copysign(double 0.000000e+00, double %iX)
+
+; CHECK-NEXT: id
+ %id0 = call double @id_d(double %d0)
+
+; CHECK-NEXT: andpd [[SIGNMASK]](%rip), %xmm0
+; CHECK-NEXT: orpd [[ZERO]](%rip), %xmm0
+ %dn0 = call double @copysign(double -0.000000e+00, double %id0)
+
+; CHECK-NEXT: id
+ %idn0 = call double @id_d(double %dn0)
+
+; CHECK-NEXT: andpd [[SIGNMASK]](%rip), %xmm0
+; CHECK-NEXT: orpd [[ONE]](%rip), %xmm0
+ %d1 = call double @copysign(double 1.000000e+00, double %idn0)
+
+; CHECK-NEXT: id
+ %id1 = call double @id_d(double %d1)
+
+; CHECK-NEXT: andpd [[SIGNMASK]](%rip), %xmm0
+; CHECK-NEXT: orpd [[ONE]](%rip), %xmm0
+ %dn1 = call double @copysign(double -1.000000e+00, double %id1)
+
+; CHECK-NEXT: id
+ %idn1 = call double @id_d(double %dn1)
+
+; CHECK: retq
+ ret void
+}
+
+define void @test_copysign_const_magnitude_f(float %X) {
+; CHECK: [[SIGNMASK:L.+]]:
+; CHECK-NEXT: .long 2147483648 ## float -0.000000e+00
+; CHECK-NEXT: .long 0 ## float 0.000000e+00
+; CHECK-NEXT: .long 0 ## float 0.000000e+00
+; CHECK-NEXT: .long 0 ## float 0.000000e+00
+; CHECK: [[ZERO:L.+]]:
+; CHECK-NEXT: .space 16
+; CHECK: [[ONE:L.+]]:
+; CHECK-NEXT: .long 1065353216 ## float 1.000000e+00
+; CHECK-NEXT: .long 0 ## float 0.000000e+00
+; CHECK-NEXT: .long 0 ## float 0.000000e+00
+; CHECK-NEXT: .long 0 ## float 0.000000e+00
+; CHECK-LABEL: test_copysign_const_magnitude_f:
+
+; CHECK: id
+ %iX = call float @id_f(float %X)
+
+; CHECK-NEXT: andps [[SIGNMASK]](%rip), %xmm0
+ %d0 = call float @copysignf(float 0.000000e+00, float %iX)
+
+; CHECK-NEXT: id
+ %id0 = call float @id_f(float %d0)
+
+; CHECK-NEXT: andps [[SIGNMASK]](%rip), %xmm0
+; CHECK-NEXT: orps [[ZERO]](%rip), %xmm0
+ %dn0 = call float @copysignf(float -0.000000e+00, float %id0)
+
+; CHECK-NEXT: id
+ %idn0 = call float @id_f(float %dn0)
+
+; CHECK-NEXT: andps [[SIGNMASK]](%rip), %xmm0
+; CHECK-NEXT: orps [[ONE]](%rip), %xmm0
+ %d1 = call float @copysignf(float 1.000000e+00, float %idn0)
+
+; CHECK-NEXT: id
+ %id1 = call float @id_f(float %d1)
+
+; CHECK-NEXT: andps [[SIGNMASK]](%rip), %xmm0
+; CHECK-NEXT: orps [[ONE]](%rip), %xmm0
+ %dn1 = call float @copysignf(float -1.000000e+00, float %id1)
+
+; CHECK-NEXT: id
+ %idn1 = call float @id_f(float %dn1)
+
+; CHECK: retq
+ ret void
+}
+
+declare double @copysign(double, double) nounwind readnone
+declare float @copysignf(float, float) nounwind readnone
+
+; Dummy identity functions that keep the value in %xmm0 and prevent optimizations.
+declare double @id_d(double)
+declare float @id_f(float)
diff --git a/test/CodeGen/X86/copysign-zero.ll b/test/CodeGen/X86/copysign-zero.ll
deleted file mode 100644
index 47522d8..0000000
--- a/test/CodeGen/X86/copysign-zero.ll
+++ /dev/null
@@ -1,14 +0,0 @@
-; RUN: llc < %s | not grep orpd
-; RUN: llc < %s | grep andpd | count 1
-
-target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128"
-target triple = "x86_64-apple-darwin8"
-
-define double @test(double %X) nounwind {
-entry:
- %tmp2 = tail call double @copysign( double 0.000000e+00, double %X ) nounwind readnone ; <double> [#uses=1]
- ret double %tmp2
-}
-
-declare double @copysign(double, double) nounwind readnone
-
diff --git a/test/CodeGen/X86/cppeh-catch-all.ll b/test/CodeGen/X86/cppeh-catch-all.ll
new file mode 100644
index 0000000..7a12b24
--- /dev/null
+++ b/test/CodeGen/X86/cppeh-catch-all.ll
@@ -0,0 +1,83 @@
+; RUN: opt -mtriple=x86_64-pc-windows-msvc -winehprepare -S -o - < %s | FileCheck %s
+
+; This test is based on the following code:
+;
+; void test()
+; {
+; try {
+; may_throw();
+; } catch (...) {
+; handle_exception();
+; }
+; }
+;
+; Parts of the IR have been hand-edited to simplify the test case.
+; The full IR will be restored when Windows C++ EH support is complete.
+
+; ModuleID = 'catch-all.cpp'
+target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
+target triple = "x86_64-pc-windows-msvc"
+
+; Function Attrs: uwtable
+define void @_Z4testv() #0 {
+entry:
+ %exn.slot = alloca i8*
+ %ehselector.slot = alloca i32
+ invoke void @_Z9may_throwv()
+ to label %invoke.cont unwind label %lpad
+
+invoke.cont: ; preds = %entry
+ br label %try.cont
+
+lpad: ; preds = %entry
+ %0 = landingpad { i8*, i32 } personality i8* bitcast (i32 (...)* @__CxxFrameHandler3 to i8*)
+ catch i8* null
+ %1 = extractvalue { i8*, i32 } %0, 0
+ store i8* %1, i8** %exn.slot
+ %2 = extractvalue { i8*, i32 } %0, 1
+ store i32 %2, i32* %ehselector.slot
+ br label %catch
+
+catch: ; preds = %lpad
+ %exn = load i8** %exn.slot
+ %3 = call i8* @llvm.eh.begincatch(i8* %exn) #3
+ call void @_Z16handle_exceptionv()
+ br label %invoke.cont2
+
+invoke.cont2: ; preds = %catch
+ call void @llvm.eh.endcatch()
+ br label %try.cont
+
+try.cont: ; preds = %invoke.cont2, %invoke.cont
+ ret void
+}
+
+; CHECK: define i8* @_Z4testv.catch(i8*, i8*) {
+; CHECK: catch.entry:
+; CHECK: %eh.alloc = call i8* @llvm.framerecover(i8* bitcast (void ()* @_Z4testv to i8*), i8* %1)
+; CHECK: %eh.data = bitcast i8* %eh.alloc to %struct._Z4testv.ehdata*
+; CHECK: %eh.obj.ptr = getelementptr inbounds %struct._Z4testv.ehdata* %eh.data, i32 0, i32 1
+; CHECK: %eh.obj = load i8** %eh.obj.ptr
+; CHECK: call void @_Z16handle_exceptionv()
+; CHECK: ret i8* blockaddress(@_Z4testv, %try.cont)
+; CHECK: }
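+
+; For context: -winehprepare outlines each catch handler into a function like
+; the one checked above and wires it to the parent frame with a
+; frameallocate/framerecover pair. A minimal sketch of that contract
+; (illustrative only; the size and names are made up):
+;   ; in the parent function:
+;   %frame.alloc = call i8* @llvm.frameallocate(i32 8)
+;   ; in the outlined handler, where %1 is the parent frame pointer:
+;   %eh.alloc = call i8* @llvm.framerecover(i8* bitcast (void ()* @parent to i8*), i8* %1)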
+
+declare void @_Z9may_throwv() #1
+
+declare i32 @__CxxFrameHandler3(...)
+
+declare i8* @llvm.eh.begincatch(i8*)
+
+declare void @_Z16handle_exceptionv() #1
+
+declare void @llvm.eh.endcatch()
+
+attributes #0 = { uwtable "less-precise-fpmad"="false" "no-frame-pointer-elim"="true" "no-frame-pointer-elim-non-leaf" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" }
+attributes #1 = { "less-precise-fpmad"="false" "no-frame-pointer-elim"="true" "no-frame-pointer-elim-non-leaf" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" }
+attributes #2 = { noinline noreturn nounwind }
+attributes #3 = { nounwind }
+attributes #4 = { noreturn nounwind }
+
+!llvm.ident = !{!0}
+
+!0 = !{!"clang version 3.7.0 (trunk 226027)"}
diff --git a/test/CodeGen/X86/cppeh-catch-scalar.ll b/test/CodeGen/X86/cppeh-catch-scalar.ll
new file mode 100644
index 0000000..fd5df6c
--- /dev/null
+++ b/test/CodeGen/X86/cppeh-catch-scalar.ll
@@ -0,0 +1,123 @@
+; RUN: opt -mtriple=x86_64-pc-windows-msvc -winehprepare -S -o - < %s | FileCheck %s
+
+; This test is based on the following code:
+;
+; void test()
+; {
+; try {
+; may_throw();
+; } catch (int i) {
+; handle_int(i);
+; }
+; }
+;
+; Parts of the IR have been hand-edited to simplify the test case.
+; The full IR will be restored when Windows C++ EH support is complete.
+
+; ModuleID = 'cppeh-catch-scalar.cpp'
+target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
+target triple = "x86_64-pc-windows-msvc"
+
+; This is the structure that will be created for the frame allocation.
+; CHECK: %struct._Z4testv.ehdata = type { i32, i8*, i32 }
+
+@_ZTIi = external constant i8*
+
+; The function entry will be rewritten like this.
+; CHECK: define void @_Z4testv() #0 {
+; CHECK: entry:
+; CHECK: %frame.alloc = call i8* @llvm.frameallocate(i32 24)
+; CHECK: %eh.data = bitcast i8* %frame.alloc to %struct._Z4testv.ehdata*
+; CHECK: %exn.slot = alloca i8*
+; CHECK: %ehselector.slot = alloca i32
+; CHECK-NOT: %i = alloca i32, align 4
+; CHECK: %i = getelementptr inbounds %struct._Z4testv.ehdata* %eh.data, i32 0, i32 2
+
+; Function Attrs: uwtable
+define void @_Z4testv() #0 {
+entry:
+ %exn.slot = alloca i8*
+ %ehselector.slot = alloca i32
+ %i = alloca i32, align 4
+ invoke void @_Z9may_throwv()
+ to label %invoke.cont unwind label %lpad
+
+invoke.cont: ; preds = %entry
+ br label %try.cont
+
+lpad: ; preds = %entry
+ %0 = landingpad { i8*, i32 } personality i8* bitcast (i32 (...)* @__CxxFrameHandler3 to i8*)
+ catch i8* bitcast (i8** @_ZTIi to i8*)
+ %1 = extractvalue { i8*, i32 } %0, 0
+ store i8* %1, i8** %exn.slot
+ %2 = extractvalue { i8*, i32 } %0, 1
+ store i32 %2, i32* %ehselector.slot
+ br label %catch.dispatch
+
+catch.dispatch: ; preds = %lpad
+ %sel = load i32* %ehselector.slot
+ %3 = call i32 @llvm.eh.typeid.for(i8* bitcast (i8** @_ZTIi to i8*)) #3
+ %matches = icmp eq i32 %sel, %3
+ br i1 %matches, label %catch, label %eh.resume
+
+catch: ; preds = %catch.dispatch
+ %exn11 = load i8** %exn.slot
+ %4 = call i8* @llvm.eh.begincatch(i8* %exn11) #3
+ %5 = bitcast i8* %4 to i32*
+ %6 = load i32* %5, align 4
+ store i32 %6, i32* %i, align 4
+ %7 = load i32* %i, align 4
+ call void @_Z10handle_inti(i32 %7)
+ br label %invoke.cont2
+
+invoke.cont2: ; preds = %catch
+ call void @llvm.eh.endcatch() #3
+ br label %try.cont
+
+try.cont: ; preds = %invoke.cont2, %invoke.cont
+ ret void
+
+eh.resume: ; preds = %catch.dispatch
+ %exn3 = load i8** %exn.slot
+ %sel4 = load i32* %ehselector.slot
+ %lpad.val = insertvalue { i8*, i32 } undef, i8* %exn3, 0
+ %lpad.val5 = insertvalue { i8*, i32 } %lpad.val, i32 %sel4, 1
+ resume { i8*, i32 } %lpad.val5
+}
+
+; CHECK: define i8* @_Z4testv.catch(i8*, i8*) {
+; CHECK: catch.entry:
+; CHECK: %eh.alloc = call i8* @llvm.framerecover(i8* bitcast (void ()* @_Z4testv to i8*), i8* %1)
+; CHECK: %eh.data = bitcast i8* %eh.alloc to %struct._Z4testv.ehdata*
+; CHECK: %eh.obj.ptr = getelementptr inbounds %struct._Z4testv.ehdata* %eh.data, i32 0, i32 1
+; CHECK: %eh.obj = load i8** %eh.obj.ptr
+; CHECK: %i = getelementptr inbounds %struct._Z4testv.ehdata* %eh.data, i32 0, i32 2
+; CHECK: %2 = bitcast i8* %eh.obj to i32*
+; CHECK: %3 = load i32* %2, align 4
+; CHECK: store i32 %3, i32* %i, align 4
+; CHECK: %4 = load i32* %i, align 4
+; CHECK: call void @_Z10handle_inti(i32 %4)
+; CHECK: ret i8* blockaddress(@_Z4testv, %try.cont)
+; CHECK: }
+
+declare void @_Z9may_throwv() #1
+
+declare i32 @__CxxFrameHandler3(...)
+
+; Function Attrs: nounwind readnone
+declare i32 @llvm.eh.typeid.for(i8*) #2
+
+declare i8* @llvm.eh.begincatch(i8*)
+
+declare void @llvm.eh.endcatch()
+
+declare void @_Z10handle_inti(i32) #1
+
+attributes #0 = { uwtable "less-precise-fpmad"="false" "no-frame-pointer-elim"="true" "no-frame-pointer-elim-non-leaf" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" }
+attributes #1 = { "less-precise-fpmad"="false" "no-frame-pointer-elim"="true" "no-frame-pointer-elim-non-leaf" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" }
+attributes #2 = { nounwind readnone }
+attributes #3 = { nounwind }
+
+!llvm.ident = !{!0}
+
+!0 = !{!"clang version 3.7.0 (trunk 227474) (llvm/trunk 227508)"}
diff --git a/test/CodeGen/X86/cppeh-frame-vars.ll b/test/CodeGen/X86/cppeh-frame-vars.ll
new file mode 100644
index 0000000..667f133
--- /dev/null
+++ b/test/CodeGen/X86/cppeh-frame-vars.ll
@@ -0,0 +1,261 @@
+; RUN: opt -mtriple=x86_64-pc-windows-msvc -winehprepare -S -o - < %s | FileCheck %s
+
+; This test is based on the following code:
+;
+; struct SomeData {
+; int a;
+; int b;
+; };
+;
+; void may_throw();
+; void does_not_throw(int i);
+; void dump(int *, int, SomeData&);
+;
+; void test() {
+; int NumExceptions = 0;
+; int ExceptionVal[10];
+; SomeData Data = { 0, 0 };
+;
+; for (int i = 0; i < 10; ++i) {
+; try {
+; may_throw();
+; Data.a += i;
+; }
+; catch (int e) {
+; ExceptionVal[NumExceptions] = e;
+; ++NumExceptions;
+; if (e == i)
+; Data.b += e;
+; else
+; Data.a += e;
+; }
+; does_not_throw(NumExceptions);
+; }
+; dump(ExceptionVal, NumExceptions, Data);
+; }
+
+; ModuleID = 'cppeh-frame-vars.cpp'
+target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
+target triple = "x86_64-pc-windows-msvc"
+
+%rtti.TypeDescriptor2 = type { i8**, i8*, [3 x i8] }
+%struct.SomeData = type { i32, i32 }
+
+; This structure should be declared for the frame allocation block.
+; CHECK: %"struct.\01?test@@YAXXZ.ehdata" = type { i32, i8*, i32, i32, [10 x i32], i32, %struct.SomeData }
+
+$"\01??_R0H@8" = comdat any
+
+@"\01??_7type_info@@6B@" = external constant i8*
+@"\01??_R0H@8" = linkonce_odr global %rtti.TypeDescriptor2 { i8** @"\01??_7type_info@@6B@", i8* null, [3 x i8] c".H\00" }, comdat
+
+; The function entry should be rewritten like this.
+; CHECK: define void @"\01?test@@YAXXZ"() #0 {
+; CHECK: entry:
+; CHECK: %frame.alloc = call i8* @llvm.frameallocate(i32 80)
+; CHECK: %eh.data = bitcast i8* %frame.alloc to %"struct.\01?test@@YAXXZ.ehdata"*
+; CHECK-NOT: %NumExceptions = alloca i32, align 4
+; CHECK: %NumExceptions = getelementptr inbounds %"struct.\01?test@@YAXXZ.ehdata"* %eh.data, i32 0, i32 3
+; CHECK-NOT: %ExceptionVal = alloca [10 x i32], align 16
+; CHECK: %ExceptionVal = getelementptr inbounds %"struct.\01?test@@YAXXZ.ehdata"* %eh.data, i32 0, i32 4
+; CHECK-NOT: %Data = alloca %struct.SomeData, align 4
+; CHECK: %Data = getelementptr inbounds %"struct.\01?test@@YAXXZ.ehdata"* %eh.data, i32 0, i32 6
+; CHECK: %i = getelementptr inbounds %"struct.\01?test@@YAXXZ.ehdata"* %eh.data, i32 0, i32 5
+; CHECK: %exn.slot = alloca i8*
+; CHECK: %ehselector.slot = alloca i32
+; CHECK-NOT: %e = alloca i32, align 4
+; CHECK: %e = getelementptr inbounds %"struct.\01?test@@YAXXZ.ehdata"* %eh.data, i32 0, i32 2
+
+; Function Attrs: uwtable
+define void @"\01?test@@YAXXZ"() #0 {
+entry:
+ %NumExceptions = alloca i32, align 4
+ %ExceptionVal = alloca [10 x i32], align 16
+ %Data = alloca %struct.SomeData, align 4
+ %i = alloca i32, align 4
+ %exn.slot = alloca i8*
+ %ehselector.slot = alloca i32
+ %e = alloca i32, align 4
+ store i32 0, i32* %NumExceptions, align 4
+ %0 = bitcast %struct.SomeData* %Data to i8*
+ call void @llvm.memset(i8* %0, i8 0, i64 8, i32 4, i1 false)
+ store i32 0, i32* %i, align 4
+ br label %for.cond
+
+for.cond: ; preds = %for.inc, %entry
+ %1 = load i32* %i, align 4
+ %cmp = icmp slt i32 %1, 10
+ br i1 %cmp, label %for.body, label %for.end
+
+for.body: ; preds = %for.cond
+ invoke void @"\01?may_throw@@YAXXZ"()
+ to label %invoke.cont unwind label %lpad
+
+invoke.cont: ; preds = %for.body
+ %2 = load i32* %i, align 4
+ %a = getelementptr inbounds %struct.SomeData* %Data, i32 0, i32 0
+ %3 = load i32* %a, align 4
+ %add = add nsw i32 %3, %2
+ store i32 %add, i32* %a, align 4
+ br label %try.cont
+
+lpad: ; preds = %for.body
+ %4 = landingpad { i8*, i32 } personality i8* bitcast (i32 (...)* @__CxxFrameHandler3 to i8*)
+ catch i8* bitcast (%rtti.TypeDescriptor2* @"\01??_R0H@8" to i8*)
+ %5 = extractvalue { i8*, i32 } %4, 0
+ store i8* %5, i8** %exn.slot
+ %6 = extractvalue { i8*, i32 } %4, 1
+ store i32 %6, i32* %ehselector.slot
+ br label %catch.dispatch
+
+catch.dispatch: ; preds = %lpad
+ %sel = load i32* %ehselector.slot
+ %7 = call i32 @llvm.eh.typeid.for(i8* bitcast (%rtti.TypeDescriptor2* @"\01??_R0H@8" to i8*)) #1
+ %matches = icmp eq i32 %sel, %7
+ br i1 %matches, label %catch, label %eh.resume
+
+catch: ; preds = %catch.dispatch
+ %exn = load i8** %exn.slot
+ %8 = call i8* @llvm.eh.begincatch(i8* %exn) #1
+ %9 = bitcast i8* %8 to i32*
+ %10 = load i32* %9, align 4
+ store i32 %10, i32* %e, align 4
+ %11 = load i32* %e, align 4
+ %12 = load i32* %NumExceptions, align 4
+ %idxprom = sext i32 %12 to i64
+ %arrayidx = getelementptr inbounds [10 x i32]* %ExceptionVal, i32 0, i64 %idxprom
+ store i32 %11, i32* %arrayidx, align 4
+ %13 = load i32* %NumExceptions, align 4
+ %inc = add nsw i32 %13, 1
+ store i32 %inc, i32* %NumExceptions, align 4
+ %14 = load i32* %e, align 4
+ %15 = load i32* %i, align 4
+ %cmp1 = icmp eq i32 %14, %15
+ br i1 %cmp1, label %if.then, label %if.else
+
+if.then: ; preds = %catch
+ %16 = load i32* %e, align 4
+ %b = getelementptr inbounds %struct.SomeData* %Data, i32 0, i32 1
+ %17 = load i32* %b, align 4
+ %add2 = add nsw i32 %17, %16
+ store i32 %add2, i32* %b, align 4
+ br label %if.end
+
+if.else: ; preds = %catch
+ %18 = load i32* %e, align 4
+ %a3 = getelementptr inbounds %struct.SomeData* %Data, i32 0, i32 0
+ %19 = load i32* %a3, align 4
+ %add4 = add nsw i32 %19, %18
+ store i32 %add4, i32* %a3, align 4
+ br label %if.end
+
+if.end: ; preds = %if.else, %if.then
+ call void @llvm.eh.endcatch() #1
+ br label %try.cont
+
+try.cont: ; preds = %if.end, %invoke.cont
+ %20 = load i32* %NumExceptions, align 4
+ call void @"\01?does_not_throw@@YAXH@Z"(i32 %20)
+ br label %for.inc
+
+for.inc: ; preds = %try.cont
+ %21 = load i32* %i, align 4
+ %inc5 = add nsw i32 %21, 1
+ store i32 %inc5, i32* %i, align 4
+ br label %for.cond
+
+for.end: ; preds = %for.cond
+ %22 = load i32* %NumExceptions, align 4
+ %arraydecay = getelementptr inbounds [10 x i32]* %ExceptionVal, i32 0, i32 0
+ call void @"\01?dump@@YAXPEAHHAEAUSomeData@@@Z"(i32* %arraydecay, i32 %22, %struct.SomeData* dereferenceable(8) %Data)
+ ret void
+
+eh.resume: ; preds = %catch.dispatch
+ %exn6 = load i8** %exn.slot
+ %sel7 = load i32* %ehselector.slot
+ %lpad.val = insertvalue { i8*, i32 } undef, i8* %exn6, 0
+ %lpad.val8 = insertvalue { i8*, i32 } %lpad.val, i32 %sel7, 1
+ resume { i8*, i32 } %lpad.val8
+}
+
+; The following catch handler should be outlined.
+; CHECK: define i8* @"\01?test@@YAXXZ.catch"(i8*, i8*) {
+; CHECK: catch.entry:
+; CHECK: %eh.alloc = call i8* @llvm.framerecover(i8* bitcast (void ()* @"\01?test@@YAXXZ" to i8*), i8* %1)
+; CHECK: %eh.data = bitcast i8* %eh.alloc to %"struct.\01?test@@YAXXZ.ehdata"*
+; CHECK: %eh.obj.ptr = getelementptr inbounds %"struct.\01?test@@YAXXZ.ehdata"* %eh.data, i32 0, i32 1
+; CHECK: %eh.obj = load i8** %eh.obj.ptr
+; CHECK: %e = getelementptr inbounds %"struct.\01?test@@YAXXZ.ehdata"* %eh.data, i32 0, i32 2
+; CHECK: %NumExceptions = getelementptr inbounds %"struct.\01?test@@YAXXZ.ehdata"* %eh.data, i32 0, i32 3
+; CHECK: %ExceptionVal = getelementptr inbounds %"struct.\01?test@@YAXXZ.ehdata"* %eh.data, i32 0, i32 4
+; CHECK: %i = getelementptr inbounds %"struct.\01?test@@YAXXZ.ehdata"* %eh.data, i32 0, i32 5
+; CHECK: %Data = getelementptr inbounds %"struct.\01?test@@YAXXZ.ehdata"* %eh.data, i32 0, i32 6
+; CHECK: %2 = bitcast i8* %eh.obj to i32*
+; CHECK: %3 = load i32* %2, align 4
+; CHECK: store i32 %3, i32* %e, align 4
+; CHECK: %4 = load i32* %e, align 4
+; CHECK: %5 = load i32* %NumExceptions, align 4
+; CHECK: %idxprom = sext i32 %5 to i64
+; CHECK: %arrayidx = getelementptr inbounds [10 x i32]* %ExceptionVal, i32 0, i64 %idxprom
+; CHECK: store i32 %4, i32* %arrayidx, align 4
+; CHECK: %6 = load i32* %NumExceptions, align 4
+; CHECK: %inc = add nsw i32 %6, 1
+; CHECK: store i32 %inc, i32* %NumExceptions, align 4
+; CHECK: %7 = load i32* %e, align 4
+; CHECK: %8 = load i32* %i, align 4
+; CHECK: %cmp1 = icmp eq i32 %7, %8
+; CHECK: br i1 %cmp1, label %if.then, label %if.else
+;
+; CHECK: if.then: ; preds = %catch.entry
+; CHECK: %9 = load i32* %e, align 4
+; CHECK: %b = getelementptr inbounds %struct.SomeData* %Data, i32 0, i32 1
+; CHECK: %10 = load i32* %b, align 4
+; CHECK: %add2 = add nsw i32 %10, %9
+; CHECK: store i32 %add2, i32* %b, align 4
+; CHECK: br label %if.end
+;
+; CHECK: if.else: ; preds = %catch.entry
+; CHECK: %11 = load i32* %e, align 4
+; CHECK: %a3 = getelementptr inbounds %struct.SomeData* %Data, i32 0, i32 0
+; CHECK: %12 = load i32* %a3, align 4
+; CHECK: %add4 = add nsw i32 %12, %11
+; CHECK: store i32 %add4, i32* %a3, align 4
+; CHECK: br label %if.end
+;
+; CHECK: if.end: ; preds = %if.else, %if.then
+; CHECK: ret i8* blockaddress(@"\01?test@@YAXXZ", %try.cont)
+; CHECK: }
+
+; Function Attrs: nounwind
+declare void @llvm.memset(i8* nocapture, i8, i64, i32, i1) #1
+
+declare void @"\01?may_throw@@YAXXZ"() #2
+
+declare i32 @__CxxFrameHandler3(...)
+
+; Function Attrs: nounwind readnone
+declare i32 @llvm.eh.typeid.for(i8*) #3
+
+declare i8* @llvm.eh.begincatch(i8*)
+
+declare void @llvm.eh.endcatch()
+
+declare void @"\01?does_not_throw@@YAXH@Z"(i32) #2
+
+declare void @"\01?dump@@YAXPEAHHAEAUSomeData@@@Z"(i32*, i32, %struct.SomeData* dereferenceable(8)) #2
+
+attributes #0 = { uwtable "less-precise-fpmad"="false" "no-frame-pointer-elim"="false" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" }
+attributes #1 = { nounwind }
+attributes #2 = { "less-precise-fpmad"="false" "no-frame-pointer-elim"="false" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" }
+attributes #3 = { nounwind readnone }
+
+!llvm.module.flags = !{!0}
+!llvm.ident = !{!1}
+
+!0 = !{i32 1, !"PIC Level", i32 2}
+!1 = !{!"clang version 3.7.0 (trunk 228868)"}
diff --git a/test/CodeGen/X86/cpus.ll b/test/CodeGen/X86/cpus.ll
new file mode 100644
index 0000000..ee1f7bb
--- /dev/null
+++ b/test/CodeGen/X86/cpus.ll
@@ -0,0 +1,35 @@
+; Test that llc accepts each of these CPU names.
+;
+; First ensure the error message matches what we expect.
+; CHECK-ERROR: not a recognized processor for this target
+; RUN: llc < %s -o /dev/null -mtriple=x86_64-unknown-unknown -mcpu=foobar 2>&1 | FileCheck %s --check-prefix=CHECK-ERROR
+;
+; Now ensure the error message doesn't occur for valid CPUs.
+; CHECK-NO-ERROR-NOT: not a recognized processor for this target
+;
+; RUN: llc < %s -o /dev/null -mtriple=x86_64-unknown-unknown -mcpu=nocona 2>&1 | FileCheck %s --check-prefix=CHECK-NO-ERROR --allow-empty
+; RUN: llc < %s -o /dev/null -mtriple=x86_64-unknown-unknown -mcpu=core2 2>&1 | FileCheck %s --check-prefix=CHECK-NO-ERROR --allow-empty
+; RUN: llc < %s -o /dev/null -mtriple=x86_64-unknown-unknown -mcpu=penryn 2>&1 | FileCheck %s --check-prefix=CHECK-NO-ERROR --allow-empty
+; RUN: llc < %s -o /dev/null -mtriple=x86_64-unknown-unknown -mcpu=nehalem 2>&1 | FileCheck %s --check-prefix=CHECK-NO-ERROR --allow-empty
+; RUN: llc < %s -o /dev/null -mtriple=x86_64-unknown-unknown -mcpu=westmere 2>&1 | FileCheck %s --check-prefix=CHECK-NO-ERROR --allow-empty
+; RUN: llc < %s -o /dev/null -mtriple=x86_64-unknown-unknown -mcpu=sandybridge 2>&1 | FileCheck %s --check-prefix=CHECK-NO-ERROR --allow-empty
+; RUN: llc < %s -o /dev/null -mtriple=x86_64-unknown-unknown -mcpu=ivybridge 2>&1 | FileCheck %s --check-prefix=CHECK-NO-ERROR --allow-empty
+; RUN: llc < %s -o /dev/null -mtriple=x86_64-unknown-unknown -mcpu=haswell 2>&1 | FileCheck %s --check-prefix=CHECK-NO-ERROR --allow-empty
+; RUN: llc < %s -o /dev/null -mtriple=x86_64-unknown-unknown -mcpu=broadwell 2>&1 | FileCheck %s --check-prefix=CHECK-NO-ERROR --allow-empty
+; RUN: llc < %s -o /dev/null -mtriple=x86_64-unknown-unknown -mcpu=bonnell 2>&1 | FileCheck %s --check-prefix=CHECK-NO-ERROR --allow-empty
+; RUN: llc < %s -o /dev/null -mtriple=x86_64-unknown-unknown -mcpu=silvermont 2>&1 | FileCheck %s --check-prefix=CHECK-NO-ERROR --allow-empty
+; RUN: llc < %s -o /dev/null -mtriple=x86_64-unknown-unknown -mcpu=k8 2>&1 | FileCheck %s --check-prefix=CHECK-NO-ERROR --allow-empty
+; RUN: llc < %s -o /dev/null -mtriple=x86_64-unknown-unknown -mcpu=opteron 2>&1 | FileCheck %s --check-prefix=CHECK-NO-ERROR --allow-empty
+; RUN: llc < %s -o /dev/null -mtriple=x86_64-unknown-unknown -mcpu=athlon64 2>&1 | FileCheck %s --check-prefix=CHECK-NO-ERROR --allow-empty
+; RUN: llc < %s -o /dev/null -mtriple=x86_64-unknown-unknown -mcpu=athlon-fx 2>&1 | FileCheck %s --check-prefix=CHECK-NO-ERROR --allow-empty
+; RUN: llc < %s -o /dev/null -mtriple=x86_64-unknown-unknown -mcpu=k8-sse3 2>&1 | FileCheck %s --check-prefix=CHECK-NO-ERROR --allow-empty
+; RUN: llc < %s -o /dev/null -mtriple=x86_64-unknown-unknown -mcpu=opteron-sse3 2>&1 | FileCheck %s --check-prefix=CHECK-NO-ERROR --allow-empty
+; RUN: llc < %s -o /dev/null -mtriple=x86_64-unknown-unknown -mcpu=athlon64-sse3 2>&1 | FileCheck %s --check-prefix=CHECK-NO-ERROR --allow-empty
+; RUN: llc < %s -o /dev/null -mtriple=x86_64-unknown-unknown -mcpu=amdfam10 2>&1 | FileCheck %s --check-prefix=CHECK-NO-ERROR --allow-empty
+; RUN: llc < %s -o /dev/null -mtriple=x86_64-unknown-unknown -mcpu=barcelona 2>&1 | FileCheck %s --check-prefix=CHECK-NO-ERROR --allow-empty
+; RUN: llc < %s -o /dev/null -mtriple=x86_64-unknown-unknown -mcpu=bdver1 2>&1 | FileCheck %s --check-prefix=CHECK-NO-ERROR --allow-empty
+; RUN: llc < %s -o /dev/null -mtriple=x86_64-unknown-unknown -mcpu=bdver2 2>&1 | FileCheck %s --check-prefix=CHECK-NO-ERROR --allow-empty
+; RUN: llc < %s -o /dev/null -mtriple=x86_64-unknown-unknown -mcpu=bdver3 2>&1 | FileCheck %s --check-prefix=CHECK-NO-ERROR --allow-empty
+; RUN: llc < %s -o /dev/null -mtriple=x86_64-unknown-unknown -mcpu=bdver4 2>&1 | FileCheck %s --check-prefix=CHECK-NO-ERROR --allow-empty
+; RUN: llc < %s -o /dev/null -mtriple=x86_64-unknown-unknown -mcpu=btver1 2>&1 | FileCheck %s --check-prefix=CHECK-NO-ERROR --allow-empty
+; RUN: llc < %s -o /dev/null -mtriple=x86_64-unknown-unknown -mcpu=btver2 2>&1 | FileCheck %s --check-prefix=CHECK-NO-ERROR --allow-empty
diff --git a/test/CodeGen/X86/crash-O0.ll b/test/CodeGen/X86/crash-O0.ll
index 956d43b..df8eaaf 100644
--- a/test/CodeGen/X86/crash-O0.ll
+++ b/test/CodeGen/X86/crash-O0.ll
@@ -1,4 +1,4 @@
-; RUN: llc -O0 -relocation-model=pic -disable-fp-elim < %s
+; RUN: llc -O0 -relocation-model=pic -disable-fp-elim < %s | FileCheck %s
target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64"
target triple = "x86_64-apple-darwin10"
@@ -29,3 +29,23 @@ entry:
"41": ; preds = %"39"
unreachable
}
+
+; When using fast-isel, sdiv is lowered into a CQO + DIV64 sequence.
+; CQO implicitly defines AX, and DIV64 implicitly uses it.
+; When an instruction got between those two, RegAllocFast would reuse
+; AX for the vreg defined in between, and the compiler crashed.
+;
+; An instruction ends up between CQO and DIV64 here because the load is
+; folded into the division but requires a sign extension.
+; PR21700
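+;
+; Roughly, the sequence before register allocation looks like this (an
+; illustrative sketch, not actual MIR):
+;   CQO                  ; implicitly defines AX
+;   %v = MOVSX64rm32 ... ; the sign extension materialized in between
+;   IDIV64m ...          ; implicitly uses AX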
+; CHECK-LABEL: addressModeWith32bitIndex:
+; CHECK: cqto
+; CHECK-NEXT: movslq
+; CHECK-NEXT: idivq
+; CHECK: retq
+define i64 @addressModeWith32bitIndex(i32 %V) {
+ %gep = getelementptr i64* null, i32 %V
+ %load = load i64* %gep
+ %sdiv = sdiv i64 0, %load
+ ret i64 %sdiv
+}
diff --git a/test/CodeGen/X86/crash.ll b/test/CodeGen/X86/crash.ll
index ee73377..6b3dd36 100644
--- a/test/CodeGen/X86/crash.ll
+++ b/test/CodeGen/X86/crash.ll
@@ -108,8 +108,8 @@ do.body92: ; preds = %if.then66
ret void
}
-!0 = metadata !{i32 633550}
-!1 = metadata !{i32 634261}
+!0 = !{i32 633550}
+!1 = !{i32 634261}
; Crash during XOR optimization.
diff --git a/test/CodeGen/X86/dbg-changes-codegen-branch-folding.ll b/test/CodeGen/X86/dbg-changes-codegen-branch-folding.ll
index d0791dc..16d8f97 100644
--- a/test/CodeGen/X86/dbg-changes-codegen-branch-folding.ll
+++ b/test/CodeGen/X86/dbg-changes-codegen-branch-folding.ll
@@ -52,48 +52,48 @@ define void @_Z3barii(i32 %param1, i32 %param2) #0 {
entry:
%var1 = alloca %struct.AAA3, align 1
%var2 = alloca %struct.AAA3, align 1
- tail call void @llvm.dbg.value(metadata !{i32 %param1}, i64 0, metadata !30, metadata !{metadata !"0x102"}), !dbg !47
- tail call void @llvm.dbg.value(metadata !{i32 %param2}, i64 0, metadata !31, metadata !{metadata !"0x102"}), !dbg !47
- tail call void @llvm.dbg.value(metadata !48, i64 0, metadata !32, metadata !{metadata !"0x102"}), !dbg !49
+ tail call void @llvm.dbg.value(metadata i32 %param1, i64 0, metadata !30, metadata !{!"0x102"}), !dbg !47
+ tail call void @llvm.dbg.value(metadata i32 %param2, i64 0, metadata !31, metadata !{!"0x102"}), !dbg !47
+ tail call void @llvm.dbg.value(metadata i8* null, i64 0, metadata !32, metadata !{!"0x102"}), !dbg !49
%tobool = icmp eq i32 %param2, 0, !dbg !50
br i1 %tobool, label %if.end, label %if.then, !dbg !50
if.then: ; preds = %entry
%call = tail call i8* @_Z5i2stri(i32 %param2), !dbg !52
- tail call void @llvm.dbg.value(metadata !{i8* %call}, i64 0, metadata !32, metadata !{metadata !"0x102"}), !dbg !49
+ tail call void @llvm.dbg.value(metadata i8* %call, i64 0, metadata !32, metadata !{!"0x102"}), !dbg !49
br label %if.end, !dbg !54
if.end: ; preds = %entry, %if.then
- tail call void @llvm.dbg.value(metadata !{%struct.AAA3* %var1}, i64 0, metadata !33, metadata !{metadata !"0x102"}), !dbg !55
- tail call void @llvm.dbg.value(metadata !{%struct.AAA3* %var1}, i64 0, metadata !56, metadata !{metadata !"0x102"}), !dbg !57
- tail call void @llvm.dbg.value(metadata !58, i64 0, metadata !59, metadata !{metadata !"0x102"}), !dbg !60
+ tail call void @llvm.dbg.value(metadata %struct.AAA3* %var1, i64 0, metadata !33, metadata !{!"0x102"}), !dbg !55
+ tail call void @llvm.dbg.value(metadata %struct.AAA3* %var1, i64 0, metadata !56, metadata !{!"0x102"}), !dbg !57
+ tail call void @llvm.dbg.value(metadata !58, i64 0, metadata !59, metadata !{!"0x102"}), !dbg !60
%arraydecay.i = getelementptr inbounds %struct.AAA3* %var1, i64 0, i32 0, i64 0, !dbg !61
call void @_Z3fooPcjPKc(i8* %arraydecay.i, i32 4, i8* getelementptr inbounds ([1 x i8]* @.str, i64 0, i64 0)), !dbg !61
- call void @llvm.dbg.value(metadata !{%struct.AAA3* %var2}, i64 0, metadata !34, metadata !{metadata !"0x102"}), !dbg !63
- call void @llvm.dbg.value(metadata !{%struct.AAA3* %var2}, i64 0, metadata !64, metadata !{metadata !"0x102"}), !dbg !65
- call void @llvm.dbg.value(metadata !58, i64 0, metadata !66, metadata !{metadata !"0x102"}), !dbg !67
+ call void @llvm.dbg.value(metadata %struct.AAA3* %var2, i64 0, metadata !34, metadata !{!"0x102"}), !dbg !63
+ call void @llvm.dbg.value(metadata %struct.AAA3* %var2, i64 0, metadata !64, metadata !{!"0x102"}), !dbg !65
+ call void @llvm.dbg.value(metadata !58, i64 0, metadata !66, metadata !{!"0x102"}), !dbg !67
%arraydecay.i5 = getelementptr inbounds %struct.AAA3* %var2, i64 0, i32 0, i64 0, !dbg !68
call void @_Z3fooPcjPKc(i8* %arraydecay.i5, i32 4, i8* getelementptr inbounds ([1 x i8]* @.str, i64 0, i64 0)), !dbg !68
%tobool1 = icmp eq i32 %param1, 0, !dbg !69
- call void @llvm.dbg.value(metadata !{%struct.AAA3* %var2}, i64 0, metadata !34, metadata !{metadata !"0x102"}), !dbg !63
+ call void @llvm.dbg.value(metadata %struct.AAA3* %var2, i64 0, metadata !34, metadata !{!"0x102"}), !dbg !63
br i1 %tobool1, label %if.else, label %if.then2, !dbg !69
if.then2: ; preds = %if.end
- call void @llvm.dbg.value(metadata !{%struct.AAA3* %var2}, i64 0, metadata !71, metadata !{metadata !"0x102"}), !dbg !73
- call void @llvm.dbg.value(metadata !74, i64 0, metadata !75, metadata !{metadata !"0x102"}), !dbg !76
+ call void @llvm.dbg.value(metadata %struct.AAA3* %var2, i64 0, metadata !71, metadata !{!"0x102"}), !dbg !73
+ call void @llvm.dbg.value(metadata !74, i64 0, metadata !75, metadata !{!"0x102"}), !dbg !76
call void @_Z3fooPcjPKc(i8* %arraydecay.i5, i32 4, i8* getelementptr inbounds ([2 x i8]* @.str1, i64 0, i64 0)), !dbg !76
br label %if.end3, !dbg !72
if.else: ; preds = %if.end
- call void @llvm.dbg.value(metadata !{%struct.AAA3* %var2}, i64 0, metadata !77, metadata !{metadata !"0x102"}), !dbg !79
- call void @llvm.dbg.value(metadata !80, i64 0, metadata !81, metadata !{metadata !"0x102"}), !dbg !82
+ call void @llvm.dbg.value(metadata %struct.AAA3* %var2, i64 0, metadata !77, metadata !{!"0x102"}), !dbg !79
+ call void @llvm.dbg.value(metadata !80, i64 0, metadata !81, metadata !{!"0x102"}), !dbg !82
call void @_Z3fooPcjPKc(i8* %arraydecay.i5, i32 4, i8* getelementptr inbounds ([2 x i8]* @.str2, i64 0, i64 0)), !dbg !82
br label %if.end3
if.end3: ; preds = %if.else, %if.then2
- call void @llvm.dbg.value(metadata !{%struct.AAA3* %var1}, i64 0, metadata !33, metadata !{metadata !"0x102"}), !dbg !55
- call void @llvm.dbg.value(metadata !{%struct.AAA3* %var1}, i64 0, metadata !83, metadata !{metadata !"0x102"}), !dbg !85
- call void @llvm.dbg.value(metadata !58, i64 0, metadata !86, metadata !{metadata !"0x102"}), !dbg !87
+ call void @llvm.dbg.value(metadata %struct.AAA3* %var1, i64 0, metadata !33, metadata !{!"0x102"}), !dbg !55
+ call void @llvm.dbg.value(metadata %struct.AAA3* %var1, i64 0, metadata !83, metadata !{!"0x102"}), !dbg !85
+ call void @llvm.dbg.value(metadata !58, i64 0, metadata !86, metadata !{!"0x102"}), !dbg !87
call void @_Z3fooPcjPKc(i8* %arraydecay.i, i32 4, i8* getelementptr inbounds ([1 x i8]* @.str, i64 0, i64 0)), !dbg !87
ret void, !dbg !88
}
@@ -113,92 +113,92 @@ attributes #2 = { nounwind readnone }
!llvm.module.flags = !{!44, !45}
!llvm.ident = !{!46}
-!0 = metadata !{metadata !"0x11\004\00clang version 3.5.0 \001\00\000\00\001", metadata !1, metadata !2, metadata !3, metadata !23, metadata !2, metadata !2} ; [ DW_TAG_compile_unit ] [/tmp/dbginfo/dbg-changes-codegen-branch-folding.cpp] [DW_LANG_C_plus_plus]
-!1 = metadata !{metadata !"dbg-changes-codegen-branch-folding.cpp", metadata !"/tmp/dbginfo"}
-!2 = metadata !{}
-!3 = metadata !{metadata !4}
-!4 = metadata !{metadata !"0x13\00AAA3\004\0032\008\000\000\000", metadata !1, null, null, metadata !5, null, null, metadata !"_ZTS4AAA3"} ; [ DW_TAG_structure_type ] [AAA3] [line 4, size 32, align 8, offset 0] [def] [from ]
-!5 = metadata !{metadata !6, metadata !11, metadata !17, metadata !18}
-!6 = metadata !{metadata !"0xd\00text\008\0032\008\000\000", metadata !1, metadata !"_ZTS4AAA3", metadata !7} ; [ DW_TAG_member ] [text] [line 8, size 32, align 8, offset 0] [from ]
-!7 = metadata !{metadata !"0x1\00\000\0032\008\000\000", null, null, metadata !8, metadata !9, i32 0, null, null, null} ; [ DW_TAG_array_type ] [line 0, size 32, align 8, offset 0] [from char]
-!8 = metadata !{metadata !"0x24\00char\000\008\008\000\000\006", null, null} ; [ DW_TAG_base_type ] [char] [line 0, size 8, align 8, offset 0, enc DW_ATE_signed_char]
-!9 = metadata !{metadata !10}
-!10 = metadata !{metadata !"0x21\000\004"} ; [ DW_TAG_subrange_type ] [0, 3]
-!11 = metadata !{metadata !"0x2e\00AAA3\00AAA3\00\005\000\000\000\006\00256\001\005", metadata !1, metadata !"_ZTS4AAA3", metadata !12, null, null, null, i32 0, null} ; [ DW_TAG_subprogram ] [line 5] [AAA3]
-!12 = metadata !{metadata !"0x15\00\000\000\000\000\000\000", i32 0, null, null, metadata !13, null, null, null} ; [ DW_TAG_subroutine_type ] [line 0, size 0, align 0, offset 0] [from ]
-!13 = metadata !{null, metadata !14, metadata !15}
-!14 = metadata !{metadata !"0xf\00\000\0064\0064\000\001088", null, null, metadata !"_ZTS4AAA3"} ; [ DW_TAG_pointer_type ] [line 0, size 64, align 64, offset 0] [artificial] [from _ZTS4AAA3]
-!15 = metadata !{metadata !"0xf\00\000\0064\0064\000\000", null, null, metadata !16} ; [ DW_TAG_pointer_type ] [line 0, size 64, align 64, offset 0] [from ]
-!16 = metadata !{metadata !"0x26\00\000\000\000\000\000", null, null, metadata !8} ; [ DW_TAG_const_type ] [line 0, size 0, align 0, offset 0] [from char]
-!17 = metadata !{metadata !"0x2e\00operator=\00operator=\00_ZN4AAA3aSEPKc\006\000\000\000\006\00256\001\006", metadata !1, metadata !"_ZTS4AAA3", metadata !12, null, null, null, i32 0, null} ; [ DW_TAG_subprogram ] [line 6] [operator=]
-!18 = metadata !{metadata !"0x2e\00operator const char *\00operator const char *\00_ZNK4AAA3cvPKcEv\007\000\000\000\006\00256\001\007", metadata !1, metadata !"_ZTS4AAA3", metadata !19, null, null, null, i32 0, null} ; [ DW_TAG_subprogram ] [line 7] [operator const char *]
-!19 = metadata !{metadata !"0x15\00\000\000\000\000\000\000", i32 0, null, null, metadata !20, null, null, null} ; [ DW_TAG_subroutine_type ] [line 0, size 0, align 0, offset 0] [from ]
-!20 = metadata !{metadata !15, metadata !21}
-!21 = metadata !{metadata !"0xf\00\000\0064\0064\000\001088", null, null, metadata !22} ; [ DW_TAG_pointer_type ] [line 0, size 64, align 64, offset 0] [artificial] [from ]
-!22 = metadata !{metadata !"0x26\00\000\000\000\000\000", null, null, metadata !"_ZTS4AAA3"} ; [ DW_TAG_const_type ] [line 0, size 0, align 0, offset 0] [from _ZTS4AAA3]
-!23 = metadata !{metadata !24, metadata !35, metadata !40}
-!24 = metadata !{metadata !"0x2e\00bar\00bar\00_Z3barii\0011\000\001\000\006\00256\001\0011", metadata !1, metadata !25, metadata !26, null, void (i32, i32)* @_Z3barii, null, null, metadata !29} ; [ DW_TAG_subprogram ] [line 11] [def] [bar]
-!25 = metadata !{metadata !"0x29", metadata !1} ; [ DW_TAG_file_type ] [/tmp/dbginfo/dbg-changes-codegen-branch-folding.cpp]
-!26 = metadata !{metadata !"0x15\00\000\000\000\000\000\000", i32 0, null, null, metadata !27, null, null, null} ; [ DW_TAG_subroutine_type ] [line 0, size 0, align 0, offset 0] [from ]
-!27 = metadata !{null, metadata !28, metadata !28}
-!28 = metadata !{metadata !"0x24\00int\000\0032\0032\000\000\005", null, null} ; [ DW_TAG_base_type ] [int] [line 0, size 32, align 32, offset 0, enc DW_ATE_signed]
-!29 = metadata !{metadata !30, metadata !31, metadata !32, metadata !33, metadata !34}
-!30 = metadata !{metadata !"0x101\00param1\0016777227\000", metadata !24, metadata !25, metadata !28} ; [ DW_TAG_arg_variable ] [param1] [line 11]
-!31 = metadata !{metadata !"0x101\00param2\0033554443\000", metadata !24, metadata !25, metadata !28} ; [ DW_TAG_arg_variable ] [param2] [line 11]
-!32 = metadata !{metadata !"0x100\00temp\0012\000", metadata !24, metadata !25, metadata !15} ; [ DW_TAG_auto_variable ] [temp] [line 12]
-!33 = metadata !{metadata !"0x100\00var1\0017\000", metadata !24, metadata !25, metadata !"_ZTS4AAA3"} ; [ DW_TAG_auto_variable ] [var1] [line 17]
-!34 = metadata !{metadata !"0x100\00var2\0018\000", metadata !24, metadata !25, metadata !"_ZTS4AAA3"} ; [ DW_TAG_auto_variable ] [var2] [line 18]
-!35 = metadata !{metadata !"0x2e\00operator=\00operator=\00_ZN4AAA3aSEPKc\006\000\001\000\006\00256\001\006", metadata !1, metadata !"_ZTS4AAA3", metadata !12, null, null, null, metadata !17, metadata !36} ; [ DW_TAG_subprogram ] [line 6] [def] [operator=]
-!36 = metadata !{metadata !37, metadata !39}
-!37 = metadata !{metadata !"0x101\00this\0016777216\001088", metadata !35, null, metadata !38} ; [ DW_TAG_arg_variable ] [this] [line 0]
-!38 = metadata !{metadata !"0xf\00\000\0064\0064\000\000", null, null, metadata !"_ZTS4AAA3"} ; [ DW_TAG_pointer_type ] [line 0, size 64, align 64, offset 0] [from _ZTS4AAA3]
-!39 = metadata !{metadata !"0x101\00value\0033554438\000", metadata !35, metadata !25, metadata !15} ; [ DW_TAG_arg_variable ] [value] [line 6]
-!40 = metadata !{metadata !"0x2e\00AAA3\00AAA3\00_ZN4AAA3C2EPKc\005\000\001\000\006\00256\001\005", metadata !1, metadata !"_ZTS4AAA3", metadata !12, null, null, null, metadata !11, metadata !41} ; [ DW_TAG_subprogram ] [line 5] [def] [AAA3]
-!41 = metadata !{metadata !42, metadata !43}
-!42 = metadata !{metadata !"0x101\00this\0016777216\001088", metadata !40, null, metadata !38} ; [ DW_TAG_arg_variable ] [this] [line 0]
-!43 = metadata !{metadata !"0x101\00value\0033554437\000", metadata !40, metadata !25, metadata !15} ; [ DW_TAG_arg_variable ] [value] [line 5]
-!44 = metadata !{i32 2, metadata !"Dwarf Version", i32 4}
-!45 = metadata !{i32 2, metadata !"Debug Info Version", i32 2}
-!46 = metadata !{metadata !"clang version 3.5.0 "}
-!47 = metadata !{i32 11, i32 0, metadata !24, null}
-!48 = metadata !{i8* null}
-!49 = metadata !{i32 12, i32 0, metadata !24, null}
-!50 = metadata !{i32 14, i32 0, metadata !51, null}
-!51 = metadata !{metadata !"0xb\0014\000\000", metadata !1, metadata !24} ; [ DW_TAG_lexical_block ] [/tmp/dbginfo/dbg-changes-codegen-branch-folding.cpp]
-!52 = metadata !{i32 15, i32 0, metadata !53, null}
-!53 = metadata !{metadata !"0xb\0014\000\000", metadata !1, metadata !51} ; [ DW_TAG_lexical_block ] [/tmp/dbginfo/dbg-changes-codegen-branch-folding.cpp]
-!54 = metadata !{i32 16, i32 0, metadata !53, null}
-!55 = metadata !{i32 17, i32 0, metadata !24, null}
-!56 = metadata !{metadata !"0x101\00this\0016777216\001088", metadata !40, null, metadata !38, metadata !55} ; [ DW_TAG_arg_variable ] [this] [line 0]
-!57 = metadata !{i32 0, i32 0, metadata !40, metadata !55}
-!58 = metadata !{i8* getelementptr inbounds ([1 x i8]* @.str, i64 0, i64 0)}
-!59 = metadata !{metadata !"0x101\00value\0033554437\000", metadata !40, metadata !25, metadata !15, metadata !55} ; [ DW_TAG_arg_variable ] [value] [line 5]
-!60 = metadata !{i32 5, i32 0, metadata !40, metadata !55}
-!61 = metadata !{i32 5, i32 0, metadata !62, metadata !55}
-!62 = metadata !{metadata !"0xb\005\000\000", metadata !1, metadata !40} ; [ DW_TAG_lexical_block ] [/tmp/dbginfo/dbg-changes-codegen-branch-folding.cpp]
-!63 = metadata !{i32 18, i32 0, metadata !24, null}
-!64 = metadata !{metadata !"0x101\00this\0016777216\001088", metadata !40, null, metadata !38, metadata !63} ; [ DW_TAG_arg_variable ] [this] [line 0]
-!65 = metadata !{i32 0, i32 0, metadata !40, metadata !63}
-!66 = metadata !{metadata !"0x101\00value\0033554437\000", metadata !40, metadata !25, metadata !15, metadata !63} ; [ DW_TAG_arg_variable ] [value] [line 5]
-!67 = metadata !{i32 5, i32 0, metadata !40, metadata !63}
-!68 = metadata !{i32 5, i32 0, metadata !62, metadata !63}
-!69 = metadata !{i32 20, i32 0, metadata !70, null}
-!70 = metadata !{metadata !"0xb\0020\000\000", metadata !1, metadata !24} ; [ DW_TAG_lexical_block ] [/tmp/dbginfo/dbg-changes-codegen-branch-folding.cpp]
-!71 = metadata !{metadata !"0x101\00this\0016777216\001088", metadata !35, null, metadata !38, metadata !72} ; [ DW_TAG_arg_variable ] [this] [line 0]
-!72 = metadata !{i32 21, i32 0, metadata !70, null}
-!73 = metadata !{i32 0, i32 0, metadata !35, metadata !72}
-!74 = metadata !{i8* getelementptr inbounds ([2 x i8]* @.str1, i64 0, i64 0)}
-!75 = metadata !{metadata !"0x101\00value\0033554438\000", metadata !35, metadata !25, metadata !15, metadata !72} ; [ DW_TAG_arg_variable ] [value] [line 6]
-!76 = metadata !{i32 6, i32 0, metadata !35, metadata !72}
-!77 = metadata !{metadata !"0x101\00this\0016777216\001088", metadata !35, null, metadata !38, metadata !78} ; [ DW_TAG_arg_variable ] [this] [line 0]
-!78 = metadata !{i32 23, i32 0, metadata !70, null}
-!79 = metadata !{i32 0, i32 0, metadata !35, metadata !78}
-!80 = metadata !{i8* getelementptr inbounds ([2 x i8]* @.str2, i64 0, i64 0)}
-!81 = metadata !{metadata !"0x101\00value\0033554438\000", metadata !35, metadata !25, metadata !15, metadata !78} ; [ DW_TAG_arg_variable ] [value] [line 6]
-!82 = metadata !{i32 6, i32 0, metadata !35, metadata !78}
-!83 = metadata !{metadata !"0x101\00this\0016777216\001088", metadata !35, null, metadata !38, metadata !84} ; [ DW_TAG_arg_variable ] [this] [line 0]
-!84 = metadata !{i32 24, i32 0, metadata !24, null}
-!85 = metadata !{i32 0, i32 0, metadata !35, metadata !84}
-!86 = metadata !{metadata !"0x101\00value\0033554438\000", metadata !35, metadata !25, metadata !15, metadata !84} ; [ DW_TAG_arg_variable ] [value] [line 6]
-!87 = metadata !{i32 6, i32 0, metadata !35, metadata !84}
-!88 = metadata !{i32 25, i32 0, metadata !24, null}
+!0 = !{!"0x11\004\00clang version 3.5.0 \001\00\000\00\001", !1, !2, !3, !23, !2, !2} ; [ DW_TAG_compile_unit ] [/tmp/dbginfo/dbg-changes-codegen-branch-folding.cpp] [DW_LANG_C_plus_plus]
+!1 = !{!"dbg-changes-codegen-branch-folding.cpp", !"/tmp/dbginfo"}
+!2 = !{}
+!3 = !{!4}
+!4 = !{!"0x13\00AAA3\004\0032\008\000\000\000", !1, null, null, !5, null, null, !"_ZTS4AAA3"} ; [ DW_TAG_structure_type ] [AAA3] [line 4, size 32, align 8, offset 0] [def] [from ]
+!5 = !{!6, !11, !17, !18}
+!6 = !{!"0xd\00text\008\0032\008\000\000", !1, !"_ZTS4AAA3", !7} ; [ DW_TAG_member ] [text] [line 8, size 32, align 8, offset 0] [from ]
+!7 = !{!"0x1\00\000\0032\008\000\000", null, null, !8, !9, i32 0, null, null, null} ; [ DW_TAG_array_type ] [line 0, size 32, align 8, offset 0] [from char]
+!8 = !{!"0x24\00char\000\008\008\000\000\006", null, null} ; [ DW_TAG_base_type ] [char] [line 0, size 8, align 8, offset 0, enc DW_ATE_signed_char]
+!9 = !{!10}
+!10 = !{!"0x21\000\004"} ; [ DW_TAG_subrange_type ] [0, 3]
+!11 = !{!"0x2e\00AAA3\00AAA3\00\005\000\000\000\006\00256\001\005", !1, !"_ZTS4AAA3", !12, null, null, null, i32 0, null} ; [ DW_TAG_subprogram ] [line 5] [AAA3]
+!12 = !{!"0x15\00\000\000\000\000\000\000", i32 0, null, null, !13, null, null, null} ; [ DW_TAG_subroutine_type ] [line 0, size 0, align 0, offset 0] [from ]
+!13 = !{null, !14, !15}
+!14 = !{!"0xf\00\000\0064\0064\000\001088", null, null, !"_ZTS4AAA3"} ; [ DW_TAG_pointer_type ] [line 0, size 64, align 64, offset 0] [artificial] [from _ZTS4AAA3]
+!15 = !{!"0xf\00\000\0064\0064\000\000", null, null, !16} ; [ DW_TAG_pointer_type ] [line 0, size 64, align 64, offset 0] [from ]
+!16 = !{!"0x26\00\000\000\000\000\000", null, null, !8} ; [ DW_TAG_const_type ] [line 0, size 0, align 0, offset 0] [from char]
+!17 = !{!"0x2e\00operator=\00operator=\00_ZN4AAA3aSEPKc\006\000\000\000\006\00256\001\006", !1, !"_ZTS4AAA3", !12, null, null, null, i32 0, null} ; [ DW_TAG_subprogram ] [line 6] [operator=]
+!18 = !{!"0x2e\00operator const char *\00operator const char *\00_ZNK4AAA3cvPKcEv\007\000\000\000\006\00256\001\007", !1, !"_ZTS4AAA3", !19, null, null, null, i32 0, null} ; [ DW_TAG_subprogram ] [line 7] [operator const char *]
+!19 = !{!"0x15\00\000\000\000\000\000\000", i32 0, null, null, !20, null, null, null} ; [ DW_TAG_subroutine_type ] [line 0, size 0, align 0, offset 0] [from ]
+!20 = !{!15, !21}
+!21 = !{!"0xf\00\000\0064\0064\000\001088", null, null, !22} ; [ DW_TAG_pointer_type ] [line 0, size 64, align 64, offset 0] [artificial] [from ]
+!22 = !{!"0x26\00\000\000\000\000\000", null, null, !"_ZTS4AAA3"} ; [ DW_TAG_const_type ] [line 0, size 0, align 0, offset 0] [from _ZTS4AAA3]
+!23 = !{!24, !35, !40}
+!24 = !{!"0x2e\00bar\00bar\00_Z3barii\0011\000\001\000\006\00256\001\0011", !1, !25, !26, null, void (i32, i32)* @_Z3barii, null, null, !29} ; [ DW_TAG_subprogram ] [line 11] [def] [bar]
+!25 = !{!"0x29", !1} ; [ DW_TAG_file_type ] [/tmp/dbginfo/dbg-changes-codegen-branch-folding.cpp]
+!26 = !{!"0x15\00\000\000\000\000\000\000", i32 0, null, null, !27, null, null, null} ; [ DW_TAG_subroutine_type ] [line 0, size 0, align 0, offset 0] [from ]
+!27 = !{null, !28, !28}
+!28 = !{!"0x24\00int\000\0032\0032\000\000\005", null, null} ; [ DW_TAG_base_type ] [int] [line 0, size 32, align 32, offset 0, enc DW_ATE_signed]
+!29 = !{!30, !31, !32, !33, !34}
+!30 = !{!"0x101\00param1\0016777227\000", !24, !25, !28} ; [ DW_TAG_arg_variable ] [param1] [line 11]
+!31 = !{!"0x101\00param2\0033554443\000", !24, !25, !28} ; [ DW_TAG_arg_variable ] [param2] [line 11]
+!32 = !{!"0x100\00temp\0012\000", !24, !25, !15} ; [ DW_TAG_auto_variable ] [temp] [line 12]
+!33 = !{!"0x100\00var1\0017\000", !24, !25, !"_ZTS4AAA3"} ; [ DW_TAG_auto_variable ] [var1] [line 17]
+!34 = !{!"0x100\00var2\0018\000", !24, !25, !"_ZTS4AAA3"} ; [ DW_TAG_auto_variable ] [var2] [line 18]
+!35 = !{!"0x2e\00operator=\00operator=\00_ZN4AAA3aSEPKc\006\000\001\000\006\00256\001\006", !1, !"_ZTS4AAA3", !12, null, null, null, !17, !36} ; [ DW_TAG_subprogram ] [line 6] [def] [operator=]
+!36 = !{!37, !39}
+!37 = !{!"0x101\00this\0016777216\001088", !35, null, !38} ; [ DW_TAG_arg_variable ] [this] [line 0]
+!38 = !{!"0xf\00\000\0064\0064\000\000", null, null, !"_ZTS4AAA3"} ; [ DW_TAG_pointer_type ] [line 0, size 64, align 64, offset 0] [from _ZTS4AAA3]
+!39 = !{!"0x101\00value\0033554438\000", !35, !25, !15} ; [ DW_TAG_arg_variable ] [value] [line 6]
+!40 = !{!"0x2e\00AAA3\00AAA3\00_ZN4AAA3C2EPKc\005\000\001\000\006\00256\001\005", !1, !"_ZTS4AAA3", !12, null, null, null, !11, !41} ; [ DW_TAG_subprogram ] [line 5] [def] [AAA3]
+!41 = !{!42, !43}
+!42 = !{!"0x101\00this\0016777216\001088", !40, null, !38} ; [ DW_TAG_arg_variable ] [this] [line 0]
+!43 = !{!"0x101\00value\0033554437\000", !40, !25, !15} ; [ DW_TAG_arg_variable ] [value] [line 5]
+!44 = !{i32 2, !"Dwarf Version", i32 4}
+!45 = !{i32 2, !"Debug Info Version", i32 2}
+!46 = !{!"clang version 3.5.0 "}
+!47 = !MDLocation(line: 11, scope: !24)
+!48 = !{i8* null}
+!49 = !MDLocation(line: 12, scope: !24)
+!50 = !MDLocation(line: 14, scope: !51)
+!51 = !{!"0xb\0014\000\000", !1, !24} ; [ DW_TAG_lexical_block ] [/tmp/dbginfo/dbg-changes-codegen-branch-folding.cpp]
+!52 = !MDLocation(line: 15, scope: !53)
+!53 = !{!"0xb\0014\000\000", !1, !51} ; [ DW_TAG_lexical_block ] [/tmp/dbginfo/dbg-changes-codegen-branch-folding.cpp]
+!54 = !MDLocation(line: 16, scope: !53)
+!55 = !MDLocation(line: 17, scope: !24)
+!56 = !{!"0x101\00this\0016777216\001088", !40, null, !38, !55} ; [ DW_TAG_arg_variable ] [this] [line 0]
+!57 = !MDLocation(line: 0, scope: !40, inlinedAt: !55)
+!58 = !{i8* getelementptr inbounds ([1 x i8]* @.str, i64 0, i64 0)}
+!59 = !{!"0x101\00value\0033554437\000", !40, !25, !15, !55} ; [ DW_TAG_arg_variable ] [value] [line 5]
+!60 = !MDLocation(line: 5, scope: !40, inlinedAt: !55)
+!61 = !MDLocation(line: 5, scope: !62, inlinedAt: !55)
+!62 = !{!"0xb\005\000\000", !1, !40} ; [ DW_TAG_lexical_block ] [/tmp/dbginfo/dbg-changes-codegen-branch-folding.cpp]
+!63 = !MDLocation(line: 18, scope: !24)
+!64 = !{!"0x101\00this\0016777216\001088", !40, null, !38, !63} ; [ DW_TAG_arg_variable ] [this] [line 0]
+!65 = !MDLocation(line: 0, scope: !40, inlinedAt: !63)
+!66 = !{!"0x101\00value\0033554437\000", !40, !25, !15, !63} ; [ DW_TAG_arg_variable ] [value] [line 5]
+!67 = !MDLocation(line: 5, scope: !40, inlinedAt: !63)
+!68 = !MDLocation(line: 5, scope: !62, inlinedAt: !63)
+!69 = !MDLocation(line: 20, scope: !70)
+!70 = !{!"0xb\0020\000\000", !1, !24} ; [ DW_TAG_lexical_block ] [/tmp/dbginfo/dbg-changes-codegen-branch-folding.cpp]
+!71 = !{!"0x101\00this\0016777216\001088", !35, null, !38, !72} ; [ DW_TAG_arg_variable ] [this] [line 0]
+!72 = !MDLocation(line: 21, scope: !70)
+!73 = !MDLocation(line: 0, scope: !35, inlinedAt: !72)
+!74 = !{i8* getelementptr inbounds ([2 x i8]* @.str1, i64 0, i64 0)}
+!75 = !{!"0x101\00value\0033554438\000", !35, !25, !15, !72} ; [ DW_TAG_arg_variable ] [value] [line 6]
+!76 = !MDLocation(line: 6, scope: !35, inlinedAt: !72)
+!77 = !{!"0x101\00this\0016777216\001088", !35, null, !38, !78} ; [ DW_TAG_arg_variable ] [this] [line 0]
+!78 = !MDLocation(line: 23, scope: !70)
+!79 = !MDLocation(line: 0, scope: !35, inlinedAt: !78)
+!80 = !{i8* getelementptr inbounds ([2 x i8]* @.str2, i64 0, i64 0)}
+!81 = !{!"0x101\00value\0033554438\000", !35, !25, !15, !78} ; [ DW_TAG_arg_variable ] [value] [line 6]
+!82 = !MDLocation(line: 6, scope: !35, inlinedAt: !78)
+!83 = !{!"0x101\00this\0016777216\001088", !35, null, !38, !84} ; [ DW_TAG_arg_variable ] [this] [line 0]
+!84 = !MDLocation(line: 24, scope: !24)
+!85 = !MDLocation(line: 0, scope: !35, inlinedAt: !84)
+!86 = !{!"0x101\00value\0033554438\000", !35, !25, !15, !84} ; [ DW_TAG_arg_variable ] [value] [line 6]
+!87 = !MDLocation(line: 6, scope: !35, inlinedAt: !84)
+!88 = !MDLocation(line: 25, scope: !24)
diff --git a/test/CodeGen/X86/dbg-changes-codegen.ll b/test/CodeGen/X86/dbg-changes-codegen.ll
index aae95e8..2179667 100644
--- a/test/CodeGen/X86/dbg-changes-codegen.ll
+++ b/test/CodeGen/X86/dbg-changes-codegen.ll
@@ -44,7 +44,7 @@
define zeroext i1 @_ZN3Foo3batEv(%struct.Foo* %this) #0 align 2 {
entry:
%0 = load %struct.Foo** @pfoo, align 8
- tail call void @llvm.dbg.value(metadata !{%struct.Foo* %0}, i64 0, metadata !62, metadata !{metadata !"0x102"})
+ tail call void @llvm.dbg.value(metadata %struct.Foo* %0, i64 0, metadata !62, metadata !{!"0x102"})
%cmp.i = icmp eq %struct.Foo* %0, %this
ret i1 %cmp.i
}
@@ -53,7 +53,7 @@ entry:
define void @_Z3bazv() #1 {
entry:
%0 = load %struct.Wibble** @wibble1, align 8
- tail call void @llvm.dbg.value(metadata !64, i64 0, metadata !65, metadata !{metadata !"0x102"})
+ tail call void @llvm.dbg.value(metadata %struct.Flibble* undef, i64 0, metadata !65, metadata !{!"0x102"})
%1 = load %struct.Wibble** @wibble2, align 8
%cmp.i = icmp ugt %struct.Wibble* %1, %0
br i1 %cmp.i, label %if.then.i, label %_ZN7Flibble3barEP6Wibble.exit
@@ -76,8 +76,8 @@ attributes #1 = { nounwind uwtable "less-precise-fpmad"="false" "no-frame-pointe
attributes #2 = { nounwind readnone }
-!17 = metadata !{metadata !"0x10\00\000\000\000\000\000", null, null, null} ; [ DW_TAG_reference_type ] [line 0, size 0, align 0, offset 0] [from Foo]
-!45 = metadata !{metadata !"0xf\00\000\0064\0064\000\000", null, null, null} ; [ DW_TAG_pointer_type ] [line 0, size 64, align 64, offset 0] [from Flibble]
-!62 = metadata !{metadata !"0x101\00arg\0033554436\000", null, null, metadata !17} ; [ DW_TAG_arg_variable ] [arg] [line 4]
-!64 = metadata !{%struct.Flibble* undef}
-!65 = metadata !{metadata !"0x101\00this\0016777229\001088", null, null, metadata !45} ; [ DW_TAG_arg_variable ] [this] [line 13]
+!17 = !{!"0x10\00\000\000\000\000\000", null, null, null} ; [ DW_TAG_reference_type ] [line 0, size 0, align 0, offset 0] [from Foo]
+!45 = !{!"0xf\00\000\0064\0064\000\000", null, null, null} ; [ DW_TAG_pointer_type ] [line 0, size 64, align 64, offset 0] [from Flibble]
+!62 = !{!"0x101\00arg\0033554436\000", null, null, !17} ; [ DW_TAG_arg_variable ] [arg] [line 4]
+!64 = !{%struct.Flibble* undef}
+!65 = !{!"0x101\00this\0016777229\001088", null, null, !45} ; [ DW_TAG_arg_variable ] [this] [line 13]
diff --git a/test/CodeGen/X86/dbg-combine.ll b/test/CodeGen/X86/dbg-combine.ll
new file mode 100644
index 0000000..f6b9565
--- /dev/null
+++ b/test/CodeGen/X86/dbg-combine.ll
@@ -0,0 +1,113 @@
+; RUN: llc -mtriple x86_64-pc-linux -O0 < %s | FileCheck %s
+
+; Make sure that the sequence of debug locations for function foo is
+; generated correctly. More specifically, the .loc entries for lines
+; 4, 5, 6, and 7 must appear in that order.
+
+; $ clang -emit-llvm -S -g dbg-combine.c
+; 1. int foo()
+; 2. {
+; 3. int elems = 3;
+; 4. int array1[elems];
+; 5. array1[0]=0;
+; 6. array1[1]=1;
+; 7. array1[2]=2;
+; 8. int array2[elems];
+; 9. array2[0]=1;
+; 10. return array2[0];
+; 11. }
+
+; CHECK: .loc 1 4
+; CHECK: .loc 1 5
+; CHECK: .loc 1 6
+; CHECK: .loc 1 7
+
+; ModuleID = 'dbg-combine.c'
+; Function Attrs: nounwind uwtable
+define i32 @foo() #0 {
+entry:
+ %elems = alloca i32, align 4
+ %saved_stack = alloca i8*
+ %cleanup.dest.slot = alloca i32
+ call void @llvm.dbg.declare(metadata i32* %elems, metadata !12, metadata !13), !dbg !14
+ store i32 3, i32* %elems, align 4, !dbg !14
+ %0 = load i32* %elems, align 4, !dbg !15
+ %1 = zext i32 %0 to i64, !dbg !16
+ %2 = call i8* @llvm.stacksave(), !dbg !16
+ store i8* %2, i8** %saved_stack, !dbg !16
+ %vla = alloca i32, i64 %1, align 16, !dbg !16
+ call void @llvm.dbg.declare(metadata i32* %vla, metadata !17, metadata !21), !dbg !22
+ %arrayidx = getelementptr inbounds i32* %vla, i64 0, !dbg !23
+ store i32 0, i32* %arrayidx, align 4, !dbg !24
+ %arrayidx1 = getelementptr inbounds i32* %vla, i64 1, !dbg !25
+ store i32 1, i32* %arrayidx1, align 4, !dbg !26
+ %arrayidx2 = getelementptr inbounds i32* %vla, i64 2, !dbg !27
+ store i32 2, i32* %arrayidx2, align 4, !dbg !28
+ %3 = load i32* %elems, align 4, !dbg !29
+ %4 = zext i32 %3 to i64, !dbg !30
+ %vla3 = alloca i32, i64 %4, align 16, !dbg !30
+ call void @llvm.dbg.declare(metadata i32* %vla3, metadata !31, metadata !21), !dbg !32
+ %arrayidx4 = getelementptr inbounds i32* %vla3, i64 0, !dbg !33
+ store i32 1, i32* %arrayidx4, align 4, !dbg !34
+ %arrayidx5 = getelementptr inbounds i32* %vla3, i64 0, !dbg !35
+ %5 = load i32* %arrayidx5, align 4, !dbg !35
+ store i32 1, i32* %cleanup.dest.slot
+ %6 = load i8** %saved_stack, !dbg !36
+ call void @llvm.stackrestore(i8* %6), !dbg !36
+ ret i32 %5, !dbg !36
+}
+
+; Function Attrs: nounwind readnone
+declare void @llvm.dbg.declare(metadata, metadata, metadata) #1
+
+; Function Attrs: nounwind
+declare i8* @llvm.stacksave() #2
+
+; Function Attrs: nounwind
+declare void @llvm.stackrestore(i8*) #2
+
+attributes #0 = { nounwind uwtable "less-precise-fpmad"="false" "no-frame-pointer-elim"="true" "no-frame-pointer-elim-non-leaf" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" }
+attributes #1 = { nounwind readnone }
+attributes #2 = { nounwind }
+
+!llvm.dbg.cu = !{!0}
+!llvm.module.flags = !{!9, !10}
+!llvm.ident = !{!11}
+
+!0 = !{!"0x11\0012\00clang version 3.7.0 (trunk 227074)\000\00\000\00\001", !1, !2, !2, !3, !2, !2} ; [ DW_TAG_compile_unit ] [/home/probinson/projects/scratch/dbg-combine.c] [DW_LANG_C99]
+!1 = !{!"dbg-combine.c", !"/home/probinson/projects/scratch"}
+!2 = !{}
+!3 = !{!4}
+!4 = !{!"0x2e\00foo\00foo\00\001\000\001\000\000\000\000\002", !1, !5, !6, null, i32 ()* @foo, null, null, !2} ; [ DW_TAG_subprogram ] [line 1] [def] [scope 2] [foo]
+!5 = !{!"0x29", !1} ; [ DW_TAG_file_type ] [/home/probinson/projects/scratch/dbg-combine.c]
+!6 = !{!"0x15\00\000\000\000\000\000\000", null, null, null, !7, null, null, null} ; [ DW_TAG_subroutine_type ] [line 0, size 0, align 0, offset 0] [from ]
+!7 = !{!8}
+!8 = !{!"0x24\00int\000\0032\0032\000\000\005", null, null} ; [ DW_TAG_base_type ] [int] [line 0, size 32, align 32, offset 0, enc DW_ATE_signed]
+!9 = !{i32 2, !"Dwarf Version", i32 4}
+!10 = !{i32 2, !"Debug Info Version", i32 2}
+!11 = !{!"clang version 3.7.0 (trunk 227074)"}
+!12 = !{!"0x100\00elems\003\000", !4, !5, !8} ; [ DW_TAG_auto_variable ] [elems] [line 3]
+!13 = !{!"0x102"} ; [ DW_TAG_expression ]
+!14 = !MDLocation(line: 3, column: 8, scope: !4)
+!15 = !MDLocation(line: 4, column: 15, scope: !4)
+!16 = !MDLocation(line: 4, column: 4, scope: !4)
+!17 = !{!"0x100\00array1\004\000", !4, !5, !18} ; [ DW_TAG_auto_variable ] [array1] [line 4]
+!18 = !{!"0x1\00\000\000\0032\000\000\000", null, null, !8, !19, null, null, null} ; [ DW_TAG_array_type ] [line 0, size 0, align 32, offset 0] [from int]
+!19 = !{!20}
+!20 = !{!"0x21\000\00-1"} ; [ DW_TAG_subrange_type ] [unbounded]
+!21 = !{!"0x102\006"} ; [ DW_TAG_expression ] [DW_OP_deref]
+!22 = !MDLocation(line: 4, column: 8, scope: !4)
+!23 = !MDLocation(line: 5, column: 4, scope: !4)
+!24 = !MDLocation(line: 5, column: 13, scope: !4)
+!25 = !MDLocation(line: 6, column: 4, scope: !4)
+!26 = !MDLocation(line: 6, column: 13, scope: !4)
+!27 = !MDLocation(line: 7, column: 4, scope: !4)
+!28 = !MDLocation(line: 7, column: 13, scope: !4)
+!29 = !MDLocation(line: 8, column: 15, scope: !4)
+!30 = !MDLocation(line: 8, column: 4, scope: !4)
+!31 = !{!"0x100\00array2\008\000", !4, !5, !18} ; [ DW_TAG_auto_variable ] [array2] [line 8]
+!32 = !MDLocation(line: 8, column: 8, scope: !4)
+!33 = !MDLocation(line: 9, column: 4, scope: !4)
+!34 = !MDLocation(line: 9, column: 13, scope: !4)
+!35 = !MDLocation(line: 10, column: 11, scope: !4)
+!36 = !MDLocation(line: 11, column: 1, scope: !4)
diff --git a/test/CodeGen/X86/dllexport-x86_64.ll b/test/CodeGen/X86/dllexport-x86_64.ll
index c673f5d..629a557 100644
--- a/test/CodeGen/X86/dllexport-x86_64.ll
+++ b/test/CodeGen/X86/dllexport-x86_64.ll
@@ -17,19 +17,16 @@ define dllexport void @f2() unnamed_addr {
ret void
}
-; CHECK: .section .text,"xr",discard,lnk1
; CHECK: .globl lnk1
define linkonce_odr dllexport void @lnk1() {
ret void
}
-; CHECK: .section .text,"xr",discard,lnk2
; CHECK: .globl lnk2
define linkonce_odr dllexport void @lnk2() alwaysinline {
ret void
}
-; CHECK: .section .text,"xr",discard,weak1
; CHECK: .globl weak1
define weak_odr dllexport void @weak1() {
ret void
@@ -40,18 +37,16 @@ define weak_odr dllexport void @weak1() {
; CHECK: .globl Var1
@Var1 = dllexport global i32 1, align 4
-; CHECK: .rdata,"rd"
+; CHECK: .rdata,"dr"
; CHECK: .globl Var2
@Var2 = dllexport unnamed_addr constant i32 1
; CHECK: .comm Var3
@Var3 = common dllexport global i32 0, align 4
-; CHECK: .section .data,"wd",discard,WeakVar1
; CHECK: .globl WeakVar1
@WeakVar1 = weak_odr dllexport global i32 1, align 4
-; CHECK: .section .rdata,"rd",discard,WeakVar2
; CHECK: .globl WeakVar2
@WeakVar2 = weak_odr dllexport unnamed_addr constant i32 1
diff --git a/test/CodeGen/X86/dllexport.ll b/test/CodeGen/X86/dllexport.ll
index 5035aa1..02a83ae 100644
--- a/test/CodeGen/X86/dllexport.ll
+++ b/test/CodeGen/X86/dllexport.ll
@@ -21,6 +21,8 @@ define dllexport void @f2() unnamed_addr {
ret void
}
+declare dllexport void @not_exported()
+
; CHECK: .globl _stdfun@0
define dllexport x86_stdcallcc void @stdfun() nounwind {
ret void
@@ -36,19 +38,16 @@ define dllexport x86_thiscallcc void @thisfun() nounwind {
ret void
}
-; CHECK: .section .text,"xr",discard,_lnk1
; CHECK: .globl _lnk1
define linkonce_odr dllexport void @lnk1() {
ret void
}
-; CHECK: .section .text,"xr",discard,_lnk2
; CHECK: .globl _lnk2
define linkonce_odr dllexport void @lnk2() alwaysinline {
ret void
}
-; CHECK: .section .text,"xr",discard,_weak1
; CHECK: .globl _weak1
define weak_odr dllexport void @weak1() {
ret void
@@ -59,18 +58,16 @@ define weak_odr dllexport void @weak1() {
; CHECK: .globl _Var1
@Var1 = dllexport global i32 1, align 4
-; CHECK: .rdata,"rd"
+; CHECK: .rdata,"dr"
; CHECK: .globl _Var2
@Var2 = dllexport unnamed_addr constant i32 1
; CHECK: .comm _Var3
@Var3 = common dllexport global i32 0, align 4
-; CHECK: .section .data,"wd",discard,_WeakVar1
; CHECK: .globl _WeakVar1
@WeakVar1 = weak_odr dllexport global i32 1, align 4
-; CHECK: .section .rdata,"rd",discard,_WeakVar2
; CHECK: .globl _WeakVar2
@WeakVar2 = weak_odr dllexport unnamed_addr constant i32 1
@@ -91,7 +88,6 @@ define weak_odr dllexport void @weak1() {
; CHECK: _weak_alias = _f1
@weak_alias = weak_odr dllexport alias void()* @f1
-
; CHECK: .section .drectve
; CHECK-CL: " /EXPORT:_Var1,DATA"
; CHECK-CL: " /EXPORT:_Var2,DATA"
@@ -100,6 +96,7 @@ define weak_odr dllexport void @weak1() {
; CHECK-CL: " /EXPORT:_WeakVar2,DATA"
; CHECK-CL: " /EXPORT:_f1"
; CHECK-CL: " /EXPORT:_f2"
+; CHECK-CL-NOT: not_exported
; CHECK-CL: " /EXPORT:_stdfun@0"
; CHECK-CL: " /EXPORT:@fastfun@0"
; CHECK-CL: " /EXPORT:_thisfun"
@@ -117,6 +114,7 @@ define weak_odr dllexport void @weak1() {
; CHECK-GCC: " -export:WeakVar2,data"
; CHECK-GCC: " -export:f1"
; CHECK-GCC: " -export:f2"
+; CHECK-GCC-NOT: not_exported
; CHECK-GCC: " -export:stdfun@0"
; CHECK-GCC: " -export:@fastfun@0"
; CHECK-GCC: " -export:thisfun"
diff --git a/test/CodeGen/X86/dwarf-comp-dir.ll b/test/CodeGen/X86/dwarf-comp-dir.ll
index 872f7fa..77eba63 100644
--- a/test/CodeGen/X86/dwarf-comp-dir.ll
+++ b/test/CodeGen/X86/dwarf-comp-dir.ll
@@ -7,15 +7,15 @@ target triple = "x86_64-unknown-linux-gnu"
!llvm.dbg.cu = !{!0}
!llvm.module.flags = !{!5}
-!0 = metadata !{metadata !"0x11\0012\00clang version 3.1 (trunk 143523)\001\00\000\00\000", metadata !4, metadata !2, metadata !7, metadata !2, metadata !2, null} ; [ DW_TAG_compile_unit ]
-!2 = metadata !{}
-!3 = metadata !{metadata !"0x29", metadata !4} ; [ DW_TAG_file_type ]
-!4 = metadata !{metadata !"empty.c", metadata !"/home/nlewycky"}
-!6 = metadata !{metadata !"0x13\00foo\001\008\008\000\000\000", metadata !4, null, null, metadata !2, null, null, metadata !"_ZTS3foo"} ; [ DW_TAG_structure_type ] [foo] [line 1, size 8, align 8, offset 0] [def] [from ]
-!7 = metadata !{metadata !6}
+!0 = !{!"0x11\0012\00clang version 3.1 (trunk 143523)\001\00\000\00\000", !4, !2, !7, !2, !2, null} ; [ DW_TAG_compile_unit ]
+!2 = !{}
+!3 = !{!"0x29", !4} ; [ DW_TAG_file_type ]
+!4 = !{!"empty.c", !"/home/nlewycky"}
+!6 = !{!"0x13\00foo\001\008\008\000\000\000", !4, null, null, !2, null, null, !"_ZTS3foo"} ; [ DW_TAG_structure_type ] [foo] [line 1, size 8, align 8, offset 0] [def] [from ]
+!7 = !{!6}
; The important part of the following check is that dir = #0.
; Dir Mod Time File Len File Name
; ---- ---------- ---------- ---------------------------
; CHECK: file_names[ 1] 0 0x00000000 0x00000000 empty.c
-!5 = metadata !{i32 1, metadata !"Debug Info Version", i32 2}
+!5 = !{i32 1, !"Debug Info Version", i32 2}
diff --git a/test/CodeGen/X86/dwarf-eh-prepare.ll b/test/CodeGen/X86/dwarf-eh-prepare.ll
new file mode 100644
index 0000000..a3a70da
--- /dev/null
+++ b/test/CodeGen/X86/dwarf-eh-prepare.ll
@@ -0,0 +1,51 @@
+; RUN: opt -mtriple=x86_64-linux-gnu -dwarfehprepare < %s -S | FileCheck %s
+
+; Check basic functionality of IR-to-IR DWARF EH preparation. This should
+; eliminate resumes. This pass requires a TargetMachine, so we put it under X86
+; and provide an x86 triple.
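+;
+; A rough sketch of the rewrite (editorial note, not part of this commit):
+; DwarfEHPrepare replaces each `resume` with an explicit call into the
+; unwinder followed by unreachable, roughly:
+;
+;   %exn = extractvalue { i8*, i32 } %ehvals, 0
+;   call void @_Unwind_Resume(i8* %exn)
+;   unreachable
+;
+; which is why the eh.resume block below is checked for a call to
+; @_Unwind_Resume rather than a resume instruction.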
+
+@int_typeinfo = global i8 0
+
+declare void @might_throw()
+
+define i32 @simple_catch() {
+ invoke void @might_throw()
+ to label %cont unwind label %lpad
+
+; CHECK: define i32 @simple_catch()
+; CHECK: invoke void @might_throw()
+
+cont:
+ ret i32 0
+
+; CHECK: ret i32 0
+
+lpad:
+ %ehvals = landingpad { i8*, i32 } personality i32 (...)* @__gxx_personality_v0
+ catch i8* @int_typeinfo
+ %ehptr = extractvalue { i8*, i32 } %ehvals, 0
+ %ehsel = extractvalue { i8*, i32 } %ehvals, 1
+ %int_sel = call i32 @llvm.eh.typeid.for(i8* @int_typeinfo)
+ %int_match = icmp eq i32 %ehsel, %int_sel
+ br i1 %int_match, label %catch_int, label %eh.resume
+
+; CHECK: lpad:
+; CHECK: landingpad { i8*, i32 } personality i32 (...)* @__gxx_personality_v0
+; CHECK: call i32 @llvm.eh.typeid.for
+; CHECK: br i1
+
+catch_int:
+ ret i32 1
+
+; CHECK: catch_int:
+; CHECK: ret i32 1
+
+eh.resume:
+ resume { i8*, i32 } %ehvals
+
+; CHECK: eh.resume:
+; CHECK: call void @_Unwind_Resume(i8* %{{.*}})
+}
+
+declare i32 @__gxx_personality_v0(...)
+declare i32 @llvm.eh.typeid.for(i8*)
diff --git a/test/CodeGen/X86/elf-comdat.ll b/test/CodeGen/X86/elf-comdat.ll
index c7e6df7..35d8d6f 100644
--- a/test/CodeGen/X86/elf-comdat.ll
+++ b/test/CodeGen/X86/elf-comdat.ll
@@ -1,8 +1,8 @@
; RUN: llc -mtriple x86_64-pc-linux-gnu < %s | FileCheck %s
$f = comdat any
-@v = global i32 0, comdat $f
-define void @f() comdat $f {
+@v = global i32 0, comdat($f)
+define void @f() comdat($f) {
ret void
}
; CHECK: .section .text.f,"axG",@progbits,f,comdat
diff --git a/test/CodeGen/X86/elf-comdat2.ll b/test/CodeGen/X86/elf-comdat2.ll
index 209da39..786cec7 100644
--- a/test/CodeGen/X86/elf-comdat2.ll
+++ b/test/CodeGen/X86/elf-comdat2.ll
@@ -1,7 +1,7 @@
; RUN: llc -mtriple x86_64-pc-linux-gnu < %s | FileCheck %s
$foo = comdat any
-@bar = global i32 42, comdat $foo
+@bar = global i32 42, comdat($foo)
@foo = global i32 42
; CHECK: .type bar,@object
diff --git a/test/CodeGen/X86/equiv_with_fndef.ll b/test/CodeGen/X86/equiv_with_fndef.ll
new file mode 100644
index 0000000..efbb8ab
--- /dev/null
+++ b/test/CodeGen/X86/equiv_with_fndef.ll
@@ -0,0 +1,10 @@
+; RUN: not llc < %s 2>&1 | FileCheck %s
+target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
+target triple = "x86_64-unknown-linux-gnu"
+
+module asm ".equiv pselect, __pselect"
+
+define void @pselect() {
+ ret void
+}
+; CHECK: 'pselect' is a protected alias
diff --git a/test/CodeGen/X86/equiv_with_vardef.ll b/test/CodeGen/X86/equiv_with_vardef.ll
new file mode 100644
index 0000000..29c19a1
--- /dev/null
+++ b/test/CodeGen/X86/equiv_with_vardef.ll
@@ -0,0 +1,8 @@
+; RUN: not llc < %s 2>&1 | FileCheck %s
+target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
+target triple = "x86_64-unknown-linux-gnu"
+
+module asm ".equiv var, __var"
+
+@var = global i32 0
+; CHECK: symbol 'var' is already defined
diff --git a/test/CodeGen/X86/extractelement-load.ll b/test/CodeGen/X86/extractelement-load.ll
index 8647599..732f698 100644
--- a/test/CodeGen/X86/extractelement-load.ll
+++ b/test/CodeGen/X86/extractelement-load.ll
@@ -1,5 +1,6 @@
; RUN: llc < %s -march=x86 -mattr=+sse2 -mcpu=yonah | FileCheck %s
; RUN: llc < %s -march=x86-64 -mattr=+sse2 -mcpu=core2 | FileCheck %s
+; RUN: llc < %s -march=x86-64 -mattr=+avx -mcpu=btver2 | FileCheck %s
target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
@@ -29,16 +30,15 @@ undef, i32 7, i32 9, i32 undef, i32 13, i32 15, i32 1, i32 3>
 ; This case could easily end up inf-looping in the DAG combiner due to a
; low alignment load of the vector which prevents us from reliably forming a
; narrow load.
-; FIXME: It would be nice to detect whether the target has fast and legal
-; unaligned loads and use them here.
+
+; The expected codegen is identical for the AVX case except
+; load/store instructions will have a leading 'v', so we don't
+; need to special-case the checks.
+
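+; (Editorial note: movhpd moves the high 64 bits of an XMM register directly
+; to or from memory, so the old movupd+shufpd+movlpd sequence collapses to
+; movupd+movhpd.)
+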
define void @t3() {
; CHECK-LABEL: t3:
-;
-; This movs the entire vector, shuffling the high double down. If we fixed the
-; FIXME above it would just move the high double directly.
; CHECK: movupd
-; CHECK: shufpd
-; CHECK: movlpd
+; CHECK: movhpd
bb:
%tmp13 = load <2 x double>* undef, align 1
diff --git a/test/CodeGen/X86/f16c-intrinsics.ll b/test/CodeGen/X86/f16c-intrinsics.ll
index 514d929..802f917 100644
--- a/test/CodeGen/X86/f16c-intrinsics.ll
+++ b/test/CodeGen/X86/f16c-intrinsics.ll
@@ -2,6 +2,8 @@
; RUN: llc < %s -march=x86-64 -mattr=+avx,+f16c | FileCheck %s
define <4 x float> @test_x86_vcvtph2ps_128(<8 x i16> %a0) {
+ ; CHECK-LABEL: test_x86_vcvtph2ps_128
+ ; CHECK-NOT: vmov
; CHECK: vcvtph2ps
%res = call <4 x float> @llvm.x86.vcvtph2ps.128(<8 x i16> %a0) ; <<4 x float>> [#uses=1]
ret <4 x float> %res
@@ -10,14 +12,27 @@ declare <4 x float> @llvm.x86.vcvtph2ps.128(<8 x i16>) nounwind readonly
define <8 x float> @test_x86_vcvtph2ps_256(<8 x i16> %a0) {
+ ; CHECK-LABEL: test_x86_vcvtph2ps_256
+ ; CHECK-NOT: vmov
; CHECK: vcvtph2ps
%res = call <8 x float> @llvm.x86.vcvtph2ps.256(<8 x i16> %a0) ; <<8 x float>> [#uses=1]
ret <8 x float> %res
}
declare <8 x float> @llvm.x86.vcvtph2ps.256(<8 x i16>) nounwind readonly
+define <8 x float> @test_x86_vcvtph2ps_256_m(<8 x i16>* nocapture %a) nounwind {
+entry:
+ ; CHECK-LABEL: test_x86_vcvtph2ps_256_m:
+ ; CHECK-NOT: vmov
+ ; CHECK: vcvtph2ps (%
+ %tmp1 = load <8 x i16>* %a, align 16
+ %0 = tail call <8 x float> @llvm.x86.vcvtph2ps.256(<8 x i16> %tmp1)
+ ret <8 x float> %0
+}
define <8 x i16> @test_x86_vcvtps2ph_128(<4 x float> %a0) {
+ ; CHECK-LABEL: test_x86_vcvtps2ph_128
+ ; CHECK-NOT: vmov
; CHECK: vcvtps2ph
%res = call <8 x i16> @llvm.x86.vcvtps2ph.128(<4 x float> %a0, i32 0) ; <<8 x i16>> [#uses=1]
ret <8 x i16> %res
@@ -26,6 +41,8 @@ declare <8 x i16> @llvm.x86.vcvtps2ph.128(<4 x float>, i32) nounwind readonly
define <8 x i16> @test_x86_vcvtps2ph_256(<8 x float> %a0) {
+ ; CHECK-LABEL: test_x86_vcvtps2ph_256
+ ; CHECK-NOT: vmov
; CHECK: vcvtps2ph
%res = call <8 x i16> @llvm.x86.vcvtps2ph.256(<8 x float> %a0, i32 0) ; <<8 x i16>> [#uses=1]
ret <8 x i16> %res
diff --git a/test/CodeGen/X86/fast-isel-branch_weights.ll b/test/CodeGen/X86/fast-isel-branch_weights.ll
index bc41395..d2b02aa 100644
--- a/test/CodeGen/X86/fast-isel-branch_weights.ll
+++ b/test/CodeGen/X86/fast-isel-branch_weights.ll
@@ -16,4 +16,4 @@ success:
ret i64 0
}
-!0 = metadata !{metadata !"branch_weights", i32 0, i32 2147483647}
+!0 = !{!"branch_weights", i32 0, i32 2147483647}
diff --git a/test/CodeGen/X86/fast-isel-call-bool.ll b/test/CodeGen/X86/fast-isel-call-bool.ll
new file mode 100644
index 0000000..5cdb2c9
--- /dev/null
+++ b/test/CodeGen/X86/fast-isel-call-bool.ll
@@ -0,0 +1,18 @@
+; RUN: llc < %s -fast-isel -mcpu=core2 -mtriple=x86_64-unknown-unknown -O1 | FileCheck %s
+; See PR21557
+
+target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
+
+declare i64 @bar(i1)
+
+define i64 @foo(i8* %arg) {
+; CHECK-LABEL: foo:
+top:
+ %0 = load i8* %arg
+; CHECK: movb
+ %1 = trunc i8 %0 to i1
+; CHECK: andb $1,
+ %2 = call i64 @bar(i1 %1)
+; CHECK: callq
+ ret i64 %2
+}
diff --git a/test/CodeGen/X86/fast-isel-cmp-branch.ll b/test/CodeGen/X86/fast-isel-cmp-branch.ll
index 6e408f8..684647c 100644
--- a/test/CodeGen/X86/fast-isel-cmp-branch.ll
+++ b/test/CodeGen/X86/fast-isel-cmp-branch.ll
@@ -1,5 +1,5 @@
; RUN: llc -O0 -mtriple=x86_64-linux -asm-verbose=false < %s | FileCheck %s
-; RUN: llc -O0 -mtriple=x86_64-win32 -asm-verbose=false < %s | FileCheck %s
+; RUN: llc -O0 -mtriple=x86_64-windows-itanium -asm-verbose=false < %s | FileCheck %s
; rdar://8337108
; Fast-isel shouldn't try to look through the compare because it's in a
diff --git a/test/CodeGen/X86/fast-isel-double-half-convertion.ll b/test/CodeGen/X86/fast-isel-double-half-convertion.ll
new file mode 100644
index 0000000..ade867b
--- /dev/null
+++ b/test/CodeGen/X86/fast-isel-double-half-convertion.ll
@@ -0,0 +1,23 @@
+; RUN: llc -fast-isel -fast-isel-abort -mtriple=x86_64-unknown-unknown -mattr=+f16c < %s
+
+; XFAIL: *
+
+; In the future, we might want to teach fast-isel how to expand a double-to-half
+; conversion into a double-to-float conversion immediately followed by a
+; float-to-half conversion. For now, fast-isel is expected to fail.
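+;
+; A sketch of that possible expansion (editorial, an assumption about future
+; work rather than part of this commit):
+;
+;   %f = fptrunc double %x to float
+;   %h = call i16 @llvm.convert.to.fp16.f32(float %f)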
+
+define double @test_fp16_to_fp64(i32 %a) {
+entry:
+ %0 = trunc i32 %a to i16
+ %1 = call double @llvm.convert.from.fp16.f64(i16 %0)
+  ret double %1
+}
+
+define i16 @test_fp64_to_fp16(double %a) {
+entry:
+ %0 = call i16 @llvm.convert.to.fp16.f64(double %a)
+ ret i16 %0
+}
+
+declare i16 @llvm.convert.to.fp16.f64(double)
+declare double @llvm.convert.from.fp16.f64(i16)
diff --git a/test/CodeGen/X86/fast-isel-float-half-convertion.ll b/test/CodeGen/X86/fast-isel-float-half-convertion.ll
new file mode 100644
index 0000000..ee89bcd
--- /dev/null
+++ b/test/CodeGen/X86/fast-isel-float-half-convertion.ll
@@ -0,0 +1,28 @@
+; RUN: llc -fast-isel -fast-isel-abort -asm-verbose=false -mtriple=x86_64-unknown-unknown -mattr=+f16c < %s | FileCheck %s
+
+; Verify that fast-isel correctly expands float-half conversions.
+
+define i16 @test_fp32_to_fp16(float %a) {
+; CHECK-LABEL: test_fp32_to_fp16:
+; CHECK: vcvtps2ph $0, %xmm0, %xmm0
+; CHECK-NEXT: vmovd %xmm0, %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = call i16 @llvm.convert.to.fp16.f32(float %a)
+ ret i16 %0
+}
+
+define float @test_fp16_to_fp32(i32 %a) {
+; CHECK-LABEL: test_fp16_to_fp32:
+; CHECK: movswl %di, %eax
+; CHECK-NEXT: vmovd %eax, %xmm0
+; CHECK-NEXT: vcvtph2ps %xmm0, %xmm0
+; CHECK-NEXT: retq
+entry:
+ %0 = trunc i32 %a to i16
+ %1 = call float @llvm.convert.from.fp16.f32(i16 %0)
+ ret float %1
+}
+
+declare i16 @llvm.convert.to.fp16.f32(float)
+declare float @llvm.convert.from.fp16.f32(i16)
diff --git a/test/CodeGen/X86/fast-isel-fptrunc-fpext.ll b/test/CodeGen/X86/fast-isel-fptrunc-fpext.ll
new file mode 100644
index 0000000..308a4c3
--- /dev/null
+++ b/test/CodeGen/X86/fast-isel-fptrunc-fpext.ll
@@ -0,0 +1,65 @@
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse2 -fast-isel -fast-isel-abort | FileCheck %s --check-prefix=ALL --check-prefix=SSE
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx -fast-isel -fast-isel-abort | FileCheck %s --check-prefix=ALL --check-prefix=AVX
+;
+; Verify that fast-isel doesn't select legacy SSE instructions on targets that
+; feature AVX.
+;
+; Test cases are obtained from the following code snippet:
+; ///
+; double single_to_double_rr(float x) {
+; return (double)x;
+; }
+; float double_to_single_rr(double x) {
+; return (float)x;
+; }
+; double single_to_double_rm(float *x) {
+; return (double)*x;
+; }
+; float double_to_single_rm(double *x) {
+; return (float)*x;
+; }
+; ///
+
+define double @single_to_double_rr(float %x) {
+; ALL-LABEL: single_to_double_rr:
+; SSE-NOT: vcvtss2sd
+; AVX: vcvtss2sd %xmm0, %xmm0, %xmm0
+; ALL: ret
+entry:
+ %conv = fpext float %x to double
+ ret double %conv
+}
+
+define float @double_to_single_rr(double %x) {
+; ALL-LABEL: double_to_single_rr:
+; SSE-NOT: vcvtsd2ss
+; AVX: vcvtsd2ss %xmm0, %xmm0, %xmm0
+; ALL: ret
+entry:
+ %conv = fptrunc double %x to float
+ ret float %conv
+}
+
+define double @single_to_double_rm(float* %x) {
+; ALL-LABEL: single_to_double_rm:
+; SSE: cvtss2sd (%rdi), %xmm0
+; AVX: vmovss (%rdi), %xmm0
+; AVX-NEXT: vcvtss2sd %xmm0, %xmm0, %xmm0
+; ALL-NEXT: ret
+entry:
+ %0 = load float* %x, align 4
+ %conv = fpext float %0 to double
+ ret double %conv
+}
+
+define float @double_to_single_rm(double* %x) {
+; ALL-LABEL: double_to_single_rm:
+; SSE: cvtsd2ss (%rdi), %xmm0
+; AVX: vmovsd (%rdi), %xmm0
+; AVX-NEXT: vcvtsd2ss %xmm0, %xmm0, %xmm0
+; ALL-NEXT: ret
+entry:
+ %0 = load double* %x, align 8
+ %conv = fptrunc double %0 to float
+ ret float %conv
+}
diff --git a/test/CodeGen/X86/fast-isel-gep.ll b/test/CodeGen/X86/fast-isel-gep.ll
index 4e47c74..a65e070 100644
--- a/test/CodeGen/X86/fast-isel-gep.ll
+++ b/test/CodeGen/X86/fast-isel-gep.ll
@@ -1,5 +1,5 @@
; RUN: llc < %s -mtriple=x86_64-linux -O0 | FileCheck %s --check-prefix=X64
-; RUN: llc < %s -mtriple=x86_64-win32 -O0 | FileCheck %s --check-prefix=X64
+; RUN: llc < %s -mtriple=x86_64-windows-itanium -O0 | FileCheck %s --check-prefix=X64
; RUN: llc < %s -march=x86 -O0 | FileCheck %s --check-prefix=X32
; GEP indices are interpreted as signed integers, so they
diff --git a/test/CodeGen/X86/fast-isel-int-float-conversion.ll b/test/CodeGen/X86/fast-isel-int-float-conversion.ll
new file mode 100644
index 0000000..3869722
--- /dev/null
+++ b/test/CodeGen/X86/fast-isel-int-float-conversion.ll
@@ -0,0 +1,45 @@
+; RUN: llc -mtriple=x86_64-unknown-unknown -mcpu=generic -mattr=+sse2 -O0 --fast-isel-abort < %s | FileCheck %s --check-prefix=ALL --check-prefix=SSE2
+; RUN: llc -mtriple=x86_64-unknown-unknown -mcpu=generic -mattr=+avx -O0 --fast-isel-abort < %s | FileCheck %s --check-prefix=ALL --check-prefix=AVX
+
+
+define double @int_to_double_rr(i32 %a) {
+; ALL-LABEL: int_to_double_rr:
+; SSE2: cvtsi2sdl %edi, %xmm0
+; AVX: vcvtsi2sdl %edi, %xmm0, %xmm0
+; ALL-NEXT: ret
+entry:
+ %0 = sitofp i32 %a to double
+ ret double %0
+}
+
+define double @int_to_double_rm(i32* %a) {
+; ALL-LABEL: int_to_double_rm:
+; SSE2: cvtsi2sdl (%rdi), %xmm0
+; AVX: vcvtsi2sdl (%rdi), %xmm0, %xmm0
+; ALL-NEXT: ret
+entry:
+ %0 = load i32* %a
+ %1 = sitofp i32 %0 to double
+ ret double %1
+}
+
+define float @int_to_float_rr(i32 %a) {
+; ALL-LABEL: int_to_float_rr:
+; SSE2: cvtsi2ssl %edi, %xmm0
+; AVX: vcvtsi2ssl %edi, %xmm0, %xmm0
+; ALL-NEXT: ret
+entry:
+ %0 = sitofp i32 %a to float
+ ret float %0
+}
+
+define float @int_to_float_rm(i32* %a) {
+; ALL-LABEL: int_to_float_rm:
+; SSE2: cvtsi2ssl (%rdi), %xmm0
+; AVX: vcvtsi2ssl (%rdi), %xmm0, %xmm0
+; ALL-NEXT: ret
+entry:
+ %0 = load i32* %a
+ %1 = sitofp i32 %0 to float
+ ret float %1
+}
diff --git a/test/CodeGen/X86/fastmath-float-half-conversion.ll b/test/CodeGen/X86/fastmath-float-half-conversion.ll
new file mode 100644
index 0000000..2930873
--- /dev/null
+++ b/test/CodeGen/X86/fastmath-float-half-conversion.ll
@@ -0,0 +1,52 @@
+; RUN: llc -mtriple=x86_64-unknown-unknown -mattr=+f16c < %s | FileCheck %s --check-prefix=ALL --check-prefix=F16C
+; RUN: llc -mtriple=x86_64-unknown-unknown -mattr=+avx < %s | FileCheck %s --check-prefix=ALL --check-prefix=AVX
+
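+; Editorial note: lowering double -> half through an intermediate float
+; (vcvtsd2ss followed by vcvtps2ph) can round twice, so the inline F16C
+; sequence is only used for the "unsafe-fp-math"="true" functions; the precise
+; variants below must keep the __truncdfhf2 libcall.
+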
+define zeroext i16 @test1_fast(double %d) #0 {
+; ALL-LABEL: test1_fast:
+; F16C-NOT: callq {{_+}}truncdfhf2
+; F16C: vcvtsd2ss %xmm0, %xmm0, %xmm0
+; F16C-NEXT: vcvtps2ph $0, %xmm0, %xmm0
+; AVX: callq {{_+}}truncdfhf2
+; ALL: ret
+entry:
+ %0 = tail call i16 @llvm.convert.to.fp16.f64(double %d)
+ ret i16 %0
+}
+
+define zeroext i16 @test2_fast(x86_fp80 %d) #0 {
+; ALL-LABEL: test2_fast:
+; F16C-NOT: callq {{_+}}truncxfhf2
+; F16C: fldt
+; F16C-NEXT: fstps
+; F16C-NEXT: vmovss
+; F16C-NEXT: vcvtps2ph $0, %xmm0, %xmm0
+; AVX: callq {{_+}}truncxfhf2
+; ALL: ret
+entry:
+ %0 = tail call i16 @llvm.convert.to.fp16.f80(x86_fp80 %d)
+ ret i16 %0
+}
+
+define zeroext i16 @test1(double %d) #1 {
+; ALL-LABEL: test1:
+; ALL: callq {{_+}}truncdfhf2
+; ALL: ret
+entry:
+ %0 = tail call i16 @llvm.convert.to.fp16.f64(double %d)
+ ret i16 %0
+}
+
+define zeroext i16 @test2(x86_fp80 %d) #1 {
+; ALL-LABEL: test2:
+; ALL: callq {{_+}}truncxfhf2
+; ALL: ret
+entry:
+ %0 = tail call i16 @llvm.convert.to.fp16.f80(x86_fp80 %d)
+ ret i16 %0
+}
+
+declare i16 @llvm.convert.to.fp16.f64(double)
+declare i16 @llvm.convert.to.fp16.f80(x86_fp80)
+
+attributes #0 = { nounwind readnone uwtable "unsafe-fp-math"="true" "use-soft-float"="false" }
+attributes #1 = { nounwind readnone uwtable "unsafe-fp-math"="false" "use-soft-float"="false" }
diff --git a/test/CodeGen/X86/float-conv-elim.ll b/test/CodeGen/X86/float-conv-elim.ll
new file mode 100644
index 0000000..3feff85
--- /dev/null
+++ b/test/CodeGen/X86/float-conv-elim.ll
@@ -0,0 +1,32 @@
+; RUN: llc -march=x86-64 -mcpu=x86-64 < %s | FileCheck %s
+
+; Make sure the float conversion is folded away as it should be.
+; CHECK-LABEL: foo
+; CHECK-NOT: cvt
+; CHECK: movzbl
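+; (Editorial note: every i8 value is exactly representable in float, so the
+; uitofp/fptosi round trip is value-preserving and the conversion pair reduces
+; to a zero extension, hence movzbl and no cvt instructions.)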
+define i32 @foo(i8 %a) #0 {
+ %conv = uitofp i8 %a to float
+ %conv1 = fptosi float %conv to i32
+ ret i32 %conv1
+}
+
+; CHECK-LABEL: foo2
+; CHECK-NOT: cvt
+; CHECK: movsbl
+define i32 @foo2(i8 %a) #0 {
+ %conv = sitofp i8 %a to float
+ %conv1 = fptosi float %conv to i32
+ ret i32 %conv1
+}
+
+; CHECK-LABEL: bar
+; CHECK-NOT: cvt
+; CHECK: movl
+define zeroext i8 @bar(i8 zeroext %a) #0 {
+ %conv = uitofp i8 %a to float
+ %conv1 = fptoui float %conv to i8
+ ret i8 %conv1
+}
+
+attributes #0 = { nounwind ssp uwtable "less-precise-fpmad"="false" "no-frame-pointer-elim"="true" "no-frame-pointer-elim-non-leaf" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" }
+
diff --git a/test/CodeGen/X86/fold-load-unops.ll b/test/CodeGen/X86/fold-load-unops.ll
new file mode 100644
index 0000000..0b2e6c7
--- /dev/null
+++ b/test/CodeGen/X86/fold-load-unops.ll
@@ -0,0 +1,57 @@
+; RUN: llc -mtriple=x86_64-unknown-unknown -mattr=+avx < %s | FileCheck %s
+
+; Verify that we're folding the load into the math instruction.
+
+; FIXME: The folding should also happen without the avx attribute;
+; ie, when generating SSE (non-VEX-prefixed) instructions.
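+;
+; For illustration (editorial, not part of this commit), the unfolded form
+; would be
+;   vmovss (%rdi), %xmm0
+;   vsqrtss %xmm0, %xmm0, %xmm0
+; whereas the folded form checked below is
+;   vsqrtss (%rdi), %xmm0, %xmm0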
+
+define float @rcpss(float* %a) {
+; CHECK-LABEL: rcpss:
+; CHECK: vrcpss (%rdi), %xmm0, %xmm0
+
+ %ld = load float* %a
+ %ins = insertelement <4 x float> undef, float %ld, i32 0
+ %res = tail call <4 x float> @llvm.x86.sse.rcp.ss(<4 x float> %ins)
+ %ext = extractelement <4 x float> %res, i32 0
+ ret float %ext
+}
+
+define float @rsqrtss(float* %a) {
+; CHECK-LABEL: rsqrtss:
+; CHECK: vrsqrtss (%rdi), %xmm0, %xmm0
+
+ %ld = load float* %a
+ %ins = insertelement <4 x float> undef, float %ld, i32 0
+ %res = tail call <4 x float> @llvm.x86.sse.rsqrt.ss(<4 x float> %ins)
+ %ext = extractelement <4 x float> %res, i32 0
+ ret float %ext
+}
+
+define float @sqrtss(float* %a) {
+; CHECK-LABEL: sqrtss:
+; CHECK: vsqrtss (%rdi), %xmm0, %xmm0
+
+ %ld = load float* %a
+ %ins = insertelement <4 x float> undef, float %ld, i32 0
+ %res = tail call <4 x float> @llvm.x86.sse.sqrt.ss(<4 x float> %ins)
+ %ext = extractelement <4 x float> %res, i32 0
+ ret float %ext
+}
+
+define double @sqrtsd(double* %a) {
+; CHECK-LABEL: sqrtsd:
+; CHECK: vsqrtsd (%rdi), %xmm0, %xmm0
+
+ %ld = load double* %a
+ %ins = insertelement <2 x double> undef, double %ld, i32 0
+ %res = tail call <2 x double> @llvm.x86.sse2.sqrt.sd(<2 x double> %ins)
+ %ext = extractelement <2 x double> %res, i32 0
+ ret double %ext
+}
+
+
+declare <4 x float> @llvm.x86.sse.rcp.ss(<4 x float>) nounwind readnone
+declare <4 x float> @llvm.x86.sse.rsqrt.ss(<4 x float>) nounwind readnone
+declare <4 x float> @llvm.x86.sse.sqrt.ss(<4 x float>) nounwind readnone
+declare <2 x double> @llvm.x86.sse2.sqrt.sd(<2 x double>) nounwind readnone
+
diff --git a/test/CodeGen/X86/fold-tied-op.ll b/test/CodeGen/X86/fold-tied-op.ll
index a643d86..5bf5dbd 100644
--- a/test/CodeGen/X86/fold-tied-op.ll
+++ b/test/CodeGen/X86/fold-tied-op.ll
@@ -1,84 +1,84 @@
-; RUN: llc -verify-machineinstrs -mtriple=i386--netbsd < %s | FileCheck %s
-; Regression test for http://reviews.llvm.org/D5701
-
-; ModuleID = 'xxhash.i'
-target datalayout = "e-m:e-p:32:32-f64:32:64-f80:32-n8:16:32-S128"
-target triple = "i386--netbsd"
-
-; CHECK-LABEL: fn1
-; CHECK: shldl {{.*#+}} 4-byte Folded Spill
-; CHECK: orl {{.*#+}} 4-byte Folded Reload
-; CHECK: shldl {{.*#+}} 4-byte Folded Spill
-; CHECK: orl {{.*#+}} 4-byte Folded Reload
-; CHECK: addl {{.*#+}} 4-byte Folded Reload
-; CHECK: imull {{.*#+}} 4-byte Folded Reload
-; CHECK: orl {{.*#+}} 4-byte Folded Reload
-; CHECK: retl
-
-%struct.XXH_state64_t = type { i32, i32, i64, i64, i64 }
-
-@a = common global i32 0, align 4
-@b = common global i64 0, align 8
-
-; Function Attrs: nounwind uwtable
-define i64 @fn1() #0 {
-entry:
- %0 = load i32* @a, align 4, !tbaa !1
- %1 = inttoptr i32 %0 to %struct.XXH_state64_t*
- %total_len = getelementptr inbounds %struct.XXH_state64_t* %1, i32 0, i32 0
- %2 = load i32* %total_len, align 4, !tbaa !5
- %tobool = icmp eq i32 %2, 0
- br i1 %tobool, label %if.else, label %if.then
-
-if.then: ; preds = %entry
- %v3 = getelementptr inbounds %struct.XXH_state64_t* %1, i32 0, i32 3
- %3 = load i64* %v3, align 4, !tbaa !8
- %v4 = getelementptr inbounds %struct.XXH_state64_t* %1, i32 0, i32 4
- %4 = load i64* %v4, align 4, !tbaa !9
- %v2 = getelementptr inbounds %struct.XXH_state64_t* %1, i32 0, i32 2
- %5 = load i64* %v2, align 4, !tbaa !10
- %shl = shl i64 %5, 1
- %or = or i64 %shl, %5
- %shl2 = shl i64 %3, 2
- %shr = lshr i64 %3, 1
- %or3 = or i64 %shl2, %shr
- %add = add i64 %or, %or3
- %mul = mul i64 %4, -4417276706812531889
- %shl4 = mul i64 %4, -8834553413625063778
- %shr5 = ashr i64 %mul, 3
- %or6 = or i64 %shr5, %shl4
- %mul7 = mul nsw i64 %or6, 1400714785074694791
- %xor = xor i64 %add, %mul7
- store i64 %xor, i64* @b, align 8, !tbaa !11
- %mul8 = mul nsw i64 %xor, 1400714785074694791
- br label %if.end
-
-if.else: ; preds = %entry
- %6 = load i64* @b, align 8, !tbaa !11
- %xor10 = xor i64 %6, -4417276706812531889
- %mul11 = mul nsw i64 %xor10, 400714785074694791
- br label %if.end
-
-if.end: ; preds = %if.else, %if.then
- %storemerge.in = phi i64 [ %mul11, %if.else ], [ %mul8, %if.then ]
- %storemerge = add i64 %storemerge.in, -8796714831421723037
- store i64 %storemerge, i64* @b, align 8, !tbaa !11
- ret i64 undef
-}
-
-attributes #0 = { nounwind uwtable "less-precise-fpmad"="false" "no-frame-pointer-elim"="true" "no-frame-pointer-elim-non-leaf" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" }
-
-!llvm.ident = !{!0}
-
-!0 = metadata !{metadata !"clang version 3.6 (trunk 219587)"}
-!1 = metadata !{metadata !2, metadata !2, i64 0}
-!2 = metadata !{metadata !"int", metadata !3, i64 0}
-!3 = metadata !{metadata !"omnipotent char", metadata !4, i64 0}
-!4 = metadata !{metadata !"Simple C/C++ TBAA"}
-!5 = metadata !{metadata !6, metadata !2, i64 0}
-!6 = metadata !{metadata !"XXH_state64_t", metadata !2, i64 0, metadata !2, i64 4, metadata !7, i64 8, metadata !7, i64 16, metadata !7, i64 24}
-!7 = metadata !{metadata !"long long", metadata !3, i64 0}
-!8 = metadata !{metadata !6, metadata !7, i64 16}
-!9 = metadata !{metadata !6, metadata !7, i64 24}
-!10 = metadata !{metadata !6, metadata !7, i64 8}
-!11 = metadata !{metadata !7, metadata !7, i64 0}
+; RUN: llc -verify-machineinstrs -mtriple=i386--netbsd < %s | FileCheck %s
+; Regression test for http://reviews.llvm.org/D5701
+
+; ModuleID = 'xxhash.i'
+target datalayout = "e-m:e-p:32:32-f64:32:64-f80:32-n8:16:32-S128"
+target triple = "i386--netbsd"
+
+; CHECK-LABEL: fn1
+; CHECK: shldl {{.*#+}} 4-byte Folded Spill
+; CHECK: orl {{.*#+}} 4-byte Folded Reload
+; CHECK: shldl {{.*#+}} 4-byte Folded Spill
+; CHECK: orl {{.*#+}} 4-byte Folded Reload
+; CHECK: addl {{.*#+}} 4-byte Folded Reload
+; CHECK: imull {{.*#+}} 4-byte Folded Reload
+; CHECK: orl {{.*#+}} 4-byte Folded Reload
+; CHECK: retl
+
+%struct.XXH_state64_t = type { i32, i32, i64, i64, i64 }
+
+@a = common global i32 0, align 4
+@b = common global i64 0, align 8
+
+; Function Attrs: nounwind uwtable
+define i64 @fn1() #0 {
+entry:
+ %0 = load i32* @a, align 4, !tbaa !1
+ %1 = inttoptr i32 %0 to %struct.XXH_state64_t*
+ %total_len = getelementptr inbounds %struct.XXH_state64_t* %1, i32 0, i32 0
+ %2 = load i32* %total_len, align 4, !tbaa !5
+ %tobool = icmp eq i32 %2, 0
+ br i1 %tobool, label %if.else, label %if.then
+
+if.then: ; preds = %entry
+ %v3 = getelementptr inbounds %struct.XXH_state64_t* %1, i32 0, i32 3
+ %3 = load i64* %v3, align 4, !tbaa !8
+ %v4 = getelementptr inbounds %struct.XXH_state64_t* %1, i32 0, i32 4
+ %4 = load i64* %v4, align 4, !tbaa !9
+ %v2 = getelementptr inbounds %struct.XXH_state64_t* %1, i32 0, i32 2
+ %5 = load i64* %v2, align 4, !tbaa !10
+ %shl = shl i64 %5, 1
+ %or = or i64 %shl, %5
+ %shl2 = shl i64 %3, 2
+ %shr = lshr i64 %3, 1
+ %or3 = or i64 %shl2, %shr
+ %add = add i64 %or, %or3
+ %mul = mul i64 %4, -4417276706812531889
+ %shl4 = mul i64 %4, -8834553413625063778
+ %shr5 = ashr i64 %mul, 3
+ %or6 = or i64 %shr5, %shl4
+ %mul7 = mul nsw i64 %or6, 1400714785074694791
+ %xor = xor i64 %add, %mul7
+ store i64 %xor, i64* @b, align 8, !tbaa !11
+ %mul8 = mul nsw i64 %xor, 1400714785074694791
+ br label %if.end
+
+if.else: ; preds = %entry
+ %6 = load i64* @b, align 8, !tbaa !11
+ %xor10 = xor i64 %6, -4417276706812531889
+ %mul11 = mul nsw i64 %xor10, 400714785074694791
+ br label %if.end
+
+if.end: ; preds = %if.else, %if.then
+ %storemerge.in = phi i64 [ %mul11, %if.else ], [ %mul8, %if.then ]
+ %storemerge = add i64 %storemerge.in, -8796714831421723037
+ store i64 %storemerge, i64* @b, align 8, !tbaa !11
+ ret i64 undef
+}
+
+attributes #0 = { nounwind uwtable "less-precise-fpmad"="false" "no-frame-pointer-elim"="true" "no-frame-pointer-elim-non-leaf" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" }
+
+!llvm.ident = !{!0}
+
+!0 = !{!"clang version 3.6 (trunk 219587)"}
+!1 = !{!2, !2, i64 0}
+!2 = !{!"int", !3, i64 0}
+!3 = !{!"omnipotent char", !4, i64 0}
+!4 = !{!"Simple C/C++ TBAA"}
+!5 = !{!6, !2, i64 0}
+!6 = !{!"XXH_state64_t", !2, i64 0, !2, i64 4, !7, i64 8, !7, i64 16, !7, i64 24}
+!7 = !{!"long long", !3, i64 0}
+!8 = !{!6, !7, i64 16}
+!9 = !{!6, !7, i64 24}
+!10 = !{!6, !7, i64 8}
+!11 = !{!7, !7, i64 0}
diff --git a/test/CodeGen/X86/fold-vex.ll b/test/CodeGen/X86/fold-vex.ll
index 2bb5b44..5a8b1d8 100644
--- a/test/CodeGen/X86/fold-vex.ll
+++ b/test/CodeGen/X86/fold-vex.ll
@@ -1,16 +1,31 @@
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=corei7-avx | FileCheck %s
+; Use CPU parameters to ensure that a CPU-specific attribute is not overriding the AVX definition.
-;CHECK: @test
-; No need to load from memory. The operand will be loaded as part of th AND instr.
-;CHECK-NOT: vmovaps
-;CHECK: vandps
-;CHECK: ret
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=corei7-avx | FileCheck %s
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=btver2 | FileCheck %s
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=-avx | FileCheck %s --check-prefix=SSE
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=corei7-avx -mattr=-avx | FileCheck %s --check-prefix=SSE
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=btver2 -mattr=-avx | FileCheck %s --check-prefix=SSE
-define void @test1(<8 x i32>* %p0, <8 x i32> %in1) nounwind {
-entry:
- %in0 = load <8 x i32>* %p0, align 2
- %a = and <8 x i32> %in0, %in1
- store <8 x i32> %a, <8 x i32>* undef
- ret void
+; No need to load unaligned operand from memory using an explicit instruction with AVX.
+; The operand should be folded into the AND instr.
+
+; With SSE, folding memory operands into math/logic ops requires 16-byte alignment
+; unless specially configured on some CPUs such as AMD Family 10H.
+
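+; (Editorial note: the load below uses `align 2`, less than the 16-byte
+; alignment SSE needs to fold a memory operand, while VEX-encoded AVX
+; instructions have no alignment requirement on folded operands, so only the
+; AVX runs can fold the load.)
+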
+define <4 x i32> @test1(<4 x i32>* %p0, <4 x i32> %in1) nounwind {
+ %in0 = load <4 x i32>* %p0, align 2
+ %a = and <4 x i32> %in0, %in1
+ ret <4 x i32> %a
+
+; CHECK-LABEL: @test1
+; CHECK-NOT: vmovups
+; CHECK: vandps (%rdi), %xmm0, %xmm0
+; CHECK-NEXT: ret
+
+; SSE-LABEL: @test1
+; SSE: movups (%rdi), %xmm1
+; SSE-NEXT: andps %xmm1, %xmm0
+; SSE-NEXT: ret
}
diff --git a/test/CodeGen/X86/force-align-stack-alloca.ll b/test/CodeGen/X86/force-align-stack-alloca.ll
index 95defc8..bd98069 100644
--- a/test/CodeGen/X86/force-align-stack-alloca.ll
+++ b/test/CodeGen/X86/force-align-stack-alloca.ll
@@ -33,14 +33,14 @@ define i64 @g(i32 %i) nounwind {
; CHECK-NOT: {{[^ ,]*}}, %esp
;
; Next we set up the memset call, and then undo it.
-; CHECK: subl $32, %esp
+; CHECK: subl $20, %esp
; CHECK-NOT: {{[^ ,]*}}, %esp
; CHECK: calll memset
; CHECK-NEXT: addl $32, %esp
; CHECK-NOT: {{[^ ,]*}}, %esp
;
; Next we set up the call to 'f'.
-; CHECK: subl $32, %esp
+; CHECK: subl $28, %esp
; CHECK-NOT: {{[^ ,]*}}, %esp
; CHECK: calll f
; CHECK-NEXT: addl $32, %esp
diff --git a/test/CodeGen/X86/fp-double-rounding.ll b/test/CodeGen/X86/fp-double-rounding.ll
new file mode 100644
index 0000000..030cb9a
--- /dev/null
+++ b/test/CodeGen/X86/fp-double-rounding.ll
@@ -0,0 +1,31 @@
+; RUN: llc < %s | FileCheck %s --check-prefix=CHECK --check-prefix=SAFE
+; RUN: llc < %s -enable-unsafe-fp-math | FileCheck %s --check-prefix=CHECK --check-prefix=UNSAFE
+
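+; Editorial note: fp128 -> double -> float can round twice ("double
+; rounding"), so folding the pair into a single fp128 -> float truncation
+; (__trunctfsf2) is only legal under -enable-unsafe-fp-math; the SAFE run must
+; keep both steps.
+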
+target datalayout = "e-m:o-i64:64-f80:128-n8:16:32:64-S128"
+target triple = "x86_64--"
+
+; CHECK-LABEL: double_rounding:
+; SAFE: callq __trunctfdf2
+; SAFE-NEXT: cvtsd2ss %xmm0
+; UNSAFE: callq __trunctfsf2
+; UNSAFE-NOT: cvt
+define void @double_rounding(fp128* %x, float* %f) {
+entry:
+ %0 = load fp128* %x, align 16
+ %1 = fptrunc fp128 %0 to double
+ %2 = fptrunc double %1 to float
+ store float %2, float* %f, align 4
+ ret void
+}
+
+; CHECK-LABEL: double_rounding_precise_first:
+; CHECK: fstps (%
+; CHECK-NOT: fstpl
+define void @double_rounding_precise_first(float* %f) {
+entry:
+ ; Hack, to generate a precise FP_ROUND to double
+ %precise = call double asm sideeffect "fld %st(0)", "={st(0)}"()
+ %0 = fptrunc double %precise to float
+ store float %0, float* %f, align 4
+ ret void
+}
diff --git a/test/CodeGen/X86/fpstack-debuginstr-kill.ll b/test/CodeGen/X86/fpstack-debuginstr-kill.ll
index dfc59a3..e3180f4 100644
--- a/test/CodeGen/X86/fpstack-debuginstr-kill.ll
+++ b/test/CodeGen/X86/fpstack-debuginstr-kill.ll
@@ -32,7 +32,7 @@ sw.bb735: ; preds = %if.end511
unreachable
if.end41.i2210: ; preds = %if.end511
- call void @llvm.dbg.value(metadata !{x86_fp80 %src.sroa.0.0.src.sroa.0.0.2280}, i64 0, metadata !20, metadata !{metadata !"0x102"})
+ call void @llvm.dbg.value(metadata x86_fp80 %src.sroa.0.0.src.sroa.0.0.2280, i64 0, metadata !20, metadata !{!"0x102"})
unreachable
sw.bb992: ; preds = %if.end511
@@ -43,29 +43,29 @@ declare void @llvm.dbg.value(metadata, i64, metadata, metadata)
!llvm.dbg.cu = !{!0}
!llvm.module.flags = !{!24, !25}
-!0 = metadata !{metadata !"0x11\004\00clang version 3.6.0 (http://llvm.org/git/clang 8444ae7cfeaefae031f8fedf0d1435ca3b14d90b) (http://llvm.org/git/llvm 886f0101a7d176543b831f5efb74c03427244a55)\001\00\000\00\001", metadata !1, metadata !2, metadata !2, metadata !3, metadata !21, metadata !2} ; [ DW_TAG_compile_unit ] [x87stackifier/fpu_ieee.cpp] [DW_LANG_C_plus_plus]
-!1 = metadata !{metadata !"fpu_ieee.cpp", metadata !"x87stackifier"}
-!2 = metadata !{}
-!3 = metadata !{metadata !4}
-!4 = metadata !{metadata !"0x2e\00fpuop_arithmetic\00fpuop_arithmetic\00_Z16fpuop_arithmeticjj\0011\000\001\000\006\00256\001\0013", metadata !5, metadata !6, metadata !7, null, void (i32, i32)* @_Z16fpuop_arithmeticjj, null, null, metadata !10} ; [ DW_TAG_subprogram ] [line 11] [def] [scope 13] [fpuop_arithmetic]
-!5 = metadata !{metadata !"f1.cpp", metadata !"x87stackifier"}
-!6 = metadata !{metadata !"0x29", metadata !5} ; [ DW_TAG_file_type ] [x87stackifier/f1.cpp]
-!7 = metadata !{metadata !"0x15\00\000\000\000\000\000\000", i32 0, null, null, metadata !8, null, null, null} ; [ DW_TAG_subroutine_type ] [line 0, size 0, align 0, offset 0] [from ]
-!8 = metadata !{null, metadata !9, metadata !9}
-!9 = metadata !{metadata !"0x24\00unsigned int\000\0032\0032\000\000\007", null, null} ; [ DW_TAG_base_type ] [unsigned int] [line 0, size 32, align 32, offset 0, enc DW_ATE_unsigned]
-!10 = metadata !{metadata !11, metadata !12, metadata !13, metadata !18, metadata !20}
-!11 = metadata !{metadata !"0x101\00\0016777227\000", metadata !4, metadata !6, metadata !9} ; [ DW_TAG_arg_variable ] [line 11]
-!12 = metadata !{metadata !"0x101\00\0033554443\000", metadata !4, metadata !6, metadata !9} ; [ DW_TAG_arg_variable ] [line 11]
-!13 = metadata !{metadata !"0x100\00x\0014\000", metadata !4, metadata !6, metadata !14} ; [ DW_TAG_auto_variable ] [x] [line 14]
-!14 = metadata !{metadata !"0x16\00fpu_extended\003\000\000\000\000", metadata !5, null, metadata !15} ; [ DW_TAG_typedef ] [fpu_extended] [line 3, size 0, align 0, offset 0] [from fpu_register]
-!15 = metadata !{metadata !"0x16\00fpu_register\002\000\000\000\000", metadata !5, null, metadata !16} ; [ DW_TAG_typedef ] [fpu_register] [line 2, size 0, align 0, offset 0] [from uae_f64]
-!16 = metadata !{metadata !"0x16\00uae_f64\001\000\000\000\000", metadata !5, null, metadata !17} ; [ DW_TAG_typedef ] [uae_f64] [line 1, size 0, align 0, offset 0] [from double]
-!17 = metadata !{metadata !"0x24\00double\000\0064\0064\000\000\004", null, null} ; [ DW_TAG_base_type ] [double] [line 0, size 64, align 64, offset 0, enc DW_ATE_float]
-!18 = metadata !{metadata !"0x100\00a\0015\000", metadata !4, metadata !6, metadata !19} ; [ DW_TAG_auto_variable ] [a] [line 15]
-!19 = metadata !{metadata !"0x24\00int\000\0032\0032\000\000\005", null, null} ; [ DW_TAG_base_type ] [int] [line 0, size 32, align 32, offset 0, enc DW_ATE_signed]
-!20 = metadata !{metadata !"0x100\00value\0016\000", metadata !4, metadata !6, metadata !14} ; [ DW_TAG_auto_variable ] [value] [line 16]
-!21 = metadata !{metadata !22, metadata !23}
-!22 = metadata !{metadata !"0x34\00g1\00g1\00\005\000\001", null, metadata !6, metadata !14, double* @g1, null} ; [ DW_TAG_variable ] [g1] [line 5] [def]
-!23 = metadata !{metadata !"0x34\00g2\00g2\00\006\000\001", null, metadata !6, metadata !19, i32* @g2, null} ; [ DW_TAG_variable ] [g2] [line 6] [def]
-!24 = metadata !{i32 2, metadata !"Dwarf Version", i32 2}
-!25 = metadata !{i32 2, metadata !"Debug Info Version", i32 2}
+!0 = !{!"0x11\004\00clang version 3.6.0 (http://llvm.org/git/clang 8444ae7cfeaefae031f8fedf0d1435ca3b14d90b) (http://llvm.org/git/llvm 886f0101a7d176543b831f5efb74c03427244a55)\001\00\000\00\001", !1, !2, !2, !3, !21, !2} ; [ DW_TAG_compile_unit ] [x87stackifier/fpu_ieee.cpp] [DW_LANG_C_plus_plus]
+!1 = !{!"fpu_ieee.cpp", !"x87stackifier"}
+!2 = !{}
+!3 = !{!4}
+!4 = !{!"0x2e\00fpuop_arithmetic\00fpuop_arithmetic\00_Z16fpuop_arithmeticjj\0011\000\001\000\006\00256\001\0013", !5, !6, !7, null, void (i32, i32)* @_Z16fpuop_arithmeticjj, null, null, !10} ; [ DW_TAG_subprogram ] [line 11] [def] [scope 13] [fpuop_arithmetic]
+!5 = !{!"f1.cpp", !"x87stackifier"}
+!6 = !{!"0x29", !5} ; [ DW_TAG_file_type ] [x87stackifier/f1.cpp]
+!7 = !{!"0x15\00\000\000\000\000\000\000", i32 0, null, null, !8, null, null, null} ; [ DW_TAG_subroutine_type ] [line 0, size 0, align 0, offset 0] [from ]
+!8 = !{null, !9, !9}
+!9 = !{!"0x24\00unsigned int\000\0032\0032\000\000\007", null, null} ; [ DW_TAG_base_type ] [unsigned int] [line 0, size 32, align 32, offset 0, enc DW_ATE_unsigned]
+!10 = !{!11, !12, !13, !18, !20}
+!11 = !{!"0x101\00\0016777227\000", !4, !6, !9} ; [ DW_TAG_arg_variable ] [line 11]
+!12 = !{!"0x101\00\0033554443\000", !4, !6, !9} ; [ DW_TAG_arg_variable ] [line 11]
+!13 = !{!"0x100\00x\0014\000", !4, !6, !14} ; [ DW_TAG_auto_variable ] [x] [line 14]
+!14 = !{!"0x16\00fpu_extended\003\000\000\000\000", !5, null, !15} ; [ DW_TAG_typedef ] [fpu_extended] [line 3, size 0, align 0, offset 0] [from fpu_register]
+!15 = !{!"0x16\00fpu_register\002\000\000\000\000", !5, null, !16} ; [ DW_TAG_typedef ] [fpu_register] [line 2, size 0, align 0, offset 0] [from uae_f64]
+!16 = !{!"0x16\00uae_f64\001\000\000\000\000", !5, null, !17} ; [ DW_TAG_typedef ] [uae_f64] [line 1, size 0, align 0, offset 0] [from double]
+!17 = !{!"0x24\00double\000\0064\0064\000\000\004", null, null} ; [ DW_TAG_base_type ] [double] [line 0, size 64, align 64, offset 0, enc DW_ATE_float]
+!18 = !{!"0x100\00a\0015\000", !4, !6, !19} ; [ DW_TAG_auto_variable ] [a] [line 15]
+!19 = !{!"0x24\00int\000\0032\0032\000\000\005", null, null} ; [ DW_TAG_base_type ] [int] [line 0, size 32, align 32, offset 0, enc DW_ATE_signed]
+!20 = !{!"0x100\00value\0016\000", !4, !6, !14} ; [ DW_TAG_auto_variable ] [value] [line 16]
+!21 = !{!22, !23}
+!22 = !{!"0x34\00g1\00g1\00\005\000\001", null, !6, !14, double* @g1, null} ; [ DW_TAG_variable ] [g1] [line 5] [def]
+!23 = !{!"0x34\00g2\00g2\00\006\000\001", null, !6, !19, i32* @g2, null} ; [ DW_TAG_variable ] [g2] [line 6] [def]
+!24 = !{i32 2, !"Dwarf Version", i32 2}
+!25 = !{i32 2, !"Debug Info Version", i32 2}
diff --git a/test/CodeGen/X86/frameaddr.ll b/test/CodeGen/X86/frameaddr.ll
index 452c8e5..5646196 100644
--- a/test/CodeGen/X86/frameaddr.ll
+++ b/test/CodeGen/X86/frameaddr.ll
@@ -1,9 +1,12 @@
; RUN: llc < %s -march=x86 | FileCheck %s --check-prefix=CHECK-32
; RUN: llc < %s -march=x86 -fast-isel -fast-isel-abort | FileCheck %s --check-prefix=CHECK-32
-; RUN: llc < %s -march=x86-64 | FileCheck %s --check-prefix=CHECK-64
-; RUN: llc < %s -march=x86-64 -fast-isel -fast-isel-abort | FileCheck %s --check-prefix=CHECK-64
+; RUN: llc < %s -mtriple=x86_64-pc-win32 -fast-isel | FileCheck %s --check-prefix=CHECK-W64
+; RUN: llc < %s -mtriple=x86_64-unknown | FileCheck %s --check-prefix=CHECK-64
+; RUN: llc < %s -mtriple=x86_64-unknown -fast-isel -fast-isel-abort | FileCheck %s --check-prefix=CHECK-64
; RUN: llc < %s -mtriple=x86_64-gnux32 | FileCheck %s --check-prefix=CHECK-X32ABI
; RUN: llc < %s -mtriple=x86_64-gnux32 -fast-isel -fast-isel-abort | FileCheck %s --check-prefix=CHECK-X32ABI
+; RUN: llc < %s -mtriple=x86_64-nacl | FileCheck %s --check-prefix=CHECK-NACL64
+; RUN: llc < %s -mtriple=x86_64-nacl -fast-isel -fast-isel-abort | FileCheck %s --check-prefix=CHECK-NACL64
define i8* @test1() nounwind {
entry:
@@ -13,6 +16,12 @@ entry:
; CHECK-32-NEXT: movl %ebp, %eax
; CHECK-32-NEXT: pop
; CHECK-32-NEXT: ret
+; CHECK-W64-LABEL: test1
+; CHECK-W64: push
+; CHECK-W64-NEXT: movq %rsp, %rbp
+; CHECK-W64-NEXT: leaq (%rbp), %rax
+; CHECK-W64-NEXT: pop
+; CHECK-W64-NEXT: ret
; CHECK-64-LABEL: test1
; CHECK-64: push
; CHECK-64-NEXT: movq %rsp, %rbp
@@ -25,6 +34,10 @@ entry:
; CHECK-X32ABI-NEXT: movl %ebp, %eax
; CHECK-X32ABI-NEXT: popq %rbp
; CHECK-X32ABI-NEXT: ret
+; CHECK-NACL64-LABEL: test1
+; CHECK-NACL64: pushq %rbp
+; CHECK-NACL64-NEXT: movq %rsp, %rbp
+; CHECK-NACL64-NEXT: movl %ebp, %eax
%0 = tail call i8* @llvm.frameaddress(i32 0)
ret i8* %0
}
@@ -38,6 +51,12 @@ entry:
; CHECK-32-NEXT: movl (%eax), %eax
; CHECK-32-NEXT: pop
; CHECK-32-NEXT: ret
+; CHECK-W64-LABEL: test2
+; CHECK-W64: push
+; CHECK-W64-NEXT: movq %rsp, %rbp
+; CHECK-W64-NEXT: leaq (%rbp), %rax
+; CHECK-W64-NEXT: pop
+; CHECK-W64-NEXT: ret
; CHECK-64-LABEL: test2
; CHECK-64: push
; CHECK-64-NEXT: movq %rsp, %rbp
@@ -52,6 +71,11 @@ entry:
; CHECK-X32ABI-NEXT: movl (%eax), %eax
; CHECK-X32ABI-NEXT: popq %rbp
; CHECK-X32ABI-NEXT: ret
+; CHECK-NACL64-LABEL: test2
+; CHECK-NACL64: pushq %rbp
+; CHECK-NACL64-NEXT: movq %rsp, %rbp
+; CHECK-NACL64-NEXT: movl (%ebp), %eax
+; CHECK-NACL64-NEXT: movl (%eax), %eax
%0 = tail call i8* @llvm.frameaddress(i32 2)
ret i8* %0
}
diff --git a/test/CodeGen/X86/frameallocate.ll b/test/CodeGen/X86/frameallocate.ll
new file mode 100644
index 0000000..7a2f9e3
--- /dev/null
+++ b/test/CodeGen/X86/frameallocate.ll
@@ -0,0 +1,43 @@
+; RUN: llc -mtriple=x86_64-windows-msvc < %s | FileCheck %s
+
+declare i8* @llvm.frameallocate(i32)
+declare i8* @llvm.frameaddress(i32)
+declare i8* @llvm.framerecover(i8*, i8*)
+declare i32 @printf(i8*, ...)
+
+@str = internal constant [10 x i8] c"asdf: %d\0A\00"
+
+define void @print_framealloc_from_fp(i8* %fp) {
+ %alloc = call i8* @llvm.framerecover(i8* bitcast (void(i32*, i32*)* @alloc_func to i8*), i8* %fp)
+ %alloc_i32 = bitcast i8* %alloc to i32*
+ %r = load i32* %alloc_i32
+ call i32 (i8*, ...)* @printf(i8* getelementptr ([10 x i8]* @str, i32 0, i32 0), i32 %r)
+ ret void
+}
+
+; CHECK-LABEL: print_framealloc_from_fp:
+; CHECK: movabsq $.Lframeallocation_alloc_func, %[[offs:[a-z]+]]
+; CHECK: movl (%rcx,%[[offs]]), %edx
+; CHECK: leaq {{.*}}(%rip), %rcx
+; CHECK: callq printf
+; CHECK: retq
+
+define void @alloc_func(i32* %s, i32* %d) {
+ %alloc = call i8* @llvm.frameallocate(i32 16)
+ %alloc_i32 = bitcast i8* %alloc to i32*
+ store i32 42, i32* %alloc_i32
+ %fp = call i8* @llvm.frameaddress(i32 0)
+ call void @print_framealloc_from_fp(i8* %fp)
+ ret void
+}
+
+; CHECK-LABEL: alloc_func:
+; CHECK: subq $48, %rsp
+; CHECK: .seh_stackalloc 48
+; CHECK: leaq 48(%rsp), %rbp
+; CHECK: .seh_setframe 5, 48
+; CHECK: .Lframeallocation_alloc_func = -[[offs:[0-9]+]]
+; CHECK: movl $42, -[[offs]](%rbp)
+; CHECK: leaq -48(%rbp), %rcx
+; CHECK: callq print_framealloc_from_fp
+; CHECK: retq
diff --git a/test/CodeGen/X86/gather-addresses.ll b/test/CodeGen/X86/gather-addresses.ll
index 5f48b1e..6d397b2 100644
--- a/test/CodeGen/X86/gather-addresses.ll
+++ b/test/CodeGen/X86/gather-addresses.ll
@@ -1,35 +1,38 @@
; RUN: llc -mtriple=x86_64-linux -mcpu=nehalem < %s | FileCheck %s --check-prefix=LIN
; RUN: llc -mtriple=x86_64-win32 -mcpu=nehalem < %s | FileCheck %s --check-prefix=WIN
+; RUN: llc -mtriple=i686-win32 -mcpu=nehalem < %s | FileCheck %s --check-prefix=LIN32
; rdar://7398554
; When doing vector gather-scatter index calculation with 32-bit indices,
-; bounce the vector off of cache rather than shuffling each individual
+; use an efficient mov/shift sequence rather than shuffling each individual
; element out of the index vector.
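+;
+; Sketch of the new sequence (editorial, not part of this commit): movd and
+; pextrq move two packed 32-bit indices into a 64-bit GPR at once; movslq
+; sign-extends the low index and sarq $32 extracts the high one, avoiding the
+; store/reload through the stack.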
-; CHECK: foo:
-; LIN: movaps (%rsi), %xmm0
-; LIN: andps (%rdx), %xmm0
-; LIN: movaps %xmm0, -24(%rsp)
-; LIN: movslq -24(%rsp), %[[REG1:r.+]]
-; LIN: movslq -20(%rsp), %[[REG2:r.+]]
-; LIN: movslq -16(%rsp), %[[REG3:r.+]]
-; LIN: movslq -12(%rsp), %[[REG4:r.+]]
-; LIN: movsd (%rdi,%[[REG1]],8), %xmm0
-; LIN: movhpd (%rdi,%[[REG2]],8), %xmm0
-; LIN: movsd (%rdi,%[[REG3]],8), %xmm1
-; LIN: movhpd (%rdi,%[[REG4]],8), %xmm1
+; CHECK-LABEL: foo:
+; LIN: movdqa (%rsi), %xmm0
+; LIN: pand (%rdx), %xmm0
+; LIN: pextrq $1, %xmm0, %r[[REG4:.+]]
+; LIN: movd %xmm0, %r[[REG2:.+]]
+; LIN: movslq %e[[REG2]], %r[[REG1:.+]]
+; LIN: sarq $32, %r[[REG2]]
+; LIN: movslq %e[[REG4]], %r[[REG3:.+]]
+; LIN: sarq $32, %r[[REG4]]
+; LIN: movsd (%rdi,%r[[REG1]],8), %xmm0
+; LIN: movhpd (%rdi,%r[[REG2]],8), %xmm0
+; LIN: movsd (%rdi,%r[[REG3]],8), %xmm1
+; LIN: movhpd (%rdi,%r[[REG4]],8), %xmm1
-; WIN: movaps (%rdx), %xmm0
-; WIN: andps (%r8), %xmm0
-; WIN: movaps %xmm0, (%rsp)
-; WIN: movslq (%rsp), %[[REG1:r.+]]
-; WIN: movslq 4(%rsp), %[[REG2:r.+]]
-; WIN: movslq 8(%rsp), %[[REG3:r.+]]
-; WIN: movslq 12(%rsp), %[[REG4:r.+]]
-; WIN: movsd (%rcx,%[[REG1]],8), %xmm0
-; WIN: movhpd (%rcx,%[[REG2]],8), %xmm0
-; WIN: movsd (%rcx,%[[REG3]],8), %xmm1
-; WIN: movhpd (%rcx,%[[REG4]],8), %xmm1
+; WIN: movdqa (%rdx), %xmm0
+; WIN: pand (%r8), %xmm0
+; WIN: pextrq $1, %xmm0, %r[[REG4:.+]]
+; WIN: movd %xmm0, %r[[REG2:.+]]
+; WIN: movslq %e[[REG2]], %r[[REG1:.+]]
+; WIN: sarq $32, %r[[REG2]]
+; WIN: movslq %e[[REG4]], %r[[REG3:.+]]
+; WIN: sarq $32, %r[[REG4]]
+; WIN: movsd (%rcx,%r[[REG1]],8), %xmm0
+; WIN: movhpd (%rcx,%r[[REG2]],8), %xmm0
+; WIN: movsd (%rcx,%r[[REG3]],8), %xmm1
+; WIN: movhpd (%rcx,%r[[REG4]],8), %xmm1
define <4 x double> @foo(double* %p, <4 x i32>* %i, <4 x i32>* %h) nounwind {
%a = load <4 x i32>* %i
@@ -53,3 +56,35 @@ define <4 x double> @foo(double* %p, <4 x i32>* %i, <4 x i32>* %h) nounwind {
%v3 = insertelement <4 x double> %v2, double %r3, i32 3
ret <4 x double> %v3
}
+
+; Check that the sequence previously used above, which bounces the vector off the
+; cache works for x86-32. Note that in this case it will not be used for index
+; calculation, since indexes are 32-bit, not 64.
+; CHECK-LABEL: old:
+; LIN32: movaps %xmm0, (%esp)
+; LIN32-DAG: {{(mov|and)}}l (%esp),
+; LIN32-DAG: {{(mov|and)}}l 4(%esp),
+; LIN32-DAG: {{(mov|and)}}l 8(%esp),
+; LIN32-DAG: {{(mov|and)}}l 12(%esp),
+define <4 x i64> @old(double* %p, <4 x i32>* %i, <4 x i32>* %h, i64 %f) nounwind {
+ %a = load <4 x i32>* %i
+ %b = load <4 x i32>* %h
+ %j = and <4 x i32> %a, %b
+ %d0 = extractelement <4 x i32> %j, i32 0
+ %d1 = extractelement <4 x i32> %j, i32 1
+ %d2 = extractelement <4 x i32> %j, i32 2
+ %d3 = extractelement <4 x i32> %j, i32 3
+ %q0 = zext i32 %d0 to i64
+ %q1 = zext i32 %d1 to i64
+ %q2 = zext i32 %d2 to i64
+ %q3 = zext i32 %d3 to i64
+ %r0 = and i64 %q0, %f
+ %r1 = and i64 %q1, %f
+ %r2 = and i64 %q2, %f
+ %r3 = and i64 %q3, %f
+ %v0 = insertelement <4 x i64> undef, i64 %r0, i32 0
+ %v1 = insertelement <4 x i64> %v0, i64 %r1, i32 1
+ %v2 = insertelement <4 x i64> %v1, i64 %r2, i32 2
+ %v3 = insertelement <4 x i64> %v2, i64 %r3, i32 3
+ ret <4 x i64> %v3
+}
diff --git a/test/CodeGen/X86/gcc_except_table.ll b/test/CodeGen/X86/gcc_except_table.ll
index a732eb1..abce130 100644
--- a/test/CodeGen/X86/gcc_except_table.ll
+++ b/test/CodeGen/X86/gcc_except_table.ll
@@ -15,7 +15,7 @@ define i32 @main() uwtable optsize ssp {
; MINGW64: .seh_proc
; MINGW64: .seh_handler __gxx_personality_v0
-; MINGW64: .seh_setframe 5, 0
+; MINGW64: .seh_setframe 5, 32
; MINGW64: callq _Unwind_Resume
; MINGW64: .seh_handlerdata
; MINGW64: GCC_except_table0:
diff --git a/test/CodeGen/X86/ghc-cc.ll b/test/CodeGen/X86/ghc-cc.ll
index 4dba2c0..3ada8c8 100644
--- a/test/CodeGen/X86/ghc-cc.ll
+++ b/test/CodeGen/X86/ghc-cc.ll
@@ -12,13 +12,13 @@ entry:
; CHECK: movl {{[0-9]*}}(%esp), %ebx
; CHECK-NEXT: movl {{[0-9]*}}(%esp), %ebp
; CHECK-NEXT: calll addtwo
- %0 = call cc 10 i32 @addtwo(i32 %a, i32 %b)
+ %0 = call ghccc i32 @addtwo(i32 %a, i32 %b)
; CHECK: calll foo
call void @foo() nounwind
ret void
}
-define cc 10 i32 @addtwo(i32 %x, i32 %y) nounwind {
+define ghccc i32 @addtwo(i32 %x, i32 %y) nounwind {
entry:
; CHECK: leal (%ebx,%ebp), %eax
%0 = add i32 %x, %y
@@ -26,7 +26,7 @@ entry:
ret i32 %0
}
-define cc 10 void @foo() nounwind {
+define ghccc void @foo() nounwind {
entry:
; CHECK: movl r1, %esi
; CHECK-NEXT: movl hp, %edi
@@ -37,8 +37,8 @@ entry:
%2 = load i32* @sp
%3 = load i32* @base
; CHECK: jmp bar
- tail call cc 10 void @bar( i32 %3, i32 %2, i32 %1, i32 %0 ) nounwind
+ tail call ghccc void @bar( i32 %3, i32 %2, i32 %1, i32 %0 ) nounwind
ret void
}
-declare cc 10 void @bar(i32, i32, i32, i32)
+declare ghccc void @bar(i32, i32, i32, i32)
diff --git a/test/CodeGen/X86/ghc-cc64.ll b/test/CodeGen/X86/ghc-cc64.ll
index 403391e..7251dd6 100644
--- a/test/CodeGen/X86/ghc-cc64.ll
+++ b/test/CodeGen/X86/ghc-cc64.ll
@@ -25,13 +25,13 @@ entry:
; CHECK: movq %rdi, %r13
; CHECK-NEXT: movq %rsi, %rbp
; CHECK-NEXT: callq addtwo
- %0 = call cc 10 i64 @addtwo(i64 %a, i64 %b)
+ %0 = call ghccc i64 @addtwo(i64 %a, i64 %b)
; CHECK: callq foo
call void @foo() nounwind
ret void
}
-define cc 10 i64 @addtwo(i64 %x, i64 %y) nounwind {
+define ghccc i64 @addtwo(i64 %x, i64 %y) nounwind {
entry:
; CHECK: leaq (%r13,%rbp), %rax
%0 = add i64 %x, %y
@@ -39,7 +39,7 @@ entry:
ret i64 %0
}
-define cc 10 void @foo() nounwind {
+define ghccc void @foo() nounwind {
entry:
; CHECK: movsd d2(%rip), %xmm6
; CHECK-NEXT: movsd d1(%rip), %xmm5
@@ -74,12 +74,12 @@ entry:
%14 = load i64* @sp
%15 = load i64* @base
; CHECK: jmp bar
- tail call cc 10 void @bar( i64 %15, i64 %14, i64 %13, i64 %12, i64 %11,
+ tail call ghccc void @bar( i64 %15, i64 %14, i64 %13, i64 %12, i64 %11,
i64 %10, i64 %9, i64 %8, i64 %7, i64 %6,
float %5, float %4, float %3, float %2, double %1,
double %0 ) nounwind
ret void
}
-declare cc 10 void @bar(i64, i64, i64, i64, i64, i64, i64, i64, i64, i64,
+declare ghccc void @bar(i64, i64, i64, i64, i64, i64, i64, i64, i64, i64,
float, float, float, float, double, double)
diff --git a/test/CodeGen/X86/global-sections-comdat.ll b/test/CodeGen/X86/global-sections-comdat.ll
new file mode 100644
index 0000000..730050d
--- /dev/null
+++ b/test/CodeGen/X86/global-sections-comdat.ll
@@ -0,0 +1,46 @@
+; RUN: llc < %s -mtriple=i386-unknown-linux | FileCheck %s -check-prefix=LINUX
+; RUN: llc < %s -mtriple=i386-unknown-linux -data-sections -function-sections | FileCheck %s -check-prefix=LINUX-SECTIONS
+; RUN: llc < %s -mtriple=i386-unknown-linux -data-sections -function-sections -unique-section-names=false | FileCheck %s -check-prefix=LINUX-SECTIONS-SHORT
+
+$F1 = comdat any
+define void @F1(i32 %y) comdat {
+bb0:
+switch i32 %y, label %bb5 [
+ i32 1, label %bb1
+ i32 2, label %bb2
+ i32 3, label %bb3
+ i32 4, label %bb4
+ ]
+bb1:
+ ret void
+bb2:
+ ret void
+bb3:
+ ret void
+bb4:
+ ret void
+bb5:
+ ret void
+}
+
+; LINUX: .section .text.F1,"axG",@progbits,F1,comdat
+; LINUX: .size F1,
+; LINUX-NEXT: .cfi_endproc
+; LINUX-NEXT: .section .rodata.F1,"aG",@progbits,F1,comdat
+
+; LINUX-SECTIONS: .section .text.F1,"axG",@progbits,F1,comdat
+; LINUX-SECTIONS: .size F1,
+; LINUX-SECTIONS-NEXT: .cfi_endproc
+; LINUX-SECTIONS-NEXT: .section .rodata.F1,"aG",@progbits,F1,comdat
+
+; LINUX-SECTIONS-SHORT: .section .text,"axG",@progbits,F1,comdat
+; LINUX-SECTIONS-SHORT: .size F1,
+; LINUX-SECTIONS-SHORT-NEXT: .cfi_endproc
+; LINUX-SECTIONS-SHORT-NEXT: .section .rodata,"aG",@progbits,F1,comdat
+
+$G16 = comdat any
+@G16 = unnamed_addr constant i32 42, comdat
+
+; LINUX: .section .rodata.cst4.G16,"aGM",@progbits,4,G16,comdat
+; LINUX-SECTIONS: .section .rodata.cst4.G16,"aGM",@progbits,4,G16,comdat
+; LINUX-SECTIONS-SHORT: .section .rodata.cst4,"aGM",@progbits,4,G16,comdat
diff --git a/test/CodeGen/X86/global-sections.ll b/test/CodeGen/X86/global-sections.ll
index fa1169d..c2f4b65 100644
--- a/test/CodeGen/X86/global-sections.ll
+++ b/test/CodeGen/X86/global-sections.ll
@@ -2,7 +2,8 @@
; RUN: llc < %s -mtriple=i386-apple-darwin9.7 | FileCheck %s -check-prefix=DARWIN
; RUN: llc < %s -mtriple=i386-apple-darwin10 -relocation-model=static | FileCheck %s -check-prefix=DARWIN-STATIC
; RUN: llc < %s -mtriple=x86_64-apple-darwin10 | FileCheck %s -check-prefix=DARWIN64
-; RUN: llc < %s -mtriple=i386-unknown-linux-gnu -data-sections | FileCheck %s -check-prefix=LINUX-SECTIONS
+; RUN: llc < %s -mtriple=i386-unknown-linux-gnu -data-sections -function-sections | FileCheck %s -check-prefix=LINUX-SECTIONS
+; RUN: llc < %s -mtriple=x86_64-pc-linux -data-sections -function-sections -relocation-model=pic | FileCheck %s -check-prefix=LINUX-SECTIONS-PIC
; RUN: llc < %s -mtriple=i686-pc-win32 -data-sections -function-sections | FileCheck %s -check-prefix=WIN32-SECTIONS
define void @F1() {
@@ -12,6 +13,79 @@ define void @F1() {
; WIN32-SECTIONS: .section .text,"xr",one_only,_F1
; WIN32-SECTIONS: .globl _F1
+define void @F2(i32 %y) {
+bb0:
+switch i32 %y, label %bb5 [
+ i32 1, label %bb1
+ i32 2, label %bb2
+ i32 3, label %bb3
+ i32 4, label %bb4
+ ]
+bb1:
+ ret void
+bb2:
+ ret void
+bb3:
+ ret void
+bb4:
+ ret void
+bb5:
+ ret void
+}
+
+; LINUX: .size F2,
+; LINUX-NEXT: .cfi_endproc
+; LINUX-NEXT: .section .rodata,"a",@progbits
+
+; LINUX-SECTIONS: .section .text.F2,"ax",@progbits
+; LINUX-SECTIONS: .size F2,
+; LINUX-SECTIONS-NEXT: .cfi_endproc
+; LINUX-SECTIONS-NEXT: .section .rodata.F2,"a",@progbits
+
+; LINUX-SECTIONS-PIC: .section .text.F2,"ax",@progbits
+; LINUX-SECTIONS-PIC: .size F2,
+; LINUX-SECTIONS-PIC-NEXT: .cfi_endproc
+; LINUX-SECTIONS-PIC-NEXT: .section .rodata.F2,"a",@progbits
+
+declare void @G()
+
+define void @F3(i32 %y) {
+bb0:
+ invoke void @G()
+ to label %bb2 unwind label %bb1
+bb1:
+ landingpad { i8*, i32 } personality i8* bitcast (void ()* @G to i8*)
+ catch i8* null
+ br label %bb2
+bb2:
+
+switch i32 %y, label %bb7 [
+ i32 1, label %bb3
+ i32 2, label %bb4
+ i32 3, label %bb5
+ i32 4, label %bb6
+ ]
+bb3:
+ ret void
+bb4:
+ ret void
+bb5:
+ ret void
+bb6:
+ ret void
+bb7:
+ ret void
+}
+
+; DARWIN64: _F3:
+; DARWIN64: .cfi_endproc
+; DARWIN64-NEXT: Leh_func_end
+; DARWIN64-NEXT: .section __TEXT,__gcc_except_tab
+; DARWIN64-NOT: .section
+; DARWIN64: .section __TEXT,__text,regular,pure_instructions
+; DARWIN64-NOT: .section
+; DARWIN64: LJTI{{.*}}:
+
; int G1;
@G1 = common global i32 0
@@ -48,7 +122,7 @@ define void @F1() {
; LINUX-SECTIONS: .section .rodata.G3,"a",@progbits
; LINUX-SECTIONS: .globl G3
-; WIN32-SECTIONS: .section .rdata,"rd",one_only,_G3
+; WIN32-SECTIONS: .section .rdata,"dr",one_only,_G3
; WIN32-SECTIONS: .globl _G3
@@ -85,7 +159,6 @@ define void @F1() {
@"foo bar" = linkonce global i32 42
; LINUX: .type "foo bar",@object
-; LINUX: .section ".data.foo bar","aGw",@progbits,"foo bar",comdat
; LINUX: .weak "foo bar"
; LINUX: "foo bar":
@@ -98,7 +171,6 @@ define void @F1() {
@G6 = weak_odr unnamed_addr constant [1 x i8] c"\01"
; LINUX: .type G6,@object
-; LINUX: .section .rodata.G6,"aG",@progbits,G6,comdat
; LINUX: .weak G6
; LINUX: G6:
; LINUX: .byte 1
@@ -123,10 +195,10 @@ define void @F1() {
; LINUX: G7:
; LINUX: .asciz "abcdefghi"
-; LINUX-SECTIONS: .section .rodata.G7,"aMS",@progbits,1
+; LINUX-SECTIONS: .section .rodata.str1.1,"aMS",@progbits,1
; LINUX-SECTIONS: .globl G7
-; WIN32-SECTIONS: .section .rdata,"rd",one_only,_G7
+; WIN32-SECTIONS: .section .rdata,"dr",one_only,_G7
; WIN32-SECTIONS: .globl _G7
@@ -184,12 +256,12 @@ define void @F1() {
@G14 = private unnamed_addr constant [4 x i8] c"foo\00", align 1
; LINUX-SECTIONS: .type .LG14,@object # @G14
-; LINUX-SECTIONS: .section .rodata..LG14,"aMS",@progbits,1
+; LINUX-SECTIONS: .section .rodata.str1.1,"aMS",@progbits,1
; LINUX-SECTIONS: .LG14:
; LINUX-SECTIONS: .asciz "foo"
; LINUX-SECTIONS: .size .LG14, 4
-; WIN32-SECTIONS: .section .rdata,"rd"
+; WIN32-SECTIONS: .section .rdata,"dr"
; WIN32-SECTIONS: L_G14:
; WIN32-SECTIONS: .asciz "foo"
@@ -208,8 +280,8 @@ define void @F1() {
; DARWIN64: .section __TEXT,__const
; DARWIN64: _G15:
-; LINUX-SECTIONS: .section .rodata.G15,"aM",@progbits,8
+; LINUX-SECTIONS: .section .rodata.cst8,"aM",@progbits,8
; LINUX-SECTIONS: G15:
-; WIN32-SECTIONS: .section .rdata,"rd",one_only,_G15
+; WIN32-SECTIONS: .section .rdata,"dr",one_only,_G15
; WIN32-SECTIONS: _G15:
diff --git a/test/CodeGen/X86/hoist-invariant-load.ll b/test/CodeGen/X86/hoist-invariant-load.ll
index 34191e3..c9e5290 100644
--- a/test/CodeGen/X86/hoist-invariant-load.ll
+++ b/test/CodeGen/X86/hoist-invariant-load.ll
@@ -27,4 +27,4 @@ for.end: ; preds = %for.body
declare i8* @objc_msgSend(i8*, i8*, ...) nonlazybind
-!0 = metadata !{}
+!0 = !{}
diff --git a/test/CodeGen/X86/huge-stack-offset.ll b/test/CodeGen/X86/huge-stack-offset.ll
new file mode 100644
index 0000000..6195161
--- /dev/null
+++ b/test/CodeGen/X86/huge-stack-offset.ll
@@ -0,0 +1,59 @@
+; RUN: llc < %s -mtriple=x86_64-linux-unknown | FileCheck %s --check-prefix=CHECK-64
+; RUN: llc < %s -mtriple=i386-linux-unknown | FileCheck %s --check-prefix=CHECK-32
+
+; Test that a large stack offset uses a single add/sub instruction to
+; adjust the stack pointer.
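+;
+; Sketch of the arithmetic: the 5,000,000,000-byte frame below exceeds
+; 2^31 - 1 = 2,147,483,647, the largest 32-bit immediate, so the adjustment
+; cannot be encoded as a single 'sub $imm32, %rsp'; the backend instead
+; materializes the offset in a scratch register (the movabsq/movl in the
+; CHECK lines) and applies it with one register-form sub/add.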
+
+define void @foo() nounwind {
+; CHECK-64-LABEL: foo:
+; CHECK-64: movabsq $50000000{{..}}, %rax
+; CHECK-64-NEXT: subq %rax, %rsp
+; CHECK-64-NOT: subq $2147483647, %rsp
+; CHECK-64: movabsq $50000000{{..}}, [[RAX:%r..]]
+; CHECK-64-NEXT: addq [[RAX]], %rsp
+
+; CHECK-32-LABEL: foo:
+; CHECK-32: movl $50000000{{..}}, %eax
+; CHECK-32-NEXT: subl %eax, %esp
+; CHECK-32-NOT: subl $2147483647, %esp
+; CHECK-32: movl $50000000{{..}}, [[EAX:%e..]]
+; CHECK-32-NEXT: addl [[EAX]], %esp
+ %1 = alloca [5000000000 x i8], align 16
+ %2 = getelementptr inbounds [5000000000 x i8]* %1, i32 0, i32 0
+ call void @bar(i8* %2)
+ ret void
+}
+
+; Verify that we do not clobber the return value.
+
+define i32 @foo2() nounwind {
+; CHECK-64-LABEL: foo2:
+; CHECK-64: movl $10, %eax
+; CHECK-64-NOT: movabsq ${{.*}}, %rax
+
+; CHECK-32-LABEL: foo2:
+; CHECK-32: movl $10, %eax
+; CHECK-32-NOT: movl ${{.*}}, %eax
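+; (Since %eax/%rax holds the return value of 10, the epilogue's stack
+; restore must materialize the large offset in some other scratch register.)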
+ %1 = alloca [5000000000 x i8], align 16
+ %2 = getelementptr inbounds [5000000000 x i8]* %1, i32 0, i32 0
+ call void @bar(i8* %2)
+ ret i32 10
+}
+
+; Verify that we do not clobber EAX when using the inreg attribute.
+
+define i32 @foo3(i32 inreg %x) nounwind {
+; CHECK-64-LABEL: foo3:
+; CHECK-64: movabsq $50000000{{..}}, %rax
+; CHECK-64-NEXT: subq %rax, %rsp
+
+; CHECK-32-LABEL: foo3:
+; CHECK-32: subl $2147483647, %esp
+; CHECK-32-NOT: movl ${{.*}}, %eax
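+; (With the argument pinned in %eax by inreg, the 32-bit backend cannot use
+; %eax as a scratch register, so it presumably falls back to immediate-form
+; stack adjustments instead of the movl/subl pair.)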
+ %1 = alloca [5000000000 x i8], align 16
+ %2 = getelementptr inbounds [5000000000 x i8]* %1, i32 0, i32 0
+ call void @bar(i8* %2)
+ ret i32 %x
+}
+
+declare void @bar(i8*)
diff --git a/test/CodeGen/X86/i1narrowfail.ll b/test/CodeGen/X86/i1narrowfail.ll
new file mode 100644
index 0000000..e280f3c
--- /dev/null
+++ b/test/CodeGen/X86/i1narrowfail.ll
@@ -0,0 +1,10 @@
+; RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=knl | FileCheck %s
+
+; CHECK-LABEL: @foo
+; CHECK: orb $16
+define void @foo(i64* %ptr) {
+ %r11 = load i64* %ptr, align 8
+ %r12 = or i64 16, %r11
+ store i64 %r12, i64* %ptr, align 8
+ ret void
+}
diff --git a/test/CodeGen/X86/ident-metadata.ll b/test/CodeGen/X86/ident-metadata.ll
index a568673..e08738f 100644
--- a/test/CodeGen/X86/ident-metadata.ll
+++ b/test/CodeGen/X86/ident-metadata.ll
@@ -5,5 +5,5 @@
; CHECK: .ident "clang version x.x"
; CHECK-NEXT: .ident "something else"
!llvm.ident = !{!0, !1}
-!0 = metadata !{metadata !"clang version x.x"}
-!1 = metadata !{metadata !"something else"}
+!0 = !{!"clang version x.x"}
+!1 = !{!"something else"}
diff --git a/test/CodeGen/X86/imul.ll b/test/CodeGen/X86/imul.ll
new file mode 100644
index 0000000..c64b4e3
--- /dev/null
+++ b/test/CodeGen/X86/imul.ll
@@ -0,0 +1,110 @@
+; RUN: llc < %s -mtriple=x86_64-pc-linux-gnu | FileCheck %s --check-prefix=X64
+; RUN: llc < %s -mtriple=x86_64-pc-linux-gnux32 | FileCheck %s --check-prefix=X64
+; RUN: llc < %s -mtriple=i686-pc-linux | FileCheck %s --check-prefix=X86
+
+define i32 @mul4_32(i32 %A) {
+; X64-LABEL: mul4_32:
+; X64: leal
+; X86-LABEL: mul4_32:
+; X86: shll
+ %mul = mul i32 %A, 4
+ ret i32 %mul
+}
+
+define i64 @mul4_64(i64 %A) {
+; X64-LABEL: mul4_64:
+; X64: leaq
+; X86-LABEL: mul4_64:
+; X86: shldl
+; X86: shll
+ %mul = mul i64 %A, 4
+ ret i64 %mul
+}
+
+define i32 @mul4096_32(i32 %A) {
+; X64-LABEL: mul4096_32:
+; X64: shll
+; X86-LABEL: mul4096_32:
+; X86: shll
+ %mul = mul i32 %A, 4096
+ ret i32 %mul
+}
+
+define i64 @mul4096_64(i64 %A) {
+; X64-LABEL: mul4096_64:
+; X64: shlq
+; X86-LABEL: mul4096_64:
+; X86: shldl
+; X86: shll
+ %mul = mul i64 %A, 4096
+ ret i64 %mul
+}
+
+define i32 @mulmin4096_32(i32 %A) {
+; X64-LABEL: mulmin4096_32:
+; X64: shll
+; X64-NEXT: negl
+; X86-LABEL: mulmin4096_32:
+; X86: shll
+; X86-NEXT: negl
+ %mul = mul i32 %A, -4096
+ ret i32 %mul
+}
+
+define i64 @mulmin4096_64(i64 %A) {
+; X64-LABEL: mulmin4096_64:
+; X64: shlq
+; X64-NEXT: negq
+; X86-LABEL: mulmin4096_64:
+; X86: shldl
+; X86-NEXT: shll
+; X86-NEXT: xorl
+; X86-NEXT: negl
+; X86-NEXT: sbbl
+ %mul = mul i64 %A, -4096
+ ret i64 %mul
+}
+
+define i32 @mul3_32(i32 %A) {
+; X64-LABEL: mul3_32:
+; X64: leal
+; X86-LABEL: mul3_32:
+; But why?!
+; X86: imull
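+; (presumably a missed optimization: a single 'leal (%eax,%eax,2), %eax'
+; could also compute 3*A on 32-bit targets)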
+ %mul = mul i32 %A, 3
+ ret i32 %mul
+}
+
+define i64 @mul3_64(i64 %A) {
+; X64-LABEL: mul3_64:
+; X64: leaq
+; X86-LABEL: mul3_64:
+; X86: mull
+; X86-NEXT: imull
+ %mul = mul i64 %A, 3
+ ret i64 %mul
+}
+
+define i32 @mul40_32(i32 %A) {
+; X64-LABEL: mul40_32:
+; X64: shll
+; X64-NEXT: leal
+; X86-LABEL: mul40_32:
+; X86: shll
+; X86-NEXT: leal
+ %mul = mul i32 %A, 40
+ ret i32 %mul
+}
+
+define i64 @mul40_64(i64 %A) {
+; X64-LABEL: mul40_64:
+; X64: shlq
+; X64-NEXT: leaq
+; X86-LABEL: mul40_64:
+; X86: leal
+; X86-NEXT: movl
+; X86-NEXT: mull
+; X86-NEXT: leal
+ %mul = mul i64 %A, 40
+ ret i64 %mul
+}
diff --git a/test/CodeGen/X86/imul64-lea.ll b/test/CodeGen/X86/imul64-lea.ll
deleted file mode 100644
index 047c129..0000000
--- a/test/CodeGen/X86/imul64-lea.ll
+++ /dev/null
@@ -1,25 +0,0 @@
-; RUN: llc < %s -mtriple=x86_64-pc-linux-gnu | FileCheck %s
-; RUN: llc < %s -mtriple=x86_64-pc-linux-gnux32 | FileCheck %s
-
-; Test that 64-bit LEAs are generated for both LP64 and ILP32 in 64-bit mode.
-declare i64 @foo64()
-
-define i64 @test64() {
- %tmp.0 = tail call i64 @foo64( )
- %tmp.1 = mul i64 %tmp.0, 9
-; CHECK-NOT: mul
-; CHECK: leaq
- ret i64 %tmp.1
-}
-
-; Test that 32-bit LEAs are generated for both LP64 and ILP32 in 64-bit mode.
-declare i32 @foo32()
-
-define i32 @test32() {
- %tmp.0 = tail call i32 @foo32( )
- %tmp.1 = mul i32 %tmp.0, 9
-; CHECK-NOT: mul
-; CHECK: leal
- ret i32 %tmp.1
-}
-
diff --git a/test/CodeGen/X86/inalloca-ctor.ll b/test/CodeGen/X86/inalloca-ctor.ll
index 7cfa929..b1781d3 100644
--- a/test/CodeGen/X86/inalloca-ctor.ll
+++ b/test/CodeGen/X86/inalloca-ctor.ll
@@ -17,16 +17,16 @@ entry:
; CHECK: movl %esp,
call void @Foo_ctor(%Foo* %c)
; CHECK: leal 12(%{{.*}}),
-; CHECK: subl $4, %esp
-; CHECK: calll _Foo_ctor
+; CHECK-NEXT: pushl
+; CHECK-NEXT: calll _Foo_ctor
; CHECK: addl $4, %esp
%b = getelementptr %frame* %args, i32 0, i32 1
store i32 42, i32* %b
; CHECK: movl $42,
%a = getelementptr %frame* %args, i32 0, i32 0
call void @Foo_ctor(%Foo* %a)
-; CHECK: subl $4, %esp
-; CHECK: calll _Foo_ctor
+; CHECK-NEXT: pushl
+; CHECK-NEXT: calll _Foo_ctor
; CHECK: addl $4, %esp
call void @f(%frame* inalloca %args)
; CHECK: calll _f
diff --git a/test/CodeGen/X86/inalloca-invoke.ll b/test/CodeGen/X86/inalloca-invoke.ll
index 6cff9ac..cc11ab3 100644
--- a/test/CodeGen/X86/inalloca-invoke.ll
+++ b/test/CodeGen/X86/inalloca-invoke.ll
@@ -31,13 +31,13 @@ blah:
to label %invoke.cont unwind label %lpad
; Uses end as sret param.
-; CHECK: movl %[[end]], (%esp)
+; CHECK: pushl %[[end]]
; CHECK: calll _plus
invoke.cont:
call void @begin(%Iter* sret %beg)
-; CHECK: movl %[[beg]],
+; CHECK: pushl %[[beg]]
; CHECK: calll _begin
invoke void @reverse(%frame.reverse* inalloca align 4 %rev_args)
diff --git a/test/CodeGen/X86/inalloca-stdcall.ll b/test/CodeGen/X86/inalloca-stdcall.ll
index 54f97d9..65a0f77 100644
--- a/test/CodeGen/X86/inalloca-stdcall.ll
+++ b/test/CodeGen/X86/inalloca-stdcall.ll
@@ -6,6 +6,7 @@ declare x86_stdcallcc void @f(%Foo* inalloca %a)
declare x86_stdcallcc void @i(i32 %a)
define void @g() {
+; CHECK-LABEL: _g:
%b = alloca inalloca %Foo
; CHECK: movl $8, %eax
; CHECK: calll __chkstk
@@ -19,7 +20,7 @@ define void @g() {
call x86_stdcallcc void @f(%Foo* inalloca %b)
; CHECK: calll _f@8
; CHECK-NOT: %esp
-; CHECK: subl $4, %esp
+; CHECK: pushl
; CHECK: calll _i@4
call x86_stdcallcc void @i(i32 0)
ret void
diff --git a/test/CodeGen/X86/init-priority.ll b/test/CodeGen/X86/init-priority.ll
new file mode 100644
index 0000000..a0cff23
--- /dev/null
+++ b/test/CodeGen/X86/init-priority.ll
@@ -0,0 +1,51 @@
+; RUN: llc < %s -mtriple=x86_64-unknown-freebsd9 | FileCheck %s
+
+; Check that the compiler never emits global constructors
+; into the .init_array section when building for a non-Linux ELF target.
+; The test therefore depends on the UseInitArray default for FreeBSD,
+; as set in Generic_ELF::addClangTargetOptions().
+
+; This guards against a Visual Studio bug which caused the
+; UseInitArray field to be left uninitialized instead of being
+; zero-initialized (as specified in [dcl.init]p7).
+; The workaround consists of providing a user-declared default constructor
+; that explicitly initializes the UseInitArray field.
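+;
+; (On ELF targets where UseInitArray is off, static constructors are
+; expected in the legacy .ctors section instead; the CHECK-NOT at the
+; bottom of this file only asserts that .init_array never appears.)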
+
+%class.C = type { i8 }
+%class.D = type { i8 }
+
+@c1 = global %class.C zeroinitializer, align 1
+@d1 = global %class.D zeroinitializer, align 1
+@llvm.global_ctors = appending global [2 x { i32, void ()* }] [{ i32, void ()* } { i32 101, void ()* @_GLOBAL__I_000101 }, { i32, void ()* } { i32 65535, void ()* @_GLOBAL__I_a }]
+
+define linkonce_odr void @_ZN1CC1Ev(%class.C* nocapture %this) {
+entry:
+ ret void
+}
+
+define linkonce_odr void @_ZN1DC1Ev(%class.D* nocapture %this) {
+entry:
+ ret void
+}
+
+define linkonce_odr void @_ZN1DC2Ev(%class.D* nocapture %this) {
+entry:
+ ret void
+}
+
+define linkonce_odr void @_ZN1CC2Ev(%class.C* nocapture %this) {
+entry:
+ ret void
+}
+
+define internal void @_GLOBAL__I_000101() nounwind readnone {
+entry:
+ ret void
+}
+
+define internal void @_GLOBAL__I_a() nounwind readnone {
+entry:
+ ret void
+}
+
+; CHECK-NOT: .init_array
diff --git a/test/CodeGen/X86/inline-asm-flag-clobber.ll b/test/CodeGen/X86/inline-asm-flag-clobber.ll
index bb7c33e..0874b51 100644
--- a/test/CodeGen/X86/inline-asm-flag-clobber.ll
+++ b/test/CodeGen/X86/inline-asm-flag-clobber.ll
@@ -29,4 +29,4 @@ entry:
ret i32 %1
}
-!0 = metadata !{i64 935930}
+!0 = !{i64 935930}
diff --git a/test/CodeGen/X86/insertps-O0-bug.ll b/test/CodeGen/X86/insertps-O0-bug.ll
new file mode 100644
index 0000000..e89ac26
--- /dev/null
+++ b/test/CodeGen/X86/insertps-O0-bug.ll
@@ -0,0 +1,52 @@
+; RUN: llc -mtriple=x86_64-unknown-unknown -mattr=+sse4.1 -O0 < %s | FileCheck %s
+
+; Check that at -O0, the backend doesn't attempt to canonicalize a vector load
+; used by an INSERTPS into a scalar load plus scalar_to_vector.
+;
+; In order to fold a load into the memory operand of an INSERTPSrm, the backend
+; tries to canonicalize a vector load feeding an INSERTPS node into a
+; scalar load plus scalar_to_vector. This would allow ISel to match the
+; INSERTPSrm variant rather than a load plus an INSERTPSrr.
+;
+; However, ISel can only select an INSERTPSrm if folding a load into the operand
+; of an insertps is considered to be profitable.
+;
+; In the example below:
+;
+; __m128 test(__m128 a, __m128 *b) {
+; __m128 c = _mm_insert_ps(a, *b, 1 << 6);
+; return c;
+; }
+;
+; At -O0, the backend would attempt to canonicalize the load to 'b' into
+; a scalar load in the hope of matching an INSERTPSrm.
+; However, ISel would fail to recognize an INSERTPSrm since load folding is
+; always considered unprofitable at -O0. This would leave the insertps mask
+; in an invalid state.
+;
+; The problem with the canonicalization rule performed by the backend is that
+; it assumes ISel can always match an INSERTPSrm. This assumption does
+; not always hold at -O0. In this example, FastISel fails to lower the
+; arguments needed by the entry block. This is enough to enable the DAGCombiner
+; and eventually trigger the canonicalization on the INSERTPS node.
+;
+; This test checks that the vector load feeding the insertps is not
+; canonicalized into a scalar load plus scalar_to_vector (a movss).
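+;
+; For reference on the CHECK immediate below (assuming the documented SSE4.1
+; insertps imm8 layout): 64 = 0x40 has bits [7:6] = 1 (source element),
+; bits [5:4] = 0 (destination element), and bits [3:0] = 0 (zero mask), i.e.
+; element 1 of *%b is inserted into element 0 of the result.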
+
+define <4 x float> @test(<4 x float> %a, <4 x float>* %b) {
+; CHECK-LABEL: test:
+; CHECK: movaps (%rdi), [[REG:%[a-z0-9]+]]
+; CHECK-NOT: movss
+; CHECK: insertps $64, [[REG]],
+; CHECK: ret
+entry:
+ %0 = load <4 x float>* %b, align 16
+ %1 = call <4 x float> @llvm.x86.sse41.insertps(<4 x float> %a, <4 x float> %0, i32 64)
+ %2 = alloca <4 x float>, align 16
+ store <4 x float> %1, <4 x float>* %2, align 16
+ %3 = load <4 x float>* %2, align 16
+ ret <4 x float> %3
+}
+
+
+declare <4 x float> @llvm.x86.sse41.insertps(<4 x float>, <4 x float>, i32)
diff --git a/test/CodeGen/X86/large-code-model-isel.ll b/test/CodeGen/X86/large-code-model-isel.ll
new file mode 100644
index 0000000..3c283d9
--- /dev/null
+++ b/test/CodeGen/X86/large-code-model-isel.ll
@@ -0,0 +1,13 @@
+; RUN: llc < %s -code-model=large -mcpu=core2 -march=x86-64 -O0 | FileCheck %s
+
+target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
+
+@.str10 = external unnamed_addr constant [2 x i8], align 1
+
+define void @foo() {
+; CHECK-LABEL: foo:
+entry:
+; CHECK: callq
+ %call = call i64* undef(i64* undef, i8* getelementptr inbounds ([2 x i8]* @.str10, i32 0, i32 0))
+ ret void
+}
diff --git a/test/CodeGen/X86/lea-2.ll b/test/CodeGen/X86/lea-2.ll
index 6fb3879..98c57c7 100644
--- a/test/CodeGen/X86/lea-2.ll
+++ b/test/CodeGen/X86/lea-2.ll
@@ -10,7 +10,7 @@ define i32 @test1(i32 %A, i32 %B) {
; The above computation of %tmp4 should match a single lea, without using
; actual add instructions.
; CHECK-NOT: add
-; CHECK: lea {{[a-z]+}}, dword ptr [{{[a-z]+}} + 4*{{[a-z]+}} - 5]
+; CHECK: lea {{[a-z]+}}, [{{[a-z]+}} + 4*{{[a-z]+}} - 5]
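+; (A single lea can encode base + index*scale + displacement, so the whole
+; expression fits one addressing mode.)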
ret i32 %tmp4
}
diff --git a/test/CodeGen/X86/logical-load-fold.ll b/test/CodeGen/X86/logical-load-fold.ll
new file mode 100644
index 0000000..5aac2d7
--- /dev/null
+++ b/test/CodeGen/X86/logical-load-fold.ll
@@ -0,0 +1,53 @@
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=sse2,sse-unaligned-mem | FileCheck %s --check-prefix=SSE2
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=avx | FileCheck %s --check-prefix=AVX
+
+; Although we have the ability to fold an unaligned load with AVX
+; and under special conditions with some SSE implementations, we
+; cannot fold the load under any circumstances in these test
+; cases because they are not 16-byte loads. The load must be
+; executed as a scalar ('movs*') with a zero extension to
+; 128 bits and then used in the packed logical ('andp*') op.
+; PR22371 - http://llvm.org/bugs/show_bug.cgi?id=22371
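+;
+; For instance, folding the double case into 'andpd (%mem), %xmm1' would turn
+; the 8-byte scalar load into a 16-byte packed load, which could read past
+; the end of the object and would also require alignment guarantees that the
+; scalar load does not.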
+
+define double @load_double_no_fold(double %x, double %y) {
+; SSE2-LABEL: load_double_no_fold:
+; SSE2: BB#0:
+; SSE2-NEXT: cmplesd %xmm0, %xmm1
+; SSE2-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
+; SSE2-NEXT: andpd %xmm1, %xmm0
+; SSE2-NEXT: retq
+;
+; AVX-LABEL: load_double_no_fold:
+; AVX: BB#0:
+; AVX-NEXT: vcmplesd %xmm0, %xmm1, %xmm0
+; AVX-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero
+; AVX-NEXT: vandpd %xmm1, %xmm0, %xmm0
+; AVX-NEXT: retq
+
+ %cmp = fcmp oge double %x, %y
+ %zext = zext i1 %cmp to i32
+ %conv = sitofp i32 %zext to double
+ ret double %conv
+}
+
+define float @load_float_no_fold(float %x, float %y) {
+; SSE2-LABEL: load_float_no_fold:
+; SSE2: BB#0:
+; SSE2-NEXT: cmpless %xmm0, %xmm1
+; SSE2-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; SSE2-NEXT: andps %xmm1, %xmm0
+; SSE2-NEXT: retq
+;
+; AVX-LABEL: load_float_no_fold:
+; AVX: BB#0:
+; AVX-NEXT: vcmpless %xmm0, %xmm1, %xmm0
+; AVX-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; AVX-NEXT: vandps %xmm1, %xmm0, %xmm0
+; AVX-NEXT: retq
+
+ %cmp = fcmp oge float %x, %y
+ %zext = zext i1 %cmp to i32
+ %conv = sitofp i32 %zext to float
+ ret float %conv
+}
+
diff --git a/test/CodeGen/X86/lower-vec-shift-2.ll b/test/CodeGen/X86/lower-vec-shift-2.ll
new file mode 100644
index 0000000..fb8fbba
--- /dev/null
+++ b/test/CodeGen/X86/lower-vec-shift-2.ll
@@ -0,0 +1,157 @@
+; RUN: llc -mtriple=x86_64-unknown-unknown -mattr=+sse2 < %s | FileCheck %s --check-prefix=SSE2
+; RUN: llc -mtriple=x86_64-unknown-unknown -mattr=+avx < %s | FileCheck %s --check-prefix=AVX
+
+define <8 x i16> @test1(<8 x i16> %A, <8 x i16> %B) {
+; SSE2-LABEL: test1:
+; SSE2: # BB#0: # %entry
+; SSE2-NEXT: movd %xmm1, %eax
+; SSE2-NEXT: movzwl %ax, %eax
+; SSE2-NEXT: movd %eax, %xmm1
+; SSE2-NEXT: psllw %xmm1, %xmm0
+; SSE2-NEXT: retq
+;
+; AVX-LABEL: test1:
+; AVX: # BB#0: # %entry
+; AVX-NEXT: vpxor %xmm2, %xmm2, %xmm2
+; AVX-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0],xmm2[1,2,3,4,5,6,7]
+; AVX-NEXT: vpsllw %xmm1, %xmm0, %xmm0
+; AVX-NEXT: retq
+entry:
+ %vecinit14 = shufflevector <8 x i16> %B, <8 x i16> undef, <8 x i32> zeroinitializer
+ %shl = shl <8 x i16> %A, %vecinit14
+ ret <8 x i16> %shl
+}
+
+define <4 x i32> @test2(<4 x i32> %A, <4 x i32> %B) {
+; SSE2-LABEL: test2:
+; SSE2: # BB#0: # %entry
+; SSE2-NEXT: xorps %xmm2, %xmm2
+; SSE2-NEXT: movss {{.*#+}} xmm2 = xmm1[0],xmm2[1,2,3]
+; SSE2-NEXT: pslld %xmm2, %xmm0
+; SSE2-NEXT: retq
+;
+; AVX-LABEL: test2:
+; AVX: # BB#0: # %entry
+; AVX-NEXT: vpxor %xmm2, %xmm2, %xmm2
+; AVX-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1],xmm2[2,3,4,5,6,7]
+; AVX-NEXT: vpslld %xmm1, %xmm0, %xmm0
+; AVX-NEXT: retq
+entry:
+ %vecinit6 = shufflevector <4 x i32> %B, <4 x i32> undef, <4 x i32> zeroinitializer
+ %shl = shl <4 x i32> %A, %vecinit6
+ ret <4 x i32> %shl
+}
+
+define <2 x i64> @test3(<2 x i64> %A, <2 x i64> %B) {
+; SSE2-LABEL: test3:
+; SSE2: # BB#0: # %entry
+; SSE2-NEXT: psllq %xmm1, %xmm0
+; SSE2-NEXT: retq
+;
+; AVX-LABEL: test3:
+; AVX: # BB#0: # %entry
+; AVX-NEXT: vpsllq %xmm1, %xmm0, %xmm0
+; AVX-NEXT: retq
+entry:
+ %vecinit2 = shufflevector <2 x i64> %B, <2 x i64> undef, <2 x i32> zeroinitializer
+ %shl = shl <2 x i64> %A, %vecinit2
+ ret <2 x i64> %shl
+}
+
+define <8 x i16> @test4(<8 x i16> %A, <8 x i16> %B) {
+; SSE2-LABEL: test4:
+; SSE2: # BB#0: # %entry
+; SSE2-NEXT: movd %xmm1, %eax
+; SSE2-NEXT: movzwl %ax, %eax
+; SSE2-NEXT: movd %eax, %xmm1
+; SSE2-NEXT: psrlw %xmm1, %xmm0
+; SSE2-NEXT: retq
+;
+; AVX-LABEL: test4:
+; AVX: # BB#0: # %entry
+; AVX-NEXT: vpxor %xmm2, %xmm2, %xmm2
+; AVX-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0],xmm2[1,2,3,4,5,6,7]
+; AVX-NEXT: vpsrlw %xmm1, %xmm0, %xmm0
+; AVX-NEXT: retq
+entry:
+ %vecinit14 = shufflevector <8 x i16> %B, <8 x i16> undef, <8 x i32> zeroinitializer
+ %shr = lshr <8 x i16> %A, %vecinit14
+ ret <8 x i16> %shr
+}
+
+define <4 x i32> @test5(<4 x i32> %A, <4 x i32> %B) {
+; SSE2-LABEL: test5:
+; SSE2: # BB#0: # %entry
+; SSE2-NEXT: xorps %xmm2, %xmm2
+; SSE2-NEXT: movss {{.*#+}} xmm2 = xmm1[0],xmm2[1,2,3]
+; SSE2-NEXT: psrld %xmm2, %xmm0
+; SSE2-NEXT: retq
+;
+; AVX-LABEL: test5:
+; AVX: # BB#0: # %entry
+; AVX-NEXT: vpxor %xmm2, %xmm2, %xmm2
+; AVX-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1],xmm2[2,3,4,5,6,7]
+; AVX-NEXT: vpsrld %xmm1, %xmm0, %xmm0
+; AVX-NEXT: retq
+entry:
+ %vecinit6 = shufflevector <4 x i32> %B, <4 x i32> undef, <4 x i32> zeroinitializer
+ %shr = lshr <4 x i32> %A, %vecinit6
+ ret <4 x i32> %shr
+}
+
+define <2 x i64> @test6(<2 x i64> %A, <2 x i64> %B) {
+; SSE2-LABEL: test6:
+; SSE2: # BB#0: # %entry
+; SSE2-NEXT: psrlq %xmm1, %xmm0
+; SSE2-NEXT: retq
+;
+; AVX-LABEL: test6:
+; AVX: # BB#0: # %entry
+; AVX-NEXT: vpsrlq %xmm1, %xmm0, %xmm0
+; AVX-NEXT: retq
+entry:
+ %vecinit2 = shufflevector <2 x i64> %B, <2 x i64> undef, <2 x i32> zeroinitializer
+ %shr = lshr <2 x i64> %A, %vecinit2
+ ret <2 x i64> %shr
+}
+
+define <8 x i16> @test7(<8 x i16> %A, <8 x i16> %B) {
+; SSE2-LABEL: test7:
+; SSE2: # BB#0: # %entry
+; SSE2-NEXT: movd %xmm1, %eax
+; SSE2-NEXT: movzwl %ax, %eax
+; SSE2-NEXT: movd %eax, %xmm1
+; SSE2-NEXT: psraw %xmm1, %xmm0
+; SSE2-NEXT: retq
+;
+; AVX-LABEL: test7:
+; AVX: # BB#0: # %entry
+; AVX-NEXT: vpxor %xmm2, %xmm2, %xmm2
+; AVX-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0],xmm2[1,2,3,4,5,6,7]
+; AVX-NEXT: vpsraw %xmm1, %xmm0, %xmm0
+; AVX-NEXT: retq
+entry:
+ %vecinit14 = shufflevector <8 x i16> %B, <8 x i16> undef, <8 x i32> zeroinitializer
+ %shr = ashr <8 x i16> %A, %vecinit14
+ ret <8 x i16> %shr
+}
+
+define <4 x i32> @test8(<4 x i32> %A, <4 x i32> %B) {
+; SSE2-LABEL: test8:
+; SSE2: # BB#0: # %entry
+; SSE2-NEXT: xorps %xmm2, %xmm2
+; SSE2-NEXT: movss {{.*#+}} xmm2 = xmm1[0],xmm2[1,2,3]
+; SSE2-NEXT: psrad %xmm2, %xmm0
+; SSE2-NEXT: retq
+;
+; AVX-LABEL: test8:
+; AVX: # BB#0: # %entry
+; AVX-NEXT: vpxor %xmm2, %xmm2, %xmm2
+; AVX-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1],xmm2[2,3,4,5,6,7]
+; AVX-NEXT: vpsrad %xmm1, %xmm0, %xmm0
+; AVX-NEXT: retq
+entry:
+ %vecinit6 = shufflevector <4 x i32> %B, <4 x i32> undef, <4 x i32> zeroinitializer
+ %shr = ashr <4 x i32> %A, %vecinit6
+ ret <4 x i32> %shr
+}
diff --git a/test/CodeGen/X86/lzcnt-tzcnt.ll b/test/CodeGen/X86/lzcnt-tzcnt.ll
index 07e4b9d..e98764a 100644
--- a/test/CodeGen/X86/lzcnt-tzcnt.ll
+++ b/test/CodeGen/X86/lzcnt-tzcnt.ll
@@ -437,6 +437,137 @@ define i64 @test18_cttz(i64* %ptr) {
; CHECK: tzcnt
; CHECK-NEXT: ret
+define i16 @test1b_ctlz(i16 %v) {
+ %cnt = tail call i16 @llvm.ctlz.i16(i16 %v, i1 true)
+ %tobool = icmp ne i16 %v, 0
+ %cond = select i1 %tobool, i16 16, i16 %cnt
+ ret i16 %cond
+}
+; CHECK-LABEL: test1b_ctlz
+; CHECK: lzcnt
+; CHECK-NEXT: ret
+
+
+define i32 @test2b_ctlz(i32 %v) {
+ %cnt = tail call i32 @llvm.ctlz.i32(i32 %v, i1 true)
+ %tobool = icmp ne i32 %v, 0
+ %cond = select i1 %tobool, i32 32, i32 %cnt
+ ret i32 %cond
+}
+; CHECK-LABEL: test2b_ctlz
+; CHECK: lzcnt
+; CHECK-NEXT: ret
+
+
+define i64 @test3b_ctlz(i64 %v) {
+ %cnt = tail call i64 @llvm.ctlz.i64(i64 %v, i1 true)
+ %tobool = icmp ne i64 %v, 0
+ %cond = select i1 %tobool, i64 64, i64 %cnt
+ ret i64 %cond
+}
+; CHECK-LABEL: test3b_ctlz
+; CHECK: lzcnt
+; CHECK-NEXT: ret
+
+
+define i16 @test4b_ctlz(i16 %v) {
+ %cnt = tail call i16 @llvm.ctlz.i16(i16 %v, i1 true)
+ %tobool = icmp ne i16 %v, 0
+ %cond = select i1 %tobool, i16 %cnt, i16 16
+ ret i16 %cond
+}
+; CHECK-LABEL: test4b_ctlz
+; CHECK: lzcnt
+; CHECK-NEXT: ret
+
+
+define i32 @test5b_ctlz(i32 %v) {
+ %cnt = tail call i32 @llvm.ctlz.i32(i32 %v, i1 true)
+ %tobool = icmp ne i32 %v, 0
+ %cond = select i1 %tobool, i32 %cnt, i32 32
+ ret i32 %cond
+}
+; CHECK-LABEL: test5b_ctlz
+; CHECK: lzcnt
+; CHECK-NEXT: ret
+
+
+define i64 @test6b_ctlz(i64 %v) {
+ %cnt = tail call i64 @llvm.ctlz.i64(i64 %v, i1 true)
+ %tobool = icmp ne i64 %v, 0
+ %cond = select i1 %tobool, i64 %cnt, i64 64
+ ret i64 %cond
+}
+; CHECK-LABEL: test6b_ctlz
+; CHECK: lzcnt
+; CHECK-NEXT: ret
+
+
+define i16 @test1b_cttz(i16 %v) {
+ %cnt = tail call i16 @llvm.cttz.i16(i16 %v, i1 true)
+ %tobool = icmp ne i16 %v, 0
+ %cond = select i1 %tobool, i16 16, i16 %cnt
+ ret i16 %cond
+}
+; CHECK-LABEL: test1b_cttz
+; CHECK: tzcnt
+; CHECK-NEXT: ret
+
+
+define i32 @test2b_cttz(i32 %v) {
+ %cnt = tail call i32 @llvm.cttz.i32(i32 %v, i1 true)
+ %tobool = icmp ne i32 %v, 0
+ %cond = select i1 %tobool, i32 32, i32 %cnt
+ ret i32 %cond
+}
+; CHECK-LABEL: test2b_cttz
+; CHECK: tzcnt
+; CHECK-NEXT: ret
+
+
+define i64 @test3b_cttz(i64 %v) {
+ %cnt = tail call i64 @llvm.cttz.i64(i64 %v, i1 true)
+ %tobool = icmp ne i64 %v, 0
+ %cond = select i1 %tobool, i64 64, i64 %cnt
+ ret i64 %cond
+}
+; CHECK-LABEL: test3b_cttz
+; CHECK: tzcnt
+; CHECK-NEXT: ret
+
+
+define i16 @test4b_cttz(i16 %v) {
+ %cnt = tail call i16 @llvm.cttz.i16(i16 %v, i1 true)
+ %tobool = icmp ne i16 %v, 0
+ %cond = select i1 %tobool, i16 %cnt, i16 16
+ ret i16 %cond
+}
+; CHECK-LABEL: test4b_cttz
+; CHECK: tzcnt
+; CHECK-NEXT: ret
+
+
+define i32 @test5b_cttz(i32 %v) {
+ %cnt = tail call i32 @llvm.cttz.i32(i32 %v, i1 true)
+ %tobool = icmp ne i32 %v, 0
+ %cond = select i1 %tobool, i32 %cnt, i32 32
+ ret i32 %cond
+}
+; CHECK-LABEL: test5b_cttz
+; CHECK: tzcnt
+; CHECK-NEXT: ret
+
+
+define i64 @test6b_cttz(i64 %v) {
+ %cnt = tail call i64 @llvm.cttz.i64(i64 %v, i1 true)
+ %tobool = icmp ne i64 %v, 0
+ %cond = select i1 %tobool, i64 %cnt, i64 64
+ ret i64 %cond
+}
+; CHECK-LABEL: test6b_cttz
+; CHECK: tzcnt
+; CHECK-NEXT: ret
+
declare i64 @llvm.cttz.i64(i64, i1)
declare i32 @llvm.cttz.i32(i32, i1)
diff --git a/test/CodeGen/X86/macho-comdat.ll b/test/CodeGen/X86/macho-comdat.ll
index 3c2d997..6056047 100644
--- a/test/CodeGen/X86/macho-comdat.ll
+++ b/test/CodeGen/X86/macho-comdat.ll
@@ -2,5 +2,5 @@
; RUN: FileCheck < %t %s
$f = comdat any
-@v = global i32 0, comdat $f
+@v = global i32 0, comdat($f)
; CHECK: LLVM ERROR: MachO doesn't support COMDATs, 'f' cannot be lowered.
diff --git a/test/CodeGen/X86/masked_memop.ll b/test/CodeGen/X86/masked_memop.ll
new file mode 100644
index 0000000..f268c57
--- /dev/null
+++ b/test/CodeGen/X86/masked_memop.ll
@@ -0,0 +1,219 @@
+; RUN: llc -mtriple=x86_64-apple-darwin -mcpu=knl < %s | FileCheck %s -check-prefix=AVX512
+; RUN: llc -mtriple=x86_64-apple-darwin -mcpu=core-avx2 < %s | FileCheck %s -check-prefix=AVX2
+; RUN: opt -mtriple=x86_64-apple-darwin -codegenprepare -mcpu=corei7-avx -S < %s | FileCheck %s -check-prefix=AVX_SCALAR
+
+; AVX512-LABEL: test1
+; AVX512: vmovdqu32 (%rdi), %zmm0 {%k1} {z}
+
+; AVX2-LABEL: test1
+; AVX2: vpmaskmovd 32(%rdi)
+; AVX2: vpmaskmovd (%rdi)
+; AVX2-NOT: blend
+
+; AVX_SCALAR-LABEL: test1
+; AVX_SCALAR-NOT: masked
+; AVX_SCALAR: extractelement
+; AVX_SCALAR: insertelement
+; AVX_SCALAR: extractelement
+; AVX_SCALAR: insertelement
+define <16 x i32> @test1(<16 x i32> %trigger, <16 x i32>* %addr) {
+ %mask = icmp eq <16 x i32> %trigger, zeroinitializer
+ %res = call <16 x i32> @llvm.masked.load.v16i32(<16 x i32>* %addr, i32 4, <16 x i1>%mask, <16 x i32>undef)
+ ret <16 x i32> %res
+}
+
+; AVX512-LABEL: test2
+; AVX512: vmovdqu32 (%rdi), %zmm0 {%k1} {z}
+
+; AVX2-LABEL: test2
+; AVX2: vpmaskmovd {{.*}}(%rdi)
+; AVX2: vpmaskmovd {{.*}}(%rdi)
+; AVX2-NOT: blend
+define <16 x i32> @test2(<16 x i32> %trigger, <16 x i32>* %addr) {
+ %mask = icmp eq <16 x i32> %trigger, zeroinitializer
+ %res = call <16 x i32> @llvm.masked.load.v16i32(<16 x i32>* %addr, i32 4, <16 x i1>%mask, <16 x i32>zeroinitializer)
+ ret <16 x i32> %res
+}
+
+; AVX512-LABEL: test3
+; AVX512: vmovdqu32 %zmm1, (%rdi) {%k1}
+
+; AVX_SCALAR-LABEL: test3
+; AVX_SCALAR-NOT: masked
+; AVX_SCALAR: extractelement
+; AVX_SCALAR: store
+; AVX_SCALAR: extractelement
+; AVX_SCALAR: store
+; AVX_SCALAR: extractelement
+; AVX_SCALAR: store
+define void @test3(<16 x i32> %trigger, <16 x i32>* %addr, <16 x i32> %val) {
+ %mask = icmp eq <16 x i32> %trigger, zeroinitializer
+ call void @llvm.masked.store.v16i32(<16 x i32>%val, <16 x i32>* %addr, i32 4, <16 x i1>%mask)
+ ret void
+}
+
+; AVX512-LABEL: test4
+; AVX512: vmovups (%rdi), %zmm{{.*{%k[1-7]}}}
+
+; AVX2-LABEL: test4
+; AVX2: vmaskmovps {{.*}}(%rdi)
+; AVX2: vmaskmovps {{.*}}(%rdi)
+; AVX2: blend
+define <16 x float> @test4(<16 x i32> %trigger, <16 x float>* %addr, <16 x float> %dst) {
+ %mask = icmp eq <16 x i32> %trigger, zeroinitializer
+ %res = call <16 x float> @llvm.masked.load.v16f32(<16 x float>* %addr, i32 4, <16 x i1>%mask, <16 x float> %dst)
+ ret <16 x float> %res
+}
+
+; AVX512-LABEL: test5
+; AVX512: vmovupd (%rdi), %zmm1 {%k1}
+
+; AVX2-LABEL: test5
+; AVX2: vmaskmovpd
+; AVX2: vblendvpd
+; AVX2: vmaskmovpd
+; AVX2: vblendvpd
+define <8 x double> @test5(<8 x i32> %trigger, <8 x double>* %addr, <8 x double> %dst) {
+ %mask = icmp eq <8 x i32> %trigger, zeroinitializer
+ %res = call <8 x double> @llvm.masked.load.v8f64(<8 x double>* %addr, i32 4, <8 x i1>%mask, <8 x double>%dst)
+ ret <8 x double> %res
+}
+
+; AVX2-LABEL: test6
+; AVX2: vmaskmovpd
+; AVX2: vblendvpd
+define <2 x double> @test6(<2 x i64> %trigger, <2 x double>* %addr, <2 x double> %dst) {
+ %mask = icmp eq <2 x i64> %trigger, zeroinitializer
+ %res = call <2 x double> @llvm.masked.load.v2f64(<2 x double>* %addr, i32 4, <2 x i1>%mask, <2 x double>%dst)
+ ret <2 x double> %res
+}
+
+; AVX2-LABEL: test7
+; AVX2: vmaskmovps {{.*}}(%rdi)
+; AVX2: blend
+define <4 x float> @test7(<4 x i32> %trigger, <4 x float>* %addr, <4 x float> %dst) {
+ %mask = icmp eq <4 x i32> %trigger, zeroinitializer
+ %res = call <4 x float> @llvm.masked.load.v4f32(<4 x float>* %addr, i32 4, <4 x i1>%mask, <4 x float>%dst)
+ ret <4 x float> %res
+}
+
+; AVX2-LABEL: test8
+; AVX2: vpmaskmovd {{.*}}(%rdi)
+; AVX2: blend
+define <4 x i32> @test8(<4 x i32> %trigger, <4 x i32>* %addr, <4 x i32> %dst) {
+ %mask = icmp eq <4 x i32> %trigger, zeroinitializer
+ %res = call <4 x i32> @llvm.masked.load.v4i32(<4 x i32>* %addr, i32 4, <4 x i1>%mask, <4 x i32>%dst)
+ ret <4 x i32> %res
+}
+
+; AVX2-LABEL: test9
+; AVX2: vpmaskmovd %xmm
+define void @test9(<4 x i32> %trigger, <4 x i32>* %addr, <4 x i32> %val) {
+ %mask = icmp eq <4 x i32> %trigger, zeroinitializer
+ call void @llvm.masked.store.v4i32(<4 x i32>%val, <4 x i32>* %addr, i32 4, <4 x i1>%mask)
+ ret void
+}
+
+; AVX2-LABEL: test10
+; AVX2: vmaskmovpd (%rdi), %ymm
+; AVX2: blend
+define <4 x double> @test10(<4 x i32> %trigger, <4 x double>* %addr, <4 x double> %dst) {
+ %mask = icmp eq <4 x i32> %trigger, zeroinitializer
+ %res = call <4 x double> @llvm.masked.load.v4f64(<4 x double>* %addr, i32 4, <4 x i1>%mask, <4 x double>%dst)
+ ret <4 x double> %res
+}
+
+; AVX2-LABEL: test11
+; AVX2: vmaskmovps
+; AVX2: vblendvps
+define <8 x float> @test11(<8 x i32> %trigger, <8 x float>* %addr, <8 x float> %dst) {
+ %mask = icmp eq <8 x i32> %trigger, zeroinitializer
+ %res = call <8 x float> @llvm.masked.load.v8f32(<8 x float>* %addr, i32 4, <8 x i1>%mask, <8 x float>%dst)
+ ret <8 x float> %res
+}
+
+; AVX2-LABEL: test12
+; AVX2: vpmaskmovd %ymm
+define void @test12(<8 x i32> %trigger, <8 x i32>* %addr, <8 x i32> %val) {
+ %mask = icmp eq <8 x i32> %trigger, zeroinitializer
+ call void @llvm.masked.store.v8i32(<8 x i32>%val, <8 x i32>* %addr, i32 4, <8 x i1>%mask)
+ ret void
+}
+
+; AVX512-LABEL: test13
+; AVX512: vmovups %zmm1, (%rdi) {%k1}
+
+define void @test13(<16 x i32> %trigger, <16 x float>* %addr, <16 x float> %val) {
+ %mask = icmp eq <16 x i32> %trigger, zeroinitializer
+ call void @llvm.masked.store.v16f32(<16 x float>%val, <16 x float>* %addr, i32 4, <16 x i1>%mask)
+ ret void
+}
+
+; AVX2-LABEL: test14
+; AVX2: vpshufd
+; AVX2: vmovq
+; AVX2: vmaskmovps
+define void @test14(<2 x i32> %trigger, <2 x float>* %addr, <2 x float> %val) {
+ %mask = icmp eq <2 x i32> %trigger, zeroinitializer
+ call void @llvm.masked.store.v2f32(<2 x float>%val, <2 x float>* %addr, i32 4, <2 x i1>%mask)
+ ret void
+}
+
+; AVX2-LABEL: test15
+; AVX2: vpmaskmovd
+define void @test15(<2 x i32> %trigger, <2 x i32>* %addr, <2 x i32> %val) {
+ %mask = icmp eq <2 x i32> %trigger, zeroinitializer
+ call void @llvm.masked.store.v2i32(<2 x i32>%val, <2 x i32>* %addr, i32 4, <2 x i1>%mask)
+ ret void
+}
+
+; AVX2-LABEL: test16
+; AVX2: vmaskmovps
+; AVX2: vblendvps
+define <2 x float> @test16(<2 x i32> %trigger, <2 x float>* %addr, <2 x float> %dst) {
+ %mask = icmp eq <2 x i32> %trigger, zeroinitializer
+ %res = call <2 x float> @llvm.masked.load.v2f32(<2 x float>* %addr, i32 4, <2 x i1>%mask, <2 x float>%dst)
+ ret <2 x float> %res
+}
+
+; AVX2-LABEL: test17
+; AVX2: vpmaskmovd
+; AVX2: vblendvps
+; AVX2: vpmovsxdq
+define <2 x i32> @test17(<2 x i32> %trigger, <2 x i32>* %addr, <2 x i32> %dst) {
+ %mask = icmp eq <2 x i32> %trigger, zeroinitializer
+ %res = call <2 x i32> @llvm.masked.load.v2i32(<2 x i32>* %addr, i32 4, <2 x i1>%mask, <2 x i32>%dst)
+ ret <2 x i32> %res
+}
+
+; AVX2-LABEL: test18
+; AVX2: vmaskmovps
+; AVX2-NOT: blend
+define <2 x float> @test18(<2 x i32> %trigger, <2 x float>* %addr) {
+ %mask = icmp eq <2 x i32> %trigger, zeroinitializer
+ %res = call <2 x float> @llvm.masked.load.v2f32(<2 x float>* %addr, i32 4, <2 x i1>%mask, <2 x float>undef)
+ ret <2 x float> %res
+}
+
+
+declare <16 x i32> @llvm.masked.load.v16i32(<16 x i32>*, i32, <16 x i1>, <16 x i32>)
+declare <4 x i32> @llvm.masked.load.v4i32(<4 x i32>*, i32, <4 x i1>, <4 x i32>)
+declare <2 x i32> @llvm.masked.load.v2i32(<2 x i32>*, i32, <2 x i1>, <2 x i32>)
+declare void @llvm.masked.store.v16i32(<16 x i32>, <16 x i32>*, i32, <16 x i1>)
+declare void @llvm.masked.store.v8i32(<8 x i32>, <8 x i32>*, i32, <8 x i1>)
+declare void @llvm.masked.store.v4i32(<4 x i32>, <4 x i32>*, i32, <4 x i1>)
+declare void @llvm.masked.store.v2f32(<2 x float>, <2 x float>*, i32, <2 x i1>)
+declare void @llvm.masked.store.v2i32(<2 x i32>, <2 x i32>*, i32, <2 x i1>)
+declare void @llvm.masked.store.v16f32(<16 x float>, <16 x float>*, i32, <16 x i1>)
+declare void @llvm.masked.store.v16f32p(<16 x float>*, <16 x float>**, i32, <16 x i1>)
+declare <16 x float> @llvm.masked.load.v16f32(<16 x float>*, i32, <16 x i1>, <16 x float>)
+declare <8 x float> @llvm.masked.load.v8f32(<8 x float>*, i32, <8 x i1>, <8 x float>)
+declare <4 x float> @llvm.masked.load.v4f32(<4 x float>*, i32, <4 x i1>, <4 x float>)
+declare <2 x float> @llvm.masked.load.v2f32(<2 x float>*, i32, <2 x i1>, <2 x float>)
+declare <8 x double> @llvm.masked.load.v8f64(<8 x double>*, i32, <8 x i1>, <8 x double>)
+declare <4 x double> @llvm.masked.load.v4f64(<4 x double>*, i32, <4 x i1>, <4 x double>)
+declare <2 x double> @llvm.masked.load.v2f64(<2 x double>*, i32, <2 x i1>, <2 x double>)
+declare void @llvm.masked.store.v8f64(<8 x double>, <8 x double>*, i32, <8 x i1>)
+declare void @llvm.masked.store.v2f64(<2 x double>, <2 x double>*, i32, <2 x i1>)
+declare void @llvm.masked.store.v2i64(<2 x i64>, <2 x i64>*, i32, <2 x i1>)
+
diff --git a/test/CodeGen/X86/mem-intrin-base-reg.ll b/test/CodeGen/X86/mem-intrin-base-reg.ll
index dd7f396..9a6de3d 100644
--- a/test/CodeGen/X86/mem-intrin-base-reg.ll
+++ b/test/CodeGen/X86/mem-intrin-base-reg.ll
@@ -63,7 +63,7 @@ spill_vectors:
; CHECK-LABEL: _memcpy_vla_vector:
; CHECK: andl $-16, %esp
; CHECK: movl %esp, %esi
-; CHECK: movl $128, {{.*}}(%esp)
+; CHECK: pushl $128
; CHECK: calll _memcpy
; CHECK: calll __chkstk
diff --git a/test/CodeGen/X86/misched-code-difference-with-debug.ll b/test/CodeGen/X86/misched-code-difference-with-debug.ll
new file mode 100644
index 0000000..fb2a986
--- /dev/null
+++ b/test/CodeGen/X86/misched-code-difference-with-debug.ll
@@ -0,0 +1,90 @@
+; RUN: llc < %s -march=x86-64 -mtriple=x86_64-unknown-unknown -mcpu=generic | FileCheck %s
+; Both functions should produce the same code. The presence of debug values
+; should not affect the scheduling strategy.
+; Generated from:
+; char argc;
+; class C {
+; public:
+; int test(char ,char ,char ,...);
+; };
+; void foo() {
+; C c;
+; char lc = argc;
+; c.test(0,argc,0,lc);
+; c.test(0,argc,0,lc);
+; }
+;
+; with
+; clang -O2 -c test.cpp -emit-llvm -S
+; clang -O2 -c test.cpp -emit-llvm -S -g
+;
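+; The CHECK lines capture the two movl operands in test_without_debug as
+; [[A]], [[B]], and [[C]], then require the identical movl pair in
+; test_with_debug; any debug-induced scheduling difference would break
+; that reuse of the captured registers.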
+
+
+%class.C = type { i8 }
+
+@argc = global i8 0, align 1
+
+declare i32 @test_function(%class.C*, i8 signext, i8 signext, i8 signext, ...)
+
+; CHECK-LABEL: test_without_debug
+; CHECK: movl [[A:%[a-z]+]], [[B:%[a-z]+]]
+; CHECK-NEXT: movl [[A]], [[C:%[a-z]+]]
+define void @test_without_debug() {
+entry:
+ %c = alloca %class.C, align 1
+ %0 = load i8* @argc, align 1
+ %conv = sext i8 %0 to i32
+ %call = call i32 (%class.C*, i8, i8, i8, ...)* @test_function(%class.C* %c, i8 signext 0, i8 signext %0, i8 signext 0, i32 %conv)
+ %1 = load i8* @argc, align 1
+ %call2 = call i32 (%class.C*, i8, i8, i8, ...)* @test_function(%class.C* %c, i8 signext 0, i8 signext %1, i8 signext 0, i32 %conv)
+ ret void
+}
+
+; CHECK-LABEL: test_with_debug
+; CHECK: movl [[A]], [[B]]
+; CHECK-NEXT: movl [[A]], [[C]]
+define void @test_with_debug() {
+entry:
+ %c = alloca %class.C, align 1
+ %0 = load i8* @argc, align 1
+ tail call void @llvm.dbg.value(metadata i8 %0, i64 0, metadata !19, metadata !29)
+ %conv = sext i8 %0 to i32
+ tail call void @llvm.dbg.value(metadata %class.C* %c, i64 0, metadata !18, metadata !29)
+ %call = call i32 (%class.C*, i8, i8, i8, ...)* @test_function(%class.C* %c, i8 signext 0, i8 signext %0, i8 signext 0, i32 %conv)
+ %1 = load i8* @argc, align 1
+ call void @llvm.dbg.value(metadata %class.C* %c, i64 0, metadata !18, metadata !29)
+ %call2 = call i32 (%class.C*, i8, i8, i8, ...)* @test_function(%class.C* %c, i8 signext 0, i8 signext %1, i8 signext 0, i32 %conv)
+ ret void
+}
+
+declare void @llvm.dbg.value(metadata, i64, metadata, metadata)
+
+!llvm.dbg.cu = !{!0}
+!llvm.module.flags = !{!22, !23}
+
+!0 = !{!"", !1, !2, !3, !12, !20, !2} ; [ DW_TAG_compile_unit ] [test.cpp] [DW_LANG_C_plus_plus]
+!1 = !{!"test.cpp", !""}
+!2 = !{}
+!3 = !{!4}
+!4 = !{!"0x2\00C\002\008\008\000\000\000", !1, null, null, !5, null, null, !"_ZTS1C"} ; [ DW_TAG_class_type ] [C] [line 2, size 8, align 8, offset 0] [def] [from ]
+!5 = !{!6}
+!6 = !{!"", !1, !"_ZTS1C", !7, null, null, null, null, null} ; [ DW_TAG_subprogram ] [line 4] [public] [test]
+!7 = !{!"", null, null, null, !8, null, null, null} ; [ DW_TAG_subroutine_type ] [line 0, size 0, align 0, offset 0] [from ]
+!8 = !{!9, !10, !11, !11, !11, null}
+!9 = !{!"", null, null} ; [ DW_TAG_base_type ] [int] [line 0, size 32, align 32, offset 0, enc DW_ATE_signed]
+!10 = !{!"", null, null, !"_ZTS1C"} ; [ DW_TAG_pointer_type ] [line 0, size 64, align 64, offset 0] [artificial] [from _ZTS1C]
+!11 = !{!"0x24\00char\000\008\008\000\000\006", null, null} ; [ DW_TAG_base_type ] [char] [line 0, size 8, align 8, offset 0, enc DW_ATE_signed_char]
+!12 = !{!13}
+!13 = !{!"0x2e\00test_with_debug\00test_with_debug\00test_with_debug\006\000\001\000\000\00256\001\006", !1, !14, !15, null, void ()* @test_with_debug, null, null, !17} ; [ DW_TAG_subprogram ] [line 6] [def] [test_with_debug]
+!14 = !{!"0x29", !1}
+!15 = !{!"0x15\00\000\000\000\000\000\000", null, null, null, !16, null, null, null} ; [ DW_TAG_subroutine_type ] [line 0, size 0, align 0, offset 0] [from ]
+!16 = !{null}
+!17 = !{!18, !19}
+!18 = !{!"0x100\00c\007\000", !13, !14, !"_ZTS1C"} ; [ DW_TAG_auto_variable ] [c] [line 7]
+!19 = !{!"0x100\00lc\008\000", !13, !14, !11} ; [ DW_TAG_auto_variable ] [lc] [line 8]
+!20 = !{!21}
+!21 = !{!"0x34\00argc\00argc\00\001\000\001", null, !14, !11, i8* @argc, null} ; [ DW_TAG_variable ] [argc] [line 1] [def]
+!22 = !{i32 2, !"Dwarf Version", i32 4}
+!23 = !{i32 2, !"Debug Info Version", i32 2}
+!25 = !MDLocation(line: 8, column: 3, scope: !13)
+!29 = !{!"0x102"} ; [ DW_TAG_expression ]
diff --git a/test/CodeGen/X86/misched-copy.ll b/test/CodeGen/X86/misched-copy.ll
index 4485b8a..3e37292 100644
--- a/test/CodeGen/X86/misched-copy.ll
+++ b/test/CodeGen/X86/misched-copy.ll
@@ -1,5 +1,5 @@
; REQUIRES: asserts
-; RUN: llc < %s -march=x86 -mcpu=core2 -pre-RA-sched=source -enable-misched -verify-misched -debug-only=misched -o - 2>&1 > /dev/null | FileCheck %s
+; RUN: llc < %s -verify-machineinstrs -march=x86 -mcpu=core2 -pre-RA-sched=source -enable-misched -verify-misched -debug-only=misched -o - 2>&1 > /dev/null | FileCheck %s
;
; Test scheduling of copy instructions.
;
@@ -44,6 +44,6 @@ end:
attributes #0 = { nounwind ssp uwtable "less-precise-fpmad"="false" "no-frame-pointer-elim"="false" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "unsafe-fp-math"="false" "use-soft-float"="false" }
-!0 = metadata !{metadata !"float", metadata !1}
-!1 = metadata !{metadata !"omnipotent char", metadata !2}
-!2 = metadata !{metadata !"Simple C/C++ TBAA"}
+!0 = !{!"float", !1}
+!1 = !{!"omnipotent char", !2}
+!2 = !{!"Simple C/C++ TBAA"}
diff --git a/test/CodeGen/X86/misched-crash.ll b/test/CodeGen/X86/misched-crash.ll
index 7644ee0..21c3fa3 100644
--- a/test/CodeGen/X86/misched-crash.ll
+++ b/test/CodeGen/X86/misched-crash.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -enable-misched -verify-misched
+; RUN: llc < %s -verify-machineinstrs -enable-misched -verify-misched
target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
target triple = "x86_64-apple-macosx10"
diff --git a/test/CodeGen/X86/mmx-arg-passing-x86-64.ll b/test/CodeGen/X86/mmx-arg-passing-x86-64.ll
new file mode 100644
index 0000000..c536a39
--- /dev/null
+++ b/test/CodeGen/X86/mmx-arg-passing-x86-64.ll
@@ -0,0 +1,56 @@
+; RUN: llc < %s -mtriple=x86_64-apple-darwin -mattr=+mmx,+sse2 | FileCheck %s --check-prefix=X86-64
+;
+; On Darwin x86-64, v8i8, v4i16, v2i32 values are passed in XMM[0-7].
+; On Darwin x86-64, v1i64 values are passed in 64-bit GPRs.
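+;
+; Hence the CHECK lines below: t3 and t4 leave the v8i8 result in %xmm0
+; before tail-calling pass_v8qi, while t5 passes the v1i64 zero through
+; the first integer argument register (%edi).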
+
+@g_v8qi = external global <8 x i8>
+
+define void @t3() nounwind {
+; X86-64-LABEL: t3:
+; X86-64: ## BB#0:
+; X86-64-NEXT: movq _g_v8qi@{{.*}}(%rip), %rax
+; X86-64-NEXT: movq {{.*#+}} xmm0 = mem[0],zero
+; X86-64-NEXT: movb $1, %al
+; X86-64-NEXT: jmp _pass_v8qi ## TAILCALL
+ %tmp3 = load <8 x i8>* @g_v8qi, align 8
+ %tmp3a = bitcast <8 x i8> %tmp3 to x86_mmx
+ %tmp4 = tail call i32 (...)* @pass_v8qi( x86_mmx %tmp3a ) nounwind
+ ret void
+}
+
+define void @t4(x86_mmx %v1, x86_mmx %v2) nounwind {
+; X86-64-LABEL: t4:
+; X86-64: ## BB#0:
+; X86-64-NEXT: movdq2q %xmm1, %mm0
+; X86-64-NEXT: movq %mm0, -{{[0-9]+}}(%rsp)
+; X86-64-NEXT: movdq2q %xmm0, %mm0
+; X86-64-NEXT: movq %mm0, -{{[0-9]+}}(%rsp)
+; X86-64-NEXT: movq {{.*#+}} xmm0 = mem[0],zero
+; X86-64-NEXT: movq {{.*#+}} xmm1 = mem[0],zero
+; X86-64-NEXT: paddb %xmm0, %xmm1
+; X86-64-NEXT: movd %xmm1, %rax
+; X86-64-NEXT: movd %rax, %xmm0
+; X86-64-NEXT: movb $1, %al
+; X86-64-NEXT: jmp _pass_v8qi ## TAILCALL
+ %v1a = bitcast x86_mmx %v1 to <8 x i8>
+ %v2b = bitcast x86_mmx %v2 to <8 x i8>
+ %tmp3 = add <8 x i8> %v1a, %v2b
+ %tmp3a = bitcast <8 x i8> %tmp3 to x86_mmx
+ %tmp4 = tail call i32 (...)* @pass_v8qi( x86_mmx %tmp3a ) nounwind
+ ret void
+}
+
+define void @t5() nounwind {
+; X86-64-LABEL: t5:
+; X86-64: ## BB#0:
+; X86-64-NEXT: pushq %rax
+; X86-64-NEXT: xorl %edi, %edi
+; X86-64-NEXT: callq _pass_v1di
+; X86-64-NEXT: popq %rax
+; X86-64-NEXT: retq
+ call void @pass_v1di( <1 x i64> zeroinitializer )
+ ret void
+}
+
+declare i32 @pass_v8qi(...)
+declare void @pass_v1di(<1 x i64>)
diff --git a/test/CodeGen/X86/mmx-arg-passing.ll b/test/CodeGen/X86/mmx-arg-passing.ll
index 3a0fb95..4e00310 100644
--- a/test/CodeGen/X86/mmx-arg-passing.ll
+++ b/test/CodeGen/X86/mmx-arg-passing.ll
@@ -1,5 +1,5 @@
-; RUN: llc < %s -mtriple=i386-apple-darwin -mattr=+mmx | FileCheck %s -check-prefix=X86-32
-; RUN: llc < %s -mtriple=x86_64-apple-darwin -mattr=+mmx,+sse2 | FileCheck %s -check-prefix=X86-64
+; RUN: llc < %s -mtriple=i386-apple-darwin -mattr=+mmx | FileCheck %s --check-prefix=X86-32
+; RUN: llc < %s -mtriple=x86_64-apple-darwin -mattr=+mmx,+sse2 | FileCheck %s --check-prefix=X86-64
;
; On Darwin x86-32, v8i8, v4i16, v2i32 values are passed in MM[0-2].
; On Darwin x86-32, v1i64 values are passed in memory. In this example, they
@@ -10,29 +10,40 @@
@u1 = external global x86_mmx
define void @t1(x86_mmx %v1) nounwind {
- store x86_mmx %v1, x86_mmx* @u1, align 8
- ret void
-
; X86-32-LABEL: t1:
-; X86-32: movq %mm0
-
+; X86-32: ## BB#0:
+; X86-32-NEXT: movl L_u1$non_lazy_ptr, %eax
+; X86-32-NEXT: movq %mm0, (%eax)
+; X86-32-NEXT: retl
+;
; X86-64-LABEL: t1:
-; X86-64: movdq2q %xmm0
-; X86-64: movq %mm0
+; X86-64: ## BB#0:
+; X86-64-NEXT: movdq2q %xmm0, %mm0
+; X86-64-NEXT: movq _u1@{{.*}}(%rip), %rax
+; X86-64-NEXT: movq %mm0, (%rax)
+; X86-64-NEXT: retq
+ store x86_mmx %v1, x86_mmx* @u1, align 8
+ ret void
}
@u2 = external global x86_mmx
define void @t2(<1 x i64> %v1) nounwind {
+; X86-32-LABEL: t2:
+; X86-32: ## BB#0:
+; X86-32-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-32-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-32-NEXT: movl L_u2$non_lazy_ptr, %edx
+; X86-32-NEXT: movl %ecx, 4(%edx)
+; X86-32-NEXT: movl %eax, (%edx)
+; X86-32-NEXT: retl
+;
+; X86-64-LABEL: t2:
+; X86-64: ## BB#0:
+; X86-64-NEXT: movq _u2@{{.*}}(%rip), %rax
+; X86-64-NEXT: movq %rdi, (%rax)
+; X86-64-NEXT: retq
%tmp = bitcast <1 x i64> %v1 to x86_mmx
store x86_mmx %tmp, x86_mmx* @u2, align 8
ret void
-
-; X86-32-LABEL: t2:
-; X86-32: movl 4(%esp)
-; X86-32: movl 8(%esp)
-
-; X86-64-LABEL: t2:
-; X86-64: movq %rdi
}
-
diff --git a/test/CodeGen/X86/mmx-arg-passing2.ll b/test/CodeGen/X86/mmx-arg-passing2.ll
deleted file mode 100644
index c132d31..0000000
--- a/test/CodeGen/X86/mmx-arg-passing2.ll
+++ /dev/null
@@ -1,28 +0,0 @@
-; RUN: llc < %s -mtriple=x86_64-apple-darwin -mattr=+mmx,+sse2 | grep movdq2q | count 2
-; Since the add is not an MMX add, we don't have a movq2dq any more.
-
-@g_v8qi = external global <8 x i8>
-
-define void @t1() nounwind {
- %tmp3 = load <8 x i8>* @g_v8qi, align 8
- %tmp3a = bitcast <8 x i8> %tmp3 to x86_mmx
- %tmp4 = tail call i32 (...)* @pass_v8qi( x86_mmx %tmp3a ) nounwind
- ret void
-}
-
-define void @t2(x86_mmx %v1, x86_mmx %v2) nounwind {
- %v1a = bitcast x86_mmx %v1 to <8 x i8>
- %v2b = bitcast x86_mmx %v2 to <8 x i8>
- %tmp3 = add <8 x i8> %v1a, %v2b
- %tmp3a = bitcast <8 x i8> %tmp3 to x86_mmx
- %tmp4 = tail call i32 (...)* @pass_v8qi( x86_mmx %tmp3a ) nounwind
- ret void
-}
-
-define void @t3() nounwind {
- call void @pass_v1di( <1 x i64> zeroinitializer )
- ret void
-}
-
-declare i32 @pass_v8qi(...)
-declare void @pass_v1di(<1 x i64>)
diff --git a/test/CodeGen/X86/mmx-arith.ll b/test/CodeGen/X86/mmx-arith.ll
index 6817487..d9d1fbf 100644
--- a/test/CodeGen/X86/mmx-arith.ll
+++ b/test/CodeGen/X86/mmx-arith.ll
@@ -1,309 +1,308 @@
-; RUN: llc < %s -march=x86 -mattr=+mmx
+; RUN: llc < %s -march=x86 -mattr=+mmx,+sse2 | FileCheck -check-prefix=X32 %s
+; RUN: llc < %s -march=x86-64 -mattr=+mmx,+sse2 | FileCheck -check-prefix=X64 %s
;; A basic sanity check to make sure that MMX arithmetic actually compiles.
;; First is a straight translation of the original with bitcasts as needed.
-define void @foo(x86_mmx* %A, x86_mmx* %B) {
+; X32-LABEL: test0
+; X64-LABEL: test0
+define void @test0(x86_mmx* %A, x86_mmx* %B) {
entry:
- %tmp1 = load x86_mmx* %A ; <x86_mmx> [#uses=1]
- %tmp3 = load x86_mmx* %B ; <x86_mmx> [#uses=1]
- %tmp1a = bitcast x86_mmx %tmp1 to <8 x i8>
- %tmp3a = bitcast x86_mmx %tmp3 to <8 x i8>
- %tmp4 = add <8 x i8> %tmp1a, %tmp3a ; <<8 x i8>> [#uses=2]
- %tmp4a = bitcast <8 x i8> %tmp4 to x86_mmx
- store x86_mmx %tmp4a, x86_mmx* %A
- %tmp7 = load x86_mmx* %B ; <x86_mmx> [#uses=1]
- %tmp12 = tail call x86_mmx @llvm.x86.mmx.padds.b( x86_mmx %tmp4a, x86_mmx %tmp7 ) ; <x86_mmx> [#uses=2]
- store x86_mmx %tmp12, x86_mmx* %A
- %tmp16 = load x86_mmx* %B ; <x86_mmx> [#uses=1]
- %tmp21 = tail call x86_mmx @llvm.x86.mmx.paddus.b( x86_mmx %tmp12, x86_mmx %tmp16 ) ; <x86_mmx> [#uses=2]
- store x86_mmx %tmp21, x86_mmx* %A
- %tmp27 = load x86_mmx* %B ; <x86_mmx> [#uses=1]
- %tmp21a = bitcast x86_mmx %tmp21 to <8 x i8>
- %tmp27a = bitcast x86_mmx %tmp27 to <8 x i8>
- %tmp28 = sub <8 x i8> %tmp21a, %tmp27a ; <<8 x i8>> [#uses=2]
- %tmp28a = bitcast <8 x i8> %tmp28 to x86_mmx
- store x86_mmx %tmp28a, x86_mmx* %A
- %tmp31 = load x86_mmx* %B ; <x86_mmx> [#uses=1]
- %tmp36 = tail call x86_mmx @llvm.x86.mmx.psubs.b( x86_mmx %tmp28a, x86_mmx %tmp31 ) ; <x86_mmx> [#uses=2]
- store x86_mmx %tmp36, x86_mmx* %A
- %tmp40 = load x86_mmx* %B ; <x86_mmx> [#uses=1]
- %tmp45 = tail call x86_mmx @llvm.x86.mmx.psubus.b( x86_mmx %tmp36, x86_mmx %tmp40 ) ; <x86_mmx> [#uses=2]
- store x86_mmx %tmp45, x86_mmx* %A
- %tmp51 = load x86_mmx* %B ; <x86_mmx> [#uses=1]
- %tmp45a = bitcast x86_mmx %tmp45 to <8 x i8>
- %tmp51a = bitcast x86_mmx %tmp51 to <8 x i8>
- %tmp52 = mul <8 x i8> %tmp45a, %tmp51a ; <<8 x i8>> [#uses=2]
- %tmp52a = bitcast <8 x i8> %tmp52 to x86_mmx
- store x86_mmx %tmp52a, x86_mmx* %A
- %tmp57 = load x86_mmx* %B ; <x86_mmx> [#uses=1]
- %tmp57a = bitcast x86_mmx %tmp57 to <8 x i8>
- %tmp58 = and <8 x i8> %tmp52, %tmp57a ; <<8 x i8>> [#uses=2]
- %tmp58a = bitcast <8 x i8> %tmp58 to x86_mmx
- store x86_mmx %tmp58a, x86_mmx* %A
- %tmp63 = load x86_mmx* %B ; <x86_mmx> [#uses=1]
- %tmp63a = bitcast x86_mmx %tmp63 to <8 x i8>
- %tmp64 = or <8 x i8> %tmp58, %tmp63a ; <<8 x i8>> [#uses=2]
- %tmp64a = bitcast <8 x i8> %tmp64 to x86_mmx
- store x86_mmx %tmp64a, x86_mmx* %A
- %tmp69 = load x86_mmx* %B ; <x86_mmx> [#uses=1]
- %tmp69a = bitcast x86_mmx %tmp69 to <8 x i8>
- %tmp64b = bitcast x86_mmx %tmp64a to <8 x i8>
- %tmp70 = xor <8 x i8> %tmp64b, %tmp69a ; <<8 x i8>> [#uses=1]
- %tmp70a = bitcast <8 x i8> %tmp70 to x86_mmx
- store x86_mmx %tmp70a, x86_mmx* %A
- tail call void @llvm.x86.mmx.emms( )
- ret void
+ %tmp1 = load x86_mmx* %A
+ %tmp3 = load x86_mmx* %B
+ %tmp1a = bitcast x86_mmx %tmp1 to <8 x i8>
+ %tmp3a = bitcast x86_mmx %tmp3 to <8 x i8>
+ %tmp4 = add <8 x i8> %tmp1a, %tmp3a
+ %tmp4a = bitcast <8 x i8> %tmp4 to x86_mmx
+ store x86_mmx %tmp4a, x86_mmx* %A
+ %tmp7 = load x86_mmx* %B
+ %tmp12 = tail call x86_mmx @llvm.x86.mmx.padds.b(x86_mmx %tmp4a, x86_mmx %tmp7)
+ store x86_mmx %tmp12, x86_mmx* %A
+ %tmp16 = load x86_mmx* %B
+ %tmp21 = tail call x86_mmx @llvm.x86.mmx.paddus.b(x86_mmx %tmp12, x86_mmx %tmp16)
+ store x86_mmx %tmp21, x86_mmx* %A
+ %tmp27 = load x86_mmx* %B
+ %tmp21a = bitcast x86_mmx %tmp21 to <8 x i8>
+ %tmp27a = bitcast x86_mmx %tmp27 to <8 x i8>
+ %tmp28 = sub <8 x i8> %tmp21a, %tmp27a
+ %tmp28a = bitcast <8 x i8> %tmp28 to x86_mmx
+ store x86_mmx %tmp28a, x86_mmx* %A
+ %tmp31 = load x86_mmx* %B
+ %tmp36 = tail call x86_mmx @llvm.x86.mmx.psubs.b(x86_mmx %tmp28a, x86_mmx %tmp31)
+ store x86_mmx %tmp36, x86_mmx* %A
+ %tmp40 = load x86_mmx* %B
+ %tmp45 = tail call x86_mmx @llvm.x86.mmx.psubus.b(x86_mmx %tmp36, x86_mmx %tmp40)
+ store x86_mmx %tmp45, x86_mmx* %A
+ %tmp51 = load x86_mmx* %B
+ %tmp45a = bitcast x86_mmx %tmp45 to <8 x i8>
+ %tmp51a = bitcast x86_mmx %tmp51 to <8 x i8>
+ %tmp52 = mul <8 x i8> %tmp45a, %tmp51a
+ %tmp52a = bitcast <8 x i8> %tmp52 to x86_mmx
+ store x86_mmx %tmp52a, x86_mmx* %A
+ %tmp57 = load x86_mmx* %B
+ %tmp57a = bitcast x86_mmx %tmp57 to <8 x i8>
+ %tmp58 = and <8 x i8> %tmp52, %tmp57a
+ %tmp58a = bitcast <8 x i8> %tmp58 to x86_mmx
+ store x86_mmx %tmp58a, x86_mmx* %A
+ %tmp63 = load x86_mmx* %B
+ %tmp63a = bitcast x86_mmx %tmp63 to <8 x i8>
+ %tmp64 = or <8 x i8> %tmp58, %tmp63a
+ %tmp64a = bitcast <8 x i8> %tmp64 to x86_mmx
+ store x86_mmx %tmp64a, x86_mmx* %A
+ %tmp69 = load x86_mmx* %B
+ %tmp69a = bitcast x86_mmx %tmp69 to <8 x i8>
+ %tmp64b = bitcast x86_mmx %tmp64a to <8 x i8>
+ %tmp70 = xor <8 x i8> %tmp64b, %tmp69a
+ %tmp70a = bitcast <8 x i8> %tmp70 to x86_mmx
+ store x86_mmx %tmp70a, x86_mmx* %A
+ tail call void @llvm.x86.mmx.emms()
+ ret void
}
-define void @baz(x86_mmx* %A, x86_mmx* %B) {
+; X32-LABEL: test1
+; X64-LABEL: test1
+define void @test1(x86_mmx* %A, x86_mmx* %B) {
entry:
- %tmp1 = load x86_mmx* %A ; <x86_mmx> [#uses=1]
- %tmp3 = load x86_mmx* %B ; <x86_mmx> [#uses=1]
- %tmp1a = bitcast x86_mmx %tmp1 to <2 x i32>
- %tmp3a = bitcast x86_mmx %tmp3 to <2 x i32>
- %tmp4 = add <2 x i32> %tmp1a, %tmp3a ; <<2 x i32>> [#uses=2]
- %tmp4a = bitcast <2 x i32> %tmp4 to x86_mmx
- store x86_mmx %tmp4a, x86_mmx* %A
- %tmp9 = load x86_mmx* %B ; <x86_mmx> [#uses=1]
- %tmp9a = bitcast x86_mmx %tmp9 to <2 x i32>
- %tmp10 = sub <2 x i32> %tmp4, %tmp9a ; <<2 x i32>> [#uses=2]
- %tmp10a = bitcast <2 x i32> %tmp4 to x86_mmx
- store x86_mmx %tmp10a, x86_mmx* %A
- %tmp15 = load x86_mmx* %B ; <x86_mmx> [#uses=1]
- %tmp10b = bitcast x86_mmx %tmp10a to <2 x i32>
- %tmp15a = bitcast x86_mmx %tmp15 to <2 x i32>
- %tmp16 = mul <2 x i32> %tmp10b, %tmp15a ; <<2 x i32>> [#uses=2]
- %tmp16a = bitcast <2 x i32> %tmp16 to x86_mmx
- store x86_mmx %tmp16a, x86_mmx* %A
- %tmp21 = load x86_mmx* %B ; <x86_mmx> [#uses=1]
- %tmp16b = bitcast x86_mmx %tmp16a to <2 x i32>
- %tmp21a = bitcast x86_mmx %tmp21 to <2 x i32>
- %tmp22 = and <2 x i32> %tmp16b, %tmp21a ; <<2 x i32>> [#uses=2]
- %tmp22a = bitcast <2 x i32> %tmp22 to x86_mmx
- store x86_mmx %tmp22a, x86_mmx* %A
- %tmp27 = load x86_mmx* %B ; <x86_mmx> [#uses=1]
- %tmp22b = bitcast x86_mmx %tmp22a to <2 x i32>
- %tmp27a = bitcast x86_mmx %tmp27 to <2 x i32>
- %tmp28 = or <2 x i32> %tmp22b, %tmp27a ; <<2 x i32>> [#uses=2]
- %tmp28a = bitcast <2 x i32> %tmp28 to x86_mmx
- store x86_mmx %tmp28a, x86_mmx* %A
- %tmp33 = load x86_mmx* %B ; <x86_mmx> [#uses=1]
- %tmp28b = bitcast x86_mmx %tmp28a to <2 x i32>
- %tmp33a = bitcast x86_mmx %tmp33 to <2 x i32>
- %tmp34 = xor <2 x i32> %tmp28b, %tmp33a ; <<2 x i32>> [#uses=1]
- %tmp34a = bitcast <2 x i32> %tmp34 to x86_mmx
- store x86_mmx %tmp34a, x86_mmx* %A
- tail call void @llvm.x86.mmx.emms( )
- ret void
+ %tmp1 = load x86_mmx* %A
+ %tmp3 = load x86_mmx* %B
+ %tmp1a = bitcast x86_mmx %tmp1 to <2 x i32>
+ %tmp3a = bitcast x86_mmx %tmp3 to <2 x i32>
+ %tmp4 = add <2 x i32> %tmp1a, %tmp3a
+ %tmp4a = bitcast <2 x i32> %tmp4 to x86_mmx
+ store x86_mmx %tmp4a, x86_mmx* %A
+ %tmp9 = load x86_mmx* %B
+ %tmp9a = bitcast x86_mmx %tmp9 to <2 x i32>
+ %tmp10 = sub <2 x i32> %tmp4, %tmp9a
+ %tmp10a = bitcast <2 x i32> %tmp4 to x86_mmx
+ store x86_mmx %tmp10a, x86_mmx* %A
+ %tmp15 = load x86_mmx* %B
+ %tmp10b = bitcast x86_mmx %tmp10a to <2 x i32>
+ %tmp15a = bitcast x86_mmx %tmp15 to <2 x i32>
+ %tmp16 = mul <2 x i32> %tmp10b, %tmp15a
+ %tmp16a = bitcast <2 x i32> %tmp16 to x86_mmx
+ store x86_mmx %tmp16a, x86_mmx* %A
+ %tmp21 = load x86_mmx* %B
+ %tmp16b = bitcast x86_mmx %tmp16a to <2 x i32>
+ %tmp21a = bitcast x86_mmx %tmp21 to <2 x i32>
+ %tmp22 = and <2 x i32> %tmp16b, %tmp21a
+ %tmp22a = bitcast <2 x i32> %tmp22 to x86_mmx
+ store x86_mmx %tmp22a, x86_mmx* %A
+ %tmp27 = load x86_mmx* %B
+ %tmp22b = bitcast x86_mmx %tmp22a to <2 x i32>
+ %tmp27a = bitcast x86_mmx %tmp27 to <2 x i32>
+ %tmp28 = or <2 x i32> %tmp22b, %tmp27a
+ %tmp28a = bitcast <2 x i32> %tmp28 to x86_mmx
+ store x86_mmx %tmp28a, x86_mmx* %A
+ %tmp33 = load x86_mmx* %B
+ %tmp28b = bitcast x86_mmx %tmp28a to <2 x i32>
+ %tmp33a = bitcast x86_mmx %tmp33 to <2 x i32>
+ %tmp34 = xor <2 x i32> %tmp28b, %tmp33a
+ %tmp34a = bitcast <2 x i32> %tmp34 to x86_mmx
+ store x86_mmx %tmp34a, x86_mmx* %A
+ tail call void @llvm.x86.mmx.emms( )
+ ret void
}
-define void @bar(x86_mmx* %A, x86_mmx* %B) {
+; X32-LABEL: test2
+; X64-LABEL: test2
+define void @test2(x86_mmx* %A, x86_mmx* %B) {
entry:
- %tmp1 = load x86_mmx* %A ; <x86_mmx> [#uses=1]
- %tmp3 = load x86_mmx* %B ; <x86_mmx> [#uses=1]
- %tmp1a = bitcast x86_mmx %tmp1 to <4 x i16>
- %tmp3a = bitcast x86_mmx %tmp3 to <4 x i16>
- %tmp4 = add <4 x i16> %tmp1a, %tmp3a ; <<4 x i16>> [#uses=2]
- %tmp4a = bitcast <4 x i16> %tmp4 to x86_mmx
- store x86_mmx %tmp4a, x86_mmx* %A
- %tmp7 = load x86_mmx* %B ; <x86_mmx> [#uses=1]
- %tmp12 = tail call x86_mmx @llvm.x86.mmx.padds.w( x86_mmx %tmp4a, x86_mmx %tmp7 ) ; <x86_mmx> [#uses=2]
- store x86_mmx %tmp12, x86_mmx* %A
- %tmp16 = load x86_mmx* %B ; <x86_mmx> [#uses=1]
- %tmp21 = tail call x86_mmx @llvm.x86.mmx.paddus.w( x86_mmx %tmp12, x86_mmx %tmp16 ) ; <x86_mmx> [#uses=2]
- store x86_mmx %tmp21, x86_mmx* %A
- %tmp27 = load x86_mmx* %B ; <x86_mmx> [#uses=1]
- %tmp21a = bitcast x86_mmx %tmp21 to <4 x i16>
- %tmp27a = bitcast x86_mmx %tmp27 to <4 x i16>
- %tmp28 = sub <4 x i16> %tmp21a, %tmp27a ; <<4 x i16>> [#uses=2]
- %tmp28a = bitcast <4 x i16> %tmp28 to x86_mmx
- store x86_mmx %tmp28a, x86_mmx* %A
- %tmp31 = load x86_mmx* %B ; <x86_mmx> [#uses=1]
- %tmp36 = tail call x86_mmx @llvm.x86.mmx.psubs.w( x86_mmx %tmp28a, x86_mmx %tmp31 ) ; <x86_mmx> [#uses=2]
- store x86_mmx %tmp36, x86_mmx* %A
- %tmp40 = load x86_mmx* %B ; <x86_mmx> [#uses=1]
- %tmp45 = tail call x86_mmx @llvm.x86.mmx.psubus.w( x86_mmx %tmp36, x86_mmx %tmp40 ) ; <x86_mmx> [#uses=2]
- store x86_mmx %tmp45, x86_mmx* %A
- %tmp51 = load x86_mmx* %B ; <x86_mmx> [#uses=1]
- %tmp45a = bitcast x86_mmx %tmp45 to <4 x i16>
- %tmp51a = bitcast x86_mmx %tmp51 to <4 x i16>
- %tmp52 = mul <4 x i16> %tmp45a, %tmp51a ; <<4 x i16>> [#uses=2]
- %tmp52a = bitcast <4 x i16> %tmp52 to x86_mmx
- store x86_mmx %tmp52a, x86_mmx* %A
- %tmp55 = load x86_mmx* %B ; <x86_mmx> [#uses=1]
- %tmp60 = tail call x86_mmx @llvm.x86.mmx.pmulh.w( x86_mmx %tmp52a, x86_mmx %tmp55 ) ; <x86_mmx> [#uses=2]
- store x86_mmx %tmp60, x86_mmx* %A
- %tmp64 = load x86_mmx* %B ; <x86_mmx> [#uses=1]
- %tmp69 = tail call x86_mmx @llvm.x86.mmx.pmadd.wd( x86_mmx %tmp60, x86_mmx %tmp64 ) ; <x86_mmx> [#uses=1]
- %tmp70 = bitcast x86_mmx %tmp69 to x86_mmx ; <x86_mmx> [#uses=2]
- store x86_mmx %tmp70, x86_mmx* %A
- %tmp75 = load x86_mmx* %B ; <x86_mmx> [#uses=1]
- %tmp70a = bitcast x86_mmx %tmp70 to <4 x i16>
- %tmp75a = bitcast x86_mmx %tmp75 to <4 x i16>
- %tmp76 = and <4 x i16> %tmp70a, %tmp75a ; <<4 x i16>> [#uses=2]
- %tmp76a = bitcast <4 x i16> %tmp76 to x86_mmx
- store x86_mmx %tmp76a, x86_mmx* %A
- %tmp81 = load x86_mmx* %B ; <x86_mmx> [#uses=1]
- %tmp76b = bitcast x86_mmx %tmp76a to <4 x i16>
- %tmp81a = bitcast x86_mmx %tmp81 to <4 x i16>
- %tmp82 = or <4 x i16> %tmp76b, %tmp81a ; <<4 x i16>> [#uses=2]
- %tmp82a = bitcast <4 x i16> %tmp82 to x86_mmx
- store x86_mmx %tmp82a, x86_mmx* %A
- %tmp87 = load x86_mmx* %B ; <x86_mmx> [#uses=1]
- %tmp82b = bitcast x86_mmx %tmp82a to <4 x i16>
- %tmp87a = bitcast x86_mmx %tmp87 to <4 x i16>
- %tmp88 = xor <4 x i16> %tmp82b, %tmp87a ; <<4 x i16>> [#uses=1]
- %tmp88a = bitcast <4 x i16> %tmp88 to x86_mmx
- store x86_mmx %tmp88a, x86_mmx* %A
- tail call void @llvm.x86.mmx.emms( )
- ret void
+ %tmp1 = load x86_mmx* %A
+ %tmp3 = load x86_mmx* %B
+ %tmp1a = bitcast x86_mmx %tmp1 to <4 x i16>
+ %tmp3a = bitcast x86_mmx %tmp3 to <4 x i16>
+ %tmp4 = add <4 x i16> %tmp1a, %tmp3a
+ %tmp4a = bitcast <4 x i16> %tmp4 to x86_mmx
+ store x86_mmx %tmp4a, x86_mmx* %A
+ %tmp7 = load x86_mmx* %B
+ %tmp12 = tail call x86_mmx @llvm.x86.mmx.padds.w(x86_mmx %tmp4a, x86_mmx %tmp7)
+ store x86_mmx %tmp12, x86_mmx* %A
+ %tmp16 = load x86_mmx* %B
+ %tmp21 = tail call x86_mmx @llvm.x86.mmx.paddus.w(x86_mmx %tmp12, x86_mmx %tmp16)
+ store x86_mmx %tmp21, x86_mmx* %A
+ %tmp27 = load x86_mmx* %B
+ %tmp21a = bitcast x86_mmx %tmp21 to <4 x i16>
+ %tmp27a = bitcast x86_mmx %tmp27 to <4 x i16>
+ %tmp28 = sub <4 x i16> %tmp21a, %tmp27a
+ %tmp28a = bitcast <4 x i16> %tmp28 to x86_mmx
+ store x86_mmx %tmp28a, x86_mmx* %A
+ %tmp31 = load x86_mmx* %B
+ %tmp36 = tail call x86_mmx @llvm.x86.mmx.psubs.w(x86_mmx %tmp28a, x86_mmx %tmp31)
+ store x86_mmx %tmp36, x86_mmx* %A
+ %tmp40 = load x86_mmx* %B
+ %tmp45 = tail call x86_mmx @llvm.x86.mmx.psubus.w(x86_mmx %tmp36, x86_mmx %tmp40)
+ store x86_mmx %tmp45, x86_mmx* %A
+ %tmp51 = load x86_mmx* %B
+ %tmp45a = bitcast x86_mmx %tmp45 to <4 x i16>
+ %tmp51a = bitcast x86_mmx %tmp51 to <4 x i16>
+ %tmp52 = mul <4 x i16> %tmp45a, %tmp51a
+ %tmp52a = bitcast <4 x i16> %tmp52 to x86_mmx
+ store x86_mmx %tmp52a, x86_mmx* %A
+ %tmp55 = load x86_mmx* %B
+ %tmp60 = tail call x86_mmx @llvm.x86.mmx.pmulh.w(x86_mmx %tmp52a, x86_mmx %tmp55)
+ store x86_mmx %tmp60, x86_mmx* %A
+ %tmp64 = load x86_mmx* %B
+ %tmp69 = tail call x86_mmx @llvm.x86.mmx.pmadd.wd(x86_mmx %tmp60, x86_mmx %tmp64)
+ %tmp70 = bitcast x86_mmx %tmp69 to x86_mmx
+ store x86_mmx %tmp70, x86_mmx* %A
+ %tmp75 = load x86_mmx* %B
+ %tmp70a = bitcast x86_mmx %tmp70 to <4 x i16>
+ %tmp75a = bitcast x86_mmx %tmp75 to <4 x i16>
+ %tmp76 = and <4 x i16> %tmp70a, %tmp75a
+ %tmp76a = bitcast <4 x i16> %tmp76 to x86_mmx
+ store x86_mmx %tmp76a, x86_mmx* %A
+ %tmp81 = load x86_mmx* %B
+ %tmp76b = bitcast x86_mmx %tmp76a to <4 x i16>
+ %tmp81a = bitcast x86_mmx %tmp81 to <4 x i16>
+ %tmp82 = or <4 x i16> %tmp76b, %tmp81a
+ %tmp82a = bitcast <4 x i16> %tmp82 to x86_mmx
+ store x86_mmx %tmp82a, x86_mmx* %A
+ %tmp87 = load x86_mmx* %B
+ %tmp82b = bitcast x86_mmx %tmp82a to <4 x i16>
+ %tmp87a = bitcast x86_mmx %tmp87 to <4 x i16>
+ %tmp88 = xor <4 x i16> %tmp82b, %tmp87a
+ %tmp88a = bitcast <4 x i16> %tmp88 to x86_mmx
+ store x86_mmx %tmp88a, x86_mmx* %A
+ tail call void @llvm.x86.mmx.emms( )
+ ret void
}
-;; The following is modified to use MMX intrinsics everywhere they work.
+; X32-LABEL: test3
+define <1 x i64> @test3(<1 x i64>* %a, <1 x i64>* %b, i32 %count) nounwind {
+entry:
+ %tmp2942 = icmp eq i32 %count, 0
+ br i1 %tmp2942, label %bb31, label %bb26
+
+bb26:
+; X32: addl
+; X32: adcl
+ %i.037.0 = phi i32 [ 0, %entry ], [ %tmp25, %bb26 ]
+ %sum.035.0 = phi <1 x i64> [ zeroinitializer, %entry ], [ %tmp22, %bb26 ]
+ %tmp13 = getelementptr <1 x i64>* %b, i32 %i.037.0
+ %tmp14 = load <1 x i64>* %tmp13
+ %tmp18 = getelementptr <1 x i64>* %a, i32 %i.037.0
+ %tmp19 = load <1 x i64>* %tmp18
+ %tmp21 = add <1 x i64> %tmp19, %tmp14
+ %tmp22 = add <1 x i64> %tmp21, %sum.035.0
+ %tmp25 = add i32 %i.037.0, 1
+ %tmp29 = icmp ult i32 %tmp25, %count
+ br i1 %tmp29, label %bb26, label %bb31
+
+bb31:
+ %sum.035.1 = phi <1 x i64> [ zeroinitializer, %entry ], [ %tmp22, %bb26 ]
+ ret <1 x i64> %sum.035.1
+}
-define void @fooa(x86_mmx* %A, x86_mmx* %B) {
+; There are no MMX operations here, so we use XMM or i64.
+; X64-LABEL: ti8
+define void @ti8(double %a, double %b) nounwind {
entry:
- %tmp1 = load x86_mmx* %A ; <x86_mmx> [#uses=1]
- %tmp3 = load x86_mmx* %B ; <x86_mmx> [#uses=1]
- %tmp4 = tail call x86_mmx @llvm.x86.mmx.padd.b( x86_mmx %tmp1, x86_mmx %tmp3 ) ; <x86_mmx> [#uses=2]
- store x86_mmx %tmp4, x86_mmx* %A
- %tmp7 = load x86_mmx* %B ; <x86_mmx> [#uses=1]
- %tmp12 = tail call x86_mmx @llvm.x86.mmx.padds.b( x86_mmx %tmp4, x86_mmx %tmp7 ) ; <x86_mmx> [#uses=2]
- store x86_mmx %tmp12, x86_mmx* %A
- %tmp16 = load x86_mmx* %B ; <x86_mmx> [#uses=1]
- %tmp21 = tail call x86_mmx @llvm.x86.mmx.paddus.b( x86_mmx %tmp12, x86_mmx %tmp16 ) ; <x86_mmx> [#uses=2]
- store x86_mmx %tmp21, x86_mmx* %A
- %tmp27 = load x86_mmx* %B ; <x86_mmx> [#uses=1]
- %tmp28 = tail call x86_mmx @llvm.x86.mmx.psub.b( x86_mmx %tmp21, x86_mmx %tmp27 ) ; <x86_mmx> [#uses=2]
- store x86_mmx %tmp28, x86_mmx* %A
- %tmp31 = load x86_mmx* %B ; <x86_mmx> [#uses=1]
- %tmp36 = tail call x86_mmx @llvm.x86.mmx.psubs.b( x86_mmx %tmp28, x86_mmx %tmp31 ) ; <x86_mmx> [#uses=2]
- store x86_mmx %tmp36, x86_mmx* %A
- %tmp40 = load x86_mmx* %B ; <x86_mmx> [#uses=1]
- %tmp45 = tail call x86_mmx @llvm.x86.mmx.psubus.b( x86_mmx %tmp36, x86_mmx %tmp40 ) ; <x86_mmx> [#uses=2]
- store x86_mmx %tmp45, x86_mmx* %A
- %tmp51 = load x86_mmx* %B ; <x86_mmx> [#uses=1]
- %tmp51a = bitcast x86_mmx %tmp51 to i64
- %tmp51aa = bitcast i64 %tmp51a to <8 x i8>
- %tmp51b = bitcast x86_mmx %tmp45 to <8 x i8>
- %tmp52 = mul <8 x i8> %tmp51b, %tmp51aa ; <x86_mmx> [#uses=2]
- %tmp52a = bitcast <8 x i8> %tmp52 to i64
- %tmp52aa = bitcast i64 %tmp52a to x86_mmx
- store x86_mmx %tmp52aa, x86_mmx* %A
- %tmp57 = load x86_mmx* %B ; <x86_mmx> [#uses=1]
- %tmp58 = tail call x86_mmx @llvm.x86.mmx.pand( x86_mmx %tmp51, x86_mmx %tmp57 ) ; <x86_mmx> [#uses=2]
- store x86_mmx %tmp58, x86_mmx* %A
- %tmp63 = load x86_mmx* %B ; <x86_mmx> [#uses=1]
- %tmp64 = tail call x86_mmx @llvm.x86.mmx.por( x86_mmx %tmp58, x86_mmx %tmp63 ) ; <x86_mmx> [#uses=2]
- store x86_mmx %tmp64, x86_mmx* %A
- %tmp69 = load x86_mmx* %B ; <x86_mmx> [#uses=1]
- %tmp70 = tail call x86_mmx @llvm.x86.mmx.pxor( x86_mmx %tmp64, x86_mmx %tmp69 ) ; <x86_mmx> [#uses=2]
- store x86_mmx %tmp70, x86_mmx* %A
- tail call void @llvm.x86.mmx.emms( )
- ret void
+ %tmp1 = bitcast double %a to <8 x i8>
+ %tmp2 = bitcast double %b to <8 x i8>
+ %tmp3 = add <8 x i8> %tmp1, %tmp2
+; X64: paddb
+ store <8 x i8> %tmp3, <8 x i8>* null
+ ret void
}
-define void @baza(x86_mmx* %A, x86_mmx* %B) {
+; X64-LABEL: ti16
+define void @ti16(double %a, double %b) nounwind {
entry:
- %tmp1 = load x86_mmx* %A ; <x86_mmx> [#uses=1]
- %tmp3 = load x86_mmx* %B ; <x86_mmx> [#uses=1]
- %tmp4 = tail call x86_mmx @llvm.x86.mmx.padd.d( x86_mmx %tmp1, x86_mmx %tmp3 ) ; <x86_mmx> [#uses=2]
- store x86_mmx %tmp4, x86_mmx* %A
- %tmp9 = load x86_mmx* %B ; <x86_mmx> [#uses=1]
- %tmp10 = tail call x86_mmx @llvm.x86.mmx.psub.d( x86_mmx %tmp4, x86_mmx %tmp9 ) ; <x86_mmx> [#uses=2]
- store x86_mmx %tmp10, x86_mmx* %A
- %tmp15 = load x86_mmx* %B ; <x86_mmx> [#uses=1]
- %tmp10a = bitcast x86_mmx %tmp10 to <2 x i32>
- %tmp15a = bitcast x86_mmx %tmp15 to <2 x i32>
- %tmp16 = mul <2 x i32> %tmp10a, %tmp15a ; <x86_mmx> [#uses=2]
- %tmp16a = bitcast <2 x i32> %tmp16 to x86_mmx
- store x86_mmx %tmp16a, x86_mmx* %A
- %tmp21 = load x86_mmx* %B ; <x86_mmx> [#uses=1]
- %tmp22 = tail call x86_mmx @llvm.x86.mmx.pand( x86_mmx %tmp16a, x86_mmx %tmp21 ) ; <x86_mmx> [#uses=2]
- store x86_mmx %tmp22, x86_mmx* %A
- %tmp27 = load x86_mmx* %B ; <x86_mmx> [#uses=1]
- %tmp28 = tail call x86_mmx @llvm.x86.mmx.por( x86_mmx %tmp22, x86_mmx %tmp27 ) ; <x86_mmx> [#uses=2]
- store x86_mmx %tmp28, x86_mmx* %A
- %tmp33 = load x86_mmx* %B ; <x86_mmx> [#uses=1]
- %tmp34 = tail call x86_mmx @llvm.x86.mmx.pxor( x86_mmx %tmp28, x86_mmx %tmp33 ) ; <x86_mmx> [#uses=2]
- store x86_mmx %tmp34, x86_mmx* %A
- tail call void @llvm.x86.mmx.emms( )
- ret void
+ %tmp1 = bitcast double %a to <4 x i16>
+ %tmp2 = bitcast double %b to <4 x i16>
+ %tmp3 = add <4 x i16> %tmp1, %tmp2
+; X64: paddw
+ store <4 x i16> %tmp3, <4 x i16>* null
+ ret void
}
-define void @bara(x86_mmx* %A, x86_mmx* %B) {
+; X64-LABEL: ti32
+define void @ti32(double %a, double %b) nounwind {
entry:
- %tmp1 = load x86_mmx* %A ; <x86_mmx> [#uses=1]
- %tmp3 = load x86_mmx* %B ; <x86_mmx> [#uses=1]
- %tmp4 = tail call x86_mmx @llvm.x86.mmx.padd.w( x86_mmx %tmp1, x86_mmx %tmp3 ) ; <x86_mmx> [#uses=2]
- store x86_mmx %tmp4, x86_mmx* %A
- %tmp7 = load x86_mmx* %B ; <x86_mmx> [#uses=1]
- %tmp12 = tail call x86_mmx @llvm.x86.mmx.padds.w( x86_mmx %tmp4, x86_mmx %tmp7 ) ; <x86_mmx> [#uses=2]
- store x86_mmx %tmp12, x86_mmx* %A
- %tmp16 = load x86_mmx* %B ; <x86_mmx> [#uses=1]
- %tmp21 = tail call x86_mmx @llvm.x86.mmx.paddus.w( x86_mmx %tmp12, x86_mmx %tmp16 ) ; <x86_mmx> [#uses=2]
- store x86_mmx %tmp21, x86_mmx* %A
- %tmp27 = load x86_mmx* %B ; <x86_mmx> [#uses=1]
- %tmp28 = tail call x86_mmx @llvm.x86.mmx.psub.w( x86_mmx %tmp21, x86_mmx %tmp27 ) ; <x86_mmx> [#uses=2]
- store x86_mmx %tmp28, x86_mmx* %A
- %tmp31 = load x86_mmx* %B ; <x86_mmx> [#uses=1]
- %tmp36 = tail call x86_mmx @llvm.x86.mmx.psubs.w( x86_mmx %tmp28, x86_mmx %tmp31 ) ; <x86_mmx> [#uses=2]
- store x86_mmx %tmp36, x86_mmx* %A
- %tmp40 = load x86_mmx* %B ; <x86_mmx> [#uses=1]
- %tmp45 = tail call x86_mmx @llvm.x86.mmx.psubus.w( x86_mmx %tmp36, x86_mmx %tmp40 ) ; <x86_mmx> [#uses=2]
- store x86_mmx %tmp45, x86_mmx* %A
- %tmp51 = load x86_mmx* %B ; <x86_mmx> [#uses=1]
- %tmp52 = tail call x86_mmx @llvm.x86.mmx.pmull.w( x86_mmx %tmp45, x86_mmx %tmp51 ) ; <x86_mmx> [#uses=2]
- store x86_mmx %tmp52, x86_mmx* %A
- %tmp55 = load x86_mmx* %B ; <x86_mmx> [#uses=1]
- %tmp60 = tail call x86_mmx @llvm.x86.mmx.pmulh.w( x86_mmx %tmp52, x86_mmx %tmp55 ) ; <x86_mmx> [#uses=2]
- store x86_mmx %tmp60, x86_mmx* %A
- %tmp64 = load x86_mmx* %B ; <x86_mmx> [#uses=1]
- %tmp69 = tail call x86_mmx @llvm.x86.mmx.pmadd.wd( x86_mmx %tmp60, x86_mmx %tmp64 ) ; <x86_mmx> [#uses=1]
- %tmp70 = bitcast x86_mmx %tmp69 to x86_mmx ; <x86_mmx> [#uses=2]
- store x86_mmx %tmp70, x86_mmx* %A
- %tmp75 = load x86_mmx* %B ; <x86_mmx> [#uses=1]
- %tmp76 = tail call x86_mmx @llvm.x86.mmx.pand( x86_mmx %tmp70, x86_mmx %tmp75 ) ; <x86_mmx> [#uses=2]
- store x86_mmx %tmp76, x86_mmx* %A
- %tmp81 = load x86_mmx* %B ; <x86_mmx> [#uses=1]
- %tmp82 = tail call x86_mmx @llvm.x86.mmx.por( x86_mmx %tmp76, x86_mmx %tmp81 ) ; <x86_mmx> [#uses=2]
- store x86_mmx %tmp82, x86_mmx* %A
- %tmp87 = load x86_mmx* %B ; <x86_mmx> [#uses=1]
- %tmp88 = tail call x86_mmx @llvm.x86.mmx.pxor( x86_mmx %tmp82, x86_mmx %tmp87 ) ; <x86_mmx> [#uses=2]
- store x86_mmx %tmp88, x86_mmx* %A
- tail call void @llvm.x86.mmx.emms( )
- ret void
+ %tmp1 = bitcast double %a to <2 x i32>
+ %tmp2 = bitcast double %b to <2 x i32>
+ %tmp3 = add <2 x i32> %tmp1, %tmp2
+; X64: paddd
+ store <2 x i32> %tmp3, <2 x i32>* null
+ ret void
}
-declare x86_mmx @llvm.x86.mmx.paddus.b(x86_mmx, x86_mmx)
+; X64-LABEL: ti64
+define void @ti64(double %a, double %b) nounwind {
+entry:
+ %tmp1 = bitcast double %a to <1 x i64>
+ %tmp2 = bitcast double %b to <1 x i64>
+ %tmp3 = add <1 x i64> %tmp1, %tmp2
+; X64: addq
+ store <1 x i64> %tmp3, <1 x i64>* null
+ ret void
+}
-declare x86_mmx @llvm.x86.mmx.psubus.b(x86_mmx, x86_mmx)
+; MMX intrinsic calls get us MMX instructions.
+; X64-LABEL: ti8a
+define void @ti8a(double %a, double %b) nounwind {
+entry:
+ %tmp1 = bitcast double %a to x86_mmx
+; X64: movdq2q
+ %tmp2 = bitcast double %b to x86_mmx
+; X64: movdq2q
+ %tmp3 = tail call x86_mmx @llvm.x86.mmx.padd.b(x86_mmx %tmp1, x86_mmx %tmp2)
+ store x86_mmx %tmp3, x86_mmx* null
+ ret void
+}
-declare x86_mmx @llvm.x86.mmx.paddus.w(x86_mmx, x86_mmx)
+; X64-LABEL: ti16a
+define void @ti16a(double %a, double %b) nounwind {
+entry:
+ %tmp1 = bitcast double %a to x86_mmx
+; X64: movdq2q
+ %tmp2 = bitcast double %b to x86_mmx
+; X64: movdq2q
+ %tmp3 = tail call x86_mmx @llvm.x86.mmx.padd.w(x86_mmx %tmp1, x86_mmx %tmp2)
+ store x86_mmx %tmp3, x86_mmx* null
+ ret void
+}
-declare x86_mmx @llvm.x86.mmx.psubus.w(x86_mmx, x86_mmx)
+; X64-LABEL: ti32a
+define void @ti32a(double %a, double %b) nounwind {
+entry:
+ %tmp1 = bitcast double %a to x86_mmx
+; X64: movdq2q
+ %tmp2 = bitcast double %b to x86_mmx
+; X64: movdq2q
+ %tmp3 = tail call x86_mmx @llvm.x86.mmx.padd.d(x86_mmx %tmp1, x86_mmx %tmp2)
+ store x86_mmx %tmp3, x86_mmx* null
+ ret void
+}
-declare x86_mmx @llvm.x86.mmx.pmulh.w(x86_mmx, x86_mmx)
+; X64-LABEL: ti64a
+define void @ti64a(double %a, double %b) nounwind {
+entry:
+ %tmp1 = bitcast double %a to x86_mmx
+; X64: movdq2q
+ %tmp2 = bitcast double %b to x86_mmx
+; X64: movdq2q
+ %tmp3 = tail call x86_mmx @llvm.x86.mmx.padd.q(x86_mmx %tmp1, x86_mmx %tmp2)
+ store x86_mmx %tmp3, x86_mmx* null
+ ret void
+}
+
+declare x86_mmx @llvm.x86.mmx.padd.b(x86_mmx, x86_mmx)
+declare x86_mmx @llvm.x86.mmx.padd.w(x86_mmx, x86_mmx)
+declare x86_mmx @llvm.x86.mmx.padd.d(x86_mmx, x86_mmx)
+declare x86_mmx @llvm.x86.mmx.padd.q(x86_mmx, x86_mmx)
+declare x86_mmx @llvm.x86.mmx.paddus.b(x86_mmx, x86_mmx)
+declare x86_mmx @llvm.x86.mmx.psubus.b(x86_mmx, x86_mmx)
+declare x86_mmx @llvm.x86.mmx.paddus.w(x86_mmx, x86_mmx)
+declare x86_mmx @llvm.x86.mmx.psubus.w(x86_mmx, x86_mmx)
+declare x86_mmx @llvm.x86.mmx.pmulh.w(x86_mmx, x86_mmx)
declare x86_mmx @llvm.x86.mmx.pmadd.wd(x86_mmx, x86_mmx)
declare void @llvm.x86.mmx.emms()
-declare x86_mmx @llvm.x86.mmx.padd.b(x86_mmx, x86_mmx)
-declare x86_mmx @llvm.x86.mmx.padd.w(x86_mmx, x86_mmx)
-declare x86_mmx @llvm.x86.mmx.padd.d(x86_mmx, x86_mmx)
declare x86_mmx @llvm.x86.mmx.padds.b(x86_mmx, x86_mmx)
declare x86_mmx @llvm.x86.mmx.padds.w(x86_mmx, x86_mmx)
-declare x86_mmx @llvm.x86.mmx.padds.d(x86_mmx, x86_mmx)
declare x86_mmx @llvm.x86.mmx.psubs.b(x86_mmx, x86_mmx)
declare x86_mmx @llvm.x86.mmx.psubs.w(x86_mmx, x86_mmx)
-declare x86_mmx @llvm.x86.mmx.psubs.d(x86_mmx, x86_mmx)
-declare x86_mmx @llvm.x86.mmx.psub.b(x86_mmx, x86_mmx)
-declare x86_mmx @llvm.x86.mmx.psub.w(x86_mmx, x86_mmx)
-declare x86_mmx @llvm.x86.mmx.psub.d(x86_mmx, x86_mmx)
-declare x86_mmx @llvm.x86.mmx.pmull.w(x86_mmx, x86_mmx)
-declare x86_mmx @llvm.x86.mmx.pand(x86_mmx, x86_mmx)
-declare x86_mmx @llvm.x86.mmx.por(x86_mmx, x86_mmx)
-declare x86_mmx @llvm.x86.mmx.pxor(x86_mmx, x86_mmx)
diff --git a/test/CodeGen/X86/mmx-bitcast-to-i64.ll b/test/CodeGen/X86/mmx-bitcast-to-i64.ll
deleted file mode 100644
index 8b1840a..0000000
--- a/test/CodeGen/X86/mmx-bitcast-to-i64.ll
+++ /dev/null
@@ -1,31 +0,0 @@
-; RUN: llc < %s -march=x86-64 | grep movd | count 4
-
-define i64 @foo(x86_mmx* %p) {
- %t = load x86_mmx* %p
- %u = tail call x86_mmx @llvm.x86.mmx.padd.q(x86_mmx %t, x86_mmx %t)
- %s = bitcast x86_mmx %u to i64
- ret i64 %s
-}
-define i64 @goo(x86_mmx* %p) {
- %t = load x86_mmx* %p
- %u = tail call x86_mmx @llvm.x86.mmx.padd.d(x86_mmx %t, x86_mmx %t)
- %s = bitcast x86_mmx %u to i64
- ret i64 %s
-}
-define i64 @hoo(x86_mmx* %p) {
- %t = load x86_mmx* %p
- %u = tail call x86_mmx @llvm.x86.mmx.padd.w(x86_mmx %t, x86_mmx %t)
- %s = bitcast x86_mmx %u to i64
- ret i64 %s
-}
-define i64 @ioo(x86_mmx* %p) {
- %t = load x86_mmx* %p
- %u = tail call x86_mmx @llvm.x86.mmx.padd.b(x86_mmx %t, x86_mmx %t)
- %s = bitcast x86_mmx %u to i64
- ret i64 %s
-}
-
-declare x86_mmx @llvm.x86.mmx.padd.b(x86_mmx, x86_mmx)
-declare x86_mmx @llvm.x86.mmx.padd.w(x86_mmx, x86_mmx)
-declare x86_mmx @llvm.x86.mmx.padd.d(x86_mmx, x86_mmx)
-declare x86_mmx @llvm.x86.mmx.padd.q(x86_mmx, x86_mmx)
diff --git a/test/CodeGen/X86/mmx-bitcast.ll b/test/CodeGen/X86/mmx-bitcast.ll
new file mode 100644
index 0000000..a2eb96a
--- /dev/null
+++ b/test/CodeGen/X86/mmx-bitcast.ll
@@ -0,0 +1,109 @@
+; RUN: llc < %s -mtriple=x86_64-darwin -mattr=+mmx,+sse2 | FileCheck %s
+
+define i64 @t0(x86_mmx* %p) {
+; CHECK-LABEL: t0:
+; CHECK: ## BB#0:
+; CHECK-NEXT: movq
+; CHECK-NEXT: paddq %mm0, %mm0
+; CHECK-NEXT: movd %mm0, %rax
+; CHECK-NEXT: retq
+ %t = load x86_mmx* %p
+ %u = tail call x86_mmx @llvm.x86.mmx.padd.q(x86_mmx %t, x86_mmx %t)
+ %s = bitcast x86_mmx %u to i64
+ ret i64 %s
+}
+
+define i64 @t1(x86_mmx* %p) {
+; CHECK-LABEL: t1:
+; CHECK: ## BB#0:
+; CHECK-NEXT: movq
+; CHECK-NEXT: paddd %mm0, %mm0
+; CHECK-NEXT: movd %mm0, %rax
+; CHECK-NEXT: retq
+ %t = load x86_mmx* %p
+ %u = tail call x86_mmx @llvm.x86.mmx.padd.d(x86_mmx %t, x86_mmx %t)
+ %s = bitcast x86_mmx %u to i64
+ ret i64 %s
+}
+
+define i64 @t2(x86_mmx* %p) {
+; CHECK-LABEL: t2:
+; CHECK: ## BB#0:
+; CHECK-NEXT: movq
+; CHECK-NEXT: paddw %mm0, %mm0
+; CHECK-NEXT: movd %mm0, %rax
+; CHECK-NEXT: retq
+ %t = load x86_mmx* %p
+ %u = tail call x86_mmx @llvm.x86.mmx.padd.w(x86_mmx %t, x86_mmx %t)
+ %s = bitcast x86_mmx %u to i64
+ ret i64 %s
+}
+
+define i64 @t3(x86_mmx* %p) {
+; CHECK-LABEL: t3:
+; CHECK: ## BB#0:
+; CHECK-NEXT: movq
+; CHECK-NEXT: paddb %mm0, %mm0
+; CHECK-NEXT: movd %mm0, %rax
+; CHECK-NEXT: retq
+ %t = load x86_mmx* %p
+ %u = tail call x86_mmx @llvm.x86.mmx.padd.b(x86_mmx %t, x86_mmx %t)
+ %s = bitcast x86_mmx %u to i64
+ ret i64 %s
+}
+
+@R = external global x86_mmx
+
+define void @t4(<1 x i64> %A, <1 x i64> %B) {
+; CHECK-LABEL: t4:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: movd
+; CHECK-NEXT: movd
+; CHECK: retq
+entry:
+ %tmp2 = bitcast <1 x i64> %A to x86_mmx
+ %tmp3 = bitcast <1 x i64> %B to x86_mmx
+ %tmp7 = tail call x86_mmx @llvm.x86.mmx.paddus.w(x86_mmx %tmp2, x86_mmx %tmp3)
+ store x86_mmx %tmp7, x86_mmx* @R
+ tail call void @llvm.x86.mmx.emms()
+ ret void
+}
+
+define i64 @t5(i32 %a, i32 %b) nounwind readnone {
+; CHECK-LABEL: t5:
+; CHECK: ## BB#0:
+; CHECK-NEXT: movd
+; CHECK-NEXT: movd
+; CHECK-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
+; CHECK-NEXT: pshufd {{.*#+}} xmm0 = xmm1[0,1,1,3]
+; CHECK-NEXT: movd %xmm0, %rax
+; CHECK-NEXT: retq
+ %v0 = insertelement <2 x i32> undef, i32 %a, i32 0
+ %v1 = insertelement <2 x i32> %v0, i32 %b, i32 1
+ %conv = bitcast <2 x i32> %v1 to i64
+ ret i64 %conv
+}
+
+declare x86_mmx @llvm.x86.mmx.pslli.q(x86_mmx, i32)
+
+define <1 x i64> @t6(i64 %t) {
+; CHECK-LABEL: t6:
+; CHECK: ## BB#0:
+; CHECK-NEXT: movd
+; CHECK-NEXT: psllq $48, %mm0
+; CHECK-NEXT: movd %mm0, %rax
+; CHECK-NEXT: retq
+ %t1 = insertelement <1 x i64> undef, i64 %t, i32 0
+ %t0 = bitcast <1 x i64> %t1 to x86_mmx
+ %t2 = tail call x86_mmx @llvm.x86.mmx.pslli.q(x86_mmx %t0, i32 48)
+ %t3 = bitcast x86_mmx %t2 to <1 x i64>
+ ret <1 x i64> %t3
+}
+
+declare x86_mmx @llvm.x86.mmx.paddus.w(x86_mmx, x86_mmx)
+declare x86_mmx @llvm.x86.mmx.padd.b(x86_mmx, x86_mmx)
+declare x86_mmx @llvm.x86.mmx.padd.w(x86_mmx, x86_mmx)
+declare x86_mmx @llvm.x86.mmx.padd.d(x86_mmx, x86_mmx)
+declare x86_mmx @llvm.x86.mmx.padd.q(x86_mmx, x86_mmx)
+declare void @llvm.x86.mmx.emms()
+
diff --git a/test/CodeGen/X86/mmx-emms.ll b/test/CodeGen/X86/mmx-emms.ll
deleted file mode 100644
index 5ff2588..0000000
--- a/test/CodeGen/X86/mmx-emms.ll
+++ /dev/null
@@ -1,11 +0,0 @@
-; RUN: llc < %s -march=x86 -mattr=+mmx | grep emms
-define void @foo() {
-entry:
- call void @llvm.x86.mmx.emms( )
- br label %return
-
-return: ; preds = %entry
- ret void
-}
-
-declare void @llvm.x86.mmx.emms()
diff --git a/test/CodeGen/X86/mmx-fold-load.ll b/test/CodeGen/X86/mmx-fold-load.ll
new file mode 100644
index 0000000..d49edac
--- /dev/null
+++ b/test/CodeGen/X86/mmx-fold-load.ll
@@ -0,0 +1,282 @@
+; RUN: llc < %s -march=x86-64 -mattr=+mmx,+sse2 | FileCheck %s
+
+define i64 @t0(<1 x i64>* %a, i32* %b) {
+; CHECK-LABEL: t0:
+; CHECK: # BB#0:{{.*}} %entry
+; CHECK: movq (%[[REG1:[a-z]+]]), %mm0
+; CHECK-NEXT: psllq (%[[REG2:[a-z]+]]), %mm0
+; CHECK-NEXT: movd %mm0, %rax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <1 x i64>* %a to x86_mmx*
+ %1 = load x86_mmx* %0, align 8
+ %2 = load i32* %b, align 4
+ %3 = tail call x86_mmx @llvm.x86.mmx.pslli.q(x86_mmx %1, i32 %2)
+ %4 = bitcast x86_mmx %3 to i64
+ ret i64 %4
+}
+declare x86_mmx @llvm.x86.mmx.pslli.q(x86_mmx, i32)
+
+define i64 @t1(<1 x i64>* %a, i32* %b) {
+; CHECK-LABEL: t1:
+; CHECK: # BB#0:{{.*}} %entry
+; CHECK: movq (%[[REG1]]), %mm0
+; CHECK-NEXT: psrlq (%[[REG2]]), %mm0
+; CHECK-NEXT: movd %mm0, %rax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <1 x i64>* %a to x86_mmx*
+ %1 = load x86_mmx* %0, align 8
+ %2 = load i32* %b, align 4
+ %3 = tail call x86_mmx @llvm.x86.mmx.psrli.q(x86_mmx %1, i32 %2)
+ %4 = bitcast x86_mmx %3 to i64
+ ret i64 %4
+}
+declare x86_mmx @llvm.x86.mmx.psrli.q(x86_mmx, i32)
+
+define i64 @t2(<1 x i64>* %a, i32* %b) {
+; CHECK-LABEL: t2:
+; CHECK: # BB#0:{{.*}} %entry
+; CHECK: movq (%[[REG1]]), %mm0
+; CHECK-NEXT: psllw (%[[REG2]]), %mm0
+; CHECK-NEXT: movd %mm0, %rax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <1 x i64>* %a to x86_mmx*
+ %1 = load x86_mmx* %0, align 8
+ %2 = load i32* %b, align 4
+ %3 = tail call x86_mmx @llvm.x86.mmx.pslli.w(x86_mmx %1, i32 %2)
+ %4 = bitcast x86_mmx %3 to i64
+ ret i64 %4
+}
+declare x86_mmx @llvm.x86.mmx.pslli.w(x86_mmx, i32)
+
+define i64 @t3(<1 x i64>* %a, i32* %b) {
+; CHECK-LABEL: t3:
+; CHECK: # BB#0:{{.*}} %entry
+; CHECK: movq (%[[REG1]]), %mm0
+; CHECK-NEXT: psrlw (%[[REG2]]), %mm0
+; CHECK-NEXT: movd %mm0, %rax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <1 x i64>* %a to x86_mmx*
+ %1 = load x86_mmx* %0, align 8
+ %2 = load i32* %b, align 4
+ %3 = tail call x86_mmx @llvm.x86.mmx.psrli.w(x86_mmx %1, i32 %2)
+ %4 = bitcast x86_mmx %3 to i64
+ ret i64 %4
+}
+declare x86_mmx @llvm.x86.mmx.psrli.w(x86_mmx, i32)
+
+define i64 @t4(<1 x i64>* %a, i32* %b) {
+; CHECK-LABEL: t4:
+; CHECK: # BB#0:{{.*}} %entry
+; CHECK: movq (%[[REG1]]), %mm0
+; CHECK-NEXT: pslld (%[[REG2]]), %mm0
+; CHECK-NEXT: movd %mm0, %rax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <1 x i64>* %a to x86_mmx*
+ %1 = load x86_mmx* %0, align 8
+ %2 = load i32* %b, align 4
+ %3 = tail call x86_mmx @llvm.x86.mmx.pslli.d(x86_mmx %1, i32 %2)
+ %4 = bitcast x86_mmx %3 to i64
+ ret i64 %4
+}
+declare x86_mmx @llvm.x86.mmx.pslli.d(x86_mmx, i32)
+
+define i64 @t5(<1 x i64>* %a, i32* %b) {
+; CHECK-LABEL: t5:
+; CHECK: # BB#0:{{.*}} %entry
+; CHECK: movq (%[[REG1]]), %mm0
+; CHECK-NEXT: psrld (%[[REG2]]), %mm0
+; CHECK-NEXT: movd %mm0, %rax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <1 x i64>* %a to x86_mmx*
+ %1 = load x86_mmx* %0, align 8
+ %2 = load i32* %b, align 4
+ %3 = tail call x86_mmx @llvm.x86.mmx.psrli.d(x86_mmx %1, i32 %2)
+ %4 = bitcast x86_mmx %3 to i64
+ ret i64 %4
+}
+declare x86_mmx @llvm.x86.mmx.psrli.d(x86_mmx, i32)
+
+define i64 @t6(<1 x i64>* %a, i32* %b) {
+; CHECK-LABEL: t6:
+; CHECK: # BB#0:{{.*}} %entry
+; CHECK: movq (%[[REG1]]), %mm0
+; CHECK-NEXT: psraw (%[[REG2]]), %mm0
+; CHECK-NEXT: movd %mm0, %rax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <1 x i64>* %a to x86_mmx*
+ %1 = load x86_mmx* %0, align 8
+ %2 = load i32* %b, align 4
+ %3 = tail call x86_mmx @llvm.x86.mmx.psrai.w(x86_mmx %1, i32 %2)
+ %4 = bitcast x86_mmx %3 to i64
+ ret i64 %4
+}
+declare x86_mmx @llvm.x86.mmx.psrai.w(x86_mmx, i32)
+
+define i64 @t7(<1 x i64>* %a, i32* %b) {
+; CHECK-LABEL: t7:
+; CHECK: # BB#0:{{.*}} %entry
+; CHECK: movq (%[[REG1]]), %mm0
+; CHECK-NEXT: psrad (%[[REG2]]), %mm0
+; CHECK-NEXT: movd %mm0, %rax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <1 x i64>* %a to x86_mmx*
+ %1 = load x86_mmx* %0, align 8
+ %2 = load i32* %b, align 4
+ %3 = tail call x86_mmx @llvm.x86.mmx.psrai.d(x86_mmx %1, i32 %2)
+ %4 = bitcast x86_mmx %3 to i64
+ ret i64 %4
+}
+declare x86_mmx @llvm.x86.mmx.psrai.d(x86_mmx, i32)
+
+define i64 @tt0(x86_mmx %t, x86_mmx* %q) {
+; CHECK-LABEL: tt0:
+; CHECK: # BB#0:{{.*}} %entry
+; CHECK: paddb (%[[REG3:[a-z]+]]), %mm0
+; CHECK-NEXT: movd %mm0, %rax
+; CHECK-NEXT: emms
+; CHECK-NEXT: retq
+entry:
+ %v = load x86_mmx* %q
+ %u = tail call x86_mmx @llvm.x86.mmx.padd.b(x86_mmx %t, x86_mmx %v)
+ %s = bitcast x86_mmx %u to i64
+ call void @llvm.x86.mmx.emms()
+ ret i64 %s
+}
+declare x86_mmx @llvm.x86.mmx.padd.b(x86_mmx, x86_mmx)
+declare void @llvm.x86.mmx.emms()
+
+define i64 @tt1(x86_mmx %t, x86_mmx* %q) {
+; CHECK-LABEL: tt1:
+; CHECK: # BB#0:{{.*}} %entry
+; CHECK: paddw (%[[REG3]]), %mm0
+; CHECK-NEXT: movd %mm0, %rax
+; CHECK-NEXT: emms
+; CHECK-NEXT: retq
+entry:
+ %v = load x86_mmx* %q
+ %u = tail call x86_mmx @llvm.x86.mmx.padd.w(x86_mmx %t, x86_mmx %v)
+ %s = bitcast x86_mmx %u to i64
+ call void @llvm.x86.mmx.emms()
+ ret i64 %s
+}
+declare x86_mmx @llvm.x86.mmx.padd.w(x86_mmx, x86_mmx)
+
+define i64 @tt2(x86_mmx %t, x86_mmx* %q) {
+; CHECK-LABEL: tt2:
+; CHECK: # BB#0:{{.*}} %entry
+; CHECK: paddd (%[[REG3]]), %mm0
+; CHECK-NEXT: movd %mm0, %rax
+; CHECK-NEXT: emms
+; CHECK-NEXT: retq
+entry:
+ %v = load x86_mmx* %q
+ %u = tail call x86_mmx @llvm.x86.mmx.padd.d(x86_mmx %t, x86_mmx %v)
+ %s = bitcast x86_mmx %u to i64
+ call void @llvm.x86.mmx.emms()
+ ret i64 %s
+}
+declare x86_mmx @llvm.x86.mmx.padd.d(x86_mmx, x86_mmx)
+
+define i64 @tt3(x86_mmx %t, x86_mmx* %q) {
+; CHECK-LABEL: tt3:
+; CHECK: # BB#0:{{.*}} %entry
+; CHECK: paddq (%[[REG3]]), %mm0
+; CHECK-NEXT: movd %mm0, %rax
+; CHECK-NEXT: emms
+; CHECK-NEXT: retq
+entry:
+ %v = load x86_mmx* %q
+ %u = tail call x86_mmx @llvm.x86.mmx.padd.q(x86_mmx %t, x86_mmx %v)
+ %s = bitcast x86_mmx %u to i64
+ call void @llvm.x86.mmx.emms()
+ ret i64 %s
+}
+declare x86_mmx @llvm.x86.mmx.padd.q(x86_mmx, x86_mmx)
+
+define i64 @tt4(x86_mmx %t, x86_mmx* %q) {
+; CHECK-LABEL: tt4:
+; CHECK: # BB#0:{{.*}} %entry
+; CHECK: paddusb (%[[REG3]]), %mm0
+; CHECK-NEXT: movd %mm0, %rax
+; CHECK-NEXT: emms
+; CHECK-NEXT: retq
+entry:
+ %v = load x86_mmx* %q
+ %u = tail call x86_mmx @llvm.x86.mmx.paddus.b(x86_mmx %t, x86_mmx %v)
+ %s = bitcast x86_mmx %u to i64
+ call void @llvm.x86.mmx.emms()
+ ret i64 %s
+}
+declare x86_mmx @llvm.x86.mmx.paddus.b(x86_mmx, x86_mmx)
+
+define i64 @tt5(x86_mmx %t, x86_mmx* %q) {
+; CHECK-LABEL: tt5:
+; CHECK: # BB#0:{{.*}} %entry
+; CHECK: paddusw (%[[REG3]]), %mm0
+; CHECK-NEXT: movd %mm0, %rax
+; CHECK-NEXT: emms
+; CHECK-NEXT: retq
+entry:
+ %v = load x86_mmx* %q
+ %u = tail call x86_mmx @llvm.x86.mmx.paddus.w(x86_mmx %t, x86_mmx %v)
+ %s = bitcast x86_mmx %u to i64
+ call void @llvm.x86.mmx.emms()
+ ret i64 %s
+}
+declare x86_mmx @llvm.x86.mmx.paddus.w(x86_mmx, x86_mmx)
+
+define i64 @tt6(x86_mmx %t, x86_mmx* %q) {
+; CHECK-LABEL: tt6:
+; CHECK: # BB#0:{{.*}} %entry
+; CHECK: psrlw (%[[REG3]]), %mm0
+; CHECK-NEXT: movd %mm0, %rax
+; CHECK-NEXT: emms
+; CHECK-NEXT: retq
+entry:
+ %v = load x86_mmx* %q
+ %u = tail call x86_mmx @llvm.x86.mmx.psrl.w(x86_mmx %t, x86_mmx %v)
+ %s = bitcast x86_mmx %u to i64
+ call void @llvm.x86.mmx.emms()
+ ret i64 %s
+}
+declare x86_mmx @llvm.x86.mmx.psrl.w(x86_mmx, x86_mmx)
+
+define i64 @tt7(x86_mmx %t, x86_mmx* %q) {
+; CHECK-LABEL: tt7:
+; CHECK: # BB#0:{{.*}} %entry
+; CHECK: psrld (%[[REG3]]), %mm0
+; CHECK-NEXT: movd %mm0, %rax
+; CHECK-NEXT: emms
+; CHECK-NEXT: retq
+entry:
+ %v = load x86_mmx* %q
+ %u = tail call x86_mmx @llvm.x86.mmx.psrl.d(x86_mmx %t, x86_mmx %v)
+ %s = bitcast x86_mmx %u to i64
+ call void @llvm.x86.mmx.emms()
+ ret i64 %s
+}
+declare x86_mmx @llvm.x86.mmx.psrl.d(x86_mmx, x86_mmx)
+
+define i64 @tt8(x86_mmx %t, x86_mmx* %q) {
+; CHECK-LABEL: tt8:
+; CHECK: # BB#0:{{.*}} %entry
+; CHECK: psrlq (%[[REG3]]), %mm0
+; CHECK-NEXT: movd %mm0, %rax
+; CHECK-NEXT: emms
+; CHECK-NEXT: retq
+entry:
+ %v = load x86_mmx* %q
+ %u = tail call x86_mmx @llvm.x86.mmx.psrl.q(x86_mmx %t, x86_mmx %v)
+ %s = bitcast x86_mmx %u to i64
+ call void @llvm.x86.mmx.emms()
+ ret i64 %s
+}
+declare x86_mmx @llvm.x86.mmx.psrl.q(x86_mmx, x86_mmx)
diff --git a/test/CodeGen/X86/mmx-insert-element.ll b/test/CodeGen/X86/mmx-insert-element.ll
deleted file mode 100644
index 348dac8..0000000
--- a/test/CodeGen/X86/mmx-insert-element.ll
+++ /dev/null
@@ -1,9 +0,0 @@
-; RUN: llc < %s -march=x86 -mattr=+mmx,+sse2 | grep movq
-; RUN: llc < %s -march=x86 -mattr=+mmx,+sse2 | grep pshufd
-; This is not an MMX operation; promoted to XMM.
-
-define x86_mmx @qux(i32 %A) nounwind {
- %tmp3 = insertelement <2 x i32> < i32 0, i32 undef >, i32 %A, i32 1 ; <<2 x i32>> [#uses=1]
- %tmp4 = bitcast <2 x i32> %tmp3 to x86_mmx
- ret x86_mmx %tmp4
-}
diff --git a/test/CodeGen/X86/mmx-builtins.ll b/test/CodeGen/X86/mmx-intrinsics.ll
index aabdd53..39d481b 100644
--- a/test/CodeGen/X86/mmx-builtins.ll
+++ b/test/CodeGen/X86/mmx-intrinsics.ll
@@ -1347,3 +1347,12 @@ define <4 x float> @test89(<4 x float> %a, x86_mmx %b) nounwind {
}
declare <4 x float> @llvm.x86.sse.cvtpi2ps(<4 x float>, x86_mmx) nounwind readnone
+
+; CHECK-LABEL: test90
+define void @test90() {
+; CHECK: emms
+ call void @llvm.x86.mmx.emms()
+ ret void
+}
+
+declare void @llvm.x86.mmx.emms()
diff --git a/test/CodeGen/X86/mmx-pinsrw.ll b/test/CodeGen/X86/mmx-pinsrw.ll
deleted file mode 100644
index 33dd2eb..0000000
--- a/test/CodeGen/X86/mmx-pinsrw.ll
+++ /dev/null
@@ -1,17 +0,0 @@
-; RUN: llc < %s -mtriple=x86_64-linux -mcpu=corei7 | FileCheck %s
-; PR2562
-
-; CHECK: pinsr
-
-external global i16 ; <i16*>:0 [#uses=1]
-external global <4 x i16> ; <<4 x i16>*>:1 [#uses=2]
-
-declare void @abort()
-
-define void @""() {
- load i16* @0 ; <i16>:1 [#uses=1]
- load <4 x i16>* @1 ; <<4 x i16>>:2 [#uses=1]
- insertelement <4 x i16> %2, i16 %1, i32 0 ; <<4 x i16>>:3 [#uses=1]
- store <4 x i16> %3, <4 x i16>* @1
- ret void
-}
diff --git a/test/CodeGen/X86/mmx-punpckhdq.ll b/test/CodeGen/X86/mmx-punpckhdq.ll
deleted file mode 100644
index 9e8f5bf..0000000
--- a/test/CodeGen/X86/mmx-punpckhdq.ll
+++ /dev/null
@@ -1,31 +0,0 @@
-; RUN: llc < %s -march=x86 -mattr=+mmx,+sse4.2 -mtriple=x86_64-apple-darwin10 | FileCheck %s
-; There are no MMX operations in bork; promoted to XMM.
-
-define void @bork(<1 x i64>* %x) {
-; CHECK: bork
-; CHECK: movlpd
-entry:
- %tmp2 = load <1 x i64>* %x ; <<1 x i64>> [#uses=1]
- %tmp6 = bitcast <1 x i64> %tmp2 to <2 x i32> ; <<2 x i32>> [#uses=1]
- %tmp9 = shufflevector <2 x i32> %tmp6, <2 x i32> undef, <2 x i32> < i32 1, i32 1 > ; <<2 x i32>> [#uses=1]
- %tmp10 = bitcast <2 x i32> %tmp9 to <1 x i64> ; <<1 x i64>> [#uses=1]
- store <1 x i64> %tmp10, <1 x i64>* %x
- tail call void @llvm.x86.mmx.emms( )
- ret void
-}
-
-; pork uses MMX.
-
-define void @pork(x86_mmx* %x) {
-; CHECK: pork
-; CHECK: punpckhdq
-entry:
- %tmp2 = load x86_mmx* %x ; <x86_mmx> [#uses=1]
- %tmp9 = tail call x86_mmx @llvm.x86.mmx.punpckhdq (x86_mmx %tmp2, x86_mmx %tmp2)
- store x86_mmx %tmp9, x86_mmx* %x
- tail call void @llvm.x86.mmx.emms( )
- ret void
-}
-
-declare x86_mmx @llvm.x86.mmx.punpckhdq(x86_mmx, x86_mmx)
-declare void @llvm.x86.mmx.emms()
diff --git a/test/CodeGen/X86/mmx-s2v.ll b/test/CodeGen/X86/mmx-s2v.ll
deleted file mode 100644
index c98023c..0000000
--- a/test/CodeGen/X86/mmx-s2v.ll
+++ /dev/null
@@ -1,15 +0,0 @@
-; RUN: llc < %s -march=x86 -mattr=+mmx
-; PR2574
-
-define void @entry(i32 %m_task_id, i32 %start_x, i32 %end_x) {; <label>:0
- br i1 true, label %bb.nph, label %._crit_edge
-
-bb.nph: ; preds = %bb.nph, %0
- %t2206f2.0 = phi <2 x float> [ %2, %bb.nph ], [ undef, %0 ] ; <<2 x float>> [#uses=1]
- insertelement <2 x float> %t2206f2.0, float 0.000000e+00, i32 0 ; <<2 x float>>:1 [#uses=1]
- insertelement <2 x float> %1, float 0.000000e+00, i32 1 ; <<2 x float>>:2 [#uses=1]
- br label %bb.nph
-
-._crit_edge: ; preds = %0
- ret void
-}
diff --git a/test/CodeGen/X86/mmx-shift.ll b/test/CodeGen/X86/mmx-shift.ll
deleted file mode 100644
index c7c6e75..0000000
--- a/test/CodeGen/X86/mmx-shift.ll
+++ /dev/null
@@ -1,39 +0,0 @@
-; RUN: llc < %s -march=x86 -mattr=+mmx | FileCheck %s
-; RUN: llc < %s -march=x86-64 -mattr=+mmx | FileCheck %s
-
-define i64 @t1(<1 x i64> %mm1) nounwind {
-entry:
- %tmp = bitcast <1 x i64> %mm1 to x86_mmx
- %tmp6 = tail call x86_mmx @llvm.x86.mmx.pslli.q( x86_mmx %tmp, i32 32 ) ; <x86_mmx> [#uses=1]
- %retval1112 = bitcast x86_mmx %tmp6 to i64
- ret i64 %retval1112
-
-; CHECK-LABEL: t1:
-; CHECK: psllq $32
-}
-
-declare x86_mmx @llvm.x86.mmx.pslli.q(x86_mmx, i32) nounwind readnone
-
-define i64 @t2(x86_mmx %mm1, x86_mmx %mm2) nounwind {
-entry:
- %tmp7 = tail call x86_mmx @llvm.x86.mmx.psra.d( x86_mmx %mm1, x86_mmx %mm2 ) nounwind readnone ; <x86_mmx> [#uses=1]
- %retval1112 = bitcast x86_mmx %tmp7 to i64
- ret i64 %retval1112
-
-; CHECK-LABEL: t2:
-; CHECK: psrad
-}
-
-declare x86_mmx @llvm.x86.mmx.psra.d(x86_mmx, x86_mmx) nounwind readnone
-
-define i64 @t3(x86_mmx %mm1, i32 %bits) nounwind {
-entry:
- %tmp8 = tail call x86_mmx @llvm.x86.mmx.psrli.w( x86_mmx %mm1, i32 %bits ) nounwind readnone ; <x86_mmx> [#uses=1]
- %retval1314 = bitcast x86_mmx %tmp8 to i64
- ret i64 %retval1314
-
-; CHECK-LABEL: t3:
-; CHECK: psrlw
-}
-
-declare x86_mmx @llvm.x86.mmx.psrli.w(x86_mmx, i32) nounwind readnone
diff --git a/test/CodeGen/X86/mmx-shuffle.ll b/test/CodeGen/X86/mmx-shuffle.ll
deleted file mode 100644
index 869f32b..0000000
--- a/test/CodeGen/X86/mmx-shuffle.ll
+++ /dev/null
@@ -1,31 +0,0 @@
-; RUN: llc < %s -mcpu=yonah
-; PR1427
-
-target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64"
-target triple = "i686-pc-linux-gnu"
- %struct.DrawHelper = type { void (i32, %struct.QT_FT_Span*, i8*)*, void (i32, %struct.QT_FT_Span*, i8*)*, void (%struct.QRasterBuffer*, i32, i32, i32, i8*, i32, i32, i32)*, void (%struct.QRasterBuffer*, i32, i32, i32, i8*, i32, i32, i32)*, void (%struct.QRasterBuffer*, i32, i32, i32, i32, i32)* }
- %struct.QBasicAtomic = type { i32 }
- %struct.QClipData = type { i32, %"struct.QClipData::ClipLine"*, i32, i32, %struct.QT_FT_Span*, i32, i32, i32, i32 }
- %"struct.QClipData::ClipLine" = type { i32, %struct.QT_FT_Span* }
- %struct.QRasterBuffer = type { %struct.QRect, %struct.QRegion, %struct.QClipData*, %struct.QClipData*, i8, i32, i32, %struct.DrawHelper*, i32, i32, i32, i8* }
- %struct.QRect = type { i32, i32, i32, i32 }
- %struct.QRegion = type { %"struct.QRegion::QRegionData"* }
- %"struct.QRegion::QRegionData" = type { %struct.QBasicAtomic, %struct._XRegion*, i8*, %struct.QRegionPrivate* }
- %struct.QRegionPrivate = type opaque
- %struct.QT_FT_Span = type { i16, i16, i16, i8 }
- %struct._XRegion = type opaque
-
-define void @_Z19qt_bitmapblit16_sseP13QRasterBufferiijPKhiii(%struct.QRasterBuffer* %rasterBuffer, i32 %x, i32 %y, i32 %color, i8* %src, i32 %width, i32 %height, i32 %stride) {
-entry:
- %tmp528 = bitcast <8 x i8> zeroinitializer to <2 x i32> ; <<2 x i32>> [#uses=1]
- %tmp529 = and <2 x i32> %tmp528, bitcast (<4 x i16> < i16 -32640, i16 16448, i16 8224, i16 4112 > to <2 x i32>) ; <<2 x i32>> [#uses=1]
- %tmp542 = bitcast <2 x i32> %tmp529 to <4 x i16> ; <<4 x i16>> [#uses=1]
- %tmp543 = add <4 x i16> %tmp542, < i16 0, i16 16448, i16 24672, i16 28784 > ; <<4 x i16>> [#uses=1]
- %tmp555 = bitcast <4 x i16> %tmp543 to <8 x i8> ; <<8 x i8>> [#uses=1]
- %tmp556 = bitcast <8 x i8> %tmp555 to x86_mmx
- %tmp557 = bitcast <8 x i8> zeroinitializer to x86_mmx
- tail call void @llvm.x86.mmx.maskmovq( x86_mmx %tmp557, x86_mmx %tmp556, i8* null )
- ret void
-}
-
-declare void @llvm.x86.mmx.maskmovq(x86_mmx, x86_mmx, i8*)
diff --git a/test/CodeGen/X86/movntdq-no-avx.ll b/test/CodeGen/X86/movntdq-no-avx.ll
index 8b7e6ef..cc35e20 100644
--- a/test/CodeGen/X86/movntdq-no-avx.ll
+++ b/test/CodeGen/X86/movntdq-no-avx.ll
@@ -9,4 +9,4 @@ entry:
ret void
}
-!0 = metadata !{i32 1}
+!0 = !{i32 1}
diff --git a/test/CodeGen/X86/movtopush.ll b/test/CodeGen/X86/movtopush.ll
new file mode 100644
index 0000000..4a5d903
--- /dev/null
+++ b/test/CodeGen/X86/movtopush.ll
@@ -0,0 +1,346 @@
+; RUN: llc < %s -mtriple=i686-windows | FileCheck %s -check-prefix=NORMAL
+; RUN: llc < %s -mtriple=x86_64-windows | FileCheck %s -check-prefix=X64
+; RUN: llc < %s -mtriple=i686-windows -force-align-stack -stack-alignment=32 | FileCheck %s -check-prefix=ALIGNED
+
+declare void @good(i32 %a, i32 %b, i32 %c, i32 %d)
+declare void @inreg(i32 %a, i32 inreg %b, i32 %c, i32 %d)
+declare void @oneparam(i32 %a)
+declare void @eightparams(i32 %a, i32 %b, i32 %c, i32 %d, i32 %e, i32 %f, i32 %g, i32 %h)
+
+
+; Here, we should have a reserved frame, so we don't expect pushes
+; NORMAL-LABEL: test1:
+; NORMAL: subl $16, %esp
+; NORMAL-NEXT: movl $4, 12(%esp)
+; NORMAL-NEXT: movl $3, 8(%esp)
+; NORMAL-NEXT: movl $2, 4(%esp)
+; NORMAL-NEXT: movl $1, (%esp)
+; NORMAL-NEXT: call
+; NORMAL-NEXT: addl $16, %esp
+define void @test1() {
+entry:
+ call void @good(i32 1, i32 2, i32 3, i32 4)
+ ret void
+}
+
+; We're optimizing for code size, so we should get pushes for x86,
+; even though there is a reserved call frame.
+; Make sure we don't touch x86-64
+; NORMAL-LABEL: test1b:
+; NORMAL-NOT: subl {{.*}} %esp
+; NORMAL: pushl $4
+; NORMAL-NEXT: pushl $3
+; NORMAL-NEXT: pushl $2
+; NORMAL-NEXT: pushl $1
+; NORMAL-NEXT: call
+; NORMAL-NEXT: addl $16, %esp
+; X64-LABEL: test1b:
+; X64: movl $1, %ecx
+; X64-NEXT: movl $2, %edx
+; X64-NEXT: movl $3, %r8d
+; X64-NEXT: movl $4, %r9d
+; X64-NEXT: callq good
+define void @test1b() optsize {
+entry:
+ call void @good(i32 1, i32 2, i32 3, i32 4)
+ ret void
+}
+
+; Same as above, but for minsize
+; NORMAL-LABEL: test1c:
+; NORMAL-NOT: subl {{.*}} %esp
+; NORMAL: pushl $4
+; NORMAL-NEXT: pushl $3
+; NORMAL-NEXT: pushl $2
+; NORMAL-NEXT: pushl $1
+; NORMAL-NEXT: call
+; NORMAL-NEXT: addl $16, %esp
+define void @test1c() minsize {
+entry:
+ call void @good(i32 1, i32 2, i32 3, i32 4)
+ ret void
+}
+
+; If we don't have a reserved frame, we should have pushes
+; NORMAL-LABEL: test2:
+; NORMAL-NOT: subl {{.*}} %esp
+; NORMAL: pushl $4
+; NORMAL-NEXT: pushl $3
+; NORMAL-NEXT: pushl $2
+; NORMAL-NEXT: pushl $1
+; NORMAL-NEXT: call
+define void @test2(i32 %k) {
+entry:
+ %a = alloca i32, i32 %k
+ call void @good(i32 1, i32 2, i32 3, i32 4)
+ ret void
+}
+
+; Again, we expect a sequence of 4 immediate pushes.
+; Check that we generate the right pushes for immediates wider than 8 bits.
+; NORMAL-LABEL: test2b:
+; NORMAL-NOT: subl {{.*}} %esp
+; NORMAL: pushl $4096
+; NORMAL-NEXT: pushl $3072
+; NORMAL-NEXT: pushl $2048
+; NORMAL-NEXT: pushl $1024
+; NORMAL-NEXT: call
+; NORMAL-NEXT: addl $16, %esp
+define void @test2b() optsize {
+entry:
+ call void @good(i32 1024, i32 2048, i32 3072, i32 4096)
+ ret void
+}
+
+; The push for the first argument should push a register
+; NORMAL-LABEL: test3:
+; NORMAL-NOT: subl {{.*}} %esp
+; NORMAL: pushl $4
+; NORMAL-NEXT: pushl $3
+; NORMAL-NEXT: pushl $2
+; NORMAL-NEXT: pushl %e{{..}}
+; NORMAL-NEXT: call
+; NORMAL-NEXT: addl $16, %esp
+define void @test3(i32 %k) optsize {
+entry:
+ %f = add i32 %k, 1
+ call void @good(i32 %f, i32 2, i32 3, i32 4)
+ ret void
+}
+
+; We don't support weird calling conventions
+; NORMAL-LABEL: test4:
+; NORMAL: subl $12, %esp
+; NORMAL-NEXT: movl $4, 8(%esp)
+; NORMAL-NEXT: movl $3, 4(%esp)
+; NORMAL-NEXT: movl $1, (%esp)
+; NORMAL-NEXT: movl $2, %eax
+; NORMAL-NEXT: call
+; NORMAL-NEXT: addl $12, %esp
+define void @test4() optsize {
+entry:
+ call void @inreg(i32 1, i32 2, i32 3, i32 4)
+ ret void
+}
+
+; When there is no reserved call frame, check that additional stack
+; adjustment is inserted when the pushes don't add up to the required alignment.
+; ALIGNED-LABEL: test5:
+; ALIGNED: subl $16, %esp
+; ALIGNED-NEXT: pushl $4
+; ALIGNED-NEXT: pushl $3
+; ALIGNED-NEXT: pushl $2
+; ALIGNED-NEXT: pushl $1
+; ALIGNED-NEXT: call
+define void @test5(i32 %k) {
+entry:
+ %a = alloca i32, i32 %k
+ call void @good(i32 1, i32 2, i32 3, i32 4)
+ ret void
+}
+
+; When the alignment adds up, do the transformation
+; ALIGNED-LABEL: test5b:
+; ALIGNED: pushl $8
+; ALIGNED-NEXT: pushl $7
+; ALIGNED-NEXT: pushl $6
+; ALIGNED-NEXT: pushl $5
+; ALIGNED-NEXT: pushl $4
+; ALIGNED-NEXT: pushl $3
+; ALIGNED-NEXT: pushl $2
+; ALIGNED-NEXT: pushl $1
+; ALIGNED-NEXT: call
+define void @test5b() optsize {
+entry:
+ call void @eightparams(i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8)
+ ret void
+}
+
+; When having to compensate for the alignment isn't worth it,
+; don't use pushes.
+; ALIGNED-LABEL: test5c:
+; ALIGNED: movl $1, (%esp)
+; ALIGNED-NEXT: call
+define void @test5c() optsize {
+entry:
+ call void @oneparam(i32 1)
+ ret void
+}
+
+; Check that pushing the addresses of globals (or, generally, things that
+; aren't exactly immediates) isn't broken.
+; Fixes PR21878.
+; NORMAL-LABEL: test6:
+; NORMAL: pushl $_ext
+; NORMAL-NEXT: call
+declare void @f(i8*)
+@ext = external constant i8
+
+define void @test6() {
+ call void @f(i8* @ext)
+ br label %bb
+bb:
+ alloca i32
+ ret void
+}
+
+; Check that we fold simple cases into the push
+; NORMAL-LABEL: test7:
+; NORMAL-NOT: subl {{.*}} %esp
+; NORMAL: movl 4(%esp), [[EAX:%e..]]
+; NORMAL-NEXT: pushl $4
+; NORMAL-NEXT: pushl ([[EAX]])
+; NORMAL-NEXT: pushl $2
+; NORMAL-NEXT: pushl $1
+; NORMAL-NEXT: call
+; NORMAL-NEXT: addl $16, %esp
+define void @test7(i32* %ptr) optsize {
+entry:
+ %val = load i32* %ptr
+ call void @good(i32 1, i32 2, i32 %val, i32 4)
+ ret void
+}
+
+; Fold stack-relative loads into the push, with the correct offset.
+; In particular, at the second push, %b was at 12(%esp) and
+; %a was at 8(%esp), but the second push bumped %esp, so %a
+; is now at 12(%esp).
+; NORMAL-LABEL: test8:
+; NORMAL: pushl $4
+; NORMAL-NEXT: pushl 12(%esp)
+; NORMAL-NEXT: pushl 12(%esp)
+; NORMAL-NEXT: pushl $1
+; NORMAL-NEXT: call
+; NORMAL-NEXT: addl $16, %esp
+define void @test8(i32 %a, i32 %b) optsize {
+entry:
+ call void @good(i32 1, i32 %a, i32 %b, i32 4)
+ ret void
+}
+
+; If one call is using push instructions, and the other isn't
+; (because it has frame-index references), then we must resolve
+; these references correctly.
+; NORMAL-LABEL: test9:
+; NORMAL-NOT: leal (%esp),
+; NORMAL: pushl $4
+; NORMAL-NEXT: pushl $3
+; NORMAL-NEXT: pushl $2
+; NORMAL-NEXT: pushl $1
+; NORMAL-NEXT: call
+; NORMAL-NEXT: addl $16, %esp
+; NORMAL-NEXT: subl $16, %esp
+; NORMAL-NEXT: leal 16(%esp), [[EAX:%e..]]
+; NORMAL-NEXT: movl [[EAX]], 12(%esp)
+; NORMAL-NEXT: movl $7, 8(%esp)
+; NORMAL-NEXT: movl $6, 4(%esp)
+; NORMAL-NEXT: movl $5, (%esp)
+; NORMAL-NEXT: call
+; NORMAL-NEXT: addl $16, %esp
+define void @test9() optsize {
+entry:
+ %p = alloca i32, align 4
+ call void @good(i32 1, i32 2, i32 3, i32 4)
+ %0 = ptrtoint i32* %p to i32
+ call void @good(i32 5, i32 6, i32 7, i32 %0)
+ ret void
+}
+
+; We can end up with an indirect call whose target gets reloaded on the spot.
+; Make sure we reference the correct stack slot - we spill into (%esp)
+; and reload from 16(%esp) due to the pushes.
+; NORMAL-LABEL: test10:
+; NORMAL: movl $_good, [[ALLOC:.*]]
+; NORMAL-NEXT: movl [[ALLOC]], [[EAX:%e..]]
+; NORMAL-NEXT: movl [[EAX]], (%esp) # 4-byte Spill
+; NORMAL: nop
+; NORMAL: pushl $4
+; NORMAL-NEXT: pushl $3
+; NORMAL-NEXT: pushl $2
+; NORMAL-NEXT: pushl $1
+; NORMAL-NEXT: calll *16(%esp)
+; NORMAL-NEXT: addl $16, %esp
+define void @test10() optsize {
+ %stack_fptr = alloca void (i32, i32, i32, i32)*
+ store void (i32, i32, i32, i32)* @good, void (i32, i32, i32, i32)** %stack_fptr
+ %good_ptr = load volatile void (i32, i32, i32, i32)** %stack_fptr
+ call void asm sideeffect "nop", "~{ax},~{bx},~{cx},~{dx},~{bp},~{si},~{di}"()
+ call void (i32, i32, i32, i32)* %good_ptr(i32 1, i32 2, i32 3, i32 4)
+ ret void
+}
+
+; We can't fold the load from the global into the push because of
+; interference from the store
+; NORMAL-LABEL: test11:
+; NORMAL: movl _the_global, [[EAX:%e..]]
+; NORMAL-NEXT: movl $42, _the_global
+; NORMAL-NEXT: pushl $4
+; NORMAL-NEXT: pushl $3
+; NORMAL-NEXT: pushl $2
+; NORMAL-NEXT: pushl [[EAX]]
+; NORMAL-NEXT: call
+; NORMAL-NEXT: addl $16, %esp
+@the_global = external global i32
+define void @test11() optsize {
+ %myload = load i32* @the_global
+ store i32 42, i32* @the_global
+ call void @good(i32 %myload, i32 2, i32 3, i32 4)
+ ret void
+}
+
+; Converting one mov into a push isn't worth it when
+; doing so forces too much overhead for other calls.
+; NORMAL-LABEL: test12:
+; NORMAL: subl $16, %esp
+; NORMAL-NEXT: movl $4, 8(%esp)
+; NORMAL-NEXT: movl $3, 4(%esp)
+; NORMAL-NEXT: movl $1, (%esp)
+; NORMAL-NEXT: movl $2, %eax
+; NORMAL-NEXT: calll _inreg
+; NORMAL-NEXT: movl $8, 12(%esp)
+; NORMAL-NEXT: movl $7, 8(%esp)
+; NORMAL-NEXT: movl $6, 4(%esp)
+; NORMAL-NEXT: movl $5, (%esp)
+; NORMAL-NEXT: calll _good
+; NORMAL-NEXT: movl $12, 8(%esp)
+; NORMAL-NEXT: movl $11, 4(%esp)
+; NORMAL-NEXT: movl $9, (%esp)
+; NORMAL-NEXT: movl $10, %eax
+; NORMAL-NEXT: calll _inreg
+; NORMAL-NEXT: addl $16, %esp
+define void @test12() optsize {
+entry:
+ call void @inreg(i32 1, i32 2, i32 3, i32 4)
+ call void @good(i32 5, i32 6, i32 7, i32 8)
+ call void @inreg(i32 9, i32 10, i32 11, i32 12)
+ ret void
+}
+
+; But if the gains outweigh the overhead, we should do it
+; NORMAL-LABEL: test12b:
+; NORMAL: pushl $4
+; NORMAL-NEXT: pushl $3
+; NORMAL-NEXT: pushl $2
+; NORMAL-NEXT: pushl $1
+; NORMAL-NEXT: calll _good
+; NORMAL-NEXT: addl $16, %esp
+; NORMAL-NEXT: subl $12, %esp
+; NORMAL-NEXT: movl $8, 8(%esp)
+; NORMAL-NEXT: movl $7, 4(%esp)
+; NORMAL-NEXT: movl $5, (%esp)
+; NORMAL-NEXT: movl $6, %eax
+; NORMAL-NEXT: calll _inreg
+; NORMAL-NEXT: addl $12, %esp
+; NORMAL-NEXT: pushl $12
+; NORMAL-NEXT: pushl $11
+; NORMAL-NEXT: pushl $10
+; NORMAL-NEXT: pushl $9
+; NORMAL-NEXT: calll _good
+; NORMAL-NEXT: addl $16, %esp
+define void @test12b() optsize {
+entry:
+ call void @good(i32 1, i32 2, i32 3, i32 4)
+ call void @inreg(i32 5, i32 6, i32 7, i32 8)
+ call void @good(i32 9, i32 10, i32 11, i32 12)
+ ret void
+}
diff --git a/test/CodeGen/X86/musttail-fastcall.ll b/test/CodeGen/X86/musttail-fastcall.ll
new file mode 100644
index 0000000..c7e5ffc
--- /dev/null
+++ b/test/CodeGen/X86/musttail-fastcall.ll
@@ -0,0 +1,109 @@
+; RUN: llc < %s -mtriple=i686-pc-win32 -mattr=+sse2 | FileCheck %s --check-prefix=CHECK --check-prefix=SSE2
+; RUN: llc < %s -mtriple=i686-pc-win32 -mattr=+sse2,+avx | FileCheck %s --check-prefix=CHECK --check-prefix=AVX
+; RUN: llc < %s -mtriple=i686-pc-win32 -mattr=+sse2,+avx,+avx512f | FileCheck %s --check-prefix=CHECK --check-prefix=AVX512
+
+; While we don't support varargs with fastcall, we do support forwarding.
+
+@asdf = internal constant [4 x i8] c"asdf"
+
+declare void @puts(i8*)
+
+define i32 @call_fast_thunk() {
+ %r = call x86_fastcallcc i32 (...)* @fast_thunk(i32 inreg 1, i32 inreg 2, i32 3)
+ ret i32 %r
+}
+
+define x86_fastcallcc i32 @fast_thunk(...) {
+ call void @puts(i8* getelementptr ([4 x i8]* @asdf, i32 0, i32 0))
+ %r = musttail call x86_fastcallcc i32 (...)* bitcast (i32 (i32, i32, i32)* @fast_target to i32 (...)*) (...)
+ ret i32 %r
+}
+
+; Check that we spill and fill around the call to puts.
+
+; CHECK-LABEL: @fast_thunk@0:
+; CHECK-DAG: movl %ecx, {{.*}}
+; CHECK-DAG: movl %edx, {{.*}}
+; CHECK: calll _puts
+; CHECK-DAG: movl {{.*}}, %ecx
+; CHECK-DAG: movl {{.*}}, %edx
+; CHECK: jmp @fast_target@12
+
+define x86_fastcallcc i32 @fast_target(i32 inreg %a, i32 inreg %b, i32 %c) {
+ %a0 = add i32 %a, %b
+ %a1 = add i32 %a0, %c
+ ret i32 %a1
+}
+
+; Repeat the test for vectorcall, which has XMM registers.
+
+define i32 @call_vector_thunk() {
+ %r = call x86_vectorcallcc i32 (...)* @vector_thunk(i32 inreg 1, i32 inreg 2, i32 3)
+ ret i32 %r
+}
+
+define x86_vectorcallcc i32 @vector_thunk(...) {
+ call void @puts(i8* getelementptr ([4 x i8]* @asdf, i32 0, i32 0))
+ %r = musttail call x86_vectorcallcc i32 (...)* bitcast (i32 (i32, i32, i32)* @vector_target to i32 (...)*) (...)
+ ret i32 %r
+}
+
+; Check that we spill and fill SSE registers around the call to puts.
+
+; CHECK-LABEL: vector_thunk@@0:
+; CHECK-DAG: movl %ecx, {{.*}}
+; CHECK-DAG: movl %edx, {{.*}}
+
+; SSE2-DAG: movups %xmm0, {{.*}}
+; SSE2-DAG: movups %xmm1, {{.*}}
+; SSE2-DAG: movups %xmm2, {{.*}}
+; SSE2-DAG: movups %xmm3, {{.*}}
+; SSE2-DAG: movups %xmm4, {{.*}}
+; SSE2-DAG: movups %xmm5, {{.*}}
+
+; AVX-DAG: vmovups %ymm0, {{.*}}
+; AVX-DAG: vmovups %ymm1, {{.*}}
+; AVX-DAG: vmovups %ymm2, {{.*}}
+; AVX-DAG: vmovups %ymm3, {{.*}}
+; AVX-DAG: vmovups %ymm4, {{.*}}
+; AVX-DAG: vmovups %ymm5, {{.*}}
+
+; AVX512-DAG: vmovups %zmm0, {{.*}}
+; AVX512-DAG: vmovups %zmm1, {{.*}}
+; AVX512-DAG: vmovups %zmm2, {{.*}}
+; AVX512-DAG: vmovups %zmm3, {{.*}}
+; AVX512-DAG: vmovups %zmm4, {{.*}}
+; AVX512-DAG: vmovups %zmm5, {{.*}}
+
+; CHECK: calll _puts
+
+; SSE2-DAG: movups {{.*}}, %xmm0
+; SSE2-DAG: movups {{.*}}, %xmm1
+; SSE2-DAG: movups {{.*}}, %xmm2
+; SSE2-DAG: movups {{.*}}, %xmm3
+; SSE2-DAG: movups {{.*}}, %xmm4
+; SSE2-DAG: movups {{.*}}, %xmm5
+
+; AVX-DAG: vmovups {{.*}}, %ymm0
+; AVX-DAG: vmovups {{.*}}, %ymm1
+; AVX-DAG: vmovups {{.*}}, %ymm2
+; AVX-DAG: vmovups {{.*}}, %ymm3
+; AVX-DAG: vmovups {{.*}}, %ymm4
+; AVX-DAG: vmovups {{.*}}, %ymm5
+
+; AVX512-DAG: vmovups {{.*}}, %zmm0
+; AVX512-DAG: vmovups {{.*}}, %zmm1
+; AVX512-DAG: vmovups {{.*}}, %zmm2
+; AVX512-DAG: vmovups {{.*}}, %zmm3
+; AVX512-DAG: vmovups {{.*}}, %zmm4
+; AVX512-DAG: vmovups {{.*}}, %zmm5
+
+; CHECK-DAG: movl {{.*}}, %ecx
+; CHECK-DAG: movl {{.*}}, %edx
+; CHECK: jmp vector_target@@12
+
+define x86_vectorcallcc i32 @vector_target(i32 inreg %a, i32 inreg %b, i32 %c) {
+ %a0 = add i32 %a, %b
+ %a1 = add i32 %a0, %c
+ ret i32 %a1
+}
diff --git a/test/CodeGen/X86/musttail-varargs.ll b/test/CodeGen/X86/musttail-varargs.ll
index 1e99c14..7f105a1 100644
--- a/test/CodeGen/X86/musttail-varargs.ll
+++ b/test/CodeGen/X86/musttail-varargs.ll
@@ -1,13 +1,21 @@
; RUN: llc < %s -enable-tail-merge=0 -mtriple=x86_64-linux | FileCheck %s --check-prefix=LINUX
; RUN: llc < %s -enable-tail-merge=0 -mtriple=x86_64-windows | FileCheck %s --check-prefix=WINDOWS
+; RUN: llc < %s -enable-tail-merge=0 -mtriple=i686-windows | FileCheck %s --check-prefix=X86
; Test that we actually spill and reload all arguments in the variadic argument
; pack. Doing a normal call will clobber all argument registers, and we will
; spill around it. A simple adjustment should not require any XMM spills.
+declare void @llvm.va_start(i8*) nounwind
+
declare void(i8*, ...)* @get_f(i8* %this)
define void @f_thunk(i8* %this, ...) {
+ ; Use va_start so that we exercise its combination with musttail forwarding.
+ %ap = alloca [4 x i8*], align 16
+ %ap_i8 = bitcast [4 x i8*]* %ap to i8*
+ call void @llvm.va_start(i8* %ap_i8)
+
%fptr = call void(i8*, ...)*(i8*)* @get_f(i8* %this)
musttail call void (i8*, ...)* %fptr(i8* %this, ...)
ret void
@@ -65,6 +73,12 @@ define void @f_thunk(i8* %this, ...) {
; WINDOWS-NOT: mov{{.}}ps
; WINDOWS: jmpq *{{.*}} # TAILCALL
+; No regparms on normal x86 conventions.
+
+; X86-LABEL: _f_thunk:
+; X86: calll _get_f
+; X86: jmpl *{{.*}} # TAILCALL
+
; This thunk shouldn't require any spills and reloads, assuming the register
; allocator knows what it's doing.
@@ -82,6 +96,9 @@ define void @g_thunk(i8* %fptr_i8, ...) {
; WINDOWS-NOT: movq
; WINDOWS: jmpq *%rcx # TAILCALL
+; X86-LABEL: _g_thunk:
+; X86: jmpl *%eax # TAILCALL
+
; Do a simple multi-exit multi-bb test.
%struct.Foo = type { i1, i8*, i8* }
@@ -117,3 +134,7 @@ else:
; WINDOWS: jne
; WINDOWS: jmpq *{{.*}} # TAILCALL
; WINDOWS: jmpq *{{.*}} # TAILCALL
+; X86-LABEL: _h_thunk:
+; X86: jne
+; X86: jmpl *{{.*}} # TAILCALL
+; X86: jmpl *{{.*}} # TAILCALL
diff --git a/test/CodeGen/X86/named-reg-alloc.ll b/test/CodeGen/X86/named-reg-alloc.ll
index 9463ea3..c33b4eb 100644
--- a/test/CodeGen/X86/named-reg-alloc.ll
+++ b/test/CodeGen/X86/named-reg-alloc.ll
@@ -11,4 +11,4 @@ entry:
declare i32 @llvm.read_register.i32(metadata) nounwind
-!0 = metadata !{metadata !"eax\00"}
+!0 = !{!"eax\00"}
diff --git a/test/CodeGen/X86/named-reg-notareg.ll b/test/CodeGen/X86/named-reg-notareg.ll
index d85dddd..18c517d 100644
--- a/test/CodeGen/X86/named-reg-notareg.ll
+++ b/test/CodeGen/X86/named-reg-notareg.ll
@@ -10,4 +10,4 @@ entry:
declare i32 @llvm.read_register.i32(metadata) nounwind
-!0 = metadata !{metadata !"notareg\00"}
+!0 = !{!"notareg\00"}
diff --git a/test/CodeGen/X86/no-compact-unwind.ll b/test/CodeGen/X86/no-compact-unwind.ll
deleted file mode 100644
index 991cd4e..0000000
--- a/test/CodeGen/X86/no-compact-unwind.ll
+++ /dev/null
@@ -1,64 +0,0 @@
-; RUN: llc < %s -mtriple x86_64-apple-macosx10.8.0 -mcpu corei7 -filetype=obj -o - \
-; RUN: | llvm-objdump -triple x86_64-apple-macosx10.8.0 -s - \
-; RUN: | FileCheck -check-prefix=CU %s
-; RUN: llc < %s -mtriple x86_64-apple-darwin11 -mcpu corei7 \
-; RUN: | llvm-mc -triple x86_64-apple-darwin11 -filetype=obj -o - \
-; RUN: | llvm-objdump -triple x86_64-apple-darwin11 -s - \
-; RUN: | FileCheck -check-prefix=FROM-ASM %s
-
-%"struct.dyld::MappedRanges" = type { [400 x %struct.anon], %"struct.dyld::MappedRanges"* }
-%struct.anon = type { %class.ImageLoader*, i64, i64 }
-%class.ImageLoader = type { i32 (...)**, i8*, i8*, i32, i64, i64, i32, i32, %"struct.ImageLoader::recursive_lock"*, i16, i16, [4 x i8] }
-%"struct.ImageLoader::recursive_lock" = type { i32, i32 }
-
-@G1 = external hidden global %"struct.dyld::MappedRanges", align 8
-
-declare void @OSMemoryBarrier() optsize
-
-; This compact unwind encoding indicates that we could not generate correct
-; compact unwind encodings for this function. This then defaults to using the
-; DWARF EH frame.
-
-; CU: Contents of section __compact_unwind:
-; CU-NEXT: 0048 00000000 00000000 42000000 00000004
-; CU-NEXT: 0058 00000000 00000000 00000000 00000000
-
-; FROM-ASM: Contents of section __compact_unwind:
-; FROM-ASM-NEXT: 0048 00000000 00000000 42000000 00000004
-; FROM-ASM-NEXT: 0058 00000000 00000000 00000000 00000000
-
-define void @func(%class.ImageLoader* %image) optsize ssp uwtable {
-entry:
- br label %for.cond1.preheader
-
-for.cond1.preheader: ; preds = %for.inc10, %entry
- %p.019 = phi %"struct.dyld::MappedRanges"* [ @G1, %entry ], [ %1, %for.inc10 ]
- br label %for.body3
-
-for.body3: ; preds = %for.inc, %for.cond1.preheader
- %indvars.iv = phi i64 [ 0, %for.cond1.preheader ], [ %indvars.iv.next, %for.inc ]
- %image4 = getelementptr inbounds %"struct.dyld::MappedRanges"* %p.019, i64 0, i32 0, i64 %indvars.iv, i32 0
- %0 = load %class.ImageLoader** %image4, align 8
- %cmp5 = icmp eq %class.ImageLoader* %0, %image
- br i1 %cmp5, label %if.then, label %for.inc
-
-if.then: ; preds = %for.body3
- tail call void @OSMemoryBarrier() optsize
- store %class.ImageLoader* null, %class.ImageLoader** %image4, align 8
- br label %for.inc
-
-for.inc: ; preds = %if.then, %for.body3
- %indvars.iv.next = add i64 %indvars.iv, 1
- %lftr.wideiv = trunc i64 %indvars.iv.next to i32
- %exitcond = icmp eq i32 %lftr.wideiv, 400
- br i1 %exitcond, label %for.inc10, label %for.body3
-
-for.inc10: ; preds = %for.inc
- %next = getelementptr inbounds %"struct.dyld::MappedRanges"* %p.019, i64 0, i32 1
- %1 = load %"struct.dyld::MappedRanges"** %next, align 8
- %cmp = icmp eq %"struct.dyld::MappedRanges"* %1, null
- br i1 %cmp, label %for.end11, label %for.cond1.preheader
-
-for.end11: ; preds = %for.inc10
- ret void
-}
diff --git a/test/CodeGen/X86/non-unique-sections.ll b/test/CodeGen/X86/non-unique-sections.ll
new file mode 100644
index 0000000..e588b9d
--- /dev/null
+++ b/test/CodeGen/X86/non-unique-sections.ll
@@ -0,0 +1,15 @@
+; RUN: llc < %s -mtriple=x86_64-pc-linux -function-sections -unique-section-names=false | FileCheck %s
+
+; CHECK: .section .text,"ax",@progbits,unique
+; CHECK-NOT: section
+; CHECK: f:
+define void @f() {
+ ret void
+}
+
+; CHECK: .section .text,"ax",@progbits,unique
+; CHECK-NOT: section
+; CHECK: g:
+define void @g() {
+ ret void
+}
diff --git a/test/CodeGen/X86/nontemporal-2.ll b/test/CodeGen/X86/nontemporal-2.ll
index 9d0cb9a..f62f372 100644
--- a/test/CodeGen/X86/nontemporal-2.ll
+++ b/test/CodeGen/X86/nontemporal-2.ll
@@ -28,4 +28,4 @@ define void @test3(<2 x double>* %dst) {
ret void
}
-!1 = metadata !{i32 1}
+!1 = !{i32 1}
diff --git a/test/CodeGen/X86/nontemporal.ll b/test/CodeGen/X86/nontemporal.ll
index ae04435..f9385df 100644
--- a/test/CodeGen/X86/nontemporal.ll
+++ b/test/CodeGen/X86/nontemporal.ll
@@ -19,4 +19,4 @@ define void @f(<4 x float> %A, i8* %B, <2 x double> %C, i32 %D, <2 x i64> %E) {
ret void
}
-!0 = metadata !{i32 1}
+!0 = !{i32 1}
diff --git a/test/CodeGen/X86/norex-subreg.ll b/test/CodeGen/X86/norex-subreg.ll
index 2c529fd..fb41ded 100644
--- a/test/CodeGen/X86/norex-subreg.ll
+++ b/test/CodeGen/X86/norex-subreg.ll
@@ -1,5 +1,5 @@
-; RUN: llc -O0 < %s
-; RUN: llc < %s
+; RUN: llc -O0 < %s -verify-machineinstrs
+; RUN: llc < %s -verify-machineinstrs
target triple = "x86_64-apple-macosx10.7"
; This test case extracts a sub_8bit_hi sub-register:
diff --git a/test/CodeGen/X86/nosse-varargs.ll b/test/CodeGen/X86/nosse-varargs.ll
index e6da0ab..8070c47 100644
--- a/test/CodeGen/X86/nosse-varargs.ll
+++ b/test/CodeGen/X86/nosse-varargs.ll
@@ -1,11 +1,12 @@
-; RUN: llvm-as < %s > %t
-; RUN: llc -march=x86-64 -mattr=-sse < %t | not grep xmm
-; RUN: llc -march=x86-64 < %t | grep xmm
+; RUN: llc < %s -march=x86-64 -mattr=-sse | FileCheck %s -check-prefix=NOSSE
+; RUN: llc < %s -march=x86-64 | FileCheck %s -check-prefix=YESSSE
; PR3403
target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128"
target triple = "x86_64-unknown-linux-gnu"
%struct.__va_list_tag = type { i32, i32, i8*, i8* }
+; NOSSE-NOT: xmm
+; YESSSE: xmm
define i32 @foo(float %a, i8* nocapture %fmt, ...) nounwind {
entry:
%ap = alloca [1 x %struct.__va_list_tag], align 8 ; <[1 x %struct.__va_list_tag]*> [#uses=4]
diff --git a/test/CodeGen/X86/null-streamer.ll b/test/CodeGen/X86/null-streamer.ll
index b559729..f6eb0e1 100644
--- a/test/CodeGen/X86/null-streamer.ll
+++ b/test/CodeGen/X86/null-streamer.ll
@@ -14,16 +14,16 @@ define void @f1() {
!llvm.dbg.cu = !{!0}
!llvm.module.flags = !{!11, !13}
-!0 = metadata !{metadata !"0x11\004\00 \001\00\000\00\000", metadata !1, metadata !2, metadata !2, metadata !3, metadata !9, metadata !2} ; [ DW_TAG_compile_unit ]
-!1 = metadata !{metadata !"", metadata !""}
-!2 = metadata !{}
-!3 = metadata !{metadata !4}
-!4 = metadata !{metadata !"0x2e\00\00\00\002\000\001\000\006\00256\001\002", metadata !1, metadata !5, metadata !6, null, i32 ()* null, null, null, metadata !2} ; [ DW_TAG_subprogram ]
-!5 = metadata !{metadata !"0x29", metadata !1} ; [ DW_TAG_file_type ]
-!6 = metadata !{metadata !"0x15\00\000\000\000\000\000\000", i32 0, null, null, metadata !7, null, null, null} ; [ DW_TAG_subroutine_type ]
-!7 = metadata !{metadata !8}
-!8 = metadata !{metadata !"0x24\00\000\0032\0032\000\000\005", null, null} ; [ DW_TAG_base_type ]
-!9 = metadata !{metadata !10}
-!10 = metadata !{metadata !"0x34\00i\00i\00_ZL1i\001\001\001", null, metadata !5, metadata !8, null, null} ; [ DW_TAG_variable ]
-!11 = metadata !{i32 2, metadata !"Dwarf Version", i32 3}
-!13 = metadata !{i32 1, metadata !"Debug Info Version", i32 2}
+!0 = !{!"0x11\004\00 \001\00\000\00\000", !1, !2, !2, !3, !9, !2} ; [ DW_TAG_compile_unit ]
+!1 = !{!"", !""}
+!2 = !{}
+!3 = !{!4}
+!4 = !{!"0x2e\00\00\00\002\000\001\000\006\00256\001\002", !1, !5, !6, null, i32 ()* null, null, null, !2} ; [ DW_TAG_subprogram ]
+!5 = !{!"0x29", !1} ; [ DW_TAG_file_type ]
+!6 = !{!"0x15\00\000\000\000\000\000\000", i32 0, null, null, !7, null, null, null} ; [ DW_TAG_subroutine_type ]
+!7 = !{!8}
+!8 = !{!"0x24\00\000\0032\0032\000\000\005", null, null} ; [ DW_TAG_base_type ]
+!9 = !{!10}
+!10 = !{!"0x34\00i\00i\00_ZL1i\001\001\001", null, !5, !8, null, null} ; [ DW_TAG_variable ]
+!11 = !{i32 2, !"Dwarf Version", i32 3}
+!13 = !{i32 1, !"Debug Info Version", i32 2}
diff --git a/test/CodeGen/X86/objc-gc-module-flags.ll b/test/CodeGen/X86/objc-gc-module-flags.ll
index 8cb2c03..f197510 100644
--- a/test/CodeGen/X86/objc-gc-module-flags.ll
+++ b/test/CodeGen/X86/objc-gc-module-flags.ll
@@ -7,7 +7,7 @@
!llvm.module.flags = !{!0, !1, !2, !3}
-!0 = metadata !{i32 1, metadata !"Objective-C Version", i32 2}
-!1 = metadata !{i32 1, metadata !"Objective-C Image Info Version", i32 0}
-!2 = metadata !{i32 1, metadata !"Objective-C Image Info Section", metadata !"__DATA, __objc_imageinfo, regular, no_dead_strip"}
-!3 = metadata !{i32 1, metadata !"Objective-C Garbage Collection", i32 2}
+!0 = !{i32 1, !"Objective-C Version", i32 2}
+!1 = !{i32 1, !"Objective-C Image Info Version", i32 0}
+!2 = !{i32 1, !"Objective-C Image Info Section", !"__DATA, __objc_imageinfo, regular, no_dead_strip"}
+!3 = !{i32 1, !"Objective-C Garbage Collection", i32 2}
diff --git a/test/CodeGen/X86/odr_comdat.ll b/test/CodeGen/X86/odr_comdat.ll
deleted file mode 100644
index 547334c..0000000
--- a/test/CodeGen/X86/odr_comdat.ll
+++ /dev/null
@@ -1,16 +0,0 @@
-; RUN: llc < %s -mtriple=x86_64-unknown-linux-gnu | FileCheck %s -check-prefix=X86LINUX
-
-; Checking that a comdat group gets generated correctly for a static member
-; of instantiated C++ templates.
-; see http://sourcery.mentor.com/public/cxx-abi/abi.html#vague-itemplate
-; section 5.2.6 Instantiated templates
-; "Any static member data object is emitted in a COMDAT identified by its mangled
-; name, in any object file with a reference to its name symbol."
-
-; Case 1: variable is not explicitly initialized, and ends up in a .bss section
-; X86LINUX: .section .bss._ZN1CIiE1iE,"aGw",@nobits,_ZN1CIiE1iE,comdat
-@_ZN1CIiE1iE = weak_odr global i32 0, align 4
-
-; Case 2: variable is explicitly initialized, and ends up in a .data section
-; X86LINUX: .section .data._ZN1CIiE1jE,"aGw",@progbits,_ZN1CIiE1jE,comdat
-@_ZN1CIiE1jE = weak_odr global i32 12, align 4
diff --git a/test/CodeGen/X86/palignr.ll b/test/CodeGen/X86/palignr.ll
index 3efcc2e..dfa2ced 100644
--- a/test/CodeGen/X86/palignr.ll
+++ b/test/CodeGen/X86/palignr.ll
@@ -40,7 +40,9 @@ define <4 x i32> @test3(<4 x i32> %A, <4 x i32> %B) nounwind {
;
; CHECK-YONAH-LABEL: test3:
; CHECK-YONAH: # BB#0:
-; CHECK-YONAH-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,2],xmm1[2,0]
+; CHECK-YONAH-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,0,1,1]
+; CHECK-YONAH-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,2,2,3]
+; CHECK-YONAH-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; CHECK-YONAH-NEXT: retl
%C = shufflevector <4 x i32> %A, <4 x i32> %B, <4 x i32> < i32 1, i32 2, i32 undef, i32 4 >
ret <4 x i32> %C
diff --git a/test/CodeGen/X86/peep-test-2.ll b/test/CodeGen/X86/peep-test-2.ll
index e4bafbb..e43b8ef 100644
--- a/test/CodeGen/X86/peep-test-2.ll
+++ b/test/CodeGen/X86/peep-test-2.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=x86 | FileCheck %s
+; RUN: llc < %s -verify-machineinstrs -march=x86 | FileCheck %s
; CHECK: testl
diff --git a/test/CodeGen/X86/phys_subreg_coalesce-3.ll b/test/CodeGen/X86/phys_subreg_coalesce-3.ll
index 6eb97c3..12a3adf 100644
--- a/test/CodeGen/X86/phys_subreg_coalesce-3.ll
+++ b/test/CodeGen/X86/phys_subreg_coalesce-3.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -mtriple=i386-apple-darwin -mcpu=corei7 | FileCheck %s
+; RUN: llc < %s -verify-machineinstrs -mtriple=i386-apple-darwin -mcpu=corei7 | FileCheck %s
; rdar://5571034
; This requires physreg joining, %vreg13 is live everywhere:
diff --git a/test/CodeGen/X86/pic_jumptable.ll b/test/CodeGen/X86/pic_jumptable.ll
index bdd8859..d66ff0c 100644
--- a/test/CodeGen/X86/pic_jumptable.ll
+++ b/test/CodeGen/X86/pic_jumptable.ll
@@ -10,7 +10,7 @@
declare void @_Z3bari(i32)
-; CHECK-LINUX: .text._Z3fooILi1EEvi,"axG",@progbits,_Z3fooILi1EEvi,comdat
+; CHECK-LINUX: _Z3fooILi1EEvi:
define linkonce void @_Z3fooILi1EEvi(i32 %Y) nounwind {
entry:
; CHECK: L0$pb
diff --git a/test/CodeGen/X86/pmul.ll b/test/CodeGen/X86/pmul.ll
index 8937d6a..6bfa656 100644
--- a/test/CodeGen/X86/pmul.ll
+++ b/test/CodeGen/X86/pmul.ll
@@ -3,16 +3,19 @@
define <4 x i32> @a(<4 x i32> %i) nounwind {
; SSE2-LABEL: a:
-; SSE2: movdqa {{.*}}, %[[X1:xmm[0-9]+]]
-; SSE2-NEXT: pshufd {{.*}} # [[X2:xmm[0-9]+]] = xmm0[1,1,3,3]
-; SSE2-NEXT: pmuludq %[[X1]], %xmm0
-; SSE2-NEXT: pmuludq %[[X1]], %[[X2]]
-; SSE2-NEXT: shufps {{.*}} # xmm0 = xmm0[0,2],[[X2]][0,2]
-; SSE2-NEXT: shufps {{.*}} # xmm0 = xmm0[0,2,1,3]
+; SSE2: # BB#0: # %entry
+; SSE2-NEXT: movdqa {{.*#+}} xmm1 = [117,117,117,117]
+; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,1,3,3]
+; SSE2-NEXT: pmuludq %xmm1, %xmm0
+; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
+; SSE2-NEXT: pmuludq %xmm1, %xmm2
+; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm2[0,2,2,3]
+; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; SSE2-NEXT: retq
;
; SSE41-LABEL: a:
-; SSE41: pmulld
+; SSE41: # BB#0: # %entry
+; SSE41-NEXT: pmulld {{.*}}(%rip), %xmm0
; SSE41-NEXT: retq
entry:
%A = mul <4 x i32> %i, < i32 117, i32 117, i32 117, i32 117 >
@@ -21,9 +24,19 @@ entry:
define <2 x i64> @b(<2 x i64> %i) nounwind {
; ALL-LABEL: b:
-; ALL: pmuludq
-; ALL: pmuludq
-; ALL: pmuludq
+; ALL: # BB#0: # %entry
+; ALL-NEXT: movdqa {{.*#+}} xmm1 = [117,117]
+; ALL-NEXT: movdqa %xmm0, %xmm2
+; ALL-NEXT: pmuludq %xmm1, %xmm2
+; ALL-NEXT: pxor %xmm3, %xmm3
+; ALL-NEXT: pmuludq %xmm0, %xmm3
+; ALL-NEXT: psllq $32, %xmm3
+; ALL-NEXT: paddq %xmm3, %xmm2
+; ALL-NEXT: psrlq $32, %xmm0
+; ALL-NEXT: pmuludq %xmm1, %xmm0
+; ALL-NEXT: psllq $32, %xmm0
+; ALL-NEXT: paddq %xmm2, %xmm0
+; ALL-NEXT: retq
entry:
%A = mul <2 x i64> %i, < i64 117, i64 117 >
ret <2 x i64> %A
@@ -31,16 +44,19 @@ entry:
define <4 x i32> @c(<4 x i32> %i, <4 x i32> %j) nounwind {
; SSE2-LABEL: c:
-; SSE2: pshufd {{.*}} # [[X2:xmm[0-9]+]] = xmm0[1,1,3,3]
+; SSE2: # BB#0: # %entry
+; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,1,3,3]
; SSE2-NEXT: pmuludq %xmm1, %xmm0
-; SSE2-NEXT: pshufd {{.*}} # xmm1 = xmm1[1,1,3,3]
-; SSE2-NEXT: pmuludq %[[X2]], %xmm1
-; SSE2-NEXT: shufps {{.*}} # xmm0 = xmm0[0,2],xmm1[0,2]
-; SSE2-NEXT: shufps {{.*}} # xmm0 = xmm0[0,2,1,3]
+; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
+; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
+; SSE2-NEXT: pmuludq %xmm2, %xmm1
+; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
+; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; SSE2-NEXT: retq
;
; SSE41-LABEL: c:
-; SSE41: pmulld
+; SSE41: # BB#0: # %entry
+; SSE41-NEXT: pmulld %xmm1, %xmm0
; SSE41-NEXT: retq
entry:
%A = mul <4 x i32> %i, %j
@@ -49,9 +65,19 @@ entry:
define <2 x i64> @d(<2 x i64> %i, <2 x i64> %j) nounwind {
; ALL-LABEL: d:
-; ALL: pmuludq
-; ALL: pmuludq
-; ALL: pmuludq
+; ALL: # BB#0: # %entry
+; ALL-NEXT: movdqa %xmm0, %xmm2
+; ALL-NEXT: pmuludq %xmm1, %xmm2
+; ALL-NEXT: movdqa %xmm1, %xmm3
+; ALL-NEXT: psrlq $32, %xmm3
+; ALL-NEXT: pmuludq %xmm0, %xmm3
+; ALL-NEXT: psllq $32, %xmm3
+; ALL-NEXT: paddq %xmm3, %xmm2
+; ALL-NEXT: psrlq $32, %xmm0
+; ALL-NEXT: pmuludq %xmm1, %xmm0
+; ALL-NEXT: psllq $32, %xmm0
+; ALL-NEXT: paddq %xmm2, %xmm0
+; ALL-NEXT: retq
entry:
%A = mul <2 x i64> %i, %j
ret <2 x i64> %A
@@ -61,20 +87,32 @@ declare void @foo()
define <4 x i32> @e(<4 x i32> %i, <4 x i32> %j) nounwind {
; SSE2-LABEL: e:
-; SSE2: movdqa {{[0-9]*}}(%rsp), %xmm0
-; SSE2-NEXT: pshufd {{.*}} # [[X1:xmm[0-9]+]] = xmm0[1,1,3,3]
-; SSE2-NEXT: movdqa {{[0-9]*}}(%rsp), %[[X2:xmm[0-9]+]]
-; SSE2-NEXT: pmuludq %[[X2]], %xmm0
-; SSE2-NEXT: pshufd {{.*}} # [[X2]] = [[X2]][1,1,3,3]
-; SSE2-NEXT: pmuludq %[[X1]], %[[X2]]
-; SSE2-NEXT: shufps {{.*}} # xmm0 = xmm0[0,2],[[X2]][0,2]
-; SSE2-NEXT: shufps {{.*}} # xmm0 = xmm0[0,2,1,3]
-; SSE2-NEXT: addq ${{[0-9]+}}, %rsp
+; SSE2: # BB#0: # %entry
+; SSE2-NEXT: subq $40, %rsp
+; SSE2-NEXT: movaps %xmm1, {{[0-9]+}}(%rsp) # 16-byte Spill
+; SSE2-NEXT: movaps %xmm0, (%rsp) # 16-byte Spill
+; SSE2-NEXT: callq foo
+; SSE2-NEXT: movdqa (%rsp), %xmm0 # 16-byte Reload
+; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
+; SSE2-NEXT: movdqa {{[0-9]+}}(%rsp), %xmm2 # 16-byte Reload
+; SSE2-NEXT: pmuludq %xmm2, %xmm0
+; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
+; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm2[1,1,3,3]
+; SSE2-NEXT: pmuludq %xmm1, %xmm2
+; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm2[0,2,2,3]
+; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
+; SSE2-NEXT: addq $40, %rsp
; SSE2-NEXT: retq
;
; SSE41-LABEL: e:
-; SSE41: pmulld {{[0-9]+}}(%rsp), %xmm
-; SSE41-NEXT: addq ${{[0-9]+}}, %rsp
+; SSE41: # BB#0: # %entry
+; SSE41-NEXT: subq $40, %rsp
+; SSE41-NEXT: movaps %xmm1, {{[0-9]+}}(%rsp) # 16-byte Spill
+; SSE41-NEXT: movaps %xmm0, (%rsp) # 16-byte Spill
+; SSE41-NEXT: callq foo
+; SSE41-NEXT: movdqa (%rsp), %xmm0 # 16-byte Reload
+; SSE41-NEXT: pmulld {{[0-9]+}}(%rsp), %xmm0 # 16-byte Folded Reload
+; SSE41-NEXT: addq $40, %rsp
; SSE41-NEXT: retq
entry:
; Use a call to force spills.
@@ -85,9 +123,26 @@ entry:
define <2 x i64> @f(<2 x i64> %i, <2 x i64> %j) nounwind {
; ALL-LABEL: f:
-; ALL: pmuludq
-; ALL: pmuludq
-; ALL: pmuludq
+; ALL: # BB#0: # %entry
+; ALL-NEXT: subq $40, %rsp
+; ALL-NEXT: movaps %xmm1, {{[0-9]+}}(%rsp) # 16-byte Spill
+; ALL-NEXT: movaps %xmm0, (%rsp) # 16-byte Spill
+; ALL-NEXT: callq foo
+; ALL-NEXT: movdqa (%rsp), %xmm0 # 16-byte Reload
+; ALL-NEXT: movdqa %xmm0, %xmm2
+; ALL-NEXT: movdqa {{[0-9]+}}(%rsp), %xmm3 # 16-byte Reload
+; ALL-NEXT: pmuludq %xmm3, %xmm2
+; ALL-NEXT: movdqa %xmm3, %xmm1
+; ALL-NEXT: psrlq $32, %xmm1
+; ALL-NEXT: pmuludq %xmm0, %xmm1
+; ALL-NEXT: psllq $32, %xmm1
+; ALL-NEXT: paddq %xmm1, %xmm2
+; ALL-NEXT: psrlq $32, %xmm0
+; ALL-NEXT: pmuludq %xmm3, %xmm0
+; ALL-NEXT: psllq $32, %xmm0
+; ALL-NEXT: paddq %xmm2, %xmm0
+; ALL-NEXT: addq $40, %rsp
+; ALL-NEXT: retq
entry:
; Use a call to force spills.
call void @foo()
diff --git a/test/CodeGen/X86/pointer-vector.ll b/test/CodeGen/X86/pointer-vector.ll
index 0ee9987..5e0c2da 100644
--- a/test/CodeGen/X86/pointer-vector.ll
+++ b/test/CodeGen/X86/pointer-vector.ll
@@ -81,8 +81,7 @@ define <4 x i32*> @INT2PTR1(<4 x i8>* %p) nounwind {
entry:
%G = load <4 x i8>* %p
;CHECK: movl
-;CHECK: pmovzxbd
-;CHECK: pand
+;CHECK: pmovzxbd (%
%K = inttoptr <4 x i8> %G to <4 x i32*>
;CHECK: ret
ret <4 x i32*> %K
diff --git a/test/CodeGen/X86/pr11468.ll b/test/CodeGen/X86/pr11468.ll
index f7e9adb..f721df1 100644
--- a/test/CodeGen/X86/pr11468.ll
+++ b/test/CodeGen/X86/pr11468.ll
@@ -29,5 +29,5 @@ entry:
; CHECK: popq %rbp
}
-!0 = metadata !{i32 125}
+!0 = !{i32 125}
diff --git a/test/CodeGen/X86/pr12360.ll b/test/CodeGen/X86/pr12360.ll
index 8b30596..6734036 100644
--- a/test/CodeGen/X86/pr12360.ll
+++ b/test/CodeGen/X86/pr12360.ll
@@ -22,7 +22,7 @@ entry:
ret i1 %tobool
}
-!0 = metadata !{i8 0, i8 2}
+!0 = !{i8 0, i8 2}
; check that we don't build a "trunc" from i1 to i1, which would assert.
diff --git a/test/CodeGen/X86/pr15267.ll b/test/CodeGen/X86/pr15267.ll
index b4dc5fd..90df990 100644
--- a/test/CodeGen/X86/pr15267.ll
+++ b/test/CodeGen/X86/pr15267.ll
@@ -4,8 +4,7 @@ define <4 x i3> @test1(<4 x i3>* %in) nounwind {
%ret = load <4 x i3>* %in, align 1
ret <4 x i3> %ret
}
-
-; CHECK: test1
+; CHECK-LABEL: test1
; CHECK: movzwl
; CHECK: shrl $3
; CHECK: andl $7
@@ -25,7 +24,7 @@ define <4 x i1> @test2(<4 x i1>* %in) nounwind {
ret <4 x i1> %ret
}
-; CHECK: test2
+; CHECK-LABEL: test2
; CHECK: movzbl
; CHECK: shrl
; CHECK: andl $1
@@ -46,7 +45,7 @@ define <4 x i64> @test3(<4 x i1>* %in) nounwind {
ret <4 x i64> %sext
}
-; CHECK: test3
+; CHECK-LABEL: test3
; CHECK: movzbl
; CHECK: movq
; CHECK: shlq
@@ -67,3 +66,71 @@ define <4 x i64> @test3(<4 x i1>* %in) nounwind {
; CHECK: vpunpcklqdq
; CHECK: vinsertf128
; CHECK: ret
+
+define <16 x i4> @test4(<16 x i4>* %in) nounwind {
+ %ret = load <16 x i4>* %in, align 1
+ ret <16 x i4> %ret
+}
+
+; CHECK-LABEL: test4
+; CHECK: movl
+; CHECK-NEXT: shrl
+; CHECK-NEXT: andl
+; CHECK-NEXT: movl
+; CHECK-NEXT: andl
+; CHECK-NEXT: vmovd
+; CHECK-NEXT: vpinsrb
+; CHECK-NEXT: movl
+; CHECK-NEXT: shrl
+; CHECK-NEXT: andl
+; CHECK-NEXT: vpinsrb
+; CHECK-NEXT: movl
+; CHECK-NEXT: shrl
+; CHECK-NEXT: andl
+; CHECK-NEXT: vpinsrb
+; CHECK-NEXT: movl
+; CHECK-NEXT: shrl
+; CHECK-NEXT: andl
+; CHECK-NEXT: vpinsrb
+; CHECK-NEXT: movl
+; CHECK-NEXT: shrl
+; CHECK-NEXT: andl
+; CHECK-NEXT: vpinsrb
+; CHECK-NEXT: movl
+; CHECK-NEXT: shrl
+; CHECK-NEXT: andl
+; CHECK-NEXT: vpinsrb
+; CHECK-NEXT: movl
+; CHECK-NEXT: shrl
+; CHECK-NEXT: vpinsrb
+; CHECK-NEXT: movq
+; CHECK-NEXT: shrq
+; CHECK-NEXT: andl
+; CHECK-NEXT: vpinsrb
+; CHECK-NEXT: movq
+; CHECK-NEXT: shrq
+; CHECK-NEXT: andl
+; CHECK-NEXT: vpinsrb
+; CHECK-NEXT: movq
+; CHECK-NEXT: shrq
+; CHECK-NEXT: andl
+; CHECK-NEXT: vpinsrb
+; CHECK-NEXT: movq
+; CHECK-NEXT: shrq
+; CHECK-NEXT: andl
+; CHECK-NEXT: vpinsrb
+; CHECK-NEXT: movq
+; CHECK-NEXT: shrq
+; CHECK-NEXT: andl
+; CHECK-NEXT: vpinsrb
+; CHECK-NEXT: movq
+; CHECK-NEXT: shrq
+; CHECK-NEXT: andl
+; CHECK-NEXT: vpinsrb
+; CHECK-NEXT: movq
+; CHECK-NEXT: shrq
+; CHECK-NEXT: andl
+; CHECK-NEXT: vpinsrb
+; CHECK-NEXT: shrq
+; CHECK-NEXT: vpinsrb
+; CHECK-NEXT: retq
diff --git a/test/CodeGen/X86/pr18846.ll b/test/CodeGen/X86/pr18846.ll
index 27801be..c65bc79 100644
--- a/test/CodeGen/X86/pr18846.ll
+++ b/test/CodeGen/X86/pr18846.ll
@@ -131,9 +131,9 @@ attributes #1 = { nounwind }
!llvm.ident = !{!0}
-!0 = metadata !{metadata !"clang version 3.5 "}
-!1 = metadata !{metadata !2, metadata !2, i64 0}
-!2 = metadata !{metadata !"float", metadata !3, i64 0}
-!3 = metadata !{metadata !"omnipotent char", metadata !4, i64 0}
-!4 = metadata !{metadata !"Simple C/C++ TBAA"}
-!5 = metadata !{metadata !3, metadata !3, i64 0}
+!0 = !{!"clang version 3.5 "}
+!1 = !{!2, !2, i64 0}
+!2 = !{!"float", !3, i64 0}
+!3 = !{!"omnipotent char", !4, i64 0}
+!4 = !{!"Simple C/C++ TBAA"}
+!5 = !{!3, !3, i64 0}
diff --git a/test/CodeGen/X86/pr21792.ll b/test/CodeGen/X86/pr21792.ll
new file mode 100644
index 0000000..4138afc
--- /dev/null
+++ b/test/CodeGen/X86/pr21792.ll
@@ -0,0 +1,41 @@
+; RUN: llc -mtriple=x86_64-linux -mcpu=corei7 < %s | FileCheck %s
+; This fixes a missing case in the MI scheduler's constrainLocalCopy exposed by
+; PR21792.
+
+@stuff = external constant [256 x double], align 16
+
+define void @func(<4 x float> %vx) {
+entry:
+ %tmp2 = bitcast <4 x float> %vx to <2 x i64>
+ %and.i = and <2 x i64> %tmp2, <i64 8727373547504, i64 8727373547504>
+ %tmp3 = bitcast <2 x i64> %and.i to <4 x i32>
+ %index.sroa.0.0.vec.extract = extractelement <4 x i32> %tmp3, i32 0
+ %idx.ext = sext i32 %index.sroa.0.0.vec.extract to i64
+ %add.ptr = getelementptr inbounds i8* bitcast ([256 x double]* @stuff to i8*), i64 %idx.ext
+ %tmp4 = bitcast i8* %add.ptr to double*
+ %index.sroa.0.4.vec.extract = extractelement <4 x i32> %tmp3, i32 1
+ %idx.ext5 = sext i32 %index.sroa.0.4.vec.extract to i64
+ %add.ptr6 = getelementptr inbounds i8* bitcast ([256 x double]* @stuff to i8*), i64 %idx.ext5
+ %tmp5 = bitcast i8* %add.ptr6 to double*
+ %index.sroa.0.8.vec.extract = extractelement <4 x i32> %tmp3, i32 2
+ %idx.ext14 = sext i32 %index.sroa.0.8.vec.extract to i64
+ %add.ptr15 = getelementptr inbounds i8* bitcast ([256 x double]* @stuff to i8*), i64 %idx.ext14
+ %tmp6 = bitcast i8* %add.ptr15 to double*
+ %index.sroa.0.12.vec.extract = extractelement <4 x i32> %tmp3, i32 3
+ %idx.ext19 = sext i32 %index.sroa.0.12.vec.extract to i64
+ %add.ptr20 = getelementptr inbounds i8* bitcast ([256 x double]* @stuff to i8*), i64 %idx.ext19
+ %tmp7 = bitcast i8* %add.ptr20 to double*
+ %add.ptr46 = getelementptr inbounds i8* bitcast (double* getelementptr inbounds ([256 x double]* @stuff, i64 0, i64 1) to i8*), i64 %idx.ext
+ %tmp16 = bitcast i8* %add.ptr46 to double*
+ %add.ptr51 = getelementptr inbounds i8* bitcast (double* getelementptr inbounds ([256 x double]* @stuff, i64 0, i64 1) to i8*), i64 %idx.ext5
+ %tmp17 = bitcast i8* %add.ptr51 to double*
+ call void @toto(double* %tmp4, double* %tmp5, double* %tmp6, double* %tmp7, double* %tmp16, double* %tmp17)
+ ret void
+; CHECK-LABEL: func:
+; CHECK: pextrq $1, %xmm0,
+; CHECK-NEXT: movd %xmm0, %r[[AX:..]]
+; CHECK-NEXT: movslq %e[[AX]],
+; CHECK-NEXT: sarq $32, %r[[AX]]
+}
+
+declare void @toto(double*, double*, double*, double*, double*, double*)
diff --git a/test/CodeGen/X86/pr22019.ll b/test/CodeGen/X86/pr22019.ll
new file mode 100644
index 0000000..4cee5d7
--- /dev/null
+++ b/test/CodeGen/X86/pr22019.ll
@@ -0,0 +1,23 @@
+; RUN: llc < %s | FileCheck %s
+target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
+target triple = "x86_64-unknown-linux-gnu"
+
+module asm "pselect = __pselect"
+module asm "var = __var"
+module asm "alias = __alias"
+; CHECK: pselect = __pselect
+; CHECK: var = __var
+; CHECK: alias = __alias
+
+; CHECK: pselect:
+; CHECK: retq
+define void @pselect() {
+ ret void
+}
+
+; CHECK: var:
+; CHECK: .long 0
+@var = global i32 0
+
+; CHECK: alias = var
+@alias = alias i32* @var
diff --git a/test/CodeGen/X86/pr22103.ll b/test/CodeGen/X86/pr22103.ll
new file mode 100644
index 0000000..77c0751
--- /dev/null
+++ b/test/CodeGen/X86/pr22103.ll
@@ -0,0 +1,19 @@
+; RUN: llc < %s | FileCheck %s
+; Don't try to emit a direct call through a TLS global.
+; This fixes PR22103.
+
+target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
+target triple = "x86_64-unknown-linux-gnu"
+
+@a = external thread_local global i64
+
+; Function Attrs: nounwind
+define void @_Z1fv() {
+; CHECK-NOT: callq *$a
+; CHECK: movq %fs:0, [[RAX:%r..]]
+; CHECK-NEXT: addq a@GOTTPOFF(%rip), [[RAX]]
+; CHECK-NEXT: callq *[[RAX]]
+entry:
+ call void bitcast (i64* @a to void ()*)()
+ ret void
+}
diff --git a/test/CodeGen/X86/pre-ra-sched.ll b/test/CodeGen/X86/pre-ra-sched.ll
index 70135d4..bb4c126 100644
--- a/test/CodeGen/X86/pre-ra-sched.ll
+++ b/test/CodeGen/X86/pre-ra-sched.ll
@@ -1,4 +1,4 @@
-; RUN-disabled: llc < %s -mtriple=x86_64-apple-macosx -pre-RA-sched=ilp -debug-only=pre-RA-sched \
+; RUN-disabled: llc < %s -verify-machineinstrs -mtriple=x86_64-apple-macosx -pre-RA-sched=ilp -debug-only=pre-RA-sched \
; RUN-disabled: 2>&1 | FileCheck %s
; RUN: true
; REQUIRES: asserts
diff --git a/test/CodeGen/X86/prefixdata.ll b/test/CodeGen/X86/prefixdata.ll
index 2ec1892..9bb54a2 100644
--- a/test/CodeGen/X86/prefixdata.ll
+++ b/test/CodeGen/X86/prefixdata.ll
@@ -2,16 +2,17 @@
@i = linkonce_odr global i32 1
-; CHECK: f:
-; CHECK-NEXT: .cfi_startproc
+; CHECK: .type f,@function
; CHECK-NEXT: .long 1
+; CHECK-NEXT: # 0x1
+; CHECK-NEXT: f:
define void @f() prefix i32 1 {
ret void
}
-; CHECK: g:
-; CHECK-NEXT: .cfi_startproc
+; CHECK: .type g,@function
; CHECK-NEXT: .quad i
+; CHECK-NEXT: g:
define void @g() prefix i32* @i {
ret void
}
diff --git a/test/CodeGen/X86/prologuedata.ll b/test/CodeGen/X86/prologuedata.ll
new file mode 100644
index 0000000..6a50ddb
--- /dev/null
+++ b/test/CodeGen/X86/prologuedata.ll
@@ -0,0 +1,17 @@
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown | FileCheck %s
+
+@i = linkonce_odr global i32 1
+
+; CHECK: f:
+; CHECK-NEXT: .cfi_startproc
+; CHECK-NEXT: .long 1
+define void @f() prologue i32 1 {
+ ret void
+}
+
+; CHECK: g:
+; CHECK-NEXT: .cfi_startproc
+; CHECK-NEXT: .quad i
+define void @g() prologue i32* @i {
+ ret void
+}
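
Side by side, prefixdata.ll above and this new prologuedata.ll pin down the
layout split: prefix data is emitted immediately before the function symbol,
prologue data immediately after it at the top of the function body. A combined
sketch, assuming the two attributes may coexist on one function (the
expected-layout comment is illustrative):

@j = linkonce_odr global i32 1

; Expected layout (sketch):
;   .type h,@function
;   .long 1          # prefix data, before the symbol
; h:
;   .quad j          # prologue data, after the symbol
define void @h() prefix i32 1 prologue i32* @j {
  ret void
}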
diff --git a/test/CodeGen/X86/pshufb-mask-comments.ll b/test/CodeGen/X86/pshufb-mask-comments.ll
index 7fc9890..ca5a02c 100644
--- a/test/CodeGen/X86/pshufb-mask-comments.ll
+++ b/test/CodeGen/X86/pshufb-mask-comments.ll
@@ -27,4 +27,26 @@ define <16 x i8> @test3(<16 x i8> %V) {
ret <16 x i8> %1
}
+; Test that we won't crash when the constant is reused by another instruction.
+
+define <16 x i8> @test4(<2 x i64>* %V) {
+; CHECK-LABEL: test4
+; CHECK: pshufb {{.*}}
+ store <2 x i64> <i64 1084818905618843912, i64 506097522914230528>, <2 x i64>* %V, align 16
+ %1 = tail call <16 x i8> @llvm.x86.ssse3.pshuf.b.128(<16 x i8> undef, <16 x i8> <i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15, i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7>)
+ ret <16 x i8> %1
+}
+
+define <16 x i8> @test5() {
+; CHECK-LABEL: test5
+; CHECK: pshufb {{.*}}
+ store <2 x i64> <i64 1, i64 0>, <2 x i64>* undef, align 16
+ %l = load <2 x i64>* undef, align 16
+ %shuffle = shufflevector <2 x i64> %l, <2 x i64> undef, <2 x i32> zeroinitializer
+ store <2 x i64> %shuffle, <2 x i64>* undef, align 16
+ %1 = load <16 x i8>* undef, align 16
+ %2 = call <16 x i8> @llvm.x86.ssse3.pshuf.b.128(<16 x i8> undef, <16 x i8> %1)
+ ret <16 x i8> %2
+}
+
declare <16 x i8> @llvm.x86.ssse3.pshuf.b.128(<16 x i8>, <16 x i8>) nounwind readnone
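
For reference, the feature these crash tests guard is the decoded-mask comment
the asm printer attaches when a pshufb mask is loaded from the constant pool. A
minimal well-formed sketch (the expected annotation shape is illustrative):

; Reverse the 16 bytes of %v; the printer is expected to annotate the pshufb
; with the decoded lane mapping, e.g. xmm0 = xmm0[15,14,13,...,0].
define <16 x i8> @reverse16(<16 x i8> %v) {
  %r = call <16 x i8> @llvm.x86.ssse3.pshuf.b.128(<16 x i8> %v,
         <16 x i8> <i8 15, i8 14, i8 13, i8 12, i8 11, i8 10, i8 9, i8 8,
                    i8 7, i8 6, i8 5, i8 4, i8 3, i8 2, i8 1, i8 0>)
  ret <16 x i8> %r
}
declare <16 x i8> @llvm.x86.ssse3.pshuf.b.128(<16 x i8>, <16 x i8>)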
diff --git a/test/CodeGen/X86/psubus.ll b/test/CodeGen/X86/psubus.ll
index aff4afb..5e1343e 100644
--- a/test/CodeGen/X86/psubus.ll
+++ b/test/CodeGen/X86/psubus.ll
@@ -1,4 +1,4 @@
-; RUN: llc -mcpu=core2 < %s | FileCheck %s -check-prefix=SSE2
+; RUN: llc -mcpu=core2 < %s | FileCheck %s -check-prefix=SSSE3
; RUN: llc -mcpu=corei7-avx < %s | FileCheck %s -check-prefix=AVX1
; RUN: llc -mcpu=core-avx2 < %s | FileCheck %s -check-prefix=AVX2
@@ -7,334 +7,344 @@ target triple = "x86_64-apple-macosx10.8.0"
define void @test1(i16* nocapture %head) nounwind {
vector.ph:
- br label %vector.body
-
-vector.body: ; preds = %vector.body, %vector.ph
- %index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
- %0 = getelementptr inbounds i16* %head, i64 %index
+ %0 = getelementptr inbounds i16* %head, i64 0
%1 = bitcast i16* %0 to <8 x i16>*
%2 = load <8 x i16>* %1, align 2
%3 = icmp slt <8 x i16> %2, zeroinitializer
%4 = xor <8 x i16> %2, <i16 -32768, i16 -32768, i16 -32768, i16 -32768, i16 -32768, i16 -32768, i16 -32768, i16 -32768>
%5 = select <8 x i1> %3, <8 x i16> %4, <8 x i16> zeroinitializer
store <8 x i16> %5, <8 x i16>* %1, align 2
- %index.next = add i64 %index, 8
- %6 = icmp eq i64 %index.next, 16384
- br i1 %6, label %for.end, label %vector.body
-
-for.end: ; preds = %vector.body
ret void
-; SSE2: @test1
-; SSE2: psubusw LCPI0_0(%rip), %xmm0
+; SSSE3: @test1
+; SSSE3: # BB#0:
+; SSSE3-NEXT: movdqu (%rdi), %xmm0
+; SSSE3-NEXT: psubusw LCPI0_0(%rip), %xmm0
+; SSSE3-NEXT: movdqu %xmm0, (%rdi)
+; SSSE3-NEXT: retq
; AVX1: @test1
-; AVX1: vpsubusw LCPI0_0(%rip), %xmm0, %xmm0
+; AVX1: # BB#0:
+; AVX1-NEXT: vmovdqu (%rdi), %xmm0
+; AVX1-NEXT: vpsubusw LCPI0_0(%rip), %xmm0, %xmm0
+; AVX1-NEXT: vmovdqu %xmm0, (%rdi)
+; AVX1-NEXT: retq
; AVX2: @test1
-; AVX2: vpsubusw LCPI0_0(%rip), %xmm0, %xmm0
+; AVX2: # BB#0:
+; AVX2-NEXT: vmovdqu (%rdi), %xmm0
+; AVX2-NEXT: vpsubusw LCPI0_0(%rip), %xmm0, %xmm0
+; AVX2-NEXT: vmovdqu %xmm0, (%rdi)
+; AVX2-NEXT: retq
}
define void @test2(i16* nocapture %head) nounwind {
vector.ph:
- br label %vector.body
-
-vector.body: ; preds = %vector.body, %vector.ph
- %index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
- %0 = getelementptr inbounds i16* %head, i64 %index
+ %0 = getelementptr inbounds i16* %head, i64 0
%1 = bitcast i16* %0 to <8 x i16>*
%2 = load <8 x i16>* %1, align 2
%3 = icmp ugt <8 x i16> %2, <i16 32766, i16 32766, i16 32766, i16 32766, i16 32766, i16 32766, i16 32766, i16 32766>
%4 = add <8 x i16> %2, <i16 -32767, i16 -32767, i16 -32767, i16 -32767, i16 -32767, i16 -32767, i16 -32767, i16 -32767>
%5 = select <8 x i1> %3, <8 x i16> %4, <8 x i16> zeroinitializer
store <8 x i16> %5, <8 x i16>* %1, align 2
- %index.next = add i64 %index, 8
- %6 = icmp eq i64 %index.next, 16384
- br i1 %6, label %for.end, label %vector.body
-
-for.end: ; preds = %vector.body
ret void
-; SSE2: @test2
-; SSE2: psubusw LCPI1_0(%rip), %xmm0
+; SSSE3: @test2
+; SSSE3: # BB#0:
+; SSSE3-NEXT: movdqu (%rdi), %xmm0
+; SSSE3-NEXT: psubusw LCPI1_0(%rip), %xmm0
+; SSSE3-NEXT: movdqu %xmm0, (%rdi)
+; SSSE3-NEXT: retq
; AVX1: @test2
-; AVX1: vpsubusw LCPI1_0(%rip), %xmm0, %xmm0
+; AVX1: # BB#0:
+; AVX1-NEXT: vmovdqu (%rdi), %xmm0
+; AVX1-NEXT: vpsubusw LCPI1_0(%rip), %xmm0, %xmm0
+; AVX1-NEXT: vmovdqu %xmm0, (%rdi)
+; AVX1-NEXT: retq
; AVX2: @test2
-; AVX2: vpsubusw LCPI1_0(%rip), %xmm0, %xmm0
+; AVX2: # BB#0:
+; AVX2-NEXT: vmovdqu (%rdi), %xmm0
+; AVX2-NEXT: vpsubusw LCPI1_0(%rip), %xmm0, %xmm0
+; AVX2-NEXT: vmovdqu %xmm0, (%rdi)
+; AVX2-NEXT: retq
}
define void @test3(i16* nocapture %head, i16 zeroext %w) nounwind {
vector.ph:
%0 = insertelement <8 x i16> undef, i16 %w, i32 0
%broadcast15 = shufflevector <8 x i16> %0, <8 x i16> undef, <8 x i32> zeroinitializer
- br label %vector.body
-
-vector.body: ; preds = %vector.body, %vector.ph
- %index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
- %1 = getelementptr inbounds i16* %head, i64 %index
+ %1 = getelementptr inbounds i16* %head, i64 0
%2 = bitcast i16* %1 to <8 x i16>*
%3 = load <8 x i16>* %2, align 2
%4 = icmp ult <8 x i16> %3, %broadcast15
%5 = sub <8 x i16> %3, %broadcast15
%6 = select <8 x i1> %4, <8 x i16> zeroinitializer, <8 x i16> %5
store <8 x i16> %6, <8 x i16>* %2, align 2
- %index.next = add i64 %index, 8
- %7 = icmp eq i64 %index.next, 16384
- br i1 %7, label %for.end, label %vector.body
-
-for.end: ; preds = %vector.body
ret void
-; SSE2: @test3
-; SSE2: psubusw %xmm0, %xmm1
+; SSSE3: @test3
+; SSSE3: # BB#0:
+; SSSE3-NEXT: movd %esi, %xmm0
+; SSSE3-NEXT: pshufb {{.*#+}} xmm0 = xmm0[0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1]
+; SSSE3-NEXT: movdqu (%rdi), %xmm1
+; SSSE3-NEXT: psubusw %xmm0, %xmm1
+; SSSE3-NEXT: movdqu %xmm1, (%rdi)
+; SSSE3-NEXT: retq
; AVX1: @test3
-; AVX1: vpsubusw %xmm0, %xmm1, %xmm1
+; AVX1: # BB#0:
+; AVX1-NEXT: vmovd %esi, %xmm0
+; AVX1-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1]
+; AVX1-NEXT: vmovdqu (%rdi), %xmm1
+; AVX1-NEXT: vpsubusw %xmm0, %xmm1, %xmm0
+; AVX1-NEXT: vmovdqu %xmm0, (%rdi)
+; AVX1-NEXT: retq
; AVX2: @test3
-; AVX2: vpsubusw %xmm0, %xmm1, %xmm1
+; AVX2: # BB#0:
+; AVX2-NEXT: vmovd %esi, %xmm0
+; AVX2-NEXT: vpbroadcastw %xmm0, %xmm0
+; AVX2-NEXT: vmovdqu (%rdi), %xmm1
+; AVX2-NEXT: vpsubusw %xmm0, %xmm1, %xmm0
+; AVX2-NEXT: vmovdqu %xmm0, (%rdi)
+; AVX2-NEXT: retq
}
define void @test4(i8* nocapture %head) nounwind {
vector.ph:
- br label %vector.body
-
-vector.body: ; preds = %vector.body, %vector.ph
- %index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
- %0 = getelementptr inbounds i8* %head, i64 %index
+ %0 = getelementptr inbounds i8* %head, i64 0
%1 = bitcast i8* %0 to <16 x i8>*
%2 = load <16 x i8>* %1, align 1
%3 = icmp slt <16 x i8> %2, zeroinitializer
%4 = xor <16 x i8> %2, <i8 -128, i8 -128, i8 -128, i8 -128, i8 -128, i8 -128, i8 -128, i8 -128, i8 -128, i8 -128, i8 -128, i8 -128, i8 -128, i8 -128, i8 -128, i8 -128>
%5 = select <16 x i1> %3, <16 x i8> %4, <16 x i8> zeroinitializer
store <16 x i8> %5, <16 x i8>* %1, align 1
- %index.next = add i64 %index, 16
- %6 = icmp eq i64 %index.next, 16384
- br i1 %6, label %for.end, label %vector.body
-
-for.end: ; preds = %vector.body
ret void
-; SSE2: @test4
-; SSE2: psubusb LCPI3_0(%rip), %xmm0
+; SSSE3: @test4
+; SSSE3: # BB#0:
+; SSSE3-NEXT: movdqu (%rdi), %xmm0
+; SSSE3-NEXT: psubusb LCPI3_0(%rip), %xmm0
+; SSSE3-NEXT: movdqu %xmm0, (%rdi)
+; SSSE3-NEXT: retq
; AVX1: @test4
-; AVX1: vpsubusb LCPI3_0(%rip), %xmm0, %xmm0
+; AVX1: # BB#0:
+; AVX1-NEXT: vmovdqu (%rdi), %xmm0
+; AVX1-NEXT: vpsubusb LCPI3_0(%rip), %xmm0, %xmm0
+; AVX1-NEXT: vmovdqu %xmm0, (%rdi)
+; AVX1-NEXT: retq
; AVX2: @test4
-; AVX2: vpsubusb LCPI3_0(%rip), %xmm0, %xmm0
+; AVX2: # BB#0:
+; AVX2-NEXT: vmovdqu (%rdi), %xmm0
+; AVX2-NEXT: vpsubusb LCPI3_0(%rip), %xmm0, %xmm0
+; AVX2-NEXT: vmovdqu %xmm0, (%rdi)
+; AVX2-NEXT: retq
}
define void @test5(i8* nocapture %head) nounwind {
vector.ph:
- br label %vector.body
-
-vector.body: ; preds = %vector.body, %vector.ph
- %index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
- %0 = getelementptr inbounds i8* %head, i64 %index
+ %0 = getelementptr inbounds i8* %head, i64 0
%1 = bitcast i8* %0 to <16 x i8>*
%2 = load <16 x i8>* %1, align 1
%3 = icmp ugt <16 x i8> %2, <i8 126, i8 126, i8 126, i8 126, i8 126, i8 126, i8 126, i8 126, i8 126, i8 126, i8 126, i8 126, i8 126, i8 126, i8 126, i8 126>
%4 = add <16 x i8> %2, <i8 -127, i8 -127, i8 -127, i8 -127, i8 -127, i8 -127, i8 -127, i8 -127, i8 -127, i8 -127, i8 -127, i8 -127, i8 -127, i8 -127, i8 -127, i8 -127>
%5 = select <16 x i1> %3, <16 x i8> %4, <16 x i8> zeroinitializer
store <16 x i8> %5, <16 x i8>* %1, align 1
- %index.next = add i64 %index, 16
- %6 = icmp eq i64 %index.next, 16384
- br i1 %6, label %for.end, label %vector.body
-
-for.end: ; preds = %vector.body
ret void
-; SSE2: @test5
-; SSE2: psubusb LCPI4_0(%rip), %xmm0
+; SSSE3: @test5
+; SSSE3: # BB#0:
+; SSSE3-NEXT: movdqu (%rdi), %xmm0
+; SSSE3-NEXT: psubusb LCPI4_0(%rip), %xmm0
+; SSSE3-NEXT: movdqu %xmm0, (%rdi)
+; SSSE3-NEXT: retq
; AVX1: @test5
-; AVX1: vpsubusb LCPI4_0(%rip), %xmm0, %xmm0
+; AVX1: # BB#0:
+; AVX1-NEXT: vmovdqu (%rdi), %xmm0
+; AVX1-NEXT: vpsubusb LCPI4_0(%rip), %xmm0
+; AVX1-NEXT: vmovdqu %xmm0, (%rdi)
+; AVX1-NEXT: retq
; AVX2: @test5
-; AVX2: vpsubusb LCPI4_0(%rip), %xmm0, %xmm0
+; AVX2: # BB#0:
+; AVX2-NEXT: vmovdqu (%rdi), %xmm0
+; AVX2-NEXT: vpsubusb LCPI4_0(%rip), %xmm0
+; AVX2-NEXT: vmovdqu %xmm0, (%rdi)
+; AVX2-NEXT: retq
}
define void @test6(i8* nocapture %head, i8 zeroext %w) nounwind {
vector.ph:
%0 = insertelement <16 x i8> undef, i8 %w, i32 0
%broadcast15 = shufflevector <16 x i8> %0, <16 x i8> undef, <16 x i32> zeroinitializer
- br label %vector.body
-
-vector.body: ; preds = %vector.body, %vector.ph
- %index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
- %1 = getelementptr inbounds i8* %head, i64 %index
+ %1 = getelementptr inbounds i8* %head, i64 0
%2 = bitcast i8* %1 to <16 x i8>*
%3 = load <16 x i8>* %2, align 1
%4 = icmp ult <16 x i8> %3, %broadcast15
%5 = sub <16 x i8> %3, %broadcast15
%6 = select <16 x i1> %4, <16 x i8> zeroinitializer, <16 x i8> %5
store <16 x i8> %6, <16 x i8>* %2, align 1
- %index.next = add i64 %index, 16
- %7 = icmp eq i64 %index.next, 16384
- br i1 %7, label %for.end, label %vector.body
-
-for.end: ; preds = %vector.body
ret void
-; SSE2: @test6
-; SSE2: psubusb %xmm0, %xmm1
+; SSSE3: @test6
+; SSSE3: # BB#0:
+; SSSE3-NEXT: movd %esi, %xmm0
+; SSSE3-NEXT: pxor %xmm1, %xmm1
+; SSSE3-NEXT: pshufb %xmm1, %xmm0
+; SSSE3-NEXT: movdqu (%rdi), %xmm1
+; SSSE3-NEXT: psubusb %xmm0, %xmm1
+; SSSE3-NEXT: movdqu %xmm1, (%rdi)
+; SSSE3-NEXT: retq
; AVX1: @test6
-; AVX1: vpsubusb %xmm0, %xmm1, %xmm1
+; AVX1: # BB#0:
+; AVX1-NEXT: vmovd %esi, %xmm0
+; AVX1-NEXT: vpxor %xmm1, %xmm1
+; AVX1-NEXT: vpshufb %xmm1, %xmm0
+; AVX1-NEXT: vmovdqu (%rdi), %xmm1
+; AVX1-NEXT: vpsubusb %xmm0, %xmm1, %xmm0
+; AVX1-NEXT: vmovdqu %xmm0, (%rdi)
+; AVX1-NEXT: retq
; AVX2: @test6
-; AVX2: vpsubusb %xmm0, %xmm1, %xmm1
+; AVX2: # BB#0:
+; AVX2-NEXT: vmovd %esi, %xmm0
+; AVX2-NEXT: vpbroadcastb %xmm0, %xmm0
+; AVX2-NEXT: vmovdqu (%rdi), %xmm1
+; AVX2-NEXT: vpsubusb %xmm0, %xmm1, %xmm0
+; AVX2-NEXT: vmovdqu %xmm0, (%rdi)
+; AVX2-NEXT: retq
}
define void @test7(i16* nocapture %head) nounwind {
vector.ph:
- br label %vector.body
-
-vector.body: ; preds = %vector.body, %vector.ph
- %index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
- %0 = getelementptr inbounds i16* %head, i64 %index
+ %0 = getelementptr inbounds i16* %head, i64 0
%1 = bitcast i16* %0 to <16 x i16>*
%2 = load <16 x i16>* %1, align 2
%3 = icmp slt <16 x i16> %2, zeroinitializer
%4 = xor <16 x i16> %2, <i16 -32768, i16 -32768, i16 -32768, i16 -32768, i16 -32768, i16 -32768, i16 -32768, i16 -32768, i16 -32768, i16 -32768, i16 -32768, i16 -32768, i16 -32768, i16 -32768, i16 -32768, i16 -32768>
%5 = select <16 x i1> %3, <16 x i16> %4, <16 x i16> zeroinitializer
store <16 x i16> %5, <16 x i16>* %1, align 2
- %index.next = add i64 %index, 8
- %6 = icmp eq i64 %index.next, 16384
- br i1 %6, label %for.end, label %vector.body
-
-for.end: ; preds = %vector.body
ret void
; AVX2: @test7
-; AVX2: vpsubusw LCPI6_0(%rip), %ymm0, %ymm0
+; AVX2: # BB#0:
+; AVX2-NEXT: vmovdqu (%rdi), %ymm0
+; AVX2-NEXT: vpsubusw LCPI6_0(%rip), %ymm0, %ymm0
+; AVX2-NEXT: vmovdqu %ymm0, (%rdi)
+; AVX2-NEXT: vzeroupper
+; AVX2-NEXT: retq
}
define void @test8(i16* nocapture %head) nounwind {
vector.ph:
- br label %vector.body
-
-vector.body: ; preds = %vector.body, %vector.ph
- %index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
- %0 = getelementptr inbounds i16* %head, i64 %index
+ %0 = getelementptr inbounds i16* %head, i64 0
%1 = bitcast i16* %0 to <16 x i16>*
%2 = load <16 x i16>* %1, align 2
%3 = icmp ugt <16 x i16> %2, <i16 32766, i16 32766, i16 32766, i16 32766, i16 32766, i16 32766, i16 32766, i16 32766, i16 32766, i16 32766, i16 32766, i16 32766, i16 32766, i16 32766, i16 32766, i16 32766>
%4 = add <16 x i16> %2, <i16 -32767, i16 -32767, i16 -32767, i16 -32767, i16 -32767, i16 -32767, i16 -32767, i16 -32767, i16 -32767, i16 -32767, i16 -32767, i16 -32767, i16 -32767, i16 -32767, i16 -32767, i16 -32767>
%5 = select <16 x i1> %3, <16 x i16> %4, <16 x i16> zeroinitializer
store <16 x i16> %5, <16 x i16>* %1, align 2
- %index.next = add i64 %index, 8
- %6 = icmp eq i64 %index.next, 16384
- br i1 %6, label %for.end, label %vector.body
-
-for.end: ; preds = %vector.body
ret void
; AVX2: @test8
-; AVX2: vpsubusw LCPI7_0(%rip), %ymm0, %ymm0
+; AVX2: # BB#0:
+; AVX2-NEXT: vmovdqu (%rdi), %ymm0
+; AVX2-NEXT: vpsubusw LCPI7_0(%rip), %ymm0, %ymm0
+; AVX2-NEXT: vmovdqu %ymm0, (%rdi)
+; AVX2-NEXT: vzeroupper
+; AVX2-NEXT: retq
}
define void @test9(i16* nocapture %head, i16 zeroext %w) nounwind {
vector.ph:
%0 = insertelement <16 x i16> undef, i16 %w, i32 0
%broadcast15 = shufflevector <16 x i16> %0, <16 x i16> undef, <16 x i32> zeroinitializer
- br label %vector.body
-
-vector.body: ; preds = %vector.body, %vector.ph
- %index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
- %1 = getelementptr inbounds i16* %head, i64 %index
+ %1 = getelementptr inbounds i16* %head, i64 0
%2 = bitcast i16* %1 to <16 x i16>*
%3 = load <16 x i16>* %2, align 2
%4 = icmp ult <16 x i16> %3, %broadcast15
%5 = sub <16 x i16> %3, %broadcast15
%6 = select <16 x i1> %4, <16 x i16> zeroinitializer, <16 x i16> %5
store <16 x i16> %6, <16 x i16>* %2, align 2
- %index.next = add i64 %index, 8
- %7 = icmp eq i64 %index.next, 16384
- br i1 %7, label %for.end, label %vector.body
-
-for.end: ; preds = %vector.body
ret void
-
; AVX2: @test9
-; AVX2: vpsubusw %ymm0, %ymm1, %ymm1
+; AVX2: # BB#0:
+; AVX2-NEXT: vmovd %esi, %xmm0
+; AVX2-NEXT: vpbroadcastw %xmm0, %ymm0
+; AVX2-NEXT: vmovdqu (%rdi), %ymm1
+; AVX2-NEXT: vpsubusw %ymm0, %ymm1, %ymm0
+; AVX2-NEXT: vmovdqu %ymm0, (%rdi)
+; AVX2-NEXT: vzeroupper
+; AVX2-NEXT: retq
}
define void @test10(i8* nocapture %head) nounwind {
vector.ph:
- br label %vector.body
-
-vector.body: ; preds = %vector.body, %vector.ph
- %index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
- %0 = getelementptr inbounds i8* %head, i64 %index
+ %0 = getelementptr inbounds i8* %head, i64 0
%1 = bitcast i8* %0 to <32 x i8>*
%2 = load <32 x i8>* %1, align 1
%3 = icmp slt <32 x i8> %2, zeroinitializer
%4 = xor <32 x i8> %2, <i8 -128, i8 -128, i8 -128, i8 -128, i8 -128, i8 -128, i8 -128, i8 -128, i8 -128, i8 -128, i8 -128, i8 -128, i8 -128, i8 -128, i8 -128, i8 -128, i8 -128, i8 -128, i8 -128, i8 -128, i8 -128, i8 -128, i8 -128, i8 -128, i8 -128, i8 -128, i8 -128, i8 -128, i8 -128, i8 -128, i8 -128, i8 -128>
%5 = select <32 x i1> %3, <32 x i8> %4, <32 x i8> zeroinitializer
store <32 x i8> %5, <32 x i8>* %1, align 1
- %index.next = add i64 %index, 16
- %6 = icmp eq i64 %index.next, 16384
- br i1 %6, label %for.end, label %vector.body
-
-for.end: ; preds = %vector.body
ret void
-
; AVX2: @test10
-; AVX2: vpsubusb LCPI9_0(%rip), %ymm0, %ymm0
+; AVX2: # BB#0:
+; AVX2-NEXT: vmovdqu (%rdi), %ymm0
+; AVX2-NEXT: vpsubusb LCPI9_0(%rip), %ymm0, %ymm0
+; AVX2-NEXT: vmovdqu %ymm0, (%rdi)
+; AVX2-NEXT: vzeroupper
+; AVX2-NEXT: retq
}
define void @test11(i8* nocapture %head) nounwind {
vector.ph:
- br label %vector.body
-
-vector.body: ; preds = %vector.body, %vector.ph
- %index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
- %0 = getelementptr inbounds i8* %head, i64 %index
+ %0 = getelementptr inbounds i8* %head, i64 0
%1 = bitcast i8* %0 to <32 x i8>*
%2 = load <32 x i8>* %1, align 1
%3 = icmp ugt <32 x i8> %2, <i8 126, i8 126, i8 126, i8 126, i8 126, i8 126, i8 126, i8 126, i8 126, i8 126, i8 126, i8 126, i8 126, i8 126, i8 126, i8 126, i8 126, i8 126, i8 126, i8 126, i8 126, i8 126, i8 126, i8 126, i8 126, i8 126, i8 126, i8 126, i8 126, i8 126, i8 126, i8 126>
%4 = add <32 x i8> %2, <i8 -127, i8 -127, i8 -127, i8 -127, i8 -127, i8 -127, i8 -127, i8 -127, i8 -127, i8 -127, i8 -127, i8 -127, i8 -127, i8 -127, i8 -127, i8 -127, i8 -127, i8 -127, i8 -127, i8 -127, i8 -127, i8 -127, i8 -127, i8 -127, i8 -127, i8 -127, i8 -127, i8 -127, i8 -127, i8 -127, i8 -127, i8 -127>
%5 = select <32 x i1> %3, <32 x i8> %4, <32 x i8> zeroinitializer
store <32 x i8> %5, <32 x i8>* %1, align 1
- %index.next = add i64 %index, 16
- %6 = icmp eq i64 %index.next, 16384
- br i1 %6, label %for.end, label %vector.body
-
-for.end: ; preds = %vector.body
ret void
; AVX2: @test11
-; AVX2: vpsubusb LCPI10_0(%rip), %ymm0, %ymm0
+; AVX2: # BB#0:
+; AVX2-NEXT: vmovdqu (%rdi), %ymm0
+; AVX2-NEXT: vpsubusb LCPI10_0(%rip), %ymm0, %ymm0
+; AVX2-NEXT: vmovdqu %ymm0, (%rdi)
+; AVX2-NEXT: vzeroupper
+; AVX2-NEXT: retq
}
define void @test12(i8* nocapture %head, i8 zeroext %w) nounwind {
vector.ph:
%0 = insertelement <32 x i8> undef, i8 %w, i32 0
%broadcast15 = shufflevector <32 x i8> %0, <32 x i8> undef, <32 x i32> zeroinitializer
- br label %vector.body
-
-vector.body: ; preds = %vector.body, %vector.ph
- %index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
- %1 = getelementptr inbounds i8* %head, i64 %index
+ %1 = getelementptr inbounds i8* %head, i64 0
%2 = bitcast i8* %1 to <32 x i8>*
%3 = load <32 x i8>* %2, align 1
%4 = icmp ult <32 x i8> %3, %broadcast15
%5 = sub <32 x i8> %3, %broadcast15
%6 = select <32 x i1> %4, <32 x i8> zeroinitializer, <32 x i8> %5
store <32 x i8> %6, <32 x i8>* %2, align 1
- %index.next = add i64 %index, 16
- %7 = icmp eq i64 %index.next, 16384
- br i1 %7, label %for.end, label %vector.body
-
-for.end: ; preds = %vector.body
ret void
; AVX2: @test12
-; AVX2: vpsubusb %ymm0, %ymm1, %ymm1
+; AVX2: # BB#0:
+; AVX2-NEXT: vmovd %esi, %xmm0
+; AVX2-NEXT: vpbroadcastb %xmm0, %ymm0
+; AVX2-NEXT: vmovdqu (%rdi), %ymm1
+; AVX2-NEXT: vpsubusb %ymm0, %ymm1, %ymm0
+; AVX2-NEXT: vmovdqu %ymm0, (%rdi)
+; AVX2-NEXT: vzeroupper
+; AVX2-NEXT: retq
}
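
Each test above reduces to one of a few IR idioms for unsigned saturating
subtraction; the select form used by test3 and test6 is the most direct. A
standalone sketch of that pattern, reduced from the tests above (the CHECK
lines there confirm it is matched to psubusw):

; r = (x <u w) ? 0 : x - w, i.e. x - w clamped at zero.
define <8 x i16> @usubsat(<8 x i16> %x, <8 x i16> %w) {
  %cmp = icmp ult <8 x i16> %x, %w
  %sub = sub <8 x i16> %x, %w
  %res = select <8 x i1> %cmp, <8 x i16> zeroinitializer, <8 x i16> %sub
  ret <8 x i16> %res
}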
diff --git a/test/CodeGen/X86/ragreedy-bug.ll b/test/CodeGen/X86/ragreedy-bug.ll
index df9b41d..83ac274 100644
--- a/test/CodeGen/X86/ragreedy-bug.ll
+++ b/test/CodeGen/X86/ragreedy-bug.ll
@@ -266,27 +266,27 @@ return:
%retval.0 = phi i32 [ 0, %entry ], [ 1, %land.lhs.true52 ], [ 1, %land.lhs.true43 ], [ 0, %if.else123 ], [ 1, %while.cond59.preheader ], [ 1, %while.cond95.preheader ], [ 1, %while.cond130.preheader ], [ 1, %land.lhs.true28 ], [ 1, %if.then83 ], [ 0, %lor.lhs.false74 ], [ 1, %land.rhs ], [ 1, %if.then117 ], [ 0, %while.body104 ], [ 1, %land.rhs99 ], [ 1, %if.then152 ], [ 0, %while.body139 ], [ 1, %land.rhs134 ], [ 0, %while.body ]
ret i32 %retval.0
}
-!181 = metadata !{metadata !"branch_weights", i32 662038, i32 1}
-!988 = metadata !{metadata !"branch_weights", i32 12091450, i32 1916}
-!989 = metadata !{metadata !"branch_weights", i32 7564670, i32 4526781}
-!990 = metadata !{metadata !"branch_weights", i32 7484958, i32 13283499}
-!991 = metadata !{metadata !"branch_weights", i32 8677007, i32 4606493}
-!992 = metadata !{metadata !"branch_weights", i32 -1172426948, i32 145094705}
-!993 = metadata !{metadata !"branch_weights", i32 1468914, i32 5683688}
-!994 = metadata !{metadata !"branch_weights", i32 114025221, i32 -1217548794, i32 -1199521551, i32 87712616}
-!995 = metadata !{metadata !"branch_weights", i32 1853716452, i32 -444717951, i32 932776759}
-!996 = metadata !{metadata !"branch_weights", i32 1004870, i32 20259}
-!997 = metadata !{metadata !"branch_weights", i32 20071, i32 189}
-!998 = metadata !{metadata !"branch_weights", i32 -1020255939, i32 572177766}
-!999 = metadata !{metadata !"branch_weights", i32 2666513, i32 3466431}
-!1000 = metadata !{metadata !"branch_weights", i32 5117635, i32 1859780}
-!1001 = metadata !{metadata !"branch_weights", i32 354902465, i32 -1444604407}
-!1002 = metadata !{metadata !"branch_weights", i32 -1762419279, i32 1592770684}
-!1003 = metadata !{metadata !"branch_weights", i32 1435905930, i32 -1951930624}
-!1004 = metadata !{metadata !"branch_weights", i32 1, i32 504888}
-!1005 = metadata !{metadata !"branch_weights", i32 94662, i32 504888}
-!1006 = metadata !{metadata !"branch_weights", i32 -1897793104, i32 160196332}
-!1007 = metadata !{metadata !"branch_weights", i32 2074643678, i32 -29579071}
-!1008 = metadata !{metadata !"branch_weights", i32 1, i32 226163}
-!1009 = metadata !{metadata !"branch_weights", i32 58357, i32 226163}
-!1010 = metadata !{metadata !"branch_weights", i32 -2072848646, i32 92907517}
+!181 = !{!"branch_weights", i32 662038, i32 1}
+!988 = !{!"branch_weights", i32 12091450, i32 1916}
+!989 = !{!"branch_weights", i32 7564670, i32 4526781}
+!990 = !{!"branch_weights", i32 7484958, i32 13283499}
+!991 = !{!"branch_weights", i32 8677007, i32 4606493}
+!992 = !{!"branch_weights", i32 -1172426948, i32 145094705}
+!993 = !{!"branch_weights", i32 1468914, i32 5683688}
+!994 = !{!"branch_weights", i32 114025221, i32 -1217548794, i32 -1199521551, i32 87712616}
+!995 = !{!"branch_weights", i32 1853716452, i32 -444717951, i32 932776759}
+!996 = !{!"branch_weights", i32 1004870, i32 20259}
+!997 = !{!"branch_weights", i32 20071, i32 189}
+!998 = !{!"branch_weights", i32 -1020255939, i32 572177766}
+!999 = !{!"branch_weights", i32 2666513, i32 3466431}
+!1000 = !{!"branch_weights", i32 5117635, i32 1859780}
+!1001 = !{!"branch_weights", i32 354902465, i32 -1444604407}
+!1002 = !{!"branch_weights", i32 -1762419279, i32 1592770684}
+!1003 = !{!"branch_weights", i32 1435905930, i32 -1951930624}
+!1004 = !{!"branch_weights", i32 1, i32 504888}
+!1005 = !{!"branch_weights", i32 94662, i32 504888}
+!1006 = !{!"branch_weights", i32 -1897793104, i32 160196332}
+!1007 = !{!"branch_weights", i32 2074643678, i32 -29579071}
+!1008 = !{!"branch_weights", i32 1, i32 226163}
+!1009 = !{!"branch_weights", i32 58357, i32 226163}
+!1010 = !{!"branch_weights", i32 -2072848646, i32 92907517}
diff --git a/test/CodeGen/X86/ragreedy-hoist-spill.ll b/test/CodeGen/X86/ragreedy-hoist-spill.ll
index c6b28f7..57afb41 100644
--- a/test/CodeGen/X86/ragreedy-hoist-spill.ll
+++ b/test/CodeGen/X86/ragreedy-hoist-spill.ll
@@ -202,7 +202,6 @@ lor.rhs500:
; CHECK: lor.rhs500
; Make sure that we don't hoist the spill to outer loops.
; CHECK: movq %r{{.*}}, {{[0-9]+}}(%rsp)
- ; CHECK: movq %r{{.*}}, {{[0-9]+}}(%rsp)
; CHECK: callq {{.*}}maskrune
%call3.i.i2792 = call i32 @__maskrune(i32 undef, i64 256)
br i1 undef, label %land.lhs.true504, label %do.body479.backedge
@@ -378,12 +377,12 @@ declare void @llvm.memset.p0i8.i64(i8* nocapture, i8, i64, i32, i1)
!llvm.ident = !{!0}
-!0 = metadata !{metadata !"clang version 3.5.0 (trunk 204257)"}
-!1 = metadata !{metadata !2, metadata !2, i64 0}
-!2 = metadata !{metadata !"int", metadata !3, i64 0}
-!3 = metadata !{metadata !"omnipotent char", metadata !4, i64 0}
-!4 = metadata !{metadata !"Simple C/C++ TBAA"}
-!5 = metadata !{metadata !3, metadata !3, i64 0}
-!6 = metadata !{metadata !7, metadata !8, i64 8}
-!7 = metadata !{metadata !"", metadata !8, i64 0, metadata !8, i64 8, metadata !3, i64 16}
-!8 = metadata !{metadata !"any pointer", metadata !3, i64 0}
+!0 = !{!"clang version 3.5.0 (trunk 204257)"}
+!1 = !{!2, !2, i64 0}
+!2 = !{!"int", !3, i64 0}
+!3 = !{!"omnipotent char", !4, i64 0}
+!4 = !{!"Simple C/C++ TBAA"}
+!5 = !{!3, !3, i64 0}
+!6 = !{!7, !8, i64 8}
+!7 = !{!"", !8, i64 0, !8, i64 8, !3, i64 16}
+!8 = !{!"any pointer", !3, i64 0}
diff --git a/test/CodeGen/X86/regalloc-reconcile-broken-hints.ll b/test/CodeGen/X86/regalloc-reconcile-broken-hints.ll
new file mode 100644
index 0000000..0067942
--- /dev/null
+++ b/test/CodeGen/X86/regalloc-reconcile-broken-hints.ll
@@ -0,0 +1,145 @@
+; RUN: llc < %s -o - -mtriple=x86_64-apple-macosx | FileCheck %s
+; Test case for the recoloring of broken hints.
+; It is tricky to keep this reasonably small while still triggering the
+; optimization, since it requires that splitting and spilling occur.
+; The bottom line is that this test case is fragile.
+; This was reduced from the make_list function in the llvm-testsuite:
+; SingleSource/Benchmarks/McGill/chomp.c
+
+target datalayout = "e-m:o-i64:64-f80:128-n8:16:32:64-S128"
+target triple = "x86_64-apple-macosx10.9.0"
+
+%struct._list = type { i32*, %struct._list* }
+
+@ncol = external global i32, align 4
+@nrow = external global i32, align 4
+
+declare noalias i32* @copy_data()
+
+declare noalias i8* @malloc(i64)
+
+declare i32 @get_value()
+
+declare i32 @in_wanted(i32* nocapture readonly)
+
+declare noalias i32* @make_data()
+
+; CHECK-LABEL: make_list:
+; Function prologue.
+; CHECK: pushq
+; CHECK: subq ${{[0-9]+}}, %rsp
+; Move the first argument (%data) into a temporary register.
+; It will not survive the call to malloc otherwise.
+; CHECK: movq %rdi, [[ARG1:%r[0-9a-z]+]]
+; CHECK: callq _malloc
+; Compute %data - 1, which is used by the load in land.rhs.i (via %indvars.iv.next.i).
+; CHECK: addq $-4, [[ARG1]]
+; We used to produce a useless copy here and move %data into another temporary register.
+; CHECK-NOT: movq [[ARG1]]
+; End of the first basic block.
+; CHECK: .align
+; Now check that %data is used in an address computation.
+; CHECK: leaq ([[ARG1]]
+define %struct._list* @make_list(i32* nocapture readonly %data, i32* nocapture %value, i32* nocapture %all) {
+entry:
+ %call = tail call i8* @malloc(i64 16)
+ %next = getelementptr inbounds i8* %call, i64 8
+ %tmp = bitcast i8* %next to %struct._list**
+ %tmp2 = bitcast i8* %call to %struct._list*
+ %.pre78 = load i32* @ncol, align 4
+ br label %for.cond1.preheader
+
+for.cond1.preheader: ; preds = %for.inc32, %entry
+ %tmp4 = phi i32 [ %.pre78, %entry ], [ 0, %for.inc32 ]
+ %current.077 = phi %struct._list* [ %tmp2, %entry ], [ %current.1.lcssa, %for.inc32 ]
+ %cmp270 = icmp eq i32 %tmp4, 0
+ br i1 %cmp270, label %for.inc32, label %for.body3
+
+for.body3: ; preds = %if.end31, %for.cond1.preheader
+ %current.173 = phi %struct._list* [ %current.2, %if.end31 ], [ %current.077, %for.cond1.preheader ]
+ %row.172 = phi i32 [ %row.3, %if.end31 ], [ 0, %for.cond1.preheader ]
+ %col.071 = phi i32 [ %inc, %if.end31 ], [ 0, %for.cond1.preheader ]
+ %call4 = tail call i32* @make_data()
+ %tmp5 = load i32* @ncol, align 4
+ %tobool14.i = icmp eq i32 %tmp5, 0
+ br i1 %tobool14.i, label %while.cond.i, label %while.body.lr.ph.i
+
+while.body.lr.ph.i: ; preds = %for.body3
+ %tmp6 = sext i32 %tmp5 to i64
+ br label %while.body.i
+
+while.body.i: ; preds = %while.body.i, %while.body.lr.ph.i
+ %indvars.iv.i = phi i64 [ %tmp6, %while.body.lr.ph.i ], [ %indvars.iv.next.i, %while.body.i ]
+ %indvars.iv.next.i = add nsw i64 %indvars.iv.i, -1
+ %tmp9 = trunc i64 %indvars.iv.next.i to i32
+ %tobool.i = icmp eq i32 %tmp9, 0
+ br i1 %tobool.i, label %while.cond.i, label %while.body.i
+
+while.cond.i: ; preds = %land.rhs.i, %while.body.i, %for.body3
+ %indvars.iv.i64 = phi i64 [ %indvars.iv.next.i65, %land.rhs.i ], [ 0, %for.body3 ], [ %tmp6, %while.body.i ]
+ %indvars.iv.next.i65 = add nsw i64 %indvars.iv.i64, -1
+ %tmp10 = trunc i64 %indvars.iv.i64 to i32
+ %tobool.i66 = icmp eq i32 %tmp10, 0
+ br i1 %tobool.i66, label %if.else, label %land.rhs.i
+
+land.rhs.i: ; preds = %while.cond.i
+ %arrayidx.i67 = getelementptr inbounds i32* %call4, i64 %indvars.iv.next.i65
+ %tmp11 = load i32* %arrayidx.i67, align 4
+ %arrayidx2.i68 = getelementptr inbounds i32* %data, i64 %indvars.iv.next.i65
+ %tmp12 = load i32* %arrayidx2.i68, align 4
+ %cmp.i69 = icmp eq i32 %tmp11, %tmp12
+ br i1 %cmp.i69, label %while.cond.i, label %equal_data.exit
+
+equal_data.exit: ; preds = %land.rhs.i
+ %cmp3.i = icmp slt i32 %tmp10, 1
+ br i1 %cmp3.i, label %if.else, label %if.then
+
+if.then: ; preds = %equal_data.exit
+ %next7 = getelementptr inbounds %struct._list* %current.173, i64 0, i32 1
+ %tmp14 = load %struct._list** %next7, align 8
+ %next12 = getelementptr inbounds %struct._list* %tmp14, i64 0, i32 1
+ store %struct._list* null, %struct._list** %next12, align 8
+ %tmp15 = load %struct._list** %next7, align 8
+ %tmp16 = load i32* %value, align 4
+ %cmp14 = icmp eq i32 %tmp16, 1
+ %.tmp16 = select i1 %cmp14, i32 0, i32 %tmp16
+ %tmp18 = load i32* %all, align 4
+ %tmp19 = or i32 %tmp18, %.tmp16
+ %tmp20 = icmp eq i32 %tmp19, 0
+ br i1 %tmp20, label %if.then19, label %if.end31
+
+if.then19: ; preds = %if.then
+ %call21 = tail call i32 @in_wanted(i32* %call4)
+ br label %if.end31
+
+if.else: ; preds = %equal_data.exit, %while.cond.i
+ %cmp26 = icmp eq i32 %col.071, 0
+ %.row.172 = select i1 %cmp26, i32 0, i32 %row.172
+ %sub30 = add nsw i32 %tmp5, -1
+ br label %if.end31
+
+if.end31: ; preds = %if.else, %if.then19, %if.then
+ %col.1 = phi i32 [ %sub30, %if.else ], [ 0, %if.then ], [ 0, %if.then19 ]
+ %row.3 = phi i32 [ %.row.172, %if.else ], [ %row.172, %if.then ], [ 0, %if.then19 ]
+ %current.2 = phi %struct._list* [ %current.173, %if.else ], [ %tmp15, %if.then ], [ %tmp15, %if.then19 ]
+ %inc = add nsw i32 %col.1, 1
+ %tmp25 = load i32* @ncol, align 4
+ %cmp2 = icmp eq i32 %inc, %tmp25
+ br i1 %cmp2, label %for.cond1.for.inc32_crit_edge, label %for.body3
+
+for.cond1.for.inc32_crit_edge: ; preds = %if.end31
+ %.pre79 = load i32* @nrow, align 4
+ br label %for.inc32
+
+for.inc32: ; preds = %for.cond1.for.inc32_crit_edge, %for.cond1.preheader
+ %tmp26 = phi i32 [ %.pre79, %for.cond1.for.inc32_crit_edge ], [ 0, %for.cond1.preheader ]
+ %current.1.lcssa = phi %struct._list* [ %current.2, %for.cond1.for.inc32_crit_edge ], [ %current.077, %for.cond1.preheader ]
+ %row.1.lcssa = phi i32 [ %row.3, %for.cond1.for.inc32_crit_edge ], [ 0, %for.cond1.preheader ]
+ %inc33 = add nsw i32 %row.1.lcssa, 1
+ %cmp = icmp eq i32 %inc33, %tmp26
+ br i1 %cmp, label %for.end34, label %for.cond1.preheader
+
+for.end34: ; preds = %for.inc32
+ %.pre = load %struct._list** %tmp, align 8
+ ret %struct._list* %.pre
+}
diff --git a/test/CodeGen/X86/remat-phys-dead.ll b/test/CodeGen/X86/remat-phys-dead.ll
index 4d7ee62..6cdcd28 100644
--- a/test/CodeGen/X86/remat-phys-dead.ll
+++ b/test/CodeGen/X86/remat-phys-dead.ll
@@ -1,5 +1,5 @@
; REQUIRES: asserts
-; RUN: llc -mtriple=x86_64-apple-darwin -debug -o /dev/null < %s 2>&1 | FileCheck %s
+; RUN: llc -verify-machineinstrs -mtriple=x86_64-apple-darwin -debug -o /dev/null < %s 2>&1 | FileCheck %s
; We need to make sure that rematerialization into a physical register marks the
; super- or sub-register as dead after this rematerialization since only the
diff --git a/test/CodeGen/X86/scalar_sse_minmax.ll b/test/CodeGen/X86/scalar_sse_minmax.ll
index bc4ab5d..5ca3f85 100644
--- a/test/CodeGen/X86/scalar_sse_minmax.ll
+++ b/test/CodeGen/X86/scalar_sse_minmax.ll
@@ -1,44 +1,53 @@
-; RUN: llc < %s -march=x86 -mattr=+sse,+sse2 | \
-; RUN: grep mins | count 3
-; RUN: llc < %s -march=x86 -mattr=+sse,+sse2 | \
-; RUN: grep maxs | count 2
-
-declare i1 @llvm.isunordered.f64(double, double)
-
-declare i1 @llvm.isunordered.f32(float, float)
+; RUN: llc < %s -march=x86 -mattr=+sse,+sse2 | FileCheck %s
define float @min1(float %x, float %y) {
- %tmp = fcmp olt float %x, %y ; <i1> [#uses=1]
- %retval = select i1 %tmp, float %x, float %y ; <float> [#uses=1]
+; CHECK-LABEL: min1
+; CHECK: mins
+ %tmp = fcmp olt float %x, %y
+ %retval = select i1 %tmp, float %x, float %y
ret float %retval
}
define double @min2(double %x, double %y) {
- %tmp = fcmp olt double %x, %y ; <i1> [#uses=1]
- %retval = select i1 %tmp, double %x, double %y ; <double> [#uses=1]
+; CHECK-LABEL: min2
+; CHECK: mins
+ %tmp = fcmp olt double %x, %y
+ %retval = select i1 %tmp, double %x, double %y
ret double %retval
}
+declare <4 x float> @llvm.x86.sse.min.ss(<4 x float>, <4 x float>)
+define <4 x float> @min3(float %x, float %y) {
+; CHECK-LABEL: min3
+; CHECK: mins
+ %vec0 = insertelement <4 x float> undef, float %x, i32 0
+ %vec1 = insertelement <4 x float> undef, float %y, i32 0
+ %retval = tail call <4 x float> @llvm.x86.sse.min.ss(<4 x float> %vec0, <4 x float> %vec1)
+ ret <4 x float> %retval
+}
+
define float @max1(float %x, float %y) {
- %tmp = fcmp oge float %x, %y ; <i1> [#uses=1]
- %tmp2 = fcmp uno float %x, %y ; <i1> [#uses=1]
- %tmp3 = or i1 %tmp2, %tmp ; <i1> [#uses=1]
- %retval = select i1 %tmp3, float %x, float %y ; <float> [#uses=1]
+; CHECK-LABEL: max1
+; CHECK: maxs
+ %tmp = fcmp uge float %x, %y
+ %retval = select i1 %tmp, float %x, float %y
ret float %retval
}
define double @max2(double %x, double %y) {
- %tmp = fcmp oge double %x, %y ; <i1> [#uses=1]
- %tmp2 = fcmp uno double %x, %y ; <i1> [#uses=1]
- %tmp3 = or i1 %tmp2, %tmp ; <i1> [#uses=1]
- %retval = select i1 %tmp3, double %x, double %y ; <double> [#uses=1]
+; CHECK-LABEL: max2
+; CHECK: maxs
+ %tmp = fcmp uge double %x, %y
+ %retval = select i1 %tmp, double %x, double %y
ret double %retval
}
-define <4 x float> @min3(float %tmp37) {
- %tmp375 = insertelement <4 x float> undef, float %tmp37, i32 0 ; <<4 x float>> [#uses=1]
- %tmp48 = tail call <4 x float> @llvm.x86.sse.min.ss( <4 x float> %tmp375, <4 x float> < float 6.553500e+04, float undef, float undef, float undef > ) ; <<4 x float>> [#uses=1]
- ret <4 x float> %tmp48
+declare <4 x float> @llvm.x86.sse.max.ss(<4 x float>, <4 x float>)
+define <4 x float> @max3(float %x, float %y) {
+; CHECK-LABEL: max3
+; CHECK: maxs
+ %vec0 = insertelement <4 x float> undef, float %x, i32 0
+ %vec1 = insertelement <4 x float> undef, float %y, i32 0
+ %retval = tail call <4 x float> @llvm.x86.sse.max.ss(<4 x float> %vec0, <4 x float> %vec1)
+ ret <4 x float> %retval
}
-
-declare <4 x float> @llvm.x86.sse.min.ss(<4 x float>, <4 x float>)
diff --git a/test/CodeGen/X86/scev-interchange.ll b/test/CodeGen/X86/scev-interchange.ll
index 71a4d21..0e7047b 100644
--- a/test/CodeGen/X86/scev-interchange.ll
+++ b/test/CodeGen/X86/scev-interchange.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=x86-64
+; RUN: llc < %s -mtriple=x86_64-linux
target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128"
%"struct.DataOutBase::GmvFlags" = type { i32 }
diff --git a/test/CodeGen/X86/segmented-stacks.ll b/test/CodeGen/X86/segmented-stacks.ll
index 2db7c11..3e47121 100644
--- a/test/CodeGen/X86/segmented-stacks.ll
+++ b/test/CodeGen/X86/segmented-stacks.ll
@@ -1,10 +1,13 @@
; RUN: llc < %s -mcpu=generic -mtriple=i686-linux -verify-machineinstrs | FileCheck %s -check-prefix=X32-Linux
; RUN: llc < %s -mcpu=generic -mtriple=x86_64-linux -verify-machineinstrs | FileCheck %s -check-prefix=X64-Linux
+; RUN: llc < %s -mcpu=generic -mtriple=x86_64-linux -code-model=large -verify-machineinstrs | FileCheck %s -check-prefix=X64-Linux-Large
; RUN: llc < %s -mcpu=generic -mtriple=x86_64-linux-gnux32 -verify-machineinstrs | FileCheck %s -check-prefix=X32ABI
; RUN: llc < %s -mcpu=generic -mtriple=i686-darwin -verify-machineinstrs | FileCheck %s -check-prefix=X32-Darwin
; RUN: llc < %s -mcpu=generic -mtriple=x86_64-darwin -verify-machineinstrs | FileCheck %s -check-prefix=X64-Darwin
; RUN: llc < %s -mcpu=generic -mtriple=i686-mingw32 -verify-machineinstrs | FileCheck %s -check-prefix=X32-MinGW
; RUN: llc < %s -mcpu=generic -mtriple=x86_64-freebsd -verify-machineinstrs | FileCheck %s -check-prefix=X64-FreeBSD
+; RUN: llc < %s -mcpu=generic -mtriple=i686-dragonfly -verify-machineinstrs | FileCheck %s -check-prefix=X32-DFlyBSD
+; RUN: llc < %s -mcpu=generic -mtriple=x86_64-dragonfly -verify-machineinstrs | FileCheck %s -check-prefix=X64-DFlyBSD
; RUN: llc < %s -mcpu=generic -mtriple=x86_64-mingw32 -verify-machineinstrs | FileCheck %s -check-prefix=X64-MinGW
; We used to crash with filetype=obj
@@ -15,6 +18,8 @@
; RUN: llc < %s -mcpu=generic -mtriple=x86_64-darwin -filetype=obj
; RUN: llc < %s -mcpu=generic -mtriple=i686-mingw32 -filetype=obj
; RUN: llc < %s -mcpu=generic -mtriple=x86_64-freebsd -filetype=obj
+; RUN: llc < %s -mcpu=generic -mtriple=i686-dragonfly -filetype=obj
+; RUN: llc < %s -mcpu=generic -mtriple=x86_64-dragonfly -filetype=obj
; RUN: llc < %s -mcpu=generic -mtriple=x86_64-mingw32 -filetype=obj
; RUN: not llc < %s -mcpu=generic -mtriple=x86_64-solaris 2> %t.log
@@ -53,6 +58,16 @@ define void @test_basic() #0 {
; X64-Linux-NEXT: callq __morestack
; X64-Linux-NEXT: ret
+; X64-Linux-Large-LABEL: test_basic:
+
+; X64-Linux-Large: cmpq %fs:112, %rsp
+; X64-Linux-Large-NEXT: ja .LBB0_2
+
+; X64-Linux-Large: movabsq $40, %r10
+; X64-Linux-Large-NEXT: movabsq $0, %r11
+; X64-Linux-Large-NEXT: callq *__morestack_addr(%rip)
+; X64-Linux-Large-NEXT: ret
+
; X32ABI-LABEL: test_basic:
; X32ABI: cmpl %fs:64, %esp
@@ -114,6 +129,26 @@ define void @test_basic() #0 {
; X64-FreeBSD-NEXT: callq __morestack
; X64-FreeBSD-NEXT: ret
+; X32-DFlyBSD-LABEL: test_basic:
+
+; X32-DFlyBSD: cmpl %fs:16, %esp
+; X32-DFlyBSD-NEXT: ja .LBB0_2
+
+; X32-DFlyBSD: pushl $0
+; X32-DFlyBSD-NEXT: pushl $48
+; X32-DFlyBSD-NEXT: calll __morestack
+; X32-DFlyBSD-NEXT: ret
+
+; X64-DFlyBSD-LABEL: test_basic:
+
+; X64-DFlyBSD: cmpq %fs:32, %rsp
+; X64-DFlyBSD-NEXT: ja .LBB0_2
+
+; X64-DFlyBSD: movabsq $40, %r10
+; X64-DFlyBSD-NEXT: movabsq $0, %r11
+; X64-DFlyBSD-NEXT: callq __morestack
+; X64-DFlyBSD-NEXT: ret
+
}
define i32 @test_nested(i32 * nest %closure, i32 %other) #0 {
@@ -199,6 +234,24 @@ define i32 @test_nested(i32 * nest %closure, i32 %other) #0 {
; X64-FreeBSD-NEXT: ret
; X64-FreeBSD-NEXT: movq %rax, %r10
+; X32-DFlyBSD: cmpl %fs:16, %esp
+; X32-DFlyBSD-NEXT: ja .LBB1_2
+
+; X32-DFlyBSD: pushl $4
+; X32-DFlyBSD-NEXT: pushl $52
+; X32-DFlyBSD-NEXT: calll __morestack
+; X32-DFlyBSD-NEXT: ret
+
+; X64-DFlyBSD: cmpq %fs:32, %rsp
+; X64-DFlyBSD-NEXT: ja .LBB1_2
+
+; X64-DFlyBSD: movq %r10, %rax
+; X64-DFlyBSD-NEXT: movabsq $56, %r10
+; X64-DFlyBSD-NEXT: movabsq $0, %r11
+; X64-DFlyBSD-NEXT: callq __morestack
+; X64-DFlyBSD-NEXT: ret
+; X64-DFlyBSD-NEXT: movq %rax, %r10
+
}
define void @test_large() #0 {
@@ -280,6 +333,24 @@ define void @test_large() #0 {
; X64-FreeBSD-NEXT: callq __morestack
; X64-FreeBSD-NEXT: ret
+; X32-DFlyBSD: leal -40008(%esp), %ecx
+; X32-DFlyBSD-NEXT: cmpl %fs:16, %ecx
+; X32-DFlyBSD-NEXT: ja .LBB2_2
+
+; X32-DFlyBSD: pushl $0
+; X32-DFlyBSD-NEXT: pushl $40008
+; X32-DFlyBSD-NEXT: calll __morestack
+; X32-DFlyBSD-NEXT: ret
+
+; X64-DFlyBSD: leaq -40008(%rsp), %r11
+; X64-DFlyBSD-NEXT: cmpq %fs:32, %r11
+; X64-DFlyBSD-NEXT: ja .LBB2_2
+
+; X64-DFlyBSD: movabsq $40008, %r10
+; X64-DFlyBSD-NEXT: movabsq $0, %r11
+; X64-DFlyBSD-NEXT: callq __morestack
+; X64-DFlyBSD-NEXT: ret
+
}
define fastcc void @test_fastcc() #0 {
@@ -368,6 +439,26 @@ define fastcc void @test_fastcc() #0 {
; X64-FreeBSD-NEXT: callq __morestack
; X64-FreeBSD-NEXT: ret
+; X32-DFlyBSD-LABEL: test_fastcc:
+
+; X32-DFlyBSD: cmpl %fs:16, %esp
+; X32-DFlyBSD-NEXT: ja .LBB3_2
+
+; X32-DFlyBSD: pushl $0
+; X32-DFlyBSD-NEXT: pushl $48
+; X32-DFlyBSD-NEXT: calll __morestack
+; X32-DFlyBSD-NEXT: ret
+
+; X64-DFlyBSD-LABEL: test_fastcc:
+
+; X64-DFlyBSD: cmpq %fs:32, %rsp
+; X64-DFlyBSD-NEXT: ja .LBB3_2
+
+; X64-DFlyBSD: movabsq $40, %r10
+; X64-DFlyBSD-NEXT: movabsq $0, %r11
+; X64-DFlyBSD-NEXT: callq __morestack
+; X64-DFlyBSD-NEXT: ret
+
}
define fastcc void @test_fastcc_large() #0 {
@@ -464,6 +555,28 @@ define fastcc void @test_fastcc_large() #0 {
; X64-FreeBSD-NEXT: callq __morestack
; X64-FreeBSD-NEXT: ret
+; X32-DFlyBSD-LABEL: test_fastcc_large:
+
+; X32-DFlyBSD: leal -40008(%esp), %eax
+; X32-DFlyBSD-NEXT: cmpl %fs:16, %eax
+; X32-DFlyBSD-NEXT: ja .LBB4_2
+
+; X32-DFlyBSD: pushl $0
+; X32-DFlyBSD-NEXT: pushl $40008
+; X32-DFlyBSD-NEXT: calll __morestack
+; X32-DFlyBSD-NEXT: ret
+
+; X64-DFlyBSD-LABEL: test_fastcc_large:
+
+; X64-DFlyBSD: leaq -40008(%rsp), %r11
+; X64-DFlyBSD-NEXT: cmpq %fs:32, %r11
+; X64-DFlyBSD-NEXT: ja .LBB4_2
+
+; X64-DFlyBSD: movabsq $40008, %r10
+; X64-DFlyBSD-NEXT: movabsq $0, %r11
+; X64-DFlyBSD-NEXT: callq __morestack
+; X64-DFlyBSD-NEXT: ret
+
}
define fastcc void @test_fastcc_large_with_ecx_arg(i32 %a) #0 {
@@ -515,6 +628,16 @@ define void @test_nostack() #0 {
; X64-FreeBSD-LABEL: test_nostack:
; X64-FreeBSD-NOT: callq __morestack
+
+; X32-DFlyBSD-LABEL: test_nostack:
+; X32-DFlyBSD-NOT: calll __morestack
+
+; X64-DFlyBSD-LABEL: test_nostack:
+; X64-DFlyBSD-NOT: callq __morestack
}
attributes #0 = { "split-stack" }
+
+; X64-Linux-Large: .rodata
+; X64-Linux-Large-NEXT: __morestack_addr:
+; X64-Linux-Large-NEXT: .quad __morestack
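
A note on the large-code-model checks above: a direct callq carries only a
32-bit displacement, which may not reach __morestack when code can live
anywhere in the 64-bit address space, so the backend stores the address in
.rodata (__morestack_addr) and calls through it. A minimal C analogy of that
indirection pattern (the names below are illustrative, not from the test):

    #include <stdio.h>

    static void target_fn(void) { puts("reached via pointer"); }

    /* like "__morestack_addr: .quad __morestack" in .rodata */
    static void (*const target_fn_addr)(void) = target_fn;

    int main(void) {
        target_fn_addr();   /* like "callq *__morestack_addr(%rip)" */
        return 0;
    }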
diff --git a/test/CodeGen/X86/seh-basic.ll b/test/CodeGen/X86/seh-basic.ll
new file mode 100644
index 0000000..69d70d7
--- /dev/null
+++ b/test/CodeGen/X86/seh-basic.ll
@@ -0,0 +1,175 @@
+; RUN: llc -mtriple x86_64-pc-windows-msvc < %s | FileCheck %s
+
+define void @two_invoke_merged() {
+entry:
+ invoke void @try_body()
+ to label %again unwind label %lpad
+
+again:
+ invoke void @try_body()
+ to label %done unwind label %lpad
+
+done:
+ ret void
+
+lpad:
+ %vals = landingpad { i8*, i32 } personality i8* bitcast (i32 (...)* @__C_specific_handler to i8*)
+ catch i8* bitcast (i32 (i8*, i8*)* @filt0 to i8*)
+ catch i8* bitcast (i32 (i8*, i8*)* @filt1 to i8*)
+ %sel = extractvalue { i8*, i32 } %vals, 1
+ call void @use_selector(i32 %sel)
+ ret void
+}
+
+; Normal path code
+
+; CHECK-LABEL: {{^}}two_invoke_merged:
+; CHECK: .seh_proc two_invoke_merged
+; CHECK: .seh_handler __C_specific_handler, @unwind, @except
+; CHECK: .Ltmp0:
+; CHECK: callq try_body
+; CHECK-NEXT: .Ltmp1:
+; CHECK: .Ltmp2:
+; CHECK: callq try_body
+; CHECK-NEXT: .Ltmp3:
+; CHECK: retq
+
+; Landing pad code
+
+; CHECK: .Ltmp5:
+; CHECK: movl $1, %ecx
+; CHECK: jmp
+; CHECK: .Ltmp6:
+; CHECK: movl $2, %ecx
+; CHECK: callq use_selector
+
+; CHECK: .seh_handlerdata
+; CHECK-NEXT: .long 2
+; CHECK-NEXT: .long .Ltmp0@IMGREL
+; CHECK-NEXT: .long .Ltmp3@IMGREL+1
+; CHECK-NEXT: .long filt0@IMGREL
+; CHECK-NEXT: .long .Ltmp5@IMGREL
+; CHECK-NEXT: .long .Ltmp0@IMGREL
+; CHECK-NEXT: .long .Ltmp3@IMGREL+1
+; CHECK-NEXT: .long filt1@IMGREL
+; CHECK-NEXT: .long .Ltmp6@IMGREL
+; CHECK: .text
+; CHECK: .seh_endproc
+
+define void @two_invoke_gap() {
+entry:
+ invoke void @try_body()
+ to label %again unwind label %lpad
+
+again:
+ call void @do_nothing_on_unwind()
+ invoke void @try_body()
+ to label %done unwind label %lpad
+
+done:
+ ret void
+
+lpad:
+ %vals = landingpad { i8*, i32 } personality i8* bitcast (i32 (...)* @__C_specific_handler to i8*)
+ catch i8* bitcast (i32 (i8*, i8*)* @filt0 to i8*)
+ %sel = extractvalue { i8*, i32 } %vals, 1
+ call void @use_selector(i32 %sel)
+ ret void
+}
+
+; Normal path code
+
+; CHECK-LABEL: {{^}}two_invoke_gap:
+; CHECK: .seh_proc two_invoke_gap
+; CHECK: .seh_handler __C_specific_handler, @unwind, @except
+; CHECK: .Ltmp11:
+; CHECK: callq try_body
+; CHECK-NEXT: .Ltmp12:
+; CHECK: callq do_nothing_on_unwind
+; CHECK: .Ltmp13:
+; CHECK: callq try_body
+; CHECK-NEXT: .Ltmp14:
+; CHECK: retq
+
+; Landing pad code
+
+; CHECK: .Ltmp16:
+; CHECK: movl $1, %ecx
+; CHECK: callq use_selector
+
+; CHECK: .seh_handlerdata
+; CHECK-NEXT: .long 2
+; CHECK-NEXT: .long .Ltmp11@IMGREL
+; CHECK-NEXT: .long .Ltmp12@IMGREL+1
+; CHECK-NEXT: .long filt0@IMGREL
+; CHECK-NEXT: .long .Ltmp16@IMGREL
+; CHECK-NEXT: .long .Ltmp13@IMGREL
+; CHECK-NEXT: .long .Ltmp14@IMGREL+1
+; CHECK-NEXT: .long filt0@IMGREL
+; CHECK-NEXT: .long .Ltmp16@IMGREL
+; CHECK: .text
+; CHECK: .seh_endproc
+
+define void @two_invoke_nounwind_gap() {
+entry:
+ invoke void @try_body()
+ to label %again unwind label %lpad
+
+again:
+ call void @cannot_unwind()
+ invoke void @try_body()
+ to label %done unwind label %lpad
+
+done:
+ ret void
+
+lpad:
+ %vals = landingpad { i8*, i32 } personality i8* bitcast (i32 (...)* @__C_specific_handler to i8*)
+ catch i8* bitcast (i32 (i8*, i8*)* @filt0 to i8*)
+ %sel = extractvalue { i8*, i32 } %vals, 1
+ call void @use_selector(i32 %sel)
+ ret void
+}
+
+; Normal path code
+
+; CHECK-LABEL: {{^}}two_invoke_nounwind_gap:
+; CHECK: .seh_proc two_invoke_nounwind_gap
+; CHECK: .seh_handler __C_specific_handler, @unwind, @except
+; CHECK: .Ltmp21:
+; CHECK: callq try_body
+; CHECK-NEXT: .Ltmp22:
+; CHECK: callq cannot_unwind
+; CHECK: .Ltmp23:
+; CHECK: callq try_body
+; CHECK-NEXT: .Ltmp24:
+; CHECK: retq
+
+; Landing pad code
+
+; CHECK: .Ltmp26:
+; CHECK: movl $1, %ecx
+; CHECK: callq use_selector
+
+; CHECK: .seh_handlerdata
+; CHECK-NEXT: .long 1
+; CHECK-NEXT: .long .Ltmp21@IMGREL
+; CHECK-NEXT: .long .Ltmp24@IMGREL+1
+; CHECK-NEXT: .long filt0@IMGREL
+; CHECK-NEXT: .long .Ltmp26@IMGREL
+; CHECK: .text
+; CHECK: .seh_endproc
+
+declare void @try_body()
+declare void @do_nothing_on_unwind()
+declare void @cannot_unwind() nounwind
+declare void @use_selector(i32)
+
+declare i32 @filt0(i8* %eh_info, i8* %rsp)
+declare i32 @filt1(i8* %eh_info, i8* %rsp)
+
+declare void @handler0()
+declare void @handler1()
+
+declare i32 @__C_specific_handler(...)
+declare i32 @llvm.eh.typeid.for(i8*) readnone nounwind
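
The .seh_handlerdata sequences checked above follow the Win64 SEH scope-table
layout: a record count, then four image-relative offsets per guarded range. A
C sketch of that layout (field names follow the usual Windows convention; the
exact declaration is an assumption, not an SDK excerpt):

    #include <stdint.h>

    /* One ".long" each, in the order the CHECK lines expect. */
    typedef struct ScopeTableSketch {
        uint32_t Count;                  /* .long 2                */
        struct {
            uint32_t BeginAddress;       /* .long .Ltmp0@IMGREL    */
            uint32_t EndAddress;         /* .long .Ltmp3@IMGREL+1  */
            uint32_t HandlerAddress;     /* .long filt0@IMGREL     */
            uint32_t JumpTarget;         /* .long .Ltmp5@IMGREL    */
        } ScopeRecord[1];                /* Count entries in practice */
    } ScopeTableSketch;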
diff --git a/test/CodeGen/X86/seh-catch-all.ll b/test/CodeGen/X86/seh-catch-all.ll
new file mode 100644
index 0000000..8e1eb55
--- /dev/null
+++ b/test/CodeGen/X86/seh-catch-all.ll
@@ -0,0 +1,33 @@
+; RUN: llc -mtriple=x86_64-windows-msvc < %s | FileCheck %s
+
+@str = internal unnamed_addr constant [10 x i8] c"recovered\00", align 1
+
+declare i32 @__C_specific_handler(...)
+declare void @crash()
+declare i32 @puts(i8*)
+
+define i32 @main() {
+entry:
+ invoke void @crash()
+ to label %__try.cont unwind label %lpad
+
+lpad:
+ %0 = landingpad { i8*, i32 } personality i8* bitcast (i32 (...)* @__C_specific_handler to i8*)
+ catch i8* null
+ call i32 @puts(i8* getelementptr inbounds ([10 x i8]* @str, i64 0, i64 0))
+ br label %__try.cont
+
+__try.cont:
+ ret i32 0
+
+eh.resume:
+ resume { i8*, i32 } %0
+}
+
+; CHECK-LABEL: main:
+; CHECK: .seh_handlerdata
+; CHECK-NEXT: .long 1
+; CHECK-NEXT: .Ltmp{{[0-9]+}}@IMGREL
+; CHECK-NEXT: .Ltmp{{[0-9]+}}@IMGREL+1
+; CHECK-NEXT: 1
+; CHECK-NEXT: .Ltmp{{[0-9]+}}@IMGREL
diff --git a/test/CodeGen/X86/seh-filter.ll b/test/CodeGen/X86/seh-filter.ll
new file mode 100644
index 0000000..6a3a23e
--- /dev/null
+++ b/test/CodeGen/X86/seh-filter.ll
@@ -0,0 +1,21 @@
+; RUN: llc -O0 -mtriple=x86_64-windows-msvc < %s | FileCheck %s
+
+declare void @g()
+define void @f() {
+ invoke void @g() to label %return unwind label %lpad
+
+return:
+ ret void
+
+lpad:
+ %ehptrs = landingpad {i8*, i32} personality i8* bitcast (i32 (...)* @__C_specific_handler to i8*)
+ filter [0 x i8*] zeroinitializer
+ call void @__cxa_call_unexpected(i8* null)
+ unreachable
+}
+declare i32 @__C_specific_handler(...)
+declare void @__cxa_call_unexpected(i8*)
+
+; We don't emit entries for filters.
+; CHECK: .seh_handlerdata
+; CHECK: .long 0
diff --git a/test/CodeGen/X86/seh-finally.ll b/test/CodeGen/X86/seh-finally.ll
new file mode 100755
index 0000000..d883663
--- /dev/null
+++ b/test/CodeGen/X86/seh-finally.ll
@@ -0,0 +1,45 @@
+; RUN: llc -mtriple=x86_64-windows-msvc < %s | FileCheck %s
+
+@str_recovered = internal unnamed_addr constant [10 x i8] c"recovered\00", align 1
+
+declare void @crash()
+
+define i32 @main() {
+entry:
+ invoke void @crash()
+ to label %invoke.cont unwind label %lpad
+
+invoke.cont: ; preds = %entry
+ %call = call i32 @puts(i8* getelementptr inbounds ([10 x i8]* @str_recovered, i64 0, i64 0))
+ call void @abort()
+ ret i32 0
+
+lpad: ; preds = %entry
+ %0 = landingpad { i8*, i32 } personality i8* bitcast (i32 (...)* @__C_specific_handler to i8*)
+ cleanup
+ %1 = extractvalue { i8*, i32 } %0, 0
+ %2 = extractvalue { i8*, i32 } %0, 1
+ %call2 = invoke i32 @puts(i8* getelementptr inbounds ([10 x i8]* @str_recovered, i64 0, i64 0))
+ to label %invoke.cont1 unwind label %terminate.lpad
+
+invoke.cont1: ; preds = %lpad
+ resume { i8*, i32 } %0
+
+terminate.lpad: ; preds = %lpad
+ %3 = landingpad { i8*, i32 } personality i8* bitcast (i32 (...)* @__C_specific_handler to i8*)
+ catch i8* null
+ call void @abort()
+ unreachable
+}
+
+; CHECK: main:
+
+; FIXME: No handlers yet!
+; CHECK: .seh_handlerdata
+; CHECK-NEXT: .long 0
+
+declare i32 @__C_specific_handler(...)
+
+declare i32 @puts(i8*)
+
+declare void @abort()
diff --git a/test/CodeGen/X86/seh-safe-div.ll b/test/CodeGen/X86/seh-safe-div.ll
new file mode 100644
index 0000000..e294f24
--- /dev/null
+++ b/test/CodeGen/X86/seh-safe-div.ll
@@ -0,0 +1,197 @@
+; RUN: llc -mtriple x86_64-pc-windows-msvc < %s | FileCheck %s
+
+; This test case is also intended to be run manually as a complete functional
+; test. It should link, print something, and exit zero rather than crashing.
+; It is the hypothetical lowering of a C source program that looks like:
+;
+; int safe_div(int *n, int *d) {
+; int r;
+; __try {
+; __try {
+; r = *n / *d;
+; } __except(GetExceptionCode() == EXCEPTION_ACCESS_VIOLATION) {
+; puts("EXCEPTION_ACCESS_VIOLATION");
+; r = -1;
+; }
+; } __except(GetExceptionCode() == EXCEPTION_INT_DIVIDE_BY_ZERO) {
+; puts("EXCEPTION_INT_DIVIDE_BY_ZERO");
+; r = -2;
+; }
+; return r;
+; }
+
+@str1 = internal constant [27 x i8] c"EXCEPTION_ACCESS_VIOLATION\00"
+@str2 = internal constant [29 x i8] c"EXCEPTION_INT_DIVIDE_BY_ZERO\00"
+
+define i32 @safe_div(i32* %n, i32* %d) {
+entry:
+ %r = alloca i32, align 4
+ invoke void @try_body(i32* %r, i32* %n, i32* %d)
+ to label %__try.cont unwind label %lpad
+
+lpad:
+ %vals = landingpad { i8*, i32 } personality i8* bitcast (i32 (...)* @__C_specific_handler to i8*)
+ catch i8* bitcast (i32 (i8*, i8*)* @safe_div_filt0 to i8*)
+ catch i8* bitcast (i32 (i8*, i8*)* @safe_div_filt1 to i8*)
+ %ehptr = extractvalue { i8*, i32 } %vals, 0
+ %sel = extractvalue { i8*, i32 } %vals, 1
+ %filt0_val = call i32 @llvm.eh.typeid.for(i8* bitcast (i32 (i8*, i8*)* @safe_div_filt0 to i8*))
+ %is_filt0 = icmp eq i32 %sel, %filt0_val
+ br i1 %is_filt0, label %handler0, label %eh.dispatch1
+
+eh.dispatch1:
+ %filt1_val = call i32 @llvm.eh.typeid.for(i8* bitcast (i32 (i8*, i8*)* @safe_div_filt1 to i8*))
+ %is_filt1 = icmp eq i32 %sel, %filt1_val
+ br i1 %is_filt1, label %handler1, label %eh.resume
+
+handler0:
+ call void @puts(i8* getelementptr ([27 x i8]* @str1, i32 0, i32 0))
+ store i32 -1, i32* %r, align 4
+ br label %__try.cont
+
+handler1:
+ call void @puts(i8* getelementptr ([29 x i8]* @str2, i32 0, i32 0))
+ store i32 -2, i32* %r, align 4
+ br label %__try.cont
+
+eh.resume:
+ resume { i8*, i32 } %vals
+
+__try.cont:
+ %safe_ret = load i32* %r, align 4
+ ret i32 %safe_ret
+}
+
+; Normal path code
+
+; CHECK: {{^}}safe_div:
+; CHECK: .seh_proc safe_div
+; CHECK: .seh_handler __C_specific_handler, @unwind, @except
+; CHECK: .Ltmp0:
+; CHECK: leaq [[rloc:.*\(%rsp\)]], %rcx
+; CHECK: callq try_body
+; CHECK-NEXT: .Ltmp1
+; CHECK: .LBB0_7:
+; CHECK: movl [[rloc]], %eax
+; CHECK: retq
+
+; Landing pad code
+
+; CHECK: .Ltmp3:
+; CHECK: movl $1, %[[sel:[a-z]+]]
+; CHECK: .Ltmp4
+; CHECK: movl $2, %[[sel]]
+; CHECK: .L{{.*}}:
+; CHECK: cmpl $1, %[[sel]]
+
+; CHECK: # %handler0
+; CHECK: callq puts
+; CHECK: movl $-1, [[rloc]]
+; CHECK: jmp .LBB0_7
+
+; CHECK: cmpl $2, %[[sel]]
+
+; CHECK: # %handler1
+; CHECK: callq puts
+; CHECK: movl $-2, [[rloc]]
+; CHECK: jmp .LBB0_7
+
+; FIXME: EH preparation should eliminate the 'resume' instruction, and we
+; should not need the preceding 'cmp;je'.
+; CHECK-NOT: _Unwind_Resume
+; CHECK: ud2
+
+; CHECK: .seh_handlerdata
+; CHECK: .long 2
+; CHECK: .long .Ltmp0@IMGREL
+; CHECK: .long .Ltmp1@IMGREL+1
+; CHECK: .long safe_div_filt0@IMGREL
+; CHECK: .long .Ltmp3@IMGREL
+; CHECK: .long .Ltmp0@IMGREL
+; CHECK: .long .Ltmp1@IMGREL+1
+; CHECK: .long safe_div_filt1@IMGREL
+; CHECK: .long .Ltmp4@IMGREL
+; CHECK: .text
+; CHECK: .seh_endproc
+
+
+define void @try_body(i32* %r, i32* %n, i32* %d) {
+entry:
+ %0 = load i32* %n, align 4
+ %1 = load i32* %d, align 4
+ %div = sdiv i32 %0, %1
+ store i32 %div, i32* %r, align 4
+ ret void
+}
+
+; The prototype of these filter functions is:
+; int filter(EXCEPTION_POINTERS *eh_ptrs, void *rbp);
+
+; The definition of EXCEPTION_POINTERS is:
+; typedef struct _EXCEPTION_POINTERS {
+; EXCEPTION_RECORD *ExceptionRecord;
+; CONTEXT *ContextRecord;
+; } EXCEPTION_POINTERS;
+
+; The definition of EXCEPTION_RECORD is:
+; typedef struct _EXCEPTION_RECORD {
+; DWORD ExceptionCode;
+; ...
+; } EXCEPTION_RECORD;
+
+; The exception code can be retrieved with two loads, one for the record
+; pointer and one for the code.  The values of local variables can be
+; accessed via rbp, but that would require additional LLVM support that is
+; not yet implemented.
+
+define i32 @safe_div_filt0(i8* %eh_ptrs, i8* %rbp) {
+ %eh_ptrs_c = bitcast i8* %eh_ptrs to i32**
+ %eh_rec = load i32** %eh_ptrs_c
+ %eh_code = load i32* %eh_rec
+ ; EXCEPTION_ACCESS_VIOLATION = 0xC0000005
+ %cmp = icmp eq i32 %eh_code, 3221225477
+ %filt.res = zext i1 %cmp to i32
+ ret i32 %filt.res
+}
+
+define i32 @safe_div_filt1(i8* %eh_ptrs, i8* %rbp) {
+ %eh_ptrs_c = bitcast i8* %eh_ptrs to i32**
+ %eh_rec = load i32** %eh_ptrs_c
+ %eh_code = load i32* %eh_rec
+ ; EXCEPTION_INT_DIVIDE_BY_ZERO = 0xC0000094
+ %cmp = icmp eq i32 %eh_code, 3221225620
+ %filt.res = zext i1 %cmp to i32
+ ret i32 %filt.res
+}
+
+@str_result = internal constant [21 x i8] c"safe_div result: %d\0A\00"
+
+define i32 @main() {
+ %d.addr = alloca i32, align 4
+ %n.addr = alloca i32, align 4
+
+ store i32 10, i32* %n.addr, align 4
+ store i32 2, i32* %d.addr, align 4
+ %r1 = call i32 @safe_div(i32* %n.addr, i32* %d.addr)
+ call void (i8*, ...)* @printf(i8* getelementptr ([21 x i8]* @str_result, i32 0, i32 0), i32 %r1)
+
+ store i32 10, i32* %n.addr, align 4
+ store i32 0, i32* %d.addr, align 4
+ %r2 = call i32 @safe_div(i32* %n.addr, i32* %d.addr)
+ call void (i8*, ...)* @printf(i8* getelementptr ([21 x i8]* @str_result, i32 0, i32 0), i32 %r2)
+
+ %r3 = call i32 @safe_div(i32* %n.addr, i32* null)
+ call void (i8*, ...)* @printf(i8* getelementptr ([21 x i8]* @str_result, i32 0, i32 0), i32 %r3)
+ ret i32 0
+}
+
+define void @_Unwind_Resume() {
+ call void @abort()
+ unreachable
+}
+
+declare i32 @__C_specific_handler(...)
+declare i32 @llvm.eh.typeid.for(i8*) readnone nounwind
+declare void @puts(i8*)
+declare void @printf(i8*, ...)
+declare void @abort()
diff --git a/test/CodeGen/X86/selectiondag-crash.ll b/test/CodeGen/X86/selectiondag-crash.ll
new file mode 100644
index 0000000..9978902
--- /dev/null
+++ b/test/CodeGen/X86/selectiondag-crash.ll
@@ -0,0 +1,15 @@
+; RUN: llc -mtriple=x86_64-unknown-unknown -mcpu=corei7 < %s
+
+; Check that llc doesn't crash when attempting to fold a shuffle with
+; a splat mask into a constant build_vector.
+
+define <8 x i8> @autogen_SD26299(i8) {
+BB:
+ %Shuff = shufflevector <8 x i32> <i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1>, <8 x i32> zeroinitializer, <8 x i32> <i32 2, i32 undef, i32 6, i32 8, i32 undef, i32 12, i32 14, i32 0>
+ %Shuff14 = shufflevector <8 x i32> %Shuff, <8 x i32> %Shuff, <8 x i32> <i32 7, i32 9, i32 11, i32 undef, i32 undef, i32 1, i32 3, i32 5>
+ %Shuff35 = shufflevector <8 x i32> %Shuff14, <8 x i32> %Shuff, <8 x i32> <i32 undef, i32 1, i32 3, i32 5, i32 7, i32 9, i32 11, i32 13>
+ %I42 = insertelement <8 x i32> %Shuff35, i32 88608, i32 0
+ %Shuff48 = shufflevector <8 x i32> %Shuff35, <8 x i32> %I42, <8 x i32> <i32 4, i32 6, i32 8, i32 10, i32 12, i32 14, i32 0, i32 2>
+ %Tr59 = trunc <8 x i32> %Shuff48 to <8 x i8>
+ ret <8 x i8> %Tr59
+}
diff --git a/test/CodeGen/X86/shrink-compare.ll b/test/CodeGen/X86/shrink-compare.ll
index fc7ee06..4ddef4c 100644
--- a/test/CodeGen/X86/shrink-compare.ll
+++ b/test/CodeGen/X86/shrink-compare.ll
@@ -89,3 +89,151 @@ if.end:
; CHECK-NOT: cmpl $1,{{.*}}x+4
; CHECK: ret
}
+
+; CHECK-LABEL: test2_1:
+; CHECK: movzbl
+; CHECK: cmpl $256
+; CHECK: jne
+define void @test2_1(i32 %X) nounwind minsize {
+entry:
+ %and = and i32 %X, 255
+ %cmp = icmp eq i32 %and, 256
+ br i1 %cmp, label %if.then, label %if.end
+
+if.then:
+ tail call void @bar() nounwind
+ br label %if.end
+
+if.end:
+ ret void
+}
+
+; CHECK-LABEL: test_sext_i8_icmp_1:
+; CHECK: cmpb $1, %{{dil|cl}}
+define void @test_sext_i8_icmp_1(i8 %x) nounwind minsize {
+entry:
+ %sext = sext i8 %x to i32
+ %cmp = icmp eq i32 %sext, 1
+ br i1 %cmp, label %if.then, label %if.end
+
+if.then:
+ tail call void @bar() nounwind
+ br label %if.end
+
+if.end:
+ ret void
+}
+
+; CHECK-LABEL: test_sext_i8_icmp_47:
+; CHECK: cmpb $47, %{{dil|cl}}
+define void @test_sext_i8_icmp_47(i8 %x) nounwind minsize {
+entry:
+ %sext = sext i8 %x to i32
+ %cmp = icmp eq i32 %sext, 47
+ br i1 %cmp, label %if.then, label %if.end
+
+if.then:
+ tail call void @bar() nounwind
+ br label %if.end
+
+if.end:
+ ret void
+}
+
+; CHECK-LABEL: test_sext_i8_icmp_127:
+; CHECK: cmpb $127, %{{dil|cl}}
+define void @test_sext_i8_icmp_127(i8 %x) nounwind minsize {
+entry:
+ %sext = sext i8 %x to i32
+ %cmp = icmp eq i32 %sext, 127
+ br i1 %cmp, label %if.then, label %if.end
+
+if.then:
+ tail call void @bar() nounwind
+ br label %if.end
+
+if.end:
+ ret void
+}
+
+; CHECK-LABEL: test_sext_i8_icmp_neg1:
+; CHECK: cmpb $-1, %{{dil|cl}}
+define void @test_sext_i8_icmp_neg1(i8 %x) nounwind minsize {
+entry:
+ %sext = sext i8 %x to i32
+ %cmp = icmp eq i32 %sext, -1
+ br i1 %cmp, label %if.then, label %if.end
+
+if.then:
+ tail call void @bar() nounwind
+ br label %if.end
+
+if.end:
+ ret void
+}
+
+; CHECK-LABEL: test_sext_i8_icmp_neg2:
+; CHECK: cmpb $-2, %{{dil|cl}}
+define void @test_sext_i8_icmp_neg2(i8 %x) nounwind minsize {
+entry:
+ %sext = sext i8 %x to i32
+ %cmp = icmp eq i32 %sext, -2
+ br i1 %cmp, label %if.then, label %if.end
+
+if.then:
+ tail call void @bar() nounwind
+ br label %if.end
+
+if.end:
+ ret void
+}
+
+; CHECK-LABEL: test_sext_i8_icmp_neg127:
+; CHECK: cmpb $-127, %{{dil|cl}}
+define void @test_sext_i8_icmp_neg127(i8 %x) nounwind minsize {
+entry:
+ %sext = sext i8 %x to i32
+ %cmp = icmp eq i32 %sext, -127
+ br i1 %cmp, label %if.then, label %if.end
+
+if.then:
+ tail call void @bar() nounwind
+ br label %if.end
+
+if.end:
+ ret void
+}
+
+; CHECK-LABEL: test_sext_i8_icmp_neg128:
+; CHECK: cmpb $-128, %{{dil|cl}}
+define void @test_sext_i8_icmp_neg128(i8 %x) nounwind minsize {
+entry:
+ %sext = sext i8 %x to i32
+ %cmp = icmp eq i32 %sext, -128
+ br i1 %cmp, label %if.then, label %if.end
+
+if.then:
+ tail call void @bar() nounwind
+ br label %if.end
+
+if.end:
+ ret void
+}
+
+; CHECK-LABEL: test_sext_i8_icmp_255:
+; CHECK: movb $1,
+; CHECK: testb
+; CHECK: jne
+define void @test_sext_i8_icmp_255(i8 %x) nounwind minsize {
+entry:
+ %sext = sext i8 %x to i32
+ %cmp = icmp eq i32 %sext, 255
+ br i1 %cmp, label %if.then, label %if.end
+
+if.then:
+ tail call void @bar() nounwind
+ br label %if.end
+
+if.end:
+ ret void
+}
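
The last test differs from the cmpb cases above: an i8 sign-extended to i32
always lies in [-128, 127], so equality with 255 folds to a constant false and
no compare is emitted; the movb $1/testb pair is what remains of the folded
branch. The same folding in C (hypothetical function name):

    /* (int)x for a signed char x lies in [-128, 127], so the comparison
       below is statically false and the branch folds away entirely. */
    int sext_i8_eq_255(signed char x) {
        return (int)x == 255;   /* always 0 */
    }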
diff --git a/test/CodeGen/X86/sibcall-4.ll b/test/CodeGen/X86/sibcall-4.ll
index 980b0f7..2c7f51d 100644
--- a/test/CodeGen/X86/sibcall-4.ll
+++ b/test/CodeGen/X86/sibcall-4.ll
@@ -1,13 +1,13 @@
; RUN: llc < %s -mtriple=i386-pc-linux-gnu | FileCheck %s
; pr7610
-define cc10 void @t(i32* %Base_Arg, i32* %Sp_Arg, i32* %Hp_Arg, i32 %R1_Arg) nounwind {
+define ghccc void @t(i32* %Base_Arg, i32* %Sp_Arg, i32* %Hp_Arg, i32 %R1_Arg) nounwind {
cm1:
; CHECK-LABEL: t:
; CHECK: jmpl *%eax
%nm3 = getelementptr i32* %Sp_Arg, i32 1
%nm9 = load i32* %Sp_Arg
%nma = inttoptr i32 %nm9 to void (i32*, i32*, i32*, i32)*
- tail call cc10 void %nma(i32* %Base_Arg, i32* %nm3, i32* %Hp_Arg, i32 %R1_Arg) nounwind
+ tail call ghccc void %nma(i32* %Base_Arg, i32* %nm3, i32* %Hp_Arg, i32 %R1_Arg) nounwind
ret void
}
diff --git a/test/CodeGen/X86/sibcall-5.ll b/test/CodeGen/X86/sibcall-5.ll
index c04af23..b065cce 100644
--- a/test/CodeGen/X86/sibcall-5.ll
+++ b/test/CodeGen/X86/sibcall-5.ll
@@ -62,4 +62,4 @@ declare i8* @objc_msgSend(i8*, i8*, ...)
declare double @floor(double) optsize
-!0 = metadata !{}
+!0 = !{}
diff --git a/test/CodeGen/X86/sibcall-win64.ll b/test/CodeGen/X86/sibcall-win64.ll
new file mode 100644
index 0000000..f703872
--- /dev/null
+++ b/test/CodeGen/X86/sibcall-win64.ll
@@ -0,0 +1,42 @@
+; RUN: llc < %s -mtriple=x86_64-pc-linux | FileCheck %s
+
+declare x86_64_win64cc void @win64_callee(i32)
+declare void @sysv_callee(i32)
+
+define void @sysv_caller(i32 %p1) {
+entry:
+ tail call x86_64_win64cc void @win64_callee(i32 %p1)
+ ret void
+}
+
+; CHECK-LABEL: sysv_caller:
+; CHECK: subq $40, %rsp
+; CHECK: callq win64_callee
+; CHECK: addq $40, %rsp
+; CHECK: retq
+
+define x86_64_win64cc void @win64_caller(i32 %p1) {
+entry:
+ tail call void @sysv_callee(i32 %p1)
+ ret void
+}
+
+; CHECK-LABEL: win64_caller:
+; CHECK: callq sysv_callee
+; CHECK: retq
+
+define void @sysv_matched(i32 %p1) {
+ tail call void @sysv_callee(i32 %p1)
+ ret void
+}
+
+; CHECK-LABEL: sysv_matched:
+; CHECK: jmp sysv_callee # TAILCALL
+
+define x86_64_win64cc void @win64_matched(i32 %p1) {
+ tail call x86_64_win64cc void @win64_callee(i32 %p1)
+ ret void
+}
+
+; CHECK-LABEL: win64_matched:
+; CHECK: jmp win64_callee # TAILCALL
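
The asymmetry checked above is deliberate: the tail call only becomes a jmp
when caller and callee share a calling convention. A Win64 callee, for
instance, expects the caller to reserve 32 bytes of shadow space, which is
what the subq $40/addq $40 around the first callq provides (plus alignment).
A hedged C analogy using the ms_abi attribute (names are illustrative):

    /* A SysV-convention caller invoking a Win64-convention callee:
       the call cannot become a sibcall, so it stays call + ret. */
    __attribute__((ms_abi)) void win64_style_callee(int);

    void sysv_style_caller(int p) {
        win64_style_callee(p);
    }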
diff --git a/test/CodeGen/X86/sibcall.ll b/test/CodeGen/X86/sibcall.ll
index 28fc626..4256f9e 100644
--- a/test/CodeGen/X86/sibcall.ll
+++ b/test/CodeGen/X86/sibcall.ll
@@ -1,5 +1,6 @@
; RUN: llc < %s -mtriple=i686-linux -mcpu=core2 -mattr=+sse2 -asm-verbose=false | FileCheck %s -check-prefix=32
; RUN: llc < %s -mtriple=x86_64-linux -mcpu=core2 -mattr=+sse2 -asm-verbose=false | FileCheck %s -check-prefix=64
+; RUN: llc < %s -mtriple=x86_64-linux-gnux32 -mcpu=core2 -mattr=+sse2 -asm-verbose=false | FileCheck %s -check-prefix=X32ABI
define void @t1(i32 %x) nounwind ssp {
entry:
@@ -8,6 +9,9 @@ entry:
; 64-LABEL: t1:
; 64: jmp {{_?}}foo
+
+; X32ABI-LABEL: t1:
+; X32ABI: jmp {{_?}}foo
tail call void @foo() nounwind
ret void
}
@@ -21,6 +25,9 @@ entry:
; 64-LABEL: t2:
; 64: jmp {{_?}}foo2
+
+; X32ABI-LABEL: t2:
+; X32ABI: jmp {{_?}}foo2
%0 = tail call i32 @foo2() nounwind
ret void
}
@@ -34,6 +41,9 @@ entry:
; 64-LABEL: t3:
; 64: jmp {{_?}}foo3
+
+; X32ABI-LABEL: t3:
+; X32ABI: jmp {{_?}}foo3
%0 = tail call i32 @foo3() nounwind
ret void
}
@@ -49,6 +59,10 @@ entry:
; 64-LABEL: t4:
; 64-NOT: call
; 64: jmpq *
+
+; X32ABI-LABEL: t4:
+; X32ABI-NOT: call
+; X32ABI: jmpq *
tail call void %x(i32 0) nounwind
ret void
}
@@ -62,6 +76,13 @@ entry:
; 64-LABEL: t5:
; 64-NOT: call
; 64: jmpq *%rdi
+
+; X32ABI-LABEL: t5:
+; X32ABI-NOT: call
+; FIXME: This isn't needed since x32 psABI specifies that callers must
+; zero-extend pointers passed in registers.
+; X32ABI: movl %edi, %eax
+; X32ABI: jmpq *%rax
tail call void %x() nounwind
ret void
}
@@ -75,6 +96,10 @@ entry:
; 64-LABEL: t6:
; 64: jmp {{_?}}t6
; 64: jmp {{_?}}bar
+
+; X32ABI-LABEL: t6:
+; X32ABI: jmp {{_?}}t6
+; X32ABI: jmp {{_?}}bar
%0 = icmp slt i32 %x, 10
br i1 %0, label %bb, label %bb1
@@ -97,6 +122,9 @@ entry:
; 64-LABEL: t7:
; 64: jmp {{_?}}bar2
+
+; X32ABI-LABEL: t7:
+; X32ABI: jmp {{_?}}bar2
%0 = tail call i32 @bar2(i32 %a, i32 %b, i32 %c) nounwind
ret i32 %0
}
@@ -110,6 +138,9 @@ entry:
; 64-LABEL: t8:
; 64: jmp {{_?}}bar3
+
+; X32ABI-LABEL: t8:
+; X32ABI: jmp {{_?}}bar3
%0 = tail call signext i16 @bar3() nounwind ; <i16> [#uses=1]
ret i16 %0
}
@@ -123,6 +154,9 @@ entry:
; 64-LABEL: t9:
; 64: jmpq *
+
+; X32ABI-LABEL: t9:
+; X32ABI: jmpq *
%0 = bitcast i32 (i32)* %x to i16 (i32)*
%1 = tail call signext i16 %0(i32 0) nounwind
ret i16 %1
@@ -135,6 +169,9 @@ entry:
; 64-LABEL: t10:
; 64: callq
+
+; X32ABI-LABEL: t10:
+; X32ABI: callq
%0 = tail call i32 @foo4() noreturn nounwind
unreachable
}
@@ -153,9 +190,14 @@ define i32 @t11(i32 %x, i32 %y, i32 %z.0, i32 %z.1, i32 %z.2) nounwind ssp {
; 32: jmp {{_?}}foo5
; 64-LABEL: t11:
-; 64-NOT: subq ${{[0-9]+}}, %esp
-; 64-NOT: addq ${{[0-9]+}}, %esp
+; 64-NOT: subq ${{[0-9]+}}, %rsp
+; 64-NOT: addq ${{[0-9]+}}, %rsp
; 64: jmp {{_?}}foo5
+
+; X32ABI-LABEL: t11:
+; X32ABI-NOT: subl ${{[0-9]+}}, %esp
+; X32ABI-NOT: addl ${{[0-9]+}}, %esp
+; X32ABI: jmp {{_?}}foo5
entry:
%0 = icmp eq i32 %x, 0
br i1 %0, label %bb6, label %bb
@@ -179,9 +221,14 @@ define i32 @t12(i32 %x, i32 %y, %struct.t* byval align 4 %z) nounwind ssp {
; 32: jmp {{_?}}foo6
; 64-LABEL: t12:
-; 64-NOT: subq ${{[0-9]+}}, %esp
-; 64-NOT: addq ${{[0-9]+}}, %esp
+; 64-NOT: subq ${{[0-9]+}}, %rsp
+; 64-NOT: addq ${{[0-9]+}}, %rsp
; 64: jmp {{_?}}foo6
+
+; X32ABI-LABEL: t12:
+; X32ABI-NOT: subl ${{[0-9]+}}, %esp
+; X32ABI-NOT: addl ${{[0-9]+}}, %esp
+; X32ABI: jmp {{_?}}foo6
entry:
%0 = icmp eq i32 %x, 0
br i1 %0, label %bb2, label %bb
@@ -210,6 +257,11 @@ define %struct.ns* @t13(%struct.cp* %yy) nounwind ssp {
; 64-NOT: jmp
; 64: callq
; 64: ret
+
+; X32ABI-LABEL: t13:
+; X32ABI-NOT: jmp
+; X32ABI: callq
+; X32ABI: ret
entry:
%0 = tail call fastcc %struct.ns* @foo7(%struct.cp* byval align 4 %yy, i8 signext 0) nounwind
ret %struct.ns* %0
@@ -230,6 +282,11 @@ entry:
; 64: movq 32(%rdi)
; 64-NOT: movq 16(%rdi)
; 64: jmpq *16({{%rdi|%rax}})
+
+; X32ABI-LABEL: t14:
+; X32ABI: movl 20(%edi), %edi
+; X32ABI-NEXT: movl 12(%edi), %eax
+; X32ABI-NEXT: jmpq *%rax
%0 = getelementptr inbounds %struct.__block_literal_2* %.block_descriptor, i64 0, i32 5 ; <void ()**> [#uses=1]
%1 = load void ()** %0, align 8 ; <void ()*> [#uses=2]
%2 = bitcast void ()* %1 to %struct.__block_literal_1* ; <%struct.__block_literal_1*> [#uses=1]
@@ -252,6 +309,10 @@ define void @t15(%struct.foo* noalias sret %agg.result) nounwind {
; 64-LABEL: t15:
; 64: callq {{_?}}f
; 64: retq
+
+; X32ABI-LABEL: t15:
+; X32ABI: callq {{_?}}f
+; X32ABI: retq
tail call fastcc void @f(%struct.foo* noalias sret %agg.result) nounwind
ret void
}
@@ -266,6 +327,9 @@ entry:
; 64-LABEL: t16:
; 64: jmp {{_?}}bar4
+
+; X32ABI-LABEL: t16:
+; X32ABI: jmp {{_?}}bar4
%0 = tail call double @bar4() nounwind
ret void
}
@@ -281,6 +345,10 @@ entry:
; 64-LABEL: t17:
; 64: xorl %eax, %eax
; 64: jmp {{_?}}bar5
+
+; X32ABI-LABEL: t17:
+; X32ABI: xorl %eax, %eax
+; X32ABI: jmp {{_?}}bar5
tail call void (...)* @bar5() nounwind
ret void
}
@@ -297,6 +365,10 @@ entry:
; 64-LABEL: t18:
; 64: xorl %eax, %eax
; 64: jmp {{_?}}bar6
+
+; X32ABI-LABEL: t18:
+; X32ABI: xorl %eax, %eax
+; X32ABI: jmp {{_?}}bar6
%0 = tail call double (...)* @bar6() nounwind
ret void
}
@@ -308,6 +380,10 @@ entry:
; CHECK-LABEL: t19:
; CHECK: andl $-32
; CHECK: calll {{_?}}foo
+
+; X32ABI-LABEL: t19:
+; X32ABI: andl $-32
+; X32ABI: callq {{_?}}foo
tail call void @foo() nounwind
ret void
}
@@ -324,6 +400,9 @@ entry:
; 64-LABEL: t20:
; 64: jmp {{_?}}foo20
+
+; X32ABI-LABEL: t20:
+; X32ABI: jmp {{_?}}foo20
%0 = tail call fastcc double @foo20(double %x) nounwind
ret double %0
}
diff --git a/test/CodeGen/X86/sincos-opt.ll b/test/CodeGen/X86/sincos-opt.ll
index 1e34a2b..9d02bcd 100644
--- a/test/CodeGen/X86/sincos-opt.ll
+++ b/test/CodeGen/X86/sincos-opt.ll
@@ -15,9 +15,8 @@ entry:
; OSX_SINCOS-LABEL: test1:
; OSX_SINCOS: callq ___sincosf_stret
-; OSX_SINCOS: movaps %xmm0, %xmm1
-; OSX_SINCOS: shufps {{.*}} ## xmm1 = xmm1[1,1,2,3]
-; OSX_SINCOS: addss %xmm0, %xmm1
+; OSX_SINCOS: movshdup {{.*}} xmm1 = xmm0[1,1,3,3]
+; OSX_SINCOS: addss %xmm1, %xmm0
; OSX_NOOPT: test1
; OSX_NOOPT: callq _sinf
diff --git a/test/CodeGen/X86/sink-blockfreq.ll b/test/CodeGen/X86/sink-blockfreq.ll
index 6e3a003..c2f0411 100644
--- a/test/CodeGen/X86/sink-blockfreq.ll
+++ b/test/CodeGen/X86/sink-blockfreq.ll
@@ -40,6 +40,6 @@ exit:
ret i32 0
}
-!0 = metadata !{metadata !"branch_weights", i32 4, i32 1}
-!1 = metadata !{metadata !"branch_weights", i32 128, i32 1}
-!2 = metadata !{metadata !"branch_weights", i32 1, i32 1}
+!0 = !{!"branch_weights", i32 4, i32 1}
+!1 = !{!"branch_weights", i32 128, i32 1}
+!2 = !{!"branch_weights", i32 1, i32 1}
diff --git a/test/CodeGen/X86/sink-hoist.ll b/test/CodeGen/X86/sink-hoist.ll
index 64f5311..455cf24 100644
--- a/test/CodeGen/X86/sink-hoist.ll
+++ b/test/CodeGen/X86/sink-hoist.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=x86-64 -asm-verbose=false -mtriple=x86_64-unknown-linux-gnu -mcpu=nehalem -post-RA-scheduler=true -schedmodel=false | FileCheck %s
+; RUN: llc < %s -verify-machineinstrs -march=x86-64 -asm-verbose=false -mtriple=x86_64-unknown-linux-gnu -mcpu=nehalem -post-RA-scheduler=true -schedmodel=false | FileCheck %s
; Currently, floating-point selects are lowered to CFG triangles.
; This means that one side of the select is always unconditionally
diff --git a/test/CodeGen/X86/sjlj-baseptr.ll b/test/CodeGen/X86/sjlj-baseptr.ll
new file mode 100644
index 0000000..e439ff4
--- /dev/null
+++ b/test/CodeGen/X86/sjlj-baseptr.ll
@@ -0,0 +1,37 @@
+; RUN: llc < %s -mtriple=i386-pc-linux -mcpu=corei7 -relocation-model=static | FileCheck --check-prefix=X86 %s
+; RUN: llc < %s -mtriple=x86_64-pc-linux -mcpu=corei7 -relocation-model=static | FileCheck --check-prefix=X64 %s
+
+target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
+
+%Foo = type { [125 x i8] }
+
+declare i32 @llvm.eh.sjlj.setjmp(i8*) nounwind
+
+declare void @whatever(i64, %Foo*, i8**, i8*, i8*, i32) #0
+
+attributes #0 = { nounwind uwtable "no-frame-pointer-elim"="true" }
+
+define i32 @test1(i64 %n, %Foo* byval nocapture readnone align 8 %f) #0 {
+entry:
+ %buf = alloca [5 x i8*], align 16
+ %p = alloca i8*, align 8
+ %q = alloca i8, align 64
+ %r = bitcast [5 x i8*]* %buf to i8*
+ %s = alloca i8, i64 %n, align 1
+ store i8* %s, i8** %p, align 8
+ %t = call i32 @llvm.eh.sjlj.setjmp(i8* %s)
+ call void @whatever(i64 %n, %Foo* %f, i8** %p, i8* %q, i8* %s, i32 %t) #1
+ ret i32 0
+; X86: movl %esp, %esi
+; X86: movl %esp, -16(%ebp)
+; X86: {{.LBB.*:}}
+; X86: movl -16(%ebp), %esi
+; X86: {{.LBB.*:}}
+; X64: movq %rsp, %rbx
+; X64: movq %rsp, -48(%rbp)
+; X64: {{.LBB.*:}}
+; X64: movq -48(%rbp), %rbx
+; X64: {{.LBB.*:}}
+}
+
+
diff --git a/test/CodeGen/X86/slow-div.ll b/test/CodeGen/X86/slow-div.ll
new file mode 100644
index 0000000..5222382
--- /dev/null
+++ b/test/CodeGen/X86/slow-div.ll
@@ -0,0 +1,28 @@
+; RUN: llc -mtriple=x86_64-unknown-linux-gnu -mattr=+idivl-to-divb < %s | FileCheck -check-prefix=DIV32 %s
+; RUN: llc -mtriple=x86_64-unknown-linux-gnu -mattr=+idivq-to-divw < %s | FileCheck -check-prefix=DIV64 %s
+
+define i32 @div32(i32 %a, i32 %b) {
+entry:
+; DIV32-LABEL: div32:
+; DIV32: orl %{{.*}}, [[REG:%[a-z]+]]
+; DIV32: testl $-256, [[REG]]
+; DIV32: divb
+; DIV64-LABEL: div32:
+; DIV64-NOT: divb
+ %div = sdiv i32 %a, %b
+ ret i32 %div
+}
+
+define i64 @div64(i64 %a, i64 %b) {
+entry:
+; DIV32-LABEL: div64:
+; DIV32-NOT: divw
+; DIV64-LABEL: div64:
+; DIV64: orq %{{.*}}, [[REG:%[a-z]+]]
+; DIV64: testq $-65536, [[REG]]
+; DIV64: divw
+ %div = sdiv i64 %a, %b
+ ret i64 %div
+}
+
+
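The +idivl-to-divb and +idivq-to-divw attributes model CPUs where wide divides
are slow: the backend ORs the two operands and tests the bits above the narrow
width, and when both values fit it issues the cheap narrow divide. Roughly, in
C (a sketch of the check the DIV32 lines describe, written for unsigned values
for brevity; the test itself uses sdiv):

    #include <stdint.h>

    /* orl + testl $-256 prove both operands fit in the low byte,
       so an 8-bit divide (divb) gives the same quotient. */
    uint32_t div32_sketch(uint32_t a, uint32_t b) {
        if (((a | b) & 0xFFFFFF00u) == 0)                /* testl $-256, reg */
            return (uint32_t)((uint8_t)a / (uint8_t)b);  /* divb */
        return a / b;                                    /* full divl */
    }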
diff --git a/test/CodeGen/X86/slow-incdec.ll b/test/CodeGen/X86/slow-incdec.ll
index 541d992..323e3ae 100644
--- a/test/CodeGen/X86/slow-incdec.ll
+++ b/test/CodeGen/X86/slow-incdec.ll
@@ -74,7 +74,7 @@ for.end: ; preds = %for.end.loopexit, %
ret i32 %i.0.lcssa
}
-!1 = metadata !{metadata !2, metadata !2, i64 0}
-!2 = metadata !{metadata !"int", metadata !3, i64 0}
-!3 = metadata !{metadata !"omnipotent char", metadata !4, i64 0}
-!4 = metadata !{metadata !"Simple C/C++ TBAA"}
+!1 = !{!2, !2, i64 0}
+!2 = !{!"int", !3, i64 0}
+!3 = !{!"omnipotent char", !4, i64 0}
+!4 = !{!"Simple C/C++ TBAA"}
diff --git a/test/CodeGen/X86/small-byval-memcpy.ll b/test/CodeGen/X86/small-byval-memcpy.ll
index 1b596b5..3c03750 100644
--- a/test/CodeGen/X86/small-byval-memcpy.ll
+++ b/test/CodeGen/X86/small-byval-memcpy.ll
@@ -1,20 +1,25 @@
-; RUN: llc < %s -mtriple=i386-apple-darwin -mcpu=core2 | grep movsd | count 8
-; RUN: llc < %s -mtriple=i386-apple-darwin -mcpu=nehalem | grep movups | count 2
+; RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=core2 | FileCheck %s --check-prefix=CORE2
+; RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=nehalem | FileCheck %s --check-prefix=NEHALEM
+; RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=btver2 | FileCheck %s --check-prefix=BTVER2
-define void @ccosl({ x86_fp80, x86_fp80 }* noalias sret %agg.result, { x86_fp80, x86_fp80 }* byval align 4 %z) nounwind {
-entry:
- %iz = alloca { x86_fp80, x86_fp80 } ; <{ x86_fp80, x86_fp80 }*> [#uses=3]
- %tmp1 = getelementptr { x86_fp80, x86_fp80 }* %z, i32 0, i32 1 ; <x86_fp80*> [#uses=1]
- %tmp2 = load x86_fp80* %tmp1, align 16 ; <x86_fp80> [#uses=1]
- %tmp3 = fsub x86_fp80 0xK80000000000000000000, %tmp2 ; <x86_fp80> [#uses=1]
- %tmp4 = getelementptr { x86_fp80, x86_fp80 }* %iz, i32 0, i32 1 ; <x86_fp80*> [#uses=1]
- %real = getelementptr { x86_fp80, x86_fp80 }* %iz, i32 0, i32 0 ; <x86_fp80*> [#uses=1]
- %tmp6 = getelementptr { x86_fp80, x86_fp80 }* %z, i32 0, i32 0 ; <x86_fp80*> [#uses=1]
- %tmp7 = load x86_fp80* %tmp6, align 16 ; <x86_fp80> [#uses=1]
- store x86_fp80 %tmp3, x86_fp80* %real, align 16
- store x86_fp80 %tmp7, x86_fp80* %tmp4, align 16
- call void @ccoshl( { x86_fp80, x86_fp80 }* noalias sret %agg.result, { x86_fp80, x86_fp80 }* byval align 4 %iz ) nounwind
- ret void
-}
+declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture, i8* nocapture readonly, i64, i32, i1)
+
+define void @copy16bytes(i8* nocapture %a, i8* nocapture readonly %b) {
+ call void @llvm.memcpy.p0i8.p0i8.i64(i8* %a, i8* %b, i64 16, i32 1, i1 false)
+ ret void
+
+ ; CHECK-LABEL: copy16bytes
+ ; CORE2: movq
+ ; CORE2-NEXT: movq
+ ; CORE2-NEXT: movq
+ ; CORE2-NEXT: movq
+ ; CORE2-NEXT: retq
-declare void @ccoshl({ x86_fp80, x86_fp80 }* noalias sret , { x86_fp80, x86_fp80 }* byval align 4 ) nounwind
+ ; NEHALEM: movups
+ ; NEHALEM-NEXT: movups
+ ; NEHALEM-NEXT: retq
+
+ ; BTVER2: movups
+ ; BTVER2-NEXT: movups
+ ; BTVER2-NEXT: retq
+}
diff --git a/test/CodeGen/X86/splat-const.ll b/test/CodeGen/X86/splat-const.ll
new file mode 100644
index 0000000..19997b0
--- /dev/null
+++ b/test/CodeGen/X86/splat-const.ll
@@ -0,0 +1,40 @@
+; RUN: llc < %s -mcpu=penryn | FileCheck %s --check-prefix=SSE
+; RUN: llc < %s -mcpu=sandybridge | FileCheck %s --check-prefix=AVX
+; RUN: llc < %s -mcpu=haswell | FileCheck %s --check-prefix=AVX2
+; This checks that lowering for creation of constant vectors is sane and
+; doesn't use redundant shuffles. (fixes PR22276)
+target triple = "x86_64-unknown-unknown"
+
+define <4 x i32> @zero_vector() {
+; SSE-LABEL: zero_vector:
+; SSE: xorps %xmm0, %xmm0
+; SSE-NEXT: retq
+; AVX-LABEL: zero_vector:
+; AVX: vxorps %xmm0, %xmm0, %xmm0
+; AVX-NEXT: retq
+; AVX2-LABEL: zero_vector:
+; AVX2: vxorps %xmm0, %xmm0, %xmm0
+; AVX2-NEXT: retq
+ %zero = insertelement <4 x i32> undef, i32 0, i32 0
+ %splat = shufflevector <4 x i32> %zero, <4 x i32> undef, <4 x i32> zeroinitializer
+ ret <4 x i32> %splat
+}
+
+; Note that for the "const_vector" versions, a lowering that uses a single
+; broadcast shuffle instead of a load would also be legitimate (as opposed
+; to the redundant shuffles this test guards against). However, that is not
+; the currently preferred lowering.
+define <4 x i32> @const_vector() {
+; SSE-LABEL: const_vector:
+; SSE: movaps {{.*}}, %xmm0 # xmm0 = [42,42,42,42]
+; SSE-NEXT: retq
+; AVX-LABEL: const_vector:
+; AVX: vmovaps {{.*}}, %xmm0 # xmm0 = [42,42,42,42]
+; AVX-NEXT: retq
+; AVX2-LABEL: const_vector:
+; AVX2: vbroadcastss {{[^%].*}}, %xmm0
+; AVX2-NEXT: retq
+ %const = insertelement <4 x i32> undef, i32 42, i32 0
+ %splat = shufflevector <4 x i32> %const, <4 x i32> undef, <4 x i32> zeroinitializer
+ ret <4 x i32> %splat
+}
diff --git a/test/CodeGen/X86/sret-implicit.ll b/test/CodeGen/X86/sret-implicit.ll
new file mode 100644
index 0000000..3fade1d
--- /dev/null
+++ b/test/CodeGen/X86/sret-implicit.ll
@@ -0,0 +1,10 @@
+; RUN: llc -mtriple=x86_64-apple-darwin8 < %s | FileCheck %s
+; RUN: llc -mtriple=x86_64-pc-linux < %s | FileCheck %s
+
+; CHECK-LABEL: return32
+; CHECK-DAG: movq $0, (%rdi)
+; CHECK-DAG: movq %rdi, %rax
+; CHECK: retq
+define i256 @return32() {
+ ret i256 0
+}
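
An i256 does not fit in the return registers, so both targets tested here
return it through an implicit sret pointer: the caller passes a buffer in
%rdi, and the callee stores through it and hands the pointer back in %rax,
which is exactly what the movq checks pin down. A C analogy with a
hypothetical 32-byte aggregate:

    #include <string.h>

    /* A 32-byte aggregate (like i256) is returned via a hidden pointer:
       passed in %rdi, stored through, and returned in %rax. */
    struct u256 { unsigned long long w[4]; };

    struct u256 return_zero_sketch(void) {
        struct u256 r;
        memset(&r, 0, sizeof r);
        return r;
    }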
diff --git a/test/CodeGen/X86/sse-domains.ll b/test/CodeGen/X86/sse-domains.ll
index 168959a..8cf522d 100644
--- a/test/CodeGen/X86/sse-domains.ll
+++ b/test/CodeGen/X86/sse-domains.ll
@@ -43,45 +43,3 @@ while.body:
while.end:
ret void
}
-
-; CHECK: f2
-; CHECK: for.body
-;
-; This loop contains two cvtsi2ss instructions that update the same xmm
-; register. Verify that the execution dependency fix pass breaks those
-; dependencies by inserting xorps instructions.
-;
-; If the register allocator chooses different registers for the two cvtsi2ss
-; instructions, they are still dependent on themselves.
-; CHECK: xorps [[XMM1:%xmm[0-9]+]]
-; CHECK: , [[XMM1]]
-; CHECK: cvtsi2ssl %{{.*}}, [[XMM1]]
-; CHECK: xorps [[XMM2:%xmm[0-9]+]]
-; CHECK: , [[XMM2]]
-; CHECK: cvtsi2ssl %{{.*}}, [[XMM2]]
-;
-define float @f2(i32 %m) nounwind uwtable readnone ssp {
-entry:
- %tobool3 = icmp eq i32 %m, 0
- br i1 %tobool3, label %for.end, label %for.body
-
-for.body: ; preds = %entry, %for.body
- %m.addr.07 = phi i32 [ %dec, %for.body ], [ %m, %entry ]
- %s1.06 = phi float [ %add, %for.body ], [ 0.000000e+00, %entry ]
- %s2.05 = phi float [ %add2, %for.body ], [ 0.000000e+00, %entry ]
- %n.04 = phi i32 [ %inc, %for.body ], [ 1, %entry ]
- %conv = sitofp i32 %n.04 to float
- %add = fadd float %s1.06, %conv
- %conv1 = sitofp i32 %m.addr.07 to float
- %add2 = fadd float %s2.05, %conv1
- %inc = add nsw i32 %n.04, 1
- %dec = add nsw i32 %m.addr.07, -1
- %tobool = icmp eq i32 %dec, 0
- br i1 %tobool, label %for.end, label %for.body
-
-for.end: ; preds = %for.body, %entry
- %s1.0.lcssa = phi float [ 0.000000e+00, %entry ], [ %add, %for.body ]
- %s2.0.lcssa = phi float [ 0.000000e+00, %entry ], [ %add2, %for.body ]
- %sub = fsub float %s1.0.lcssa, %s2.0.lcssa
- ret float %sub
-}
diff --git a/test/CodeGen/X86/sse-minmax.ll b/test/CodeGen/X86/sse-minmax.ll
index da36a42..4dcb54c 100644
--- a/test/CodeGen/X86/sse-minmax.ll
+++ b/test/CodeGen/X86/sse-minmax.ll
@@ -803,11 +803,18 @@ define double @ule_inverse_y(double %x) nounwind {
; Test a few more misc. cases.
; CHECK-LABEL: clampTo3k_a:
-; CHECK: minsd
+; CHECK-NEXT: movsd {{[^,]*}}, %xmm1
+; CHECK-NEXT: minsd %xmm0, %xmm1
+; CHECK-NEXT: movaps %xmm1, %xmm0
+; CHECK-NEXT: ret
; UNSAFE-LABEL: clampTo3k_a:
-; UNSAFE: minsd
+; UNSAFE-NEXT: minsd {{[^,]*}}, %xmm0
+; UNSAFE-NEXT: ret
; FINITE-LABEL: clampTo3k_a:
-; FINITE: minsd
+; FINITE-NEXT: movsd {{[^,]*}}, %xmm1
+; FINITE-NEXT: minsd %xmm0, %xmm1
+; FINITE-NEXT: movaps %xmm1, %xmm0
+; FINITE-NEXT: ret
define double @clampTo3k_a(double %x) nounwind readnone {
entry:
%0 = fcmp ogt double %x, 3.000000e+03 ; <i1> [#uses=1]
@@ -816,11 +823,16 @@ entry:
}
; CHECK-LABEL: clampTo3k_b:
-; CHECK: minsd
+; CHECK-NEXT: minsd {{[^,]*}}, %xmm0
+; CHECK-NEXT: ret
; UNSAFE-LABEL: clampTo3k_b:
-; UNSAFE: minsd
+; UNSAFE-NEXT: minsd {{[^,]*}}, %xmm0
+; UNSAFE-NEXT: ret
; FINITE-LABEL: clampTo3k_b:
-; FINITE: minsd
+; FINITE-NEXT: movsd {{[^,]*}}, %xmm1
+; FINITE-NEXT: minsd %xmm0, %xmm1
+; FINITE-NEXT: movaps %xmm1, %xmm0
+; FINITE-NEXT: ret
define double @clampTo3k_b(double %x) nounwind readnone {
entry:
%0 = fcmp uge double %x, 3.000000e+03 ; <i1> [#uses=1]
@@ -829,11 +841,18 @@ entry:
}
; CHECK-LABEL: clampTo3k_c:
-; CHECK: maxsd
+; CHECK-NEXT: movsd {{[^,]*}}, %xmm1
+; CHECK-NEXT: maxsd %xmm0, %xmm1
+; CHECK-NEXT: movaps %xmm1, %xmm0
+; CHECK-NEXT: ret
; UNSAFE-LABEL: clampTo3k_c:
-; UNSAFE: maxsd
+; UNSAFE-NEXT: maxsd {{[^,]*}}, %xmm0
+; UNSAFE-NEXT: ret
; FINITE-LABEL: clampTo3k_c:
-; FINITE: maxsd
+; FINITE-NEXT: movsd {{[^,]*}}, %xmm1
+; FINITE-NEXT: maxsd %xmm0, %xmm1
+; FINITE-NEXT: movaps %xmm1, %xmm0
+; FINITE-NEXT: ret
define double @clampTo3k_c(double %x) nounwind readnone {
entry:
%0 = fcmp olt double %x, 3.000000e+03 ; <i1> [#uses=1]
@@ -842,11 +861,16 @@ entry:
}
; CHECK-LABEL: clampTo3k_d:
-; CHECK: maxsd
+; CHECK-NEXT: maxsd {{[^,]*}}, %xmm0
+; CHECK-NEXT: ret
; UNSAFE-LABEL: clampTo3k_d:
-; UNSAFE: maxsd
+; UNSAFE-NEXT: maxsd {{[^,]*}}, %xmm0
+; UNSAFE-NEXT: ret
; FINITE-LABEL: clampTo3k_d:
-; FINITE: maxsd
+; FINITE-NEXT: movsd {{[^,]*}}, %xmm1
+; FINITE-NEXT: maxsd %xmm0, %xmm1
+; FINITE-NEXT: movaps %xmm1, %xmm0
+; FINITE-NEXT: ret
define double @clampTo3k_d(double %x) nounwind readnone {
entry:
%0 = fcmp ule double %x, 3.000000e+03 ; <i1> [#uses=1]
@@ -855,11 +879,18 @@ entry:
}
; CHECK-LABEL: clampTo3k_e:
-; CHECK: maxsd
+; CHECK-NEXT: movsd {{[^,]*}}, %xmm1
+; CHECK-NEXT: maxsd %xmm0, %xmm1
+; CHECK-NEXT: movaps %xmm1, %xmm0
+; CHECK-NEXT: ret
; UNSAFE-LABEL: clampTo3k_e:
-; UNSAFE: maxsd
+; UNSAFE-NEXT: maxsd {{[^,]*}}, %xmm0
+; UNSAFE-NEXT: ret
; FINITE-LABEL: clampTo3k_e:
-; FINITE: maxsd
+; FINITE-NEXT: movsd {{[^,]*}}, %xmm1
+; FINITE-NEXT: maxsd %xmm0, %xmm1
+; FINITE-NEXT: movaps %xmm1, %xmm0
+; FINITE-NEXT: ret
define double @clampTo3k_e(double %x) nounwind readnone {
entry:
%0 = fcmp olt double %x, 3.000000e+03 ; <i1> [#uses=1]
@@ -868,11 +899,16 @@ entry:
}
; CHECK-LABEL: clampTo3k_f:
-; CHECK: maxsd
+; CHECK-NEXT: maxsd {{[^,]*}}, %xmm0
+; CHECK-NEXT: ret
; UNSAFE-LABEL: clampTo3k_f:
-; UNSAFE: maxsd
+; UNSAFE-NEXT: maxsd {{[^,]*}}, %xmm0
+; UNSAFE-NEXT: ret
; FINITE-LABEL: clampTo3k_f:
-; FINITE: maxsd
+; FINITE-NEXT: movsd {{[^,]*}}, %xmm1
+; FINITE-NEXT: maxsd %xmm0, %xmm1
+; FINITE-NEXT: movaps %xmm1, %xmm0
+; FINITE-NEXT: ret
define double @clampTo3k_f(double %x) nounwind readnone {
entry:
%0 = fcmp ule double %x, 3.000000e+03 ; <i1> [#uses=1]
@@ -881,11 +917,18 @@ entry:
}
; CHECK-LABEL: clampTo3k_g:
-; CHECK: minsd
+; CHECK-NEXT: movsd {{[^,]*}}, %xmm1
+; CHECK-NEXT: minsd %xmm0, %xmm1
+; CHECK-NEXT: movaps %xmm1, %xmm0
+; CHECK-NEXT: ret
; UNSAFE-LABEL: clampTo3k_g:
-; UNSAFE: minsd
+; UNSAFE-NEXT: minsd {{[^,]*}}, %xmm0
+; UNSAFE-NEXT: ret
; FINITE-LABEL: clampTo3k_g:
-; FINITE: minsd
+; FINITE-NEXT: movsd {{[^,]*}}, %xmm1
+; FINITE-NEXT: minsd %xmm0, %xmm1
+; FINITE-NEXT: movaps %xmm1, %xmm0
+; FINITE-NEXT: ret
define double @clampTo3k_g(double %x) nounwind readnone {
entry:
%0 = fcmp ogt double %x, 3.000000e+03 ; <i1> [#uses=1]
@@ -894,11 +937,16 @@ entry:
}
; CHECK-LABEL: clampTo3k_h:
-; CHECK: minsd
+; CHECK-NEXT: minsd {{[^,]*}}, %xmm0
+; CHECK-NEXT: ret
; UNSAFE-LABEL: clampTo3k_h:
-; UNSAFE: minsd
+; UNSAFE-NEXT: minsd {{[^,]*}}, %xmm0
+; UNSAFE-NEXT: ret
; FINITE-LABEL: clampTo3k_h:
-; FINITE: minsd
+; FINITE-NEXT: movsd {{[^,]*}}, %xmm1
+; FINITE-NEXT: minsd %xmm0, %xmm1
+; FINITE-NEXT: movaps %xmm1, %xmm0
+; FINITE-NEXT: ret
define double @clampTo3k_h(double %x) nounwind readnone {
entry:
%0 = fcmp uge double %x, 3.000000e+03 ; <i1> [#uses=1]
@@ -907,33 +955,73 @@ entry:
}
; UNSAFE-LABEL: test_maxpd:
-; UNSAFE: maxpd
-define <2 x double> @test_maxpd(<2 x double> %x, <2 x double> %y) {
+; UNSAFE-NEXT: maxpd %xmm1, %xmm0
+; UNSAFE-NEXT: ret
+define <2 x double> @test_maxpd(<2 x double> %x, <2 x double> %y) nounwind {
%max_is_x = fcmp oge <2 x double> %x, %y
%max = select <2 x i1> %max_is_x, <2 x double> %x, <2 x double> %y
ret <2 x double> %max
}
; UNSAFE-LABEL: test_minpd:
-; UNSAFE: minpd
-define <2 x double> @test_minpd(<2 x double> %x, <2 x double> %y) {
+; UNSAFE-NEXT: minpd %xmm1, %xmm0
+; UNSAFE-NEXT: ret
+define <2 x double> @test_minpd(<2 x double> %x, <2 x double> %y) nounwind {
%min_is_x = fcmp ole <2 x double> %x, %y
%min = select <2 x i1> %min_is_x, <2 x double> %x, <2 x double> %y
ret <2 x double> %min
}
; UNSAFE-LABEL: test_maxps:
-; UNSAFE: maxps
-define <4 x float> @test_maxps(<4 x float> %x, <4 x float> %y) {
+; UNSAFE-NEXT: maxps %xmm1, %xmm0
+; UNSAFE-NEXT: ret
+define <4 x float> @test_maxps(<4 x float> %x, <4 x float> %y) nounwind {
%max_is_x = fcmp oge <4 x float> %x, %y
%max = select <4 x i1> %max_is_x, <4 x float> %x, <4 x float> %y
ret <4 x float> %max
}
; UNSAFE-LABEL: test_minps:
-; UNSAFE: minps
-define <4 x float> @test_minps(<4 x float> %x, <4 x float> %y) {
+; UNSAFE-NEXT: minps %xmm1, %xmm0
+; UNSAFE-NEXT: ret
+define <4 x float> @test_minps(<4 x float> %x, <4 x float> %y) nounwind {
%min_is_x = fcmp ole <4 x float> %x, %y
%min = select <4 x i1> %min_is_x, <4 x float> %x, <4 x float> %y
ret <4 x float> %min
}
+
+; UNSAFE-LABEL: test_maxps_illegal_v2f32:
+; UNSAFE-NEXT: maxps %xmm1, %xmm0
+; UNSAFE-NEXT: ret
+define <2 x float> @test_maxps_illegal_v2f32(<2 x float> %x, <2 x float> %y) nounwind {
+ %max_is_x = fcmp oge <2 x float> %x, %y
+ %max = select <2 x i1> %max_is_x, <2 x float> %x, <2 x float> %y
+ ret <2 x float> %max
+}
+
+; UNSAFE-LABEL: test_minps_illegal_v2f32:
+; UNSAFE-NEXT: minps %xmm1, %xmm0
+; UNSAFE-NEXT: ret
+define <2 x float> @test_minps_illegal_v2f32(<2 x float> %x, <2 x float> %y) nounwind {
+ %min_is_x = fcmp ole <2 x float> %x, %y
+ %min = select <2 x i1> %min_is_x, <2 x float> %x, <2 x float> %y
+ ret <2 x float> %min
+}
+
+; UNSAFE-LABEL: test_maxps_illegal_v3f32:
+; UNSAFE-NEXT: maxps %xmm1, %xmm0
+; UNSAFE-NEXT: ret
+define <3 x float> @test_maxps_illegal_v3f32(<3 x float> %x, <3 x float> %y) nounwind {
+ %max_is_x = fcmp oge <3 x float> %x, %y
+ %max = select <3 x i1> %max_is_x, <3 x float> %x, <3 x float> %y
+ ret <3 x float> %max
+}
+
+; UNSAFE-LABEL: test_minps_illegal_v3f32:
+; UNSAFE-NEXT: minps %xmm1, %xmm0
+; UNSAFE-NEXT: ret
+define <3 x float> @test_minps_illegal_v3f32(<3 x float> %x, <3 x float> %y) nounwind {
+ %min_is_x = fcmp ole <3 x float> %x, %y
+ %min = select <3 x i1> %min_is_x, <3 x float> %x, <3 x float> %y
+ ret <3 x float> %min
+}
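
The operand orders pinned down in the rewritten checks matter because of x86
min/max NaN semantics: minsd/maxsd return the second (source) operand whenever
the comparison is unordered, so which register holds the constant determines
the NaN behavior of the original select, and that explains the extra
movsd/movaps shuffling in the strictly-ordered cases. A one-line C model of
AT&T-syntax minsd (a sketch):

    /* AT&T "minsd %src, %dst" computes: dst = (dst < src) ? dst : src.
       The compare is false when either input is NaN, so NaN cases yield
       src; operand order therefore encodes the select's NaN behavior. */
    double minsd_model(double dst, double src) {
        return (dst < src) ? dst : src;
    }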
diff --git a/test/CodeGen/X86/sse-scalar-fp-arith.ll b/test/CodeGen/X86/sse-scalar-fp-arith.ll
index b122ef6..8b1c6d0 100644
--- a/test/CodeGen/X86/sse-scalar-fp-arith.ll
+++ b/test/CodeGen/X86/sse-scalar-fp-arith.ll
@@ -370,8 +370,155 @@ define <4 x float> @test_multiple_div_ss(<4 x float> %a, <4 x float> %b) {
ret <4 x float> %3
}
+; With SSE4.1 or greater, the shuffles in the following tests may
+; be lowered to X86Blendi nodes.
+
+define <4 x float> @blend_add_ss(<4 x float> %a, float %b) {
+; SSE-LABEL: blend_add_ss:
+; SSE: # BB#0:
+; SSE-NEXT: addss %xmm1, %xmm0
+; SSE-NEXT: retq
+;
+; AVX-LABEL: blend_add_ss:
+; AVX: # BB#0:
+; AVX-NEXT: vaddss %xmm1, %xmm0, %xmm0
+; AVX-NEXT: retq
+
+ %ext = extractelement <4 x float> %a, i32 0
+ %op = fadd float %b, %ext
+ %ins = insertelement <4 x float> undef, float %op, i32 0
+ %shuf = shufflevector <4 x float> %ins, <4 x float> %a, <4 x i32> <i32 0, i32 5, i32 6, i32 7>
+ ret <4 x float> %shuf
+}
+
+define <4 x float> @blend_sub_ss(<4 x float> %a, float %b) {
+; SSE-LABEL: blend_sub_ss:
+; SSE: # BB#0:
+; SSE-NEXT: subss %xmm1, %xmm0
+; SSE-NEXT: retq
+;
+; AVX-LABEL: blend_sub_ss:
+; AVX: # BB#0:
+; AVX-NEXT: vsubss %xmm1, %xmm0, %xmm0
+; AVX-NEXT: retq
+
+ %ext = extractelement <4 x float> %a, i32 0
+ %op = fsub float %ext, %b
+ %ins = insertelement <4 x float> undef, float %op, i32 0
+ %shuf = shufflevector <4 x float> %ins, <4 x float> %a, <4 x i32> <i32 0, i32 5, i32 6, i32 7>
+ ret <4 x float> %shuf
+}
+
+define <4 x float> @blend_mul_ss(<4 x float> %a, float %b) {
+; SSE-LABEL: blend_mul_ss:
+; SSE: # BB#0:
+; SSE-NEXT: mulss %xmm1, %xmm0
+; SSE-NEXT: retq
+;
+; AVX-LABEL: blend_mul_ss:
+; AVX: # BB#0:
+; AVX-NEXT: vmulss %xmm1, %xmm0, %xmm0
+; AVX-NEXT: retq
+
+ %ext = extractelement <4 x float> %a, i32 0
+ %op = fmul float %b, %ext
+ %ins = insertelement <4 x float> undef, float %op, i32 0
+ %shuf = shufflevector <4 x float> %ins, <4 x float> %a, <4 x i32> <i32 0, i32 5, i32 6, i32 7>
+ ret <4 x float> %shuf
+}
+
+define <4 x float> @blend_div_ss(<4 x float> %a, float %b) {
+; SSE-LABEL: blend_div_ss:
+; SSE: # BB#0:
+; SSE-NEXT: divss %xmm1, %xmm0
+; SSE-NEXT: retq
+;
+; AVX-LABEL: blend_div_ss:
+; AVX: # BB#0:
+; AVX-NEXT: vdivss %xmm1, %xmm0, %xmm0
+; AVX-NEXT: retq
+
+ %ext = extractelement <4 x float> %a, i32 0
+ %op = fdiv float %ext, %b
+ %ins = insertelement <4 x float> undef, float %op, i32 0
+ %shuf = shufflevector <4 x float> %ins, <4 x float> %a, <4 x i32> <i32 0, i32 5, i32 6, i32 7>
+ ret <4 x float> %shuf
+}
+
+define <2 x double> @blend_add_sd(<2 x double> %a, double %b) {
+; SSE-LABEL: blend_add_sd:
+; SSE: # BB#0:
+; SSE-NEXT: addsd %xmm1, %xmm0
+; SSE-NEXT: retq
+;
+; AVX-LABEL: blend_add_sd:
+; AVX: # BB#0:
+; AVX-NEXT: vaddsd %xmm1, %xmm0, %xmm0
+; AVX-NEXT: retq
+
+ %ext = extractelement <2 x double> %a, i32 0
+ %op = fadd double %b, %ext
+ %ins = insertelement <2 x double> undef, double %op, i32 0
+ %shuf = shufflevector <2 x double> %ins, <2 x double> %a, <2 x i32> <i32 0, i32 3>
+ ret <2 x double> %shuf
+}
+
+define <2 x double> @blend_sub_sd(<2 x double> %a, double %b) {
+; SSE-LABEL: blend_sub_sd:
+; SSE: # BB#0:
+; SSE-NEXT: subsd %xmm1, %xmm0
+; SSE-NEXT: retq
+;
+; AVX-LABEL: blend_sub_sd:
+; AVX: # BB#0:
+; AVX-NEXT: vsubsd %xmm1, %xmm0, %xmm0
+; AVX-NEXT: retq
+
+ %ext = extractelement <2 x double> %a, i32 0
+ %op = fsub double %ext, %b
+ %ins = insertelement <2 x double> undef, double %op, i32 0
+ %shuf = shufflevector <2 x double> %ins, <2 x double> %a, <2 x i32> <i32 0, i32 3>
+ ret <2 x double> %shuf
+}
+
+define <2 x double> @blend_mul_sd(<2 x double> %a, double %b) {
+; SSE-LABEL: blend_mul_sd:
+; SSE: # BB#0:
+; SSE-NEXT: mulsd %xmm1, %xmm0
+; SSE-NEXT: retq
+;
+; AVX-LABEL: blend_mul_sd:
+; AVX: # BB#0:
+; AVX-NEXT: vmulsd %xmm1, %xmm0, %xmm0
+; AVX-NEXT: retq
+
+ %ext = extractelement <2 x double> %a, i32 0
+ %op = fmul double %b, %ext
+ %ins = insertelement <2 x double> undef, double %op, i32 0
+ %shuf = shufflevector <2 x double> %ins, <2 x double> %a, <2 x i32> <i32 0, i32 3>
+ ret <2 x double> %shuf
+}
+
+define <2 x double> @blend_div_sd(<2 x double> %a, double %b) {
+; SSE-LABEL: blend_div_sd:
+; SSE: # BB#0:
+; SSE-NEXT: divsd %xmm1, %xmm0
+; SSE-NEXT: retq
+;
+; AVX-LABEL: blend_div_sd:
+; AVX: # BB#0:
+; AVX-NEXT: vdivsd %xmm1, %xmm0, %xmm0
+; AVX-NEXT: retq
+
+ %ext = extractelement <2 x double> %a, i32 0
+ %op = fdiv double %ext, %b
+ %ins = insertelement <2 x double> undef, double %op, i32 0
+ %shuf = shufflevector <2 x double> %ins, <2 x double> %a, <2 x i32> <i32 0, i32 3>
+ ret <2 x double> %shuf
+}
+
; Ensure that the backend selects SSE/AVX scalar fp instructions
-; from a packed fp instrution plus a vector insert.
+; from a packed fp instruction plus a vector insert.
define <4 x float> @insert_test_add_ss(<4 x float> %a, <4 x float> %b) {
; SSE-LABEL: insert_test_add_ss:
diff --git a/test/CodeGen/X86/2010-01-07-UAMemFeature.ll b/test/CodeGen/X86/sse-unaligned-mem-feature.ll
index bb24adb..bb55829 100644
--- a/test/CodeGen/X86/2010-01-07-UAMemFeature.ll
+++ b/test/CodeGen/X86/sse-unaligned-mem-feature.ll
@@ -1,5 +1,4 @@
-; RUN: llc -mcpu=yonah -mattr=vector-unaligned-mem -march=x86 < %s | FileCheck %s
-; CHECK: addps (
+; RUN: llc -mcpu=yonah -mattr=sse-unaligned-mem -march=x86 < %s | FileCheck %s
target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128"
target triple = "x86_64-unknown-linux-gnu"
@@ -8,4 +7,7 @@ define <4 x float> @foo(<4 x float>* %P, <4 x float> %In) nounwind {
%A = load <4 x float>* %P, align 4
%B = fadd <4 x float> %A, %In
ret <4 x float> %B
+
+; CHECK-LABEL: @foo
+; CHECK: addps (%eax), %xmm0
}
diff --git a/test/CodeGen/X86/sse2-intrinsics-x86-upgrade.ll b/test/CodeGen/X86/sse2-intrinsics-x86-upgrade.ll
new file mode 100644
index 0000000..b0412b9
--- /dev/null
+++ b/test/CodeGen/X86/sse2-intrinsics-x86-upgrade.ll
@@ -0,0 +1,31 @@
+; RUN: llc < %s -mtriple=x86_64-apple-darwin -march=x86 -mcpu=pentium4 -mattr=sse2 | FileCheck %s
+
+define <2 x i64> @test_x86_sse2_psll_dq_bs(<2 x i64> %a0) {
+ ; CHECK: pslldq {{.*#+}} xmm0 = zero,zero,zero,zero,zero,zero,zero,xmm0[0,1,2,3,4,5,6,7,8]
+ %res = call <2 x i64> @llvm.x86.sse2.psll.dq.bs(<2 x i64> %a0, i32 7) ; <<2 x i64>> [#uses=1]
+ ret <2 x i64> %res
+}
+declare <2 x i64> @llvm.x86.sse2.psll.dq.bs(<2 x i64>, i32) nounwind readnone
+
+
+define <2 x i64> @test_x86_sse2_psrl_dq_bs(<2 x i64> %a0) {
+ ; CHECK: psrldq {{.*#+}} xmm0 = xmm0[7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero
+ %res = call <2 x i64> @llvm.x86.sse2.psrl.dq.bs(<2 x i64> %a0, i32 7) ; <<2 x i64>> [#uses=1]
+ ret <2 x i64> %res
+}
+declare <2 x i64> @llvm.x86.sse2.psrl.dq.bs(<2 x i64>, i32) nounwind readnone
+
+define <2 x i64> @test_x86_sse2_psll_dq(<2 x i64> %a0) {
+ ; CHECK: pslldq {{.*#+}} xmm0 = zero,xmm0[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14]
+ %res = call <2 x i64> @llvm.x86.sse2.psll.dq(<2 x i64> %a0, i32 8) ; <<2 x i64>> [#uses=1]
+ ret <2 x i64> %res
+}
+declare <2 x i64> @llvm.x86.sse2.psll.dq(<2 x i64>, i32) nounwind readnone
+
+
+define <2 x i64> @test_x86_sse2_psrl_dq(<2 x i64> %a0) {
+ ; CHECK: psrldq {{.*#+}} xmm0 = xmm0[1,2,3,4,5,6,7,8,9,10,11,12,13,14,15],zero
+ %res = call <2 x i64> @llvm.x86.sse2.psrl.dq(<2 x i64> %a0, i32 8) ; <<2 x i64>> [#uses=1]
+ ret <2 x i64> %res
+}
+declare <2 x i64> @llvm.x86.sse2.psrl.dq(<2 x i64>, i32) nounwind readnone
diff --git a/test/CodeGen/X86/sse2-intrinsics-x86.ll b/test/CodeGen/X86/sse2-intrinsics-x86.ll
index c4d9e6d..cab62a3 100644
--- a/test/CodeGen/X86/sse2-intrinsics-x86.ll
+++ b/test/CodeGen/X86/sse2-intrinsics-x86.ll
@@ -408,22 +408,6 @@ define <4 x i32> @test_x86_sse2_psll_d(<4 x i32> %a0, <4 x i32> %a1) {
ret <4 x i32> %res
}
declare <4 x i32> @llvm.x86.sse2.psll.d(<4 x i32>, <4 x i32>) nounwind readnone
-
-
-define <2 x i64> @test_x86_sse2_psll_dq(<2 x i64> %a0) {
- ; CHECK: pslldq {{.*#+}} xmm0 = xmm0[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15]
- %res = call <2 x i64> @llvm.x86.sse2.psll.dq(<2 x i64> %a0, i32 7) ; <<2 x i64>> [#uses=1]
- ret <2 x i64> %res
-}
-declare <2 x i64> @llvm.x86.sse2.psll.dq(<2 x i64>, i32) nounwind readnone
-
-
-define <2 x i64> @test_x86_sse2_psll_dq_bs(<2 x i64> %a0) {
- ; CHECK: pslldq {{.*#+}} xmm0 = zero,zero,zero,zero,zero,zero,zero,xmm0[0,1,2,3,4,5,6,7,8]
- %res = call <2 x i64> @llvm.x86.sse2.psll.dq.bs(<2 x i64> %a0, i32 7) ; <<2 x i64>> [#uses=1]
- ret <2 x i64> %res
-}
-declare <2 x i64> @llvm.x86.sse2.psll.dq.bs(<2 x i64>, i32) nounwind readnone
define <2 x i64> @test_x86_sse2_psll_q(<2 x i64> %a0, <2 x i64> %a1) {
@@ -504,22 +488,6 @@ define <4 x i32> @test_x86_sse2_psrl_d(<4 x i32> %a0, <4 x i32> %a1) {
ret <4 x i32> %res
}
declare <4 x i32> @llvm.x86.sse2.psrl.d(<4 x i32>, <4 x i32>) nounwind readnone
-
-
-define <2 x i64> @test_x86_sse2_psrl_dq(<2 x i64> %a0) {
- ; CHECK: psrldq {{.*#+}} xmm0 = xmm0[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15]
- %res = call <2 x i64> @llvm.x86.sse2.psrl.dq(<2 x i64> %a0, i32 7) ; <<2 x i64>> [#uses=1]
- ret <2 x i64> %res
-}
-declare <2 x i64> @llvm.x86.sse2.psrl.dq(<2 x i64>, i32) nounwind readnone
-
-
-define <2 x i64> @test_x86_sse2_psrl_dq_bs(<2 x i64> %a0) {
- ; CHECK: psrldq {{.*#+}} xmm0 = xmm0[7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero
- %res = call <2 x i64> @llvm.x86.sse2.psrl.dq.bs(<2 x i64> %a0, i32 7) ; <<2 x i64>> [#uses=1]
- ret <2 x i64> %res
-}
-declare <2 x i64> @llvm.x86.sse2.psrl.dq.bs(<2 x i64>, i32) nounwind readnone
define <2 x i64> @test_x86_sse2_psrl_q(<2 x i64> %a0, <2 x i64> %a1) {
diff --git a/test/CodeGen/X86/sse2.ll b/test/CodeGen/X86/sse2.ll
index b7db6cb..0b69ae8 100644
--- a/test/CodeGen/X86/sse2.ll
+++ b/test/CodeGen/X86/sse2.ll
@@ -75,7 +75,7 @@ define <4 x i32> @test5(i8** %ptr) nounwind {
; CHECK: ## BB#0:
; CHECK-NEXT: movl {{[0-9]+}}(%esp), %eax
; CHECK-NEXT: movl (%eax), %eax
-; CHECK-NEXT: movss (%eax), %xmm1
+; CHECK-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
; CHECK-NEXT: pxor %xmm0, %xmm0
; CHECK-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
; CHECK-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
@@ -179,8 +179,8 @@ define void @test12() nounwind {
; CHECK-LABEL: test12:
; CHECK: ## BB#0:
; CHECK-NEXT: movapd 0, %xmm0
-; CHECK-NEXT: movaps {{.*#+}} xmm1 = [1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00]
-; CHECK-NEXT: movsd %xmm0, %xmm1
+; CHECK-NEXT: movapd {{.*#+}} xmm1 = [1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00]
+; CHECK-NEXT: movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1]
; CHECK-NEXT: xorpd %xmm2, %xmm2
; CHECK-NEXT: unpckhpd {{.*#+}} xmm0 = xmm0[1],xmm2[1]
; CHECK-NEXT: addps %xmm1, %xmm0
@@ -293,7 +293,7 @@ entry:
define <2 x i64> @test_insert_64_zext(<2 x i64> %i) {
; CHECK-LABEL: test_insert_64_zext:
; CHECK: ## BB#0:
-; CHECK-NEXT: movq %xmm0, %xmm0
+; CHECK-NEXT: movq {{.*#+}} xmm0 = xmm0[0],zero
; CHECK-NEXT: retl
%1 = shufflevector <2 x i64> %i, <2 x i64> <i64 0, i64 undef>, <2 x i32> <i32 0, i32 2>
ret <2 x i64> %1
@@ -302,8 +302,7 @@ define <2 x i64> @test_insert_64_zext(<2 x i64> %i) {
define <4 x i32> @PR19721(<4 x i32> %i) {
; CHECK-LABEL: PR19721:
; CHECK: ## BB#0:
-; CHECK-NEXT: xorps %xmm1, %xmm1
-; CHECK-NEXT: movss %xmm1, %xmm0
+; CHECK-NEXT: andps LCPI19_0, %xmm0
; CHECK-NEXT: retl
%bc = bitcast <4 x i32> %i to i128
%insert = and i128 %bc, -4294967296
@@ -316,10 +315,11 @@ define <4 x i32> @test_mul(<4 x i32> %x, <4 x i32> %y) {
; CHECK: ## BB#0:
; CHECK-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,1,3,3]
; CHECK-NEXT: pmuludq %xmm1, %xmm0
+; CHECK-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
; CHECK-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
; CHECK-NEXT: pmuludq %xmm2, %xmm1
-; CHECK-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[0,2]
-; CHECK-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,2,1,3]
+; CHECK-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
+; CHECK-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; CHECK-NEXT: retl
%m = mul <4 x i32> %x, %y
ret <4 x i32> %m
diff --git a/test/CodeGen/X86/sse3.ll b/test/CodeGen/X86/sse3.ll
index 0a5b0ca..6c0b701 100644
--- a/test/CodeGen/X86/sse3.ll
+++ b/test/CodeGen/X86/sse3.ll
@@ -25,14 +25,11 @@ entry:
define <8 x i16> @t1(<8 x i16>* %A, <8 x i16>* %B) nounwind {
; X64-LABEL: t1:
; X64: ## BB#0:
-; X64-NEXT: movdqa (%rdi), %xmm0
-; X64-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
-; X64-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],mem[0],xmm0[1],mem[1],xmm0[2],mem[2],xmm0[3],mem[3]
-; X64-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[2,1,2,3,4,5,6,7]
-; X64-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,6,6,7]
-; X64-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
-; X64-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[1,0,2,3,4,5,6,7]
-; X64-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; X64-NEXT: movaps {{.*#+}} xmm0 = [0,65535,65535,65535,65535,65535,65535,65535]
+; X64-NEXT: movaps %xmm0, %xmm1
+; X64-NEXT: andnps (%rsi), %xmm1
+; X64-NEXT: andps (%rdi), %xmm0
+; X64-NEXT: orps %xmm1, %xmm0
; X64-NEXT: retq
%tmp1 = load <8 x i16>* %A
%tmp2 = load <8 x i16>* %B
@@ -44,11 +41,11 @@ define <8 x i16> @t1(<8 x i16>* %A, <8 x i16>* %B) nounwind {
define <8 x i16> @t2(<8 x i16> %A, <8 x i16> %B) nounwind {
; X64-LABEL: t2:
; X64: ## BB#0:
-; X64-NEXT: pshufd {{.*#+}} xmm2 = xmm0[2,3,0,1]
-; X64-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
-; X64-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,1,2,3]
-; X64-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[3,2,0,3,4,5,6,7]
-; X64-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm2[0]
+; X64-NEXT: movdqa {{.*#+}} xmm2 = [0,65535,65535,0,65535,65535,65535,65535]
+; X64-NEXT: pand %xmm2, %xmm0
+; X64-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[1,1,2,1,4,5,6,7]
+; X64-NEXT: pandn %xmm1, %xmm2
+; X64-NEXT: por %xmm2, %xmm0
; X64-NEXT: retq
%tmp = shufflevector <8 x i16> %A, <8 x i16> %B, <8 x i32> < i32 9, i32 1, i32 2, i32 9, i32 4, i32 5, i32 6, i32 7 >
ret <8 x i16> %tmp
@@ -92,7 +89,7 @@ define <8 x i16> @t5(<8 x i16> %A, <8 x i16> %B) nounwind {
define <8 x i16> @t6(<8 x i16> %A, <8 x i16> %B) nounwind {
; X64-LABEL: t6:
; X64: ## BB#0:
-; X64-NEXT: movss %xmm1, %xmm0
+; X64-NEXT: movss {{.*#+}} xmm0 = xmm1[0],xmm0[1,2,3]
; X64-NEXT: retq
%tmp = shufflevector <8 x i16> %A, <8 x i16> %B, <8 x i32> < i32 8, i32 9, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7 >
ret <8 x i16> %tmp
@@ -195,8 +192,8 @@ define void @t10() nounwind {
define <8 x i16> @t11(<8 x i16> %T0, <8 x i16> %T1) nounwind readnone {
; X64-LABEL: t11:
; X64: ## BB#0: ## %entry
+; X64-NEXT: psrld $16, %xmm0
; X64-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
-; X64-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[2,1,2,3,4,5,6,7]
; X64-NEXT: retq
entry:
%tmp7 = shufflevector <8 x i16> %T0, <8 x i16> %T1, <8 x i32> < i32 1, i32 8, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef , i32 undef >
@@ -232,8 +229,9 @@ entry:
define <8 x i16> @t14(<8 x i16> %T0, <8 x i16> %T1) nounwind readnone {
; X64-LABEL: t14:
; X64: ## BB#0: ## %entry
-; X64-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
-; X64-NEXT: pshuflw {{.*#+}} xmm0 = xmm1[0,2,2,3,4,5,6,7]
+; X64-NEXT: psrlq $16, %xmm0
+; X64-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm0[0]
+; X64-NEXT: movdqa %xmm1, %xmm0
; X64-NEXT: retq
entry:
%tmp9 = shufflevector <8 x i16> %T0, <8 x i16> %T1, <8 x i32> < i32 8, i32 9, i32 undef, i32 undef, i32 undef, i32 2, i32 undef , i32 undef >
@@ -245,11 +243,8 @@ define <8 x i16> @t15(<8 x i16> %T0, <8 x i16> %T1) nounwind readnone {
; X64-LABEL: t15:
; X64: ## BB#0: ## %entry
; X64-NEXT: pshufd {{.*#+}} xmm0 = xmm0[3,1,2,3]
-; X64-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[1,1,2,3,4,5,6,7]
-; X64-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
-; X64-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,0,3]
-; X64-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,1,0,2,4,5,6,7]
-; X64-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,5,5,6,7]
+; X64-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,1,1,2,4,5,6,7]
+; X64-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; X64-NEXT: retq
entry:
%tmp8 = shufflevector <8 x i16> %T0, <8 x i16> %T1, <8 x i32> < i32 undef, i32 undef, i32 7, i32 2, i32 8, i32 undef, i32 undef , i32 undef >
@@ -262,15 +257,7 @@ define <16 x i8> @t16(<16 x i8> %T0) nounwind readnone {
; X64: ## BB#0: ## %entry
; X64-NEXT: movdqa {{.*#+}} xmm1 = [0,0,0,0,1,1,1,1,0,0,0,0,0,0,0,0]
; X64-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
-; X64-NEXT: pxor %xmm2, %xmm2
-; X64-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3],xmm1[4],xmm2[4],xmm1[5],xmm2[5],xmm1[6],xmm2[6],xmm1[7],xmm2[7]
-; X64-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3],xmm0[4],xmm2[4],xmm0[5],xmm2[5],xmm0[6],xmm2[6],xmm0[7],xmm2[7]
-; X64-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,1,1]
-; X64-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
-; X64-NEXT: pshuflw {{.*#+}} xmm0 = xmm1[0,2,2,3,4,5,6,7]
-; X64-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,7,6,7]
-; X64-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
-; X64-NEXT: packuswb %xmm0, %xmm0
+; X64-NEXT: movdqa %xmm1, %xmm0
; X64-NEXT: retq
entry:
%tmp8 = shufflevector <16 x i8> <i8 0, i8 0, i8 0, i8 0, i8 1, i8 1, i8 1, i8 1, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>, <16 x i8> %T0, <16 x i32> < i32 0, i32 1, i32 16, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef , i32 undef >
@@ -282,7 +269,7 @@ entry:
define <4 x i32> @t17() nounwind {
; X64-LABEL: t17:
; X64: ## BB#0: ## %entry
-; X64-NEXT: movddup (%rax), %xmm0
+; X64-NEXT: movddup {{.*#+}} xmm0 = mem[0,0]
; X64-NEXT: andpd {{.*}}(%rip), %xmm0
; X64-NEXT: retq
entry:
diff --git a/test/CodeGen/X86/sse41-pmovxrm-intrinsics.ll b/test/CodeGen/X86/sse41-pmovxrm-intrinsics.ll
new file mode 100644
index 0000000..55faf4d
--- /dev/null
+++ b/test/CodeGen/X86/sse41-pmovxrm-intrinsics.ll
@@ -0,0 +1,123 @@
+; RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=x86-64 -mattr=+sse4.1 | FileCheck %s --check-prefix=CHECK --check-prefix=SSE41
+; RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=x86-64 -mattr=+avx2 | FileCheck %s --check-prefix=CHECK --check-prefix=AVX
+
+define <8 x i16> @test_llvm_x86_sse41_pmovsxbw(<16 x i8>* %a) {
+; CHECK-LABEL: test_llvm_x86_sse41_pmovsxbw
+; SSE41: pmovsxbw (%rdi), %xmm0
+; AVX: vpmovsxbw (%rdi), %xmm0
+ %1 = load <16 x i8>* %a, align 1
+ %2 = call <8 x i16> @llvm.x86.sse41.pmovsxbw(<16 x i8> %1)
+ ret <8 x i16> %2
+}
+
+define <4 x i32> @test_llvm_x86_sse41_pmovsxbd(<16 x i8>* %a) {
+; CHECK-LABEL: test_llvm_x86_sse41_pmovsxbd
+; SSE41: pmovsxbd (%rdi), %xmm0
+; AVX: vpmovsxbd (%rdi), %xmm0
+ %1 = load <16 x i8>* %a, align 1
+ %2 = call <4 x i32> @llvm.x86.sse41.pmovsxbd(<16 x i8> %1)
+ ret <4 x i32> %2
+}
+
+define <2 x i64> @test_llvm_x86_sse41_pmovsxbq(<16 x i8>* %a) {
+; CHECK-LABEL: test_llvm_x86_sse41_pmovsxbq
+; SSE41: pmovsxbq (%rdi), %xmm0
+; AVX: vpmovsxbq (%rdi), %xmm0
+ %1 = load <16 x i8>* %a, align 1
+ %2 = call <2 x i64> @llvm.x86.sse41.pmovsxbq(<16 x i8> %1)
+ ret <2 x i64> %2
+}
+
+define <4 x i32> @test_llvm_x86_sse41_pmovsxwd(<8 x i16>* %a) {
+; CHECK-LABEL: test_llvm_x86_sse41_pmovsxwd
+; SSE41: pmovsxwd (%rdi), %xmm0
+; AVX: vpmovsxwd (%rdi), %xmm0
+ %1 = load <8 x i16>* %a, align 1
+ %2 = call <4 x i32> @llvm.x86.sse41.pmovsxwd(<8 x i16> %1)
+ ret <4 x i32> %2
+}
+
+define <2 x i64> @test_llvm_x86_sse41_pmovsxwq(<8 x i16>* %a) {
+; CHECK-LABEL: test_llvm_x86_sse41_pmovsxwq
+; SSE41: pmovsxwq (%rdi), %xmm0
+; AVX: vpmovsxwq (%rdi), %xmm0
+ %1 = load <8 x i16>* %a, align 1
+ %2 = call <2 x i64> @llvm.x86.sse41.pmovsxwq(<8 x i16> %1)
+ ret <2 x i64> %2
+}
+
+define <2 x i64> @test_llvm_x86_sse41_pmovsxdq(<4 x i32>* %a) {
+; CHECK-LABEL: test_llvm_x86_sse41_pmovsxdq
+; SSE41: pmovsxdq (%rdi), %xmm0
+; AVX: vpmovsxdq (%rdi), %xmm0
+ %1 = load <4 x i32>* %a, align 1
+ %2 = call <2 x i64> @llvm.x86.sse41.pmovsxdq(<4 x i32> %1)
+ ret <2 x i64> %2
+}
+
+define <8 x i16> @test_llvm_x86_sse41_pmovzxbw(<16 x i8>* %a) {
+; CHECK-LABEL: test_llvm_x86_sse41_pmovzxbw
+; SSE41: pmovzxbw (%rdi), %xmm0
+; AVX: vpmovzxbw (%rdi), %xmm0
+ %1 = load <16 x i8>* %a, align 1
+ %2 = call <8 x i16> @llvm.x86.sse41.pmovzxbw(<16 x i8> %1)
+ ret <8 x i16> %2
+}
+
+define <4 x i32> @test_llvm_x86_sse41_pmovzxbd(<16 x i8>* %a) {
+; CHECK-LABEL: test_llvm_x86_sse41_pmovzxbd
+; SSE41: pmovzxbd (%rdi), %xmm0
+; AVX: vpmovzxbd (%rdi), %xmm0
+ %1 = load <16 x i8>* %a, align 1
+ %2 = call <4 x i32> @llvm.x86.sse41.pmovzxbd(<16 x i8> %1)
+ ret <4 x i32> %2
+}
+
+define <2 x i64> @test_llvm_x86_sse41_pmovzxbq(<16 x i8>* %a) {
+; CHECK-LABEL: test_llvm_x86_sse41_pmovzxbq
+; SSE41: pmovzxbq (%rdi), %xmm0
+; AVX: vpmovzxbq (%rdi), %xmm0
+ %1 = load <16 x i8>* %a, align 1
+ %2 = call <2 x i64> @llvm.x86.sse41.pmovzxbq(<16 x i8> %1)
+ ret <2 x i64> %2
+}
+
+define <4 x i32> @test_llvm_x86_sse41_pmovzxwd(<8 x i16>* %a) {
+; CHECK-LABEL: test_llvm_x86_sse41_pmovzxwd
+; SSE41: pmovzxwd (%rdi), %xmm0
+; AVX: vpmovzxwd (%rdi), %xmm0
+ %1 = load <8 x i16>* %a, align 1
+ %2 = call <4 x i32> @llvm.x86.sse41.pmovzxwd(<8 x i16> %1)
+ ret <4 x i32> %2
+}
+
+define <2 x i64> @test_llvm_x86_sse41_pmovzxwq(<8 x i16>* %a) {
+; CHECK-LABEL: test_llvm_x86_sse41_pmovzxwq
+; SSE41: pmovzxwq (%rdi), %xmm0
+; AVX: vpmovzxwq (%rdi), %xmm0
+ %1 = load <8 x i16>* %a, align 1
+ %2 = call <2 x i64> @llvm.x86.sse41.pmovzxwq(<8 x i16> %1)
+ ret <2 x i64> %2
+}
+
+define <2 x i64> @test_llvm_x86_sse41_pmovzxdq(<4 x i32>* %a) {
+; CHECK-LABEL: test_llvm_x86_sse41_pmovzxdq
+; SSE41: pmovzxdq (%rdi), %xmm0
+; AVX: vpmovzxdq (%rdi), %xmm0
+ %1 = load <4 x i32>* %a, align 1
+ %2 = call <2 x i64> @llvm.x86.sse41.pmovzxdq(<4 x i32> %1)
+ ret <2 x i64> %2
+}
+
+declare <2 x i64> @llvm.x86.sse41.pmovzxdq(<4 x i32>)
+declare <2 x i64> @llvm.x86.sse41.pmovzxwq(<8 x i16>)
+declare <4 x i32> @llvm.x86.sse41.pmovzxwd(<8 x i16>)
+declare <2 x i64> @llvm.x86.sse41.pmovzxbq(<16 x i8>)
+declare <4 x i32> @llvm.x86.sse41.pmovzxbd(<16 x i8>)
+declare <8 x i16> @llvm.x86.sse41.pmovzxbw(<16 x i8>)
+declare <2 x i64> @llvm.x86.sse41.pmovsxdq(<4 x i32>)
+declare <2 x i64> @llvm.x86.sse41.pmovsxwq(<8 x i16>)
+declare <4 x i32> @llvm.x86.sse41.pmovsxwd(<8 x i16>)
+declare <2 x i64> @llvm.x86.sse41.pmovsxbq(<16 x i8>)
+declare <4 x i32> @llvm.x86.sse41.pmovsxbd(<16 x i8>)
+declare <8 x i16> @llvm.x86.sse41.pmovsxbw(<16 x i8>)
diff --git a/test/CodeGen/X86/sse41.ll b/test/CodeGen/X86/sse41.ll
index d5c6f74..a5b07e7 100644
--- a/test/CodeGen/X86/sse41.ll
+++ b/test/CodeGen/X86/sse41.ll
@@ -78,13 +78,13 @@ define <2 x i64> @pmovzxbq_1() nounwind {
; X32-LABEL: pmovzxbq_1:
; X32: ## BB#0: ## %entry
; X32-NEXT: movl L_g16$non_lazy_ptr, %eax
-; X32-NEXT: pmovzxbq (%eax), %xmm0
+; X32-NEXT: pmovzxbq {{.*#+}} xmm0 = mem[0],zero,zero,zero,zero,zero,zero,zero,mem[1],zero,zero,zero,zero,zero,zero,zero
; X32-NEXT: retl
;
; X64-LABEL: pmovzxbq_1:
; X64: ## BB#0: ## %entry
; X64-NEXT: movq _g16@{{.*}}(%rip), %rax
-; X64-NEXT: pmovzxbq (%rax), %xmm0
+; X64-NEXT: pmovzxbq {{.*#+}} xmm0 = mem[0],zero,zero,zero,zero,zero,zero,zero,mem[1],zero,zero,zero,zero,zero,zero,zero
; X64-NEXT: retq
entry:
%0 = load i16* @g16, align 2 ; <i16> [#uses=1]
@@ -202,7 +202,7 @@ declare <4 x float> @llvm.x86.sse41.insertps(<4 x float>, <4 x float>, i32) noun
define <4 x float> @insertps_2(<4 x float> %t1, float %t2) nounwind {
; X32-LABEL: insertps_2:
; X32: ## BB#0:
-; X32-NEXT: insertps $0, {{[0-9]+}}(%esp), %xmm0
+; X32-NEXT: insertps {{.*#+}} xmm0 = mem[0],xmm0[1,2,3]
; X32-NEXT: retl
;
; X64-LABEL: insertps_2:
@@ -291,22 +291,20 @@ declare i32 @llvm.x86.sse41.ptestnzc(<2 x i64>, <2 x i64>) nounwind readnone
define <2 x float> @buildvector(<2 x float> %A, <2 x float> %B) nounwind {
; X32-LABEL: buildvector:
; X32: ## BB#0: ## %entry
-; X32-NEXT: movaps %xmm0, %xmm2
-; X32-NEXT: shufps {{.*#+}} xmm2 = xmm2[1,1,2,3]
+; X32-NEXT: movshdup {{.*#+}} xmm2 = xmm0[1,1,3,3]
+; X32-NEXT: movshdup {{.*#+}} xmm3 = xmm1[1,1,3,3]
; X32-NEXT: addss %xmm1, %xmm0
-; X32-NEXT: shufps {{.*#+}} xmm1 = xmm1[1,1,2,3]
-; X32-NEXT: addss %xmm2, %xmm1
-; X32-NEXT: insertps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[2,3]
+; X32-NEXT: addss %xmm2, %xmm3
+; X32-NEXT: insertps {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[2,3]
; X32-NEXT: retl
;
; X64-LABEL: buildvector:
; X64: ## BB#0: ## %entry
-; X64-NEXT: movaps %xmm0, %xmm2
-; X64-NEXT: shufps {{.*#+}} xmm2 = xmm2[1,1,2,3]
+; X64-NEXT: movshdup {{.*#+}} xmm2 = xmm0[1,1,3,3]
+; X64-NEXT: movshdup {{.*#+}} xmm3 = xmm1[1,1,3,3]
; X64-NEXT: addss %xmm1, %xmm0
-; X64-NEXT: shufps {{.*#+}} xmm1 = xmm1[1,1,2,3]
-; X64-NEXT: addss %xmm2, %xmm1
-; X64-NEXT: insertps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[2,3]
+; X64-NEXT: addss %xmm2, %xmm3
+; X64-NEXT: insertps {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[2,3]
; X64-NEXT: retq
entry:
%tmp7 = extractelement <2 x float> %A, i32 0
@@ -324,12 +322,12 @@ define <4 x float> @insertps_from_shufflevector_1(<4 x float> %a, <4 x float>* n
; X32-LABEL: insertps_from_shufflevector_1:
; X32: ## BB#0: ## %entry
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT: insertps $48, (%eax), %xmm0
+; X32-NEXT: insertps {{.*#+}} xmm0 = xmm0[0,1,2],mem[0]
; X32-NEXT: retl
;
; X64-LABEL: insertps_from_shufflevector_1:
; X64: ## BB#0: ## %entry
-; X64-NEXT: insertps $48, (%rdi), %xmm0
+; X64-NEXT: insertps {{.*#+}} xmm0 = xmm0[0,1,2],mem[0]
; X64-NEXT: retq
entry:
%0 = load <4 x float>* %pb, align 16
@@ -358,12 +356,14 @@ define <4 x i32> @pinsrd_from_shufflevector_i32(<4 x i32> %a, <4 x i32>* nocaptu
; X32-LABEL: pinsrd_from_shufflevector_i32:
; X32: ## BB#0: ## %entry
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT: insertps $48, (%eax), %xmm0
+; X32-NEXT: pshufd {{.*#+}} xmm1 = mem[0,1,2,0]
+; X32-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,5],xmm1[6,7]
; X32-NEXT: retl
;
; X64-LABEL: pinsrd_from_shufflevector_i32:
; X64: ## BB#0: ## %entry
-; X64-NEXT: insertps $48, (%rdi), %xmm0
+; X64-NEXT: pshufd {{.*#+}} xmm1 = mem[0,1,2,0]
+; X64-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,5],xmm1[6,7]
; X64-NEXT: retq
entry:
%0 = load <4 x i32>* %pb, align 16
@@ -374,12 +374,14 @@ entry:
define <4 x i32> @insertps_from_shufflevector_i32_2(<4 x i32> %a, <4 x i32> %b) {
; X32-LABEL: insertps_from_shufflevector_i32_2:
; X32: ## BB#0: ## %entry
-; X32-NEXT: insertps {{.*#+}} xmm0 = xmm0[0],xmm1[3],xmm0[2,3]
+; X32-NEXT: pshufd {{.*#+}} xmm1 = xmm1[2,3,0,1]
+; X32-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3],xmm0[4,5,6,7]
; X32-NEXT: retl
;
; X64-LABEL: insertps_from_shufflevector_i32_2:
; X64: ## BB#0: ## %entry
-; X64-NEXT: insertps {{.*#+}} xmm0 = xmm0[0],xmm1[3],xmm0[2,3]
+; X64-NEXT: pshufd {{.*#+}} xmm1 = xmm1[2,3,0,1]
+; X64-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3],xmm0[4,5,6,7]
; X64-NEXT: retq
entry:
%vecinit6 = shufflevector <4 x i32> %a, <4 x i32> %b, <4 x i32> <i32 0, i32 7, i32 2, i32 3>
@@ -390,12 +392,12 @@ define <4 x float> @insertps_from_load_ins_elt_undef(<4 x float> %a, float* %b)
; X32-LABEL: insertps_from_load_ins_elt_undef:
; X32: ## BB#0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT: insertps $16, (%eax), %xmm0
+; X32-NEXT: insertps {{.*#+}} xmm0 = xmm0[0],mem[0],xmm0[2,3]
; X32-NEXT: retl
;
; X64-LABEL: insertps_from_load_ins_elt_undef:
; X64: ## BB#0:
-; X64-NEXT: insertps $16, (%rdi), %xmm0
+; X64-NEXT: insertps {{.*#+}} xmm0 = xmm0[0],mem[0],xmm0[2,3]
; X64-NEXT: retq
%1 = load float* %b, align 4
%2 = insertelement <4 x float> undef, float %1, i32 0
@@ -408,14 +410,16 @@ define <4 x i32> @insertps_from_load_ins_elt_undef_i32(<4 x i32> %a, i32* %b) {
; X32-LABEL: insertps_from_load_ins_elt_undef_i32:
; X32: ## BB#0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT: movd (%eax), %xmm1
-; X32-NEXT: insertps {{.*#+}} xmm0 = xmm0[0,1],xmm1[0],xmm0[3]
+; X32-NEXT: movd {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; X32-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,1,0,1]
+; X32-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm1[4,5],xmm0[6,7]
; X32-NEXT: retl
;
; X64-LABEL: insertps_from_load_ins_elt_undef_i32:
; X64: ## BB#0:
-; X64-NEXT: movd (%rdi), %xmm1
-; X64-NEXT: insertps {{.*#+}} xmm0 = xmm0[0,1],xmm1[0],xmm0[3]
+; X64-NEXT: movd {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; X64-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,1,0,1]
+; X64-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm1[4,5],xmm0[6,7]
; X64-NEXT: retq
%1 = load i32* %b, align 4
%2 = insertelement <4 x i32> undef, i32 %1, i32 0
@@ -449,12 +453,12 @@ define <4 x float> @shuf_XYZ0(<4 x float> %x, <4 x float> %a) {
define <4 x float> @shuf_XY00(<4 x float> %x, <4 x float> %a) {
; X32-LABEL: shuf_XY00:
; X32: ## BB#0:
-; X32-NEXT: movq %xmm0, %xmm0
+; X32-NEXT: movq {{.*#+}} xmm0 = xmm0[0],zero
; X32-NEXT: retl
;
; X64-LABEL: shuf_XY00:
; X64: ## BB#0:
-; X64-NEXT: movq %xmm0, %xmm0
+; X64-NEXT: movq {{.*#+}} xmm0 = xmm0[0],zero
; X64-NEXT: retq
%vecext = extractelement <4 x float> %x, i32 0
%vecinit = insertelement <4 x float> undef, float %vecext, i32 0
@@ -527,14 +531,14 @@ define <4 x float> @shuf_X00A(<4 x float> %x, <4 x float> %a) {
; X32: ## BB#0:
; X32-NEXT: xorps %xmm2, %xmm2
; X32-NEXT: blendps {{.*#+}} xmm0 = xmm0[0],xmm2[1,2,3]
-; X32-NEXT: insertps {{.*#+}} xmm0 = xmm0[0],zero,zero,xmm1[0]
+; X32-NEXT: insertps {{.*#+}} xmm0 = xmm0[0,1,2],xmm1[0]
; X32-NEXT: retl
;
; X64-LABEL: shuf_X00A:
; X64: ## BB#0:
; X64-NEXT: xorps %xmm2, %xmm2
; X64-NEXT: blendps {{.*#+}} xmm0 = xmm0[0],xmm2[1,2,3]
-; X64-NEXT: insertps {{.*#+}} xmm0 = xmm0[0],zero,zero,xmm1[0]
+; X64-NEXT: insertps {{.*#+}} xmm0 = xmm0[0,1,2],xmm1[0]
; X64-NEXT: retq
%vecext = extractelement <4 x float> %x, i32 0
%vecinit = insertelement <4 x float> undef, float %vecext, i32 0
@@ -547,18 +551,12 @@ define <4 x float> @shuf_X00A(<4 x float> %x, <4 x float> %a) {
define <4 x float> @shuf_X00X(<4 x float> %x, <4 x float> %a) {
; X32-LABEL: shuf_X00X:
; X32: ## BB#0:
-; X32-NEXT: xorps %xmm1, %xmm1
-; X32-NEXT: blendps {{.*#+}} xmm1 = xmm0[0],xmm1[1,2,3]
-; X32-NEXT: insertps {{.*#+}} xmm1 = xmm1[0],zero,zero,xmm0[0]
-; X32-NEXT: movaps %xmm1, %xmm0
+; X32-NEXT: insertps {{.*#+}} xmm0 = xmm0[0],zero,zero,xmm0[0]
; X32-NEXT: retl
;
; X64-LABEL: shuf_X00X:
; X64: ## BB#0:
-; X64-NEXT: xorps %xmm1, %xmm1
-; X64-NEXT: blendps {{.*#+}} xmm1 = xmm0[0],xmm1[1,2,3]
-; X64-NEXT: insertps {{.*#+}} xmm1 = xmm1[0],zero,zero,xmm0[0]
-; X64-NEXT: movaps %xmm1, %xmm0
+; X64-NEXT: insertps {{.*#+}} xmm0 = xmm0[0],zero,zero,xmm0[0]
; X64-NEXT: retq
%vecext = extractelement <4 x float> %x, i32 0
%vecinit = insertelement <4 x float> undef, float %vecext, i32 0
@@ -571,20 +569,14 @@ define <4 x float> @shuf_X00X(<4 x float> %x, <4 x float> %a) {
define <4 x float> @shuf_X0YC(<4 x float> %x, <4 x float> %a) {
; X32-LABEL: shuf_X0YC:
; X32: ## BB#0:
-; X32-NEXT: xorps %xmm2, %xmm2
-; X32-NEXT: blendps {{.*#+}} xmm2 = xmm0[0],xmm2[1,2,3]
-; X32-NEXT: insertps {{.*#+}} xmm2 = xmm2[0],zero,xmm0[1],zero
-; X32-NEXT: insertps {{.*#+}} xmm2 = xmm2[0,1,2],xmm1[2]
-; X32-NEXT: movaps %xmm2, %xmm0
+; X32-NEXT: insertps {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero
+; X32-NEXT: insertps {{.*#+}} xmm0 = xmm0[0,1,2],xmm1[2]
; X32-NEXT: retl
;
; X64-LABEL: shuf_X0YC:
; X64: ## BB#0:
-; X64-NEXT: xorps %xmm2, %xmm2
-; X64-NEXT: blendps {{.*#+}} xmm2 = xmm0[0],xmm2[1,2,3]
-; X64-NEXT: insertps {{.*#+}} xmm2 = xmm2[0],zero,xmm0[1],zero
-; X64-NEXT: insertps {{.*#+}} xmm2 = xmm2[0,1,2],xmm1[2]
-; X64-NEXT: movaps %xmm2, %xmm0
+; X64-NEXT: insertps {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero
+; X64-NEXT: insertps {{.*#+}} xmm0 = xmm0[0,1,2],xmm1[2]
; X64-NEXT: retq
%vecext = extractelement <4 x float> %x, i32 0
%vecinit = insertelement <4 x float> undef, float %vecext, i32 0
@@ -619,12 +611,12 @@ define <4 x i32> @i32_shuf_XYZ0(<4 x i32> %x, <4 x i32> %a) {
define <4 x i32> @i32_shuf_XY00(<4 x i32> %x, <4 x i32> %a) {
; X32-LABEL: i32_shuf_XY00:
; X32: ## BB#0:
-; X32-NEXT: movq %xmm0, %xmm0
+; X32-NEXT: movq {{.*#+}} xmm0 = xmm0[0],zero
; X32-NEXT: retl
;
; X64-LABEL: i32_shuf_XY00:
; X64: ## BB#0:
-; X64-NEXT: movq %xmm0, %xmm0
+; X64-NEXT: movq {{.*#+}} xmm0 = xmm0[0],zero
; X64-NEXT: retq
%vecext = extractelement <4 x i32> %x, i32 0
%vecinit = insertelement <4 x i32> undef, i32 %vecext, i32 0
@@ -638,12 +630,16 @@ define <4 x i32> @i32_shuf_XY00(<4 x i32> %x, <4 x i32> %a) {
define <4 x i32> @i32_shuf_XYY0(<4 x i32> %x, <4 x i32> %a) {
; X32-LABEL: i32_shuf_XYY0:
; X32: ## BB#0:
-; X32-NEXT: insertps {{.*#+}} xmm0 = xmm0[0,1,1],zero
+; X32-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,1,1,3]
+; X32-NEXT: pxor %xmm0, %xmm0
+; X32-NEXT: pblendw {{.*#+}} xmm0 = xmm1[0,1,2,3,4,5],xmm0[6,7]
; X32-NEXT: retl
;
; X64-LABEL: i32_shuf_XYY0:
; X64: ## BB#0:
-; X64-NEXT: insertps {{.*#+}} xmm0 = xmm0[0,1,1],zero
+; X64-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,1,1,3]
+; X64-NEXT: pxor %xmm0, %xmm0
+; X64-NEXT: pblendw {{.*#+}} xmm0 = xmm1[0,1,2,3,4,5],xmm0[6,7]
; X64-NEXT: retq
%vecext = extractelement <4 x i32> %x, i32 0
%vecinit = insertelement <4 x i32> undef, i32 %vecext, i32 0
@@ -657,12 +653,16 @@ define <4 x i32> @i32_shuf_XYY0(<4 x i32> %x, <4 x i32> %a) {
define <4 x i32> @i32_shuf_XYW0(<4 x i32> %x, <4 x i32> %a) {
; X32-LABEL: i32_shuf_XYW0:
; X32: ## BB#0:
-; X32-NEXT: insertps {{.*#+}} xmm0 = xmm0[0,1,3],zero
+; X32-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,1,3,3]
+; X32-NEXT: pxor %xmm0, %xmm0
+; X32-NEXT: pblendw {{.*#+}} xmm0 = xmm1[0,1,2,3,4,5],xmm0[6,7]
; X32-NEXT: retl
;
; X64-LABEL: i32_shuf_XYW0:
; X64: ## BB#0:
-; X64-NEXT: insertps {{.*#+}} xmm0 = xmm0[0,1,3],zero
+; X64-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,1,3,3]
+; X64-NEXT: pxor %xmm0, %xmm0
+; X64-NEXT: pblendw {{.*#+}} xmm0 = xmm1[0,1,2,3,4,5],xmm0[6,7]
; X64-NEXT: retq
%vecext = extractelement <4 x i32> %x, i32 0
%vecinit = insertelement <4 x i32> undef, i32 %vecext, i32 0
@@ -677,12 +677,16 @@ define <4 x i32> @i32_shuf_XYW0(<4 x i32> %x, <4 x i32> %a) {
define <4 x i32> @i32_shuf_W00W(<4 x i32> %x, <4 x i32> %a) {
; X32-LABEL: i32_shuf_W00W:
; X32: ## BB#0:
-; X32-NEXT: insertps {{.*#+}} xmm0 = xmm0[3],zero,zero,xmm0[3]
+; X32-NEXT: pshufd {{.*#+}} xmm1 = xmm0[3,1,2,3]
+; X32-NEXT: pxor %xmm0, %xmm0
+; X32-NEXT: pblendw {{.*#+}} xmm0 = xmm1[0,1],xmm0[2,3,4,5],xmm1[6,7]
; X32-NEXT: retl
;
; X64-LABEL: i32_shuf_W00W:
; X64: ## BB#0:
-; X64-NEXT: insertps {{.*#+}} xmm0 = xmm0[3],zero,zero,xmm0[3]
+; X64-NEXT: pshufd {{.*#+}} xmm1 = xmm0[3,1,2,3]
+; X64-NEXT: pxor %xmm0, %xmm0
+; X64-NEXT: pblendw {{.*#+}} xmm0 = xmm1[0,1],xmm0[2,3,4,5],xmm1[6,7]
; X64-NEXT: retq
%vecext = extractelement <4 x i32> %x, i32 3
%vecinit = insertelement <4 x i32> undef, i32 %vecext, i32 0
@@ -695,16 +699,18 @@ define <4 x i32> @i32_shuf_W00W(<4 x i32> %x, <4 x i32> %a) {
define <4 x i32> @i32_shuf_X00A(<4 x i32> %x, <4 x i32> %a) {
; X32-LABEL: i32_shuf_X00A:
; X32: ## BB#0:
-; X32-NEXT: xorps %xmm2, %xmm2
-; X32-NEXT: blendps {{.*#+}} xmm0 = xmm0[0],xmm2[1,2,3]
-; X32-NEXT: insertps {{.*#+}} xmm0 = xmm0[0,1,2],xmm1[0]
+; X32-NEXT: pxor %xmm2, %xmm2
+; X32-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm2[2,3,4,5,6,7]
+; X32-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,1,2,0]
+; X32-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,5],xmm1[6,7]
; X32-NEXT: retl
;
; X64-LABEL: i32_shuf_X00A:
; X64: ## BB#0:
-; X64-NEXT: xorps %xmm2, %xmm2
-; X64-NEXT: blendps {{.*#+}} xmm0 = xmm0[0],xmm2[1,2,3]
-; X64-NEXT: insertps {{.*#+}} xmm0 = xmm0[0,1,2],xmm1[0]
+; X64-NEXT: pxor %xmm2, %xmm2
+; X64-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm2[2,3,4,5,6,7]
+; X64-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,1,2,0]
+; X64-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,5],xmm1[6,7]
; X64-NEXT: retq
%vecext = extractelement <4 x i32> %x, i32 0
%vecinit = insertelement <4 x i32> undef, i32 %vecext, i32 0
@@ -717,18 +723,16 @@ define <4 x i32> @i32_shuf_X00A(<4 x i32> %x, <4 x i32> %a) {
define <4 x i32> @i32_shuf_X00X(<4 x i32> %x, <4 x i32> %a) {
; X32-LABEL: i32_shuf_X00X:
; X32: ## BB#0:
-; X32-NEXT: xorps %xmm1, %xmm1
-; X32-NEXT: blendps {{.*#+}} xmm1 = xmm0[0],xmm1[1,2,3]
-; X32-NEXT: insertps {{.*#+}} xmm1 = xmm1[0,1,2],xmm0[0]
-; X32-NEXT: movaps %xmm1, %xmm0
+; X32-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,1,2,0]
+; X32-NEXT: pxor %xmm0, %xmm0
+; X32-NEXT: pblendw {{.*#+}} xmm0 = xmm1[0,1],xmm0[2,3,4,5],xmm1[6,7]
; X32-NEXT: retl
;
; X64-LABEL: i32_shuf_X00X:
; X64: ## BB#0:
-; X64-NEXT: xorps %xmm1, %xmm1
-; X64-NEXT: blendps {{.*#+}} xmm1 = xmm0[0],xmm1[1,2,3]
-; X64-NEXT: insertps {{.*#+}} xmm1 = xmm1[0,1,2],xmm0[0]
-; X64-NEXT: movaps %xmm1, %xmm0
+; X64-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,1,2,0]
+; X64-NEXT: pxor %xmm0, %xmm0
+; X64-NEXT: pblendw {{.*#+}} xmm0 = xmm1[0,1],xmm0[2,3,4,5],xmm1[6,7]
; X64-NEXT: retq
%vecext = extractelement <4 x i32> %x, i32 0
%vecinit = insertelement <4 x i32> undef, i32 %vecext, i32 0
@@ -741,20 +745,16 @@ define <4 x i32> @i32_shuf_X00X(<4 x i32> %x, <4 x i32> %a) {
define <4 x i32> @i32_shuf_X0YC(<4 x i32> %x, <4 x i32> %a) {
; X32-LABEL: i32_shuf_X0YC:
; X32: ## BB#0:
-; X32-NEXT: xorps %xmm2, %xmm2
-; X32-NEXT: blendps {{.*#+}} xmm2 = xmm0[0],xmm2[1,2,3]
-; X32-NEXT: insertps {{.*#+}} xmm2 = xmm2[0,1],xmm0[1],zero
-; X32-NEXT: insertps {{.*#+}} xmm2 = xmm2[0,1,2],xmm1[2]
-; X32-NEXT: movaps %xmm2, %xmm0
+; X32-NEXT: pmovzxdq {{.*#+}} xmm2 = xmm0[0],zero,xmm0[1],zero
+; X32-NEXT: pshufd {{.*#+}} xmm0 = xmm1[0,1,2,2]
+; X32-NEXT: pblendw {{.*#+}} xmm0 = xmm2[0,1,2,3,4,5],xmm0[6,7]
; X32-NEXT: retl
;
; X64-LABEL: i32_shuf_X0YC:
; X64: ## BB#0:
-; X64-NEXT: xorps %xmm2, %xmm2
-; X64-NEXT: blendps {{.*#+}} xmm2 = xmm0[0],xmm2[1,2,3]
-; X64-NEXT: insertps {{.*#+}} xmm2 = xmm2[0,1],xmm0[1],zero
-; X64-NEXT: insertps {{.*#+}} xmm2 = xmm2[0,1,2],xmm1[2]
-; X64-NEXT: movaps %xmm2, %xmm0
+; X64-NEXT: pmovzxdq {{.*#+}} xmm2 = xmm0[0],zero,xmm0[1],zero
+; X64-NEXT: pshufd {{.*#+}} xmm0 = xmm1[0,1,2,2]
+; X64-NEXT: pblendw {{.*#+}} xmm0 = xmm2[0,1,2,3,4,5],xmm0[6,7]
; X64-NEXT: retq
%vecext = extractelement <4 x i32> %x, i32 0
%vecinit = insertelement <4 x i32> undef, i32 %vecext, i32 0
@@ -816,12 +816,12 @@ define <4 x float> @insertps_from_vector_load(<4 x float> %a, <4 x float>* nocap
; X32-LABEL: insertps_from_vector_load:
; X32: ## BB#0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT: insertps $48, (%eax), %xmm0
+; X32-NEXT: insertps {{.*#+}} xmm0 = xmm0[0,1,2],mem[0]
; X32-NEXT: retl
;
; X64-LABEL: insertps_from_vector_load:
; X64: ## BB#0:
-; X64-NEXT: insertps $48, (%rdi), %xmm0
+; X64-NEXT: insertps {{.*#+}} xmm0 = xmm0[0,1,2],mem[0]
; X64-NEXT: retq
%1 = load <4 x float>* %pb, align 16
%2 = tail call <4 x float> @llvm.x86.sse41.insertps(<4 x float> %a, <4 x float> %1, i32 48)
@@ -834,12 +834,12 @@ define <4 x float> @insertps_from_vector_load_offset(<4 x float> %a, <4 x float>
; X32-LABEL: insertps_from_vector_load_offset:
; X32: ## BB#0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT: insertps $96, 4(%eax), %xmm0
+; X32-NEXT: insertps {{.*#+}} xmm0 = xmm0[0,1],mem[1],xmm0[3]
; X32-NEXT: retl
;
; X64-LABEL: insertps_from_vector_load_offset:
; X64: ## BB#0:
-; X64-NEXT: insertps $96, 4(%rdi), %xmm0
+; X64-NEXT: insertps {{.*#+}} xmm0 = xmm0[0,1],mem[1],xmm0[3]
; X64-NEXT: retq
%1 = load <4 x float>* %pb, align 16
%2 = tail call <4 x float> @llvm.x86.sse41.insertps(<4 x float> %a, <4 x float> %1, i32 96)
@@ -853,13 +853,13 @@ define <4 x float> @insertps_from_vector_load_offset_2(<4 x float> %a, <4 x floa
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X32-NEXT: shll $4, %ecx
-; X32-NEXT: insertps $-64, 12(%eax,%ecx), %xmm0
+; X32-NEXT: insertps {{.*#+}} xmm0 = mem[3],xmm0[1,2,3]
; X32-NEXT: retl
;
; X64-LABEL: insertps_from_vector_load_offset_2:
; X64: ## BB#0:
; X64-NEXT: shlq $4, %rsi
-; X64-NEXT: insertps $-64, 12(%rdi,%rsi), %xmm0
+; X64-NEXT: insertps {{.*#+}} xmm0 = mem[3],xmm0[1,2,3]
; X64-NEXT: retq
%1 = getelementptr inbounds <4 x float>* %pb, i64 %index
%2 = load <4 x float>* %1, align 16
@@ -872,14 +872,14 @@ define <4 x float> @insertps_from_broadcast_loadf32(<4 x float> %a, float* nocap
; X32: ## BB#0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
-; X32-NEXT: movss (%ecx,%eax,4), %xmm1
+; X32-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
; X32-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,0,0,0]
; X32-NEXT: insertps {{.*#+}} xmm0 = xmm0[0,1,2],xmm1[0]
; X32-NEXT: retl
;
; X64-LABEL: insertps_from_broadcast_loadf32:
; X64: ## BB#0:
-; X64-NEXT: movss (%rdi,%rsi,4), %xmm1
+; X64-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
; X64-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,0,0,0]
; X64-NEXT: insertps {{.*#+}} xmm0 = xmm0[0,1,2],xmm1[0]
; X64-NEXT: retq
@@ -924,7 +924,7 @@ define <4 x float> @insertps_from_broadcast_multiple_use(<4 x float> %a, <4 x fl
; X32: ## BB#0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
-; X32-NEXT: movss (%ecx,%eax,4), %xmm4
+; X32-NEXT: movss {{.*#+}} xmm4 = mem[0],zero,zero,zero
; X32-NEXT: shufps {{.*#+}} xmm4 = xmm4[0,0,0,0]
; X32-NEXT: insertps {{.*#+}} xmm0 = xmm0[0,1,2],xmm4[0]
; X32-NEXT: insertps {{.*#+}} xmm1 = xmm1[0,1,2],xmm4[0]
@@ -937,7 +937,7 @@ define <4 x float> @insertps_from_broadcast_multiple_use(<4 x float> %a, <4 x fl
;
; X64-LABEL: insertps_from_broadcast_multiple_use:
; X64: ## BB#0:
-; X64-NEXT: movss (%rdi,%rsi,4), %xmm4
+; X64-NEXT: movss {{.*#+}} xmm4 = mem[0],zero,zero,zero
; X64-NEXT: shufps {{.*#+}} xmm4 = xmm4[0,0,0,0]
; X64-NEXT: insertps {{.*#+}} xmm0 = xmm0[0,1,2],xmm4[0]
; X64-NEXT: insertps {{.*#+}} xmm1 = xmm1[0,1,2],xmm4[0]
@@ -967,16 +967,16 @@ define <4 x float> @insertps_with_undefs(<4 x float> %a, float* %b) {
; X32-LABEL: insertps_with_undefs:
; X32: ## BB#0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT: movss (%eax), %xmm1
-; X32-NEXT: insertps {{.*#+}} xmm1 = xmm1[0],zero,xmm0[0],xmm1[3]
-; X32-NEXT: movaps %xmm1, %xmm0
+; X32-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; X32-NEXT: unpcklpd {{.*#+}} xmm1 = xmm1[0],xmm0[0]
+; X32-NEXT: movapd %xmm1, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: insertps_with_undefs:
; X64: ## BB#0:
-; X64-NEXT: movss (%rdi), %xmm1
-; X64-NEXT: insertps {{.*#+}} xmm1 = xmm1[0],zero,xmm0[0],xmm1[3]
-; X64-NEXT: movaps %xmm1, %xmm0
+; X64-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; X64-NEXT: unpcklpd {{.*#+}} xmm1 = xmm1[0],xmm0[0]
+; X64-NEXT: movapd %xmm1, %xmm0
; X64-NEXT: retq
%1 = load float* %b, align 4
%2 = insertelement <4 x float> undef, float %1, i32 0
@@ -990,12 +990,12 @@ define <4 x float> @pr20087(<4 x float> %a, <4 x float> *%ptr) {
; X32-LABEL: pr20087:
; X32: ## BB#0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT: insertps $-78, 8(%eax), %xmm0
+; X32-NEXT: insertps {{.*#+}} xmm0 = xmm0[0],zero,xmm0[2],mem[2]
; X32-NEXT: retl
;
; X64-LABEL: pr20087:
; X64: ## BB#0:
-; X64-NEXT: insertps $-78, 8(%rdi), %xmm0
+; X64-NEXT: insertps {{.*#+}} xmm0 = xmm0[0],zero,xmm0[2],mem[2]
; X64-NEXT: retq
%load = load <4 x float> *%ptr
%ret = shufflevector <4 x float> %load, <4 x float> %a, <4 x i32> <i32 4, i32 undef, i32 6, i32 2>
@@ -1007,16 +1007,18 @@ define void @insertps_pr20411(i32* noalias nocapture %RET) #1 {
; X32-LABEL: insertps_pr20411:
; X32: ## BB#0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT: pshufd {{.*#+}} xmm0 = mem[3,1,2,3]
-; X32-NEXT: insertps $-36, LCPI49_1+12, %xmm0
-; X32-NEXT: movups %xmm0, (%eax)
+; X32-NEXT: pshufd {{.*#+}} xmm0 = mem[2,3,0,1]
+; X32-NEXT: pshufd {{.*#+}} xmm1 = mem[3,1,2,3]
+; X32-NEXT: pblendw {{.*#+}} xmm1 = xmm1[0,1],xmm0[2,3],xmm1[4,5,6,7]
+; X32-NEXT: movdqu %xmm1, (%eax)
; X32-NEXT: retl
;
; X64-LABEL: insertps_pr20411:
; X64: ## BB#0:
-; X64-NEXT: pshufd {{.*#+}} xmm0 = mem[3,1,2,3]
-; X64-NEXT: insertps $-36, LCPI49_1+{{.*}}(%rip), %xmm0
-; X64-NEXT: movups %xmm0, (%rdi)
+; X64-NEXT: pshufd {{.*#+}} xmm0 = mem[2,3,0,1]
+; X64-NEXT: pshufd {{.*#+}} xmm1 = mem[3,1,2,3]
+; X64-NEXT: pblendw {{.*#+}} xmm1 = xmm1[0,1],xmm0[2,3],xmm1[4,5,6,7]
+; X64-NEXT: movdqu %xmm1, (%rdi)
; X64-NEXT: retq
%gather_load = shufflevector <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>, <8 x i32> undef, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
%shuffle109 = shufflevector <4 x i32> <i32 4, i32 5, i32 6, i32 7>, <4 x i32> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3> ; 4 5 6 7
@@ -1029,12 +1031,12 @@ define void @insertps_pr20411(i32* noalias nocapture %RET) #1 {
define <4 x float> @insertps_4(<4 x float> %A, <4 x float> %B) {
; X32-LABEL: insertps_4:
-; X32: ## BB#0:
+; X32: ## BB#0: ## %entry
; X32-NEXT: insertps {{.*#+}} xmm0 = xmm0[0],zero,xmm1[2],zero
; X32-NEXT: retl
;
; X64-LABEL: insertps_4:
-; X64: ## BB#0:
+; X64: ## BB#0: ## %entry
; X64-NEXT: insertps {{.*#+}} xmm0 = xmm0[0],zero,xmm1[2],zero
; X64-NEXT: retq
entry:
@@ -1049,12 +1051,12 @@ entry:
define <4 x float> @insertps_5(<4 x float> %A, <4 x float> %B) {
; X32-LABEL: insertps_5:
-; X32: ## BB#0:
+; X32: ## BB#0: ## %entry
; X32-NEXT: insertps {{.*#+}} xmm0 = xmm0[0],xmm1[1],zero,zero
; X32-NEXT: retl
;
; X64-LABEL: insertps_5:
-; X64: ## BB#0:
+; X64: ## BB#0: ## %entry
; X64-NEXT: insertps {{.*#+}} xmm0 = xmm0[0],xmm1[1],zero,zero
; X64-NEXT: retq
entry:
@@ -1069,12 +1071,12 @@ entry:
define <4 x float> @insertps_6(<4 x float> %A, <4 x float> %B) {
; X32-LABEL: insertps_6:
-; X32: ## BB#0:
+; X32: ## BB#0: ## %entry
; X32-NEXT: insertps {{.*#+}} xmm0 = zero,xmm0[1],xmm1[2],zero
; X32-NEXT: retl
;
; X64-LABEL: insertps_6:
-; X64: ## BB#0:
+; X64: ## BB#0: ## %entry
; X64-NEXT: insertps {{.*#+}} xmm0 = zero,xmm0[1],xmm1[2],zero
; X64-NEXT: retq
entry:
@@ -1088,12 +1090,12 @@ entry:
define <4 x float> @insertps_7(<4 x float> %A, <4 x float> %B) {
; X32-LABEL: insertps_7:
-; X32: ## BB#0:
+; X32: ## BB#0: ## %entry
; X32-NEXT: insertps {{.*#+}} xmm0 = xmm0[0],zero,xmm1[1],zero
; X32-NEXT: retl
;
; X64-LABEL: insertps_7:
-; X64: ## BB#0:
+; X64: ## BB#0: ## %entry
; X64-NEXT: insertps {{.*#+}} xmm0 = xmm0[0],zero,xmm1[1],zero
; X64-NEXT: retq
entry:
@@ -1108,12 +1110,12 @@ entry:
define <4 x float> @insertps_8(<4 x float> %A, <4 x float> %B) {
; X32-LABEL: insertps_8:
-; X32: ## BB#0:
+; X32: ## BB#0: ## %entry
; X32-NEXT: insertps {{.*#+}} xmm0 = xmm0[0],xmm1[0],zero,zero
; X32-NEXT: retl
;
; X64-LABEL: insertps_8:
-; X64: ## BB#0:
+; X64: ## BB#0: ## %entry
; X64-NEXT: insertps {{.*#+}} xmm0 = xmm0[0],xmm1[0],zero,zero
; X64-NEXT: retq
entry:
@@ -1128,13 +1130,13 @@ entry:
define <4 x float> @insertps_9(<4 x float> %A, <4 x float> %B) {
; X32-LABEL: insertps_9:
-; X32: ## BB#0:
+; X32: ## BB#0: ## %entry
; X32-NEXT: insertps {{.*#+}} xmm1 = zero,xmm0[0],xmm1[2],zero
; X32-NEXT: movaps %xmm1, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: insertps_9:
-; X64: ## BB#0:
+; X64: ## BB#0: ## %entry
; X64-NEXT: insertps {{.*#+}} xmm1 = zero,xmm0[0],xmm1[2],zero
; X64-NEXT: movaps %xmm1, %xmm0
; X64-NEXT: retq
@@ -1146,3 +1148,59 @@ entry:
%vecinit3 = insertelement <4 x float> %vecinit2, float 0.000000e+00, i32 3
ret <4 x float> %vecinit3
}
+
+define <4 x float> @insertps_10(<4 x float> %A)
+; X32-LABEL: insertps_10:
+; X32: ## BB#0:
+; X32-NEXT: insertps {{.*#+}} xmm0 = xmm0[0],zero,xmm0[0],zero
+; X32-NEXT: retl
+;
+; X64-LABEL: insertps_10:
+; X64: ## BB#0:
+; X64-NEXT: insertps {{.*#+}} xmm0 = xmm0[0],zero,xmm0[0],zero
+; X64-NEXT: retq
+{
+ %vecext = extractelement <4 x float> %A, i32 0
+ %vecbuild1 = insertelement <4 x float> <float 0.000000e+00, float 0.000000e+00, float 0.000000e+00, float 0.000000e+00>, float %vecext, i32 0
+ %vecbuild2 = insertelement <4 x float> %vecbuild1, float %vecext, i32 2
+ ret <4 x float> %vecbuild2
+}
+
+define <4 x float> @build_vector_to_shuffle_1(<4 x float> %A) {
+; X32-LABEL: build_vector_to_shuffle_1:
+; X32: ## BB#0: ## %entry
+; X32-NEXT: xorps %xmm1, %xmm1
+; X32-NEXT: blendps {{.*#+}} xmm0 = xmm1[0],xmm0[1],xmm1[2],xmm0[3]
+; X32-NEXT: retl
+;
+; X64-LABEL: build_vector_to_shuffle_1:
+; X64: ## BB#0: ## %entry
+; X64-NEXT: xorps %xmm1, %xmm1
+; X64-NEXT: blendps {{.*#+}} xmm0 = xmm1[0],xmm0[1],xmm1[2],xmm0[3]
+; X64-NEXT: retq
+entry:
+ %vecext = extractelement <4 x float> %A, i32 1
+ %vecinit = insertelement <4 x float> zeroinitializer, float %vecext, i32 1
+ %vecinit1 = insertelement <4 x float> %vecinit, float 0.0, i32 2
+ %vecinit3 = shufflevector <4 x float> %vecinit1, <4 x float> %A, <4 x i32> <i32 0, i32 1, i32 2, i32 7>
+ ret <4 x float> %vecinit3
+}
+
+define <4 x float> @build_vector_to_shuffle_2(<4 x float> %A) {
+; X32-LABEL: build_vector_to_shuffle_2:
+; X32: ## BB#0: ## %entry
+; X32-NEXT: xorps %xmm1, %xmm1
+; X32-NEXT: blendps {{.*#+}} xmm0 = xmm1[0],xmm0[1],xmm1[2,3]
+; X32-NEXT: retl
+;
+; X64-LABEL: build_vector_to_shuffle_2:
+; X64: ## BB#0: ## %entry
+; X64-NEXT: xorps %xmm1, %xmm1
+; X64-NEXT: blendps {{.*#+}} xmm0 = xmm1[0],xmm0[1],xmm1[2,3]
+; X64-NEXT: retq
+entry:
+ %vecext = extractelement <4 x float> %A, i32 1
+ %vecinit = insertelement <4 x float> zeroinitializer, float %vecext, i32 1
+ %vecinit1 = insertelement <4 x float> %vecinit, float 0.0, i32 2
+ ret <4 x float> %vecinit1
+}
diff --git a/test/CodeGen/X86/sse4a.ll b/test/CodeGen/X86/sse4a.ll
index 165d476..f8fa125 100644
--- a/test/CodeGen/X86/sse4a.ll
+++ b/test/CodeGen/X86/sse4a.ll
@@ -1,4 +1,5 @@
; RUN: llc < %s -mtriple=i686-apple-darwin9 -mattr=sse4a | FileCheck %s
+; RUN: llc < %s -mtriple=x86_64-unknown-linux -mattr=sse4a | FileCheck %s
define void @test1(i8* %p, <4 x float> %a) nounwind optsize ssp {
; CHECK-LABEL: test1:
diff --git a/test/CodeGen/X86/sse_partial_update.ll b/test/CodeGen/X86/sse_partial_update.ll
index 2c16a55..377c3b7 100644
--- a/test/CodeGen/X86/sse_partial_update.ll
+++ b/test/CodeGen/X86/sse_partial_update.ll
@@ -5,11 +5,18 @@
; There is a mismatch between the intrinsic and the actual instruction.
; The actual instruction has a partial update of dest, while the intrinsic
; passes through the upper FP values. Here, we make sure the source and
-; destination of rsqrtss are the same.
-define void @t1(<4 x float> %a) nounwind uwtable ssp {
+; destination of each scalar unary op are the same.
+
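+; A sketch of the hazard being guarded against (not itself checked below):
+; "rsqrtss %xmm1, %xmm0" writes only the low lane of %xmm0 and leaves lanes
+; 1-3 stale, creating a false dependence on the prior value of %xmm0; tying
+; source and destination ("rsqrtss %xmm0, %xmm0") avoids both that and the
+; mismatch with the intrinsic's pass-through semantics.
+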
+define void @rsqrtss(<4 x float> %a) nounwind uwtable ssp {
entry:
-; CHECK-LABEL: t1:
+; CHECK-LABEL: rsqrtss:
; CHECK: rsqrtss %xmm0, %xmm0
+; CHECK-NEXT: cvtss2sd %xmm0
+; CHECK-NEXT: movshdup
+; CHECK-NEXT: cvtss2sd %xmm0
+; CHECK-NEXT: movap
+; CHECK-NEXT: jmp
+
%0 = tail call <4 x float> @llvm.x86.sse.rsqrt.ss(<4 x float> %a) nounwind
%a.addr.0.extract = extractelement <4 x float> %0, i32 0
%conv = fpext float %a.addr.0.extract to double
@@ -21,10 +28,16 @@ entry:
declare void @callee(double, double)
declare <4 x float> @llvm.x86.sse.rsqrt.ss(<4 x float>) nounwind readnone
-define void @t2(<4 x float> %a) nounwind uwtable ssp {
+define void @rcpss(<4 x float> %a) nounwind uwtable ssp {
entry:
-; CHECK-LABEL: t2:
+; CHECK-LABEL: rcpss:
; CHECK: rcpss %xmm0, %xmm0
+; CHECK-NEXT: cvtss2sd %xmm0
+; CHECK-NEXT: movshdup
+; CHECK-NEXT: cvtss2sd %xmm0
+; CHECK-NEXT: movap
+; CHECK-NEXT: jmp
+
%0 = tail call <4 x float> @llvm.x86.sse.rcp.ss(<4 x float> %a) nounwind
%a.addr.0.extract = extractelement <4 x float> %0, i32 0
%conv = fpext float %a.addr.0.extract to double
@@ -34,3 +47,46 @@ entry:
ret void
}
declare <4 x float> @llvm.x86.sse.rcp.ss(<4 x float>) nounwind readnone
+
+define void @sqrtss(<4 x float> %a) nounwind uwtable ssp {
+entry:
+; CHECK-LABEL: sqrtss:
+; CHECK: sqrtss %xmm0, %xmm0
+; CHECK-NEXT: cvtss2sd %xmm0
+; CHECK-NEXT: movshdup
+; CHECK-NEXT: cvtss2sd %xmm0
+; CHECK-NEXT: movap
+; CHECK-NEXT: jmp
+
+ %0 = tail call <4 x float> @llvm.x86.sse.sqrt.ss(<4 x float> %a) nounwind
+ %a.addr.0.extract = extractelement <4 x float> %0, i32 0
+ %conv = fpext float %a.addr.0.extract to double
+ %a.addr.4.extract = extractelement <4 x float> %0, i32 1
+ %conv3 = fpext float %a.addr.4.extract to double
+ tail call void @callee(double %conv, double %conv3) nounwind
+ ret void
+}
+declare <4 x float> @llvm.x86.sse.sqrt.ss(<4 x float>) nounwind readnone
+
+define void @sqrtsd(<2 x double> %a) nounwind uwtable ssp {
+entry:
+; CHECK-LABEL: sqrtsd:
+; CHECK: sqrtsd %xmm0, %xmm0
+; CHECK-NEXT: cvtsd2ss %xmm0
+; CHECK-NEXT: shufpd
+; CHECK-NEXT: cvtsd2ss %xmm0
+; CHECK-NEXT: movap
+; CHECK-NEXT: jmp
+
+ %0 = tail call <2 x double> @llvm.x86.sse2.sqrt.sd(<2 x double> %a) nounwind
+ %a0 = extractelement <2 x double> %0, i32 0
+ %conv = fptrunc double %a0 to float
+ %a1 = extractelement <2 x double> %0, i32 1
+ %conv3 = fptrunc double %a1 to float
+ tail call void @callee2(float %conv, float %conv3) nounwind
+ ret void
+}
+
+declare void @callee2(float, float)
+declare <2 x double> @llvm.x86.sse2.sqrt.sd(<2 x double>) nounwind readnone
+
diff --git a/test/CodeGen/X86/stack-align.ll b/test/CodeGen/X86/stack-align.ll
index eafb7c2..74f4c78 100644
--- a/test/CodeGen/X86/stack-align.ll
+++ b/test/CodeGen/X86/stack-align.ll
@@ -1,7 +1,10 @@
; RUN: llc < %s -relocation-model=static -mcpu=yonah | FileCheck %s
-; The double argument is at 4(esp) which is 16-byte aligned, allowing us to
-; fold the load into the andpd.
+; The double argument is at 4(esp), which is 16-byte aligned, but we
+; are required to read in extra bytes of memory in order to fold the
+; load. Bad Things may happen when reading/processing undefined bytes,
+; so don't fold the load.
+; PR22371 / http://reviews.llvm.org/D7474
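+; Concretely (for illustration only): folding the load into
+; "andpd 4(%esp), %xmm0" would make the instruction read 16 bytes starting
+; at 4(%esp), touching 8 bytes beyond the 8-byte double stored there, so the
+; safe lowering loads the scalar with movsd first and then applies andpd
+; register-to-register.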
target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:128:128"
target triple = "i686-apple-darwin8"
@@ -15,22 +18,31 @@ entry:
%tmp = getelementptr { double, double }* %z, i32 0, i32 0 ; <double*> [#uses=1]
%tmp1 = load volatile double* %tmp, align 8 ; <double> [#uses=1]
%tmp2 = tail call double @fabs( double %tmp1 ) readnone ; <double> [#uses=1]
- ; CHECK: andpd{{.*}}4(%esp), %xmm
%tmp6 = fadd double %tmp4, %tmp2 ; <double> [#uses=1]
store volatile double %tmp6, double* %P, align 8
ret void
+
+; CHECK-LABEL: test:
+; CHECK: movsd {{.*}}G, %xmm{{.*}}
+; CHECK: andpd %xmm{{.*}}, %xmm{{.*}}
+; CHECK: movsd 4(%esp), %xmm{{.*}}
+; CHECK: andpd %xmm{{.*}}, %xmm{{.*}}
+
+
}
define void @test2() alignstack(16) nounwind {
entry:
- ; CHECK: andl{{.*}}$-16, %esp
+; CHECK-LABEL: test2:
+; CHECK: andl{{.*}}$-16, %esp
ret void
}
; Use a call to force a spill.
define <2 x double> @test3(<2 x double> %x, <2 x double> %y) alignstack(32) nounwind {
entry:
- ; CHECK: andl{{.*}}$-32, %esp
+; CHECK-LABEL: test3:
+; CHECK: andl{{.*}}$-32, %esp
call void @test2()
%A = fmul <2 x double> %x, %y
ret <2 x double> %A
diff --git a/test/CodeGen/X86/stack-folding-fp-avx1.ll b/test/CodeGen/X86/stack-folding-fp-avx1.ll
new file mode 100644
index 0000000..18cd417
--- /dev/null
+++ b/test/CodeGen/X86/stack-folding-fp-avx1.ll
@@ -0,0 +1,1811 @@
+; RUN: llc -O3 -disable-peephole -mtriple=x86_64-unknown-unknown -mcpu=x86-64 -mattr=+avx,+f16c < %s | FileCheck %s
+
+target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
+target triple = "x86_64-unknown-unknown"
+
+; Stack reload folding tests.
+;
+; By including a nop call with side effects, we can force a partial register spill of the
+; relevant registers and check that the reload is correctly folded into the instruction.
+
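+; Roughly how the forcing works (a summary, not itself checked): the asm
+; output needs a register and xmm2-xmm15 are clobbered, so with both vector
+; arguments live across the call there are more values than free registers
+; and one argument is spilled; each CHECK then requires the reload to appear
+; as a folded memory operand rather than a separate load.
+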
+define <2 x double> @stack_fold_addpd(<2 x double> %a0, <2 x double> %a1) {
+ ;CHECK-LABEL: stack_fold_addpd
+ ;CHECK: vaddpd {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = fadd <2 x double> %a0, %a1
+ ret <2 x double> %2
+}
+
+define <4 x double> @stack_fold_addpd_ymm(<4 x double> %a0, <4 x double> %a1) {
+ ;CHECK-LABEL: stack_fold_addpd_ymm
+ ;CHECK: vaddpd {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = fadd <4 x double> %a0, %a1
+ ret <4 x double> %2
+}
+
+define <4 x float> @stack_fold_addps(<4 x float> %a0, <4 x float> %a1) {
+ ;CHECK-LABEL: stack_fold_addps
+ ;CHECK: vaddps {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = fadd <4 x float> %a0, %a1
+ ret <4 x float> %2
+}
+
+define <8 x float> @stack_fold_addps_ymm(<8 x float> %a0, <8 x float> %a1) {
+ ;CHECK-LABEL: stack_fold_addps_ymm
+ ;CHECK: vaddps {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = fadd <8 x float> %a0, %a1
+ ret <8 x float> %2
+}
+
+define double @stack_fold_addsd(double %a0, double %a1) {
+ ;CHECK-LABEL: stack_fold_addsd
+ ;CHECK: vaddsd {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 8-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = fadd double %a0, %a1
+ ret double %2
+}
+
+define <2 x double> @stack_fold_addsd_int(<2 x double> %a0, <2 x double> %a1) {
+ ;CHECK-LABEL: stack_fold_addsd_int
+ ;CHECK: vaddsd {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <2 x double> @llvm.x86.sse2.add.sd(<2 x double> %a0, <2 x double> %a1)
+ ret <2 x double> %2
+}
+declare <2 x double> @llvm.x86.sse2.add.sd(<2 x double>, <2 x double>) nounwind readnone
+
+define float @stack_fold_addss(float %a0, float %a1) {
+ ;CHECK-LABEL: stack_fold_addss
+ ;CHECK: vaddss {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 4-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = fadd float %a0, %a1
+ ret float %2
+}
+
+define <4 x float> @stack_fold_addss_int(<4 x float> %a0, <4 x float> %a1) {
+ ;CHECK-LABEL: stack_fold_addss_int
+ ;CHECK: vaddss {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <4 x float> @llvm.x86.sse.add.ss(<4 x float> %a0, <4 x float> %a1)
+ ret <4 x float> %2
+}
+declare <4 x float> @llvm.x86.sse.add.ss(<4 x float>, <4 x float>) nounwind readnone
+
+define <2 x double> @stack_fold_addsubpd(<2 x double> %a0, <2 x double> %a1) {
+ ;CHECK-LABEL: stack_fold_addsubpd
+ ;CHECK: vaddsubpd {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <2 x double> @llvm.x86.sse3.addsub.pd(<2 x double> %a0, <2 x double> %a1)
+ ret <2 x double> %2
+}
+declare <2 x double> @llvm.x86.sse3.addsub.pd(<2 x double>, <2 x double>) nounwind readnone
+
+define <4 x double> @stack_fold_addsubpd_ymm(<4 x double> %a0, <4 x double> %a1) {
+ ;CHECK-LABEL: stack_fold_addsubpd_ymm
+ ;CHECK: vaddsubpd {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <4 x double> @llvm.x86.avx.addsub.pd.256(<4 x double> %a0, <4 x double> %a1)
+ ret <4 x double> %2
+}
+declare <4 x double> @llvm.x86.avx.addsub.pd.256(<4 x double>, <4 x double>) nounwind readnone
+
+define <4 x float> @stack_fold_addsubps(<4 x float> %a0, <4 x float> %a1) {
+ ;CHECK-LABEL: stack_fold_addsubps
+ ;CHECK: vaddsubps {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <4 x float> @llvm.x86.sse3.addsub.ps(<4 x float> %a0, <4 x float> %a1)
+ ret <4 x float> %2
+}
+declare <4 x float> @llvm.x86.sse3.addsub.ps(<4 x float>, <4 x float>) nounwind readnone
+
+define <8 x float> @stack_fold_addsubps_ymm(<8 x float> %a0, <8 x float> %a1) {
+ ;CHECK-LABEL: stack_fold_addsubps_ymm
+ ;CHECK: vaddsubps {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <8 x float> @llvm.x86.avx.addsub.ps.256(<8 x float> %a0, <8 x float> %a1)
+ ret <8 x float> %2
+}
+declare <8 x float> @llvm.x86.avx.addsub.ps.256(<8 x float>, <8 x float>) nounwind readnone
+
+define <2 x double> @stack_fold_andnpd(<2 x double> %a0, <2 x double> %a1) {
+ ;CHECK-LABEL: stack_fold_andnpd
+ ;CHECK: vandnpd {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = bitcast <2 x double> %a0 to <2 x i64>
+ %3 = bitcast <2 x double> %a1 to <2 x i64>
+ %4 = xor <2 x i64> %2, <i64 -1, i64 -1>
+ %5 = and <2 x i64> %4, %3
+ %6 = bitcast <2 x i64> %5 to <2 x double>
+ ; fadd forces execution domain
+ %7 = fadd <2 x double> %6, <double 0x0, double 0x0>
+ ret <2 x double> %7
+}
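+; (Here and in the file's other bitwise tests, the trailing fadd pins the
+; result to the floating-point execution domain, so the vandnpd/vandpd-style
+; forms are selected rather than their integer-domain equivalents.)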
+
+define <4 x double> @stack_fold_andnpd_ymm(<4 x double> %a0, <4 x double> %a1) {
+ ;CHECK-LABEL: stack_fold_andnpd_ymm
+ ;CHECK: vandnpd {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = bitcast <4 x double> %a0 to <4 x i64>
+ %3 = bitcast <4 x double> %a1 to <4 x i64>
+ %4 = xor <4 x i64> %2, <i64 -1, i64 -1, i64 -1, i64 -1>
+ %5 = and <4 x i64> %4, %3
+ %6 = bitcast <4 x i64> %5 to <4 x double>
+ ; fadd forces execution domain
+ %7 = fadd <4 x double> %6, <double 0x0, double 0x0, double 0x0, double 0x0>
+ ret <4 x double> %7
+}
+
+define <4 x float> @stack_fold_andnps(<4 x float> %a0, <4 x float> %a1) {
+ ;CHECK-LABEL: stack_fold_andnps
+ ;CHECK: vandnps {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = bitcast <4 x float> %a0 to <2 x i64>
+ %3 = bitcast <4 x float> %a1 to <2 x i64>
+ %4 = xor <2 x i64> %2, <i64 -1, i64 -1>
+ %5 = and <2 x i64> %4, %3
+ %6 = bitcast <2 x i64> %5 to <4 x float>
+ ; fadd forces execution domain
+ %7 = fadd <4 x float> %6, <float 0x0, float 0x0, float 0x0, float 0x0>
+ ret <4 x float> %7
+}
+
+define <8 x float> @stack_fold_andnps_ymm(<8 x float> %a0, <8 x float> %a1) {
+ ;CHECK-LABEL: stack_fold_andnps_ymm
+ ;CHECK: vandnps {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = bitcast <8 x float> %a0 to <4 x i64>
+ %3 = bitcast <8 x float> %a1 to <4 x i64>
+ %4 = xor <4 x i64> %2, <i64 -1, i64 -1, i64 -1, i64 -1>
+ %5 = and <4 x i64> %4, %3
+ %6 = bitcast <4 x i64> %5 to <8 x float>
+ ; fadd forces execution domain
+ %7 = fadd <8 x float> %6, <float 0x0, float 0x0, float 0x0, float 0x0, float 0x0, float 0x0, float 0x0, float 0x0>
+ ret <8 x float> %7
+}
+
+define <2 x double> @stack_fold_andpd(<2 x double> %a0, <2 x double> %a1) {
+ ;CHECK-LABEL: stack_fold_andpd
+ ;CHECK: vandpd {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = bitcast <2 x double> %a0 to <2 x i64>
+ %3 = bitcast <2 x double> %a1 to <2 x i64>
+ %4 = and <2 x i64> %2, %3
+ %5 = bitcast <2 x i64> %4 to <2 x double>
+ ; fadd forces execution domain
+ %6 = fadd <2 x double> %5, <double 0x0, double 0x0>
+ ret <2 x double> %6
+}
+
+define <4 x double> @stack_fold_andpd_ymm(<4 x double> %a0, <4 x double> %a1) {
+ ;CHECK-LABEL: stack_fold_andpd_ymm
+ ;CHECK: vandpd {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = bitcast <4 x double> %a0 to <4 x i64>
+ %3 = bitcast <4 x double> %a1 to <4 x i64>
+ %4 = and <4 x i64> %2, %3
+ %5 = bitcast <4 x i64> %4 to <4 x double>
+ ; fadd forces execution domain
+ %6 = fadd <4 x double> %5, <double 0x0, double 0x0, double 0x0, double 0x0>
+ ret <4 x double> %6
+}
+
+define <4 x float> @stack_fold_andps(<4 x float> %a0, <4 x float> %a1) {
+ ;CHECK-LABEL: stack_fold_andps
+ ;CHECK: vandps {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = bitcast <4 x float> %a0 to <2 x i64>
+ %3 = bitcast <4 x float> %a1 to <2 x i64>
+ %4 = and <2 x i64> %2, %3
+ %5 = bitcast <2 x i64> %4 to <4 x float>
+ ; fadd forces execution domain
+ %6 = fadd <4 x float> %5, <float 0x0, float 0x0, float 0x0, float 0x0>
+ ret <4 x float> %6
+}
+
+define <8 x float> @stack_fold_andps_ymm(<8 x float> %a0, <8 x float> %a1) {
+ ;CHECK-LABEL: stack_fold_andps_ymm
+ ;CHECK: vandps {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = bitcast <8 x float> %a0 to <4 x i64>
+ %3 = bitcast <8 x float> %a1 to <4 x i64>
+ %4 = and <4 x i64> %2, %3
+ %5 = bitcast <4 x i64> %4 to <8 x float>
+ ; fadd forces execution domain
+ %6 = fadd <8 x float> %5, <float 0x0, float 0x0, float 0x0, float 0x0, float 0x0, float 0x0, float 0x0, float 0x0>
+ ret <8 x float> %6
+}
+
+define <2 x double> @stack_fold_blendpd(<2 x double> %a0, <2 x double> %a1) {
+ ;CHECK-LABEL: stack_fold_blendpd
+ ;CHECK: vblendpd $2, {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = select <2 x i1> <i1 1, i1 0>, <2 x double> %a0, <2 x double> %a1
+ ret <2 x double> %2
+}
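+
+; In the blend tests the select's constant mask lowers to the blend
+; immediate: each set bit selects a lane from the second source, which here
+; is the memory operand. Assuming the spilled value is %a1 (as the register
+; pressure above arranges), $2 is the bitwise complement of <i1 1, i1 0>.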
+
+define <4 x double> @stack_fold_blendpd_ymm(<4 x double> %a0, <4 x double> %a1) {
+ ;CHECK-LABEL: stack_fold_blendpd_ymm
+ ;CHECK: vblendpd $6, {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = select <4 x i1> <i1 1, i1 0, i1 0, i1 1>, <4 x double> %a0, <4 x double> %a1
+ ret <4 x double> %2
+}
+
+define <4 x float> @stack_fold_blendps(<4 x float> %a0, <4 x float> %a1) {
+ ;CHECK-LABEL: stack_fold_blendps
+ ;CHECK: vblendps $6, {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = select <4 x i1> <i1 1, i1 0, i1 0, i1 1>, <4 x float> %a0, <4 x float> %a1
+ ret <4 x float> %2
+}
+
+define <8 x float> @stack_fold_blendps_ymm(<8 x float> %a0, <8 x float> %a1) {
+ ;CHECK-LABEL: stack_fold_blendps_ymm
+ ;CHECK: vblendps $102, {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = select <8 x i1> <i1 1, i1 0, i1 0, i1 1, i1 1, i1 0, i1 0, i1 1>, <8 x float> %a0, <8 x float> %a1
+ ret <8 x float> %2
+}
+
+define <2 x double> @stack_fold_blendvpd(<2 x double> %a0, <2 x double> %a1, <2 x double> %c) {
+ ;CHECK-LABEL: stack_fold_blendvpd
+ ;CHECK: vblendvpd {{%xmm[0-9][0-9]*}}, {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <2 x double> @llvm.x86.sse41.blendvpd(<2 x double> %a1, <2 x double> %c, <2 x double> %a0)
+ ret <2 x double> %2
+}
+declare <2 x double> @llvm.x86.sse41.blendvpd(<2 x double>, <2 x double>, <2 x double>) nounwind readnone
+
+define <4 x double> @stack_fold_blendvpd_ymm(<4 x double> %a0, <4 x double> %a1, <4 x double> %c) {
+ ;CHECK-LABEL: stack_fold_blendvpd_ymm
+ ;CHECK: vblendvpd {{%ymm[0-9][0-9]*}}, {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <4 x double> @llvm.x86.avx.blendv.pd.256(<4 x double> %a1, <4 x double> %c, <4 x double> %a0)
+ ret <4 x double> %2
+}
+declare <4 x double> @llvm.x86.avx.blendv.pd.256(<4 x double>, <4 x double>, <4 x double>) nounwind readnone
+
+define <4 x float> @stack_fold_blendvps(<4 x float> %a0, <4 x float> %a1, <4 x float> %c) {
+ ;CHECK-LABEL: stack_fold_blendvps
+ ;CHECK: vblendvps {{%xmm[0-9][0-9]*}}, {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <4 x float> @llvm.x86.sse41.blendvps(<4 x float> %a1, <4 x float> %c, <4 x float> %a0)
+ ret <4 x float> %2
+}
+declare <4 x float> @llvm.x86.sse41.blendvps(<4 x float>, <4 x float>, <4 x float>) nounwind readnone
+
+define <8 x float> @stack_fold_blendvps_ymm(<8 x float> %a0, <8 x float> %a1, <8 x float> %c) {
+ ;CHECK-LABEL: stack_fold_blendvps_ymm
+ ;CHECK: vblendvps {{%ymm[0-9][0-9]*}}, {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <8 x float> @llvm.x86.avx.blendv.ps.256(<8 x float> %a1, <8 x float> %c, <8 x float> %a0)
+ ret <8 x float> %2
+}
+declare <8 x float> @llvm.x86.avx.blendv.ps.256(<8 x float>, <8 x float>, <8 x float>) nounwind readnone
+
+define <2 x double> @stack_fold_cmppd(<2 x double> %a0, <2 x double> %a1) {
+ ;CHECK-LABEL: stack_fold_cmppd
+ ;CHECK: vcmpeqpd {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <2 x double> @llvm.x86.sse2.cmp.pd(<2 x double> %a0, <2 x double> %a1, i8 0)
+ ret <2 x double> %2
+}
+declare <2 x double> @llvm.x86.sse2.cmp.pd(<2 x double>, <2 x double>, i8) nounwind readnone
+
+define <4 x double> @stack_fold_cmppd_ymm(<4 x double> %a0, <4 x double> %a1) {
+ ;CHECK-LABEL: stack_fold_cmppd_ymm
+ ;CHECK: vcmpeqpd {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <4 x double> @llvm.x86.avx.cmp.pd.256(<4 x double> %a0, <4 x double> %a1, i8 0)
+ ret <4 x double> %2
+}
+declare <4 x double> @llvm.x86.avx.cmp.pd.256(<4 x double>, <4 x double>, i8) nounwind readnone
+
+define <4 x float> @stack_fold_cmpps(<4 x float> %a0, <4 x float> %a1) {
+ ;CHECK-LABEL: stack_fold_cmpps
+ ;CHECK: vcmpeqps {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <4 x float> @llvm.x86.sse.cmp.ps(<4 x float> %a0, <4 x float> %a1, i8 0)
+ ret <4 x float> %2
+}
+declare <4 x float> @llvm.x86.sse.cmp.ps(<4 x float>, <4 x float>, i8) nounwind readnone
+
+define <8 x float> @stack_fold_cmpps_ymm(<8 x float> %a0, <8 x float> %a1) {
+ ;CHECK-LABEL: stack_fold_cmpps_ymm
+ ;CHECK: vcmpeqps {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <8 x float> @llvm.x86.avx.cmp.ps.256(<8 x float> %a0, <8 x float> %a1, i8 0)
+ ret <8 x float> %2
+}
+declare <8 x float> @llvm.x86.avx.cmp.ps.256(<8 x float>, <8 x float>, i8) nounwind readnone
+
+define i32 @stack_fold_cmpsd(double %a0, double %a1) {
+ ;CHECK-LABEL: stack_fold_cmpsd
+ ;CHECK: vcmpeqsd {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 8-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = fcmp oeq double %a0, %a1
+ %3 = zext i1 %2 to i32
+ ret i32 %3
+}
+
+define <2 x double> @stack_fold_cmpsd_int(<2 x double> %a0, <2 x double> %a1) {
+ ;CHECK-LABEL: stack_fold_cmpsd_int
+ ;CHECK: vcmpeqsd {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <2 x double> @llvm.x86.sse2.cmp.sd(<2 x double> %a0, <2 x double> %a1, i8 0)
+ ret <2 x double> %2
+}
+declare <2 x double> @llvm.x86.sse2.cmp.sd(<2 x double>, <2 x double>, i8) nounwind readnone
+
+define i32 @stack_fold_cmpss(float %a0, float %a1) {
+ ;CHECK-LABEL: stack_fold_cmpss
+ ;CHECK: vcmpeqss {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 4-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = fcmp oeq float %a0, %a1
+ %3 = zext i1 %2 to i32
+ ret i32 %3
+}
+
+define <4 x float> @stack_fold_cmpss_int(<4 x float> %a0, <4 x float> %a1) {
+ ;CHECK-LABEL: stack_fold_cmpss_int
+ ;CHECK: vcmpeqss {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <4 x float> @llvm.x86.sse.cmp.ss(<4 x float> %a0, <4 x float> %a1, i8 0)
+ ret <4 x float> %2
+}
+declare <4 x float> @llvm.x86.sse.cmp.ss(<4 x float>, <4 x float>, i8) nounwind readnone
+
+; TODO stack_fold_comisd
+
+define i32 @stack_fold_comisd_int(<2 x double> %a0, <2 x double> %a1) {
+ ;CHECK-LABEL: stack_fold_comisd_int
+ ;CHECK: vcomisd {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call i32 @llvm.x86.sse2.comieq.sd(<2 x double> %a0, <2 x double> %a1)
+ ret i32 %2
+}
+declare i32 @llvm.x86.sse2.comieq.sd(<2 x double>, <2 x double>) nounwind readnone
+
+; TODO stack_fold_comiss
+
+define i32 @stack_fold_comiss_int(<4 x float> %a0, <4 x float> %a1) {
+ ;CHECK-LABEL: stack_fold_comiss_int
+ ;CHECK: vcomiss {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call i32 @llvm.x86.sse.comieq.ss(<4 x float> %a0, <4 x float> %a1)
+ ret i32 %2
+}
+declare i32 @llvm.x86.sse.comieq.ss(<4 x float>, <4 x float>) nounwind readnone
+
+define <2 x double> @stack_fold_cvtdq2pd(<4 x i32> %a0) {
+ ;CHECK-LABEL: stack_fold_cvtdq2pd
+ ;CHECK: vcvtdq2pd {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <2 x double> @llvm.x86.sse2.cvtdq2pd(<4 x i32> %a0)
+ ret <2 x double> %2
+}
+declare <2 x double> @llvm.x86.sse2.cvtdq2pd(<4 x i32>) nounwind readnone
+
+define <4 x double> @stack_fold_cvtdq2pd_ymm(<4 x i32> %a0) {
+ ;CHECK-LABEL: stack_fold_cvtdq2pd_ymm
+ ;CHECK: vcvtdq2pd {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <4 x double> @llvm.x86.avx.cvtdq2.pd.256(<4 x i32> %a0)
+ ret <4 x double> %2
+}
+declare <4 x double> @llvm.x86.avx.cvtdq2.pd.256(<4 x i32>) nounwind readnone
+
+define <4 x float> @stack_fold_cvtdq2ps(<4 x i32> %a0) {
+ ;CHECK-LABEL: stack_fold_cvtdq2ps
+ ;CHECK: vcvtdq2ps {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = sitofp <4 x i32> %a0 to <4 x float>
+ ret <4 x float> %2
+}
+
+define <8 x float> @stack_fold_cvtdq2ps_ymm(<8 x i32> %a0) {
+ ;CHECK-LABEL: stack_fold_cvtdq2ps_ymm
+ ;CHECK: vcvtdq2ps {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = sitofp <8 x i32> %a0 to <8 x float>
+ ret <8 x float> %2
+}
+
+define <4 x i32> @stack_fold_cvtpd2dq(<2 x double> %a0) {
+ ;CHECK-LABEL: stack_fold_cvtpd2dq
+ ;CHECK: vcvtpd2dqx {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <4 x i32> @llvm.x86.sse2.cvtpd2dq(<2 x double> %a0)
+ ret <4 x i32> %2
+}
+declare <4 x i32> @llvm.x86.sse2.cvtpd2dq(<2 x double>) nounwind readnone
+
+define <4 x i32> @stack_fold_cvtpd2dq_ymm(<4 x double> %a0) {
+ ;CHECK-LABEL: stack_fold_cvtpd2dq_ymm
+ ;CHECK: vcvtpd2dqy {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <4 x i32> @llvm.x86.avx.cvt.pd2dq.256(<4 x double> %a0)
+ ret <4 x i32> %2
+}
+declare <4 x i32> @llvm.x86.avx.cvt.pd2dq.256(<4 x double>) nounwind readnone
+
+define <2 x float> @stack_fold_cvtpd2ps(<2 x double> %a0) {
+ ;CHECK-LABEL: stack_fold_cvtpd2ps
+ ;CHECK: vcvtpd2psx {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = fptrunc <2 x double> %a0 to <2 x float>
+ ret <2 x float> %2
+}
+
+define <4 x float> @stack_fold_cvtpd2ps_ymm(<4 x double> %a0) {
+ ;CHECK-LABEL: stack_fold_cvtpd2ps_ymm
+ ;CHECK: vcvtpd2psy {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = fptrunc <4 x double> %a0 to <4 x float>
+ ret <4 x float> %2
+}
+
+define <4 x float> @stack_fold_cvtph2ps(<8 x i16> %a0) {
+ ;CHECK-LABEL: stack_fold_cvtph2ps
+ ;CHECK: vcvtph2ps {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <4 x float> @llvm.x86.vcvtph2ps.128(<8 x i16> %a0)
+ ret <4 x float> %2
+}
+declare <4 x float> @llvm.x86.vcvtph2ps.128(<8 x i16>) nounwind readonly
+
+define <8 x float> @stack_fold_cvtph2ps_ymm(<8 x i16> %a0) {
+ ;CHECK-LABEL: stack_fold_cvtph2ps_ymm
+ ;CHECK: vcvtph2ps {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <8 x float> @llvm.x86.vcvtph2ps.256(<8 x i16> %a0)
+ ret <8 x float> %2
+}
+declare <8 x float> @llvm.x86.vcvtph2ps.256(<8 x i16>) nounwind readonly
+
+define <4 x i32> @stack_fold_cvtps2dq(<4 x float> %a0) {
+ ;CHECK-LABEL: stack_fold_cvtps2dq
+ ;CHECK: vcvtps2dq {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <4 x i32> @llvm.x86.sse2.cvtps2dq(<4 x float> %a0)
+ ret <4 x i32> %2
+}
+declare <4 x i32> @llvm.x86.sse2.cvtps2dq(<4 x float>) nounwind readnone
+
+define <8 x i32> @stack_fold_cvtps2dq_ymm(<8 x float> %a0) {
+ ;CHECK-LABEL: stack_fold_cvtps2dq_ymm
+ ;CHECK: vcvtps2dq {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <8 x i32> @llvm.x86.avx.cvt.ps2dq.256(<8 x float> %a0)
+ ret <8 x i32> %2
+}
+declare <8 x i32> @llvm.x86.avx.cvt.ps2dq.256(<8 x float>) nounwind readnone
+
+define <2 x double> @stack_fold_cvtps2pd(<4 x float> %a0) {
+ ;CHECK-LABEL: stack_fold_cvtps2pd
+ ;CHECK: vcvtps2pd {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <2 x double> @llvm.x86.sse2.cvtps2pd(<4 x float> %a0)
+ ret <2 x double> %2
+}
+declare <2 x double> @llvm.x86.sse2.cvtps2pd(<4 x float>) nounwind readnone
+
+define <4 x double> @stack_fold_cvtps2pd_ymm(<4 x float> %a0) {
+ ;CHECK-LABEL: stack_fold_cvtps2pd_ymm
+ ;CHECK: vcvtps2pd {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <4 x double> @llvm.x86.avx.cvt.ps2.pd.256(<4 x float> %a0)
+ ret <4 x double> %2
+}
+declare <4 x double> @llvm.x86.avx.cvt.ps2.pd.256(<4 x float>) nounwind readnone
+
+define <8 x i16> @stack_fold_cvtps2ph(<4 x float> %a0) {
+ ;CHECK-LABEL: stack_fold_cvtps2ph
+ ;CHECK: vcvtps2ph $0, {{%xmm[0-9][0-9]*}}, {{-?[0-9]*}}(%rsp) {{.*#+}} 16-byte Folded Spill
+ %1 = call <8 x i16> @llvm.x86.vcvtps2ph.128(<4 x float> %a0, i32 0)
+ %2 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ ret <8 x i16> %1
+}
+declare <8 x i16> @llvm.x86.vcvtps2ph.128(<4 x float>, i32) nounwind readonly
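+
+; This test and the ymm variant below check a folded *spill* rather than a
+; reload: the clobbering asm runs after the conversion, so its result must
+; live across the asm, and the store to the stack slot is folded into
+; vcvtps2ph's memory destination.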
+
+define <8 x i16> @stack_fold_cvtps2ph_ymm(<8 x float> %a0) {
+ ;CHECK-LABEL: stack_fold_cvtps2ph_ymm
+ ;CHECK: vcvtps2ph $0, {{%ymm[0-9][0-9]*}}, {{-?[0-9]*}}(%rsp) {{.*#+}} 16-byte Folded Spill
+ %1 = call <8 x i16> @llvm.x86.vcvtps2ph.256(<8 x float> %a0, i32 0)
+ %2 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ ret <8 x i16> %1
+}
+declare <8 x i16> @llvm.x86.vcvtps2ph.256(<8 x float>, i32) nounwind readonly
+
+; TODO stack_fold_cvtsd2si
+
+define i32 @stack_fold_cvtsd2si_int(<2 x double> %a0) {
+ ;CHECK-LABEL: stack_fold_cvtsd2si_int
+ ;CHECK: vcvtsd2si {{-?[0-9]*}}(%rsp), %eax {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call i32 @llvm.x86.sse2.cvtsd2si(<2 x double> %a0)
+ ret i32 %2
+}
+declare i32 @llvm.x86.sse2.cvtsd2si(<2 x double>) nounwind readnone
+
+; TODO stack_fold_cvtsd2si64
+
+define i64 @stack_fold_cvtsd2si64_int(<2 x double> %a0) {
+ ;CHECK-LABEL: stack_fold_cvtsd2si64_int
+ ;CHECK: vcvtsd2si {{-?[0-9]*}}(%rsp), %rax {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call i64 @llvm.x86.sse2.cvtsd2si64(<2 x double> %a0)
+ ret i64 %2
+}
+declare i64 @llvm.x86.sse2.cvtsd2si64(<2 x double>) nounwind readnone
+
+; TODO stack_fold_cvtsd2ss
+
+define <4 x float> @stack_fold_cvtsd2ss_int(<2 x double> %a0) {
+ ;CHECK-LABEL: stack_fold_cvtsd2ss_int
+ ;CHECK: vcvtsd2ss {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <4 x float> @llvm.x86.sse2.cvtsd2ss(<4 x float> <float 0x0, float 0x0, float 0x0, float 0x0>, <2 x double> %a0)
+ ret <4 x float> %2
+}
+declare <4 x float> @llvm.x86.sse2.cvtsd2ss(<4 x float>, <2 x double>) nounwind readnone
+
+define double @stack_fold_cvtsi2sd(i32 %a0) {
+ ;CHECK-LABEL: stack_fold_cvtsi2sd
+ ;CHECK: vcvtsi2sdl {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 4-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{rax},~{rbx},~{rcx},~{rdx},~{rsi},~{rdi},~{rbp},~{r8},~{r9},~{r10},~{r11},~{r12},~{r13},~{r14},~{r15}"()
+ %2 = sitofp i32 %a0 to double
+ ret double %2
+}
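+
+; This and the remaining cvtsi2* tests fold an integer operand, so the asm
+; clobbers the general-purpose registers instead of the xmm registers,
+; forcing the i32/i64 argument onto the stack.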
+
+define <2 x double> @stack_fold_cvtsi2sd_int(i32 %a0) {
+ ;CHECK-LABEL: stack_fold_cvtsi2sd_int
+ ;CHECK: vcvtsi2sdl {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 4-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{rax},~{rbx},~{rcx},~{rdx},~{rsi},~{rdi},~{rbp},~{r8},~{r9},~{r10},~{r11},~{r12},~{r13},~{r14},~{r15}"()
+ %2 = call <2 x double> @llvm.x86.sse2.cvtsi2sd(<2 x double> <double 0x0, double 0x0>, i32 %a0)
+ ret <2 x double> %2
+}
+declare <2 x double> @llvm.x86.sse2.cvtsi2sd(<2 x double>, i32) nounwind readnone
+
+define double @stack_fold_cvtsi642sd(i64 %a0) {
+ ;CHECK-LABEL: stack_fold_cvtsi642sd
+ ;CHECK: vcvtsi2sdq {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 8-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{rax},~{rbx},~{rcx},~{rdx},~{rsi},~{rdi},~{rbp},~{r8},~{r9},~{r10},~{r11},~{r12},~{r13},~{r14},~{r15}"()
+ %2 = sitofp i64 %a0 to double
+ ret double %2
+}
+
+define <2 x double> @stack_fold_cvtsi642sd_int(i64 %a0) {
+ ;CHECK-LABEL: stack_fold_cvtsi642sd_int
+ ;CHECK: vcvtsi2sdq {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 8-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{rax},~{rbx},~{rcx},~{rdx},~{rsi},~{rdi},~{rbp},~{r8},~{r9},~{r10},~{r11},~{r12},~{r13},~{r14},~{r15}"()
+ %2 = call <2 x double> @llvm.x86.sse2.cvtsi642sd(<2 x double> <double 0x0, double 0x0>, i64 %a0)
+ ret <2 x double> %2
+}
+declare <2 x double> @llvm.x86.sse2.cvtsi642sd(<2 x double>, i64) nounwind readnone
+
+define float @stack_fold_cvtsi2ss(i32 %a0) {
+ ;CHECK-LABEL: stack_fold_cvtsi2ss
+ ;CHECK: vcvtsi2ssl {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 4-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{rax},~{rbx},~{rcx},~{rdx},~{rsi},~{rdi},~{rbp},~{r8},~{r9},~{r10},~{r11},~{r12},~{r13},~{r14},~{r15}"()
+ %2 = sitofp i32 %a0 to float
+ ret float %2
+}
+
+define <4 x float> @stack_fold_cvtsi2ss_int(i32 %a0) {
+ ;CHECK-LABEL: stack_fold_cvtsi2ss_int
+ ;CHECK: vcvtsi2ssl {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 4-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{rax},~{rbx},~{rcx},~{rdx},~{rsi},~{rdi},~{rbp},~{r8},~{r9},~{r10},~{r11},~{r12},~{r13},~{r14},~{r15}"()
+ %2 = call <4 x float> @llvm.x86.sse.cvtsi2ss(<4 x float> <float 0x0, float 0x0, float 0x0, float 0x0>, i32 %a0)
+ ret <4 x float> %2
+}
+declare <4 x float> @llvm.x86.sse.cvtsi2ss(<4 x float>, i32) nounwind readnone
+
+define float @stack_fold_cvtsi642ss(i64 %a0) {
+ ;CHECK-LABEL: stack_fold_cvtsi642ss
+ ;CHECK: vcvtsi2ssq {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 8-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{rax},~{rbx},~{rcx},~{rdx},~{rsi},~{rdi},~{rbp},~{r8},~{r9},~{r10},~{r11},~{r12},~{r13},~{r14},~{r15}"()
+ %2 = sitofp i64 %a0 to float
+ ret float %2
+}
+
+define <4 x float> @stack_fold_cvtsi642ss_int(i64 %a0) {
+ ;CHECK-LABEL: stack_fold_cvtsi642ss_int
+ ;CHECK: vcvtsi2ssq {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 8-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{rax},~{rbx},~{rcx},~{rdx},~{rsi},~{rdi},~{rbp},~{r8},~{r9},~{r10},~{r11},~{r12},~{r13},~{r14},~{r15}"()
+ %2 = call <4 x float> @llvm.x86.sse.cvtsi642ss(<4 x float> <float 0x0, float 0x0, float 0x0, float 0x0>, i64 %a0)
+ ret <4 x float> %2
+}
+declare <4 x float> @llvm.x86.sse.cvtsi642ss(<4 x float>, i64) nounwind readnone
+
+; TODO stack_fold_cvtss2sd
+
+define <2 x double> @stack_fold_cvtss2sd_int(<4 x float> %a0) {
+ ;CHECK-LABEL: stack_fold_cvtss2sd_int
+ ;CHECK: vcvtss2sd {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <2 x double> @llvm.x86.sse2.cvtss2sd(<2 x double> <double 0x0, double 0x0>, <4 x float> %a0)
+ ret <2 x double> %2
+}
+declare <2 x double> @llvm.x86.sse2.cvtss2sd(<2 x double>, <4 x float>) nounwind readnone
+
+; TODO stack_fold_cvtss2si
+
+define i32 @stack_fold_cvtss2si_int(<4 x float> %a0) {
+ ;CHECK-LABEL: stack_fold_cvtss2si_int
+ ;CHECK: vcvtss2si {{-?[0-9]*}}(%rsp), %eax {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call i32 @llvm.x86.sse.cvtss2si(<4 x float> %a0)
+ ret i32 %2
+}
+declare i32 @llvm.x86.sse.cvtss2si(<4 x float>) nounwind readnone
+
+; TODO stack_fold_cvtss2si64
+
+define i64 @stack_fold_cvtss2si64_int(<4 x float> %a0) {
+ ;CHECK-LABEL: stack_fold_cvtss2si64_int
+ ;CHECK: vcvtss2si {{-?[0-9]*}}(%rsp), %rax {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call i64 @llvm.x86.sse.cvtss2si64(<4 x float> %a0)
+ ret i64 %2
+}
+declare i64 @llvm.x86.sse.cvtss2si64(<4 x float>) nounwind readnone
+
+define <4 x i32> @stack_fold_cvttpd2dq(<2 x double> %a0) {
+ ;CHECK-LABEL: stack_fold_cvttpd2dq
+ ;CHECK: vcvttpd2dqx {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <4 x i32> @llvm.x86.sse2.cvttpd2dq(<2 x double> %a0)
+ ret <4 x i32> %2
+}
+declare <4 x i32> @llvm.x86.sse2.cvttpd2dq(<2 x double>) nounwind readnone
+
+define <4 x i32> @stack_fold_cvttpd2dq_ymm(<4 x double> %a0) {
+ ;CHECK-LABEL: stack_fold_cvttpd2dq_ymm
+ ;CHECK: vcvttpd2dqy {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = fptosi <4 x double> %a0 to <4 x i32>
+ ret <4 x i32> %2
+}
+
+define <4 x i32> @stack_fold_cvttps2dq(<4 x float> %a0) {
+ ;CHECK-LABEL: stack_fold_cvttps2dq
+ ;CHECK: vcvttps2dq {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = fptosi <4 x float> %a0 to <4 x i32>
+ ret <4 x i32> %2
+}
+
+define <8 x i32> @stack_fold_cvttps2dq_ymm(<8 x float> %a0) {
+ ;CHECK-LABEL: stack_fold_cvttps2dq_ymm
+ ;CHECK: vcvttps2dq {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = fptosi <8 x float> %a0 to <8 x i32>
+ ret <8 x i32> %2
+}
+
+define i32 @stack_fold_cvttsd2si(double %a0) {
+ ;CHECK-LABEL: stack_fold_cvttsd2si
+ ;CHECK: vcvttsd2si {{-?[0-9]*}}(%rsp), %eax {{.*#+}} 8-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = fptosi double %a0 to i32
+ ret i32 %2
+}
+
+define i32 @stack_fold_cvttsd2si_int(<2 x double> %a0) {
+ ;CHECK-LABEL: stack_fold_cvttsd2si_int
+ ;CHECK: vcvttsd2si {{-?[0-9]*}}(%rsp), %eax {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call i32 @llvm.x86.sse2.cvttsd2si(<2 x double> %a0)
+ ret i32 %2
+}
+declare i32 @llvm.x86.sse2.cvttsd2si(<2 x double>) nounwind readnone
+
+define i64 @stack_fold_cvttsd2si64(double %a0) {
+ ;CHECK-LABEL: stack_fold_cvttsd2si64
+ ;CHECK: vcvttsd2si {{-?[0-9]*}}(%rsp), %rax {{.*#+}} 8-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = fptosi double %a0 to i64
+ ret i64 %2
+}
+
+define i64 @stack_fold_cvttsd2si64_int(<2 x double> %a0) {
+ ;CHECK-LABEL: stack_fold_cvttsd2si64_int
+ ;CHECK: vcvttsd2si {{-?[0-9]*}}(%rsp), %rax {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call i64 @llvm.x86.sse2.cvttsd2si64(<2 x double> %a0)
+ ret i64 %2
+}
+declare i64 @llvm.x86.sse2.cvttsd2si64(<2 x double>) nounwind readnone
+
+define i32 @stack_fold_cvttss2si(float %a0) {
+ ;CHECK-LABEL: stack_fold_cvttss2si
+ ;CHECK: vcvttss2si {{-?[0-9]*}}(%rsp), %eax {{.*#+}} 4-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = fptosi float %a0 to i32
+ ret i32 %2
+}
+
+define i32 @stack_fold_cvttss2si_int(<4 x float> %a0) {
+ ;CHECK-LABEL: stack_fold_cvttss2si_int
+ ;CHECK: vcvttss2si {{-?[0-9]*}}(%rsp), %eax {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call i32 @llvm.x86.sse.cvttss2si(<4 x float> %a0)
+ ret i32 %2
+}
+declare i32 @llvm.x86.sse.cvttss2si(<4 x float>) nounwind readnone
+
+define i64 @stack_fold_cvttss2si64(float %a0) {
+ ;CHECK-LABEL: stack_fold_cvttss2si64
+ ;CHECK: vcvttss2si {{-?[0-9]*}}(%rsp), %rax {{.*#+}} 4-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = fptosi float %a0 to i64
+ ret i64 %2
+}
+
+define i64 @stack_fold_cvttss2si64_int(<4 x float> %a0) {
+ ;CHECK-LABEL: stack_fold_cvttss2si64_int
+ ;CHECK: vcvttss2si {{-?[0-9]*}}(%rsp), %rax {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call i64 @llvm.x86.sse.cvttss2si64(<4 x float> %a0)
+ ret i64 %2
+}
+declare i64 @llvm.x86.sse.cvttss2si64(<4 x float>) nounwind readnone
+
+define <2 x double> @stack_fold_divpd(<2 x double> %a0, <2 x double> %a1) {
+ ;CHECK-LABEL: stack_fold_divpd
+ ;CHECK: vdivpd {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = fdiv <2 x double> %a0, %a1
+ ret <2 x double> %2
+}
+
+define <4 x double> @stack_fold_divpd_ymm(<4 x double> %a0, <4 x double> %a1) {
+ ;CHECK-LABEL: stack_fold_divpd_ymm
+ ;CHECK: vdivpd {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = fdiv <4 x double> %a0, %a1
+ ret <4 x double> %2
+}
+
+define <4 x float> @stack_fold_divps(<4 x float> %a0, <4 x float> %a1) {
+ ;CHECK-LABEL: stack_fold_divps
+ ;CHECK: vdivps {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = fdiv <4 x float> %a0, %a1
+ ret <4 x float> %2
+}
+
+define <8 x float> @stack_fold_divps_ymm(<8 x float> %a0, <8 x float> %a1) {
+ ;CHECK-LABEL: stack_fold_divps_ymm
+ ;CHECK: vdivps {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = fdiv <8 x float> %a0, %a1
+ ret <8 x float> %2
+}
+
+define double @stack_fold_divsd(double %a0, double %a1) {
+ ;CHECK-LABEL: stack_fold_divsd
+ ;CHECK: vdivsd {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 8-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = fdiv double %a0, %a1
+ ret double %2
+}
+
+define <2 x double> @stack_fold_divsd_int(<2 x double> %a0, <2 x double> %a1) {
+ ;CHECK-LABEL: stack_fold_divsd_int
+ ;CHECK: vdivsd {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <2 x double> @llvm.x86.sse2.div.sd(<2 x double> %a0, <2 x double> %a1)
+ ret <2 x double> %2
+}
+declare <2 x double> @llvm.x86.sse2.div.sd(<2 x double>, <2 x double>) nounwind readnone
+
+define float @stack_fold_divss(float %a0, float %a1) {
+ ;CHECK-LABEL: stack_fold_divss
+ ;CHECK: vdivss {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 4-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = fdiv float %a0, %a1
+ ret float %2
+}
+
+define <4 x float> @stack_fold_divss_int(<4 x float> %a0, <4 x float> %a1) {
+ ;CHECK-LABEL: stack_fold_divss_int
+ ;CHECK: vdivss {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <4 x float> @llvm.x86.sse.div.ss(<4 x float> %a0, <4 x float> %a1)
+ ret <4 x float> %2
+}
+declare <4 x float> @llvm.x86.sse.div.ss(<4 x float>, <4 x float>) nounwind readnone
+
+define <2 x double> @stack_fold_dppd(<2 x double> %a0, <2 x double> %a1) {
+ ;CHECK-LABEL: stack_fold_dppd
+ ;CHECK: vdppd $7, {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <2 x double> @llvm.x86.sse41.dppd(<2 x double> %a0, <2 x double> %a1, i8 7)
+ ret <2 x double> %2
+}
+declare <2 x double> @llvm.x86.sse41.dppd(<2 x double>, <2 x double>, i8) nounwind readnone
+
+define <4 x float> @stack_fold_dpps(<4 x float> %a0, <4 x float> %a1) {
+ ;CHECK-LABEL: stack_fold_dpps
+ ;CHECK: vdpps $7, {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <4 x float> @llvm.x86.sse41.dpps(<4 x float> %a0, <4 x float> %a1, i8 7)
+ ret <4 x float> %2
+}
+declare <4 x float> @llvm.x86.sse41.dpps(<4 x float>, <4 x float>, i8) nounwind readnone
+
+define <8 x float> @stack_fold_dpps_ymm(<8 x float> %a0, <8 x float> %a1) {
+ ;CHECK-LABEL: stack_fold_dpps_ymm
+ ;CHECK: vdpps $7, {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <8 x float> @llvm.x86.avx.dp.ps.256(<8 x float> %a0, <8 x float> %a1, i8 7)
+ ret <8 x float> %2
+}
+declare <8 x float> @llvm.x86.avx.dp.ps.256(<8 x float>, <8 x float>, i8) nounwind readnone
+
+define <4 x float> @stack_fold_extractf128(<8 x float> %a0, <8 x float> %a1) {
+ ;CHECK-LABEL: stack_fold_extractf128
+ ;CHECK: vextractf128 $1, {{%ymm[0-9][0-9]*}}, {{-?[0-9]*}}(%rsp) {{.*#+}} 16-byte Folded Spill
+ %1 = shufflevector <8 x float> %a0, <8 x float> %a1, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
+ %2 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ ret <4 x float> %1
+}
+
+define i32 @stack_fold_extractps(<4 x float> %a0) {
+ ;CHECK-LABEL: stack_fold_extractps
+ ;CHECK: vextractps $1, {{%xmm[0-9][0-9]*}}, {{-?[0-9]*}}(%rsp) {{.*#+}} 4-byte Folded Spill
+ ;CHECK: movl {{-?[0-9]*}}(%rsp), %eax {{.*#+}} 4-byte Reload
+ %1 = extractelement <4 x float> %a0, i32 1
+ %2 = bitcast float %1 to i32
+ %3 = tail call <2 x i64> asm sideeffect "nop", "=x,~{rax},~{rbx},~{rcx},~{rdx},~{rsi},~{rdi},~{rbp},~{r8},~{r9},~{r10},~{r11},~{r12},~{r13},~{r14},~{r15}"()
+ ret i32 %2
+}
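+
+; The two extract tests above check folded spills: the extracted value is
+; stored directly to the stack slot, and extractps additionally reloads it
+; into %eax after the GPR-clobbering asm.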
+
+define <2 x double> @stack_fold_haddpd(<2 x double> %a0, <2 x double> %a1) {
+ ;CHECK-LABEL: stack_fold_haddpd
+ ;CHECK: vhaddpd {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <2 x double> @llvm.x86.sse3.hadd.pd(<2 x double> %a0, <2 x double> %a1)
+ ret <2 x double> %2
+}
+declare <2 x double> @llvm.x86.sse3.hadd.pd(<2 x double>, <2 x double>) nounwind readnone
+
+define <4 x double> @stack_fold_haddpd_ymm(<4 x double> %a0, <4 x double> %a1) {
+ ;CHECK-LABEL: stack_fold_haddpd_ymm
+ ;CHECK: vhaddpd {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <4 x double> @llvm.x86.avx.hadd.pd.256(<4 x double> %a0, <4 x double> %a1)
+ ret <4 x double> %2
+}
+declare <4 x double> @llvm.x86.avx.hadd.pd.256(<4 x double>, <4 x double>) nounwind readnone
+
+define <4 x float> @stack_fold_haddps(<4 x float> %a0, <4 x float> %a1) {
+ ;CHECK-LABEL: stack_fold_haddps
+ ;CHECK: vhaddps {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <4 x float> @llvm.x86.sse3.hadd.ps(<4 x float> %a0, <4 x float> %a1)
+ ret <4 x float> %2
+}
+declare <4 x float> @llvm.x86.sse3.hadd.ps(<4 x float>, <4 x float>) nounwind readnone
+
+define <8 x float> @stack_fold_haddps_ymm(<8 x float> %a0, <8 x float> %a1) {
+ ;CHECK-LABEL: stack_fold_haddps_ymm
+ ;CHECK: vhaddps {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <8 x float> @llvm.x86.avx.hadd.ps.256(<8 x float> %a0, <8 x float> %a1)
+ ret <8 x float> %2
+}
+declare <8 x float> @llvm.x86.avx.hadd.ps.256(<8 x float>, <8 x float>) nounwind readnone
+
+define <2 x double> @stack_fold_hsubpd(<2 x double> %a0, <2 x double> %a1) {
+ ;CHECK-LABEL: stack_fold_hsubpd
+ ;CHECK: vhsubpd {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <2 x double> @llvm.x86.sse3.hsub.pd(<2 x double> %a0, <2 x double> %a1)
+ ret <2 x double> %2
+}
+declare <2 x double> @llvm.x86.sse3.hsub.pd(<2 x double>, <2 x double>) nounwind readnone
+
+define <4 x double> @stack_fold_hsubpd_ymm(<4 x double> %a0, <4 x double> %a1) {
+ ;CHECK-LABEL: stack_fold_hsubpd_ymm
+ ;CHECK: vhsubpd {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <4 x double> @llvm.x86.avx.hsub.pd.256(<4 x double> %a0, <4 x double> %a1)
+ ret <4 x double> %2
+}
+declare <4 x double> @llvm.x86.avx.hsub.pd.256(<4 x double>, <4 x double>) nounwind readnone
+
+define <4 x float> @stack_fold_hsubps(<4 x float> %a0, <4 x float> %a1) {
+ ;CHECK-LABEL: stack_fold_hsubps
+ ;CHECK: vhsubps {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <4 x float> @llvm.x86.sse3.hsub.ps(<4 x float> %a0, <4 x float> %a1)
+ ret <4 x float> %2
+}
+declare <4 x float> @llvm.x86.sse3.hsub.ps(<4 x float>, <4 x float>) nounwind readnone
+
+define <8 x float> @stack_fold_hsubps_ymm(<8 x float> %a0, <8 x float> %a1) {
+ ;CHECK-LABEL: stack_fold_hsubps_ymm
+ ;CHECK: vhsubps {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <8 x float> @llvm.x86.avx.hsub.ps.256(<8 x float> %a0, <8 x float> %a1)
+ ret <8 x float> %2
+}
+declare <8 x float> @llvm.x86.avx.hsub.ps.256(<8 x float>, <8 x float>) nounwind readnone
+
+define <8 x float> @stack_fold_insertf128(<4 x float> %a0, <4 x float> %a1) {
+ ;CHECK-LABEL: stack_fold_insertf128
+ ;CHECK: vinsertf128 $1, {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = shufflevector <4 x float> %a0, <4 x float> %a1, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+ ret <8 x float> %2
+}
+
+; TODO stack_fold_insertps
+
+define <2 x double> @stack_fold_maxpd(<2 x double> %a0, <2 x double> %a1) {
+ ;CHECK-LABEL: stack_fold_maxpd
+ ;CHECK: vmaxpd {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <2 x double> @llvm.x86.sse2.max.pd(<2 x double> %a0, <2 x double> %a1)
+ ret <2 x double> %2
+}
+declare <2 x double> @llvm.x86.sse2.max.pd(<2 x double>, <2 x double>) nounwind readnone
+
+define <4 x double> @stack_fold_maxpd_ymm(<4 x double> %a0, <4 x double> %a1) {
+ ;CHECK-LABEL: stack_fold_maxpd_ymm
+ ;CHECK: vmaxpd {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <4 x double> @llvm.x86.avx.max.pd.256(<4 x double> %a0, <4 x double> %a1)
+ ret <4 x double> %2
+}
+declare <4 x double> @llvm.x86.avx.max.pd.256(<4 x double>, <4 x double>) nounwind readnone
+
+define <4 x float> @stack_fold_maxps(<4 x float> %a0, <4 x float> %a1) {
+ ;CHECK-LABEL: stack_fold_maxps
+ ;CHECK: vmaxps {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <4 x float> @llvm.x86.sse.max.ps(<4 x float> %a0, <4 x float> %a1)
+ ret <4 x float> %2
+}
+declare <4 x float> @llvm.x86.sse.max.ps(<4 x float>, <4 x float>) nounwind readnone
+
+define <8 x float> @stack_fold_maxps_ymm(<8 x float> %a0, <8 x float> %a1) {
+ ;CHECK-LABEL: stack_fold_maxps_ymm
+ ;CHECK: vmaxps {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <8 x float> @llvm.x86.avx.max.ps.256(<8 x float> %a0, <8 x float> %a1)
+ ret <8 x float> %2
+}
+declare <8 x float> @llvm.x86.avx.max.ps.256(<8 x float>, <8 x float>) nounwind readnone
+
+define double @stack_fold_maxsd(double %a0, double %a1) {
+ ;CHECK-LABEL: stack_fold_maxsd
+ ;CHECK: vmaxsd {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 8-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = fcmp ogt double %a0, %a1
+ %3 = select i1 %2, double %a0, double %a1
+ ret double %3
+}
+
+define <2 x double> @stack_fold_maxsd_int(<2 x double> %a0, <2 x double> %a1) {
+ ;CHECK-LABEL: stack_fold_maxsd_int
+ ;CHECK: vmaxsd {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <2 x double> @llvm.x86.sse2.max.sd(<2 x double> %a0, <2 x double> %a1)
+ ret <2 x double> %2
+}
+declare <2 x double> @llvm.x86.sse2.max.sd(<2 x double>, <2 x double>) nounwind readnone
+
+define float @stack_fold_maxss(float %a0, float %a1) {
+ ;CHECK-LABEL: stack_fold_maxss
+ ;CHECK: vmaxss {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 4-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = fcmp ogt float %a0, %a1
+ %3 = select i1 %2, float %a0, float %a1
+ ret float %3
+}
+
+define <4 x float> @stack_fold_maxss_int(<4 x float> %a0, <4 x float> %a1) {
+ ;CHECK-LABEL: stack_fold_maxss_int
+ ;CHECK: vmaxss {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <4 x float> @llvm.x86.sse.max.ss(<4 x float> %a0, <4 x float> %a1)
+ ret <4 x float> %2
+}
+declare <4 x float> @llvm.x86.sse.max.ss(<4 x float>, <4 x float>) nounwind readnone
+
+define <2 x double> @stack_fold_minpd(<2 x double> %a0, <2 x double> %a1) {
+ ;CHECK-LABEL: stack_fold_minpd
+ ;CHECK: vminpd {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <2 x double> @llvm.x86.sse2.min.pd(<2 x double> %a0, <2 x double> %a1)
+ ret <2 x double> %2
+}
+declare <2 x double> @llvm.x86.sse2.min.pd(<2 x double>, <2 x double>) nounwind readnone
+
+define <4 x double> @stack_fold_minpd_ymm(<4 x double> %a0, <4 x double> %a1) {
+ ;CHECK-LABEL: stack_fold_minpd_ymm
+ ;CHECK: vminpd {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <4 x double> @llvm.x86.avx.min.pd.256(<4 x double> %a0, <4 x double> %a1)
+ ret <4 x double> %2
+}
+declare <4 x double> @llvm.x86.avx.min.pd.256(<4 x double>, <4 x double>) nounwind readnone
+
+define <4 x float> @stack_fold_minps(<4 x float> %a0, <4 x float> %a1) {
+ ;CHECK-LABEL: stack_fold_minps
+ ;CHECK: vminps {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <4 x float> @llvm.x86.sse.min.ps(<4 x float> %a0, <4 x float> %a1)
+ ret <4 x float> %2
+}
+declare <4 x float> @llvm.x86.sse.min.ps(<4 x float>, <4 x float>) nounwind readnone
+
+define <8 x float> @stack_fold_minps_ymm(<8 x float> %a0, <8 x float> %a1) {
+ ;CHECK-LABEL: stack_fold_minps_ymm
+ ;CHECK: vminps {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <8 x float> @llvm.x86.avx.min.ps.256(<8 x float> %a0, <8 x float> %a1)
+ ret <8 x float> %2
+}
+declare <8 x float> @llvm.x86.avx.min.ps.256(<8 x float>, <8 x float>) nounwind readnone
+
+define double @stack_fold_minsd(double %a0, double %a1) {
+ ;CHECK-LABEL: stack_fold_minsd
+ ;CHECK: vminsd {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 8-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = fcmp olt double %a0, %a1
+ %3 = select i1 %2, double %a0, double %a1
+ ret double %3
+}
+
+define <2 x double> @stack_fold_minsd_int(<2 x double> %a0, <2 x double> %a1) {
+ ;CHECK-LABEL: stack_fold_minsd_int
+ ;CHECK: vminsd {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <2 x double> @llvm.x86.sse2.min.sd(<2 x double> %a0, <2 x double> %a1)
+ ret <2 x double> %2
+}
+declare <2 x double> @llvm.x86.sse2.min.sd(<2 x double>, <2 x double>) nounwind readnone
+
+define float @stack_fold_minss(float %a0, float %a1) {
+ ;CHECK-LABEL: stack_fold_minss
+ ;CHECK: vminss {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 4-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = fcmp olt float %a0, %a1
+ %3 = select i1 %2, float %a0, float %a1
+ ret float %3
+}
+
+define <4 x float> @stack_fold_minss_int(<4 x float> %a0, <4 x float> %a1) {
+ ;CHECK-LABEL: stack_fold_minss_int
+ ;CHECK: vminss {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <4 x float> @llvm.x86.sse.min.ss(<4 x float> %a0, <4 x float> %a1)
+ ret <4 x float> %2
+}
+declare <4 x float> @llvm.x86.sse.min.ss(<4 x float>, <4 x float>) nounwind readnone
+
+define <2 x double> @stack_fold_movddup(<2 x double> %a0) {
+ ;CHECK-LABEL: stack_fold_movddup
+ ;CHECK: vmovddup {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = shufflevector <2 x double> %a0, <2 x double> undef, <2 x i32> <i32 0, i32 0>
+ ret <2 x double> %2
+}
+
+define <4 x double> @stack_fold_movddup_ymm(<4 x double> %a0) {
+ ;CHECK-LABEL: stack_fold_movddup_ymm
+ ;CHECK: vmovddup {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = shufflevector <4 x double> %a0, <4 x double> undef, <4 x i32> <i32 0, i32 0, i32 2, i32 2>
+ ret <4 x double> %2
+}
+
+; TODO stack_fold_movhpd (load / store)
+; TODO stack_fold_movhps (load / store)
+
+; TODO stack_fold_movlpd (load / store)
+; TODO stack_fold_movlps (load / store)
+
+define <4 x float> @stack_fold_movshdup(<4 x float> %a0) {
+ ;CHECK-LABEL: stack_fold_movshdup
+ ;CHECK: vmovshdup {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = shufflevector <4 x float> %a0, <4 x float> undef, <4 x i32> <i32 1, i32 1, i32 3, i32 3>
+ ret <4 x float> %2
+}
+
+define <8 x float> @stack_fold_movshdup_ymm(<8 x float> %a0) {
+ ;CHECK-LABEL: stack_fold_movshdup_ymm
+ ;CHECK: vmovshdup {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = shufflevector <8 x float> %a0, <8 x float> undef, <8 x i32> <i32 1, i32 1, i32 3, i32 3, i32 5, i32 5, i32 7, i32 7>
+ ret <8 x float> %2
+}
+
+define <4 x float> @stack_fold_movsldup(<4 x float> %a0) {
+ ;CHECK-LABEL: stack_fold_movsldup
+ ;CHECK: vmovsldup {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = shufflevector <4 x float> %a0, <4 x float> undef, <4 x i32> <i32 0, i32 0, i32 2, i32 2>
+ ret <4 x float> %2
+}
+
+define <8 x float> @stack_fold_movsldup_ymm(<8 x float> %a0) {
+ ;CHECK-LABEL: stack_fold_movsldup_ymm
+ ;CHECK: vmovsldup {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = shufflevector <8 x float> %a0, <8 x float> undef, <8 x i32> <i32 0, i32 0, i32 2, i32 2, i32 4, i32 4, i32 6, i32 6>
+ ret <8 x float> %2
+}
+
+define <2 x double> @stack_fold_mulpd(<2 x double> %a0, <2 x double> %a1) {
+ ;CHECK-LABEL: stack_fold_mulpd
+ ;CHECK: vmulpd {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = fmul <2 x double> %a0, %a1
+ ret <2 x double> %2
+}
+
+define <4 x double> @stack_fold_mulpd_ymm(<4 x double> %a0, <4 x double> %a1) {
+ ;CHECK-LABEL: stack_fold_mulpd_ymm
+ ;CHECK: vmulpd {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = fmul <4 x double> %a0, %a1
+ ret <4 x double> %2
+}
+
+define <4 x float> @stack_fold_mulps(<4 x float> %a0, <4 x float> %a1) {
+ ;CHECK-LABEL: stack_fold_mulps
+ ;CHECK: vmulps {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = fmul <4 x float> %a0, %a1
+ ret <4 x float> %2
+}
+
+define <8 x float> @stack_fold_mulps_ymm(<8 x float> %a0, <8 x float> %a1) {
+ ;CHECK-LABEL: stack_fold_mulps_ymm
+ ;CHECK: vmulps {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = fmul <8 x float> %a0, %a1
+ ret <8 x float> %2
+}
+
+define double @stack_fold_mulsd(double %a0, double %a1) {
+ ;CHECK-LABEL: stack_fold_mulsd
+ ;CHECK: vmulsd {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 8-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = fmul double %a0, %a1
+ ret double %2
+}
+
+define <2 x double> @stack_fold_mulsd_int(<2 x double> %a0, <2 x double> %a1) {
+ ;CHECK-LABEL: stack_fold_mulsd_int
+ ;CHECK: vmulsd {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <2 x double> @llvm.x86.sse2.mul.sd(<2 x double> %a0, <2 x double> %a1)
+ ret <2 x double> %2
+}
+declare <2 x double> @llvm.x86.sse2.mul.sd(<2 x double>, <2 x double>) nounwind readnone
+
+define float @stack_fold_mulss(float %a0, float %a1) {
+ ;CHECK-LABEL: stack_fold_mulss
+ ;CHECK: vmulss {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 4-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = fmul float %a0, %a1
+ ret float %2
+}
+
+define <4 x float> @stack_fold_mulss_int(<4 x float> %a0, <4 x float> %a1) {
+ ;CHECK-LABEL: stack_fold_mulss_int
+ ;CHECK: vmulss {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <4 x float> @llvm.x86.sse.mul.ss(<4 x float> %a0, <4 x float> %a1)
+ ret <4 x float> %2
+}
+declare <4 x float> @llvm.x86.sse.mul.ss(<4 x float>, <4 x float>) nounwind readnone
+
+define <2 x double> @stack_fold_orpd(<2 x double> %a0, <2 x double> %a1) {
+ ;CHECK-LABEL: stack_fold_orpd
+ ;CHECK: vorpd {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = bitcast <2 x double> %a0 to <2 x i64>
+ %3 = bitcast <2 x double> %a1 to <2 x i64>
+ %4 = or <2 x i64> %2, %3
+ %5 = bitcast <2 x i64> %4 to <2 x double>
+ ; fadd forces execution domain
+ %6 = fadd <2 x double> %5, <double 0x0, double 0x0>
+ ret <2 x double> %6
+}
+
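+; Note on the trailing fadd in these bitwise tests: OR/AND/XOR of FP vectors
+; can be encoded in either the integer (vpor) or floating-point (vorpd/vorps)
+; domain, and the execution-domain fixup pass picks one from the surrounding
+; code. Feeding the result into an fadd pins the computation to the FP domain,
+; so the folded reload reliably shows up as vorpd/vorps (the likely intent of
+; the "fadd forces execution domain" comment).
+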
+define <4 x double> @stack_fold_orpd_ymm(<4 x double> %a0, <4 x double> %a1) {
+ ;CHECK-LABEL: stack_fold_orpd_ymm
+ ;CHECK: vorpd {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = bitcast <4 x double> %a0 to <4 x i64>
+ %3 = bitcast <4 x double> %a1 to <4 x i64>
+ %4 = or <4 x i64> %2, %3
+ %5 = bitcast <4 x i64> %4 to <4 x double>
+ ; fadd forces execution domain
+ %6 = fadd <4 x double> %5, <double 0x0, double 0x0, double 0x0, double 0x0>
+ ret <4 x double> %6
+}
+
+define <4 x float> @stack_fold_orps(<4 x float> %a0, <4 x float> %a1) {
+ ;CHECK-LABEL: stack_fold_orps
+ ;CHECK: vorps {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = bitcast <4 x float> %a0 to <2 x i64>
+ %3 = bitcast <4 x float> %a1 to <2 x i64>
+ %4 = or <2 x i64> %2, %3
+ %5 = bitcast <2 x i64> %4 to <4 x float>
+ ; fadd forces execution domain
+ %6 = fadd <4 x float> %5, <float 0x0, float 0x0, float 0x0, float 0x0>
+ ret <4 x float> %6
+}
+
+define <8 x float> @stack_fold_orps_ymm(<8 x float> %a0, <8 x float> %a1) {
+ ;CHECK-LABEL: stack_fold_orps_ymm
+ ;CHECK: vorps {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = bitcast <8 x float> %a0 to <4 x i64>
+ %3 = bitcast <8 x float> %a1 to <4 x i64>
+ %4 = or <4 x i64> %2, %3
+ %5 = bitcast <4 x i64> %4 to <8 x float>
+ ; fadd forces execution domain
+ %6 = fadd <8 x float> %5, <float 0x0, float 0x0, float 0x0, float 0x0, float 0x0, float 0x0, float 0x0, float 0x0>
+ ret <8 x float> %6
+}
+
+define <8 x float> @stack_fold_perm2f128(<8 x float> %a0, <8 x float> %a1) {
+ ;CHECK-LABEL: stack_fold_perm2f128
+ ;CHECK: vperm2f128 $33, {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = shufflevector <8 x float> %a0, <8 x float> %a1, <8 x i32> <i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11>
+ ret <8 x float> %2
+}
+
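+; Decoding the $33 above (assumed VPERM2F128 immediate layout, not spelled out
+; in the test): imm8[1:0] selects the low 128-bit lane of the result and
+; imm8[5:4] the high lane, with values 0/1 naming the lanes of the first source
+; and 2/3 the lanes of the second. $33 = 0x21 -> low = upper lane of %a0
+; (elements 4-7), high = lower lane of %a1 (elements 8-11), matching the mask
+; <4,5,6,7,8,9,10,11>.
+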
+define <2 x double> @stack_fold_permilpd(<2 x double> %a0) {
+ ;CHECK-LABEL: stack_fold_permilpd
+ ;CHECK: vpermilpd $1, {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = shufflevector <2 x double> %a0, <2 x double> undef, <2 x i32> <i32 1, i32 0>
+ ret <2 x double> %2
+}
+
+define <4 x double> @stack_fold_permilpd_ymm(<4 x double> %a0) {
+ ;CHECK-LABEL: stack_fold_permilpd_ymm
+ ;CHECK: vpermilpd $5, {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = shufflevector <4 x double> %a0, <4 x double> undef, <4 x i32> <i32 1, i32 0, i32 3, i32 2>
+ ret <4 x double> %2
+}
+
+define <2 x double> @stack_fold_permilpdvar(<2 x double> %a0, <2 x i64> %a1) {
+ ;CHECK-LABEL: stack_fold_permilpdvar
+ ;CHECK: vpermilpd {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <2 x double> @llvm.x86.avx.vpermilvar.pd(<2 x double> %a0, <2 x i64> %a1)
+ ret <2 x double> %2
+}
+declare <2 x double> @llvm.x86.avx.vpermilvar.pd(<2 x double>, <2 x i64>) nounwind readnone
+
+define <4 x double> @stack_fold_permilpdvar_ymm(<4 x double> %a0, <4 x i64> %a1) {
+ ;CHECK-LABEL: stack_fold_permilpdvar_ymm
+ ;CHECK: vpermilpd {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <4 x double> @llvm.x86.avx.vpermilvar.pd.256(<4 x double> %a0, <4 x i64> %a1)
+ ret <4 x double> %2
+}
+declare <4 x double> @llvm.x86.avx.vpermilvar.pd.256(<4 x double>, <4 x i64>) nounwind readnone
+
+define <4 x float> @stack_fold_permilps(<4 x float> %a0) {
+ ;CHECK-LABEL: stack_fold_permilps
+ ;CHECK: vpermilps $27, {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = shufflevector <4 x float> %a0, <4 x float> undef, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
+ ret <4 x float> %2
+}
+
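+; $27 decodes as 0b00011011 per the VPERMILPS immediate format: four 2-bit
+; fields selecting source elements 3,2,1,0 for result elements 0-3, i.e. a
+; full reverse, matching the <3,2,1,0> mask; the ymm test below applies the
+; same pattern per 128-bit lane.
+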
+define <8 x float> @stack_fold_permilps_ymm(<8 x float> %a0) {
+ ;CHECK-LABEL: stack_fold_permilps_ymm
+ ;CHECK: vpermilps $27, {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = shufflevector <8 x float> %a0, <8 x float> undef, <8 x i32> <i32 3, i32 2, i32 1, i32 0, i32 7, i32 6, i32 5, i32 4>
+ ret <8 x float> %2
+}
+
+define <4 x float> @stack_fold_permilpsvar(<4 x float> %a0, <4 x i32> %a1) {
+ ;CHECK-LABEL: stack_fold_permilpsvar
+ ;CHECK: vpermilps {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <4 x float> @llvm.x86.avx.vpermilvar.ps(<4 x float> %a0, <4 x i32> %a1)
+ ret <4 x float> %2
+}
+declare <4 x float> @llvm.x86.avx.vpermilvar.ps(<4 x float>, <4 x i32>) nounwind readnone
+
+define <8 x float> @stack_fold_permilpsvar_ymm(<8 x float> %a0, <8 x i32> %a1) {
+ ;CHECK-LABEL: stack_fold_permilpsvar_ymm
+ ;CHECK: vpermilps {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <8 x float> @llvm.x86.avx.vpermilvar.ps.256(<8 x float> %a0, <8 x i32> %a1)
+ ret <8 x float> %2
+}
+declare <8 x float> @llvm.x86.avx.vpermilvar.ps.256(<8 x float>, <8 x i32>) nounwind readnone
+
+; TODO stack_fold_rcpps
+
+define <4 x float> @stack_fold_rcpps_int(<4 x float> %a0) {
+ ;CHECK-LABEL: stack_fold_rcpps_int
+ ;CHECK: vrcpps {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <4 x float> @llvm.x86.sse.rcp.ps(<4 x float> %a0)
+ ret <4 x float> %2
+}
+declare <4 x float> @llvm.x86.sse.rcp.ps(<4 x float>) nounwind readnone
+
+; TODO stack_fold_rcpps_ymm
+
+define <8 x float> @stack_fold_rcpps_ymm_int(<8 x float> %a0) {
+ ;CHECK-LABEL: stack_fold_rcpps_ymm_int
+ ;CHECK: vrcpps {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <8 x float> @llvm.x86.avx.rcp.ps.256(<8 x float> %a0)
+ ret <8 x float> %2
+}
+declare <8 x float> @llvm.x86.avx.rcp.ps.256(<8 x float>) nounwind readnone
+
+; TODO stack_fold_rcpss
+
+define <4 x float> @stack_fold_rcpss_int(<4 x float> %a0) {
+ ;CHECK-LABEL: stack_fold_rcpss_int
+ ;CHECK: vrcpss {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <4 x float> @llvm.x86.sse.rcp.ss(<4 x float> %a0)
+ ret <4 x float> %2
+}
+declare <4 x float> @llvm.x86.sse.rcp.ss(<4 x float>) nounwind readnone
+
+define <2 x double> @stack_fold_roundpd(<2 x double> %a0) {
+ ;CHECK-LABEL: stack_fold_roundpd
+ ;CHECK: vroundpd $7, {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <2 x double> @llvm.x86.sse41.round.pd(<2 x double> %a0, i32 7)
+ ret <2 x double> %2
+}
+declare <2 x double> @llvm.x86.sse41.round.pd(<2 x double>, i32) nounwind readnone
+
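+; The $7 immediate is checked mainly to confirm it survives the fold; per the
+; ROUNDPD imm8 layout (background assumption, not stated in the test), bit 2
+; being set defers the rounding mode to MXCSR.RC.
+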
+define <4 x double> @stack_fold_roundpd_ymm(<4 x double> %a0) {
+ ;CHECK-LABEL: stack_fold_roundpd_ymm
+ ;CHECK: vroundpd $7, {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <4 x double> @llvm.x86.avx.round.pd.256(<4 x double> %a0, i32 7)
+ ret <4 x double> %2
+}
+declare <4 x double> @llvm.x86.avx.round.pd.256(<4 x double>, i32) nounwind readnone
+
+define <4 x float> @stack_fold_roundps(<4 x float> %a0) {
+ ;CHECK-LABEL: stack_fold_roundps
+ ;CHECK: vroundps $7, {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <4 x float> @llvm.x86.sse41.round.ps(<4 x float> %a0, i32 7)
+ ret <4 x float> %2
+}
+declare <4 x float> @llvm.x86.sse41.round.ps(<4 x float>, i32) nounwind readnone
+
+define <8 x float> @stack_fold_roundps_ymm(<8 x float> %a0) {
+ ;CHECK-LABEL: stack_fold_roundps_ymm
+ ;CHECK: vroundps $7, {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <8 x float> @llvm.x86.avx.round.ps.256(<8 x float> %a0, i32 7)
+ ret <8 x float> %2
+}
+declare <8 x float> @llvm.x86.avx.round.ps.256(<8 x float>, i32) nounwind readnone
+
+; TODO stack_fold_roundsd
+
+; TODO stack_fold_roundsd_int
+declare <2 x double> @llvm.x86.sse41.round.sd(<2 x double>, <2 x double>, i32) nounwind readnone
+
+; TODO stack_fold_roundss
+
+; TODO stack_fold_roundss_int
+declare <4 x float> @llvm.x86.sse41.round.ss(<4 x float>, <4 x float>, i32) nounwind readnone
+
+; TODO stack_fold_rsqrtps
+
+define <4 x float> @stack_fold_rsqrtps_int(<4 x float> %a0) {
+ ;CHECK-LABEL: stack_fold_rsqrtps_int
+ ;CHECK: vrsqrtps {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <4 x float> @llvm.x86.sse.rsqrt.ps(<4 x float> %a0)
+ ret <4 x float> %2
+}
+declare <4 x float> @llvm.x86.sse.rsqrt.ps(<4 x float>) nounwind readnone
+
+; TODO stack_fold_rsqrtps_ymm
+
+define <8 x float> @stack_fold_rsqrtps_ymm_int(<8 x float> %a0) {
+ ;CHECK-LABEL: stack_fold_rsqrtps_ymm_int
+ ;CHECK: vrsqrtps {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <8 x float> @llvm.x86.avx.rsqrt.ps.256(<8 x float> %a0)
+ ret <8 x float> %2
+}
+declare <8 x float> @llvm.x86.avx.rsqrt.ps.256(<8 x float>) nounwind readnone
+
+; TODO stack_fold_rsqrtss
+
+define <4 x float> @stack_fold_rsqrtss_int(<4 x float> %a0) {
+ ;CHECK-LABEL: stack_fold_rsqrtss_int
+ ;CHECK: vrsqrtss {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <4 x float> @llvm.x86.sse.rsqrt.ss(<4 x float> %a0)
+ ret <4 x float> %2
+}
+declare <4 x float> @llvm.x86.sse.rsqrt.ss(<4 x float>) nounwind readnone
+
+define <2 x double> @stack_fold_shufpd(<2 x double> %a0, <2 x double> %a1) {
+ ;CHECK-LABEL: stack_fold_shufpd
+ ;CHECK: vshufpd $1, {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = shufflevector <2 x double> %a0, <2 x double> %a1, <2 x i32> <i32 1, i32 2>
+ ret <2 x double> %2
+}
+
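+; SHUFPD immediate decode for this test and the ymm variant below (assumed
+; layout): each bit picks the low (0) or high (1) element, lane 0 coming from
+; the first source and lane 1 from the second. <1,2> -> a0[1], a1[0] ->
+; 0b01 = $1, and the ymm mask <1,4,3,6> repeats that per lane -> 0b0101 = $5.
+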
+define <4 x double> @stack_fold_shufpd_ymm(<4 x double> %a0, <4 x double> %a1) {
+ ;CHECK-LABEL: stack_fold_shufpd_ymm
+ ;CHECK: vshufpd $5, {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = shufflevector <4 x double> %a0, <4 x double> %a1, <4 x i32> <i32 1, i32 4, i32 3, i32 6>
+ ret <4 x double> %2
+}
+
+define <4 x float> @stack_fold_shufps(<4 x float> %a0, <4 x float> %a1) {
+ ;CHECK-LABEL: stack_fold_shufps
+ ;CHECK: vshufps $200, {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = shufflevector <4 x float> %a0, <4 x float> %a1, <4 x i32> <i32 0, i32 2, i32 4, i32 7>
+ ret <4 x float> %2
+}
+
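+; Worked SHUFPS immediates: four 2-bit selectors, the low two indexing the
+; first source and the high two the second. <0,2,4,7> -> fields 0,2,0,3 ->
+; 0b11001000 = 200 (above), and <0,1,9,10> -> fields 0,1,1,2 ->
+; 0b10010100 = 148 (ymm test below).
+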
+define <8 x float> @stack_fold_shufps_ymm(<8 x float> %a0, <8 x float> %a1) {
+ ;CHECK-LABEL: stack_fold_shufps_ymm
+ ;CHECK: vshufps $148, {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = shufflevector <8 x float> %a0, <8 x float> %a1, <8 x i32> <i32 0, i32 1, i32 9, i32 10, i32 undef, i32 undef, i32 undef, i32 undef>
+ ret <8 x float> %2
+}
+
+define <2 x double> @stack_fold_sqrtpd(<2 x double> %a0) {
+ ;CHECK-LABEL: stack_fold_sqrtpd
+ ;CHECK: vsqrtpd {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <2 x double> @llvm.x86.sse2.sqrt.pd(<2 x double> %a0)
+ ret <2 x double> %2
+}
+declare <2 x double> @llvm.x86.sse2.sqrt.pd(<2 x double>) nounwind readnone
+
+define <4 x double> @stack_fold_sqrtpd_ymm(<4 x double> %a0) {
+ ;CHECK-LABEL: stack_fold_sqrtpd_ymm
+ ;CHECK: vsqrtpd {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <4 x double> @llvm.x86.avx.sqrt.pd.256(<4 x double> %a0)
+ ret <4 x double> %2
+}
+declare <4 x double> @llvm.x86.avx.sqrt.pd.256(<4 x double>) nounwind readnone
+
+define <4 x float> @stack_fold_sqrtps(<4 x float> %a0) {
+ ;CHECK-LABEL: stack_fold_sqrtps
+ ;CHECK: vsqrtps {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <4 x float> @llvm.x86.sse.sqrt.ps(<4 x float> %a0)
+ ret <4 x float> %2
+}
+declare <4 x float> @llvm.x86.sse.sqrt.ps(<4 x float>) nounwind readnone
+
+define <8 x float> @stack_fold_sqrtps_ymm(<8 x float> %a0) {
+ ;CHECK-LABEL: stack_fold_sqrtps_ymm
+ ;CHECK: vsqrtps {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <8 x float> @llvm.x86.avx.sqrt.ps.256(<8 x float> %a0)
+ ret <8 x float> %2
+}
+declare <8 x float> @llvm.x86.avx.sqrt.ps.256(<8 x float>) nounwind readnone
+
+define double @stack_fold_sqrtsd(double %a0) {
+ ;CHECK-LABEL: stack_fold_sqrtsd
+ ;CHECK: vsqrtsd {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 8-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call double @llvm.sqrt.f64(double %a0)
+ ret double %2
+}
+declare double @llvm.sqrt.f64(double) nounwind readnone
+
+define <2 x double> @stack_fold_sqrtsd_int(<2 x double> %a0) {
+ ;CHECK-LABEL: stack_fold_sqrtsd_int
+ ;CHECK: vsqrtsd {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <2 x double> @llvm.x86.sse2.sqrt.sd(<2 x double> %a0)
+ ret <2 x double> %2
+}
+declare <2 x double> @llvm.x86.sse2.sqrt.sd(<2 x double>) nounwind readnone
+
+define float @stack_fold_sqrtss(float %a0) {
+ ;CHECK-LABEL: stack_fold_sqrtss
+ ;CHECK: vsqrtss {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 4-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call float @llvm.sqrt.f32(float %a0)
+ ret float %2
+}
+declare float @llvm.sqrt.f32(float) nounwind readnone
+
+define <4 x float> @stack_fold_sqrtss_int(<4 x float> %a0) {
+ ;CHECK-LABEL: stack_fold_sqrtss_int
+ ;CHECK: vsqrtss {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <4 x float> @llvm.x86.sse.sqrt.ss(<4 x float> %a0)
+ ret <4 x float> %2
+}
+declare <4 x float> @llvm.x86.sse.sqrt.ss(<4 x float>) nounwind readnone
+
+define <2 x double> @stack_fold_subpd(<2 x double> %a0, <2 x double> %a1) {
+ ;CHECK-LABEL: stack_fold_subpd
+ ;CHECK: vsubpd {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = fsub <2 x double> %a0, %a1
+ ret <2 x double> %2
+}
+
+define <4 x double> @stack_fold_subpd_ymm(<4 x double> %a0, <4 x double> %a1) {
+ ;CHECK-LABEL: stack_fold_subpd_ymm
+ ;CHECK: vsubpd {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = fsub <4 x double> %a0, %a1
+ ret <4 x double> %2
+}
+
+define <4 x float> @stack_fold_subps(<4 x float> %a0, <4 x float> %a1) {
+ ;CHECK-LABEL: stack_fold_subps
+ ;CHECK: vsubps {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = fsub <4 x float> %a0, %a1
+ ret <4 x float> %2
+}
+
+define <8 x float> @stack_fold_subps_ymm(<8 x float> %a0, <8 x float> %a1) {
+ ;CHECK-LABEL: stack_fold_subps_ymm
+ ;CHECK: vsubps {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = fsub <8 x float> %a0, %a1
+ ret <8 x float> %2
+}
+
+define double @stack_fold_subsd(double %a0, double %a1) {
+ ;CHECK-LABEL: stack_fold_subsd
+ ;CHECK: vsubsd {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 8-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = fsub double %a0, %a1
+ ret double %2
+}
+
+define <2 x double> @stack_fold_subsd_int(<2 x double> %a0, <2 x double> %a1) {
+ ;CHECK-LABEL: stack_fold_subsd_int
+ ;CHECK: vsubsd {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <2 x double> @llvm.x86.sse2.sub.sd(<2 x double> %a0, <2 x double> %a1)
+ ret <2 x double> %2
+}
+declare <2 x double> @llvm.x86.sse2.sub.sd(<2 x double>, <2 x double>) nounwind readnone
+
+define float @stack_fold_subss(float %a0, float %a1) {
+ ;CHECK-LABEL: stack_fold_subss
+ ;CHECK: vsubss {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 4-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = fsub float %a0, %a1
+ ret float %2
+}
+
+define <4 x float> @stack_fold_subss_int(<4 x float> %a0, <4 x float> %a1) {
+ ;CHECK-LABEL: stack_fold_subss_int
+ ;CHECK: vsubss {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <4 x float> @llvm.x86.sse.sub.ss(<4 x float> %a0, <4 x float> %a1)
+ ret <4 x float> %2
+}
+declare <4 x float> @llvm.x86.sse.sub.ss(<4 x float>, <4 x float>) nounwind readnone
+
+define i32 @stack_fold_testpd(<2 x double> %a0, <2 x double> %a1) {
+ ;CHECK-LABEL: stack_fold_testpd
+ ;CHECK: vtestpd {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call i32 @llvm.x86.avx.vtestc.pd(<2 x double> %a0, <2 x double> %a1)
+ ret i32 %2
+}
+declare i32 @llvm.x86.avx.vtestc.pd(<2 x double>, <2 x double>) nounwind readnone
+
+define i32 @stack_fold_testpd_ymm(<4 x double> %a0, <4 x double> %a1) {
+ ;CHECK-LABEL: stack_fold_testpd_ymm
+ ;CHECK: vtestpd {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call i32 @llvm.x86.avx.vtestc.pd.256(<4 x double> %a0, <4 x double> %a1)
+ ret i32 %2
+}
+declare i32 @llvm.x86.avx.vtestc.pd.256(<4 x double>, <4 x double>) nounwind readnone
+
+define i32 @stack_fold_testps(<4 x float> %a0, <4 x float> %a1) {
+ ;CHECK-LABEL: stack_fold_testps
+ ;CHECK: vtestps {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call i32 @llvm.x86.avx.vtestc.ps(<4 x float> %a0, <4 x float> %a1)
+ ret i32 %2
+}
+declare i32 @llvm.x86.avx.vtestc.ps(<4 x float>, <4 x float>) nounwind readnone
+
+define i32 @stack_fold_testps_ymm(<8 x float> %a0, <8 x float> %a1) {
+ ;CHECK-LABEL: stack_fold_testps_ymm
+ ;CHECK: vtestps {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call i32 @llvm.x86.avx.vtestc.ps.256(<8 x float> %a0, <8 x float> %a1)
+ ret i32 %2
+}
+declare i32 @llvm.x86.avx.vtestc.ps.256(<8 x float>, <8 x float>) nounwind readnone
+
+define i32 @stack_fold_ucomisd(double %a0, double %a1) {
+ ;CHECK-LABEL: stack_fold_ucomisd
+ ;CHECK: vucomisd {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 8-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = fcmp ueq double %a0, %a1
+ %3 = select i1 %2, i32 1, i32 -1
+ ret i32 %3
+}
+
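+; Why fcmp ueq here: (v)ucomisd sets ZF for both "equal" and "unordered", so
+; ueq reduces to a single ZF test after the compare, leaving the folded
+; vucomisd easy to match (a plausible reading, not stated in the test).
+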
+define i32 @stack_fold_ucomisd_int(<2 x double> %a0, <2 x double> %a1) {
+ ;CHECK-LABEL: stack_fold_ucomisd_int
+ ;CHECK: vucomisd {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call i32 @llvm.x86.sse2.ucomieq.sd(<2 x double> %a0, <2 x double> %a1)
+ ret i32 %2
+}
+declare i32 @llvm.x86.sse2.ucomieq.sd(<2 x double>, <2 x double>) nounwind readnone
+
+define i32 @stack_fold_ucomiss(float %a0, float %a1) {
+ ;CHECK-LABEL: stack_fold_ucomiss
+ ;CHECK: vucomiss {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 4-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = fcmp ueq float %a0, %a1
+ %3 = select i1 %2, i32 1, i32 -1
+ ret i32 %3
+}
+
+define i32 @stack_fold_ucomiss_int(<4 x float> %a0, <4 x float> %a1) {
+ ;CHECK-LABEL: stack_fold_ucomiss_int
+ ;CHECK: vucomiss {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call i32 @llvm.x86.sse.ucomieq.ss(<4 x float> %a0, <4 x float> %a1)
+ ret i32 %2
+}
+declare i32 @llvm.x86.sse.ucomieq.ss(<4 x float>, <4 x float>) nounwind readnone
+
+define <2 x double> @stack_fold_unpckhpd(<2 x double> %a0, <2 x double> %a1) {
+ ;CHECK-LABEL: stack_fold_unpckhpd
+ ;CHECK: vunpckhpd {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = shufflevector <2 x double> %a0, <2 x double> %a1, <2 x i32> <i32 1, i32 3>
+ ret <2 x double> %2
+}
+
+define <4 x double> @stack_fold_unpckhpd_ymm(<4 x double> %a0, <4 x double> %a1) {
+ ;CHECK-LABEL: stack_fold_unpckhpd_ymm
+ ;CHECK: vunpckhpd {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = shufflevector <4 x double> %a0, <4 x double> %a1, <4 x i32> <i32 1, i32 5, i32 3, i32 7>
+ ret <4 x double> %2
+}
+
+define <4 x float> @stack_fold_unpckhps(<4 x float> %a0, <4 x float> %a1) {
+ ;CHECK-LABEL: stack_fold_unpckhps
+ ;CHECK: vunpckhps {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = shufflevector <4 x float> %a0, <4 x float> %a1, <4 x i32> <i32 2, i32 6, i32 3, i32 7>
+ ret <4 x float> %2
+}
+
+define <8 x float> @stack_fold_unpckhps_ymm(<8 x float> %a0, <8 x float> %a1) {
+ ;CHECK-LABEL: stack_fold_unpckhps_ymm
+ ;CHECK: vunpckhps {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = shufflevector <8 x float> %a0, <8 x float> %a1, <8 x i32> <i32 2, i32 10, i32 3, i32 11, i32 6, i32 14, i32 7, i32 15>
+ ret <8 x float> %2
+}
+
+define <2 x double> @stack_fold_unpcklpd(<2 x double> %a0, <2 x double> %a1) {
+ ;CHECK-LABEL: stack_fold_unpcklpd
+ ;CHECK: vunpcklpd {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = shufflevector <2 x double> %a0, <2 x double> %a1, <2 x i32> <i32 0, i32 2>
+ ret <2 x double> %2
+}
+
+define <4 x double> @stack_fold_unpcklpd_ymm(<4 x double> %a0, <4 x double> %a1) {
+ ;CHECK-LABEL: stack_fold_unpcklpd_ymm
+ ;CHECK: vunpcklpd {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = shufflevector <4 x double> %a0, <4 x double> %a1, <4 x i32> <i32 0, i32 4, i32 2, i32 6>
+ ret <4 x double> %2
+}
+
+define <4 x float> @stack_fold_unpcklps(<4 x float> %a0, <4 x float> %a1) {
+ ;CHECK-LABEL: stack_fold_unpcklps
+ ;CHECK: vunpcklps {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = shufflevector <4 x float> %a0, <4 x float> %a1, <4 x i32> <i32 0, i32 4, i32 1, i32 5>
+ ret <4 x float> %2
+}
+
+define <8 x float> @stack_fold_unpcklps_ymm(<8 x float> %a0, <8 x float> %a1) {
+ ;CHECK-LABEL: stack_fold_unpcklps_ymm
+ ;CHECK: vunpcklps {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = shufflevector <8 x float> %a0, <8 x float> %a1, <8 x i32> <i32 0, i32 8, i32 1, i32 9, i32 4, i32 12, i32 5, i32 13>
+ ret <8 x float> %2
+}
+
+define <2 x double> @stack_fold_xorpd(<2 x double> %a0, <2 x double> %a1) {
+ ;CHECK-LABEL: stack_fold_xorpd
+ ;CHECK: vxorpd {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = bitcast <2 x double> %a0 to <2 x i64>
+ %3 = bitcast <2 x double> %a1 to <2 x i64>
+ %4 = xor <2 x i64> %2, %3
+ %5 = bitcast <2 x i64> %4 to <2 x double>
+ ; fadd forces execution domain
+ %6 = fadd <2 x double> %5, <double 0x0, double 0x0>
+ ret <2 x double> %6
+}
+
+define <4 x double> @stack_fold_xorpd_ymm(<4 x double> %a0, <4 x double> %a1) {
+ ;CHECK-LABEL: stack_fold_xorpd_ymm
+ ;CHECK: vxorpd {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = bitcast <4 x double> %a0 to <4 x i64>
+ %3 = bitcast <4 x double> %a1 to <4 x i64>
+ %4 = xor <4 x i64> %2, %3
+ %5 = bitcast <4 x i64> %4 to <4 x double>
+ ; fadd forces execution domain
+ %6 = fadd <4 x double> %5, <double 0x0, double 0x0, double 0x0, double 0x0>
+ ret <4 x double> %6
+}
+
+define <4 x float> @stack_fold_xorps(<4 x float> %a0, <4 x float> %a1) {
+ ;CHECK-LABEL: stack_fold_xorps
+ ;CHECK: vxorps {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = bitcast <4 x float> %a0 to <2 x i64>
+ %3 = bitcast <4 x float> %a1 to <2 x i64>
+ %4 = xor <2 x i64> %2, %3
+ %5 = bitcast <2 x i64> %4 to <4 x float>
+ ; fadd forces execution domain
+ %6 = fadd <4 x float> %5, <float 0x0, float 0x0, float 0x0, float 0x0>
+ ret <4 x float> %6
+}
+
+define <8 x float> @stack_fold_xorps_ymm(<8 x float> %a0, <8 x float> %a1) {
+ ;CHECK-LABEL: stack_fold_xorps_ymm
+ ;CHECK: vxorps {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = bitcast <8 x float> %a0 to <4 x i64>
+ %3 = bitcast <8 x float> %a1 to <4 x i64>
+ %4 = xor <4 x i64> %2, %3
+ %5 = bitcast <4 x i64> %4 to <8 x float>
+ ; fadd forces execution domain
+ %6 = fadd <8 x float> %5, <float 0x0, float 0x0, float 0x0, float 0x0, float 0x0, float 0x0, float 0x0, float 0x0>
+ ret <8 x float> %6
+}
diff --git a/test/CodeGen/X86/stack-folding-fp-sse42.ll b/test/CodeGen/X86/stack-folding-fp-sse42.ll
new file mode 100644
index 0000000..c26cc9d
--- /dev/null
+++ b/test/CodeGen/X86/stack-folding-fp-sse42.ll
@@ -0,0 +1,1089 @@
+; RUN: llc -O3 -disable-peephole -mtriple=x86_64-unknown-unknown -mcpu=x86-64 -mattr=+sse4.2 < %s | FileCheck %s
+
+target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
+target triple = "x86_64-unknown-unknown"
+
+; Stack reload folding tests.
+;
+; By including a nop call with side effects we can force a partial register spill of the
+; relevant registers and check that the reload is correctly folded into the instruction.
+
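+; A minimal sketch of the codegen this is expected to force for the first test
+; below (illustrative only; which value spills and the slot offset are up to
+; the register allocator):
+;   movaps %xmm1, 16(%rsp)   # one live argument spilled around the asm
+;   nop                      # inline asm clobbers xmm2-xmm15
+;   addpd  16(%rsp), %xmm0   # reload folded into addpd (what CHECK matches)
+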
+define <2 x double> @stack_fold_addpd(<2 x double> %a0, <2 x double> %a1) {
+ ;CHECK-LABEL: stack_fold_addpd
+ ;CHECK: addpd {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = fadd <2 x double> %a0, %a1
+ ret <2 x double> %2
+}
+
+define <4 x float> @stack_fold_addps(<4 x float> %a0, <4 x float> %a1) {
+ ;CHECK-LABEL: stack_fold_addps
+ ;CHECK: addps {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = fadd <4 x float> %a0, %a1
+ ret <4 x float> %2
+}
+
+define double @stack_fold_addsd(double %a0, double %a1) {
+ ;CHECK-LABEL: stack_fold_addsd
+ ;CHECK: addsd {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 8-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = fadd double %a0, %a1
+ ret double %2
+}
+
+define <2 x double> @stack_fold_addsd_int(<2 x double> %a0, <2 x double> %a1) {
+ ;CHECK-LABEL: stack_fold_addsd_int
+ ;CHECK: addsd {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <2 x double> @llvm.x86.sse2.add.sd(<2 x double> %a0, <2 x double> %a1)
+ ret <2 x double> %2
+}
+declare <2 x double> @llvm.x86.sse2.add.sd(<2 x double>, <2 x double>) nounwind readnone
+
+define float @stack_fold_addss(float %a0, float %a1) {
+ ;CHECK-LABEL: stack_fold_addss
+ ;CHECK: addss {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 4-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = fadd float %a0, %a1
+ ret float %2
+}
+
+define <4 x float> @stack_fold_addss_int(<4 x float> %a0, <4 x float> %a1) {
+ ;CHECK-LABEL: stack_fold_addss_int
+ ;CHECK: addss {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <4 x float> @llvm.x86.sse.add.ss(<4 x float> %a0, <4 x float> %a1)
+ ret <4 x float> %2
+}
+declare <4 x float> @llvm.x86.sse.add.ss(<4 x float>, <4 x float>) nounwind readnone
+
+define <2 x double> @stack_fold_addsubpd(<2 x double> %a0, <2 x double> %a1) {
+ ;CHECK-LABEL: stack_fold_addsubpd
+ ;CHECK: addsubpd {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <2 x double> @llvm.x86.sse3.addsub.pd(<2 x double> %a0, <2 x double> %a1)
+ ret <2 x double> %2
+}
+declare <2 x double> @llvm.x86.sse3.addsub.pd(<2 x double>, <2 x double>) nounwind readnone
+
+define <4 x float> @stack_fold_addsubps(<4 x float> %a0, <4 x float> %a1) {
+ ;CHECK-LABEL: stack_fold_addsubps
+ ;CHECK: addsubps {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <4 x float> @llvm.x86.sse3.addsub.ps(<4 x float> %a0, <4 x float> %a1)
+ ret <4 x float> %2
+}
+declare <4 x float> @llvm.x86.sse3.addsub.ps(<4 x float>, <4 x float>) nounwind readnone
+
+define <2 x double> @stack_fold_andnpd(<2 x double> %a0, <2 x double> %a1) {
+ ;CHECK-LABEL: stack_fold_andnpd
+ ;CHECK: andnpd {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = bitcast <2 x double> %a0 to <2 x i64>
+ %3 = bitcast <2 x double> %a1 to <2 x i64>
+ %4 = xor <2 x i64> %2, <i64 -1, i64 -1>
+ %5 = and <2 x i64> %4, %3
+ %6 = bitcast <2 x i64> %5 to <2 x double>
+ ; fadd forces execution domain
+ %7 = fadd <2 x double> %6, <double 0x0, double 0x0>
+ ret <2 x double> %7
+}
+
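+; The xor with <i64 -1, i64 -1> feeding the and above is the canonical ANDN
+; pattern: it is selected as andnpd, which computes NOT(register operand) AND
+; (second operand), so the folded memory operand is the non-inverted input
+; (%a1).
+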
+define <4 x float> @stack_fold_andnps(<4 x float> %a0, <4 x float> %a1) {
+ ;CHECK-LABEL: stack_fold_andnps
+ ;CHECK: andnps {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = bitcast <4 x float> %a0 to <2 x i64>
+ %3 = bitcast <4 x float> %a1 to <2 x i64>
+ %4 = xor <2 x i64> %2, <i64 -1, i64 -1>
+ %5 = and <2 x i64> %4, %3
+ %6 = bitcast <2 x i64> %5 to <4 x float>
+ ; fadd forces execution domain
+ %7 = fadd <4 x float> %6, <float 0x0, float 0x0, float 0x0, float 0x0>
+ ret <4 x float> %7
+}
+
+define <2 x double> @stack_fold_andpd(<2 x double> %a0, <2 x double> %a1) {
+ ;CHECK-LABEL: stack_fold_andpd
+ ;CHECK: andpd {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = bitcast <2 x double> %a0 to <2 x i64>
+ %3 = bitcast <2 x double> %a1 to <2 x i64>
+ %4 = and <2 x i64> %2, %3
+ %5 = bitcast <2 x i64> %4 to <2 x double>
+ ; fadd forces execution domain
+ %6 = fadd <2 x double> %5, <double 0x0, double 0x0>
+ ret <2 x double> %6
+}
+
+define <4 x float> @stack_fold_andps(<4 x float> %a0, <4 x float> %a1) {
+ ;CHECK-LABEL: stack_fold_andps
+ ;CHECK: andps {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = bitcast <4 x float> %a0 to <2 x i64>
+ %3 = bitcast <4 x float> %a1 to <2 x i64>
+ %4 = and <2 x i64> %2, %3
+ %5 = bitcast <2 x i64> %4 to <4 x float>
+ ; fadd forces execution domain
+ %6 = fadd <4 x float> %5, <float 0x0, float 0x0, float 0x0, float 0x0>
+ ret <4 x float> %6
+}
+
+define <2 x double> @stack_fold_blendpd(<2 x double> %a0, <2 x double> %a1) {
+ ;CHECK-LABEL: stack_fold_blendpd
+ ;CHECK: blendpd $2, {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = select <2 x i1> <i1 1, i1 0>, <2 x double> %a0, <2 x double> %a1
+ ret <2 x double> %2
+}
+
+define <4 x float> @stack_fold_blendps(<4 x float> %a0, <4 x float> %a1) {
+ ;CHECK-LABEL: stack_fold_blendps
+ ;CHECK: blendps $6, {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = select <4 x i1> <i1 1, i1 0, i1 0, i1 1>, <4 x float> %a0, <4 x float> %a1
+ ret <4 x float> %2
+}
+
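+; Blend immediates decoded (assumed BLENDPD/BLENDPS layout: bit i set takes
+; result element i from the second/memory operand): the blendpd select
+; <i1 1, i1 0> keeps element 0 from %a0 and takes element 1 from %a1 -> $2;
+; the blendps select <i1 1, i1 0, i1 0, i1 1> takes elements 1 and 2 from
+; %a1 -> $6.
+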
+define <2 x double> @stack_fold_blendvpd(<2 x double> %a0, <2 x double> %a1, <2 x double> %c) {
+ ;CHECK-LABEL: stack_fold_blendvpd
+ ;CHECK: blendvpd {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <2 x double> @llvm.x86.sse41.blendvpd(<2 x double> %a1, <2 x double> %c, <2 x double> %a0)
+ ret <2 x double> %2
+}
+declare <2 x double> @llvm.x86.sse41.blendvpd(<2 x double>, <2 x double>, <2 x double>) nounwind readnone
+
+define <4 x float> @stack_fold_blendvps(<4 x float> %a0, <4 x float> %a1, <4 x float> %c) {
+ ;CHECK-LABEL: stack_fold_blendvps
+ ;CHECK: blendvps {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <4 x float> @llvm.x86.sse41.blendvps(<4 x float> %a1, <4 x float> %c, <4 x float> %a0)
+ ret <4 x float> %2
+}
+declare <4 x float> @llvm.x86.sse41.blendvps(<4 x float>, <4 x float>, <4 x float>) nounwind readnone
+
+define <2 x double> @stack_fold_cmppd(<2 x double> %a0, <2 x double> %a1) {
+ ;CHECK-LABEL: stack_fold_cmppd
+ ;CHECK: cmpeqpd {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <2 x double> @llvm.x86.sse2.cmp.pd(<2 x double> %a0, <2 x double> %a1, i8 0)
+ ret <2 x double> %2
+}
+declare <2 x double> @llvm.x86.sse2.cmp.pd(<2 x double>, <2 x double>, i8) nounwind readnone
+
+define <4 x float> @stack_fold_cmpps(<4 x float> %a0, <4 x float> %a1) {
+ ;CHECK-LABEL: stack_fold_cmpps
+ ;CHECK: cmpeqps {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <4 x float> @llvm.x86.sse.cmp.ps(<4 x float> %a0, <4 x float> %a1, i8 0)
+ ret <4 x float> %2
+}
+declare <4 x float> @llvm.x86.sse.cmp.ps(<4 x float>, <4 x float>, i8) nounwind readnone
+
+define i32 @stack_fold_cmpsd(double %a0, double %a1) {
+ ;CHECK-LABEL: stack_fold_cmpsd
+ ;CHECK: cmpeqsd {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 8-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = fcmp oeq double %a0, %a1
+ %3 = zext i1 %2 to i32
+ ret i32 %3
+}
+
+define <2 x double> @stack_fold_cmpsd_int(<2 x double> %a0, <2 x double> %a1) {
+ ;CHECK-LABEL: stack_fold_cmpsd_int
+ ;CHECK: cmpeqsd {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <2 x double> @llvm.x86.sse2.cmp.sd(<2 x double> %a0, <2 x double> %a1, i8 0)
+ ret <2 x double> %2
+}
+declare <2 x double> @llvm.x86.sse2.cmp.sd(<2 x double>, <2 x double>, i8) nounwind readnone
+
+define i32 @stack_fold_cmpss(float %a0, float %a1) {
+ ;CHECK-LABEL: stack_fold_cmpss
+ ;CHECK: cmpeqss {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 4-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = fcmp oeq float %a0, %a1
+ %3 = zext i1 %2 to i32
+ ret i32 %3
+}
+
+define <4 x float> @stack_fold_cmpss_int(<4 x float> %a0, <4 x float> %a1) {
+ ;CHECK-LABEL: stack_fold_cmpss_int
+ ;CHECK: cmpeqss {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <4 x float> @llvm.x86.sse.cmp.ss(<4 x float> %a0, <4 x float> %a1, i8 0)
+ ret <4 x float> %2
+}
+declare <4 x float> @llvm.x86.sse.cmp.ss(<4 x float>, <4 x float>, i8) nounwind readnone
+
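+; In the cmp tests above, the i8 predicate operand 0 selects the EQ
+; comparison, which is why the CHECK lines expect the cmpeq{pd,ps,sd,ss}
+; mnemonics rather than a cmp with an explicit immediate.
+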
+; TODO stack_fold_comisd
+
+define i32 @stack_fold_comisd_int(<2 x double> %a0, <2 x double> %a1) {
+ ;CHECK-LABEL: stack_fold_comisd_int
+ ;CHECK: comisd {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call i32 @llvm.x86.sse2.comieq.sd(<2 x double> %a0, <2 x double> %a1)
+ ret i32 %2
+}
+declare i32 @llvm.x86.sse2.comieq.sd(<2 x double>, <2 x double>) nounwind readnone
+
+; TODO stack_fold_comiss
+
+define i32 @stack_fold_comiss_int(<4 x float> %a0, <4 x float> %a1) {
+ ;CHECK-LABEL: stack_fold_comiss_int
+ ;CHECK: comiss {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call i32 @llvm.x86.sse.comieq.ss(<4 x float> %a0, <4 x float> %a1)
+ ret i32 %2
+}
+declare i32 @llvm.x86.sse.comieq.ss(<4 x float>, <4 x float>) nounwind readnone
+
+define <2 x double> @stack_fold_cvtdq2pd(<4 x i32> %a0) {
+ ;CHECK-LABEL: stack_fold_cvtdq2pd
+ ;CHECK: cvtdq2pd {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <2 x double> @llvm.x86.sse2.cvtdq2pd(<4 x i32> %a0)
+ ret <2 x double> %2
+}
+declare <2 x double> @llvm.x86.sse2.cvtdq2pd(<4 x i32>) nounwind readnone
+
+define <4 x float> @stack_fold_cvtdq2ps(<4 x i32> %a0) {
+ ;CHECK-LABEL: stack_fold_cvtdq2ps
+ ;CHECK: cvtdq2ps {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = sitofp <4 x i32> %a0 to <4 x float>
+ ret <4 x float> %2
+}
+
+define <4 x i32> @stack_fold_cvtpd2dq(<2 x double> %a0) {
+ ;CHECK-LABEL: stack_fold_cvtpd2dq
+ ;CHECK: cvtpd2dq {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <4 x i32> @llvm.x86.sse2.cvtpd2dq(<2 x double> %a0)
+ ret <4 x i32> %2
+}
+declare <4 x i32> @llvm.x86.sse2.cvtpd2dq(<2 x double>) nounwind readnone
+
+define <2 x float> @stack_fold_cvtpd2ps(<2 x double> %a0) {
+ ;CHECK-LABEL: stack_fold_cvtpd2ps
+ ;CHECK: cvtpd2ps {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = fptrunc <2 x double> %a0 to <2 x float>
+ ret <2 x float> %2
+}
+
+define <4 x i32> @stack_fold_cvtps2dq(<4 x float> %a0) {
+ ;CHECK-LABEL: stack_fold_cvtps2dq
+ ;CHECK: cvtps2dq {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <4 x i32> @llvm.x86.sse2.cvtps2dq(<4 x float> %a0)
+ ret <4 x i32> %2
+}
+declare <4 x i32> @llvm.x86.sse2.cvtps2dq(<4 x float>) nounwind readnone
+
+define <2 x double> @stack_fold_cvtps2pd(<4 x float> %a0) {
+ ;CHECK-LABEL: stack_fold_cvtps2pd
+ ;CHECK: cvtps2pd {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <2 x double> @llvm.x86.sse2.cvtps2pd(<4 x float> %a0)
+ ret <2 x double> %2
+}
+declare <2 x double> @llvm.x86.sse2.cvtps2pd(<4 x float>) nounwind readnone
+
+; TODO stack_fold_cvtsd2si
+
+define i32 @stack_fold_cvtsd2si_int(<2 x double> %a0) {
+ ;CHECK-LABEL: stack_fold_cvtsd2si_int
+ ;CHECK: cvtsd2si {{-?[0-9]*}}(%rsp), %eax {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call i32 @llvm.x86.sse2.cvtsd2si(<2 x double> %a0)
+ ret i32 %2
+}
+declare i32 @llvm.x86.sse2.cvtsd2si(<2 x double>) nounwind readnone
+
+; TODO stack_fold_cvtsd2si64
+
+define i64 @stack_fold_cvtsd2si64_int(<2 x double> %a0) {
+ ;CHECK-LABEL: stack_fold_cvtsd2si64_int
+ ;CHECK: cvtsd2siq {{-?[0-9]*}}(%rsp), %rax {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call i64 @llvm.x86.sse2.cvtsd2si64(<2 x double> %a0)
+ ret i64 %2
+}
+declare i64 @llvm.x86.sse2.cvtsd2si64(<2 x double>) nounwind readnone
+
+; TODO stack_fold_cvtsd2ss
+
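+; Note: the optsize attribute on several tests below is assumed to be what allows the
+; fold: cvtsd2ss/cvtsi2sd and friends only partially update their destination, and the
+; folder normally avoids creating that false dependency unless optimizing for size.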
+define <4 x float> @stack_fold_cvtsd2ss_int(<2 x double> %a0) optsize {
+ ;CHECK-LABEL: stack_fold_cvtsd2ss_int
+ ;CHECK: cvtsd2ss {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <4 x float> @llvm.x86.sse2.cvtsd2ss(<4 x float> <float 0x0, float 0x0, float 0x0, float 0x0>, <2 x double> %a0)
+ ret <4 x float> %2
+}
+declare <4 x float> @llvm.x86.sse2.cvtsd2ss(<4 x float>, <2 x double>) nounwind readnone
+
+define double @stack_fold_cvtsi2sd(i32 %a0) optsize {
+ ;CHECK-LABEL: stack_fold_cvtsi2sd
+ ;CHECK: cvtsi2sdl {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 4-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{rax},~{rbx},~{rcx},~{rdx},~{rsi},~{rdi},~{rbp},~{r8},~{r9},~{r10},~{r11},~{r12},~{r13},~{r14},~{r15}"()
+ %2 = sitofp i32 %a0 to double
+ ret double %2
+}
+
+define <2 x double> @stack_fold_cvtsi2sd_int(i32 %a0) {
+ ;CHECK-LABEL: stack_fold_cvtsi2sd_int
+ ;CHECK: cvtsi2sdl {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 4-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{rax},~{rbx},~{rcx},~{rdx},~{rsi},~{rdi},~{rbp},~{r8},~{r9},~{r10},~{r11},~{r12},~{r13},~{r14},~{r15}"()
+ %2 = call <2 x double> @llvm.x86.sse2.cvtsi2sd(<2 x double> <double 0x0, double 0x0>, i32 %a0)
+ ret <2 x double> %2
+}
+declare <2 x double> @llvm.x86.sse2.cvtsi2sd(<2 x double>, i32) nounwind readnone
+
+define double @stack_fold_cvtsi642sd(i64 %a0) optsize {
+ ;CHECK-LABEL: stack_fold_cvtsi642sd
+ ;CHECK: cvtsi2sdq {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 8-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{rax},~{rbx},~{rcx},~{rdx},~{rsi},~{rdi},~{rbp},~{r8},~{r9},~{r10},~{r11},~{r12},~{r13},~{r14},~{r15}"()
+ %2 = sitofp i64 %a0 to double
+ ret double %2
+}
+
+define <2 x double> @stack_fold_cvtsi642sd_int(i64 %a0) {
+ ;CHECK-LABEL: stack_fold_cvtsi642sd_int
+ ;CHECK: cvtsi2sdq {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 8-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{rax},~{rbx},~{rcx},~{rdx},~{rsi},~{rdi},~{rbp},~{r8},~{r9},~{r10},~{r11},~{r12},~{r13},~{r14},~{r15}"()
+ %2 = call <2 x double> @llvm.x86.sse2.cvtsi642sd(<2 x double> <double 0x0, double 0x0>, i64 %a0)
+ ret <2 x double> %2
+}
+declare <2 x double> @llvm.x86.sse2.cvtsi642sd(<2 x double>, i64) nounwind readnone
+
+define float @stack_fold_cvtsi2ss(i32 %a0) optsize {
+ ;CHECK-LABEL: stack_fold_cvtsi2ss
+ ;CHECK: cvtsi2ssl {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 4-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{rax},~{rbx},~{rcx},~{rdx},~{rsi},~{rdi},~{rbp},~{r8},~{r9},~{r10},~{r11},~{r12},~{r13},~{r14},~{r15}"()
+ %2 = sitofp i32 %a0 to float
+ ret float %2
+}
+
+define <4 x float> @stack_fold_cvtsi2ss_int(i32 %a0) {
+ ;CHECK-LABEL: stack_fold_cvtsi2ss_int
+ ;CHECK: cvtsi2ssl {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 4-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{rax},~{rbx},~{rcx},~{rdx},~{rsi},~{rdi},~{rbp},~{r8},~{r9},~{r10},~{r11},~{r12},~{r13},~{r14},~{r15}"()
+ %2 = call <4 x float> @llvm.x86.sse.cvtsi2ss(<4 x float> <float 0x0, float 0x0, float 0x0, float 0x0>, i32 %a0)
+ ret <4 x float> %2
+}
+declare <4 x float> @llvm.x86.sse.cvtsi2ss(<4 x float>, i32) nounwind readnone
+
+define float @stack_fold_cvtsi642ss(i64 %a0) optsize {
+ ;CHECK-LABEL: stack_fold_cvtsi642ss
+ ;CHECK: cvtsi2ssq {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 8-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{rax},~{rbx},~{rcx},~{rdx},~{rsi},~{rdi},~{rbp},~{r8},~{r9},~{r10},~{r11},~{r12},~{r13},~{r14},~{r15}"()
+ %2 = sitofp i64 %a0 to float
+ ret float %2
+}
+
+define <4 x float> @stack_fold_cvtsi642ss_int(i64 %a0) {
+ ;CHECK-LABEL: stack_fold_cvtsi642ss_int
+ ;CHECK: cvtsi2ssq {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 8-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{rax},~{rbx},~{rcx},~{rdx},~{rsi},~{rdi},~{rbp},~{r8},~{r9},~{r10},~{r11},~{r12},~{r13},~{r14},~{r15}"()
+ %2 = call <4 x float> @llvm.x86.sse.cvtsi642ss(<4 x float> <float 0x0, float 0x0, float 0x0, float 0x0>, i64 %a0)
+ ret <4 x float> %2
+}
+declare <4 x float> @llvm.x86.sse.cvtsi642ss(<4 x float>, i64) nounwind readnone
+
+define double @stack_fold_cvtss2sd(float %a0) optsize {
+ ;CHECK-LABEL: stack_fold_cvtss2sd
+ ;CHECK: cvtss2sd {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 4-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = fpext float %a0 to double
+ ret double %2
+}
+
+define <2 x double> @stack_fold_cvtss2sd_int(<4 x float> %a0) optsize {
+ ;CHECK-LABEL: stack_fold_cvtss2sd_int
+ ;CHECK: cvtss2sd {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <2 x double> @llvm.x86.sse2.cvtss2sd(<2 x double> <double 0x0, double 0x0>, <4 x float> %a0)
+ ret <2 x double> %2
+}
+declare <2 x double> @llvm.x86.sse2.cvtss2sd(<2 x double>, <4 x float>) nounwind readnone
+
+; TODO stack_fold_cvtss2si
+
+define i32 @stack_fold_cvtss2si_int(<4 x float> %a0) {
+ ;CHECK-LABEL: stack_fold_cvtss2si_int
+ ;CHECK: cvtss2si {{-?[0-9]*}}(%rsp), %eax {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call i32 @llvm.x86.sse.cvtss2si(<4 x float> %a0)
+ ret i32 %2
+}
+declare i32 @llvm.x86.sse.cvtss2si(<4 x float>) nounwind readnone
+
+; TODO stack_fold_cvtss2si64
+
+define i64 @stack_fold_cvtss2si64_int(<4 x float> %a0) {
+ ;CHECK-LABEL: stack_fold_cvtss2si64_int
+ ;CHECK: cvtss2si {{-?[0-9]*}}(%rsp), %rax {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call i64 @llvm.x86.sse.cvtss2si64(<4 x float> %a0)
+ ret i64 %2
+}
+declare i64 @llvm.x86.sse.cvtss2si64(<4 x float>) nounwind readnone
+
+define <4 x i32> @stack_fold_cvttpd2dq(<2 x double> %a0) {
+ ;CHECK-LABEL: stack_fold_cvttpd2dq
+ ;CHECK: cvttpd2dq {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <4 x i32> @llvm.x86.sse2.cvttpd2dq(<2 x double> %a0)
+ ret <4 x i32> %2
+}
+declare <4 x i32> @llvm.x86.sse2.cvttpd2dq(<2 x double>) nounwind readnone
+
+define <4 x i32> @stack_fold_cvttps2dq(<4 x float> %a0) {
+ ;CHECK-LABEL: stack_fold_cvttps2dq
+ ;CHECK: cvttps2dq {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = fptosi <4 x float> %a0 to <4 x i32>
+ ret <4 x i32> %2
+}
+
+define i32 @stack_fold_cvttsd2si(double %a0) {
+ ;CHECK-LABEL: stack_fold_cvttsd2si
+ ;CHECK: cvttsd2si {{-?[0-9]*}}(%rsp), %eax {{.*#+}} 8-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = fptosi double %a0 to i32
+ ret i32 %2
+}
+
+define i32 @stack_fold_cvttsd2si_int(<2 x double> %a0) {
+ ;CHECK-LABEL: stack_fold_cvttsd2si_int
+ ;CHECK: cvttsd2si {{-?[0-9]*}}(%rsp), %eax {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call i32 @llvm.x86.sse2.cvttsd2si(<2 x double> %a0)
+ ret i32 %2
+}
+declare i32 @llvm.x86.sse2.cvttsd2si(<2 x double>) nounwind readnone
+
+define i64 @stack_fold_cvttsd2si64(double %a0) {
+ ;CHECK-LABEL: stack_fold_cvttsd2si64
+ ;CHECK: cvttsd2si {{-?[0-9]*}}(%rsp), %rax {{.*#+}} 8-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = fptosi double %a0 to i64
+ ret i64 %2
+}
+
+define i64 @stack_fold_cvttsd2si64_int(<2 x double> %a0) {
+ ;CHECK-LABEL: stack_fold_cvttsd2si64_int
+ ;CHECK: cvttsd2si {{-?[0-9]*}}(%rsp), %rax {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call i64 @llvm.x86.sse2.cvttsd2si64(<2 x double> %a0)
+ ret i64 %2
+}
+declare i64 @llvm.x86.sse2.cvttsd2si64(<2 x double>) nounwind readnone
+
+define i32 @stack_fold_cvttss2si(float %a0) {
+ ;CHECK-LABEL: stack_fold_cvttss2si
+ ;CHECK: cvttss2si {{-?[0-9]*}}(%rsp), %eax {{.*#+}} 4-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = fptosi float %a0 to i32
+ ret i32 %2
+}
+
+define i32 @stack_fold_cvttss2si_int(<4 x float> %a0) {
+ ;CHECK-LABEL: stack_fold_cvttss2si_int
+ ;CHECK: cvttss2si {{-?[0-9]*}}(%rsp), %eax {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call i32 @llvm.x86.sse.cvttss2si(<4 x float> %a0)
+ ret i32 %2
+}
+declare i32 @llvm.x86.sse.cvttss2si(<4 x float>) nounwind readnone
+
+define i64 @stack_fold_cvttss2si64(float %a0) {
+ ;CHECK-LABEL: stack_fold_cvttss2si64
+ ;CHECK: cvttss2si {{-?[0-9]*}}(%rsp), %rax {{.*#+}} 4-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = fptosi float %a0 to i64
+ ret i64 %2
+}
+
+define i64 @stack_fold_cvttss2si64_int(<4 x float> %a0) {
+ ;CHECK-LABEL: stack_fold_cvttss2si64_int
+ ;CHECK: cvttss2si {{-?[0-9]*}}(%rsp), %rax {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call i64 @llvm.x86.sse.cvttss2si64(<4 x float> %a0)
+ ret i64 %2
+}
+declare i64 @llvm.x86.sse.cvttss2si64(<4 x float>) nounwind readnone
+
+define <2 x double> @stack_fold_divpd(<2 x double> %a0, <2 x double> %a1) {
+ ;CHECK-LABEL: stack_fold_divpd
+ ;CHECK: divpd {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = fdiv <2 x double> %a0, %a1
+ ret <2 x double> %2
+}
+
+define <4 x float> @stack_fold_divps(<4 x float> %a0, <4 x float> %a1) {
+ ;CHECK-LABEL: stack_fold_divps
+ ;CHECK: divps {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = fdiv <4 x float> %a0, %a1
+ ret <4 x float> %2
+}
+
+define double @stack_fold_divsd(double %a0, double %a1) {
+ ;CHECK-LABEL: stack_fold_divsd
+ ;CHECK: divsd {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 8-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = fdiv double %a0, %a1
+ ret double %2
+}
+
+define <2 x double> @stack_fold_divsd_int(<2 x double> %a0, <2 x double> %a1) {
+ ;CHECK-LABEL: stack_fold_divsd_int
+ ;CHECK: divsd {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <2 x double> @llvm.x86.sse2.div.sd(<2 x double> %a0, <2 x double> %a1)
+ ret <2 x double> %2
+}
+declare <2 x double> @llvm.x86.sse2.div.sd(<2 x double>, <2 x double>) nounwind readnone
+
+define float @stack_fold_divss(float %a0, float %a1) {
+ ;CHECK-LABEL: stack_fold_divss
+ ;CHECK: divss {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 4-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = fdiv float %a0, %a1
+ ret float %2
+}
+
+define <4 x float> @stack_fold_divss_int(<4 x float> %a0, <4 x float> %a1) {
+ ;CHECK-LABEL: stack_fold_divss_int
+ ;CHECK: divss {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <4 x float> @llvm.x86.sse.div.ss(<4 x float> %a0, <4 x float> %a1)
+ ret <4 x float> %2
+}
+declare <4 x float> @llvm.x86.sse.div.ss(<4 x float>, <4 x float>) nounwind readnone
+
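+; dppd/dpps carry an immediate; the CHECK matches the $7 as well, to make sure folding
+; keeps the immediate operand intact.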
+define <2 x double> @stack_fold_dppd(<2 x double> %a0, <2 x double> %a1) {
+ ;CHECK-LABEL: stack_fold_dppd
+ ;CHECK: dppd $7, {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <2 x double> @llvm.x86.sse41.dppd(<2 x double> %a0, <2 x double> %a1, i8 7)
+ ret <2 x double> %2
+}
+declare <2 x double> @llvm.x86.sse41.dppd(<2 x double>, <2 x double>, i8) nounwind readnone
+
+define <4 x float> @stack_fold_dpps(<4 x float> %a0, <4 x float> %a1) {
+ ;CHECK-LABEL: stack_fold_dpps
+ ;CHECK: dpps $7, {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <4 x float> @llvm.x86.sse41.dpps(<4 x float> %a0, <4 x float> %a1, i8 7)
+ ret <4 x float> %2
+}
+declare <4 x float> @llvm.x86.sse41.dpps(<4 x float>, <4 x float>, i8) nounwind readnone
+
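+; extractps is a spill test rather than a reload test: the lane is stored straight to
+; the stack and read back into %eax with a plain movl after the GPR-clobbering asm.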
+define i32 @stack_fold_extractps(<4 x float> %a0) {
+ ;CHECK-LABEL: stack_fold_extractps
+ ;CHECK: extractps $1, {{%xmm[0-9][0-9]*}}, {{-?[0-9]*}}(%rsp) {{.*#+}} 4-byte Folded Spill
+ ;CHECK: movl {{-?[0-9]*}}(%rsp), %eax {{.*#+}} 4-byte Reload
+ %1 = extractelement <4 x float> %a0, i32 1
+ %2 = bitcast float %1 to i32
+ %3 = tail call <2 x i64> asm sideeffect "nop", "=x,~{rax},~{rbx},~{rcx},~{rdx},~{rsi},~{rdi},~{rbp},~{r8},~{r9},~{r10},~{r11},~{r12},~{r13},~{r14},~{r15}"()
+ ret i32 %2
+}
+
+define <2 x double> @stack_fold_haddpd(<2 x double> %a0, <2 x double> %a1) {
+ ;CHECK-LABEL: stack_fold_haddpd
+ ;CHECK: haddpd {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <2 x double> @llvm.x86.sse3.hadd.pd(<2 x double> %a0, <2 x double> %a1)
+ ret <2 x double> %2
+}
+declare <2 x double> @llvm.x86.sse3.hadd.pd(<2 x double>, <2 x double>) nounwind readnone
+
+define <4 x float> @stack_fold_haddps(<4 x float> %a0, <4 x float> %a1) {
+ ;CHECK-LABEL: stack_fold_haddps
+ ;CHECK: haddps {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <4 x float> @llvm.x86.sse3.hadd.ps(<4 x float> %a0, <4 x float> %a1)
+ ret <4 x float> %2
+}
+declare <4 x float> @llvm.x86.sse3.hadd.ps(<4 x float>, <4 x float>) nounwind readnone
+
+define <2 x double> @stack_fold_hsubpd(<2 x double> %a0, <2 x double> %a1) {
+ ;CHECK-LABEL: stack_fold_hsubpd
+ ;CHECK: hsubpd {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <2 x double> @llvm.x86.sse3.hsub.pd(<2 x double> %a0, <2 x double> %a1)
+ ret <2 x double> %2
+}
+declare <2 x double> @llvm.x86.sse3.hsub.pd(<2 x double>, <2 x double>) nounwind readnone
+
+define <4 x float> @stack_fold_hsubps(<4 x float> %a0, <4 x float> %a1) {
+ ;CHECK-LABEL: stack_fold_hsubps
+ ;CHECK: hsubps {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <4 x float> @llvm.x86.sse3.hsub.ps(<4 x float> %a0, <4 x float> %a1)
+ ret <4 x float> %2
+}
+declare <4 x float> @llvm.x86.sse3.hsub.ps(<4 x float>, <4 x float>) nounwind readnone
+
+; TODO stack_fold_insertps
+
+define <2 x double> @stack_fold_maxpd(<2 x double> %a0, <2 x double> %a1) {
+ ;CHECK-LABEL: stack_fold_maxpd
+ ;CHECK: maxpd {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <2 x double> @llvm.x86.sse2.max.pd(<2 x double> %a0, <2 x double> %a1)
+ ret <2 x double> %2
+}
+declare <2 x double> @llvm.x86.sse2.max.pd(<2 x double>, <2 x double>) nounwind readnone
+
+define <4 x float> @stack_fold_maxps(<4 x float> %a0, <4 x float> %a1) {
+ ;CHECK-LABEL: stack_fold_maxps
+ ;CHECK: maxps {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <4 x float> @llvm.x86.sse.max.ps(<4 x float> %a0, <4 x float> %a1)
+ ret <4 x float> %2
+}
+declare <4 x float> @llvm.x86.sse.max.ps(<4 x float>, <4 x float>) nounwind readnone
+
+define double @stack_fold_maxsd(double %a0, double %a1) {
+ ;CHECK-LABEL: stack_fold_maxsd
+ ;CHECK: maxsd {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 8-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = fcmp ogt double %a0, %a1
+ %3 = select i1 %2, double %a0, double %a1
+ ret double %3
+}
+
+define <2 x double> @stack_fold_maxsd_int(<2 x double> %a0, <2 x double> %a1) {
+ ;CHECK-LABEL: stack_fold_maxsd_int
+ ;CHECK: maxsd {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <2 x double> @llvm.x86.sse2.max.sd(<2 x double> %a0, <2 x double> %a1)
+ ret <2 x double> %2
+}
+declare <2 x double> @llvm.x86.sse2.max.sd(<2 x double>, <2 x double>) nounwind readnone
+
+define float @stack_fold_maxss(float %a0, float %a1) {
+ ;CHECK-LABEL: stack_fold_maxss
+ ;CHECK: maxss {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 4-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = fcmp ogt float %a0, %a1
+ %3 = select i1 %2, float %a0, float %a1
+ ret float %3
+}
+
+define <4 x float> @stack_fold_maxss_int(<4 x float> %a0, <4 x float> %a1) {
+ ;CHECK-LABEL: stack_fold_maxss_int
+ ;CHECK: maxss {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <4 x float> @llvm.x86.sse.max.ss(<4 x float> %a0, <4 x float> %a1)
+ ret <4 x float> %2
+}
+declare <4 x float> @llvm.x86.sse.max.ss(<4 x float>, <4 x float>) nounwind readnone
+
+define <2 x double> @stack_fold_minpd(<2 x double> %a0, <2 x double> %a1) {
+ ;CHECK-LABEL: stack_fold_minpd
+ ;CHECK: minpd {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <2 x double> @llvm.x86.sse2.min.pd(<2 x double> %a0, <2 x double> %a1)
+ ret <2 x double> %2
+}
+declare <2 x double> @llvm.x86.sse2.min.pd(<2 x double>, <2 x double>) nounwind readnone
+
+define <4 x float> @stack_fold_minps(<4 x float> %a0, <4 x float> %a1) {
+ ;CHECK-LABEL: stack_fold_minps
+ ;CHECK: minps {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <4 x float> @llvm.x86.sse.min.ps(<4 x float> %a0, <4 x float> %a1)
+ ret <4 x float> %2
+}
+declare <4 x float> @llvm.x86.sse.min.ps(<4 x float>, <4 x float>) nounwind readnone
+
+define double @stack_fold_minsd(double %a0, double %a1) {
+ ;CHECK-LABEL: stack_fold_minsd
+ ;CHECK: minsd {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 8-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = fcmp olt double %a0, %a1
+ %3 = select i1 %2, double %a0, double %a1
+ ret double %3
+}
+
+define <2 x double> @stack_fold_minsd_int(<2 x double> %a0, <2 x double> %a1) {
+ ;CHECK-LABEL: stack_fold_minsd_int
+ ;CHECK: minsd {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <2 x double> @llvm.x86.sse2.min.sd(<2 x double> %a0, <2 x double> %a1)
+ ret <2 x double> %2
+}
+declare <2 x double> @llvm.x86.sse2.min.sd(<2 x double>, <2 x double>) nounwind readnone
+
+define float @stack_fold_minss(float %a0, float %a1) {
+ ;CHECK-LABEL: stack_fold_minss
+ ;CHECK: minss {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 4-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = fcmp olt float %a0, %a1
+ %3 = select i1 %2, float %a0, float %a1
+ ret float %3
+}
+
+define <4 x float> @stack_fold_minss_int(<4 x float> %a0, <4 x float> %a1) {
+ ;CHECK-LABEL: stack_fold_minss_int
+ ;CHECK: minss {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <4 x float> @llvm.x86.sse.min.ss(<4 x float> %a0, <4 x float> %a1)
+ ret <4 x float> %2
+}
+declare <4 x float> @llvm.x86.sse.min.ss(<4 x float>, <4 x float>) nounwind readnone
+
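+; movddup/movshdup/movsldup are matched from plain shufflevector patterns, so these
+; folds are exercised without any target intrinsic.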
+define <2 x double> @stack_fold_movddup(<2 x double> %a0) {
+ ;CHECK-LABEL: stack_fold_movddup
+ ;CHECK: movddup {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = shufflevector <2 x double> %a0, <2 x double> undef, <2 x i32> <i32 0, i32 0>
+ ret <2 x double> %2
+}
+; TODO stack_fold_movhpd (load / store)
+; TODO stack_fold_movhps (load / store)
+
+; TODO stack_fold_movlpd (load / store)
+; TODO stack_fold_movlps (load / store)
+
+define <4 x float> @stack_fold_movshdup(<4 x float> %a0) {
+ ;CHECK-LABEL: stack_fold_movshdup
+ ;CHECK: movshdup {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = shufflevector <4 x float> %a0, <4 x float> undef, <4 x i32> <i32 1, i32 1, i32 3, i32 3>
+ ret <4 x float> %2
+}
+
+define <4 x float> @stack_fold_movsldup(<4 x float> %a0) {
+ ;CHECK-LABEL: stack_fold_movsldup
+ ;CHECK: movsldup {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = shufflevector <4 x float> %a0, <4 x float> undef, <4 x i32> <i32 0, i32 0, i32 2, i32 2>
+ ret <4 x float> %2
+}
+
+define <2 x double> @stack_fold_mulpd(<2 x double> %a0, <2 x double> %a1) {
+ ;CHECK-LABEL: stack_fold_mulpd
+ ;CHECK: mulpd {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = fmul <2 x double> %a0, %a1
+ ret <2 x double> %2
+}
+
+define <4 x float> @stack_fold_mulps(<4 x float> %a0, <4 x float> %a1) {
+ ;CHECK-LABEL: stack_fold_mulps
+ ;CHECK: mulps {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = fmul <4 x float> %a0, %a1
+ ret <4 x float> %2
+}
+
+define double @stack_fold_mulsd(double %a0, double %a1) {
+ ;CHECK-LABEL: stack_fold_mulsd
+ ;CHECK: mulsd {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 8-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = fmul double %a0, %a1
+ ret double %2
+}
+
+define <2 x double> @stack_fold_mulsd_int(<2 x double> %a0, <2 x double> %a1) {
+ ;CHECK-LABEL: stack_fold_mulsd_int
+ ;CHECK: mulsd {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <2 x double> @llvm.x86.sse2.mul.sd(<2 x double> %a0, <2 x double> %a1)
+ ret <2 x double> %2
+}
+declare <2 x double> @llvm.x86.sse2.mul.sd(<2 x double>, <2 x double>) nounwind readnone
+
+define float @stack_fold_mulss(float %a0, float %a1) {
+ ;CHECK-LABEL: stack_fold_mulss
+ ;CHECK: mulss {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 4-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = fmul float %a0, %a1
+ ret float %2
+}
+
+define <4 x float> @stack_fold_mulss_int(<4 x float> %a0, <4 x float> %a1) {
+ ;CHECK-LABEL: stack_fold_mulss_int
+ ;CHECK: mulss {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <4 x float> @llvm.x86.sse.mul.ss(<4 x float> %a0, <4 x float> %a1)
+ ret <4 x float> %2
+}
+declare <4 x float> @llvm.x86.sse.mul.ss(<4 x float>, <4 x float>) nounwind readnone
+
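+; The bitwise tests (orpd/orps and, below, xorpd/xorps) work on integer bitcasts; the
+; trailing fadd pins the result to the FP execution domain so the folded instruction
+; keeps its packed-double/packed-single form.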
+define <2 x double> @stack_fold_orpd(<2 x double> %a0, <2 x double> %a1) {
+ ;CHECK-LABEL: stack_fold_orpd
+ ;CHECK: orpd {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = bitcast <2 x double> %a0 to <2 x i64>
+ %3 = bitcast <2 x double> %a1 to <2 x i64>
+ %4 = or <2 x i64> %2, %3
+ %5 = bitcast <2 x i64> %4 to <2 x double>
+ ; fadd forces execution domain
+ %6 = fadd <2 x double> %5, <double 0x0, double 0x0>
+ ret <2 x double> %6
+}
+
+define <4 x float> @stack_fold_orps(<4 x float> %a0, <4 x float> %a1) {
+ ;CHECK-LABEL: stack_fold_orps
+ ;CHECK: orps {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = bitcast <4 x float> %a0 to <2 x i64>
+ %3 = bitcast <4 x float> %a1 to <2 x i64>
+ %4 = or <2 x i64> %2, %3
+ %5 = bitcast <2 x i64> %4 to <4 x float>
+ ; fadd forces execution domain
+ %6 = fadd <4 x float> %5, <float 0x0, float 0x0, float 0x0, float 0x0>
+ ret <4 x float> %6
+}
+
+; TODO stack_fold_rcpps
+
+define <4 x float> @stack_fold_rcpps_int(<4 x float> %a0) {
+ ;CHECK-LABEL: stack_fold_rcpps_int
+ ;CHECK: rcpps {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <4 x float> @llvm.x86.sse.rcp.ps(<4 x float> %a0)
+ ret <4 x float> %2
+}
+declare <4 x float> @llvm.x86.sse.rcp.ps(<4 x float>) nounwind readnone
+
+; TODO stack_fold_rcpss
+; TODO stack_fold_rcpss_int
+
+define <2 x double> @stack_fold_roundpd(<2 x double> %a0) {
+ ;CHECK-LABEL: stack_fold_roundpd
+ ;CHECK: roundpd $7, {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <2 x double> @llvm.x86.sse41.round.pd(<2 x double> %a0, i32 7)
+ ret <2 x double> %2
+}
+declare <2 x double> @llvm.x86.sse41.round.pd(<2 x double>, i32) nounwind readnone
+
+define <4 x float> @stack_fold_roundps(<4 x float> %a0) {
+ ;CHECK-LABEL: stack_fold_roundps
+ ;CHECK: roundps $7, {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <4 x float> @llvm.x86.sse41.round.ps(<4 x float> %a0, i32 7)
+ ret <4 x float> %2
+}
+declare <4 x float> @llvm.x86.sse41.round.ps(<4 x float>, i32) nounwind readnone
+
+; TODO stack_fold_roundsd
+; TODO stack_fold_roundsd_int
+
+; TODO stack_fold_roundss
+; TODO stack_fold_roundss_int
+
+; TODO stack_fold_rsqrtps
+
+define <4 x float> @stack_fold_rsqrtps_int(<4 x float> %a0) {
+ ;CHECK-LABEL: stack_fold_rsqrtps_int
+ ;CHECK: rsqrtps {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <4 x float> @llvm.x86.sse.rsqrt.ps(<4 x float> %a0)
+ ret <4 x float> %2
+}
+declare <4 x float> @llvm.x86.sse.rsqrt.ps(<4 x float>) nounwind readnone
+
+; TODO stack_fold_rsqrtss
+; TODO stack_fold_rsqrtss_int
+
+define <2 x double> @stack_fold_shufpd(<2 x double> %a0, <2 x double> %a1) {
+ ;CHECK-LABEL: stack_fold_shufpd
+ ;CHECK: shufpd $1, {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = shufflevector <2 x double> %a0, <2 x double> %a1, <2 x i32> <i32 1, i32 2>
+ ret <2 x double> %2
+}
+
+define <4 x float> @stack_fold_shufps(<4 x float> %a0, <4 x float> %a1) {
+ ;CHECK-LABEL: stack_fold_shufps
+ ;CHECK: shufps $200, {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = shufflevector <4 x float> %a0, <4 x float> %a1, <4 x i32> <i32 0, i32 2, i32 4, i32 7>
+ ret <4 x float> %2
+}
+
+define <2 x double> @stack_fold_sqrtpd(<2 x double> %a0) {
+ ;CHECK-LABEL: stack_fold_sqrtpd
+ ;CHECK: sqrtpd {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <2 x double> @llvm.x86.sse2.sqrt.pd(<2 x double> %a0)
+ ret <2 x double> %2
+}
+declare <2 x double> @llvm.x86.sse2.sqrt.pd(<2 x double>) nounwind readnone
+
+define <4 x float> @stack_fold_sqrtps(<4 x float> %a0) {
+ ;CHECK-LABEL: stack_fold_sqrtps
+ ;CHECK: sqrtps {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <4 x float> @llvm.x86.sse.sqrt.ps(<4 x float> %a0)
+ ret <4 x float> %2
+}
+declare <4 x float> @llvm.x86.sse.sqrt.ps(<4 x float>) nounwind readnone
+
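+; The declares below are kept alongside the scalar sqrt tests that are still TODO.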
+; TODO stack_fold_sqrtsd
+declare double @llvm.sqrt.f64(double) nounwind readnone
+
+; TODO stack_fold_sqrtsd_int
+declare <2 x double> @llvm.x86.sse2.sqrt.sd(<2 x double>) nounwind readnone
+
+; TODO stack_fold_sqrtss
+declare float @llvm.sqrt.f32(float) nounwind readnone
+
+; TODO stack_fold_sqrtss_int
+declare <4 x float> @llvm.x86.sse.sqrt.ss(<4 x float>) nounwind readnone
+
+define <2 x double> @stack_fold_subpd(<2 x double> %a0, <2 x double> %a1) {
+ ;CHECK-LABEL: stack_fold_subpd
+ ;CHECK: subpd {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = fsub <2 x double> %a0, %a1
+ ret <2 x double> %2
+}
+
+define <4 x float> @stack_fold_subps(<4 x float> %a0, <4 x float> %a1) {
+ ;CHECK-LABEL: stack_fold_subps
+ ;CHECK: subps {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = fsub <4 x float> %a0, %a1
+ ret <4 x float> %2
+}
+
+define double @stack_fold_subsd(double %a0, double %a1) {
+ ;CHECK-LABEL: stack_fold_subsd
+ ;CHECK: subsd {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 8-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = fsub double %a0, %a1
+ ret double %2
+}
+
+define <2 x double> @stack_fold_subsd_int(<2 x double> %a0, <2 x double> %a1) {
+ ;CHECK-LABEL: stack_fold_subsd_int
+ ;CHECK: subsd {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <2 x double> @llvm.x86.sse2.sub.sd(<2 x double> %a0, <2 x double> %a1)
+ ret <2 x double> %2
+}
+declare <2 x double> @llvm.x86.sse2.sub.sd(<2 x double>, <2 x double>) nounwind readnone
+
+define float @stack_fold_subss(float %a0, float %a1) {
+ ;CHECK-LABEL: stack_fold_subss
+ ;CHECK: subss {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 4-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = fsub float %a0, %a1
+ ret float %2
+}
+
+define <4 x float> @stack_fold_subss_int(<4 x float> %a0, <4 x float> %a1) {
+ ;CHECK-LABEL: stack_fold_subss_int
+ ;CHECK: subss {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <4 x float> @llvm.x86.sse.sub.ss(<4 x float> %a0, <4 x float> %a1)
+ ret <4 x float> %2
+}
+declare <4 x float> @llvm.x86.sse.sub.ss(<4 x float>, <4 x float>) nounwind readnone
+
+define i32 @stack_fold_ucomisd(double %a0, double %a1) {
+ ;CHECK-LABEL: stack_fold_ucomisd
+ ;CHECK: ucomisd {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 8-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = fcmp ueq double %a0, %a1
+ %3 = select i1 %2, i32 1, i32 -1
+ ret i32 %3
+}
+
+define i32 @stack_fold_ucomisd_int(<2 x double> %a0, <2 x double> %a1) {
+ ;CHECK-LABEL: stack_fold_ucomisd_int
+ ;CHECK: ucomisd {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call i32 @llvm.x86.sse2.ucomieq.sd(<2 x double> %a0, <2 x double> %a1)
+ ret i32 %2
+}
+declare i32 @llvm.x86.sse2.ucomieq.sd(<2 x double>, <2 x double>) nounwind readnone
+
+define i32 @stack_fold_ucomiss(float %a0, float %a1) {
+ ;CHECK-LABEL: stack_fold_ucomiss
+ ;CHECK: ucomiss {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 4-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = fcmp ueq float %a0, %a1
+ %3 = select i1 %2, i32 1, i32 -1
+ ret i32 %3
+}
+
+define i32 @stack_fold_ucomiss_int(<4 x float> %a0, <4 x float> %a1) {
+ ;CHECK-LABEL: stack_fold_ucomiss_int
+ ;CHECK: ucomiss {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call i32 @llvm.x86.sse.ucomieq.ss(<4 x float> %a0, <4 x float> %a1)
+ ret i32 %2
+}
+declare i32 @llvm.x86.sse.ucomieq.ss(<4 x float>, <4 x float>) nounwind readnone
+
+define <2 x double> @stack_fold_unpckhpd(<2 x double> %a0, <2 x double> %a1) {
+ ;CHECK-LABEL: stack_fold_unpckhpd
+ ;CHECK: unpckhpd {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = shufflevector <2 x double> %a0, <2 x double> %a1, <2 x i32> <i32 1, i32 3>
+ ret <2 x double> %2
+}
+
+define <4 x float> @stack_fold_unpckhps(<4 x float> %a0, <4 x float> %a1) {
+ ;CHECK-LABEL: stack_fold_unpckhps
+ ;CHECK: unpckhps {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = shufflevector <4 x float> %a0, <4 x float> %a1, <4 x i32> <i32 2, i32 6, i32 3, i32 7>
+ ret <4 x float> %2
+}
+
+define <2 x double> @stack_fold_unpcklpd(<2 x double> %a0, <2 x double> %a1) {
+ ;CHECK-LABEL: stack_fold_unpcklpd
+ ;CHECK: unpcklpd {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = shufflevector <2 x double> %a0, <2 x double> %a1, <2 x i32> <i32 0, i32 2>
+ ret <2 x double> %2
+}
+
+define <4 x float> @stack_fold_unpcklps(<4 x float> %a0, <4 x float> %a1) {
+ ;CHECK-LABEL: stack_fold_unpcklps
+ ;CHECK: unpcklps {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = shufflevector <4 x float> %a0, <4 x float> %a1, <4 x i32> <i32 0, i32 4, i32 1, i32 5>
+ ret <4 x float> %2
+}
+
+define <2 x double> @stack_fold_xorpd(<2 x double> %a0, <2 x double> %a1) {
+ ;CHECK-LABEL: stack_fold_xorpd
+ ;CHECK: xorpd {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = bitcast <2 x double> %a0 to <2 x i64>
+ %3 = bitcast <2 x double> %a1 to <2 x i64>
+ %4 = xor <2 x i64> %2, %3
+ %5 = bitcast <2 x i64> %4 to <2 x double>
+ ; fadd forces execution domain
+ %6 = fadd <2 x double> %5, <double 0x0, double 0x0>
+ ret <2 x double> %6
+}
+
+define <4 x float> @stack_fold_xorps(<4 x float> %a0, <4 x float> %a1) {
+ ;CHECK-LABEL: stack_fold_xorps
+ ;CHECK: xorps {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = bitcast <4 x float> %a0 to <2 x i64>
+ %3 = bitcast <4 x float> %a1 to <2 x i64>
+ %4 = xor <2 x i64> %2, %3
+ %5 = bitcast <2 x i64> %4 to <4 x float>
+ ; fadd forces execution domain
+ %6 = fadd <4 x float> %5, <float 0x0, float 0x0, float 0x0, float 0x0>
+ ret <4 x float> %6
+}
diff --git a/test/CodeGen/X86/stack-folding-int-avx1.ll b/test/CodeGen/X86/stack-folding-int-avx1.ll
new file mode 100644
index 0000000..2387493
--- /dev/null
+++ b/test/CodeGen/X86/stack-folding-int-avx1.ll
@@ -0,0 +1,1152 @@
+; RUN: llc -O3 -disable-peephole -mtriple=x86_64-unknown-unknown -mcpu=x86-64 -mattr=+avx,+aes,+pclmul < %s | FileCheck %s
+
+target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
+target triple = "x86_64-unknown-unknown"
+
+; Stack reload folding tests.
+;
+; By including a nop call with side effects, we can force a partial register spill of the
+; relevant registers and check that the reload is correctly folded into the instruction.
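+; The asm output plus the long clobber lists leave almost no free registers, so the
+; incoming arguments have to be spilled across the call (the GPR variants clobber the
+; argument registers themselves). -disable-peephole is presumably what keeps the fold
+; in the spiller rather than the peephole load-folding pass.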
+
+define <2 x i64> @stack_fold_aesdec(<2 x i64> %a0, <2 x i64> %a1) {
+ ;CHECK-LABEL: stack_fold_aesdec
+ ;CHECK: vaesdec {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <2 x i64> @llvm.x86.aesni.aesdec(<2 x i64> %a0, <2 x i64> %a1)
+ ret <2 x i64> %2
+}
+declare <2 x i64> @llvm.x86.aesni.aesdec(<2 x i64>, <2 x i64>) nounwind readnone
+
+define <2 x i64> @stack_fold_aesdeclast(<2 x i64> %a0, <2 x i64> %a1) {
+ ;CHECK-LABEL: stack_fold_aesdeclast
+ ;CHECK: vaesdeclast {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <2 x i64> @llvm.x86.aesni.aesdeclast(<2 x i64> %a0, <2 x i64> %a1)
+ ret <2 x i64> %2
+}
+declare <2 x i64> @llvm.x86.aesni.aesdeclast(<2 x i64>, <2 x i64>) nounwind readnone
+
+define <2 x i64> @stack_fold_aesenc(<2 x i64> %a0, <2 x i64> %a1) {
+ ;CHECK-LABEL: stack_fold_aesenc
+ ;CHECK: vaesenc {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <2 x i64> @llvm.x86.aesni.aesenc(<2 x i64> %a0, <2 x i64> %a1)
+ ret <2 x i64> %2
+}
+declare <2 x i64> @llvm.x86.aesni.aesenc(<2 x i64>, <2 x i64>) nounwind readnone
+
+define <2 x i64> @stack_fold_aesenclast(<2 x i64> %a0, <2 x i64> %a1) {
+ ;CHECK-LABEL: stack_fold_aesenclast
+ ;CHECK: vaesenclast {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <2 x i64> @llvm.x86.aesni.aesenclast(<2 x i64> %a0, <2 x i64> %a1)
+ ret <2 x i64> %2
+}
+declare <2 x i64> @llvm.x86.aesni.aesenclast(<2 x i64>, <2 x i64>) nounwind readnone
+
+define <2 x i64> @stack_fold_aesimc(<2 x i64> %a0) {
+ ;CHECK-LABEL: stack_fold_aesimc
+ ;CHECK: vaesimc {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <2 x i64> @llvm.x86.aesni.aesimc(<2 x i64> %a0)
+ ret <2 x i64> %2
+}
+declare <2 x i64> @llvm.x86.aesni.aesimc(<2 x i64>) nounwind readnone
+
+define <2 x i64> @stack_fold_aeskeygenassist(<2 x i64> %a0) {
+ ;CHECK-LABEL: stack_fold_aeskeygenassist
+ ;CHECK: vaeskeygenassist $7, {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <2 x i64> @llvm.x86.aesni.aeskeygenassist(<2 x i64> %a0, i8 7)
+ ret <2 x i64> %2
+}
+declare <2 x i64> @llvm.x86.aesni.aeskeygenassist(<2 x i64>, i8) nounwind readnone
+
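+; The movd/movq tests clobber the general purpose registers instead of the xmm ones:
+; the load tests fold the reload of a spilled scalar, while the store tests check that
+; the extracted element is spilled directly with movd/movq.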
+define <4 x i32> @stack_fold_movd_load(i32 %a0) {
+ ;CHECK-LABEL: stack_fold_movd_load
+ ;CHECK: movd {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 4-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{rax},~{rbx},~{rcx},~{rdx},~{rsi},~{rdi},~{rbp},~{r8},~{r9},~{r10},~{r11},~{r12},~{r13},~{r14},~{r15}"()
+ %2 = insertelement <4 x i32> zeroinitializer, i32 %a0, i32 0
+ ; add forces execution domain
+ %3 = add <4 x i32> %2, <i32 1, i32 1, i32 1, i32 1>
+ ret <4 x i32> %3
+}
+
+define i32 @stack_fold_movd_store(<4 x i32> %a0) {
+ ;CHECK-LABEL: stack_fold_movd_store
+ ;CHECK: movd {{%xmm[0-9][0-9]*}}, {{-?[0-9]*}}(%rsp) {{.*#+}} 4-byte Folded Spill
+ ; add forces execution domain
+ %1 = add <4 x i32> %a0, <i32 1, i32 1, i32 1, i32 1>
+ %2 = extractelement <4 x i32> %1, i32 0
+ %3 = tail call <2 x i64> asm sideeffect "nop", "=x,~{rax},~{rbx},~{rcx},~{rdx},~{rsi},~{rdi},~{rbp},~{r8},~{r9},~{r10},~{r11},~{r12},~{r13},~{r14},~{r15}"()
+ ret i32 %2
+}
+
+define <2 x i64> @stack_fold_movq_load(<2 x i64> %a0) {
+ ;CHECK-LABEL: stack_fold_movq_load
+ ;CHECK: movq {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = shufflevector <2 x i64> %a0, <2 x i64> zeroinitializer, <2 x i32> <i32 0, i32 2>
+ ret <2 x i64> %2
+}
+
+define i64 @stack_fold_movq_store(<2 x i64> %a0) {
+ ;CHECK-LABEL: stack_fold_movq_store
+ ;CHECK: movq {{%xmm[0-9][0-9]*}}, {{-?[0-9]*}}(%rsp) {{.*#+}} 8-byte Folded Spill
+ %1 = extractelement <2 x i64> %a0, i32 0
+ %2 = tail call <2 x i64> asm sideeffect "nop", "=x,~{rax},~{rbx},~{rcx},~{rdx},~{rsi},~{rdi},~{rbp},~{r8},~{r9},~{r10},~{r11},~{r12},~{r13},~{r14},~{r15}"()
+ ret i64 %1
+}
+
+define <8 x i16> @stack_fold_mpsadbw(<16 x i8> %a0, <16 x i8> %a1) {
+ ;CHECK-LABEL: stack_fold_mpsadbw
+ ;CHECK: vmpsadbw $7, {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <8 x i16> @llvm.x86.sse41.mpsadbw(<16 x i8> %a0, <16 x i8> %a1, i8 7)
+ ret <8 x i16> %2
+}
+declare <8 x i16> @llvm.x86.sse41.mpsadbw(<16 x i8>, <16 x i8>, i8) nounwind readnone
+
+define <16 x i8> @stack_fold_pabsb(<16 x i8> %a0) {
+ ;CHECK-LABEL: stack_fold_pabsb
+ ;CHECK: vpabsb {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <16 x i8> @llvm.x86.ssse3.pabs.b.128(<16 x i8> %a0)
+ ret <16 x i8> %2
+}
+declare <16 x i8> @llvm.x86.ssse3.pabs.b.128(<16 x i8>) nounwind readnone
+
+define <4 x i32> @stack_fold_pabsd(<4 x i32> %a0) {
+ ;CHECK-LABEL: stack_fold_pabsd
+ ;CHECK: vpabsd {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <4 x i32> @llvm.x86.ssse3.pabs.d.128(<4 x i32> %a0)
+ ret <4 x i32> %2
+}
+declare <4 x i32> @llvm.x86.ssse3.pabs.d.128(<4 x i32>) nounwind readnone
+
+define <8 x i16> @stack_fold_pabsw(<8 x i16> %a0) {
+ ;CHECK-LABEL: stack_fold_pabsw
+ ;CHECK: vpabsw {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <8 x i16> @llvm.x86.ssse3.pabs.w.128(<8 x i16> %a0)
+ ret <8 x i16> %2
+}
+declare <8 x i16> @llvm.x86.ssse3.pabs.w.128(<8 x i16>) nounwind readnone
+
+define <8 x i16> @stack_fold_packssdw(<4 x i32> %a0, <4 x i32> %a1) {
+ ;CHECK-LABEL: stack_fold_packssdw
+ ;CHECK: vpackssdw {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <8 x i16> @llvm.x86.sse2.packssdw.128(<4 x i32> %a0, <4 x i32> %a1)
+ ret <8 x i16> %2
+}
+declare <8 x i16> @llvm.x86.sse2.packssdw.128(<4 x i32>, <4 x i32>) nounwind readnone
+
+define <16 x i8> @stack_fold_packsswb(<8 x i16> %a0, <8 x i16> %a1) {
+ ;CHECK-LABEL: stack_fold_packsswb
+ ;CHECK: vpacksswb {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <16 x i8> @llvm.x86.sse2.packsswb.128(<8 x i16> %a0, <8 x i16> %a1)
+ ret <16 x i8> %2
+}
+declare <16 x i8> @llvm.x86.sse2.packsswb.128(<8 x i16>, <8 x i16>) nounwind readnone
+
+define <8 x i16> @stack_fold_packusdw(<4 x i32> %a0, <4 x i32> %a1) {
+ ;CHECK-LABEL: stack_fold_packusdw
+ ;CHECK: vpackusdw {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <8 x i16> @llvm.x86.sse41.packusdw(<4 x i32> %a0, <4 x i32> %a1)
+ ret <8 x i16> %2
+}
+declare <8 x i16> @llvm.x86.sse41.packusdw(<4 x i32>, <4 x i32>) nounwind readnone
+
+define <16 x i8> @stack_fold_packuswb(<8 x i16> %a0, <8 x i16> %a1) {
+ ;CHECK-LABEL: stack_fold_packuswb
+ ;CHECK: vpackuswb {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <16 x i8> @llvm.x86.sse2.packuswb.128(<8 x i16> %a0, <8 x i16> %a1)
+ ret <16 x i8> %2
+}
+declare <16 x i8> @llvm.x86.sse2.packuswb.128(<8 x i16>, <8 x i16>) nounwind readnone
+
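+; Vector adds: wrapping adds are expressed as plain IR 'add', saturating adds
+; via the SSE2 intrinsics.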
+define <16 x i8> @stack_fold_paddb(<16 x i8> %a0, <16 x i8> %a1) {
+ ;CHECK-LABEL: stack_fold_paddb
+ ;CHECK: vpaddb {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = add <16 x i8> %a0, %a1
+ ret <16 x i8> %2
+}
+
+define <4 x i32> @stack_fold_paddd(<4 x i32> %a0, <4 x i32> %a1) {
+ ;CHECK-LABEL: stack_fold_paddd
+ ;CHECK: vpaddd {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = add <4 x i32> %a0, %a1
+ ret <4 x i32> %2
+}
+
+define <2 x i64> @stack_fold_paddq(<2 x i64> %a0, <2 x i64> %a1) {
+ ;CHECK-LABEL: stack_fold_paddq
+ ;CHECK: vpaddq {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = add <2 x i64> %a0, %a1
+ ret <2 x i64> %2
+}
+
+define <16 x i8> @stack_fold_paddsb(<16 x i8> %a0, <16 x i8> %a1) {
+ ;CHECK-LABEL: stack_fold_paddsb
+ ;CHECK: vpaddsb {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <16 x i8> @llvm.x86.sse2.padds.b(<16 x i8> %a0, <16 x i8> %a1)
+ ret <16 x i8> %2
+}
+declare <16 x i8> @llvm.x86.sse2.padds.b(<16 x i8>, <16 x i8>) nounwind readnone
+
+define <8 x i16> @stack_fold_paddsw(<8 x i16> %a0, <8 x i16> %a1) {
+ ;CHECK-LABEL: stack_fold_paddsw
+ ;CHECK: vpaddsw {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <8 x i16> @llvm.x86.sse2.padds.w(<8 x i16> %a0, <8 x i16> %a1)
+ ret <8 x i16> %2
+}
+declare <8 x i16> @llvm.x86.sse2.padds.w(<8 x i16>, <8 x i16>) nounwind readnone
+
+define <16 x i8> @stack_fold_paddusb(<16 x i8> %a0, <16 x i8> %a1) {
+ ;CHECK-LABEL: stack_fold_paddusb
+ ;CHECK: vpaddusb {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <16 x i8> @llvm.x86.sse2.paddus.b(<16 x i8> %a0, <16 x i8> %a1)
+ ret <16 x i8> %2
+}
+declare <16 x i8> @llvm.x86.sse2.paddus.b(<16 x i8>, <16 x i8>) nounwind readnone
+
+define <8 x i16> @stack_fold_paddusw(<8 x i16> %a0, <8 x i16> %a1) {
+ ;CHECK-LABEL: stack_fold_paddusw
+ ;CHECK: vpaddusw {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <8 x i16> @llvm.x86.sse2.paddus.w(<8 x i16> %a0, <8 x i16> %a1)
+ ret <8 x i16> %2
+}
+declare <8 x i16> @llvm.x86.sse2.paddus.w(<8 x i16>, <8 x i16>) nounwind readnone
+
+define <8 x i16> @stack_fold_paddw(<8 x i16> %a0, <8 x i16> %a1) {
+ ;CHECK-LABEL: stack_fold_paddw
+ ;CHECK: vpaddw {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = add <8 x i16> %a0, %a1
+ ret <8 x i16> %2
+}
+
+define <16 x i8> @stack_fold_palignr(<16 x i8> %a0, <16 x i8> %a1) {
+ ;CHECK-LABEL: stack_fold_palignr
+ ;CHECK: vpalignr $1, {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = shufflevector <16 x i8> %a1, <16 x i8> %a0, <16 x i32> <i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16>
+ ret <16 x i8> %2
+}
+
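+; Bitwise logic: the trailing vector add pins the integer execution domain so
+; that vpand/vpandn are selected rather than their floating-point equivalents.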
+define <16 x i8> @stack_fold_pand(<16 x i8> %a0, <16 x i8> %a1) {
+ ;CHECK-LABEL: stack_fold_pand
+ ;CHECK: vpand {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = and <16 x i8> %a0, %a1
+ ; add forces execution domain
+ %3 = add <16 x i8> %2, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
+ ret <16 x i8> %3
+}
+
+define <16 x i8> @stack_fold_pandn(<16 x i8> %a0, <16 x i8> %a1) {
+ ;CHECK-LABEL: stack_fold_pandn
+ ;CHECK: vpandn {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = xor <16 x i8> %a0, <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>
+ %3 = and <16 x i8> %2, %a1
+ ; add forces execution domain
+ %4 = add <16 x i8> %3, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
+ ret <16 x i8> %4
+}
+
+define <16 x i8> @stack_fold_pavgb(<16 x i8> %a0, <16 x i8> %a1) {
+ ;CHECK-LABEL: stack_fold_pavgb
+ ;CHECK: vpavgb {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <16 x i8> @llvm.x86.sse2.pavg.b(<16 x i8> %a0, <16 x i8> %a1)
+ ret <16 x i8> %2
+}
+declare <16 x i8> @llvm.x86.sse2.pavg.b(<16 x i8>, <16 x i8>) nounwind readnone
+
+define <8 x i16> @stack_fold_pavgw(<8 x i16> %a0, <8 x i16> %a1) {
+ ;CHECK-LABEL: stack_fold_pavgw
+ ;CHECK: vpavgw {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <8 x i16> @llvm.x86.sse2.pavg.w(<8 x i16> %a0, <8 x i16> %a1)
+ ret <8 x i16> %2
+}
+declare <8 x i16> @llvm.x86.sse2.pavg.w(<8 x i16>, <8 x i16>) nounwind readnone
+
+define <16 x i8> @stack_fold_pblendvb(<16 x i8> %a0, <16 x i8> %a1, <16 x i8> %c) {
+ ;CHECK-LABEL: stack_fold_pblendvb
+ ;CHECK: vpblendvb {{%xmm[0-9][0-9]*}}, {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <16 x i8> @llvm.x86.sse41.pblendvb(<16 x i8> %a1, <16 x i8> %c, <16 x i8> %a0)
+ ret <16 x i8> %2
+}
+declare <16 x i8> @llvm.x86.sse41.pblendvb(<16 x i8>, <16 x i8>, <16 x i8>) nounwind readnone
+
+define <8 x i16> @stack_fold_pblendw(<8 x i16> %a0, <8 x i16> %a1) {
+ ;CHECK-LABEL: stack_fold_pblendw
+ ;CHECK: vpblendw $7, {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <8 x i16> @llvm.x86.sse41.pblendw(<8 x i16> %a0, <8 x i16> %a1, i8 7)
+ ret <8 x i16> %2
+}
+declare <8 x i16> @llvm.x86.sse41.pblendw(<8 x i16>, <8 x i16>, i8) nounwind readnone
+
+define <2 x i64> @stack_fold_pclmulqdq(<2 x i64> %a0, <2 x i64> %a1) {
+ ;CHECK-LABEL: stack_fold_pclmulqdq
+ ;CHECK: vpclmulqdq $0, {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <2 x i64> @llvm.x86.pclmulqdq(<2 x i64> %a0, <2 x i64> %a1, i8 0)
+ ret <2 x i64> %2
+}
+declare <2 x i64> @llvm.x86.pclmulqdq(<2 x i64>, <2 x i64>, i8) nounwind readnone
+
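+; Equality compares are written as icmp+sext, which lowers to vpcmpeq*.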
+define <16 x i8> @stack_fold_pcmpeqb(<16 x i8> %a0, <16 x i8> %a1) {
+ ;CHECK-LABEL: stack_fold_pcmpeqb
+ ;CHECK: vpcmpeqb {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = icmp eq <16 x i8> %a0, %a1
+ %3 = sext <16 x i1> %2 to <16 x i8>
+ ret <16 x i8> %3
+}
+
+define <4 x i32> @stack_fold_pcmpeqd(<4 x i32> %a0, <4 x i32> %a1) {
+ ;CHECK-LABEL: stack_fold_pcmpeqd
+ ;CHECK: vpcmpeqd {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = icmp eq <4 x i32> %a0, %a1
+ %3 = sext <4 x i1> %2 to <4 x i32>
+ ret <4 x i32> %3
+}
+
+define <2 x i64> @stack_fold_pcmpeqq(<2 x i64> %a0, <2 x i64> %a1) {
+ ;CHECK-LABEL: stack_fold_pcmpeqq
+ ;CHECK: vpcmpeqq {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = icmp eq <2 x i64> %a0, %a1
+ %3 = sext <2 x i1> %2 to <2 x i64>
+ ret <2 x i64> %3
+}
+
+define <8 x i16> @stack_fold_pcmpeqw(<8 x i16> %a0, <8 x i16> %a1) {
+ ;CHECK-LABEL: stack_fold_pcmpeqw
+ ;CHECK: vpcmpeqw {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = icmp eq <8 x i16> %a0, %a1
+ %3 = sext <8 x i1> %2 to <8 x i16>
+ ret <8 x i16> %3
+}
+
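+; Explicit-length string compares also clobber rax, as pcmpestri/pcmpestrm
+; take implicit length operands in eax/edx.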
+define i32 @stack_fold_pcmpestri(<16 x i8> %a0, <16 x i8> %a1) {
+ ;CHECK-LABEL: stack_fold_pcmpestri
+ ;CHECK: vpcmpestri $7, {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{rax},~{flags}"()
+ %2 = call i32 @llvm.x86.sse42.pcmpestri128(<16 x i8> %a0, i32 7, <16 x i8> %a1, i32 7, i8 7)
+ ret i32 %2
+}
+declare i32 @llvm.x86.sse42.pcmpestri128(<16 x i8>, i32, <16 x i8>, i32, i8) nounwind readnone
+
+define <16 x i8> @stack_fold_pcmpestrm(<16 x i8> %a0, <16 x i8> %a1) {
+ ;CHECK-LABEL: stack_fold_pcmpestrm
+ ;CHECK: vpcmpestrm $7, {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{rax},~{flags}"()
+ %2 = call <16 x i8> @llvm.x86.sse42.pcmpestrm128(<16 x i8> %a0, i32 7, <16 x i8> %a1, i32 7, i8 7)
+ ret <16 x i8> %2
+}
+declare <16 x i8> @llvm.x86.sse42.pcmpestrm128(<16 x i8>, i32, <16 x i8>, i32, i8) nounwind readnone
+
+define <16 x i8> @stack_fold_pcmpgtb(<16 x i8> %a0, <16 x i8> %a1) {
+ ;CHECK-LABEL: stack_fold_pcmpgtb
+ ;CHECK: vpcmpgtb {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = icmp sgt <16 x i8> %a0, %a1
+ %3 = sext <16 x i1> %2 to <16 x i8>
+ ret <16 x i8> %3
+}
+
+define <4 x i32> @stack_fold_pcmpgtd(<4 x i32> %a0, <4 x i32> %a1) {
+ ;CHECK-LABEL: stack_fold_pcmpgtd
+ ;CHECK: vpcmpgtd {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = icmp sgt <4 x i32> %a0, %a1
+ %3 = sext <4 x i1> %2 to <4 x i32>
+ ret <4 x i32> %3
+}
+
+define <2 x i64> @stack_fold_pcmpgtq(<2 x i64> %a0, <2 x i64> %a1) {
+ ;CHECK-LABEL: stack_fold_pcmpgtq
+ ;CHECK: vpcmpgtq {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = icmp sgt <2 x i64> %a0, %a1
+ %3 = sext <2 x i1> %2 to <2 x i64>
+ ret <2 x i64> %3
+}
+
+define <8 x i16> @stack_fold_pcmpgtw(<8 x i16> %a0, <8 x i16> %a1) {
+ ;CHECK-LABEL: stack_fold_pcmpgtw
+ ;CHECK: vpcmpgtw {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = icmp sgt <8 x i16> %a0, %a1
+ %3 = sext <8 x i1> %2 to <8 x i16>
+ ret <8 x i16> %3
+}
+
+define i32 @stack_fold_pcmpistri(<16 x i8> %a0, <16 x i8> %a1) {
+ ;CHECK-LABEL: stack_fold_pcmpistri
+ ;CHECK: vpcmpistri $7, {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call i32 @llvm.x86.sse42.pcmpistri128(<16 x i8> %a0, <16 x i8> %a1, i8 7)
+ ret i32 %2
+}
+declare i32 @llvm.x86.sse42.pcmpistri128(<16 x i8>, <16 x i8>, i8) nounwind readnone
+
+define <16 x i8> @stack_fold_pcmpistrm(<16 x i8> %a0, <16 x i8> %a1) {
+ ;CHECK-LABEL: stack_fold_pcmpistrm
+ ;CHECK: vpcmpistrm $7, {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <16 x i8> @llvm.x86.sse42.pcmpistrm128(<16 x i8> %a0, <16 x i8> %a1, i8 7)
+ ret <16 x i8> %2
+}
+declare <16 x i8> @llvm.x86.sse42.pcmpistrm128(<16 x i8>, <16 x i8>, i8) nounwind readnone
+
+; TODO stack_fold_pextrb
+
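+; Extracts fold in the opposite direction: the extracted scalar is spilled to
+; the stack across the asm and reloaded into a GPR.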
+define i32 @stack_fold_pextrd(<4 x i32> %a0) {
+ ;CHECK-LABEL: stack_fold_pextrd
+ ;CHECK: vpextrd $1, {{%xmm[0-9][0-9]*}}, {{-?[0-9]*}}(%rsp) {{.*#+}} 4-byte Folded Spill
+ ;CHECK: movl {{-?[0-9]*}}(%rsp), %eax {{.*#+}} 4-byte Reload
+ %1 = extractelement <4 x i32> %a0, i32 1
+ %2 = tail call <2 x i64> asm sideeffect "nop", "=x,~{rax},~{rbx},~{rcx},~{rdx},~{rsi},~{rdi},~{rbp},~{r8},~{r9},~{r10},~{r11},~{r12},~{r13},~{r14},~{r15}"()
+ ret i32 %1
+}
+
+define i64 @stack_fold_pextrq(<2 x i64> %a0) {
+ ;CHECK-LABEL: stack_fold_pextrq
+ ;CHECK: vpextrq $1, {{%xmm[0-9][0-9]*}}, {{-?[0-9]*}}(%rsp) {{.*#+}} 8-byte Folded Spill
+ ;CHECK: movq {{-?[0-9]*}}(%rsp), %rax {{.*#+}} 8-byte Reload
+ %1 = extractelement <2 x i64> %a0, i32 1
+ %2 = tail call <2 x i64> asm sideeffect "nop", "=x,~{rax},~{rbx},~{rcx},~{rdx},~{rsi},~{rdi},~{rbp},~{r8},~{r9},~{r10},~{r11},~{r12},~{r13},~{r14},~{r15}"()
+ ret i64 %1
+}
+
+; TODO stack_fold_pextrw
+
+define <4 x i32> @stack_fold_phaddd(<4 x i32> %a0, <4 x i32> %a1) {
+ ;CHECK-LABEL: stack_fold_phaddd
+ ;CHECK: vphaddd {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <4 x i32> @llvm.x86.ssse3.phadd.d.128(<4 x i32> %a0, <4 x i32> %a1)
+ ret <4 x i32> %2
+}
+declare <4 x i32> @llvm.x86.ssse3.phadd.d.128(<4 x i32>, <4 x i32>) nounwind readnone
+
+define <8 x i16> @stack_fold_phaddsw(<8 x i16> %a0, <8 x i16> %a1) {
+ ;CHECK-LABEL: stack_fold_phaddsw
+ ;CHECK: vphaddsw {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <8 x i16> @llvm.x86.ssse3.phadd.sw.128(<8 x i16> %a0, <8 x i16> %a1)
+ ret <8 x i16> %2
+}
+declare <8 x i16> @llvm.x86.ssse3.phadd.sw.128(<8 x i16>, <8 x i16>) nounwind readnone
+
+define <8 x i16> @stack_fold_phaddw(<8 x i16> %a0, <8 x i16> %a1) {
+ ;CHECK-LABEL: stack_fold_phaddw
+ ;CHECK: vphaddw {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <8 x i16> @llvm.x86.ssse3.phadd.w.128(<8 x i16> %a0, <8 x i16> %a1)
+ ret <8 x i16> %2
+}
+declare <8 x i16> @llvm.x86.ssse3.phadd.w.128(<8 x i16>, <8 x i16>) nounwind readnone
+
+define <8 x i16> @stack_fold_phminposuw(<8 x i16> %a0) {
+ ;CHECK-LABEL: stack_fold_phminposuw
+ ;CHECK: vphminposuw {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <8 x i16> @llvm.x86.sse41.phminposuw(<8 x i16> %a0)
+ ret <8 x i16> %2
+}
+declare <8 x i16> @llvm.x86.sse41.phminposuw(<8 x i16>) nounwind readnone
+
+define <4 x i32> @stack_fold_phsubd(<4 x i32> %a0, <4 x i32> %a1) {
+ ;CHECK-LABEL: stack_fold_phsubd
+ ;CHECK: vphsubd {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <4 x i32> @llvm.x86.ssse3.phsub.d.128(<4 x i32> %a0, <4 x i32> %a1)
+ ret <4 x i32> %2
+}
+declare <4 x i32> @llvm.x86.ssse3.phsub.d.128(<4 x i32>, <4 x i32>) nounwind readnone
+
+define <8 x i16> @stack_fold_phsubsw(<8 x i16> %a0, <8 x i16> %a1) {
+ ;CHECK-LABEL: stack_fold_phsubsw
+ ;CHECK: vphsubsw {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <8 x i16> @llvm.x86.ssse3.phsub.sw.128(<8 x i16> %a0, <8 x i16> %a1)
+ ret <8 x i16> %2
+}
+declare <8 x i16> @llvm.x86.ssse3.phsub.sw.128(<8 x i16>, <8 x i16>) nounwind readnone
+
+define <8 x i16> @stack_fold_phsubw(<8 x i16> %a0, <8 x i16> %a1) {
+ ;CHECK-LABEL: stack_fold_phsubw
+ ;CHECK: vphsubw {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <8 x i16> @llvm.x86.ssse3.phsub.w.128(<8 x i16> %a0, <8 x i16> %a1)
+ ret <8 x i16> %2
+}
+declare <8 x i16> @llvm.x86.ssse3.phsub.w.128(<8 x i16>, <8 x i16>) nounwind readnone
+
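+; Inserts clobber the GPRs instead, so the scalar argument is spilled and the
+; vpinsr* folds it back in as a memory operand.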
+define <16 x i8> @stack_fold_pinsrb(<16 x i8> %a0, i8 %a1) {
+ ;CHECK-LABEL: stack_fold_pinsrb
+ ;CHECK: vpinsrb $1, {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 4-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{rax},~{rbx},~{rcx},~{rdx},~{rsi},~{rdi},~{rbp},~{r8},~{r9},~{r10},~{r11},~{r12},~{r13},~{r14},~{r15}"()
+ %2 = insertelement <16 x i8> %a0, i8 %a1, i32 1
+ ret <16 x i8> %2
+}
+
+define <4 x i32> @stack_fold_pinsrd(<4 x i32> %a0, i32 %a1) {
+ ;CHECK-LABEL: stack_fold_pinsrd
+ ;CHECK: vpinsrd $1, {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 4-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{rax},~{rbx},~{rcx},~{rdx},~{rsi},~{rdi},~{rbp},~{r8},~{r9},~{r10},~{r11},~{r12},~{r13},~{r14},~{r15}"()
+ %2 = insertelement <4 x i32> %a0, i32 %a1, i32 1
+ ret <4 x i32> %2
+}
+
+define <2 x i64> @stack_fold_pinsrq(<2 x i64> %a0, i64 %a1) {
+ ;CHECK-LABEL: stack_fold_pinsrq
+ ;CHECK: vpinsrq $1, {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 8-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{rax},~{rbx},~{rcx},~{rdx},~{rsi},~{rdi},~{rbp},~{r8},~{r9},~{r10},~{r11},~{r12},~{r13},~{r14},~{r15}"()
+ %2 = insertelement <2 x i64> %a0, i64 %a1, i32 1
+ ret <2 x i64> %2
+}
+
+define <8 x i16> @stack_fold_pinsrw(<8 x i16> %a0, i16 %a1) {
+ ;CHECK-LABEL: stack_fold_pinsrw
+ ;CHECK: vpinsrw $1, {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 4-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{rax},~{rbx},~{rcx},~{rdx},~{rsi},~{rdi},~{rbp},~{r8},~{r9},~{r10},~{r11},~{r12},~{r13},~{r14},~{r15}"()
+ %2 = insertelement <8 x i16> %a0, i16 %a1, i32 1
+ ret <8 x i16> %2
+}
+
+define <8 x i16> @stack_fold_pmaddubsw(<16 x i8> %a0, <16 x i8> %a1) {
+ ;CHECK-LABEL: stack_fold_pmaddubsw
+ ;CHECK: vpmaddubsw {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <8 x i16> @llvm.x86.ssse3.pmadd.ub.sw.128(<16 x i8> %a0, <16 x i8> %a1)
+ ret <8 x i16> %2
+}
+declare <8 x i16> @llvm.x86.ssse3.pmadd.ub.sw.128(<16 x i8>, <16 x i8>) nounwind readnone
+
+define <4 x i32> @stack_fold_pmaddwd(<8 x i16> %a0, <8 x i16> %a1) {
+ ;CHECK-LABEL: stack_fold_pmaddwd
+ ;CHECK: vpmaddwd {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <4 x i32> @llvm.x86.sse2.pmadd.wd(<8 x i16> %a0, <8 x i16> %a1)
+ ret <4 x i32> %2
+}
+declare <4 x i32> @llvm.x86.sse2.pmadd.wd(<8 x i16>, <8 x i16>) nounwind readnone
+
+define <16 x i8> @stack_fold_pmaxsb(<16 x i8> %a0, <16 x i8> %a1) {
+ ;CHECK-LABEL: stack_fold_pmaxsb
+ ;CHECK: vpmaxsb {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <16 x i8> @llvm.x86.sse41.pmaxsb(<16 x i8> %a0, <16 x i8> %a1)
+ ret <16 x i8> %2
+}
+declare <16 x i8> @llvm.x86.sse41.pmaxsb(<16 x i8>, <16 x i8>) nounwind readnone
+
+define <4 x i32> @stack_fold_pmaxsd(<4 x i32> %a0, <4 x i32> %a1) {
+ ;CHECK-LABEL: stack_fold_pmaxsd
+ ;CHECK: vpmaxsd {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <4 x i32> @llvm.x86.sse41.pmaxsd(<4 x i32> %a0, <4 x i32> %a1)
+ ret <4 x i32> %2
+}
+declare <4 x i32> @llvm.x86.sse41.pmaxsd(<4 x i32>, <4 x i32>) nounwind readnone
+
+define <8 x i16> @stack_fold_pmaxsw(<8 x i16> %a0, <8 x i16> %a1) {
+ ;CHECK-LABEL: stack_fold_pmaxsw
+ ;CHECK: vpmaxsw {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <8 x i16> @llvm.x86.sse2.pmaxs.w(<8 x i16> %a0, <8 x i16> %a1)
+ ret <8 x i16> %2
+}
+declare <8 x i16> @llvm.x86.sse2.pmaxs.w(<8 x i16>, <8 x i16>) nounwind readnone
+
+define <16 x i8> @stack_fold_pmaxub(<16 x i8> %a0, <16 x i8> %a1) {
+ ;CHECK-LABEL: stack_fold_pmaxub
+ ;CHECK: vpmaxub {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <16 x i8> @llvm.x86.sse2.pmaxu.b(<16 x i8> %a0, <16 x i8> %a1)
+ ret <16 x i8> %2
+}
+declare <16 x i8> @llvm.x86.sse2.pmaxu.b(<16 x i8>, <16 x i8>) nounwind readnone
+
+define <4 x i32> @stack_fold_pmaxud(<4 x i32> %a0, <4 x i32> %a1) {
+ ;CHECK-LABEL: stack_fold_pmaxud
+ ;CHECK: vpmaxud {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <4 x i32> @llvm.x86.sse41.pmaxud(<4 x i32> %a0, <4 x i32> %a1)
+ ret <4 x i32> %2
+}
+declare <4 x i32> @llvm.x86.sse41.pmaxud(<4 x i32>, <4 x i32>) nounwind readnone
+
+define <8 x i16> @stack_fold_pmaxuw(<8 x i16> %a0, <8 x i16> %a1) {
+ ;CHECK-LABEL: stack_fold_pmaxuw
+ ;CHECK: vpmaxuw {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <8 x i16> @llvm.x86.sse41.pmaxuw(<8 x i16> %a0, <8 x i16> %a1)
+ ret <8 x i16> %2
+}
+declare <8 x i16> @llvm.x86.sse41.pmaxuw(<8 x i16>, <8 x i16>) nounwind readnone
+
+define <16 x i8> @stack_fold_pminsb(<16 x i8> %a0, <16 x i8> %a1) {
+ ;CHECK-LABEL: stack_fold_pminsb
+ ;CHECK: vpminsb {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <16 x i8> @llvm.x86.sse41.pminsb(<16 x i8> %a0, <16 x i8> %a1)
+ ret <16 x i8> %2
+}
+declare <16 x i8> @llvm.x86.sse41.pminsb(<16 x i8>, <16 x i8>) nounwind readnone
+
+define <4 x i32> @stack_fold_pminsd(<4 x i32> %a0, <4 x i32> %a1) {
+ ;CHECK-LABEL: stack_fold_pminsd
+ ;CHECK: vpminsd {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <4 x i32> @llvm.x86.sse41.pminsd(<4 x i32> %a0, <4 x i32> %a1)
+ ret <4 x i32> %2
+}
+declare <4 x i32> @llvm.x86.sse41.pminsd(<4 x i32>, <4 x i32>) nounwind readnone
+
+define <8 x i16> @stack_fold_pminsw(<8 x i16> %a0, <8 x i16> %a1) {
+ ;CHECK-LABEL: stack_fold_pminsw
+ ;CHECK: vpminsw {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <8 x i16> @llvm.x86.sse2.pmins.w(<8 x i16> %a0, <8 x i16> %a1)
+ ret <8 x i16> %2
+}
+declare <8 x i16> @llvm.x86.sse2.pmins.w(<8 x i16>, <8 x i16>) nounwind readnone
+
+define <16 x i8> @stack_fold_pminub(<16 x i8> %a0, <16 x i8> %a1) {
+ ;CHECK-LABEL: stack_fold_pminub
+ ;CHECK: vpminub {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <16 x i8> @llvm.x86.sse2.pminu.b(<16 x i8> %a0, <16 x i8> %a1)
+ ret <16 x i8> %2
+}
+declare <16 x i8> @llvm.x86.sse2.pminu.b(<16 x i8>, <16 x i8>) nounwind readnone
+
+define <4 x i32> @stack_fold_pminud(<4 x i32> %a0, <4 x i32> %a1) {
+ ;CHECK-LABEL: stack_fold_pminud
+ ;CHECK: vpminud {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <4 x i32> @llvm.x86.sse41.pminud(<4 x i32> %a0, <4 x i32> %a1)
+ ret <4 x i32> %2
+}
+declare <4 x i32> @llvm.x86.sse41.pminud(<4 x i32>, <4 x i32>) nounwind readnone
+
+define <8 x i16> @stack_fold_pminuw(<8 x i16> %a0, <8 x i16> %a1) {
+ ;CHECK-LABEL: stack_fold_pminuw
+ ;CHECK: vpminuw {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <8 x i16> @llvm.x86.sse41.pminuw(<8 x i16> %a0, <8 x i16> %a1)
+ ret <8 x i16> %2
+}
+declare <8 x i16> @llvm.x86.sse41.pminuw(<8 x i16>, <8 x i16>) nounwind readnone
+
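+; vpmovsx* sign-extend the low source elements, loading them directly from the
+; spill slot in the folded form.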
+define <4 x i32> @stack_fold_pmovsxbd(<16 x i8> %a0) {
+ ;CHECK-LABEL: stack_fold_pmovsxbd
+ ;CHECK: vpmovsxbd {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <4 x i32> @llvm.x86.sse41.pmovsxbd(<16 x i8> %a0)
+ ret <4 x i32> %2
+}
+declare <4 x i32> @llvm.x86.sse41.pmovsxbd(<16 x i8>) nounwind readnone
+
+define <2 x i64> @stack_fold_pmovsxbq(<16 x i8> %a0) {
+ ;CHECK-LABEL: stack_fold_pmovsxbq
+ ;CHECK: vpmovsxbq {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <2 x i64> @llvm.x86.sse41.pmovsxbq(<16 x i8> %a0)
+ ret <2 x i64> %2
+}
+declare <2 x i64> @llvm.x86.sse41.pmovsxbq(<16 x i8>) nounwind readnone
+
+define <8 x i16> @stack_fold_pmovsxbw(<16 x i8> %a0) {
+ ;CHECK-LABEL: stack_fold_pmovsxbw
+ ;CHECK: vpmovsxbw {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <8 x i16> @llvm.x86.sse41.pmovsxbw(<16 x i8> %a0)
+ ret <8 x i16> %2
+}
+declare <8 x i16> @llvm.x86.sse41.pmovsxbw(<16 x i8>) nounwind readnone
+
+define <2 x i64> @stack_fold_pmovsxdq(<4 x i32> %a0) {
+ ;CHECK-LABEL: stack_fold_pmovsxdq
+ ;CHECK: vpmovsxdq {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <2 x i64> @llvm.x86.sse41.pmovsxdq(<4 x i32> %a0)
+ ret <2 x i64> %2
+}
+declare <2 x i64> @llvm.x86.sse41.pmovsxdq(<4 x i32>) nounwind readnone
+
+define <4 x i32> @stack_fold_pmovsxwd(<8 x i16> %a0) {
+ ;CHECK-LABEL: stack_fold_pmovsxwd
+ ;CHECK: vpmovsxwd {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <4 x i32> @llvm.x86.sse41.pmovsxwd(<8 x i16> %a0)
+ ret <4 x i32> %2
+}
+declare <4 x i32> @llvm.x86.sse41.pmovsxwd(<8 x i16>) nounwind readnone
+
+define <2 x i64> @stack_fold_pmovsxwq(<8 x i16> %a0) {
+ ;CHECK-LABEL: stack_fold_pmovsxwq
+ ;CHECK: vpmovsxwq {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <2 x i64> @llvm.x86.sse41.pmovsxwq(<8 x i16> %a0)
+ ret <2 x i64> %2
+}
+declare <2 x i64> @llvm.x86.sse41.pmovsxwq(<8 x i16>) nounwind readnone
+
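+; vpmovzx* are the zero-extending counterparts.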
+define <4 x i32> @stack_fold_pmovzxbd(<16 x i8> %a0) {
+ ;CHECK-LABEL: stack_fold_pmovzxbd
+ ;CHECK: vpmovzxbd {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <4 x i32> @llvm.x86.sse41.pmovzxbd(<16 x i8> %a0)
+ ret <4 x i32> %2
+}
+declare <4 x i32> @llvm.x86.sse41.pmovzxbd(<16 x i8>) nounwind readnone
+
+define <2 x i64> @stack_fold_pmovzxbq(<16 x i8> %a0) {
+ ;CHECK-LABEL: stack_fold_pmovzxbq
+ ;CHECK: vpmovzxbq {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <2 x i64> @llvm.x86.sse41.pmovzxbq(<16 x i8> %a0)
+ ret <2 x i64> %2
+}
+declare <2 x i64> @llvm.x86.sse41.pmovzxbq(<16 x i8>) nounwind readnone
+
+define <8 x i16> @stack_fold_pmovzxbw(<16 x i8> %a0) {
+ ;CHECK-LABEL: stack_fold_pmovzxbw
+ ;CHECK: vpmovzxbw {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <8 x i16> @llvm.x86.sse41.pmovzxbw(<16 x i8> %a0)
+ ret <8 x i16> %2
+}
+declare <8 x i16> @llvm.x86.sse41.pmovzxbw(<16 x i8>) nounwind readnone
+
+define <2 x i64> @stack_fold_pmovzxdq(<4 x i32> %a0) {
+ ;CHECK-LABEL: stack_fold_pmovzxdq
+ ;CHECK: vpmovzxdq {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <2 x i64> @llvm.x86.sse41.pmovzxdq(<4 x i32> %a0)
+ ret <2 x i64> %2
+}
+declare <2 x i64> @llvm.x86.sse41.pmovzxdq(<4 x i32>) nounwind readnone
+
+define <4 x i32> @stack_fold_pmovzxwd(<8 x i16> %a0) {
+ ;CHECK-LABEL: stack_fold_pmovzxwd
+ ;CHECK: vpmovzxwd {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <4 x i32> @llvm.x86.sse41.pmovzxwd(<8 x i16> %a0)
+ ret <4 x i32> %2
+}
+declare <4 x i32> @llvm.x86.sse41.pmovzxwd(<8 x i16>) nounwind readnone
+
+define <2 x i64> @stack_fold_pmovzxwq(<8 x i16> %a0) {
+ ;CHECK-LABEL: stack_fold_pmovzxwq
+ ;CHECK: vpmovzxwq {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <2 x i64> @llvm.x86.sse41.pmovzxwq(<8 x i16> %a0)
+ ret <2 x i64> %2
+}
+declare <2 x i64> @llvm.x86.sse41.pmovzxwq(<8 x i16>) nounwind readnone
+
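+; Multiplies: widening (pmuldq/pmuludq), high-half (pmulhw/pmulhuw/pmulhrsw),
+; and full-width forms expressed as plain IR 'mul' (pmulld/pmullw).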
+define <2 x i64> @stack_fold_pmuldq(<4 x i32> %a0, <4 x i32> %a1) {
+ ;CHECK-LABEL: stack_fold_pmuldq
+ ;CHECK: vpmuldq {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <2 x i64> @llvm.x86.sse41.pmuldq(<4 x i32> %a0, <4 x i32> %a1)
+ ret <2 x i64> %2
+}
+declare <2 x i64> @llvm.x86.sse41.pmuldq(<4 x i32>, <4 x i32>) nounwind readnone
+
+define <8 x i16> @stack_fold_pmulhrsw(<8 x i16> %a0, <8 x i16> %a1) {
+ ;CHECK-LABEL: stack_fold_pmulhrsw
+ ;CHECK: vpmulhrsw {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <8 x i16> @llvm.x86.ssse3.pmul.hr.sw.128(<8 x i16> %a0, <8 x i16> %a1)
+ ret <8 x i16> %2
+}
+declare <8 x i16> @llvm.x86.ssse3.pmul.hr.sw.128(<8 x i16>, <8 x i16>) nounwind readnone
+
+define <8 x i16> @stack_fold_pmulhuw(<8 x i16> %a0, <8 x i16> %a1) {
+ ;CHECK-LABEL: stack_fold_pmulhuw
+ ;CHECK: vpmulhuw {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <8 x i16> @llvm.x86.sse2.pmulhu.w(<8 x i16> %a0, <8 x i16> %a1)
+ ret <8 x i16> %2
+}
+declare <8 x i16> @llvm.x86.sse2.pmulhu.w(<8 x i16>, <8 x i16>) nounwind readnone
+
+define <8 x i16> @stack_fold_pmulhw(<8 x i16> %a0, <8 x i16> %a1) {
+ ;CHECK-LABEL: stack_fold_pmulhw
+ ;CHECK: vpmulhw {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <8 x i16> @llvm.x86.sse2.pmulh.w(<8 x i16> %a0, <8 x i16> %a1)
+ ret <8 x i16> %2
+}
+declare <8 x i16> @llvm.x86.sse2.pmulh.w(<8 x i16>, <8 x i16>) nounwind readnone
+
+define <4 x i32> @stack_fold_pmulld(<4 x i32> %a0, <4 x i32> %a1) {
+ ;CHECK-LABEL: stack_fold_pmulld
+ ;CHECK: vpmulld {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = mul <4 x i32> %a0, %a1
+ ret <4 x i32> %2
+}
+
+define <8 x i16> @stack_fold_pmullw(<8 x i16> %a0, <8 x i16> %a1) {
+ ;CHECK-LABEL: stack_fold_pmullw
+ ;CHECK: vpmullw {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = mul <8 x i16> %a0, %a1
+ ret <8 x i16> %2
+}
+
+define <2 x i64> @stack_fold_pmuludq(<4 x i32> %a0, <4 x i32> %a1) {
+ ;CHECK-LABEL: stack_fold_pmuludq
+ ;CHECK: vpmuludq {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <2 x i64> @llvm.x86.sse2.pmulu.dq(<4 x i32> %a0, <4 x i32> %a1)
+ ret <2 x i64> %2
+}
+declare <2 x i64> @llvm.x86.sse2.pmulu.dq(<4 x i32>, <4 x i32>) nounwind readnone
+
+define <16 x i8> @stack_fold_por(<16 x i8> %a0, <16 x i8> %a1) {
+ ;CHECK-LABEL: stack_fold_por
+ ;CHECK: vpor {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = or <16 x i8> %a0, %a1
+ ; add forces execution domain
+ %3 = add <16 x i8> %2, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
+ ret <16 x i8> %3
+}
+
+define <2 x i64> @stack_fold_psadbw(<16 x i8> %a0, <16 x i8> %a1) {
+ ;CHECK-LABEL: stack_fold_psadbw
+ ;CHECK: vpsadbw {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <2 x i64> @llvm.x86.sse2.psad.bw(<16 x i8> %a0, <16 x i8> %a1)
+ ret <2 x i64> %2
+}
+declare <2 x i64> @llvm.x86.sse2.psad.bw(<16 x i8>, <16 x i8>) nounwind readnone
+
+define <16 x i8> @stack_fold_pshufb(<16 x i8> %a0, <16 x i8> %a1) {
+ ;CHECK-LABEL: stack_fold_pshufb
+ ;CHECK: vpshufb {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <16 x i8> @llvm.x86.ssse3.pshuf.b.128(<16 x i8> %a0, <16 x i8> %a1)
+ ret <16 x i8> %2
+}
+declare <16 x i8> @llvm.x86.ssse3.pshuf.b.128(<16 x i8>, <16 x i8>) nounwind readnone
+
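+; Immediate shuffles: the shufflevector masks lower to the checked immediates,
+; e.g. $27 = 0b00011011 selects elements 3,2,1,0.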
+define <4 x i32> @stack_fold_pshufd(<4 x i32> %a0) {
+ ;CHECK-LABEL: stack_fold_pshufd
+ ;CHECK: vpshufd $27, {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = shufflevector <4 x i32> %a0, <4 x i32> undef, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
+ ret <4 x i32> %2
+}
+
+define <8 x i16> @stack_fold_pshufhw(<8 x i16> %a0) {
+ ;CHECK-LABEL: stack_fold_pshufhw
+ ;CHECK: vpshufhw $11, {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = shufflevector <8 x i16> %a0, <8 x i16> undef, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 7, i32 6, i32 4, i32 4>
+ ret <8 x i16> %2
+}
+
+define <8 x i16> @stack_fold_pshuflw(<8 x i16> %a0) {
+ ;CHECK-LABEL: stack_fold_pshuflw
+ ;CHECK: vpshuflw $27, {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = shufflevector <8 x i16> %a0, <8 x i16> undef, <8 x i32> <i32 3, i32 2, i32 1, i32 0, i32 4, i32 5, i32 6, i32 7>
+ ret <8 x i16> %2
+}
+
+define <16 x i8> @stack_fold_psignb(<16 x i8> %a0, <16 x i8> %a1) {
+ ;CHECK-LABEL: stack_fold_psignb
+ ;CHECK: vpsignb {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <16 x i8> @llvm.x86.ssse3.psign.b.128(<16 x i8> %a0, <16 x i8> %a1)
+ ret <16 x i8> %2
+}
+declare <16 x i8> @llvm.x86.ssse3.psign.b.128(<16 x i8>, <16 x i8>) nounwind readnone
+
+define <4 x i32> @stack_fold_psignd(<4 x i32> %a0, <4 x i32> %a1) {
+ ;CHECK-LABEL: stack_fold_psignd
+ ;CHECK: vpsignd {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <4 x i32> @llvm.x86.ssse3.psign.d.128(<4 x i32> %a0, <4 x i32> %a1)
+ ret <4 x i32> %2
+}
+declare <4 x i32> @llvm.x86.ssse3.psign.d.128(<4 x i32>, <4 x i32>) nounwind readnone
+
+define <8 x i16> @stack_fold_psignw(<8 x i16> %a0, <8 x i16> %a1) {
+ ;CHECK-LABEL: stack_fold_psignw
+ ;CHECK: vpsignw {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <8 x i16> @llvm.x86.ssse3.psign.w.128(<8 x i16> %a0, <8 x i16> %a1)
+ ret <8 x i16> %2
+}
+declare <8 x i16> @llvm.x86.ssse3.psign.w.128(<8 x i16>, <8 x i16>) nounwind readnone
+
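+; Vector shifts take their shift count from the second xmm operand, which is
+; the operand folded from memory here.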
+define <4 x i32> @stack_fold_pslld(<4 x i32> %a0, <4 x i32> %a1) {
+ ;CHECK-LABEL: stack_fold_pslld
+ ;CHECK: vpslld {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <4 x i32> @llvm.x86.sse2.psll.d(<4 x i32> %a0, <4 x i32> %a1)
+ ret <4 x i32> %2
+}
+declare <4 x i32> @llvm.x86.sse2.psll.d(<4 x i32>, <4 x i32>) nounwind readnone
+
+define <2 x i64> @stack_fold_psllq(<2 x i64> %a0, <2 x i64> %a1) {
+ ;CHECK-LABEL: stack_fold_psllq
+ ;CHECK: vpsllq {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <2 x i64> @llvm.x86.sse2.psll.q(<2 x i64> %a0, <2 x i64> %a1)
+ ret <2 x i64> %2
+}
+declare <2 x i64> @llvm.x86.sse2.psll.q(<2 x i64>, <2 x i64>) nounwind readnone
+
+define <8 x i16> @stack_fold_psllw(<8 x i16> %a0, <8 x i16> %a1) {
+ ;CHECK-LABEL: stack_fold_psllw
+ ;CHECK: vpsllw {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <8 x i16> @llvm.x86.sse2.psll.w(<8 x i16> %a0, <8 x i16> %a1)
+ ret <8 x i16> %2
+}
+declare <8 x i16> @llvm.x86.sse2.psll.w(<8 x i16>, <8 x i16>) nounwind readnone
+
+define <4 x i32> @stack_fold_psrad(<4 x i32> %a0, <4 x i32> %a1) {
+ ;CHECK-LABEL: stack_fold_psrad
+ ;CHECK: vpsrad {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <4 x i32> @llvm.x86.sse2.psra.d(<4 x i32> %a0, <4 x i32> %a1)
+ ret <4 x i32> %2
+}
+declare <4 x i32> @llvm.x86.sse2.psra.d(<4 x i32>, <4 x i32>) nounwind readnone
+
+define <8 x i16> @stack_fold_psraw(<8 x i16> %a0, <8 x i16> %a1) {
+ ;CHECK-LABEL: stack_fold_psraw
+ ;CHECK: vpsraw {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <8 x i16> @llvm.x86.sse2.psra.w(<8 x i16> %a0, <8 x i16> %a1)
+ ret <8 x i16> %2
+}
+declare <8 x i16> @llvm.x86.sse2.psra.w(<8 x i16>, <8 x i16>) nounwind readnone
+
+define <4 x i32> @stack_fold_psrld(<4 x i32> %a0, <4 x i32> %a1) {
+ ;CHECK-LABEL: stack_fold_psrld
+ ;CHECK: vpsrld {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <4 x i32> @llvm.x86.sse2.psrl.d(<4 x i32> %a0, <4 x i32> %a1)
+ ret <4 x i32> %2
+}
+declare <4 x i32> @llvm.x86.sse2.psrl.d(<4 x i32>, <4 x i32>) nounwind readnone
+
+define <2 x i64> @stack_fold_psrlq(<2 x i64> %a0, <2 x i64> %a1) {
+ ;CHECK-LABEL: stack_fold_psrlq
+ ;CHECK: vpsrlq {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <2 x i64> @llvm.x86.sse2.psrl.q(<2 x i64> %a0, <2 x i64> %a1)
+ ret <2 x i64> %2
+}
+declare <2 x i64> @llvm.x86.sse2.psrl.q(<2 x i64>, <2 x i64>) nounwind readnone
+
+define <8 x i16> @stack_fold_psrlw(<8 x i16> %a0, <8 x i16> %a1) {
+ ;CHECK-LABEL: stack_fold_psrlw
+ ;CHECK: vpsrlw {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <8 x i16> @llvm.x86.sse2.psrl.w(<8 x i16> %a0, <8 x i16> %a1)
+ ret <8 x i16> %2
+}
+declare <8 x i16> @llvm.x86.sse2.psrl.w(<8 x i16>, <8 x i16>) nounwind readnone
+
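+; Vector subtracts: wrapping via plain IR 'sub', saturating via the SSE2
+; intrinsics.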
+define <16 x i8> @stack_fold_psubb(<16 x i8> %a0, <16 x i8> %a1) {
+ ;CHECK-LABEL: stack_fold_psubb
+ ;CHECK: vpsubb {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = sub <16 x i8> %a0, %a1
+ ret <16 x i8> %2
+}
+
+define <4 x i32> @stack_fold_psubd(<4 x i32> %a0, <4 x i32> %a1) {
+ ;CHECK-LABEL: stack_fold_psubd
+ ;CHECK: vpsubd {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = sub <4 x i32> %a0, %a1
+ ret <4 x i32> %2
+}
+
+define <2 x i64> @stack_fold_psubq(<2 x i64> %a0, <2 x i64> %a1) {
+ ;CHECK-LABEL: stack_fold_psubq
+ ;CHECK: vpsubq {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = sub <2 x i64> %a0, %a1
+ ret <2 x i64> %2
+}
+
+define <16 x i8> @stack_fold_psubsb(<16 x i8> %a0, <16 x i8> %a1) {
+ ;CHECK-LABEL: stack_fold_psubsb
+ ;CHECK: vpsubsb {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <16 x i8> @llvm.x86.sse2.psubs.b(<16 x i8> %a0, <16 x i8> %a1)
+ ret <16 x i8> %2
+}
+declare <16 x i8> @llvm.x86.sse2.psubs.b(<16 x i8>, <16 x i8>) nounwind readnone
+
+define <8 x i16> @stack_fold_psubsw(<8 x i16> %a0, <8 x i16> %a1) {
+ ;CHECK-LABEL: stack_fold_psubsw
+ ;CHECK: vpsubsw {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <8 x i16> @llvm.x86.sse2.psubs.w(<8 x i16> %a0, <8 x i16> %a1)
+ ret <8 x i16> %2
+}
+declare <8 x i16> @llvm.x86.sse2.psubs.w(<8 x i16>, <8 x i16>) nounwind readnone
+
+define <16 x i8> @stack_fold_psubusb(<16 x i8> %a0, <16 x i8> %a1) {
+ ;CHECK-LABEL: stack_fold_psubusb
+ ;CHECK: vpsubusb {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <16 x i8> @llvm.x86.sse2.psubus.b(<16 x i8> %a0, <16 x i8> %a1)
+ ret <16 x i8> %2
+}
+declare <16 x i8> @llvm.x86.sse2.psubus.b(<16 x i8>, <16 x i8>) nounwind readnone
+
+define <8 x i16> @stack_fold_psubusw(<8 x i16> %a0, <8 x i16> %a1) {
+ ;CHECK-LABEL: stack_fold_psubusw
+ ;CHECK: vpsubusw {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <8 x i16> @llvm.x86.sse2.psubus.w(<8 x i16> %a0, <8 x i16> %a1)
+ ret <8 x i16> %2
+}
+declare <8 x i16> @llvm.x86.sse2.psubus.w(<8 x i16>, <8 x i16>) nounwind readnone
+
+define <8 x i16> @stack_fold_psubw(<8 x i16> %a0, <8 x i16> %a1) {
+ ;CHECK-LABEL: stack_fold_psubw
+ ;CHECK: vpsubw {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = sub <8 x i16> %a0, %a1
+ ret <8 x i16> %2
+}
+
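+; vptest: the ptestc intrinsics return the carry flag; both the xmm and the
+; 256-bit ymm forms are covered.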
+define i32 @stack_fold_ptest(<2 x i64> %a0, <2 x i64> %a1) {
+ ;CHECK-LABEL: stack_fold_ptest
+ ;CHECK: vptest {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call i32 @llvm.x86.sse41.ptestc(<2 x i64> %a0, <2 x i64> %a1)
+ ret i32 %2
+}
+declare i32 @llvm.x86.sse41.ptestc(<2 x i64>, <2 x i64>) nounwind readnone
+
+define i32 @stack_fold_ptest_ymm(<4 x i64> %a0, <4 x i64> %a1) {
+ ;CHECK-LABEL: stack_fold_ptest_ymm
+ ;CHECK: vptest {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call i32 @llvm.x86.avx.ptestc.256(<4 x i64> %a0, <4 x i64> %a1)
+ ret i32 %2
+}
+declare i32 @llvm.x86.avx.ptestc.256(<4 x i64>, <4 x i64>) nounwind readnone
+
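+; Unpack/interleave shuffles; the dword and qword variants again use a
+; trailing add to pin the integer execution domain.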
+define <16 x i8> @stack_fold_punpckhbw(<16 x i8> %a0, <16 x i8> %a1) {
+ ;CHECK-LABEL: stack_fold_punpckhbw
+ ;CHECK: vpunpckhbw {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = shufflevector <16 x i8> %a0, <16 x i8> %a1, <16 x i32> <i32 8, i32 24, i32 9, i32 25, i32 10, i32 26, i32 11, i32 27, i32 12, i32 28, i32 13, i32 29, i32 14, i32 30, i32 15, i32 31>
+ ret <16 x i8> %2
+}
+
+define <4 x i32> @stack_fold_punpckhdq(<4 x i32> %a0, <4 x i32> %a1) {
+ ;CHECK-LABEL: stack_fold_punpckhdq
+ ;CHECK: vpunpckhdq {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = shufflevector <4 x i32> %a0, <4 x i32> %a1, <4 x i32> <i32 2, i32 6, i32 3, i32 7>
+ ; add forces execution domain
+ %3 = add <4 x i32> %2, <i32 1, i32 1, i32 1, i32 1>
+ ret <4 x i32> %3
+}
+
+define <2 x i64> @stack_fold_punpckhqdq(<2 x i64> %a0, <2 x i64> %a1) {
+ ;CHECK-LABEL: stack_fold_punpckhqdq
+ ;CHECK: vpunpckhqdq {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = shufflevector <2 x i64> %a0, <2 x i64> %a1, <2 x i32> <i32 1, i32 3>
+ ; add forces execution domain
+ %3 = add <2 x i64> %2, <i64 1, i64 1>
+ ret <2 x i64> %3
+}
+
+define <8 x i16> @stack_fold_punpckhwd(<8 x i16> %a0, <8 x i16> %a1) {
+ ;CHECK-LABEL: stack_fold_punpckhwd
+ ;CHECK: vpunpckhwd {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = shufflevector <8 x i16> %a0, <8 x i16> %a1, <8 x i32> <i32 4, i32 12, i32 5, i32 13, i32 6, i32 14, i32 7, i32 15>
+ ret <8 x i16> %2
+}
+
+define <16 x i8> @stack_fold_punpcklbw(<16 x i8> %a0, <16 x i8> %a1) {
+ ;CHECK-LABEL: stack_fold_punpcklbw
+ ;CHECK: vpunpcklbw {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = shufflevector <16 x i8> %a0, <16 x i8> %a1, <16 x i32> <i32 0, i32 16, i32 1, i32 17, i32 2, i32 18, i32 3, i32 19, i32 4, i32 20, i32 5, i32 21, i32 6, i32 22, i32 7, i32 23>
+ ret <16 x i8> %2
+}
+
+define <4 x i32> @stack_fold_punpckldq(<4 x i32> %a0, <4 x i32> %a1) {
+ ;CHECK-LABEL: stack_fold_punpckldq
+ ;CHECK: vpunpckldq {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = shufflevector <4 x i32> %a0, <4 x i32> %a1, <4 x i32> <i32 0, i32 4, i32 1, i32 5>
+ ; add forces execution domain
+ %3 = add <4 x i32> %2, <i32 1, i32 1, i32 1, i32 1>
+ ret <4 x i32> %3
+}
+
+define <2 x i64> @stack_fold_punpcklqdq(<2 x i64> %a0, <2 x i64> %a1) {
+ ;CHECK-LABEL: stack_fold_punpcklqdq
+ ;CHECK: vpunpcklqdq {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = shufflevector <2 x i64> %a0, <2 x i64> %a1, <2 x i32> <i32 0, i32 2>
+ ; add forces execution domain
+ %3 = add <2 x i64> %2, <i64 1, i64 1>
+ ret <2 x i64> %3
+}
+
+define <8 x i16> @stack_fold_punpcklwd(<8 x i16> %a0, <8 x i16> %a1) {
+ ;CHECK-LABEL: stack_fold_punpcklwd
+ ;CHECK: vpunpcklwd {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = shufflevector <8 x i16> %a0, <8 x i16> %a1, <8 x i32> <i32 0, i32 8, i32 1, i32 9, i32 2, i32 10, i32 3, i32 11>
+ ret <8 x i16> %2
+}
+
+define <16 x i8> @stack_fold_pxor(<16 x i8> %a0, <16 x i8> %a1) {
+ ;CHECK-LABEL: stack_fold_pxor
+ ;CHECK: vpxor {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = xor <16 x i8> %a0, %a1
+ ; add forces execution domain
+ %3 = add <16 x i8> %2, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
+ ret <16 x i8> %3
+}
diff --git a/test/CodeGen/X86/stack-folding-int-avx2.ll b/test/CodeGen/X86/stack-folding-int-avx2.ll
new file mode 100644
index 0000000..39169e6
--- /dev/null
+++ b/test/CodeGen/X86/stack-folding-int-avx2.ll
@@ -0,0 +1,1200 @@
+; RUN: llc -O3 -disable-peephole -mtriple=x86_64-unknown-unknown -mcpu=x86-64 -mattr=+avx2 < %s | FileCheck %s
+
+target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
+target triple = "x86_64-unknown-unknown"
+
+; Stack reload folding tests.
+;
+; By including a nop call with side effects we can force a partial register spill of the
+; relevant registers and check that the reload is correctly folded into the instruction.
+
+define <4 x double> @stack_fold_broadcastsd_ymm(<2 x double> %a0) {
+ ;CHECK-LABEL: stack_fold_broadcastsd_ymm
+ ;CHECK: vbroadcastsd {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <4 x double> @llvm.x86.avx2.vbroadcast.sd.pd.256(<2 x double> %a0)
+ ret <4 x double> %2
+}
+declare <4 x double> @llvm.x86.avx2.vbroadcast.sd.pd.256(<2 x double>) nounwind readonly
+
+define <4 x float> @stack_fold_broadcastss(<4 x float> %a0) {
+ ;CHECK-LABEL: stack_fold_broadcastss
+ ;CHECK: vbroadcastss {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <4 x float> @llvm.x86.avx2.vbroadcast.ss.ps(<4 x float> %a0)
+ ret <4 x float> %2
+}
+declare <4 x float> @llvm.x86.avx2.vbroadcast.ss.ps(<4 x float>) nounwind readonly
+
+define <8 x float> @stack_fold_broadcastss_ymm(<4 x float> %a0) {
+ ;CHECK-LABEL: stack_fold_broadcastss_ymm
+ ;CHECK: vbroadcastss {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <8 x float> @llvm.x86.avx2.vbroadcast.ss.ps.256(<4 x float> %a0)
+ ret <8 x float> %2
+}
+declare <8 x float> @llvm.x86.avx2.vbroadcast.ss.ps.256(<4 x float>) nounwind readonly
+
+define <4 x i32> @stack_fold_extracti128(<8 x i32> %a0, <8 x i32> %a1) {
+ ;CHECK-LABEL: stack_fold_extracti128
+ ;CHECK: vextracti128 $1, {{%ymm[0-9][0-9]*}}, {{-?[0-9]*}}(%rsp) {{.*#+}} 16-byte Folded Spill
+ ; add forces execution domain
+ %1 = add <8 x i32> %a0, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
+ %2 = shufflevector <8 x i32> %1, <8 x i32> %a1, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
+ %3 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ ret <4 x i32> %2
+}
+
+define <8 x i32> @stack_fold_inserti128(<4 x i32> %a0, <4 x i32> %a1) {
+ ;CHECK-LABEL: stack_fold_inserti128
+ ;CHECK: vinserti128 $1, {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = shufflevector <4 x i32> %a0, <4 x i32> %a1, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+ ; add forces execution domain
+ %3 = add <8 x i32> %2, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
+ ret <8 x i32> %3
+}
+
+define <16 x i16> @stack_fold_mpsadbw(<32 x i8> %a0, <32 x i8> %a1) {
+ ;CHECK-LABEL: stack_fold_mpsadbw
+ ;CHECK: vmpsadbw $7, {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <16 x i16> @llvm.x86.avx2.mpsadbw(<32 x i8> %a0, <32 x i8> %a1, i8 7)
+ ret <16 x i16> %2
+}
+declare <16 x i16> @llvm.x86.avx2.mpsadbw(<32 x i8>, <32 x i8>, i8) nounwind readnone
+
+define <32 x i8> @stack_fold_pabsb(<32 x i8> %a0) {
+ ;CHECK-LABEL: stack_fold_pabsb
+ ;CHECK: vpabsb {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <32 x i8> @llvm.x86.avx2.pabs.b(<32 x i8> %a0)
+ ret <32 x i8> %2
+}
+declare <32 x i8> @llvm.x86.avx2.pabs.b(<32 x i8>) nounwind readnone
+
+define <8 x i32> @stack_fold_pabsd(<8 x i32> %a0) {
+ ;CHECK-LABEL: stack_fold_pabsd
+ ;CHECK: vpabsd {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <8 x i32> @llvm.x86.avx2.pabs.d(<8 x i32> %a0)
+ ret <8 x i32> %2
+}
+declare <8 x i32> @llvm.x86.avx2.pabs.d(<8 x i32>) nounwind readnone
+
+define <16 x i16> @stack_fold_pabsw(<16 x i16> %a0) {
+ ;CHECK-LABEL: stack_fold_pabsw
+ ;CHECK: vpabsw {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <16 x i16> @llvm.x86.avx2.pabs.w(<16 x i16> %a0)
+ ret <16 x i16> %2
+}
+declare <16 x i16> @llvm.x86.avx2.pabs.w(<16 x i16>) nounwind readnone
+
+define <16 x i16> @stack_fold_packssdw(<8 x i32> %a0, <8 x i32> %a1) {
+ ;CHECK-LABEL: stack_fold_packssdw
+ ;CHECK: vpackssdw {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <16 x i16> @llvm.x86.avx2.packssdw(<8 x i32> %a0, <8 x i32> %a1)
+ ret <16 x i16> %2
+}
+declare <16 x i16> @llvm.x86.avx2.packssdw(<8 x i32>, <8 x i32>) nounwind readnone
+
+define <32 x i8> @stack_fold_packsswb(<16 x i16> %a0, <16 x i16> %a1) {
+ ;CHECK-LABEL: stack_fold_packsswb
+ ;CHECK: vpacksswb {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <32 x i8> @llvm.x86.avx2.packsswb(<16 x i16> %a0, <16 x i16> %a1)
+ ret <32 x i8> %2
+}
+declare <32 x i8> @llvm.x86.avx2.packsswb(<16 x i16>, <16 x i16>) nounwind readnone
+
+define <16 x i16> @stack_fold_packusdw(<8 x i32> %a0, <8 x i32> %a1) {
+ ;CHECK-LABEL: stack_fold_packusdw
+ ;CHECK: vpackusdw {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <16 x i16> @llvm.x86.avx2.packusdw(<8 x i32> %a0, <8 x i32> %a1)
+ ret <16 x i16> %2
+}
+declare <16 x i16> @llvm.x86.avx2.packusdw(<8 x i32>, <8 x i32>) nounwind readnone
+
+define <32 x i8> @stack_fold_packuswb(<16 x i16> %a0, <16 x i16> %a1) {
+ ;CHECK-LABEL: stack_fold_packuswb
+ ;CHECK: vpackuswb {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <32 x i8> @llvm.x86.avx2.packuswb(<16 x i16> %a0, <16 x i16> %a1)
+ ret <32 x i8> %2
+}
+declare <32 x i8> @llvm.x86.avx2.packuswb(<16 x i16>, <16 x i16>) nounwind readnone
+
+define <32 x i8> @stack_fold_paddb(<32 x i8> %a0, <32 x i8> %a1) {
+ ;CHECK-LABEL: stack_fold_paddb
+ ;CHECK: vpaddb {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = add <32 x i8> %a0, %a1
+ ret <32 x i8> %2
+}
+
+define <8 x i32> @stack_fold_paddd(<8 x i32> %a0, <8 x i32> %a1) {
+ ;CHECK-LABEL: stack_fold_paddd
+ ;CHECK: vpaddd {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = add <8 x i32> %a0, %a1
+ ret <8 x i32> %2
+}
+
+define <4 x i64> @stack_fold_paddq(<4 x i64> %a0, <4 x i64> %a1) {
+ ;CHECK-LABEL: stack_fold_paddq
+ ;CHECK: vpaddq {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = add <4 x i64> %a0, %a1
+ ret <4 x i64> %2
+}
+
+define <32 x i8> @stack_fold_paddsb(<32 x i8> %a0, <32 x i8> %a1) {
+ ;CHECK-LABEL: stack_fold_paddsb
+ ;CHECK: vpaddsb {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <32 x i8> @llvm.x86.avx2.padds.b(<32 x i8> %a0, <32 x i8> %a1)
+ ret <32 x i8> %2
+}
+declare <32 x i8> @llvm.x86.avx2.padds.b(<32 x i8>, <32 x i8>) nounwind readnone
+
+define <16 x i16> @stack_fold_paddsw(<16 x i16> %a0, <16 x i16> %a1) {
+ ;CHECK-LABEL: stack_fold_paddsw
+ ;CHECK: vpaddsw {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <16 x i16> @llvm.x86.avx2.padds.w(<16 x i16> %a0, <16 x i16> %a1)
+ ret <16 x i16> %2
+}
+declare <16 x i16> @llvm.x86.avx2.padds.w(<16 x i16>, <16 x i16>) nounwind readnone
+
+define <32 x i8> @stack_fold_paddusb(<32 x i8> %a0, <32 x i8> %a1) {
+ ;CHECK-LABEL: stack_fold_paddusb
+ ;CHECK: vpaddusb {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <32 x i8> @llvm.x86.avx2.paddus.b(<32 x i8> %a0, <32 x i8> %a1)
+ ret <32 x i8> %2
+}
+declare <32 x i8> @llvm.x86.avx2.paddus.b(<32 x i8>, <32 x i8>) nounwind readnone
+
+define <16 x i16> @stack_fold_paddusw(<16 x i16> %a0, <16 x i16> %a1) {
+ ;CHECK-LABEL: stack_fold_paddusw
+ ;CHECK: vpaddusw {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <16 x i16> @llvm.x86.avx2.paddus.w(<16 x i16> %a0, <16 x i16> %a1)
+ ret <16 x i16> %2
+}
+declare <16 x i16> @llvm.x86.avx2.paddus.w(<16 x i16>, <16 x i16>) nounwind readnone
+
+define <16 x i16> @stack_fold_paddw(<16 x i16> %a0, <16 x i16> %a1) {
+ ;CHECK-LABEL: stack_fold_paddw
+ ;CHECK: vpaddw {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = add <16 x i16> %a0, %a1
+ ret <16 x i16> %2
+}
+
+; TODO stack_fold_palignr
+; define <32 x i8> @stack_fold_palignr(<32 x i8> %a0, <32 x i8> %a1)
+
+define <32 x i8> @stack_fold_pand(<32 x i8> %a0, <32 x i8> %a1) {
+ ;CHECK-LABEL: stack_fold_pand
+ ;CHECK: vpand {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = and <32 x i8> %a0, %a1
+ ; add forces execution domain
+ %3 = add <32 x i8> %2, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
+ ret <32 x i8> %3
+}
+
+define <32 x i8> @stack_fold_pandn(<32 x i8> %a0, <32 x i8> %a1) {
+ ;CHECK-LABEL: stack_fold_pandn
+ ;CHECK: vpandn {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = xor <32 x i8> %a0, <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>
+ %3 = and <32 x i8> %2, %a1
+ ; add forces execution domain
+ %4 = add <32 x i8> %3, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
+ ret <32 x i8> %4
+}
+
+define <32 x i8> @stack_fold_pavgb(<32 x i8> %a0, <32 x i8> %a1) {
+ ;CHECK-LABEL: stack_fold_pavgb
+ ;CHECK: vpavgb {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <32 x i8> @llvm.x86.avx2.pavg.b(<32 x i8> %a0, <32 x i8> %a1)
+ ret <32 x i8> %2
+}
+declare <32 x i8> @llvm.x86.avx2.pavg.b(<32 x i8>, <32 x i8>) nounwind readnone
+
+define <16 x i16> @stack_fold_pavgw(<16 x i16> %a0, <16 x i16> %a1) {
+ ;CHECK-LABEL: stack_fold_pavgw
+ ;CHECK: vpavgw {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <16 x i16> @llvm.x86.avx2.pavg.w(<16 x i16> %a0, <16 x i16> %a1)
+ ret <16 x i16> %2
+}
+declare <16 x i16> @llvm.x86.avx2.pavg.w(<16 x i16>, <16 x i16>) nounwind readnone
+
+define <4 x i32> @stack_fold_pblendd(<4 x i32> %a0, <4 x i32> %a1) {
+ ;CHECK-LABEL: stack_fold_pblendd
+ ;CHECK: vpblendd $7, {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = shufflevector <4 x i32> %a0, <4 x i32> %a1, <4 x i32> <i32 4, i32 5, i32 6, i32 3>
+ ret <4 x i32> %2
+}
+
+define <8 x i32> @stack_fold_pblendd_ymm(<8 x i32> %a0, <8 x i32> %a1) {
+ ;CHECK-LABEL: stack_fold_pblendd_ymm
+ ;CHECK: vpblendd $7, {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = shufflevector <8 x i32> %a0, <8 x i32> %a1, <8 x i32> <i32 8, i32 9, i32 10, i32 3, i32 4, i32 5, i32 6, i32 7>
+ ret <8 x i32> %2
+}
+
+define <32 x i8> @stack_fold_pblendvb(<32 x i8> %a0, <32 x i8> %a1, <32 x i8> %c) {
+ ;CHECK-LABEL: stack_fold_pblendvb
+ ;CHECK: vpblendvb {{%ymm[0-9][0-9]*}}, {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <32 x i8> @llvm.x86.avx2.pblendvb(<32 x i8> %a1, <32 x i8> %c, <32 x i8> %a0)
+ ret <32 x i8> %2
+}
+declare <32 x i8> @llvm.x86.avx2.pblendvb(<32 x i8>, <32 x i8>, <32 x i8>) nounwind readnone
+
+define <16 x i16> @stack_fold_pblendw(<16 x i16> %a0, <16 x i16> %a1) {
+ ;CHECK-LABEL: stack_fold_pblendw
+ ;CHECK: vpblendw $7, {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <16 x i16> @llvm.x86.avx2.pblendw(<16 x i16> %a0, <16 x i16> %a1, i8 7)
+ ret <16 x i16> %2
+}
+declare <16 x i16> @llvm.x86.avx2.pblendw(<16 x i16>, <16 x i16>, i8) nounwind readnone
+
+define <16 x i8> @stack_fold_pbroadcastb(<16 x i8> %a0) {
+ ;CHECK-LABEL: stack_fold_pbroadcastb
+ ;CHECK: vpbroadcastb {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <16 x i8> @llvm.x86.avx2.pbroadcastb.128(<16 x i8> %a0)
+ ret <16 x i8> %2
+}
+declare <16 x i8> @llvm.x86.avx2.pbroadcastb.128(<16 x i8>) nounwind readonly
+
+define <32 x i8> @stack_fold_pbroadcastb_ymm(<16 x i8> %a0) {
+ ;CHECK-LABEL: stack_fold_pbroadcastb_ymm
+ ;CHECK: vpbroadcastb {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <32 x i8> @llvm.x86.avx2.pbroadcastb.256(<16 x i8> %a0)
+ ret <32 x i8> %2
+}
+declare <32 x i8> @llvm.x86.avx2.pbroadcastb.256(<16 x i8>) nounwind readonly
+
+define <4 x i32> @stack_fold_pbroadcastd(<4 x i32> %a0) {
+ ;CHECK-LABEL: stack_fold_pbroadcastd
+ ;CHECK: vpbroadcastd {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <4 x i32> @llvm.x86.avx2.pbroadcastd.128(<4 x i32> %a0)
+ ; add forces execution domain
+ %3 = add <4 x i32> %2, <i32 1, i32 1, i32 1, i32 1>
+ ret <4 x i32> %3
+}
+declare <4 x i32> @llvm.x86.avx2.pbroadcastd.128(<4 x i32>) nounwind readonly
+
+define <8 x i32> @stack_fold_pbroadcastd_ymm(<4 x i32> %a0) {
+ ;CHECK-LABEL: stack_fold_pbroadcastd_ymm
+ ;CHECK: vpbroadcastd {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <8 x i32> @llvm.x86.avx2.pbroadcastd.256(<4 x i32> %a0)
+ ; add forces execution domain
+ %3 = add <8 x i32> %2, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
+ ret <8 x i32> %3
+}
+declare <8 x i32> @llvm.x86.avx2.pbroadcastd.256(<4 x i32>) nounwind readonly
+
+define <2 x i64> @stack_fold_pbroadcastq(<2 x i64> %a0) {
+ ;CHECK-LABEL: stack_fold_pbroadcastq
+ ;CHECK: vpbroadcastq {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <2 x i64> @llvm.x86.avx2.pbroadcastq.128(<2 x i64> %a0)
+ ; add forces execution domain
+ %3 = add <2 x i64> %2, <i64 1, i64 1>
+ ret <2 x i64> %3
+}
+declare <2 x i64> @llvm.x86.avx2.pbroadcastq.128(<2 x i64>) nounwind readonly
+
+define <4 x i64> @stack_fold_pbroadcastq_ymm(<2 x i64> %a0) {
+ ;CHECK-LABEL: stack_fold_pbroadcastq_ymm
+ ;CHECK: vpbroadcastq {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <4 x i64> @llvm.x86.avx2.pbroadcastq.256(<2 x i64> %a0)
+ ; add forces execution domain
+ %3 = add <4 x i64> %2, <i64 1, i64 1, i64 1, i64 1>
+ ret <4 x i64> %3
+}
+declare <4 x i64> @llvm.x86.avx2.pbroadcastq.256(<2 x i64>) nounwind readonly
+
+define <8 x i16> @stack_fold_pbroadcastw(<8 x i16> %a0) {
+ ;CHECK-LABEL: stack_fold_pbroadcastw
+ ;CHECK: vpbroadcastw {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <8 x i16> @llvm.x86.avx2.pbroadcastw.128(<8 x i16> %a0)
+ ret <8 x i16> %2
+}
+declare <8 x i16> @llvm.x86.avx2.pbroadcastw.128(<8 x i16>) nounwind readonly
+
+define <16 x i16> @stack_fold_pbroadcastw_ymm(<8 x i16> %a0) {
+ ;CHECK-LABEL: stack_fold_pbroadcastw_ymm
+ ;CHECK: vpbroadcastw {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <16 x i16> @llvm.x86.avx2.pbroadcastw.256(<8 x i16> %a0)
+ ret <16 x i16> %2
+}
+declare <16 x i16> @llvm.x86.avx2.pbroadcastw.256(<8 x i16>) nounwind readonly
+
+define <32 x i8> @stack_fold_pcmpeqb(<32 x i8> %a0, <32 x i8> %a1) {
+ ;CHECK-LABEL: stack_fold_pcmpeqb
+ ;CHECK: vpcmpeqb {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = icmp eq <32 x i8> %a0, %a1
+ %3 = sext <32 x i1> %2 to <32 x i8>
+ ret <32 x i8> %3
+}
+
+define <8 x i32> @stack_fold_pcmpeqd(<8 x i32> %a0, <8 x i32> %a1) {
+ ;CHECK-LABEL: stack_fold_pcmpeqd
+ ;CHECK: vpcmpeqd {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = icmp eq <8 x i32> %a0, %a1
+ %3 = sext <8 x i1> %2 to <8 x i32>
+ ret <8 x i32> %3
+}
+
+define <4 x i64> @stack_fold_pcmpeqq(<4 x i64> %a0, <4 x i64> %a1) {
+ ;CHECK-LABEL: stack_fold_pcmpeqq
+ ;CHECK: vpcmpeqq {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = icmp eq <4 x i64> %a0, %a1
+ %3 = sext <4 x i1> %2 to <4 x i64>
+ ret <4 x i64> %3
+}
+
+define <16 x i16> @stack_fold_pcmpeqw(<16 x i16> %a0, <16 x i16> %a1) {
+ ;CHECK-LABEL: stack_fold_pcmpeqw
+ ;CHECK: vpcmpeqw {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = icmp eq <16 x i16> %a0, %a1
+ %3 = sext <16 x i1> %2 to <16 x i16>
+ ret <16 x i16> %3
+}
+
+define <32 x i8> @stack_fold_pcmpgtb(<32 x i8> %a0, <32 x i8> %a1) {
+ ;CHECK-LABEL: stack_fold_pcmpgtb
+ ;CHECK: vpcmpgtb {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = icmp sgt <32 x i8> %a0, %a1
+ %3 = sext <32 x i1> %2 to <32 x i8>
+ ret <32 x i8> %3
+}
+
+define <8 x i32> @stack_fold_pcmpgtd(<8 x i32> %a0, <8 x i32> %a1) {
+ ;CHECK-LABEL: stack_fold_pcmpgtd
+ ;CHECK: vpcmpgtd {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = icmp sgt <8 x i32> %a0, %a1
+ %3 = sext <8 x i1> %2 to <8 x i32>
+ ret <8 x i32> %3
+}
+
+define <4 x i64> @stack_fold_pcmpgtq(<4 x i64> %a0, <4 x i64> %a1) {
+ ;CHECK-LABEL: stack_fold_pcmpgtq
+ ;CHECK: vpcmpgtq {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = icmp sgt <4 x i64> %a0, %a1
+ %3 = sext <4 x i1> %2 to <4 x i64>
+ ret <4 x i64> %3
+}
+
+define <16 x i16> @stack_fold_pcmpgtw(<16 x i16> %a0, <16 x i16> %a1) {
+ ;CHECK-LABEL: stack_fold_pcmpgtw
+ ;CHECK: vpcmpgtw {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = icmp sgt <16 x i16> %a0, %a1
+ %3 = sext <16 x i1> %2 to <16 x i16>
+ ret <16 x i16> %3
+}
+
+define <8 x i32> @stack_fold_perm2i128(<8 x i32> %a0, <8 x i32> %a1) {
+ ;CHECK-LABEL: stack_fold_perm2i128
+ ;CHECK: vperm2i128 $33, {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = shufflevector <8 x i32> %a0, <8 x i32> %a1, <8 x i32> <i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11>
+ ; add forces execution domain
+ %3 = add <8 x i32> %2, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
+ ret <8 x i32> %3
+}
+
+define <8 x i32> @stack_fold_permd(<8 x i32> %a0, <8 x i32> %a1) {
+ ;CHECK-LABEL: stack_fold_permd
+ ;CHECK: vpermd {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <8 x i32> @llvm.x86.avx2.permd(<8 x i32> %a0, <8 x i32> %a1)
+ ret <8 x i32> %2
+}
+declare <8 x i32> @llvm.x86.avx2.permd(<8 x i32>, <8 x i32>) nounwind readonly
+
+define <4 x double> @stack_fold_permpd(<4 x double> %a0) {
+ ;CHECK-LABEL: stack_fold_permpd
+ ;CHECK: vpermpd $255, {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = shufflevector <4 x double> %a0, <4 x double> undef, <4 x i32> <i32 3, i32 3, i32 3, i32 3>
+ ; fadd forces execution domain
+ %3 = fadd <4 x double> %2, <double 0x0, double 0x0, double 0x0, double 0x0>
+ ret <4 x double> %3
+}
+
+define <8 x float> @stack_fold_permps(<8 x float> %a0, <8 x float> %a1) {
+ ;CHECK-LABEL: stack_fold_permps
+ ;CHECK: vpermps {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <8 x float> @llvm.x86.avx2.permps(<8 x float> %a0, <8 x float> %a1)
+ ret <8 x float> %2
+}
+declare <8 x float> @llvm.x86.avx2.permps(<8 x float>, <8 x float>) nounwind readonly
+
+define <4 x i64> @stack_fold_permq(<4 x i64> %a0) {
+ ;CHECK-LABEL: stack_fold_permq
+ ;CHECK: vpermq $255, {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = shufflevector <4 x i64> %a0, <4 x i64> undef, <4 x i32> <i32 3, i32 3, i32 3, i32 3>
+ ; add forces execution domain
+ %3 = add <4 x i64> %2, <i64 1, i64 1, i64 1, i64 1>
+ ret <4 x i64> %3
+}
+
+define <8 x i32> @stack_fold_phaddd(<8 x i32> %a0, <8 x i32> %a1) {
+ ;CHECK-LABEL: stack_fold_phaddd
+ ;CHECK: vphaddd {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <8 x i32> @llvm.x86.avx2.phadd.d(<8 x i32> %a0, <8 x i32> %a1)
+ ret <8 x i32> %2
+}
+declare <8 x i32> @llvm.x86.avx2.phadd.d(<8 x i32>, <8 x i32>) nounwind readnone
+
+define <16 x i16> @stack_fold_phaddsw(<16 x i16> %a0, <16 x i16> %a1) {
+ ;CHECK-LABEL: stack_fold_phaddsw
+ ;CHECK: vphaddsw {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <16 x i16> @llvm.x86.avx2.phadd.sw(<16 x i16> %a0, <16 x i16> %a1)
+ ret <16 x i16> %2
+}
+declare <16 x i16> @llvm.x86.avx2.phadd.sw(<16 x i16>, <16 x i16>) nounwind readnone
+
+define <16 x i16> @stack_fold_phaddw(<16 x i16> %a0, <16 x i16> %a1) {
+ ;CHECK-LABEL: stack_fold_phaddw
+ ;CHECK: vphaddw {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <16 x i16> @llvm.x86.avx2.phadd.w(<16 x i16> %a0, <16 x i16> %a1)
+ ret <16 x i16> %2
+}
+declare <16 x i16> @llvm.x86.avx2.phadd.w(<16 x i16>, <16 x i16>) nounwind readnone
+
+define <8 x i32> @stack_fold_phsubd(<8 x i32> %a0, <8 x i32> %a1) {
+ ;CHECK-LABEL: stack_fold_phsubd
+ ;CHECK: vphsubd {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <8 x i32> @llvm.x86.avx2.phsub.d(<8 x i32> %a0, <8 x i32> %a1)
+ ret <8 x i32> %2
+}
+declare <8 x i32> @llvm.x86.avx2.phsub.d(<8 x i32>, <8 x i32>) nounwind readnone
+
+define <16 x i16> @stack_fold_phsubsw(<16 x i16> %a0, <16 x i16> %a1) {
+ ;CHECK-LABEL: stack_fold_phsubsw
+ ;CHECK: vphsubsw {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <16 x i16> @llvm.x86.avx2.phsub.sw(<16 x i16> %a0, <16 x i16> %a1)
+ ret <16 x i16> %2
+}
+declare <16 x i16> @llvm.x86.avx2.phsub.sw(<16 x i16>, <16 x i16>) nounwind readnone
+
+define <16 x i16> @stack_fold_phsubw(<16 x i16> %a0, <16 x i16> %a1) {
+ ;CHECK-LABEL: stack_fold_phsubw
+ ;CHECK: vphsubw {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <16 x i16> @llvm.x86.avx2.phsub.w(<16 x i16> %a0, <16 x i16> %a1)
+ ret <16 x i16> %2
+}
+declare <16 x i16> @llvm.x86.avx2.phsub.w(<16 x i16>, <16 x i16>) nounwind readnone
+
+define <16 x i16> @stack_fold_pmaddubsw(<32 x i8> %a0, <32 x i8> %a1) {
+ ;CHECK-LABEL: stack_fold_pmaddubsw
+ ;CHECK: vpmaddubsw {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <16 x i16> @llvm.x86.avx2.pmadd.ub.sw(<32 x i8> %a0, <32 x i8> %a1)
+ ret <16 x i16> %2
+}
+declare <16 x i16> @llvm.x86.avx2.pmadd.ub.sw(<32 x i8>, <32 x i8>) nounwind readnone
+
+define <8 x i32> @stack_fold_pmaddwd(<16 x i16> %a0, <16 x i16> %a1) {
+ ;CHECK-LABEL: stack_fold_pmaddwd
+ ;CHECK: vpmaddwd {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <8 x i32> @llvm.x86.avx2.pmadd.wd(<16 x i16> %a0, <16 x i16> %a1)
+ ret <8 x i32> %2
+}
+declare <8 x i32> @llvm.x86.avx2.pmadd.wd(<16 x i16>, <16 x i16>) nounwind readnone
+
+define <32 x i8> @stack_fold_pmaxsb(<32 x i8> %a0, <32 x i8> %a1) {
+ ;CHECK-LABEL: stack_fold_pmaxsb
+ ;CHECK: vpmaxsb {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <32 x i8> @llvm.x86.avx2.pmaxs.b(<32 x i8> %a0, <32 x i8> %a1)
+ ret <32 x i8> %2
+}
+declare <32 x i8> @llvm.x86.avx2.pmaxs.b(<32 x i8>, <32 x i8>) nounwind readnone
+
+define <8 x i32> @stack_fold_pmaxsd(<8 x i32> %a0, <8 x i32> %a1) {
+ ;CHECK-LABEL: stack_fold_pmaxsd
+ ;CHECK: vpmaxsd {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <8 x i32> @llvm.x86.avx2.pmaxs.d(<8 x i32> %a0, <8 x i32> %a1)
+ ret <8 x i32> %2
+}
+declare <8 x i32> @llvm.x86.avx2.pmaxs.d(<8 x i32>, <8 x i32>) nounwind readnone
+
+define <16 x i16> @stack_fold_pmaxsw(<16 x i16> %a0, <16 x i16> %a1) {
+ ;CHECK-LABEL: stack_fold_pmaxsw
+ ;CHECK: vpmaxsw {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <16 x i16> @llvm.x86.avx2.pmaxs.w(<16 x i16> %a0, <16 x i16> %a1)
+ ret <16 x i16> %2
+}
+declare <16 x i16> @llvm.x86.avx2.pmaxs.w(<16 x i16>, <16 x i16>) nounwind readnone
+
+define <32 x i8> @stack_fold_pmaxub(<32 x i8> %a0, <32 x i8> %a1) {
+ ;CHECK-LABEL: stack_fold_pmaxub
+ ;CHECK: vpmaxub {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <32 x i8> @llvm.x86.avx2.pmaxu.b(<32 x i8> %a0, <32 x i8> %a1)
+ ret <32 x i8> %2
+}
+declare <32 x i8> @llvm.x86.avx2.pmaxu.b(<32 x i8>, <32 x i8>) nounwind readnone
+
+define <8 x i32> @stack_fold_pmaxud(<8 x i32> %a0, <8 x i32> %a1) {
+ ;CHECK-LABEL: stack_fold_pmaxud
+ ;CHECK: vpmaxud {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <8 x i32> @llvm.x86.avx2.pmaxu.d(<8 x i32> %a0, <8 x i32> %a1)
+ ret <8 x i32> %2
+}
+declare <8 x i32> @llvm.x86.avx2.pmaxu.d(<8 x i32>, <8 x i32>) nounwind readnone
+
+define <16 x i16> @stack_fold_pmaxuw(<16 x i16> %a0, <16 x i16> %a1) {
+ ;CHECK-LABEL: stack_fold_pmaxuw
+ ;CHECK: vpmaxuw {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <16 x i16> @llvm.x86.avx2.pmaxu.w(<16 x i16> %a0, <16 x i16> %a1)
+ ret <16 x i16> %2
+}
+declare <16 x i16> @llvm.x86.avx2.pmaxu.w(<16 x i16>, <16 x i16>) nounwind readnone
+
+define <32 x i8> @stack_fold_pminsb(<32 x i8> %a0, <32 x i8> %a1) {
+ ;CHECK-LABEL: stack_fold_pminsb
+ ;CHECK: vpminsb {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <32 x i8> @llvm.x86.avx2.pmins.b(<32 x i8> %a0, <32 x i8> %a1)
+ ret <32 x i8> %2
+}
+declare <32 x i8> @llvm.x86.avx2.pmins.b(<32 x i8>, <32 x i8>) nounwind readnone
+
+define <8 x i32> @stack_fold_pminsd(<8 x i32> %a0, <8 x i32> %a1) {
+ ;CHECK-LABEL: stack_fold_pminsd
+ ;CHECK: vpminsd {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <8 x i32> @llvm.x86.avx2.pmins.d(<8 x i32> %a0, <8 x i32> %a1)
+ ret <8 x i32> %2
+}
+declare <8 x i32> @llvm.x86.avx2.pmins.d(<8 x i32>, <8 x i32>) nounwind readnone
+
+define <16 x i16> @stack_fold_pminsw(<16 x i16> %a0, <16 x i16> %a1) {
+ ;CHECK-LABEL: stack_fold_pminsw
+ ;CHECK: vpminsw {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <16 x i16> @llvm.x86.avx2.pmins.w(<16 x i16> %a0, <16 x i16> %a1)
+ ret <16 x i16> %2
+}
+declare <16 x i16> @llvm.x86.avx2.pmins.w(<16 x i16>, <16 x i16>) nounwind readnone
+
+define <32 x i8> @stack_fold_pminub(<32 x i8> %a0, <32 x i8> %a1) {
+ ;CHECK-LABEL: stack_fold_pminub
+ ;CHECK: vpminub {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <32 x i8> @llvm.x86.avx2.pminu.b(<32 x i8> %a0, <32 x i8> %a1)
+ ret <32 x i8> %2
+}
+declare <32 x i8> @llvm.x86.avx2.pminu.b(<32 x i8>, <32 x i8>) nounwind readnone
+
+define <8 x i32> @stack_fold_pminud(<8 x i32> %a0, <8 x i32> %a1) {
+ ;CHECK-LABEL: stack_fold_pminud
+ ;CHECK: vpminud {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <8 x i32> @llvm.x86.avx2.pminu.d(<8 x i32> %a0, <8 x i32> %a1)
+ ret <8 x i32> %2
+}
+declare <8 x i32> @llvm.x86.avx2.pminu.d(<8 x i32>, <8 x i32>) nounwind readnone
+
+define <16 x i16> @stack_fold_pminuw(<16 x i16> %a0, <16 x i16> %a1) {
+ ;CHECK-LABEL: stack_fold_pminuw
+ ;CHECK: vpminuw {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <16 x i16> @llvm.x86.avx2.pminu.w(<16 x i16> %a0, <16 x i16> %a1)
+ ret <16 x i16> %2
+}
+declare <16 x i16> @llvm.x86.avx2.pminu.w(<16 x i16>, <16 x i16>) nounwind readnone
+
+define <8 x i32> @stack_fold_pmovsxbd(<16 x i8> %a0) {
+ ;CHECK-LABEL: stack_fold_pmovsxbd
+ ;CHECK: vpmovsxbd {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <8 x i32> @llvm.x86.avx2.pmovsxbd(<16 x i8> %a0)
+ ret <8 x i32> %2
+}
+declare <8 x i32> @llvm.x86.avx2.pmovsxbd(<16 x i8>) nounwind readnone
+
+define <4 x i64> @stack_fold_pmovsxbq(<16 x i8> %a0) {
+ ;CHECK-LABEL: stack_fold_pmovsxbq
+ ;CHECK: vpmovsxbq {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <4 x i64> @llvm.x86.avx2.pmovsxbq(<16 x i8> %a0)
+ ret <4 x i64> %2
+}
+declare <4 x i64> @llvm.x86.avx2.pmovsxbq(<16 x i8>) nounwind readnone
+
+define <16 x i16> @stack_fold_pmovsxbw(<16 x i8> %a0) {
+ ;CHECK-LABEL: stack_fold_pmovsxbw
+ ;CHECK: vpmovsxbw {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <16 x i16> @llvm.x86.avx2.pmovsxbw(<16 x i8> %a0)
+ ret <16 x i16> %2
+}
+declare <16 x i16> @llvm.x86.avx2.pmovsxbw(<16 x i8>) nounwind readnone
+
+define <4 x i64> @stack_fold_pmovsxdq(<4 x i32> %a0) {
+ ;CHECK-LABEL: stack_fold_pmovsxdq
+ ;CHECK: vpmovsxdq {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <4 x i64> @llvm.x86.avx2.pmovsxdq(<4 x i32> %a0)
+ ret <4 x i64> %2
+}
+declare <4 x i64> @llvm.x86.avx2.pmovsxdq(<4 x i32>) nounwind readnone
+
+define <8 x i32> @stack_fold_pmovsxwd(<8 x i16> %a0) {
+ ;CHECK-LABEL: stack_fold_pmovsxwd
+ ;CHECK: vpmovsxwd {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <8 x i32> @llvm.x86.avx2.pmovsxwd(<8 x i16> %a0)
+ ret <8 x i32> %2
+}
+declare <8 x i32> @llvm.x86.avx2.pmovsxwd(<8 x i16>) nounwind readnone
+
+define <4 x i64> @stack_fold_pmovsxwq(<8 x i16> %a0) {
+ ;CHECK-LABEL: stack_fold_pmovsxwq
+ ;CHECK: vpmovsxwq {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <4 x i64> @llvm.x86.avx2.pmovsxwq(<8 x i16> %a0)
+ ret <4 x i64> %2
+}
+declare <4 x i64> @llvm.x86.avx2.pmovsxwq(<8 x i16>) nounwind readnone
+
+define <8 x i32> @stack_fold_pmovzxbd(<16 x i8> %a0) {
+ ;CHECK-LABEL: stack_fold_pmovzxbd
+ ;CHECK: vpmovzxbd {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <8 x i32> @llvm.x86.avx2.pmovzxbd(<16 x i8> %a0)
+ ret <8 x i32> %2
+}
+declare <8 x i32> @llvm.x86.avx2.pmovzxbd(<16 x i8>) nounwind readnone
+
+define <4 x i64> @stack_fold_pmovzxbq(<16 x i8> %a0) {
+ ;CHECK-LABEL: stack_fold_pmovzxbq
+ ;CHECK: vpmovzxbq {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <4 x i64> @llvm.x86.avx2.pmovzxbq(<16 x i8> %a0)
+ ret <4 x i64> %2
+}
+declare <4 x i64> @llvm.x86.avx2.pmovzxbq(<16 x i8>) nounwind readnone
+
+define <16 x i16> @stack_fold_pmovzxbw(<16 x i8> %a0) {
+ ;CHECK-LABEL: stack_fold_pmovzxbw
+ ;CHECK: vpmovzxbw {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <16 x i16> @llvm.x86.avx2.pmovzxbw(<16 x i8> %a0)
+ ret <16 x i16> %2
+}
+declare <16 x i16> @llvm.x86.avx2.pmovzxbw(<16 x i8>) nounwind readnone
+
+define <4 x i64> @stack_fold_pmovzxdq(<4 x i32> %a0) {
+ ;CHECK-LABEL: stack_fold_pmovzxdq
+ ;CHECK: vpmovzxdq {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <4 x i64> @llvm.x86.avx2.pmovzxdq(<4 x i32> %a0)
+ ret <4 x i64> %2
+}
+declare <4 x i64> @llvm.x86.avx2.pmovzxdq(<4 x i32>) nounwind readnone
+
+define <8 x i32> @stack_fold_pmovzxwd(<8 x i16> %a0) {
+ ;CHECK-LABEL: stack_fold_pmovzxwd
+ ;CHECK: vpmovzxwd {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <8 x i32> @llvm.x86.avx2.pmovzxwd(<8 x i16> %a0)
+ ret <8 x i32> %2
+}
+declare <8 x i32> @llvm.x86.avx2.pmovzxwd(<8 x i16>) nounwind readnone
+
+define <4 x i64> @stack_fold_pmovzxwq(<8 x i16> %a0) {
+ ;CHECK-LABEL: stack_fold_pmovzxwq
+ ;CHECK: vpmovzxwq {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <4 x i64> @llvm.x86.avx2.pmovzxwq(<8 x i16> %a0)
+ ret <4 x i64> %2
+}
+declare <4 x i64> @llvm.x86.avx2.pmovzxwq(<8 x i16>) nounwind readnone
+
+define <4 x i64> @stack_fold_pmuldq(<8 x i32> %a0, <8 x i32> %a1) {
+ ;CHECK-LABEL: stack_fold_pmuldq
+ ;CHECK: vpmuldq {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <4 x i64> @llvm.x86.avx2.pmul.dq(<8 x i32> %a0, <8 x i32> %a1)
+ ret <4 x i64> %2
+}
+declare <4 x i64> @llvm.x86.avx2.pmul.dq(<8 x i32>, <8 x i32>) nounwind readnone
+
+define <16 x i16> @stack_fold_pmulhrsw(<16 x i16> %a0, <16 x i16> %a1) {
+ ;CHECK-LABEL: stack_fold_pmulhrsw
+ ;CHECK: vpmulhrsw {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <16 x i16> @llvm.x86.avx2.pmul.hr.sw(<16 x i16> %a0, <16 x i16> %a1)
+ ret <16 x i16> %2
+}
+declare <16 x i16> @llvm.x86.avx2.pmul.hr.sw(<16 x i16>, <16 x i16>) nounwind readnone
+
+define <16 x i16> @stack_fold_pmulhuw(<16 x i16> %a0, <16 x i16> %a1) {
+ ;CHECK-LABEL: stack_fold_pmulhuw
+ ;CHECK: vpmulhuw {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <16 x i16> @llvm.x86.avx2.pmulhu.w(<16 x i16> %a0, <16 x i16> %a1)
+ ret <16 x i16> %2
+}
+declare <16 x i16> @llvm.x86.avx2.pmulhu.w(<16 x i16>, <16 x i16>) nounwind readnone
+
+define <16 x i16> @stack_fold_pmulhw(<16 x i16> %a0, <16 x i16> %a1) {
+ ;CHECK-LABEL: stack_fold_pmulhw
+ ;CHECK: vpmulhw {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <16 x i16> @llvm.x86.avx2.pmulh.w(<16 x i16> %a0, <16 x i16> %a1)
+ ret <16 x i16> %2
+}
+declare <16 x i16> @llvm.x86.avx2.pmulh.w(<16 x i16>, <16 x i16>) nounwind readnone
+
+define <8 x i32> @stack_fold_pmulld(<8 x i32> %a0, <8 x i32> %a1) {
+ ;CHECK-LABEL: stack_fold_pmulld
+ ;CHECK: vpmulld {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = mul <8 x i32> %a0, %a1
+ ret <8 x i32> %2
+}
+
+define <16 x i16> @stack_fold_pmullw(<16 x i16> %a0, <16 x i16> %a1) {
+ ;CHECK-LABEL: stack_fold_pmullw
+ ;CHECK: vpmullw {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = mul <16 x i16> %a0, %a1
+ ret <16 x i16> %2
+}
+
+define <4 x i64> @stack_fold_pmuludq(<8 x i32> %a0, <8 x i32> %a1) {
+ ;CHECK-LABEL: stack_fold_pmuludq
+ ;CHECK: vpmuludq {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <4 x i64> @llvm.x86.avx2.pmulu.dq(<8 x i32> %a0, <8 x i32> %a1)
+ ret <4 x i64> %2
+}
+declare <4 x i64> @llvm.x86.avx2.pmulu.dq(<8 x i32>, <8 x i32>) nounwind readnone
+
+define <32 x i8> @stack_fold_por(<32 x i8> %a0, <32 x i8> %a1) {
+ ;CHECK-LABEL: stack_fold_por
+ ;CHECK: vpor {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = or <32 x i8> %a0, %a1
+ ; add forces execution domain
+ %3 = add <32 x i8> %2, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
+ ret <32 x i8> %3
+}
+
+define <4 x i64> @stack_fold_psadbw(<32 x i8> %a0, <32 x i8> %a1) {
+ ;CHECK-LABEL: stack_fold_psadbw
+ ;CHECK: vpsadbw {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <4 x i64> @llvm.x86.avx2.psad.bw(<32 x i8> %a0, <32 x i8> %a1)
+ ret <4 x i64> %2
+}
+declare <4 x i64> @llvm.x86.avx2.psad.bw(<32 x i8>, <32 x i8>) nounwind readnone
+
+define <32 x i8> @stack_fold_pshufb(<32 x i8> %a0, <32 x i8> %a1) {
+ ;CHECK-LABEL: stack_fold_pshufb
+ ;CHECK: vpshufb {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <32 x i8> @llvm.x86.avx2.pshuf.b(<32 x i8> %a0, <32 x i8> %a1)
+ ret <32 x i8> %2
+}
+declare <32 x i8> @llvm.x86.avx2.pshuf.b(<32 x i8>, <32 x i8>) nounwind readnone
+
+define <8 x i32> @stack_fold_pshufd(<8 x i32> %a0) {
+ ;CHECK-LABEL: stack_fold_pshufd
+ ;CHECK: vpshufd $27, {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = shufflevector <8 x i32> %a0, <8 x i32> undef, <8 x i32> <i32 3, i32 2, i32 1, i32 0, i32 7, i32 6, i32 5, i32 4>
+ ret <8 x i32> %2
+}
+
+; TODO stack_fold_pshufhw
+
+; TODO stack_fold_pshuflw
+
+define <32 x i8> @stack_fold_psignb(<32 x i8> %a0, <32 x i8> %a1) {
+ ;CHECK-LABEL: stack_fold_psignb
+ ;CHECK: vpsignb {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <32 x i8> @llvm.x86.avx2.psign.b(<32 x i8> %a0, <32 x i8> %a1)
+ ret <32 x i8> %2
+}
+declare <32 x i8> @llvm.x86.avx2.psign.b(<32 x i8>, <32 x i8>) nounwind readnone
+
+define <8 x i32> @stack_fold_psignd(<8 x i32> %a0, <8 x i32> %a1) {
+ ;CHECK-LABEL: stack_fold_psignd
+ ;CHECK: vpsignd {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <8 x i32> @llvm.x86.avx2.psign.d(<8 x i32> %a0, <8 x i32> %a1)
+ ret <8 x i32> %2
+}
+declare <8 x i32> @llvm.x86.avx2.psign.d(<8 x i32>, <8 x i32>) nounwind readnone
+
+define <16 x i16> @stack_fold_psignw(<16 x i16> %a0, <16 x i16> %a1) {
+ ;CHECK-LABEL: stack_fold_psignw
+ ;CHECK: vpsignw {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <16 x i16> @llvm.x86.avx2.psign.w(<16 x i16> %a0, <16 x i16> %a1)
+ ret <16 x i16> %2
+}
+declare <16 x i16> @llvm.x86.avx2.psign.w(<16 x i16>, <16 x i16>) nounwind readnone
+
+define <8 x i32> @stack_fold_pslld(<8 x i32> %a0, <4 x i32> %a1) {
+ ;CHECK-LABEL: stack_fold_pslld
+ ;CHECK: vpslld {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <8 x i32> @llvm.x86.avx2.psll.d(<8 x i32> %a0, <4 x i32> %a1)
+ ret <8 x i32> %2
+}
+declare <8 x i32> @llvm.x86.avx2.psll.d(<8 x i32>, <4 x i32>) nounwind readnone
+
+define <4 x i64> @stack_fold_psllq(<4 x i64> %a0, <2 x i64> %a1) {
+ ;CHECK-LABEL: stack_fold_psllq
+ ;CHECK: vpsllq {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <4 x i64> @llvm.x86.avx2.psll.q(<4 x i64> %a0, <2 x i64> %a1)
+ ret <4 x i64> %2
+}
+declare <4 x i64> @llvm.x86.avx2.psll.q(<4 x i64>, <2 x i64>) nounwind readnone
+
+define <4 x i32> @stack_fold_psllvd(<4 x i32> %a0, <4 x i32> %a1) {
+ ;CHECK-LABEL: stack_fold_psllvd
+ ;CHECK: vpsllvd {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <4 x i32> @llvm.x86.avx2.psllv.d(<4 x i32> %a0, <4 x i32> %a1)
+ ret <4 x i32> %2
+}
+declare <4 x i32> @llvm.x86.avx2.psllv.d(<4 x i32>, <4 x i32>) nounwind readnone
+
+define <8 x i32> @stack_fold_psllvd_ymm(<8 x i32> %a0, <8 x i32> %a1) {
+ ;CHECK-LABEL: stack_fold_psllvd_ymm
+ ;CHECK: vpsllvd {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <8 x i32> @llvm.x86.avx2.psllv.d.256(<8 x i32> %a0, <8 x i32> %a1)
+ ret <8 x i32> %2
+}
+declare <8 x i32> @llvm.x86.avx2.psllv.d.256(<8 x i32>, <8 x i32>) nounwind readnone
+
+define <2 x i64> @stack_fold_psllvq(<2 x i64> %a0, <2 x i64> %a1) {
+ ;CHECK-LABEL: stack_fold_psllvq
+ ;CHECK: vpsllvq {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <2 x i64> @llvm.x86.avx2.psllv.q(<2 x i64> %a0, <2 x i64> %a1)
+ ret <2 x i64> %2
+}
+declare <2 x i64> @llvm.x86.avx2.psllv.q(<2 x i64>, <2 x i64>) nounwind readnone
+
+define <4 x i64> @stack_fold_psllvq_ymm(<4 x i64> %a0, <4 x i64> %a1) {
+ ;CHECK-LABEL: stack_fold_psllvq_ymm
+ ;CHECK: vpsllvq {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <4 x i64> @llvm.x86.avx2.psllv.q.256(<4 x i64> %a0, <4 x i64> %a1)
+ ret <4 x i64> %2
+}
+declare <4 x i64> @llvm.x86.avx2.psllv.q.256(<4 x i64>, <4 x i64>) nounwind readnone
+
+define <16 x i16> @stack_fold_psllw(<16 x i16> %a0, <8 x i16> %a1) {
+ ;CHECK-LABEL: stack_fold_psllw
+ ;CHECK: vpsllw {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <16 x i16> @llvm.x86.avx2.psll.w(<16 x i16> %a0, <8 x i16> %a1)
+ ret <16 x i16> %2
+}
+declare <16 x i16> @llvm.x86.avx2.psll.w(<16 x i16>, <8 x i16>) nounwind readnone
+
+define <8 x i32> @stack_fold_psrad(<8 x i32> %a0, <4 x i32> %a1) {
+ ;CHECK-LABEL: stack_fold_psrad
+ ;CHECK: vpsrad {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <8 x i32> @llvm.x86.avx2.psra.d(<8 x i32> %a0, <4 x i32> %a1)
+ ret <8 x i32> %2
+}
+declare <8 x i32> @llvm.x86.avx2.psra.d(<8 x i32>, <4 x i32>) nounwind readnone
+
+define <4 x i32> @stack_fold_psravd(<4 x i32> %a0, <4 x i32> %a1) {
+ ;CHECK-LABEL: stack_fold_psravd
+ ;CHECK: vpsravd {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <4 x i32> @llvm.x86.avx2.psrav.d(<4 x i32> %a0, <4 x i32> %a1)
+ ret <4 x i32> %2
+}
+declare <4 x i32> @llvm.x86.avx2.psrav.d(<4 x i32>, <4 x i32>) nounwind readnone
+
+define <8 x i32> @stack_fold_psravd_ymm(<8 x i32> %a0, <8 x i32> %a1) {
+ ;CHECK-LABEL: stack_fold_psravd_ymm
+ ;CHECK: vpsravd {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <8 x i32> @llvm.x86.avx2.psrav.d.256(<8 x i32> %a0, <8 x i32> %a1)
+ ret <8 x i32> %2
+}
+declare <8 x i32> @llvm.x86.avx2.psrav.d.256(<8 x i32>, <8 x i32>) nounwind readnone
+
+define <16 x i16> @stack_fold_psraw(<16 x i16> %a0, <8 x i16> %a1) {
+ ;CHECK-LABEL: stack_fold_psraw
+ ;CHECK: vpsraw {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <16 x i16> @llvm.x86.avx2.psra.w(<16 x i16> %a0, <8 x i16> %a1)
+ ret <16 x i16> %2
+}
+declare <16 x i16> @llvm.x86.avx2.psra.w(<16 x i16>, <8 x i16>) nounwind readnone
+
+define <8 x i32> @stack_fold_psrld(<8 x i32> %a0, <4 x i32> %a1) {
+ ;CHECK-LABEL: stack_fold_psrld
+ ;CHECK: vpsrld {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <8 x i32> @llvm.x86.avx2.psrl.d(<8 x i32> %a0, <4 x i32> %a1)
+ ret <8 x i32> %2
+}
+declare <8 x i32> @llvm.x86.avx2.psrl.d(<8 x i32>, <4 x i32>) nounwind readnone
+
+define <4 x i64> @stack_fold_psrlq(<4 x i64> %a0, <2 x i64> %a1) {
+ ;CHECK-LABEL: stack_fold_psrlq
+ ;CHECK: vpsrlq {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <4 x i64> @llvm.x86.avx2.psrl.q(<4 x i64> %a0, <2 x i64> %a1)
+ ret <4 x i64> %2
+}
+declare <4 x i64> @llvm.x86.avx2.psrl.q(<4 x i64>, <2 x i64>) nounwind readnone
+
+define <4 x i32> @stack_fold_psrlvd(<4 x i32> %a0, <4 x i32> %a1) {
+ ;CHECK-LABEL: stack_fold_psrlvd
+ ;CHECK: vpsrlvd {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <4 x i32> @llvm.x86.avx2.psrlv.d(<4 x i32> %a0, <4 x i32> %a1)
+ ret <4 x i32> %2
+}
+declare <4 x i32> @llvm.x86.avx2.psrlv.d(<4 x i32>, <4 x i32>) nounwind readnone
+
+define <8 x i32> @stack_fold_psrlvd_ymm(<8 x i32> %a0, <8 x i32> %a1) {
+ ;CHECK-LABEL: stack_fold_psrlvd_ymm
+ ;CHECK: vpsrlvd {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <8 x i32> @llvm.x86.avx2.psrlv.d.256(<8 x i32> %a0, <8 x i32> %a1)
+ ret <8 x i32> %2
+}
+declare <8 x i32> @llvm.x86.avx2.psrlv.d.256(<8 x i32>, <8 x i32>) nounwind readnone
+
+define <2 x i64> @stack_fold_psrlvq(<2 x i64> %a0, <2 x i64> %a1) {
+ ;CHECK-LABEL: stack_fold_psrlvq
+ ;CHECK: vpsrlvq {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <2 x i64> @llvm.x86.avx2.psrlv.q(<2 x i64> %a0, <2 x i64> %a1)
+ ret <2 x i64> %2
+}
+declare <2 x i64> @llvm.x86.avx2.psrlv.q(<2 x i64>, <2 x i64>) nounwind readnone
+
+define <4 x i64> @stack_fold_psrlvq_ymm(<4 x i64> %a0, <4 x i64> %a1) {
+ ;CHECK-LABEL: stack_fold_psrlvq_ymm
+ ;CHECK: vpsrlvq {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <4 x i64> @llvm.x86.avx2.psrlv.q.256(<4 x i64> %a0, <4 x i64> %a1)
+ ret <4 x i64> %2
+}
+declare <4 x i64> @llvm.x86.avx2.psrlv.q.256(<4 x i64>, <4 x i64>) nounwind readnone
+
+define <16 x i16> @stack_fold_psrlw(<16 x i16> %a0, <8 x i16> %a1) {
+ ;CHECK-LABEL: stack_fold_psrlw
+ ;CHECK: vpsrlw {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <16 x i16> @llvm.x86.avx2.psrl.w(<16 x i16> %a0, <8 x i16> %a1)
+ ret <16 x i16> %2
+}
+declare <16 x i16> @llvm.x86.avx2.psrl.w(<16 x i16>, <8 x i16>) nounwind readnone
+
+define <32 x i8> @stack_fold_psubb(<32 x i8> %a0, <32 x i8> %a1) {
+ ;CHECK-LABEL: stack_fold_psubb
+ ;CHECK: vpsubb {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = sub <32 x i8> %a0, %a1
+ ret <32 x i8> %2
+}
+
+define <8 x i32> @stack_fold_psubd(<8 x i32> %a0, <8 x i32> %a1) {
+ ;CHECK-LABEL: stack_fold_psubd
+ ;CHECK: vpsubd {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = sub <8 x i32> %a0, %a1
+ ret <8 x i32> %2
+}
+
+define <4 x i64> @stack_fold_psubq(<4 x i64> %a0, <4 x i64> %a1) {
+ ;CHECK-LABEL: stack_fold_psubq
+ ;CHECK: vpsubq {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = sub <4 x i64> %a0, %a1
+ ret <4 x i64> %2
+}
+
+define <32 x i8> @stack_fold_psubsb(<32 x i8> %a0, <32 x i8> %a1) {
+ ;CHECK-LABEL: stack_fold_psubsb
+ ;CHECK: vpsubsb {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <32 x i8> @llvm.x86.avx2.psubs.b(<32 x i8> %a0, <32 x i8> %a1)
+ ret <32 x i8> %2
+}
+declare <32 x i8> @llvm.x86.avx2.psubs.b(<32 x i8>, <32 x i8>) nounwind readnone
+
+define <16 x i16> @stack_fold_psubsw(<16 x i16> %a0, <16 x i16> %a1) {
+ ;CHECK-LABEL: stack_fold_psubsw
+ ;CHECK: vpsubsw {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <16 x i16> @llvm.x86.avx2.psubs.w(<16 x i16> %a0, <16 x i16> %a1)
+ ret <16 x i16> %2
+}
+declare <16 x i16> @llvm.x86.avx2.psubs.w(<16 x i16>, <16 x i16>) nounwind readnone
+
+define <32 x i8> @stack_fold_psubusb(<32 x i8> %a0, <32 x i8> %a1) {
+ ;CHECK-LABEL: stack_fold_psubusb
+ ;CHECK: vpsubusb {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <32 x i8> @llvm.x86.avx2.psubus.b(<32 x i8> %a0, <32 x i8> %a1)
+ ret <32 x i8> %2
+}
+declare <32 x i8> @llvm.x86.avx2.psubus.b(<32 x i8>, <32 x i8>) nounwind readnone
+
+define <16 x i16> @stack_fold_psubusw(<16 x i16> %a0, <16 x i16> %a1) {
+ ;CHECK-LABEL: stack_fold_psubusw
+ ;CHECK: vpsubusw {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <16 x i16> @llvm.x86.avx2.psubus.w(<16 x i16> %a0, <16 x i16> %a1)
+ ret <16 x i16> %2
+}
+declare <16 x i16> @llvm.x86.avx2.psubus.w(<16 x i16>, <16 x i16>) nounwind readnone
+
+define <16 x i16> @stack_fold_psubw(<16 x i16> %a0, <16 x i16> %a1) {
+ ;CHECK-LABEL: stack_fold_psubw
+ ;CHECK: vpsubw {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = sub <16 x i16> %a0, %a1
+ ret <16 x i16> %2
+}
+
+define <32 x i8> @stack_fold_punpckhbw(<32 x i8> %a0, <32 x i8> %a1) {
+ ;CHECK-LABEL: stack_fold_punpckhbw
+ ;CHECK: vpunpckhbw {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = shufflevector <32 x i8> %a0, <32 x i8> %a1, <32 x i32> <i32 8, i32 40, i32 9, i32 41, i32 10, i32 42, i32 11, i32 43, i32 12, i32 44, i32 13, i32 45, i32 14, i32 46, i32 15, i32 47, i32 24, i32 56, i32 25, i32 57, i32 26, i32 58, i32 27, i32 59, i32 28, i32 60, i32 29, i32 61, i32 30, i32 62, i32 31, i32 63>
+ ret <32 x i8> %2
+}
+
+define <8 x i32> @stack_fold_punpckhdq(<8 x i32> %a0, <8 x i32> %a1) {
+ ;CHECK-LABEL: stack_fold_punpckhdq
+ ;CHECK: vpunpckhdq {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = shufflevector <8 x i32> %a0, <8 x i32> %a1, <8 x i32> <i32 2, i32 10, i32 3, i32 11, i32 6, i32 14, i32 7, i32 15>
+ ; add forces execution domain
+ %3 = add <8 x i32> %2, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
+ ret <8 x i32> %3
+}
+
+define <4 x i64> @stack_fold_punpckhqdq(<4 x i64> %a0, <4 x i64> %a1) {
+ ;CHECK-LABEL: stack_fold_punpckhqdq
+ ;CHECK: vpunpckhqdq {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = shufflevector <4 x i64> %a0, <4 x i64> %a1, <4 x i32> <i32 1, i32 5, i32 3, i32 7>
+ ; add forces execution domain
+ %3 = add <4 x i64> %2, <i64 1, i64 1, i64 1, i64 1>
+ ret <4 x i64> %3
+}
+
+define <16 x i16> @stack_fold_punpckhwd(<16 x i16> %a0, <16 x i16> %a1) {
+ ;CHECK-LABEL: stack_fold_punpckhwd
+ ;CHECK: vpunpckhwd {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = shufflevector <16 x i16> %a0, <16 x i16> %a1, <16 x i32> <i32 4, i32 20, i32 5, i32 21, i32 6, i32 22, i32 7, i32 23, i32 12, i32 28, i32 13, i32 29, i32 14, i32 30, i32 15, i32 31>
+ ret <16 x i16> %2
+}
+
+define <32 x i8> @stack_fold_punpcklbw(<32 x i8> %a0, <32 x i8> %a1) {
+ ;CHECK-LABEL: stack_fold_punpcklbw
+ ;CHECK: vpunpcklbw {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = shufflevector <32 x i8> %a0, <32 x i8> %a1, <32 x i32> <i32 0, i32 32, i32 1, i32 33, i32 2, i32 34, i32 3, i32 35, i32 4, i32 36, i32 5, i32 37, i32 6, i32 38, i32 7, i32 39, i32 16, i32 48, i32 17, i32 49, i32 18, i32 50, i32 19, i32 51, i32 20, i32 52, i32 21, i32 53, i32 22, i32 54, i32 23, i32 55>
+ ret <32 x i8> %2
+}
+
+define <8 x i32> @stack_fold_punpckldq(<8 x i32> %a0, <8 x i32> %a1) {
+ ;CHECK-LABEL: stack_fold_punpckldq
+ ;CHECK: vpunpckldq {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = shufflevector <8 x i32> %a0, <8 x i32> %a1, <8 x i32> <i32 0, i32 8, i32 1, i32 9, i32 4, i32 12, i32 5, i32 13>
+ ; add forces execution domain
+ %3 = add <8 x i32> %2, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
+ ret <8 x i32> %3
+}
+
+define <4 x i64> @stack_fold_punpcklqdq(<4 x i64> %a0, <4 x i64> %a1) {
+ ;CHECK-LABEL: stack_fold_punpcklqdq
+ ;CHECK: vpunpcklqdq {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = shufflevector <4 x i64> %a0, <4 x i64> %a1, <4 x i32> <i32 0, i32 4, i32 2, i32 6>
+ ; add forces execution domain
+ %3 = add <4 x i64> %2, <i64 1, i64 1, i64 1, i64 1>
+ ret <4 x i64> %3
+}
+
+define <16 x i16> @stack_fold_punpcklwd(<16 x i16> %a0, <16 x i16> %a1) {
+ ;CHECK-LABEL: stack_fold_punpcklwd
+ ;CHECK: vpunpcklwd {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = shufflevector <16 x i16> %a0, <16 x i16> %a1, <16 x i32> <i32 0, i32 16, i32 1, i32 17, i32 2, i32 18, i32 3, i32 19, i32 8, i32 24, i32 9, i32 25, i32 10, i32 26, i32 11, i32 27>
+ ret <16 x i16> %2
+}
+
+define <32 x i8> @stack_fold_pxor(<32 x i8> %a0, <32 x i8> %a1) {
+ ;CHECK-LABEL: stack_fold_pxor
+ ;CHECK: vpxor {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = xor <32 x i8> %a0, %a1
+ ; add forces execution domain
+ %3 = add <32 x i8> %2, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
+ ret <32 x i8> %3
+}
diff --git a/test/CodeGen/X86/stack-folding-int-sse42.ll b/test/CodeGen/X86/stack-folding-int-sse42.ll
new file mode 100644
index 0000000..099a5db
--- /dev/null
+++ b/test/CodeGen/X86/stack-folding-int-sse42.ll
@@ -0,0 +1,1143 @@
+; RUN: llc -O3 -disable-peephole -mtriple=x86_64-unknown-unknown -mcpu=x86-64 -mattr=+sse4.2,+aes,+pclmul < %s | FileCheck %s
+
+target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
+target triple = "x86_64-unknown-unknown"
+
+; Stack reload folding tests.
+;
+; By including a nop call with side effects we can force a partial register spill of the
+; relevant registers and check that the reload is correctly folded into the instruction.
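+;
+; A minimal sketch of the pattern, as used throughout this file: the nop asm
+; clobbers xmm2-xmm15, leaving only xmm0/xmm1 for its "=x" result, so an
+; argument that was live in those registers is evicted to a stack slot; the
+; CHECK line then requires the reload to appear as a folded memory operand,
+; e.g. "aesdec (%rsp), %xmm0", rather than as a separate load. The GPR-based
+; tests (stack_fold_movd_load, stack_fold_pextrd, etc.) instead clobber
+; rax-r15 so that the scalar value is spilled or reloaded the same way.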
+
+define <2 x i64> @stack_fold_aesdec(<2 x i64> %a0, <2 x i64> %a1) {
+ ;CHECK-LABEL: stack_fold_aesdec
+ ;CHECK: aesdec {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <2 x i64> @llvm.x86.aesni.aesdec(<2 x i64> %a0, <2 x i64> %a1)
+ ret <2 x i64> %2
+}
+declare <2 x i64> @llvm.x86.aesni.aesdec(<2 x i64>, <2 x i64>) nounwind readnone
+
+define <2 x i64> @stack_fold_aesdeclast(<2 x i64> %a0, <2 x i64> %a1) {
+ ;CHECK-LABEL: stack_fold_aesdeclast
+ ;CHECK: aesdeclast {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <2 x i64> @llvm.x86.aesni.aesdeclast(<2 x i64> %a0, <2 x i64> %a1)
+ ret <2 x i64> %2
+}
+declare <2 x i64> @llvm.x86.aesni.aesdeclast(<2 x i64>, <2 x i64>) nounwind readnone
+
+define <2 x i64> @stack_fold_aesenc(<2 x i64> %a0, <2 x i64> %a1) {
+ ;CHECK-LABEL: stack_fold_aesenc
+ ;CHECK: aesenc {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <2 x i64> @llvm.x86.aesni.aesenc(<2 x i64> %a0, <2 x i64> %a1)
+ ret <2 x i64> %2
+}
+declare <2 x i64> @llvm.x86.aesni.aesenc(<2 x i64>, <2 x i64>) nounwind readnone
+
+define <2 x i64> @stack_fold_aesenclast(<2 x i64> %a0, <2 x i64> %a1) {
+ ;CHECK-LABEL: stack_fold_aesenclast
+ ;CHECK: aesenclast {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <2 x i64> @llvm.x86.aesni.aesenclast(<2 x i64> %a0, <2 x i64> %a1)
+ ret <2 x i64> %2
+}
+declare <2 x i64> @llvm.x86.aesni.aesenclast(<2 x i64>, <2 x i64>) nounwind readnone
+
+define <2 x i64> @stack_fold_aesimc(<2 x i64> %a0) {
+ ;CHECK-LABEL: stack_fold_aesimc
+ ;CHECK: aesimc {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <2 x i64> @llvm.x86.aesni.aesimc(<2 x i64> %a0)
+ ret <2 x i64> %2
+}
+declare <2 x i64> @llvm.x86.aesni.aesimc(<2 x i64>) nounwind readnone
+
+define <2 x i64> @stack_fold_aeskeygenassist(<2 x i64> %a0) {
+ ;CHECK-LABEL: stack_fold_aeskeygenassist
+ ;CHECK: aeskeygenassist $7, {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <2 x i64> @llvm.x86.aesni.aeskeygenassist(<2 x i64> %a0, i8 7)
+ ret <2 x i64> %2
+}
+declare <2 x i64> @llvm.x86.aesni.aeskeygenassist(<2 x i64>, i8) nounwind readnone
+
+define <4 x i32> @stack_fold_movd_load(i32 %a0) {
+ ;CHECK-LABEL: stack_fold_movd_load
+ ;CHECK: movd {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 4-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{rax},~{rbx},~{rcx},~{rdx},~{rsi},~{rdi},~{rbp},~{r8},~{r9},~{r10},~{r11},~{r12},~{r13},~{r14},~{r15}"()
+ %2 = insertelement <4 x i32> zeroinitializer, i32 %a0, i32 0
+ ; add forces execution domain
+ %3 = add <4 x i32> %2, <i32 1, i32 1, i32 1, i32 1>
+ ret <4 x i32> %3
+}
+
+define i32 @stack_fold_movd_store(<4 x i32> %a0) {
+ ;CHECK-LABEL: stack_fold_movd_store
+ ;CHECK: movd {{%xmm[0-9][0-9]*}}, {{-?[0-9]*}}(%rsp) {{.*#+}} 4-byte Folded Spill
+ ; add forces execution domain
+ %1 = add <4 x i32> %a0, <i32 1, i32 1, i32 1, i32 1>
+ %2 = extractelement <4 x i32> %1, i32 0
+ %3 = tail call <2 x i64> asm sideeffect "nop", "=x,~{rax},~{rbx},~{rcx},~{rdx},~{rsi},~{rdi},~{rbp},~{r8},~{r9},~{r10},~{r11},~{r12},~{r13},~{r14},~{r15}"()
+ ret i32 %2
+}
+
+define <2 x i64> @stack_fold_movq_load(<2 x i64> %a0) {
+ ;CHECK-LABEL: stack_fold_movq_load
+ ;CHECK: movq {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = shufflevector <2 x i64> %a0, <2 x i64> zeroinitializer, <2 x i32> <i32 0, i32 2>
+ ret <2 x i64> %2
+}
+
+define i64 @stack_fold_movq_store(<2 x i64> %a0) {
+ ;CHECK-LABEL: stack_fold_movq_store
+ ;CHECK: movq {{%xmm[0-9][0-9]*}}, {{-?[0-9]*}}(%rsp) {{.*#+}} 8-byte Folded Spill
+ %1 = extractelement <2 x i64> %a0, i32 0
+ %2 = tail call <2 x i64> asm sideeffect "nop", "=x,~{rax},~{rbx},~{rcx},~{rdx},~{rsi},~{rdi},~{rbp},~{r8},~{r9},~{r10},~{r11},~{r12},~{r13},~{r14},~{r15}"()
+ ret i64 %1
+}
+
+define <8 x i16> @stack_fold_mpsadbw(<16 x i8> %a0, <16 x i8> %a1) {
+ ;CHECK-LABEL: stack_fold_mpsadbw
+ ;CHECK: mpsadbw $7, {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <8 x i16> @llvm.x86.sse41.mpsadbw(<16 x i8> %a0, <16 x i8> %a1, i8 7)
+ ret <8 x i16> %2
+}
+declare <8 x i16> @llvm.x86.sse41.mpsadbw(<16 x i8>, <16 x i8>, i8) nounwind readnone
+
+define <16 x i8> @stack_fold_pabsb(<16 x i8> %a0) {
+ ;CHECK-LABEL: stack_fold_pabsb
+ ;CHECK: pabsb {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <16 x i8> @llvm.x86.ssse3.pabs.b.128(<16 x i8> %a0)
+ ret <16 x i8> %2
+}
+declare <16 x i8> @llvm.x86.ssse3.pabs.b.128(<16 x i8>) nounwind readnone
+
+define <4 x i32> @stack_fold_pabsd(<4 x i32> %a0) {
+ ;CHECK-LABEL: stack_fold_pabsd
+ ;CHECK: pabsd {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <4 x i32> @llvm.x86.ssse3.pabs.d.128(<4 x i32> %a0)
+ ret <4 x i32> %2
+}
+declare <4 x i32> @llvm.x86.ssse3.pabs.d.128(<4 x i32>) nounwind readnone
+
+define <8 x i16> @stack_fold_pabsw(<8 x i16> %a0) {
+ ;CHECK-LABEL: stack_fold_pabsw
+ ;CHECK: pabsw {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <8 x i16> @llvm.x86.ssse3.pabs.w.128(<8 x i16> %a0)
+ ret <8 x i16> %2
+}
+declare <8 x i16> @llvm.x86.ssse3.pabs.w.128(<8 x i16>) nounwind readnone
+
+define <8 x i16> @stack_fold_packssdw(<4 x i32> %a0, <4 x i32> %a1) {
+ ;CHECK-LABEL: stack_fold_packssdw
+ ;CHECK: packssdw {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <8 x i16> @llvm.x86.sse2.packssdw.128(<4 x i32> %a0, <4 x i32> %a1)
+ ret <8 x i16> %2
+}
+declare <8 x i16> @llvm.x86.sse2.packssdw.128(<4 x i32>, <4 x i32>) nounwind readnone
+
+define <16 x i8> @stack_fold_packsswb(<8 x i16> %a0, <8 x i16> %a1) {
+ ;CHECK-LABEL: stack_fold_packsswb
+ ;CHECK: packsswb {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <16 x i8> @llvm.x86.sse2.packsswb.128(<8 x i16> %a0, <8 x i16> %a1)
+ ret <16 x i8> %2
+}
+declare <16 x i8> @llvm.x86.sse2.packsswb.128(<8 x i16>, <8 x i16>) nounwind readnone
+
+define <8 x i16> @stack_fold_packusdw(<4 x i32> %a0, <4 x i32> %a1) {
+ ;CHECK-LABEL: stack_fold_packusdw
+ ;CHECK: packusdw {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <8 x i16> @llvm.x86.sse41.packusdw(<4 x i32> %a0, <4 x i32> %a1)
+ ret <8 x i16> %2
+}
+declare <8 x i16> @llvm.x86.sse41.packusdw(<4 x i32>, <4 x i32>) nounwind readnone
+
+define <16 x i8> @stack_fold_packuswb(<8 x i16> %a0, <8 x i16> %a1) {
+ ;CHECK-LABEL: stack_fold_packuswb
+ ;CHECK: packuswb {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <16 x i8> @llvm.x86.sse2.packuswb.128(<8 x i16> %a0, <8 x i16> %a1)
+ ret <16 x i8> %2
+}
+declare <16 x i8> @llvm.x86.sse2.packuswb.128(<8 x i16>, <8 x i16>) nounwind readnone
+
+define <16 x i8> @stack_fold_paddb(<16 x i8> %a0, <16 x i8> %a1) {
+ ;CHECK-LABEL: stack_fold_paddb
+ ;CHECK: paddb {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = add <16 x i8> %a0, %a1
+ ret <16 x i8> %2
+}
+
+define <4 x i32> @stack_fold_paddd(<4 x i32> %a0, <4 x i32> %a1) {
+ ;CHECK-LABEL: stack_fold_paddd
+ ;CHECK: paddd {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = add <4 x i32> %a0, %a1
+ ret <4 x i32> %2
+}
+
+define <2 x i64> @stack_fold_paddq(<2 x i64> %a0, <2 x i64> %a1) {
+ ;CHECK-LABEL: stack_fold_paddq
+ ;CHECK: paddq {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = add <2 x i64> %a0, %a1
+ ret <2 x i64> %2
+}
+
+define <16 x i8> @stack_fold_paddsb(<16 x i8> %a0, <16 x i8> %a1) {
+ ;CHECK-LABEL: stack_fold_paddsb
+ ;CHECK: paddsb {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <16 x i8> @llvm.x86.sse2.padds.b(<16 x i8> %a0, <16 x i8> %a1)
+ ret <16 x i8> %2
+}
+declare <16 x i8> @llvm.x86.sse2.padds.b(<16 x i8>, <16 x i8>) nounwind readnone
+
+define <8 x i16> @stack_fold_paddsw(<8 x i16> %a0, <8 x i16> %a1) {
+ ;CHECK-LABEL: stack_fold_paddsw
+ ;CHECK: paddsw {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <8 x i16> @llvm.x86.sse2.padds.w(<8 x i16> %a0, <8 x i16> %a1)
+ ret <8 x i16> %2
+}
+declare <8 x i16> @llvm.x86.sse2.padds.w(<8 x i16>, <8 x i16>) nounwind readnone
+
+define <16 x i8> @stack_fold_paddusb(<16 x i8> %a0, <16 x i8> %a1) {
+ ;CHECK-LABEL: stack_fold_paddusb
+ ;CHECK: paddusb {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <16 x i8> @llvm.x86.sse2.paddus.b(<16 x i8> %a0, <16 x i8> %a1)
+ ret <16 x i8> %2
+}
+declare <16 x i8> @llvm.x86.sse2.paddus.b(<16 x i8>, <16 x i8>) nounwind readnone
+
+define <8 x i16> @stack_fold_paddusw(<8 x i16> %a0, <8 x i16> %a1) {
+ ;CHECK-LABEL: stack_fold_paddusw
+ ;CHECK: paddusw {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <8 x i16> @llvm.x86.sse2.paddus.w(<8 x i16> %a0, <8 x i16> %a1)
+ ret <8 x i16> %2
+}
+declare <8 x i16> @llvm.x86.sse2.paddus.w(<8 x i16>, <8 x i16>) nounwind readnone
+
+define <8 x i16> @stack_fold_paddw(<8 x i16> %a0, <8 x i16> %a1) {
+ ;CHECK-LABEL: stack_fold_paddw
+ ;CHECK: paddw {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = add <8 x i16> %a0, %a1
+ ret <8 x i16> %2
+}
+
+define <16 x i8> @stack_fold_palignr(<16 x i8> %a0, <16 x i8> %a1) {
+ ;CHECK-LABEL: stack_fold_palignr
+ ;CHECK: palignr $1, {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = shufflevector <16 x i8> %a1, <16 x i8> %a0, <16 x i32> <i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16>
+ ret <16 x i8> %2
+}
+
+define <16 x i8> @stack_fold_pand(<16 x i8> %a0, <16 x i8> %a1) {
+ ;CHECK-LABEL: stack_fold_pand
+ ;CHECK: pand {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = and <16 x i8> %a0, %a1
+ ; add forces execution domain
+ %3 = add <16 x i8> %2, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
+ ret <16 x i8> %3
+}
+
+define <16 x i8> @stack_fold_pandn(<16 x i8> %a0, <16 x i8> %a1) {
+ ;CHECK-LABEL: stack_fold_pandn
+ ;CHECK: pandn {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = xor <16 x i8> %a0, <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>
+ %3 = and <16 x i8> %2, %a1
+ ; add forces execution domain
+ %4 = add <16 x i8> %3, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
+ ret <16 x i8> %4
+}
+
+define <16 x i8> @stack_fold_pavgb(<16 x i8> %a0, <16 x i8> %a1) {
+ ;CHECK-LABEL: stack_fold_pavgb
+ ;CHECK: pavgb {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <16 x i8> @llvm.x86.sse2.pavg.b(<16 x i8> %a0, <16 x i8> %a1)
+ ret <16 x i8> %2
+}
+declare <16 x i8> @llvm.x86.sse2.pavg.b(<16 x i8>, <16 x i8>) nounwind readnone
+
+define <8 x i16> @stack_fold_pavgw(<8 x i16> %a0, <8 x i16> %a1) {
+ ;CHECK-LABEL: stack_fold_pavgw
+ ;CHECK: pavgw {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <8 x i16> @llvm.x86.sse2.pavg.w(<8 x i16> %a0, <8 x i16> %a1)
+ ret <8 x i16> %2
+}
+declare <8 x i16> @llvm.x86.sse2.pavg.w(<8 x i16>, <8 x i16>) nounwind readnone
+
+define <16 x i8> @stack_fold_pblendvb(<16 x i8> %a0, <16 x i8> %a1, <16 x i8> %c) {
+ ;CHECK-LABEL: stack_fold_pblendvb
+ ;CHECK: pblendvb {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <16 x i8> @llvm.x86.sse41.pblendvb(<16 x i8> %a1, <16 x i8> %c, <16 x i8> %a0)
+ ret <16 x i8> %2
+}
+declare <16 x i8> @llvm.x86.sse41.pblendvb(<16 x i8>, <16 x i8>, <16 x i8>) nounwind readnone
+
+define <8 x i16> @stack_fold_pblendw(<8 x i16> %a0, <8 x i16> %a1) {
+ ;CHECK-LABEL: stack_fold_pblendw
+ ;CHECK: pblendw $7, {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <8 x i16> @llvm.x86.sse41.pblendw(<8 x i16> %a0, <8 x i16> %a1, i8 7)
+ ret <8 x i16> %2
+}
+declare <8 x i16> @llvm.x86.sse41.pblendw(<8 x i16>, <8 x i16>, i8) nounwind readnone
+
+define <2 x i64> @stack_fold_pclmulqdq(<2 x i64> %a0, <2 x i64> %a1) {
+ ;CHECK-LABEL: stack_fold_pclmulqdq
+ ;CHECK: pclmulqdq $0, {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <2 x i64> @llvm.x86.pclmulqdq(<2 x i64> %a0, <2 x i64> %a1, i8 0)
+ ret <2 x i64> %2
+}
+declare <2 x i64> @llvm.x86.pclmulqdq(<2 x i64>, <2 x i64>, i8) nounwind readnone
+
+define <16 x i8> @stack_fold_pcmpeqb(<16 x i8> %a0, <16 x i8> %a1) {
+ ;CHECK-LABEL: stack_fold_pcmpeqb
+ ;CHECK: pcmpeqb {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = icmp eq <16 x i8> %a0, %a1
+ %3 = sext <16 x i1> %2 to <16 x i8>
+ ret <16 x i8> %3
+}
+
+define <4 x i32> @stack_fold_pcmpeqd(<4 x i32> %a0, <4 x i32> %a1) {
+ ;CHECK-LABEL: stack_fold_pcmpeqd
+ ;CHECK: pcmpeqd {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = icmp eq <4 x i32> %a0, %a1
+ %3 = sext <4 x i1> %2 to <4 x i32>
+ ret <4 x i32> %3
+}
+
+define <2 x i64> @stack_fold_pcmpeqq(<2 x i64> %a0, <2 x i64> %a1) {
+ ;CHECK-LABEL: stack_fold_pcmpeqq
+ ;CHECK: pcmpeqq {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = icmp eq <2 x i64> %a0, %a1
+ %3 = sext <2 x i1> %2 to <2 x i64>
+ ret <2 x i64> %3
+}
+
+define <8 x i16> @stack_fold_pcmpeqw(<8 x i16> %a0, <8 x i16> %a1) {
+ ;CHECK-LABEL: stack_fold_pcmpeqw
+ ;CHECK: pcmpeqw {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = icmp eq <8 x i16> %a0, %a1
+ %3 = sext <8 x i1> %2 to <8 x i16>
+ ret <8 x i16> %3
+}
+
+define i32 @stack_fold_pcmpestri(<16 x i8> %a0, <16 x i8> %a1) {
+ ;CHECK-LABEL: stack_fold_pcmpestri
+ ;CHECK: pcmpestri $7, {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{rax},~{flags}"()
+ %2 = call i32 @llvm.x86.sse42.pcmpestri128(<16 x i8> %a0, i32 7, <16 x i8> %a1, i32 7, i8 7)
+ ret i32 %2
+}
+declare i32 @llvm.x86.sse42.pcmpestri128(<16 x i8>, i32, <16 x i8>, i32, i8) nounwind readnone
+
+define <16 x i8> @stack_fold_pcmpestrm(<16 x i8> %a0, <16 x i8> %a1) {
+ ;CHECK-LABEL: stack_fold_pcmpestrm
+ ;CHECK: pcmpestrm $7, {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{rax},~{flags}"()
+ %2 = call <16 x i8> @llvm.x86.sse42.pcmpestrm128(<16 x i8> %a0, i32 7, <16 x i8> %a1, i32 7, i8 7)
+ ret <16 x i8> %2
+}
+declare <16 x i8> @llvm.x86.sse42.pcmpestrm128(<16 x i8>, i32, <16 x i8>, i32, i8) nounwind readnone
+
+define <16 x i8> @stack_fold_pcmpgtb(<16 x i8> %a0, <16 x i8> %a1) {
+ ;CHECK-LABEL: stack_fold_pcmpgtb
+ ;CHECK: pcmpgtb {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = icmp sgt <16 x i8> %a0, %a1
+ %3 = sext <16 x i1> %2 to <16 x i8>
+ ret <16 x i8> %3
+}
+
+define <4 x i32> @stack_fold_pcmpgtd(<4 x i32> %a0, <4 x i32> %a1) {
+ ;CHECK-LABEL: stack_fold_pcmpgtd
+ ;CHECK: pcmpgtd {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = icmp sgt <4 x i32> %a0, %a1
+ %3 = sext <4 x i1> %2 to <4 x i32>
+ ret <4 x i32> %3
+}
+
+define <2 x i64> @stack_fold_pcmpgtq(<2 x i64> %a0, <2 x i64> %a1) {
+ ;CHECK-LABEL: stack_fold_pcmpgtq
+ ;CHECK: pcmpgtq {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = icmp sgt <2 x i64> %a0, %a1
+ %3 = sext <2 x i1> %2 to <2 x i64>
+ ret <2 x i64> %3
+}
+
+define <8 x i16> @stack_fold_pcmpgtw(<8 x i16> %a0, <8 x i16> %a1) {
+ ;CHECK-LABEL: stack_fold_pcmpgtw
+ ;CHECK: pcmpgtw {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = icmp sgt <8 x i16> %a0, %a1
+ %3 = sext <8 x i1> %2 to <8 x i16>
+ ret <8 x i16> %3
+}
+
+define i32 @stack_fold_pcmpistri(<16 x i8> %a0, <16 x i8> %a1) {
+ ;CHECK-LABEL: stack_fold_pcmpistri
+ ;CHECK: pcmpistri $7, {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call i32 @llvm.x86.sse42.pcmpistri128(<16 x i8> %a0, <16 x i8> %a1, i8 7)
+ ret i32 %2
+}
+declare i32 @llvm.x86.sse42.pcmpistri128(<16 x i8>, <16 x i8>, i8) nounwind readnone
+
+define <16 x i8> @stack_fold_pcmpistrm(<16 x i8> %a0, <16 x i8> %a1) {
+ ;CHECK-LABEL: stack_fold_pcmpistrm
+ ;CHECK: pcmpistrm $7, {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <16 x i8> @llvm.x86.sse42.pcmpistrm128(<16 x i8> %a0, <16 x i8> %a1, i8 7)
+ ret <16 x i8> %2
+}
+declare <16 x i8> @llvm.x86.sse42.pcmpistrm128(<16 x i8>, <16 x i8>, i8) nounwind readnone
+
+; TODO stack_fold_pextrb
+
+define i32 @stack_fold_pextrd(<4 x i32> %a0) {
+ ;CHECK-LABEL: stack_fold_pextrd
+ ;CHECK: pextrd $1, {{%xmm[0-9][0-9]*}}, {{-?[0-9]*}}(%rsp) {{.*#+}} 4-byte Folded Spill
+ ;CHECK: movl {{-?[0-9]*}}(%rsp), %eax {{.*#+}} 4-byte Reload
+ %1 = extractelement <4 x i32> %a0, i32 1
+ %2 = tail call <2 x i64> asm sideeffect "nop", "=x,~{rax},~{rbx},~{rcx},~{rdx},~{rsi},~{rdi},~{rbp},~{r8},~{r9},~{r10},~{r11},~{r12},~{r13},~{r14},~{r15}"()
+ ret i32 %1
+}
+
+define i64 @stack_fold_pextrq(<2 x i64> %a0) {
+ ;CHECK-LABEL: stack_fold_pextrq
+ ;CHECK: pextrq $1, {{%xmm[0-9][0-9]*}}, {{-?[0-9]*}}(%rsp) {{.*#+}} 8-byte Folded Spill
+ ;CHECK: movq {{-?[0-9]*}}(%rsp), %rax {{.*#+}} 8-byte Reload
+ %1 = extractelement <2 x i64> %a0, i32 1
+ %2 = tail call <2 x i64> asm sideeffect "nop", "=x,~{rax},~{rbx},~{rcx},~{rdx},~{rsi},~{rdi},~{rbp},~{r8},~{r9},~{r10},~{r11},~{r12},~{r13},~{r14},~{r15}"()
+ ret i64 %1
+}
+
+; TODO stack_fold_pextrw
+
+define <4 x i32> @stack_fold_phaddd(<4 x i32> %a0, <4 x i32> %a1) {
+ ;CHECK-LABEL: stack_fold_phaddd
+ ;CHECK: phaddd {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <4 x i32> @llvm.x86.ssse3.phadd.d.128(<4 x i32> %a0, <4 x i32> %a1)
+ ret <4 x i32> %2
+}
+declare <4 x i32> @llvm.x86.ssse3.phadd.d.128(<4 x i32>, <4 x i32>) nounwind readnone
+
+define <8 x i16> @stack_fold_phaddsw(<8 x i16> %a0, <8 x i16> %a1) {
+ ;CHECK-LABEL: stack_fold_phaddsw
+ ;CHECK: phaddsw {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <8 x i16> @llvm.x86.ssse3.phadd.sw.128(<8 x i16> %a0, <8 x i16> %a1)
+ ret <8 x i16> %2
+}
+declare <8 x i16> @llvm.x86.ssse3.phadd.sw.128(<8 x i16>, <8 x i16>) nounwind readnone
+
+define <8 x i16> @stack_fold_phaddw(<8 x i16> %a0, <8 x i16> %a1) {
+ ;CHECK-LABEL: stack_fold_phaddw
+ ;CHECK: phaddw {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <8 x i16> @llvm.x86.ssse3.phadd.w.128(<8 x i16> %a0, <8 x i16> %a1)
+ ret <8 x i16> %2
+}
+declare <8 x i16> @llvm.x86.ssse3.phadd.w.128(<8 x i16>, <8 x i16>) nounwind readnone
+
+define <8 x i16> @stack_fold_phminposuw(<8 x i16> %a0) {
+ ;CHECK-LABEL: stack_fold_phminposuw
+ ;CHECK: phminposuw {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <8 x i16> @llvm.x86.sse41.phminposuw(<8 x i16> %a0)
+ ret <8 x i16> %2
+}
+declare <8 x i16> @llvm.x86.sse41.phminposuw(<8 x i16>) nounwind readnone
+
+define <4 x i32> @stack_fold_phsubd(<4 x i32> %a0, <4 x i32> %a1) {
+ ;CHECK-LABEL: stack_fold_phsubd
+ ;CHECK: phsubd {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <4 x i32> @llvm.x86.ssse3.phsub.d.128(<4 x i32> %a0, <4 x i32> %a1)
+ ret <4 x i32> %2
+}
+declare <4 x i32> @llvm.x86.ssse3.phsub.d.128(<4 x i32>, <4 x i32>) nounwind readnone
+
+define <8 x i16> @stack_fold_phsubsw(<8 x i16> %a0, <8 x i16> %a1) {
+ ;CHECK-LABEL: stack_fold_phsubsw
+ ;CHECK: phsubsw {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <8 x i16> @llvm.x86.ssse3.phsub.sw.128(<8 x i16> %a0, <8 x i16> %a1)
+ ret <8 x i16> %2
+}
+declare <8 x i16> @llvm.x86.ssse3.phsub.sw.128(<8 x i16>, <8 x i16>) nounwind readnone
+
+define <8 x i16> @stack_fold_phsubw(<8 x i16> %a0, <8 x i16> %a1) {
+ ;CHECK-LABEL: stack_fold_phsubw
+ ;CHECK: phsubw {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <8 x i16> @llvm.x86.ssse3.phsub.w.128(<8 x i16> %a0, <8 x i16> %a1)
+ ret <8 x i16> %2
+}
+declare <8 x i16> @llvm.x86.ssse3.phsub.w.128(<8 x i16>, <8 x i16>) nounwind readnone
+
+define <16 x i8> @stack_fold_pinsrb(<16 x i8> %a0, i8 %a1) {
+ ;CHECK-LABEL: stack_fold_pinsrb
+ ;CHECK: pinsrb $1, {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 4-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{rax},~{rbx},~{rcx},~{rdx},~{rsi},~{rdi},~{rbp},~{r8},~{r9},~{r10},~{r11},~{r12},~{r13},~{r14},~{r15}"()
+ %2 = insertelement <16 x i8> %a0, i8 %a1, i32 1
+ ret <16 x i8> %2
+}
+
+define <4 x i32> @stack_fold_pinsrd(<4 x i32> %a0, i32 %a1) {
+ ;CHECK-LABEL: stack_fold_pinsrd
+ ;CHECK: pinsrd $1, {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 4-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{rax},~{rbx},~{rcx},~{rdx},~{rsi},~{rdi},~{rbp},~{r8},~{r9},~{r10},~{r11},~{r12},~{r13},~{r14},~{r15}"()
+ %2 = insertelement <4 x i32> %a0, i32 %a1, i32 1
+ ret <4 x i32> %2
+}
+
+define <2 x i64> @stack_fold_pinsrq(<2 x i64> %a0, i64 %a1) {
+ ;CHECK-LABEL: stack_fold_pinsrq
+ ;CHECK: pinsrq $1, {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 8-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{rax},~{rbx},~{rcx},~{rdx},~{rsi},~{rdi},~{rbp},~{r8},~{r9},~{r10},~{r11},~{r12},~{r13},~{r14},~{r15}"()
+ %2 = insertelement <2 x i64> %a0, i64 %a1, i32 1
+ ret <2 x i64> %2
+}
+
+define <8 x i16> @stack_fold_pinsrw(<8 x i16> %a0, i16 %a1) {
+ ;CHECK-LABEL: stack_fold_pinsrw
+ ;CHECK: pinsrw $1, {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 4-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{rax},~{rbx},~{rcx},~{rdx},~{rsi},~{rdi},~{rbp},~{r8},~{r9},~{r10},~{r11},~{r12},~{r13},~{r14},~{r15}"()
+ %2 = insertelement <8 x i16> %a0, i16 %a1, i32 1
+ ret <8 x i16> %2
+}
+
+define <8 x i16> @stack_fold_pmaddubsw(<16 x i8> %a0, <16 x i8> %a1) {
+ ;CHECK-LABEL: stack_fold_pmaddubsw
+ ;CHECK: pmaddubsw {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <8 x i16> @llvm.x86.ssse3.pmadd.ub.sw.128(<16 x i8> %a0, <16 x i8> %a1)
+ ret <8 x i16> %2
+}
+declare <8 x i16> @llvm.x86.ssse3.pmadd.ub.sw.128(<16 x i8>, <16 x i8>) nounwind readnone
+
+define <4 x i32> @stack_fold_pmaddwd(<8 x i16> %a0, <8 x i16> %a1) {
+ ;CHECK-LABEL: stack_fold_pmaddwd
+ ;CHECK: pmaddwd {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <4 x i32> @llvm.x86.sse2.pmadd.wd(<8 x i16> %a0, <8 x i16> %a1)
+ ret <4 x i32> %2
+}
+declare <4 x i32> @llvm.x86.sse2.pmadd.wd(<8 x i16>, <8 x i16>) nounwind readnone
+
+define <16 x i8> @stack_fold_pmaxsb(<16 x i8> %a0, <16 x i8> %a1) {
+ ;CHECK-LABEL: stack_fold_pmaxsb
+ ;CHECK: pmaxsb {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <16 x i8> @llvm.x86.sse41.pmaxsb(<16 x i8> %a0, <16 x i8> %a1)
+ ret <16 x i8> %2
+}
+declare <16 x i8> @llvm.x86.sse41.pmaxsb(<16 x i8>, <16 x i8>) nounwind readnone
+
+define <4 x i32> @stack_fold_pmaxsd(<4 x i32> %a0, <4 x i32> %a1) {
+ ;CHECK-LABEL: stack_fold_pmaxsd
+ ;CHECK: pmaxsd {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <4 x i32> @llvm.x86.sse41.pmaxsd(<4 x i32> %a0, <4 x i32> %a1)
+ ret <4 x i32> %2
+}
+declare <4 x i32> @llvm.x86.sse41.pmaxsd(<4 x i32>, <4 x i32>) nounwind readnone
+
+define <8 x i16> @stack_fold_pmaxsw(<8 x i16> %a0, <8 x i16> %a1) {
+ ;CHECK-LABEL: stack_fold_pmaxsw
+ ;CHECK: pmaxsw {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <8 x i16> @llvm.x86.sse2.pmaxs.w(<8 x i16> %a0, <8 x i16> %a1)
+ ret <8 x i16> %2
+}
+declare <8 x i16> @llvm.x86.sse2.pmaxs.w(<8 x i16>, <8 x i16>) nounwind readnone
+
+define <16 x i8> @stack_fold_pmaxub(<16 x i8> %a0, <16 x i8> %a1) {
+ ;CHECK-LABEL: stack_fold_pmaxub
+ ;CHECK: pmaxub {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <16 x i8> @llvm.x86.sse2.pmaxu.b(<16 x i8> %a0, <16 x i8> %a1)
+ ret <16 x i8> %2
+}
+declare <16 x i8> @llvm.x86.sse2.pmaxu.b(<16 x i8>, <16 x i8>) nounwind readnone
+
+define <4 x i32> @stack_fold_pmaxud(<4 x i32> %a0, <4 x i32> %a1) {
+ ;CHECK-LABEL: stack_fold_pmaxud
+ ;CHECK: pmaxud {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <4 x i32> @llvm.x86.sse41.pmaxud(<4 x i32> %a0, <4 x i32> %a1)
+ ret <4 x i32> %2
+}
+declare <4 x i32> @llvm.x86.sse41.pmaxud(<4 x i32>, <4 x i32>) nounwind readnone
+
+define <8 x i16> @stack_fold_pmaxuw(<8 x i16> %a0, <8 x i16> %a1) {
+ ;CHECK-LABEL: stack_fold_pmaxuw
+ ;CHECK: pmaxuw {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <8 x i16> @llvm.x86.sse41.pmaxuw(<8 x i16> %a0, <8 x i16> %a1)
+ ret <8 x i16> %2
+}
+declare <8 x i16> @llvm.x86.sse41.pmaxuw(<8 x i16>, <8 x i16>) nounwind readnone
+
+define <16 x i8> @stack_fold_pminsb(<16 x i8> %a0, <16 x i8> %a1) {
+ ;CHECK-LABEL: stack_fold_pminsb
+ ;CHECK: pminsb {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <16 x i8> @llvm.x86.sse41.pminsb(<16 x i8> %a0, <16 x i8> %a1)
+ ret <16 x i8> %2
+}
+declare <16 x i8> @llvm.x86.sse41.pminsb(<16 x i8>, <16 x i8>) nounwind readnone
+
+define <4 x i32> @stack_fold_pminsd(<4 x i32> %a0, <4 x i32> %a1) {
+ ;CHECK-LABEL: stack_fold_pminsd
+ ;CHECK: pminsd {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <4 x i32> @llvm.x86.sse41.pminsd(<4 x i32> %a0, <4 x i32> %a1)
+ ret <4 x i32> %2
+}
+declare <4 x i32> @llvm.x86.sse41.pminsd(<4 x i32>, <4 x i32>) nounwind readnone
+
+define <8 x i16> @stack_fold_pminsw(<8 x i16> %a0, <8 x i16> %a1) {
+ ;CHECK-LABEL: stack_fold_pminsw
+ ;CHECK: pminsw {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <8 x i16> @llvm.x86.sse2.pmins.w(<8 x i16> %a0, <8 x i16> %a1)
+ ret <8 x i16> %2
+}
+declare <8 x i16> @llvm.x86.sse2.pmins.w(<8 x i16>, <8 x i16>) nounwind readnone
+
+define <16 x i8> @stack_fold_pminub(<16 x i8> %a0, <16 x i8> %a1) {
+ ;CHECK-LABEL: stack_fold_pminub
+ ;CHECK: pminub {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <16 x i8> @llvm.x86.sse2.pminu.b(<16 x i8> %a0, <16 x i8> %a1)
+ ret <16 x i8> %2
+}
+declare <16 x i8> @llvm.x86.sse2.pminu.b(<16 x i8>, <16 x i8>) nounwind readnone
+
+define <4 x i32> @stack_fold_pminud(<4 x i32> %a0, <4 x i32> %a1) {
+ ;CHECK-LABEL: stack_fold_pminud
+ ;CHECK: pminud {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <4 x i32> @llvm.x86.sse41.pminud(<4 x i32> %a0, <4 x i32> %a1)
+ ret <4 x i32> %2
+}
+declare <4 x i32> @llvm.x86.sse41.pminud(<4 x i32>, <4 x i32>) nounwind readnone
+
+define <8 x i16> @stack_fold_pminuw(<8 x i16> %a0, <8 x i16> %a1) {
+ ;CHECK-LABEL: stack_fold_pminuw
+ ;CHECK: pminuw {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <8 x i16> @llvm.x86.sse41.pminuw(<8 x i16> %a0, <8 x i16> %a1)
+ ret <8 x i16> %2
+}
+declare <8 x i16> @llvm.x86.sse41.pminuw(<8 x i16>, <8 x i16>) nounwind readnone
+
+define <4 x i32> @stack_fold_pmovsxbd(<16 x i8> %a0) {
+ ;CHECK-LABEL: stack_fold_pmovsxbd
+ ;CHECK: pmovsxbd {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <4 x i32> @llvm.x86.sse41.pmovsxbd(<16 x i8> %a0)
+ ret <4 x i32> %2
+}
+declare <4 x i32> @llvm.x86.sse41.pmovsxbd(<16 x i8>) nounwind readnone
+
+define <2 x i64> @stack_fold_pmovsxbq(<16 x i8> %a0) {
+ ;CHECK-LABEL: stack_fold_pmovsxbq
+ ;CHECK: pmovsxbq {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <2 x i64> @llvm.x86.sse41.pmovsxbq(<16 x i8> %a0)
+ ret <2 x i64> %2
+}
+declare <2 x i64> @llvm.x86.sse41.pmovsxbq(<16 x i8>) nounwind readnone
+
+define <8 x i16> @stack_fold_pmovsxbw(<16 x i8> %a0) {
+ ;CHECK-LABEL: stack_fold_pmovsxbw
+ ;CHECK: pmovsxbw {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <8 x i16> @llvm.x86.sse41.pmovsxbw(<16 x i8> %a0)
+ ret <8 x i16> %2
+}
+declare <8 x i16> @llvm.x86.sse41.pmovsxbw(<16 x i8>) nounwind readnone
+
+define <2 x i64> @stack_fold_pmovsxdq(<4 x i32> %a0) {
+ ;CHECK-LABEL: stack_fold_pmovsxdq
+ ;CHECK: pmovsxdq {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <2 x i64> @llvm.x86.sse41.pmovsxdq(<4 x i32> %a0)
+ ret <2 x i64> %2
+}
+declare <2 x i64> @llvm.x86.sse41.pmovsxdq(<4 x i32>) nounwind readnone
+
+define <4 x i32> @stack_fold_pmovsxwd(<8 x i16> %a0) {
+ ;CHECK-LABEL: stack_fold_pmovsxwd
+ ;CHECK: pmovsxwd {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <4 x i32> @llvm.x86.sse41.pmovsxwd(<8 x i16> %a0)
+ ret <4 x i32> %2
+}
+declare <4 x i32> @llvm.x86.sse41.pmovsxwd(<8 x i16>) nounwind readnone
+
+define <2 x i64> @stack_fold_pmovsxwq(<8 x i16> %a0) {
+ ;CHECK-LABEL: stack_fold_pmovsxwq
+ ;CHECK: pmovsxwq {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <2 x i64> @llvm.x86.sse41.pmovsxwq(<8 x i16> %a0)
+ ret <2 x i64> %2
+}
+declare <2 x i64> @llvm.x86.sse41.pmovsxwq(<8 x i16>) nounwind readnone
+
+define <4 x i32> @stack_fold_pmovzxbd(<16 x i8> %a0) {
+ ;CHECK-LABEL: stack_fold_pmovzxbd
+ ;CHECK: pmovzxbd {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <4 x i32> @llvm.x86.sse41.pmovzxbd(<16 x i8> %a0)
+ ret <4 x i32> %2
+}
+declare <4 x i32> @llvm.x86.sse41.pmovzxbd(<16 x i8>) nounwind readnone
+
+define <2 x i64> @stack_fold_pmovzxbq(<16 x i8> %a0) {
+ ;CHECK-LABEL: stack_fold_pmovzxbq
+ ;CHECK: pmovzxbq {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <2 x i64> @llvm.x86.sse41.pmovzxbq(<16 x i8> %a0)
+ ret <2 x i64> %2
+}
+declare <2 x i64> @llvm.x86.sse41.pmovzxbq(<16 x i8>) nounwind readnone
+
+define <8 x i16> @stack_fold_pmovzxbw(<16 x i8> %a0) {
+ ;CHECK-LABEL: stack_fold_pmovzxbw
+ ;CHECK: pmovzxbw {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <8 x i16> @llvm.x86.sse41.pmovzxbw(<16 x i8> %a0)
+ ret <8 x i16> %2
+}
+declare <8 x i16> @llvm.x86.sse41.pmovzxbw(<16 x i8>) nounwind readnone
+
+define <2 x i64> @stack_fold_pmovzxdq(<4 x i32> %a0) {
+ ;CHECK-LABEL: stack_fold_pmovzxdq
+ ;CHECK: pmovzxdq {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <2 x i64> @llvm.x86.sse41.pmovzxdq(<4 x i32> %a0)
+ ret <2 x i64> %2
+}
+declare <2 x i64> @llvm.x86.sse41.pmovzxdq(<4 x i32>) nounwind readnone
+
+define <4 x i32> @stack_fold_pmovzxwd(<8 x i16> %a0) {
+ ;CHECK-LABEL: stack_fold_pmovzxwd
+ ;CHECK: pmovzxwd {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <4 x i32> @llvm.x86.sse41.pmovzxwd(<8 x i16> %a0)
+ ret <4 x i32> %2
+}
+declare <4 x i32> @llvm.x86.sse41.pmovzxwd(<8 x i16>) nounwind readnone
+
+define <2 x i64> @stack_fold_pmovzxwq(<8 x i16> %a0) {
+ ;CHECK-LABEL: stack_fold_pmovzxwq
+ ;CHECK: pmovzxwq {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <2 x i64> @llvm.x86.sse41.pmovzxwq(<8 x i16> %a0)
+ ret <2 x i64> %2
+}
+declare <2 x i64> @llvm.x86.sse41.pmovzxwq(<8 x i16>) nounwind readnone
+
+define <2 x i64> @stack_fold_pmuldq(<4 x i32> %a0, <4 x i32> %a1) {
+ ;CHECK-LABEL: stack_fold_pmuldq
+ ;CHECK: pmuldq {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <2 x i64> @llvm.x86.sse41.pmuldq(<4 x i32> %a0, <4 x i32> %a1)
+ ret <2 x i64> %2
+}
+declare <2 x i64> @llvm.x86.sse41.pmuldq(<4 x i32>, <4 x i32>) nounwind readnone
+
+define <8 x i16> @stack_fold_pmulhrsw(<8 x i16> %a0, <8 x i16> %a1) {
+ ;CHECK-LABEL: stack_fold_pmulhrsw
+ ;CHECK: pmulhrsw {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <8 x i16> @llvm.x86.ssse3.pmul.hr.sw.128(<8 x i16> %a0, <8 x i16> %a1)
+ ret <8 x i16> %2
+}
+declare <8 x i16> @llvm.x86.ssse3.pmul.hr.sw.128(<8 x i16>, <8 x i16>) nounwind readnone
+
+define <8 x i16> @stack_fold_pmulhuw(<8 x i16> %a0, <8 x i16> %a1) {
+ ;CHECK-LABEL: stack_fold_pmulhuw
+ ;CHECK: pmulhuw {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <8 x i16> @llvm.x86.sse2.pmulhu.w(<8 x i16> %a0, <8 x i16> %a1)
+ ret <8 x i16> %2
+}
+declare <8 x i16> @llvm.x86.sse2.pmulhu.w(<8 x i16>, <8 x i16>) nounwind readnone
+
+define <8 x i16> @stack_fold_pmulhw(<8 x i16> %a0, <8 x i16> %a1) {
+ ;CHECK-LABEL: stack_fold_pmulhw
+ ;CHECK: pmulhw {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <8 x i16> @llvm.x86.sse2.pmulh.w(<8 x i16> %a0, <8 x i16> %a1)
+ ret <8 x i16> %2
+}
+declare <8 x i16> @llvm.x86.sse2.pmulh.w(<8 x i16>, <8 x i16>) nounwind readnone
+
+define <4 x i32> @stack_fold_pmulld(<4 x i32> %a0, <4 x i32> %a1) {
+ ;CHECK-LABEL: stack_fold_pmulld
+ ;CHECK: pmulld {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = mul <4 x i32> %a0, %a1
+ ret <4 x i32> %2
+}
+
+define <8 x i16> @stack_fold_pmullw(<8 x i16> %a0, <8 x i16> %a1) {
+ ;CHECK-LABEL: stack_fold_pmullw
+ ;CHECK: pmullw {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = mul <8 x i16> %a0, %a1
+ ret <8 x i16> %2
+}
+
+define <2 x i64> @stack_fold_pmuludq(<4 x i32> %a0, <4 x i32> %a1) {
+ ;CHECK-LABEL: stack_fold_pmuludq
+ ;CHECK: pmuludq {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <2 x i64> @llvm.x86.sse2.pmulu.dq(<4 x i32> %a0, <4 x i32> %a1)
+ ret <2 x i64> %2
+}
+declare <2 x i64> @llvm.x86.sse2.pmulu.dq(<4 x i32>, <4 x i32>) nounwind readnone
+
+define <16 x i8> @stack_fold_por(<16 x i8> %a0, <16 x i8> %a1) {
+ ;CHECK-LABEL: stack_fold_por
+ ;CHECK: por {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = or <16 x i8> %a0, %a1
+ ; add forces execution domain
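+ ; (without it the or could be selected in the floating-point domain, e.g. as orps)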
+ %3 = add <16 x i8> %2, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
+ ret <16 x i8> %3
+}
+
+define <2 x i64> @stack_fold_psadbw(<16 x i8> %a0, <16 x i8> %a1) {
+ ;CHECK-LABEL: stack_fold_psadbw
+ ;CHECK: psadbw {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <2 x i64> @llvm.x86.sse2.psad.bw(<16 x i8> %a0, <16 x i8> %a1)
+ ret <2 x i64> %2
+}
+declare <2 x i64> @llvm.x86.sse2.psad.bw(<16 x i8>, <16 x i8>) nounwind readnone
+
+define <16 x i8> @stack_fold_pshufb(<16 x i8> %a0, <16 x i8> %a1) {
+ ;CHECK-LABEL: stack_fold_pshufb
+ ;CHECK: pshufb {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <16 x i8> @llvm.x86.ssse3.pshuf.b.128(<16 x i8> %a0, <16 x i8> %a1)
+ ret <16 x i8> %2
+}
+declare <16 x i8> @llvm.x86.ssse3.pshuf.b.128(<16 x i8>, <16 x i8>) nounwind readnone
+
+define <4 x i32> @stack_fold_pshufd(<4 x i32> %a0) {
+ ;CHECK-LABEL: stack_fold_pshufd
+ ;CHECK: pshufd $27, {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
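+ ; the mask <3,2,1,0> below encodes as imm8 = 3 | (2<<2) | (1<<4) | (0<<6) = 27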
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = shufflevector <4 x i32> %a0, <4 x i32> undef, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
+ ret <4 x i32> %2
+}
+
+define <8 x i16> @stack_fold_pshufhw(<8 x i16> %a0) {
+ ;CHECK-LABEL: stack_fold_pshufhw
+ ;CHECK: pshufhw $11, {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
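+ ; the high-half mask <7,6,4,4> is <3,2,0,0> within the half: imm8 = 3 | (2<<2) = 11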
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = shufflevector <8 x i16> %a0, <8 x i16> undef, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 7, i32 6, i32 4, i32 4>
+ ret <8 x i16> %2
+}
+
+define <8 x i16> @stack_fold_pshuflw(<8 x i16> %a0) {
+ ;CHECK-LABEL: stack_fold_pshuflw
+ ;CHECK: pshuflw $27, {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
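+ ; the low-half mask <3,2,1,0> encodes as imm8 = 3 | (2<<2) | (1<<4) | (0<<6) = 27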
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = shufflevector <8 x i16> %a0, <8 x i16> undef, <8 x i32> <i32 3, i32 2, i32 1, i32 0, i32 4, i32 5, i32 6, i32 7>
+ ret <8 x i16> %2
+}
+
+define <16 x i8> @stack_fold_psignb(<16 x i8> %a0, <16 x i8> %a1) {
+ ;CHECK-LABEL: stack_fold_psignb
+ ;CHECK: psignb {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <16 x i8> @llvm.x86.ssse3.psign.b.128(<16 x i8> %a0, <16 x i8> %a1)
+ ret <16 x i8> %2
+}
+declare <16 x i8> @llvm.x86.ssse3.psign.b.128(<16 x i8>, <16 x i8>) nounwind readnone
+
+define <4 x i32> @stack_fold_psignd(<4 x i32> %a0, <4 x i32> %a1) {
+ ;CHECK-LABEL: stack_fold_psignd
+ ;CHECK: psignd {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <4 x i32> @llvm.x86.ssse3.psign.d.128(<4 x i32> %a0, <4 x i32> %a1)
+ ret <4 x i32> %2
+}
+declare <4 x i32> @llvm.x86.ssse3.psign.d.128(<4 x i32>, <4 x i32>) nounwind readnone
+
+define <8 x i16> @stack_fold_psignw(<8 x i16> %a0, <8 x i16> %a1) {
+ ;CHECK-LABEL: stack_fold_psignw
+ ;CHECK: psignw {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <8 x i16> @llvm.x86.ssse3.psign.w.128(<8 x i16> %a0, <8 x i16> %a1)
+ ret <8 x i16> %2
+}
+declare <8 x i16> @llvm.x86.ssse3.psign.w.128(<8 x i16>, <8 x i16>) nounwind readnone
+
+define <4 x i32> @stack_fold_pslld(<4 x i32> %a0, <4 x i32> %a1) {
+ ;CHECK-LABEL: stack_fold_pslld
+ ;CHECK: pslld {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <4 x i32> @llvm.x86.sse2.psll.d(<4 x i32> %a0, <4 x i32> %a1)
+ ret <4 x i32> %2
+}
+declare <4 x i32> @llvm.x86.sse2.psll.d(<4 x i32>, <4 x i32>) nounwind readnone
+
+define <2 x i64> @stack_fold_psllq(<2 x i64> %a0, <2 x i64> %a1) {
+ ;CHECK-LABEL: stack_fold_psllq
+ ;CHECK: psllq {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <2 x i64> @llvm.x86.sse2.psll.q(<2 x i64> %a0, <2 x i64> %a1)
+ ret <2 x i64> %2
+}
+declare <2 x i64> @llvm.x86.sse2.psll.q(<2 x i64>, <2 x i64>) nounwind readnone
+
+define <8 x i16> @stack_fold_psllw(<8 x i16> %a0, <8 x i16> %a1) {
+ ;CHECK-LABEL: stack_fold_psllw
+ ;CHECK: psllw {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <8 x i16> @llvm.x86.sse2.psll.w(<8 x i16> %a0, <8 x i16> %a1)
+ ret <8 x i16> %2
+}
+declare <8 x i16> @llvm.x86.sse2.psll.w(<8 x i16>, <8 x i16>) nounwind readnone
+
+define <4 x i32> @stack_fold_psrad(<4 x i32> %a0, <4 x i32> %a1) {
+ ;CHECK-LABEL: stack_fold_psrad
+ ;CHECK: psrad {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <4 x i32> @llvm.x86.sse2.psra.d(<4 x i32> %a0, <4 x i32> %a1)
+ ret <4 x i32> %2
+}
+declare <4 x i32> @llvm.x86.sse2.psra.d(<4 x i32>, <4 x i32>) nounwind readnone
+
+define <8 x i16> @stack_fold_psraw(<8 x i16> %a0, <8 x i16> %a1) {
+ ;CHECK-LABEL: stack_fold_psraw
+ ;CHECK: psraw {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <8 x i16> @llvm.x86.sse2.psra.w(<8 x i16> %a0, <8 x i16> %a1)
+ ret <8 x i16> %2
+}
+declare <8 x i16> @llvm.x86.sse2.psra.w(<8 x i16>, <8 x i16>) nounwind readnone
+
+define <4 x i32> @stack_fold_psrld(<4 x i32> %a0, <4 x i32> %a1) {
+ ;CHECK-LABEL: stack_fold_psrld
+ ;CHECK: psrld {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <4 x i32> @llvm.x86.sse2.psrl.d(<4 x i32> %a0, <4 x i32> %a1)
+ ret <4 x i32> %2
+}
+declare <4 x i32> @llvm.x86.sse2.psrl.d(<4 x i32>, <4 x i32>) nounwind readnone
+
+define <2 x i64> @stack_fold_psrlq(<2 x i64> %a0, <2 x i64> %a1) {
+ ;CHECK-LABEL: stack_fold_psrlq
+ ;CHECK: psrlq {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <2 x i64> @llvm.x86.sse2.psrl.q(<2 x i64> %a0, <2 x i64> %a1)
+ ret <2 x i64> %2
+}
+declare <2 x i64> @llvm.x86.sse2.psrl.q(<2 x i64>, <2 x i64>) nounwind readnone
+
+define <8 x i16> @stack_fold_psrlw(<8 x i16> %a0, <8 x i16> %a1) {
+ ;CHECK-LABEL: stack_fold_psrlw
+ ;CHECK: psrlw {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <8 x i16> @llvm.x86.sse2.psrl.w(<8 x i16> %a0, <8 x i16> %a1)
+ ret <8 x i16> %2
+}
+declare <8 x i16> @llvm.x86.sse2.psrl.w(<8 x i16>, <8 x i16>) nounwind readnone
+
+define <16 x i8> @stack_fold_psubb(<16 x i8> %a0, <16 x i8> %a1) {
+ ;CHECK-LABEL: stack_fold_psubb
+ ;CHECK: psubb {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = sub <16 x i8> %a0, %a1
+ ret <16 x i8> %2
+}
+
+define <4 x i32> @stack_fold_psubd(<4 x i32> %a0, <4 x i32> %a1) {
+ ;CHECK-LABEL: stack_fold_psubd
+ ;CHECK: psubd {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = sub <4 x i32> %a0, %a1
+ ret <4 x i32> %2
+}
+
+define <2 x i64> @stack_fold_psubq(<2 x i64> %a0, <2 x i64> %a1) {
+ ;CHECK-LABEL: stack_fold_psubq
+ ;CHECK: psubq {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = sub <2 x i64> %a0, %a1
+ ret <2 x i64> %2
+}
+
+define <16 x i8> @stack_fold_psubsb(<16 x i8> %a0, <16 x i8> %a1) {
+ ;CHECK-LABEL: stack_fold_psubsb
+ ;CHECK: psubsb {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <16 x i8> @llvm.x86.sse2.psubs.b(<16 x i8> %a0, <16 x i8> %a1)
+ ret <16 x i8> %2
+}
+declare <16 x i8> @llvm.x86.sse2.psubs.b(<16 x i8>, <16 x i8>) nounwind readnone
+
+define <8 x i16> @stack_fold_psubsw(<8 x i16> %a0, <8 x i16> %a1) {
+ ;CHECK-LABEL: stack_fold_psubsw
+ ;CHECK: psubsw {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <8 x i16> @llvm.x86.sse2.psubs.w(<8 x i16> %a0, <8 x i16> %a1)
+ ret <8 x i16> %2
+}
+declare <8 x i16> @llvm.x86.sse2.psubs.w(<8 x i16>, <8 x i16>) nounwind readnone
+
+define <16 x i8> @stack_fold_psubusb(<16 x i8> %a0, <16 x i8> %a1) {
+ ;CHECK-LABEL: stack_fold_psubusb
+ ;CHECK: psubusb {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <16 x i8> @llvm.x86.sse2.psubus.b(<16 x i8> %a0, <16 x i8> %a1)
+ ret <16 x i8> %2
+}
+declare <16 x i8> @llvm.x86.sse2.psubus.b(<16 x i8>, <16 x i8>) nounwind readnone
+
+define <8 x i16> @stack_fold_psubusw(<8 x i16> %a0, <8 x i16> %a1) {
+ ;CHECK-LABEL: stack_fold_psubusw
+ ;CHECK: psubusw {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <8 x i16> @llvm.x86.sse2.psubus.w(<8 x i16> %a0, <8 x i16> %a1)
+ ret <8 x i16> %2
+}
+declare <8 x i16> @llvm.x86.sse2.psubus.w(<8 x i16>, <8 x i16>) nounwind readnone
+
+define <8 x i16> @stack_fold_psubw(<8 x i16> %a0, <8 x i16> %a1) {
+ ;CHECK-LABEL: stack_fold_psubw
+ ;CHECK: psubw {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = sub <8 x i16> %a0, %a1
+ ret <8 x i16> %2
+}
+
+define i32 @stack_fold_ptest(<2 x i64> %a0, <2 x i64> %a1) {
+ ;CHECK-LABEL: stack_fold_ptest
+ ;CHECK: ptest {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
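+ ; ptestc returns the carry flag, but it still lowers to the ptest instruction checked above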
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call i32 @llvm.x86.sse41.ptestc(<2 x i64> %a0, <2 x i64> %a1)
+ ret i32 %2
+}
+declare i32 @llvm.x86.sse41.ptestc(<2 x i64>, <2 x i64>) nounwind readnone
+
+define <16 x i8> @stack_fold_punpckhbw(<16 x i8> %a0, <16 x i8> %a1) {
+ ;CHECK-LABEL: stack_fold_punpckhbw
+ ;CHECK: punpckhbw {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = shufflevector <16 x i8> %a0, <16 x i8> %a1, <16 x i32> <i32 8, i32 24, i32 9, i32 25, i32 10, i32 26, i32 11, i32 27, i32 12, i32 28, i32 13, i32 29, i32 14, i32 30, i32 15, i32 31>
+ ret <16 x i8> %2
+}
+
+define <4 x i32> @stack_fold_punpckhdq(<4 x i32> %a0, <4 x i32> %a1) {
+ ;CHECK-LABEL: stack_fold_punpckhdq
+ ;CHECK: punpckhdq {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = shufflevector <4 x i32> %a0, <4 x i32> %a1, <4 x i32> <i32 2, i32 6, i32 3, i32 7>
+ ; add forces execution domain
+ %3 = add <4 x i32> %2, <i32 1, i32 1, i32 1, i32 1>
+ ret <4 x i32> %3
+}
+
+define <2 x i64> @stack_fold_punpckhqdq(<2 x i64> %a0, <2 x i64> %a1) {
+ ;CHECK-LABEL: stack_fold_punpckhqdq
+ ;CHECK: punpckhqdq {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = shufflevector <2 x i64> %a0, <2 x i64> %a1, <2 x i32> <i32 1, i32 3>
+ ; add forces execution domain
+ %3 = add <2 x i64> %2, <i64 1, i64 1>
+ ret <2 x i64> %3
+}
+
+define <8 x i16> @stack_fold_punpckhwd(<8 x i16> %a0, <8 x i16> %a1) {
+ ;CHECK-LABEL: stack_fold_punpckhwd
+ ;CHECK: punpckhwd {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = shufflevector <8 x i16> %a0, <8 x i16> %a1, <8 x i32> <i32 4, i32 12, i32 5, i32 13, i32 6, i32 14, i32 7, i32 15>
+ ret <8 x i16> %2
+}
+
+define <16 x i8> @stack_fold_punpcklbw(<16 x i8> %a0, <16 x i8> %a1) {
+ ;CHECK-LABEL: stack_fold_punpcklbw
+ ;CHECK: punpcklbw {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = shufflevector <16 x i8> %a0, <16 x i8> %a1, <16 x i32> <i32 0, i32 16, i32 1, i32 17, i32 2, i32 18, i32 3, i32 19, i32 4, i32 20, i32 5, i32 21, i32 6, i32 22, i32 7, i32 23>
+ ret <16 x i8> %2
+}
+
+define <4 x i32> @stack_fold_punpckldq(<4 x i32> %a0, <4 x i32> %a1) {
+ ;CHECK-LABEL: stack_fold_punpckldq
+ ;CHECK: punpckldq {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = shufflevector <4 x i32> %a0, <4 x i32> %a1, <4 x i32> <i32 0, i32 4, i32 1, i32 5>
+ ; add forces execution domain
+ %3 = add <4 x i32> %2, <i32 1, i32 1, i32 1, i32 1>
+ ret <4 x i32> %3
+}
+
+define <2 x i64> @stack_fold_punpcklqdq(<2 x i64> %a0, <2 x i64> %a1) {
+ ;CHECK-LABEL: stack_fold_punpcklqdq
+ ;CHECK: punpcklqdq {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = shufflevector <2 x i64> %a0, <2 x i64> %a1, <2 x i32> <i32 0, i32 2>
+ ; add forces execution domain
+ %3 = add <2 x i64> %2, <i64 1, i64 1>
+ ret <2 x i64> %3
+}
+
+define <8 x i16> @stack_fold_punpcklwd(<8 x i16> %a0, <8 x i16> %a1) {
+ ;CHECK-LABEL: stack_fold_punpcklwd
+ ;CHECK: punpcklwd {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = shufflevector <8 x i16> %a0, <8 x i16> %a1, <8 x i32> <i32 0, i32 8, i32 1, i32 9, i32 2, i32 10, i32 3, i32 11>
+ ret <8 x i16> %2
+}
+
+define <16 x i8> @stack_fold_pxor(<16 x i8> %a0, <16 x i8> %a1) {
+ ;CHECK-LABEL: stack_fold_pxor
+ ;CHECK: pxor {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = xor <16 x i8> %a0, %a1
+ ; add forces execution domain
+ %3 = add <16 x i8> %2, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
+ ret <16 x i8> %3
+}
diff --git a/test/CodeGen/X86/stack-folding-xop.ll b/test/CodeGen/X86/stack-folding-xop.ll
new file mode 100644
index 0000000..44a0d1d
--- /dev/null
+++ b/test/CodeGen/X86/stack-folding-xop.ll
@@ -0,0 +1,718 @@
+; RUN: llc -O3 -disable-peephole -mtriple=x86_64-unknown-unknown -mcpu=x86-64 -mattr=+avx,+xop < %s | FileCheck %s
+
+target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
+target triple = "x86_64-unknown-unknown"
+
+; Stack reload folding tests.
+;
+; By including a nop call with sideeffects we can force a partial register spill of the
+; relevant registers and check that the reload is correctly folded into the instruction.
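+;
+; Rough sketch of the shared pattern (an informal summary, not a directive):
+;  - the inline asm clobbers nearly every XMM register and also produces an
+;    XMM result, leaving too few registers to keep the function arguments
+;    live, so at least one argument is spilled across the call;
+;  - the instruction under test then consumes that argument, and the CHECK
+;    line verifies that its reload was folded into a memory operand (the
+;    "Folded Reload" text is an asm comment emitted by the backend).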
+
+define <2 x double> @stack_fold_vfrczpd(<2 x double> %a0) {
+ ;CHECK-LABEL: stack_fold_vfrczpd
+ ;CHECK: vfrczpd {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <2 x double> @llvm.x86.xop.vfrcz.pd(<2 x double> %a0)
+ ret <2 x double> %2
+}
+declare <2 x double> @llvm.x86.xop.vfrcz.pd(<2 x double>) nounwind readnone
+
+define <4 x double> @stack_fold_vfrczpd_ymm(<4 x double> %a0) {
+ ;CHECK-LABEL: stack_fold_vfrczpd_ymm
+ ;CHECK: vfrczpd {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <4 x double> @llvm.x86.xop.vfrcz.pd.256(<4 x double> %a0)
+ ret <4 x double> %2
+}
+declare <4 x double> @llvm.x86.xop.vfrcz.pd.256(<4 x double>) nounwind readnone
+
+define <4 x float> @stack_fold_vfrczps(<4 x float> %a0) {
+ ;CHECK-LABEL: stack_fold_vfrczps
+ ;CHECK: vfrczps {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <4 x float> @llvm.x86.xop.vfrcz.ps(<4 x float> %a0)
+ ret <4 x float> %2
+}
+declare <4 x float> @llvm.x86.xop.vfrcz.ps(<4 x float>) nounwind readnone
+
+define <8 x float> @stack_fold_vfrczps_ymm(<8 x float> %a0) {
+ ;CHECK-LABEL: stack_fold_vfrczps_ymm
+ ;CHECK: vfrczps {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <8 x float> @llvm.x86.xop.vfrcz.ps.256(<8 x float> %a0)
+ ret <8 x float> %2
+}
+declare <8 x float> @llvm.x86.xop.vfrcz.ps.256(<8 x float>) nounwind readnone
+
+define <2 x double> @stack_fold_vfrczsd(<2 x double> %a0) {
+ ;CHECK-LABEL: stack_fold_vfrczsd
+ ;CHECK: vfrczsd {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <2 x double> @llvm.x86.xop.vfrcz.sd(<2 x double> %a0)
+ ret <2 x double> %2
+}
+declare <2 x double> @llvm.x86.xop.vfrcz.sd(<2 x double>) nounwind readnone
+
+define <4 x float> @stack_fold_vfrczss(<4 x float> %a0) {
+ ;CHECK-LABEL: stack_fold_vfrczss
+ ;CHECK: vfrczss {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <4 x float> @llvm.x86.xop.vfrcz.ss(<4 x float> %a0)
+ ret <4 x float> %2
+}
+declare <4 x float> @llvm.x86.xop.vfrcz.ss(<4 x float>) nounwind readnone
+
+define <2 x i64> @stack_fold_vpcmov_rm(<2 x i64> %a0, <2 x i64> %a1, <2 x i64> %a2) {
+ ;CHECK-LABEL: stack_fold_vpcmov_rm
+ ;CHECK: vpcmov {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <2 x i64> @llvm.x86.xop.vpcmov(<2 x i64> %a0, <2 x i64> %a1, <2 x i64> %a2)
+ ret <2 x i64> %2
+}
+define <2 x i64> @stack_fold_vpcmov_mr(<2 x i64> %a0, <2 x i64> %a1, <2 x i64> %a2) {
+ ;CHECK-LABEL: stack_fold_vpcmov_mr
+ ;CHECK: vpcmov {{%xmm[0-9][0-9]*}}, {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
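+ ; %a1/%a2 are swapped relative to the _rm test so the reload folds into a
+ ; different source position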
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <2 x i64> @llvm.x86.xop.vpcmov(<2 x i64> %a0, <2 x i64> %a2, <2 x i64> %a1)
+ ret <2 x i64> %2
+}
+declare <2 x i64> @llvm.x86.xop.vpcmov(<2 x i64>, <2 x i64>, <2 x i64>) nounwind readnone
+
+define <4 x i64> @stack_fold_vpcmov_rm_ymm(<4 x i64> %a0, <4 x i64> %a1, <4 x i64> %a2) {
+ ;CHECK-LABEL: stack_fold_vpcmov_rm_ymm
+ ;CHECK: vpcmov {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <4 x i64> @llvm.x86.xop.vpcmov.256(<4 x i64> %a0, <4 x i64> %a1, <4 x i64> %a2)
+ ret <4 x i64> %2
+}
+define <4 x i64> @stack_fold_vpcmov_mr_ymm(<4 x i64> %a0, <4 x i64> %a1, <4 x i64> %a2) {
+ ;CHECK-LABEL: stack_fold_vpcmov_mr_ymm
+ ;CHECK: vpcmov {{%ymm[0-9][0-9]*}}, {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <4 x i64> @llvm.x86.xop.vpcmov.256(<4 x i64> %a0, <4 x i64> %a2, <4 x i64> %a1)
+ ret <4 x i64> %2
+}
+declare <4 x i64> @llvm.x86.xop.vpcmov.256(<4 x i64>, <4 x i64>, <4 x i64>) nounwind readnone
+
+define <16 x i8> @stack_fold_vpcomb(<16 x i8> %a0, <16 x i8> %a1) {
+ ;CHECK-LABEL: stack_fold_vpcomb
+ ;CHECK: vpcomltb {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
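+ ; immediate 0 selects the "lt" condition, so the instruction prints as vpcomltb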
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <16 x i8> @llvm.x86.xop.vpcomb(<16 x i8> %a0, <16 x i8> %a1, i8 0)
+ ret <16 x i8> %2
+}
+declare <16 x i8> @llvm.x86.xop.vpcomb(<16 x i8>, <16 x i8>, i8) nounwind readnone
+
+define <4 x i32> @stack_fold_vpcomd(<4 x i32> %a0, <4 x i32> %a1) {
+ ;CHECK-LABEL: stack_fold_vpcomd
+ ;CHECK: vpcomltd {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <4 x i32> @llvm.x86.xop.vpcomd(<4 x i32> %a0, <4 x i32> %a1, i8 0)
+ ret <4 x i32> %2
+}
+declare <4 x i32> @llvm.x86.xop.vpcomd(<4 x i32>, <4 x i32>, i8) nounwind readnone
+
+define <2 x i64> @stack_fold_vpcomq(<2 x i64> %a0, <2 x i64> %a1) {
+ ;CHECK-LABEL: stack_fold_vpcomq
+ ;CHECK: vpcomltq {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <2 x i64> @llvm.x86.xop.vpcomq(<2 x i64> %a0, <2 x i64> %a1, i8 0)
+ ret <2 x i64> %2
+}
+declare <2 x i64> @llvm.x86.xop.vpcomq(<2 x i64>, <2 x i64>, i8) nounwind readnone
+
+define <16 x i8> @stack_fold_vpcomub(<16 x i8> %a0, <16 x i8> %a1) {
+ ;CHECK-LABEL: stack_fold_vpcomub
+ ;CHECK: vpcomltub {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <16 x i8> @llvm.x86.xop.vpcomub(<16 x i8> %a0, <16 x i8> %a1, i8 0)
+ ret <16 x i8> %2
+}
+declare <16 x i8> @llvm.x86.xop.vpcomub(<16 x i8>, <16 x i8>, i8) nounwind readnone
+
+define <4 x i32> @stack_fold_vpcomud(<4 x i32> %a0, <4 x i32> %a1) {
+ ;CHECK-LABEL: stack_fold_vpcomud
+ ;CHECK: vpcomltud {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <4 x i32> @llvm.x86.xop.vpcomud(<4 x i32> %a0, <4 x i32> %a1, i8 0)
+ ret <4 x i32> %2
+}
+declare <4 x i32> @llvm.x86.xop.vpcomud(<4 x i32>, <4 x i32>, i8) nounwind readnone
+
+define <2 x i64> @stack_fold_vpcomuq(<2 x i64> %a0, <2 x i64> %a1) {
+ ;CHECK-LABEL: stack_fold_vpcomuq
+ ;CHECK: vpcomltuq {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <2 x i64> @llvm.x86.xop.vpcomuq(<2 x i64> %a0, <2 x i64> %a1, i8 0)
+ ret <2 x i64> %2
+}
+declare <2 x i64> @llvm.x86.xop.vpcomuq(<2 x i64>, <2 x i64>, i8) nounwind readnone
+
+define <8 x i16> @stack_fold_vpcomuw(<8 x i16> %a0, <8 x i16> %a1) {
+ ;CHECK-LABEL: stack_fold_vpcomuw
+ ;CHECK: vpcomltuw {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <8 x i16> @llvm.x86.xop.vpcomuw(<8 x i16> %a0, <8 x i16> %a1, i8 0)
+ ret <8 x i16> %2
+}
+declare <8 x i16> @llvm.x86.xop.vpcomuw(<8 x i16>, <8 x i16>, i8) nounwind readnone
+
+define <8 x i16> @stack_fold_vpcomw(<8 x i16> %a0, <8 x i16> %a1) {
+ ;CHECK-LABEL: stack_fold_vpcomw
+ ;CHECK: vpcomltw {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <8 x i16> @llvm.x86.xop.vpcomw(<8 x i16> %a0, <8 x i16> %a1, i8 0)
+ ret <8 x i16> %2
+}
+declare <8 x i16> @llvm.x86.xop.vpcomw(<8 x i16>, <8 x i16>, i8) nounwind readnone
+
+define <2 x double> @stack_fold_vpermil2pd_rm(<2 x double> %a0, <2 x double> %a1, <2 x double> %a2) {
+ ;CHECK-LABEL: stack_fold_vpermil2pd_rm
+ ;CHECK: vpermil2pd $0, {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
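+ ; the trailing i8 (printed as $0) is, as far as we understand, the m2z
+ ; zeroing-control immediate; 0 means no lanes are zeroed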
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <2 x double> @llvm.x86.xop.vpermil2pd(<2 x double> %a0, <2 x double> %a1, <2 x double> %a2, i8 0)
+ ret <2 x double> %2
+}
+define <2 x double> @stack_fold_vpermil2pd_mr(<2 x double> %a0, <2 x double> %a1, <2 x double> %a2) {
+ ;CHECK-LABEL: stack_fold_vpermil2pd_mr
+ ;CHECK: vpermil2pd $0, {{%xmm[0-9][0-9]*}}, {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <2 x double> @llvm.x86.xop.vpermil2pd(<2 x double> %a0, <2 x double> %a2, <2 x double> %a1, i8 0)
+ ret <2 x double> %2
+}
+declare <2 x double> @llvm.x86.xop.vpermil2pd(<2 x double>, <2 x double>, <2 x double>, i8) nounwind readnone
+
+define <4 x double> @stack_fold_vpermil2pd_rm_ymm(<4 x double> %a0, <4 x double> %a1, <4 x double> %a2) {
+ ;CHECK-LABEL: stack_fold_vpermil2pd_rm_ymm
+ ;CHECK: vpermil2pd $0, {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <4 x double> @llvm.x86.xop.vpermil2pd.256(<4 x double> %a0, <4 x double> %a1, <4 x double> %a2, i8 0)
+ ret <4 x double> %2
+}
+define <4 x double> @stack_fold_vpermil2pd_mr_ymm(<4 x double> %a0, <4 x double> %a1, <4 x double> %a2) {
+ ;CHECK-LABEL: stack_fold_vpermil2pd_mr_ymm
+ ;CHECK: vpermil2pd $0, {{%ymm[0-9][0-9]*}}, {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <4 x double> @llvm.x86.xop.vpermil2pd.256(<4 x double> %a0, <4 x double> %a2, <4 x double> %a1, i8 0)
+ ret <4 x double> %2
+}
+declare <4 x double> @llvm.x86.xop.vpermil2pd.256(<4 x double>, <4 x double>, <4 x double>, i8) nounwind readnone
+
+define <4 x float> @stack_fold_vpermil2ps_rm(<4 x float> %a0, <4 x float> %a1, <4 x float> %a2) {
+ ;CHECK-LABEL: stack_fold_vpermil2ps_rm
+ ;CHECK: vpermil2ps $0, {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <4 x float> @llvm.x86.xop.vpermil2ps(<4 x float> %a0, <4 x float> %a1, <4 x float> %a2, i8 0)
+ ret <4 x float> %2
+}
+define <4 x float> @stack_fold_vpermil2ps_mr(<4 x float> %a0, <4 x float> %a1, <4 x float> %a2) {
+ ;CHECK-LABEL: stack_fold_vpermil2ps_mr
+ ;CHECK: vpermil2ps $0, {{%xmm[0-9][0-9]*}}, {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <4 x float> @llvm.x86.xop.vpermil2ps(<4 x float> %a0, <4 x float> %a2, <4 x float> %a1, i8 0)
+ ret <4 x float> %2
+}
+declare <4 x float> @llvm.x86.xop.vpermil2ps(<4 x float>, <4 x float>, <4 x float>, i8) nounwind readnone
+
+define <8 x float> @stack_fold_vpermil2ps_rm_ymm(<8 x float> %a0, <8 x float> %a1, <8 x float> %a2) {
+ ;CHECK-LABEL: stack_fold_vpermil2ps_rm_ymm
+ ;CHECK: vpermil2ps $0, {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <8 x float> @llvm.x86.xop.vpermil2ps.256(<8 x float> %a0, <8 x float> %a1, <8 x float> %a2, i8 0)
+ ret <8 x float> %2
+}
+define <8 x float> @stack_fold_vpermil2ps_mr_ymm(<8 x float> %a0, <8 x float> %a1, <8 x float> %a2) {
+ ;CHECK-LABEL: stack_fold_vpermil2ps_mr_ymm
+ ;CHECK: vpermil2ps $0, {{%ymm[0-9][0-9]*}}, {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <8 x float> @llvm.x86.xop.vpermil2ps.256(<8 x float> %a0, <8 x float> %a2, <8 x float> %a1, i8 0)
+ ret <8 x float> %2
+}
+declare <8 x float> @llvm.x86.xop.vpermil2ps.256(<8 x float>, <8 x float>, <8 x float>, i8) nounwind readnone
+
+define <4 x i32> @stack_fold_vphaddbd(<16 x i8> %a0) {
+ ;CHECK-LABEL: stack_fold_vphaddbd
+ ;CHECK: vphaddbd {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <4 x i32> @llvm.x86.xop.vphaddbd(<16 x i8> %a0)
+ ret <4 x i32> %2
+}
+declare <4 x i32> @llvm.x86.xop.vphaddbd(<16 x i8>) nounwind readnone
+
+define <2 x i64> @stack_fold_vphaddbq(<16 x i8> %a0) {
+ ;CHECK-LABEL: stack_fold_vphaddbq
+ ;CHECK: vphaddbq {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <2 x i64> @llvm.x86.xop.vphaddbq(<16 x i8> %a0)
+ ret <2 x i64> %2
+}
+declare <2 x i64> @llvm.x86.xop.vphaddbq(<16 x i8>) nounwind readnone
+
+define <8 x i16> @stack_fold_vphaddbw(<16 x i8> %a0) {
+ ;CHECK-LABEL: stack_fold_vphaddbw
+ ;CHECK: vphaddbw {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <8 x i16> @llvm.x86.xop.vphaddbw(<16 x i8> %a0)
+ ret <8 x i16> %2
+}
+declare <8 x i16> @llvm.x86.xop.vphaddbw(<16 x i8>) nounwind readnone
+
+define <2 x i64> @stack_fold_vphadddq(<4 x i32> %a0) {
+ ;CHECK-LABEL: stack_fold_vphadddq
+ ;CHECK: vphadddq {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <2 x i64> @llvm.x86.xop.vphadddq(<4 x i32> %a0)
+ ret <2 x i64> %2
+}
+declare <2 x i64> @llvm.x86.xop.vphadddq(<4 x i32>) nounwind readnone
+
+define <4 x i32> @stack_fold_vphaddubd(<16 x i8> %a0) {
+ ;CHECK-LABEL: stack_fold_vphaddubd
+ ;CHECK: vphaddubd {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <4 x i32> @llvm.x86.xop.vphaddubd(<16 x i8> %a0)
+ ret <4 x i32> %2
+}
+declare <4 x i32> @llvm.x86.xop.vphaddubd(<16 x i8>) nounwind readnone
+
+define <2 x i64> @stack_fold_vphaddubq(<16 x i8> %a0) {
+ ;CHECK-LABEL: stack_fold_vphaddubq
+ ;CHECK: vphaddubq {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <2 x i64> @llvm.x86.xop.vphaddubq(<16 x i8> %a0)
+ ret <2 x i64> %2
+}
+declare <2 x i64> @llvm.x86.xop.vphaddubq(<16 x i8>) nounwind readnone
+
+define <8 x i16> @stack_fold_vphaddubw(<16 x i8> %a0) {
+ ;CHECK-LABEL: stack_fold_vphaddubw
+ ;CHECK: vphaddubw {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <8 x i16> @llvm.x86.xop.vphaddubw(<16 x i8> %a0)
+ ret <8 x i16> %2
+}
+declare <8 x i16> @llvm.x86.xop.vphaddubw(<16 x i8>) nounwind readnone
+
+define <2 x i64> @stack_fold_vphaddudq(<4 x i32> %a0) {
+ ;CHECK-LABEL: stack_fold_vphaddudq
+ ;CHECK: vphaddudq {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <2 x i64> @llvm.x86.xop.vphaddudq(<4 x i32> %a0)
+ ret <2 x i64> %2
+}
+declare <2 x i64> @llvm.x86.xop.vphaddudq(<4 x i32>) nounwind readnone
+
+define <4 x i32> @stack_fold_vphadduwd(<8 x i16> %a0) {
+ ;CHECK-LABEL: stack_fold_vphadduwd
+ ;CHECK: vphadduwd {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <4 x i32> @llvm.x86.xop.vphadduwd(<8 x i16> %a0)
+ ret <4 x i32> %2
+}
+declare <4 x i32> @llvm.x86.xop.vphadduwd(<8 x i16>) nounwind readnone
+
+define <2 x i64> @stack_fold_vphadduwq(<8 x i16> %a0) {
+ ;CHECK-LABEL: stack_fold_vphadduwq
+ ;CHECK: vphadduwq {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <2 x i64> @llvm.x86.xop.vphadduwq(<8 x i16> %a0)
+ ret <2 x i64> %2
+}
+declare <2 x i64> @llvm.x86.xop.vphadduwq(<8 x i16>) nounwind readnone
+
+define <4 x i32> @stack_fold_vphaddwd(<8 x i16> %a0) {
+ ;CHECK-LABEL: stack_fold_vphaddwd
+ ;CHECK: vphaddwd {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <4 x i32> @llvm.x86.xop.vphaddwd(<8 x i16> %a0)
+ ret <4 x i32> %2
+}
+declare <4 x i32> @llvm.x86.xop.vphaddwd(<8 x i16>) nounwind readnone
+
+define <2 x i64> @stack_fold_vphaddwq(<8 x i16> %a0) {
+ ;CHECK-LABEL: stack_fold_vphaddwq
+ ;CHECK: vphaddwq {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <2 x i64> @llvm.x86.xop.vphaddwq(<8 x i16> %a0)
+ ret <2 x i64> %2
+}
+declare <2 x i64> @llvm.x86.xop.vphaddwq(<8 x i16>) nounwind readnone
+
+define <8 x i16> @stack_fold_vphsubbw(<16 x i8> %a0) {
+ ;CHECK-LABEL: stack_fold_vphsubbw
+ ;CHECK: vphsubbw {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <8 x i16> @llvm.x86.xop.vphsubbw(<16 x i8> %a0)
+ ret <8 x i16> %2
+}
+declare <8 x i16> @llvm.x86.xop.vphsubbw(<16 x i8>) nounwind readnone
+
+define <2 x i64> @stack_fold_vphsubdq(<4 x i32> %a0) {
+ ;CHECK-LABEL: stack_fold_vphsubdq
+ ;CHECK: vphsubdq {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <2 x i64> @llvm.x86.xop.vphsubdq(<4 x i32> %a0)
+ ret <2 x i64> %2
+}
+declare <2 x i64> @llvm.x86.xop.vphsubdq(<4 x i32>) nounwind readnone
+
+define <4 x i32> @stack_fold_vphsubwd(<8 x i16> %a0) {
+ ;CHECK-LABEL: stack_fold_vphsubwd
+ ;CHECK: vphsubwd {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <4 x i32> @llvm.x86.xop.vphsubwd(<8 x i16> %a0)
+ ret <4 x i32> %2
+}
+declare <4 x i32> @llvm.x86.xop.vphsubwd(<8 x i16>) nounwind readnone
+
+define <4 x i32> @stack_fold_vpmacsdd(<4 x i32> %a0, <4 x i32> %a1, <4 x i32> %a2) {
+ ;CHECK-LABEL: stack_fold_vpmacsdd
+ ;CHECK: vpmacsdd {{%xmm[0-9][0-9]*}}, {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <4 x i32> @llvm.x86.xop.vpmacsdd(<4 x i32> %a0, <4 x i32> %a1, <4 x i32> %a2)
+ ret <4 x i32> %2
+}
+declare <4 x i32> @llvm.x86.xop.vpmacsdd(<4 x i32>, <4 x i32>, <4 x i32>) nounwind readnone
+
+define <2 x i64> @stack_fold_vpmacsdqh(<4 x i32> %a0, <4 x i32> %a1, <2 x i64> %a2) {
+ ;CHECK-LABEL: stack_fold_vpmacsdqh
+ ;CHECK: vpmacsdqh {{%xmm[0-9][0-9]*}}, {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <2 x i64> @llvm.x86.xop.vpmacsdqh(<4 x i32> %a0, <4 x i32> %a1, <2 x i64> %a2)
+ ret <2 x i64> %2
+}
+declare <2 x i64> @llvm.x86.xop.vpmacsdqh(<4 x i32>, <4 x i32>, <2 x i64>) nounwind readnone
+
+define <2 x i64> @stack_fold_vpmacsdql(<4 x i32> %a0, <4 x i32> %a1, <2 x i64> %a2) {
+ ;CHECK-LABEL: stack_fold_vpmacsdql
+ ;CHECK: vpmacsdql {{%xmm[0-9][0-9]*}}, {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <2 x i64> @llvm.x86.xop.vpmacsdql(<4 x i32> %a0, <4 x i32> %a1, <2 x i64> %a2)
+ ret <2 x i64> %2
+}
+declare <2 x i64> @llvm.x86.xop.vpmacsdql(<4 x i32>, <4 x i32>, <2 x i64>) nounwind readnone
+
+define <4 x i32> @stack_fold_vpmacssdd(<4 x i32> %a0, <4 x i32> %a1, <4 x i32> %a2) {
+ ;CHECK-LABEL: stack_fold_vpmacssdd
+ ;CHECK: vpmacssdd {{%xmm[0-9][0-9]*}}, {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <4 x i32> @llvm.x86.xop.vpmacssdd(<4 x i32> %a0, <4 x i32> %a1, <4 x i32> %a2)
+ ret <4 x i32> %2
+}
+declare <4 x i32> @llvm.x86.xop.vpmacssdd(<4 x i32>, <4 x i32>, <4 x i32>) nounwind readnone
+
+define <2 x i64> @stack_fold_vpmacssdqh(<4 x i32> %a0, <4 x i32> %a1, <2 x i64> %a2) {
+ ;CHECK-LABEL: stack_fold_vpmacssdqh
+ ;CHECK: vpmacssdqh {{%xmm[0-9][0-9]*}}, {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <2 x i64> @llvm.x86.xop.vpmacssdqh(<4 x i32> %a0, <4 x i32> %a1, <2 x i64> %a2)
+ ret <2 x i64> %2
+}
+declare <2 x i64> @llvm.x86.xop.vpmacssdqh(<4 x i32>, <4 x i32>, <2 x i64>) nounwind readnone
+
+define <2 x i64> @stack_fold_vpmacssdql(<4 x i32> %a0, <4 x i32> %a1, <2 x i64> %a2) {
+ ;CHECK-LABEL: stack_fold_vpmacssdql
+ ;CHECK: vpmacssdql {{%xmm[0-9][0-9]*}}, {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <2 x i64> @llvm.x86.xop.vpmacssdql(<4 x i32> %a0, <4 x i32> %a1, <2 x i64> %a2)
+ ret <2 x i64> %2
+}
+declare <2 x i64> @llvm.x86.xop.vpmacssdql(<4 x i32>, <4 x i32>, <2 x i64>) nounwind readnone
+
+define <4 x i32> @stack_fold_vpmacsswd(<8 x i16> %a0, <8 x i16> %a1, <4 x i32> %a2) {
+ ;CHECK-LABEL: stack_fold_vpmacsswd
+ ;CHECK: vpmacsswd {{%xmm[0-9][0-9]*}}, {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <4 x i32> @llvm.x86.xop.vpmacsswd(<8 x i16> %a0, <8 x i16> %a1, <4 x i32> %a2)
+ ret <4 x i32> %2
+}
+declare <4 x i32> @llvm.x86.xop.vpmacsswd(<8 x i16>, <8 x i16>, <4 x i32>) nounwind readnone
+
+define <8 x i16> @stack_fold_vpmacssww(<8 x i16> %a0, <8 x i16> %a1, <8 x i16> %a2) {
+ ;CHECK-LABEL: stack_fold_vpmacssww
+ ;CHECK: vpmacssww {{%xmm[0-9][0-9]*}}, {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <8 x i16> @llvm.x86.xop.vpmacssww(<8 x i16> %a0, <8 x i16> %a1, <8 x i16> %a2)
+ ret <8 x i16> %2
+}
+declare <8 x i16> @llvm.x86.xop.vpmacssww(<8 x i16>, <8 x i16>, <8 x i16>) nounwind readnone
+
+define <4 x i32> @stack_fold_vpmacswd(<8 x i16> %a0, <8 x i16> %a1, <4 x i32> %a2) {
+ ;CHECK-LABEL: stack_fold_vpmacswd
+ ;CHECK: vpmacswd {{%xmm[0-9][0-9]*}}, {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <4 x i32> @llvm.x86.xop.vpmacswd(<8 x i16> %a0, <8 x i16> %a1, <4 x i32> %a2)
+ ret <4 x i32> %2
+}
+declare <4 x i32> @llvm.x86.xop.vpmacswd(<8 x i16>, <8 x i16>, <4 x i32>) nounwind readnone
+
+define <8 x i16> @stack_fold_vpmacsww(<8 x i16> %a0, <8 x i16> %a1, <8 x i16> %a2) {
+ ;CHECK-LABEL: stack_fold_vpmacsww
+ ;CHECK: vpmacsww {{%xmm[0-9][0-9]*}}, {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <8 x i16> @llvm.x86.xop.vpmacsww(<8 x i16> %a0, <8 x i16> %a1, <8 x i16> %a2)
+ ret <8 x i16> %2
+}
+declare <8 x i16> @llvm.x86.xop.vpmacsww(<8 x i16>, <8 x i16>, <8 x i16>) nounwind readnone
+
+define <4 x i32> @stack_fold_vpmadcsswd(<8 x i16> %a0, <8 x i16> %a1, <4 x i32> %a2) {
+ ;CHECK-LABEL: stack_fold_vpmadcsswd
+ ;CHECK: vpmadcsswd {{%xmm[0-9][0-9]*}}, {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <4 x i32> @llvm.x86.xop.vpmadcsswd(<8 x i16> %a0, <8 x i16> %a1, <4 x i32> %a2)
+ ret <4 x i32> %2
+}
+declare <4 x i32> @llvm.x86.xop.vpmadcsswd(<8 x i16>, <8 x i16>, <4 x i32>) nounwind readnone
+
+define <4 x i32> @stack_fold_vpmadcswd(<8 x i16> %a0, <8 x i16> %a1, <4 x i32> %a2) {
+ ;CHECK-LABEL: stack_fold_vpmadcswd
+ ;CHECK: vpmadcswd {{%xmm[0-9][0-9]*}}, {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <4 x i32> @llvm.x86.xop.vpmadcswd(<8 x i16> %a0, <8 x i16> %a1, <4 x i32> %a2)
+ ret <4 x i32> %2
+}
+declare <4 x i32> @llvm.x86.xop.vpmadcswd(<8 x i16>, <8 x i16>, <4 x i32>) nounwind readnone
+
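+; The _rm/_mr pairs below swap the intrinsic's operands so that the
+; reloaded stack value is folded into each of the instruction's foldable
+; operand positions.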
+define <16 x i8> @stack_fold_vpperm_rm(<16 x i8> %a0, <16 x i8> %a1, <16 x i8> %a2) {
+ ;CHECK-LABEL: stack_fold_vpperm_rm
+ ;CHECK: vpperm {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <16 x i8> @llvm.x86.xop.vpperm(<16 x i8> %a0, <16 x i8> %a1, <16 x i8> %a2)
+ ret <16 x i8> %2
+}
+define <16 x i8> @stack_fold_vpperm_mr(<16 x i8> %a0, <16 x i8> %a1, <16 x i8> %a2) {
+ ;CHECK-LABEL: stack_fold_vpperm_mr
+ ;CHECK: vpperm {{%xmm[0-9][0-9]*}}, {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <16 x i8> @llvm.x86.xop.vpperm(<16 x i8> %a0, <16 x i8> %a2, <16 x i8> %a1)
+ ret <16 x i8> %2
+}
+declare <16 x i8> @llvm.x86.xop.vpperm(<16 x i8>, <16 x i8>, <16 x i8>) nounwind readnone
+
+define <16 x i8> @stack_fold_vprotb(<16 x i8> %a0) {
+ ;CHECK-LABEL: stack_fold_vprotb
+ ;CHECK: vprotb $7, {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <16 x i8> @llvm.x86.xop.vprotbi(<16 x i8> %a0, i8 7)
+ ret <16 x i8> %2
+}
+declare <16 x i8> @llvm.x86.xop.vprotbi(<16 x i8>, i8) nounwind readnone
+
+define <16 x i8> @stack_fold_vprotb_rm(<16 x i8> %a0, <16 x i8> %a1) {
+ ;CHECK-LABEL: stack_fold_vprotb_rm
+ ;CHECK: vprotb {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <16 x i8> @llvm.x86.xop.vprotb(<16 x i8> %a0, <16 x i8> %a1)
+ ret <16 x i8> %2
+}
+define <16 x i8> @stack_fold_vprotb_mr(<16 x i8> %a0, <16 x i8> %a1) {
+ ;CHECK-LABEL: stack_fold_vprotb_mr
+ ;CHECK: vprotb {{%xmm[0-9][0-9]*}}, {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <16 x i8> @llvm.x86.xop.vprotb(<16 x i8> %a1, <16 x i8> %a0)
+ ret <16 x i8> %2
+}
+declare <16 x i8> @llvm.x86.xop.vprotb(<16 x i8>, <16 x i8>) nounwind readnone
+
+define <4 x i32> @stack_fold_vprotd(<4 x i32> %a0) {
+ ;CHECK-LABEL: stack_fold_vprotd
+ ;CHECK: vprotd $7, {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <4 x i32> @llvm.x86.xop.vprotdi(<4 x i32> %a0, i8 7)
+ ret <4 x i32> %2
+}
+declare <4 x i32> @llvm.x86.xop.vprotdi(<4 x i32>, i8) nounwind readnone
+
+define <4 x i32> @stack_fold_vprotd_rm(<4 x i32> %a0, <4 x i32> %a1) {
+ ;CHECK-LABEL: stack_fold_vprotd_rm
+ ;CHECK: vprotd {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <4 x i32> @llvm.x86.xop.vprotd(<4 x i32> %a0, <4 x i32> %a1)
+ ret <4 x i32> %2
+}
+define <4 x i32> @stack_fold_vprotd_mr(<4 x i32> %a0, <4 x i32> %a1) {
+ ;CHECK-LABEL: stack_fold_vprotd_mr
+ ;CHECK: vprotd {{%xmm[0-9][0-9]*}}, {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <4 x i32> @llvm.x86.xop.vprotd(<4 x i32> %a1, <4 x i32> %a0)
+ ret <4 x i32> %2
+}
+declare <4 x i32> @llvm.x86.xop.vprotd(<4 x i32>, <4 x i32>) nounwind readnone
+
+define <2 x i64> @stack_fold_vprotq(<2 x i64> %a0) {
+ ;CHECK-LABEL: stack_fold_vprotq
+ ;CHECK: vprotq $7, {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <2 x i64> @llvm.x86.xop.vprotqi(<2 x i64> %a0, i8 7)
+ ret <2 x i64> %2
+}
+declare <2 x i64> @llvm.x86.xop.vprotqi(<2 x i64>, i8) nounwind readnone
+
+define <2 x i64> @stack_fold_vprotq_rm(<2 x i64> %a0, <2 x i64> %a1) {
+ ;CHECK-LABEL: stack_fold_vprotq_rm
+ ;CHECK: vprotq {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <2 x i64> @llvm.x86.xop.vprotq(<2 x i64> %a0, <2 x i64> %a1)
+ ret <2 x i64> %2
+}
+define <2 x i64> @stack_fold_vprotq_mr(<2 x i64> %a0, <2 x i64> %a1) {
+ ;CHECK-LABEL: stack_fold_vprotq_mr
+ ;CHECK: vprotq {{%xmm[0-9][0-9]*}}, {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <2 x i64> @llvm.x86.xop.vprotq(<2 x i64> %a1, <2 x i64> %a0)
+ ret <2 x i64> %2
+}
+declare <2 x i64> @llvm.x86.xop.vprotq(<2 x i64>, <2 x i64>) nounwind readnone
+
+define <8 x i16> @stack_fold_vprotw(<8 x i16> %a0) {
+ ;CHECK-LABEL: stack_fold_vprotw
+ ;CHECK: vprotw $7, {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <8 x i16> @llvm.x86.xop.vprotwi(<8 x i16> %a0, i8 7)
+ ret <8 x i16> %2
+}
+declare <8 x i16> @llvm.x86.xop.vprotwi(<8 x i16>, i8) nounwind readnone
+
+define <8 x i16> @stack_fold_vprotw_rm(<8 x i16> %a0, <8 x i16> %a1) {
+ ;CHECK-LABEL: stack_fold_vprotw_rm
+ ;CHECK: vprotw {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <8 x i16> @llvm.x86.xop.vprotw(<8 x i16> %a0, <8 x i16> %a1)
+ ret <8 x i16> %2
+}
+define <8 x i16> @stack_fold_vprotw_mr(<8 x i16> %a0, <8 x i16> %a1) {
+ ;CHECK-LABEL: stack_fold_vprotw_mr
+ ;CHECK: vprotw {{%xmm[0-9][0-9]*}}, {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <8 x i16> @llvm.x86.xop.vprotw(<8 x i16> %a1, <8 x i16> %a0)
+ ret <8 x i16> %2
+}
+declare <8 x i16> @llvm.x86.xop.vprotw(<8 x i16>, <8 x i16>) nounwind readnone
+
+define <16 x i8> @stack_fold_vpshab_rm(<16 x i8> %a0, <16 x i8> %a1) {
+ ;CHECK-LABEL: stack_fold_vpshab_rm
+ ;CHECK: vpshab {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <16 x i8> @llvm.x86.xop.vpshab(<16 x i8> %a0, <16 x i8> %a1)
+ ret <16 x i8> %2
+}
+define <16 x i8> @stack_fold_vpshab_mr(<16 x i8> %a0, <16 x i8> %a1) {
+ ;CHECK-LABEL: stack_fold_vpshab_mr
+ ;CHECK: vpshab {{%xmm[0-9][0-9]*}}, {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <16 x i8> @llvm.x86.xop.vpshab(<16 x i8> %a1, <16 x i8> %a0)
+ ret <16 x i8> %2
+}
+declare <16 x i8> @llvm.x86.xop.vpshab(<16 x i8>, <16 x i8>) nounwind readnone
+
+define <4 x i32> @stack_fold_vpshad_rm(<4 x i32> %a0, <4 x i32> %a1) {
+ ;CHECK-LABEL: stack_fold_vpshad_rm
+ ;CHECK: vpshad {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <4 x i32> @llvm.x86.xop.vpshad(<4 x i32> %a0, <4 x i32> %a1)
+ ret <4 x i32> %2
+}
+define <4 x i32> @stack_fold_vpshad_mr(<4 x i32> %a0, <4 x i32> %a1) {
+ ;CHECK-LABEL: stack_fold_vpshad_mr
+ ;CHECK: vpshad {{%xmm[0-9][0-9]*}}, {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <4 x i32> @llvm.x86.xop.vpshad(<4 x i32> %a1, <4 x i32> %a0)
+ ret <4 x i32> %2
+}
+declare <4 x i32> @llvm.x86.xop.vpshad(<4 x i32>, <4 x i32>) nounwind readnone
+
+define <2 x i64> @stack_fold_vpshaq_rm(<2 x i64> %a0, <2 x i64> %a1) {
+ ;CHECK-LABEL: stack_fold_vpshaq_rm
+ ;CHECK: vpshaq {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <2 x i64> @llvm.x86.xop.vpshaq(<2 x i64> %a0, <2 x i64> %a1)
+ ret <2 x i64> %2
+}
+define <2 x i64> @stack_fold_vpshaq_mr(<2 x i64> %a0, <2 x i64> %a1) {
+ ;CHECK-LABEL: stack_fold_vpshaq_mr
+ ;CHECK: vpshaq {{%xmm[0-9][0-9]*}}, {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <2 x i64> @llvm.x86.xop.vpshaq(<2 x i64> %a1, <2 x i64> %a0)
+ ret <2 x i64> %2
+}
+declare <2 x i64> @llvm.x86.xop.vpshaq(<2 x i64>, <2 x i64>) nounwind readnone
+
+define <8 x i16> @stack_fold_vpshaw_rm(<8 x i16> %a0, <8 x i16> %a1) {
+ ;CHECK-LABEL: stack_fold_vpshaw_rm
+ ;CHECK: vpshaw {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <8 x i16> @llvm.x86.xop.vpshaw(<8 x i16> %a0, <8 x i16> %a1)
+ ret <8 x i16> %2
+}
+define <8 x i16> @stack_fold_vpshaw_mr(<8 x i16> %a0, <8 x i16> %a1) {
+ ;CHECK-LABEL: stack_fold_vpshaw_mr
+ ;CHECK: vpshaw {{%xmm[0-9][0-9]*}}, {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <8 x i16> @llvm.x86.xop.vpshaw(<8 x i16> %a1, <8 x i16> %a0)
+ ret <8 x i16> %2
+}
+declare <8 x i16> @llvm.x86.xop.vpshaw(<8 x i16>, <8 x i16>) nounwind readnone
+
+define <16 x i8> @stack_fold_vpshlb_rm(<16 x i8> %a0, <16 x i8> %a1) {
+ ;CHECK-LABEL: stack_fold_vpshlb_rm
+ ;CHECK: vpshlb {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <16 x i8> @llvm.x86.xop.vpshlb(<16 x i8> %a0, <16 x i8> %a1)
+ ret <16 x i8> %2
+}
+define <16 x i8> @stack_fold_vpshlb_mr(<16 x i8> %a0, <16 x i8> %a1) {
+ ;CHECK-LABEL: stack_fold_vpshlb_mr
+ ;CHECK: vpshlb {{%xmm[0-9][0-9]*}}, {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <16 x i8> @llvm.x86.xop.vpshlb(<16 x i8> %a1, <16 x i8> %a0)
+ ret <16 x i8> %2
+}
+declare <16 x i8> @llvm.x86.xop.vpshlb(<16 x i8>, <16 x i8>) nounwind readnone
+
+define <4 x i32> @stack_fold_vpshld_rm(<4 x i32> %a0, <4 x i32> %a1) {
+ ;CHECK-LABEL: stack_fold_vpshld_rm
+ ;CHECK: vpshld {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <4 x i32> @llvm.x86.xop.vpshld(<4 x i32> %a0, <4 x i32> %a1)
+ ret <4 x i32> %2
+}
+define <4 x i32> @stack_fold_vpshld_mr(<4 x i32> %a0, <4 x i32> %a1) {
+ ;CHECK-LABEL: stack_fold_vpshld_mr
+ ;CHECK: vpshld {{%xmm[0-9][0-9]*}}, {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <4 x i32> @llvm.x86.xop.vpshld(<4 x i32> %a1, <4 x i32> %a0)
+ ret <4 x i32> %2
+}
+declare <4 x i32> @llvm.x86.xop.vpshld(<4 x i32>, <4 x i32>) nounwind readnone
+
+define <2 x i64> @stack_fold_vpshlq_rm(<2 x i64> %a0, <2 x i64> %a1) {
+ ;CHECK-LABEL: stack_fold_vpshlq_rm
+ ;CHECK: vpshlq {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <2 x i64> @llvm.x86.xop.vpshlq(<2 x i64> %a0, <2 x i64> %a1)
+ ret <2 x i64> %2
+}
+define <2 x i64> @stack_fold_vpshlq_mr(<2 x i64> %a0, <2 x i64> %a1) {
+ ;CHECK-LABEL: stack_fold_vpshlq_mr
+ ;CHECK: vpshlq {{%xmm[0-9][0-9]*}}, {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <2 x i64> @llvm.x86.xop.vpshlq(<2 x i64> %a1, <2 x i64> %a0)
+ ret <2 x i64> %2
+}
+declare <2 x i64> @llvm.x86.xop.vpshlq(<2 x i64>, <2 x i64>) nounwind readnone
+
+define <8 x i16> @stack_fold_vpshlw_rm(<8 x i16> %a0, <8 x i16> %a1) {
+ ;CHECK-LABEL: stack_fold_vpshlw_rm
+ ;CHECK: vpshlw {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <8 x i16> @llvm.x86.xop.vpshlw(<8 x i16> %a0, <8 x i16> %a1)
+ ret <8 x i16> %2
+}
+define <8 x i16> @stack_fold_vpshlw_mr(<8 x i16> %a0, <8 x i16> %a1) {
+ ;CHECK-LABEL: stack_fold_vpshlw_mr
+ ;CHECK: vpshlw {{%xmm[0-9][0-9]*}}, {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = call <8 x i16> @llvm.x86.xop.vpshlw(<8 x i16> %a1, <8 x i16> %a0)
+ ret <8 x i16> %2
+}
+declare <8 x i16> @llvm.x86.xop.vpshlw(<8 x i16>, <8 x i16>) nounwind readnone
diff --git a/test/CodeGen/X86/stack-probe-size.ll b/test/CodeGen/X86/stack-probe-size.ll
new file mode 100644
index 0000000..21482c3
--- /dev/null
+++ b/test/CodeGen/X86/stack-probe-size.ll
@@ -0,0 +1,78 @@
+; This test verifies that the compiler generates stack probe calls when the
+; size of the local variables meets or exceeds the specified stack probe
+; size.
+;
+; Testing the default value of 4096 bytes makes sense, because the default
+; stack probe size equals the page size (4096 bytes for all x86 targets), and
+; this is unlikely to change in the future.
+;
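+; Each function below pairs an alloca with a "stack-probe-size" attribute
+; on one side or the other of that threshold, checking for either a plain
+; "subl" frame adjustment or a call to __chkstk.
+;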
+; RUN: llc < %s | FileCheck %s
+
+target datalayout = "e-m:w-p:32:32-i64:64-f80:32-n8:16:32-S32"
+target triple = "i686-pc-windows-msvc"
+
+define i32 @test1() "stack-probe-size"="0" {
+ %buffer = alloca [4095 x i8]
+
+ ret i32 0
+
+; CHECK-LABEL: _test1:
+; CHECK-NOT: subl $4095, %esp
+; CHECK: movl $4095, %eax
+; CHECK: calll __chkstk
+}
+
+define i32 @test2() {
+ %buffer = alloca [4095 x i8]
+
+ ret i32 0
+
+; CHECK-LABEL: _test2:
+; CHECK-NOT: movl $4095, %eax
+; CHECK: subl $4095, %esp
+; CHECK-NOT: calll __chkstk
+}
+
+define i32 @test3() "stack-probe-size"="8192" {
+ %buffer = alloca [4095 x i8]
+
+ ret i32 0
+
+; CHECK-LABEL: _test3:
+; CHECK-NOT: movl $4095, %eax
+; CHECK: subl $4095, %esp
+; CHECK-NOT: calll __chkstk
+}
+
+define i32 @test4() "stack-probe-size"="0" {
+ %buffer = alloca [4096 x i8]
+
+ ret i32 0
+
+; CHECK-LABEL: _test4:
+; CHECK-NOT: subl $4096, %esp
+; CHECK: movl $4096, %eax
+; CHECK: calll __chkstk
+}
+
+define i32 @test5() {
+ %buffer = alloca [4096 x i8]
+
+ ret i32 0
+
+; CHECK-LABEL: _test5:
+; CHECK-NOT: subl $4096, %esp
+; CHECK: movl $4096, %eax
+; CHECK: calll __chkstk
+}
+
+define i32 @test6() "stack-probe-size"="8192" {
+ %buffer = alloca [4096 x i8]
+
+ ret i32 0
+
+; CHECK-LABEL: _test6:
+; CHECK-NOT: movl $4096, %eax
+; CHECK: subl $4096, %esp
+; CHECK-NOT: calll __chkstk
+}
diff --git a/test/CodeGen/X86/stack-protector-dbginfo.ll b/test/CodeGen/X86/stack-protector-dbginfo.ll
index cf0f999..a84b77e 100644
--- a/test/CodeGen/X86/stack-protector-dbginfo.ll
+++ b/test/CodeGen/X86/stack-protector-dbginfo.ll
@@ -10,9 +10,9 @@
; Function Attrs: nounwind sspreq
define i32 @_Z18read_response_sizev() #0 {
entry:
- tail call void @llvm.dbg.value(metadata !22, i64 0, metadata !23, metadata !{metadata !"0x102"}), !dbg !39
+ tail call void @llvm.dbg.value(metadata !22, i64 0, metadata !23, metadata !{!"0x102"}), !dbg !39
%0 = load i64* getelementptr inbounds ({ i64, [56 x i8] }* @a, i32 0, i32 0), align 8, !dbg !40
- tail call void @llvm.dbg.value(metadata !63, i64 0, metadata !64, metadata !{metadata !"0x102"}), !dbg !71
+ tail call void @llvm.dbg.value(metadata i32 undef, i64 0, metadata !64, metadata !{!"0x102"}), !dbg !71
%1 = trunc i64 %0 to i32
ret i32 %1
}
@@ -25,73 +25,73 @@ attributes #0 = { sspreq }
!llvm.dbg.cu = !{!0}
!llvm.module.flags = !{!21, !72}
-!0 = metadata !{metadata !"0x11\004\00clang version 3.4 \001\00\000\00\001", metadata !1, metadata !2, metadata !5, metadata !8, metadata !20, metadata !5} ; [ DW_TAG_compile_unit ] [/Users/matt/ryan_bug/<unknown>] [DW_LANG_C_plus_plus]
-!1 = metadata !{metadata !"<unknown>", metadata !"/Users/matt/ryan_bug"}
-!2 = metadata !{metadata !3}
-!3 = metadata !{metadata !"0x4\00\0020\0032\0032\000\000\000", metadata !1, metadata !4, null, metadata !6, null, null, null} ; [ DW_TAG_enumeration_type ] [line 20, size 32, align 32, offset 0] [def] [from ]
-!4 = metadata !{metadata !"0x13\00C\0019\008\008\000\000\000", metadata !1, null, null, metadata !5, null, null, null} ; [ DW_TAG_structure_type ] [C] [line 19, size 8, align 8, offset 0] [def] [from ]
-!5 = metadata !{}
-!6 = metadata !{metadata !7}
-!7 = metadata !{metadata !"0x28\00max_frame_size\000"} ; [ DW_TAG_enumerator ] [max_frame_size :: 0]
-!8 = metadata !{metadata !9, metadata !24, metadata !41, metadata !65}
-!9 = metadata !{metadata !"0x2e\00read_response_size\00read_response_size\00_Z18read_response_sizev\0027\000\001\000\006\00256\001\0027", metadata !1, metadata !10, metadata !11, null, i32 ()* @_Z18read_response_sizev, null, null, metadata !14} ; [ DW_TAG_subprogram ] [line 27] [def] [read_response_size]
-!10 = metadata !{metadata !"0x29", metadata !1} ; [ DW_TAG_file_type ] [/Users/matt/ryan_bug/<unknown>]
-!11 = metadata !{metadata !"0x15\00\000\000\000\000\000\000", i32 0, null, null, metadata !12, null, null, null} ; [ DW_TAG_subroutine_type ] [line 0, size 0, align 0, offset 0] [from ]
-!12 = metadata !{metadata !13}
-!13 = metadata !{metadata !"0x24\00int\000\0032\0032\000\000\005", null, null} ; [ DW_TAG_base_type ] [int] [line 0, size 32, align 32, offset 0, enc DW_ATE_signed]
-!14 = metadata !{metadata !15, metadata !19}
-!15 = metadata !{metadata !"0x100\00b\0028\000", metadata !9, metadata !10, metadata !16} ; [ DW_TAG_auto_variable ] [b] [line 28]
-!16 = metadata !{metadata !"0x13\00B\0016\0032\0032\000\000\000", metadata !1, null, null, metadata !17, null, null} ; [ DW_TAG_structure_type ] [B] [line 16, size 32, align 32, offset 0] [def] [from ]
-!17 = metadata !{metadata !18}
-!18 = metadata !{metadata !"0xd\00end_of_file\0017\0032\0032\000\000", metadata !1, metadata !16, metadata !13} ; [ DW_TAG_member ] [end_of_file] [line 17, size 32, align 32, offset 0] [from int]
-!19 = metadata !{metadata !"0x100\00c\0029\000", metadata !9, metadata !10, metadata !13} ; [ DW_TAG_auto_variable ] [c] [line 29]
-!20 = metadata !{}
-!21 = metadata !{i32 2, metadata !"Dwarf Version", i32 2}
-!22 = metadata !{i64* getelementptr inbounds ({ i64, [56 x i8] }* @a, i32 0, i32 0)}
-!23 = metadata !{metadata !"0x101\00p2\0033554444\000", metadata !24, metadata !10, metadata !32, metadata !38} ; [ DW_TAG_arg_variable ] [p2] [line 12]
-!24 = metadata !{metadata !"0x2e\00min<unsigned long long>\00min<unsigned long long>\00_ZN3__13minIyEERKT_S3_RS1_\0012\000\001\000\006\00256\001\0012", metadata !1, metadata !25, metadata !27, null, null, metadata !33, null, metadata !35} ; [ DW_TAG_subprogram ] [line 12] [def] [min<unsigned long long>]
-!25 = metadata !{metadata !"0x39\00__1\001", metadata !26, null} ; [ DW_TAG_namespace ] [__1] [line 1]
-!26 = metadata !{metadata !"main.cpp", metadata !"/Users/matt/ryan_bug"}
-!27 = metadata !{metadata !"0x15\00\000\000\000\000\000\000", i32 0, null, null, metadata !28, null, null, null} ; [ DW_TAG_subroutine_type ] [line 0, size 0, align 0, offset 0] [from ]
-!28 = metadata !{metadata !29, metadata !29, metadata !32}
-!29 = metadata !{metadata !"0x10\00\000\000\000\000\000", null, null, metadata !30} ; [ DW_TAG_reference_type ] [line 0, size 0, align 0, offset 0] [from ]
-!30 = metadata !{metadata !"0x26\00\000\000\000\000\000", null, null, metadata !31} ; [ DW_TAG_const_type ] [line 0, size 0, align 0, offset 0] [from long long unsigned int]
-!31 = metadata !{metadata !"0x24\00long long unsigned int\000\0064\0064\000\000\007", null, null} ; [ DW_TAG_base_type ] [long long unsigned int] [line 0, size 64, align 64, offset 0, enc DW_ATE_unsigned]
-!32 = metadata !{metadata !"0x10\00\000\000\000\000\000", null, null, metadata !31} ; [ DW_TAG_reference_type ] [line 0, size 0, align 0, offset 0] [from long long unsigned int]
-!33 = metadata !{metadata !34}
-!34 = metadata !{metadata !"0x2f\00_Tp\000\000", null, metadata !31, null} ; [ DW_TAG_template_type_parameter ]
-!35 = metadata !{metadata !36, metadata !37}
-!36 = metadata !{metadata !"0x101\00p1\0016777228\000", metadata !24, metadata !10, metadata !29} ; [ DW_TAG_arg_variable ] [p1] [line 12]
-!37 = metadata !{metadata !"0x101\00p2\0033554444\000", metadata !24, metadata !10, metadata !32} ; [ DW_TAG_arg_variable ] [p2] [line 12]
-!38 = metadata !{i32 33, i32 0, metadata !9, null}
-!39 = metadata !{i32 12, i32 0, metadata !24, metadata !38}
-!40 = metadata !{i32 9, i32 0, metadata !41, metadata !59}
-!41 = metadata !{metadata !"0x2e\00min<unsigned long long, __1::A>\00min<unsigned long long, __1::A>\00_ZN3__13minIyNS_1AEEERKT_S4_RS2_T0_\007\000\001\000\006\00256\001\008", metadata !1, metadata !25, metadata !42, null, null, metadata !53, null, metadata !55} ; [ DW_TAG_subprogram ] [line 7] [def] [scope 8] [min<unsigned long long, __1::A>]
-!42 = metadata !{metadata !"0x15\00\000\000\000\000\000\000", i32 0, null, null, metadata !43, null, null, null} ; [ DW_TAG_subroutine_type ] [line 0, size 0, align 0, offset 0] [from ]
-!43 = metadata !{metadata !29, metadata !29, metadata !32, metadata !44}
-!44 = metadata !{metadata !"0x13\00A\000\008\008\000\000\000", metadata !1, metadata !25, null, metadata !45, null, null, null} ; [ DW_TAG_structure_type ] [A] [line 0, size 8, align 8, offset 0] [def] [from ]
-!45 = metadata !{metadata !46}
-!46 = metadata !{metadata !"0x2e\00operator()\00operator()\00_ZN3__11AclERKiS2_\001\000\000\000\006\00256\001\001", metadata !1, metadata !44, metadata !47, null, null, null, i32 0, metadata !52} ; [ DW_TAG_subprogram ] [line 1] [operator()]
-!47 = metadata !{metadata !"0x15\00\000\000\000\000\000\000", i32 0, null, null, metadata !48, null, null, null} ; [ DW_TAG_subroutine_type ] [line 0, size 0, align 0, offset 0] [from ]
-!48 = metadata !{metadata !13, metadata !49, metadata !50, metadata !50}
-!49 = metadata !{metadata !"0xf\00\000\0064\0064\000\001088", i32 0, null, metadata !44} ; [ DW_TAG_pointer_type ] [line 0, size 64, align 64, offset 0] [artificial] [from A]
-!50 = metadata !{metadata !"0x10\00\000\000\000\000\000", null, null, metadata !51} ; [ DW_TAG_reference_type ] [line 0, size 0, align 0, offset 0] [from ]
-!51 = metadata !{metadata !"0x26\00\000\000\000\000\000", null, null, metadata !13} ; [ DW_TAG_const_type ] [line 0, size 0, align 0, offset 0] [from int]
-!52 = metadata !{i32 786468}
-!53 = metadata !{metadata !34, metadata !54}
-!54 = metadata !{metadata !"0x2f\00_Compare\000\000", null, metadata !44, null} ; [ DW_TAG_template_type_parameter ]
-!55 = metadata !{metadata !56, metadata !57, metadata !58}
-!56 = metadata !{metadata !"0x101\00p1\0016777223\000", metadata !41, metadata !10, metadata !29} ; [ DW_TAG_arg_variable ] [p1] [line 7]
-!57 = metadata !{metadata !"0x101\00p2\0033554439\000", metadata !41, metadata !10, metadata !32} ; [ DW_TAG_arg_variable ] [p2] [line 7]
-!58 = metadata !{metadata !"0x101\00p3\0050331656\000", metadata !41, metadata !10, metadata !44} ; [ DW_TAG_arg_variable ] [p3] [line 8]
-!59 = metadata !{i32 13, i32 0, metadata !24, metadata !38}
-!63 = metadata !{i32 undef}
-!64 = metadata !{metadata !"0x101\00p1\0033554433\000", metadata !65, metadata !10, metadata !50, metadata !40} ; [ DW_TAG_arg_variable ] [p1] [line 1]
-!65 = metadata !{metadata !"0x2e\00operator()\00operator()\00_ZN3__11AclERKiS2_\001\000\001\000\006\00256\001\002", metadata !1, metadata !25, metadata !47, null, null, null, metadata !46, metadata !66} ; [ DW_TAG_subprogram ] [line 1] [def] [scope 2] [operator()]
-!66 = metadata !{metadata !67, metadata !69, metadata !70}
-!67 = metadata !{metadata !"0x101\00this\0016777216\001088", metadata !65, null, metadata !68} ; [ DW_TAG_arg_variable ] [this] [line 0]
-!68 = metadata !{metadata !"0xf\00\000\0064\0064\000\000", null, null, metadata !44} ; [ DW_TAG_pointer_type ] [line 0, size 64, align 64, offset 0] [from A]
-!69 = metadata !{metadata !"0x101\00p1\0033554433\000", metadata !65, metadata !10, metadata !50} ; [ DW_TAG_arg_variable ] [p1] [line 1]
-!70 = metadata !{metadata !"0x101\00\0050331650\000", metadata !65, metadata !10, metadata !50} ; [ DW_TAG_arg_variable ] [line 2]
-!71 = metadata !{i32 1, i32 0, metadata !65, metadata !40}
-!72 = metadata !{i32 1, metadata !"Debug Info Version", i32 2}
+!0 = !{!"0x11\004\00clang version 3.4 \001\00\000\00\001", !1, !2, !5, !8, !20, !5} ; [ DW_TAG_compile_unit ] [/Users/matt/ryan_bug/<unknown>] [DW_LANG_C_plus_plus]
+!1 = !{!"<unknown>", !"/Users/matt/ryan_bug"}
+!2 = !{!3}
+!3 = !{!"0x4\00\0020\0032\0032\000\000\000", !1, !4, null, !6, null, null, null} ; [ DW_TAG_enumeration_type ] [line 20, size 32, align 32, offset 0] [def] [from ]
+!4 = !{!"0x13\00C\0019\008\008\000\000\000", !1, null, null, !5, null, null, null} ; [ DW_TAG_structure_type ] [C] [line 19, size 8, align 8, offset 0] [def] [from ]
+!5 = !{}
+!6 = !{!7}
+!7 = !{!"0x28\00max_frame_size\000"} ; [ DW_TAG_enumerator ] [max_frame_size :: 0]
+!8 = !{!9, !24, !41, !65}
+!9 = !{!"0x2e\00read_response_size\00read_response_size\00_Z18read_response_sizev\0027\000\001\000\006\00256\001\0027", !1, !10, !11, null, i32 ()* @_Z18read_response_sizev, null, null, !14} ; [ DW_TAG_subprogram ] [line 27] [def] [read_response_size]
+!10 = !{!"0x29", !1} ; [ DW_TAG_file_type ] [/Users/matt/ryan_bug/<unknown>]
+!11 = !{!"0x15\00\000\000\000\000\000\000", i32 0, null, null, !12, null, null, null} ; [ DW_TAG_subroutine_type ] [line 0, size 0, align 0, offset 0] [from ]
+!12 = !{!13}
+!13 = !{!"0x24\00int\000\0032\0032\000\000\005", null, null} ; [ DW_TAG_base_type ] [int] [line 0, size 32, align 32, offset 0, enc DW_ATE_signed]
+!14 = !{!15, !19}
+!15 = !{!"0x100\00b\0028\000", !9, !10, !16} ; [ DW_TAG_auto_variable ] [b] [line 28]
+!16 = !{!"0x13\00B\0016\0032\0032\000\000\000", !1, null, null, !17, null, null} ; [ DW_TAG_structure_type ] [B] [line 16, size 32, align 32, offset 0] [def] [from ]
+!17 = !{!18}
+!18 = !{!"0xd\00end_of_file\0017\0032\0032\000\000", !1, !16, !13} ; [ DW_TAG_member ] [end_of_file] [line 17, size 32, align 32, offset 0] [from int]
+!19 = !{!"0x100\00c\0029\000", !9, !10, !13} ; [ DW_TAG_auto_variable ] [c] [line 29]
+!20 = !{}
+!21 = !{i32 2, !"Dwarf Version", i32 2}
+!22 = !{i64* getelementptr inbounds ({ i64, [56 x i8] }* @a, i32 0, i32 0)}
+!23 = !{!"0x101\00p2\0033554444\000", !24, !10, !32, !38} ; [ DW_TAG_arg_variable ] [p2] [line 12]
+!24 = !{!"0x2e\00min<unsigned long long>\00min<unsigned long long>\00_ZN3__13minIyEERKT_S3_RS1_\0012\000\001\000\006\00256\001\0012", !1, !25, !27, null, null, !33, null, !35} ; [ DW_TAG_subprogram ] [line 12] [def] [min<unsigned long long>]
+!25 = !{!"0x39\00__1\001", !26, null} ; [ DW_TAG_namespace ] [__1] [line 1]
+!26 = !{!"main.cpp", !"/Users/matt/ryan_bug"}
+!27 = !{!"0x15\00\000\000\000\000\000\000", i32 0, null, null, !28, null, null, null} ; [ DW_TAG_subroutine_type ] [line 0, size 0, align 0, offset 0] [from ]
+!28 = !{!29, !29, !32}
+!29 = !{!"0x10\00\000\000\000\000\000", null, null, !30} ; [ DW_TAG_reference_type ] [line 0, size 0, align 0, offset 0] [from ]
+!30 = !{!"0x26\00\000\000\000\000\000", null, null, !31} ; [ DW_TAG_const_type ] [line 0, size 0, align 0, offset 0] [from long long unsigned int]
+!31 = !{!"0x24\00long long unsigned int\000\0064\0064\000\000\007", null, null} ; [ DW_TAG_base_type ] [long long unsigned int] [line 0, size 64, align 64, offset 0, enc DW_ATE_unsigned]
+!32 = !{!"0x10\00\000\000\000\000\000", null, null, !31} ; [ DW_TAG_reference_type ] [line 0, size 0, align 0, offset 0] [from long long unsigned int]
+!33 = !{!34}
+!34 = !{!"0x2f\00_Tp\000\000", null, !31, null} ; [ DW_TAG_template_type_parameter ]
+!35 = !{!36, !37}
+!36 = !{!"0x101\00p1\0016777228\000", !24, !10, !29} ; [ DW_TAG_arg_variable ] [p1] [line 12]
+!37 = !{!"0x101\00p2\0033554444\000", !24, !10, !32} ; [ DW_TAG_arg_variable ] [p2] [line 12]
+!38 = !MDLocation(line: 33, scope: !9)
+!39 = !MDLocation(line: 12, scope: !24, inlinedAt: !38)
+!40 = !MDLocation(line: 9, scope: !41, inlinedAt: !59)
+!41 = !{!"0x2e\00min<unsigned long long, __1::A>\00min<unsigned long long, __1::A>\00_ZN3__13minIyNS_1AEEERKT_S4_RS2_T0_\007\000\001\000\006\00256\001\008", !1, !25, !42, null, null, !53, null, !55} ; [ DW_TAG_subprogram ] [line 7] [def] [scope 8] [min<unsigned long long, __1::A>]
+!42 = !{!"0x15\00\000\000\000\000\000\000", i32 0, null, null, !43, null, null, null} ; [ DW_TAG_subroutine_type ] [line 0, size 0, align 0, offset 0] [from ]
+!43 = !{!29, !29, !32, !44}
+!44 = !{!"0x13\00A\000\008\008\000\000\000", !1, !25, null, !45, null, null, null} ; [ DW_TAG_structure_type ] [A] [line 0, size 8, align 8, offset 0] [def] [from ]
+!45 = !{!46}
+!46 = !{!"0x2e\00operator()\00operator()\00_ZN3__11AclERKiS2_\001\000\000\000\006\00256\001\001", !1, !44, !47, null, null, null, i32 0, !52} ; [ DW_TAG_subprogram ] [line 1] [operator()]
+!47 = !{!"0x15\00\000\000\000\000\000\000", i32 0, null, null, !48, null, null, null} ; [ DW_TAG_subroutine_type ] [line 0, size 0, align 0, offset 0] [from ]
+!48 = !{!13, !49, !50, !50}
+!49 = !{!"0xf\00\000\0064\0064\000\001088", i32 0, null, !44} ; [ DW_TAG_pointer_type ] [line 0, size 64, align 64, offset 0] [artificial] [from A]
+!50 = !{!"0x10\00\000\000\000\000\000", null, null, !51} ; [ DW_TAG_reference_type ] [line 0, size 0, align 0, offset 0] [from ]
+!51 = !{!"0x26\00\000\000\000\000\000", null, null, !13} ; [ DW_TAG_const_type ] [line 0, size 0, align 0, offset 0] [from int]
+!52 = !{i32 786468}
+!53 = !{!34, !54}
+!54 = !{!"0x2f\00_Compare\000\000", null, !44, null} ; [ DW_TAG_template_type_parameter ]
+!55 = !{!56, !57, !58}
+!56 = !{!"0x101\00p1\0016777223\000", !41, !10, !29} ; [ DW_TAG_arg_variable ] [p1] [line 7]
+!57 = !{!"0x101\00p2\0033554439\000", !41, !10, !32} ; [ DW_TAG_arg_variable ] [p2] [line 7]
+!58 = !{!"0x101\00p3\0050331656\000", !41, !10, !44} ; [ DW_TAG_arg_variable ] [p3] [line 8]
+!59 = !MDLocation(line: 13, scope: !24, inlinedAt: !38)
+!63 = !{i32 undef}
+!64 = !{!"0x101\00p1\0033554433\000", !65, !10, !50, !40} ; [ DW_TAG_arg_variable ] [p1] [line 1]
+!65 = !{!"0x2e\00operator()\00operator()\00_ZN3__11AclERKiS2_\001\000\001\000\006\00256\001\002", !1, !25, !47, null, null, null, !46, !66} ; [ DW_TAG_subprogram ] [line 1] [def] [scope 2] [operator()]
+!66 = !{!67, !69, !70}
+!67 = !{!"0x101\00this\0016777216\001088", !65, null, !68} ; [ DW_TAG_arg_variable ] [this] [line 0]
+!68 = !{!"0xf\00\000\0064\0064\000\000", null, null, !44} ; [ DW_TAG_pointer_type ] [line 0, size 64, align 64, offset 0] [from A]
+!69 = !{!"0x101\00p1\0033554433\000", !65, !10, !50} ; [ DW_TAG_arg_variable ] [p1] [line 1]
+!70 = !{!"0x101\00\0050331650\000", !65, !10, !50} ; [ DW_TAG_arg_variable ] [line 2]
+!71 = !MDLocation(line: 1, scope: !65, inlinedAt: !40)
+!72 = !{i32 1, !"Debug Info Version", i32 2}
diff --git a/test/CodeGen/X86/stack-protector-weight.ll b/test/CodeGen/X86/stack-protector-weight.ll
new file mode 100644
index 0000000..c5bf491
--- /dev/null
+++ b/test/CodeGen/X86/stack-protector-weight.ll
@@ -0,0 +1,36 @@
+; RUN: llc -mtriple=x86_64-apple-darwin -print-machineinstrs=expand-isel-pseudos -enable-selectiondag-sp=true %s -o /dev/null 2>&1 | FileCheck %s --check-prefix=SELDAG
+; RUN: llc -mtriple=x86_64-apple-darwin -print-machineinstrs=expand-isel-pseudos -enable-selectiondag-sp=false %s -o /dev/null 2>&1 | FileCheck %s --check-prefix=IR
+
+; SELDAG: # Machine code for function test_branch_weights:
+; SELDAG: Successors according to CFG: BB#[[SUCCESS:[0-9]+]](1048575) BB#[[FAILURE:[0-9]+]](1)
+; SELDAG: BB#[[FAILURE]]:
+; SELDAG: CALL64pcrel32 <es:__stack_chk_fail>
+; SELDAG: BB#[[SUCCESS]]:
+
+; IR: # Machine code for function test_branch_weights:
+; IR: Successors according to CFG: BB#[[SUCCESS:[0-9]+]](1048575) BB#[[FAILURE:[0-9]+]](1)
+; IR: BB#[[SUCCESS]]:
+; IR: BB#[[FAILURE]]:
+; IR: CALL64pcrel32 <ga:@__stack_chk_fail>
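+;
+; With SelectionDAG-based stack protectors (SELDAG) the failure block is
+; emitted after the success block; with IR-level protectors (IR) the order
+; is reversed. In both cases the success edge keeps the heavy weight
+; (1048575 vs 1).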
+
+define i32 @test_branch_weights(i32 %n) #0 {
+entry:
+ %a = alloca [128 x i32], align 16
+ %0 = bitcast [128 x i32]* %a to i8*
+ call void @llvm.lifetime.start(i64 512, i8* %0)
+ %arraydecay = getelementptr inbounds [128 x i32]* %a, i64 0, i64 0
+ call void @foo2(i32* %arraydecay)
+ %idxprom = sext i32 %n to i64
+ %arrayidx = getelementptr inbounds [128 x i32]* %a, i64 0, i64 %idxprom
+ %1 = load i32* %arrayidx, align 4
+ call void @llvm.lifetime.end(i64 512, i8* %0)
+ ret i32 %1
+}
+
+declare void @llvm.lifetime.start(i64, i8* nocapture)
+
+declare void @foo2(i32*)
+
+declare void @llvm.lifetime.end(i64, i8* nocapture)
+
+attributes #0 = { ssp "stack-protector-buffer-size"="8" }
diff --git a/test/CodeGen/X86/stackpointer.ll b/test/CodeGen/X86/stackpointer.ll
index 80bcfbf..094856b 100644
--- a/test/CodeGen/X86/stackpointer.ll
+++ b/test/CodeGen/X86/stackpointer.ll
@@ -25,4 +25,4 @@ declare void @llvm.write_register.i64(metadata, i64) nounwind
; register unsigned long current_stack_pointer asm("rsp");
; CHECK-NOT: .asciz "rsp"
-!0 = metadata !{metadata !"rsp\00"}
+!0 = !{!"rsp\00"}
diff --git a/test/CodeGen/X86/statepoint-call-lowering.ll b/test/CodeGen/X86/statepoint-call-lowering.ll
new file mode 100644
index 0000000..e1a1369
--- /dev/null
+++ b/test/CodeGen/X86/statepoint-call-lowering.ll
@@ -0,0 +1,104 @@
+; RUN: llc < %s | FileCheck %s
+; This file contains a collection of basic tests to ensure we didn't
+; screw up normal call lowering when there are no deopt or gc arguments.
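+; Each test wraps a direct call in @llvm.experimental.gc.statepoint.* and
+; reads the return value back through the matching
+; @llvm.experimental.gc.result.* overload; codegen should match an
+; ordinary call.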
+
+target datalayout = "e-i64:64-f80:128-n8:16:32:64-S128"
+target triple = "x86_64-pc-linux-gnu"
+
+declare zeroext i1 @return_i1()
+declare zeroext i32 @return_i32()
+declare i32* @return_i32ptr()
+declare float @return_float()
+declare void @varargf(i32, ...)
+
+define i1 @test_i1_return() gc "statepoint-example" {
+; CHECK-LABEL: test_i1_return
+; This is just checking that an i1 gets lowered normally when there are no
+; extra state arguments to the statepoint
+; CHECK: pushq %rax
+; CHECK: callq return_i1
+; CHECK: popq %rdx
+; CHECK: retq
+entry:
+ %safepoint_token = tail call i32 (i1 ()*, i32, i32, ...)* @llvm.experimental.gc.statepoint.p0f_i1f(i1 ()* @return_i1, i32 0, i32 0, i32 0)
+ %call1 = call zeroext i1 @llvm.experimental.gc.result.i1(i32 %safepoint_token)
+ ret i1 %call1
+}
+
+define i32 @test_i32_return() gc "statepoint-example" {
+; CHECK-LABEL: test_i32_return
+; CHECK: pushq %rax
+; CHECK: callq return_i32
+; CHECK: popq %rdx
+; CHECK: retq
+entry:
+ %safepoint_token = tail call i32 (i32 ()*, i32, i32, ...)* @llvm.experimental.gc.statepoint.p0f_i32f(i32 ()* @return_i32, i32 0, i32 0, i32 0)
+ %call1 = call zeroext i32 @llvm.experimental.gc.result.i32(i32 %safepoint_token)
+ ret i32 %call1
+}
+
+define i32* @test_i32ptr_return() gc "statepoint-example" {
+; CHECK-LABEL: test_i32ptr_return
+; CHECK: pushq %rax
+; CHECK: callq return_i32ptr
+; CHECK: popq %rdx
+; CHECK: retq
+entry:
+ %safepoint_token = tail call i32 (i32* ()*, i32, i32, ...)* @llvm.experimental.gc.statepoint.p0f_p0i32f(i32* ()* @return_i32ptr, i32 0, i32 0, i32 0)
+ %call1 = call i32* @llvm.experimental.gc.result.p0i32(i32 %safepoint_token)
+ ret i32* %call1
+}
+
+define float @test_float_return() gc "statepoint-example" {
+; CHECK-LABEL: test_float_return
+; CHECK: pushq %rax
+; CHECK: callq return_float
+; CHECK: popq %rax
+; CHECK: retq
+entry:
+ %safepoint_token = tail call i32 (float ()*, i32, i32, ...)* @llvm.experimental.gc.statepoint.p0f_f32f(float ()* @return_float, i32 0, i32 0, i32 0)
+ %call1 = call float @llvm.experimental.gc.result.f32(i32 %safepoint_token)
+ ret float %call1
+}
+
+define i1 @test_relocate(i32 addrspace(1)* %a) gc "statepoint-example" {
+; CHECK-LABEL: test_relocate
+; Check that an unused relocate has no code-generation impact
+; CHECK: pushq %rax
+; CHECK: callq return_i1
+; CHECK-NEXT: .Ltmp13:
+; CHECK-NEXT: popq %rdx
+; CHECK-NEXT: retq
+entry:
+ %safepoint_token = tail call i32 (i1 ()*, i32, i32, ...)* @llvm.experimental.gc.statepoint.p0f_i1f(i1 ()* @return_i1, i32 0, i32 0, i32 0, i32 addrspace(1)* %a)
+ %call1 = call i32 addrspace(1)* @llvm.experimental.gc.relocate.p1i32(i32 %safepoint_token, i32 4, i32 4)
+ %call2 = call zeroext i1 @llvm.experimental.gc.result.i1(i32 %safepoint_token)
+ ret i1 %call2
+}
+
+define void @test_void_vararg() gc "statepoint-example" {
+; CHECK-LABEL: test_void_vararg
+; Check a statepoint wrapping a *void* returning vararg function works
+; CHECK: callq varargf
+entry:
+ %safepoint_token = tail call i32 (void (i32, ...)*, i32, i32, ...)* @llvm.experimental.gc.statepoint.p0f_isVoidi32varargf(void (i32, ...)* @varargf, i32 2, i32 0, i32 42, i32 43, i32 0)
+ ;; If we tried to use the result of a statepoint wrapping a
+ ;; non-void-returning varargf, we would crash.
+ ret void
+}
+
+declare i32 @llvm.experimental.gc.statepoint.p0f_i1f(i1 ()*, i32, i32, ...)
+declare i1 @llvm.experimental.gc.result.i1(i32)
+
+declare i32 @llvm.experimental.gc.statepoint.p0f_i32f(i32 ()*, i32, i32, ...)
+declare i32 @llvm.experimental.gc.result.i32(i32)
+
+declare i32 @llvm.experimental.gc.statepoint.p0f_p0i32f(i32* ()*, i32, i32, ...)
+declare i32* @llvm.experimental.gc.result.p0i32(i32)
+
+declare i32 @llvm.experimental.gc.statepoint.p0f_f32f(float ()*, i32, i32, ...)
+declare float @llvm.experimental.gc.result.f32(i32)
+
+declare i32 @llvm.experimental.gc.statepoint.p0f_isVoidi32varargf(void (i32, ...)*, i32, i32, ...)
+
+declare i32 addrspace(1)* @llvm.experimental.gc.relocate.p1i32(i32, i32, i32)
diff --git a/test/CodeGen/X86/statepoint-forward.ll b/test/CodeGen/X86/statepoint-forward.ll
new file mode 100644
index 0000000..12a6ac2
--- /dev/null
+++ b/test/CodeGen/X86/statepoint-forward.ll
@@ -0,0 +1,107 @@
+; RUN: opt -O3 -S < %s | FileCheck --check-prefix=CHECK-OPT %s
+; RUN: llc < %s | FileCheck --check-prefix=CHECK-LLC %s
+; These tests are targeted at making sure we don't retain information
+; about memory which contains potential gc references across a statepoint.
+; They're carefully written to only outlaw forwarding of references.
+; Depending on the collector, forwarding non-reference fields or constant
+; null references may be perfectly legal, even if currently unimplemented.
+; The general structure of these tests is:
+; - learn a fact about memory (via an assume)
+; - cross a statepoint
+; - check the same fact about memory (which we no longer know)
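+;
+; Concretely: load a value, assume a predicate @f on it, cross the
+; statepoint, reload, and check that the optimizer re-evaluates @f rather
+; than reusing the earlier result.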
+
+target datalayout = "e-i64:64-f80:128-n8:16:32:64-S128"
+target triple = "x86_64-pc-linux-gnu"
+
+; If not at a statepoint, we could forward known memory values
+; across this call.
+declare void @func() readonly
+
+;; Forwarding the value of a pointer load is invalid since it may have
+;; changed at the safepoint. Forwarding a non-gc pointer value would
+;; be valid, but is not currently implemented.
+define i1 @test_load_forward(i32 addrspace(1)* addrspace(1)* %p) gc "statepoint-example" {
+entry:
+ %before = load i32 addrspace(1)* addrspace(1)* %p
+ %cmp1 = call i1 @f(i32 addrspace(1)* %before)
+ call void @llvm.assume(i1 %cmp1)
+ %safepoint_token = tail call i32 (void ()*, i32, i32, ...)* @llvm.experimental.gc.statepoint.p0f_isVoidf(void ()* @func, i32 0, i32 0, i32 0, i32 addrspace(1)* addrspace(1)* %p)
+ %pnew = call i32 addrspace(1)* addrspace(1)* @llvm.experimental.gc.relocate.p1p1i32(i32 %safepoint_token, i32 4, i32 4)
+ %after = load i32 addrspace(1)* addrspace(1)* %pnew
+ %cmp2 = call i1 @f(i32 addrspace(1)* %after)
+ ret i1 %cmp2
+
+; CHECK-OPT-LABEL: test_load_forward
+; CHECK-OPT: ret i1 %cmp2
+; CHECK-LLC-LABEL: test_load_forward
+; CHECK-LLC: callq f
+}
+
+;; Same as above, but forwarding from a store
+define i1 @test_store_forward(i32 addrspace(1)* addrspace(1)* %p,
+ i32 addrspace(1)* %v) gc "statepoint-example" {
+entry:
+ %cmp1 = call i1 @f(i32 addrspace(1)* %v)
+ call void @llvm.assume(i1 %cmp1)
+ store i32 addrspace(1)* %v, i32 addrspace(1)* addrspace(1)* %p
+ %safepoint_token = tail call i32 (void ()*, i32, i32, ...)* @llvm.experimental.gc.statepoint.p0f_isVoidf(void ()* @func, i32 0, i32 0, i32 0, i32 addrspace(1)* addrspace(1)* %p)
+ %pnew = call i32 addrspace(1)* addrspace(1)* @llvm.experimental.gc.relocate.p1p1i32(i32 %safepoint_token, i32 4, i32 4)
+ %after = load i32 addrspace(1)* addrspace(1)* %pnew
+ %cmp2 = call i1 @f(i32 addrspace(1)* %after)
+ ret i1 %cmp2
+
+; CHECK-OPT-LABEL: test_store_forward
+; CHECK-OPT: ret i1 %cmp2
+; CHECK-LLC-LABEL: test_store_forward
+; CHECK-LLC: callq f
+}
+
+; A predicate on the pointer which is not simply null, but whose value
+; would be known unchanged if the pointer value could be forwarded.
+; The implementation of such a function could inspect the integral value
+; of the pointer and is thus not safe to reuse after a statepoint.
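+; (A moving collector may change a pointer's bits at a safepoint without
+; changing what it refers to.)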
+declare i1 @f(i32 addrspace(1)* %v) readnone
+
+; This is a variant of the test_load_forward test which is intended to
+; highlight the fact that a gc pointer can be stored in part of the heap
+; that is not itself GC managed. The GC may have an external mechanism
+; to know about and update that value at a safepoint. Note that the
+; statepoint does not provide the collector with this root.
+define i1 @test_load_forward_nongc_heap(i32 addrspace(1)** %p) gc "statepoint-example" {
+entry:
+ %before = load i32 addrspace(1)** %p
+ %cmp1 = call i1 @f(i32 addrspace(1)* %before)
+ call void @llvm.assume(i1 %cmp1)
+ call i32 (void ()*, i32, i32, ...)* @llvm.experimental.gc.statepoint.p0f_isVoidf(void ()* @func, i32 0, i32 0, i32 0)
+ %after = load i32 addrspace(1)** %p
+ %cmp2 = call i1 @f(i32 addrspace(1)* %after)
+ ret i1 %cmp2
+
+; CHECK-OPT-LABEL: test_load_forward_nongc_heap
+; CHECK-OPT: ret i1 %cmp2
+; CHECK-LLC-LABEL: test_load_forward_nongc_heap
+; CHECK-LLC: callq f
+}
+
+;; Same as above, but forwarding from a store
+define i1 @test_store_forward_nongc_heap(i32 addrspace(1)** %p,
+ i32 addrspace(1)* %v) gc "statepoint-example" {
+entry:
+ %cmp1 = call i1 @f(i32 addrspace(1)* %v)
+ call void @llvm.assume(i1 %cmp1)
+ store i32 addrspace(1)* %v, i32 addrspace(1)** %p
+ call i32 (void ()*, i32, i32, ...)* @llvm.experimental.gc.statepoint.p0f_isVoidf(void ()* @func, i32 0, i32 0, i32 0)
+ %after = load i32 addrspace(1)** %p
+ %cmp2 = call i1 @f(i32 addrspace(1)* %after)
+ ret i1 %cmp2
+
+; CHECK-OPT-LABEL: test_store_forward_nongc_heap
+; CHECK-OPT: ret i1 %cmp2
+; CHECK-LLC-LABEL: test_store_forward_nongc_heap
+; CHECK-LLC: callq f
+}
+
+declare void @llvm.assume(i1)
+declare i32 @llvm.experimental.gc.statepoint.p0f_isVoidf(void ()*, i32, i32, ...)
+declare i32 addrspace(1)* addrspace(1)* @llvm.experimental.gc.relocate.p1p1i32(i32, i32, i32) #3
+
diff --git a/test/CodeGen/X86/statepoint-stack-usage.ll b/test/CodeGen/X86/statepoint-stack-usage.ll
new file mode 100644
index 0000000..3ecef33
--- /dev/null
+++ b/test/CodeGen/X86/statepoint-stack-usage.ll
@@ -0,0 +1,60 @@
+; RUN: llc < %s | FileCheck %s
+
+target datalayout = "e-i64:64-f80:128-n8:16:32:64-S128"
+target triple = "x86_64-pc-linux-gnu"
+
+; This test checks that we reuse the same stack slots
+; for GC values spilled across two different call sites. Since the order
+; of GC arguments differs, naive lowering code would insert loads and
+; stores to rearrange items on the stack. We need to make sure (for
+; performance) that this doesn't happen.
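+; The second statepoint below lists the relocated pointers in the opposite
+; order from the first; if the first call's stack slots are reused, no
+; extra stores are needed between the two calls.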
+define i32 @back_to_back_calls(i32 addrspace(1)* %a, i32 addrspace(1)* %b, i32 addrspace(1)* %c) #1 {
+; CHECK-LABEL: back_to_back_calls
+; The exact stores don't matter, but three stack slots need to be created
+; CHECK: movq %rdx, 16(%rsp)
+; CHECK: movq %rdi, 8(%rsp)
+; CHECK: movq %rsi, (%rsp)
+ %safepoint_token = tail call i32 (void ()*, i32, i32, ...)* @llvm.experimental.gc.statepoint.p0f_isVoidf(void ()* undef, i32 0, i32 0, i32 5, i32 0, i32 -1, i32 0, i32 0, i32 0, i32 addrspace(1)* %a, i32 addrspace(1)* %b, i32 addrspace(1)* %c)
+ %a1 = tail call coldcc i32 addrspace(1)* @llvm.experimental.gc.relocate.p1i32(i32 %safepoint_token, i32 9, i32 9)
+ %b1 = tail call coldcc i32 addrspace(1)* @llvm.experimental.gc.relocate.p1i32(i32 %safepoint_token, i32 9, i32 10)
+ %c1 = tail call coldcc i32 addrspace(1)* @llvm.experimental.gc.relocate.p1i32(i32 %safepoint_token, i32 9, i32 11)
+; CHECK: callq
+; This is the key check. There should NOT be any memory moves here
+; CHECK-NOT: movq
+ %safepoint_token2 = tail call i32 (void ()*, i32, i32, ...)* @llvm.experimental.gc.statepoint.p0f_isVoidf(void ()* undef, i32 0, i32 0, i32 5, i32 0, i32 -1, i32 0, i32 0, i32 0, i32 addrspace(1)* %c1, i32 addrspace(1)* %b1, i32 addrspace(1)* %a1)
+ %a2 = tail call coldcc i32 addrspace(1)* @llvm.experimental.gc.relocate.p1i32(i32 %safepoint_token2, i32 9, i32 11)
+ %b2 = tail call coldcc i32 addrspace(1)* @llvm.experimental.gc.relocate.p1i32(i32 %safepoint_token2, i32 9, i32 10)
+ %c2 = tail call coldcc i32 addrspace(1)* @llvm.experimental.gc.relocate.p1i32(i32 %safepoint_token2, i32 9, i32 9)
+; CHECK: callq
+ ret i32 1
+}
+
+; This test simply checks that minor changes in vm state don't prevent
+; slots from being reused for gc values.
+define i32 @reserve_first(i32 addrspace(1)* %a, i32 addrspace(1)* %b, i32 addrspace(1)* %c) #1 {
+; CHECK-LABEL: reserve_first
+; The exact stores don't matter, but three stack slots need to be created
+; CHECK: movq %rdx, 16(%rsp)
+; CHECK: movq %rdi, 8(%rsp)
+; CHECK: movq %rsi, (%rsp)
+ %safepoint_token = tail call i32 (void ()*, i32, i32, ...)* @llvm.experimental.gc.statepoint.p0f_isVoidf(void ()* undef, i32 0, i32 0, i32 5, i32 0, i32 -1, i32 0, i32 0, i32 0, i32 addrspace(1)* %a, i32 addrspace(1)* %b, i32 addrspace(1)* %c)
+ %a1 = tail call coldcc i32 addrspace(1)* @llvm.experimental.gc.relocate.p1i32(i32 %safepoint_token, i32 9, i32 9)
+ %b1 = tail call coldcc i32 addrspace(1)* @llvm.experimental.gc.relocate.p1i32(i32 %safepoint_token, i32 9, i32 10)
+ %c1 = tail call coldcc i32 addrspace(1)* @llvm.experimental.gc.relocate.p1i32(i32 %safepoint_token, i32 9, i32 11)
+; CHECK: callq
+; This is the key check. There should NOT be any memory moves here
+; CHECK-NOT: movq
+ %safepoint_token2 = tail call i32 (void ()*, i32, i32, ...)* @llvm.experimental.gc.statepoint.p0f_isVoidf(void ()* undef, i32 0, i32 0, i32 5, i32 addrspace(1)* %a1, i32 0, i32 addrspace(1)* %c1, i32 0, i32 0, i32 addrspace(1)* %c1, i32 addrspace(1)* %b1, i32 addrspace(1)* %a1)
+ %a2 = tail call coldcc i32 addrspace(1)* @llvm.experimental.gc.relocate.p1i32(i32 %safepoint_token2, i32 9, i32 11)
+ %b2 = tail call coldcc i32 addrspace(1)* @llvm.experimental.gc.relocate.p1i32(i32 %safepoint_token2, i32 9, i32 10)
+ %c2 = tail call coldcc i32 addrspace(1)* @llvm.experimental.gc.relocate.p1i32(i32 %safepoint_token2, i32 9, i32 9)
+; CHECK: callq
+ ret i32 1
+}
+
+; Function Attrs: nounwind
+declare i32 addrspace(1)* @llvm.experimental.gc.relocate.p1i32(i32, i32, i32) #3
+
+declare i32 @llvm.experimental.gc.statepoint.p0f_isVoidf(void ()*, i32, i32, ...)
+
+attributes #1 = { uwtable }
diff --git a/test/CodeGen/X86/statepoint-stackmap-format.ll b/test/CodeGen/X86/statepoint-stackmap-format.ll
new file mode 100644
index 0000000..e452a63
--- /dev/null
+++ b/test/CodeGen/X86/statepoint-stackmap-format.ll
@@ -0,0 +1,109 @@
+; RUN: llc < %s | FileCheck %s
+; This test is a sanity check to ensure statepoints are generating StackMap
+; sections correctly. This is not intended to be a rigorous test of the
+; StackMap format (see the stackmap tests for that).
+
+target datalayout = "e-i64:64-f80:128-n8:16:32:64-S128"
+target triple = "x86_64-pc-linux-gnu"
+
+declare zeroext i1 @return_i1()
+
+define i1 @test(i32 addrspace(1)* %ptr) gc "statepoint-example" {
+; CHECK-LABEL: test
+; Do we see one spill for the local value and the store to the
+; alloca?
+; CHECK: subq $24, %rsp
+; CHECK: movq $0, 8(%rsp)
+; CHECK: movq %rdi, (%rsp)
+; CHECK: callq return_i1
+; CHECK: addq $24, %rsp
+; CHECK: retq
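+; (A plausible frame layout: 16 bytes for the two-slot alloca plus one
+; 8-byte spill slot accounts for the 24 bytes.)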
+entry:
+ %metadata1 = alloca i32 addrspace(1)*, i32 2, align 8
+ store i32 addrspace(1)* null, i32 addrspace(1)** %metadata1
+ %safepoint_token = tail call i32 (i1 ()*, i32, i32, ...)* @llvm.experimental.gc.statepoint.p0f_i1f(i1 ()* @return_i1, i32 0, i32 0, i32 2, i32 addrspace(1)* %ptr, i32 addrspace(1)* null, i32 addrspace(1)* %ptr, i32 addrspace(1)* null)
+ %call1 = call zeroext i1 @llvm.experimental.gc.result.i1(i32 %safepoint_token)
+ %a = call i32 addrspace(1)* @llvm.experimental.gc.relocate.p1i32(i32 %safepoint_token, i32 6, i32 6)
+ %b = call i32 addrspace(1)* @llvm.experimental.gc.relocate.p1i32(i32 %safepoint_token, i32 7, i32 7)
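+; The two trailing i32 operands of each gc.relocate index into the
+; statepoint's argument list, naming the base and derived pointers
+; (identical here, since these are base pointers).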
+;
+ ret i1 %call1
+}
+
+declare i32 @llvm.experimental.gc.statepoint.p0f_i1f(i1 ()*, i32, i32, ...)
+declare i1 @llvm.experimental.gc.result.i1(i32)
+declare i32 addrspace(1)* @llvm.experimental.gc.relocate.p1i32(i32, i32, i32) #3
+
+
+; CHECK-LABEL: .section .llvm_stackmaps
+; CHECK-NEXT: __LLVM_StackMaps:
+; Header
+; CHECK-NEXT: .byte 1
+; CHECK-NEXT: .byte 0
+; CHECK-NEXT: .short 0
+; Num Functions
+; CHECK-NEXT: .long 1
+; Num LargeConstants
+; CHECK-NEXT: .long 0
+; Num Callsites
+; CHECK-NEXT: .long 1
+
+; Functions and stack size
+; CHECK-NEXT: .quad test
+; CHECK-NEXT: .quad 24
+
+; Large Constants
+; Statepoint ID only
+; CHECK: .quad 2882400000
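+; (2882400000 is 0xABCDEF00, the default statepoint ID)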
+
+; Callsites
+; Constant arguments
+; CHECK: .long .Ltmp1-test
+; CHECK: .short 0
+; CHECK: .short 8
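+; Each location record below is: a type byte (4 = constant, 2 = direct),
+; a size byte, a short with the Dwarf register number (7 is RSP on
+; x86-64), and a long with the offset or small constant.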
+; SmallConstant (0)
+; CHECK: .byte 4
+; CHECK: .byte 8
+; CHECK: .short 0
+; CHECK: .long 0
+; SmallConstant (2)
+; CHECK: .byte 4
+; CHECK: .byte 8
+; CHECK: .short 0
+; CHECK: .long 2
+; Direct Spill Slot [RSP+0]
+; CHECK: .byte 2
+; CHECK: .byte 8
+; CHECK: .short 7
+; CHECK: .long 0
+; SmallConstant (0)
+; CHECK: .byte 4
+; CHECK: .byte 8
+; CHECK: .short 0
+; CHECK: .long 0
+; SmallConstant (0)
+; CHECK: .byte 4
+; CHECK: .byte 8
+; CHECK: .short 0
+; CHECK: .long 0
+; SmallConstant (0)
+; CHECK: .byte 4
+; CHECK: .byte 8
+; CHECK: .short 0
+; CHECK: .long 0
+; Direct Spill Slot [RSP+0]
+; CHECK: .byte 2
+; CHECK: .byte 8
+; CHECK: .short 7
+; CHECK: .long 0
+; Direct Spill Slot [RSP+0]
+; CHECK: .byte 2
+; CHECK: .byte 8
+; CHECK: .short 7
+; CHECK: .long 0
+
+; No Padding or LiveOuts
+; CHECK: .short 0
+; CHECK: .short 0
+; CHECK: .align 8
+
+
diff --git a/test/CodeGen/X86/switch-bt.ll b/test/CodeGen/X86/switch-bt.ll
index a80002b..065d8cd 100644
--- a/test/CodeGen/X86/switch-bt.ll
+++ b/test/CodeGen/X86/switch-bt.ll
@@ -99,3 +99,61 @@ if.then:
if.end:
ret void
}
+
+; Ensure that optimizing for jump tables doesn't needlessly degrade the
+; binary tree search that gets created. See PR22262.
+define void @test4(i32 %x, i32* %y) {
+; CHECK-LABEL: test4:
+
+entry:
+ switch i32 %x, label %sw.default [
+ i32 10, label %sw.bb
+ i32 20, label %sw.bb1
+ i32 30, label %sw.bb2
+ i32 40, label %sw.bb3
+ i32 50, label %sw.bb4
+ i32 60, label %sw.bb5
+ ]
+sw.bb:
+ store i32 1, i32* %y
+ br label %sw.epilog
+sw.bb1:
+ store i32 2, i32* %y
+ br label %sw.epilog
+sw.bb2:
+ store i32 3, i32* %y
+ br label %sw.epilog
+sw.bb3:
+ store i32 4, i32* %y
+ br label %sw.epilog
+sw.bb4:
+ store i32 5, i32* %y
+ br label %sw.epilog
+sw.bb5:
+ store i32 6, i32* %y
+ br label %sw.epilog
+sw.default:
+ store i32 7, i32* %y
+ br label %sw.epilog
+sw.epilog:
+ ret void
+
+; The balanced binary switch here would start with a comparison against 39, but
+; it is currently starting with 29 because of the density-sum heuristic.
+; CHECK: cmpl $29
+; CHECK: jg
+; CHECK: cmpl $10
+; CHECK: jne
+; CHECK: cmpl $49
+; CHECK: jg
+; CHECK: cmpl $30
+; CHECK: jne
+; CHECK: cmpl $20
+; CHECK: jne
+; CHECK: cmpl $50
+; CHECK: jne
+; CHECK: cmpl $40
+; CHECK: jne
+; CHECK: cmpl $60
+; CHECK: jne
+}
diff --git a/test/CodeGen/X86/switch-default-only.ll b/test/CodeGen/X86/switch-default-only.ll
new file mode 100644
index 0000000..360ace5
--- /dev/null
+++ b/test/CodeGen/X86/switch-default-only.ll
@@ -0,0 +1,14 @@
+; RUN: llc -O0 -fast-isel=false -march=x86 < %s | FileCheck %s
+
+; No need for branching when the default and only destination follows
+; immediately after the switch.
+; CHECK-LABEL: no_branch:
+; CHECK-NOT: jmp
+; CHECK: ret
+
+define void @no_branch(i32 %x) {
+entry:
+ switch i32 %x, label %exit [ ]
+exit:
+ ret void
+}
diff --git a/test/CodeGen/X86/switch-jump-table.ll b/test/CodeGen/X86/switch-jump-table.ll
new file mode 100644
index 0000000..a84fb4a
--- /dev/null
+++ b/test/CodeGen/X86/switch-jump-table.ll
@@ -0,0 +1,52 @@
+; RUN: llc -mtriple=i686-pc-gnu-linux < %s | FileCheck %s
+
+
+; An unreachable default destination is replaced with the most popular case label.
+
+define void @sum2(i32 %x, i32* %to) {
+; CHECK-LABEL: sum2:
+; CHECK: movl 4(%esp), [[REG:%e[a-z]{2}]]
+; CHECK: cmpl $3, [[REG]]
+; CHECK: jbe .LBB0_1
+; CHECK: movl $4
+; CHECK: retl
+; CHECK-LABEL: .LBB0_1:
+; CHECK-NEXT: jmpl *.LJTI0_0(,[[REG]],4)
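+; (the jump scales the case value by the 4-byte size of each table entry)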
+
+entry:
+ switch i32 %x, label %default [
+ i32 0, label %bb0
+ i32 1, label %bb1
+ i32 2, label %bb2
+ i32 3, label %bb3
+ i32 4, label %bb4
+ i32 5, label %bb4
+ ]
+bb0:
+ store i32 0, i32* %to
+ br label %exit
+bb1:
+ store i32 1, i32* %to
+ br label %exit
+bb2:
+ store i32 2, i32* %to
+ br label %exit
+bb3:
+ store i32 3, i32* %to
+ br label %exit
+bb4:
+ store i32 4, i32* %to
+ br label %exit
+exit:
+ ret void
+default:
+ unreachable
+
+; The jump table has four entries.
+; CHECK-LABEL: .LJTI0_0:
+; CHECK-NEXT: .long .LBB0_2
+; CHECK-NEXT: .long .LBB0_3
+; CHECK-NEXT: .long .LBB0_4
+; CHECK-NEXT: .long .LBB0_5
+; CHECK-NOT: .long
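+; Cases 4 and 5 both branch to bb4; as the most popular destination it
+; stands in for the unreachable default, so the table only needs entries
+; for cases 0 through 3.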
+}
diff --git a/test/CodeGen/X86/tail-call-win64.ll b/test/CodeGen/X86/tail-call-win64.ll
new file mode 100644
index 0000000..23e9280
--- /dev/null
+++ b/test/CodeGen/X86/tail-call-win64.ll
@@ -0,0 +1,36 @@
+; RUN: llc -mtriple=x86_64-windows -show-mc-encoding < %s | FileCheck %s
+
+; The Win64 ABI wants tail jmps to use a REX_W prefix so it can distinguish
+; in-function jumps from function-exiting jumps.
+
+define void @tail_jmp_reg(i32, i32, void ()* %fptr) {
+ tail call void ()* %fptr()
+ ret void
+}
+
+; Check that we merge the REX prefixes into 0x49 instead of 0x48, 0x41.
+
+; CHECK-LABEL: tail_jmp_reg:
+; CHECK: rex64 jmpq *%r8
+; CHECK: encoding: [0x49,0xff,0xe0]
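+; (REX.W alone is 0x48 and REX.B alone is 0x41; OR-ing the flag bits
+; yields the single prefix byte 0x49.)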
+
+declare void @tail_tgt()
+
+define void @tail_jmp_imm() {
+ tail call void @tail_tgt()
+ ret void
+}
+
+; CHECK-LABEL: tail_jmp_imm:
+; CHECK: rex64 jmp tail_tgt
+
+@g_fptr = global void ()* @tail_tgt
+
+define void @tail_jmp_mem() {
+ %fptr = load void ()** @g_fptr
+ tail call void ()* %fptr()
+ ret void
+}
+
+; CHECK-LABEL: tail_jmp_mem:
+; CHECK: rex64 jmpq *g_fptr(%rip)
diff --git a/test/CodeGen/X86/tailcall-64.ll b/test/CodeGen/X86/tailcall-64.ll
index deab1dc..25d3802 100644
--- a/test/CodeGen/X86/tailcall-64.ll
+++ b/test/CodeGen/X86/tailcall-64.ll
@@ -182,7 +182,7 @@ define { i64, i64 } @crash(i8* %this) {
; Check that we can fold an indexed load into a tail call instruction.
; CHECK: fold_indexed_load
; CHECK: leaq (%rsi,%rsi,4), %[[RAX:r..]]
-; CHECK: jmpq *16(%{{r..}},%[[RAX]],8) # TAILCALL
+; CHECK: jmpq *16(%{{r..}},%[[RAX]],8) ## TAILCALL
%struct.funcs = type { i32 (i8*, i32*, i32)*, i32 (i8*)*, i32 (i8*)*, i32 (i8*, i32)*, i32 }
@func_table = external global [0 x %struct.funcs]
define void @fold_indexed_load(i8* %mbstr, i64 %idxprom) nounwind uwtable ssp {
@@ -207,7 +207,7 @@ entry:
; }
;
; CHECK-LABEL: rdar12282281
-; CHECK: jmpq *%r11 # TAILCALL
+; CHECK: jmpq *%r11 ## TAILCALL
@funcs = external constant [0 x i32 (i8*, ...)*]
define i32 @rdar12282281(i32 %n) nounwind uwtable ssp {
diff --git a/test/CodeGen/X86/tailcall-returndup-void.ll b/test/CodeGen/X86/tailcall-returndup-void.ll
index c1d6312..2c39cb4 100644
--- a/test/CodeGen/X86/tailcall-returndup-void.ll
+++ b/test/CodeGen/X86/tailcall-returndup-void.ll
@@ -3,9 +3,9 @@
; CHECK-NOT: ret
@sES_closure = external global [0 x i64]
-declare cc10 void @sEH_info(i64* noalias nocapture, i64* noalias nocapture, i64* noalias nocapture, i64, i64, i64) align 8
+declare ghccc void @sEH_info(i64* noalias nocapture, i64* noalias nocapture, i64* noalias nocapture, i64, i64, i64) align 8
-define cc10 void @rBM_info(i64* noalias nocapture %Base_Arg, i64* noalias nocapture %Sp_Arg, i64* noalias nocapture %Hp_Arg, i64 %R1_Arg, i64 %R2_Arg, i64 %R3_Arg) nounwind align 8 {
+define ghccc void @rBM_info(i64* noalias nocapture %Base_Arg, i64* noalias nocapture %Sp_Arg, i64* noalias nocapture %Hp_Arg, i64 %R1_Arg, i64 %R2_Arg, i64 %R3_Arg) nounwind align 8 {
c263:
%ln265 = getelementptr inbounds i64* %Sp_Arg, i64 -2
%ln266 = ptrtoint i64* %ln265 to i64
@@ -18,11 +18,11 @@ n26p: ; preds = %c263
n1ZQ.i: ; preds = %n26p
%ln1ZT.i = load i64* getelementptr inbounds ([0 x i64]* @sES_closure, i64 0, i64 0), align 8
%ln1ZU.i = inttoptr i64 %ln1ZT.i to void (i64*, i64*, i64*, i64, i64, i64)*
- tail call cc10 void %ln1ZU.i(i64* %Base_Arg, i64* %Sp_Arg, i64* %Hp_Arg, i64 ptrtoint ([0 x i64]* @sES_closure to i64), i64 ptrtoint ([0 x i64]* @sES_closure to i64), i64 %R3_Arg) nounwind
+ tail call ghccc void %ln1ZU.i(i64* %Base_Arg, i64* %Sp_Arg, i64* %Hp_Arg, i64 ptrtoint ([0 x i64]* @sES_closure to i64), i64 ptrtoint ([0 x i64]* @sES_closure to i64), i64 %R3_Arg) nounwind
br label %rBL_info.exit
c1ZP.i: ; preds = %n26p
- tail call cc10 void @sEH_info(i64* %Base_Arg, i64* %Sp_Arg, i64* %Hp_Arg, i64 ptrtoint ([0 x i64]* @sES_closure to i64), i64 ptrtoint ([0 x i64]* @sES_closure to i64), i64 %R3_Arg) nounwind
+ tail call ghccc void @sEH_info(i64* %Base_Arg, i64* %Sp_Arg, i64* %Hp_Arg, i64 ptrtoint ([0 x i64]* @sES_closure to i64), i64 ptrtoint ([0 x i64]* @sES_closure to i64), i64 %R3_Arg) nounwind
br label %rBL_info.exit
rBL_info.exit: ; preds = %c1ZP.i, %n1ZQ.i
@@ -32,6 +32,6 @@ c26a: ; preds = %c263
%ln27h = getelementptr inbounds i64* %Base_Arg, i64 -2
%ln27j = load i64* %ln27h, align 8
%ln27k = inttoptr i64 %ln27j to void (i64*, i64*, i64*, i64, i64, i64)*
- tail call cc10 void %ln27k(i64* %Base_Arg, i64* %Sp_Arg, i64* %Hp_Arg, i64 %R1_Arg, i64 %R2_Arg, i64 %R3_Arg) nounwind
+ tail call ghccc void %ln27k(i64* %Base_Arg, i64* %Sp_Arg, i64* %Hp_Arg, i64 %R1_Arg, i64 %R2_Arg, i64 %R3_Arg) nounwind
ret void
}
diff --git a/test/CodeGen/X86/tls-models.ll b/test/CodeGen/X86/tls-models.ll
index 8e3e958..0fd7853 100644
--- a/test/CodeGen/X86/tls-models.ll
+++ b/test/CodeGen/X86/tls-models.ll
@@ -128,6 +128,14 @@ entry:
; DARWIN: _internal_ie@TLVP
}
+define i32 @PR22083() {
+entry:
+ ret i32 ptrtoint (i32* @external_ie to i32)
+ ; X64-LABEL: PR22083:
+ ; X64: movq external_ie@GOTTPOFF(%rip), %rax
+ ; X64_PIC-LABEL: PR22083:
+ ; X64_PIC: movq external_ie@GOTTPOFF(%rip), %rax
+}
; ----- localexec specified -----
diff --git a/test/CodeGen/X86/trap.ll b/test/CodeGen/X86/trap.ll
index 149c667..ca33f9e 100644
--- a/test/CodeGen/X86/trap.ll
+++ b/test/CodeGen/X86/trap.ll
@@ -1,15 +1,25 @@
-; RUN: llc < %s -march=x86 -mcpu=yonah | FileCheck %s
+; RUN: llc < %s -mtriple=i686-apple-darwin8 -mcpu=yonah | FileCheck %s -check-prefix=DARWIN
+; RUN: llc < %s -mtriple=i686-unknown-linux -mcpu=yonah | FileCheck %s -check-prefix=LINUX
+; RUN: llc < %s -mtriple=x86_64-scei-ps4 | FileCheck %s -check-prefix=PS4
-; CHECK-LABEL: test0:
-; CHECK: ud2
+; DARWIN-LABEL: test0:
+; DARWIN: ud2
+; LINUX-LABEL: test0:
+; LINUX: ud2
+; PS4-LABEL: test0:
+; PS4: ud2
define i32 @test0() noreturn nounwind {
entry:
tail call void @llvm.trap( )
unreachable
}
-; CHECK-LABEL: test1:
-; CHECK: int3
+; DARWIN-LABEL: test1:
+; DARWIN: int3
+; LINUX-LABEL: test1:
+; LINUX: int3
+; PS4-LABEL: test1:
+; PS4: int $65
define i32 @test1() noreturn nounwind {
entry:
tail call void @llvm.debugtrap( )
diff --git a/test/CodeGen/X86/uint_to_fp-2.ll b/test/CodeGen/X86/uint_to_fp-2.ll
index e47f154..4b594f7 100644
--- a/test/CodeGen/X86/uint_to_fp-2.ll
+++ b/test/CodeGen/X86/uint_to_fp-2.ll
@@ -7,7 +7,7 @@ define float @test1(i32 %x) nounwind readnone {
; CHECK-NEXT: pushl %eax
; CHECK-NEXT: movsd .LCPI0_0, %xmm0
; CHECK-NEXT: movd {{[0-9]+}}(%esp), %xmm1
-; CHECK-NEXT: orps %xmm0, %xmm1
+; CHECK-NEXT: orpd %xmm0, %xmm1
; CHECK-NEXT: subsd %xmm0, %xmm1
; CHECK-NEXT: xorps %xmm0, %xmm0
; CHECK-NEXT: cvtsd2ss %xmm1, %xmm0
diff --git a/test/CodeGen/X86/unaligned-32-byte-memops.ll b/test/CodeGen/X86/unaligned-32-byte-memops.ll
new file mode 100644
index 0000000..9cec17d
--- /dev/null
+++ b/test/CodeGen/X86/unaligned-32-byte-memops.ll
@@ -0,0 +1,288 @@
+; RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=corei7-avx | FileCheck %s --check-prefix=SANDYB --check-prefix=CHECK
+; RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=core-avx-i | FileCheck %s --check-prefix=SANDYB --check-prefix=CHECK
+; RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=btver2 | FileCheck %s --check-prefix=BTVER2 --check-prefix=CHECK
+; RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=core-avx2 | FileCheck %s --check-prefix=HASWELL --check-prefix=CHECK
+
+; On Sandy Bridge or Ivy Bridge, we should not generate an unaligned 32-byte load
+; because that is slower than two 16-byte loads.
+; Other AVX-capable chips don't have that problem.
+
+define <8 x float> @load32bytes(<8 x float>* %Ap) {
+ ; CHECK-LABEL: load32bytes
+
+ ; SANDYB: vmovaps
+ ; SANDYB: vinsertf128
+ ; SANDYB: retq
+
+ ; BTVER2: vmovups
+ ; BTVER2: retq
+
+ ; HASWELL: vmovups
+ ; HASWELL: retq
+
+ %A = load <8 x float>* %Ap, align 16
+ ret <8 x float> %A
+}
+
+; On Sandy Bridge or Ivy Bridge, we should not generate an unaligned 32-byte store
+; because that is slower than two 16-byte stores.
+; Other AVX-capable chips don't have that problem.
+
+define void @store32bytes(<8 x float> %A, <8 x float>* %P) {
+ ; CHECK-LABEL: store32bytes
+
+ ; SANDYB: vextractf128
+ ; SANDYB: vmovaps
+ ; SANDYB: retq
+
+ ; BTVER2: vmovups
+ ; BTVER2: retq
+
+ ; HASWELL: vmovups
+ ; HASWELL: retq
+
+ store <8 x float> %A, <8 x float>* %P, align 16
+ ret void
+}
+
+; Merge two consecutive 16-byte subvector loads into a single 32-byte load
+; if it's faster.
+
+declare <8 x float> @llvm.x86.avx.vinsertf128.ps.256(<8 x float>, <4 x float>, i8)
+
+; Use the vinsertf128 intrinsic to model source code
+; that explicitly uses AVX intrinsics.
+define <8 x float> @combine_16_byte_loads(<4 x float>* %ptr) {
+ ; CHECK-LABEL: combine_16_byte_loads
+
+ ; SANDYB: vmovups
+ ; SANDYB-NEXT: vinsertf128
+ ; SANDYB-NEXT: retq
+
+ ; BTVER2: vmovups
+ ; BTVER2-NEXT: retq
+
+ ; HASWELL: vmovups
+ ; HASWELL-NEXT: retq
+
+ %ptr1 = getelementptr inbounds <4 x float>* %ptr, i64 1
+ %ptr2 = getelementptr inbounds <4 x float>* %ptr, i64 2
+ %v1 = load <4 x float>* %ptr1, align 1
+ %v2 = load <4 x float>* %ptr2, align 1
+ %shuffle = shufflevector <4 x float> %v1, <4 x float> undef, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 undef, i32 undef, i32 undef, i32 undef>
+ %v3 = tail call <8 x float> @llvm.x86.avx.vinsertf128.ps.256(<8 x float> %shuffle, <4 x float> %v2, i8 1)
+ ret <8 x float> %v3
+}
+
+; Swap the operands of the shufflevector and vinsertf128 to ensure that the
+; pattern still matches.
+define <8 x float> @combine_16_byte_loads_swap(<4 x float>* %ptr) {
+ ; CHECK-LABEL: combine_16_byte_loads_swap
+
+ ; SANDYB: vmovups
+ ; SANDYB-NEXT: vinsertf128
+ ; SANDYB-NEXT: retq
+
+ ; BTVER2: vmovups
+ ; BTVER2-NEXT: retq
+
+ ; HASWELL: vmovups
+ ; HASWELL-NEXT: retq
+
+ %ptr1 = getelementptr inbounds <4 x float>* %ptr, i64 2
+ %ptr2 = getelementptr inbounds <4 x float>* %ptr, i64 3
+ %v1 = load <4 x float>* %ptr1, align 1
+ %v2 = load <4 x float>* %ptr2, align 1
+ %shuffle = shufflevector <4 x float> %v2, <4 x float> undef, <8 x i32> <i32 undef, i32 undef, i32 undef, i32 undef, i32 0, i32 1, i32 2, i32 3>
+ %v3 = tail call <8 x float> @llvm.x86.avx.vinsertf128.ps.256(<8 x float> %shuffle, <4 x float> %v1, i8 0)
+ ret <8 x float> %v3
+}
+
+; Replace the vinsertf128 intrinsic with a shufflevector as might be
+; expected from auto-vectorized code.
+define <8 x float> @combine_16_byte_loads_no_intrinsic(<4 x float>* %ptr) {
+ ; CHECK-LABEL: combine_16_byte_loads_no_intrinsic
+
+ ; SANDYB: vmovups
+ ; SANDYB-NEXT: vinsertf128
+ ; SANDYB-NEXT: retq
+
+ ; BTVER2: vmovups
+ ; BTVER2-NEXT: retq
+
+ ; HASWELL: vmovups
+ ; HASWELL-NEXT: retq
+
+ %ptr1 = getelementptr inbounds <4 x float>* %ptr, i64 3
+ %ptr2 = getelementptr inbounds <4 x float>* %ptr, i64 4
+ %v1 = load <4 x float>* %ptr1, align 1
+ %v2 = load <4 x float>* %ptr2, align 1
+ %v3 = shufflevector <4 x float> %v1, <4 x float> %v2, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+ ret <8 x float> %v3
+}
+
+; Swap the order of the shufflevector operands to ensure that the
+; pattern still matches.
+define <8 x float> @combine_16_byte_loads_no_intrinsic_swap(<4 x float>* %ptr) {
+ ; CHECK-LABEL: combine_16_byte_loads_no_intrinsic_swap
+
+ ; SANDYB: vmovups
+ ; SANDYB-NEXT: vinsertf128
+ ; SANDYB-NEXT: retq
+
+ ; BTVER2: vmovups
+ ; BTVER2-NEXT: retq
+
+ ; HASWELL: vmovups
+ ; HASWELL-NEXT: retq
+
+ %ptr1 = getelementptr inbounds <4 x float>* %ptr, i64 4
+ %ptr2 = getelementptr inbounds <4 x float>* %ptr, i64 5
+ %v1 = load <4 x float>* %ptr1, align 1
+ %v2 = load <4 x float>* %ptr2, align 1
+ %v3 = shufflevector <4 x float> %v2, <4 x float> %v1, <8 x i32> <i32 4, i32 5, i32 6, i32 7, i32 0, i32 1, i32 2, i32 3>
+ ret <8 x float> %v3
+}
+
+; Check each element type other than float to make sure it is handled correctly.
+; Use the loaded values with an 'add' to make sure we're using the correct load type.
+; Even though BtVer2 has fast 32-byte loads, we should not generate those for
+; 256-bit integer vectors because BtVer2 doesn't have AVX2.
+
+define <4 x i64> @combine_16_byte_loads_i64(<2 x i64>* %ptr, <4 x i64> %x) {
+ ; CHECK-LABEL: combine_16_byte_loads_i64
+
+ ; SANDYB: vextractf128
+ ; SANDYB-NEXT: vpaddq
+ ; SANDYB-NEXT: vpaddq
+ ; SANDYB-NEXT: vinsertf128
+ ; SANDYB-NEXT: retq
+
+ ; BTVER2: vextractf128
+ ; BTVER2-NEXT: vpaddq
+ ; BTVER2-NEXT: vpaddq
+ ; BTVER2-NEXT: vinsertf128
+ ; BTVER2-NEXT: retq
+
+ ; HASWELL-NOT: vextract
+ ; HASWELL: vpaddq
+ ; HASWELL-NEXT: retq
+
+ %ptr1 = getelementptr inbounds <2 x i64>* %ptr, i64 5
+ %ptr2 = getelementptr inbounds <2 x i64>* %ptr, i64 6
+ %v1 = load <2 x i64>* %ptr1, align 1
+ %v2 = load <2 x i64>* %ptr2, align 1
+ %v3 = shufflevector <2 x i64> %v1, <2 x i64> %v2, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %v4 = add <4 x i64> %v3, %x
+ ret <4 x i64> %v4
+}
+
+define <8 x i32> @combine_16_byte_loads_i32(<4 x i32>* %ptr, <8 x i32> %x) {
+ ; CHECK-LABEL: combine_16_byte_loads_i32
+
+ ; SANDYB: vextractf128
+ ; SANDYB-NEXT: vpaddd
+ ; SANDYB-NEXT: vpaddd
+ ; SANDYB-NEXT: vinsertf128
+ ; SANDYB-NEXT: retq
+
+ ; BTVER2: vextractf128
+ ; BTVER2-NEXT: vpaddd
+ ; BTVER2-NEXT: vpaddd
+ ; BTVER2-NEXT: vinsertf128
+ ; BTVER2-NEXT: retq
+
+ ; HASWELL-NOT: vextract
+ ; HASWELL: vpaddd
+ ; HASWELL-NEXT: retq
+
+ %ptr1 = getelementptr inbounds <4 x i32>* %ptr, i64 6
+ %ptr2 = getelementptr inbounds <4 x i32>* %ptr, i64 7
+ %v1 = load <4 x i32>* %ptr1, align 1
+ %v2 = load <4 x i32>* %ptr2, align 1
+ %v3 = shufflevector <4 x i32> %v1, <4 x i32> %v2, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+ %v4 = add <8 x i32> %v3, %x
+ ret <8 x i32> %v4
+}
+
+define <16 x i16> @combine_16_byte_loads_i16(<8 x i16>* %ptr, <16 x i16> %x) {
+ ; CHECK-LABEL: combine_16_byte_loads_i16
+
+ ; SANDYB: vextractf128
+ ; SANDYB-NEXT: vpaddw
+ ; SANDYB-NEXT: vpaddw
+ ; SANDYB-NEXT: vinsertf128
+ ; SANDYB-NEXT: retq
+
+ ; BTVER2: vextractf128
+ ; BTVER2-NEXT: vpaddw
+ ; BTVER2-NEXT: vpaddw
+ ; BTVER2-NEXT: vinsertf128
+ ; BTVER2-NEXT: retq
+
+ ; HASWELL-NOT: vextract
+ ; HASWELL: vpaddw
+ ; HASWELL-NEXT: retq
+
+ %ptr1 = getelementptr inbounds <8 x i16>* %ptr, i64 7
+ %ptr2 = getelementptr inbounds <8 x i16>* %ptr, i64 8
+ %v1 = load <8 x i16>* %ptr1, align 1
+ %v2 = load <8 x i16>* %ptr2, align 1
+ %v3 = shufflevector <8 x i16> %v1, <8 x i16> %v2, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %v4 = add <16 x i16> %v3, %x
+ ret <16 x i16> %v4
+}
+
+define <32 x i8> @combine_16_byte_loads_i8(<16 x i8>* %ptr, <32 x i8> %x) {
+ ; CHECK-LABEL: combine_16_byte_loads_i8
+
+ ; SANDYB: vextractf128
+ ; SANDYB-NEXT: vpaddb
+ ; SANDYB-NEXT: vpaddb
+ ; SANDYB-NEXT: vinsertf128
+ ; SANDYB-NEXT: retq
+
+ ; BTVER2: vextractf128
+ ; BTVER2-NEXT: vpaddb
+ ; BTVER2-NEXT: vpaddb
+ ; BTVER2-NEXT: vinsertf128
+ ; BTVER2-NEXT: retq
+
+ ; HASWELL-NOT: vextract
+ ; HASWELL: vpaddb
+ ; HASWELL-NEXT: retq
+
+ %ptr1 = getelementptr inbounds <16 x i8>* %ptr, i64 8
+ %ptr2 = getelementptr inbounds <16 x i8>* %ptr, i64 9
+ %v1 = load <16 x i8>* %ptr1, align 1
+ %v2 = load <16 x i8>* %ptr2, align 1
+ %v3 = shufflevector <16 x i8> %v1, <16 x i8> %v2, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
+ %v4 = add <32 x i8> %v3, %x
+ ret <32 x i8> %v4
+}
+
+define <4 x double> @combine_16_byte_loads_double(<2 x double>* %ptr, <4 x double> %x) {
+ ; CHECK-LABEL: combine_16_byte_loads_double
+
+ ; SANDYB: vmovupd
+ ; SANDYB-NEXT: vinsertf128
+ ; SANDYB-NEXT: vaddpd
+ ; SANDYB-NEXT: retq
+
+ ; BTVER2-NOT: vinsertf128
+ ; BTVER2: vaddpd
+ ; BTVER2-NEXT: retq
+
+ ; HASWELL-NOT: vinsertf128
+ ; HASWELL: vaddpd
+ ; HASWELL-NEXT: retq
+
+ %ptr1 = getelementptr inbounds <2 x double>* %ptr, i64 9
+ %ptr2 = getelementptr inbounds <2 x double>* %ptr, i64 10
+ %v1 = load <2 x double>* %ptr1, align 1
+ %v2 = load <2 x double>* %ptr2, align 1
+ %v3 = shufflevector <2 x double> %v1, <2 x double> %v2, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %v4 = fadd <4 x double> %v3, %x
+ ret <4 x double> %v4
+}
+
diff --git a/test/CodeGen/X86/unknown-location.ll b/test/CodeGen/X86/unknown-location.ll
index ca9ea4a..140121b 100644
--- a/test/CodeGen/X86/unknown-location.ll
+++ b/test/CodeGen/X86/unknown-location.ll
@@ -21,16 +21,16 @@ entry:
!llvm.dbg.cu = !{!3}
!llvm.module.flags = !{!12}
-!0 = metadata !{metadata !"0x101\00x\001\000", metadata !1, metadata !2, metadata !6} ; [ DW_TAG_arg_variable ]
-!1 = metadata !{metadata !"0x2e\00foo\00foo\00foo\001\000\001\000\006\000\000\001", metadata !10, metadata !2, metadata !4, null, i32 (i32, i32, i32, i32)* @foo, null, null, null} ; [ DW_TAG_subprogram ]
-!2 = metadata !{metadata !"0x29", metadata !10} ; [ DW_TAG_file_type ]
-!3 = metadata !{metadata !"0x11\0012\00producer\000\00\000\00\000", metadata !10, metadata !11, metadata !11, metadata !9, null, null} ; [ DW_TAG_compile_unit ]
-!4 = metadata !{metadata !"0x15\00\000\000\000\000\000\000", metadata !10, metadata !2, null, metadata !5, null, null, null} ; [ DW_TAG_subroutine_type ] [line 0, size 0, align 0, offset 0] [from ]
-!5 = metadata !{metadata !6}
-!6 = metadata !{metadata !"0x24\00int\000\0032\0032\000\000\005", metadata !10, metadata !2} ; [ DW_TAG_base_type ]
-!7 = metadata !{metadata !"0xb\001\0030\000", metadata !2, metadata !1} ; [ DW_TAG_lexical_block ]
-!8 = metadata !{i32 4, i32 3, metadata !7, null}
-!9 = metadata !{metadata !1}
-!10 = metadata !{metadata !"test.c", metadata !"/dir"}
-!11 = metadata !{i32 0}
-!12 = metadata !{i32 1, metadata !"Debug Info Version", i32 2}
+!0 = !{!"0x101\00x\001\000", !1, !2, !6} ; [ DW_TAG_arg_variable ]
+!1 = !{!"0x2e\00foo\00foo\00foo\001\000\001\000\006\000\000\001", !10, !2, !4, null, i32 (i32, i32, i32, i32)* @foo, null, null, null} ; [ DW_TAG_subprogram ]
+!2 = !{!"0x29", !10} ; [ DW_TAG_file_type ]
+!3 = !{!"0x11\0012\00producer\000\00\000\00\000", !10, !11, !11, !9, null, null} ; [ DW_TAG_compile_unit ]
+!4 = !{!"0x15\00\000\000\000\000\000\000", !10, !2, null, !5, null, null, null} ; [ DW_TAG_subroutine_type ] [line 0, size 0, align 0, offset 0] [from ]
+!5 = !{!6}
+!6 = !{!"0x24\00int\000\0032\0032\000\000\005", !10, !2} ; [ DW_TAG_base_type ]
+!7 = !{!"0xb\001\0030\000", !2, !1} ; [ DW_TAG_lexical_block ]
+!8 = !MDLocation(line: 4, column: 3, scope: !7)
+!9 = !{!1}
+!10 = !{!"test.c", !"/dir"}
+!11 = !{i32 0}
+!12 = !{i32 1, !"Debug Info Version", i32 2}
diff --git a/test/CodeGen/X86/utf16-cfstrings.ll b/test/CodeGen/X86/utf16-cfstrings.ll
index af76a33..c7ec3eb 100644
--- a/test/CodeGen/X86/utf16-cfstrings.ll
+++ b/test/CodeGen/X86/utf16-cfstrings.ll
@@ -29,7 +29,7 @@ declare void @NSLog(%0*, ...)
!llvm.module.flags = !{!0, !1, !2, !3}
-!0 = metadata !{i32 1, metadata !"Objective-C Version", i32 2}
-!1 = metadata !{i32 1, metadata !"Objective-C Image Info Version", i32 0}
-!2 = metadata !{i32 1, metadata !"Objective-C Image Info Section", metadata !"__DATA, __objc_imageinfo, regular, no_dead_strip"}
-!3 = metadata !{i32 4, metadata !"Objective-C Garbage Collection", i32 0}
+!0 = !{i32 1, !"Objective-C Version", i32 2}
+!1 = !{i32 1, !"Objective-C Image Info Version", i32 0}
+!2 = !{i32 1, !"Objective-C Image Info Section", !"__DATA, __objc_imageinfo, regular, no_dead_strip"}
+!3 = !{i32 4, !"Objective-C Garbage Collection", i32 0}
diff --git a/test/CodeGen/X86/v2f32.ll b/test/CodeGen/X86/v2f32.ll
index b9bd80f9..7beed52 100644
--- a/test/CodeGen/X86/v2f32.ll
+++ b/test/CodeGen/X86/v2f32.ll
@@ -5,8 +5,7 @@
define void @test1(<2 x float> %Q, float *%P2) nounwind {
; X64-LABEL: test1:
; X64: # BB#0:
-; X64-NEXT: movaps %xmm0, %xmm1
-; X64-NEXT: shufps {{.*#+}} xmm1 = xmm1[1,1,2,3]
+; X64-NEXT: movshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
; X64-NEXT: addss %xmm0, %xmm1
; X64-NEXT: movss %xmm1, (%rdi)
; X64-NEXT: retq
@@ -14,8 +13,7 @@ define void @test1(<2 x float> %Q, float *%P2) nounwind {
; X32-LABEL: test1:
; X32: # BB#0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT: movaps %xmm0, %xmm1
-; X32-NEXT: shufps {{.*#+}} xmm1 = xmm1[1,1,2,3]
+; X32-NEXT: movshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
; X32-NEXT: addss %xmm0, %xmm1
; X32-NEXT: movss %xmm1, (%eax)
; X32-NEXT: retl
diff --git a/test/CodeGen/X86/vaargs.ll b/test/CodeGen/X86/vaargs.ll
index ddeb7a3..43c895e 100644
--- a/test/CodeGen/X86/vaargs.ll
+++ b/test/CodeGen/X86/vaargs.ll
@@ -1,4 +1,4 @@
-; RUN: llc -mcpu=corei7-avx %s -o - | FileCheck %s --check-prefix=CHECK --check-prefix=NO-FLAGS
+; RUN: llc -verify-machineinstrs -mcpu=corei7-avx %s -o - | FileCheck %s --check-prefix=CHECK --check-prefix=NO-FLAGS
target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
target triple = "x86_64-apple-macosx10.9.0"
diff --git a/test/CodeGen/X86/vec-loadsingles-alignment.ll b/test/CodeGen/X86/vec-loadsingles-alignment.ll
new file mode 100644
index 0000000..6aa2adb
--- /dev/null
+++ b/test/CodeGen/X86/vec-loadsingles-alignment.ll
@@ -0,0 +1,35 @@
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s
+
+@e = global [8 x i32] [i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8], align 16
+@d = global [8 x i32] [i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1], align 16
+
+; The global 'e' has 16-byte alignment, so make sure we don't generate an
+; aligned 32-byte load instruction when we combine the load+insert sequence.
+
+define i32 @subb() nounwind ssp {
+; CHECK-LABEL: subb:
+; CHECK: vmovups e(%rip), %ymm
+entry:
+ %0 = load i32* getelementptr inbounds ([8 x i32]* @e, i64 0, i64 7), align 4
+ %1 = load i32* getelementptr inbounds ([8 x i32]* @e, i64 0, i64 6), align 8
+ %2 = load i32* getelementptr inbounds ([8 x i32]* @e, i64 0, i64 5), align 4
+ %3 = load i32* getelementptr inbounds ([8 x i32]* @e, i64 0, i64 4), align 16
+ %4 = load i32* getelementptr inbounds ([8 x i32]* @e, i64 0, i64 3), align 4
+ %5 = load i32* getelementptr inbounds ([8 x i32]* @e, i64 0, i64 2), align 8
+ %6 = load i32* getelementptr inbounds ([8 x i32]* @e, i64 0, i64 1), align 4
+ %7 = load i32* getelementptr inbounds ([8 x i32]* @e, i64 0, i64 0), align 16
+ %vecinit.i = insertelement <8 x i32> undef, i32 %7, i32 0
+ %vecinit1.i = insertelement <8 x i32> %vecinit.i, i32 %6, i32 1
+ %vecinit2.i = insertelement <8 x i32> %vecinit1.i, i32 %5, i32 2
+ %vecinit3.i = insertelement <8 x i32> %vecinit2.i, i32 %4, i32 3
+ %vecinit4.i = insertelement <8 x i32> %vecinit3.i, i32 %3, i32 4
+ %vecinit5.i = insertelement <8 x i32> %vecinit4.i, i32 %2, i32 5
+ %vecinit6.i = insertelement <8 x i32> %vecinit5.i, i32 %1, i32 6
+ %vecinit7.i = insertelement <8 x i32> %vecinit6.i, i32 %0, i32 7
+ %8 = bitcast <8 x i32> %vecinit7.i to <32 x i8>
+ tail call void @llvm.x86.avx.storeu.dq.256(i8* bitcast ([8 x i32]* @d to i8*), <32 x i8> %8)
+ ret i32 0
+}
+
+declare void @llvm.x86.avx.storeu.dq.256(i8*, <32 x i8>) nounwind
+
diff --git a/test/CodeGen/X86/vec_cast2.ll b/test/CodeGen/X86/vec_cast2.ll
index 8600c48..07cd195 100644
--- a/test/CodeGen/X86/vec_cast2.ll
+++ b/test/CodeGen/X86/vec_cast2.ll
@@ -5,7 +5,7 @@ define <8 x float> @foo1_8(<8 x i8> %src) {
; CHECK-LABEL: foo1_8:
; CHECK: ## BB#0:
; CHECK-NEXT: vpunpckhwd {{.*#+}} xmm1 = xmm0[4,4,5,5,6,6,7,7]
-; CHECK-NEXT: vpmovzxwd %xmm0, %xmm0
+; CHECK-NEXT: vpmovzxwd {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
; CHECK-NEXT: vpslld $24, %xmm0, %xmm0
; CHECK-NEXT: vpsrad $24, %xmm0, %xmm0
; CHECK-NEXT: vpslld $24, %xmm1, %xmm1
@@ -16,7 +16,7 @@ define <8 x float> @foo1_8(<8 x i8> %src) {
;
; CHECK-WIDE-LABEL: foo1_8:
; CHECK-WIDE: ## BB#0:
-; CHECK-WIDE-NEXT: vpmovzxbd %xmm0, %xmm1
+; CHECK-WIDE-NEXT: vpmovzxbd {{.*#+}} xmm1 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
; CHECK-WIDE-NEXT: vpslld $24, %xmm1, %xmm1
; CHECK-WIDE-NEXT: vpsrad $24, %xmm1, %xmm1
; CHECK-WIDE-NEXT: vpunpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
@@ -40,7 +40,7 @@ define <4 x float> @foo1_4(<4 x i8> %src) {
;
; CHECK-WIDE-LABEL: foo1_4:
; CHECK-WIDE: ## BB#0:
-; CHECK-WIDE-NEXT: vpmovzxbd %xmm0, %xmm0
+; CHECK-WIDE-NEXT: vpmovzxbd {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
; CHECK-WIDE-NEXT: vpslld $24, %xmm0, %xmm0
; CHECK-WIDE-NEXT: vpsrad $24, %xmm0, %xmm0
; CHECK-WIDE-NEXT: vcvtdq2ps %xmm0, %xmm0
@@ -52,7 +52,7 @@ define <4 x float> @foo1_4(<4 x i8> %src) {
define <8 x float> @foo2_8(<8 x i8> %src) {
; CHECK-LABEL: foo2_8:
; CHECK: ## BB#0:
-; CHECK-NEXT: vpmovzxwd %xmm0, %xmm1
+; CHECK-NEXT: vpmovzxwd {{.*#+}} xmm1 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
; CHECK-NEXT: vpunpckhwd {{.*#+}} xmm0 = xmm0[4,4,5,5,6,6,7,7]
; CHECK-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
; CHECK-NEXT: vandps LCPI2_0, %ymm0, %ymm0
@@ -61,20 +61,9 @@ define <8 x float> @foo2_8(<8 x i8> %src) {
;
; CHECK-WIDE-LABEL: foo2_8:
; CHECK-WIDE: ## BB#0:
-; CHECK-WIDE-NEXT: vxorps %xmm1, %xmm1, %xmm1
-; CHECK-WIDE-NEXT: vextractf128 $1, %ymm1, %xmm2
-; CHECK-WIDE-NEXT: vmovdqa {{.*#+}} xmm3 = <1,3,5,7,9,11,13,15,u,u,u,u,u,u,u,u>
-; CHECK-WIDE-NEXT: vpshufb %xmm3, %xmm2, %xmm4
-; CHECK-WIDE-NEXT: vmovdqa {{.*#+}} xmm5 = <2,6,10,14,u,u,u,u,u,u,u,u,u,u,u,u>
-; CHECK-WIDE-NEXT: vpshufb %xmm5, %xmm2, %xmm2
-; CHECK-WIDE-NEXT: vpshufd {{.*#+}} xmm6 = xmm0[1,1,2,3]
-; CHECK-WIDE-NEXT: vpunpcklbw {{.*#+}} xmm2 = xmm6[0],xmm2[0],xmm6[1],xmm2[1],xmm6[2],xmm2[2],xmm6[3],xmm2[3],xmm6[4],xmm2[4],xmm6[5],xmm2[5],xmm6[6],xmm2[6],xmm6[7],xmm2[7]
-; CHECK-WIDE-NEXT: vpunpcklbw {{.*#+}} xmm2 = xmm2[0],xmm4[0],xmm2[1],xmm4[1],xmm2[2],xmm4[2],xmm2[3],xmm4[3],xmm2[4],xmm4[4],xmm2[5],xmm4[5],xmm2[6],xmm4[6],xmm2[7],xmm4[7]
-; CHECK-WIDE-NEXT: vpshufb %xmm3, %xmm1, %xmm3
-; CHECK-WIDE-NEXT: vpshufb %xmm5, %xmm1, %xmm1
-; CHECK-WIDE-NEXT: vpunpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
-; CHECK-WIDE-NEXT: vpunpcklbw {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1],xmm0[2],xmm3[2],xmm0[3],xmm3[3],xmm0[4],xmm3[4],xmm0[5],xmm3[5],xmm0[6],xmm3[6],xmm0[7],xmm3[7]
-; CHECK-WIDE-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; CHECK-WIDE-NEXT: vpmovzxbd {{.*#+}} xmm1 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
+; CHECK-WIDE-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero
+; CHECK-WIDE-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
; CHECK-WIDE-NEXT: vcvtdq2ps %ymm0, %ymm0
; CHECK-WIDE-NEXT: retl
%res = uitofp <8 x i8> %src to <8 x float>
@@ -90,7 +79,7 @@ define <4 x float> @foo2_4(<4 x i8> %src) {
;
; CHECK-WIDE-LABEL: foo2_4:
; CHECK-WIDE: ## BB#0:
-; CHECK-WIDE-NEXT: vpmovzxbd %xmm0, %xmm0
+; CHECK-WIDE-NEXT: vpmovzxbd {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
; CHECK-WIDE-NEXT: vcvtdq2ps %xmm0, %xmm0
; CHECK-WIDE-NEXT: retl
%res = uitofp <4 x i8> %src to <4 x float>
@@ -118,7 +107,7 @@ define <8 x i8> @foo3_8(<8 x float> %src) {
; CHECK-WIDE-NEXT: vcvttss2si %xmm1, %ecx
; CHECK-WIDE-NEXT: movzbl %cl, %ecx
; CHECK-WIDE-NEXT: orl %eax, %ecx
-; CHECK-WIDE-NEXT: vpermilps {{.*#+}} xmm1 = xmm0[1,1,2,3]
+; CHECK-WIDE-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
; CHECK-WIDE-NEXT: vcvttss2si %xmm1, %eax
; CHECK-WIDE-NEXT: shll $8, %eax
; CHECK-WIDE-NEXT: vcvttss2si %xmm0, %edx
@@ -127,7 +116,7 @@ define <8 x i8> @foo3_8(<8 x float> %src) {
; CHECK-WIDE-NEXT: vpinsrw $0, %edx, %xmm0, %xmm1
; CHECK-WIDE-NEXT: vpinsrw $1, %ecx, %xmm1, %xmm1
; CHECK-WIDE-NEXT: vextractf128 $1, %ymm0, %xmm0
-; CHECK-WIDE-NEXT: vpermilps {{.*#+}} xmm2 = xmm0[1,1,2,3]
+; CHECK-WIDE-NEXT: vmovshdup {{.*#+}} xmm2 = xmm0[1,1,3,3]
; CHECK-WIDE-NEXT: vcvttss2si %xmm2, %eax
; CHECK-WIDE-NEXT: shll $8, %eax
; CHECK-WIDE-NEXT: vcvttss2si %xmm0, %ecx
@@ -163,7 +152,7 @@ define <4 x i8> @foo3_4(<4 x float> %src) {
; CHECK-WIDE-NEXT: vcvttss2si %xmm1, %ecx
; CHECK-WIDE-NEXT: movzbl %cl, %ecx
; CHECK-WIDE-NEXT: orl %eax, %ecx
-; CHECK-WIDE-NEXT: vpermilps {{.*#+}} xmm1 = xmm0[1,1,2,3]
+; CHECK-WIDE-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
; CHECK-WIDE-NEXT: vcvttss2si %xmm1, %eax
; CHECK-WIDE-NEXT: shll $8, %eax
; CHECK-WIDE-NEXT: vcvttss2si %xmm0, %edx
diff --git a/test/CodeGen/X86/vec_clear.ll b/test/CodeGen/X86/vec_clear.ll
deleted file mode 100644
index 166d436..0000000
--- a/test/CodeGen/X86/vec_clear.ll
+++ /dev/null
@@ -1,13 +0,0 @@
-; RUN: llc < %s -march=x86 -mattr=+sse2 -mtriple=i386-apple-darwin -o %t
-; RUN: not grep and %t
-; RUN: not grep psrldq %t
-; RUN: grep xorps %t
-
-define <4 x float> @test(<4 x float>* %v1) nounwind {
- %tmp = load <4 x float>* %v1 ; <<4 x float>> [#uses=1]
- %tmp15 = bitcast <4 x float> %tmp to <2 x i64> ; <<2 x i64>> [#uses=1]
- %tmp24 = and <2 x i64> %tmp15, bitcast (<4 x i32> < i32 0, i32 0, i32 -1, i32 -1 > to <2 x i64>) ; <<2 x i64>> [#uses=1]
- %tmp31 = bitcast <2 x i64> %tmp24 to <4 x float> ; <<4 x float>> [#uses=1]
- ret <4 x float> %tmp31
-}
-
diff --git a/test/CodeGen/X86/vec_compare.ll b/test/CodeGen/X86/vec_compare.ll
index 365fe92..df3eae3 100644
--- a/test/CodeGen/X86/vec_compare.ll
+++ b/test/CodeGen/X86/vec_compare.ll
@@ -45,7 +45,7 @@ define <4 x i32> @test4(<4 x i32> %A, <4 x i32> %B) nounwind {
define <2 x i64> @test5(<2 x i64> %A, <2 x i64> %B) nounwind {
; CHECK-LABEL: test5:
; CHECK: pcmpeqd
-; CHECK: pshufd $-79
+; CHECK: pshufd $177
; CHECK: pand
; CHECK: ret
%C = icmp eq <2 x i64> %A, %B
@@ -56,7 +56,7 @@ define <2 x i64> @test5(<2 x i64> %A, <2 x i64> %B) nounwind {
define <2 x i64> @test6(<2 x i64> %A, <2 x i64> %B) nounwind {
; CHECK-LABEL: test6:
; CHECK: pcmpeqd
-; CHECK: pshufd $-79
+; CHECK: pshufd $177
; CHECK: pand
; CHECK: pcmpeqd
; CHECK: pxor
@@ -77,11 +77,11 @@ define <2 x i64> @test7(<2 x i64> %A, <2 x i64> %B) nounwind {
; CHECK: pxor [[CONSTREG]]
; CHECK: pxor [[CONSTREG]]
; CHECK: pcmpgtd %xmm1
-; CHECK: pshufd $-96
+; CHECK: pshufd $160
; CHECK: pcmpeqd
-; CHECK: pshufd $-11
+; CHECK: pshufd $245
; CHECK: pand
-; CHECK: pshufd $-11
+; CHECK: pshufd $245
; CHECK: por
; CHECK: ret
%C = icmp sgt <2 x i64> %A, %B
@@ -94,11 +94,11 @@ define <2 x i64> @test8(<2 x i64> %A, <2 x i64> %B) nounwind {
; CHECK: pxor
; CHECK: pxor
; CHECK: pcmpgtd %xmm0
-; CHECK: pshufd $-96
+; CHECK: pshufd $160
; CHECK: pcmpeqd
-; CHECK: pshufd $-11
+; CHECK: pshufd $245
; CHECK: pand
-; CHECK: pshufd $-11
+; CHECK: pshufd $245
; CHECK: por
; CHECK: ret
%C = icmp slt <2 x i64> %A, %B
@@ -111,11 +111,11 @@ define <2 x i64> @test9(<2 x i64> %A, <2 x i64> %B) nounwind {
; CHECK: pxor
; CHECK: pxor
; CHECK: pcmpgtd %xmm0
-; CHECK: pshufd $-96
+; CHECK: pshufd $160
; CHECK: pcmpeqd
-; CHECK: pshufd $-11
+; CHECK: pshufd $245
; CHECK: pand
-; CHECK: pshufd $-11
+; CHECK: pshufd $245
; CHECK: por
; CHECK: pcmpeqd
; CHECK: pxor
@@ -130,11 +130,11 @@ define <2 x i64> @test10(<2 x i64> %A, <2 x i64> %B) nounwind {
; CHECK: pxor
; CHECK: pxor
; CHECK: pcmpgtd %xmm1
-; CHECK: pshufd $-96
+; CHECK: pshufd $160
; CHECK: pcmpeqd
-; CHECK: pshufd $-11
+; CHECK: pshufd $245
; CHECK: pand
-; CHECK: pshufd $-11
+; CHECK: pshufd $245
; CHECK: por
; CHECK: pcmpeqd
; CHECK: pxor
@@ -155,11 +155,11 @@ define <2 x i64> @test11(<2 x i64> %A, <2 x i64> %B) nounwind {
; CHECK: pxor [[CONSTREG]]
; CHECK: pxor [[CONSTREG]]
; CHECK: pcmpgtd %xmm1
-; CHECK: pshufd $-96
+; CHECK: pshufd $160
; CHECK: pcmpeqd
-; CHECK: pshufd $-11
+; CHECK: pshufd $245
; CHECK: pand
-; CHECK: pshufd $-11
+; CHECK: pshufd $245
; CHECK: por
; CHECK: ret
%C = icmp ugt <2 x i64> %A, %B
@@ -172,11 +172,11 @@ define <2 x i64> @test12(<2 x i64> %A, <2 x i64> %B) nounwind {
; CHECK: pxor
; CHECK: pxor
; CHECK: pcmpgtd %xmm0
-; CHECK: pshufd $-96
+; CHECK: pshufd $160
; CHECK: pcmpeqd
-; CHECK: pshufd $-11
+; CHECK: pshufd $245
; CHECK: pand
-; CHECK: pshufd $-11
+; CHECK: pshufd $245
; CHECK: por
; CHECK: ret
%C = icmp ult <2 x i64> %A, %B
@@ -189,11 +189,11 @@ define <2 x i64> @test13(<2 x i64> %A, <2 x i64> %B) nounwind {
; CHECK: pxor
; CHECK: pxor
; CHECK: pcmpgtd %xmm0
-; CHECK: pshufd $-96
+; CHECK: pshufd $160
; CHECK: pcmpeqd
-; CHECK: pshufd $-11
+; CHECK: pshufd $245
; CHECK: pand
-; CHECK: pshufd $-11
+; CHECK: pshufd $245
; CHECK: por
; CHECK: pcmpeqd
; CHECK: pxor
@@ -208,11 +208,11 @@ define <2 x i64> @test14(<2 x i64> %A, <2 x i64> %B) nounwind {
; CHECK: pxor
; CHECK: pxor
; CHECK: pcmpgtd %xmm1
-; CHECK: pshufd $-96
+; CHECK: pshufd $160
; CHECK: pcmpeqd
-; CHECK: pshufd $-11
+; CHECK: pshufd $245
; CHECK: pand
-; CHECK: pshufd $-11
+; CHECK: pshufd $245
; CHECK: por
; CHECK: pcmpeqd
; CHECK: pxor
diff --git a/test/CodeGen/X86/vec_extract-avx.ll b/test/CodeGen/X86/vec_extract-avx.ll
new file mode 100644
index 0000000..fbb8417
--- /dev/null
+++ b/test/CodeGen/X86/vec_extract-avx.ll
@@ -0,0 +1,82 @@
+target triple = "x86_64-unknown-unknown"
+
+; RUN: llc < %s -march=x86-64 -mattr=+avx | FileCheck %s
+
+; When extracting multiple consecutive elements from a larger
+; vector into a smaller one, do it efficiently. We should use
+; an EXTRACT_SUBVECTOR node internally rather than a bunch of
+; single element extractions.
+
+; Extracting the low elements only requires using the right kind of store.
+define void @low_v8f32_to_v4f32(<8 x float> %v, <4 x float>* %ptr) {
+ %ext0 = extractelement <8 x float> %v, i32 0
+ %ext1 = extractelement <8 x float> %v, i32 1
+ %ext2 = extractelement <8 x float> %v, i32 2
+ %ext3 = extractelement <8 x float> %v, i32 3
+ %ins0 = insertelement <4 x float> undef, float %ext0, i32 0
+ %ins1 = insertelement <4 x float> %ins0, float %ext1, i32 1
+ %ins2 = insertelement <4 x float> %ins1, float %ext2, i32 2
+ %ins3 = insertelement <4 x float> %ins2, float %ext3, i32 3
+ store <4 x float> %ins3, <4 x float>* %ptr, align 16
+ ret void
+
+; CHECK-LABEL: low_v8f32_to_v4f32
+; CHECK: vmovaps
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+}
+
+; Extracting the high elements requires just one AVX instruction.
+define void @high_v8f32_to_v4f32(<8 x float> %v, <4 x float>* %ptr) {
+ %ext0 = extractelement <8 x float> %v, i32 4
+ %ext1 = extractelement <8 x float> %v, i32 5
+ %ext2 = extractelement <8 x float> %v, i32 6
+ %ext3 = extractelement <8 x float> %v, i32 7
+ %ins0 = insertelement <4 x float> undef, float %ext0, i32 0
+ %ins1 = insertelement <4 x float> %ins0, float %ext1, i32 1
+ %ins2 = insertelement <4 x float> %ins1, float %ext2, i32 2
+ %ins3 = insertelement <4 x float> %ins2, float %ext3, i32 3
+ store <4 x float> %ins3, <4 x float>* %ptr, align 16
+ ret void
+
+; CHECK-LABEL: high_v8f32_to_v4f32
+; CHECK: vextractf128
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+}
+
+; Make sure the element type doesn't alter the codegen. Note that
+; if we were actually using the vector in this function and had AVX2,
+; we would generate vextracti128 (the integer version).
+define void @high_v8i32_to_v4i32(<8 x i32> %v, <4 x i32>* %ptr) {
+ %ext0 = extractelement <8 x i32> %v, i32 4
+ %ext1 = extractelement <8 x i32> %v, i32 5
+ %ext2 = extractelement <8 x i32> %v, i32 6
+ %ext3 = extractelement <8 x i32> %v, i32 7
+ %ins0 = insertelement <4 x i32> undef, i32 %ext0, i32 0
+ %ins1 = insertelement <4 x i32> %ins0, i32 %ext1, i32 1
+ %ins2 = insertelement <4 x i32> %ins1, i32 %ext2, i32 2
+ %ins3 = insertelement <4 x i32> %ins2, i32 %ext3, i32 3
+ store <4 x i32> %ins3, <4 x i32>* %ptr, align 16
+ ret void
+
+; CHECK-LABEL: high_v8i32_to_v4i32
+; CHECK: vextractf128
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+}
+
+; Make sure that the element size doesn't alter the codegen.
+define void @high_v4f64_to_v2f64(<4 x double> %v, <2 x double>* %ptr) {
+ %ext0 = extractelement <4 x double> %v, i32 2
+ %ext1 = extractelement <4 x double> %v, i32 3
+ %ins0 = insertelement <2 x double> undef, double %ext0, i32 0
+ %ins1 = insertelement <2 x double> %ins0, double %ext1, i32 1
+ store <2 x double> %ins1, <2 x double>* %ptr, align 16
+ ret void
+
+; CHECK-LABEL: high_v4f64_to_v2f64
+; CHECK: vextractf128
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+}
diff --git a/test/CodeGen/X86/vec_extract-mmx.ll b/test/CodeGen/X86/vec_extract-mmx.ll
new file mode 100644
index 0000000..c6c93a1
--- /dev/null
+++ b/test/CodeGen/X86/vec_extract-mmx.ll
@@ -0,0 +1,71 @@
+; RUN: llc < %s -march=x86-64 -mattr=+mmx,+sse2 | FileCheck %s
+
+define i32 @test0(<1 x i64>* %v4) {
+; CHECK-LABEL: test0:
+; CHECK: # BB#0:{{.*}} %entry
+; CHECK: pshufw $238, (%[[REG:[a-z]+]]), %mm0
+; CHECK-NEXT: movd %mm0, %eax
+; CHECK-NEXT: addl $32, %eax
+; CHECK-NEXT: retq
+entry:
+ %v5 = load <1 x i64>* %v4, align 8
+ %v12 = bitcast <1 x i64> %v5 to <4 x i16>
+ %v13 = bitcast <4 x i16> %v12 to x86_mmx
+ %v14 = tail call x86_mmx @llvm.x86.sse.pshuf.w(x86_mmx %v13, i8 -18)
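+ ; i8 -18 is 238 (0xEE): the shuffle selects words [2,3,2,3], duplicating
+ ; the high 32 bits, which matches the pshufw $238 checked above.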
+ %v15 = bitcast x86_mmx %v14 to <4 x i16>
+ %v16 = bitcast <4 x i16> %v15 to <1 x i64>
+ %v17 = extractelement <1 x i64> %v16, i32 0
+ %v18 = bitcast i64 %v17 to <2 x i32>
+ %v19 = extractelement <2 x i32> %v18, i32 0
+ %v20 = add i32 %v19, 32
+ ret i32 %v20
+}
+
+define i32 @test1(i32* nocapture readonly %ptr) {
+; CHECK-LABEL: test1:
+; CHECK: # BB#0:{{.*}} %entry
+; CHECK: movd (%[[REG]]), %mm0
+; CHECK-NEXT: pshufw $232, %mm0, %mm0
+; CHECK-NEXT: movd %mm0, %eax
+; CHECK-NEXT: emms
+; CHECK-NEXT: retq
+entry:
+ %0 = load i32* %ptr, align 4
+ %1 = insertelement <2 x i32> undef, i32 %0, i32 0
+ %2 = insertelement <2 x i32> %1, i32 0, i32 1
+ %3 = bitcast <2 x i32> %2 to x86_mmx
+ %4 = bitcast x86_mmx %3 to i64
+ %5 = bitcast i64 %4 to <4 x i16>
+ %6 = bitcast <4 x i16> %5 to x86_mmx
+ %7 = tail call x86_mmx @llvm.x86.sse.pshuf.w(x86_mmx %6, i8 -24)
+ %8 = bitcast x86_mmx %7 to <4 x i16>
+ %9 = bitcast <4 x i16> %8 to <1 x i64>
+ %10 = extractelement <1 x i64> %9, i32 0
+ %11 = bitcast i64 %10 to <2 x i32>
+ %12 = extractelement <2 x i32> %11, i32 0
+ tail call void @llvm.x86.mmx.emms()
+ ret i32 %12
+}
+
+define i32 @test2(i32* nocapture readonly %ptr) {
+; CHECK-LABEL: test2:
+; CHECK: # BB#0:{{.*}} %entry
+; CHECK: pshufw $232, (%[[REG]]), %mm0
+; CHECK-NEXT: movd %mm0, %eax
+; CHECK-NEXT: emms
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast i32* %ptr to x86_mmx*
+ %1 = load x86_mmx* %0, align 8
+ %2 = tail call x86_mmx @llvm.x86.sse.pshuf.w(x86_mmx %1, i8 -24)
+ %3 = bitcast x86_mmx %2 to <4 x i16>
+ %4 = bitcast <4 x i16> %3 to <1 x i64>
+ %5 = extractelement <1 x i64> %4, i32 0
+ %6 = bitcast i64 %5 to <2 x i32>
+ %7 = extractelement <2 x i32> %6, i32 0
+ tail call void @llvm.x86.mmx.emms()
+ ret i32 %7
+}
+
+declare x86_mmx @llvm.x86.sse.pshuf.w(x86_mmx, i8)
+declare void @llvm.x86.mmx.emms()
diff --git a/test/CodeGen/X86/vec_fabs.ll b/test/CodeGen/X86/vec_fabs.ll
index ac02acf..bfefbcf 100644
--- a/test/CodeGen/X86/vec_fabs.ll
+++ b/test/CodeGen/X86/vec_fabs.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=corei7-avx | FileCheck %s
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=avx | FileCheck %s
define <2 x double> @fabs_v2f64(<2 x double> %p)
diff --git a/test/CodeGen/X86/vec_fneg.ll b/test/CodeGen/X86/vec_fneg.ll
index 9743f71..a85ae98 100644
--- a/test/CodeGen/X86/vec_fneg.ll
+++ b/test/CodeGen/X86/vec_fneg.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=corei7 | FileCheck %s
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=sse | FileCheck %s
; FNEG is defined as subtraction from -0.0.
diff --git a/test/CodeGen/X86/vec_insert-5.ll b/test/CodeGen/X86/vec_insert-5.ll
index b72044a..b77a1b5 100644
--- a/test/CodeGen/X86/vec_insert-5.ll
+++ b/test/CodeGen/X86/vec_insert-5.ll
@@ -25,8 +25,8 @@ define <4 x float> @t2(<4 x float>* %P) nounwind {
; CHECK-NEXT: movl {{[0-9]+}}(%esp), %eax
; CHECK-NEXT: movaps (%eax), %xmm1
; CHECK-NEXT: xorps %xmm0, %xmm0
-; CHECK-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,0],xmm0[0,0]
-; CHECK-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,0],xmm1[2,0]
+; CHECK-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,0],xmm0[2,0]
+; CHECK-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,0]
; CHECK-NEXT: retl
%tmp1 = load <4 x float>* %P
%tmp2 = shufflevector <4 x float> %tmp1, <4 x float> zeroinitializer, <4 x i32> < i32 4, i32 4, i32 4, i32 0 >
@@ -37,9 +37,9 @@ define <4 x float> @t3(<4 x float>* %P) nounwind {
; CHECK-LABEL: t3:
; CHECK: # BB#0:
; CHECK-NEXT: movl {{[0-9]+}}(%esp), %eax
-; CHECK-NEXT: movaps (%eax), %xmm0
-; CHECK-NEXT: xorps %xmm1, %xmm1
-; CHECK-NEXT: shufps {{.*#+}} xmm0 = xmm0[2,3],xmm1[0,0]
+; CHECK-NEXT: movapd (%eax), %xmm0
+; CHECK-NEXT: xorpd %xmm1, %xmm1
+; CHECK-NEXT: unpckhpd {{.*#+}} xmm0 = xmm0[1],xmm1[1]
; CHECK-NEXT: retl
%tmp1 = load <4 x float>* %P
%tmp2 = shufflevector <4 x float> %tmp1, <4 x float> zeroinitializer, <4 x i32> < i32 2, i32 3, i32 4, i32 4 >
@@ -52,8 +52,8 @@ define <4 x float> @t4(<4 x float>* %P) nounwind {
; CHECK-NEXT: movl {{[0-9]+}}(%esp), %eax
; CHECK-NEXT: movaps (%eax), %xmm0
; CHECK-NEXT: xorps %xmm1, %xmm1
-; CHECK-NEXT: shufps {{.*#+}} xmm0 = xmm0[3,0],xmm1[0,0]
-; CHECK-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[0,0]
+; CHECK-NEXT: shufps {{.*#+}} xmm0 = xmm0[3,0],xmm1[1,0]
+; CHECK-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[2,3]
; CHECK-NEXT: retl
%tmp1 = load <4 x float>* %P
%tmp2 = shufflevector <4 x float> zeroinitializer, <4 x float> %tmp1, <4 x i32> < i32 7, i32 0, i32 0, i32 0 >
@@ -63,7 +63,7 @@ define <4 x float> @t4(<4 x float>* %P) nounwind {
define <16 x i8> @t5(<16 x i8> %x) nounwind {
; CHECK-LABEL: t5:
; CHECK: # BB#0:
-; CHECK-NEXT: psrldq {{.*#+}} xmm0 = xmm0[1,2,3,4,5,6,7,8,9,10,11,12,13,14,15],zero
+; CHECK-NEXT: psrlw $8, %xmm0
; CHECK-NEXT: retl
%s = shufflevector <16 x i8> %x, <16 x i8> zeroinitializer, <16 x i32> <i32 1, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 17>
ret <16 x i8> %s
@@ -72,7 +72,7 @@ define <16 x i8> @t5(<16 x i8> %x) nounwind {
define <16 x i8> @t6(<16 x i8> %x) nounwind {
; CHECK-LABEL: t6:
; CHECK: # BB#0:
-; CHECK-NEXT: psrldq {{.*#+}} xmm0 = xmm0[1,2,3,4,5,6,7,8,9,10,11,12,13,14,15],zero
+; CHECK-NEXT: psrlw $8, %xmm0
; CHECK-NEXT: retl
%s = shufflevector <16 x i8> %x, <16 x i8> undef, <16 x i32> <i32 1, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
ret <16 x i8> %s
@@ -86,3 +86,21 @@ define <16 x i8> @t7(<16 x i8> %x) nounwind {
%s = shufflevector <16 x i8> %x, <16 x i8> undef, <16 x i32> <i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 1, i32 2>
ret <16 x i8> %s
}
+
+define <16 x i8> @t8(<16 x i8> %x) nounwind {
+; CHECK-LABEL: t8:
+; CHECK: # BB#0:
+; CHECK-NEXT: psrldq {{.*#+}} xmm0 = xmm0[1,2,3,4,5,6,7,8,9,10,11,12,13,14,15],zero
+; CHECK-NEXT: retl
+ %s = shufflevector <16 x i8> %x, <16 x i8> zeroinitializer, <16 x i32> <i32 1, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 8, i32 9, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 17>
+ ret <16 x i8> %s
+}
+
+define <16 x i8> @t9(<16 x i8> %x) nounwind {
+; CHECK-LABEL: t9:
+; CHECK: # BB#0:
+; CHECK-NEXT: psrldq {{.*#+}} xmm0 = xmm0[1,2,3,4,5,6,7,8,9,10,11,12,13,14,15],zero
+; CHECK-NEXT: retl
+ %s = shufflevector <16 x i8> %x, <16 x i8> undef, <16 x i32> <i32 1, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 7, i32 8, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 14, i32 undef, i32 undef>
+ ret <16 x i8> %s
+}
diff --git a/test/CodeGen/X86/vec_insert-mmx.ll b/test/CodeGen/X86/vec_insert-mmx.ll
new file mode 100644
index 0000000..d397d80
--- /dev/null
+++ b/test/CodeGen/X86/vec_insert-mmx.ll
@@ -0,0 +1,58 @@
+; RUN: llc < %s -mtriple=i686-darwin -mattr=+mmx,+sse2 | FileCheck %s -check-prefix=X86-32
+; RUN: llc < %s -mtriple=x86_64-darwin -mattr=+mmx,+sse4.1 | FileCheck %s -check-prefix=X86-64
+
+; This is not an MMX operation; it is promoted to XMM.
+define x86_mmx @t0(i32 %A) nounwind {
+; X86-32-LABEL: t0:
+; X86-32: ## BB#0:
+; X86-32: movd {{[0-9]+}}(%esp), %xmm0
+; X86-32-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,0,0,1]
+; X86-32-NEXT: movlpd %xmm0, (%esp)
+; X86-32-NEXT: movq (%esp), %mm0
+; X86-32-NEXT: addl $12, %esp
+; X86-32-NEXT: retl
+ %tmp3 = insertelement <2 x i32> < i32 0, i32 undef >, i32 %A, i32 1
+ %tmp4 = bitcast <2 x i32> %tmp3 to x86_mmx
+ ret x86_mmx %tmp4
+}
+
+define <8 x i8> @t1(i8 zeroext %x) nounwind {
+; X86-32-LABEL: t1:
+; X86-32: ## BB#0:
+; X86-32-NOT: movl
+; X86-32-NEXT: movd {{[0-9]+}}(%esp), %xmm0
+; X86-32-NEXT: retl
+ %r = insertelement <8 x i8> undef, i8 %x, i32 0
+ ret <8 x i8> %r
+}
+
+; PR2574
+define <2 x float> @t2(<2 x float> %a0) {
+; X86-32-LABEL: t2:
+; X86-32: ## BB#0:
+; X86-32-NEXT: xorps %xmm0, %xmm0
+; X86-32-NEXT: retl
+ %v1 = insertelement <2 x float> %a0, float 0.000000e+00, i32 0
+ %v2 = insertelement <2 x float> %v1, float 0.000000e+00, i32 1
+ ret <2 x float> %v2
+}
+
+@g0 = external global i16
+@g1 = external global <4 x i16>
+
+; PR2562
+define void @t3() {
+; X86-64-LABEL: t3:
+; X86-64: ## BB#0:
+; X86-64: pmovzxwd (%rcx)
+; X86-64-NEXT: movzwl
+; X86-64-NEXT: pinsrd $0
+; X86-64-NEXT: pshufb {{.*#+}} xmm0 = xmm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15]
+; X86-64-NEXT: movq %xmm0
+; X86-64-NEXT: retq
+ load i16* @g0
+ load <4 x i16>* @g1
+ insertelement <4 x i16> %2, i16 %1, i32 0
+ store <4 x i16> %3, <4 x i16>* @g1
+ ret void
+}
diff --git a/test/CodeGen/X86/vec_loadsingles.ll b/test/CodeGen/X86/vec_loadsingles.ll
index 8812c4f..fd132a5 100644
--- a/test/CodeGen/X86/vec_loadsingles.ll
+++ b/test/CodeGen/X86/vec_loadsingles.ll
@@ -1,12 +1,145 @@
-; RUN: llc < %s -march=x86 -mattr=+sse2 | grep movq
-
-define <4 x float> @a(<4 x float> %a, float* nocapture %p) nounwind readonly {
-entry:
- %tmp1 = load float* %p
- %vecins = insertelement <4 x float> undef, float %tmp1, i32 0
- %add.ptr = getelementptr float* %p, i32 1
- %tmp5 = load float* %add.ptr
- %vecins7 = insertelement <4 x float> %vecins, float %tmp5, i32 1
- ret <4 x float> %vecins7
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx,-slow-unaligned-mem-32 | FileCheck %s --check-prefix=ALL --check-prefix=FAST32
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx,+slow-unaligned-mem-32 | FileCheck %s --check-prefix=ALL --check-prefix=SLOW32
+
+define <4 x float> @merge_2_floats(float* nocapture %p) nounwind readonly {
+ %tmp1 = load float* %p
+ %vecins = insertelement <4 x float> undef, float %tmp1, i32 0
+ %add.ptr = getelementptr float* %p, i32 1
+ %tmp5 = load float* %add.ptr
+ %vecins7 = insertelement <4 x float> %vecins, float %tmp5, i32 1
+ ret <4 x float> %vecins7
+
+; ALL-LABEL: merge_2_floats
+; ALL: vmovq
+; ALL-NEXT: retq
+}
+
+; Test case generated from a crash when trying to treat a load of the
+; first two i64s of a <4 x i64> as a load of two i32s.
+define <4 x i64> @merge_2_floats_into_4() {
+ %1 = load i64** undef, align 8
+ %2 = getelementptr inbounds i64* %1, i64 0
+ %3 = load i64* %2
+ %4 = insertelement <4 x i64> undef, i64 %3, i32 0
+ %5 = load i64** undef, align 8
+ %6 = getelementptr inbounds i64* %5, i64 1
+ %7 = load i64* %6
+ %8 = insertelement <4 x i64> %4, i64 %7, i32 1
+ %9 = shufflevector <4 x i64> %8, <4 x i64> undef, <4 x i32> <i32 0, i32 1, i32 4, i32 5>
+ ret <4 x i64> %9
+
+; ALL-LABEL: merge_2_floats_into_4
+; ALL: vmovups
+; ALL-NEXT: retq
+}
+
+define <4 x float> @merge_4_floats(float* %ptr) {
+ %a = load float* %ptr, align 8
+ %vec = insertelement <4 x float> undef, float %a, i32 0
+ %idx1 = getelementptr inbounds float* %ptr, i64 1
+ %b = load float* %idx1, align 8
+ %vec2 = insertelement <4 x float> %vec, float %b, i32 1
+ %idx3 = getelementptr inbounds float* %ptr, i64 2
+ %c = load float* %idx3, align 8
+ %vec4 = insertelement <4 x float> %vec2, float %c, i32 2
+ %idx5 = getelementptr inbounds float* %ptr, i64 3
+ %d = load float* %idx5, align 8
+ %vec6 = insertelement <4 x float> %vec4, float %d, i32 3
+ ret <4 x float> %vec6
+
+; ALL-LABEL: merge_4_floats
+; ALL: vmovups
+; ALL-NEXT: retq
+}
+
+; PR21710 ( http://llvm.org/bugs/show_bug.cgi?id=21710 )
+; Make sure that 32-byte vectors are handled efficiently.
+; If the target has slow 32-byte accesses, we should still generate
+; 16-byte loads.
+
+define <8 x float> @merge_8_floats(float* %ptr) {
+ %a = load float* %ptr, align 4
+ %vec = insertelement <8 x float> undef, float %a, i32 0
+ %idx1 = getelementptr inbounds float* %ptr, i64 1
+ %b = load float* %idx1, align 4
+ %vec2 = insertelement <8 x float> %vec, float %b, i32 1
+ %idx3 = getelementptr inbounds float* %ptr, i64 2
+ %c = load float* %idx3, align 4
+ %vec4 = insertelement <8 x float> %vec2, float %c, i32 2
+ %idx5 = getelementptr inbounds float* %ptr, i64 3
+ %d = load float* %idx5, align 4
+ %vec6 = insertelement <8 x float> %vec4, float %d, i32 3
+ %idx7 = getelementptr inbounds float* %ptr, i64 4
+ %e = load float* %idx7, align 4
+ %vec8 = insertelement <8 x float> %vec6, float %e, i32 4
+ %idx9 = getelementptr inbounds float* %ptr, i64 5
+ %f = load float* %idx9, align 4
+ %vec10 = insertelement <8 x float> %vec8, float %f, i32 5
+ %idx11 = getelementptr inbounds float* %ptr, i64 6
+ %g = load float* %idx11, align 4
+ %vec12 = insertelement <8 x float> %vec10, float %g, i32 6
+ %idx13 = getelementptr inbounds float* %ptr, i64 7
+ %h = load float* %idx13, align 4
+ %vec14 = insertelement <8 x float> %vec12, float %h, i32 7
+ ret <8 x float> %vec14
+
+; ALL-LABEL: merge_8_floats
+
+; FAST32: vmovups
+; FAST32-NEXT: retq
+
+; SLOW32: vmovups
+; SLOW32-NEXT: vinsertf128
+; SLOW32-NEXT: retq
+}
+
+define <4 x double> @merge_4_doubles(double* %ptr) {
+ %a = load double* %ptr, align 8
+ %vec = insertelement <4 x double> undef, double %a, i32 0
+ %idx1 = getelementptr inbounds double* %ptr, i64 1
+ %b = load double* %idx1, align 8
+ %vec2 = insertelement <4 x double> %vec, double %b, i32 1
+ %idx3 = getelementptr inbounds double* %ptr, i64 2
+ %c = load double* %idx3, align 8
+ %vec4 = insertelement <4 x double> %vec2, double %c, i32 2
+ %idx5 = getelementptr inbounds double* %ptr, i64 3
+ %d = load double* %idx5, align 8
+ %vec6 = insertelement <4 x double> %vec4, double %d, i32 3
+ ret <4 x double> %vec6
+
+; ALL-LABEL: merge_4_doubles
+; FAST32: vmovups
+; FAST32-NEXT: retq
+
+; SLOW32: vmovups
+; SLOW32-NEXT: vinsertf128
+; SLOW32-NEXT: retq
+}
+
+; PR21771 ( http://llvm.org/bugs/show_bug.cgi?id=21771 )
+; Recognize and combine consecutive loads even when the
+; first of the combined loads is offset from the base address.
+define <4 x double> @merge_4_doubles_offset(double* %ptr) {
+ %arrayidx4 = getelementptr inbounds double* %ptr, i64 4
+ %arrayidx5 = getelementptr inbounds double* %ptr, i64 5
+ %arrayidx6 = getelementptr inbounds double* %ptr, i64 6
+ %arrayidx7 = getelementptr inbounds double* %ptr, i64 7
+ %e = load double* %arrayidx4, align 8
+ %f = load double* %arrayidx5, align 8
+ %g = load double* %arrayidx6, align 8
+ %h = load double* %arrayidx7, align 8
+ %vecinit4 = insertelement <4 x double> undef, double %e, i32 0
+ %vecinit5 = insertelement <4 x double> %vecinit4, double %f, i32 1
+ %vecinit6 = insertelement <4 x double> %vecinit5, double %g, i32 2
+ %vecinit7 = insertelement <4 x double> %vecinit6, double %h, i32 3
+ ret <4 x double> %vecinit7
+
+; ALL-LABEL: merge_4_doubles_offset
+; FAST32: vmovups
+; FAST32-NEXT: retq
+
+; SLOW32: vmovups
+; SLOW32-NEXT: vinsertf128
+; SLOW32-NEXT: retq
}
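
All of the merge_* tests above exercise the same DAG combine: a chain of scalar loads feeding insertelement at consecutive lanes is rewritten as one wide vector load, and (per PR21771) the first load need not sit at offset 0. A minimal standalone sketch of the pattern, in the same typed-pointer IR syntax this patch uses (the function name and RUN line are illustrative, not part of the patch):

    ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx
    define <2 x double> @merge_2_doubles(double* %ptr) {
      %a = load double* %ptr, align 8
      %idx1 = getelementptr inbounds double* %ptr, i64 1
      %b = load double* %idx1, align 8
      %v0 = insertelement <2 x double> undef, double %a, i32 0
      %v1 = insertelement <2 x double> %v0, double %b, i32 1
      ; The two 8-byte loads should collapse into a single unaligned 16-byte
      ; load instead of one scalar load and insert per element.
      ret <2 x double> %v1
    }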
diff --git a/test/CodeGen/X86/vec_split.ll b/test/CodeGen/X86/vec_split.ll
index bc2c663..1df4cf2 100644
--- a/test/CodeGen/X86/vec_split.ll
+++ b/test/CodeGen/X86/vec_split.ll
@@ -1,6 +1,6 @@
-; RUN: llc -march=x86-64 -mcpu=corei7 < %s | FileCheck %s -check-prefix=SSE4
-; RUN: llc -march=x86-64 -mcpu=corei7-avx < %s | FileCheck %s -check-prefix=AVX1
-; RUN: llc -march=x86-64 -mcpu=core-avx2 < %s | FileCheck %s -check-prefix=AVX2
+; RUN: llc -march=x86-64 -mattr=sse4.1 < %s | FileCheck %s -check-prefix=SSE4
+; RUN: llc -march=x86-64 -mattr=avx < %s | FileCheck %s -check-prefix=AVX1
+; RUN: llc -march=x86-64 -mattr=avx2 < %s | FileCheck %s -check-prefix=AVX2
define <16 x i16> @split16(<16 x i16> %a, <16 x i16> %b, <16 x i8> %__mask) {
; SSE4-LABEL: split16:
diff --git a/test/CodeGen/X86/vector-blend.ll b/test/CodeGen/X86/vector-blend.ll
index 0a3ed7e..e15daaa 100644
--- a/test/CodeGen/X86/vector-blend.ll
+++ b/test/CodeGen/X86/vector-blend.ll
@@ -1,6 +1,6 @@
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=x86-64 -mattr=+sse2 | FileCheck %s --check-prefix=SSE --check-prefix=SSE2
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=x86-64 -mattr=+ssse3 | FileCheck %s --check-prefix=SSE --check-prefix=SSSE3
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=x86-64 -mattr=+sse4.1 | FileCheck %s --check-prefix=SSE --check-prefix=SSE41
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=x86-64 -mattr=+sse2 | FileCheck %s --check-prefix=SSE2
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=x86-64 -mattr=+ssse3 | FileCheck %s --check-prefix=SSSE3
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=x86-64 -mattr=+sse4.1 | FileCheck %s --check-prefix=SSE41
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=x86-64 -mattr=+avx | FileCheck %s --check-prefix=AVX --check-prefix=AVX1
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=x86-64 -mattr=+avx2 | FileCheck %s --check-prefix=AVX --check-prefix=AVX2
@@ -9,16 +9,14 @@
define <4 x float> @vsel_float(<4 x float> %v1, <4 x float> %v2) {
; SSE2-LABEL: vsel_float:
; SSE2: # BB#0: # %entry
-; SSE2-NEXT: andps {{.*}}(%rip), %xmm1
-; SSE2-NEXT: andps {{.*}}(%rip), %xmm0
-; SSE2-NEXT: orps %xmm1, %xmm0
+; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[1,3]
+; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,2,1,3]
; SSE2-NEXT: retq
;
; SSSE3-LABEL: vsel_float:
; SSSE3: # BB#0: # %entry
-; SSSE3-NEXT: andps {{.*}}(%rip), %xmm1
-; SSSE3-NEXT: andps {{.*}}(%rip), %xmm0
-; SSSE3-NEXT: orps %xmm1, %xmm0
+; SSSE3-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[1,3]
+; SSSE3-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,2,1,3]
; SSSE3-NEXT: retq
;
; SSE41-LABEL: vsel_float:
@@ -36,15 +34,26 @@ entry:
}
define <4 x float> @vsel_float2(<4 x float> %v1, <4 x float> %v2) {
-; SSE-LABEL: vsel_float2:
-; SSE: # BB#0: # %entry
-; SSE-NEXT: movss %xmm0, %xmm1
-; SSE-NEXT: movaps %xmm1, %xmm0
-; SSE-NEXT: retq
+; SSE2-LABEL: vsel_float2:
+; SSE2: # BB#0: # %entry
+; SSE2-NEXT: movss {{.*#+}} xmm1 = xmm0[0],xmm1[1,2,3]
+; SSE2-NEXT: movaps %xmm1, %xmm0
+; SSE2-NEXT: retq
+;
+; SSSE3-LABEL: vsel_float2:
+; SSSE3: # BB#0: # %entry
+; SSSE3-NEXT: movss {{.*#+}} xmm1 = xmm0[0],xmm1[1,2,3]
+; SSSE3-NEXT: movaps %xmm1, %xmm0
+; SSSE3-NEXT: retq
+;
+; SSE41-LABEL: vsel_float2:
+; SSE41: # BB#0: # %entry
+; SSE41-NEXT: blendps {{.*#+}} xmm0 = xmm0[0],xmm1[1,2,3]
+; SSE41-NEXT: retq
;
; AVX-LABEL: vsel_float2:
; AVX: # BB#0: # %entry
-; AVX-NEXT: vmovss %xmm0, %xmm1, %xmm0
+; AVX-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0],xmm1[1,2,3]
; AVX-NEXT: retq
entry:
%vsel = select <4 x i1> <i1 true, i1 false, i1 false, i1 false>, <4 x float> %v1, <4 x float> %v2
@@ -54,16 +63,14 @@ entry:
define <4 x i8> @vsel_4xi8(<4 x i8> %v1, <4 x i8> %v2) {
; SSE2-LABEL: vsel_4xi8:
; SSE2: # BB#0: # %entry
-; SSE2-NEXT: andps {{.*}}(%rip), %xmm1
-; SSE2-NEXT: andps {{.*}}(%rip), %xmm0
-; SSE2-NEXT: orps %xmm1, %xmm0
+; SSE2-NEXT: shufps {{.*#+}} xmm1 = xmm1[2,0],xmm0[3,0]
+; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,1],xmm1[0,2]
; SSE2-NEXT: retq
;
; SSSE3-LABEL: vsel_4xi8:
; SSSE3: # BB#0: # %entry
-; SSSE3-NEXT: andps {{.*}}(%rip), %xmm1
-; SSSE3-NEXT: andps {{.*}}(%rip), %xmm0
-; SSSE3-NEXT: orps %xmm1, %xmm0
+; SSSE3-NEXT: shufps {{.*#+}} xmm1 = xmm1[2,0],xmm0[3,0]
+; SSSE3-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,1],xmm1[0,2]
; SSSE3-NEXT: retq
;
; SSE41-LABEL: vsel_4xi8:
@@ -88,16 +95,16 @@ entry:
define <4 x i16> @vsel_4xi16(<4 x i16> %v1, <4 x i16> %v2) {
; SSE2-LABEL: vsel_4xi16:
; SSE2: # BB#0: # %entry
-; SSE2-NEXT: andps {{.*}}(%rip), %xmm1
-; SSE2-NEXT: andps {{.*}}(%rip), %xmm0
-; SSE2-NEXT: orps %xmm1, %xmm0
+; SSE2-NEXT: shufps {{.*#+}} xmm1 = xmm1[1,0],xmm0[0,0]
+; SSE2-NEXT: shufps {{.*#+}} xmm1 = xmm1[2,0],xmm0[2,3]
+; SSE2-NEXT: movaps %xmm1, %xmm0
; SSE2-NEXT: retq
;
; SSSE3-LABEL: vsel_4xi16:
; SSSE3: # BB#0: # %entry
-; SSSE3-NEXT: andps {{.*}}(%rip), %xmm1
-; SSSE3-NEXT: andps {{.*}}(%rip), %xmm0
-; SSSE3-NEXT: orps %xmm1, %xmm0
+; SSSE3-NEXT: shufps {{.*#+}} xmm1 = xmm1[1,0],xmm0[0,0]
+; SSSE3-NEXT: shufps {{.*#+}} xmm1 = xmm1[2,0],xmm0[2,3]
+; SSSE3-NEXT: movaps %xmm1, %xmm0
; SSSE3-NEXT: retq
;
; SSE41-LABEL: vsel_4xi16:
@@ -122,16 +129,16 @@ entry:
define <4 x i32> @vsel_i32(<4 x i32> %v1, <4 x i32> %v2) {
; SSE2-LABEL: vsel_i32:
; SSE2: # BB#0: # %entry
-; SSE2-NEXT: andps {{.*}}(%rip), %xmm1
-; SSE2-NEXT: andps {{.*}}(%rip), %xmm0
-; SSE2-NEXT: orps %xmm1, %xmm0
+; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,3,2,3]
+; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
+; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; SSE2-NEXT: retq
;
; SSSE3-LABEL: vsel_i32:
; SSSE3: # BB#0: # %entry
-; SSSE3-NEXT: andps {{.*}}(%rip), %xmm1
-; SSSE3-NEXT: andps {{.*}}(%rip), %xmm0
-; SSSE3-NEXT: orps %xmm1, %xmm0
+; SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,3,2,3]
+; SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
+; SSSE3-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; SSSE3-NEXT: retq
;
; SSE41-LABEL: vsel_i32:
@@ -154,15 +161,26 @@ entry:
}
define <2 x double> @vsel_double(<2 x double> %v1, <2 x double> %v2) {
-; SSE-LABEL: vsel_double:
-; SSE: # BB#0: # %entry
-; SSE-NEXT: movsd %xmm0, %xmm1
-; SSE-NEXT: movaps %xmm1, %xmm0
-; SSE-NEXT: retq
+; SSE2-LABEL: vsel_double:
+; SSE2: # BB#0: # %entry
+; SSE2-NEXT: movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1]
+; SSE2-NEXT: movapd %xmm1, %xmm0
+; SSE2-NEXT: retq
+;
+; SSSE3-LABEL: vsel_double:
+; SSSE3: # BB#0: # %entry
+; SSSE3-NEXT: movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1]
+; SSSE3-NEXT: movapd %xmm1, %xmm0
+; SSSE3-NEXT: retq
+;
+; SSE41-LABEL: vsel_double:
+; SSE41: # BB#0: # %entry
+; SSE41-NEXT: blendpd {{.*#+}} xmm0 = xmm0[0],xmm1[1]
+; SSE41-NEXT: retq
;
; AVX-LABEL: vsel_double:
; AVX: # BB#0: # %entry
-; AVX-NEXT: vmovsd %xmm0, %xmm1, %xmm0
+; AVX-NEXT: vblendpd {{.*#+}} xmm0 = xmm0[0],xmm1[1]
; AVX-NEXT: retq
entry:
%vsel = select <2 x i1> <i1 true, i1 false>, <2 x double> %v1, <2 x double> %v2
@@ -170,16 +188,32 @@ entry:
}
define <2 x i64> @vsel_i64(<2 x i64> %v1, <2 x i64> %v2) {
-; SSE-LABEL: vsel_i64:
-; SSE: # BB#0: # %entry
-; SSE-NEXT: movsd %xmm0, %xmm1
-; SSE-NEXT: movaps %xmm1, %xmm0
-; SSE-NEXT: retq
+; SSE2-LABEL: vsel_i64:
+; SSE2: # BB#0: # %entry
+; SSE2-NEXT: movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1]
+; SSE2-NEXT: movapd %xmm1, %xmm0
+; SSE2-NEXT: retq
;
-; AVX-LABEL: vsel_i64:
-; AVX: # BB#0: # %entry
-; AVX-NEXT: vmovsd %xmm0, %xmm1, %xmm0
-; AVX-NEXT: retq
+; SSSE3-LABEL: vsel_i64:
+; SSSE3: # BB#0: # %entry
+; SSSE3-NEXT: movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1]
+; SSSE3-NEXT: movapd %xmm1, %xmm0
+; SSSE3-NEXT: retq
+;
+; SSE41-LABEL: vsel_i64:
+; SSE41: # BB#0: # %entry
+; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm1[4,5,6,7]
+; SSE41-NEXT: retq
+;
+; AVX1-LABEL: vsel_i64:
+; AVX1: # BB#0: # %entry
+; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm1[4,5,6,7]
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: vsel_i64:
+; AVX2: # BB#0: # %entry
+; AVX2-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3]
+; AVX2-NEXT: retq
entry:
%vsel = select <2 x i1> <i1 true, i1 false>, <2 x i64> %v1, <2 x i64> %v2
ret <2 x i64> %vsel
@@ -188,16 +222,20 @@ entry:
define <8 x i16> @vsel_8xi16(<8 x i16> %v1, <8 x i16> %v2) {
; SSE2-LABEL: vsel_8xi16:
; SSE2: # BB#0: # %entry
-; SSE2-NEXT: andps {{.*}}(%rip), %xmm1
-; SSE2-NEXT: andps {{.*}}(%rip), %xmm0
-; SSE2-NEXT: orps %xmm1, %xmm0
+; SSE2-NEXT: movaps {{.*#+}} xmm2 = [0,65535,65535,65535,0,65535,65535,65535]
+; SSE2-NEXT: andps %xmm2, %xmm1
+; SSE2-NEXT: andnps %xmm0, %xmm2
+; SSE2-NEXT: orps %xmm1, %xmm2
+; SSE2-NEXT: movaps %xmm2, %xmm0
; SSE2-NEXT: retq
;
; SSSE3-LABEL: vsel_8xi16:
; SSSE3: # BB#0: # %entry
-; SSSE3-NEXT: andps {{.*}}(%rip), %xmm1
-; SSSE3-NEXT: andps {{.*}}(%rip), %xmm0
-; SSSE3-NEXT: orps %xmm1, %xmm0
+; SSSE3-NEXT: movaps {{.*#+}} xmm2 = [0,65535,65535,65535,0,65535,65535,65535]
+; SSSE3-NEXT: andps %xmm2, %xmm1
+; SSSE3-NEXT: andnps %xmm0, %xmm2
+; SSSE3-NEXT: orps %xmm1, %xmm2
+; SSSE3-NEXT: movaps %xmm2, %xmm0
; SSSE3-NEXT: retq
;
; SSE41-LABEL: vsel_8xi16:
@@ -217,29 +255,30 @@ entry:
define <16 x i8> @vsel_i8(<16 x i8> %v1, <16 x i8> %v2) {
; SSE2-LABEL: vsel_i8:
; SSE2: # BB#0: # %entry
-; SSE2-NEXT: andps {{.*}}(%rip), %xmm1
-; SSE2-NEXT: andps {{.*}}(%rip), %xmm0
-; SSE2-NEXT: orps %xmm1, %xmm0
+; SSE2-NEXT: movaps {{.*#+}} xmm2 = [255,0,0,0,255,0,0,0,255,255,255,255,255,255,255,255]
+; SSE2-NEXT: andps %xmm2, %xmm0
+; SSE2-NEXT: andnps %xmm1, %xmm2
+; SSE2-NEXT: orps %xmm2, %xmm0
; SSE2-NEXT: retq
;
; SSSE3-LABEL: vsel_i8:
; SSSE3: # BB#0: # %entry
-; SSSE3-NEXT: andps {{.*}}(%rip), %xmm1
-; SSSE3-NEXT: andps {{.*}}(%rip), %xmm0
-; SSSE3-NEXT: orps %xmm1, %xmm0
+; SSSE3-NEXT: pshufb {{.*#+}} xmm1 = zero,xmm1[1,2,3],zero,xmm1[5,6,7],zero,zero,zero,zero,zero,zero,zero,zero
+; SSSE3-NEXT: pshufb {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[8,9,10,11,12,13,14,15]
+; SSSE3-NEXT: por %xmm1, %xmm0
; SSSE3-NEXT: retq
;
; SSE41-LABEL: vsel_i8:
; SSE41: # BB#0: # %entry
; SSE41-NEXT: movdqa %xmm0, %xmm2
-; SSE41-NEXT: movaps {{.*#+}} xmm0 = [255,0,0,0,255,0,0,0,255,0,0,0,255,0,0,0]
+; SSE41-NEXT: movaps {{.*#+}} xmm0 = [255,0,0,0,255,0,0,0,255,255,255,255,255,255,255,255]
; SSE41-NEXT: pblendvb %xmm2, %xmm1
; SSE41-NEXT: movdqa %xmm1, %xmm0
; SSE41-NEXT: retq
;
; AVX-LABEL: vsel_i8:
; AVX: # BB#0: # %entry
-; AVX-NEXT: vmovdqa {{.*#+}} xmm2 = [255,0,0,0,255,0,0,0,255,0,0,0,255,0,0,0]
+; AVX-NEXT: vmovdqa {{.*#+}} xmm2 = [255,0,0,0,255,0,0,0,255,255,255,255,255,255,255,255]
; AVX-NEXT: vpblendvb %xmm2, %xmm0, %xmm1, %xmm0
; AVX-NEXT: retq
entry:
@@ -251,13 +290,27 @@ entry:
; AVX256 tests:
define <8 x float> @vsel_float8(<8 x float> %v1, <8 x float> %v2) {
-; SSE-LABEL: vsel_float8:
-; SSE: # BB#0: # %entry
-; SSE-NEXT: movss %xmm0, %xmm2
-; SSE-NEXT: movss %xmm1, %xmm3
-; SSE-NEXT: movaps %xmm2, %xmm0
-; SSE-NEXT: movaps %xmm3, %xmm1
-; SSE-NEXT: retq
+; SSE2-LABEL: vsel_float8:
+; SSE2: # BB#0: # %entry
+; SSE2-NEXT: movss {{.*#+}} xmm2 = xmm0[0],xmm2[1,2,3]
+; SSE2-NEXT: movss {{.*#+}} xmm3 = xmm1[0],xmm3[1,2,3]
+; SSE2-NEXT: movaps %xmm2, %xmm0
+; SSE2-NEXT: movaps %xmm3, %xmm1
+; SSE2-NEXT: retq
+;
+; SSSE3-LABEL: vsel_float8:
+; SSSE3: # BB#0: # %entry
+; SSSE3-NEXT: movss {{.*#+}} xmm2 = xmm0[0],xmm2[1,2,3]
+; SSSE3-NEXT: movss {{.*#+}} xmm3 = xmm1[0],xmm3[1,2,3]
+; SSSE3-NEXT: movaps %xmm2, %xmm0
+; SSSE3-NEXT: movaps %xmm3, %xmm1
+; SSSE3-NEXT: retq
+;
+; SSE41-LABEL: vsel_float8:
+; SSE41: # BB#0: # %entry
+; SSE41-NEXT: blendps {{.*#+}} xmm0 = xmm0[0],xmm2[1,2,3]
+; SSE41-NEXT: blendps {{.*#+}} xmm1 = xmm1[0],xmm3[1,2,3]
+; SSE41-NEXT: retq
;
; AVX-LABEL: vsel_float8:
; AVX: # BB#0: # %entry
@@ -269,13 +322,27 @@ entry:
}
define <8 x i32> @vsel_i328(<8 x i32> %v1, <8 x i32> %v2) {
-; SSE-LABEL: vsel_i328:
-; SSE: # BB#0: # %entry
-; SSE-NEXT: movss %xmm0, %xmm2
-; SSE-NEXT: movss %xmm1, %xmm3
-; SSE-NEXT: movaps %xmm2, %xmm0
-; SSE-NEXT: movaps %xmm3, %xmm1
-; SSE-NEXT: retq
+; SSE2-LABEL: vsel_i328:
+; SSE2: # BB#0: # %entry
+; SSE2-NEXT: movss {{.*#+}} xmm2 = xmm0[0],xmm2[1,2,3]
+; SSE2-NEXT: movss {{.*#+}} xmm3 = xmm1[0],xmm3[1,2,3]
+; SSE2-NEXT: movaps %xmm2, %xmm0
+; SSE2-NEXT: movaps %xmm3, %xmm1
+; SSE2-NEXT: retq
+;
+; SSSE3-LABEL: vsel_i328:
+; SSSE3: # BB#0: # %entry
+; SSSE3-NEXT: movss {{.*#+}} xmm2 = xmm0[0],xmm2[1,2,3]
+; SSSE3-NEXT: movss {{.*#+}} xmm3 = xmm1[0],xmm3[1,2,3]
+; SSSE3-NEXT: movaps %xmm2, %xmm0
+; SSSE3-NEXT: movaps %xmm3, %xmm1
+; SSSE3-NEXT: retq
+;
+; SSE41-LABEL: vsel_i328:
+; SSE41: # BB#0: # %entry
+; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm2[2,3,4,5,6,7]
+; SSE41-NEXT: pblendw {{.*#+}} xmm1 = xmm1[0,1],xmm3[2,3,4,5,6,7]
+; SSE41-NEXT: retq
;
; AVX1-LABEL: vsel_i328:
; AVX1: # BB#0: # %entry
@@ -294,21 +361,21 @@ entry:
define <8 x double> @vsel_double8(<8 x double> %v1, <8 x double> %v2) {
; SSE2-LABEL: vsel_double8:
; SSE2: # BB#0: # %entry
-; SSE2-NEXT: movsd %xmm0, %xmm4
-; SSE2-NEXT: movsd %xmm2, %xmm6
-; SSE2-NEXT: movaps %xmm4, %xmm0
+; SSE2-NEXT: movsd {{.*#+}} xmm4 = xmm0[0],xmm4[1]
+; SSE2-NEXT: movsd {{.*#+}} xmm6 = xmm2[0],xmm6[1]
+; SSE2-NEXT: movapd %xmm4, %xmm0
; SSE2-NEXT: movaps %xmm5, %xmm1
-; SSE2-NEXT: movaps %xmm6, %xmm2
+; SSE2-NEXT: movapd %xmm6, %xmm2
; SSE2-NEXT: movaps %xmm7, %xmm3
; SSE2-NEXT: retq
;
; SSSE3-LABEL: vsel_double8:
; SSSE3: # BB#0: # %entry
-; SSSE3-NEXT: movsd %xmm0, %xmm4
-; SSSE3-NEXT: movsd %xmm2, %xmm6
-; SSSE3-NEXT: movaps %xmm4, %xmm0
+; SSSE3-NEXT: movsd {{.*#+}} xmm4 = xmm0[0],xmm4[1]
+; SSSE3-NEXT: movsd {{.*#+}} xmm6 = xmm2[0],xmm6[1]
+; SSSE3-NEXT: movapd %xmm4, %xmm0
; SSSE3-NEXT: movaps %xmm5, %xmm1
-; SSSE3-NEXT: movaps %xmm6, %xmm2
+; SSSE3-NEXT: movapd %xmm6, %xmm2
; SSSE3-NEXT: movaps %xmm7, %xmm3
; SSSE3-NEXT: retq
;
@@ -333,21 +400,21 @@ entry:
define <8 x i64> @vsel_i648(<8 x i64> %v1, <8 x i64> %v2) {
; SSE2-LABEL: vsel_i648:
; SSE2: # BB#0: # %entry
-; SSE2-NEXT: movsd %xmm0, %xmm4
-; SSE2-NEXT: movsd %xmm2, %xmm6
-; SSE2-NEXT: movaps %xmm4, %xmm0
+; SSE2-NEXT: movsd {{.*#+}} xmm4 = xmm0[0],xmm4[1]
+; SSE2-NEXT: movsd {{.*#+}} xmm6 = xmm2[0],xmm6[1]
+; SSE2-NEXT: movapd %xmm4, %xmm0
; SSE2-NEXT: movaps %xmm5, %xmm1
-; SSE2-NEXT: movaps %xmm6, %xmm2
+; SSE2-NEXT: movapd %xmm6, %xmm2
; SSE2-NEXT: movaps %xmm7, %xmm3
; SSE2-NEXT: retq
;
; SSSE3-LABEL: vsel_i648:
; SSSE3: # BB#0: # %entry
-; SSSE3-NEXT: movsd %xmm0, %xmm4
-; SSSE3-NEXT: movsd %xmm2, %xmm6
-; SSSE3-NEXT: movaps %xmm4, %xmm0
+; SSSE3-NEXT: movsd {{.*#+}} xmm4 = xmm0[0],xmm4[1]
+; SSSE3-NEXT: movsd {{.*#+}} xmm6 = xmm2[0],xmm6[1]
+; SSSE3-NEXT: movapd %xmm4, %xmm0
; SSSE3-NEXT: movaps %xmm5, %xmm1
-; SSSE3-NEXT: movaps %xmm6, %xmm2
+; SSSE3-NEXT: movapd %xmm6, %xmm2
; SSSE3-NEXT: movaps %xmm7, %xmm3
; SSSE3-NEXT: retq
;
@@ -376,13 +443,27 @@ entry:
}
define <4 x double> @vsel_double4(<4 x double> %v1, <4 x double> %v2) {
-; SSE-LABEL: vsel_double4:
-; SSE: # BB#0: # %entry
-; SSE-NEXT: movsd %xmm0, %xmm2
-; SSE-NEXT: movsd %xmm1, %xmm3
-; SSE-NEXT: movaps %xmm2, %xmm0
-; SSE-NEXT: movaps %xmm3, %xmm1
-; SSE-NEXT: retq
+; SSE2-LABEL: vsel_double4:
+; SSE2: # BB#0: # %entry
+; SSE2-NEXT: movsd {{.*#+}} xmm2 = xmm0[0],xmm2[1]
+; SSE2-NEXT: movsd {{.*#+}} xmm3 = xmm1[0],xmm3[1]
+; SSE2-NEXT: movapd %xmm2, %xmm0
+; SSE2-NEXT: movapd %xmm3, %xmm1
+; SSE2-NEXT: retq
+;
+; SSSE3-LABEL: vsel_double4:
+; SSSE3: # BB#0: # %entry
+; SSSE3-NEXT: movsd {{.*#+}} xmm2 = xmm0[0],xmm2[1]
+; SSSE3-NEXT: movsd {{.*#+}} xmm3 = xmm1[0],xmm3[1]
+; SSSE3-NEXT: movapd %xmm2, %xmm0
+; SSSE3-NEXT: movapd %xmm3, %xmm1
+; SSSE3-NEXT: retq
+;
+; SSE41-LABEL: vsel_double4:
+; SSE41: # BB#0: # %entry
+; SSE41-NEXT: blendpd {{.*#+}} xmm0 = xmm0[0],xmm2[1]
+; SSE41-NEXT: blendpd {{.*#+}} xmm1 = xmm1[0],xmm3[1]
+; SSE41-NEXT: retq
;
; AVX-LABEL: vsel_double4:
; AVX: # BB#0: # %entry
@@ -474,12 +555,25 @@ entry:
; If we can determine that a blend has a constant mask, we should emit the
; blend instruction with an immediate mask.
define <4 x double> @constant_blendvpd_avx(<4 x double> %xy, <4 x double> %ab) {
-; SSE-LABEL: constant_blendvpd_avx:
-; SSE: # BB#0: # %entry
-; SSE-NEXT: movsd %xmm1, %xmm3
-; SSE-NEXT: movaps %xmm2, %xmm0
-; SSE-NEXT: movaps %xmm3, %xmm1
-; SSE-NEXT: retq
+; SSE2-LABEL: constant_blendvpd_avx:
+; SSE2: # BB#0: # %entry
+; SSE2-NEXT: movsd {{.*#+}} xmm3 = xmm1[0],xmm3[1]
+; SSE2-NEXT: movaps %xmm2, %xmm0
+; SSE2-NEXT: movapd %xmm3, %xmm1
+; SSE2-NEXT: retq
+;
+; SSSE3-LABEL: constant_blendvpd_avx:
+; SSSE3: # BB#0: # %entry
+; SSSE3-NEXT: movsd {{.*#+}} xmm3 = xmm1[0],xmm3[1]
+; SSSE3-NEXT: movaps %xmm2, %xmm0
+; SSSE3-NEXT: movapd %xmm3, %xmm1
+; SSSE3-NEXT: retq
+;
+; SSE41-LABEL: constant_blendvpd_avx:
+; SSE41: # BB#0: # %entry
+; SSE41-NEXT: blendpd {{.*#+}} xmm1 = xmm1[0],xmm3[1]
+; SSE41-NEXT: movaps %xmm2, %xmm0
+; SSE41-NEXT: retq
;
; AVX-LABEL: constant_blendvpd_avx:
; AVX: # BB#0: # %entry
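
The comment above this hunk states the optimization: when the select mask is a compile-time constant, the variable-mask vblendvps/vblendvpd (mask in a register) can be replaced by blendps/blendpd with an immediate operand. A minimal sketch of an IR pattern that should take the immediate form (hypothetical function name, same dialect as the tests):

    define <4 x float> @const_mask_blend(<4 x float> %a, <4 x float> %b) {
      ; The mask is a constant vector, so no blendvps through %xmm0 is needed;
      ; an immediate blendps can take lanes 0 and 2 from %a, 1 and 3 from %b.
      %r = select <4 x i1> <i1 true, i1 false, i1 true, i1 false>, <4 x float> %a, <4 x float> %b
      ret <4 x float> %r
    }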
@@ -493,26 +587,22 @@ entry:
define <8 x float> @constant_blendvps_avx(<8 x float> %xyzw, <8 x float> %abcd) {
; SSE2-LABEL: constant_blendvps_avx:
; SSE2: # BB#0: # %entry
-; SSE2-NEXT: movaps {{.*#+}} xmm4 = [4294967295,4294967295,4294967295,0]
-; SSE2-NEXT: andps %xmm4, %xmm2
-; SSE2-NEXT: movaps {{.*#+}} xmm5 = [0,0,0,4294967295]
-; SSE2-NEXT: andps %xmm5, %xmm0
-; SSE2-NEXT: orps %xmm2, %xmm0
-; SSE2-NEXT: andps %xmm4, %xmm3
-; SSE2-NEXT: andps %xmm5, %xmm1
-; SSE2-NEXT: orps %xmm3, %xmm1
+; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[3,0],xmm2[2,0]
+; SSE2-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,1],xmm0[2,0]
+; SSE2-NEXT: shufps {{.*#+}} xmm1 = xmm1[3,0],xmm3[2,0]
+; SSE2-NEXT: shufps {{.*#+}} xmm3 = xmm3[0,1],xmm1[2,0]
+; SSE2-NEXT: movaps %xmm2, %xmm0
+; SSE2-NEXT: movaps %xmm3, %xmm1
; SSE2-NEXT: retq
;
; SSSE3-LABEL: constant_blendvps_avx:
; SSSE3: # BB#0: # %entry
-; SSSE3-NEXT: movaps {{.*#+}} xmm4 = [4294967295,4294967295,4294967295,0]
-; SSSE3-NEXT: andps %xmm4, %xmm2
-; SSSE3-NEXT: movaps {{.*#+}} xmm5 = [0,0,0,4294967295]
-; SSSE3-NEXT: andps %xmm5, %xmm0
-; SSSE3-NEXT: orps %xmm2, %xmm0
-; SSSE3-NEXT: andps %xmm4, %xmm3
-; SSSE3-NEXT: andps %xmm5, %xmm1
-; SSSE3-NEXT: orps %xmm3, %xmm1
+; SSSE3-NEXT: shufps {{.*#+}} xmm0 = xmm0[3,0],xmm2[2,0]
+; SSSE3-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,1],xmm0[2,0]
+; SSSE3-NEXT: shufps {{.*#+}} xmm1 = xmm1[3,0],xmm3[2,0]
+; SSSE3-NEXT: shufps {{.*#+}} xmm3 = xmm3[0,1],xmm1[2,0]
+; SSSE3-NEXT: movaps %xmm2, %xmm0
+; SSSE3-NEXT: movaps %xmm3, %xmm1
; SSSE3-NEXT: retq
;
; SSE41-LABEL: constant_blendvps_avx:
@@ -533,32 +623,32 @@ entry:
define <32 x i8> @constant_pblendvb_avx2(<32 x i8> %xyzw, <32 x i8> %abcd) {
; SSE2-LABEL: constant_pblendvb_avx2:
; SSE2: # BB#0: # %entry
-; SSE2-NEXT: movaps {{.*#+}} xmm4 = [255,255,0,255,0,0,0,255,255,255,0,255,0,0,0,255]
-; SSE2-NEXT: andps %xmm4, %xmm2
-; SSE2-NEXT: movaps {{.*#+}} xmm5 = [0,0,255,0,255,255,255,0,0,0,255,0,255,255,255,0]
-; SSE2-NEXT: andps %xmm5, %xmm0
-; SSE2-NEXT: orps %xmm2, %xmm0
-; SSE2-NEXT: andps %xmm4, %xmm3
-; SSE2-NEXT: andps %xmm5, %xmm1
-; SSE2-NEXT: orps %xmm3, %xmm1
+; SSE2-NEXT: movaps {{.*#+}} xmm4 = [0,0,255,0,255,255,255,0,255,255,255,255,255,255,255,255]
+; SSE2-NEXT: movaps %xmm4, %xmm5
+; SSE2-NEXT: andnps %xmm2, %xmm5
+; SSE2-NEXT: andps %xmm4, %xmm0
+; SSE2-NEXT: orps %xmm5, %xmm0
+; SSE2-NEXT: andps %xmm4, %xmm1
+; SSE2-NEXT: andnps %xmm3, %xmm4
+; SSE2-NEXT: orps %xmm4, %xmm1
; SSE2-NEXT: retq
;
; SSSE3-LABEL: constant_pblendvb_avx2:
; SSSE3: # BB#0: # %entry
-; SSSE3-NEXT: movaps {{.*#+}} xmm4 = [255,255,0,255,0,0,0,255,255,255,0,255,0,0,0,255]
-; SSSE3-NEXT: andps %xmm4, %xmm2
-; SSSE3-NEXT: movaps {{.*#+}} xmm5 = [0,0,255,0,255,255,255,0,0,0,255,0,255,255,255,0]
-; SSSE3-NEXT: andps %xmm5, %xmm0
-; SSSE3-NEXT: orps %xmm2, %xmm0
-; SSSE3-NEXT: andps %xmm4, %xmm3
-; SSSE3-NEXT: andps %xmm5, %xmm1
-; SSSE3-NEXT: orps %xmm3, %xmm1
+; SSSE3-NEXT: movdqa {{.*#+}} xmm4 = [0,1,128,3,128,128,128,7,128,128,128,128,128,128,128,128]
+; SSSE3-NEXT: pshufb %xmm4, %xmm2
+; SSSE3-NEXT: movdqa {{.*#+}} xmm5 = [128,128,2,128,4,5,6,128,8,9,10,11,12,13,14,15]
+; SSSE3-NEXT: pshufb %xmm5, %xmm0
+; SSSE3-NEXT: por %xmm2, %xmm0
+; SSSE3-NEXT: pshufb %xmm4, %xmm3
+; SSSE3-NEXT: pshufb %xmm5, %xmm1
+; SSSE3-NEXT: por %xmm3, %xmm1
; SSSE3-NEXT: retq
;
; SSE41-LABEL: constant_pblendvb_avx2:
; SSE41: # BB#0: # %entry
; SSE41-NEXT: movdqa %xmm0, %xmm4
-; SSE41-NEXT: movaps {{.*#+}} xmm0 = [0,0,255,0,255,255,255,0,0,0,255,0,255,255,255,0]
+; SSE41-NEXT: movaps {{.*#+}} xmm0 = [0,0,255,0,255,255,255,0,255,255,255,255,255,255,255,255]
; SSE41-NEXT: pblendvb %xmm4, %xmm2
; SSE41-NEXT: pblendvb %xmm1, %xmm3
; SSE41-NEXT: movdqa %xmm2, %xmm0
@@ -567,14 +657,15 @@ define <32 x i8> @constant_pblendvb_avx2(<32 x i8> %xyzw, <32 x i8> %abcd) {
;
; AVX1-LABEL: constant_pblendvb_avx2:
; AVX1: # BB#0: # %entry
-; AVX1-NEXT: vandps {{.*}}(%rip), %ymm1, %ymm1
-; AVX1-NEXT: vandps {{.*}}(%rip), %ymm0, %ymm0
-; AVX1-NEXT: vorps %ymm1, %ymm0, %ymm0
+; AVX1-NEXT: vmovdqa {{.*#+}} xmm2 = [0,0,255,0,255,255,255,0,255,255,255,255,255,255,255,255]
+; AVX1-NEXT: vpblendvb %xmm2, %xmm0, %xmm1, %xmm1
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
+; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: constant_pblendvb_avx2:
; AVX2: # BB#0: # %entry
-; AVX2-NEXT: vmovdqa {{.*#+}} ymm2 = [0,0,255,0,255,255,255,0,0,0,255,0,255,255,255,0,0,0,255,0,255,255,255,0,0,0,255,0,255,255,255,0]
+; AVX2-NEXT: vmovdqa {{.*#+}} ymm2 = [0,0,255,0,255,255,255,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255]
; AVX2-NEXT: vpblendvb %ymm2, %ymm0, %ymm1, %ymm0
; AVX2-NEXT: retq
entry:
@@ -616,7 +707,7 @@ entry:
define <8 x float> @blend_shufflevector_8xfloat(<8 x float> %a, <8 x float> %b) {
; SSE2-LABEL: blend_shufflevector_8xfloat:
; SSE2: # BB#0: # %entry
-; SSE2-NEXT: movss %xmm0, %xmm2
+; SSE2-NEXT: movss {{.*#+}} xmm2 = xmm0[0],xmm2[1,2,3]
; SSE2-NEXT: shufps {{.*#+}} xmm1 = xmm1[2,0],xmm3[3,0]
; SSE2-NEXT: shufps {{.*#+}} xmm3 = xmm3[0,1],xmm1[0,2]
; SSE2-NEXT: movaps %xmm2, %xmm0
@@ -625,7 +716,7 @@ define <8 x float> @blend_shufflevector_8xfloat(<8 x float> %a, <8 x float> %b)
;
; SSSE3-LABEL: blend_shufflevector_8xfloat:
; SSSE3: # BB#0: # %entry
-; SSSE3-NEXT: movss %xmm0, %xmm2
+; SSSE3-NEXT: movss {{.*#+}} xmm2 = xmm0[0],xmm2[1,2,3]
; SSSE3-NEXT: shufps {{.*#+}} xmm1 = xmm1[2,0],xmm3[3,0]
; SSSE3-NEXT: shufps {{.*#+}} xmm3 = xmm3[0,1],xmm1[0,2]
; SSSE3-NEXT: movaps %xmm2, %xmm0
@@ -650,14 +741,14 @@ entry:
define <4 x double> @blend_shufflevector_4xdouble(<4 x double> %a, <4 x double> %b) {
; SSE2-LABEL: blend_shufflevector_4xdouble:
; SSE2: # BB#0: # %entry
-; SSE2-NEXT: movsd %xmm0, %xmm2
-; SSE2-NEXT: movaps %xmm2, %xmm0
+; SSE2-NEXT: movsd {{.*#+}} xmm2 = xmm0[0],xmm2[1]
+; SSE2-NEXT: movapd %xmm2, %xmm0
; SSE2-NEXT: retq
;
; SSSE3-LABEL: blend_shufflevector_4xdouble:
; SSSE3: # BB#0: # %entry
-; SSSE3-NEXT: movsd %xmm0, %xmm2
-; SSSE3-NEXT: movaps %xmm2, %xmm0
+; SSSE3-NEXT: movsd {{.*#+}} xmm2 = xmm0[0],xmm2[1]
+; SSSE3-NEXT: movapd %xmm2, %xmm0
; SSSE3-NEXT: retq
;
; SSE41-LABEL: blend_shufflevector_4xdouble:
@@ -677,13 +768,13 @@ entry:
define <4 x i64> @blend_shufflevector_4xi64(<4 x i64> %a, <4 x i64> %b) {
; SSE2-LABEL: blend_shufflevector_4xi64:
; SSE2: # BB#0: # %entry
-; SSE2-NEXT: movsd %xmm2, %xmm0
+; SSE2-NEXT: movsd {{.*#+}} xmm0 = xmm2[0],xmm0[1]
; SSE2-NEXT: movaps %xmm3, %xmm1
; SSE2-NEXT: retq
;
; SSSE3-LABEL: blend_shufflevector_4xi64:
; SSSE3: # BB#0: # %entry
-; SSSE3-NEXT: movsd %xmm2, %xmm0
+; SSSE3-NEXT: movsd {{.*#+}} xmm0 = xmm2[0],xmm0[1]
; SSSE3-NEXT: movaps %xmm3, %xmm1
; SSSE3-NEXT: retq
;
diff --git a/test/CodeGen/X86/vector-ctpop.ll b/test/CodeGen/X86/vector-ctpop.ll
new file mode 100644
index 0000000..59d6792
--- /dev/null
+++ b/test/CodeGen/X86/vector-ctpop.ll
@@ -0,0 +1,159 @@
+; RUN: llc < %s -mtriple=x86_64-apple-darwin -mattr=avx2 | FileCheck -check-prefix=AVX2 %s
+; RUN: llc < %s -mtriple=x86_64-apple-darwin -mattr=avx -mattr=-popcnt | FileCheck -check-prefix=AVX1-NOPOPCNT %s
+; RUN: llc < %s -mtriple=x86_64-apple-darwin -mattr=avx2 -mattr=-popcnt | FileCheck -check-prefix=AVX2-NOPOPCNT %s
+
+; Vector version of:
+; v = v - ((v >> 1) & 0x55555555)
+; v = (v & 0x33333333) + ((v >> 2) & 0x33333333)
+; v = (v + (v >> 4)) & 0xF0F0F0F
+; v = v + (v >> 8)
+; v = v + (v >> 16)
+; v = v + (v >> 32) ; i64 only
+
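
For reference, the ladder above written out as scalar i32 IR (illustrative, not part of the test; the i64 variant adds the final v + (v >> 32) step and masks with 0x7F instead of 0x3F):

    define i32 @popcount32_ref(i32 %v) {
      %t1  = lshr i32 %v, 1
      %m1  = and i32 %t1, 1431655765     ; 0x55555555
      %a   = sub i32 %v, %m1             ; 2-bit partial sums
      %a0  = and i32 %a, 858993459       ; 0x33333333
      %t2  = lshr i32 %a, 2
      %a1  = and i32 %t2, 858993459
      %b   = add i32 %a0, %a1            ; 4-bit partial sums
      %t4  = lshr i32 %b, 4
      %c0  = add i32 %b, %t4
      %c   = and i32 %c0, 252645135      ; 0x0F0F0F0F, 8-bit sums
      %t8  = lshr i32 %c, 8
      %d   = add i32 %c, %t8
      %t16 = lshr i32 %d, 16
      %e   = add i32 %d, %t16
      %r   = and i32 %e, 63              ; the trailing vpand in each test
      ret i32 %r
    }

Each vector test below is this sequence applied per lane, with vpbroadcastd/vpbroadcastq (or a plain vmovdqa constant load on AVX1) supplying the masks.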
+define <8 x i32> @test0(<8 x i32> %x) {
+; AVX2-LABEL: @test0
+entry:
+; AVX2: vpsrld $1, %ymm
+; AVX2-NEXT: vpbroadcastd
+; AVX2-NEXT: vpand
+; AVX2-NEXT: vpsubd
+; AVX2-NEXT: vpbroadcastd
+; AVX2-NEXT: vpand
+; AVX2-NEXT: vpsrld $2
+; AVX2-NEXT: vpand
+; AVX2-NEXT: vpaddd
+; AVX2-NEXT: vpsrld $4
+; AVX2-NEXT: vpaddd
+; AVX2-NEXT: vpbroadcastd
+; AVX2-NEXT: vpand
+; AVX2-NEXT: vpsrld $8
+; AVX2-NEXT: vpaddd
+; AVX2-NEXT: vpsrld $16
+; AVX2-NEXT: vpaddd
+; AVX2-NEXT: vpbroadcastd
+; AVX2-NEXT: vpand
+ %y = call <8 x i32> @llvm.ctpop.v8i32(<8 x i32> %x)
+ ret <8 x i32> %y
+}
+
+define <4 x i64> @test1(<4 x i64> %x) {
+; AVX2-NOPOPCNT-LABEL: @test1
+entry:
+; AVX2-NOPOPCNT: vpsrlq $1, %ymm
+; AVX2-NOPOPCNT-NEXT: vpbroadcastq
+; AVX2-NOPOPCNT-NEXT: vpand
+; AVX2-NOPOPCNT-NEXT: vpsubq
+; AVX2-NOPOPCNT-NEXT: vpbroadcastq
+; AVX2-NOPOPCNT-NEXT: vpand
+; AVX2-NOPOPCNT-NEXT: vpsrlq $2
+; AVX2-NOPOPCNT-NEXT: vpand
+; AVX2-NOPOPCNT-NEXT: vpaddq
+; AVX2-NOPOPCNT-NEXT: vpsrlq $4
+; AVX2-NOPOPCNT-NEXT: vpaddq
+; AVX2-NOPOPCNT-NEXT: vpbroadcastq
+; AVX2-NOPOPCNT-NEXT: vpand
+; AVX2-NOPOPCNT-NEXT: vpsrlq $8
+; AVX2-NOPOPCNT-NEXT: vpaddq
+; AVX2-NOPOPCNT-NEXT: vpsrlq $16
+; AVX2-NOPOPCNT-NEXT: vpaddq
+; AVX2-NOPOPCNT-NEXT: vpsrlq $32
+; AVX2-NOPOPCNT-NEXT: vpaddq
+; AVX2-NOPOPCNT-NEXT: vpbroadcastq
+; AVX2-NOPOPCNT-NEXT: vpand
+ %y = call <4 x i64> @llvm.ctpop.v4i64(<4 x i64> %x)
+ ret <4 x i64> %y
+}
+
+define <4 x i32> @test2(<4 x i32> %x) {
+; AVX2-NOPOPCNT-LABEL: @test2
+; AVX1-NOPOPCNT-LABEL: @test2
+entry:
+; AVX2-NOPOPCNT: vpsrld $1, %xmm
+; AVX2-NOPOPCNT-NEXT: vpbroadcastd
+; AVX2-NOPOPCNT-NEXT: vpand
+; AVX2-NOPOPCNT-NEXT: vpsubd
+; AVX2-NOPOPCNT-NEXT: vpbroadcastd
+; AVX2-NOPOPCNT-NEXT: vpand
+; AVX2-NOPOPCNT-NEXT: vpsrld $2
+; AVX2-NOPOPCNT-NEXT: vpand
+; AVX2-NOPOPCNT-NEXT: vpaddd
+; AVX2-NOPOPCNT-NEXT: vpsrld $4
+; AVX2-NOPOPCNT-NEXT: vpaddd
+; AVX2-NOPOPCNT-NEXT: vpbroadcastd
+; AVX2-NOPOPCNT-NEXT: vpand
+; AVX2-NOPOPCNT-NEXT: vpsrld $8
+; AVX2-NOPOPCNT-NEXT: vpaddd
+; AVX2-NOPOPCNT-NEXT: vpsrld $16
+; AVX2-NOPOPCNT-NEXT: vpaddd
+; AVX2-NOPOPCNT-NEXT: vpbroadcastd
+; AVX2-NOPOPCNT-NEXT: vpand
+; AVX1-NOPOPCNT: vpsrld $1, %xmm
+; AVX1-NOPOPCNT-NEXT: vpand
+; AVX1-NOPOPCNT-NEXT: vpsubd
+; AVX1-NOPOPCNT-NEXT: vmovdqa
+; AVX1-NOPOPCNT-NEXT: vpand
+; AVX1-NOPOPCNT-NEXT: vpsrld $2
+; AVX1-NOPOPCNT-NEXT: vpand
+; AVX1-NOPOPCNT-NEXT: vpaddd
+; AVX1-NOPOPCNT-NEXT: vpsrld $4
+; AVX1-NOPOPCNT-NEXT: vpaddd
+; AVX1-NOPOPCNT-NEXT: vpand
+; AVX1-NOPOPCNT-NEXT: vpsrld $8
+; AVX1-NOPOPCNT-NEXT: vpaddd
+; AVX1-NOPOPCNT-NEXT: vpsrld $16
+; AVX1-NOPOPCNT-NEXT: vpaddd
+; AVX1-NOPOPCNT-NEXT: vpand
+ %y = call <4 x i32> @llvm.ctpop.v4i32(<4 x i32> %x)
+ ret <4 x i32> %y
+}
+
+define <2 x i64> @test3(<2 x i64> %x) {
+; AVX2-NOPOPCNT-LABEL: @test3
+; AVX1-NOPOPCNT-LABEL: @test3
+entry:
+; AVX2-NOPOPCNT: vpsrlq $1, %xmm
+; AVX2-NOPOPCNT-NEXT: vpand
+; AVX2-NOPOPCNT-NEXT: vpsubq
+; AVX2-NOPOPCNT-NEXT: vmovdqa
+; AVX2-NOPOPCNT-NEXT: vpand
+; AVX2-NOPOPCNT-NEXT: vpsrlq $2
+; AVX2-NOPOPCNT-NEXT: vpand
+; AVX2-NOPOPCNT-NEXT: vpaddq
+; AVX2-NOPOPCNT-NEXT: vpsrlq $4
+; AVX2-NOPOPCNT-NEXT: vpaddq
+; AVX2-NOPOPCNT-NEXT: vpand
+; AVX2-NOPOPCNT-NEXT: vpsrlq $8
+; AVX2-NOPOPCNT-NEXT: vpaddq
+; AVX2-NOPOPCNT-NEXT: vpsrlq $16
+; AVX2-NOPOPCNT-NEXT: vpaddq
+; AVX2-NOPOPCNT-NEXT: vpsrlq $32
+; AVX2-NOPOPCNT-NEXT: vpaddq
+; AVX2-NOPOPCNT-NEXT: vpand
+; AVX1-NOPOPCNT: vpsrlq $1, %xmm
+; AVX1-NOPOPCNT-NEXT: vpand
+; AVX1-NOPOPCNT-NEXT: vpsubq
+; AVX1-NOPOPCNT-NEXT: vmovdqa
+; AVX1-NOPOPCNT-NEXT: vpand
+; AVX1-NOPOPCNT-NEXT: vpsrlq $2
+; AVX1-NOPOPCNT-NEXT: vpand
+; AVX1-NOPOPCNT-NEXT: vpaddq
+; AVX1-NOPOPCNT-NEXT: vpsrlq $4
+; AVX1-NOPOPCNT-NEXT: vpaddq
+; AVX1-NOPOPCNT-NEXT: vpand
+; AVX1-NOPOPCNT-NEXT: vpsrlq $8
+; AVX1-NOPOPCNT-NEXT: vpaddq
+; AVX1-NOPOPCNT-NEXT: vpsrlq $16
+; AVX1-NOPOPCNT-NEXT: vpaddq
+; AVX1-NOPOPCNT-NEXT: vpsrlq $32
+; AVX1-NOPOPCNT-NEXT: vpaddq
+; AVX1-NOPOPCNT-NEXT: vpand
+ %y = call <2 x i64> @llvm.ctpop.v2i64(<2 x i64> %x)
+ ret <2 x i64> %y
+}
+
+declare <4 x i32> @llvm.ctpop.v4i32(<4 x i32>)
+declare <2 x i64> @llvm.ctpop.v2i64(<2 x i64>)
+
+declare <8 x i32> @llvm.ctpop.v8i32(<8 x i32>)
+declare <4 x i64> @llvm.ctpop.v4i64(<4 x i64>)
+
diff --git a/test/CodeGen/X86/vector-idiv.ll b/test/CodeGen/X86/vector-idiv.ll
index 4b269dc..06ce543 100644
--- a/test/CodeGen/X86/vector-idiv.ll
+++ b/test/CodeGen/X86/vector-idiv.ll
@@ -8,16 +8,15 @@ define <4 x i32> @test1(<4 x i32> %a) {
; SSE41-LABEL: test1:
; SSE41: # BB#0:
; SSE41-NEXT: movdqa {{.*#+}} xmm1 = [613566757,613566757,613566757,613566757]
-; SSE41-NEXT: movdqa %xmm0, %xmm2
-; SSE41-NEXT: pmuludq %xmm1, %xmm2
-; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
+; SSE41-NEXT: pshufd {{.*#+}} xmm2 = xmm1[1,1,3,3]
; SSE41-NEXT: pshufd {{.*#+}} xmm3 = xmm0[1,1,3,3]
-; SSE41-NEXT: pmuludq %xmm1, %xmm3
-; SSE41-NEXT: shufps {{.*#+}} xmm2 = xmm2[1,3],xmm3[1,3]
-; SSE41-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,2,1,3]
-; SSE41-NEXT: psubd %xmm2, %xmm0
+; SSE41-NEXT: pmuludq %xmm2, %xmm3
+; SSE41-NEXT: pmuludq %xmm0, %xmm1
+; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
+; SSE41-NEXT: pblendw {{.*#+}} xmm1 = xmm1[0,1],xmm3[2,3],xmm1[4,5],xmm3[6,7]
+; SSE41-NEXT: psubd %xmm1, %xmm0
; SSE41-NEXT: psrld $1, %xmm0
-; SSE41-NEXT: paddd %xmm2, %xmm0
+; SSE41-NEXT: paddd %xmm1, %xmm0
; SSE41-NEXT: psrld $2, %xmm0
; SSE41-NEXT: retq
;
@@ -26,11 +25,12 @@ define <4 x i32> @test1(<4 x i32> %a) {
; SSE-NEXT: movdqa {{.*#+}} xmm1 = [613566757,613566757,613566757,613566757]
; SSE-NEXT: movdqa %xmm0, %xmm2
; SSE-NEXT: pmuludq %xmm1, %xmm2
+; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[1,3,2,3]
; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm0[1,1,3,3]
; SSE-NEXT: pmuludq %xmm1, %xmm3
-; SSE-NEXT: shufps {{.*#+}} xmm2 = xmm2[1,3],xmm3[1,3]
-; SSE-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,2,1,3]
+; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm3[1,3,2,3]
+; SSE-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
; SSE-NEXT: psubd %xmm2, %xmm0
; SSE-NEXT: psrld $1, %xmm0
; SSE-NEXT: paddd %xmm2, %xmm0
@@ -40,12 +40,12 @@ define <4 x i32> @test1(<4 x i32> %a) {
; AVX-LABEL: test1:
; AVX: # BB#0:
; AVX-NEXT: vpbroadcastd {{.*}}(%rip), %xmm1
-; AVX-NEXT: vpmuludq %xmm1, %xmm0, %xmm2
-; AVX-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
+; AVX-NEXT: vpshufd {{.*#+}} xmm2 = xmm1[1,1,3,3]
; AVX-NEXT: vpshufd {{.*#+}} xmm3 = xmm0[1,1,3,3]
-; AVX-NEXT: vpmuludq %xmm1, %xmm3, %xmm1
-; AVX-NEXT: vshufps {{.*#+}} xmm1 = xmm2[1,3],xmm1[1,3]
-; AVX-NEXT: vshufps {{.*#+}} xmm1 = xmm1[0,2,1,3]
+; AVX-NEXT: vpmuludq %xmm2, %xmm3, %xmm2
+; AVX-NEXT: vpmuludq %xmm1, %xmm0, %xmm1
+; AVX-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
+; AVX-NEXT: vpblendd {{.*#+}} xmm1 = xmm1[0],xmm2[1],xmm1[2],xmm2[3]
; AVX-NEXT: vpsubd %xmm1, %xmm0, %xmm0
; AVX-NEXT: vpsrld $1, %xmm0, %xmm0
; AVX-NEXT: vpaddd %xmm1, %xmm0, %xmm0
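
The constant 613566757 here is 0x24924925, the unsigned magic multiplier for division by 7: 7 * 0x24924925 = 2^32 + 3, so the quotient is not directly the high half of the multiply, and the psubd / psrld $1 / paddd / psrld $2 tail is the standard overflow fixup. A scalar rendering of what each vector lane computes (illustrative only; the signed tests below use 2454267027 = 0x92492493 the same way, plus psrad/pand sign corrections):

    define i32 @udiv7_magic(i32 %n) {
      %n64 = zext i32 %n to i64
      %m   = mul i64 %n64, 613566757     ; n * 0x24924925
      %hi  = lshr i64 %m, 32
      %q0  = trunc i64 %hi to i32        ; mulhi(n, magic): the pmuludq part
      %t   = sub i32 %n, %q0             ; psubd
      %t1  = lshr i32 %t, 1              ; psrld $1
      %t2  = add i32 %t1, %q0            ; paddd
      %q   = lshr i32 %t2, 2             ; psrld $2; %q == n / 7
      ret i32 %q
    }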
@@ -59,22 +59,22 @@ define <8 x i32> @test2(<8 x i32> %a) {
; SSE41-LABEL: test2:
; SSE41: # BB#0:
; SSE41-NEXT: movdqa {{.*#+}} xmm2 = [613566757,613566757,613566757,613566757]
-; SSE41-NEXT: movdqa %xmm0, %xmm3
-; SSE41-NEXT: pmuludq %xmm2, %xmm3
-; SSE41-NEXT: pshufd {{.*#+}} xmm4 = xmm2[1,1,3,3]
-; SSE41-NEXT: pshufd {{.*#+}} xmm5 = xmm0[1,1,3,3]
-; SSE41-NEXT: pmuludq %xmm4, %xmm5
-; SSE41-NEXT: shufps {{.*#+}} xmm3 = xmm3[1,3],xmm5[1,3]
-; SSE41-NEXT: shufps {{.*#+}} xmm3 = xmm3[0,2,1,3]
-; SSE41-NEXT: psubd %xmm3, %xmm0
+; SSE41-NEXT: pshufd {{.*#+}} xmm3 = xmm2[1,1,3,3]
+; SSE41-NEXT: pshufd {{.*#+}} xmm4 = xmm0[1,1,3,3]
+; SSE41-NEXT: pmuludq %xmm3, %xmm4
+; SSE41-NEXT: movdqa %xmm0, %xmm5
+; SSE41-NEXT: pmuludq %xmm2, %xmm5
+; SSE41-NEXT: pshufd {{.*#+}} xmm5 = xmm5[1,1,3,3]
+; SSE41-NEXT: pblendw {{.*#+}} xmm5 = xmm5[0,1],xmm4[2,3],xmm5[4,5],xmm4[6,7]
+; SSE41-NEXT: psubd %xmm5, %xmm0
; SSE41-NEXT: psrld $1, %xmm0
-; SSE41-NEXT: paddd %xmm3, %xmm0
+; SSE41-NEXT: paddd %xmm5, %xmm0
; SSE41-NEXT: psrld $2, %xmm0
+; SSE41-NEXT: pshufd {{.*#+}} xmm4 = xmm1[1,1,3,3]
+; SSE41-NEXT: pmuludq %xmm3, %xmm4
; SSE41-NEXT: pmuludq %xmm1, %xmm2
-; SSE41-NEXT: pshufd {{.*#+}} xmm3 = xmm1[1,1,3,3]
-; SSE41-NEXT: pmuludq %xmm4, %xmm3
-; SSE41-NEXT: shufps {{.*#+}} xmm2 = xmm2[1,3],xmm3[1,3]
-; SSE41-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,2,1,3]
+; SSE41-NEXT: pshufd {{.*#+}} xmm2 = xmm2[1,1,3,3]
+; SSE41-NEXT: pblendw {{.*#+}} xmm2 = xmm2[0,1],xmm4[2,3],xmm2[4,5],xmm4[6,7]
; SSE41-NEXT: psubd %xmm2, %xmm1
; SSE41-NEXT: psrld $1, %xmm1
; SSE41-NEXT: paddd %xmm2, %xmm1
@@ -86,20 +86,22 @@ define <8 x i32> @test2(<8 x i32> %a) {
; SSE-NEXT: movdqa {{.*#+}} xmm2 = [613566757,613566757,613566757,613566757]
; SSE-NEXT: movdqa %xmm0, %xmm3
; SSE-NEXT: pmuludq %xmm2, %xmm3
+; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm3[1,3,2,3]
; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm2[1,1,3,3]
; SSE-NEXT: pshufd {{.*#+}} xmm5 = xmm0[1,1,3,3]
; SSE-NEXT: pmuludq %xmm4, %xmm5
-; SSE-NEXT: shufps {{.*#+}} xmm3 = xmm3[1,3],xmm5[1,3]
-; SSE-NEXT: shufps {{.*#+}} xmm3 = xmm3[0,2,1,3]
+; SSE-NEXT: pshufd {{.*#+}} xmm5 = xmm5[1,3,2,3]
+; SSE-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm5[0],xmm3[1],xmm5[1]
; SSE-NEXT: psubd %xmm3, %xmm0
; SSE-NEXT: psrld $1, %xmm0
; SSE-NEXT: paddd %xmm3, %xmm0
; SSE-NEXT: psrld $2, %xmm0
; SSE-NEXT: pmuludq %xmm1, %xmm2
+; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[1,3,2,3]
; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm1[1,1,3,3]
; SSE-NEXT: pmuludq %xmm4, %xmm3
-; SSE-NEXT: shufps {{.*#+}} xmm2 = xmm2[1,3],xmm3[1,3]
-; SSE-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,2,1,3]
+; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm3[1,3,2,3]
+; SSE-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1]
; SSE-NEXT: psubd %xmm2, %xmm1
; SSE-NEXT: psrld $1, %xmm1
; SSE-NEXT: paddd %xmm2, %xmm1
@@ -822,14 +824,13 @@ define <16 x i8> @test7(<16 x i8> %a) {
define <4 x i32> @test8(<4 x i32> %a) {
; SSE41-LABEL: test8:
; SSE41: # BB#0:
-; SSE41-NEXT: movdqa {{.*#+}} xmm2 = [2454267027,2454267027,2454267027,2454267027]
-; SSE41-NEXT: movdqa %xmm0, %xmm1
-; SSE41-NEXT: pmuldq %xmm2, %xmm1
-; SSE41-NEXT: pshufd {{.*#+}} xmm2 = xmm2[1,1,3,3]
+; SSE41-NEXT: movdqa {{.*#+}} xmm1 = [2454267027,2454267027,2454267027,2454267027]
+; SSE41-NEXT: pshufd {{.*#+}} xmm2 = xmm1[1,1,3,3]
; SSE41-NEXT: pshufd {{.*#+}} xmm3 = xmm0[1,1,3,3]
; SSE41-NEXT: pmuldq %xmm2, %xmm3
-; SSE41-NEXT: shufps {{.*#+}} xmm1 = xmm1[1,3],xmm3[1,3]
-; SSE41-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,2,1,3]
+; SSE41-NEXT: pmuldq %xmm0, %xmm1
+; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
+; SSE41-NEXT: pblendw {{.*#+}} xmm1 = xmm1[0,1],xmm3[2,3],xmm1[4,5],xmm3[6,7]
; SSE41-NEXT: paddd %xmm0, %xmm1
; SSE41-NEXT: movdqa %xmm1, %xmm0
; SSE41-NEXT: psrld $31, %xmm0
@@ -840,22 +841,22 @@ define <4 x i32> @test8(<4 x i32> %a) {
;
; SSE-LABEL: test8:
; SSE: # BB#0:
-; SSE-NEXT: movdqa {{.*#+}} xmm2 = [2454267027,2454267027,2454267027,2454267027]
-; SSE-NEXT: movdqa %xmm2, %xmm1
+; SSE-NEXT: movdqa {{.*#+}} xmm1 = [2454267027,2454267027,2454267027,2454267027]
+; SSE-NEXT: movdqa %xmm0, %xmm2
+; SSE-NEXT: psrad $31, %xmm2
+; SSE-NEXT: pand %xmm1, %xmm2
+; SSE-NEXT: movdqa %xmm0, %xmm3
+; SSE-NEXT: pmuludq %xmm1, %xmm3
+; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm1[1,1,3,3]
; SSE-NEXT: psrad $31, %xmm1
; SSE-NEXT: pand %xmm0, %xmm1
-; SSE-NEXT: movdqa %xmm0, %xmm3
-; SSE-NEXT: psrad $31, %xmm3
-; SSE-NEXT: pand %xmm2, %xmm3
-; SSE-NEXT: paddd %xmm1, %xmm3
-; SSE-NEXT: movdqa %xmm0, %xmm1
-; SSE-NEXT: pmuludq %xmm2, %xmm1
-; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[1,1,3,3]
-; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm0[1,1,3,3]
-; SSE-NEXT: pmuludq %xmm2, %xmm4
-; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[1,3],xmm4[1,3]
-; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,2,1,3]
-; SSE-NEXT: psubd %xmm3, %xmm1
+; SSE-NEXT: paddd %xmm1, %xmm2
+; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm3[1,3,2,3]
+; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm0[1,1,3,3]
+; SSE-NEXT: pmuludq %xmm4, %xmm3
+; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm3[1,3,2,3]
+; SSE-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm3[0],xmm1[1],xmm3[1]
+; SSE-NEXT: psubd %xmm2, %xmm1
; SSE-NEXT: paddd %xmm0, %xmm1
; SSE-NEXT: movdqa %xmm1, %xmm0
; SSE-NEXT: psrld $31, %xmm0
@@ -867,12 +868,12 @@ define <4 x i32> @test8(<4 x i32> %a) {
; AVX-LABEL: test8:
; AVX: # BB#0:
; AVX-NEXT: vpbroadcastd {{.*}}(%rip), %xmm1
-; AVX-NEXT: vpmuldq %xmm1, %xmm0, %xmm2
-; AVX-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
+; AVX-NEXT: vpshufd {{.*#+}} xmm2 = xmm1[1,1,3,3]
; AVX-NEXT: vpshufd {{.*#+}} xmm3 = xmm0[1,1,3,3]
-; AVX-NEXT: vpmuldq %xmm1, %xmm3, %xmm1
-; AVX-NEXT: vshufps {{.*#+}} xmm1 = xmm2[1,3],xmm1[1,3]
-; AVX-NEXT: vshufps {{.*#+}} xmm1 = xmm1[0,2,1,3]
+; AVX-NEXT: vpmuldq %xmm2, %xmm3, %xmm2
+; AVX-NEXT: vpmuldq %xmm1, %xmm0, %xmm1
+; AVX-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
+; AVX-NEXT: vpblendd {{.*#+}} xmm1 = xmm1[0],xmm2[1],xmm1[2],xmm2[3]
; AVX-NEXT: vpaddd %xmm0, %xmm1, %xmm0
; AVX-NEXT: vpsrld $31, %xmm0, %xmm1
; AVX-NEXT: vpsrad $2, %xmm0, %xmm0
@@ -885,75 +886,77 @@ define <4 x i32> @test8(<4 x i32> %a) {
define <8 x i32> @test9(<8 x i32> %a) {
; SSE41-LABEL: test9:
; SSE41: # BB#0:
-; SSE41-NEXT: movdqa %xmm1, %xmm2
-; SSE41-NEXT: movdqa %xmm0, %xmm3
-; SSE41-NEXT: movdqa {{.*#+}} xmm1 = [2454267027,2454267027,2454267027,2454267027]
-; SSE41-NEXT: # kill: XMM0<def> XMM3<kill>
-; SSE41-NEXT: pmuldq %xmm1, %xmm0
-; SSE41-NEXT: pshufd {{.*#+}} xmm4 = xmm1[1,1,3,3]
-; SSE41-NEXT: pshufd {{.*#+}} xmm5 = xmm3[1,1,3,3]
+; SSE41-NEXT: movdqa {{.*#+}} xmm3 = [2454267027,2454267027,2454267027,2454267027]
+; SSE41-NEXT: pshufd {{.*#+}} xmm4 = xmm3[1,1,3,3]
+; SSE41-NEXT: pshufd {{.*#+}} xmm5 = xmm0[1,1,3,3]
; SSE41-NEXT: pmuldq %xmm4, %xmm5
-; SSE41-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,3],xmm5[1,3]
-; SSE41-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,2,1,3]
-; SSE41-NEXT: paddd %xmm3, %xmm0
-; SSE41-NEXT: movdqa %xmm0, %xmm3
-; SSE41-NEXT: psrld $31, %xmm3
-; SSE41-NEXT: psrad $2, %xmm0
-; SSE41-NEXT: paddd %xmm3, %xmm0
-; SSE41-NEXT: pmuldq %xmm2, %xmm1
-; SSE41-NEXT: pshufd {{.*#+}} xmm3 = xmm2[1,1,3,3]
-; SSE41-NEXT: pmuldq %xmm4, %xmm3
-; SSE41-NEXT: shufps {{.*#+}} xmm1 = xmm1[1,3],xmm3[1,3]
-; SSE41-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,2,1,3]
-; SSE41-NEXT: paddd %xmm2, %xmm1
-; SSE41-NEXT: movdqa %xmm1, %xmm2
-; SSE41-NEXT: psrld $31, %xmm2
-; SSE41-NEXT: psrad $2, %xmm1
-; SSE41-NEXT: paddd %xmm2, %xmm1
+; SSE41-NEXT: movdqa %xmm0, %xmm2
+; SSE41-NEXT: pmuldq %xmm3, %xmm2
+; SSE41-NEXT: pshufd {{.*#+}} xmm2 = xmm2[1,1,3,3]
+; SSE41-NEXT: pblendw {{.*#+}} xmm2 = xmm2[0,1],xmm5[2,3],xmm2[4,5],xmm5[6,7]
+; SSE41-NEXT: paddd %xmm0, %xmm2
+; SSE41-NEXT: movdqa %xmm2, %xmm0
+; SSE41-NEXT: psrld $31, %xmm0
+; SSE41-NEXT: psrad $2, %xmm2
+; SSE41-NEXT: paddd %xmm0, %xmm2
+; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,1,3,3]
+; SSE41-NEXT: pmuldq %xmm4, %xmm0
+; SSE41-NEXT: pmuldq %xmm1, %xmm3
+; SSE41-NEXT: pshufd {{.*#+}} xmm3 = xmm3[1,1,3,3]
+; SSE41-NEXT: pblendw {{.*#+}} xmm3 = xmm3[0,1],xmm0[2,3],xmm3[4,5],xmm0[6,7]
+; SSE41-NEXT: paddd %xmm1, %xmm3
+; SSE41-NEXT: movdqa %xmm3, %xmm0
+; SSE41-NEXT: psrld $31, %xmm0
+; SSE41-NEXT: psrad $2, %xmm3
+; SSE41-NEXT: paddd %xmm0, %xmm3
+; SSE41-NEXT: movdqa %xmm2, %xmm0
+; SSE41-NEXT: movdqa %xmm3, %xmm1
; SSE41-NEXT: retq
;
; SSE-LABEL: test9:
; SSE: # BB#0:
-; SSE-NEXT: movdqa %xmm1, %xmm2
-; SSE-NEXT: movdqa %xmm0, %xmm3
-; SSE-NEXT: movdqa {{.*#+}} xmm1 = [2454267027,2454267027,2454267027,2454267027]
-; SSE-NEXT: movdqa %xmm1, %xmm4
+; SSE-NEXT: movdqa %xmm0, %xmm2
+; SSE-NEXT: movdqa {{.*#+}} xmm3 = [2454267027,2454267027,2454267027,2454267027]
+; SSE-NEXT: movdqa %xmm3, %xmm4
; SSE-NEXT: psrad $31, %xmm4
; SSE-NEXT: movdqa %xmm4, %xmm0
-; SSE-NEXT: pand %xmm3, %xmm0
-; SSE-NEXT: movdqa %xmm3, %xmm5
+; SSE-NEXT: pand %xmm2, %xmm0
+; SSE-NEXT: movdqa %xmm2, %xmm5
; SSE-NEXT: psrad $31, %xmm5
-; SSE-NEXT: pand %xmm1, %xmm5
+; SSE-NEXT: pand %xmm3, %xmm5
; SSE-NEXT: paddd %xmm0, %xmm5
-; SSE-NEXT: movdqa %xmm3, %xmm0
-; SSE-NEXT: pmuludq %xmm1, %xmm0
-; SSE-NEXT: pshufd {{.*#+}} xmm6 = xmm1[1,1,3,3]
-; SSE-NEXT: pshufd {{.*#+}} xmm7 = xmm3[1,1,3,3]
+; SSE-NEXT: movdqa %xmm2, %xmm0
+; SSE-NEXT: pmuludq %xmm3, %xmm0
+; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,3,2,3]
+; SSE-NEXT: pshufd {{.*#+}} xmm6 = xmm3[1,1,3,3]
+; SSE-NEXT: pshufd {{.*#+}} xmm7 = xmm2[1,1,3,3]
; SSE-NEXT: pmuludq %xmm6, %xmm7
-; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,3],xmm7[1,3]
-; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,2,1,3]
+; SSE-NEXT: pshufd {{.*#+}} xmm7 = xmm7[1,3,2,3]
+; SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm7[0],xmm0[1],xmm7[1]
; SSE-NEXT: psubd %xmm5, %xmm0
-; SSE-NEXT: paddd %xmm3, %xmm0
-; SSE-NEXT: movdqa %xmm0, %xmm3
-; SSE-NEXT: psrld $31, %xmm3
-; SSE-NEXT: psrad $2, %xmm0
-; SSE-NEXT: paddd %xmm3, %xmm0
-; SSE-NEXT: pand %xmm2, %xmm4
-; SSE-NEXT: movdqa %xmm2, %xmm3
-; SSE-NEXT: psrad $31, %xmm3
-; SSE-NEXT: pand %xmm1, %xmm3
-; SSE-NEXT: paddd %xmm4, %xmm3
-; SSE-NEXT: pmuludq %xmm2, %xmm1
-; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm2[1,1,3,3]
-; SSE-NEXT: pmuludq %xmm6, %xmm4
-; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[1,3],xmm4[1,3]
-; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,2,1,3]
-; SSE-NEXT: psubd %xmm3, %xmm1
-; SSE-NEXT: paddd %xmm2, %xmm1
-; SSE-NEXT: movdqa %xmm1, %xmm2
+; SSE-NEXT: paddd %xmm2, %xmm0
+; SSE-NEXT: movdqa %xmm0, %xmm2
; SSE-NEXT: psrld $31, %xmm2
-; SSE-NEXT: psrad $2, %xmm1
-; SSE-NEXT: paddd %xmm2, %xmm1
+; SSE-NEXT: psrad $2, %xmm0
+; SSE-NEXT: paddd %xmm2, %xmm0
+; SSE-NEXT: pand %xmm1, %xmm4
+; SSE-NEXT: movdqa %xmm1, %xmm5
+; SSE-NEXT: psrad $31, %xmm5
+; SSE-NEXT: pand %xmm3, %xmm5
+; SSE-NEXT: paddd %xmm4, %xmm5
+; SSE-NEXT: pmuludq %xmm1, %xmm3
+; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm3[1,3,2,3]
+; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm1[1,1,3,3]
+; SSE-NEXT: pmuludq %xmm6, %xmm3
+; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm3[1,3,2,3]
+; SSE-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1]
+; SSE-NEXT: psubd %xmm5, %xmm2
+; SSE-NEXT: paddd %xmm1, %xmm2
+; SSE-NEXT: movdqa %xmm2, %xmm1
+; SSE-NEXT: psrld $31, %xmm1
+; SSE-NEXT: psrad $2, %xmm2
+; SSE-NEXT: paddd %xmm1, %xmm2
+; SSE-NEXT: movdqa %xmm2, %xmm1
; SSE-NEXT: retq
;
; AVX-LABEL: test9:
@@ -978,72 +981,76 @@ define <8 x i32> @test10(<8 x i32> %a) {
; SSE41-LABEL: test10:
; SSE41: # BB#0:
; SSE41-NEXT: movdqa {{.*#+}} xmm2 = [613566757,613566757,613566757,613566757]
-; SSE41-NEXT: movdqa %xmm0, %xmm3
-; SSE41-NEXT: pmuludq %xmm2, %xmm3
-; SSE41-NEXT: pshufd {{.*#+}} xmm4 = xmm2[1,1,3,3]
-; SSE41-NEXT: pshufd {{.*#+}} xmm5 = xmm0[1,1,3,3]
-; SSE41-NEXT: pmuludq %xmm4, %xmm5
-; SSE41-NEXT: shufps {{.*#+}} xmm3 = xmm3[1,3],xmm5[1,3]
-; SSE41-NEXT: shufps {{.*#+}} xmm3 = xmm3[0,2,1,3]
+; SSE41-NEXT: pshufd {{.*#+}} xmm3 = xmm2[1,1,3,3]
+; SSE41-NEXT: pshufd {{.*#+}} xmm4 = xmm0[1,1,3,3]
+; SSE41-NEXT: pmuludq %xmm3, %xmm4
; SSE41-NEXT: movdqa %xmm0, %xmm5
-; SSE41-NEXT: psubd %xmm3, %xmm5
-; SSE41-NEXT: psrld $1, %xmm5
-; SSE41-NEXT: paddd %xmm3, %xmm5
-; SSE41-NEXT: psrld $2, %xmm5
-; SSE41-NEXT: movdqa {{.*#+}} xmm3 = [7,7,7,7]
-; SSE41-NEXT: pmulld %xmm3, %xmm5
-; SSE41-NEXT: psubd %xmm5, %xmm0
-; SSE41-NEXT: pmuludq %xmm1, %xmm2
-; SSE41-NEXT: pshufd {{.*#+}} xmm5 = xmm1[1,1,3,3]
-; SSE41-NEXT: pmuludq %xmm4, %xmm5
-; SSE41-NEXT: shufps {{.*#+}} xmm2 = xmm2[1,3],xmm5[1,3]
-; SSE41-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,2,1,3]
-; SSE41-NEXT: movdqa %xmm1, %xmm4
-; SSE41-NEXT: psubd %xmm2, %xmm4
+; SSE41-NEXT: pmuludq %xmm2, %xmm5
+; SSE41-NEXT: pshufd {{.*#+}} xmm5 = xmm5[1,1,3,3]
+; SSE41-NEXT: pblendw {{.*#+}} xmm5 = xmm5[0,1],xmm4[2,3],xmm5[4,5],xmm4[6,7]
+; SSE41-NEXT: movdqa %xmm0, %xmm4
+; SSE41-NEXT: psubd %xmm5, %xmm4
; SSE41-NEXT: psrld $1, %xmm4
-; SSE41-NEXT: paddd %xmm2, %xmm4
+; SSE41-NEXT: paddd %xmm5, %xmm4
; SSE41-NEXT: psrld $2, %xmm4
-; SSE41-NEXT: pmulld %xmm3, %xmm4
-; SSE41-NEXT: psubd %xmm4, %xmm1
+; SSE41-NEXT: movdqa {{.*#+}} xmm5 = [7,7,7,7]
+; SSE41-NEXT: pmulld %xmm5, %xmm4
+; SSE41-NEXT: psubd %xmm4, %xmm0
+; SSE41-NEXT: pshufd {{.*#+}} xmm4 = xmm1[1,1,3,3]
+; SSE41-NEXT: pmuludq %xmm3, %xmm4
+; SSE41-NEXT: pmuludq %xmm1, %xmm2
+; SSE41-NEXT: pshufd {{.*#+}} xmm2 = xmm2[1,1,3,3]
+; SSE41-NEXT: pblendw {{.*#+}} xmm2 = xmm2[0,1],xmm4[2,3],xmm2[4,5],xmm4[6,7]
+; SSE41-NEXT: movdqa %xmm1, %xmm3
+; SSE41-NEXT: psubd %xmm2, %xmm3
+; SSE41-NEXT: psrld $1, %xmm3
+; SSE41-NEXT: paddd %xmm2, %xmm3
+; SSE41-NEXT: psrld $2, %xmm3
+; SSE41-NEXT: pmulld %xmm5, %xmm3
+; SSE41-NEXT: psubd %xmm3, %xmm1
; SSE41-NEXT: retq
;
; SSE-LABEL: test10:
; SSE: # BB#0:
-; SSE-NEXT: movdqa {{.*#+}} xmm2 = [613566757,613566757,613566757,613566757]
-; SSE-NEXT: movdqa %xmm0, %xmm3
-; SSE-NEXT: pmuludq %xmm2, %xmm3
-; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm2[1,1,3,3]
+; SSE-NEXT: movdqa {{.*#+}} xmm3 = [613566757,613566757,613566757,613566757]
+; SSE-NEXT: movdqa %xmm0, %xmm2
+; SSE-NEXT: pmuludq %xmm3, %xmm2
+; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[1,3,2,3]
+; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm3[1,1,3,3]
; SSE-NEXT: pshufd {{.*#+}} xmm5 = xmm0[1,1,3,3]
; SSE-NEXT: pmuludq %xmm4, %xmm5
-; SSE-NEXT: shufps {{.*#+}} xmm3 = xmm3[1,3],xmm5[1,3]
-; SSE-NEXT: shufps {{.*#+}} xmm3 = xmm3[0,2,1,3]
+; SSE-NEXT: pshufd {{.*#+}} xmm5 = xmm5[1,3,2,3]
+; SSE-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm5[0],xmm2[1],xmm5[1]
; SSE-NEXT: movdqa %xmm0, %xmm5
-; SSE-NEXT: psubd %xmm3, %xmm5
+; SSE-NEXT: psubd %xmm2, %xmm5
; SSE-NEXT: psrld $1, %xmm5
-; SSE-NEXT: paddd %xmm3, %xmm5
+; SSE-NEXT: paddd %xmm2, %xmm5
; SSE-NEXT: psrld $2, %xmm5
-; SSE-NEXT: movdqa {{.*#+}} xmm3 = [7,7,7,7]
+; SSE-NEXT: movdqa {{.*#+}} xmm2 = [7,7,7,7]
; SSE-NEXT: pshufd {{.*#+}} xmm6 = xmm5[1,1,3,3]
-; SSE-NEXT: pmuludq %xmm3, %xmm5
-; SSE-NEXT: pmuludq %xmm3, %xmm6
-; SSE-NEXT: shufps {{.*#+}} xmm5 = xmm5[0,2],xmm6[0,2]
-; SSE-NEXT: shufps {{.*#+}} xmm5 = xmm5[0,2,1,3]
+; SSE-NEXT: pmuludq %xmm2, %xmm5
+; SSE-NEXT: pshufd {{.*#+}} xmm5 = xmm5[0,2,2,3]
+; SSE-NEXT: pmuludq %xmm2, %xmm6
+; SSE-NEXT: pshufd {{.*#+}} xmm6 = xmm6[0,2,2,3]
+; SSE-NEXT: punpckldq {{.*#+}} xmm5 = xmm5[0],xmm6[0],xmm5[1],xmm6[1]
; SSE-NEXT: psubd %xmm5, %xmm0
-; SSE-NEXT: pmuludq %xmm1, %xmm2
+; SSE-NEXT: pmuludq %xmm1, %xmm3
+; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm3[1,3,2,3]
; SSE-NEXT: pshufd {{.*#+}} xmm5 = xmm1[1,1,3,3]
; SSE-NEXT: pmuludq %xmm4, %xmm5
-; SSE-NEXT: shufps {{.*#+}} xmm2 = xmm2[1,3],xmm5[1,3]
-; SSE-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,2,1,3]
+; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm5[1,3,2,3]
+; SSE-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm4[0],xmm3[1],xmm4[1]
; SSE-NEXT: movdqa %xmm1, %xmm4
-; SSE-NEXT: psubd %xmm2, %xmm4
+; SSE-NEXT: psubd %xmm3, %xmm4
; SSE-NEXT: psrld $1, %xmm4
-; SSE-NEXT: paddd %xmm2, %xmm4
+; SSE-NEXT: paddd %xmm3, %xmm4
; SSE-NEXT: psrld $2, %xmm4
-; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm4[1,1,3,3]
-; SSE-NEXT: pmuludq %xmm3, %xmm4
-; SSE-NEXT: pmuludq %xmm3, %xmm2
-; SSE-NEXT: shufps {{.*#+}} xmm4 = xmm4[0,2],xmm2[0,2]
-; SSE-NEXT: shufps {{.*#+}} xmm4 = xmm4[0,2,1,3]
+; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm4[1,1,3,3]
+; SSE-NEXT: pmuludq %xmm2, %xmm4
+; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm4[0,2,2,3]
+; SSE-NEXT: pmuludq %xmm2, %xmm3
+; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm3[0,2,2,3]
+; SSE-NEXT: punpckldq {{.*#+}} xmm4 = xmm4[0],xmm2[0],xmm4[1],xmm2[1]
; SSE-NEXT: psubd %xmm4, %xmm1
; SSE-NEXT: retq
;
@@ -1072,32 +1079,32 @@ define <8 x i32> @test11(<8 x i32> %a) {
; SSE41-LABEL: test11:
; SSE41: # BB#0:
; SSE41-NEXT: movdqa {{.*#+}} xmm2 = [2454267027,2454267027,2454267027,2454267027]
-; SSE41-NEXT: movdqa %xmm0, %xmm3
-; SSE41-NEXT: pmuldq %xmm2, %xmm3
-; SSE41-NEXT: pshufd {{.*#+}} xmm4 = xmm2[1,1,3,3]
-; SSE41-NEXT: pshufd {{.*#+}} xmm5 = xmm0[1,1,3,3]
-; SSE41-NEXT: pmuldq %xmm4, %xmm5
-; SSE41-NEXT: shufps {{.*#+}} xmm3 = xmm3[1,3],xmm5[1,3]
-; SSE41-NEXT: shufps {{.*#+}} xmm3 = xmm3[0,2,1,3]
-; SSE41-NEXT: paddd %xmm0, %xmm3
-; SSE41-NEXT: movdqa %xmm3, %xmm5
-; SSE41-NEXT: psrld $31, %xmm5
-; SSE41-NEXT: psrad $2, %xmm3
-; SSE41-NEXT: paddd %xmm5, %xmm3
-; SSE41-NEXT: movdqa {{.*#+}} xmm5 = [7,7,7,7]
-; SSE41-NEXT: pmulld %xmm5, %xmm3
-; SSE41-NEXT: psubd %xmm3, %xmm0
+; SSE41-NEXT: pshufd {{.*#+}} xmm3 = xmm2[1,1,3,3]
+; SSE41-NEXT: pshufd {{.*#+}} xmm4 = xmm0[1,1,3,3]
+; SSE41-NEXT: pmuldq %xmm3, %xmm4
+; SSE41-NEXT: movdqa %xmm0, %xmm5
+; SSE41-NEXT: pmuldq %xmm2, %xmm5
+; SSE41-NEXT: pshufd {{.*#+}} xmm5 = xmm5[1,1,3,3]
+; SSE41-NEXT: pblendw {{.*#+}} xmm5 = xmm5[0,1],xmm4[2,3],xmm5[4,5],xmm4[6,7]
+; SSE41-NEXT: paddd %xmm0, %xmm5
+; SSE41-NEXT: movdqa %xmm5, %xmm4
+; SSE41-NEXT: psrld $31, %xmm4
+; SSE41-NEXT: psrad $2, %xmm5
+; SSE41-NEXT: paddd %xmm4, %xmm5
+; SSE41-NEXT: movdqa {{.*#+}} xmm4 = [7,7,7,7]
+; SSE41-NEXT: pmulld %xmm4, %xmm5
+; SSE41-NEXT: psubd %xmm5, %xmm0
+; SSE41-NEXT: pshufd {{.*#+}} xmm5 = xmm1[1,1,3,3]
+; SSE41-NEXT: pmuldq %xmm3, %xmm5
; SSE41-NEXT: pmuldq %xmm1, %xmm2
-; SSE41-NEXT: pshufd {{.*#+}} xmm3 = xmm1[1,1,3,3]
-; SSE41-NEXT: pmuldq %xmm4, %xmm3
-; SSE41-NEXT: shufps {{.*#+}} xmm2 = xmm2[1,3],xmm3[1,3]
-; SSE41-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,2,1,3]
+; SSE41-NEXT: pshufd {{.*#+}} xmm2 = xmm2[1,1,3,3]
+; SSE41-NEXT: pblendw {{.*#+}} xmm2 = xmm2[0,1],xmm5[2,3],xmm2[4,5],xmm5[6,7]
; SSE41-NEXT: paddd %xmm1, %xmm2
; SSE41-NEXT: movdqa %xmm2, %xmm3
; SSE41-NEXT: psrld $31, %xmm3
; SSE41-NEXT: psrad $2, %xmm2
; SSE41-NEXT: paddd %xmm3, %xmm2
-; SSE41-NEXT: pmulld %xmm5, %xmm2
+; SSE41-NEXT: pmulld %xmm4, %xmm2
; SSE41-NEXT: psubd %xmm2, %xmm1
; SSE41-NEXT: retq
;
@@ -1112,13 +1119,14 @@ define <8 x i32> @test11(<8 x i32> %a) {
; SSE-NEXT: psrad $31, %xmm6
; SSE-NEXT: pand %xmm2, %xmm6
; SSE-NEXT: paddd %xmm4, %xmm6
-; SSE-NEXT: movdqa %xmm0, %xmm7
-; SSE-NEXT: pmuludq %xmm2, %xmm7
+; SSE-NEXT: movdqa %xmm0, %xmm4
+; SSE-NEXT: pmuludq %xmm2, %xmm4
+; SSE-NEXT: pshufd {{.*#+}} xmm7 = xmm4[1,3,2,3]
; SSE-NEXT: pshufd {{.*#+}} xmm5 = xmm2[1,1,3,3]
; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm0[1,1,3,3]
; SSE-NEXT: pmuludq %xmm5, %xmm4
-; SSE-NEXT: shufps {{.*#+}} xmm7 = xmm7[1,3],xmm4[1,3]
-; SSE-NEXT: shufps {{.*#+}} xmm7 = xmm7[0,2,1,3]
+; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm4[1,3,2,3]
+; SSE-NEXT: punpckldq {{.*#+}} xmm7 = xmm7[0],xmm4[0],xmm7[1],xmm4[1]
; SSE-NEXT: psubd %xmm6, %xmm7
; SSE-NEXT: paddd %xmm0, %xmm7
; SSE-NEXT: movdqa %xmm7, %xmm4
@@ -1128,9 +1136,10 @@ define <8 x i32> @test11(<8 x i32> %a) {
; SSE-NEXT: movdqa {{.*#+}} xmm4 = [7,7,7,7]
; SSE-NEXT: pshufd {{.*#+}} xmm6 = xmm7[1,1,3,3]
; SSE-NEXT: pmuludq %xmm4, %xmm7
+; SSE-NEXT: pshufd {{.*#+}} xmm7 = xmm7[0,2,2,3]
; SSE-NEXT: pmuludq %xmm4, %xmm6
-; SSE-NEXT: shufps {{.*#+}} xmm7 = xmm7[0,2],xmm6[0,2]
-; SSE-NEXT: shufps {{.*#+}} xmm7 = xmm7[0,2,1,3]
+; SSE-NEXT: pshufd {{.*#+}} xmm6 = xmm6[0,2,2,3]
+; SSE-NEXT: punpckldq {{.*#+}} xmm7 = xmm7[0],xmm6[0],xmm7[1],xmm6[1]
; SSE-NEXT: psubd %xmm7, %xmm0
; SSE-NEXT: pand %xmm1, %xmm3
; SSE-NEXT: movdqa %xmm1, %xmm6
@@ -1138,10 +1147,11 @@ define <8 x i32> @test11(<8 x i32> %a) {
; SSE-NEXT: pand %xmm2, %xmm6
; SSE-NEXT: paddd %xmm3, %xmm6
; SSE-NEXT: pmuludq %xmm1, %xmm2
+; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[1,3,2,3]
; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm1[1,1,3,3]
; SSE-NEXT: pmuludq %xmm5, %xmm3
-; SSE-NEXT: shufps {{.*#+}} xmm2 = xmm2[1,3],xmm3[1,3]
-; SSE-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,2,1,3]
+; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm3[1,3,2,3]
+; SSE-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1]
; SSE-NEXT: psubd %xmm6, %xmm2
; SSE-NEXT: paddd %xmm1, %xmm2
; SSE-NEXT: movdqa %xmm2, %xmm3
@@ -1150,9 +1160,10 @@ define <8 x i32> @test11(<8 x i32> %a) {
; SSE-NEXT: paddd %xmm3, %xmm2
; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm2[1,1,3,3]
; SSE-NEXT: pmuludq %xmm4, %xmm2
+; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,2,2,3]
; SSE-NEXT: pmuludq %xmm4, %xmm3
-; SSE-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,2],xmm3[0,2]
-; SSE-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,2,1,3]
+; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm3[0,2,2,3]
+; SSE-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1]
; SSE-NEXT: psubd %xmm2, %xmm1
; SSE-NEXT: retq
;
@@ -1202,16 +1213,15 @@ define <4 x i32> @PR20355(<4 x i32> %a) {
; SSE41-LABEL: PR20355:
; SSE41: # BB#0: # %entry
; SSE41-NEXT: movdqa {{.*#+}} xmm1 = [1431655766,1431655766,1431655766,1431655766]
-; SSE41-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,1,3,3]
+; SSE41-NEXT: pshufd {{.*#+}} xmm2 = xmm1[1,1,3,3]
+; SSE41-NEXT: pshufd {{.*#+}} xmm3 = xmm0[1,1,3,3]
+; SSE41-NEXT: pmuldq %xmm2, %xmm3
; SSE41-NEXT: pmuldq %xmm1, %xmm0
-; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
-; SSE41-NEXT: pmuldq %xmm2, %xmm1
-; SSE41-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,3],xmm1[1,3]
-; SSE41-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,2,1,3]
-; SSE41-NEXT: movaps %xmm0, %xmm1
-; SSE41-NEXT: psrld $31, %xmm1
-; SSE41-NEXT: paddd %xmm0, %xmm1
+; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
+; SSE41-NEXT: pblendw {{.*#+}} xmm1 = xmm1[0,1],xmm3[2,3],xmm1[4,5],xmm3[6,7]
; SSE41-NEXT: movdqa %xmm1, %xmm0
+; SSE41-NEXT: psrld $31, %xmm0
+; SSE41-NEXT: paddd %xmm1, %xmm0
; SSE41-NEXT: retq
;
; SSE-LABEL: PR20355:
@@ -1226,26 +1236,26 @@ define <4 x i32> @PR20355(<4 x i32> %a) {
; SSE-NEXT: paddd %xmm2, %xmm3
; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,1,3,3]
; SSE-NEXT: pmuludq %xmm1, %xmm0
-; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
-; SSE-NEXT: pmuludq %xmm2, %xmm1
-; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,3],xmm1[1,3]
-; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,2,1,3]
-; SSE-NEXT: psubd %xmm3, %xmm0
-; SSE-NEXT: movdqa %xmm0, %xmm1
-; SSE-NEXT: psrld $31, %xmm1
-; SSE-NEXT: paddd %xmm0, %xmm1
-; SSE-NEXT: movdqa %xmm1, %xmm0
+; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm0[1,3,2,3]
+; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,1,3,3]
+; SSE-NEXT: pmuludq %xmm2, %xmm0
+; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,3,2,3]
+; SSE-NEXT: punpckldq {{.*#+}} xmm4 = xmm4[0],xmm0[0],xmm4[1],xmm0[1]
+; SSE-NEXT: psubd %xmm3, %xmm4
+; SSE-NEXT: movdqa %xmm4, %xmm0
+; SSE-NEXT: psrld $31, %xmm0
+; SSE-NEXT: paddd %xmm4, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: PR20355:
; AVX: # BB#0: # %entry
; AVX-NEXT: vpbroadcastd {{.*}}(%rip), %xmm1
-; AVX-NEXT: vpmuldq %xmm1, %xmm0, %xmm2
-; AVX-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
-; AVX-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; AVX-NEXT: vpshufd {{.*#+}} xmm2 = xmm1[1,1,3,3]
+; AVX-NEXT: vpshufd {{.*#+}} xmm3 = xmm0[1,1,3,3]
+; AVX-NEXT: vpmuldq %xmm2, %xmm3, %xmm2
; AVX-NEXT: vpmuldq %xmm1, %xmm0, %xmm0
-; AVX-NEXT: vshufps {{.*#+}} xmm0 = xmm2[1,3],xmm0[1,3]
-; AVX-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,2,1,3]
+; AVX-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; AVX-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0],xmm2[1],xmm0[2],xmm2[3]
; AVX-NEXT: vpsrld $31, %xmm0, %xmm1
; AVX-NEXT: vpaddd %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
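
The hunks above (test11 and PR20355) track a lowering change for widened 32-bit multiplies: the paired shufps sequences are replaced by pshufd plus punpckldq on plain SSE, and by pshufd plus pblendw/vpblendd on SSE4.1/AVX, presumably to keep the values in the integer shuffle domain. A minimal reproducer in the same IR dialect as these tests is sketched below; the function name sdiv7 is illustrative and not part of the diff.

define <4 x i32> @sdiv7(<4 x i32> %a) {
entry:
  ; Signed division by a non-power-of-two constant is lowered through
  ; pmuldq/pmuludq, which produces the shuffle sequences checked above.
  %div = sdiv <4 x i32> %a, <i32 7, i32 7, i32 7, i32 7>
  ret <4 x i32> %div
}
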
diff --git a/test/CodeGen/X86/vector-sext.ll b/test/CodeGen/X86/vector-sext.ll
index 7a329d7..962d038 100644
--- a/test/CodeGen/X86/vector-sext.ll
+++ b/test/CodeGen/X86/vector-sext.ll
@@ -523,64 +523,47 @@ define <4 x i64> @sext_4i1_to_4i64(<4 x i1> %mask) {
define <16 x i16> @sext_16i8_to_16i16(<16 x i8> *%ptr) {
; SSE2-LABEL: sext_16i8_to_16i16:
; SSE2: # BB#0: # %entry
-; SSE2-NEXT: movdqa (%rdi), %xmm1
-; SSE2-NEXT: movdqa %xmm1, %xmm0
+; SSE2-NEXT: movq (%rdi), %xmm0
; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
-; SSE2-NEXT: psllw $8, %xmm0
; SSE2-NEXT: psraw $8, %xmm0
-; SSE2-NEXT: punpckhbw {{.*#+}} xmm1 = xmm1[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
-; SSE2-NEXT: psllw $8, %xmm1
+; SSE2-NEXT: movq 8(%rdi), %xmm1
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; SSE2-NEXT: psraw $8, %xmm1
; SSE2-NEXT: retq
;
; SSSE3-LABEL: sext_16i8_to_16i16:
; SSSE3: # BB#0: # %entry
-; SSSE3-NEXT: movdqa (%rdi), %xmm1
-; SSSE3-NEXT: movdqa %xmm1, %xmm0
+; SSSE3-NEXT: movq (%rdi), %xmm0
; SSSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
-; SSSE3-NEXT: psllw $8, %xmm0
; SSSE3-NEXT: psraw $8, %xmm0
-; SSSE3-NEXT: punpckhbw {{.*#+}} xmm1 = xmm1[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
-; SSSE3-NEXT: psllw $8, %xmm1
+; SSSE3-NEXT: movq 8(%rdi), %xmm1
+; SSSE3-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; SSSE3-NEXT: psraw $8, %xmm1
; SSSE3-NEXT: retq
;
; SSE41-LABEL: sext_16i8_to_16i16:
; SSE41: # BB#0: # %entry
-; SSE41-NEXT: movdqa (%rdi), %xmm1
-; SSE41-NEXT: pmovzxbw %xmm1, %xmm0
-; SSE41-NEXT: psllw $8, %xmm0
-; SSE41-NEXT: psraw $8, %xmm0
-; SSE41-NEXT: punpckhbw {{.*#+}} xmm1 = xmm1[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
-; SSE41-NEXT: psllw $8, %xmm1
-; SSE41-NEXT: psraw $8, %xmm1
+; SSE41-NEXT: pmovsxbw (%rdi), %xmm0
+; SSE41-NEXT: pmovsxbw 8(%rdi), %xmm1
; SSE41-NEXT: retq
;
; AVX1-LABEL: sext_16i8_to_16i16:
; AVX1: # BB#0: # %entry
-; AVX1-NEXT: vmovdqa (%rdi), %xmm0
-; AVX1-NEXT: vpmovsxbw %xmm0, %xmm1
-; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
-; AVX1-NEXT: vpmovsxbw %xmm0, %xmm0
-; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
+; AVX1-NEXT: vpmovsxbw (%rdi), %xmm0
+; AVX1-NEXT: vpmovsxbw 8(%rdi), %xmm1
+; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: sext_16i8_to_16i16:
; AVX2: # BB#0: # %entry
-; AVX2-NEXT: vmovdqa (%rdi), %xmm0
-; AVX2-NEXT: vpmovsxbw %xmm0, %ymm0
+; AVX2-NEXT: vpmovsxbw (%rdi), %ymm0
; AVX2-NEXT: retq
;
; X32-SSE41-LABEL: sext_16i8_to_16i16:
; X32-SSE41: # BB#0: # %entry
; X32-SSE41-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-SSE41-NEXT: movdqa (%eax), %xmm1
-; X32-SSE41-NEXT: pmovzxbw %xmm1, %xmm0
-; X32-SSE41-NEXT: psllw $8, %xmm0
-; X32-SSE41-NEXT: psraw $8, %xmm0
-; X32-SSE41-NEXT: punpckhbw {{.*#+}} xmm1 = xmm1[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
-; X32-SSE41-NEXT: psllw $8, %xmm1
-; X32-SSE41-NEXT: psraw $8, %xmm1
+; X32-SSE41-NEXT: pmovsxbw (%eax), %xmm0
+; X32-SSE41-NEXT: pmovsxbw 8(%eax), %xmm1
; X32-SSE41-NEXT: retl
entry:
%X = load <16 x i8>* %ptr
@@ -706,73 +689,36 @@ define <4 x i64> @sext_4i8_to_4i64(<4 x i8> %mask) {
define <4 x i64> @load_sext_4i8_to_4i64(<4 x i8> *%ptr) {
; SSE2-LABEL: load_sext_4i8_to_4i64:
; SSE2: # BB#0: # %entry
-; SSE2-NEXT: movd (%rdi), %xmm1
-; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
-; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm1[0,1,1,3]
-; SSE2-NEXT: movd %xmm2, %rax
-; SSE2-NEXT: movsbq %al, %rax
+; SSE2-NEXT: movsbq 1(%rdi), %rax
+; SSE2-NEXT: movd %rax, %xmm1
+; SSE2-NEXT: movsbq (%rdi), %rax
; SSE2-NEXT: movd %rax, %xmm0
-; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm2[2,3,0,1]
-; SSE2-NEXT: movd %xmm2, %rax
-; SSE2-NEXT: movsbq %al, %rax
+; SSE2-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; SSE2-NEXT: movsbq 3(%rdi), %rax
; SSE2-NEXT: movd %rax, %xmm2
-; SSE2-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm2[0]
-; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm1[2,2,3,3]
-; SSE2-NEXT: movd %xmm2, %rax
-; SSE2-NEXT: movsbq %al, %rax
+; SSE2-NEXT: movsbq 2(%rdi), %rax
; SSE2-NEXT: movd %rax, %xmm1
-; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm2[2,3,0,1]
-; SSE2-NEXT: movd %xmm2, %rax
-; SSE2-NEXT: movsbq %al, %rax
-; SSE2-NEXT: movd %rax, %xmm2
; SSE2-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm2[0]
; SSE2-NEXT: retq
;
; SSSE3-LABEL: load_sext_4i8_to_4i64:
; SSSE3: # BB#0: # %entry
-; SSSE3-NEXT: movd (%rdi), %xmm1
-; SSSE3-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
-; SSSE3-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
-; SSSE3-NEXT: pshufd {{.*#+}} xmm2 = xmm1[0,1,1,3]
-; SSSE3-NEXT: movd %xmm2, %rax
-; SSSE3-NEXT: movsbq %al, %rax
+; SSSE3-NEXT: movsbq 1(%rdi), %rax
+; SSSE3-NEXT: movd %rax, %xmm1
+; SSSE3-NEXT: movsbq (%rdi), %rax
; SSSE3-NEXT: movd %rax, %xmm0
-; SSSE3-NEXT: pshufd {{.*#+}} xmm2 = xmm2[2,3,0,1]
-; SSSE3-NEXT: movd %xmm2, %rax
-; SSSE3-NEXT: movsbq %al, %rax
+; SSSE3-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; SSSE3-NEXT: movsbq 3(%rdi), %rax
; SSSE3-NEXT: movd %rax, %xmm2
-; SSSE3-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm2[0]
-; SSSE3-NEXT: pshufd {{.*#+}} xmm2 = xmm1[2,2,3,3]
-; SSSE3-NEXT: movd %xmm2, %rax
-; SSSE3-NEXT: movsbq %al, %rax
+; SSSE3-NEXT: movsbq 2(%rdi), %rax
; SSSE3-NEXT: movd %rax, %xmm1
-; SSSE3-NEXT: pshufd {{.*#+}} xmm2 = xmm2[2,3,0,1]
-; SSSE3-NEXT: movd %xmm2, %rax
-; SSSE3-NEXT: movsbq %al, %rax
-; SSSE3-NEXT: movd %rax, %xmm2
; SSSE3-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm2[0]
; SSSE3-NEXT: retq
;
; SSE41-LABEL: load_sext_4i8_to_4i64:
; SSE41: # BB#0: # %entry
-; SSE41-NEXT: pmovzxbd (%rdi), %xmm1
-; SSE41-NEXT: pmovzxdq %xmm1, %xmm0
-; SSE41-NEXT: pextrq $1, %xmm0, %rax
-; SSE41-NEXT: movsbq %al, %rax
-; SSE41-NEXT: movd %rax, %xmm2
-; SSE41-NEXT: movd %xmm0, %rax
-; SSE41-NEXT: movsbq %al, %rax
-; SSE41-NEXT: movd %rax, %xmm0
-; SSE41-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm2[0]
-; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm1[2,2,3,3]
-; SSE41-NEXT: pextrq $1, %xmm1, %rax
-; SSE41-NEXT: movsbq %al, %rax
-; SSE41-NEXT: movd %rax, %xmm2
-; SSE41-NEXT: movd %xmm1, %rax
-; SSE41-NEXT: movsbq %al, %rax
-; SSE41-NEXT: movd %rax, %xmm1
-; SSE41-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm2[0]
+; SSE41-NEXT: pmovsxbq (%rdi), %xmm0
+; SSE41-NEXT: pmovsxbq 2(%rdi), %xmm1
; SSE41-NEXT: retq
;
; AVX1-LABEL: load_sext_4i8_to_4i64:
@@ -792,30 +738,8 @@ define <4 x i64> @load_sext_4i8_to_4i64(<4 x i8> *%ptr) {
; X32-SSE41-LABEL: load_sext_4i8_to_4i64:
; X32-SSE41: # BB#0: # %entry
; X32-SSE41-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-SSE41-NEXT: movd (%eax), %xmm0
-; X32-SSE41-NEXT: pmovzxbd %xmm0, %xmm1
-; X32-SSE41-NEXT: pmovzxbq %xmm0, %xmm2
-; X32-SSE41-NEXT: movd %xmm2, %eax
-; X32-SSE41-NEXT: movsbl %al, %eax
-; X32-SSE41-NEXT: movd %eax, %xmm0
-; X32-SSE41-NEXT: sarl $31, %eax
-; X32-SSE41-NEXT: pinsrd $1, %eax, %xmm0
-; X32-SSE41-NEXT: pextrd $2, %xmm2, %eax
-; X32-SSE41-NEXT: movsbl %al, %eax
-; X32-SSE41-NEXT: pinsrd $2, %eax, %xmm0
-; X32-SSE41-NEXT: sarl $31, %eax
-; X32-SSE41-NEXT: pinsrd $3, %eax, %xmm0
-; X32-SSE41-NEXT: pshufd {{.*#+}} xmm2 = xmm1[2,2,3,3]
-; X32-SSE41-NEXT: movd %xmm2, %eax
-; X32-SSE41-NEXT: movsbl %al, %eax
-; X32-SSE41-NEXT: movd %eax, %xmm1
-; X32-SSE41-NEXT: sarl $31, %eax
-; X32-SSE41-NEXT: pinsrd $1, %eax, %xmm1
-; X32-SSE41-NEXT: pextrd $2, %xmm2, %eax
-; X32-SSE41-NEXT: movsbl %al, %eax
-; X32-SSE41-NEXT: pinsrd $2, %eax, %xmm1
-; X32-SSE41-NEXT: sarl $31, %eax
-; X32-SSE41-NEXT: pinsrd $3, %eax, %xmm1
+; X32-SSE41-NEXT: pmovsxbq (%eax), %xmm0
+; X32-SSE41-NEXT: pmovsxbq 2(%eax), %xmm1
; X32-SSE41-NEXT: retl
entry:
%X = load <4 x i8>* %ptr
@@ -826,72 +750,36 @@ entry:
define <4 x i64> @load_sext_4i16_to_4i64(<4 x i16> *%ptr) {
; SSE2-LABEL: load_sext_4i16_to_4i64:
; SSE2: # BB#0: # %entry
-; SSE2-NEXT: movq (%rdi), %xmm1
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
-; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm1[0,1,1,3]
-; SSE2-NEXT: movd %xmm2, %rax
-; SSE2-NEXT: movswq %ax, %rax
+; SSE2-NEXT: movswq 2(%rdi), %rax
+; SSE2-NEXT: movd %rax, %xmm1
+; SSE2-NEXT: movswq (%rdi), %rax
; SSE2-NEXT: movd %rax, %xmm0
-; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm2[2,3,0,1]
-; SSE2-NEXT: movd %xmm2, %rax
-; SSE2-NEXT: movswq %ax, %rax
+; SSE2-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; SSE2-NEXT: movswq 6(%rdi), %rax
; SSE2-NEXT: movd %rax, %xmm2
-; SSE2-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm2[0]
-; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm1[2,2,3,3]
-; SSE2-NEXT: movd %xmm2, %rax
-; SSE2-NEXT: movswq %ax, %rax
+; SSE2-NEXT: movswq 4(%rdi), %rax
; SSE2-NEXT: movd %rax, %xmm1
-; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm2[2,3,0,1]
-; SSE2-NEXT: movd %xmm2, %rax
-; SSE2-NEXT: movswq %ax, %rax
-; SSE2-NEXT: movd %rax, %xmm2
; SSE2-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm2[0]
; SSE2-NEXT: retq
;
; SSSE3-LABEL: load_sext_4i16_to_4i64:
; SSSE3: # BB#0: # %entry
-; SSSE3-NEXT: movq (%rdi), %xmm1
-; SSSE3-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
-; SSSE3-NEXT: pshufd {{.*#+}} xmm2 = xmm1[0,1,1,3]
-; SSSE3-NEXT: movd %xmm2, %rax
-; SSSE3-NEXT: movswq %ax, %rax
+; SSSE3-NEXT: movswq 2(%rdi), %rax
+; SSSE3-NEXT: movd %rax, %xmm1
+; SSSE3-NEXT: movswq (%rdi), %rax
; SSSE3-NEXT: movd %rax, %xmm0
-; SSSE3-NEXT: pshufd {{.*#+}} xmm2 = xmm2[2,3,0,1]
-; SSSE3-NEXT: movd %xmm2, %rax
-; SSSE3-NEXT: movswq %ax, %rax
+; SSSE3-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; SSSE3-NEXT: movswq 6(%rdi), %rax
; SSSE3-NEXT: movd %rax, %xmm2
-; SSSE3-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm2[0]
-; SSSE3-NEXT: pshufd {{.*#+}} xmm2 = xmm1[2,2,3,3]
-; SSSE3-NEXT: movd %xmm2, %rax
-; SSSE3-NEXT: movswq %ax, %rax
+; SSSE3-NEXT: movswq 4(%rdi), %rax
; SSSE3-NEXT: movd %rax, %xmm1
-; SSSE3-NEXT: pshufd {{.*#+}} xmm2 = xmm2[2,3,0,1]
-; SSSE3-NEXT: movd %xmm2, %rax
-; SSSE3-NEXT: movswq %ax, %rax
-; SSSE3-NEXT: movd %rax, %xmm2
; SSSE3-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm2[0]
; SSSE3-NEXT: retq
;
; SSE41-LABEL: load_sext_4i16_to_4i64:
; SSE41: # BB#0: # %entry
-; SSE41-NEXT: movq (%rdi), %xmm0
-; SSE41-NEXT: pmovzxwd %xmm0, %xmm1
-; SSE41-NEXT: pmovzxwq %xmm0, %xmm0
-; SSE41-NEXT: pextrq $1, %xmm0, %rax
-; SSE41-NEXT: movswq %ax, %rax
-; SSE41-NEXT: movd %rax, %xmm2
-; SSE41-NEXT: movd %xmm0, %rax
-; SSE41-NEXT: movswq %ax, %rax
-; SSE41-NEXT: movd %rax, %xmm0
-; SSE41-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm2[0]
-; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm1[2,2,3,3]
-; SSE41-NEXT: pextrq $1, %xmm1, %rax
-; SSE41-NEXT: movswq %ax, %rax
-; SSE41-NEXT: movd %rax, %xmm2
-; SSE41-NEXT: movd %xmm1, %rax
-; SSE41-NEXT: movswq %ax, %rax
-; SSE41-NEXT: movd %rax, %xmm1
-; SSE41-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm2[0]
+; SSE41-NEXT: pmovsxwq (%rdi), %xmm0
+; SSE41-NEXT: pmovsxwq 4(%rdi), %xmm1
; SSE41-NEXT: retq
;
; AVX1-LABEL: load_sext_4i16_to_4i64:
@@ -911,30 +799,8 @@ define <4 x i64> @load_sext_4i16_to_4i64(<4 x i16> *%ptr) {
; X32-SSE41-LABEL: load_sext_4i16_to_4i64:
; X32-SSE41: # BB#0: # %entry
; X32-SSE41-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-SSE41-NEXT: movsd (%eax), %xmm0
-; X32-SSE41-NEXT: pmovzxwd %xmm0, %xmm1
-; X32-SSE41-NEXT: pmovzxwq %xmm0, %xmm2
-; X32-SSE41-NEXT: movd %xmm2, %eax
-; X32-SSE41-NEXT: cwtl
-; X32-SSE41-NEXT: movd %eax, %xmm0
-; X32-SSE41-NEXT: sarl $31, %eax
-; X32-SSE41-NEXT: pinsrd $1, %eax, %xmm0
-; X32-SSE41-NEXT: pextrd $2, %xmm2, %eax
-; X32-SSE41-NEXT: cwtl
-; X32-SSE41-NEXT: pinsrd $2, %eax, %xmm0
-; X32-SSE41-NEXT: sarl $31, %eax
-; X32-SSE41-NEXT: pinsrd $3, %eax, %xmm0
-; X32-SSE41-NEXT: pshufd {{.*#+}} xmm2 = xmm1[2,2,3,3]
-; X32-SSE41-NEXT: movd %xmm2, %eax
-; X32-SSE41-NEXT: cwtl
-; X32-SSE41-NEXT: movd %eax, %xmm1
-; X32-SSE41-NEXT: sarl $31, %eax
-; X32-SSE41-NEXT: pinsrd $1, %eax, %xmm1
-; X32-SSE41-NEXT: pextrd $2, %xmm2, %eax
-; X32-SSE41-NEXT: cwtl
-; X32-SSE41-NEXT: pinsrd $2, %eax, %xmm1
-; X32-SSE41-NEXT: sarl $31, %eax
-; X32-SSE41-NEXT: pinsrd $3, %eax, %xmm1
+; X32-SSE41-NEXT: pmovsxwq (%eax), %xmm0
+; X32-SSE41-NEXT: pmovsxwq 4(%eax), %xmm1
; X32-SSE41-NEXT: retl
entry:
%X = load <4 x i16>* %ptr
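
The sext_* and load_sext_* hunks above all show the same improvement: instead of loading the full vector and scalarizing the extension through movsbq/movswq or pextrq/pinsrd round trips, the SSE4.1/AVX paths now fold the load directly into pmovsxbw/pmovsxbq/pmovsxwq, while the SSE2/SSSE3 paths either split the load (movq plus punpcklbw/psraw) or use one sign-extending scalar load per lane. A self-contained test in the same style is sketched below, using the era's typed-pointer load syntax; the function name is illustrative only.

define <8 x i16> @load_sext_8i8_to_8i16(<8 x i8>* %ptr) {
entry:
  ; A sign-extending vector load: on SSE4.1 this should select a single
  ; pmovsxbw with the memory operand folded in.
  %X = load <8 x i8>* %ptr
  %Y = sext <8 x i8> %X to <8 x i16>
  ret <8 x i16> %Y
}
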
diff --git a/test/CodeGen/X86/vector-shuffle-128-v16.ll b/test/CodeGen/X86/vector-shuffle-128-v16.ll
index 30ad366..c271622 100644
--- a/test/CodeGen/X86/vector-shuffle-128-v16.ll
+++ b/test/CodeGen/X86/vector-shuffle-128-v16.ll
@@ -1,8 +1,8 @@
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=x86-64 -x86-experimental-vector-shuffle-lowering | FileCheck %s --check-prefix=ALL --check-prefix=SSE --check-prefix=SSE2
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=x86-64 -mattr=+ssse3 -x86-experimental-vector-shuffle-lowering | FileCheck %s --check-prefix=ALL --check-prefix=SSE --check-prefix=SSSE3
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=x86-64 -mattr=+sse4.1 -x86-experimental-vector-shuffle-lowering | FileCheck %s --check-prefix=ALL --check-prefix=SSE --check-prefix=SSE41
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=x86-64 -mattr=+avx -x86-experimental-vector-shuffle-lowering | FileCheck %s --check-prefix=ALL --check-prefix=AVX --check-prefix=AVX1
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=x86-64 -mattr=+avx2 -x86-experimental-vector-shuffle-lowering | FileCheck %s --check-prefix=ALL --check-prefix=AVX --check-prefix=AVX2
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=x86-64 | FileCheck %s --check-prefix=ALL --check-prefix=SSE --check-prefix=SSE2
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=x86-64 -mattr=+ssse3 | FileCheck %s --check-prefix=ALL --check-prefix=SSE --check-prefix=SSSE3
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=x86-64 -mattr=+sse4.1 | FileCheck %s --check-prefix=ALL --check-prefix=SSE --check-prefix=SSE41
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=x86-64 -mattr=+avx | FileCheck %s --check-prefix=ALL --check-prefix=AVX --check-prefix=AVX1
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=x86-64 -mattr=+avx2 | FileCheck %s --check-prefix=ALL --check-prefix=AVX --check-prefix=AVX2
target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
target triple = "x86_64-unknown-unknown"
@@ -247,13 +247,34 @@ define <16 x i8> @shuffle_v16i8_08_24_09_25_10_26_11_27_12_28_13_29_14_30_15_31(
}
define <16 x i8> @shuffle_v16i8_16_00_16_01_16_02_16_03_16_04_16_05_16_06_16_07(<16 x i8> %a, <16 x i8> %b) {
-; SSE-LABEL: shuffle_v16i8_16_00_16_01_16_02_16_03_16_04_16_05_16_06_16_07:
-; SSE: # BB#0:
-; SSE-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
-; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[0,0,0,0,4,5,6,7]
-; SSE-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
-; SSE-NEXT: movdqa %xmm1, %xmm0
-; SSE-NEXT: retq
+; SSE2-LABEL: shuffle_v16i8_16_00_16_01_16_02_16_03_16_04_16_05_16_06_16_07:
+; SSE2: # BB#0:
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
+; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0]
+; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,1,0,3]
+; SSE2-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[0,0,0,0,4,5,6,7]
+; SSE2-NEXT: pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,4,4,4]
+; SSE2-NEXT: pand %xmm2, %xmm1
+; SSE2-NEXT: pandn %xmm0, %xmm2
+; SSE2-NEXT: por %xmm1, %xmm2
+; SSE2-NEXT: movdqa %xmm2, %xmm0
+; SSE2-NEXT: retq
+;
+; SSSE3-LABEL: shuffle_v16i8_16_00_16_01_16_02_16_03_16_04_16_05_16_06_16_07:
+; SSSE3: # BB#0:
+; SSSE3-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
+; SSSE3-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[0,0,0,0,4,5,6,7]
+; SSSE3-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
+; SSSE3-NEXT: movdqa %xmm1, %xmm0
+; SSSE3-NEXT: retq
+;
+; SSE41-LABEL: shuffle_v16i8_16_00_16_01_16_02_16_03_16_04_16_05_16_06_16_07:
+; SSE41: # BB#0:
+; SSE41-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
+; SSE41-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[0,0,0,0,4,5,6,7]
+; SSE41-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
+; SSE41-NEXT: movdqa %xmm1, %xmm0
+; SSE41-NEXT: retq
;
; AVX1-LABEL: shuffle_v16i8_16_00_16_01_16_02_16_03_16_04_16_05_16_06_16_07:
; AVX1: # BB#0:
@@ -318,23 +339,20 @@ define <16 x i8> @shuffle_v16i8_03_02_01_00_07_06_05_04_19_18_17_16_23_22_21_20(
;
; SSSE3-LABEL: shuffle_v16i8_03_02_01_00_07_06_05_04_19_18_17_16_23_22_21_20:
; SSSE3: # BB#0:
-; SSSE3-NEXT: pshufb {{.*#+}} xmm1 = zero,zero,zero,zero,zero,zero,zero,zero,xmm1[3,2,1,0,7,6,5,4]
-; SSSE3-NEXT: pshufb {{.*#+}} xmm0 = xmm0[3,2,1,0,7,6,5,4],zero,zero,zero,zero,zero,zero,zero,zero
-; SSSE3-NEXT: por %xmm1, %xmm0
+; SSSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
+; SSSE3-NEXT: pshufb {{.*#+}} xmm0 = xmm0[6,4,2,0,14,12,10,8,7,5,3,1,15,13,11,9]
; SSSE3-NEXT: retq
;
; SSE41-LABEL: shuffle_v16i8_03_02_01_00_07_06_05_04_19_18_17_16_23_22_21_20:
; SSE41: # BB#0:
-; SSE41-NEXT: pshufb {{.*#+}} xmm1 = zero,zero,zero,zero,zero,zero,zero,zero,xmm1[3,2,1,0,7,6,5,4]
-; SSE41-NEXT: pshufb {{.*#+}} xmm0 = xmm0[3,2,1,0,7,6,5,4],zero,zero,zero,zero,zero,zero,zero,zero
-; SSE41-NEXT: por %xmm1, %xmm0
+; SSE41-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
+; SSE41-NEXT: pshufb {{.*#+}} xmm0 = xmm0[6,4,2,0,14,12,10,8,7,5,3,1,15,13,11,9]
; SSE41-NEXT: retq
;
; AVX-LABEL: shuffle_v16i8_03_02_01_00_07_06_05_04_19_18_17_16_23_22_21_20:
; AVX: # BB#0:
-; AVX-NEXT: vpshufb {{.*#+}} xmm1 = zero,zero,zero,zero,zero,zero,zero,zero,xmm1[3,2,1,0,7,6,5,4]
-; AVX-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[3,2,1,0,7,6,5,4],zero,zero,zero,zero,zero,zero,zero,zero
-; AVX-NEXT: vpor %xmm1, %xmm0, %xmm0
+; AVX-NEXT: vpunpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
+; AVX-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[6,4,2,0,14,12,10,8,7,5,3,1,15,13,11,9]
; AVX-NEXT: retq
%shuffle = shufflevector <16 x i8> %a, <16 x i8> %b, <16 x i32> <i32 3, i32 2, i32 1, i32 0, i32 7, i32 6, i32 5, i32 4, i32 19, i32 18, i32 17, i32 16, i32 23, i32 22, i32 21, i32 20>
ret <16 x i8> %shuffle
@@ -343,47 +361,181 @@ define <16 x i8> @shuffle_v16i8_03_02_01_00_07_06_05_04_19_18_17_16_23_22_21_20(
define <16 x i8> @shuffle_v16i8_03_02_01_00_31_30_29_28_11_10_09_08_23_22_21_20(<16 x i8> %a, <16 x i8> %b) {
; SSE2-LABEL: shuffle_v16i8_03_02_01_00_31_30_29_28_11_10_09_08_23_22_21_20:
; SSE2: # BB#0:
-; SSE2-NEXT: pxor %xmm2, %xmm2
-; SSE2-NEXT: movdqa %xmm1, %xmm3
-; SSE2-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1],xmm3[2],xmm2[2],xmm3[3],xmm2[3],xmm3[4],xmm2[4],xmm3[5],xmm2[5],xmm3[6],xmm2[6],xmm3[7],xmm2[7]
-; SSE2-NEXT: pshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,7,6,5,4]
-; SSE2-NEXT: movdqa %xmm0, %xmm4
-; SSE2-NEXT: punpckhbw {{.*#+}} xmm4 = xmm4[8],xmm2[8],xmm4[9],xmm2[9],xmm4[10],xmm2[10],xmm4[11],xmm2[11],xmm4[12],xmm2[12],xmm4[13],xmm2[13],xmm4[14],xmm2[14],xmm4[15],xmm2[15]
-; SSE2-NEXT: pshuflw {{.*#+}} xmm4 = xmm4[3,2,1,0,4,5,6,7]
-; SSE2-NEXT: movsd %xmm4, %xmm3
-; SSE2-NEXT: punpckhbw {{.*#+}} xmm1 = xmm1[8],xmm2[8],xmm1[9],xmm2[9],xmm1[10],xmm2[10],xmm1[11],xmm2[11],xmm1[12],xmm2[12],xmm1[13],xmm2[13],xmm1[14],xmm2[14],xmm1[15],xmm2[15]
-; SSE2-NEXT: pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,7,6,5,4]
-; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3],xmm0[4],xmm2[4],xmm0[5],xmm2[5],xmm0[6],xmm2[6],xmm0[7],xmm2[7]
+; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,3,2,3]
+; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
+; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
+; SSE2-NEXT: pxor %xmm1, %xmm1
+; SSE2-NEXT: movdqa %xmm0, %xmm2
+; SSE2-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm1[8],xmm2[9],xmm1[9],xmm2[10],xmm1[10],xmm2[11],xmm1[11],xmm2[12],xmm1[12],xmm2[13],xmm1[13],xmm2[14],xmm1[14],xmm2[15],xmm1[15]
+; SSE2-NEXT: pshuflw {{.*#+}} xmm3 = xmm2[3,2,1,0,4,5,6,7]
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
+; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
+; SSE2-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[3,2,1,0,4,5,6,7]
+; SSE2-NEXT: punpcklqdq {{.*#+}} xmm3 = xmm3[0],xmm1[0]
; SSE2-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[3,2,1,0,4,5,6,7]
-; SSE2-NEXT: movsd %xmm0, %xmm1
-; SSE2-NEXT: packuswb %xmm3, %xmm1
-; SSE2-NEXT: movdqa %xmm1, %xmm0
+; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm2[2,3,2,3]
+; SSE2-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[3,2,1,0,4,5,6,7]
+; SSE2-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; SSE2-NEXT: packuswb %xmm3, %xmm0
; SSE2-NEXT: retq
;
; SSSE3-LABEL: shuffle_v16i8_03_02_01_00_31_30_29_28_11_10_09_08_23_22_21_20:
; SSSE3: # BB#0:
-; SSSE3-NEXT: pshufb {{.*#+}} xmm1 = zero,zero,zero,zero,xmm1[15,14,13,12],zero,zero,zero,zero,xmm1[7,6,5,4]
-; SSSE3-NEXT: pshufb {{.*#+}} xmm0 = xmm0[3,2,1,0],zero,zero,zero,zero,xmm0[11,10,9,8],zero,zero,zero,zero
-; SSSE3-NEXT: por %xmm1, %xmm0
+; SSSE3-NEXT: pshufb {{.*#+}} xmm1 = xmm1[15,14,13,12,7,6,5,4,u,u,u,u,u,u,u,u]
+; SSSE3-NEXT: pshufb {{.*#+}} xmm0 = xmm0[3,2,1,0,11,10,9,8,u,u,u,u,u,u,u,u]
+; SSSE3-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; SSSE3-NEXT: retq
;
; SSE41-LABEL: shuffle_v16i8_03_02_01_00_31_30_29_28_11_10_09_08_23_22_21_20:
; SSE41: # BB#0:
-; SSE41-NEXT: pshufb {{.*#+}} xmm1 = zero,zero,zero,zero,xmm1[15,14,13,12],zero,zero,zero,zero,xmm1[7,6,5,4]
-; SSE41-NEXT: pshufb {{.*#+}} xmm0 = xmm0[3,2,1,0],zero,zero,zero,zero,xmm0[11,10,9,8],zero,zero,zero,zero
-; SSE41-NEXT: por %xmm1, %xmm0
+; SSE41-NEXT: pshufb {{.*#+}} xmm1 = xmm1[15,14,13,12,7,6,5,4,u,u,u,u,u,u,u,u]
+; SSE41-NEXT: pshufb {{.*#+}} xmm0 = xmm0[3,2,1,0,11,10,9,8,u,u,u,u,u,u,u,u]
+; SSE41-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; SSE41-NEXT: retq
;
; AVX-LABEL: shuffle_v16i8_03_02_01_00_31_30_29_28_11_10_09_08_23_22_21_20:
; AVX: # BB#0:
-; AVX-NEXT: vpshufb {{.*#+}} xmm1 = zero,zero,zero,zero,xmm1[15,14,13,12],zero,zero,zero,zero,xmm1[7,6,5,4]
-; AVX-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[3,2,1,0],zero,zero,zero,zero,xmm0[11,10,9,8],zero,zero,zero,zero
-; AVX-NEXT: vpor %xmm1, %xmm0, %xmm0
+; AVX-NEXT: vpshufb {{.*#+}} xmm1 = xmm1[15,14,13,12,7,6,5,4,u,u,u,u,u,u,u,u]
+; AVX-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[3,2,1,0,11,10,9,8,u,u,u,u,u,u,u,u]
+; AVX-NEXT: vpunpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; AVX-NEXT: retq
%shuffle = shufflevector <16 x i8> %a, <16 x i8> %b, <16 x i32> <i32 3, i32 2, i32 1, i32 0, i32 31, i32 30, i32 29, i32 28, i32 11, i32 10, i32 9, i32 8, i32 23, i32 22, i32 21, i32 20>
ret <16 x i8> %shuffle
}
+define <16 x i8> @shuffle_v16i8_00_17_02_19_04_21_06_23_08_25_10_27_12_29_14_31(<16 x i8> %a, <16 x i8> %b) {
+; SSE2-LABEL: shuffle_v16i8_00_17_02_19_04_21_06_23_08_25_10_27_12_29_14_31:
+; SSE2: # BB#0:
+; SSE2-NEXT: movaps {{.*#+}} xmm2 = [255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0]
+; SSE2-NEXT: andps %xmm2, %xmm0
+; SSE2-NEXT: andnps %xmm1, %xmm2
+; SSE2-NEXT: orps %xmm2, %xmm0
+; SSE2-NEXT: retq
+;
+; SSSE3-LABEL: shuffle_v16i8_00_17_02_19_04_21_06_23_08_25_10_27_12_29_14_31:
+; SSSE3: # BB#0:
+; SSSE3-NEXT: pshufb {{.*#+}} xmm1 = xmm1[1,3,5,7,9,11,13,15,u,u,u,u,u,u,u,u]
+; SSSE3-NEXT: pshufb {{.*#+}} xmm0 = xmm0[0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u]
+; SSSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
+; SSSE3-NEXT: retq
+;
+; SSE41-LABEL: shuffle_v16i8_00_17_02_19_04_21_06_23_08_25_10_27_12_29_14_31:
+; SSE41: # BB#0:
+; SSE41-NEXT: movdqa %xmm0, %xmm2
+; SSE41-NEXT: movaps {{.*#+}} xmm0 = [255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0]
+; SSE41-NEXT: pblendvb %xmm2, %xmm1
+; SSE41-NEXT: movdqa %xmm1, %xmm0
+; SSE41-NEXT: retq
+;
+; AVX-LABEL: shuffle_v16i8_00_17_02_19_04_21_06_23_08_25_10_27_12_29_14_31:
+; AVX: # BB#0:
+; AVX-NEXT: vmovdqa {{.*#+}} xmm2 = [255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0]
+; AVX-NEXT: vpblendvb %xmm2, %xmm0, %xmm1, %xmm0
+; AVX-NEXT: retq
+ %shuffle = shufflevector <16 x i8> %a, <16 x i8> %b, <16 x i32> <i32 0, i32 17, i32 2, i32 19, i32 4, i32 21, i32 6, i32 23, i32 8, i32 25, i32 10, i32 27, i32 12, i32 29, i32 14, i32 31>
+ ret <16 x i8> %shuffle
+}
+
+define <16 x i8> @shuffle_v16i8_00_01_02_19_04_05_06_23_08_09_10_27_12_13_14_31(<16 x i8> %a, <16 x i8> %b) {
+; SSE2-LABEL: shuffle_v16i8_00_01_02_19_04_05_06_23_08_09_10_27_12_13_14_31:
+; SSE2: # BB#0:
+; SSE2-NEXT: movaps {{.*#+}} xmm2 = [255,255,255,0,255,255,255,0,255,255,255,0,255,255,255,0]
+; SSE2-NEXT: andps %xmm2, %xmm0
+; SSE2-NEXT: andnps %xmm1, %xmm2
+; SSE2-NEXT: orps %xmm2, %xmm0
+; SSE2-NEXT: retq
+;
+; SSSE3-LABEL: shuffle_v16i8_00_01_02_19_04_05_06_23_08_09_10_27_12_13_14_31:
+; SSSE3: # BB#0:
+; SSSE3-NEXT: pshufb {{.*#+}} xmm1 = zero,zero,zero,xmm1[3],zero,zero,zero,xmm1[7],zero,zero,zero,xmm1[11],zero,zero,zero,xmm1[15]
+; SSSE3-NEXT: pshufb {{.*#+}} xmm0 = xmm0[0,1,2],zero,xmm0[4,5,6],zero,xmm0[8,9,10],zero,xmm0[12,13,14],zero
+; SSSE3-NEXT: por %xmm1, %xmm0
+; SSSE3-NEXT: retq
+;
+; SSE41-LABEL: shuffle_v16i8_00_01_02_19_04_05_06_23_08_09_10_27_12_13_14_31:
+; SSE41: # BB#0:
+; SSE41-NEXT: movdqa %xmm0, %xmm2
+; SSE41-NEXT: movaps {{.*#+}} xmm0 = [255,255,255,0,255,255,255,0,255,255,255,0,255,255,255,0]
+; SSE41-NEXT: pblendvb %xmm2, %xmm1
+; SSE41-NEXT: movdqa %xmm1, %xmm0
+; SSE41-NEXT: retq
+;
+; AVX-LABEL: shuffle_v16i8_00_01_02_19_04_05_06_23_08_09_10_27_12_13_14_31:
+; AVX: # BB#0:
+; AVX-NEXT: vmovdqa {{.*#+}} xmm2 = [255,255,255,0,255,255,255,0,255,255,255,0,255,255,255,0]
+; AVX-NEXT: vpblendvb %xmm2, %xmm0, %xmm1, %xmm0
+; AVX-NEXT: retq
+ %shuffle = shufflevector <16 x i8> %a, <16 x i8> %b, <16 x i32> <i32 0, i32 1, i32 2, i32 19, i32 4, i32 5, i32 6, i32 23, i32 8, i32 9, i32 10, i32 27, i32 12, i32 13, i32 14, i32 31>
+ ret <16 x i8> %shuffle
+}
+
+define <16 x i8> @shuffle_v16i8_00_01_02_03_20_05_06_23_08_09_10_11_28_13_14_31(<16 x i8> %a, <16 x i8> %b) {
+; SSE2-LABEL: shuffle_v16i8_00_01_02_03_20_05_06_23_08_09_10_11_28_13_14_31:
+; SSE2: # BB#0:
+; SSE2-NEXT: movaps {{.*#+}} xmm2 = [255,255,255,255,0,255,255,0,255,255,255,255,0,255,255,0]
+; SSE2-NEXT: andps %xmm2, %xmm0
+; SSE2-NEXT: andnps %xmm1, %xmm2
+; SSE2-NEXT: orps %xmm2, %xmm0
+; SSE2-NEXT: retq
+;
+; SSSE3-LABEL: shuffle_v16i8_00_01_02_03_20_05_06_23_08_09_10_11_28_13_14_31:
+; SSSE3: # BB#0:
+; SSSE3-NEXT: pshufb {{.*#+}} xmm1 = zero,zero,zero,zero,xmm1[4],zero,zero,xmm1[7],zero,zero,zero,zero,xmm1[12],zero,zero,xmm1[15]
+; SSSE3-NEXT: pshufb {{.*#+}} xmm0 = xmm0[0,1,2,3],zero,xmm0[5,6],zero,xmm0[8,9,10,11],zero,xmm0[13,14],zero
+; SSSE3-NEXT: por %xmm1, %xmm0
+; SSSE3-NEXT: retq
+;
+; SSE41-LABEL: shuffle_v16i8_00_01_02_03_20_05_06_23_08_09_10_11_28_13_14_31:
+; SSE41: # BB#0:
+; SSE41-NEXT: movdqa %xmm0, %xmm2
+; SSE41-NEXT: movaps {{.*#+}} xmm0 = [255,255,255,255,0,255,255,0,255,255,255,255,0,255,255,0]
+; SSE41-NEXT: pblendvb %xmm2, %xmm1
+; SSE41-NEXT: movdqa %xmm1, %xmm0
+; SSE41-NEXT: retq
+;
+; AVX-LABEL: shuffle_v16i8_00_01_02_03_20_05_06_23_08_09_10_11_28_13_14_31:
+; AVX: # BB#0:
+; AVX-NEXT: vmovdqa {{.*#+}} xmm2 = [255,255,255,255,0,255,255,0,255,255,255,255,0,255,255,0]
+; AVX-NEXT: vpblendvb %xmm2, %xmm0, %xmm1, %xmm0
+; AVX-NEXT: retq
+ %shuffle = shufflevector <16 x i8> %a, <16 x i8> %b, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 20, i32 5, i32 6, i32 23, i32 8, i32 9, i32 10, i32 11, i32 28, i32 13, i32 14, i32 31>
+ ret <16 x i8> %shuffle
+}
+
+define <16 x i8> @shuffle_v16i8_16_17_18_19_04_05_06_07_24_25_10_11_28_13_30_15(<16 x i8> %a, <16 x i8> %b) {
+; SSE2-LABEL: shuffle_v16i8_16_17_18_19_04_05_06_07_24_25_10_11_28_13_30_15:
+; SSE2: # BB#0:
+; SSE2-NEXT: movaps {{.*#+}} xmm2 = [255,255,255,255,0,0,0,0,255,255,0,0,255,0,255,0]
+; SSE2-NEXT: andps %xmm2, %xmm1
+; SSE2-NEXT: andnps %xmm0, %xmm2
+; SSE2-NEXT: orps %xmm1, %xmm2
+; SSE2-NEXT: movaps %xmm2, %xmm0
+; SSE2-NEXT: retq
+;
+; SSSE3-LABEL: shuffle_v16i8_16_17_18_19_04_05_06_07_24_25_10_11_28_13_30_15:
+; SSSE3: # BB#0:
+; SSSE3-NEXT: pshufb {{.*#+}} xmm0 = zero,zero,zero,zero,xmm0[4,5,6,7],zero,zero,xmm0[10,11],zero,xmm0[13],zero,xmm0[15]
+; SSSE3-NEXT: pshufb {{.*#+}} xmm1 = xmm1[0,1,2,3],zero,zero,zero,zero,xmm1[8,9],zero,zero,xmm1[12],zero,xmm1[14],zero
+; SSSE3-NEXT: por %xmm1, %xmm0
+; SSSE3-NEXT: retq
+;
+; SSE41-LABEL: shuffle_v16i8_16_17_18_19_04_05_06_07_24_25_10_11_28_13_30_15:
+; SSE41: # BB#0:
+; SSE41-NEXT: movdqa %xmm0, %xmm2
+; SSE41-NEXT: movaps {{.*#+}} xmm0 = [255,255,255,255,0,0,0,0,255,255,0,0,255,0,255,0]
+; SSE41-NEXT: pblendvb %xmm1, %xmm2
+; SSE41-NEXT: movdqa %xmm2, %xmm0
+; SSE41-NEXT: retq
+;
+; AVX-LABEL: shuffle_v16i8_16_17_18_19_04_05_06_07_24_25_10_11_28_13_30_15:
+; AVX: # BB#0:
+; AVX-NEXT: vmovdqa {{.*#+}} xmm2 = [255,255,255,255,0,0,0,0,255,255,0,0,255,0,255,0]
+; AVX-NEXT: vpblendvb %xmm2, %xmm1, %xmm0, %xmm0
+; AVX-NEXT: retq
+ %shuffle = shufflevector <16 x i8> %a, <16 x i8> %b, <16 x i32> <i32 16, i32 17, i32 18, i32 19, i32 4, i32 5, i32 6, i32 7, i32 24, i32 25, i32 10, i32 11, i32 28, i32 13, i32 30, i32 15>
+ ret <16 x i8> %shuffle
+}
+
define <16 x i8> @trunc_v4i32_shuffle(<16 x i8> %a) {
; SSE2-LABEL: trunc_v4i32_shuffle:
; SSE2: # BB#0:
@@ -429,12 +581,12 @@ entry:
ret <16 x i8> %s.16.0
}
-define <16 x i8> @stress_test1(<16 x i8> %s.0.5, <16 x i8> %s.0.8, <16 x i8> %s.0.9) noinline nounwind {
+define <16 x i8> @undef_test1(<16 x i8> %s.0.5, <16 x i8> %s.0.8, <16 x i8> %s.0.9) noinline nounwind {
; There is nothing interesting to check about these instructions other than
; that they survive codegen. However, we actually do better and delete all of
; them because the result is 'undef'.
;
-; ALL-LABEL: stress_test1:
+; ALL-LABEL: undef_test1:
; ALL: # BB#0: # %entry
; ALL-NEXT: retq
entry:
@@ -460,36 +612,22 @@ define <16 x i8> @PR20540(<8 x i8> %a) {
; SSE2: # BB#0:
; SSE2-NEXT: pand {{.*}}(%rip), %xmm0
; SSE2-NEXT: packuswb %xmm0, %xmm0
-; SSE2-NEXT: pxor %xmm1, %xmm1
-; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
-; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,1,0,3]
-; SSE2-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[0,0,0,0,4,5,6,7]
-; SSE2-NEXT: pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,4,4,4]
-; SSE2-NEXT: packuswb %xmm1, %xmm0
+; SSE2-NEXT: movq {{.*#+}} xmm0 = xmm0[0],zero
; SSE2-NEXT: retq
;
; SSSE3-LABEL: PR20540:
; SSSE3: # BB#0:
-; SSSE3-NEXT: pxor %xmm1, %xmm1
-; SSSE3-NEXT: pshufb {{.*#+}} xmm1 = zero,zero,zero,zero,zero,zero,zero,zero,xmm1[0,0,0,0,0,0,0,0]
; SSSE3-NEXT: pshufb {{.*#+}} xmm0 = xmm0[0,2,4,6,8,10,12,14],zero,zero,zero,zero,zero,zero,zero,zero
-; SSSE3-NEXT: por %xmm1, %xmm0
; SSSE3-NEXT: retq
;
; SSE41-LABEL: PR20540:
; SSE41: # BB#0:
-; SSE41-NEXT: pxor %xmm1, %xmm1
-; SSE41-NEXT: pshufb {{.*#+}} xmm1 = zero,zero,zero,zero,zero,zero,zero,zero,xmm1[0,0,0,0,0,0,0,0]
; SSE41-NEXT: pshufb {{.*#+}} xmm0 = xmm0[0,2,4,6,8,10,12,14],zero,zero,zero,zero,zero,zero,zero,zero
-; SSE41-NEXT: por %xmm1, %xmm0
; SSE41-NEXT: retq
;
; AVX-LABEL: PR20540:
; AVX: # BB#0:
-; AVX-NEXT: vpxor %xmm1, %xmm1, %xmm1
-; AVX-NEXT: vpshufb {{.*#+}} xmm1 = zero,zero,zero,zero,zero,zero,zero,zero,xmm1[0,0,0,0,0,0,0,0]
; AVX-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,2,4,6,8,10,12,14],zero,zero,zero,zero,zero,zero,zero,zero
-; AVX-NEXT: vpor %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
%shuffle = shufflevector <8 x i8> %a, <8 x i8> zeroinitializer, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 8, i32 8, i32 8, i32 8, i32 8, i32 8, i32 8>
ret <16 x i8> %shuffle
@@ -505,28 +643,19 @@ define <16 x i8> @shuffle_v16i8_16_zz_zz_zz_zz_zz_zz_zz_zz_zz_zz_zz_zz_zz_zz_zz(
; SSSE3-LABEL: shuffle_v16i8_16_zz_zz_zz_zz_zz_zz_zz_zz_zz_zz_zz_zz_zz_zz_zz:
; SSSE3: # BB#0:
; SSSE3-NEXT: movd %edi, %xmm0
-; SSSE3-NEXT: pxor %xmm1, %xmm1
-; SSSE3-NEXT: pshufb {{.*#+}} xmm1 = zero,xmm1[1,2,3,4,5,6,7,8,9,10,11,12,13,14,15]
; SSSE3-NEXT: pshufb {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
-; SSSE3-NEXT: por %xmm1, %xmm0
; SSSE3-NEXT: retq
;
; SSE41-LABEL: shuffle_v16i8_16_zz_zz_zz_zz_zz_zz_zz_zz_zz_zz_zz_zz_zz_zz_zz:
; SSE41: # BB#0:
; SSE41-NEXT: movd %edi, %xmm0
-; SSE41-NEXT: pxor %xmm1, %xmm1
-; SSE41-NEXT: pshufb {{.*#+}} xmm1 = zero,xmm1[1,2,3,4,5,6,7,8,9,10,11,12,13,14,15]
; SSE41-NEXT: pshufb {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
-; SSE41-NEXT: por %xmm1, %xmm0
; SSE41-NEXT: retq
;
; AVX-LABEL: shuffle_v16i8_16_zz_zz_zz_zz_zz_zz_zz_zz_zz_zz_zz_zz_zz_zz_zz:
; AVX: # BB#0:
; AVX-NEXT: vmovd %edi, %xmm0
-; AVX-NEXT: vpxor %xmm1, %xmm1, %xmm1
-; AVX-NEXT: vpshufb {{.*#+}} xmm1 = zero,xmm1[1,2,3,4,5,6,7,8,9,10,11,12,13,14,15]
; AVX-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
-; AVX-NEXT: vpor %xmm0, %xmm1, %xmm0
; AVX-NEXT: retq
%a = insertelement <16 x i8> undef, i8 %i, i32 0
%shuffle = shufflevector <16 x i8> zeroinitializer, <16 x i8> %a, <16 x i32> <i32 16, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
@@ -544,28 +673,19 @@ define <16 x i8> @shuffle_v16i8_zz_zz_zz_zz_zz_16_zz_zz_zz_zz_zz_zz_zz_zz_zz_zz(
; SSSE3-LABEL: shuffle_v16i8_zz_zz_zz_zz_zz_16_zz_zz_zz_zz_zz_zz_zz_zz_zz_zz:
; SSSE3: # BB#0:
; SSSE3-NEXT: movd %edi, %xmm0
-; SSSE3-NEXT: pxor %xmm1, %xmm1
-; SSSE3-NEXT: pshufb {{.*#+}} xmm1 = xmm1[0,0,0,0,0],zero,xmm1[0,0,0,0,0,0,0,0,0,0]
; SSSE3-NEXT: pshufb {{.*#+}} xmm0 = zero,zero,zero,zero,zero,xmm0[0],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
-; SSSE3-NEXT: por %xmm1, %xmm0
; SSSE3-NEXT: retq
;
; SSE41-LABEL: shuffle_v16i8_zz_zz_zz_zz_zz_16_zz_zz_zz_zz_zz_zz_zz_zz_zz_zz:
; SSE41: # BB#0:
; SSE41-NEXT: movd %edi, %xmm0
-; SSE41-NEXT: pxor %xmm1, %xmm1
-; SSE41-NEXT: pshufb {{.*#+}} xmm1 = xmm1[0,0,0,0,0],zero,xmm1[0,0,0,0,0,0,0,0,0,0]
; SSE41-NEXT: pshufb {{.*#+}} xmm0 = zero,zero,zero,zero,zero,xmm0[0],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
-; SSE41-NEXT: por %xmm1, %xmm0
; SSE41-NEXT: retq
;
; AVX-LABEL: shuffle_v16i8_zz_zz_zz_zz_zz_16_zz_zz_zz_zz_zz_zz_zz_zz_zz_zz:
; AVX: # BB#0:
; AVX-NEXT: vmovd %edi, %xmm0
-; AVX-NEXT: vpxor %xmm1, %xmm1, %xmm1
-; AVX-NEXT: vpshufb {{.*#+}} xmm1 = xmm1[0,0,0,0,0],zero,xmm1[0,0,0,0,0,0,0,0,0,0]
; AVX-NEXT: vpshufb {{.*#+}} xmm0 = zero,zero,zero,zero,zero,xmm0[0],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
-; AVX-NEXT: vpor %xmm0, %xmm1, %xmm0
; AVX-NEXT: retq
%a = insertelement <16 x i8> undef, i8 %i, i32 0
%shuffle = shufflevector <16 x i8> zeroinitializer, <16 x i8> %a, <16 x i32> <i32 0, i32 0, i32 0, i32 0, i32 0, i32 16, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0>
@@ -573,23 +693,11 @@ define <16 x i8> @shuffle_v16i8_zz_zz_zz_zz_zz_16_zz_zz_zz_zz_zz_zz_zz_zz_zz_zz(
}
define <16 x i8> @shuffle_v16i8_zz_uu_uu_zz_uu_uu_zz_zz_zz_zz_zz_zz_zz_zz_zz_16(i8 %i) {
-; SSE2-LABEL: shuffle_v16i8_zz_uu_uu_zz_uu_uu_zz_zz_zz_zz_zz_zz_zz_zz_zz_16:
-; SSE2: # BB#0:
-; SSE2-NEXT: movd %edi, %xmm0
-; SSE2-NEXT: pslldq {{.*#+}} xmm0 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,xmm0[0]
-; SSE2-NEXT: retq
-;
-; SSSE3-LABEL: shuffle_v16i8_zz_uu_uu_zz_uu_uu_zz_zz_zz_zz_zz_zz_zz_zz_zz_16:
-; SSSE3: # BB#0:
-; SSSE3-NEXT: movd %edi, %xmm0
-; SSSE3-NEXT: pslldq {{.*#+}} xmm0 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,xmm0[0]
-; SSSE3-NEXT: retq
-;
-; SSE41-LABEL: shuffle_v16i8_zz_uu_uu_zz_uu_uu_zz_zz_zz_zz_zz_zz_zz_zz_zz_16:
-; SSE41: # BB#0:
-; SSE41-NEXT: movd %edi, %xmm0
-; SSE41-NEXT: pslldq {{.*#+}} xmm0 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,xmm0[0]
-; SSE41-NEXT: retq
+; SSE-LABEL: shuffle_v16i8_zz_uu_uu_zz_uu_uu_zz_zz_zz_zz_zz_zz_zz_zz_zz_16:
+; SSE: # BB#0:
+; SSE-NEXT: movd %edi, %xmm0
+; SSE-NEXT: pslldq {{.*#+}} xmm0 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,xmm0[0]
+; SSE-NEXT: retq
;
; AVX-LABEL: shuffle_v16i8_zz_uu_uu_zz_uu_uu_zz_zz_zz_zz_zz_zz_zz_zz_zz_16:
; AVX: # BB#0:
@@ -612,31 +720,22 @@ define <16 x i8> @shuffle_v16i8_zz_zz_19_zz_zz_zz_zz_zz_zz_zz_zz_zz_zz_zz_zz_zz(
; SSSE3-LABEL: shuffle_v16i8_zz_zz_19_zz_zz_zz_zz_zz_zz_zz_zz_zz_zz_zz_zz_zz:
; SSSE3: # BB#0:
; SSSE3-NEXT: movd %edi, %xmm0
-; SSSE3-NEXT: pslldq {{.*#+}} xmm0 = zero,zero,zero,xmm0[0,1,2,3,4,5,6,7,8,9,10,11,12]
-; SSSE3-NEXT: pxor %xmm1, %xmm1
-; SSSE3-NEXT: pshufb {{.*#+}} xmm1 = xmm1[0,1],zero,xmm1[3,4,5,6,7,8,9,10,11,12,13,14,15]
+; SSSE3-NEXT: pslld $24, %xmm0
; SSSE3-NEXT: pshufb {{.*#+}} xmm0 = zero,zero,xmm0[3],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
-; SSSE3-NEXT: por %xmm1, %xmm0
; SSSE3-NEXT: retq
;
; SSE41-LABEL: shuffle_v16i8_zz_zz_19_zz_zz_zz_zz_zz_zz_zz_zz_zz_zz_zz_zz_zz:
; SSE41: # BB#0:
; SSE41-NEXT: movd %edi, %xmm0
-; SSE41-NEXT: pslldq {{.*#+}} xmm0 = zero,zero,zero,xmm0[0,1,2,3,4,5,6,7,8,9,10,11,12]
-; SSE41-NEXT: pxor %xmm1, %xmm1
-; SSE41-NEXT: pshufb {{.*#+}} xmm1 = xmm1[0,1],zero,xmm1[3,4,5,6,7,8,9,10,11,12,13,14,15]
+; SSE41-NEXT: pslld $24, %xmm0
; SSE41-NEXT: pshufb {{.*#+}} xmm0 = zero,zero,xmm0[3],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
-; SSE41-NEXT: por %xmm1, %xmm0
; SSE41-NEXT: retq
;
; AVX-LABEL: shuffle_v16i8_zz_zz_19_zz_zz_zz_zz_zz_zz_zz_zz_zz_zz_zz_zz_zz:
; AVX: # BB#0:
; AVX-NEXT: vmovd %edi, %xmm0
-; AVX-NEXT: vpslldq {{.*#+}} xmm0 = zero,zero,zero,xmm0[0,1,2,3,4,5,6,7,8,9,10,11,12]
-; AVX-NEXT: vpxor %xmm1, %xmm1, %xmm1
-; AVX-NEXT: vpshufb {{.*#+}} xmm1 = xmm1[0,1],zero,xmm1[3,4,5,6,7,8,9,10,11,12,13,14,15]
+; AVX-NEXT: vpslld $24, %xmm0, %xmm0
; AVX-NEXT: vpshufb {{.*#+}} xmm0 = zero,zero,xmm0[3],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
-; AVX-NEXT: vpor %xmm0, %xmm1, %xmm0
; AVX-NEXT: retq
%a = insertelement <16 x i8> undef, i8 %i, i32 3
%shuffle = shufflevector <16 x i8> zeroinitializer, <16 x i8> %a, <16 x i32> <i32 0, i32 1, i32 19, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
@@ -644,44 +743,24 @@ define <16 x i8> @shuffle_v16i8_zz_zz_19_zz_zz_zz_zz_zz_zz_zz_zz_zz_zz_zz_zz_zz(
}
define <16 x i8> @shuffle_v16i8_zz_zz_zz_zz_zz_zz_zz_zz_zz_zz_zz_zz_16_uu_18_uu(<16 x i8> %a) {
-; SSE2-LABEL: shuffle_v16i8_zz_zz_zz_zz_zz_zz_zz_zz_zz_zz_zz_zz_16_uu_18_uu:
-; SSE2: # BB#0:
-; SSE2-NEXT: pslldq {{.*#+}} xmm0 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,xmm0[0,1,2,3]
-; SSE2-NEXT: retq
-;
-; SSSE3-LABEL: shuffle_v16i8_zz_zz_zz_zz_zz_zz_zz_zz_zz_zz_zz_zz_16_uu_18_uu:
-; SSSE3: # BB#0:
-; SSSE3-NEXT: pslldq {{.*#+}} xmm0 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,xmm0[0,1,2,3]
-; SSSE3-NEXT: retq
-;
-; SSE41-LABEL: shuffle_v16i8_zz_zz_zz_zz_zz_zz_zz_zz_zz_zz_zz_zz_16_uu_18_uu:
-; SSE41: # BB#0:
-; SSE41-NEXT: pslldq {{.*#+}} xmm0 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,xmm0[0,1,2,3]
-; SSE41-NEXT: retq
+; SSE-LABEL: shuffle_v16i8_zz_zz_zz_zz_zz_zz_zz_zz_zz_zz_zz_zz_16_uu_18_uu:
+; SSE: # BB#0:
+; SSE-NEXT: pslldq {{.*#+}} xmm0 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,xmm0[0,1,2,3]
+; SSE-NEXT: retq
;
; AVX-LABEL: shuffle_v16i8_zz_zz_zz_zz_zz_zz_zz_zz_zz_zz_zz_zz_16_uu_18_uu:
; AVX: # BB#0:
; AVX-NEXT: vpslldq {{.*#+}} xmm0 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,xmm0[0,1,2,3]
; AVX-NEXT: retq
- %shuffle = shufflevector <16 x i8> zeroinitializer, <16 x i8> %a, <16 x i32> <i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 09, i32 0, i32 0, i32 0, i32 0, i32 0, i32 16, i32 undef, i32 18, i32 undef>
+ %shuffle = shufflevector <16 x i8> zeroinitializer, <16 x i8> %a, <16 x i32> <i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 16, i32 undef, i32 18, i32 undef>
ret <16 x i8> %shuffle
}
define <16 x i8> @shuffle_v16i8_28_uu_30_31_zz_zz_zz_zz_zz_zz_zz_zz_zz_zz_zz_zz(<16 x i8> %a) {
-; SSE2-LABEL: shuffle_v16i8_28_uu_30_31_zz_zz_zz_zz_zz_zz_zz_zz_zz_zz_zz_zz:
-; SSE2: # BB#0:
-; SSE2-NEXT: psrldq {{.*#+}} xmm0 = xmm0[12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
-; SSE2-NEXT: retq
-;
-; SSSE3-LABEL: shuffle_v16i8_28_uu_30_31_zz_zz_zz_zz_zz_zz_zz_zz_zz_zz_zz_zz:
-; SSSE3: # BB#0:
-; SSSE3-NEXT: psrldq {{.*#+}} xmm0 = xmm0[12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
-; SSSE3-NEXT: retq
-;
-; SSE41-LABEL: shuffle_v16i8_28_uu_30_31_zz_zz_zz_zz_zz_zz_zz_zz_zz_zz_zz_zz:
-; SSE41: # BB#0:
-; SSE41-NEXT: psrldq {{.*#+}} xmm0 = xmm0[12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
-; SSE41-NEXT: retq
+; SSE-LABEL: shuffle_v16i8_28_uu_30_31_zz_zz_zz_zz_zz_zz_zz_zz_zz_zz_zz_zz:
+; SSE: # BB#0:
+; SSE-NEXT: psrldq {{.*#+}} xmm0 = xmm0[12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; SSE-NEXT: retq
;
; AVX-LABEL: shuffle_v16i8_28_uu_30_31_zz_zz_zz_zz_zz_zz_zz_zz_zz_zz_zz_zz:
; AVX: # BB#0:
@@ -868,12 +947,12 @@ define <16 x i8> @shuffle_v16i8_00_uu_uu_uu_uu_uu_uu_uu_01_uu_uu_uu_uu_uu_uu_uu(
;
; SSE41-LABEL: shuffle_v16i8_00_uu_uu_uu_uu_uu_uu_uu_01_uu_uu_uu_uu_uu_uu_uu:
; SSE41: # BB#0:
-; SSE41-NEXT: pmovzxbq %xmm0, %xmm0
+; SSE41-NEXT: pmovzxbq {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,zero,zero,zero,zero,xmm0[1],zero,zero,zero,zero,zero,zero,zero
; SSE41-NEXT: retq
;
; AVX-LABEL: shuffle_v16i8_00_uu_uu_uu_uu_uu_uu_uu_01_uu_uu_uu_uu_uu_uu_uu:
; AVX: # BB#0:
-; AVX-NEXT: vpmovzxbq %xmm0, %xmm0
+; AVX-NEXT: vpmovzxbq {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,zero,zero,zero,zero,xmm0[1],zero,zero,zero,zero,zero,zero,zero
; AVX-NEXT: retq
%shuffle = shufflevector <16 x i8> %a, <16 x i8> zeroinitializer, <16 x i32> <i32 0, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 1, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
ret <16 x i8> %shuffle
@@ -895,12 +974,12 @@ define <16 x i8> @shuffle_v16i8_00_zz_zz_zz_zz_zz_zz_zz_01_zz_zz_zz_zz_zz_zz_zz(
;
; SSE41-LABEL: shuffle_v16i8_00_zz_zz_zz_zz_zz_zz_zz_01_zz_zz_zz_zz_zz_zz_zz:
; SSE41: # BB#0:
-; SSE41-NEXT: pmovzxbq %xmm0, %xmm0
+; SSE41-NEXT: pmovzxbq {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,zero,zero,zero,zero,xmm0[1],zero,zero,zero,zero,zero,zero,zero
; SSE41-NEXT: retq
;
; AVX-LABEL: shuffle_v16i8_00_zz_zz_zz_zz_zz_zz_zz_01_zz_zz_zz_zz_zz_zz_zz:
; AVX: # BB#0:
-; AVX-NEXT: vpmovzxbq %xmm0, %xmm0
+; AVX-NEXT: vpmovzxbq {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,zero,zero,zero,zero,xmm0[1],zero,zero,zero,zero,zero,zero,zero
; AVX-NEXT: retq
%shuffle = shufflevector <16 x i8> %a, <16 x i8> zeroinitializer, <16 x i32> <i32 0, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 1, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
ret <16 x i8> %shuffle
@@ -921,12 +1000,12 @@ define <16 x i8> @shuffle_v16i8_00_uu_uu_uu_01_uu_uu_uu_02_uu_uu_uu_03_uu_uu_uu(
;
; SSE41-LABEL: shuffle_v16i8_00_uu_uu_uu_01_uu_uu_uu_02_uu_uu_uu_03_uu_uu_uu:
; SSE41: # BB#0:
-; SSE41-NEXT: pmovzxbd %xmm0, %xmm0
+; SSE41-NEXT: pmovzxbd {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
; SSE41-NEXT: retq
;
; AVX-LABEL: shuffle_v16i8_00_uu_uu_uu_01_uu_uu_uu_02_uu_uu_uu_03_uu_uu_uu:
; AVX: # BB#0:
-; AVX-NEXT: vpmovzxbd %xmm0, %xmm0
+; AVX-NEXT: vpmovzxbd {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
; AVX-NEXT: retq
%shuffle = shufflevector <16 x i8> %a, <16 x i8> zeroinitializer, <16 x i32> <i32 0, i32 undef, i32 undef, i32 undef, i32 1, i32 undef, i32 undef, i32 undef, i32 2, i32 undef, i32 undef, i32 undef, i32 3, i32 undef, i32 undef, i32 undef>
ret <16 x i8> %shuffle
@@ -949,12 +1028,12 @@ define <16 x i8> @shuffle_v16i8_00_zz_zz_zz_01_zz_zz_zz_02_zz_zz_zz_03_zz_zz_zz(
;
; SSE41-LABEL: shuffle_v16i8_00_zz_zz_zz_01_zz_zz_zz_02_zz_zz_zz_03_zz_zz_zz:
; SSE41: # BB#0:
-; SSE41-NEXT: pmovzxbd %xmm0, %xmm0
+; SSE41-NEXT: pmovzxbd {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
; SSE41-NEXT: retq
;
; AVX-LABEL: shuffle_v16i8_00_zz_zz_zz_01_zz_zz_zz_02_zz_zz_zz_03_zz_zz_zz:
; AVX: # BB#0:
-; AVX-NEXT: vpmovzxbd %xmm0, %xmm0
+; AVX-NEXT: vpmovzxbd {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
; AVX-NEXT: retq
%shuffle = shufflevector <16 x i8> %a, <16 x i8> zeroinitializer, <16 x i32> <i32 0, i32 17, i32 18, i32 19, i32 1, i32 21, i32 22, i32 23, i32 2, i32 25, i32 26, i32 27, i32 3, i32 29, i32 30, i32 31>
ret <16 x i8> %shuffle
@@ -973,12 +1052,12 @@ define <16 x i8> @shuffle_v16i8_00_uu_01_uu_02_uu_03_uu_04_uu_05_uu_06_uu_07_uu(
;
; SSE41-LABEL: shuffle_v16i8_00_uu_01_uu_02_uu_03_uu_04_uu_05_uu_06_uu_07_uu:
; SSE41: # BB#0:
-; SSE41-NEXT: pmovzxbw %xmm0, %xmm0
+; SSE41-NEXT: pmovzxbw {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
; SSE41-NEXT: retq
;
; AVX-LABEL: shuffle_v16i8_00_uu_01_uu_02_uu_03_uu_04_uu_05_uu_06_uu_07_uu:
; AVX: # BB#0:
-; AVX-NEXT: vpmovzxbw %xmm0, %xmm0
+; AVX-NEXT: vpmovzxbw {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
; AVX-NEXT: retq
%shuffle = shufflevector <16 x i8> %a, <16 x i8> zeroinitializer, <16 x i32> <i32 0, i32 undef, i32 1, i32 undef, i32 2, i32 undef, i32 3, i32 undef, i32 4, i32 undef, i32 5, i32 undef, i32 6, i32 undef, i32 7, i32 undef>
ret <16 x i8> %shuffle
@@ -999,12 +1078,12 @@ define <16 x i8> @shuffle_v16i8_00_zz_01_zz_02_zz_03_zz_04_zz_05_zz_06_zz_07_zz(
;
; SSE41-LABEL: shuffle_v16i8_00_zz_01_zz_02_zz_03_zz_04_zz_05_zz_06_zz_07_zz:
; SSE41: # BB#0:
-; SSE41-NEXT: pmovzxbw %xmm0, %xmm0
+; SSE41-NEXT: pmovzxbw {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
; SSE41-NEXT: retq
;
; AVX-LABEL: shuffle_v16i8_00_zz_01_zz_02_zz_03_zz_04_zz_05_zz_06_zz_07_zz:
; AVX: # BB#0:
-; AVX-NEXT: vpmovzxbw %xmm0, %xmm0
+; AVX-NEXT: vpmovzxbw {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
; AVX-NEXT: retq
%shuffle = shufflevector <16 x i8> %a, <16 x i8> zeroinitializer, <16 x i32> <i32 0, i32 17, i32 1, i32 19, i32 2, i32 21, i32 3, i32 23, i32 4, i32 25, i32 5, i32 27, i32 6, i32 29, i32 7, i32 31>
ret <16 x i8> %shuffle
@@ -1016,69 +1095,53 @@ define <16 x i8> @shuffle_v16i8_uu_10_02_07_22_14_07_02_18_03_01_14_18_09_11_00(
; SSE2-NEXT: pxor %xmm2, %xmm2
; SSE2-NEXT: movdqa %xmm0, %xmm3
; SSE2-NEXT: punpckhbw {{.*#+}} xmm3 = xmm3[8],xmm2[8],xmm3[9],xmm2[9],xmm3[10],xmm2[10],xmm3[11],xmm2[11],xmm3[12],xmm2[12],xmm3[13],xmm2[13],xmm3[14],xmm2[14],xmm3[15],xmm2[15]
-; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm3[2,3,0,1]
+; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm3[0,3,0,1]
+; SSE2-NEXT: pshuflw {{.*#+}} xmm4 = xmm4[0,1,2,2,4,5,6,7]
+; SSE2-NEXT: pshufhw {{.*#+}} xmm4 = xmm4[0,1,2,3,4,5,7,7]
+; SSE2-NEXT: movdqa {{.*#+}} xmm5 = [65535,65535,65535,0,65535,0,0,65535]
; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3],xmm0[4],xmm2[4],xmm0[5],xmm2[5],xmm0[6],xmm2[6],xmm0[7],xmm2[7]
-; SSE2-NEXT: pshuflw {{.*#+}} xmm2 = xmm0[0,2,2,3,4,5,6,7]
-; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,3,2,3]
-; SSE2-NEXT: pshuflw {{.*#+}} xmm2 = xmm2[1,0,3,3,4,5,6,7]
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm4[0],xmm2[1],xmm4[1],xmm2[2],xmm4[2],xmm2[3],xmm4[3]
-; SSE2-NEXT: pshuflw {{.*#+}} xmm2 = xmm2[0,2,2,3,4,5,6,7]
-; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,2,2,3]
-; SSE2-NEXT: pshuflw {{.*#+}} xmm2 = xmm2[2,0,3,1,4,5,6,7]
-; SSE2-NEXT: pshuflw {{.*#+}} xmm4 = xmm3[2,1,2,3,4,5,6,7]
-; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm4[0,3,2,3]
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm4 = xmm4[0],xmm0[0],xmm4[1],xmm0[1],xmm4[2],xmm0[2],xmm4[3],xmm0[3]
-; SSE2-NEXT: pshuflw {{.*#+}} xmm4 = xmm4[0,2,2,3,4,5,6,7]
-; SSE2-NEXT: pshufhw {{.*#+}} xmm4 = xmm4[0,1,2,3,4,7,6,7]
-; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm4[0,2,2,3]
-; SSE2-NEXT: pshuflw {{.*#+}} xmm4 = xmm4[0,2,3,1,4,5,6,7]
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm4 = xmm4[0],xmm2[0],xmm4[1],xmm2[1],xmm4[2],xmm2[2],xmm4[3],xmm2[3]
-; SSE2-NEXT: packuswb %xmm0, %xmm4
-; SSE2-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[2,1,2,3,4,5,6,7]
-; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,3,2,3]
-; SSE2-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,1,3,3,4,5,6,7]
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1],xmm0[2],xmm3[2],xmm0[3],xmm3[3]
-; SSE2-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,2,2,3,4,5,6,7]
-; SSE2-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,7,6,7]
-; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
-; SSE2-NEXT: pshuflw {{.*#+}} xmm2 = xmm0[0,2,1,3,4,5,6,7]
-; SSE2-NEXT: packuswb %xmm0, %xmm2
-; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
-; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm1[3,1,2,3]
-; SSE2-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,2,2,3,4,5,6,7]
-; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3],xmm0[4],xmm2[4],xmm0[5],xmm2[5],xmm0[6],xmm2[6],xmm0[7],xmm2[7]
-; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1],xmm0[2],xmm4[2],xmm0[3],xmm4[3],xmm0[4],xmm4[4],xmm0[5],xmm4[5],xmm0[6],xmm4[6],xmm0[7],xmm4[7]
+; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm0[0,1,0,3]
+; SSE2-NEXT: pshuflw {{.*#+}} xmm2 = xmm2[0,3,1,3,4,5,6,7]
+; SSE2-NEXT: pshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,4,5,6,4]
+; SSE2-NEXT: pand %xmm5, %xmm2
+; SSE2-NEXT: pandn %xmm4, %xmm5
+; SSE2-NEXT: por %xmm2, %xmm5
+; SSE2-NEXT: psrlq $16, %xmm3
+; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm3[0,2,2,3]
+; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[3,1,1,3]
+; SSE2-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,1,2,1,4,5,6,7]
+; SSE2-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,5,7,4]
+; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,3,2,3]
+; SSE2-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1]
+; SSE2-NEXT: packuswb %xmm5, %xmm2
+; SSE2-NEXT: movdqa {{.*#+}} xmm0 = [255,255,255,255,0,255,255,255,0,255,255,255,0,255,255,255]
+; SSE2-NEXT: pand %xmm0, %xmm2
+; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,1,0,3]
+; SSE2-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[0,1,3,3,4,5,6,7]
+; SSE2-NEXT: pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,5,5,5,7]
+; SSE2-NEXT: pandn %xmm1, %xmm0
+; SSE2-NEXT: por %xmm2, %xmm0
; SSE2-NEXT: retq
;
; SSSE3-LABEL: shuffle_v16i8_uu_10_02_07_22_14_07_02_18_03_01_14_18_09_11_00:
; SSSE3: # BB#0: # %entry
-; SSSE3-NEXT: movdqa %xmm0, %xmm2
-; SSSE3-NEXT: pshufb {{.*#+}} xmm2 = xmm2[2,7,1,11,u,u,u,u,u,u,u,u,u,u,u,u]
-; SSSE3-NEXT: pshufb {{.*#+}} xmm1 = xmm1[6,6,2,2,2,2,3,3,4,4,5,5,6,6,7,7]
-; SSSE3-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3],xmm1[4],xmm2[4],xmm1[5],xmm2[5],xmm1[6],xmm2[6],xmm1[7],xmm2[7]
-; SSSE3-NEXT: pshufb {{.*#+}} xmm0 = xmm0[10,7,14,2,3,14,9,0,u,u,u,u,u,u,u,u]
-; SSSE3-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
-; SSSE3-NEXT: movdqa %xmm1, %xmm0
+; SSSE3-NEXT: pshufb {{.*#+}} xmm1 = xmm1[u],zero,zero,zero,xmm1[6],zero,zero,zero,xmm1[2],zero,zero,zero,xmm1[2],zero,zero,zero
+; SSSE3-NEXT: pshufb {{.*#+}} xmm0 = xmm0[u,10,2,7],zero,xmm0[14,7,2],zero,xmm0[3,1,14],zero,xmm0[9,11,0]
+; SSSE3-NEXT: por %xmm1, %xmm0
; SSSE3-NEXT: retq
;
; SSE41-LABEL: shuffle_v16i8_uu_10_02_07_22_14_07_02_18_03_01_14_18_09_11_00:
; SSE41: # BB#0: # %entry
-; SSE41-NEXT: movdqa %xmm0, %xmm2
-; SSE41-NEXT: pshufb {{.*#+}} xmm2 = xmm2[2,7,1,11,u,u,u,u,u,u,u,u,u,u,u,u]
-; SSE41-NEXT: pshufb {{.*#+}} xmm1 = xmm1[6,6,2,2,2,2,3,3,4,4,5,5,6,6,7,7]
-; SSE41-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3],xmm1[4],xmm2[4],xmm1[5],xmm2[5],xmm1[6],xmm2[6],xmm1[7],xmm2[7]
-; SSE41-NEXT: pshufb {{.*#+}} xmm0 = xmm0[10,7,14,2,3,14,9,0,u,u,u,u,u,u,u,u]
-; SSE41-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
-; SSE41-NEXT: movdqa %xmm1, %xmm0
+; SSE41-NEXT: pshufb {{.*#+}} xmm1 = xmm1[u],zero,zero,zero,xmm1[6],zero,zero,zero,xmm1[2],zero,zero,zero,xmm1[2],zero,zero,zero
+; SSE41-NEXT: pshufb {{.*#+}} xmm0 = xmm0[u,10,2,7],zero,xmm0[14,7,2],zero,xmm0[3,1,14],zero,xmm0[9,11,0]
+; SSE41-NEXT: por %xmm1, %xmm0
; SSE41-NEXT: retq
;
; AVX-LABEL: shuffle_v16i8_uu_10_02_07_22_14_07_02_18_03_01_14_18_09_11_00:
; AVX: # BB#0: # %entry
-; AVX-NEXT: vpshufb {{.*#+}} xmm2 = xmm0[2,7,1,11,u,u,u,u,u,u,u,u,u,u,u,u]
-; AVX-NEXT: vpshufb {{.*#+}} xmm1 = xmm1[6,6,2,2,2,2,3,3,4,4,5,5,6,6,7,7]
-; AVX-NEXT: vpunpcklbw {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3],xmm1[4],xmm2[4],xmm1[5],xmm2[5],xmm1[6],xmm2[6],xmm1[7],xmm2[7]
-; AVX-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[10,7,14,2,3,14,9,0,u,u,u,u,u,u,u,u]
-; AVX-NEXT: vpunpcklbw {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
+; AVX-NEXT: vpshufb {{.*#+}} xmm1 = xmm1[u],zero,zero,zero,xmm1[6],zero,zero,zero,xmm1[2],zero,zero,zero,xmm1[2],zero,zero,zero
+; AVX-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[u,10,2,7],zero,xmm0[14,7,2],zero,xmm0[3,1,14],zero,xmm0[9,11,0]
+; AVX-NEXT: vpor %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
entry:
%shuffle = shufflevector <16 x i8> %a, <16 x i8> %b, <16 x i32> <i32 undef, i32 10, i32 2, i32 7, i32 22, i32 14, i32 7, i32 2, i32 18, i32 3, i32 1, i32 14, i32 18, i32 9, i32 11, i32 0>
@@ -1098,13 +1161,178 @@ entry:
ret <16 x i8> %s.2.0
}
-define void @constant_gets_selected() {
-; ALL-LABEL: constant_gets_selected:
-; ALL-NOT movd $0, {{%xmm[0-9]+}}
+define void @constant_gets_selected(<4 x i32>* %ptr1, <4 x i32>* %ptr2) {
+; SSE-LABEL: constant_gets_selected:
+; SSE: # BB#0: # %entry
+; SSE-NEXT: xorps %xmm0, %xmm0
+; SSE-NEXT: movaps %xmm0, (%rdi)
+; SSE-NEXT: movaps %xmm0, (%rsi)
+; SSE-NEXT: retq
+;
+; AVX-LABEL: constant_gets_selected:
+; AVX: # BB#0: # %entry
+; AVX-NEXT: vxorps %xmm0, %xmm0, %xmm0
+; AVX-NEXT: vmovaps %xmm0, (%rdi)
+; AVX-NEXT: vmovaps %xmm0, (%rsi)
+; AVX-NEXT: retq
+entry:
%weird_zero = bitcast <4 x i32> zeroinitializer to <16 x i8>
%shuffle.i = shufflevector <16 x i8> <i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 0, i8 0, i8 0, i8 0>, <16 x i8> %weird_zero, <16 x i32> <i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27>
%weirder_zero = bitcast <16 x i8> %shuffle.i to <4 x i32>
- store <4 x i32> %weirder_zero, <4 x i32>* undef, align 16
- store <4 x i32> zeroinitializer, <4 x i32>* undef, align 16
+ store <4 x i32> %weirder_zero, <4 x i32>* %ptr1, align 16
+ store <4 x i32> zeroinitializer, <4 x i32>* %ptr2, align 16
ret void
}
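+; Note: the stores in constant_gets_selected now go through the
+; %ptr1/%ptr2 arguments instead of undef, which keeps them alive; the
+; test checks that the shuffled "weird zero" and the plain
+; zeroinitializer both fold to the one xorps zero vector, with no movd
+; of an immediate.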
+
+;
+; Shuffle to logical bit shifts
+;
+
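+; These tests use shufflevector masks whose zero lanes (indices >= 16
+; select from the zeroinitializer operand) sit contiguously at one end
+; of each wider element, so the whole shuffle folds to a single logical
+; shift. For example, <16,0,16,2,...> places each input byte in the high
+; half of its i16 lane with a zero low byte, which is exactly psllw $8;
+; the mirrored masks below fold to the psrlw/psrld/psrlq forms.
+;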
+define <16 x i8> @shuffle_v16i8_zz_00_zz_02_zz_04_zz_06_zz_08_zz_10_zz_12_zz_14(<16 x i8> %a, <16 x i8> %b) {
+; SSE-LABEL: shuffle_v16i8_zz_00_zz_02_zz_04_zz_06_zz_08_zz_10_zz_12_zz_14:
+; SSE: # BB#0:
+; SSE-NEXT: psllw $8, %xmm0
+; SSE-NEXT: retq
+;
+; AVX-LABEL: shuffle_v16i8_zz_00_zz_02_zz_04_zz_06_zz_08_zz_10_zz_12_zz_14:
+; AVX: # BB#0:
+; AVX-NEXT: vpsllw $8, %xmm0, %xmm0
+; AVX-NEXT: retq
+ %shuffle = shufflevector <16 x i8> %a, <16 x i8> zeroinitializer, <16 x i32><i32 16, i32 0, i32 16, i32 2, i32 16, i32 4, i32 16, i32 6, i32 16, i32 8, i32 16, i32 10, i32 16, i32 12, i32 16, i32 14>
+ ret <16 x i8> %shuffle
+}
+
+define <16 x i8> @shuffle_v16i8_zz_zz_zz_00_zz_zz_zz_04_zz_zz_zz_08_zz_zz_zz_12(<16 x i8> %a, <16 x i8> %b) {
+; SSE-LABEL: shuffle_v16i8_zz_zz_zz_00_zz_zz_zz_04_zz_zz_zz_08_zz_zz_zz_12:
+; SSE: # BB#0:
+; SSE-NEXT: pslld $24, %xmm0
+; SSE-NEXT: retq
+;
+; AVX-LABEL: shuffle_v16i8_zz_zz_zz_00_zz_zz_zz_04_zz_zz_zz_08_zz_zz_zz_12:
+; AVX: # BB#0:
+; AVX-NEXT: vpslld $24, %xmm0, %xmm0
+; AVX-NEXT: retq
+ %shuffle = shufflevector <16 x i8> %a, <16 x i8> zeroinitializer, <16 x i32><i32 16, i32 16, i32 16, i32 0, i32 16, i32 16, i32 16, i32 4, i32 16, i32 16, i32 16, i32 8, i32 16, i32 16, i32 16, i32 12>
+ ret <16 x i8> %shuffle
+}
+
+define <16 x i8> @shuffle_v16i8_zz_zz_zz_zz_zz_zz_zz_00_zz_zz_zz_zz_zz_zz_zz_08(<16 x i8> %a, <16 x i8> %b) {
+; SSE-LABEL: shuffle_v16i8_zz_zz_zz_zz_zz_zz_zz_00_zz_zz_zz_zz_zz_zz_zz_08:
+; SSE: # BB#0:
+; SSE-NEXT: psllq $56, %xmm0
+; SSE-NEXT: retq
+;
+; AVX-LABEL: shuffle_v16i8_zz_zz_zz_zz_zz_zz_zz_00_zz_zz_zz_zz_zz_zz_zz_08:
+; AVX: # BB#0:
+; AVX-NEXT: vpsllq $56, %xmm0, %xmm0
+; AVX-NEXT: retq
+ %shuffle = shufflevector <16 x i8> %a, <16 x i8> zeroinitializer, <16 x i32><i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 0, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 8>
+ ret <16 x i8> %shuffle
+}
+
+define <16 x i8> @shuffle_v16i8_zz_00_uu_02_03_uu_05_06_zz_08_09_uu_11_12_13_14(<16 x i8> %a, <16 x i8> %b) {
+; SSE-LABEL: shuffle_v16i8_zz_00_uu_02_03_uu_05_06_zz_08_09_uu_11_12_13_14:
+; SSE: # BB#0:
+; SSE-NEXT: psllq $8, %xmm0
+; SSE-NEXT: retq
+;
+; AVX-LABEL: shuffle_v16i8_zz_00_uu_02_03_uu_05_06_zz_08_09_uu_11_12_13_14:
+; AVX: # BB#0:
+; AVX-NEXT: vpsllq $8, %xmm0, %xmm0
+; AVX-NEXT: retq
+ %shuffle = shufflevector <16 x i8> %a, <16 x i8> zeroinitializer, <16 x i32><i32 16, i32 0, i32 undef, i32 2, i32 3, i32 undef, i32 5, i32 6, i32 16, i32 8, i32 9, i32 undef, i32 11, i32 12, i32 13, i32 14>
+ ret <16 x i8> %shuffle
+}
+
+define <16 x i8> @shuffle_v16i8_01_uu_uu_uu_uu_zz_uu_zz_uu_zz_11_zz_13_zz_15_zz(<16 x i8> %a, <16 x i8> %b) {
+; SSE-LABEL: shuffle_v16i8_01_uu_uu_uu_uu_zz_uu_zz_uu_zz_11_zz_13_zz_15_zz:
+; SSE: # BB#0:
+; SSE-NEXT: psrlw $8, %xmm0
+; SSE-NEXT: retq
+;
+; AVX-LABEL: shuffle_v16i8_01_uu_uu_uu_uu_zz_uu_zz_uu_zz_11_zz_13_zz_15_zz:
+; AVX: # BB#0:
+; AVX-NEXT: vpsrlw $8, %xmm0, %xmm0
+; AVX-NEXT: retq
+ %shuffle = shufflevector <16 x i8> %a, <16 x i8> zeroinitializer, <16 x i32><i32 1, i32 undef, i32 undef, i32 undef, i32 undef, i32 16, i32 undef, i32 16, i32 undef, i32 16, i32 11, i32 16, i32 13, i32 16, i32 15, i32 16>
+ ret <16 x i8> %shuffle
+}
+
+define <16 x i8> @shuffle_v16i8_02_03_zz_zz_06_07_uu_uu_uu_uu_uu_uu_14_15_zz_zz(<16 x i8> %a, <16 x i8> %b) {
+; SSE-LABEL: shuffle_v16i8_02_03_zz_zz_06_07_uu_uu_uu_uu_uu_uu_14_15_zz_zz:
+; SSE: # BB#0:
+; SSE-NEXT: psrld $16, %xmm0
+; SSE-NEXT: retq
+;
+; AVX-LABEL: shuffle_v16i8_02_03_zz_zz_06_07_uu_uu_uu_uu_uu_uu_14_15_zz_zz:
+; AVX: # BB#0:
+; AVX-NEXT: vpsrld $16, %xmm0, %xmm0
+; AVX-NEXT: retq
+ %shuffle = shufflevector <16 x i8> %a, <16 x i8> zeroinitializer, <16 x i32><i32 2, i32 3, i32 16, i32 16, i32 6, i32 7, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 14, i32 15, i32 16, i32 16>
+ ret <16 x i8> %shuffle
+}
+
+define <16 x i8> @shuffle_v16i8_07_zz_zz_zz_zz_zz_uu_uu_15_uu_uu_uu_uu_uu_zz_zz(<16 x i8> %a, <16 x i8> %b) {
+; SSE-LABEL: shuffle_v16i8_07_zz_zz_zz_zz_zz_uu_uu_15_uu_uu_uu_uu_uu_zz_zz:
+; SSE: # BB#0:
+; SSE-NEXT: psrlq $56, %xmm0
+; SSE-NEXT: retq
+;
+; AVX-LABEL: shuffle_v16i8_07_zz_zz_zz_zz_zz_uu_uu_15_uu_uu_uu_uu_uu_zz_zz:
+; AVX: # BB#0:
+; AVX-NEXT: vpsrlq $56, %xmm0, %xmm0
+; AVX-NEXT: retq
+ %shuffle = shufflevector <16 x i8> %a, <16 x i8> zeroinitializer, <16 x i32><i32 7, i32 16, i32 16, i32 16, i32 16, i32 16, i32 undef, i32 undef, i32 15, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 16, i32 16>
+ ret <16 x i8> %shuffle
+}
+
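+; PR12412: selecting the even bytes of both inputs behaves like
+; truncating each input, viewed as <8 x i16>, to bytes and concatenating.
+; Without pshufb (SSE2) this is done by pand-masking away the odd bytes
+; and repacking with packuswb; SSSE3 and later compress each input with
+; one shared pshufb mask and join the halves with punpcklqdq.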
+define <16 x i8> @PR12412(<16 x i8> %inval1, <16 x i8> %inval2) {
+; SSE2-LABEL: PR12412:
+; SSE2: # BB#0: # %entry
+; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [255,255,255,255,255,255,255,255]
+; SSE2-NEXT: pand %xmm2, %xmm1
+; SSE2-NEXT: pand %xmm2, %xmm0
+; SSE2-NEXT: packuswb %xmm1, %xmm0
+; SSE2-NEXT: retq
+;
+; SSSE3-LABEL: PR12412:
+; SSSE3: # BB#0: # %entry
+; SSSE3-NEXT: movdqa {{.*#+}} xmm2 = <0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u>
+; SSSE3-NEXT: pshufb %xmm2, %xmm1
+; SSSE3-NEXT: pshufb %xmm2, %xmm0
+; SSSE3-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; SSSE3-NEXT: retq
+;
+; SSE41-LABEL: PR12412:
+; SSE41: # BB#0: # %entry
+; SSE41-NEXT: movdqa {{.*#+}} xmm2 = <0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u>
+; SSE41-NEXT: pshufb %xmm2, %xmm1
+; SSE41-NEXT: pshufb %xmm2, %xmm0
+; SSE41-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; SSE41-NEXT: retq
+;
+; AVX-LABEL: PR12412:
+; AVX: # BB#0: # %entry
+; AVX-NEXT: vmovdqa {{.*#+}} xmm2 = <0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u>
+; AVX-NEXT: vpshufb %xmm2, %xmm1, %xmm1
+; AVX-NEXT: vpshufb %xmm2, %xmm0, %xmm0
+; AVX-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; AVX-NEXT: retq
+entry:
+ %0 = shufflevector <16 x i8> %inval1, <16 x i8> %inval2, <16 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14, i32 16, i32 18, i32 20, i32 22, i32 24, i32 26, i32 28, i32 30>
+ ret <16 x i8> %0
+}
+
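+; The test below exercises the same shift lowering at i32 granularity:
+; with the undef bytes free to match anything, the mask reads as "shift
+; every i32 lane right by one byte", hence a single psrld $8.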
+define <16 x i8> @shuffle_v16i8_uu_02_03_zz_uu_06_07_zz_uu_10_11_zz_uu_14_15_zz(<16 x i8> %a) {
+; SSE-LABEL: shuffle_v16i8_uu_02_03_zz_uu_06_07_zz_uu_10_11_zz_uu_14_15_zz:
+; SSE: # BB#0:
+; SSE-NEXT: psrld $8, %xmm0
+; SSE-NEXT: retq
+;
+; AVX-LABEL: shuffle_v16i8_uu_02_03_zz_uu_06_07_zz_uu_10_11_zz_uu_14_15_zz:
+; AVX: # BB#0:
+; AVX-NEXT: vpsrld $8, %xmm0, %xmm0
+; AVX-NEXT: retq
+ %shuffle = shufflevector <16 x i8> %a, <16 x i8> zeroinitializer, <16 x i32> <i32 undef, i32 2, i32 3, i32 16, i32 undef, i32 6, i32 7, i32 16, i32 undef, i32 10, i32 11, i32 16, i32 undef, i32 14, i32 15, i32 16>
+ ret <16 x i8> %shuffle
+}
diff --git a/test/CodeGen/X86/vector-shuffle-128-v2.ll b/test/CodeGen/X86/vector-shuffle-128-v2.ll
index 9affee9..7214803 100644
--- a/test/CodeGen/X86/vector-shuffle-128-v2.ll
+++ b/test/CodeGen/X86/vector-shuffle-128-v2.ll
@@ -1,9 +1,9 @@
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=x86-64 -x86-experimental-vector-shuffle-lowering | FileCheck %s --check-prefix=ALL --check-prefix=SSE --check-prefix=SSE2
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=x86-64 -mattr=+sse3 -x86-experimental-vector-shuffle-lowering | FileCheck %s --check-prefix=ALL --check-prefix=SSE --check-prefix=SSE3
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=x86-64 -mattr=+ssse3 -x86-experimental-vector-shuffle-lowering | FileCheck %s --check-prefix=ALL --check-prefix=SSE --check-prefix=SSSE3
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=x86-64 -mattr=+sse4.1 -x86-experimental-vector-shuffle-lowering | FileCheck %s --check-prefix=ALL --check-prefix=SSE --check-prefix=SSE41
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=x86-64 -mattr=+avx -x86-experimental-vector-shuffle-lowering | FileCheck %s --check-prefix=ALL --check-prefix=AVX --check-prefix=AVX1
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=x86-64 -mattr=+avx2 -x86-experimental-vector-shuffle-lowering | FileCheck %s --check-prefix=ALL --check-prefix=AVX --check-prefix=AVX2
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=x86-64 | FileCheck %s --check-prefix=ALL --check-prefix=SSE --check-prefix=SSE2
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=x86-64 -mattr=+sse3 | FileCheck %s --check-prefix=ALL --check-prefix=SSE --check-prefix=SSE3
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=x86-64 -mattr=+ssse3 | FileCheck %s --check-prefix=ALL --check-prefix=SSE --check-prefix=SSSE3
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=x86-64 -mattr=+sse4.1 | FileCheck %s --check-prefix=ALL --check-prefix=SSE --check-prefix=SSE41
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=x86-64 -mattr=+avx | FileCheck %s --check-prefix=ALL --check-prefix=AVX --check-prefix=AVX1
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=x86-64 -mattr=+avx2 | FileCheck %s --check-prefix=ALL --check-prefix=AVX --check-prefix=AVX2
target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
target triple = "x86_64-unknown-unknown"
@@ -105,22 +105,22 @@ define <2 x double> @shuffle_v2f64_00(<2 x double> %a, <2 x double> %b) {
;
; SSE3-LABEL: shuffle_v2f64_00:
; SSE3: # BB#0:
-; SSE3-NEXT: unpcklpd {{.*#+}} xmm0 = xmm0[0,0]
+; SSE3-NEXT: movddup {{.*#+}} xmm0 = xmm0[0,0]
; SSE3-NEXT: retq
;
; SSSE3-LABEL: shuffle_v2f64_00:
; SSSE3: # BB#0:
-; SSSE3-NEXT: unpcklpd {{.*#+}} xmm0 = xmm0[0,0]
+; SSSE3-NEXT: movddup {{.*#+}} xmm0 = xmm0[0,0]
; SSSE3-NEXT: retq
;
; SSE41-LABEL: shuffle_v2f64_00:
; SSE41: # BB#0:
-; SSE41-NEXT: unpcklpd {{.*#+}} xmm0 = xmm0[0,0]
+; SSE41-NEXT: movddup {{.*#+}} xmm0 = xmm0[0,0]
; SSE41-NEXT: retq
;
; AVX-LABEL: shuffle_v2f64_00:
; AVX: # BB#0:
-; AVX-NEXT: vunpcklpd {{.*#+}} xmm0 = xmm0[0,0]
+; AVX-NEXT: vmovddup {{.*#+}} xmm0 = xmm0[0,0]
; AVX-NEXT: retq
%shuffle = shufflevector <2 x double> %a, <2 x double> %b, <2 x i32> <i32 0, i32 0>
ret <2 x double> %shuffle
@@ -160,25 +160,22 @@ define <2 x double> @shuffle_v2f64_22(<2 x double> %a, <2 x double> %b) {
;
; SSE3-LABEL: shuffle_v2f64_22:
; SSE3: # BB#0:
-; SSE3-NEXT: unpcklpd {{.*#+}} xmm1 = xmm1[0,0]
-; SSE3-NEXT: movapd %xmm1, %xmm0
+; SSE3-NEXT: movddup {{.*#+}} xmm0 = xmm1[0,0]
; SSE3-NEXT: retq
;
; SSSE3-LABEL: shuffle_v2f64_22:
; SSSE3: # BB#0:
-; SSSE3-NEXT: unpcklpd {{.*#+}} xmm1 = xmm1[0,0]
-; SSSE3-NEXT: movapd %xmm1, %xmm0
+; SSSE3-NEXT: movddup {{.*#+}} xmm0 = xmm1[0,0]
; SSSE3-NEXT: retq
;
; SSE41-LABEL: shuffle_v2f64_22:
; SSE41: # BB#0:
-; SSE41-NEXT: unpcklpd {{.*#+}} xmm1 = xmm1[0,0]
-; SSE41-NEXT: movapd %xmm1, %xmm0
+; SSE41-NEXT: movddup {{.*#+}} xmm0 = xmm1[0,0]
; SSE41-NEXT: retq
;
; AVX-LABEL: shuffle_v2f64_22:
; AVX: # BB#0:
-; AVX-NEXT: vunpcklpd {{.*#+}} xmm0 = xmm1[0,0]
+; AVX-NEXT: vmovddup {{.*#+}} xmm0 = xmm1[0,0]
; AVX-NEXT: retq
%shuffle = shufflevector <2 x double> %a, <2 x double> %b, <2 x i32> <i32 2, i32 2>
ret <2 x double> %shuffle
@@ -214,20 +211,20 @@ define <2 x double> @shuffle_v2f64_33(<2 x double> %a, <2 x double> %b) {
define <2 x double> @shuffle_v2f64_03(<2 x double> %a, <2 x double> %b) {
; SSE2-LABEL: shuffle_v2f64_03:
; SSE2: # BB#0:
-; SSE2-NEXT: movsd %xmm0, %xmm1
-; SSE2-NEXT: movaps %xmm1, %xmm0
+; SSE2-NEXT: movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1]
+; SSE2-NEXT: movapd %xmm1, %xmm0
; SSE2-NEXT: retq
;
; SSE3-LABEL: shuffle_v2f64_03:
; SSE3: # BB#0:
-; SSE3-NEXT: movsd %xmm0, %xmm1
-; SSE3-NEXT: movaps %xmm1, %xmm0
+; SSE3-NEXT: movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1]
+; SSE3-NEXT: movapd %xmm1, %xmm0
; SSE3-NEXT: retq
;
; SSSE3-LABEL: shuffle_v2f64_03:
; SSSE3: # BB#0:
-; SSSE3-NEXT: movsd %xmm0, %xmm1
-; SSSE3-NEXT: movaps %xmm1, %xmm0
+; SSSE3-NEXT: movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1]
+; SSSE3-NEXT: movapd %xmm1, %xmm0
; SSSE3-NEXT: retq
;
; SSE41-LABEL: shuffle_v2f64_03:
@@ -245,17 +242,17 @@ define <2 x double> @shuffle_v2f64_03(<2 x double> %a, <2 x double> %b) {
define <2 x double> @shuffle_v2f64_21(<2 x double> %a, <2 x double> %b) {
; SSE2-LABEL: shuffle_v2f64_21:
; SSE2: # BB#0:
-; SSE2-NEXT: movsd %xmm1, %xmm0
+; SSE2-NEXT: movsd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
; SSE2-NEXT: retq
;
; SSE3-LABEL: shuffle_v2f64_21:
; SSE3: # BB#0:
-; SSE3-NEXT: movsd %xmm1, %xmm0
+; SSE3-NEXT: movsd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
; SSE3-NEXT: retq
;
; SSSE3-LABEL: shuffle_v2f64_21:
; SSSE3: # BB#0:
-; SSSE3-NEXT: movsd %xmm1, %xmm0
+; SSSE3-NEXT: movsd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
; SSSE3-NEXT: retq
;
; SSE41-LABEL: shuffle_v2f64_21:
@@ -302,20 +299,20 @@ define <2 x i64> @shuffle_v2i64_02_copy(<2 x i64> %nonce, <2 x i64> %a, <2 x i64
define <2 x i64> @shuffle_v2i64_03(<2 x i64> %a, <2 x i64> %b) {
; SSE2-LABEL: shuffle_v2i64_03:
; SSE2: # BB#0:
-; SSE2-NEXT: movsd %xmm0, %xmm1
-; SSE2-NEXT: movaps %xmm1, %xmm0
+; SSE2-NEXT: movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1]
+; SSE2-NEXT: movapd %xmm1, %xmm0
; SSE2-NEXT: retq
;
; SSE3-LABEL: shuffle_v2i64_03:
; SSE3: # BB#0:
-; SSE3-NEXT: movsd %xmm0, %xmm1
-; SSE3-NEXT: movaps %xmm1, %xmm0
+; SSE3-NEXT: movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1]
+; SSE3-NEXT: movapd %xmm1, %xmm0
; SSE3-NEXT: retq
;
; SSSE3-LABEL: shuffle_v2i64_03:
; SSSE3: # BB#0:
-; SSSE3-NEXT: movsd %xmm0, %xmm1
-; SSSE3-NEXT: movaps %xmm1, %xmm0
+; SSSE3-NEXT: movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1]
+; SSSE3-NEXT: movapd %xmm1, %xmm0
; SSSE3-NEXT: retq
;
; SSE41-LABEL: shuffle_v2i64_03:
@@ -338,20 +335,20 @@ define <2 x i64> @shuffle_v2i64_03(<2 x i64> %a, <2 x i64> %b) {
define <2 x i64> @shuffle_v2i64_03_copy(<2 x i64> %nonce, <2 x i64> %a, <2 x i64> %b) {
; SSE2-LABEL: shuffle_v2i64_03_copy:
; SSE2: # BB#0:
-; SSE2-NEXT: movsd %xmm1, %xmm2
-; SSE2-NEXT: movaps %xmm2, %xmm0
+; SSE2-NEXT: movsd {{.*#+}} xmm2 = xmm1[0],xmm2[1]
+; SSE2-NEXT: movapd %xmm2, %xmm0
; SSE2-NEXT: retq
;
; SSE3-LABEL: shuffle_v2i64_03_copy:
; SSE3: # BB#0:
-; SSE3-NEXT: movsd %xmm1, %xmm2
-; SSE3-NEXT: movaps %xmm2, %xmm0
+; SSE3-NEXT: movsd {{.*#+}} xmm2 = xmm1[0],xmm2[1]
+; SSE3-NEXT: movapd %xmm2, %xmm0
; SSE3-NEXT: retq
;
; SSSE3-LABEL: shuffle_v2i64_03_copy:
; SSSE3: # BB#0:
-; SSSE3-NEXT: movsd %xmm1, %xmm2
-; SSSE3-NEXT: movaps %xmm2, %xmm0
+; SSSE3-NEXT: movsd {{.*#+}} xmm2 = xmm1[0],xmm2[1]
+; SSSE3-NEXT: movapd %xmm2, %xmm0
; SSSE3-NEXT: retq
;
; SSE41-LABEL: shuffle_v2i64_03_copy:
@@ -492,17 +489,17 @@ define <2 x i64> @shuffle_v2i64_20_copy(<2 x i64> %nonce, <2 x i64> %a, <2 x i64
define <2 x i64> @shuffle_v2i64_21(<2 x i64> %a, <2 x i64> %b) {
; SSE2-LABEL: shuffle_v2i64_21:
; SSE2: # BB#0:
-; SSE2-NEXT: movsd %xmm1, %xmm0
+; SSE2-NEXT: movsd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
; SSE2-NEXT: retq
;
; SSE3-LABEL: shuffle_v2i64_21:
; SSE3: # BB#0:
-; SSE3-NEXT: movsd %xmm1, %xmm0
+; SSE3-NEXT: movsd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
; SSE3-NEXT: retq
;
; SSSE3-LABEL: shuffle_v2i64_21:
; SSSE3: # BB#0:
-; SSSE3-NEXT: movsd %xmm1, %xmm0
+; SSSE3-NEXT: movsd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
; SSSE3-NEXT: retq
;
; SSE41-LABEL: shuffle_v2i64_21:
@@ -525,20 +522,20 @@ define <2 x i64> @shuffle_v2i64_21(<2 x i64> %a, <2 x i64> %b) {
define <2 x i64> @shuffle_v2i64_21_copy(<2 x i64> %nonce, <2 x i64> %a, <2 x i64> %b) {
; SSE2-LABEL: shuffle_v2i64_21_copy:
; SSE2: # BB#0:
-; SSE2-NEXT: movsd %xmm2, %xmm1
-; SSE2-NEXT: movaps %xmm1, %xmm0
+; SSE2-NEXT: movsd {{.*#+}} xmm1 = xmm2[0],xmm1[1]
+; SSE2-NEXT: movapd %xmm1, %xmm0
; SSE2-NEXT: retq
;
; SSE3-LABEL: shuffle_v2i64_21_copy:
; SSE3: # BB#0:
-; SSE3-NEXT: movsd %xmm2, %xmm1
-; SSE3-NEXT: movaps %xmm1, %xmm0
+; SSE3-NEXT: movsd {{.*#+}} xmm1 = xmm2[0],xmm1[1]
+; SSE3-NEXT: movapd %xmm1, %xmm0
; SSE3-NEXT: retq
;
; SSSE3-LABEL: shuffle_v2i64_21_copy:
; SSSE3: # BB#0:
-; SSSE3-NEXT: movsd %xmm2, %xmm1
-; SSSE3-NEXT: movaps %xmm1, %xmm0
+; SSSE3-NEXT: movsd {{.*#+}} xmm1 = xmm2[0],xmm1[1]
+; SSSE3-NEXT: movapd %xmm1, %xmm0
; SSSE3-NEXT: retq
;
; SSE41-LABEL: shuffle_v2i64_21_copy:
@@ -653,12 +650,12 @@ define <2 x i64> @shuffle_v2i64_31_copy(<2 x i64> %nonce, <2 x i64> %a, <2 x i64
define <2 x i64> @shuffle_v2i64_0z(<2 x i64> %a) {
; SSE-LABEL: shuffle_v2i64_0z:
; SSE: # BB#0:
-; SSE-NEXT: movq %xmm0, %xmm0
+; SSE-NEXT: movq {{.*#+}} xmm0 = xmm0[0],zero
; SSE-NEXT: retq
;
; AVX-LABEL: shuffle_v2i64_0z:
; AVX: # BB#0:
-; AVX-NEXT: vmovq %xmm0, %xmm0
+; AVX-NEXT: vmovq {{.*#+}} xmm0 = xmm0[0],zero
; AVX-NEXT: retq
%shuffle = shufflevector <2 x i64> %a, <2 x i64> zeroinitializer, <2 x i32> <i32 0, i32 3>
ret <2 x i64> %shuffle
@@ -667,14 +664,12 @@ define <2 x i64> @shuffle_v2i64_0z(<2 x i64> %a) {
define <2 x i64> @shuffle_v2i64_1z(<2 x i64> %a) {
; SSE-LABEL: shuffle_v2i64_1z:
; SSE: # BB#0:
-; SSE-NEXT: pxor %xmm1, %xmm1
-; SSE-NEXT: punpckhqdq {{.*#+}} xmm0 = xmm0[1],xmm1[1]
+; SSE-NEXT: psrldq {{.*#+}} xmm0 = xmm0[8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero
; SSE-NEXT: retq
;
; AVX-LABEL: shuffle_v2i64_1z:
; AVX: # BB#0:
-; AVX-NEXT: vpxor %xmm1, %xmm1, %xmm1
-; AVX-NEXT: vpunpckhqdq {{.*#+}} xmm0 = xmm0[1],xmm1[1]
+; AVX-NEXT: vpsrldq {{.*#+}} xmm0 = xmm0[8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero
; AVX-NEXT: retq
%shuffle = shufflevector <2 x i64> %a, <2 x i64> zeroinitializer, <2 x i32> <i32 1, i32 3>
ret <2 x i64> %shuffle
@@ -683,14 +678,12 @@ define <2 x i64> @shuffle_v2i64_1z(<2 x i64> %a) {
define <2 x i64> @shuffle_v2i64_z0(<2 x i64> %a) {
; SSE-LABEL: shuffle_v2i64_z0:
; SSE: # BB#0:
-; SSE-NEXT: movq %xmm0, %xmm0
-; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
+; SSE-NEXT: pslldq {{.*#+}} xmm0 = zero,zero,zero,zero,zero,zero,zero,zero,xmm0[0,1,2,3,4,5,6,7]
; SSE-NEXT: retq
;
; AVX-LABEL: shuffle_v2i64_z0:
; AVX: # BB#0:
-; AVX-NEXT: vmovq %xmm0, %xmm0
-; AVX-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
+; AVX-NEXT: vpslldq {{.*#+}} xmm0 = zero,zero,zero,zero,zero,zero,zero,zero,xmm0[0,1,2,3,4,5,6,7]
; AVX-NEXT: retq
%shuffle = shufflevector <2 x i64> %a, <2 x i64> zeroinitializer, <2 x i32> <i32 2, i32 0>
ret <2 x i64> %shuffle
@@ -699,20 +692,20 @@ define <2 x i64> @shuffle_v2i64_z0(<2 x i64> %a) {
define <2 x i64> @shuffle_v2i64_z1(<2 x i64> %a) {
; SSE2-LABEL: shuffle_v2i64_z1:
; SSE2: # BB#0:
-; SSE2-NEXT: xorps %xmm1, %xmm1
-; SSE2-NEXT: movsd %xmm1, %xmm0
+; SSE2-NEXT: xorpd %xmm1, %xmm1
+; SSE2-NEXT: movsd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
; SSE2-NEXT: retq
;
; SSE3-LABEL: shuffle_v2i64_z1:
; SSE3: # BB#0:
-; SSE3-NEXT: xorps %xmm1, %xmm1
-; SSE3-NEXT: movsd %xmm1, %xmm0
+; SSE3-NEXT: xorpd %xmm1, %xmm1
+; SSE3-NEXT: movsd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
; SSE3-NEXT: retq
;
; SSSE3-LABEL: shuffle_v2i64_z1:
; SSSE3: # BB#0:
-; SSSE3-NEXT: xorps %xmm1, %xmm1
-; SSSE3-NEXT: movsd %xmm1, %xmm0
+; SSSE3-NEXT: xorpd %xmm1, %xmm1
+; SSSE3-NEXT: movsd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
; SSSE3-NEXT: retq
;
; SSE41-LABEL: shuffle_v2i64_z1:
@@ -739,12 +732,12 @@ define <2 x i64> @shuffle_v2i64_z1(<2 x i64> %a) {
define <2 x double> @shuffle_v2f64_0z(<2 x double> %a) {
; SSE-LABEL: shuffle_v2f64_0z:
; SSE: # BB#0:
-; SSE-NEXT: movq %xmm0, %xmm0
+; SSE-NEXT: movq {{.*#+}} xmm0 = xmm0[0],zero
; SSE-NEXT: retq
;
; AVX-LABEL: shuffle_v2f64_0z:
; AVX: # BB#0:
-; AVX-NEXT: vmovq %xmm0, %xmm0
+; AVX-NEXT: vmovq {{.*#+}} xmm0 = xmm0[0],zero
; AVX-NEXT: retq
%shuffle = shufflevector <2 x double> %a, <2 x double> zeroinitializer, <2 x i32> <i32 0, i32 3>
ret <2 x double> %shuffle
@@ -786,20 +779,20 @@ define <2 x double> @shuffle_v2f64_z0(<2 x double> %a) {
define <2 x double> @shuffle_v2f64_z1(<2 x double> %a) {
; SSE2-LABEL: shuffle_v2f64_z1:
; SSE2: # BB#0:
-; SSE2-NEXT: xorps %xmm1, %xmm1
-; SSE2-NEXT: movsd %xmm1, %xmm0
+; SSE2-NEXT: xorpd %xmm1, %xmm1
+; SSE2-NEXT: movsd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
; SSE2-NEXT: retq
;
; SSE3-LABEL: shuffle_v2f64_z1:
; SSE3: # BB#0:
-; SSE3-NEXT: xorps %xmm1, %xmm1
-; SSE3-NEXT: movsd %xmm1, %xmm0
+; SSE3-NEXT: xorpd %xmm1, %xmm1
+; SSE3-NEXT: movsd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
; SSE3-NEXT: retq
;
; SSSE3-LABEL: shuffle_v2f64_z1:
; SSSE3: # BB#0:
-; SSSE3-NEXT: xorps %xmm1, %xmm1
-; SSSE3-NEXT: movsd %xmm1, %xmm0
+; SSSE3-NEXT: xorpd %xmm1, %xmm1
+; SSSE3-NEXT: movsd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
; SSSE3-NEXT: retq
;
; SSE41-LABEL: shuffle_v2f64_z1:
@@ -835,12 +828,12 @@ define <2 x i64> @insert_reg_and_zero_v2i64(i64 %a) {
define <2 x i64> @insert_mem_and_zero_v2i64(i64* %ptr) {
; SSE-LABEL: insert_mem_and_zero_v2i64:
; SSE: # BB#0:
-; SSE-NEXT: movq (%rdi), %xmm0
+; SSE-NEXT: movq {{.*#+}} xmm0 = mem[0],zero
; SSE-NEXT: retq
;
; AVX-LABEL: insert_mem_and_zero_v2i64:
; AVX: # BB#0:
-; AVX-NEXT: vmovq (%rdi), %xmm0
+; AVX-NEXT: vmovq {{.*#+}} xmm0 = mem[0],zero
; AVX-NEXT: retq
%a = load i64* %ptr
%v = insertelement <2 x i64> undef, i64 %a, i32 0
@@ -851,12 +844,12 @@ define <2 x i64> @insert_mem_and_zero_v2i64(i64* %ptr) {
define <2 x double> @insert_reg_and_zero_v2f64(double %a) {
; SSE-LABEL: insert_reg_and_zero_v2f64:
; SSE: # BB#0:
-; SSE-NEXT: movq %xmm0, %xmm0
+; SSE-NEXT: movq {{.*#+}} xmm0 = xmm0[0],zero
; SSE-NEXT: retq
;
; AVX-LABEL: insert_reg_and_zero_v2f64:
; AVX: # BB#0:
-; AVX-NEXT: vmovq %xmm0, %xmm0
+; AVX-NEXT: vmovq {{.*#+}} xmm0 = xmm0[0],zero
; AVX-NEXT: retq
%v = insertelement <2 x double> undef, double %a, i32 0
%shuffle = shufflevector <2 x double> %v, <2 x double> zeroinitializer, <2 x i32> <i32 0, i32 3>
@@ -866,12 +859,12 @@ define <2 x double> @insert_reg_and_zero_v2f64(double %a) {
define <2 x double> @insert_mem_and_zero_v2f64(double* %ptr) {
; SSE-LABEL: insert_mem_and_zero_v2f64:
; SSE: # BB#0:
-; SSE-NEXT: movsd (%rdi), %xmm0
+; SSE-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
; SSE-NEXT: retq
;
; AVX-LABEL: insert_mem_and_zero_v2f64:
; AVX: # BB#0:
-; AVX-NEXT: vmovsd (%rdi), %xmm0
+; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
; AVX-NEXT: retq
%a = load double* %ptr
%v = insertelement <2 x double> undef, double %a, i32 0
@@ -883,19 +876,19 @@ define <2 x i64> @insert_reg_lo_v2i64(i64 %a, <2 x i64> %b) {
; SSE2-LABEL: insert_reg_lo_v2i64:
; SSE2: # BB#0:
; SSE2-NEXT: movd %rdi, %xmm1
-; SSE2-NEXT: movsd %xmm1, %xmm0
+; SSE2-NEXT: movsd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
; SSE2-NEXT: retq
;
; SSE3-LABEL: insert_reg_lo_v2i64:
; SSE3: # BB#0:
; SSE3-NEXT: movd %rdi, %xmm1
-; SSE3-NEXT: movsd %xmm1, %xmm0
+; SSE3-NEXT: movsd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
; SSE3-NEXT: retq
;
; SSSE3-LABEL: insert_reg_lo_v2i64:
; SSSE3: # BB#0:
; SSSE3-NEXT: movd %rdi, %xmm1
-; SSSE3-NEXT: movsd %xmm1, %xmm0
+; SSSE3-NEXT: movsd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
; SSSE3-NEXT: retq
;
; SSE41-LABEL: insert_reg_lo_v2i64:
@@ -938,19 +931,19 @@ define <2 x i64> @insert_mem_lo_v2i64(i64* %ptr, <2 x i64> %b) {
;
; SSE41-LABEL: insert_mem_lo_v2i64:
; SSE41: # BB#0:
-; SSE41-NEXT: movq (%rdi), %xmm1
+; SSE41-NEXT: movq {{.*#+}} xmm1 = mem[0],zero
; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm1[0,1,2,3],xmm0[4,5,6,7]
; SSE41-NEXT: retq
;
; AVX1-LABEL: insert_mem_lo_v2i64:
; AVX1: # BB#0:
-; AVX1-NEXT: vmovq (%rdi), %xmm1
+; AVX1-NEXT: vmovq {{.*#+}} xmm1 = mem[0],zero
; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm1[0,1,2,3],xmm0[4,5,6,7]
; AVX1-NEXT: retq
;
; AVX2-LABEL: insert_mem_lo_v2i64:
; AVX2: # BB#0:
-; AVX2-NEXT: vmovq (%rdi), %xmm1
+; AVX2-NEXT: vmovq {{.*#+}} xmm1 = mem[0],zero
; AVX2-NEXT: vpblendd {{.*#+}} xmm0 = xmm1[0,1],xmm0[2,3]
; AVX2-NEXT: retq
%a = load i64* %ptr
@@ -979,13 +972,13 @@ define <2 x i64> @insert_reg_hi_v2i64(i64 %a, <2 x i64> %b) {
define <2 x i64> @insert_mem_hi_v2i64(i64* %ptr, <2 x i64> %b) {
; SSE-LABEL: insert_mem_hi_v2i64:
; SSE: # BB#0:
-; SSE-NEXT: movq (%rdi), %xmm1
+; SSE-NEXT: movq {{.*#+}} xmm1 = mem[0],zero
; SSE-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; SSE-NEXT: retq
;
; AVX-LABEL: insert_mem_hi_v2i64:
; AVX: # BB#0:
-; AVX-NEXT: vmovq (%rdi), %xmm1
+; AVX-NEXT: vmovq {{.*#+}} xmm1 = mem[0],zero
; AVX-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; AVX-NEXT: retq
%a = load i64* %ptr
@@ -997,13 +990,13 @@ define <2 x i64> @insert_mem_hi_v2i64(i64* %ptr, <2 x i64> %b) {
define <2 x double> @insert_reg_lo_v2f64(double %a, <2 x double> %b) {
; SSE-LABEL: insert_reg_lo_v2f64:
; SSE: # BB#0:
-; SSE-NEXT: movsd %xmm0, %xmm1
-; SSE-NEXT: movaps %xmm1, %xmm0
+; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1]
+; SSE-NEXT: movapd %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: insert_reg_lo_v2f64:
; AVX: # BB#0:
-; AVX-NEXT: vmovsd %xmm0, %xmm1, %xmm0
+; AVX-NEXT: vmovsd {{.*#+}} xmm0 = xmm0[0],xmm1[1]
; AVX-NEXT: retq
%v = insertelement <2 x double> undef, double %a, i32 0
%shuffle = shufflevector <2 x double> %v, <2 x double> %b, <2 x i32> <i32 0, i32 3>
@@ -1068,22 +1061,22 @@ define <2 x double> @insert_dup_reg_v2f64(double %a) {
;
; SSE3-LABEL: insert_dup_reg_v2f64:
; SSE3: # BB#0:
-; SSE3-NEXT: unpcklpd {{.*#+}} xmm0 = xmm0[0,0]
+; SSE3-NEXT: movddup {{.*#+}} xmm0 = xmm0[0,0]
; SSE3-NEXT: retq
;
; SSSE3-LABEL: insert_dup_reg_v2f64:
; SSSE3: # BB#0:
-; SSSE3-NEXT: unpcklpd {{.*#+}} xmm0 = xmm0[0,0]
+; SSSE3-NEXT: movddup {{.*#+}} xmm0 = xmm0[0,0]
; SSSE3-NEXT: retq
;
; SSE41-LABEL: insert_dup_reg_v2f64:
; SSE41: # BB#0:
-; SSE41-NEXT: unpcklpd {{.*#+}} xmm0 = xmm0[0,0]
+; SSE41-NEXT: movddup {{.*#+}} xmm0 = xmm0[0,0]
; SSE41-NEXT: retq
;
; AVX-LABEL: insert_dup_reg_v2f64:
; AVX: # BB#0:
-; AVX-NEXT: vunpcklpd {{.*#+}} xmm0 = xmm0[0,0]
+; AVX-NEXT: vmovddup {{.*#+}} xmm0 = xmm0[0,0]
; AVX-NEXT: retq
%v = insertelement <2 x double> undef, double %a, i32 0
%shuffle = shufflevector <2 x double> %v, <2 x double> undef, <2 x i32> <i32 0, i32 0>
@@ -1092,28 +1085,28 @@ define <2 x double> @insert_dup_reg_v2f64(double %a) {
define <2 x double> @insert_dup_mem_v2f64(double* %ptr) {
; SSE2-LABEL: insert_dup_mem_v2f64:
; SSE2: # BB#0:
-; SSE2-NEXT: movsd (%rdi), %xmm0
+; SSE2-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
; SSE2-NEXT: movlhps {{.*#+}} xmm0 = xmm0[0,0]
; SSE2-NEXT: retq
;
; SSE3-LABEL: insert_dup_mem_v2f64:
; SSE3: # BB#0:
-; SSE3-NEXT: movddup (%rdi), %xmm0
+; SSE3-NEXT: movddup {{.*#+}} xmm0 = mem[0,0]
; SSE3-NEXT: retq
;
; SSSE3-LABEL: insert_dup_mem_v2f64:
; SSSE3: # BB#0:
-; SSSE3-NEXT: movddup (%rdi), %xmm0
+; SSSE3-NEXT: movddup {{.*#+}} xmm0 = mem[0,0]
; SSSE3-NEXT: retq
;
; SSE41-LABEL: insert_dup_mem_v2f64:
; SSE41: # BB#0:
-; SSE41-NEXT: movddup (%rdi), %xmm0
+; SSE41-NEXT: movddup {{.*#+}} xmm0 = mem[0,0]
; SSE41-NEXT: retq
;
; AVX-LABEL: insert_dup_mem_v2f64:
; AVX: # BB#0:
-; AVX-NEXT: vmovddup (%rdi), %xmm0
+; AVX-NEXT: vmovddup {{.*#+}} xmm0 = mem[0,0]
; AVX-NEXT: retq
%a = load double* %ptr
%v = insertelement <2 x double> undef, double %a, i32 0
diff --git a/test/CodeGen/X86/vector-shuffle-128-v4.ll b/test/CodeGen/X86/vector-shuffle-128-v4.ll
index 833b822..a684e5e 100644
--- a/test/CodeGen/X86/vector-shuffle-128-v4.ll
+++ b/test/CodeGen/X86/vector-shuffle-128-v4.ll
@@ -1,9 +1,9 @@
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=x86-64 -x86-experimental-vector-shuffle-lowering | FileCheck %s --check-prefix=ALL --check-prefix=SSE --check-prefix=SSE2
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=x86-64 -mattr=+sse3 -x86-experimental-vector-shuffle-lowering | FileCheck %s --check-prefix=ALL --check-prefix=SSE --check-prefix=SSE3
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=x86-64 -mattr=+ssse3 -x86-experimental-vector-shuffle-lowering | FileCheck %s --check-prefix=ALL --check-prefix=SSE --check-prefix=SSSE3
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=x86-64 -mattr=+sse4.1 -x86-experimental-vector-shuffle-lowering | FileCheck %s --check-prefix=ALL --check-prefix=SSE --check-prefix=SSE41
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=x86-64 -mattr=+avx -x86-experimental-vector-shuffle-lowering | FileCheck %s --check-prefix=ALL --check-prefix=AVX --check-prefix=AVX1
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=x86-64 -mattr=+avx2 -x86-experimental-vector-shuffle-lowering | FileCheck %s --check-prefix=ALL --check-prefix=AVX --check-prefix=AVX2
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=x86-64 | FileCheck %s --check-prefix=ALL --check-prefix=SSE --check-prefix=SSE2
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=x86-64 -mattr=+sse3 | FileCheck %s --check-prefix=ALL --check-prefix=SSE --check-prefix=SSE3
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=x86-64 -mattr=+ssse3 | FileCheck %s --check-prefix=ALL --check-prefix=SSE --check-prefix=SSSE3
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=x86-64 -mattr=+sse4.1 | FileCheck %s --check-prefix=ALL --check-prefix=SSE --check-prefix=SSE41
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=x86-64 -mattr=+avx | FileCheck %s --check-prefix=ALL --check-prefix=AVX --check-prefix=AVX1
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=x86-64 -mattr=+avx2 | FileCheck %s --check-prefix=ALL --check-prefix=AVX --check-prefix=AVX2
target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
target triple = "x86_64-unknown-unknown"
@@ -322,60 +322,150 @@ define <4 x i32> @shuffle_v4i32_0124(<4 x i32> %a, <4 x i32> %b) {
;
; SSE41-LABEL: shuffle_v4i32_0124:
; SSE41: # BB#0:
-; SSE41-NEXT: insertps {{.*#+}} xmm0 = xmm0[0,1,2],xmm1[0]
+; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,1,2,0]
+; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,5],xmm1[6,7]
; SSE41-NEXT: retq
;
-; AVX-LABEL: shuffle_v4i32_0124:
-; AVX: # BB#0:
-; AVX-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0,1,2],xmm1[0]
-; AVX-NEXT: retq
+; AVX1-LABEL: shuffle_v4i32_0124:
+; AVX1: # BB#0:
+; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,1,2,0]
+; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,5],xmm1[6,7]
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: shuffle_v4i32_0124:
+; AVX2: # BB#0:
+; AVX2-NEXT: vpbroadcastd %xmm1, %xmm1
+; AVX2-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0,1,2],xmm1[3]
+; AVX2-NEXT: retq
%shuffle = shufflevector <4 x i32> %a, <4 x i32> %b, <4 x i32> <i32 0, i32 1, i32 2, i32 4>
ret <4 x i32> %shuffle
}
define <4 x i32> @shuffle_v4i32_0142(<4 x i32> %a, <4 x i32> %b) {
-; SSE-LABEL: shuffle_v4i32_0142:
-; SSE: # BB#0:
-; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,0],xmm0[2,0]
-; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,1],xmm1[0,2]
-; SSE-NEXT: retq
+; SSE2-LABEL: shuffle_v4i32_0142:
+; SSE2: # BB#0:
+; SSE2-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,0],xmm0[2,0]
+; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,1],xmm1[0,2]
+; SSE2-NEXT: retq
;
-; AVX-LABEL: shuffle_v4i32_0142:
-; AVX: # BB#0:
-; AVX-NEXT: vshufps {{.*#+}} xmm1 = xmm1[0,0],xmm0[2,0]
-; AVX-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,1],xmm1[0,2]
-; AVX-NEXT: retq
+; SSE3-LABEL: shuffle_v4i32_0142:
+; SSE3: # BB#0:
+; SSE3-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,0],xmm0[2,0]
+; SSE3-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,1],xmm1[0,2]
+; SSE3-NEXT: retq
+;
+; SSSE3-LABEL: shuffle_v4i32_0142:
+; SSSE3: # BB#0:
+; SSSE3-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,0],xmm0[2,0]
+; SSSE3-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,1],xmm1[0,2]
+; SSSE3-NEXT: retq
+;
+; SSE41-LABEL: shuffle_v4i32_0142:
+; SSE41: # BB#0:
+; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,1,0,1]
+; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,1,2,2]
+; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm1[4,5],xmm0[6,7]
+; SSE41-NEXT: retq
+;
+; AVX1-LABEL: shuffle_v4i32_0142:
+; AVX1: # BB#0:
+; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,1,0,1]
+; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,1,2,2]
+; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm1[4,5],xmm0[6,7]
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: shuffle_v4i32_0142:
+; AVX2: # BB#0:
+; AVX2-NEXT: vpbroadcastq %xmm1, %xmm1
+; AVX2-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,1,2,2]
+; AVX2-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0,1],xmm1[2],xmm0[3]
+; AVX2-NEXT: retq
%shuffle = shufflevector <4 x i32> %a, <4 x i32> %b, <4 x i32> <i32 0, i32 1, i32 4, i32 2>
ret <4 x i32> %shuffle
}
define <4 x i32> @shuffle_v4i32_0412(<4 x i32> %a, <4 x i32> %b) {
-; SSE-LABEL: shuffle_v4i32_0412:
-; SSE: # BB#0:
-; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,0],xmm0[0,0]
-; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[2,0],xmm0[1,2]
-; SSE-NEXT: movaps %xmm1, %xmm0
-; SSE-NEXT: retq
+; SSE2-LABEL: shuffle_v4i32_0412:
+; SSE2: # BB#0:
+; SSE2-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,0],xmm0[0,0]
+; SSE2-NEXT: shufps {{.*#+}} xmm1 = xmm1[2,0],xmm0[1,2]
+; SSE2-NEXT: movaps %xmm1, %xmm0
+; SSE2-NEXT: retq
;
-; AVX-LABEL: shuffle_v4i32_0412:
-; AVX: # BB#0:
-; AVX-NEXT: vshufps {{.*#+}} xmm1 = xmm1[0,0],xmm0[0,0]
-; AVX-NEXT: vshufps {{.*#+}} xmm0 = xmm1[2,0],xmm0[1,2]
-; AVX-NEXT: retq
+; SSE3-LABEL: shuffle_v4i32_0412:
+; SSE3: # BB#0:
+; SSE3-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,0],xmm0[0,0]
+; SSE3-NEXT: shufps {{.*#+}} xmm1 = xmm1[2,0],xmm0[1,2]
+; SSE3-NEXT: movaps %xmm1, %xmm0
+; SSE3-NEXT: retq
+;
+; SSSE3-LABEL: shuffle_v4i32_0412:
+; SSSE3: # BB#0:
+; SSSE3-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,0],xmm0[0,0]
+; SSSE3-NEXT: shufps {{.*#+}} xmm1 = xmm1[2,0],xmm0[1,2]
+; SSSE3-NEXT: movaps %xmm1, %xmm0
+; SSSE3-NEXT: retq
+;
+; SSE41-LABEL: shuffle_v4i32_0412:
+; SSE41: # BB#0:
+; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,0,1,1]
+; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,1,1,2]
+; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3],xmm0[4,5,6,7]
+; SSE41-NEXT: retq
+;
+; AVX1-LABEL: shuffle_v4i32_0412:
+; AVX1: # BB#0:
+; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,0,1,1]
+; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,1,1,2]
+; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3],xmm0[4,5,6,7]
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: shuffle_v4i32_0412:
+; AVX2: # BB#0:
+; AVX2-NEXT: vpbroadcastd %xmm1, %xmm1
+; AVX2-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,1,1,2]
+; AVX2-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2,3]
+; AVX2-NEXT: retq
%shuffle = shufflevector <4 x i32> %a, <4 x i32> %b, <4 x i32> <i32 0, i32 4, i32 1, i32 2>
ret <4 x i32> %shuffle
}
define <4 x i32> @shuffle_v4i32_4012(<4 x i32> %a, <4 x i32> %b) {
-; SSE-LABEL: shuffle_v4i32_4012:
-; SSE: # BB#0:
-; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,0],xmm0[0,0]
-; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,2],xmm0[1,2]
-; SSE-NEXT: movaps %xmm1, %xmm0
-; SSE-NEXT: retq
+; SSE2-LABEL: shuffle_v4i32_4012:
+; SSE2: # BB#0:
+; SSE2-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,0],xmm0[0,0]
+; SSE2-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,2],xmm0[1,2]
+; SSE2-NEXT: movaps %xmm1, %xmm0
+; SSE2-NEXT: retq
;
-; AVX-LABEL: shuffle_v4i32_4012:
-; AVX: # BB#0:
-; AVX-NEXT: vshufps {{.*#+}} xmm1 = xmm1[0,0],xmm0[0,0]
-; AVX-NEXT: vshufps {{.*#+}} xmm0 = xmm1[0,2],xmm0[1,2]
-; AVX-NEXT: retq
+; SSE3-LABEL: shuffle_v4i32_4012:
+; SSE3: # BB#0:
+; SSE3-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,0],xmm0[0,0]
+; SSE3-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,2],xmm0[1,2]
+; SSE3-NEXT: movaps %xmm1, %xmm0
+; SSE3-NEXT: retq
+;
+; SSSE3-LABEL: shuffle_v4i32_4012:
+; SSSE3: # BB#0:
+; SSSE3-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,0],xmm0[0,0]
+; SSSE3-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,2],xmm0[1,2]
+; SSSE3-NEXT: movaps %xmm1, %xmm0
+; SSSE3-NEXT: retq
+;
+; SSE41-LABEL: shuffle_v4i32_4012:
+; SSE41: # BB#0:
+; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,1,2]
+; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm1[0,1],xmm0[2,3,4,5,6,7]
+; SSE41-NEXT: retq
+;
+; AVX1-LABEL: shuffle_v4i32_4012:
+; AVX1: # BB#0:
+; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,0,1,2]
+; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm1[0,1],xmm0[2,3,4,5,6,7]
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: shuffle_v4i32_4012:
+; AVX2: # BB#0:
+; AVX2-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,0,1,2]
+; AVX2-NEXT: vpblendd {{.*#+}} xmm0 = xmm1[0],xmm0[1,2,3]
+; AVX2-NEXT: retq
%shuffle = shufflevector <4 x i32> %a, <4 x i32> %b, <4 x i32> <i32 4, i32 0, i32 1, i32 2>
ret <4 x i32> %shuffle
}
@@ -393,17 +483,44 @@ define <4 x i32> @shuffle_v4i32_0145(<4 x i32> %a, <4 x i32> %b) {
ret <4 x i32> %shuffle
}
define <4 x i32> @shuffle_v4i32_0451(<4 x i32> %a, <4 x i32> %b) {
-; SSE-LABEL: shuffle_v4i32_0451:
-; SSE: # BB#0:
-; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,1],xmm1[0,1]
-; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,2,3,1]
-; SSE-NEXT: retq
+; SSE2-LABEL: shuffle_v4i32_0451:
+; SSE2: # BB#0:
+; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
+; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,1,3,2]
+; SSE2-NEXT: retq
;
-; AVX-LABEL: shuffle_v4i32_0451:
-; AVX: # BB#0:
-; AVX-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,1],xmm1[0,1]
-; AVX-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,2,3,1]
-; AVX-NEXT: retq
+; SSE3-LABEL: shuffle_v4i32_0451:
+; SSE3: # BB#0:
+; SSE3-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
+; SSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,1,3,2]
+; SSE3-NEXT: retq
+;
+; SSSE3-LABEL: shuffle_v4i32_0451:
+; SSSE3: # BB#0:
+; SSSE3-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
+; SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,1,3,2]
+; SSSE3-NEXT: retq
+;
+; SSE41-LABEL: shuffle_v4i32_0451:
+; SSE41: # BB#0:
+; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,0,1,1]
+; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,1,0,1]
+; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3,4,5],xmm0[6,7]
+; SSE41-NEXT: retq
+;
+; AVX1-LABEL: shuffle_v4i32_0451:
+; AVX1: # BB#0:
+; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,0,1,1]
+; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,1,0,1]
+; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3,4,5],xmm0[6,7]
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: shuffle_v4i32_0451:
+; AVX2: # BB#0:
+; AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,0,1,1]
+; AVX2-NEXT: vpbroadcastq %xmm0, %xmm0
+; AVX2-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0],xmm1[1,2],xmm0[3]
+; AVX2-NEXT: retq
%shuffle = shufflevector <4 x i32> %a, <4 x i32> %b, <4 x i32> <i32 0, i32 4, i32 5, i32 1>
ret <4 x i32> %shuffle
}
@@ -422,17 +539,44 @@ define <4 x i32> @shuffle_v4i32_4501(<4 x i32> %a, <4 x i32> %b) {
ret <4 x i32> %shuffle
}
define <4 x i32> @shuffle_v4i32_4015(<4 x i32> %a, <4 x i32> %b) {
-; SSE-LABEL: shuffle_v4i32_4015:
-; SSE: # BB#0:
-; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,1],xmm1[0,1]
-; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[2,0,1,3]
-; SSE-NEXT: retq
+; SSE2-LABEL: shuffle_v4i32_4015:
+; SSE2: # BB#0:
+; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
+; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,0,2,3]
+; SSE2-NEXT: retq
;
-; AVX-LABEL: shuffle_v4i32_4015:
-; AVX: # BB#0:
-; AVX-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,1],xmm1[0,1]
-; AVX-NEXT: vshufps {{.*#+}} xmm0 = xmm0[2,0,1,3]
-; AVX-NEXT: retq
+; SSE3-LABEL: shuffle_v4i32_4015:
+; SSE3: # BB#0:
+; SSE3-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
+; SSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,0,2,3]
+; SSE3-NEXT: retq
+;
+; SSSE3-LABEL: shuffle_v4i32_4015:
+; SSSE3: # BB#0:
+; SSSE3-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
+; SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,0,2,3]
+; SSSE3-NEXT: retq
+;
+; SSE41-LABEL: shuffle_v4i32_4015:
+; SSE41: # BB#0:
+; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,1,0,1]
+; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,1,1]
+; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm1[0,1],xmm0[2,3,4,5],xmm1[6,7]
+; SSE41-NEXT: retq
+;
+; AVX1-LABEL: shuffle_v4i32_4015:
+; AVX1: # BB#0:
+; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,1,0,1]
+; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,0,1,1]
+; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm1[0,1],xmm0[2,3,4,5],xmm1[6,7]
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: shuffle_v4i32_4015:
+; AVX2: # BB#0:
+; AVX2-NEXT: vpbroadcastq %xmm1, %xmm1
+; AVX2-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,0,1,1]
+; AVX2-NEXT: vpblendd {{.*#+}} xmm0 = xmm1[0],xmm0[1,2],xmm1[3]
+; AVX2-NEXT: retq
%shuffle = shufflevector <4 x i32> %a, <4 x i32> %b, <4 x i32> <i32 4, i32 0, i32 1, i32 5>
ret <4 x i32> %shuffle
}
@@ -441,21 +585,21 @@ define <4 x float> @shuffle_v4f32_4zzz(<4 x float> %a) {
; SSE2-LABEL: shuffle_v4f32_4zzz:
; SSE2: # BB#0:
; SSE2-NEXT: xorps %xmm1, %xmm1
-; SSE2-NEXT: movss %xmm0, %xmm1
+; SSE2-NEXT: movss {{.*#+}} xmm1 = xmm0[0],xmm1[1,2,3]
; SSE2-NEXT: movaps %xmm1, %xmm0
; SSE2-NEXT: retq
;
; SSE3-LABEL: shuffle_v4f32_4zzz:
; SSE3: # BB#0:
; SSE3-NEXT: xorps %xmm1, %xmm1
-; SSE3-NEXT: movss %xmm0, %xmm1
+; SSE3-NEXT: movss {{.*#+}} xmm1 = xmm0[0],xmm1[1,2,3]
; SSE3-NEXT: movaps %xmm1, %xmm0
; SSE3-NEXT: retq
;
; SSSE3-LABEL: shuffle_v4f32_4zzz:
; SSSE3: # BB#0:
; SSSE3-NEXT: xorps %xmm1, %xmm1
-; SSSE3-NEXT: movss %xmm0, %xmm1
+; SSSE3-NEXT: movss {{.*#+}} xmm1 = xmm0[0],xmm1[1,2,3]
; SSSE3-NEXT: movaps %xmm1, %xmm0
; SSSE3-NEXT: retq
;
@@ -478,22 +622,22 @@ define <4 x float> @shuffle_v4f32_z4zz(<4 x float> %a) {
; SSE2-LABEL: shuffle_v4f32_z4zz:
; SSE2: # BB#0:
; SSE2-NEXT: xorps %xmm1, %xmm1
-; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,0],xmm1[2,0]
-; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[2,0],xmm1[3,0]
+; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,0],xmm1[0,0]
+; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[2,0],xmm1[2,3]
; SSE2-NEXT: retq
;
; SSE3-LABEL: shuffle_v4f32_z4zz:
; SSE3: # BB#0:
; SSE3-NEXT: xorps %xmm1, %xmm1
-; SSE3-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,0],xmm1[2,0]
-; SSE3-NEXT: shufps {{.*#+}} xmm0 = xmm0[2,0],xmm1[3,0]
+; SSE3-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,0],xmm1[0,0]
+; SSE3-NEXT: shufps {{.*#+}} xmm0 = xmm0[2,0],xmm1[2,3]
; SSE3-NEXT: retq
;
; SSSE3-LABEL: shuffle_v4f32_z4zz:
; SSSE3: # BB#0:
; SSSE3-NEXT: xorps %xmm1, %xmm1
-; SSSE3-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,0],xmm1[2,0]
-; SSSE3-NEXT: shufps {{.*#+}} xmm0 = xmm0[2,0],xmm1[3,0]
+; SSSE3-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,0],xmm1[0,0]
+; SSSE3-NEXT: shufps {{.*#+}} xmm0 = xmm0[2,0],xmm1[2,3]
; SSSE3-NEXT: retq
;
; SSE41-LABEL: shuffle_v4f32_z4zz:
@@ -513,24 +657,24 @@ define <4 x float> @shuffle_v4f32_zz4z(<4 x float> %a) {
; SSE2-LABEL: shuffle_v4f32_zz4z:
; SSE2: # BB#0:
; SSE2-NEXT: xorps %xmm1, %xmm1
-; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,0],xmm1[0,0]
-; SSE2-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,0],xmm0[0,2]
+; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,0],xmm1[3,0]
+; SSE2-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,1],xmm0[0,2]
; SSE2-NEXT: movaps %xmm1, %xmm0
; SSE2-NEXT: retq
;
; SSE3-LABEL: shuffle_v4f32_zz4z:
; SSE3: # BB#0:
; SSE3-NEXT: xorps %xmm1, %xmm1
-; SSE3-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,0],xmm1[0,0]
-; SSE3-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,0],xmm0[0,2]
+; SSE3-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,0],xmm1[3,0]
+; SSE3-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,1],xmm0[0,2]
; SSE3-NEXT: movaps %xmm1, %xmm0
; SSE3-NEXT: retq
;
; SSSE3-LABEL: shuffle_v4f32_zz4z:
; SSSE3: # BB#0:
; SSSE3-NEXT: xorps %xmm1, %xmm1
-; SSSE3-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,0],xmm1[0,0]
-; SSSE3-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,0],xmm0[0,2]
+; SSSE3-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,0],xmm1[3,0]
+; SSSE3-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,1],xmm0[0,2]
; SSSE3-NEXT: movaps %xmm1, %xmm0
; SSSE3-NEXT: retq
;
@@ -657,38 +801,204 @@ define <4 x float> @shuffle_v4f32_z6zz(<4 x float> %a) {
ret <4 x float> %shuffle
}
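+; Mixing one vector with zero elements is a blend against a zeroed
+; register: SSE4.1 and AVX emit xorps plus a single blendps, while
+; SSE2/SSE3/SSSE3 must synthesize the same result from two shufps ops.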
+define <4 x float> @shuffle_v4f32_0z23(<4 x float> %a) {
+; SSE2-LABEL: shuffle_v4f32_0z23:
+; SSE2: # BB#0:
+; SSE2-NEXT: xorps %xmm1, %xmm1
+; SSE2-NEXT: shufps {{.*#+}} xmm1 = xmm1[1,0],xmm0[0,0]
+; SSE2-NEXT: shufps {{.*#+}} xmm1 = xmm1[2,0],xmm0[2,3]
+; SSE2-NEXT: movaps %xmm1, %xmm0
+; SSE2-NEXT: retq
+;
+; SSE3-LABEL: shuffle_v4f32_0z23:
+; SSE3: # BB#0:
+; SSE3-NEXT: xorps %xmm1, %xmm1
+; SSE3-NEXT: shufps {{.*#+}} xmm1 = xmm1[1,0],xmm0[0,0]
+; SSE3-NEXT: shufps {{.*#+}} xmm1 = xmm1[2,0],xmm0[2,3]
+; SSE3-NEXT: movaps %xmm1, %xmm0
+; SSE3-NEXT: retq
+;
+; SSSE3-LABEL: shuffle_v4f32_0z23:
+; SSSE3: # BB#0:
+; SSSE3-NEXT: xorps %xmm1, %xmm1
+; SSSE3-NEXT: shufps {{.*#+}} xmm1 = xmm1[1,0],xmm0[0,0]
+; SSSE3-NEXT: shufps {{.*#+}} xmm1 = xmm1[2,0],xmm0[2,3]
+; SSSE3-NEXT: movaps %xmm1, %xmm0
+; SSSE3-NEXT: retq
+;
+; SSE41-LABEL: shuffle_v4f32_0z23:
+; SSE41: # BB#0:
+; SSE41-NEXT: xorps %xmm1, %xmm1
+; SSE41-NEXT: blendps {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2,3]
+; SSE41-NEXT: retq
+;
+; AVX-LABEL: shuffle_v4f32_0z23:
+; AVX: # BB#0:
+; AVX-NEXT: vxorps %xmm1, %xmm1, %xmm1
+; AVX-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2,3]
+; AVX-NEXT: retq
+ %shuffle = shufflevector <4 x float> %a, <4 x float> zeroinitializer, <4 x i32> <i32 0, i32 4, i32 2, i32 3>
+ ret <4 x float> %shuffle
+}
+
+define <4 x float> @shuffle_v4f32_01z3(<4 x float> %a) {
+; SSE2-LABEL: shuffle_v4f32_01z3:
+; SSE2: # BB#0:
+; SSE2-NEXT: xorps %xmm1, %xmm1
+; SSE2-NEXT: shufps {{.*#+}} xmm1 = xmm1[2,0],xmm0[3,0]
+; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,1],xmm1[0,2]
+; SSE2-NEXT: retq
+;
+; SSE3-LABEL: shuffle_v4f32_01z3:
+; SSE3: # BB#0:
+; SSE3-NEXT: xorps %xmm1, %xmm1
+; SSE3-NEXT: shufps {{.*#+}} xmm1 = xmm1[2,0],xmm0[3,0]
+; SSE3-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,1],xmm1[0,2]
+; SSE3-NEXT: retq
+;
+; SSSE3-LABEL: shuffle_v4f32_01z3:
+; SSSE3: # BB#0:
+; SSSE3-NEXT: xorps %xmm1, %xmm1
+; SSSE3-NEXT: shufps {{.*#+}} xmm1 = xmm1[2,0],xmm0[3,0]
+; SSSE3-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,1],xmm1[0,2]
+; SSSE3-NEXT: retq
+;
+; SSE41-LABEL: shuffle_v4f32_01z3:
+; SSE41: # BB#0:
+; SSE41-NEXT: xorps %xmm1, %xmm1
+; SSE41-NEXT: blendps {{.*#+}} xmm0 = xmm0[0,1],xmm1[2],xmm0[3]
+; SSE41-NEXT: retq
+;
+; AVX-LABEL: shuffle_v4f32_01z3:
+; AVX: # BB#0:
+; AVX-NEXT: vxorps %xmm1, %xmm1, %xmm1
+; AVX-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0,1],xmm1[2],xmm0[3]
+; AVX-NEXT: retq
+ %shuffle = shufflevector <4 x float> %a, <4 x float> zeroinitializer, <4 x i32> <i32 0, i32 1, i32 4, i32 3>
+ ret <4 x float> %shuffle
+}
+
+define <4 x float> @shuffle_v4f32_012z(<4 x float> %a) {
+; SSE2-LABEL: shuffle_v4f32_012z:
+; SSE2: # BB#0:
+; SSE2-NEXT: xorps %xmm1, %xmm1
+; SSE2-NEXT: shufps {{.*#+}} xmm1 = xmm1[3,0],xmm0[2,0]
+; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,0]
+; SSE2-NEXT: retq
+;
+; SSE3-LABEL: shuffle_v4f32_012z:
+; SSE3: # BB#0:
+; SSE3-NEXT: xorps %xmm1, %xmm1
+; SSE3-NEXT: shufps {{.*#+}} xmm1 = xmm1[3,0],xmm0[2,0]
+; SSE3-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,0]
+; SSE3-NEXT: retq
+;
+; SSSE3-LABEL: shuffle_v4f32_012z:
+; SSSE3: # BB#0:
+; SSSE3-NEXT: xorps %xmm1, %xmm1
+; SSSE3-NEXT: shufps {{.*#+}} xmm1 = xmm1[3,0],xmm0[2,0]
+; SSSE3-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,0]
+; SSSE3-NEXT: retq
+;
+; SSE41-LABEL: shuffle_v4f32_012z:
+; SSE41: # BB#0:
+; SSE41-NEXT: xorps %xmm1, %xmm1
+; SSE41-NEXT: blendps {{.*#+}} xmm0 = xmm0[0,1,2],xmm1[3]
+; SSE41-NEXT: retq
+;
+; AVX-LABEL: shuffle_v4f32_012z:
+; AVX: # BB#0:
+; AVX-NEXT: vxorps %xmm1, %xmm1, %xmm1
+; AVX-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0,1,2],xmm1[3]
+; AVX-NEXT: retq
+ %shuffle = shufflevector <4 x float> %a, <4 x float> zeroinitializer, <4 x i32> <i32 0, i32 1, i32 2, i32 7>
+ ret <4 x float> %shuffle
+}
+
+define <4 x float> @shuffle_v4f32_0zz3(<4 x float> %a) {
+; SSE2-LABEL: shuffle_v4f32_0zz3:
+; SSE2: # BB#0:
+; SSE2-NEXT: xorps %xmm1, %xmm1
+; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,3],xmm1[1,2]
+; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,2,3,1]
+; SSE2-NEXT: retq
+;
+; SSE3-LABEL: shuffle_v4f32_0zz3:
+; SSE3: # BB#0:
+; SSE3-NEXT: xorps %xmm1, %xmm1
+; SSE3-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,3],xmm1[1,2]
+; SSE3-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,2,3,1]
+; SSE3-NEXT: retq
+;
+; SSSE3-LABEL: shuffle_v4f32_0zz3:
+; SSSE3: # BB#0:
+; SSSE3-NEXT: xorps %xmm1, %xmm1
+; SSSE3-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,3],xmm1[1,2]
+; SSSE3-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,2,3,1]
+; SSSE3-NEXT: retq
+;
+; SSE41-LABEL: shuffle_v4f32_0zz3:
+; SSE41: # BB#0:
+; SSE41-NEXT: xorps %xmm1, %xmm1
+; SSE41-NEXT: blendps {{.*#+}} xmm0 = xmm0[0],xmm1[1,2],xmm0[3]
+; SSE41-NEXT: retq
+;
+; AVX-LABEL: shuffle_v4f32_0zz3:
+; AVX: # BB#0:
+; AVX-NEXT: vxorps %xmm1, %xmm1, %xmm1
+; AVX-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0],xmm1[1,2],xmm0[3]
+; AVX-NEXT: retq
+ %shuffle = shufflevector <4 x float> %a, <4 x float> zeroinitializer, <4 x i32> <i32 0, i32 4, i32 4, i32 3>
+ ret <4 x float> %shuffle
+}
+
+define <4 x float> @shuffle_v4f32_u051(<4 x float> %a, <4 x float> %b) {
+; SSE-LABEL: shuffle_v4f32_u051:
+; SSE: # BB#0:
+; SSE-NEXT: unpcklps {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
+; SSE-NEXT: movaps %xmm1, %xmm0
+; SSE-NEXT: retq
+;
+; AVX-LABEL: shuffle_v4f32_u051:
+; AVX: # BB#0:
+; AVX-NEXT: vunpcklps {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
+; AVX-NEXT: retq
+ %shuffle = shufflevector <4 x float> %a, <4 x float> %b, <4 x i32> <i32 undef, i32 0, i32 5, i32 1>
+ ret <4 x float> %shuffle
+}
+
define <4 x i32> @shuffle_v4i32_4zzz(<4 x i32> %a) {
; SSE2-LABEL: shuffle_v4i32_4zzz:
; SSE2: # BB#0:
; SSE2-NEXT: xorps %xmm1, %xmm1
-; SSE2-NEXT: movss %xmm0, %xmm1
+; SSE2-NEXT: movss {{.*#+}} xmm1 = xmm0[0],xmm1[1,2,3]
; SSE2-NEXT: movaps %xmm1, %xmm0
; SSE2-NEXT: retq
;
; SSE3-LABEL: shuffle_v4i32_4zzz:
; SSE3: # BB#0:
; SSE3-NEXT: xorps %xmm1, %xmm1
-; SSE3-NEXT: movss %xmm0, %xmm1
+; SSE3-NEXT: movss {{.*#+}} xmm1 = xmm0[0],xmm1[1,2,3]
; SSE3-NEXT: movaps %xmm1, %xmm0
; SSE3-NEXT: retq
;
; SSSE3-LABEL: shuffle_v4i32_4zzz:
; SSSE3: # BB#0:
; SSSE3-NEXT: xorps %xmm1, %xmm1
-; SSSE3-NEXT: movss %xmm0, %xmm1
+; SSSE3-NEXT: movss {{.*#+}} xmm1 = xmm0[0],xmm1[1,2,3]
; SSSE3-NEXT: movaps %xmm1, %xmm0
; SSSE3-NEXT: retq
;
; SSE41-LABEL: shuffle_v4i32_4zzz:
; SSE41: # BB#0:
-; SSE41-NEXT: xorps %xmm1, %xmm1
-; SSE41-NEXT: blendps {{.*#+}} xmm0 = xmm0[0],xmm1[1,2,3]
+; SSE41-NEXT: pxor %xmm1, %xmm1
+; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3,4,5,6,7]
; SSE41-NEXT: retq
;
; AVX-LABEL: shuffle_v4i32_4zzz:
; AVX: # BB#0:
-; AVX-NEXT: vxorps %xmm1, %xmm1, %xmm1
-; AVX-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0],xmm1[1,2,3]
+; AVX-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; AVX-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3,4,5,6,7]
; AVX-NEXT: retq
%shuffle = shufflevector <4 x i32> zeroinitializer, <4 x i32> %a, <4 x i32> <i32 4, i32 1, i32 2, i32 3>
ret <4 x i32> %shuffle
@@ -698,35 +1008,35 @@ define <4 x i32> @shuffle_v4i32_z4zz(<4 x i32> %a) {
; SSE2-LABEL: shuffle_v4i32_z4zz:
; SSE2: # BB#0:
; SSE2-NEXT: xorps %xmm1, %xmm1
-; SSE2-NEXT: movss %xmm0, %xmm1
+; SSE2-NEXT: movss {{.*#+}} xmm1 = xmm0[0],xmm1[1,2,3]
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,0,1,1]
; SSE2-NEXT: retq
;
; SSE3-LABEL: shuffle_v4i32_z4zz:
; SSE3: # BB#0:
; SSE3-NEXT: xorps %xmm1, %xmm1
-; SSE3-NEXT: movss %xmm0, %xmm1
+; SSE3-NEXT: movss {{.*#+}} xmm1 = xmm0[0],xmm1[1,2,3]
; SSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,0,1,1]
; SSE3-NEXT: retq
;
; SSSE3-LABEL: shuffle_v4i32_z4zz:
; SSSE3: # BB#0:
; SSSE3-NEXT: xorps %xmm1, %xmm1
-; SSSE3-NEXT: movss %xmm0, %xmm1
+; SSSE3-NEXT: movss {{.*#+}} xmm1 = xmm0[0],xmm1[1,2,3]
; SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,0,1,1]
; SSSE3-NEXT: retq
;
; SSE41-LABEL: shuffle_v4i32_z4zz:
; SSE41: # BB#0:
-; SSE41-NEXT: xorps %xmm1, %xmm1
-; SSE41-NEXT: blendps {{.*#+}} xmm1 = xmm0[0],xmm1[1,2,3]
+; SSE41-NEXT: pxor %xmm1, %xmm1
+; SSE41-NEXT: pblendw {{.*#+}} xmm1 = xmm0[0,1],xmm1[2,3,4,5,6,7]
; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,0,1,1]
; SSE41-NEXT: retq
;
; AVX-LABEL: shuffle_v4i32_z4zz:
; AVX: # BB#0:
-; AVX-NEXT: vxorps %xmm1, %xmm1, %xmm1
-; AVX-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0],xmm1[1,2,3]
+; AVX-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; AVX-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3,4,5,6,7]
; AVX-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[1,0,1,1]
; AVX-NEXT: retq
%shuffle = shufflevector <4 x i32> zeroinitializer, <4 x i32> %a, <4 x i32> <i32 2, i32 4, i32 3, i32 0>
@@ -737,35 +1047,35 @@ define <4 x i32> @shuffle_v4i32_zz4z(<4 x i32> %a) {
; SSE2-LABEL: shuffle_v4i32_zz4z:
; SSE2: # BB#0:
; SSE2-NEXT: xorps %xmm1, %xmm1
-; SSE2-NEXT: movss %xmm0, %xmm1
+; SSE2-NEXT: movss {{.*#+}} xmm1 = xmm0[0],xmm1[1,2,3]
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,1,0,1]
; SSE2-NEXT: retq
;
; SSE3-LABEL: shuffle_v4i32_zz4z:
; SSE3: # BB#0:
; SSE3-NEXT: xorps %xmm1, %xmm1
-; SSE3-NEXT: movss %xmm0, %xmm1
+; SSE3-NEXT: movss {{.*#+}} xmm1 = xmm0[0],xmm1[1,2,3]
; SSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,1,0,1]
; SSE3-NEXT: retq
;
; SSSE3-LABEL: shuffle_v4i32_zz4z:
; SSSE3: # BB#0:
; SSSE3-NEXT: xorps %xmm1, %xmm1
-; SSSE3-NEXT: movss %xmm0, %xmm1
+; SSSE3-NEXT: movss {{.*#+}} xmm1 = xmm0[0],xmm1[1,2,3]
; SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,1,0,1]
; SSSE3-NEXT: retq
;
; SSE41-LABEL: shuffle_v4i32_zz4z:
; SSE41: # BB#0:
-; SSE41-NEXT: xorps %xmm1, %xmm1
-; SSE41-NEXT: blendps {{.*#+}} xmm1 = xmm0[0],xmm1[1,2,3]
+; SSE41-NEXT: pxor %xmm1, %xmm1
+; SSE41-NEXT: pblendw {{.*#+}} xmm1 = xmm0[0,1],xmm1[2,3,4,5,6,7]
; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,1,0,1]
; SSE41-NEXT: retq
;
; AVX-LABEL: shuffle_v4i32_zz4z:
; AVX: # BB#0:
-; AVX-NEXT: vxorps %xmm1, %xmm1, %xmm1
-; AVX-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0],xmm1[1,2,3]
+; AVX-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; AVX-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3,4,5,6,7]
; AVX-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[1,1,0,1]
; AVX-NEXT: retq
%shuffle = shufflevector <4 x i32> zeroinitializer, <4 x i32> %a, <4 x i32> <i32 0, i32 0, i32 4, i32 0>
@@ -773,39 +1083,14 @@ define <4 x i32> @shuffle_v4i32_zz4z(<4 x i32> %a) {
}
define <4 x i32> @shuffle_v4i32_zuu4(<4 x i32> %a) {
-; SSE2-LABEL: shuffle_v4i32_zuu4:
-; SSE2: # BB#0:
-; SSE2-NEXT: xorps %xmm1, %xmm1
-; SSE2-NEXT: movss %xmm0, %xmm1
-; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,1,1,0]
-; SSE2-NEXT: retq
-;
-; SSE3-LABEL: shuffle_v4i32_zuu4:
-; SSE3: # BB#0:
-; SSE3-NEXT: xorps %xmm1, %xmm1
-; SSE3-NEXT: movss %xmm0, %xmm1
-; SSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,1,1,0]
-; SSE3-NEXT: retq
-;
-; SSSE3-LABEL: shuffle_v4i32_zuu4:
-; SSSE3: # BB#0:
-; SSSE3-NEXT: xorps %xmm1, %xmm1
-; SSSE3-NEXT: movss %xmm0, %xmm1
-; SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,1,1,0]
-; SSSE3-NEXT: retq
-;
-; SSE41-LABEL: shuffle_v4i32_zuu4:
-; SSE41: # BB#0:
-; SSE41-NEXT: xorps %xmm1, %xmm1
-; SSE41-NEXT: blendps {{.*#+}} xmm1 = xmm0[0],xmm1[1,2,3]
-; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,1,1,0]
-; SSE41-NEXT: retq
+; SSE-LABEL: shuffle_v4i32_zuu4:
+; SSE: # BB#0:
+; SSE-NEXT: pslldq {{.*#+}} xmm0 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,xmm0[0,1,2,3]
+; SSE-NEXT: retq
;
; AVX-LABEL: shuffle_v4i32_zuu4:
; AVX: # BB#0:
-; AVX-NEXT: vxorps %xmm1, %xmm1, %xmm1
-; AVX-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0],xmm1[1,2,3]
-; AVX-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[1,1,1,0]
+; AVX-NEXT: vpslldq {{.*#+}} xmm0 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,xmm0[0,1,2,3]
; AVX-NEXT: retq
%shuffle = shufflevector <4 x i32> zeroinitializer, <4 x i32> %a, <4 x i32> <i32 0, i32 undef, i32 undef, i32 4>
ret <4 x i32> %shuffle
@@ -835,13 +1120,24 @@ define <4 x i32> @shuffle_v4i32_z6zz(<4 x i32> %a) {
;
; SSE41-LABEL: shuffle_v4i32_z6zz:
; SSE41: # BB#0:
-; SSE41-NEXT: insertps {{.*#+}} xmm0 = zero,xmm0[2],zero,zero
+; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,2,3,3]
+; SSE41-NEXT: pxor %xmm0, %xmm0
+; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3],xmm0[4,5,6,7]
; SSE41-NEXT: retq
;
-; AVX-LABEL: shuffle_v4i32_z6zz:
-; AVX: # BB#0:
-; AVX-NEXT: vinsertps {{.*#+}} xmm0 = zero,xmm0[2],zero,zero
-; AVX-NEXT: retq
+; AVX1-LABEL: shuffle_v4i32_z6zz:
+; AVX1: # BB#0:
+; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,2,3,3]
+; AVX1-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm1[0,1],xmm0[2,3],xmm1[4,5,6,7]
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: shuffle_v4i32_z6zz:
+; AVX2: # BB#0:
+; AVX2-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,2,3,3]
+; AVX2-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; AVX2-NEXT: vpblendd {{.*#+}} xmm0 = xmm1[0],xmm0[1],xmm1[2,3]
+; AVX2-NEXT: retq
%shuffle = shufflevector <4 x i32> zeroinitializer, <4 x i32> %a, <4 x i32> <i32 0, i32 6, i32 2, i32 3>
ret <4 x i32> %shuffle
}
@@ -1007,6 +1303,21 @@ define <4 x i32> @shuffle_v4i32_2345(<4 x i32> %a, <4 x i32> %b) {
ret <4 x i32> %shuffle
}
+define <4 x i32> @shuffle_v4i32_40u1(<4 x i32> %a, <4 x i32> %b) {
+; SSE-LABEL: shuffle_v4i32_40u1:
+; SSE: # BB#0:
+; SSE-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
+; SSE-NEXT: movdqa %xmm1, %xmm0
+; SSE-NEXT: retq
+;
+; AVX-LABEL: shuffle_v4i32_40u1:
+; AVX: # BB#0:
+; AVX-NEXT: vpunpckldq {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
+; AVX-NEXT: retq
+ %shuffle = shufflevector <4 x i32> %a, <4 x i32> %b, <4 x i32> <i32 4, i32 0, i32 undef, i32 1>
+ ret <4 x i32> %shuffle
+}
+
define <4 x i32> @shuffle_v4i32_3456(<4 x i32> %a, <4 x i32> %b) {
; SSE2-LABEL: shuffle_v4i32_3456:
; SSE2: # BB#0:
@@ -1058,12 +1369,12 @@ define <4 x i32> @shuffle_v4i32_0u1u(<4 x i32> %a, <4 x i32> %b) {
;
; SSE41-LABEL: shuffle_v4i32_0u1u:
; SSE41: # BB#0:
-; SSE41-NEXT: pmovzxdq %xmm0, %xmm0
+; SSE41-NEXT: pmovzxdq {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero
; SSE41-NEXT: retq
;
; AVX-LABEL: shuffle_v4i32_0u1u:
; AVX: # BB#0:
-; AVX-NEXT: vpmovzxdq %xmm0, %xmm0
+; AVX-NEXT: vpmovzxdq {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero
; AVX-NEXT: retq
%shuffle = shufflevector <4 x i32> %a, <4 x i32> %b, <4 x i32> <i32 0, i32 undef, i32 1, i32 undef>
ret <4 x i32> %shuffle
@@ -1090,17 +1401,179 @@ define <4 x i32> @shuffle_v4i32_0z1z(<4 x i32> %a) {
;
; SSE41-LABEL: shuffle_v4i32_0z1z:
; SSE41: # BB#0:
-; SSE41-NEXT: pmovzxdq %xmm0, %xmm0
+; SSE41-NEXT: pmovzxdq {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero
; SSE41-NEXT: retq
;
; AVX-LABEL: shuffle_v4i32_0z1z:
; AVX: # BB#0:
-; AVX-NEXT: vpmovzxdq %xmm0, %xmm0
+; AVX-NEXT: vpmovzxdq {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero
; AVX-NEXT: retq
%shuffle = shufflevector <4 x i32> %a, <4 x i32> zeroinitializer, <4 x i32> <i32 0, i32 5, i32 1, i32 7>
ret <4 x i32> %shuffle
}
+define <4 x i32> @shuffle_v4i32_01zu(<4 x i32> %a) {
+; SSE-LABEL: shuffle_v4i32_01zu:
+; SSE: # BB#0:
+; SSE-NEXT: movq {{.*#+}} xmm0 = xmm0[0],zero
+; SSE-NEXT: retq
+;
+; AVX-LABEL: shuffle_v4i32_01zu:
+; AVX: # BB#0:
+; AVX-NEXT: vmovq {{.*#+}} xmm0 = xmm0[0],zero
+; AVX-NEXT: retq
+ %shuffle = shufflevector <4 x i32> %a, <4 x i32> zeroinitializer, <4 x i32> <i32 0, i32 1, i32 7, i32 undef>
+ ret <4 x i32> %shuffle
+}
+
+define <4 x i32> @shuffle_v4i32_0z23(<4 x i32> %a) {
+; SSE2-LABEL: shuffle_v4i32_0z23:
+; SSE2: # BB#0:
+; SSE2-NEXT: andps {{.*}}(%rip), %xmm0
+; SSE2-NEXT: retq
+;
+; SSE3-LABEL: shuffle_v4i32_0z23:
+; SSE3: # BB#0:
+; SSE3-NEXT: andps {{.*}}(%rip), %xmm0
+; SSE3-NEXT: retq
+;
+; SSSE3-LABEL: shuffle_v4i32_0z23:
+; SSSE3: # BB#0:
+; SSSE3-NEXT: andps {{.*}}(%rip), %xmm0
+; SSSE3-NEXT: retq
+;
+; SSE41-LABEL: shuffle_v4i32_0z23:
+; SSE41: # BB#0:
+; SSE41-NEXT: pxor %xmm1, %xmm1
+; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3],xmm0[4,5,6,7]
+; SSE41-NEXT: retq
+;
+; AVX1-LABEL: shuffle_v4i32_0z23:
+; AVX1: # BB#0:
+; AVX1-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3],xmm0[4,5,6,7]
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: shuffle_v4i32_0z23:
+; AVX2: # BB#0:
+; AVX2-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; AVX2-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2,3]
+; AVX2-NEXT: retq
+ %shuffle = shufflevector <4 x i32> %a, <4 x i32> zeroinitializer, <4 x i32> <i32 0, i32 4, i32 2, i32 3>
+ ret <4 x i32> %shuffle
+}
+
+define <4 x i32> @shuffle_v4i32_01z3(<4 x i32> %a) {
+; SSE2-LABEL: shuffle_v4i32_01z3:
+; SSE2: # BB#0:
+; SSE2-NEXT: andps {{.*}}(%rip), %xmm0
+; SSE2-NEXT: retq
+;
+; SSE3-LABEL: shuffle_v4i32_01z3:
+; SSE3: # BB#0:
+; SSE3-NEXT: andps {{.*}}(%rip), %xmm0
+; SSE3-NEXT: retq
+;
+; SSSE3-LABEL: shuffle_v4i32_01z3:
+; SSSE3: # BB#0:
+; SSSE3-NEXT: andps {{.*}}(%rip), %xmm0
+; SSSE3-NEXT: retq
+;
+; SSE41-LABEL: shuffle_v4i32_01z3:
+; SSE41: # BB#0:
+; SSE41-NEXT: pxor %xmm1, %xmm1
+; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm1[4,5],xmm0[6,7]
+; SSE41-NEXT: retq
+;
+; AVX1-LABEL: shuffle_v4i32_01z3:
+; AVX1: # BB#0:
+; AVX1-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm1[4,5],xmm0[6,7]
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: shuffle_v4i32_01z3:
+; AVX2: # BB#0:
+; AVX2-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; AVX2-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0,1],xmm1[2],xmm0[3]
+; AVX2-NEXT: retq
+ %shuffle = shufflevector <4 x i32> %a, <4 x i32> zeroinitializer, <4 x i32> <i32 0, i32 1, i32 4, i32 3>
+ ret <4 x i32> %shuffle
+}
+
+define <4 x i32> @shuffle_v4i32_012z(<4 x i32> %a) {
+; SSE2-LABEL: shuffle_v4i32_012z:
+; SSE2: # BB#0:
+; SSE2-NEXT: andps {{.*}}(%rip), %xmm0
+; SSE2-NEXT: retq
+;
+; SSE3-LABEL: shuffle_v4i32_012z:
+; SSE3: # BB#0:
+; SSE3-NEXT: andps {{.*}}(%rip), %xmm0
+; SSE3-NEXT: retq
+;
+; SSSE3-LABEL: shuffle_v4i32_012z:
+; SSSE3: # BB#0:
+; SSSE3-NEXT: andps {{.*}}(%rip), %xmm0
+; SSSE3-NEXT: retq
+;
+; SSE41-LABEL: shuffle_v4i32_012z:
+; SSE41: # BB#0:
+; SSE41-NEXT: pxor %xmm1, %xmm1
+; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,5],xmm1[6,7]
+; SSE41-NEXT: retq
+;
+; AVX1-LABEL: shuffle_v4i32_012z:
+; AVX1: # BB#0:
+; AVX1-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,5],xmm1[6,7]
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: shuffle_v4i32_012z:
+; AVX2: # BB#0:
+; AVX2-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; AVX2-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0,1,2],xmm1[3]
+; AVX2-NEXT: retq
+ %shuffle = shufflevector <4 x i32> %a, <4 x i32> zeroinitializer, <4 x i32> <i32 0, i32 1, i32 2, i32 7>
+ ret <4 x i32> %shuffle
+}
+
+define <4 x i32> @shuffle_v4i32_0zz3(<4 x i32> %a) {
+; SSE2-LABEL: shuffle_v4i32_0zz3:
+; SSE2: # BB#0:
+; SSE2-NEXT: andps {{.*}}(%rip), %xmm0
+; SSE2-NEXT: retq
+;
+; SSE3-LABEL: shuffle_v4i32_0zz3:
+; SSE3: # BB#0:
+; SSE3-NEXT: andps {{.*}}(%rip), %xmm0
+; SSE3-NEXT: retq
+;
+; SSSE3-LABEL: shuffle_v4i32_0zz3:
+; SSSE3: # BB#0:
+; SSSE3-NEXT: andps {{.*}}(%rip), %xmm0
+; SSSE3-NEXT: retq
+;
+; SSE41-LABEL: shuffle_v4i32_0zz3:
+; SSE41: # BB#0:
+; SSE41-NEXT: pxor %xmm1, %xmm1
+; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3,4,5],xmm0[6,7]
+; SSE41-NEXT: retq
+;
+; AVX1-LABEL: shuffle_v4i32_0zz3:
+; AVX1: # BB#0:
+; AVX1-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3,4,5],xmm0[6,7]
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: shuffle_v4i32_0zz3:
+; AVX2: # BB#0:
+; AVX2-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; AVX2-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0],xmm1[1,2],xmm0[3]
+; AVX2-NEXT: retq
+ %shuffle = shufflevector <4 x i32> %a, <4 x i32> zeroinitializer, <4 x i32> <i32 0, i32 4, i32 4, i32 3>
+ ret <4 x i32> %shuffle
+}
+
define <4 x i32> @insert_reg_and_zero_v4i32(i32 %a) {
; SSE-LABEL: insert_reg_and_zero_v4i32:
; SSE: # BB#0:
@@ -1119,12 +1592,12 @@ define <4 x i32> @insert_reg_and_zero_v4i32(i32 %a) {
define <4 x i32> @insert_mem_and_zero_v4i32(i32* %ptr) {
; SSE-LABEL: insert_mem_and_zero_v4i32:
; SSE: # BB#0:
-; SSE-NEXT: movd (%rdi), %xmm0
+; SSE-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
; SSE-NEXT: retq
;
; AVX-LABEL: insert_mem_and_zero_v4i32:
; AVX: # BB#0:
-; AVX-NEXT: vmovd (%rdi), %xmm0
+; AVX-NEXT: vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero
; AVX-NEXT: retq
%a = load i32* %ptr
%v = insertelement <4 x i32> undef, i32 %a, i32 0
@@ -1136,21 +1609,21 @@ define <4 x float> @insert_reg_and_zero_v4f32(float %a) {
; SSE2-LABEL: insert_reg_and_zero_v4f32:
; SSE2: # BB#0:
; SSE2-NEXT: xorps %xmm1, %xmm1
-; SSE2-NEXT: movss %xmm0, %xmm1
+; SSE2-NEXT: movss {{.*#+}} xmm1 = xmm0[0],xmm1[1,2,3]
; SSE2-NEXT: movaps %xmm1, %xmm0
; SSE2-NEXT: retq
;
; SSE3-LABEL: insert_reg_and_zero_v4f32:
; SSE3: # BB#0:
; SSE3-NEXT: xorps %xmm1, %xmm1
-; SSE3-NEXT: movss %xmm0, %xmm1
+; SSE3-NEXT: movss {{.*#+}} xmm1 = xmm0[0],xmm1[1,2,3]
; SSE3-NEXT: movaps %xmm1, %xmm0
; SSE3-NEXT: retq
;
; SSSE3-LABEL: insert_reg_and_zero_v4f32:
; SSSE3: # BB#0:
; SSSE3-NEXT: xorps %xmm1, %xmm1
-; SSSE3-NEXT: movss %xmm0, %xmm1
+; SSSE3-NEXT: movss {{.*#+}} xmm1 = xmm0[0],xmm1[1,2,3]
; SSSE3-NEXT: movaps %xmm1, %xmm0
; SSSE3-NEXT: retq
;
@@ -1163,7 +1636,7 @@ define <4 x float> @insert_reg_and_zero_v4f32(float %a) {
; AVX-LABEL: insert_reg_and_zero_v4f32:
; AVX: # BB#0:
; AVX-NEXT: vxorps %xmm1, %xmm1, %xmm1
-; AVX-NEXT: vmovss %xmm0, %xmm1, %xmm0
+; AVX-NEXT: vmovss {{.*#+}} xmm0 = xmm0[0],xmm1[1,2,3]
; AVX-NEXT: retq
%v = insertelement <4 x float> undef, float %a, i32 0
%shuffle = shufflevector <4 x float> %v, <4 x float> zeroinitializer, <4 x i32> <i32 0, i32 5, i32 6, i32 7>
@@ -1173,12 +1646,12 @@ define <4 x float> @insert_reg_and_zero_v4f32(float %a) {
define <4 x float> @insert_mem_and_zero_v4f32(float* %ptr) {
; SSE-LABEL: insert_mem_and_zero_v4f32:
; SSE: # BB#0:
-; SSE-NEXT: movss (%rdi), %xmm0
+; SSE-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; SSE-NEXT: retq
;
; AVX-LABEL: insert_mem_and_zero_v4f32:
; AVX: # BB#0:
-; AVX-NEXT: vmovss (%rdi), %xmm0
+; AVX-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; AVX-NEXT: retq
%a = load float* %ptr
%v = insertelement <4 x float> undef, float %a, i32 0
@@ -1190,19 +1663,19 @@ define <4 x i32> @insert_reg_lo_v4i32(i64 %a, <4 x i32> %b) {
; SSE2-LABEL: insert_reg_lo_v4i32:
; SSE2: # BB#0:
; SSE2-NEXT: movd %rdi, %xmm1
-; SSE2-NEXT: movsd %xmm1, %xmm0
+; SSE2-NEXT: movsd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
; SSE2-NEXT: retq
;
; SSE3-LABEL: insert_reg_lo_v4i32:
; SSE3: # BB#0:
; SSE3-NEXT: movd %rdi, %xmm1
-; SSE3-NEXT: movsd %xmm1, %xmm0
+; SSE3-NEXT: movsd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
; SSE3-NEXT: retq
;
; SSSE3-LABEL: insert_reg_lo_v4i32:
; SSSE3: # BB#0:
; SSSE3-NEXT: movd %rdi, %xmm1
-; SSSE3-NEXT: movsd %xmm1, %xmm0
+; SSSE3-NEXT: movsd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
; SSSE3-NEXT: retq
;
; SSE41-LABEL: insert_reg_lo_v4i32:
@@ -1246,19 +1719,19 @@ define <4 x i32> @insert_mem_lo_v4i32(<2 x i32>* %ptr, <4 x i32> %b) {
;
; SSE41-LABEL: insert_mem_lo_v4i32:
; SSE41: # BB#0:
-; SSE41-NEXT: movq (%rdi), %xmm1
+; SSE41-NEXT: movq {{.*#+}} xmm1 = mem[0],zero
; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm1[0,1,2,3],xmm0[4,5,6,7]
; SSE41-NEXT: retq
;
; AVX1-LABEL: insert_mem_lo_v4i32:
; AVX1: # BB#0:
-; AVX1-NEXT: vmovq (%rdi), %xmm1
+; AVX1-NEXT: vmovq {{.*#+}} xmm1 = mem[0],zero
; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm1[0,1,2,3],xmm0[4,5,6,7]
; AVX1-NEXT: retq
;
; AVX2-LABEL: insert_mem_lo_v4i32:
; AVX2: # BB#0:
-; AVX2-NEXT: vmovq (%rdi), %xmm1
+; AVX2-NEXT: vmovq {{.*#+}} xmm1 = mem[0],zero
; AVX2-NEXT: vpblendd {{.*#+}} xmm0 = xmm1[0,1],xmm0[2,3]
; AVX2-NEXT: retq
%a = load <2 x i32>* %ptr
@@ -1288,13 +1761,13 @@ define <4 x i32> @insert_reg_hi_v4i32(i64 %a, <4 x i32> %b) {
define <4 x i32> @insert_mem_hi_v4i32(<2 x i32>* %ptr, <4 x i32> %b) {
; SSE-LABEL: insert_mem_hi_v4i32:
; SSE: # BB#0:
-; SSE-NEXT: movq (%rdi), %xmm1
+; SSE-NEXT: movq {{.*#+}} xmm1 = mem[0],zero
; SSE-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; SSE-NEXT: retq
;
; AVX-LABEL: insert_mem_hi_v4i32:
; AVX: # BB#0:
-; AVX-NEXT: vmovq (%rdi), %xmm1
+; AVX-NEXT: vmovq {{.*#+}} xmm1 = mem[0],zero
; AVX-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; AVX-NEXT: retq
%a = load <2 x i32>* %ptr
@@ -1306,13 +1779,13 @@ define <4 x i32> @insert_mem_hi_v4i32(<2 x i32>* %ptr, <4 x i32> %b) {
define <4 x float> @insert_reg_lo_v4f32(double %a, <4 x float> %b) {
; SSE-LABEL: insert_reg_lo_v4f32:
; SSE: # BB#0:
-; SSE-NEXT: movsd %xmm0, %xmm1
-; SSE-NEXT: movaps %xmm1, %xmm0
+; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1]
+; SSE-NEXT: movapd %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: insert_reg_lo_v4f32:
; AVX: # BB#0:
-; AVX-NEXT: vmovsd %xmm0, %xmm1, %xmm0
+; AVX-NEXT: vmovsd {{.*#+}} xmm0 = xmm0[0],xmm1[1]
; AVX-NEXT: retq
%a.cast = bitcast double %a to <2 x float>
%v = shufflevector <2 x float> %a.cast, <2 x float> undef, <4 x i32> <i32 0, i32 1, i32 undef, i32 undef>
@@ -1384,3 +1857,35 @@ define <4 x float> @shuffle_mem_v4f32_3210(<4 x float>* %ptr) {
%shuffle = shufflevector <4 x float> %a, <4 x float> undef, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
ret <4 x float> %shuffle
}
+
+;
+; Shuffle to logical bit shifts
+;
+
+define <4 x i32> @shuffle_v4i32_z0zX(<4 x i32> %a) {
+; SSE-LABEL: shuffle_v4i32_z0zX:
+; SSE: # BB#0:
+; SSE-NEXT: psllq $32, %xmm0
+; SSE-NEXT: retq
+;
+; AVX-LABEL: shuffle_v4i32_z0zX:
+; AVX: # BB#0:
+; AVX-NEXT: vpsllq $32, %xmm0, %xmm0
+; AVX-NEXT: retq
+ %shuffle = shufflevector <4 x i32> %a, <4 x i32> zeroinitializer, <4 x i32> <i32 4, i32 0, i32 4, i32 undef>
+ ret <4 x i32> %shuffle
+}
+
+define <4 x i32> @shuffle_v4i32_1z3z(<4 x i32> %a) {
+; SSE-LABEL: shuffle_v4i32_1z3z:
+; SSE: # BB#0:
+; SSE-NEXT: psrlq $32, %xmm0
+; SSE-NEXT: retq
+;
+; AVX-LABEL: shuffle_v4i32_1z3z:
+; AVX: # BB#0:
+; AVX-NEXT: vpsrlq $32, %xmm0, %xmm0
+; AVX-NEXT: retq
+ %shuffle = shufflevector <4 x i32> %a, <4 x i32> zeroinitializer, <4 x i32> <i32 1, i32 4, i32 3, i32 4>
+ ret <4 x i32> %shuffle
+}
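+
+; Hedged sketch (added for illustration, not part of the original test
+; file): the byte-shift analogue of the per-64-bit-lane shifts above.
+; Sliding all four i32 elements down by one lane and zero-filling the
+; top (index 4 selects from the zero vector) is a whole-register right
+; shift, so under the same lowering it should become a single
+; psrldq $4 / vpsrldq $4 rather than a blend-and-permute sequence.
+define <4 x i32> @shuffle_v4i32_123z_sketch(<4 x i32> %a) {
+  %shuffle = shufflevector <4 x i32> %a, <4 x i32> zeroinitializer, <4 x i32> <i32 1, i32 2, i32 3, i32 4>
+  ret <4 x i32> %shuffle
+}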
diff --git a/test/CodeGen/X86/vector-shuffle-128-v8.ll b/test/CodeGen/X86/vector-shuffle-128-v8.ll
index 59af434..eb77c38 100644
--- a/test/CodeGen/X86/vector-shuffle-128-v8.ll
+++ b/test/CodeGen/X86/vector-shuffle-128-v8.ll
@@ -1,8 +1,8 @@
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=x86-64 -x86-experimental-vector-shuffle-lowering | FileCheck %s --check-prefix=ALL --check-prefix=SSE --check-prefix=SSE2
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=x86-64 -mattr=+ssse3 -x86-experimental-vector-shuffle-lowering | FileCheck %s --check-prefix=ALL --check-prefix=SSE --check-prefix=SSSE3
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=x86-64 -mattr=+sse4.1 -x86-experimental-vector-shuffle-lowering | FileCheck %s --check-prefix=ALL --check-prefix=SSE --check-prefix=SSE41
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=x86-64 -mattr=+avx -x86-experimental-vector-shuffle-lowering | FileCheck %s --check-prefix=ALL --check-prefix=AVX --check-prefix=AVX1
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=x86-64 -mattr=+avx2 -x86-experimental-vector-shuffle-lowering | FileCheck %s --check-prefix=ALL --check-prefix=AVX --check-prefix=AVX2
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=x86-64 | FileCheck %s --check-prefix=ALL --check-prefix=SSE --check-prefix=SSE2
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=x86-64 -mattr=+ssse3 | FileCheck %s --check-prefix=ALL --check-prefix=SSE --check-prefix=SSSE3
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=x86-64 -mattr=+sse4.1 | FileCheck %s --check-prefix=ALL --check-prefix=SSE --check-prefix=SSE41
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=x86-64 -mattr=+avx | FileCheck %s --check-prefix=ALL --check-prefix=AVX --check-prefix=AVX1
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=x86-64 -mattr=+avx2 | FileCheck %s --check-prefix=ALL --check-prefix=AVX --check-prefix=AVX2
target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
target triple = "x86_64-unknown-unknown"
@@ -952,20 +952,15 @@ define <8 x i16> @shuffle_v8i16_109832ba(<8 x i16> %a, <8 x i16> %b) {
; SSE-LABEL: shuffle_v8i16_109832ba:
; SSE: # BB#0:
; SSE-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
-; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm0[2,0,3,1,4,5,6,7]
-; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,2,3]
; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[2,0,3,1,4,5,6,7]
-; SSE-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm0[0]
-; SSE-NEXT: movdqa %xmm1, %xmm0
+; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,6,4,7,5]
; SSE-NEXT: retq
;
; AVX-LABEL: shuffle_v8i16_109832ba:
; AVX: # BB#0:
; AVX-NEXT: vpunpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
-; AVX-NEXT: vpshuflw {{.*#+}} xmm1 = xmm0[2,0,3,1,4,5,6,7]
-; AVX-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,3,2,3]
; AVX-NEXT: vpshuflw {{.*#+}} xmm0 = xmm0[2,0,3,1,4,5,6,7]
-; AVX-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0]
+; AVX-NEXT: vpshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,6,4,7,5]
; AVX-NEXT: retq
%shuffle = shufflevector <8 x i16> %a, <8 x i16> %b, <8 x i32> <i32 1, i32 0, i32 9, i32 8, i32 3, i32 2, i32 11, i32 10>
ret <8 x i16> %shuffle
@@ -1023,36 +1018,33 @@ define <8 x i16> @shuffle_v8i16_0213cedf(<8 x i16> %a, <8 x i16> %b) {
define <8 x i16> @shuffle_v8i16_443aXXXX(<8 x i16> %a, <8 x i16> %b) {
; SSE2-LABEL: shuffle_v8i16_443aXXXX:
; SSE2: # BB#0:
-; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,1,2,3]
-; SSE2-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,0,2,3,4,5,6,7]
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
-; SSE2-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,2,2,3,4,5,6,7]
-; SSE2-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,6,5,6,7]
-; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
+; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [65535,65535,0,65535,65535,65535,65535,65535]
+; SSE2-NEXT: pand %xmm2, %xmm0
+; SSE2-NEXT: pandn %xmm1, %xmm2
+; SSE2-NEXT: por %xmm0, %xmm2
+; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm2[2,1,2,3]
+; SSE2-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,0,3,2,4,5,6,7]
; SSE2-NEXT: retq
;
; SSSE3-LABEL: shuffle_v8i16_443aXXXX:
; SSSE3: # BB#0:
-; SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,1,2,3]
-; SSSE3-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,0,2,3,4,5,6,7]
-; SSSE3-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
-; SSSE3-NEXT: pshufb {{.*#+}} xmm0 = xmm0[0,1,4,5,12,13,10,11,12,13,10,11,12,13,14,15]
+; SSSE3-NEXT: pshufb {{.*#+}} xmm1 = zero,zero,zero,zero,zero,zero,xmm1[4,5,u,u,u,u,u,u,u,u]
+; SSSE3-NEXT: pshufb {{.*#+}} xmm0 = xmm0[8,9,8,9,6,7],zero,zero,xmm0[u,u,u,u,u,u,u,u]
+; SSSE3-NEXT: por %xmm1, %xmm0
; SSSE3-NEXT: retq
;
; SSE41-LABEL: shuffle_v8i16_443aXXXX:
; SSE41: # BB#0:
+; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2],xmm0[3,4,5,6,7]
; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,1,2,3]
-; SSE41-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,0,2,3,4,5,6,7]
-; SSE41-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
-; SSE41-NEXT: pshufb {{.*#+}} xmm0 = xmm0[0,1,4,5,12,13,10,11,12,13,10,11,12,13,14,15]
+; SSE41-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,0,3,2,4,5,6,7]
; SSE41-NEXT: retq
;
; AVX-LABEL: shuffle_v8i16_443aXXXX:
; AVX: # BB#0:
+; AVX-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2],xmm0[3,4,5,6,7]
; AVX-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,1,2,3]
-; AVX-NEXT: vpshuflw {{.*#+}} xmm0 = xmm0[0,0,2,3,4,5,6,7]
-; AVX-NEXT: vpunpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
-; AVX-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,1,4,5,12,13,10,11,12,13,10,11,12,13,14,15]
+; AVX-NEXT: vpshuflw {{.*#+}} xmm0 = xmm0[0,0,3,2,4,5,6,7]
; AVX-NEXT: retq
%shuffle = shufflevector <8 x i16> %a, <8 x i16> %b, <8 x i32> <i32 4, i32 4, i32 3, i32 10, i32 undef, i32 undef, i32 undef, i32 undef>
ret <8 x i16> %shuffle
@@ -1061,34 +1053,37 @@ define <8 x i16> @shuffle_v8i16_443aXXXX(<8 x i16> %a, <8 x i16> %b) {
define <8 x i16> @shuffle_v8i16_032dXXXX(<8 x i16> %a, <8 x i16> %b) {
; SSE2-LABEL: shuffle_v8i16_032dXXXX:
; SSE2: # BB#0:
-; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[2,3,0,1]
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
-; SSE2-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,3,2,3,4,5,6,7]
-; SSE2-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,6,6,7]
-; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
+; SSE2-NEXT: movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1]
+; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm1[3,1,2,0]
+; SSE2-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,6,5,6,7]
+; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,1,2,3]
; SSE2-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,3,2,1,4,5,6,7]
; SSE2-NEXT: retq
;
; SSSE3-LABEL: shuffle_v8i16_032dXXXX:
; SSSE3: # BB#0:
-; SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm1[2,3,0,1]
-; SSSE3-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
-; SSSE3-NEXT: pshufb {{.*#+}} xmm0 = xmm0[0,1,12,13,8,9,6,7,8,9,12,13,12,13,14,15]
+; SSSE3-NEXT: pshufb {{.*#+}} xmm1 = zero,zero,zero,zero,zero,zero,xmm1[10,11,u,u,u,u,u,u,u,u]
+; SSSE3-NEXT: pshufb {{.*#+}} xmm0 = xmm0[0,1,6,7,4,5],zero,zero,xmm0[u,u,u,u,u,u,u,u]
+; SSSE3-NEXT: por %xmm1, %xmm0
; SSSE3-NEXT: retq
;
; SSE41-LABEL: shuffle_v8i16_032dXXXX:
; SSE41: # BB#0:
-; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm1[2,3,0,1]
-; SSE41-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
-; SSE41-NEXT: pshufb {{.*#+}} xmm0 = xmm0[0,1,12,13,8,9,6,7,8,9,12,13,12,13,14,15]
+; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm1[4,5,6,7]
+; SSE41-NEXT: pshufb {{.*#+}} xmm0 = xmm0[0,1,6,7,4,5,10,11,0,1,10,11,0,1,2,3]
; SSE41-NEXT: retq
;
-; AVX-LABEL: shuffle_v8i16_032dXXXX:
-; AVX: # BB#0:
-; AVX-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[2,3,0,1]
-; AVX-NEXT: vpunpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
-; AVX-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,1,12,13,8,9,6,7,8,9,12,13,12,13,14,15]
-; AVX-NEXT: retq
+; AVX1-LABEL: shuffle_v8i16_032dXXXX:
+; AVX1: # BB#0:
+; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm1[4,5,6,7]
+; AVX1-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,1,6,7,4,5,10,11,0,1,10,11,0,1,2,3]
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: shuffle_v8i16_032dXXXX:
+; AVX2: # BB#0:
+; AVX2-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3]
+; AVX2-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,1,6,7,4,5,10,11,0,1,10,11,0,1,2,3]
+; AVX2-NEXT: retq
%shuffle = shufflevector <8 x i16> %a, <8 x i16> %b, <8 x i32> <i32 0, i32 3, i32 2, i32 13, i32 undef, i32 undef, i32 undef, i32 undef>
ret <8 x i16> %shuffle
}
@@ -1109,33 +1104,30 @@ define <8 x i16> @shuffle_v8i16_XXXdXXXX(<8 x i16> %a, <8 x i16> %b) {
define <8 x i16> @shuffle_v8i16_012dXXXX(<8 x i16> %a, <8 x i16> %b) {
; SSE2-LABEL: shuffle_v8i16_012dXXXX:
; SSE2: # BB#0:
-; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[2,3,0,1]
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
-; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[3,1,2,0]
-; SSE2-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,6,6,7]
-; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,1,2,3]
-; SSE2-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[1,2,0,3,4,5,6,7]
+; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [65535,65535,65535,0,65535,65535,65535,65535]
+; SSE2-NEXT: pand %xmm2, %xmm0
+; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[2,2,3,3]
+; SSE2-NEXT: pandn %xmm1, %xmm2
+; SSE2-NEXT: por %xmm2, %xmm0
; SSE2-NEXT: retq
;
; SSSE3-LABEL: shuffle_v8i16_012dXXXX:
; SSSE3: # BB#0:
-; SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm1[2,3,0,1]
-; SSSE3-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
-; SSSE3-NEXT: pshufb {{.*#+}} xmm0 = xmm0[0,1,4,5,8,9,6,7,8,9,0,1,0,1,2,3]
+; SSSE3-NEXT: pshufb {{.*#+}} xmm1 = zero,zero,zero,zero,zero,zero,xmm1[10,11,u,u,u,u,u,u,u,u]
+; SSSE3-NEXT: pshufb {{.*#+}} xmm0 = xmm0[0,1,2,3,4,5],zero,zero,xmm0[u,u,u,u,u,u,u,u]
+; SSSE3-NEXT: por %xmm1, %xmm0
; SSSE3-NEXT: retq
;
; SSE41-LABEL: shuffle_v8i16_012dXXXX:
; SSE41: # BB#0:
-; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm1[2,3,0,1]
-; SSE41-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
-; SSE41-NEXT: pshufb {{.*#+}} xmm0 = xmm0[0,1,4,5,8,9,6,7,8,9,0,1,0,1,2,3]
+; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm1[2,2,3,3]
+; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1,2],xmm1[3],xmm0[4,5,6,7]
; SSE41-NEXT: retq
;
; AVX-LABEL: shuffle_v8i16_012dXXXX:
; AVX: # BB#0:
-; AVX-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[2,3,0,1]
-; AVX-NEXT: vpunpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
-; AVX-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,1,4,5,8,9,6,7,8,9,0,1,0,1,2,3]
+; AVX-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[2,2,3,3]
+; AVX-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2],xmm1[3],xmm0[4,5,6,7]
; AVX-NEXT: retq
%shuffle = shufflevector <8 x i16> %a, <8 x i16> %b, <8 x i32> <i32 0, i32 1, i32 2, i32 13, i32 undef, i32 undef, i32 undef, i32 undef>
ret <8 x i16> %shuffle
@@ -1144,41 +1136,37 @@ define <8 x i16> @shuffle_v8i16_012dXXXX(<8 x i16> %a, <8 x i16> %b) {
define <8 x i16> @shuffle_v8i16_XXXXcde3(<8 x i16> %a, <8 x i16> %b) {
; SSE2-LABEL: shuffle_v8i16_XXXXcde3:
; SSE2: # BB#0:
+; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [65535,65535,65535,65535,65535,65535,65535,0]
+; SSE2-NEXT: pand %xmm2, %xmm1
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,1,0,1]
-; SSE2-NEXT: punpckhwd {{.*#+}} xmm1 = xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
-; SSE2-NEXT: pshuflw {{.*#+}} xmm0 = xmm1[0,2,2,3,4,5,6,7]
-; SSE2-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,7,6,7]
-; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,1,0,2]
+; SSE2-NEXT: pandn %xmm0, %xmm2
+; SSE2-NEXT: por %xmm1, %xmm2
+; SSE2-NEXT: movdqa %xmm2, %xmm0
; SSE2-NEXT: retq
;
; SSSE3-LABEL: shuffle_v8i16_XXXXcde3:
; SSSE3: # BB#0:
-; SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,1,0,1]
-; SSSE3-NEXT: punpckhwd {{.*#+}} xmm1 = xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
-; SSSE3-NEXT: pshufb {{.*#+}} xmm1 = xmm1[0,1,4,5,4,5,6,7,0,1,4,5,8,9,14,15]
-; SSSE3-NEXT: movdqa %xmm1, %xmm0
+; SSSE3-NEXT: pshufb {{.*#+}} xmm0 = xmm0[u,u,u,u,u,u,u,u],zero,zero,zero,zero,zero,zero,xmm0[6,7]
+; SSSE3-NEXT: pshufb {{.*#+}} xmm1 = xmm1[u,u,u,u,u,u,u,u,8,9,10,11,12,13],zero,zero
+; SSSE3-NEXT: por %xmm1, %xmm0
; SSSE3-NEXT: retq
;
; SSE41-LABEL: shuffle_v8i16_XXXXcde3:
; SSE41: # BB#0:
; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,1,0,1]
-; SSE41-NEXT: punpckhwd {{.*#+}} xmm1 = xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
-; SSE41-NEXT: pshufb {{.*#+}} xmm1 = xmm1[0,1,4,5,4,5,6,7,0,1,4,5,8,9,14,15]
-; SSE41-NEXT: movdqa %xmm1, %xmm0
+; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm1[0,1,2,3,4,5,6],xmm0[7]
; SSE41-NEXT: retq
;
; AVX1-LABEL: shuffle_v8i16_XXXXcde3:
; AVX1: # BB#0:
; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,1,0,1]
-; AVX1-NEXT: vpunpckhwd {{.*#+}} xmm0 = xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
-; AVX1-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,1,4,5,4,5,6,7,0,1,4,5,8,9,14,15]
+; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm1[0,1,2,3,4,5,6],xmm0[7]
; AVX1-NEXT: retq
;
; AVX2-LABEL: shuffle_v8i16_XXXXcde3:
; AVX2: # BB#0:
; AVX2-NEXT: vpbroadcastq %xmm0, %xmm0
-; AVX2-NEXT: vpunpckhwd {{.*#+}} xmm0 = xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
-; AVX2-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,1,4,5,4,5,6,7,0,1,4,5,8,9,14,15]
+; AVX2-NEXT: vpblendw {{.*#+}} xmm0 = xmm1[0,1,2,3,4,5,6],xmm0[7]
; AVX2-NEXT: retq
%shuffle = shufflevector <8 x i16> %a, <8 x i16> %b, <8 x i32> <i32 undef, i32 undef, i32 undef, i32 undef, i32 12, i32 13, i32 14, i32 3>
ret <8 x i16> %shuffle
@@ -1187,42 +1175,32 @@ define <8 x i16> @shuffle_v8i16_XXXXcde3(<8 x i16> %a, <8 x i16> %b) {
define <8 x i16> @shuffle_v8i16_cde3XXXX(<8 x i16> %a, <8 x i16> %b) {
; SSE2-LABEL: shuffle_v8i16_cde3XXXX:
; SSE2: # BB#0:
-; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,1,0,1]
-; SSE2-NEXT: punpckhwd {{.*#+}} xmm1 = xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
-; SSE2-NEXT: pshuflw {{.*#+}} xmm0 = xmm1[0,2,2,3,4,5,6,7]
-; SSE2-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,7,6,7]
-; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
+; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [65535,65535,65535,0,65535,65535,65535,65535]
+; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[2,3,0,1]
+; SSE2-NEXT: pand %xmm2, %xmm1
+; SSE2-NEXT: pandn %xmm0, %xmm2
+; SSE2-NEXT: por %xmm1, %xmm2
+; SSE2-NEXT: movdqa %xmm2, %xmm0
; SSE2-NEXT: retq
;
; SSSE3-LABEL: shuffle_v8i16_cde3XXXX:
; SSSE3: # BB#0:
-; SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,1,0,1]
-; SSSE3-NEXT: punpckhwd {{.*#+}} xmm1 = xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
-; SSSE3-NEXT: pshufb {{.*#+}} xmm1 = xmm1[0,1,4,5,8,9,14,15,8,9,14,15,12,13,14,15]
-; SSSE3-NEXT: movdqa %xmm1, %xmm0
+; SSSE3-NEXT: pshufb {{.*#+}} xmm0 = zero,zero,zero,zero,zero,zero,xmm0[6,7,u,u,u,u,u,u,u,u]
+; SSSE3-NEXT: pshufb {{.*#+}} xmm1 = xmm1[8,9,10,11,12,13],zero,zero,xmm1[u,u,u,u,u,u,u,u]
+; SSSE3-NEXT: por %xmm1, %xmm0
; SSSE3-NEXT: retq
;
; SSE41-LABEL: shuffle_v8i16_cde3XXXX:
; SSE41: # BB#0:
-; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,1,0,1]
-; SSE41-NEXT: punpckhwd {{.*#+}} xmm1 = xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
-; SSE41-NEXT: pshufb {{.*#+}} xmm1 = xmm1[0,1,4,5,8,9,14,15,8,9,14,15,12,13,14,15]
-; SSE41-NEXT: movdqa %xmm1, %xmm0
+; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm1[2,3,0,1]
+; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[3],xmm1[4,5,6,7]
; SSE41-NEXT: retq
;
-; AVX1-LABEL: shuffle_v8i16_cde3XXXX:
-; AVX1: # BB#0:
-; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,1,0,1]
-; AVX1-NEXT: vpunpckhwd {{.*#+}} xmm0 = xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
-; AVX1-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,1,4,5,8,9,14,15,8,9,14,15,12,13,14,15]
-; AVX1-NEXT: retq
-;
-; AVX2-LABEL: shuffle_v8i16_cde3XXXX:
-; AVX2: # BB#0:
-; AVX2-NEXT: vpbroadcastq %xmm0, %xmm0
-; AVX2-NEXT: vpunpckhwd {{.*#+}} xmm0 = xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
-; AVX2-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,1,4,5,8,9,14,15,8,9,14,15,12,13,14,15]
-; AVX2-NEXT: retq
+; AVX-LABEL: shuffle_v8i16_cde3XXXX:
+; AVX: # BB#0:
+; AVX-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[2,3,0,1]
+; AVX-NEXT: vpblendw {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[3],xmm1[4,5,6,7]
+; AVX-NEXT: retq
%shuffle = shufflevector <8 x i16> %a, <8 x i16> %b, <8 x i32> <i32 12, i32 13, i32 14, i32 3, i32 undef, i32 undef, i32 undef, i32 undef>
ret <8 x i16> %shuffle
}
@@ -1230,100 +1208,117 @@ define <8 x i16> @shuffle_v8i16_cde3XXXX(<8 x i16> %a, <8 x i16> %b) {
define <8 x i16> @shuffle_v8i16_012dcde3(<8 x i16> %a, <8 x i16> %b) {
; SSE2-LABEL: shuffle_v8i16_012dcde3:
; SSE2: # BB#0:
-; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm0[0,1,0,1]
-; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm1[2,3,0,1]
-; SSE2-NEXT: punpckhwd {{.*#+}} xmm1 = xmm1[4],xmm2[4],xmm1[5],xmm2[5],xmm1[6],xmm2[6],xmm1[7],xmm2[7]
-; SSE2-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[0,2,2,3,4,5,6,7]
-; SSE2-NEXT: pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,7,6,7]
-; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1],xmm0[2],xmm3[2],xmm0[3],xmm3[3]
+; SSE2-NEXT: movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1]
+; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm1[0,3,2,1]
+; SSE2-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[3,1,2,0,4,5,6,7]
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[3,1,2,0]
-; SSE2-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,6,6,7]
-; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,1,2,3]
-; SSE2-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[1,2,0,3,4,5,6,7]
-; SSE2-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; SSE2-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,3,2,1,4,5,6,7]
+; SSE2-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,5,5,7]
+; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,3,2,1]
+; SSE2-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[1,3,0,2,4,5,6,7]
; SSE2-NEXT: retq
;
; SSSE3-LABEL: shuffle_v8i16_012dcde3:
; SSSE3: # BB#0:
-; SSSE3-NEXT: pshufd {{.*#+}} xmm2 = xmm0[0,1,0,1]
-; SSSE3-NEXT: pshufd {{.*#+}} xmm3 = xmm1[2,3,0,1]
-; SSSE3-NEXT: punpckhwd {{.*#+}} xmm1 = xmm1[4],xmm2[4],xmm1[5],xmm2[5],xmm1[6],xmm2[6],xmm1[7],xmm2[7]
-; SSSE3-NEXT: pshufb {{.*#+}} xmm1 = xmm1[0,1,4,5,8,9,14,15,8,9,14,15,12,13,14,15]
-; SSSE3-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1],xmm0[2],xmm3[2],xmm0[3],xmm3[3]
-; SSSE3-NEXT: pshufb {{.*#+}} xmm0 = xmm0[0,1,4,5,8,9,6,7,8,9,0,1,0,1,2,3]
-; SSSE3-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; SSSE3-NEXT: pshufb {{.*#+}} xmm1 = zero,zero,zero,zero,zero,zero,xmm1[10,11,8,9,10,11,12,13],zero,zero
+; SSSE3-NEXT: pshufb {{.*#+}} xmm0 = xmm0[0,1,2,3,4,5],zero,zero,zero,zero,zero,zero,zero,zero,xmm0[6,7]
+; SSSE3-NEXT: por %xmm1, %xmm0
; SSSE3-NEXT: retq
;
; SSE41-LABEL: shuffle_v8i16_012dcde3:
; SSE41: # BB#0:
-; SSE41-NEXT: pshufd {{.*#+}} xmm2 = xmm0[0,1,0,1]
-; SSE41-NEXT: pshufd {{.*#+}} xmm3 = xmm1[2,3,0,1]
-; SSE41-NEXT: punpckhwd {{.*#+}} xmm1 = xmm1[4],xmm2[4],xmm1[5],xmm2[5],xmm1[6],xmm2[6],xmm1[7],xmm2[7]
-; SSE41-NEXT: pshufb {{.*#+}} xmm1 = xmm1[0,1,4,5,8,9,14,15,8,9,14,15,12,13,14,15]
-; SSE41-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1],xmm0[2],xmm3[2],xmm0[3],xmm3[3]
-; SSE41-NEXT: pshufb {{.*#+}} xmm0 = xmm0[0,1,4,5,8,9,6,7,8,9,0,1,0,1,2,3]
-; SSE41-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm1[4,5,6,7]
+; SSE41-NEXT: pshufb {{.*#+}} xmm0 = xmm0[0,1,2,3,4,5,10,11,8,9,10,11,12,13,6,7]
; SSE41-NEXT: retq
;
; AVX1-LABEL: shuffle_v8i16_012dcde3:
; AVX1: # BB#0:
-; AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm0[0,1,0,1]
-; AVX1-NEXT: vpunpckhwd {{.*#+}} xmm2 = xmm1[4],xmm2[4],xmm1[5],xmm2[5],xmm1[6],xmm2[6],xmm1[7],xmm2[7]
-; AVX1-NEXT: vpshufb {{.*#+}} xmm2 = xmm2[0,1,4,5,8,9,14,15,8,9,14,15,12,13,14,15]
-; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[2,3,0,1]
-; AVX1-NEXT: vpunpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
-; AVX1-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,1,4,5,8,9,6,7,8,9,0,1,0,1,2,3]
-; AVX1-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm2[0]
+; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm1[4,5,6,7]
+; AVX1-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,1,2,3,4,5,10,11,8,9,10,11,12,13,6,7]
; AVX1-NEXT: retq
;
; AVX2-LABEL: shuffle_v8i16_012dcde3:
; AVX2: # BB#0:
-; AVX2-NEXT: vpbroadcastq %xmm0, %xmm2
-; AVX2-NEXT: vpunpckhwd {{.*#+}} xmm2 = xmm1[4],xmm2[4],xmm1[5],xmm2[5],xmm1[6],xmm2[6],xmm1[7],xmm2[7]
-; AVX2-NEXT: vpshufb {{.*#+}} xmm2 = xmm2[0,1,4,5,8,9,14,15,8,9,14,15,12,13,14,15]
-; AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[2,3,0,1]
-; AVX2-NEXT: vpunpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
-; AVX2-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,1,4,5,8,9,6,7,8,9,0,1,0,1,2,3]
-; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm2[0]
+; AVX2-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3]
+; AVX2-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,1,2,3,4,5,10,11,8,9,10,11,12,13,6,7]
; AVX2-NEXT: retq
%shuffle = shufflevector <8 x i16> %a, <8 x i16> %b, <8 x i32> <i32 0, i32 1, i32 2, i32 13, i32 12, i32 13, i32 14, i32 3>
ret <8 x i16> %shuffle
}
+define <8 x i16> @shuffle_v8i16_0923cde7(<8 x i16> %a, <8 x i16> %b) {
+; SSE2-LABEL: shuffle_v8i16_0923cde7:
+; SSE2: # BB#0:
+; SSE2-NEXT: movaps {{.*#+}} xmm2 = [65535,0,65535,65535,0,0,0,65535]
+; SSE2-NEXT: andps %xmm2, %xmm0
+; SSE2-NEXT: andnps %xmm1, %xmm2
+; SSE2-NEXT: orps %xmm2, %xmm0
+; SSE2-NEXT: retq
+;
+; SSSE3-LABEL: shuffle_v8i16_0923cde7:
+; SSSE3: # BB#0:
+; SSSE3-NEXT: movaps {{.*#+}} xmm2 = [65535,0,65535,65535,0,0,0,65535]
+; SSSE3-NEXT: andps %xmm2, %xmm0
+; SSSE3-NEXT: andnps %xmm1, %xmm2
+; SSSE3-NEXT: orps %xmm2, %xmm0
+; SSSE3-NEXT: retq
+;
+; SSE41-LABEL: shuffle_v8i16_0923cde7:
+; SSE41: # BB#0:
+; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2,3],xmm1[4,5,6],xmm0[7]
+; SSE41-NEXT: retq
+;
+; AVX-LABEL: shuffle_v8i16_0923cde7:
+; AVX: # BB#0:
+; AVX-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2,3],xmm1[4,5,6],xmm0[7]
+; AVX-NEXT: retq
+ %shuffle = shufflevector <8 x i16> %a, <8 x i16> %b, <8 x i32> <i32 0, i32 9, i32 2, i32 3, i32 12, i32 13, i32 14, i32 7>
+ ret <8 x i16> %shuffle
+}
+
define <8 x i16> @shuffle_v8i16_XXX1X579(<8 x i16> %a, <8 x i16> %b) {
; SSE2-LABEL: shuffle_v8i16_XXX1X579:
; SSE2: # BB#0:
-; SSE2-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,7,5,6,7]
-; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
-; SSE2-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,1,3,2,4,5,6,7]
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
-; SSE2-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,6,6,7]
-; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,1,2,1]
-; SSE2-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,1,2,2,4,5,6,7]
-; SSE2-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,4,5,7]
+; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm1[0,1,2,0]
+; SSE2-NEXT: movdqa {{.*#+}} xmm1 = [65535,65535,65535,65535,65535,65535,65535,0]
+; SSE2-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,1,2,1,4,5,6,7]
+; SSE2-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,5,7,7]
+; SSE2-NEXT: pand %xmm1, %xmm0
+; SSE2-NEXT: pandn %xmm2, %xmm1
+; SSE2-NEXT: por %xmm0, %xmm1
+; SSE2-NEXT: movdqa %xmm1, %xmm0
; SSE2-NEXT: retq
;
; SSSE3-LABEL: shuffle_v8i16_XXX1X579:
; SSSE3: # BB#0:
-; SSSE3-NEXT: pshufb {{.*#+}} xmm0 = xmm0[0,1,2,3,10,11,14,15,14,15,10,11,12,13,14,15]
-; SSSE3-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
-; SSSE3-NEXT: pshufb {{.*#+}} xmm0 = xmm0[0,1,2,3,4,5,4,5,8,9,8,9,12,13,6,7]
+; SSSE3-NEXT: pshufb {{.*#+}} xmm1 = xmm1[u,u,u,u,u,u],zero,zero,xmm1[u,u],zero,zero,zero,zero,xmm1[2,3]
+; SSSE3-NEXT: pshufb {{.*#+}} xmm0 = xmm0[u,u,u,u,u,u,2,3,u,u,10,11,14,15],zero,zero
+; SSSE3-NEXT: por %xmm1, %xmm0
; SSSE3-NEXT: retq
;
; SSE41-LABEL: shuffle_v8i16_XXX1X579:
; SSE41: # BB#0:
-; SSE41-NEXT: pshufb {{.*#+}} xmm0 = xmm0[0,1,2,3,10,11,14,15,14,15,10,11,12,13,14,15]
-; SSE41-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
-; SSE41-NEXT: pshufb {{.*#+}} xmm0 = xmm0[0,1,2,3,4,5,4,5,8,9,8,9,12,13,6,7]
+; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,1,2,0]
+; SSE41-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,1,2,1,4,5,6,7]
+; SSE41-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,5,7,7]
+; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,5,6],xmm1[7]
; SSE41-NEXT: retq
;
-; AVX-LABEL: shuffle_v8i16_XXX1X579:
-; AVX: # BB#0:
-; AVX-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,1,2,3,10,11,14,15,14,15,10,11,12,13,14,15]
-; AVX-NEXT: vpunpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
-; AVX-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,1,2,3,4,5,4,5,8,9,8,9,12,13,6,7]
-; AVX-NEXT: retq
+; AVX1-LABEL: shuffle_v8i16_XXX1X579:
+; AVX1: # BB#0:
+; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,1,2,0]
+; AVX1-NEXT: vpshuflw {{.*#+}} xmm0 = xmm0[0,1,2,1,4,5,6,7]
+; AVX1-NEXT: vpshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,5,7,7]
+; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,5,6],xmm1[7]
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: shuffle_v8i16_XXX1X579:
+; AVX2: # BB#0:
+; AVX2-NEXT: vpbroadcastd %xmm1, %xmm1
+; AVX2-NEXT: vpshuflw {{.*#+}} xmm0 = xmm0[0,1,2,1,4,5,6,7]
+; AVX2-NEXT: vpshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,5,7,7]
+; AVX2-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,5,6],xmm1[7]
+; AVX2-NEXT: retq
%shuffle = shufflevector <8 x i16> %a, <8 x i16> %b, <8 x i32> <i32 undef, i32 undef, i32 undef, i32 1, i32 undef, i32 5, i32 7, i32 9>
ret <8 x i16> %shuffle
}
@@ -1331,42 +1326,40 @@ define <8 x i16> @shuffle_v8i16_XXX1X579(<8 x i16> %a, <8 x i16> %b) {
define <8 x i16> @shuffle_v8i16_XX4X8acX(<8 x i16> %a, <8 x i16> %b) {
; SSE2-LABEL: shuffle_v8i16_XX4X8acX:
; SSE2: # BB#0:
-; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
-; SSE2-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[0,2,2,3,4,5,6,7]
-; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
-; SSE2-NEXT: pshuflw {{.*#+}} xmm0 = xmm1[0,1,2,0,4,5,6,7]
-; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,1,2,1]
-; SSE2-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,1,1,3,4,5,6,7]
-; SSE2-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,7,6,4,7]
+; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm0[2,2,3,3]
+; SSE2-NEXT: pshuflw {{.*#+}} xmm0 = xmm1[0,2,2,3,4,5,6,7]
+; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,1,2,0]
+; SSE2-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,6,7,4,7]
+; SSE2-NEXT: movsd {{.*#+}} xmm0 = xmm2[0],xmm0[1]
; SSE2-NEXT: retq
;
; SSSE3-LABEL: shuffle_v8i16_XX4X8acX:
; SSSE3: # BB#0:
-; SSSE3-NEXT: pshufd {{.*#+}} xmm2 = xmm0[2,3,0,1]
-; SSSE3-NEXT: pshuflw {{.*#+}} xmm0 = xmm1[0,2,2,3,4,5,6,7]
-; SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
-; SSSE3-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3]
-; SSSE3-NEXT: pshufb {{.*#+}} xmm0 = xmm0[0,1,2,3,2,3,0,1,0,1,4,5,8,9,0,1]
+; SSSE3-NEXT: pshufb {{.*#+}} xmm0 = xmm0[u,u,u,u,8,9,u,u],zero,zero,zero,zero,zero,zero,xmm0[u,u]
+; SSSE3-NEXT: pshufb {{.*#+}} xmm1 = xmm1[u,u,u,u],zero,zero,xmm1[u,u,0,1,4,5,8,9,u,u]
+; SSSE3-NEXT: por %xmm1, %xmm0
; SSSE3-NEXT: retq
;
; SSE41-LABEL: shuffle_v8i16_XX4X8acX:
; SSE41: # BB#0:
-; SSE41-NEXT: pshufd {{.*#+}} xmm2 = xmm0[2,3,0,1]
-; SSE41-NEXT: pshuflw {{.*#+}} xmm0 = xmm1[0,2,2,3,4,5,6,7]
-; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
-; SSE41-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3]
-; SSE41-NEXT: pshufb {{.*#+}} xmm0 = xmm0[0,1,2,3,2,3,0,1,0,1,4,5,8,9,0,1]
+; SSE41-NEXT: pshufb {{.*#+}} xmm1 = xmm1[0,1,4,5,4,5,6,7,0,1,4,5,8,9,4,5]
+; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,2,3,3]
+; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm1[4,5,6,7]
; SSE41-NEXT: retq
;
-; AVX-LABEL: shuffle_v8i16_XX4X8acX:
-; AVX: # BB#0:
-; AVX-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
-; AVX-NEXT: vpshuflw {{.*#+}} xmm1 = xmm1[0,2,2,3,4,5,6,7]
-; AVX-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
-; AVX-NEXT: vpunpcklwd {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
-; AVX-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,1,2,3,2,3,0,1,0,1,4,5,8,9,0,1]
-; AVX-NEXT: retq
+; AVX1-LABEL: shuffle_v8i16_XX4X8acX:
+; AVX1: # BB#0:
+; AVX1-NEXT: vpshufb {{.*#+}} xmm1 = xmm1[0,1,4,5,4,5,6,7,0,1,4,5,8,9,4,5]
+; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,2,3,3]
+; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm1[4,5,6,7]
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: shuffle_v8i16_XX4X8acX:
+; AVX2: # BB#0:
+; AVX2-NEXT: vpshufb {{.*#+}} xmm1 = xmm1[0,1,4,5,4,5,6,7,0,1,4,5,8,9,4,5]
+; AVX2-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,2,3,3]
+; AVX2-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3]
+; AVX2-NEXT: retq
%shuffle = shufflevector <8 x i16> %a, <8 x i16> %b, <8 x i32> <i32 undef, i32 undef, i32 4, i32 undef, i32 8, i32 10, i32 12, i32 undef>
ret <8 x i16> %shuffle
}
@@ -1429,15 +1422,13 @@ define <8 x i16> @shuffle_v8i16_zzzzz8zz(i16 %i) {
define <8 x i16> @shuffle_v8i16_zuuzuuz8(i16 %i) {
; SSE-LABEL: shuffle_v8i16_zuuzuuz8:
; SSE: # BB#0:
-; SSE-NEXT: movzwl %di, %eax
-; SSE-NEXT: movd %eax, %xmm0
+; SSE-NEXT: movd %edi, %xmm0
; SSE-NEXT: pslldq {{.*#+}} xmm0 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,xmm0[0,1]
; SSE-NEXT: retq
;
; AVX-LABEL: shuffle_v8i16_zuuzuuz8:
; AVX: # BB#0:
-; AVX-NEXT: movzwl %di, %eax
-; AVX-NEXT: vmovd %eax, %xmm0
+; AVX-NEXT: vmovd %edi, %xmm0
; AVX-NEXT: vpslldq {{.*#+}} xmm0 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,xmm0[0,1]
; AVX-NEXT: retq
%a = insertelement <8 x i16> undef, i16 %i, i32 0
@@ -1571,20 +1562,10 @@ define <8 x i16> @shuffle_v8i16_u6uu123u(<8 x i16> %a, <8 x i16> %b) {
}
define <8 x i16> @shuffle_v8i16_uuuu123u(<8 x i16> %a, <8 x i16> %b) {
-; SSE2-LABEL: shuffle_v8i16_uuuu123u:
-; SSE2: # BB#0:
-; SSE2-NEXT: pslldq {{.*#+}} xmm0 = zero,zero,zero,zero,zero,zero,xmm0[0,1,2,3,4,5,6,7,8,9]
-; SSE2-NEXT: retq
-;
-; SSSE3-LABEL: shuffle_v8i16_uuuu123u:
-; SSSE3: # BB#0:
-; SSSE3-NEXT: pslldq {{.*#+}} xmm0 = zero,zero,zero,zero,zero,zero,xmm0[0,1,2,3,4,5,6,7,8,9]
-; SSSE3-NEXT: retq
-;
-; SSE41-LABEL: shuffle_v8i16_uuuu123u:
-; SSE41: # BB#0:
-; SSE41-NEXT: pslldq {{.*#+}} xmm0 = zero,zero,zero,zero,zero,zero,xmm0[0,1,2,3,4,5,6,7,8,9]
-; SSE41-NEXT: retq
+; SSE-LABEL: shuffle_v8i16_uuuu123u:
+; SSE: # BB#0:
+; SSE-NEXT: pslldq {{.*#+}} xmm0 = zero,zero,zero,zero,zero,zero,xmm0[0,1,2,3,4,5,6,7,8,9]
+; SSE-NEXT: retq
;
; AVX-LABEL: shuffle_v8i16_uuuu123u:
; AVX: # BB#0:
@@ -1701,20 +1682,10 @@ define <8 x i16> @shuffle_v8i16_u456uu1u(<8 x i16> %a, <8 x i16> %b) {
}
define <8 x i16> @shuffle_v8i16_u456uuuu(<8 x i16> %a, <8 x i16> %b) {
-; SSE2-LABEL: shuffle_v8i16_u456uuuu:
-; SSE2: # BB#0:
-; SSE2-NEXT: psrldq {{.*#+}} xmm0 = xmm0[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero
-; SSE2-NEXT: retq
-;
-; SSSE3-LABEL: shuffle_v8i16_u456uuuu:
-; SSSE3: # BB#0:
-; SSSE3-NEXT: psrldq {{.*#+}} xmm0 = xmm0[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero
-; SSSE3-NEXT: retq
-;
-; SSE41-LABEL: shuffle_v8i16_u456uuuu:
-; SSE41: # BB#0:
-; SSE41-NEXT: psrldq {{.*#+}} xmm0 = xmm0[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero
-; SSE41-NEXT: retq
+; SSE-LABEL: shuffle_v8i16_u456uuuu:
+; SSE: # BB#0:
+; SSE-NEXT: psrldq {{.*#+}} xmm0 = xmm0[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero
+; SSE-NEXT: retq
;
; AVX-LABEL: shuffle_v8i16_u456uuuu:
; AVX: # BB#0:
@@ -1851,12 +1822,12 @@ define <8 x i16> @shuffle_v8i16_0uuu1uuu(<8 x i16> %a) {
;
; SSE41-LABEL: shuffle_v8i16_0uuu1uuu:
; SSE41: # BB#0:
-; SSE41-NEXT: pmovzxwq %xmm0, %xmm0
+; SSE41-NEXT: pmovzxwq {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero
; SSE41-NEXT: retq
;
; AVX-LABEL: shuffle_v8i16_0uuu1uuu:
; AVX: # BB#0:
-; AVX-NEXT: vpmovzxwq %xmm0, %xmm0
+; AVX-NEXT: vpmovzxwq {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero
; AVX-NEXT: retq
%shuffle = shufflevector <8 x i16> %a, <8 x i16> zeroinitializer, <8 x i32> <i32 0, i32 undef, i32 undef, i32 undef, i32 1, i32 undef, i32 undef, i32 undef>
ret <8 x i16> %shuffle
@@ -1879,12 +1850,12 @@ define <8 x i16> @shuffle_v8i16_0zzz1zzz(<8 x i16> %a) {
;
; SSE41-LABEL: shuffle_v8i16_0zzz1zzz:
; SSE41: # BB#0:
-; SSE41-NEXT: pmovzxwq %xmm0, %xmm0
+; SSE41-NEXT: pmovzxwq {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero
; SSE41-NEXT: retq
;
; AVX-LABEL: shuffle_v8i16_0zzz1zzz:
; AVX: # BB#0:
-; AVX-NEXT: vpmovzxwq %xmm0, %xmm0
+; AVX-NEXT: vpmovzxwq {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero
; AVX-NEXT: retq
%shuffle = shufflevector <8 x i16> %a, <8 x i16> zeroinitializer, <8 x i32> <i32 0, i32 9, i32 10, i32 11, i32 1, i32 13, i32 14, i32 15>
ret <8 x i16> %shuffle
@@ -1903,12 +1874,12 @@ define <8 x i16> @shuffle_v8i16_0u1u2u3u(<8 x i16> %a) {
;
; SSE41-LABEL: shuffle_v8i16_0u1u2u3u:
; SSE41: # BB#0:
-; SSE41-NEXT: pmovzxwd %xmm0, %xmm0
+; SSE41-NEXT: pmovzxwd {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
; SSE41-NEXT: retq
;
; AVX-LABEL: shuffle_v8i16_0u1u2u3u:
; AVX: # BB#0:
-; AVX-NEXT: vpmovzxwd %xmm0, %xmm0
+; AVX-NEXT: vpmovzxwd {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
; AVX-NEXT: retq
%shuffle = shufflevector <8 x i16> %a, <8 x i16> zeroinitializer, <8 x i32> <i32 0, i32 undef, i32 1, i32 undef, i32 2, i32 undef, i32 3, i32 undef>
ret <8 x i16> %shuffle
@@ -1929,13 +1900,254 @@ define <8 x i16> @shuffle_v8i16_0z1z2z3z(<8 x i16> %a) {
;
; SSE41-LABEL: shuffle_v8i16_0z1z2z3z:
; SSE41: # BB#0:
-; SSE41-NEXT: pmovzxwd %xmm0, %xmm0
+; SSE41-NEXT: pmovzxwd {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
; SSE41-NEXT: retq
;
; AVX-LABEL: shuffle_v8i16_0z1z2z3z:
; AVX: # BB#0:
-; AVX-NEXT: vpmovzxwd %xmm0, %xmm0
+; AVX-NEXT: vpmovzxwd {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
; AVX-NEXT: retq
%shuffle = shufflevector <8 x i16> %a, <8 x i16> zeroinitializer, <8 x i32> <i32 0, i32 9, i32 1, i32 11, i32 2, i32 13, i32 3, i32 15>
ret <8 x i16> %shuffle
}
+
+;
+; Shuffle to logical bit shifts
+;
+define <8 x i16> @shuffle_v8i16_z0z2z4z6(<8 x i16> %a) {
+; SSE-LABEL: shuffle_v8i16_z0z2z4z6:
+; SSE: # BB#0:
+; SSE-NEXT: pslld $16, %xmm0
+; SSE-NEXT: retq
+;
+; AVX-LABEL: shuffle_v8i16_z0z2z4z6:
+; AVX: # BB#0:
+; AVX-NEXT: vpslld $16, %xmm0, %xmm0
+; AVX-NEXT: retq
+ %shuffle = shufflevector <8 x i16> %a, <8 x i16> zeroinitializer, <8 x i32><i32 8, i32 0, i32 8, i32 2, i32 8, i32 4, i32 8, i32 6>
+ ret <8 x i16> %shuffle
+}
+
+define <8 x i16> @shuffle_v8i16_zzz0zzz4(<8 x i16> %a) {
+; SSE-LABEL: shuffle_v8i16_zzz0zzz4:
+; SSE: # BB#0:
+; SSE-NEXT: psllq $48, %xmm0
+; SSE-NEXT: retq
+;
+; AVX-LABEL: shuffle_v8i16_zzz0zzz4:
+; AVX: # BB#0:
+; AVX-NEXT: vpsllq $48, %xmm0, %xmm0
+; AVX-NEXT: retq
+ %shuffle = shufflevector <8 x i16> %a, <8 x i16> zeroinitializer, <8 x i32><i32 8, i32 8, i32 8, i32 0, i32 8, i32 8, i32 8, i32 4>
+ ret <8 x i16> %shuffle
+}
+
+define <8 x i16> @shuffle_v8i16_zz01zX4X(<8 x i16> %a) {
+; SSE-LABEL: shuffle_v8i16_zz01zX4X:
+; SSE: # BB#0:
+; SSE-NEXT: psllq $32, %xmm0
+; SSE-NEXT: retq
+;
+; AVX-LABEL: shuffle_v8i16_zz01zX4X:
+; AVX: # BB#0:
+; AVX-NEXT: vpsllq $32, %xmm0, %xmm0
+; AVX-NEXT: retq
+ %shuffle = shufflevector <8 x i16> %a, <8 x i16> zeroinitializer, <8 x i32><i32 8, i32 8, i32 0, i32 1, i32 8, i32 undef, i32 4, i32 undef>
+ ret <8 x i16> %shuffle
+}
+
+define <8 x i16> @shuffle_v8i16_z0X2z456(<8 x i16> %a) {
+; SSE-LABEL: shuffle_v8i16_z0X2z456:
+; SSE: # BB#0:
+; SSE-NEXT: psllq $16, %xmm0
+; SSE-NEXT: retq
+;
+; AVX-LABEL: shuffle_v8i16_z0X2z456:
+; AVX: # BB#0:
+; AVX-NEXT: vpsllq $16, %xmm0, %xmm0
+; AVX-NEXT: retq
+ %shuffle = shufflevector <8 x i16> %a, <8 x i16> zeroinitializer, <8 x i32><i32 8, i32 0, i32 undef, i32 2, i32 8, i32 4, i32 5, i32 6>
+ ret <8 x i16> %shuffle
+}
+
+define <8 x i16> @shuffle_v8i16_1z3zXz7z(<8 x i16> %a) {
+; SSE-LABEL: shuffle_v8i16_1z3zXz7z:
+; SSE: # BB#0:
+; SSE-NEXT: psrld $16, %xmm0
+; SSE-NEXT: retq
+;
+; AVX-LABEL: shuffle_v8i16_1z3zXz7z:
+; AVX: # BB#0:
+; AVX-NEXT: vpsrld $16, %xmm0, %xmm0
+; AVX-NEXT: retq
+ %shuffle = shufflevector <8 x i16> %a, <8 x i16> zeroinitializer, <8 x i32><i32 1, i32 8, i32 3, i32 8, i32 undef, i32 8, i32 7, i32 8>
+ ret <8 x i16> %shuffle
+}
+
+define <8 x i16> @shuffle_v8i16_1X3z567z(<8 x i16> %a) {
+; SSE-LABEL: shuffle_v8i16_1X3z567z:
+; SSE: # BB#0:
+; SSE-NEXT: psrlq $16, %xmm0
+; SSE-NEXT: retq
+;
+; AVX-LABEL: shuffle_v8i16_1X3z567z:
+; AVX: # BB#0:
+; AVX-NEXT: vpsrlq $16, %xmm0, %xmm0
+; AVX-NEXT: retq
+ %shuffle = shufflevector <8 x i16> %a, <8 x i16> zeroinitializer, <8 x i32><i32 1, i32 undef, i32 3, i32 8, i32 5, i32 6, i32 7, i32 8>
+ ret <8 x i16> %shuffle
+}
+
+define <8 x i16> @shuffle_v8i16_23zz67zz(<8 x i16> %a) {
+; SSE-LABEL: shuffle_v8i16_23zz67zz:
+; SSE: # BB#0:
+; SSE-NEXT: psrlq $32, %xmm0
+; SSE-NEXT: retq
+;
+; AVX-LABEL: shuffle_v8i16_23zz67zz:
+; AVX: # BB#0:
+; AVX-NEXT: vpsrlq $32, %xmm0, %xmm0
+; AVX-NEXT: retq
+ %shuffle = shufflevector <8 x i16> %a, <8 x i16> zeroinitializer, <8 x i32><i32 2, i32 3, i32 8, i32 8, i32 6, i32 7, i32 8, i32 8>
+ ret <8 x i16> %shuffle
+}
+
+define <8 x i16> @shuffle_v8i16_3zXXXzzz(<8 x i16> %a) {
+; SSE-LABEL: shuffle_v8i16_3zXXXzzz:
+; SSE: # BB#0:
+; SSE-NEXT: psrlq $48, %xmm0
+; SSE-NEXT: retq
+;
+; AVX-LABEL: shuffle_v8i16_3zXXXzzz:
+; AVX: # BB#0:
+; AVX-NEXT: vpsrlq $48, %xmm0, %xmm0
+; AVX-NEXT: retq
+ %shuffle = shufflevector <8 x i16> %a, <8 x i16> zeroinitializer, <8 x i32><i32 3, i32 8, i32 undef, i32 undef, i32 undef, i32 8, i32 8, i32 8>
+ ret <8 x i16> %shuffle
+}
+
+define <8 x i16> @shuffle_v8i16_01u3zzuz(<8 x i16> %a) {
+; SSE-LABEL: shuffle_v8i16_01u3zzuz:
+; SSE: # BB#0:
+; SSE-NEXT: movq {{.*#+}} xmm0 = xmm0[0],zero
+; SSE-NEXT: retq
+;
+; AVX-LABEL: shuffle_v8i16_01u3zzuz:
+; AVX: # BB#0:
+; AVX-NEXT: vmovq {{.*#+}} xmm0 = xmm0[0],zero
+; AVX-NEXT: retq
+ %shuffle = shufflevector <8 x i16> %a, <8 x i16> zeroinitializer, <8 x i32> <i32 0, i32 1, i32 undef, i32 3, i32 8, i32 8, i32 undef, i32 8>
+ ret <8 x i16> %shuffle
+}
+
+define <8 x i16> @shuffle_v8i16_0z234567(<8 x i16> %a) {
+; SSE2-LABEL: shuffle_v8i16_0z234567:
+; SSE2: # BB#0:
+; SSE2-NEXT: andps {{.*}}(%rip), %xmm0
+; SSE2-NEXT: retq
+;
+; SSSE3-LABEL: shuffle_v8i16_0z234567:
+; SSSE3: # BB#0:
+; SSSE3-NEXT: andps {{.*}}(%rip), %xmm0
+; SSSE3-NEXT: retq
+;
+; SSE41-LABEL: shuffle_v8i16_0z234567:
+; SSE41: # BB#0:
+; SSE41-NEXT: pxor %xmm1, %xmm1
+; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2,3,4,5,6,7]
+; SSE41-NEXT: retq
+;
+; AVX-LABEL: shuffle_v8i16_0z234567:
+; AVX: # BB#0:
+; AVX-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; AVX-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2,3,4,5,6,7]
+; AVX-NEXT: retq
+ %shuffle = shufflevector <8 x i16> %a, <8 x i16> zeroinitializer, <8 x i32> <i32 0, i32 9, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+ ret <8 x i16> %shuffle
+}
+
+define <8 x i16> @shuffle_v8i16_0zzzz5z7(<8 x i16> %a) {
+; SSE2-LABEL: shuffle_v8i16_0zzzz5z7:
+; SSE2: # BB#0:
+; SSE2-NEXT: andps {{.*}}(%rip), %xmm0
+; SSE2-NEXT: retq
+;
+; SSSE3-LABEL: shuffle_v8i16_0zzzz5z7:
+; SSSE3: # BB#0:
+; SSSE3-NEXT: andps {{.*}}(%rip), %xmm0
+; SSSE3-NEXT: retq
+;
+; SSE41-LABEL: shuffle_v8i16_0zzzz5z7:
+; SSE41: # BB#0:
+; SSE41-NEXT: pxor %xmm1, %xmm1
+; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0],xmm1[1,2,3,4],xmm0[5],xmm1[6],xmm0[7]
+; SSE41-NEXT: retq
+;
+; AVX-LABEL: shuffle_v8i16_0zzzz5z7:
+; AVX: # BB#0:
+; AVX-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; AVX-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0],xmm1[1,2,3,4],xmm0[5],xmm1[6],xmm0[7]
+; AVX-NEXT: retq
+ %shuffle = shufflevector <8 x i16> %a, <8 x i16> zeroinitializer, <8 x i32> <i32 0, i32 8, i32 8, i32 8, i32 8, i32 5, i32 8, i32 7>
+ ret <8 x i16> %shuffle
+}
+
+define <8 x i16> @shuffle_v8i16_0123456z(<8 x i16> %a) {
+; SSE2-LABEL: shuffle_v8i16_0123456z:
+; SSE2: # BB#0:
+; SSE2-NEXT: andps {{.*}}(%rip), %xmm0
+; SSE2-NEXT: retq
+;
+; SSSE3-LABEL: shuffle_v8i16_0123456z:
+; SSSE3: # BB#0:
+; SSSE3-NEXT: andps {{.*}}(%rip), %xmm0
+; SSSE3-NEXT: retq
+;
+; SSE41-LABEL: shuffle_v8i16_0123456z:
+; SSE41: # BB#0:
+; SSE41-NEXT: pxor %xmm1, %xmm1
+; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2,3,4,5,6],xmm1[7]
+; SSE41-NEXT: retq
+;
+; AVX-LABEL: shuffle_v8i16_0123456z:
+; AVX: # BB#0:
+; AVX-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; AVX-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2,3,4,5,6],xmm1[7]
+; AVX-NEXT: retq
+ %shuffle = shufflevector <8 x i16> %a, <8 x i16> zeroinitializer, <8 x i32> <i32 0, i32 9, i32 2, i32 3, i32 4, i32 5, i32 6, i32 15>
+ ret <8 x i16> %shuffle
+}
+
+define <8 x i16> @shuffle_v8i16_fu3ucc5u(<8 x i16> %a, <8 x i16> %b) {
+; SSE-LABEL: shuffle_v8i16_fu3ucc5u:
+; SSE: # BB#0:
+; SSE-NEXT: pslldq {{.*#+}} xmm0 = zero,zero,xmm0[0,1,2,3,4,5,6,7,8,9,10,11,12,13]
+; SSE-NEXT: pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,7,5,4,4]
+; SSE-NEXT: punpckhdq {{.*#+}} xmm1 = xmm1[2],xmm0[2],xmm1[3],xmm0[3]
+; SSE-NEXT: movdqa %xmm1, %xmm0
+; SSE-NEXT: retq
+;
+; AVX-LABEL: shuffle_v8i16_fu3ucc5u:
+; AVX: # BB#0:
+; AVX-NEXT: vpslldq {{.*#+}} xmm0 = zero,zero,xmm0[0,1,2,3,4,5,6,7,8,9,10,11,12,13]
+; AVX-NEXT: vpshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,7,5,4,4]
+; AVX-NEXT: vpunpckhdq {{.*#+}} xmm0 = xmm1[2],xmm0[2],xmm1[3],xmm0[3]
+; AVX-NEXT: retq
+ %shuffle = shufflevector <8 x i16> %a, <8 x i16> %b, <8 x i32> <i32 15, i32 undef, i32 3, i32 undef, i32 12, i32 12, i32 5, i32 undef>
+ ret <8 x i16> %shuffle
+}
+
+define <8 x i16> @shuffle_v8i16_8012345u(<8 x i16> %a) {
+; SSE-LABEL: shuffle_v8i16_8012345u:
+; SSE: # BB#0:
+; SSE-NEXT: pslldq {{.*#+}} xmm0 = zero,zero,xmm0[0,1,2,3,4,5,6,7,8,9,10,11,12,13]
+; SSE-NEXT: retq
+;
+; AVX-LABEL: shuffle_v8i16_8012345u:
+; AVX: # BB#0:
+; AVX-NEXT: vpslldq {{.*#+}} xmm0 = zero,zero,xmm0[0,1,2,3,4,5,6,7,8,9,10,11,12,13]
+; AVX-NEXT: retq
+ %shuffle = shufflevector <8 x i16> %a, <8 x i16> zeroinitializer, <8 x i32> <i32 8, i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 undef>
+
+ ret <8 x i16> %shuffle
+}
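+
+; Hedged sketch (added for illustration, not part of the original test
+; file): the mirror of @shuffle_v8i16_8012345u above. Sliding every
+; element down by one i16 lane and zero-filling the top (index 8 picks
+; an element of the zero vector) is a whole-register right byte shift,
+; so it should lower to a single psrldq $2 / vpsrldq $2.
+define <8 x i16> @shuffle_v8i16_1234567z_sketch(<8 x i16> %a) {
+  %shuffle = shufflevector <8 x i16> %a, <8 x i16> zeroinitializer, <8 x i32> <i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8>
+  ret <8 x i16> %shuffle
+}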
diff --git a/test/CodeGen/X86/vector-shuffle-256-v16.ll b/test/CodeGen/X86/vector-shuffle-256-v16.ll
index 4db0280..d00596d 100644
--- a/test/CodeGen/X86/vector-shuffle-256-v16.ll
+++ b/test/CodeGen/X86/vector-shuffle-256-v16.ll
@@ -1,5 +1,5 @@
-; RUN: llc < %s -mcpu=x86-64 -mattr=+avx -x86-experimental-vector-shuffle-lowering | FileCheck %s --check-prefix=ALL --check-prefix=AVX1
-; RUN: llc < %s -mcpu=x86-64 -mattr=+avx2 -x86-experimental-vector-shuffle-lowering | FileCheck %s --check-prefix=ALL --check-prefix=AVX2
+; RUN: llc < %s -mcpu=x86-64 -mattr=+avx | FileCheck %s --check-prefix=ALL --check-prefix=AVX1
+; RUN: llc < %s -mcpu=x86-64 -mattr=+avx2 | FileCheck %s --check-prefix=ALL --check-prefix=AVX2
target triple = "x86_64-unknown-unknown"
@@ -151,9 +151,7 @@ define <16 x i16> @shuffle_v16i16_00_00_00_00_00_00_00_08_00_00_00_00_00_00_00_0
; AVX1: # BB#0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX1-NEXT: vpunpcklwd {{.*#+}} xmm1 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
-; AVX1-NEXT: vpshuflw {{.*#+}} xmm1 = xmm1[0,0,0,1,4,5,6,7]
-; AVX1-NEXT: vpshuflw {{.*#+}} xmm2 = xmm0[0,0,0,0,4,5,6,7]
-; AVX1-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm2[0],xmm1[0]
+; AVX1-NEXT: vpshufb {{.*#+}} xmm1 = xmm1[0,1,0,1,0,1,0,1,0,1,0,1,0,1,2,3]
; AVX1-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1]
; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
; AVX1-NEXT: retq
@@ -175,9 +173,7 @@ define <16 x i16> @shuffle_v16i16_00_00_00_00_00_00_09_00_00_00_00_00_00_00_00_0
; AVX1: # BB#0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX1-NEXT: vpunpcklwd {{.*#+}} xmm1 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
-; AVX1-NEXT: vpshuflw {{.*#+}} xmm1 = xmm1[0,0,3,0,4,5,6,7]
-; AVX1-NEXT: vpshuflw {{.*#+}} xmm2 = xmm0[0,0,0,0,4,5,6,7]
-; AVX1-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm2[0],xmm1[0]
+; AVX1-NEXT: vpshufb {{.*#+}} xmm1 = xmm1[0,1,0,1,0,1,0,1,0,1,0,1,6,7,0,1]
; AVX1-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1]
; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
; AVX1-NEXT: retq
@@ -185,10 +181,9 @@ define <16 x i16> @shuffle_v16i16_00_00_00_00_00_00_09_00_00_00_00_00_00_00_00_0
; AVX2-LABEL: shuffle_v16i16_00_00_00_00_00_00_09_00_00_00_00_00_00_00_00_00:
; AVX2: # BB#0:
; AVX2-NEXT: vperm2i128 {{.*#+}} ymm1 = ymm0[2,3,0,1]
-; AVX2-NEXT: vpshufb {{.*#+}} ymm1 = ymm1[u,u,u,u,u,u,u,u,u,u,u,u,2,3,u,u,16,17,16,17,16,17,16,17,16,17,16,17,16,17,16,17]
-; AVX2-NEXT: vpbroadcastw %xmm0, %ymm0
-; AVX2-NEXT: vmovdqa {{.*#+}} ymm2 = [0,0,0,0,0,0,0,0,0,0,0,0,255,255,0,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255]
+; AVX2-NEXT: vmovdqa {{.*#+}} ymm2 = <0,0,255,255,u,u,u,u,u,u,u,u,u,u,u,u,255,255,u,u,u,u,u,u,u,u,u,u,u,u,u,u>
; AVX2-NEXT: vpblendvb %ymm2, %ymm1, %ymm0, %ymm0
+; AVX2-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,0,1,0,1,0,1,0,1,0,1,2,3,0,1,16,17,16,17,16,17,16,17,16,17,16,17,16,17,16,17]
; AVX2-NEXT: retq
%shuffle = shufflevector <16 x i16> %a, <16 x i16> %b, <16 x i32> <i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 9, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0>
ret <16 x i16> %shuffle
@@ -199,10 +194,7 @@ define <16 x i16> @shuffle_v16i16_00_00_00_00_00_10_00_00_00_00_00_00_00_00_00_0
; AVX1: # BB#0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX1-NEXT: vpunpcklwd {{.*#+}} xmm1 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
-; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
-; AVX1-NEXT: vpshuflw {{.*#+}} xmm1 = xmm1[0,3,0,0,4,5,6,7]
-; AVX1-NEXT: vpshuflw {{.*#+}} xmm2 = xmm0[0,0,0,0,4,5,6,7]
-; AVX1-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm2[0],xmm1[0]
+; AVX1-NEXT: vpshufb {{.*#+}} xmm1 = xmm1[0,1,0,1,0,1,0,1,0,1,10,11,0,1,0,1]
; AVX1-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1]
; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
; AVX1-NEXT: retq
@@ -210,10 +202,8 @@ define <16 x i16> @shuffle_v16i16_00_00_00_00_00_10_00_00_00_00_00_00_00_00_00_0
; AVX2-LABEL: shuffle_v16i16_00_00_00_00_00_10_00_00_00_00_00_00_00_00_00_00:
; AVX2: # BB#0:
; AVX2-NEXT: vperm2i128 {{.*#+}} ymm1 = ymm0[2,3,0,1]
-; AVX2-NEXT: vpshufb {{.*#+}} ymm1 = ymm1[u,u,u,u,u,u,u,u,u,u,4,5,u,u,u,u,16,17,16,17,16,17,16,17,16,17,16,17,16,17,16,17]
-; AVX2-NEXT: vpbroadcastw %xmm0, %ymm0
-; AVX2-NEXT: vmovdqa {{.*#+}} ymm2 = [0,0,0,0,0,0,0,0,0,0,255,255,0,0,0,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255]
-; AVX2-NEXT: vpblendvb %ymm2, %ymm1, %ymm0, %ymm0
+; AVX2-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0],ymm1[1,2,3,4,5,6,7]
+; AVX2-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,0,1,0,1,0,1,0,1,4,5,0,1,0,1,16,17,16,17,16,17,16,17,16,17,16,17,16,17,16,17]
; AVX2-NEXT: retq
%shuffle = shufflevector <16 x i16> %a, <16 x i16> %b, <16 x i32> <i32 0, i32 0, i32 0, i32 0, i32 0, i32 10, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0>
ret <16 x i16> %shuffle
@@ -224,10 +214,7 @@ define <16 x i16> @shuffle_v16i16_00_00_00_00_11_00_00_00_00_00_00_00_00_00_00_0
; AVX1: # BB#0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX1-NEXT: vpunpcklwd {{.*#+}} xmm1 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
-; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,3,2,3]
-; AVX1-NEXT: vpshuflw {{.*#+}} xmm1 = xmm1[3,0,0,0,4,5,6,7]
-; AVX1-NEXT: vpshuflw {{.*#+}} xmm2 = xmm0[0,0,0,0,4,5,6,7]
-; AVX1-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm2[0],xmm1[0]
+; AVX1-NEXT: vpshufb {{.*#+}} xmm1 = xmm1[0,1,0,1,0,1,0,1,14,15,0,1,0,1,0,1]
; AVX1-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1]
; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
; AVX1-NEXT: retq
@@ -235,10 +222,8 @@ define <16 x i16> @shuffle_v16i16_00_00_00_00_11_00_00_00_00_00_00_00_00_00_00_0
; AVX2-LABEL: shuffle_v16i16_00_00_00_00_11_00_00_00_00_00_00_00_00_00_00_00:
; AVX2: # BB#0:
; AVX2-NEXT: vperm2i128 {{.*#+}} ymm1 = ymm0[2,3,0,1]
-; AVX2-NEXT: vpshufb {{.*#+}} ymm1 = ymm1[u,u,u,u,u,u,u,u,6,7,u,u,u,u,u,u,16,17,16,17,16,17,16,17,16,17,16,17,16,17,16,17]
-; AVX2-NEXT: vpbroadcastw %xmm0, %ymm0
-; AVX2-NEXT: vmovdqa {{.*#+}} ymm2 = [0,0,0,0,0,0,0,0,255,255,0,0,0,0,0,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255]
-; AVX2-NEXT: vpblendvb %ymm2, %ymm1, %ymm0, %ymm0
+; AVX2-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0],ymm1[1,2,3,4,5,6,7]
+; AVX2-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,0,1,0,1,0,1,6,7,0,1,0,1,0,1,16,17,16,17,16,17,16,17,16,17,16,17,16,17,16,17]
; AVX2-NEXT: retq
%shuffle = shufflevector <16 x i16> %a, <16 x i16> %b, <16 x i32> <i32 0, i32 0, i32 0, i32 0, i32 11, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0>
ret <16 x i16> %shuffle
@@ -248,11 +233,8 @@ define <16 x i16> @shuffle_v16i16_00_00_00_12_00_00_00_00_00_00_00_00_00_00_00_0
; AVX1-LABEL: shuffle_v16i16_00_00_00_12_00_00_00_00_00_00_00_00_00_00_00_00:
; AVX1: # BB#0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
-; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[2,3,0,1]
-; AVX1-NEXT: vpunpcklwd {{.*#+}} xmm1 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
-; AVX1-NEXT: vpshuflw {{.*#+}} xmm1 = xmm1[0,0,0,1,4,5,6,7]
-; AVX1-NEXT: vpshuflw {{.*#+}} xmm2 = xmm0[0,0,0,0,4,5,6,7]
-; AVX1-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm2[0]
+; AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm0[0,1,2,3],xmm1[4,5,6,7]
+; AVX1-NEXT: vpshufb {{.*#+}} xmm1 = xmm1[0,1,0,1,0,1,8,9,0,1,0,1,0,1,0,1]
; AVX1-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1]
; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
; AVX1-NEXT: retq
@@ -260,10 +242,8 @@ define <16 x i16> @shuffle_v16i16_00_00_00_12_00_00_00_00_00_00_00_00_00_00_00_0
; AVX2-LABEL: shuffle_v16i16_00_00_00_12_00_00_00_00_00_00_00_00_00_00_00_00:
; AVX2: # BB#0:
; AVX2-NEXT: vperm2i128 {{.*#+}} ymm1 = ymm0[2,3,0,1]
-; AVX2-NEXT: vpshufb {{.*#+}} ymm1 = ymm1[u,u,u,u,u,u,8,9,u,u,u,u,u,u,u,u,16,17,16,17,16,17,16,17,16,17,16,17,16,17,16,17]
-; AVX2-NEXT: vpbroadcastw %xmm0, %ymm0
-; AVX2-NEXT: vmovdqa {{.*#+}} ymm2 = [0,0,0,0,0,0,255,255,0,0,0,0,0,0,0,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255]
-; AVX2-NEXT: vpblendvb %ymm2, %ymm1, %ymm0, %ymm0
+; AVX2-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3,4,5,6,7]
+; AVX2-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,0,1,0,1,8,9,0,1,0,1,0,1,0,1,16,17,16,17,16,17,16,17,16,17,16,17,16,17,16,17]
; AVX2-NEXT: retq
%shuffle = shufflevector <16 x i16> %a, <16 x i16> %b, <16 x i32> <i32 0, i32 0, i32 0, i32 12, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0>
ret <16 x i16> %shuffle
@@ -273,11 +253,8 @@ define <16 x i16> @shuffle_v16i16_00_00_13_00_00_00_00_00_00_00_00_00_00_00_00_0
; AVX1-LABEL: shuffle_v16i16_00_00_13_00_00_00_00_00_00_00_00_00_00_00_00_00:
; AVX1: # BB#0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
-; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[2,3,0,1]
-; AVX1-NEXT: vpunpcklwd {{.*#+}} xmm1 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
-; AVX1-NEXT: vpshuflw {{.*#+}} xmm1 = xmm1[0,0,3,0,4,5,6,7]
-; AVX1-NEXT: vpshuflw {{.*#+}} xmm2 = xmm0[0,0,0,0,4,5,6,7]
-; AVX1-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm2[0]
+; AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm0[0,1,2,3],xmm1[4,5,6,7]
+; AVX1-NEXT: vpshufb {{.*#+}} xmm1 = xmm1[0,1,0,1,10,11,0,1,0,1,0,1,0,1,0,1]
; AVX1-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1]
; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
; AVX1-NEXT: retq
@@ -285,10 +262,8 @@ define <16 x i16> @shuffle_v16i16_00_00_13_00_00_00_00_00_00_00_00_00_00_00_00_0
; AVX2-LABEL: shuffle_v16i16_00_00_13_00_00_00_00_00_00_00_00_00_00_00_00_00:
; AVX2: # BB#0:
; AVX2-NEXT: vperm2i128 {{.*#+}} ymm1 = ymm0[2,3,0,1]
-; AVX2-NEXT: vpshufb {{.*#+}} ymm1 = ymm1[u,u,u,u,10,11,u,u,u,u,u,u,u,u,u,u,16,17,16,17,16,17,16,17,16,17,16,17,16,17,16,17]
-; AVX2-NEXT: vpbroadcastw %xmm0, %ymm0
-; AVX2-NEXT: vmovdqa {{.*#+}} ymm2 = [0,0,0,0,255,255,0,0,0,0,0,0,0,0,0,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255]
-; AVX2-NEXT: vpblendvb %ymm2, %ymm1, %ymm0, %ymm0
+; AVX2-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3,4,5,6,7]
+; AVX2-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,0,1,10,11,0,1,0,1,0,1,0,1,0,1,16,17,16,17,16,17,16,17,16,17,16,17,16,17,16,17]
; AVX2-NEXT: retq
%shuffle = shufflevector <16 x i16> %a, <16 x i16> %b, <16 x i32> <i32 0, i32 0, i32 13, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0>
ret <16 x i16> %shuffle
@@ -298,12 +273,8 @@ define <16 x i16> @shuffle_v16i16_00_14_00_00_00_00_00_00_00_00_00_00_00_00_00_0
; AVX1-LABEL: shuffle_v16i16_00_14_00_00_00_00_00_00_00_00_00_00_00_00_00_00:
; AVX1: # BB#0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
-; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[2,3,0,1]
-; AVX1-NEXT: vpunpcklwd {{.*#+}} xmm1 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
-; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
-; AVX1-NEXT: vpshuflw {{.*#+}} xmm1 = xmm1[0,3,0,0,4,5,6,7]
-; AVX1-NEXT: vpshuflw {{.*#+}} xmm2 = xmm0[0,0,0,0,4,5,6,7]
-; AVX1-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm2[0]
+; AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm0[0,1,2,3],xmm1[4,5,6,7]
+; AVX1-NEXT: vpshufb {{.*#+}} xmm1 = xmm1[0,1,12,13,0,1,0,1,0,1,0,1,0,1,0,1]
; AVX1-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1]
; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
; AVX1-NEXT: retq
@@ -311,10 +282,8 @@ define <16 x i16> @shuffle_v16i16_00_14_00_00_00_00_00_00_00_00_00_00_00_00_00_0
; AVX2-LABEL: shuffle_v16i16_00_14_00_00_00_00_00_00_00_00_00_00_00_00_00_00:
; AVX2: # BB#0:
; AVX2-NEXT: vperm2i128 {{.*#+}} ymm1 = ymm0[2,3,0,1]
-; AVX2-NEXT: vpshufb {{.*#+}} ymm1 = ymm1[u,u,12,13,u,u,u,u,u,u,u,u,u,u,u,u,16,17,16,17,16,17,16,17,16,17,16,17,16,17,16,17]
-; AVX2-NEXT: vpbroadcastw %xmm0, %ymm0
-; AVX2-NEXT: vmovdqa {{.*#+}} ymm2 = [0,0,255,255,0,0,0,0,0,0,0,0,0,0,0,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255]
-; AVX2-NEXT: vpblendvb %ymm2, %ymm1, %ymm0, %ymm0
+; AVX2-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3,4,5,6,7]
+; AVX2-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,12,13,0,1,0,1,0,1,0,1,0,1,0,1,16,17,16,17,16,17,16,17,16,17,16,17,16,17,16,17]
; AVX2-NEXT: retq
%shuffle = shufflevector <16 x i16> %a, <16 x i16> %b, <16 x i32> <i32 0, i32 14, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0>
ret <16 x i16> %shuffle
@@ -324,12 +293,8 @@ define <16 x i16> @shuffle_v16i16_15_00_00_00_00_00_00_00_00_00_00_00_00_00_00_0
; AVX1-LABEL: shuffle_v16i16_15_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00:
; AVX1: # BB#0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
-; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[2,3,0,1]
-; AVX1-NEXT: vpunpcklwd {{.*#+}} xmm1 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
-; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,3,2,3]
-; AVX1-NEXT: vpshuflw {{.*#+}} xmm1 = xmm1[3,0,0,0,4,5,6,7]
-; AVX1-NEXT: vpshuflw {{.*#+}} xmm2 = xmm0[0,0,0,0,4,5,6,7]
-; AVX1-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm2[0]
+; AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm0[0,1,2,3],xmm1[4,5,6,7]
+; AVX1-NEXT: vpshufb {{.*#+}} xmm1 = xmm1[14,15,0,1,0,1,0,1,0,1,0,1,0,1,0,1]
; AVX1-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1]
; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
; AVX1-NEXT: retq
@@ -337,10 +302,8 @@ define <16 x i16> @shuffle_v16i16_15_00_00_00_00_00_00_00_00_00_00_00_00_00_00_0
; AVX2-LABEL: shuffle_v16i16_15_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00:
; AVX2: # BB#0:
; AVX2-NEXT: vperm2i128 {{.*#+}} ymm1 = ymm0[2,3,0,1]
-; AVX2-NEXT: vpshufb {{.*#+}} ymm1 = ymm1[14,15,u,u,u,u,u,u,u,u,u,u,u,u,u,u,16,17,16,17,16,17,16,17,16,17,16,17,16,17,16,17]
-; AVX2-NEXT: vpbroadcastw %xmm0, %ymm0
-; AVX2-NEXT: vmovdqa {{.*#+}} ymm2 = [255,255,0,0,0,0,0,0,0,0,0,0,0,0,0,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255]
-; AVX2-NEXT: vpblendvb %ymm2, %ymm1, %ymm0, %ymm0
+; AVX2-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3,4,5,6,7]
+; AVX2-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[14,15,0,1,0,1,0,1,0,1,0,1,0,1,0,1,16,17,16,17,16,17,16,17,16,17,16,17,16,17,16,17]
; AVX2-NEXT: retq
%shuffle = shufflevector <16 x i16> %a, <16 x i16> %b, <16 x i32> <i32 15, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0>
ret <16 x i16> %shuffle
@@ -724,18 +687,16 @@ define <16 x i16> @shuffle_v16i16_00_01_18_19_20_21_06_07_08_09_26_27_12_13_30_3
define <16 x i16> @shuffle_v16i16_00_16_00_16_00_16_00_16_00_16_00_16_00_16_00_16(<16 x i16> %a, <16 x i16> %b) {
; AVX1-LABEL: shuffle_v16i16_00_16_00_16_00_16_00_16_00_16_00_16_00_16_00_16:
; AVX1: # BB#0:
-; AVX1-NEXT: vpshuflw {{.*#+}} xmm1 = xmm1[0,0,0,0,4,5,6,7]
-; AVX1-NEXT: vpshuflw {{.*#+}} xmm0 = xmm0[0,0,0,0,4,5,6,7]
; AVX1-NEXT: vpunpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
+; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,0,0,0]
; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: shuffle_v16i16_00_16_00_16_00_16_00_16_00_16_00_16_00_16_00_16:
; AVX2: # BB#0:
-; AVX2-NEXT: vpbroadcastw %xmm1, %xmm1
-; AVX2-NEXT: vpbroadcastw %xmm0, %xmm0
-; AVX2-NEXT: vpunpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
-; AVX2-NEXT: vinserti128 $1, %xmm0, %ymm0, %ymm0
+; AVX2-NEXT: vpbroadcastw %xmm1, %ymm1
+; AVX2-NEXT: vpbroadcastd %xmm0, %ymm0
+; AVX2-NEXT: vpblendw {{.*#+}} ymm0 = ymm0[0],ymm1[1],ymm0[2],ymm1[3],ymm0[4],ymm1[5],ymm0[6],ymm1[7],ymm0[8],ymm1[9],ymm0[10],ymm1[11],ymm0[12],ymm1[13],ymm0[14],ymm1[15]
; AVX2-NEXT: retq
%shuffle = shufflevector <16 x i16> %a, <16 x i16> %b, <16 x i32> <i32 0, i32 16, i32 0, i32 16, i32 0, i32 16, i32 0, i32 16, i32 0, i32 16, i32 0, i32 16, i32 0, i32 16, i32 0, i32 16>
ret <16 x i16> %shuffle
@@ -744,15 +705,13 @@ define <16 x i16> @shuffle_v16i16_00_16_00_16_00_16_00_16_00_16_00_16_00_16_00_1
define <16 x i16> @shuffle_v16i16_00_16_00_16_00_16_00_16_08_24_08_24_08_24_08_24(<16 x i16> %a, <16 x i16> %b) {
; AVX1-LABEL: shuffle_v16i16_00_16_00_16_00_16_00_16_08_24_08_24_08_24_08_24:
; AVX1: # BB#0:
-; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
-; AVX1-NEXT: vpshuflw {{.*#+}} xmm2 = xmm2[0,0,0,0,4,5,6,7]
-; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
-; AVX1-NEXT: vpshuflw {{.*#+}} xmm3 = xmm3[0,0,0,0,4,5,6,7]
-; AVX1-NEXT: vpunpcklwd {{.*#+}} xmm2 = xmm3[0],xmm2[0],xmm3[1],xmm2[1],xmm3[2],xmm2[2],xmm3[3],xmm2[3]
-; AVX1-NEXT: vpshuflw {{.*#+}} xmm1 = xmm1[0,0,0,0,4,5,6,7]
-; AVX1-NEXT: vpshuflw {{.*#+}} xmm0 = xmm0[0,0,0,0,4,5,6,7]
+; AVX1-NEXT: vpunpcklwd {{.*#+}} xmm2 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
+; AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm2[0,0,0,0]
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm1
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
; AVX1-NEXT: vpunpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
-; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,0,0,0]
+; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm2, %ymm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: shuffle_v16i16_00_16_00_16_00_16_00_16_08_24_08_24_08_24_08_24:
@@ -806,9 +765,8 @@ define <16 x i16> @shuffle_v16i16_19_18_17_16_07_06_05_04_27_26_25_24_15_14_13_1
;
; AVX2-LABEL: shuffle_v16i16_19_18_17_16_07_06_05_04_27_26_25_24_15_14_13_12:
; AVX2: # BB#0:
-; AVX2-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[u,u,u,u,u,u,u,u,14,15,12,13,10,11,8,9,u,u,u,u,u,u,u,u,30,31,28,29,26,27,24,25]
-; AVX2-NEXT: vpshufb {{.*#+}} ymm1 = ymm1[6,7,4,5,2,3,0,1,u,u,u,u,u,u,u,u,22,23,20,21,18,19,16,17,u,u,u,u,u,u,u,u]
; AVX2-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3],ymm1[4,5],ymm0[6,7]
+; AVX2-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[6,7,4,5,2,3,0,1,14,15,12,13,10,11,8,9,22,23,20,21,18,19,16,17,30,31,28,29,26,27,24,25]
; AVX2-NEXT: retq
%shuffle = shufflevector <16 x i16> %a, <16 x i16> %b, <16 x i32> <i32 19, i32 18, i32 17, i32 16, i32 7, i32 6, i32 5, i32 4, i32 27, i32 26, i32 25, i32 24, i32 15, i32 14, i32 13, i32 12>
ret <16 x i16> %shuffle
@@ -818,13 +776,12 @@ define <16 x i16> @shuffle_v16i16_19_18_17_16_03_02_01_00_27_26_25_24_11_10_09_0
; AVX1-LABEL: shuffle_v16i16_19_18_17_16_03_02_01_00_27_26_25_24_11_10_09_08:
; AVX1: # BB#0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
-; AVX1-NEXT: vpshuflw {{.*#+}} xmm2 = xmm2[3,2,1,0,4,5,6,7]
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm3
-; AVX1-NEXT: vpshuflw {{.*#+}} xmm3 = xmm3[3,2,1,0,4,5,6,7]
-; AVX1-NEXT: vpunpcklqdq {{.*#+}} xmm2 = xmm3[0],xmm2[0]
-; AVX1-NEXT: vpshuflw {{.*#+}} xmm0 = xmm0[3,2,1,0,4,5,6,7]
-; AVX1-NEXT: vpshuflw {{.*#+}} xmm1 = xmm1[3,2,1,0,4,5,6,7]
-; AVX1-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0]
+; AVX1-NEXT: vpunpcklwd {{.*#+}} xmm2 = xmm3[0],xmm2[0],xmm3[1],xmm2[1],xmm3[2],xmm2[2],xmm3[3],xmm2[3]
+; AVX1-NEXT: vmovdqa {{.*#+}} xmm3 = [12,13,8,9,4,5,0,1,14,15,10,11,6,7,2,3]
+; AVX1-NEXT: vpshufb %xmm3, %xmm2, %xmm2
+; AVX1-NEXT: vpunpcklwd {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
+; AVX1-NEXT: vpshufb %xmm3, %xmm0, %xmm0
; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
; AVX1-NEXT: retq
;
@@ -1265,3 +1222,347 @@ define <16 x i16> @shuffle_v16i16_04_04_04_04_uu_uu_uu_uu_08_08_08_uu_uu_12_12_1
%shuffle = shufflevector <16 x i16> %a, <16 x i16> %b, <16 x i32> <i32 4, i32 4, i32 4, i32 4, i32 undef, i32 undef, i32 undef, i32 undef, i32 8, i32 8, i32 8, i32 undef, i32 undef, i32 12, i32 12, i32 12>
ret <16 x i16> %shuffle
}
+
+define <16 x i16> @shuffle_v16i16_00_00_00_00_04_04_04_04_16_16_16_16_20_20_20_20(<16 x i16> %a, <16 x i16> %b) {
+; AVX1-LABEL: shuffle_v16i16_00_00_00_00_04_04_04_04_16_16_16_16_20_20_20_20:
+; AVX1: # BB#0:
+; AVX1-NEXT: vpshuflw {{.*#+}} xmm0 = xmm0[0,0,0,0,4,5,6,7]
+; AVX1-NEXT: vpshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,4,4,4]
+; AVX1-NEXT: vpshuflw {{.*#+}} xmm1 = xmm1[0,0,0,0,4,5,6,7]
+; AVX1-NEXT: vpshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,4,4,4]
+; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: shuffle_v16i16_00_00_00_00_04_04_04_04_16_16_16_16_20_20_20_20:
+; AVX2: # BB#0:
+; AVX2-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
+; AVX2-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,0,1,0,1,0,1,8,9,8,9,8,9,8,9,16,17,16,17,16,17,16,17,24,25,24,25,24,25,24,25]
+; AVX2-NEXT: retq
+ %shuffle = shufflevector <16 x i16> %a, <16 x i16> %b, <16 x i32> <i32 0, i32 0, i32 0, i32 0, i32 4, i32 4, i32 4, i32 4, i32 16, i32 16, i32 16, i32 16, i32 20, i32 20, i32 20, i32 20>
+ ret <16 x i16> %shuffle
+}
+
+define <16 x i16> @shuffle_v16i16_08_08_08_08_12_12_12_12_16_16_16_16_20_20_20_20(<16 x i16> %a, <16 x i16> %b) {
+; AVX1-LABEL: shuffle_v16i16_08_08_08_08_12_12_12_12_16_16_16_16_20_20_20_20:
+; AVX1: # BB#0:
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
+; AVX1-NEXT: vpshuflw {{.*#+}} xmm0 = xmm0[0,0,0,0,4,5,6,7]
+; AVX1-NEXT: vpshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,4,4,4]
+; AVX1-NEXT: vpshuflw {{.*#+}} xmm1 = xmm1[0,0,0,0,4,5,6,7]
+; AVX1-NEXT: vpshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,4,4,4]
+; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: shuffle_v16i16_08_08_08_08_12_12_12_12_16_16_16_16_20_20_20_20:
+; AVX2: # BB#0:
+; AVX2-NEXT: vperm2i128 {{.*#+}} ymm0 = ymm0[2,3],ymm1[0,1]
+; AVX2-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,0,1,0,1,0,1,8,9,8,9,8,9,8,9,16,17,16,17,16,17,16,17,24,25,24,25,24,25,24,25]
+; AVX2-NEXT: retq
+ %shuffle = shufflevector <16 x i16> %a, <16 x i16> %b, <16 x i32> <i32 8, i32 8, i32 8, i32 8, i32 12, i32 12, i32 12, i32 12, i32 16, i32 16, i32 16, i32 16, i32 20, i32 20, i32 20, i32 20>
+ ret <16 x i16> %shuffle
+}
+
+define <16 x i16> @shuffle_v16i16_08_08_08_08_12_12_12_12_24_24_24_24_28_28_28_28(<16 x i16> %a, <16 x i16> %b) {
+; AVX1-LABEL: shuffle_v16i16_08_08_08_08_12_12_12_12_24_24_24_24_28_28_28_28:
+; AVX1: # BB#0:
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
+; AVX1-NEXT: vpshuflw {{.*#+}} xmm0 = xmm0[0,0,0,0,4,5,6,7]
+; AVX1-NEXT: vpshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,4,4,4]
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm1
+; AVX1-NEXT: vpshuflw {{.*#+}} xmm1 = xmm1[0,0,0,0,4,5,6,7]
+; AVX1-NEXT: vpshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,4,4,4]
+; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: shuffle_v16i16_08_08_08_08_12_12_12_12_24_24_24_24_28_28_28_28:
+; AVX2: # BB#0:
+; AVX2-NEXT: vperm2i128 {{.*#+}} ymm0 = ymm0[2,3],ymm1[2,3]
+; AVX2-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,0,1,0,1,0,1,8,9,8,9,8,9,8,9,16,17,16,17,16,17,16,17,24,25,24,25,24,25,24,25]
+; AVX2-NEXT: retq
+ %shuffle = shufflevector <16 x i16> %a, <16 x i16> %b, <16 x i32> <i32 8, i32 8, i32 8, i32 8, i32 12, i32 12, i32 12, i32 12, i32 24, i32 24, i32 24, i32 24, i32 28, i32 28, i32 28, i32 28>
+ ret <16 x i16> %shuffle
+}
+
+define <16 x i16> @shuffle_v16i16_00_00_00_00_04_04_04_04_24_24_24_24_28_28_28_28(<16 x i16> %a, <16 x i16> %b) {
+; AVX1-LABEL: shuffle_v16i16_00_00_00_00_04_04_04_04_24_24_24_24_28_28_28_28:
+; AVX1: # BB#0:
+; AVX1-NEXT: vpshuflw {{.*#+}} xmm0 = xmm0[0,0,0,0,4,5,6,7]
+; AVX1-NEXT: vpshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,4,4,4]
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm1
+; AVX1-NEXT: vpshuflw {{.*#+}} xmm1 = xmm1[0,0,0,0,4,5,6,7]
+; AVX1-NEXT: vpshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,4,4,4]
+; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: shuffle_v16i16_00_00_00_00_04_04_04_04_24_24_24_24_28_28_28_28:
+; AVX2: # BB#0:
+; AVX2-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
+; AVX2-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,0,1,0,1,0,1,8,9,8,9,8,9,8,9,16,17,16,17,16,17,16,17,24,25,24,25,24,25,24,25]
+; AVX2-NEXT: retq
+ %shuffle = shufflevector <16 x i16> %a, <16 x i16> %b, <16 x i32> <i32 0, i32 0, i32 0, i32 0, i32 4, i32 4, i32 4, i32 4, i32 24, i32 24, i32 24, i32 24, i32 28, i32 28, i32 28, i32 28>
+ ret <16 x i16> %shuffle
+}
+
+define <16 x i16> @shuffle_v16i16_00_16_01_17_02_18_03_19_04_20_05_21_06_22_07_23(<16 x i16> %a, <16 x i16> %b) {
+; AVX1-LABEL: shuffle_v16i16_00_16_01_17_02_18_03_19_04_20_05_21_06_22_07_23:
+; AVX1: # BB#0:
+; AVX1-NEXT: vpunpckhwd {{.*#+}} xmm2 = xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
+; AVX1-NEXT: vpunpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
+; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: shuffle_v16i16_00_16_01_17_02_18_03_19_04_20_05_21_06_22_07_23:
+; AVX2: # BB#0:
+; AVX2-NEXT: vpunpckhwd {{.*#+}} xmm2 = xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
+; AVX2-NEXT: vpunpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
+; AVX2-NEXT: vinserti128 $1, %xmm2, %ymm0, %ymm0
+; AVX2-NEXT: retq
+ %shuffle = shufflevector <16 x i16> %a, <16 x i16> %b, <16 x i32> <i32 0, i32 16, i32 1, i32 17, i32 2, i32 18, i32 3, i32 19, i32 4, i32 20, i32 5, i32 21, i32 6, i32 22, i32 7, i32 23>
+ ret <16 x i16> %shuffle
+}
+
+define <16 x i16> @shuffle_v16i16_zz_zz_zz_zz_zz_zz_zz_16_zz_zz_zz_zz_zz_zz_zz_24(<16 x i16> %a) {
+; AVX1-LABEL: shuffle_v16i16_zz_zz_zz_zz_zz_zz_zz_16_zz_zz_zz_zz_zz_zz_zz_24:
+; AVX1: # BB#0:
+; AVX1-NEXT: vpslldq {{.*#+}} xmm1 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,xmm0[0,1]
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
+; AVX1-NEXT: vpslldq {{.*#+}} xmm0 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,xmm0[0,1]
+; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: shuffle_v16i16_zz_zz_zz_zz_zz_zz_zz_16_zz_zz_zz_zz_zz_zz_zz_24:
+; AVX2: # BB#0:
+; AVX2-NEXT: vpslldq {{.*#+}} ymm0 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm0[0,1],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm0[16,17]
+; AVX2-NEXT: retq
+ %shuffle = shufflevector <16 x i16> zeroinitializer, <16 x i16> %a, <16 x i32> <i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 16, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 24>
+ ret <16 x i16> %shuffle
+}
+
+define <16 x i16> @shuffle_v16i16_17_18_19_20_21_22_23_zz_25_26_27_28_29_30_31_zz(<16 x i16> %a) {
+; AVX1-LABEL: shuffle_v16i16_17_18_19_20_21_22_23_zz_25_26_27_28_29_30_31_zz:
+; AVX1: # BB#0:
+; AVX1-NEXT: vpsrldq {{.*#+}} xmm1 = xmm0[2,3,4,5,6,7,8,9,10,11,12,13,14,15],zero,zero
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
+; AVX1-NEXT: vpsrldq {{.*#+}} xmm0 = xmm0[2,3,4,5,6,7,8,9,10,11,12,13,14,15],zero,zero
+; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: shuffle_v16i16_17_18_19_20_21_22_23_zz_25_26_27_28_29_30_31_zz:
+; AVX2: # BB#0:
+; AVX2-NEXT: vpsrldq {{.*#+}} ymm0 = ymm0[2,3,4,5,6,7,8,9,10,11,12,13,14,15],zero,zero,ymm0[18,19,20,21,22,23,24,25,26,27,28,29,30,31],zero,zero
+; AVX2-NEXT: retq
+ %shuffle = shufflevector <16 x i16> zeroinitializer, <16 x i16> %a, <16 x i32> <i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 0, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 0>
+ ret <16 x i16> %shuffle
+}
+
+;
+; Shuffle to logical bit shifts
+;
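A shuffle whose second operand is zeroinitializer and whose mask drops zeros into a fixed position of each wider lane is, viewed per lane, just a logical shift, which is why the cases below collapse to single vpslld / vpsllq / vpsrld / vpsrlq instructions. A minimal 128-bit sketch of the same idea, mirroring the first v16i16 case below (the function name is illustrative, not taken from the patch):

; Result is <zz,a0,zz,a2,zz,a4,zz,a6>; each i32 lane then holds a[2k] << 16,
; so this shuffle can lower to a single pslld $16.
define <8 x i16> @sketch_zz_00_zz_02_zz_04_zz_06(<8 x i16> %a) {
  %s = shufflevector <8 x i16> %a, <8 x i16> zeroinitializer,
                     <8 x i32> <i32 8, i32 0, i32 8, i32 2, i32 8, i32 4, i32 8, i32 6>
  ret <8 x i16> %s
}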
+
+define <16 x i16> @shuffle_v16i16_zz_00_zz_02_zz_04_zz_06_zz_08_zz_10_zz_12_zz_14(<16 x i16> %a) {
+; AVX1-LABEL: shuffle_v16i16_zz_00_zz_02_zz_04_zz_06_zz_08_zz_10_zz_12_zz_14:
+; AVX1: # BB#0:
+; AVX1-NEXT: vpslld $16, %xmm0, %xmm1
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
+; AVX1-NEXT: vpslld $16, %xmm0, %xmm0
+; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: shuffle_v16i16_zz_00_zz_02_zz_04_zz_06_zz_08_zz_10_zz_12_zz_14:
+; AVX2: # BB#0:
+; AVX2-NEXT: vpslld $16, %ymm0, %ymm0
+; AVX2-NEXT: retq
+ %shuffle = shufflevector <16 x i16> %a, <16 x i16> zeroinitializer, <16 x i32> <i32 16, i32 0, i32 16, i32 2, i32 16, i32 4, i32 16, i32 6, i32 16, i32 8, i32 16, i32 10, i32 16, i32 12, i32 16, i32 14>
+ ret <16 x i16> %shuffle
+}
+
+define <16 x i16> @shuffle_v16i16_zz_zz_zz_00_zz_zz_zz_04_zz_zz_zz_08_zz_zz_zz_12(<16 x i16> %a) {
+; AVX1-LABEL: shuffle_v16i16_zz_zz_zz_00_zz_zz_zz_04_zz_zz_zz_08_zz_zz_zz_12:
+; AVX1: # BB#0:
+; AVX1-NEXT: vpsllq $48, %xmm0, %xmm1
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
+; AVX1-NEXT: vpsllq $48, %xmm0, %xmm0
+; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: shuffle_v16i16_zz_zz_zz_00_zz_zz_zz_04_zz_zz_zz_08_zz_zz_zz_12:
+; AVX2: # BB#0:
+; AVX2-NEXT: vpsllq $48, %ymm0, %ymm0
+; AVX2-NEXT: retq
+ %shuffle = shufflevector <16 x i16> %a, <16 x i16> zeroinitializer, <16 x i32> <i32 16, i32 16, i32 16, i32 0, i32 16, i32 16, i32 16, i32 4, i32 16, i32 16, i32 16, i32 8, i32 16, i32 16, i32 16, i32 12>
+ ret <16 x i16> %shuffle
+}
+
+define <16 x i16> @shuffle_v16i16_01_zz_03_zz_05_zz_07_zz_09_zz_11_zz_13_zz_15_zz(<16 x i16> %a) {
+; AVX1-LABEL: shuffle_v16i16_01_zz_03_zz_05_zz_07_zz_09_zz_11_zz_13_zz_15_zz:
+; AVX1: # BB#0:
+; AVX1-NEXT: vpsrld $16, %xmm0, %xmm1
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
+; AVX1-NEXT: vpsrld $16, %xmm0, %xmm0
+; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: shuffle_v16i16_01_zz_03_zz_05_zz_07_zz_09_zz_11_zz_13_zz_15_zz:
+; AVX2: # BB#0:
+; AVX2-NEXT: vpsrld $16, %ymm0, %ymm0
+; AVX2-NEXT: retq
+ %shuffle = shufflevector <16 x i16> %a, <16 x i16> zeroinitializer, <16 x i32> <i32 1, i32 16, i32 3, i32 16, i32 5, i32 16, i32 7, i32 16, i32 9, i32 16, i32 11, i32 16, i32 13, i32 16, i32 15, i32 16>
+ ret <16 x i16> %shuffle
+}
+
+define <16 x i16> @shuffle_v16i16_02_03_zz_zz_06_07_zz_zz_10_11_zz_zz_14_15_zz_zz(<16 x i16> %a) {
+; AVX1-LABEL: shuffle_v16i16_02_03_zz_zz_06_07_zz_zz_10_11_zz_zz_14_15_zz_zz:
+; AVX1: # BB#0:
+; AVX1-NEXT: vxorps %ymm1, %ymm1, %ymm1
+; AVX1-NEXT: vshufps {{.*#+}} ymm0 = ymm0[1,3],ymm1[1,3],ymm0[5,7],ymm1[5,7]
+; AVX1-NEXT: vshufps {{.*#+}} ymm0 = ymm0[0,2,1,3,4,6,5,7]
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: shuffle_v16i16_02_03_zz_zz_06_07_zz_zz_10_11_zz_zz_14_15_zz_zz:
+; AVX2: # BB#0:
+; AVX2-NEXT: vpsrlq $32, %ymm0, %ymm0
+; AVX2-NEXT: retq
+ %shuffle = shufflevector <16 x i16> %a, <16 x i16> zeroinitializer, <16 x i32> <i32 2, i32 3, i32 16, i32 16, i32 6, i32 7, i32 16, i32 16, i32 10, i32 11, i32 16, i32 16, i32 14, i32 15, i32 16, i32 16>
+ ret <16 x i16> %shuffle
+}
+
+define <16 x i16> @shuffle_v16i16_16_zz_zz_zz_17_zz_zz_zz_18_zz_zz_zz_19_zz_zz_zz(<16 x i16> %a) {
+; AVX1-LABEL: shuffle_v16i16_16_zz_zz_zz_17_zz_zz_zz_18_zz_zz_zz_19_zz_zz_zz:
+; AVX1: # BB#0:
+; AVX1-NEXT: vpshufb {{.*#+}} xmm1 = xmm0[4,5,2,3,4,5,6,7,6,7,10,11,4,5,6,7]
+; AVX1-NEXT: vpxor %xmm2, %xmm2, %xmm2
+; AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0],xmm2[1,2,3],xmm1[4],xmm2[5,6,7]
+; AVX1-NEXT: vpmovzxwq {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero
+; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: shuffle_v16i16_16_zz_zz_zz_17_zz_zz_zz_18_zz_zz_zz_19_zz_zz_zz:
+; AVX2: # BB#0:
+; AVX2-NEXT: vpmovzxwq {{.*#+}} ymm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
+; AVX2-NEXT: retq
+ %shuffle = shufflevector <16 x i16> zeroinitializer, <16 x i16> %a, <16 x i32> <i32 16, i32 0, i32 0, i32 0, i32 17, i32 0, i32 0, i32 0, i32 18, i32 0, i32 0, i32 0, i32 19, i32 0, i32 0, i32 0>
+ ret <16 x i16> %shuffle
+}
+
+define <16 x i16> @shuffle_v16i16_16_zz_17_zz_18_zz_19_zz_20_zz_21_zz_22_zz_23_zz(<16 x i16> %a) {
+; AVX1-LABEL: shuffle_v16i16_16_zz_17_zz_18_zz_19_zz_20_zz_21_zz_22_zz_23_zz:
+; AVX1: # BB#0:
+; AVX1-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; AVX1-NEXT: vpunpckhwd {{.*#+}} xmm1 = xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
+; AVX1-NEXT: vpmovzxwd {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
+; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: shuffle_v16i16_16_zz_17_zz_18_zz_19_zz_20_zz_21_zz_22_zz_23_zz:
+; AVX2: # BB#0:
+; AVX2-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
+; AVX2-NEXT: retq
+ %shuffle = shufflevector <16 x i16> zeroinitializer, <16 x i16> %a, <16 x i32> <i32 16, i32 0, i32 17, i32 0, i32 18, i32 0, i32 19, i32 0, i32 20, i32 0, i32 21, i32 0, i32 22, i32 0, i32 23, i32 0>
+ ret <16 x i16> %shuffle
+}
+
+define <16 x i16> @shuffle_v16i16_23_00_01_02_03_04_05_06_31_08_09_10_11_12_13_14(<16 x i16> %a, <16 x i16> %b) {
+; AVX1-LABEL: shuffle_v16i16_23_00_01_02_03_04_05_06_31_08_09_10_11_12_13_14:
+; AVX1: # BB#0:
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
+; AVX1-NEXT: vpalignr {{.*#+}} xmm2 = xmm2[14,15],xmm3[0,1,2,3,4,5,6,7,8,9,10,11,12,13]
+; AVX1-NEXT: vpalignr {{.*#+}} xmm0 = xmm1[14,15],xmm0[0,1,2,3,4,5,6,7,8,9,10,11,12,13]
+; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: shuffle_v16i16_23_00_01_02_03_04_05_06_31_08_09_10_11_12_13_14:
+; AVX2: # BB#0:
+; AVX2-NEXT: vpalignr {{.*#+}} ymm0 = ymm1[14,15],ymm0[0,1,2,3,4,5,6,7,8,9,10,11,12,13],ymm1[30,31],ymm0[16,17,18,19,20,21,22,23,24,25,26,27,28,29]
+; AVX2-NEXT: retq
+ %shuffle = shufflevector <16 x i16> %a, <16 x i16> %b, <16 x i32> <i32 23, i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 31, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14>
+ ret <16 x i16> %shuffle
+}
+
+define <16 x i16> @shuffle_v16i16_01_02_03_04_05_06_07_16_09_10_11_12_13_14_15_24(<16 x i16> %a, <16 x i16> %b) {
+; AVX1-LABEL: shuffle_v16i16_01_02_03_04_05_06_07_16_09_10_11_12_13_14_15_24:
+; AVX1: # BB#0:
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm3
+; AVX1-NEXT: vpalignr {{.*#+}} xmm2 = xmm2[2,3,4,5,6,7,8,9,10,11,12,13,14,15],xmm3[0,1]
+; AVX1-NEXT: vpalignr {{.*#+}} xmm0 = xmm0[2,3,4,5,6,7,8,9,10,11,12,13,14,15],xmm1[0,1]
+; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: shuffle_v16i16_01_02_03_04_05_06_07_16_09_10_11_12_13_14_15_24:
+; AVX2: # BB#0:
+; AVX2-NEXT: vpalignr {{.*#+}} ymm0 = ymm0[2,3,4,5,6,7,8,9,10,11,12,13,14,15],ymm1[0,1],ymm0[18,19,20,21,22,23,24,25,26,27,28,29,30,31],ymm1[16,17]
+; AVX2-NEXT: retq
+ %shuffle = shufflevector <16 x i16> %a, <16 x i16> %b, <16 x i32> <i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 16, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 24>
+ ret <16 x i16> %shuffle
+}
+
+define <16 x i16> @shuffle_v16i16_17_18_19_20_21_22_23_00_25_26_27_28_29_30_31_8(<16 x i16> %a, <16 x i16> %b) {
+; AVX1-LABEL: shuffle_v16i16_17_18_19_20_21_22_23_00_25_26_27_28_29_30_31_8:
+; AVX1: # BB#0:
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
+; AVX1-NEXT: vpalignr {{.*#+}} xmm2 = xmm2[2,3,4,5,6,7,8,9,10,11,12,13,14,15],xmm3[0,1]
+; AVX1-NEXT: vpalignr {{.*#+}} xmm0 = xmm1[2,3,4,5,6,7,8,9,10,11,12,13,14,15],xmm0[0,1]
+; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: shuffle_v16i16_17_18_19_20_21_22_23_00_25_26_27_28_29_30_31_8:
+; AVX2: # BB#0:
+; AVX2-NEXT: vpalignr {{.*#+}} ymm0 = ymm1[2,3,4,5,6,7,8,9,10,11,12,13,14,15],ymm0[0,1],ymm1[18,19,20,21,22,23,24,25,26,27,28,29,30,31],ymm0[16,17]
+; AVX2-NEXT: retq
+ %shuffle = shufflevector <16 x i16> %a, <16 x i16> %b, <16 x i32> <i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 00, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 8>
+ ret <16 x i16> %shuffle
+}
+
+define <16 x i16> @shuffle_v16i16_07_16_17_18_19_20_21_22_15_24_25_26_27_28_29_30(<16 x i16> %a, <16 x i16> %b) {
+; AVX1-LABEL: shuffle_v16i16_07_16_17_18_19_20_21_22_15_24_25_26_27_28_29_30:
+; AVX1: # BB#0:
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm3
+; AVX1-NEXT: vpalignr {{.*#+}} xmm2 = xmm2[14,15],xmm3[0,1,2,3,4,5,6,7,8,9,10,11,12,13]
+; AVX1-NEXT: vpalignr {{.*#+}} xmm0 = xmm0[14,15],xmm1[0,1,2,3,4,5,6,7,8,9,10,11,12,13]
+; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: shuffle_v16i16_07_16_17_18_19_20_21_22_15_24_25_26_27_28_29_30:
+; AVX2: # BB#0:
+; AVX2-NEXT: vpalignr {{.*#+}} ymm0 = ymm0[14,15],ymm1[0,1,2,3,4,5,6,7,8,9,10,11,12,13],ymm0[30,31],ymm1[16,17,18,19,20,21,22,23,24,25,26,27,28,29]
+; AVX2-NEXT: retq
+ %shuffle = shufflevector <16 x i16> %a, <16 x i16> %b, <16 x i32> <i32 7, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 15, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30>
+ ret <16 x i16> %shuffle
+}
+
+define <16 x i16> @shuffle_v16i16_01_02_03_04_05_06_07_00_17_18_19_20_21_22_23_16(<16 x i16> %a, <16 x i16> %b) {
+; AVX1-LABEL: shuffle_v16i16_01_02_03_04_05_06_07_00_17_18_19_20_21_22_23_16:
+; AVX1: # BB#0:
+; AVX1-NEXT: vpalignr {{.*#+}} xmm0 = xmm0[2,3,4,5,6,7,8,9,10,11,12,13,14,15,0,1]
+; AVX1-NEXT: vpalignr {{.*#+}} xmm1 = xmm1[2,3,4,5,6,7,8,9,10,11,12,13,14,15,0,1]
+; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: shuffle_v16i16_01_02_03_04_05_06_07_00_17_18_19_20_21_22_23_16:
+; AVX2: # BB#0:
+; AVX2-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
+; AVX2-NEXT: vpalignr {{.*#+}} ymm0 = ymm0[2,3,4,5,6,7,8,9,10,11,12,13,14,15,0,1,18,19,20,21,22,23,24,25,26,27,28,29,30,31,16,17]
+; AVX2-NEXT: retq
+ %shuffle = shufflevector <16 x i16> %a, <16 x i16> %b, <16 x i32> <i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 0, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 16>
+ ret <16 x i16> %shuffle
+}
+
+define <16 x i16> @shuffle_v16i16_07_00_01_02_03_04_05_06_23_16_17_18_19_20_21_22(<16 x i16> %a, <16 x i16> %b) {
+; AVX1-LABEL: shuffle_v16i16_07_00_01_02_03_04_05_06_23_16_17_18_19_20_21_22:
+; AVX1: # BB#0:
+; AVX1-NEXT: vpalignr {{.*#+}} xmm0 = xmm0[14,15,0,1,2,3,4,5,6,7,8,9,10,11,12,13]
+; AVX1-NEXT: vpalignr {{.*#+}} xmm1 = xmm1[14,15,0,1,2,3,4,5,6,7,8,9,10,11,12,13]
+; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: shuffle_v16i16_07_00_01_02_03_04_05_06_23_16_17_18_19_20_21_22:
+; AVX2: # BB#0:
+; AVX2-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
+; AVX2-NEXT: vpalignr {{.*#+}} ymm0 = ymm0[14,15,0,1,2,3,4,5,6,7,8,9,10,11,12,13,30,31,16,17,18,19,20,21,22,23,24,25,26,27,28,29]
+; AVX2-NEXT: retq
+ %shuffle = shufflevector <16 x i16> %a, <16 x i16> %b, <16 x i32> <i32 7, i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 23, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22>
+ ret <16 x i16> %shuffle
+}
diff --git a/test/CodeGen/X86/vector-shuffle-256-v32.ll b/test/CodeGen/X86/vector-shuffle-256-v32.ll
index 79c906b..ed3c666 100644
--- a/test/CodeGen/X86/vector-shuffle-256-v32.ll
+++ b/test/CodeGen/X86/vector-shuffle-256-v32.ll
@@ -1,5 +1,5 @@
-; RUN: llc < %s -mcpu=x86-64 -mattr=+avx -x86-experimental-vector-shuffle-lowering | FileCheck %s --check-prefix=ALL --check-prefix=AVX --check-prefix=AVX1
-; RUN: llc < %s -mcpu=x86-64 -mattr=+avx2 -x86-experimental-vector-shuffle-lowering | FileCheck %s --check-prefix=ALL --check-prefix=AVX --check-prefix=AVX2
+; RUN: llc < %s -mcpu=x86-64 -mattr=+avx | FileCheck %s --check-prefix=ALL --check-prefix=AVX --check-prefix=AVX1
+; RUN: llc < %s -mcpu=x86-64 -mattr=+avx2 | FileCheck %s --check-prefix=ALL --check-prefix=AVX --check-prefix=AVX2
target triple = "x86_64-unknown-unknown"
@@ -314,9 +314,8 @@ define <32 x i8> @shuffle_v32i8_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_16_
; AVX1-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX1-NEXT: vpshufb %xmm1, %xmm0, %xmm1
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
-; AVX1-NEXT: vpshufb {{.*#+}} xmm2 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,xmm2[0]
-; AVX1-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],zero
-; AVX1-NEXT: vpor %xmm2, %xmm0, %xmm0
+; AVX1-NEXT: vpunpcklbw {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3],xmm0[4],xmm2[4],xmm0[5],xmm2[5],xmm0[6],xmm2[6],xmm0[7],xmm2[7]
+; AVX1-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1]
; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; AVX1-NEXT: retq
;
@@ -339,19 +338,17 @@ define <32 x i8> @shuffle_v32i8_00_00_00_00_00_00_00_00_00_00_00_00_00_00_17_00_
; AVX1-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX1-NEXT: vpshufb %xmm1, %xmm0, %xmm1
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
-; AVX1-NEXT: vpshufb {{.*#+}} xmm2 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,xmm2[1],zero
-; AVX1-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,0,0,0,0,0,0,0,0,0,0,0,0,0],zero,xmm0[0]
-; AVX1-NEXT: vpor %xmm2, %xmm0, %xmm0
+; AVX1-NEXT: vpunpcklbw {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3],xmm0[4],xmm2[4],xmm0[5],xmm2[5],xmm0[6],xmm2[6],xmm0[7],xmm2[7]
+; AVX1-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,0,0,0,0,0,0,0,0,0,0,0,0,0,3,0]
; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: shuffle_v32i8_00_00_00_00_00_00_00_00_00_00_00_00_00_00_17_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00:
; AVX2: # BB#0:
; AVX2-NEXT: vperm2i128 {{.*#+}} ymm1 = ymm0[2,3,0,1]
-; AVX2-NEXT: vpshufb {{.*#+}} ymm1 = ymm1[u,u,u,u,u,u,u,u,u,u,u,u,u,u,1,u,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16]
-; AVX2-NEXT: vpbroadcastb %xmm0, %ymm0
-; AVX2-NEXT: vmovdqa {{.*#+}} ymm2 = [0,0,0,0,0,0,0,0,0,0,0,0,0,0,255,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255]
+; AVX2-NEXT: vmovdqa {{.*#+}} ymm2 = <0,255,u,u,u,u,u,u,u,u,u,u,u,u,u,u,255,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u>
; AVX2-NEXT: vpblendvb %ymm2, %ymm1, %ymm0, %ymm0
+; AVX2-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16]
; AVX2-NEXT: retq
%shuffle = shufflevector <32 x i8> %a, <32 x i8> %b, <32 x i32> <i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 17, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0>
ret <32 x i8> %shuffle
@@ -363,19 +360,17 @@ define <32 x i8> @shuffle_v32i8_00_00_00_00_00_00_00_00_00_00_00_00_00_18_00_00_
; AVX1-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX1-NEXT: vpshufb %xmm1, %xmm0, %xmm1
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
-; AVX1-NEXT: vpshufb {{.*#+}} xmm2 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,xmm2[2],zero,zero
-; AVX1-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,0,0,0,0,0,0,0,0,0,0,0,0],zero,xmm0[0,0]
-; AVX1-NEXT: vpor %xmm2, %xmm0, %xmm0
+; AVX1-NEXT: vpunpcklbw {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3],xmm0[4],xmm2[4],xmm0[5],xmm2[5],xmm0[6],xmm2[6],xmm0[7],xmm2[7]
+; AVX1-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,0,0,0,0,0,0,0,0,0,0,0,0,5,0,0]
; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: shuffle_v32i8_00_00_00_00_00_00_00_00_00_00_00_00_00_18_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00:
; AVX2: # BB#0:
; AVX2-NEXT: vperm2i128 {{.*#+}} ymm1 = ymm0[2,3,0,1]
-; AVX2-NEXT: vpshufb {{.*#+}} ymm1 = ymm1[u,u,u,u,u,u,u,u,u,u,u,u,u,2,u,u,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16]
-; AVX2-NEXT: vpbroadcastb %xmm0, %ymm0
-; AVX2-NEXT: vmovdqa {{.*#+}} ymm2 = [0,0,0,0,0,0,0,0,0,0,0,0,0,255,0,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255]
+; AVX2-NEXT: vmovdqa {{.*#+}} ymm2 = <0,0,255,255,u,u,u,u,u,u,u,u,u,u,u,u,255,255,u,u,u,u,u,u,u,u,u,u,u,u,u,u>
; AVX2-NEXT: vpblendvb %ymm2, %ymm1, %ymm0, %ymm0
+; AVX2-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16]
; AVX2-NEXT: retq
%shuffle = shufflevector <32 x i8> %a, <32 x i8> %b, <32 x i32> <i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 18, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0>
ret <32 x i8> %shuffle
@@ -387,19 +382,17 @@ define <32 x i8> @shuffle_v32i8_00_00_00_00_00_00_00_00_00_00_00_00_19_00_00_00_
; AVX1-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX1-NEXT: vpshufb %xmm1, %xmm0, %xmm1
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
-; AVX1-NEXT: vpshufb {{.*#+}} xmm2 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,xmm2[3],zero,zero,zero
-; AVX1-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,0,0,0,0,0,0,0,0,0,0,0],zero,xmm0[0,0,0]
-; AVX1-NEXT: vpor %xmm2, %xmm0, %xmm0
+; AVX1-NEXT: vpunpcklbw {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3],xmm0[4],xmm2[4],xmm0[5],xmm2[5],xmm0[6],xmm2[6],xmm0[7],xmm2[7]
+; AVX1-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,0,0,0,0,0,0,0,0,0,0,0,7,0,0,0]
; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: shuffle_v32i8_00_00_00_00_00_00_00_00_00_00_00_00_19_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00:
; AVX2: # BB#0:
; AVX2-NEXT: vperm2i128 {{.*#+}} ymm1 = ymm0[2,3,0,1]
-; AVX2-NEXT: vpshufb {{.*#+}} ymm1 = ymm1[u,u,u,u,u,u,u,u,u,u,u,u,3,u,u,u,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16]
-; AVX2-NEXT: vpbroadcastb %xmm0, %ymm0
-; AVX2-NEXT: vmovdqa {{.*#+}} ymm2 = [0,0,0,0,0,0,0,0,0,0,0,0,255,0,0,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255]
+; AVX2-NEXT: vmovdqa {{.*#+}} ymm2 = <0,0,255,255,u,u,u,u,u,u,u,u,u,u,u,u,255,255,u,u,u,u,u,u,u,u,u,u,u,u,u,u>
; AVX2-NEXT: vpblendvb %ymm2, %ymm1, %ymm0, %ymm0
+; AVX2-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,0,0,0,0,0,0,0,0,0,0,0,3,0,0,0,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16]
; AVX2-NEXT: retq
%shuffle = shufflevector <32 x i8> %a, <32 x i8> %b, <32 x i32> <i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 19, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0>
ret <32 x i8> %shuffle
@@ -411,19 +404,16 @@ define <32 x i8> @shuffle_v32i8_00_00_00_00_00_00_00_00_00_00_00_20_00_00_00_00_
; AVX1-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX1-NEXT: vpshufb %xmm1, %xmm0, %xmm1
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
-; AVX1-NEXT: vpshufb {{.*#+}} xmm2 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,xmm2[4],zero,zero,zero,zero
-; AVX1-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,0,0,0,0,0,0,0,0,0,0],zero,xmm0[0,0,0,0]
-; AVX1-NEXT: vpor %xmm2, %xmm0, %xmm0
+; AVX1-NEXT: vpunpcklbw {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3],xmm0[4],xmm2[4],xmm0[5],xmm2[5],xmm0[6],xmm2[6],xmm0[7],xmm2[7]
+; AVX1-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,0,0,0,0,0,0,0,0,0,0,9,0,0,0,0]
; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: shuffle_v32i8_00_00_00_00_00_00_00_00_00_00_00_20_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00:
; AVX2: # BB#0:
; AVX2-NEXT: vperm2i128 {{.*#+}} ymm1 = ymm0[2,3,0,1]
-; AVX2-NEXT: vpshufb {{.*#+}} ymm1 = ymm1[u,u,u,u,u,u,u,u,u,u,u,4,u,u,u,u,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16]
-; AVX2-NEXT: vpbroadcastb %xmm0, %ymm0
-; AVX2-NEXT: vmovdqa {{.*#+}} ymm2 = [0,0,0,0,0,0,0,0,0,0,0,255,0,0,0,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255]
-; AVX2-NEXT: vpblendvb %ymm2, %ymm1, %ymm0, %ymm0
+; AVX2-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0],ymm1[1,2,3,4,5,6,7]
+; AVX2-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,0,0,0,0,0,0,0,0,0,0,4,0,0,0,0,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16]
; AVX2-NEXT: retq
%shuffle = shufflevector <32 x i8> %a, <32 x i8> %b, <32 x i32> <i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 20, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0>
ret <32 x i8> %shuffle
@@ -435,19 +425,16 @@ define <32 x i8> @shuffle_v32i8_00_00_00_00_00_00_00_00_00_00_21_00_00_00_00_00_
; AVX1-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX1-NEXT: vpshufb %xmm1, %xmm0, %xmm1
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
-; AVX1-NEXT: vpshufb {{.*#+}} xmm2 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,xmm2[5],zero,zero,zero,zero,zero
-; AVX1-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,0,0,0,0,0,0,0,0,0],zero,xmm0[0,0,0,0,0]
-; AVX1-NEXT: vpor %xmm2, %xmm0, %xmm0
+; AVX1-NEXT: vpunpcklbw {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3],xmm0[4],xmm2[4],xmm0[5],xmm2[5],xmm0[6],xmm2[6],xmm0[7],xmm2[7]
+; AVX1-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,0,0,0,0,0,0,0,0,0,11,0,0,0,0,0]
; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: shuffle_v32i8_00_00_00_00_00_00_00_00_00_00_21_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00:
; AVX2: # BB#0:
; AVX2-NEXT: vperm2i128 {{.*#+}} ymm1 = ymm0[2,3,0,1]
-; AVX2-NEXT: vpshufb {{.*#+}} ymm1 = ymm1[u,u,u,u,u,u,u,u,u,u,5,u,u,u,u,u,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16]
-; AVX2-NEXT: vpbroadcastb %xmm0, %ymm0
-; AVX2-NEXT: vmovdqa {{.*#+}} ymm2 = [0,0,0,0,0,0,0,0,0,0,255,0,0,0,0,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255]
-; AVX2-NEXT: vpblendvb %ymm2, %ymm1, %ymm0, %ymm0
+; AVX2-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0],ymm1[1,2,3,4,5,6,7]
+; AVX2-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,0,0,0,0,0,0,0,0,0,5,0,0,0,0,0,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16]
; AVX2-NEXT: retq
%shuffle = shufflevector <32 x i8> %a, <32 x i8> %b, <32 x i32> <i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 21, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0>
ret <32 x i8> %shuffle
@@ -459,19 +446,16 @@ define <32 x i8> @shuffle_v32i8_00_00_00_00_00_00_00_00_00_22_00_00_00_00_00_00_
; AVX1-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX1-NEXT: vpshufb %xmm1, %xmm0, %xmm1
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
-; AVX1-NEXT: vpshufb {{.*#+}} xmm2 = zero,zero,zero,zero,zero,zero,zero,zero,zero,xmm2[6],zero,zero,zero,zero,zero,zero
-; AVX1-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,0,0,0,0,0,0,0,0],zero,xmm0[0,0,0,0,0,0]
-; AVX1-NEXT: vpor %xmm2, %xmm0, %xmm0
+; AVX1-NEXT: vpunpcklbw {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3],xmm0[4],xmm2[4],xmm0[5],xmm2[5],xmm0[6],xmm2[6],xmm0[7],xmm2[7]
+; AVX1-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,0,0,0,0,0,0,0,0,13,0,0,0,0,0,0]
; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: shuffle_v32i8_00_00_00_00_00_00_00_00_00_22_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00:
; AVX2: # BB#0:
; AVX2-NEXT: vperm2i128 {{.*#+}} ymm1 = ymm0[2,3,0,1]
-; AVX2-NEXT: vpshufb {{.*#+}} ymm1 = ymm1[u,u,u,u,u,u,u,u,u,6,u,u,u,u,u,u,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16]
-; AVX2-NEXT: vpbroadcastb %xmm0, %ymm0
-; AVX2-NEXT: vmovdqa {{.*#+}} ymm2 = [0,0,0,0,0,0,0,0,0,255,0,0,0,0,0,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255]
-; AVX2-NEXT: vpblendvb %ymm2, %ymm1, %ymm0, %ymm0
+; AVX2-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0],ymm1[1,2,3,4,5,6,7]
+; AVX2-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,0,0,0,0,0,0,0,0,6,0,0,0,0,0,0,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16]
; AVX2-NEXT: retq
%shuffle = shufflevector <32 x i8> %a, <32 x i8> %b, <32 x i32> <i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 22, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0>
ret <32 x i8> %shuffle
@@ -483,19 +467,16 @@ define <32 x i8> @shuffle_v32i8_00_00_00_00_00_00_00_00_23_00_00_00_00_00_00_00_
; AVX1-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX1-NEXT: vpshufb %xmm1, %xmm0, %xmm1
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
-; AVX1-NEXT: vpshufb {{.*#+}} xmm2 = zero,zero,zero,zero,zero,zero,zero,zero,xmm2[7],zero,zero,zero,zero,zero,zero,zero
-; AVX1-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,0,0,0,0,0,0,0],zero,xmm0[0,0,0,0,0,0,0]
-; AVX1-NEXT: vpor %xmm2, %xmm0, %xmm0
+; AVX1-NEXT: vpunpcklbw {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3],xmm0[4],xmm2[4],xmm0[5],xmm2[5],xmm0[6],xmm2[6],xmm0[7],xmm2[7]
+; AVX1-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,0,0,0,0,0,0,0,15,0,0,0,0,0,0,0]
; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: shuffle_v32i8_00_00_00_00_00_00_00_00_23_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00:
; AVX2: # BB#0:
; AVX2-NEXT: vperm2i128 {{.*#+}} ymm1 = ymm0[2,3,0,1]
-; AVX2-NEXT: vpshufb {{.*#+}} ymm1 = ymm1[u,u,u,u,u,u,u,u,7,u,u,u,u,u,u,u,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16]
-; AVX2-NEXT: vpbroadcastb %xmm0, %ymm0
-; AVX2-NEXT: vmovdqa {{.*#+}} ymm2 = [0,0,0,0,0,0,0,0,255,0,0,0,0,0,0,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255]
-; AVX2-NEXT: vpblendvb %ymm2, %ymm1, %ymm0, %ymm0
+; AVX2-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0],ymm1[1,2,3,4,5,6,7]
+; AVX2-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,0,0,0,0,0,0,0,7,0,0,0,0,0,0,0,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16]
; AVX2-NEXT: retq
%shuffle = shufflevector <32 x i8> %a, <32 x i8> %b, <32 x i32> <i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 23, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0>
ret <32 x i8> %shuffle
@@ -516,10 +497,8 @@ define <32 x i8> @shuffle_v32i8_00_00_00_00_00_00_00_24_00_00_00_00_00_00_00_00_
; AVX2-LABEL: shuffle_v32i8_00_00_00_00_00_00_00_24_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00:
; AVX2: # BB#0:
; AVX2-NEXT: vperm2i128 {{.*#+}} ymm1 = ymm0[2,3,0,1]
-; AVX2-NEXT: vpshufb {{.*#+}} ymm1 = ymm1[u,u,u,u,u,u,u,8,u,u,u,u,u,u,u,u,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16]
-; AVX2-NEXT: vpbroadcastb %xmm0, %ymm0
-; AVX2-NEXT: vmovdqa {{.*#+}} ymm2 = [0,0,0,0,0,0,0,255,0,0,0,0,0,0,0,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255]
-; AVX2-NEXT: vpblendvb %ymm2, %ymm1, %ymm0, %ymm0
+; AVX2-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3,4,5,6,7]
+; AVX2-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,0,0,0,0,0,0,8,0,0,0,0,0,0,0,0,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16]
; AVX2-NEXT: retq
%shuffle = shufflevector <32 x i8> %a, <32 x i8> %b, <32 x i32> <i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 24, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0>
ret <32 x i8> %shuffle
@@ -540,10 +519,8 @@ define <32 x i8> @shuffle_v32i8_00_00_00_00_00_00_25_00_00_00_00_00_00_00_00_00_
; AVX2-LABEL: shuffle_v32i8_00_00_00_00_00_00_25_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00:
; AVX2: # BB#0:
; AVX2-NEXT: vperm2i128 {{.*#+}} ymm1 = ymm0[2,3,0,1]
-; AVX2-NEXT: vpshufb {{.*#+}} ymm1 = ymm1[u,u,u,u,u,u,9,u,u,u,u,u,u,u,u,u,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16]
-; AVX2-NEXT: vpbroadcastb %xmm0, %ymm0
-; AVX2-NEXT: vmovdqa {{.*#+}} ymm2 = [0,0,0,0,0,0,255,0,0,0,0,0,0,0,0,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255]
-; AVX2-NEXT: vpblendvb %ymm2, %ymm1, %ymm0, %ymm0
+; AVX2-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3,4,5,6,7]
+; AVX2-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,0,0,0,0,0,9,0,0,0,0,0,0,0,0,0,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16]
; AVX2-NEXT: retq
%shuffle = shufflevector <32 x i8> %a, <32 x i8> %b, <32 x i32> <i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 25, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0>
ret <32 x i8> %shuffle
@@ -564,10 +541,8 @@ define <32 x i8> @shuffle_v32i8_00_00_00_00_00_26_00_00_00_00_00_00_00_00_00_00_
; AVX2-LABEL: shuffle_v32i8_00_00_00_00_00_26_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00:
; AVX2: # BB#0:
; AVX2-NEXT: vperm2i128 {{.*#+}} ymm1 = ymm0[2,3,0,1]
-; AVX2-NEXT: vpshufb {{.*#+}} ymm1 = ymm1[u,u,u,u,u,10,u,u,u,u,u,u,u,u,u,u,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16]
-; AVX2-NEXT: vpbroadcastb %xmm0, %ymm0
-; AVX2-NEXT: vmovdqa {{.*#+}} ymm2 = [0,0,0,0,0,255,0,0,0,0,0,0,0,0,0,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255]
-; AVX2-NEXT: vpblendvb %ymm2, %ymm1, %ymm0, %ymm0
+; AVX2-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3,4,5,6,7]
+; AVX2-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,0,0,0,0,10,0,0,0,0,0,0,0,0,0,0,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16]
; AVX2-NEXT: retq
%shuffle = shufflevector <32 x i8> %a, <32 x i8> %b, <32 x i32> <i32 0, i32 0, i32 0, i32 0, i32 0, i32 26, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0>
ret <32 x i8> %shuffle
@@ -588,10 +563,8 @@ define <32 x i8> @shuffle_v32i8_00_00_00_00_27_00_00_00_00_00_00_00_00_00_00_00_
; AVX2-LABEL: shuffle_v32i8_00_00_00_00_27_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00:
; AVX2: # BB#0:
; AVX2-NEXT: vperm2i128 {{.*#+}} ymm1 = ymm0[2,3,0,1]
-; AVX2-NEXT: vpshufb {{.*#+}} ymm1 = ymm1[u,u,u,u,11,u,u,u,u,u,u,u,u,u,u,u,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16]
-; AVX2-NEXT: vpbroadcastb %xmm0, %ymm0
-; AVX2-NEXT: vmovdqa {{.*#+}} ymm2 = [0,0,0,0,255,0,0,0,0,0,0,0,0,0,0,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255]
-; AVX2-NEXT: vpblendvb %ymm2, %ymm1, %ymm0, %ymm0
+; AVX2-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3,4,5,6,7]
+; AVX2-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,0,0,0,11,0,0,0,0,0,0,0,0,0,0,0,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16]
; AVX2-NEXT: retq
%shuffle = shufflevector <32 x i8> %a, <32 x i8> %b, <32 x i32> <i32 0, i32 0, i32 0, i32 0, i32 27, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0>
ret <32 x i8> %shuffle
@@ -612,10 +585,8 @@ define <32 x i8> @shuffle_v32i8_00_00_00_28_00_00_00_00_00_00_00_00_00_00_00_00_
; AVX2-LABEL: shuffle_v32i8_00_00_00_28_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00:
; AVX2: # BB#0:
; AVX2-NEXT: vperm2i128 {{.*#+}} ymm1 = ymm0[2,3,0,1]
-; AVX2-NEXT: vpshufb {{.*#+}} ymm1 = ymm1[u,u,u,12,u,u,u,u,u,u,u,u,u,u,u,u,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16]
-; AVX2-NEXT: vpbroadcastb %xmm0, %ymm0
-; AVX2-NEXT: vmovdqa {{.*#+}} ymm2 = [0,0,0,255,0,0,0,0,0,0,0,0,0,0,0,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255]
-; AVX2-NEXT: vpblendvb %ymm2, %ymm1, %ymm0, %ymm0
+; AVX2-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3,4,5,6,7]
+; AVX2-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,0,0,12,0,0,0,0,0,0,0,0,0,0,0,0,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16]
; AVX2-NEXT: retq
%shuffle = shufflevector <32 x i8> %a, <32 x i8> %b, <32 x i32> <i32 0, i32 0, i32 0, i32 28, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0>
ret <32 x i8> %shuffle
@@ -636,10 +607,8 @@ define <32 x i8> @shuffle_v32i8_00_00_29_00_00_00_00_00_00_00_00_00_00_00_00_00_
; AVX2-LABEL: shuffle_v32i8_00_00_29_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00:
; AVX2: # BB#0:
; AVX2-NEXT: vperm2i128 {{.*#+}} ymm1 = ymm0[2,3,0,1]
-; AVX2-NEXT: vpshufb {{.*#+}} ymm1 = ymm1[u,u,13,u,u,u,u,u,u,u,u,u,u,u,u,u,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16]
-; AVX2-NEXT: vpbroadcastb %xmm0, %ymm0
-; AVX2-NEXT: vmovdqa {{.*#+}} ymm2 = [0,0,255,0,0,0,0,0,0,0,0,0,0,0,0,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255]
-; AVX2-NEXT: vpblendvb %ymm2, %ymm1, %ymm0, %ymm0
+; AVX2-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3,4,5,6,7]
+; AVX2-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,0,13,0,0,0,0,0,0,0,0,0,0,0,0,0,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16]
; AVX2-NEXT: retq
%shuffle = shufflevector <32 x i8> %a, <32 x i8> %b, <32 x i32> <i32 0, i32 0, i32 29, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0>
ret <32 x i8> %shuffle
@@ -660,10 +629,8 @@ define <32 x i8> @shuffle_v32i8_00_30_00_00_00_00_00_00_00_00_00_00_00_00_00_00_
; AVX2-LABEL: shuffle_v32i8_00_30_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00:
; AVX2: # BB#0:
; AVX2-NEXT: vperm2i128 {{.*#+}} ymm1 = ymm0[2,3,0,1]
-; AVX2-NEXT: vpshufb {{.*#+}} ymm1 = ymm1[u,14,u,u,u,u,u,u,u,u,u,u,u,u,u,u,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16]
-; AVX2-NEXT: vpbroadcastb %xmm0, %ymm0
-; AVX2-NEXT: vmovdqa {{.*#+}} ymm2 = [0,255,0,0,0,0,0,0,0,0,0,0,0,0,0,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255]
-; AVX2-NEXT: vpblendvb %ymm2, %ymm1, %ymm0, %ymm0
+; AVX2-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3,4,5,6,7]
+; AVX2-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,14,0,0,0,0,0,0,0,0,0,0,0,0,0,0,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16]
; AVX2-NEXT: retq
%shuffle = shufflevector <32 x i8> %a, <32 x i8> %b, <32 x i32> <i32 0, i32 30, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0>
ret <32 x i8> %shuffle
@@ -685,15 +652,13 @@ define <32 x i8> @shuffle_v32i8_31_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_
;
; AVX2-LABEL: shuffle_v32i8_31_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00:
; AVX2: # BB#0:
-; AVX2-NEXT: vperm2i128 {{.*#+}} ymm1 = ymm0[2,3,0,1]
; AVX2-NEXT: movl $15, %eax
-; AVX2-NEXT: vmovd %eax, %xmm2
-; AVX2-NEXT: vpxor %ymm3, %ymm3, %ymm3
-; AVX2-NEXT: vinserti128 $0, %xmm2, %ymm3, %ymm2
-; AVX2-NEXT: vpshufb %ymm2, %ymm1, %ymm1
-; AVX2-NEXT: vpbroadcastb %xmm0, %ymm0
-; AVX2-NEXT: vmovdqa {{.*#+}} ymm2 = [255,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255]
-; AVX2-NEXT: vpblendvb %ymm2, %ymm1, %ymm0, %ymm0
+; AVX2-NEXT: vmovd %eax, %xmm1
+; AVX2-NEXT: vpxor %ymm2, %ymm2, %ymm2
+; AVX2-NEXT: vinserti128 $0, %xmm1, %ymm2, %ymm1
+; AVX2-NEXT: vperm2i128 {{.*#+}} ymm2 = ymm0[2,3,0,1]
+; AVX2-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1],ymm2[2,3,4,5,6,7]
+; AVX2-NEXT: vpshufb %ymm1, %ymm0, %ymm0
; AVX2-NEXT: retq
%shuffle = shufflevector <32 x i8> %a, <32 x i8> %b, <32 x i32> <i32 31, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0>
ret <32 x i8> %shuffle
@@ -947,16 +912,11 @@ define <32 x i8> @shuffle_v32i8_15_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_
define <32 x i8> @shuffle_v32i8_00_33_02_35_04_37_06_39_08_41_10_43_12_45_14_47_16_49_18_51_20_53_22_55_24_57_26_59_28_61_30_63(<32 x i8> %a, <32 x i8> %b) {
; AVX1-LABEL: shuffle_v32i8_00_33_02_35_04_37_06_39_08_41_10_43_12_45_14_47_16_49_18_51_20_53_22_55_24_57_26_59_28_61_30_63:
; AVX1: # BB#0:
-; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
-; AVX1-NEXT: vmovdqa {{.*#+}} xmm3 = <1,3,5,7,9,11,13,15,u,u,u,u,u,u,u,u>
-; AVX1-NEXT: vpshufb %xmm3, %xmm2, %xmm2
-; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm4
-; AVX1-NEXT: vmovdqa {{.*#+}} xmm5 = <0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u>
-; AVX1-NEXT: vpshufb %xmm5, %xmm4, %xmm4
-; AVX1-NEXT: vpunpcklbw {{.*#+}} xmm2 = xmm4[0],xmm2[0],xmm4[1],xmm2[1],xmm4[2],xmm2[2],xmm4[3],xmm2[3],xmm4[4],xmm2[4],xmm4[5],xmm2[5],xmm4[6],xmm2[6],xmm4[7],xmm2[7]
-; AVX1-NEXT: vpshufb %xmm3, %xmm1, %xmm1
-; AVX1-NEXT: vpshufb %xmm5, %xmm0, %xmm0
-; AVX1-NEXT: vpunpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm3
+; AVX1-NEXT: vmovdqa {{.*#+}} xmm4 = [255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0]
+; AVX1-NEXT: vpblendvb %xmm4, %xmm2, %xmm3, %xmm2
+; AVX1-NEXT: vpblendvb %xmm4, %xmm0, %xmm1, %xmm0
; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
; AVX1-NEXT: retq
;
@@ -972,16 +932,11 @@ define <32 x i8> @shuffle_v32i8_00_33_02_35_04_37_06_39_08_41_10_43_12_45_14_47_
define <32 x i8> @shuffle_v32i8_32_01_34_03_36_05_38_07_40_09_42_11_44_13_46_15_48_17_50_19_52_21_54_23_56_25_58_27_60_29_62_31(<32 x i8> %a, <32 x i8> %b) {
; AVX1-LABEL: shuffle_v32i8_32_01_34_03_36_05_38_07_40_09_42_11_44_13_46_15_48_17_50_19_52_21_54_23_56_25_58_27_60_29_62_31:
; AVX1: # BB#0:
-; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
-; AVX1-NEXT: vmovdqa {{.*#+}} xmm3 = <1,3,5,7,9,11,13,15,u,u,u,u,u,u,u,u>
-; AVX1-NEXT: vpshufb %xmm3, %xmm2, %xmm2
-; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm4
-; AVX1-NEXT: vmovdqa {{.*#+}} xmm5 = <0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u>
-; AVX1-NEXT: vpshufb %xmm5, %xmm4, %xmm4
-; AVX1-NEXT: vpunpcklbw {{.*#+}} xmm2 = xmm4[0],xmm2[0],xmm4[1],xmm2[1],xmm4[2],xmm2[2],xmm4[3],xmm2[3],xmm4[4],xmm2[4],xmm4[5],xmm2[5],xmm4[6],xmm2[6],xmm4[7],xmm2[7]
-; AVX1-NEXT: vpshufb %xmm3, %xmm0, %xmm0
-; AVX1-NEXT: vpshufb %xmm5, %xmm1, %xmm1
-; AVX1-NEXT: vpunpcklbw {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
+; AVX1-NEXT: vmovdqa {{.*#+}} xmm4 = [255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0]
+; AVX1-NEXT: vpblendvb %xmm4, %xmm2, %xmm3, %xmm2
+; AVX1-NEXT: vpblendvb %xmm4, %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
; AVX1-NEXT: retq
;
@@ -997,20 +952,17 @@ define <32 x i8> @shuffle_v32i8_32_01_34_03_36_05_38_07_40_09_42_11_44_13_46_15_
define <32 x i8> @shuffle_v32i8_00_32_00_32_00_32_00_32_00_32_00_32_00_32_00_32_00_32_00_32_00_32_00_32_00_32_00_32_00_32_00_32(<32 x i8> %a, <32 x i8> %b) {
; AVX1-LABEL: shuffle_v32i8_00_32_00_32_00_32_00_32_00_32_00_32_00_32_00_32_00_32_00_32_00_32_00_32_00_32_00_32_00_32_00_32:
; AVX1: # BB#0:
-; AVX1-NEXT: vpunpcklbw {{.*#+}} xmm1 = xmm1[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
-; AVX1-NEXT: vpshuflw {{.*#+}} xmm1 = xmm1[0,0,0,0,4,5,6,7]
-; AVX1-NEXT: vpunpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
-; AVX1-NEXT: vpshuflw {{.*#+}} xmm0 = xmm0[0,0,0,0,4,5,6,7]
; AVX1-NEXT: vpunpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
+; AVX1-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1]
; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: shuffle_v32i8_00_32_00_32_00_32_00_32_00_32_00_32_00_32_00_32_00_32_00_32_00_32_00_32_00_32_00_32_00_32_00_32:
; AVX2: # BB#0:
-; AVX2-NEXT: vpbroadcastb %xmm1, %xmm1
-; AVX2-NEXT: vpbroadcastb %xmm0, %xmm0
-; AVX2-NEXT: vpunpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
-; AVX2-NEXT: vinserti128 $1, %xmm0, %ymm0, %ymm0
+; AVX2-NEXT: vpbroadcastw %xmm0, %ymm0
+; AVX2-NEXT: vpbroadcastb %xmm1, %ymm1
+; AVX2-NEXT: vmovdqa {{.*#+}} ymm2 = [255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0]
+; AVX2-NEXT: vpblendvb %ymm2, %ymm0, %ymm1, %ymm0
; AVX2-NEXT: retq
%shuffle = shufflevector <32 x i8> %a, <32 x i8> %b, <32 x i32> <i32 0, i32 32, i32 0, i32 32, i32 0, i32 32, i32 0, i32 32, i32 0, i32 32, i32 0, i32 32, i32 0, i32 32, i32 0, i32 32, i32 0, i32 32, i32 0, i32 32, i32 0, i32 32, i32 0, i32 32, i32 0, i32 32, i32 0, i32 32, i32 0, i32 32, i32 0, i32 32>
ret <32 x i8> %shuffle
@@ -1020,17 +972,12 @@ define <32 x i8> @shuffle_v32i8_00_32_00_32_00_32_00_32_00_32_00_32_00_32_00_32_
; AVX1-LABEL: shuffle_v32i8_00_32_00_32_00_32_00_32_00_32_00_32_00_32_00_32_16_48_16_48_16_48_16_48_16_48_16_48_16_48_16_48:
; AVX1: # BB#0:
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
-; AVX1-NEXT: vpunpcklbw {{.*#+}} xmm2 = xmm2[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
-; AVX1-NEXT: vpshuflw {{.*#+}} xmm2 = xmm2[0,0,0,0,4,5,6,7]
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
-; AVX1-NEXT: vpunpcklbw {{.*#+}} xmm3 = xmm3[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
-; AVX1-NEXT: vpshuflw {{.*#+}} xmm3 = xmm3[0,0,0,0,4,5,6,7]
; AVX1-NEXT: vpunpcklbw {{.*#+}} xmm2 = xmm3[0],xmm2[0],xmm3[1],xmm2[1],xmm3[2],xmm2[2],xmm3[3],xmm2[3],xmm3[4],xmm2[4],xmm3[5],xmm2[5],xmm3[6],xmm2[6],xmm3[7],xmm2[7]
-; AVX1-NEXT: vpunpcklbw {{.*#+}} xmm1 = xmm1[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
-; AVX1-NEXT: vpshuflw {{.*#+}} xmm1 = xmm1[0,0,0,0,4,5,6,7]
-; AVX1-NEXT: vpunpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
-; AVX1-NEXT: vpshuflw {{.*#+}} xmm0 = xmm0[0,0,0,0,4,5,6,7]
+; AVX1-NEXT: vmovdqa {{.*#+}} xmm3 = [0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1]
+; AVX1-NEXT: vpshufb %xmm3, %xmm2, %xmm2
; AVX1-NEXT: vpunpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
+; AVX1-NEXT: vpshufb %xmm3, %xmm0, %xmm0
; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
; AVX1-NEXT: retq
;
@@ -1050,15 +997,15 @@ define <32 x i8> @shuffle_v32i8_32_32_32_32_32_32_32_32_08_09_10_11_12_13_14_15_
; AVX1-LABEL: shuffle_v32i8_32_32_32_32_32_32_32_32_08_09_10_11_12_13_14_15_48_48_48_48_48_48_48_48_24_25_26_27_28_29_30_31:
; AVX1: # BB#0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
-; AVX1-NEXT: vmovdqa {{.*#+}} xmm3 = [128,128,128,128,128,128,128,128,8,9,10,11,12,13,14,15]
-; AVX1-NEXT: vpshufb %xmm3, %xmm2, %xmm2
-; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm4
-; AVX1-NEXT: vmovdqa {{.*#+}} xmm5 = [0,0,0,0,0,0,0,0,128,128,128,128,128,128,128,128]
-; AVX1-NEXT: vpshufb %xmm5, %xmm4, %xmm4
-; AVX1-NEXT: vpor %xmm2, %xmm4, %xmm2
-; AVX1-NEXT: vpshufb %xmm3, %xmm0, %xmm0
-; AVX1-NEXT: vpshufb %xmm5, %xmm1, %xmm1
-; AVX1-NEXT: vpor %xmm0, %xmm1, %xmm0
+; AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm2[2,3,0,1]
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm3
+; AVX1-NEXT: vpunpcklbw {{.*#+}} xmm3 = xmm3[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
+; AVX1-NEXT: vpshuflw {{.*#+}} xmm3 = xmm3[0,0,0,0,4,5,6,7]
+; AVX1-NEXT: vpunpcklqdq {{.*#+}} xmm2 = xmm3[0],xmm2[0]
+; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
+; AVX1-NEXT: vpunpcklbw {{.*#+}} xmm1 = xmm1[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
+; AVX1-NEXT: vpshuflw {{.*#+}} xmm1 = xmm1[0,0,0,0,4,5,6,7]
+; AVX1-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0]
; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
; AVX1-NEXT: retq
;
@@ -1076,23 +1023,22 @@ define <32 x i8> @shuffle_v32i8_39_38_37_36_35_34_33_32_15_14_13_12_11_10_09_08_
; AVX1-LABEL: shuffle_v32i8_39_38_37_36_35_34_33_32_15_14_13_12_11_10_09_08_55_54_53_52_51_50_49_48_31_30_29_28_27_26_25_24:
; AVX1: # BB#0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
-; AVX1-NEXT: vmovdqa {{.*#+}} xmm3 = [128,128,128,128,128,128,128,128,15,14,13,12,11,10,9,8]
+; AVX1-NEXT: vmovdqa {{.*#+}} xmm3 = <15,14,13,12,11,10,9,8,u,u,u,u,u,u,u,u>
; AVX1-NEXT: vpshufb %xmm3, %xmm2, %xmm2
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm4
-; AVX1-NEXT: vmovdqa {{.*#+}} xmm5 = [7,6,5,4,3,2,1,0,128,128,128,128,128,128,128,128]
+; AVX1-NEXT: vmovdqa {{.*#+}} xmm5 = <7,6,5,4,3,2,1,0,u,u,u,u,u,u,u,u>
; AVX1-NEXT: vpshufb %xmm5, %xmm4, %xmm4
-; AVX1-NEXT: vpor %xmm2, %xmm4, %xmm2
+; AVX1-NEXT: vpunpcklqdq {{.*#+}} xmm2 = xmm4[0],xmm2[0]
; AVX1-NEXT: vpshufb %xmm3, %xmm0, %xmm0
; AVX1-NEXT: vpshufb %xmm5, %xmm1, %xmm1
-; AVX1-NEXT: vpor %xmm0, %xmm1, %xmm0
+; AVX1-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0]
; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: shuffle_v32i8_39_38_37_36_35_34_33_32_15_14_13_12_11_10_09_08_55_54_53_52_51_50_49_48_31_30_29_28_27_26_25_24:
; AVX2: # BB#0:
-; AVX2-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[u,u,u,u,u,u,u,u,15,14,13,12,11,10,9,8,u,u,u,u,u,u,u,u,31,30,29,28,27,26,25,24]
-; AVX2-NEXT: vpshufb {{.*#+}} ymm1 = ymm1[7,6,5,4,3,2,1,0,u,u,u,u,u,u,u,u,23,22,21,20,19,18,17,16,u,u,u,u,u,u,u,u]
; AVX2-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3],ymm1[4,5],ymm0[6,7]
+; AVX2-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[7,6,5,4,3,2,1,0,15,14,13,12,11,10,9,8,23,22,21,20,19,18,17,16,31,30,29,28,27,26,25,24]
; AVX2-NEXT: retq
%shuffle = shufflevector <32 x i8> %a, <32 x i8> %b, <32 x i32> <i32 39, i32 38, i32 37, i32 36, i32 35, i32 34, i32 33, i32 32, i32 15, i32 14, i32 13, i32 12, i32 11, i32 10, i32 9, i32 8, i32 55, i32 54, i32 53, i32 52, i32 51, i32 50, i32 49, i32 48, i32 31, i32 30, i32 29, i32 28, i32 27, i32 26, i32 25, i32 24>
ret <32 x i8> %shuffle
@@ -1102,15 +1048,12 @@ define <32 x i8> @shuffle_v32i8_39_38_37_36_35_34_33_32_07_06_05_04_03_02_01_00_
; AVX1-LABEL: shuffle_v32i8_39_38_37_36_35_34_33_32_07_06_05_04_03_02_01_00_55_54_53_52_51_50_49_48_23_22_21_20_19_18_17_16:
; AVX1: # BB#0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
-; AVX1-NEXT: vmovdqa {{.*#+}} xmm3 = [128,128,128,128,128,128,128,128,7,6,5,4,3,2,1,0]
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm3
+; AVX1-NEXT: vpunpcklbw {{.*#+}} xmm2 = xmm3[0],xmm2[0],xmm3[1],xmm2[1],xmm3[2],xmm2[2],xmm3[3],xmm2[3],xmm3[4],xmm2[4],xmm3[5],xmm2[5],xmm3[6],xmm2[6],xmm3[7],xmm2[7]
+; AVX1-NEXT: vmovdqa {{.*#+}} xmm3 = [14,12,10,8,6,4,2,0,15,13,11,9,7,5,3,1]
; AVX1-NEXT: vpshufb %xmm3, %xmm2, %xmm2
-; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm4
-; AVX1-NEXT: vmovdqa {{.*#+}} xmm5 = [7,6,5,4,3,2,1,0,128,128,128,128,128,128,128,128]
-; AVX1-NEXT: vpshufb %xmm5, %xmm4, %xmm4
-; AVX1-NEXT: vpor %xmm2, %xmm4, %xmm2
+; AVX1-NEXT: vpunpcklbw {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
; AVX1-NEXT: vpshufb %xmm3, %xmm0, %xmm0
-; AVX1-NEXT: vpshufb %xmm5, %xmm1, %xmm1
-; AVX1-NEXT: vpor %xmm0, %xmm1, %xmm0
; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
; AVX1-NEXT: retq
;
@@ -1520,27 +1463,24 @@ define <32 x i8> @shuffle_v32i8_08_08_08_08_08_08_08_08_uu_uu_uu_uu_uu_uu_uu_uu_
define <32 x i8> @shuffle_v32i8_42_45_12_13_35_35_60_40_17_22_29_44_33_12_48_51_20_19_52_19_49_54_37_32_48_42_59_07_36_34_36_39(<32 x i8> %a, <32 x i8> %b) {
; AVX1-LABEL: shuffle_v32i8_42_45_12_13_35_35_60_40_17_22_29_44_33_12_48_51_20_19_52_19_49_54_37_32_48_42_59_07_36_34_36_39:
; AVX1: # BB#0:
-; AVX1-NEXT: vpshufb {{.*#+}} xmm2 = zero,zero,xmm0[u],zero,xmm0[u,u,u,u,u,u,u,7,u,u,u,u]
-; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
-; AVX1-NEXT: vpshufb {{.*#+}} xmm4 = xmm3[4,3,u,3,u,u,u,u,u,u,u],zero,xmm3[u,u,u,u]
-; AVX1-NEXT: vpor %xmm2, %xmm4, %xmm2
-; AVX1-NEXT: vpshufb {{.*#+}} xmm2 = xmm2[0,1],zero,xmm2[3],zero,zero,zero,zero,zero,zero,zero,xmm2[11],zero,zero,zero,zero
-; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm4
-; AVX1-NEXT: vpshufb {{.*#+}} xmm5 = xmm4[u,u,4,u,1,6],zero,zero,xmm4[0],zero,xmm4[11,u],zero,zero,zero,zero
-; AVX1-NEXT: vpshufb {{.*#+}} xmm6 = xmm1[u,u],zero,xmm1[u],zero,zero,xmm1[5,0],zero,xmm1[10],zero,xmm1[u,4,2,4,7]
-; AVX1-NEXT: vpor %xmm5, %xmm6, %xmm5
-; AVX1-NEXT: vpshufb {{.*#+}} xmm5 = zero,zero,xmm5[2],zero,xmm5[4,5,6,7,8,9,10],zero,xmm5[12,13,14,15]
-; AVX1-NEXT: vpor %xmm2, %xmm5, %xmm2
-; AVX1-NEXT: vpshufb {{.*#+}} xmm3 = xmm3[u,u],zero,zero,xmm3[u,u,u,u,1,6,13,u,u],zero,xmm3[u,u]
-; AVX1-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[u,u,12,13,u,u,u,u],zero,zero,zero,xmm0[u,u,12,u,u]
-; AVX1-NEXT: vpor %xmm3, %xmm0, %xmm0
-; AVX1-NEXT: vpshufb {{.*#+}} xmm0 = zero,zero,xmm0[2,3],zero,zero,zero,zero,xmm0[8,9,10],zero,zero,xmm0[13],zero,zero
-; AVX1-NEXT: vpshufb {{.*#+}} xmm3 = zero,zero,xmm4[u,u],zero,zero,xmm4[12],zero,xmm4[u,u,u],zero,zero,xmm4[u,0,3]
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
+; AVX1-NEXT: vpshufb {{.*#+}} xmm3 = xmm2[u,u,4,u,1,6],zero,zero,xmm2[0],zero,xmm2[11,u],zero,zero,zero,zero
+; AVX1-NEXT: vpshufb {{.*#+}} xmm4 = xmm1[u,u],zero,xmm1[u],zero,zero,xmm1[5,0],zero,xmm1[10],zero,xmm1[u,4,2,4,7]
+; AVX1-NEXT: vpor %xmm3, %xmm4, %xmm3
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm4
+; AVX1-NEXT: vpunpcklbw {{.*#+}} xmm5 = xmm4[0],xmm0[0],xmm4[1],xmm0[1],xmm4[2],xmm0[2],xmm4[3],xmm0[3],xmm4[4],xmm0[4],xmm4[5],xmm0[5],xmm4[6],xmm0[6],xmm4[7],xmm0[7]
+; AVX1-NEXT: vpshufb {{.*#+}} xmm5 = xmm5[8,6,u,6,u,u,u,u,u,u,u,15,u,u,u,u]
+; AVX1-NEXT: vmovdqa {{.*#+}} xmm6 = [0,0,255,0,255,255,255,255,255,255,255,0,255,255,255,255]
+; AVX1-NEXT: vpblendvb %xmm6, %xmm3, %xmm5, %xmm3
+; AVX1-NEXT: vpshufb {{.*#+}} xmm2 = zero,zero,xmm2[u,u],zero,zero,xmm2[12],zero,xmm2[u,u,u],zero,zero,xmm2[u,0,3]
; AVX1-NEXT: vpshufb {{.*#+}} xmm1 = xmm1[10,13,u,u,3,3],zero,xmm1[8,u,u,u,12,1,u],zero,zero
-; AVX1-NEXT: vpor %xmm3, %xmm1, %xmm1
-; AVX1-NEXT: vpshufb {{.*#+}} xmm1 = xmm1[0,1],zero,zero,xmm1[4,5,6,7],zero,zero,zero,xmm1[11,12],zero,xmm1[14,15]
-; AVX1-NEXT: vpor %xmm0, %xmm1, %xmm0
-; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; AVX1-NEXT: vpor %xmm2, %xmm1, %xmm1
+; AVX1-NEXT: vpshufb {{.*#+}} xmm2 = xmm4[u,u],zero,zero,xmm4[u,u,u,u,1,6,13,u,u],zero,xmm4[u,u]
+; AVX1-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[u,u,12,13,u,u,u,u],zero,zero,zero,xmm0[u,u,12,u,u]
+; AVX1-NEXT: vpor %xmm2, %xmm0, %xmm0
+; AVX1-NEXT: vmovdqa {{.*#+}} xmm2 = [255,255,0,0,255,255,255,255,0,0,0,255,255,0,255,255]
+; AVX1-NEXT: vpblendvb %xmm2, %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vinsertf128 $1, %xmm3, %ymm0, %ymm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: shuffle_v32i8_42_45_12_13_35_35_60_40_17_22_29_44_33_12_48_51_20_19_52_19_49_54_37_32_48_42_59_07_36_34_36_39:
@@ -1560,3 +1500,461 @@ define <32 x i8> @shuffle_v32i8_42_45_12_13_35_35_60_40_17_22_29_44_33_12_48_51_
%shuffle = shufflevector <32 x i8> %a, <32 x i8> %b, <32 x i32> <i32 42, i32 45, i32 12, i32 13, i32 35, i32 35, i32 60, i32 40, i32 17, i32 22, i32 29, i32 44, i32 33, i32 12, i32 48, i32 51, i32 20, i32 19, i32 52, i32 19, i32 49, i32 54, i32 37, i32 32, i32 48, i32 42, i32 59, i32 7, i32 36, i32 34, i32 36, i32 39>
ret <32 x i8> %shuffle
}
+
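+;
+; Shuffles whose halves each draw from a single 128-bit half
+;
+; A sketch of the pattern (indices illustrative): when each 16-byte half of
+; the result only needs bytes from one 16-byte half of the sources, AVX2 can
+; first gather the right halves with vinserti128/vperm2i128/vpblendd and
+; then apply a single vpshufb, as the checks below show.
+;
+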
+define <32 x i8> @shuffle_v32i8_00_00_00_00_00_00_00_00_08_08_08_08_08_08_08_08_32_32_32_32_32_32_32_32_40_40_40_40_40_40_40_40(<32 x i8> %a, <32 x i8> %b) {
+; AVX1-LABEL: shuffle_v32i8_00_00_00_00_00_00_00_00_08_08_08_08_08_08_08_08_32_32_32_32_32_32_32_32_40_40_40_40_40_40_40_40:
+; AVX1: # BB#0:
+; AVX1-NEXT: vmovdqa {{.*#+}} xmm2 = [0,0,0,0,0,0,0,0,8,8,8,8,8,8,8,8]
+; AVX1-NEXT: vpshufb %xmm2, %xmm1, %xmm1
+; AVX1-NEXT: vpshufb %xmm2, %xmm0, %xmm0
+; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: shuffle_v32i8_00_00_00_00_00_00_00_00_08_08_08_08_08_08_08_08_32_32_32_32_32_32_32_32_40_40_40_40_40_40_40_40:
+; AVX2: # BB#0:
+; AVX2-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
+; AVX2-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,0,0,0,0,0,0,0,8,8,8,8,8,8,8,8,16,16,16,16,16,16,16,16,24,24,24,24,24,24,24,24]
+; AVX2-NEXT: retq
+ %shuffle = shufflevector <32 x i8> %a, <32 x i8> %b, <32 x i32> <i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 8, i32 8, i32 8, i32 8, i32 8, i32 8, i32 8, i32 8, i32 32, i32 32, i32 32, i32 32, i32 32, i32 32, i32 32, i32 32, i32 40, i32 40, i32 40, i32 40, i32 40, i32 40, i32 40, i32 40>
+ ret <32 x i8> %shuffle
+}
+
+define <32 x i8> @shuffle_v32i8_16_16_16_16_16_16_16_16_24_24_24_24_24_24_24_24_32_32_32_32_32_32_32_32_40_40_40_40_40_40_40_40(<32 x i8> %a, <32 x i8> %b) {
+; AVX1-LABEL: shuffle_v32i8_16_16_16_16_16_16_16_16_24_24_24_24_24_24_24_24_32_32_32_32_32_32_32_32_40_40_40_40_40_40_40_40:
+; AVX1: # BB#0:
+; AVX1-NEXT: vmovdqa {{.*#+}} xmm2 = [0,0,0,0,0,0,0,0,8,8,8,8,8,8,8,8]
+; AVX1-NEXT: vpshufb %xmm2, %xmm1, %xmm1
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
+; AVX1-NEXT: vpshufb %xmm2, %xmm0, %xmm0
+; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: shuffle_v32i8_16_16_16_16_16_16_16_16_24_24_24_24_24_24_24_24_32_32_32_32_32_32_32_32_40_40_40_40_40_40_40_40:
+; AVX2: # BB#0:
+; AVX2-NEXT: vperm2i128 {{.*#+}} ymm0 = ymm0[2,3],ymm1[0,1]
+; AVX2-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,0,0,0,0,0,0,0,8,8,8,8,8,8,8,8,16,16,16,16,16,16,16,16,24,24,24,24,24,24,24,24]
+; AVX2-NEXT: retq
+ %shuffle = shufflevector <32 x i8> %a, <32 x i8> %b, <32 x i32> <i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 24, i32 24, i32 24, i32 24, i32 24, i32 24, i32 24, i32 24, i32 32, i32 32, i32 32, i32 32, i32 32, i32 32, i32 32, i32 32, i32 40, i32 40, i32 40, i32 40, i32 40, i32 40, i32 40, i32 40>
+ ret <32 x i8> %shuffle
+}
+
+define <32 x i8> @shuffle_v32i8_16_16_16_16_16_16_16_16_24_24_24_24_24_24_24_24_48_48_48_48_48_48_48_48_56_56_56_56_56_56_56_56(<32 x i8> %a, <32 x i8> %b) {
+; AVX1-LABEL: shuffle_v32i8_16_16_16_16_16_16_16_16_24_24_24_24_24_24_24_24_48_48_48_48_48_48_48_48_56_56_56_56_56_56_56_56:
+; AVX1: # BB#0:
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm1
+; AVX1-NEXT: vmovdqa {{.*#+}} xmm2 = [0,0,0,0,0,0,0,0,8,8,8,8,8,8,8,8]
+; AVX1-NEXT: vpshufb %xmm2, %xmm1, %xmm1
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
+; AVX1-NEXT: vpshufb %xmm2, %xmm0, %xmm0
+; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: shuffle_v32i8_16_16_16_16_16_16_16_16_24_24_24_24_24_24_24_24_48_48_48_48_48_48_48_48_56_56_56_56_56_56_56_56:
+; AVX2: # BB#0:
+; AVX2-NEXT: vperm2i128 {{.*#+}} ymm0 = ymm0[2,3],ymm1[2,3]
+; AVX2-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,0,0,0,0,0,0,0,8,8,8,8,8,8,8,8,16,16,16,16,16,16,16,16,24,24,24,24,24,24,24,24]
+; AVX2-NEXT: retq
+ %shuffle = shufflevector <32 x i8> %a, <32 x i8> %b, <32 x i32> <i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 24, i32 24, i32 24, i32 24, i32 24, i32 24, i32 24, i32 24, i32 48, i32 48, i32 48, i32 48, i32 48, i32 48, i32 48, i32 48, i32 56, i32 56, i32 56, i32 56, i32 56, i32 56, i32 56, i32 56>
+ ret <32 x i8> %shuffle
+}
+
+define <32 x i8> @shuffle_v32i8_00_00_00_00_00_00_00_00_08_08_08_08_08_08_08_08_48_48_48_48_48_48_48_48_56_56_56_56_56_56_56_56(<32 x i8> %a, <32 x i8> %b) {
+; AVX1-LABEL: shuffle_v32i8_00_00_00_00_00_00_00_00_08_08_08_08_08_08_08_08_48_48_48_48_48_48_48_48_56_56_56_56_56_56_56_56:
+; AVX1: # BB#0:
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm1
+; AVX1-NEXT: vmovdqa {{.*#+}} xmm2 = [0,0,0,0,0,0,0,0,8,8,8,8,8,8,8,8]
+; AVX1-NEXT: vpshufb %xmm2, %xmm1, %xmm1
+; AVX1-NEXT: vpshufb %xmm2, %xmm0, %xmm0
+; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: shuffle_v32i8_00_00_00_00_00_00_00_00_08_08_08_08_08_08_08_08_48_48_48_48_48_48_48_48_56_56_56_56_56_56_56_56:
+; AVX2: # BB#0:
+; AVX2-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
+; AVX2-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,0,0,0,0,0,0,0,8,8,8,8,8,8,8,8,16,16,16,16,16,16,16,16,24,24,24,24,24,24,24,24]
+; AVX2-NEXT: retq
+ %shuffle = shufflevector <32 x i8> %a, <32 x i8> %b, <32 x i32> <i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 8, i32 8, i32 8, i32 8, i32 8, i32 8, i32 8, i32 8, i32 48, i32 48, i32 48, i32 48, i32 48, i32 48, i32 48, i32 48, i32 56, i32 56, i32 56, i32 56, i32 56, i32 56, i32 56, i32 56>
+ ret <32 x i8> %shuffle
+}
+
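+;
+; Shuffle to byte interleaves
+;
+; A sketch of the pattern (indices illustrative): alternating a0,b0,a1,b1,...
+; over the low 16 bytes of both sources is exactly what punpcklbw/punpckhbw
+; produce, so the lowering below needs only two unpacks and a 128-bit insert.
+;
+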
+define <32 x i8> @shuffle_v32i8_00_32_01_33_02_34_03_35_04_36_05_37_06_38_07_39_08_40_09_41_10_42_11_43_12_44_13_45_14_46_15_47(<32 x i8> %a, <32 x i8> %b) {
+; AVX1-LABEL: shuffle_v32i8_00_32_01_33_02_34_03_35_04_36_05_37_06_38_07_39_08_40_09_41_10_42_11_43_12_44_13_45_14_46_15_47:
+; AVX1: # BB#0:
+; AVX1-NEXT: vpunpckhbw {{.*#+}} xmm2 = xmm0[8],xmm1[8],xmm0[9],xmm1[9],xmm0[10],xmm1[10],xmm0[11],xmm1[11],xmm0[12],xmm1[12],xmm0[13],xmm1[13],xmm0[14],xmm1[14],xmm0[15],xmm1[15]
+; AVX1-NEXT: vpunpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
+; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: shuffle_v32i8_00_32_01_33_02_34_03_35_04_36_05_37_06_38_07_39_08_40_09_41_10_42_11_43_12_44_13_45_14_46_15_47:
+; AVX2: # BB#0:
+; AVX2-NEXT: vpunpckhbw {{.*#+}} xmm2 = xmm0[8],xmm1[8],xmm0[9],xmm1[9],xmm0[10],xmm1[10],xmm0[11],xmm1[11],xmm0[12],xmm1[12],xmm0[13],xmm1[13],xmm0[14],xmm1[14],xmm0[15],xmm1[15]
+; AVX2-NEXT: vpunpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
+; AVX2-NEXT: vinserti128 $1, %xmm2, %ymm0, %ymm0
+; AVX2-NEXT: retq
+ %shuffle = shufflevector <32 x i8> %a, <32 x i8> %b, <32 x i32> <i32 0, i32 32, i32 1, i32 33, i32 2, i32 34, i32 3, i32 35, i32 4, i32 36, i32 5, i32 37, i32 6, i32 38, i32 7, i32 39, i32 8, i32 40, i32 9, i32 41, i32 10, i32 42, i32 11, i32 43, i32 12, i32 44, i32 13, i32 45, i32 14, i32 46, i32 15, i32 47>
+ ret <32 x i8> %shuffle
+}
+
+define <32 x i8> @shuffle_v32i8_zz_zz_zz_zz_zz_zz_zz_zz_zz_zz_zz_zz_zz_zz_zz_32_zz_zz_zz_zz_zz_zz_zz_zz_zz_zz_zz_zz_zz_zz_zz_48(<32 x i8> %a) {
+; AVX1-LABEL: shuffle_v32i8_zz_zz_zz_zz_zz_zz_zz_zz_zz_zz_zz_zz_zz_zz_zz_32_zz_zz_zz_zz_zz_zz_zz_zz_zz_zz_zz_zz_zz_zz_zz_48:
+; AVX1: # BB#0:
+; AVX1-NEXT: vpslldq {{.*#+}} xmm1 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,xmm0[0]
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
+; AVX1-NEXT: vpslldq {{.*#+}} xmm0 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,xmm0[0]
+; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: shuffle_v32i8_zz_zz_zz_zz_zz_zz_zz_zz_zz_zz_zz_zz_zz_zz_zz_32_zz_zz_zz_zz_zz_zz_zz_zz_zz_zz_zz_zz_zz_zz_zz_48:
+; AVX2: # BB#0:
+; AVX2-NEXT: vpslldq {{.*#+}} ymm0 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm0[0],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm0[16]
+; AVX2-NEXT: retq
+ %shuffle = shufflevector <32 x i8> zeroinitializer, <32 x i8> %a, <32 x i32> <i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 32, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 48>
+ ret <32 x i8> %shuffle
+}
+
+define <32 x i8> @shuffle_v32i8_47_zz_zz_zz_zz_zz_zz_zz_zz_zz_zz_zz_zz_zz_zz_zz_63_zz_zz_zz_zz_zz_zz_zz_zz_zz_zz_zz_zz_zz_zz_zz(<32 x i8> %a) {
+; AVX1-LABEL: shuffle_v32i8_47_zz_zz_zz_zz_zz_zz_zz_zz_zz_zz_zz_zz_zz_zz_zz_63_zz_zz_zz_zz_zz_zz_zz_zz_zz_zz_zz_zz_zz_zz_zz:
+; AVX1: # BB#0:
+; AVX1-NEXT: vpsrldq {{.*#+}} xmm1 = xmm0[15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
+; AVX1-NEXT: vpsrldq {{.*#+}} xmm0 = xmm0[15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: shuffle_v32i8_47_zz_zz_zz_zz_zz_zz_zz_zz_zz_zz_zz_zz_zz_zz_zz_63_zz_zz_zz_zz_zz_zz_zz_zz_zz_zz_zz_zz_zz_zz_zz:
+; AVX2: # BB#0:
+; AVX2-NEXT: vpsrldq {{.*#+}} ymm0 = ymm0[15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm0[31],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; AVX2-NEXT: retq
+ %shuffle = shufflevector <32 x i8> zeroinitializer, <32 x i8> %a, <32 x i32> <i32 47, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 63, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0>
+ ret <32 x i8> %shuffle
+}
+
+;
+; Shuffle to logical bit shifts
+;
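+; A sketch of the pattern these tests cover: shuffling <32 x i8> against
+; zeroinitializer so that each wider lane keeps its own bytes, moved by a
+; fixed byte count with zeros filling the vacated positions, is equivalent
+; to a logical shift of that lane. On little-endian x86, taking [zero, a0]
+; for an i16 lane w = a0 | (a1 << 8) yields exactly w << 8, so the first
+; shuffle below can lower to 'vpsllw $8' rather than a pshufb mask.
+; Roughly equivalent IR (splat operand abbreviated):
+;
+;   %w = bitcast <32 x i8> %a to <16 x i16>
+;   %s = shl <16 x i16> %w, <i16 8, i16 8, ..., i16 8>
+;   %r = bitcast <16 x i16> %s to <32 x i8>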
+
+define <32 x i8> @shuffle_v32i8_zz_00_zz_02_zz_04_zz_06_zz_08_zz_10_zz_12_zz_14_zz_16_zz_18_zz_20_zz_22_zz_24_zz_26_zz_28_zz_30(<32 x i8> %a) {
+; AVX1-LABEL: shuffle_v32i8_zz_00_zz_02_zz_04_zz_06_zz_08_zz_10_zz_12_zz_14_zz_16_zz_18_zz_20_zz_22_zz_24_zz_26_zz_28_zz_30:
+; AVX1: # BB#0:
+; AVX1-NEXT: vpsllw $8, %xmm0, %xmm1
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
+; AVX1-NEXT: vpsllw $8, %xmm0, %xmm0
+; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: shuffle_v32i8_zz_00_zz_02_zz_04_zz_06_zz_08_zz_10_zz_12_zz_14_zz_16_zz_18_zz_20_zz_22_zz_24_zz_26_zz_28_zz_30:
+; AVX2: # BB#0:
+; AVX2-NEXT: vpsllw $8, %ymm0, %ymm0
+; AVX2-NEXT: retq
+ %shuffle = shufflevector <32 x i8> %a, <32 x i8> zeroinitializer, <32 x i32> <i32 32, i32 0, i32 32, i32 2, i32 32, i32 4, i32 32, i32 6, i32 32, i32 8, i32 32, i32 10, i32 32, i32 12, i32 32, i32 14, i32 32, i32 16, i32 32, i32 18, i32 32, i32 20, i32 32, i32 22, i32 32, i32 24, i32 32, i32 26, i32 32, i32 28, i32 32, i32 30>
+ ret <32 x i8> %shuffle
+}
+
+define <32 x i8> @shuffle_v32i8_zz_zz_00_01_zz_zz_04_05_zz_zz_08_09_zz_zz_12_13_zz_zz_16_17_zz_zz_20_21_zz_zz_24_25_zz_zz_28_29(<32 x i8> %a) {
+; AVX1-LABEL: shuffle_v32i8_zz_zz_00_01_zz_zz_04_05_zz_zz_08_09_zz_zz_12_13_zz_zz_16_17_zz_zz_20_21_zz_zz_24_25_zz_zz_28_29:
+; AVX1: # BB#0:
+; AVX1-NEXT: vpslld $16, %xmm0, %xmm1
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
+; AVX1-NEXT: vpslld $16, %xmm0, %xmm0
+; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: shuffle_v32i8_zz_zz_00_01_zz_zz_04_05_zz_zz_08_09_zz_zz_12_13_zz_zz_16_17_zz_zz_20_21_zz_zz_24_25_zz_zz_28_29:
+; AVX2: # BB#0:
+; AVX2-NEXT: vpslld $16, %ymm0, %ymm0
+; AVX2-NEXT: retq
+ %shuffle = shufflevector <32 x i8> %a, <32 x i8> zeroinitializer, <32 x i32> <i32 32, i32 32, i32 0, i32 1, i32 32, i32 32, i32 4, i32 5, i32 32, i32 32, i32 8, i32 9, i32 32, i32 32, i32 12, i32 13, i32 32, i32 32, i32 16, i32 17, i32 32, i32 32, i32 20, i32 21, i32 32, i32 32, i32 24, i32 25, i32 32, i32 32, i32 28, i32 29>
+ ret <32 x i8> %shuffle
+}
+
+define <32 x i8> @shuffle_v32i8_zz_zz_zz_zz_zz_zz_00_01_zz_zz_zz_zz_zz_zz_08_09_zz_zz_zz_zz_zz_zz_16_17_zz_zz_zz_zz_zz_zz_24_25(<32 x i8> %a) {
+; AVX1-LABEL: shuffle_v32i8_zz_zz_zz_zz_zz_zz_00_01_zz_zz_zz_zz_zz_zz_08_09_zz_zz_zz_zz_zz_zz_16_17_zz_zz_zz_zz_zz_zz_24_25:
+; AVX1: # BB#0:
+; AVX1-NEXT: vpsllq $48, %xmm0, %xmm1
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
+; AVX1-NEXT: vpsllq $48, %xmm0, %xmm0
+; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: shuffle_v32i8_zz_zz_zz_zz_zz_zz_00_01_zz_zz_zz_zz_zz_zz_08_09_zz_zz_zz_zz_zz_zz_16_17_zz_zz_zz_zz_zz_zz_24_25:
+; AVX2: # BB#0:
+; AVX2-NEXT: vpsllq $48, %ymm0, %ymm0
+; AVX2-NEXT: retq
+ %shuffle = shufflevector <32 x i8> %a, <32 x i8> zeroinitializer, <32 x i32> <i32 32, i32 32, i32 32, i32 32, i32 32, i32 32, i32 0, i32 1, i32 32, i32 32, i32 32, i32 32, i32 32, i32 32, i32 8, i32 9, i32 32, i32 32, i32 32, i32 32, i32 32, i32 32, i32 16, i32 17, i32 32, i32 32, i32 32, i32 32, i32 32, i32 32, i32 24, i32 25>
+ ret <32 x i8> %shuffle
+}
+
+define <32 x i8> @shuffle_v32i8_01_zz_03_zz_05_zz_07_zz_09_zz_11_zz_13_zz_15_zz_17_zz_19_zz_21_zz_23_zz_25_zz_27_zz_29_zz_31_zz(<32 x i8> %a) {
+; AVX1-LABEL: shuffle_v32i8_01_zz_03_zz_05_zz_07_zz_09_zz_11_zz_13_zz_15_zz_17_zz_19_zz_21_zz_23_zz_25_zz_27_zz_29_zz_31_zz:
+; AVX1: # BB#0:
+; AVX1-NEXT: vpsrlw $8, %xmm0, %xmm1
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
+; AVX1-NEXT: vpsrlw $8, %xmm0, %xmm0
+; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: shuffle_v32i8_01_zz_03_zz_05_zz_07_zz_09_zz_11_zz_13_zz_15_zz_17_zz_19_zz_21_zz_23_zz_25_zz_27_zz_29_zz_31_zz:
+; AVX2: # BB#0:
+; AVX2-NEXT: vpsrlw $8, %ymm0, %ymm0
+; AVX2-NEXT: retq
+ %shuffle = shufflevector <32 x i8> %a, <32 x i8> zeroinitializer, <32 x i32> <i32 1, i32 32, i32 3, i32 32, i32 5, i32 32, i32 7, i32 32, i32 9, i32 32, i32 11, i32 32, i32 13, i32 32, i32 15, i32 32, i32 17, i32 32, i32 19, i32 32, i32 21, i32 32, i32 23, i32 32, i32 25, i32 32, i32 27, i32 32, i32 29, i32 32, i32 31, i32 32>
+ ret <32 x i8> %shuffle
+}
+
+define <32 x i8> @shuffle_v32i8_02_03_zz_zz_06_07_zz_zz_10_11_zz_zz_14_15_zz_zz_18_19_zz_zz_22_23_zz_zz_26_27_zz_zz_30_31_zz_zz(<32 x i8> %a) {
+; AVX1-LABEL: shuffle_v32i8_02_03_zz_zz_06_07_zz_zz_10_11_zz_zz_14_15_zz_zz_18_19_zz_zz_22_23_zz_zz_26_27_zz_zz_30_31_zz_zz:
+; AVX1: # BB#0:
+; AVX1-NEXT: vpsrld $16, %xmm0, %xmm1
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
+; AVX1-NEXT: vpsrld $16, %xmm0, %xmm0
+; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: shuffle_v32i8_02_03_zz_zz_06_07_zz_zz_10_11_zz_zz_14_15_zz_zz_18_19_zz_zz_22_23_zz_zz_26_27_zz_zz_30_31_zz_zz:
+; AVX2: # BB#0:
+; AVX2-NEXT: vpsrld $16, %ymm0, %ymm0
+; AVX2-NEXT: retq
+ %shuffle = shufflevector <32 x i8> %a, <32 x i8> zeroinitializer, <32 x i32> <i32 2, i32 3, i32 32, i32 32, i32 6, i32 7, i32 32, i32 32, i32 10, i32 11, i32 32, i32 32, i32 14, i32 15, i32 32, i32 32, i32 18, i32 19, i32 32, i32 32, i32 22, i32 23, i32 32, i32 32, i32 26, i32 27, i32 32, i32 32, i32 30, i32 31, i32 32, i32 32>
+ ret <32 x i8> %shuffle
+}
+
+define <32 x i8> @shuffle_v32i8_07_zz_zz_zz_zz_zz_zz_zz_15_zz_zz_zz_zz_zz_zz_zz_23_zz_zz_zz_zz_zz_zz_zz_31_zz_zz_zz_zz_zz_zz_zz(<32 x i8> %a) {
+; AVX1-LABEL: shuffle_v32i8_07_zz_zz_zz_zz_zz_zz_zz_15_zz_zz_zz_zz_zz_zz_zz_23_zz_zz_zz_zz_zz_zz_zz_31_zz_zz_zz_zz_zz_zz_zz:
+; AVX1: # BB#0:
+; AVX1-NEXT: vpsrlq $56, %xmm0, %xmm1
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
+; AVX1-NEXT: vpsrlq $56, %xmm0, %xmm0
+; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: shuffle_v32i8_07_zz_zz_zz_zz_zz_zz_zz_15_zz_zz_zz_zz_zz_zz_zz_23_zz_zz_zz_zz_zz_zz_zz_31_zz_zz_zz_zz_zz_zz_zz:
+; AVX2: # BB#0:
+; AVX2-NEXT: vpsrlq $56, %ymm0, %ymm0
+; AVX2-NEXT: retq
+ %shuffle = shufflevector <32 x i8> %a, <32 x i8> zeroinitializer, <32 x i32> <i32 7, i32 32, i32 32, i32 32, i32 32, i32 32, i32 32, i32 32, i32 15, i32 32, i32 32, i32 32, i32 32, i32 32, i32 32, i32 32, i32 23, i32 32, i32 32, i32 32, i32 32, i32 32, i32 32, i32 32, i32 31, i32 32, i32 32, i32 32, i32 32, i32 32, i32 32, i32 32>
+ ret <32 x i8> %shuffle
+}
+
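+;
+; Shuffle to zero extends
+;
+; A sketch of the pattern (indices illustrative): interleaving the low bytes
+; of one source with zeros at a 2-, 4-, or 8-byte stride matches what pmovzx
+; produces, so these shuffles can lower to vpmovzxbw/vpmovzxbd/vpmovzxbq
+; rather than explicit shuffle-and-blend sequences.
+;
+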
+define <32 x i8> @shuffle_v32i8_32_zz_zz_zz_zz_zz_zz_zz_33_zz_zz_zz_zz_zz_zz_zz_34_zz_zz_zz_zz_zz_zz_zz_35_zz_zz_zz_zz_zz_zz_zz(<32 x i8> %a) {
+; AVX1-LABEL: shuffle_v32i8_32_zz_zz_zz_zz_zz_zz_zz_33_zz_zz_zz_zz_zz_zz_zz_34_zz_zz_zz_zz_zz_zz_zz_35_zz_zz_zz_zz_zz_zz_zz:
+; AVX1: # BB#0:
+; AVX1-NEXT: vpmovzxbq {{.*#+}} xmm1 = xmm0[0],zero,zero,zero,zero,zero,zero,zero,xmm0[1],zero,zero,zero,zero,zero,zero,zero
+; AVX1-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[2],zero,zero,zero,zero,zero,zero,zero,xmm0[3],zero,zero,zero,zero,zero,zero,zero
+; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: shuffle_v32i8_32_zz_zz_zz_zz_zz_zz_zz_33_zz_zz_zz_zz_zz_zz_zz_34_zz_zz_zz_zz_zz_zz_zz_35_zz_zz_zz_zz_zz_zz_zz:
+; AVX2: # BB#0:
+; AVX2-NEXT: vpmovzxbq {{.*#+}} ymm0 = xmm0[0],zero,zero,zero,zero,zero,zero,zero,xmm0[1],zero,zero,zero,zero,zero,zero,zero,xmm0[2],zero,zero,zero,zero,zero,zero,zero,xmm0[3],zero,zero,zero,zero,zero,zero,zero
+; AVX2-NEXT: retq
+ %shuffle = shufflevector <32 x i8> zeroinitializer, <32 x i8> %a, <32 x i32> <i32 32, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 33, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 34, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 35, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0>
+ ret <32 x i8> %shuffle
+}
+
+define <32 x i8> @shuffle_v32i8_32_zz_zz_zz_33_zz_zz_zz_34_zz_zz_zz_35_zz_zz_zz_36_zz_zz_zz_37_zz_zz_zz_38_zz_zz_zz_39_zz_zz_zz(<32 x i8> %a) {
+; AVX1-LABEL: shuffle_v32i8_32_zz_zz_zz_33_zz_zz_zz_34_zz_zz_zz_35_zz_zz_zz_36_zz_zz_zz_37_zz_zz_zz_38_zz_zz_zz_39_zz_zz_zz:
+; AVX1: # BB#0:
+; AVX1-NEXT: vpmovzxbd {{.*#+}} xmm1 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
+; AVX1-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero
+; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: shuffle_v32i8_32_zz_zz_zz_33_zz_zz_zz_34_zz_zz_zz_35_zz_zz_zz_36_zz_zz_zz_37_zz_zz_zz_38_zz_zz_zz_39_zz_zz_zz:
+; AVX2: # BB#0:
+; AVX2-NEXT: vpmovzxbd {{.*#+}} ymm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero
+; AVX2-NEXT: retq
+ %shuffle = shufflevector <32 x i8> zeroinitializer, <32 x i8> %a, <32 x i32> <i32 32, i32 0, i32 0, i32 0, i32 33, i32 0, i32 0, i32 0, i32 34, i32 0, i32 0, i32 0, i32 35, i32 0, i32 0, i32 0, i32 36, i32 0, i32 0, i32 0, i32 37, i32 0, i32 0, i32 0, i32 38, i32 0, i32 0, i32 0, i32 39, i32 0, i32 0, i32 0>
+ ret <32 x i8> %shuffle
+}
+
+define <32 x i8> @shuffle_v32i8_32_zz_33_zz_34_zz_35_zz_36_zz_37_zz_38_zz_39_zz_40_zz_41_zz_42_zz_43_zz_44_zz_45_zz_46_zz_47_zz(<32 x i8> %a) {
+; AVX1-LABEL: shuffle_v32i8_32_zz_33_zz_34_zz_35_zz_36_zz_37_zz_38_zz_39_zz_40_zz_41_zz_42_zz_43_zz_44_zz_45_zz_46_zz_47_zz:
+; AVX1: # BB#0:
+; AVX1-NEXT: vpmovzxbw {{.*#+}} xmm1 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
+; AVX1-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero
+; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: shuffle_v32i8_32_zz_33_zz_34_zz_35_zz_36_zz_37_zz_38_zz_39_zz_40_zz_41_zz_42_zz_43_zz_44_zz_45_zz_46_zz_47_zz:
+; AVX2: # BB#0:
+; AVX2-NEXT: vpmovzxbw {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero
+; AVX2-NEXT: retq
+ %shuffle = shufflevector <32 x i8> zeroinitializer, <32 x i8> %a, <32 x i32> <i32 32, i32 0, i32 33, i32 0, i32 34, i32 0, i32 35, i32 0, i32 36, i32 0, i32 37, i32 0, i32 38, i32 0, i32 39, i32 0, i32 40, i32 0, i32 41, i32 0, i32 42, i32 0, i32 43, i32 0, i32 44, i32 0, i32 45, i32 0, i32 46, i32 0, i32 47, i32 0>
+ ret <32 x i8> %shuffle
+}
+
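+;
+; Shuffle to byte rotates
+;
+; A sketch of the pattern (indices illustrative): when each 128-bit half of
+; the result is a single contiguous, possibly wrapping, run of source bytes
+; (e.g. [b15, a0..a14] in the low half and [b31, a16..a30] in the high
+; half), the shuffle is a per-lane byte alignment and can lower to vpalignr:
+; per-xmm on AVX1 and per-128-bit-lane on an AVX2 ymm.
+;
+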
+define <32 x i8> @shuffle_v32i8_47_00_01_02_03_04_05_06_07_08_09_10_11_12_13_14_63_16_17_18_19_20_21_22_23_24_25_26_27_28_29_30(<32 x i8> %a, <32 x i8> %b) {
+; AVX1-LABEL: shuffle_v32i8_47_00_01_02_03_04_05_06_07_08_09_10_11_12_13_14_63_16_17_18_19_20_21_22_23_24_25_26_27_28_29_30:
+; AVX1: # BB#0:
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
+; AVX1-NEXT: vpalignr {{.*#+}} xmm2 = xmm2[15],xmm3[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14]
+; AVX1-NEXT: vpalignr {{.*#+}} xmm0 = xmm1[15],xmm0[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14]
+; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: shuffle_v32i8_47_00_01_02_03_04_05_06_07_08_09_10_11_12_13_14_63_16_17_18_19_20_21_22_23_24_25_26_27_28_29_30:
+; AVX2: # BB#0:
+; AVX2-NEXT: vpalignr {{.*#+}} ymm0 = ymm1[15],ymm0[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14],ymm1[31],ymm0[16,17,18,19,20,21,22,23,24,25,26,27,28,29,30]
+; AVX2-NEXT: retq
+ %shuffle = shufflevector <32 x i8> %a, <32 x i8> %b, <32 x i32> <i32 47, i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 63, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30>
+ ret <32 x i8> %shuffle
+}
+
+define <32 x i8> @shuffle_v32i8_uu_00_01_02_03_04_05_06_07_08_09_10_11_12_13_14_63_16_17_18_19_20_21_22_23_24_25_26_27_28_29_30(<32 x i8> %a, <32 x i8> %b) {
+; AVX1-LABEL: shuffle_v32i8_uu_00_01_02_03_04_05_06_07_08_09_10_11_12_13_14_63_16_17_18_19_20_21_22_23_24_25_26_27_28_29_30:
+; AVX1: # BB#0:
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm1
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
+; AVX1-NEXT: vpalignr {{.*#+}} xmm1 = xmm1[15],xmm2[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14]
+; AVX1-NEXT: vpslldq {{.*#+}} xmm0 = zero,xmm0[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14]
+; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: shuffle_v32i8_uu_00_01_02_03_04_05_06_07_08_09_10_11_12_13_14_63_16_17_18_19_20_21_22_23_24_25_26_27_28_29_30:
+; AVX2: # BB#0:
+; AVX2-NEXT: vpalignr {{.*#+}} ymm0 = ymm1[15],ymm0[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14],ymm1[31],ymm0[16,17,18,19,20,21,22,23,24,25,26,27,28,29,30]
+; AVX2-NEXT: retq
+ %shuffle = shufflevector <32 x i8> %a, <32 x i8> %b, <32 x i32> <i32 undef, i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 63, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30>
+ ret <32 x i8> %shuffle
+}
+
+define <32 x i8> @shuffle_v32i8_47_00_01_02_03_04_05_06_07_08_09_10_11_12_13_14_uu_16_17_18_19_20_21_22_23_24_25_26_27_28_29_30(<32 x i8> %a, <32 x i8> %b) {
+; AVX1-LABEL: shuffle_v32i8_47_00_01_02_03_04_05_06_07_08_09_10_11_12_13_14_uu_16_17_18_19_20_21_22_23_24_25_26_27_28_29_30:
+; AVX1: # BB#0:
+; AVX1-NEXT: vpalignr {{.*#+}} xmm1 = xmm1[15],xmm0[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14]
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
+; AVX1-NEXT: vpslldq {{.*#+}} xmm0 = zero,xmm0[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14]
+; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: shuffle_v32i8_47_00_01_02_03_04_05_06_07_08_09_10_11_12_13_14_uu_16_17_18_19_20_21_22_23_24_25_26_27_28_29_30:
+; AVX2: # BB#0:
+; AVX2-NEXT: vpalignr {{.*#+}} ymm0 = ymm1[15],ymm0[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14],ymm1[31],ymm0[16,17,18,19,20,21,22,23,24,25,26,27,28,29,30]
+; AVX2-NEXT: retq
+ %shuffle = shufflevector <32 x i8> %a, <32 x i8> %b, <32 x i32> <i32 47, i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 undef, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30>
+ ret <32 x i8> %shuffle
+}
+
+define <32 x i8> @shuffle_v32i8_uu_00_01_02_03_04_05_06_07_08_09_10_11_12_13_14_63_uu_uu_uu_uu_uu_uu_uu_uu_uu_uu_uu_uu_uu_uu_uu(<32 x i8> %a, <32 x i8> %b) {
+; AVX1-LABEL: shuffle_v32i8_uu_00_01_02_03_04_05_06_07_08_09_10_11_12_13_14_63_uu_uu_uu_uu_uu_uu_uu_uu_uu_uu_uu_uu_uu_uu_uu:
+; AVX1: # BB#0:
+; AVX1-NEXT: vpslldq {{.*#+}} xmm0 = zero,xmm0[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14]
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm1
+; AVX1-NEXT: vpsrldq {{.*#+}} xmm1 = xmm1[15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: shuffle_v32i8_uu_00_01_02_03_04_05_06_07_08_09_10_11_12_13_14_63_uu_uu_uu_uu_uu_uu_uu_uu_uu_uu_uu_uu_uu_uu_uu:
+; AVX2: # BB#0:
+; AVX2-NEXT: vpalignr {{.*#+}} ymm0 = ymm1[15],ymm0[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14],ymm1[31],ymm0[16,17,18,19,20,21,22,23,24,25,26,27,28,29,30]
+; AVX2-NEXT: retq
+ %shuffle = shufflevector <32 x i8> %a, <32 x i8> %b, <32 x i32> <i32 undef, i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 63, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+ ret <32 x i8> %shuffle
+}
+
+define <32 x i8> @shuffle_v32i8_uu_uu_uu_uu_uu_uu_uu_uu_uu_uu_uu_uu_uu_uu_uu_uu_63_16_17_18_19_20_21_22_23_24_25_26_27_28_29_30(<32 x i8> %a, <32 x i8> %b) {
+; AVX1-LABEL: shuffle_v32i8_uu_uu_uu_uu_uu_uu_uu_uu_uu_uu_uu_uu_uu_uu_uu_uu_63_16_17_18_19_20_21_22_23_24_25_26_27_28_29_30:
+; AVX1: # BB#0:
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm1
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
+; AVX1-NEXT: vpalignr {{.*#+}} xmm0 = xmm1[15],xmm0[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14]
+; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: shuffle_v32i8_uu_uu_uu_uu_uu_uu_uu_uu_uu_uu_uu_uu_uu_uu_uu_uu_63_16_17_18_19_20_21_22_23_24_25_26_27_28_29_30:
+; AVX2: # BB#0:
+; AVX2-NEXT: vpalignr {{.*#+}} ymm0 = ymm1[15],ymm0[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14],ymm1[31],ymm0[16,17,18,19,20,21,22,23,24,25,26,27,28,29,30]
+; AVX2-NEXT: retq
+ %shuffle = shufflevector <32 x i8> %a, <32 x i8> %b, <32 x i32> <i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 63, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30>
+ ret <32 x i8> %shuffle
+}
+
+define <32 x i8> @shuffle_v32i8_01_02_03_04_05_06_07_08_09_10_11_12_13_14_15_32_17_18_19_20_21_22_23_24_25_26_27_28_29_30_31_48(<32 x i8> %a, <32 x i8> %b) {
+; AVX1-LABEL: shuffle_v32i8_01_02_03_04_05_06_07_08_09_10_11_12_13_14_15_32_17_18_19_20_21_22_23_24_25_26_27_28_29_30_31_48:
+; AVX1: # BB#0:
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm3
+; AVX1-NEXT: vpalignr {{.*#+}} xmm2 = xmm2[1,2,3,4,5,6,7,8,9,10,11,12,13,14,15],xmm3[0]
+; AVX1-NEXT: vpalignr {{.*#+}} xmm0 = xmm0[1,2,3,4,5,6,7,8,9,10,11,12,13,14,15],xmm1[0]
+; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: shuffle_v32i8_01_02_03_04_05_06_07_08_09_10_11_12_13_14_15_32_17_18_19_20_21_22_23_24_25_26_27_28_29_30_31_48:
+; AVX2: # BB#0:
+; AVX2-NEXT: vpalignr {{.*#+}} ymm0 = ymm0[1,2,3,4,5,6,7,8,9,10,11,12,13,14,15],ymm1[0],ymm0[17,18,19,20,21,22,23,24,25,26,27,28,29,30,31],ymm1[16]
+; AVX2-NEXT: retq
+ %shuffle = shufflevector <32 x i8> %a, <32 x i8> %b, <32 x i32> <i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 32, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 48>
+ ret <32 x i8> %shuffle
+}
+
+define <32 x i8> @shuffle_v32i8_33_34_35_36_37_38_39_40_41_42_43_44_45_46_47_00_49_50_51_52_53_54_55_56_57_58_59_60_61_62_63_16(<32 x i8> %a, <32 x i8> %b) {
+; AVX1-LABEL: shuffle_v32i8_33_34_35_36_37_38_39_40_41_42_43_44_45_46_47_00_49_50_51_52_53_54_55_56_57_58_59_60_61_62_63_16:
+; AVX1: # BB#0:
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
+; AVX1-NEXT: vpalignr {{.*#+}} xmm2 = xmm2[1,2,3,4,5,6,7,8,9,10,11,12,13,14,15],xmm3[0]
+; AVX1-NEXT: vpalignr {{.*#+}} xmm0 = xmm1[1,2,3,4,5,6,7,8,9,10,11,12,13,14,15],xmm0[0]
+; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: shuffle_v32i8_33_34_35_36_37_38_39_40_41_42_43_44_45_46_47_00_49_50_51_52_53_54_55_56_57_58_59_60_61_62_63_16:
+; AVX2: # BB#0:
+; AVX2-NEXT: vpalignr {{.*#+}} ymm0 = ymm1[1,2,3,4,5,6,7,8,9,10,11,12,13,14,15],ymm0[0],ymm1[17,18,19,20,21,22,23,24,25,26,27,28,29,30,31],ymm0[16]
+; AVX2-NEXT: retq
+ %shuffle = shufflevector <32 x i8> %a, <32 x i8> %b, <32 x i32> <i32 33, i32 34, i32 35, i32 36, i32 37, i32 38, i32 39, i32 40, i32 41, i32 42, i32 43, i32 44, i32 45, i32 46, i32 47, i32 00, i32 49, i32 50, i32 51, i32 52, i32 53, i32 54, i32 55, i32 56, i32 57, i32 58, i32 59, i32 60, i32 61, i32 62, i32 63, i32 16>
+ ret <32 x i8> %shuffle
+}
+
+define <32 x i8> @shuffle_v32i8_15_32_33_34_35_36_37_38_39_40_41_42_43_44_45_46_31_48_49_50_51_52_53_54_55_56_57_58_59_60_61_62(<32 x i8> %a, <32 x i8> %b) {
+; AVX1-LABEL: shuffle_v32i8_15_32_33_34_35_36_37_38_39_40_41_42_43_44_45_46_31_48_49_50_51_52_53_54_55_56_57_58_59_60_61_62:
+; AVX1: # BB#0:
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm3
+; AVX1-NEXT: vpalignr {{.*#+}} xmm2 = xmm2[15],xmm3[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14]
+; AVX1-NEXT: vpalignr {{.*#+}} xmm0 = xmm0[15],xmm1[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14]
+; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: shuffle_v32i8_15_32_33_34_35_36_37_38_39_40_41_42_43_44_45_46_31_48_49_50_51_52_53_54_55_56_57_58_59_60_61_62:
+; AVX2: # BB#0:
+; AVX2-NEXT: vpalignr {{.*#+}} ymm0 = ymm0[15],ymm1[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14],ymm0[31],ymm1[16,17,18,19,20,21,22,23,24,25,26,27,28,29,30]
+; AVX2-NEXT: retq
+ %shuffle = shufflevector <32 x i8> %a, <32 x i8> %b, <32 x i32> <i32 15, i32 32, i32 33, i32 34, i32 35, i32 36, i32 37, i32 38, i32 39, i32 40, i32 41, i32 42, i32 43, i32 44, i32 45, i32 46, i32 31, i32 48, i32 49, i32 50, i32 51, i32 52, i32 53, i32 54, i32 55, i32 56, i32 57, i32 58, i32 59, i32 60, i32 61, i32 62>
+ ret <32 x i8> %shuffle
+}
+
+define <32 x i8> @shuffle_v32i8_01_02_03_04_05_06_07_08_09_10_11_12_13_14_15_00_17_18_19_20_21_22_23_24_25_26_27_28_29_30_31_16(<32 x i8> %a, <32 x i8> %b) {
+; AVX1-LABEL: shuffle_v32i8_01_02_03_04_05_06_07_08_09_10_11_12_13_14_15_00_17_18_19_20_21_22_23_24_25_26_27_28_29_30_31_16:
+; AVX1: # BB#0:
+; AVX1-NEXT: vpalignr {{.*#+}} xmm1 = xmm0[1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,0]
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
+; AVX1-NEXT: vpalignr {{.*#+}} xmm0 = xmm0[1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,0]
+; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: shuffle_v32i8_01_02_03_04_05_06_07_08_09_10_11_12_13_14_15_00_17_18_19_20_21_22_23_24_25_26_27_28_29_30_31_16:
+; AVX2: # BB#0:
+; AVX2-NEXT: vpalignr {{.*#+}} ymm0 = ymm0[1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,0,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,16]
+; AVX2-NEXT: retq
+ %shuffle = shufflevector <32 x i8> %a, <32 x i8> %b, <32 x i32> <i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 0, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 16>
+ ret <32 x i8> %shuffle
+}
+
+define <32 x i8> @shuffle_v32i8_15_00_01_02_03_04_05_06_07_08_09_10_11_12_13_14_31_16_17_18_19_20_21_22_23_24_25_26_27_28_29_30(<32 x i8> %a, <32 x i8> %b) {
+; AVX1-LABEL: shuffle_v32i8_15_00_01_02_03_04_05_06_07_08_09_10_11_12_13_14_31_16_17_18_19_20_21_22_23_24_25_26_27_28_29_30:
+; AVX1: # BB#0:
+; AVX1-NEXT: vpalignr {{.*#+}} xmm1 = xmm0[15,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14]
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
+; AVX1-NEXT: vpalignr {{.*#+}} xmm0 = xmm0[15,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14]
+; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: shuffle_v32i8_15_00_01_02_03_04_05_06_07_08_09_10_11_12_13_14_31_16_17_18_19_20_21_22_23_24_25_26_27_28_29_30:
+; AVX2: # BB#0:
+; AVX2-NEXT: vpalignr {{.*#+}} ymm0 = ymm0[15,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,31,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30]
+; AVX2-NEXT: retq
+ %shuffle = shufflevector <32 x i8> %a, <32 x i8> %b, <32 x i32> <i32 15, i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 31, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30>
+ ret <32 x i8> %shuffle
+}
diff --git a/test/CodeGen/X86/vector-shuffle-256-v4.ll b/test/CodeGen/X86/vector-shuffle-256-v4.ll
index 0bd1bd9..3d6ada6 100644
--- a/test/CodeGen/X86/vector-shuffle-256-v4.ll
+++ b/test/CodeGen/X86/vector-shuffle-256-v4.ll
@@ -1,12 +1,12 @@
-; RUN: llc < %s -mcpu=x86-64 -mattr=+avx -x86-experimental-vector-shuffle-lowering | FileCheck %s --check-prefix=ALL --check-prefix=AVX --check-prefix=AVX1
-; RUN: llc < %s -mcpu=x86-64 -mattr=+avx2 -x86-experimental-vector-shuffle-lowering | FileCheck %s --check-prefix=ALL --check-prefix=AVX --check-prefix=AVX2
+; RUN: llc < %s -mcpu=x86-64 -mattr=+avx | FileCheck %s --check-prefix=ALL --check-prefix=AVX --check-prefix=AVX1
+; RUN: llc < %s -mcpu=x86-64 -mattr=+avx2 | FileCheck %s --check-prefix=ALL --check-prefix=AVX --check-prefix=AVX2
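+; These RUN lines exercise the default shuffle lowering path; the
+; -x86-experimental-vector-shuffle-lowering flag is no longer passed, as the
+; new vector shuffle lowering appears to be on by default at this revision.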
target triple = "x86_64-unknown-unknown"
define <4 x double> @shuffle_v4f64_0000(<4 x double> %a, <4 x double> %b) {
; AVX1-LABEL: shuffle_v4f64_0000:
; AVX1: # BB#0:
-; AVX1-NEXT: vunpcklpd {{.*#+}} xmm0 = xmm0[0,0]
+; AVX1-NEXT: vmovddup {{.*#+}} xmm0 = xmm0[0,0]
; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; AVX1-NEXT: retq
;
@@ -21,7 +21,7 @@ define <4 x double> @shuffle_v4f64_0000(<4 x double> %a, <4 x double> %b) {
define <4 x double> @shuffle_v4f64_0001(<4 x double> %a, <4 x double> %b) {
; AVX1-LABEL: shuffle_v4f64_0001:
; AVX1: # BB#0:
-; AVX1-NEXT: vunpcklpd {{.*#+}} xmm1 = xmm0[0,0]
+; AVX1-NEXT: vmovddup {{.*#+}} xmm1 = xmm0[0,0]
; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
; AVX1-NEXT: retq
;
@@ -38,7 +38,7 @@ define <4 x double> @shuffle_v4f64_0020(<4 x double> %a, <4 x double> %b) {
; AVX1: # BB#0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX1-NEXT: vunpcklpd {{.*#+}} xmm1 = xmm1[0],xmm0[0]
-; AVX1-NEXT: vunpcklpd {{.*#+}} xmm0 = xmm0[0,0]
+; AVX1-NEXT: vmovddup {{.*#+}} xmm0 = xmm0[0,0]
; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; AVX1-NEXT: retq
;
@@ -70,7 +70,7 @@ define <4 x double> @shuffle_v4f64_1000(<4 x double> %a, <4 x double> %b) {
; AVX1-LABEL: shuffle_v4f64_1000:
; AVX1: # BB#0:
; AVX1-NEXT: vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
-; AVX1-NEXT: vunpcklpd {{.*#+}} xmm0 = xmm0[0,0]
+; AVX1-NEXT: vmovddup {{.*#+}} xmm0 = xmm0[0,0]
; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
; AVX1-NEXT: retq
;
@@ -86,7 +86,7 @@ define <4 x double> @shuffle_v4f64_2200(<4 x double> %a, <4 x double> %b) {
; AVX1-LABEL: shuffle_v4f64_2200:
; AVX1: # BB#0:
; AVX1-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[2,3,0,1]
-; AVX1-NEXT: vpermilpd {{.*#+}} ymm0 = ymm0[0,0,2,2]
+; AVX1-NEXT: vmovddup {{.*#+}} ymm0 = ymm0[0,0,2,2]
; AVX1-NEXT: retq
;
; AVX2-LABEL: shuffle_v4f64_2200:
@@ -101,9 +101,8 @@ define <4 x double> @shuffle_v4f64_3330(<4 x double> %a, <4 x double> %b) {
; AVX1-LABEL: shuffle_v4f64_3330:
; AVX1: # BB#0:
; AVX1-NEXT: vperm2f128 {{.*#+}} ymm1 = ymm0[2,3,0,1]
-; AVX1-NEXT: vpermilpd {{.*#+}} ymm1 = ymm1[1,1,2,2]
-; AVX1-NEXT: vpermilpd {{.*#+}} ymm0 = ymm0[0,0,3,2]
-; AVX1-NEXT: vblendpd {{.*#+}} ymm0 = ymm1[0,1],ymm0[2],ymm1[3]
+; AVX1-NEXT: vblendpd {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3]
+; AVX1-NEXT: vpermilpd {{.*#+}} ymm0 = ymm0[1,1,3,2]
; AVX1-NEXT: retq
;
; AVX2-LABEL: shuffle_v4f64_3330:
@@ -141,7 +140,7 @@ define <4 x double> @shuffle_v4f64_0023(<4 x double> %a, <4 x double> %b) {
define <4 x double> @shuffle_v4f64_0022(<4 x double> %a, <4 x double> %b) {
; ALL-LABEL: shuffle_v4f64_0022:
; ALL: # BB#0:
-; ALL-NEXT: vpermilpd {{.*#+}} ymm0 = ymm0[0,0,2,2]
+; ALL-NEXT: vmovddup {{.*#+}} ymm0 = ymm0[0,0,2,2]
; ALL-NEXT: retq
%shuffle = shufflevector <4 x double> %a, <4 x double> %b, <4 x i32> <i32 0, i32 0, i32 2, i32 2>
ret <4 x double> %shuffle
@@ -186,7 +185,7 @@ define <4 x double> @shuffle_v4f64_1022(<4 x double> %a, <4 x double> %b) {
define <4 x double> @shuffle_v4f64_0423(<4 x double> %a, <4 x double> %b) {
; AVX1-LABEL: shuffle_v4f64_0423:
; AVX1: # BB#0:
-; AVX1-NEXT: vpermilpd {{.*#+}} ymm1 = ymm1[0,0,2,2]
+; AVX1-NEXT: vmovddup {{.*#+}} ymm1 = ymm1[0,0,2,2]
; AVX1-NEXT: vblendpd {{.*#+}} ymm0 = ymm0[0],ymm1[1],ymm0[2,3]
; AVX1-NEXT: retq
;
@@ -202,8 +201,8 @@ define <4 x double> @shuffle_v4f64_0423(<4 x double> %a, <4 x double> %b) {
define <4 x double> @shuffle_v4f64_0462(<4 x double> %a, <4 x double> %b) {
; ALL-LABEL: shuffle_v4f64_0462:
; ALL: # BB#0:
-; ALL-NEXT: vpermilpd {{.*#+}} ymm1 = ymm1[0,0,2,2]
-; ALL-NEXT: vpermilpd {{.*#+}} ymm0 = ymm0[0,0,2,2]
+; ALL-NEXT: vmovddup {{.*#+}} ymm1 = ymm1[0,0,2,2]
+; ALL-NEXT: vmovddup {{.*#+}} ymm0 = ymm0[0,0,2,2]
; ALL-NEXT: vblendpd {{.*#+}} ymm0 = ymm0[0],ymm1[1,2],ymm0[3]
; ALL-NEXT: retq
%shuffle = shufflevector <4 x double> %a, <4 x double> %b, <4 x i32> <i32 0, i32 4, i32 6, i32 2>
@@ -300,10 +299,77 @@ define <4 x double> @shuffle_v4f64_0167(<4 x double> %a, <4 x double> %b) {
ret <4 x double> %shuffle
}
+define <4 x double> @shuffle_v4f64_1054(<4 x double> %a, <4 x double> %b) {
+; ALL-LABEL: shuffle_v4f64_1054:
+; ALL: # BB#0:
+; ALL-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; ALL-NEXT: vpermilpd {{.*#+}} ymm0 = ymm0[1,0,3,2]
+; ALL-NEXT: retq
+ %shuffle = shufflevector <4 x double> %a, <4 x double> %b, <4 x i32> <i32 1, i32 0, i32 5, i32 4>
+ ret <4 x double> %shuffle
+}
+
+define <4 x double> @shuffle_v4f64_3254(<4 x double> %a, <4 x double> %b) {
+; ALL-LABEL: shuffle_v4f64_3254:
+; ALL: # BB#0:
+; ALL-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[2,3],ymm1[0,1]
+; ALL-NEXT: vpermilpd {{.*#+}} ymm0 = ymm0[1,0,3,2]
+; ALL-NEXT: retq
+ %shuffle = shufflevector <4 x double> %a, <4 x double> %b, <4 x i32> <i32 3, i32 2, i32 5, i32 4>
+ ret <4 x double> %shuffle
+}
+
+define <4 x double> @shuffle_v4f64_3276(<4 x double> %a, <4 x double> %b) {
+; ALL-LABEL: shuffle_v4f64_3276:
+; ALL: # BB#0:
+; ALL-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[2,3],ymm1[2,3]
+; ALL-NEXT: vpermilpd {{.*#+}} ymm0 = ymm0[1,0,3,2]
+; ALL-NEXT: retq
+ %shuffle = shufflevector <4 x double> %a, <4 x double> %b, <4 x i32> <i32 3, i32 2, i32 7, i32 6>
+ ret <4 x double> %shuffle
+}
+
+define <4 x double> @shuffle_v4f64_1076(<4 x double> %a, <4 x double> %b) {
+; ALL-LABEL: shuffle_v4f64_1076:
+; ALL: # BB#0:
+; ALL-NEXT: vblendpd {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3]
+; ALL-NEXT: vpermilpd {{.*#+}} ymm0 = ymm0[1,0,3,2]
+; ALL-NEXT: retq
+ %shuffle = shufflevector <4 x double> %a, <4 x double> %b, <4 x i32> <i32 1, i32 0, i32 7, i32 6>
+ ret <4 x double> %shuffle
+}
+
+define <4 x double> @shuffle_v4f64_0415(<4 x double> %a, <4 x double> %b) {
+; AVX1-LABEL: shuffle_v4f64_0415:
+; AVX1: # BB#0:
+; AVX1-NEXT: vunpckhpd {{.*#+}} xmm2 = xmm0[1],xmm1[1]
+; AVX1-NEXT: vunpcklpd {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: shuffle_v4f64_0415:
+; AVX2: # BB#0:
+; AVX2-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[0,0,2,1]
+; AVX2-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[0,1,1,3]
+; AVX2-NEXT: vblendpd {{.*#+}} ymm0 = ymm0[0],ymm1[1],ymm0[2],ymm1[3]
+; AVX2-NEXT: retq
+ %shuffle = shufflevector <4 x double> %a, <4 x double> %b, <4 x i32> <i32 0, i32 4, i32 1, i32 5>
+ ret <4 x double> %shuffle
+}
+
+define <4 x double> @shuffle_v4f64_u062(<4 x double> %a, <4 x double> %b) {
+; ALL-LABEL: shuffle_v4f64_u062:
+; ALL: # BB#0:
+; ALL-NEXT: vunpcklpd {{.*#+}} ymm0 = ymm1[0],ymm0[0],ymm1[2],ymm0[2]
+; ALL-NEXT: retq
+ %shuffle = shufflevector <4 x double> %a, <4 x double> %b, <4 x i32> <i32 undef, i32 0, i32 6, i32 2>
+ ret <4 x double> %shuffle
+}
+
define <4 x i64> @shuffle_v4i64_0000(<4 x i64> %a, <4 x i64> %b) {
; AVX1-LABEL: shuffle_v4i64_0000:
; AVX1: # BB#0:
-; AVX1-NEXT: vunpcklpd {{.*#+}} xmm0 = xmm0[0,0]
+; AVX1-NEXT: vmovddup {{.*#+}} xmm0 = xmm0[0,0]
; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; AVX1-NEXT: retq
;
@@ -318,7 +384,7 @@ define <4 x i64> @shuffle_v4i64_0000(<4 x i64> %a, <4 x i64> %b) {
define <4 x i64> @shuffle_v4i64_0001(<4 x i64> %a, <4 x i64> %b) {
; AVX1-LABEL: shuffle_v4i64_0001:
; AVX1: # BB#0:
-; AVX1-NEXT: vunpcklpd {{.*#+}} xmm1 = xmm0[0,0]
+; AVX1-NEXT: vmovddup {{.*#+}} xmm1 = xmm0[0,0]
; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
; AVX1-NEXT: retq
;
@@ -335,7 +401,7 @@ define <4 x i64> @shuffle_v4i64_0020(<4 x i64> %a, <4 x i64> %b) {
; AVX1: # BB#0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX1-NEXT: vunpcklpd {{.*#+}} xmm1 = xmm1[0],xmm0[0]
-; AVX1-NEXT: vunpcklpd {{.*#+}} xmm0 = xmm0[0,0]
+; AVX1-NEXT: vmovddup {{.*#+}} xmm0 = xmm0[0,0]
; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; AVX1-NEXT: retq
;
@@ -383,7 +449,7 @@ define <4 x i64> @shuffle_v4i64_1000(<4 x i64> %a, <4 x i64> %b) {
; AVX1-LABEL: shuffle_v4i64_1000:
; AVX1: # BB#0:
; AVX1-NEXT: vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
-; AVX1-NEXT: vunpcklpd {{.*#+}} xmm0 = xmm0[0,0]
+; AVX1-NEXT: vmovddup {{.*#+}} xmm0 = xmm0[0,0]
; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
; AVX1-NEXT: retq
;
@@ -399,7 +465,7 @@ define <4 x i64> @shuffle_v4i64_2200(<4 x i64> %a, <4 x i64> %b) {
; AVX1-LABEL: shuffle_v4i64_2200:
; AVX1: # BB#0:
; AVX1-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[2,3,0,1]
-; AVX1-NEXT: vpermilpd {{.*#+}} ymm0 = ymm0[0,0,2,2]
+; AVX1-NEXT: vmovddup {{.*#+}} ymm0 = ymm0[0,0,2,2]
; AVX1-NEXT: retq
;
; AVX2-LABEL: shuffle_v4i64_2200:
@@ -414,9 +480,8 @@ define <4 x i64> @shuffle_v4i64_3330(<4 x i64> %a, <4 x i64> %b) {
; AVX1-LABEL: shuffle_v4i64_3330:
; AVX1: # BB#0:
; AVX1-NEXT: vperm2f128 {{.*#+}} ymm1 = ymm0[2,3,0,1]
-; AVX1-NEXT: vpermilpd {{.*#+}} ymm1 = ymm1[1,1,2,2]
-; AVX1-NEXT: vpermilpd {{.*#+}} ymm0 = ymm0[0,0,3,2]
-; AVX1-NEXT: vblendpd {{.*#+}} ymm0 = ymm1[0,1],ymm0[2],ymm1[3]
+; AVX1-NEXT: vblendpd {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3]
+; AVX1-NEXT: vpermilpd {{.*#+}} ymm0 = ymm0[1,1,3,2]
; AVX1-NEXT: retq
;
; AVX2-LABEL: shuffle_v4i64_3330:
@@ -445,7 +510,7 @@ define <4 x i64> @shuffle_v4i64_3210(<4 x i64> %a, <4 x i64> %b) {
define <4 x i64> @shuffle_v4i64_0124(<4 x i64> %a, <4 x i64> %b) {
; AVX1-LABEL: shuffle_v4i64_0124:
; AVX1: # BB#0:
-; AVX1-NEXT: vunpcklpd {{.*#+}} xmm1 = xmm1[0,0]
+; AVX1-NEXT: vmovddup {{.*#+}} xmm1 = xmm1[0,0]
; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm1
; AVX1-NEXT: vblendpd {{.*#+}} ymm0 = ymm0[0,1,2],ymm1[3]
; AVX1-NEXT: retq
@@ -483,7 +548,7 @@ define <4 x i64> @shuffle_v4i64_0412(<4 x i64> %a, <4 x i64> %b) {
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
; AVX1-NEXT: vshufpd {{.*#+}} xmm2 = xmm0[1],xmm2[0]
; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
-; AVX1-NEXT: vpermilpd {{.*#+}} ymm1 = ymm1[0,0,2,2]
+; AVX1-NEXT: vmovddup {{.*#+}} ymm1 = ymm1[0,0,2,2]
; AVX1-NEXT: vblendpd {{.*#+}} ymm0 = ymm0[0],ymm1[1],ymm0[2,3]
; AVX1-NEXT: retq
;
@@ -502,7 +567,7 @@ define <4 x i64> @shuffle_v4i64_4012(<4 x i64> %a, <4 x i64> %b) {
; AVX1: # BB#0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
; AVX1-NEXT: vshufpd {{.*#+}} xmm2 = xmm0[1],xmm2[0]
-; AVX1-NEXT: vunpcklpd {{.*#+}} xmm0 = xmm0[0,0]
+; AVX1-NEXT: vmovddup {{.*#+}} xmm0 = xmm0[0,0]
; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
; AVX1-NEXT: vblendpd {{.*#+}} ymm0 = ymm1[0],ymm0[1,2,3]
; AVX1-NEXT: retq
@@ -580,9 +645,8 @@ define <4 x i64> @shuffle_v4i64_2u35(<4 x i64> %a, <4 x i64> %b) {
;
; AVX2-LABEL: shuffle_v4i64_2u35:
; AVX2: # BB#0:
-; AVX2-NEXT: vinserti128 $1, %xmm1, %ymm1, %ymm1
-; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[2,1,3,3]
-; AVX2-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm1[6,7]
+; AVX2-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3],ymm0[4,5,6,7]
+; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[2,1,3,1]
; AVX2-NEXT: retq
%shuffle = shufflevector <4 x i64> %a, <4 x i64> %b, <4 x i32> <i32 2, i32 undef, i32 3, i32 5>
ret <4 x i64> %shuffle
@@ -608,22 +672,135 @@ define <4 x i64> @shuffle_v4i64_1251(<4 x i64> %a, <4 x i64> %b) {
ret <4 x i64> %shuffle
}
-define <4 x i64> @stress_test1(<4 x i64> %a, <4 x i64> %b) {
-; AVX1-LABEL: stress_test1:
+define <4 x i64> @shuffle_v4i64_1054(<4 x i64> %a, <4 x i64> %b) {
+; AVX1-LABEL: shuffle_v4i64_1054:
; AVX1: # BB#0:
-; AVX1-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm1[2,3,0,1]
-; AVX1-NEXT: vpermilpd {{.*#+}} ymm2 = ymm0[1,0,3,2]
-; AVX1-NEXT: vblendpd {{.*#+}} ymm1 = ymm2[0],ymm1[1],ymm2[2,3]
-; AVX1-NEXT: vpermilpd {{.*#+}} ymm0 = ymm0[1,1,3,2]
+; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX1-NEXT: vpermilpd {{.*#+}} ymm0 = ymm0[1,0,3,2]
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: shuffle_v4i64_1054:
+; AVX2: # BB#0:
+; AVX2-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
+; AVX2-NEXT: vpshufd {{.*#+}} ymm0 = ymm0[2,3,0,1,6,7,4,5]
+; AVX2-NEXT: retq
+ %shuffle = shufflevector <4 x i64> %a, <4 x i64> %b, <4 x i32> <i32 1, i32 0, i32 5, i32 4>
+ ret <4 x i64> %shuffle
+}
+
+define <4 x i64> @shuffle_v4i64_3254(<4 x i64> %a, <4 x i64> %b) {
+; AVX1-LABEL: shuffle_v4i64_3254:
+; AVX1: # BB#0:
+; AVX1-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[2,3],ymm1[0,1]
+; AVX1-NEXT: vpermilpd {{.*#+}} ymm0 = ymm0[1,0,3,2]
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: shuffle_v4i64_3254:
+; AVX2: # BB#0:
+; AVX2-NEXT: vperm2i128 {{.*#+}} ymm0 = ymm0[2,3],ymm1[0,1]
+; AVX2-NEXT: vpshufd {{.*#+}} ymm0 = ymm0[2,3,0,1,6,7,4,5]
+; AVX2-NEXT: retq
+ %shuffle = shufflevector <4 x i64> %a, <4 x i64> %b, <4 x i32> <i32 3, i32 2, i32 5, i32 4>
+ ret <4 x i64> %shuffle
+}
+
+define <4 x i64> @shuffle_v4i64_3276(<4 x i64> %a, <4 x i64> %b) {
+; AVX1-LABEL: shuffle_v4i64_3276:
+; AVX1: # BB#0:
+; AVX1-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[2,3],ymm1[2,3]
+; AVX1-NEXT: vpermilpd {{.*#+}} ymm0 = ymm0[1,0,3,2]
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: shuffle_v4i64_3276:
+; AVX2: # BB#0:
+; AVX2-NEXT: vperm2i128 {{.*#+}} ymm0 = ymm0[2,3],ymm1[2,3]
+; AVX2-NEXT: vpshufd {{.*#+}} ymm0 = ymm0[2,3,0,1,6,7,4,5]
+; AVX2-NEXT: retq
+ %shuffle = shufflevector <4 x i64> %a, <4 x i64> %b, <4 x i32> <i32 3, i32 2, i32 7, i32 6>
+ ret <4 x i64> %shuffle
+}
+
+define <4 x i64> @shuffle_v4i64_1076(<4 x i64> %a, <4 x i64> %b) {
+; AVX1-LABEL: shuffle_v4i64_1076:
+; AVX1: # BB#0:
+; AVX1-NEXT: vblendpd {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3]
+; AVX1-NEXT: vpermilpd {{.*#+}} ymm0 = ymm0[1,0,3,2]
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: shuffle_v4i64_1076:
+; AVX2: # BB#0:
+; AVX2-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
+; AVX2-NEXT: vpshufd {{.*#+}} ymm0 = ymm0[2,3,0,1,6,7,4,5]
+; AVX2-NEXT: retq
+ %shuffle = shufflevector <4 x i64> %a, <4 x i64> %b, <4 x i32> <i32 1, i32 0, i32 7, i32 6>
+ ret <4 x i64> %shuffle
+}
+
+define <4 x i64> @shuffle_v4i64_0415(<4 x i64> %a, <4 x i64> %b) {
+; AVX1-LABEL: shuffle_v4i64_0415:
+; AVX1: # BB#0:
+; AVX1-NEXT: vunpckhpd {{.*#+}} xmm2 = xmm0[1],xmm1[1]
+; AVX1-NEXT: vunpcklpd {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: shuffle_v4i64_0415:
+; AVX2: # BB#0:
+; AVX2-NEXT: vpermq {{.*#+}} ymm1 = ymm1[0,0,2,1]
+; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,1,1,3]
+; AVX2-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3],ymm0[4,5],ymm1[6,7]
+; AVX2-NEXT: retq
+ %shuffle = shufflevector <4 x i64> %a, <4 x i64> %b, <4 x i32> <i32 0, i32 4, i32 1, i32 5>
+ ret <4 x i64> %shuffle
+}
+
+define <4 x i64> @shuffle_v4i64_z4z6(<4 x i64> %a) {
+; AVX1-LABEL: shuffle_v4i64_z4z6:
+; AVX1: # BB#0:
+; AVX1-NEXT: vxorpd %ymm1, %ymm1, %ymm1
+; AVX1-NEXT: vunpcklpd {{.*#+}} ymm0 = ymm1[0],ymm0[0],ymm1[2],ymm0[2]
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: shuffle_v4i64_z4z6:
+; AVX2: # BB#0:
+; AVX2-NEXT: vpslldq {{.*#+}} ymm0 = zero,zero,zero,zero,zero,zero,zero,zero,ymm0[0,1,2,3,4,5,6,7],zero,zero,zero,zero,zero,zero,zero,zero,ymm0[16,17,18,19,20,21,22,23]
+; AVX2-NEXT: retq
+ %shuffle = shufflevector <4 x i64> zeroinitializer, <4 x i64> %a, <4 x i32> <i32 0, i32 4, i32 0, i32 6>
+ ret <4 x i64> %shuffle
+}
+
+define <4 x i64> @shuffle_v4i64_5zuz(<4 x i64> %a) {
+; AVX1-LABEL: shuffle_v4i64_5zuz:
+; AVX1: # BB#0:
+; AVX1-NEXT: vxorpd %ymm1, %ymm1, %ymm1
; AVX1-NEXT: vunpckhpd {{.*#+}} ymm0 = ymm0[1],ymm1[1],ymm0[3],ymm1[3]
; AVX1-NEXT: retq
;
-; AVX2-LABEL: stress_test1:
+; AVX2-LABEL: shuffle_v4i64_5zuz:
; AVX2: # BB#0:
-; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm1[3,1,1,0]
-; AVX2-NEXT: vpermq {{.*#+}} ymm1 = ymm1[3,3,1,3]
-; AVX2-NEXT: vpunpckhqdq {{.*#+}} ymm0 = ymm1[1],ymm0[1],ymm1[3],ymm0[3]
+; AVX2-NEXT: vpsrldq {{.*#+}} ymm0 = ymm0[8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,ymm0[24,25,26,27,28,29,30,31],zero,zero,zero,zero,zero,zero,zero,zero
; AVX2-NEXT: retq
+ %shuffle = shufflevector <4 x i64> zeroinitializer, <4 x i64> %a, <4 x i32> <i32 5, i32 0, i32 undef, i32 0>
+ ret <4 x i64> %shuffle
+}
+
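The z4z6 and 5zuz tests above pin down shuffles with a zero operand folding to whole-lane byte shifts (vpslldq/vpsrldq) on AVX2. The equivalence is easiest to see by treating each 128-bit lane as an i128; a hypothetical IR restatement of z4z6 (little-endian, so element 0 sits in the low 64 bits of each lane):

define <4 x i64> @slli_lane_by_64(<4 x i64> %a) {
  ; shifting each i128 lane left by 64 zeroes the low element and moves the
  ; old low element into the high slot, i.e. <0, a0, 0, a2>
  %v = bitcast <4 x i64> %a to <2 x i128>
  %s = shl <2 x i128> %v, <i128 64, i128 64>
  %r = bitcast <2 x i128> %s to <4 x i64>
  ret <4 x i64> %r
}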
+define <4 x i64> @shuffle_v4i64_40u2(<4 x i64> %a, <4 x i64> %b) {
+; AVX1-LABEL: shuffle_v4i64_40u2:
+; AVX1: # BB#0:
+; AVX1-NEXT: vunpcklpd {{.*#+}} ymm0 = ymm1[0],ymm0[0],ymm1[2],ymm0[2]
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: shuffle_v4i64_40u2:
+; AVX2: # BB#0:
+; AVX2-NEXT: vpunpcklqdq {{.*#+}} ymm0 = ymm1[0],ymm0[0],ymm1[2],ymm0[2]
+; AVX2-NEXT: retq
+ %shuffle = shufflevector <4 x i64> %a, <4 x i64> %b, <4 x i32> <i32 4, i32 0, i32 undef, i32 2>
+ ret <4 x i64> %shuffle
+}
+
+define <4 x i64> @stress_test1(<4 x i64> %a, <4 x i64> %b) {
+; ALL-LABEL: stress_test1:
+; ALL: retq
%c = shufflevector <4 x i64> %b, <4 x i64> undef, <4 x i32> <i32 3, i32 1, i32 1, i32 0>
%d = shufflevector <4 x i64> %c, <4 x i64> undef, <4 x i32> <i32 3, i32 undef, i32 2, i32 undef>
%e = shufflevector <4 x i64> %b, <4 x i64> undef, <4 x i32> <i32 3, i32 3, i32 1, i32 undef>
@@ -654,14 +831,14 @@ define <4 x i64> @insert_reg_and_zero_v4i64(i64 %a) {
define <4 x i64> @insert_mem_and_zero_v4i64(i64* %ptr) {
; AVX1-LABEL: insert_mem_and_zero_v4i64:
; AVX1: # BB#0:
-; AVX1-NEXT: vmovq (%rdi), %xmm0
+; AVX1-NEXT: vmovq {{.*#+}} xmm0 = mem[0],zero
; AVX1-NEXT: vxorpd %ymm1, %ymm1, %ymm1
; AVX1-NEXT: vblendpd {{.*#+}} ymm0 = ymm0[0],ymm1[1,2,3]
; AVX1-NEXT: retq
;
; AVX2-LABEL: insert_mem_and_zero_v4i64:
; AVX2: # BB#0:
-; AVX2-NEXT: vmovq (%rdi), %xmm0
+; AVX2-NEXT: vmovq {{.*#+}} xmm0 = mem[0],zero
; AVX2-NEXT: vpxor %ymm1, %ymm1, %ymm1
; AVX2-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3,4,5,6,7]
; AVX2-NEXT: retq
@@ -674,8 +851,8 @@ define <4 x i64> @insert_mem_and_zero_v4i64(i64* %ptr) {
define <4 x double> @insert_reg_and_zero_v4f64(double %a) {
; ALL-LABEL: insert_reg_and_zero_v4f64:
; ALL: # BB#0:
-; ALL-NEXT: vxorps %xmm1, %xmm1, %xmm1
-; ALL-NEXT: vmovsd %xmm0, %xmm1, %xmm0
+; ALL-NEXT: vxorpd %xmm1, %xmm1, %xmm1
+; ALL-NEXT: vmovsd {{.*#+}} xmm0 = xmm0[0],xmm1[1]
; ALL-NEXT: retq
%v = insertelement <4 x double> undef, double %a, i32 0
%shuffle = shufflevector <4 x double> %v, <4 x double> zeroinitializer, <4 x i32> <i32 0, i32 5, i32 6, i32 7>
@@ -685,7 +862,7 @@ define <4 x double> @insert_reg_and_zero_v4f64(double %a) {
define <4 x double> @insert_mem_and_zero_v4f64(double* %ptr) {
; ALL-LABEL: insert_mem_and_zero_v4f64:
; ALL: # BB#0:
-; ALL-NEXT: vmovsd (%rdi), %xmm0
+; ALL-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
; ALL-NEXT: retq
%a = load double* %ptr
%v = insertelement <4 x double> undef, double %a, i32 0
@@ -707,8 +884,7 @@ define <4 x double> @splat_mem_v4f64(double* %ptr) {
define <4 x i64> @splat_mem_v4i64(i64* %ptr) {
; AVX1-LABEL: splat_mem_v4i64:
; AVX1: # BB#0:
-; AVX1-NEXT: vmovddup (%rdi), %xmm0
-; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
+; AVX1-NEXT: vbroadcastsd (%rdi), %ymm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: splat_mem_v4i64:
@@ -735,7 +911,7 @@ define <4 x double> @splat_mem_v4f64_2(double* %p) {
define <4 x double> @splat_v4f64(<2 x double> %r) {
; AVX1-LABEL: splat_v4f64:
; AVX1: # BB#0:
-; AVX1-NEXT: vunpcklpd {{.*#+}} xmm0 = xmm0[0,0]
+; AVX1-NEXT: vmovddup {{.*#+}} xmm0 = xmm0[0,0]
; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; AVX1-NEXT: retq
;
diff --git a/test/CodeGen/X86/vector-shuffle-256-v8.ll b/test/CodeGen/X86/vector-shuffle-256-v8.ll
index ded8232..f4e9a3b 100644
--- a/test/CodeGen/X86/vector-shuffle-256-v8.ll
+++ b/test/CodeGen/X86/vector-shuffle-256-v8.ll
@@ -1,5 +1,5 @@
-; RUN: llc < %s -mcpu=x86-64 -mattr=+avx -x86-experimental-vector-shuffle-lowering | FileCheck %s --check-prefix=ALL --check-prefix=AVX1
-; RUN: llc < %s -mcpu=x86-64 -mattr=+avx2 -x86-experimental-vector-shuffle-lowering | FileCheck %s --check-prefix=ALL --check-prefix=AVX2
+; RUN: llc < %s -mcpu=x86-64 -mattr=+avx | FileCheck %s --check-prefix=ALL --check-prefix=AVX1
+; RUN: llc < %s -mcpu=x86-64 -mattr=+avx2 | FileCheck %s --check-prefix=ALL --check-prefix=AVX2
target triple = "x86_64-unknown-unknown"
@@ -91,9 +91,8 @@ define <8 x float> @shuffle_v8f32_00500000(<8 x float> %a, <8 x float> %b) {
; AVX1-LABEL: shuffle_v8f32_00500000:
; AVX1: # BB#0:
; AVX1-NEXT: vperm2f128 {{.*#+}} ymm1 = ymm0[2,3,0,1]
-; AVX1-NEXT: vpermilps {{.*#+}} ymm1 = ymm1[u,u,1,u,4,4,4,4]
-; AVX1-NEXT: vpermilps {{.*#+}} ymm0 = ymm0[0,0,2,0,4,4,6,4]
-; AVX1-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm1[2],ymm0[3],ymm1[4,5,6,7]
+; AVX1-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0],ymm1[1,2,3,4,5,6,7]
+; AVX1-NEXT: vpermilps {{.*#+}} ymm0 = ymm0[0,0,1,0,4,4,4,4]
; AVX1-NEXT: retq
;
; AVX2-LABEL: shuffle_v8f32_00500000:
@@ -109,9 +108,8 @@ define <8 x float> @shuffle_v8f32_06000000(<8 x float> %a, <8 x float> %b) {
; AVX1-LABEL: shuffle_v8f32_06000000:
; AVX1: # BB#0:
; AVX1-NEXT: vperm2f128 {{.*#+}} ymm1 = ymm0[2,3,0,1]
-; AVX1-NEXT: vpermilps {{.*#+}} ymm1 = ymm1[u,2,u,u,4,4,4,4]
-; AVX1-NEXT: vpermilps {{.*#+}} ymm0 = ymm0[0,1,0,0,4,5,4,4]
-; AVX1-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0],ymm1[1],ymm0[2,3],ymm1[4,5,6,7]
+; AVX1-NEXT: vblendpd {{.*#+}} ymm0 = ymm0[0],ymm1[1,2,3]
+; AVX1-NEXT: vpermilps {{.*#+}} ymm0 = ymm0[0,2,0,0,4,4,4,4]
; AVX1-NEXT: retq
;
; AVX2-LABEL: shuffle_v8f32_06000000:
@@ -127,9 +125,8 @@ define <8 x float> @shuffle_v8f32_70000000(<8 x float> %a, <8 x float> %b) {
; AVX1-LABEL: shuffle_v8f32_70000000:
; AVX1: # BB#0:
; AVX1-NEXT: vperm2f128 {{.*#+}} ymm1 = ymm0[2,3,0,1]
-; AVX1-NEXT: vpermilps {{.*#+}} ymm1 = ymm1[3,u,u,u,4,4,4,4]
-; AVX1-NEXT: vpermilps {{.*#+}} ymm0 = ymm0[0,0,0,0,4,4,4,4]
-; AVX1-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0],ymm0[1,2,3],ymm1[4,5,6,7]
+; AVX1-NEXT: vblendpd {{.*#+}} ymm0 = ymm0[0],ymm1[1,2,3]
+; AVX1-NEXT: vpermilps {{.*#+}} ymm0 = ymm0[3,0,0,0,4,4,4,4]
; AVX1-NEXT: retq
;
; AVX2-LABEL: shuffle_v8f32_70000000:
@@ -148,7 +145,7 @@ define <8 x float> @shuffle_v8f32_70000000(<8 x float> %a, <8 x float> %b) {
define <8 x float> @shuffle_v8f32_01014545(<8 x float> %a, <8 x float> %b) {
; ALL-LABEL: shuffle_v8f32_01014545:
; ALL: # BB#0:
-; ALL-NEXT: vpermilpd {{.*#+}} ymm0 = ymm0[0,0,2,2]
+; ALL-NEXT: vmovddup {{.*#+}} ymm0 = ymm0[0,0,2,2]
; ALL-NEXT: retq
%shuffle = shufflevector <8 x float> %a, <8 x float> %b, <8 x i32> <i32 0, i32 1, i32 0, i32 1, i32 4, i32 5, i32 4, i32 5>
ret <8 x float> %shuffle
@@ -202,7 +199,7 @@ define <8 x float> @shuffle_v8f32_08080808(<8 x float> %a, <8 x float> %b) {
; AVX1: # BB#0:
; AVX1-NEXT: vpermilps {{.*#+}} xmm1 = xmm1[0,0,2,0]
; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm1, %ymm1
-; AVX1-NEXT: vunpcklpd {{.*#+}} xmm0 = xmm0[0,0]
+; AVX1-NEXT: vmovddup {{.*#+}} xmm0 = xmm0[0,0]
; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; AVX1-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0],ymm1[1],ymm0[2],ymm1[3],ymm0[4],ymm1[5],ymm0[6],ymm1[7]
; AVX1-NEXT: retq
@@ -295,11 +292,11 @@ define <8 x float> @shuffle_v8f32_08192a3b(<8 x float> %a, <8 x float> %b) {
define <8 x float> @shuffle_v8f32_08991abb(<8 x float> %a, <8 x float> %b) {
; AVX1-LABEL: shuffle_v8f32_08991abb:
; AVX1: # BB#0:
-; AVX1-NEXT: vshufps {{.*#+}} xmm2 = xmm0[1,0],xmm1[2,0]
-; AVX1-NEXT: vshufps {{.*#+}} xmm2 = xmm2[0,2],xmm1[3,3]
-; AVX1-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,0],xmm1[0,0]
-; AVX1-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[1,1]
-; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; AVX1-NEXT: vshufps {{.*#+}} xmm2 = xmm0[0,0],xmm1[0,0]
+; AVX1-NEXT: vshufps {{.*#+}} xmm2 = xmm2[0,2],xmm1[1,1]
+; AVX1-NEXT: vblendpd {{.*#+}} xmm0 = xmm0[0],xmm1[1]
+; AVX1-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[1,2,3,3]
+; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm2, %ymm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: shuffle_v8f32_08991abb:
@@ -336,7 +333,7 @@ define <8 x float> @shuffle_v8f32_091b2d3f(<8 x float> %a, <8 x float> %b) {
define <8 x float> @shuffle_v8f32_09ab1def(<8 x float> %a, <8 x float> %b) {
; AVX1-LABEL: shuffle_v8f32_09ab1def:
; AVX1: # BB#0:
-; AVX1-NEXT: vpermilps {{.*#+}} xmm2 = xmm0[1,1,2,3]
+; AVX1-NEXT: vmovshdup {{.*#+}} xmm2 = xmm0[1,1,3,3]
; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
; AVX1-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0],ymm1[1,2,3],ymm0[4],ymm1[5,6,7]
; AVX1-NEXT: retq
@@ -426,7 +423,7 @@ define <8 x float> @shuffle_v8f32_00234467(<8 x float> %a, <8 x float> %b) {
define <8 x float> @shuffle_v8f32_00224466(<8 x float> %a, <8 x float> %b) {
; ALL-LABEL: shuffle_v8f32_00224466:
; ALL: # BB#0:
-; ALL-NEXT: vpermilps {{.*#+}} ymm0 = ymm0[0,0,2,2,4,4,6,6]
+; ALL-NEXT: vmovsldup {{.*#+}} ymm0 = ymm0[0,0,2,2,4,4,6,6]
; ALL-NEXT: retq
%shuffle = shufflevector <8 x float> %a, <8 x float> %b, <8 x i32> <i32 0, i32 0, i32 2, i32 2, i32 4, i32 4, i32 6, i32 6>
ret <8 x float> %shuffle
@@ -444,7 +441,7 @@ define <8 x float> @shuffle_v8f32_10325476(<8 x float> %a, <8 x float> %b) {
define <8 x float> @shuffle_v8f32_11335577(<8 x float> %a, <8 x float> %b) {
; ALL-LABEL: shuffle_v8f32_11335577:
; ALL: # BB#0:
-; ALL-NEXT: vpermilps {{.*#+}} ymm0 = ymm0[1,1,3,3,5,5,7,7]
+; ALL-NEXT: vmovshdup {{.*#+}} ymm0 = ymm0[1,1,3,3,5,5,7,7]
; ALL-NEXT: retq
%shuffle = shufflevector <8 x float> %a, <8 x float> %b, <8 x i32> <i32 1, i32 1, i32 3, i32 3, i32 5, i32 5, i32 7, i32 7>
ret <8 x float> %shuffle
@@ -736,123 +733,106 @@ define <8 x float> @shuffle_v8f32_76543210(<8 x float> %a, <8 x float> %b) {
}
define <8 x float> @shuffle_v8f32_3210ba98(<8 x float> %a, <8 x float> %b) {
-; AVX1-LABEL: shuffle_v8f32_3210ba98:
-; AVX1: # BB#0:
-; AVX1-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[3,2,1,0]
-; AVX1-NEXT: vpermilps {{.*#+}} xmm1 = xmm1[3,2,1,0]
-; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
-; AVX1-NEXT: retq
-;
-; AVX2-LABEL: shuffle_v8f32_3210ba98:
-; AVX2: # BB#0:
-; AVX2-NEXT: vmovaps {{.*#+}} ymm2 = <u,u,u,u,3,2,1,0>
-; AVX2-NEXT: vpermps %ymm1, %ymm2, %ymm1
-; AVX2-NEXT: vpermilps {{.*#+}} ymm0 = ymm0[3,2,1,0,7,6,5,4]
-; AVX2-NEXT: vblendpd {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3]
-; AVX2-NEXT: retq
+; ALL-LABEL: shuffle_v8f32_3210ba98:
+; ALL: # BB#0:
+; ALL-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; ALL-NEXT: vpermilps {{.*#+}} ymm0 = ymm0[3,2,1,0,7,6,5,4]
+; ALL-NEXT: retq
%shuffle = shufflevector <8 x float> %a, <8 x float> %b, <8 x i32> <i32 3, i32 2, i32 1, i32 0, i32 11, i32 10, i32 9, i32 8>
ret <8 x float> %shuffle
}
define <8 x float> @shuffle_v8f32_3210fedc(<8 x float> %a, <8 x float> %b) {
-; AVX1-LABEL: shuffle_v8f32_3210fedc:
-; AVX1: # BB#0:
-; AVX1-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[3,2,1,0]
-; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm1
-; AVX1-NEXT: vpermilps {{.*#+}} xmm1 = xmm1[3,2,1,0]
-; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
-; AVX1-NEXT: retq
-;
-; AVX2-LABEL: shuffle_v8f32_3210fedc:
-; AVX2: # BB#0:
-; AVX2-NEXT: vpermilps {{.*#+}} ymm1 = ymm1[3,2,1,0,7,6,5,4]
-; AVX2-NEXT: vpermilps {{.*#+}} ymm0 = ymm0[3,2,1,0,7,6,5,4]
-; AVX2-NEXT: vblendpd {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3]
-; AVX2-NEXT: retq
+; ALL-LABEL: shuffle_v8f32_3210fedc:
+; ALL: # BB#0:
+; ALL-NEXT: vblendpd {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3]
+; ALL-NEXT: vpermilps {{.*#+}} ymm0 = ymm0[3,2,1,0,7,6,5,4]
+; ALL-NEXT: retq
%shuffle = shufflevector <8 x float> %a, <8 x float> %b, <8 x i32> <i32 3, i32 2, i32 1, i32 0, i32 15, i32 14, i32 13, i32 12>
ret <8 x float> %shuffle
}
define <8 x float> @shuffle_v8f32_7654fedc(<8 x float> %a, <8 x float> %b) {
-; AVX1-LABEL: shuffle_v8f32_7654fedc:
-; AVX1: # BB#0:
-; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
-; AVX1-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[3,2,1,0]
-; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm1
-; AVX1-NEXT: vpermilps {{.*#+}} xmm1 = xmm1[3,2,1,0]
-; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
-; AVX1-NEXT: retq
-;
-; AVX2-LABEL: shuffle_v8f32_7654fedc:
-; AVX2: # BB#0:
-; AVX2-NEXT: vmovaps {{.*#+}} ymm2 = <7,6,5,4,u,u,u,u>
-; AVX2-NEXT: vpermps %ymm0, %ymm2, %ymm0
-; AVX2-NEXT: vpermilps {{.*#+}} ymm1 = ymm1[3,2,1,0,7,6,5,4]
-; AVX2-NEXT: vblendpd {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3]
-; AVX2-NEXT: retq
+; ALL-LABEL: shuffle_v8f32_7654fedc:
+; ALL: # BB#0:
+; ALL-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[2,3],ymm1[2,3]
+; ALL-NEXT: vpermilps {{.*#+}} ymm0 = ymm0[3,2,1,0,7,6,5,4]
+; ALL-NEXT: retq
%shuffle = shufflevector <8 x float> %a, <8 x float> %b, <8 x i32> <i32 7, i32 6, i32 5, i32 4, i32 15, i32 14, i32 13, i32 12>
ret <8 x float> %shuffle
}
define <8 x float> @shuffle_v8f32_fedc7654(<8 x float> %a, <8 x float> %b) {
-; AVX1-LABEL: shuffle_v8f32_fedc7654:
+; ALL-LABEL: shuffle_v8f32_fedc7654:
+; ALL: # BB#0:
+; ALL-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm1[2,3],ymm0[2,3]
+; ALL-NEXT: vpermilps {{.*#+}} ymm0 = ymm0[3,2,1,0,7,6,5,4]
+; ALL-NEXT: retq
+ %shuffle = shufflevector <8 x float> %a, <8 x float> %b, <8 x i32> <i32 15, i32 14, i32 13, i32 12, i32 7, i32 6, i32 5, i32 4>
+ ret <8 x float> %shuffle
+}
+
+define <8 x float> @PR21138(<8 x float> %truc, <8 x float> %tchose) {
+; AVX1-LABEL: PR21138:
; AVX1: # BB#0:
-; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm1
-; AVX1-NEXT: vpermilps {{.*#+}} xmm1 = xmm1[3,2,1,0]
-; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
-; AVX1-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[3,2,1,0]
-; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
+; AVX1-NEXT: vshufps {{.*#+}} xmm1 = xmm1[1,3],xmm2[1,3]
+; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm1
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
+; AVX1-NEXT: vshufps {{.*#+}} xmm0 = xmm0[1,3],xmm2[1,3]
+; AVX1-NEXT: vblendpd {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3]
; AVX1-NEXT: retq
;
-; AVX2-LABEL: shuffle_v8f32_fedc7654:
+; AVX2-LABEL: PR21138:
; AVX2: # BB#0:
-; AVX2-NEXT: vmovaps {{.*#+}} ymm2 = <7,6,5,4,u,u,u,u>
+; AVX2-NEXT: vmovaps {{.*#+}} ymm2 = <u,u,u,u,1,3,5,7>
; AVX2-NEXT: vpermps %ymm1, %ymm2, %ymm1
-; AVX2-NEXT: vpermilps {{.*#+}} ymm0 = ymm0[3,2,1,0,7,6,5,4]
-; AVX2-NEXT: vblendpd {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3]
+; AVX2-NEXT: vmovaps {{.*#+}} ymm2 = <1,3,5,7,u,u,u,u>
+; AVX2-NEXT: vpermps %ymm0, %ymm2, %ymm0
+; AVX2-NEXT: vblendpd {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3]
; AVX2-NEXT: retq
- %shuffle = shufflevector <8 x float> %a, <8 x float> %b, <8 x i32> <i32 15, i32 14, i32 13, i32 12, i32 7, i32 6, i32 5, i32 4>
+ %shuffle = shufflevector <8 x float> %truc, <8 x float> %tchose, <8 x i32> <i32 1, i32 3, i32 5, i32 7, i32 9, i32 11, i32 13, i32 15>
ret <8 x float> %shuffle
}
define <8 x float> @shuffle_v8f32_ba987654(<8 x float> %a, <8 x float> %b) {
-; AVX1-LABEL: shuffle_v8f32_ba987654:
-; AVX1: # BB#0:
-; AVX1-NEXT: vpermilps {{.*#+}} xmm1 = xmm1[3,2,1,0]
-; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
-; AVX1-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[3,2,1,0]
-; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
-; AVX1-NEXT: retq
-;
-; AVX2-LABEL: shuffle_v8f32_ba987654:
-; AVX2: # BB#0:
-; AVX2-NEXT: vpermilps {{.*#+}} ymm0 = ymm0[3,2,1,0,7,6,5,4]
-; AVX2-NEXT: vpermilps {{.*#+}} ymm1 = ymm1[3,2,1,0,7,6,5,4]
-; AVX2-NEXT: vblendpd {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3]
-; AVX2-NEXT: retq
+; ALL-LABEL: shuffle_v8f32_ba987654:
+; ALL: # BB#0:
+; ALL-NEXT: vblendpd {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3]
+; ALL-NEXT: vpermilps {{.*#+}} ymm0 = ymm0[3,2,1,0,7,6,5,4]
+; ALL-NEXT: retq
%shuffle = shufflevector <8 x float> %a, <8 x float> %b, <8 x i32> <i32 11, i32 10, i32 9, i32 8, i32 7, i32 6, i32 5, i32 4>
ret <8 x float> %shuffle
}
define <8 x float> @shuffle_v8f32_ba983210(<8 x float> %a, <8 x float> %b) {
-; AVX1-LABEL: shuffle_v8f32_ba983210:
-; AVX1: # BB#0:
-; AVX1-NEXT: vpermilps {{.*#+}} xmm1 = xmm1[3,2,1,0]
-; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
-; AVX1-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[3,2,1,0]
-; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
-; AVX1-NEXT: retq
-;
-; AVX2-LABEL: shuffle_v8f32_ba983210:
-; AVX2: # BB#0:
-; AVX2-NEXT: vpermilps {{.*#+}} ymm0 = ymm0[3,2,1,0,7,6,5,4]
-; AVX2-NEXT: vpermilps {{.*#+}} ymm1 = ymm1[3,2,1,0,7,6,5,4]
-; AVX2-NEXT: vblendpd {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3]
-; AVX2-NEXT: retq
+; ALL-LABEL: shuffle_v8f32_ba983210:
+; ALL: # BB#0:
+; ALL-NEXT: vblendpd {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3]
+; ALL-NEXT: vpermilps {{.*#+}} ymm0 = ymm0[3,2,1,0,7,6,5,4]
+; ALL-NEXT: retq
%shuffle = shufflevector <8 x float> %a, <8 x float> %b, <8 x i32> <i32 11, i32 10, i32 9, i32 8, i32 7, i32 6, i32 5, i32 4>
ret <8 x float> %shuffle
}
+define <8 x float> @shuffle_v8f32_80u1c4u5(<8 x float> %a, <8 x float> %b) {
+; ALL-LABEL: shuffle_v8f32_80u1c4u5:
+; ALL: # BB#0:
+; ALL-NEXT: vunpcklps {{.*#+}} ymm0 = ymm1[0],ymm0[0],ymm1[1],ymm0[1],ymm1[4],ymm0[4],ymm1[5],ymm0[5]
+; ALL-NEXT: retq
+ %shuffle = shufflevector <8 x float> %a, <8 x float> %b, <8 x i32> <i32 8, i32 0, i32 undef, i32 1, i32 12, i32 4, i32 undef, i32 5>
+ ret <8 x float> %shuffle
+}
+
+define <8 x float> @shuffle_v8f32_a2u3e6f7(<8 x float> %a, <8 x float> %b) {
+; ALL-LABEL: shuffle_v8f32_a2u3e6f7:
+; ALL: # BB#0:
+; ALL-NEXT: vunpckhps {{.*#+}} ymm0 = ymm1[2],ymm0[2],ymm1[3],ymm0[3],ymm1[6],ymm0[6],ymm1[7],ymm0[7]
+; ALL-NEXT: retq
+ %shuffle = shufflevector <8 x float> %a, <8 x float> %b, <8 x i32> <i32 10, i32 2, i32 undef, i32 3, i32 14, i32 6, i32 15, i32 7>
+ ret <8 x float> %shuffle
+}
+
define <8 x i32> @shuffle_v8i32_00000000(<8 x i32> %a, <8 x i32> %b) {
; AVX1-LABEL: shuffle_v8i32_00000000:
; AVX1: # BB#0:
@@ -941,9 +921,8 @@ define <8 x i32> @shuffle_v8i32_00500000(<8 x i32> %a, <8 x i32> %b) {
; AVX1-LABEL: shuffle_v8i32_00500000:
; AVX1: # BB#0:
; AVX1-NEXT: vperm2f128 {{.*#+}} ymm1 = ymm0[2,3,0,1]
-; AVX1-NEXT: vpermilps {{.*#+}} ymm1 = ymm1[u,u,1,u,4,4,4,4]
-; AVX1-NEXT: vpermilps {{.*#+}} ymm0 = ymm0[0,0,2,0,4,4,6,4]
-; AVX1-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm1[2],ymm0[3],ymm1[4,5,6,7]
+; AVX1-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0],ymm1[1,2,3,4,5,6,7]
+; AVX1-NEXT: vpermilps {{.*#+}} ymm0 = ymm0[0,0,1,0,4,4,4,4]
; AVX1-NEXT: retq
;
; AVX2-LABEL: shuffle_v8i32_00500000:
@@ -959,9 +938,8 @@ define <8 x i32> @shuffle_v8i32_06000000(<8 x i32> %a, <8 x i32> %b) {
; AVX1-LABEL: shuffle_v8i32_06000000:
; AVX1: # BB#0:
; AVX1-NEXT: vperm2f128 {{.*#+}} ymm1 = ymm0[2,3,0,1]
-; AVX1-NEXT: vpermilps {{.*#+}} ymm1 = ymm1[u,2,u,u,4,4,4,4]
-; AVX1-NEXT: vpermilps {{.*#+}} ymm0 = ymm0[0,1,0,0,4,5,4,4]
-; AVX1-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0],ymm1[1],ymm0[2,3],ymm1[4,5,6,7]
+; AVX1-NEXT: vblendpd {{.*#+}} ymm0 = ymm0[0],ymm1[1,2,3]
+; AVX1-NEXT: vpermilps {{.*#+}} ymm0 = ymm0[0,2,0,0,4,4,4,4]
; AVX1-NEXT: retq
;
; AVX2-LABEL: shuffle_v8i32_06000000:
@@ -977,9 +955,8 @@ define <8 x i32> @shuffle_v8i32_70000000(<8 x i32> %a, <8 x i32> %b) {
; AVX1-LABEL: shuffle_v8i32_70000000:
; AVX1: # BB#0:
; AVX1-NEXT: vperm2f128 {{.*#+}} ymm1 = ymm0[2,3,0,1]
-; AVX1-NEXT: vpermilps {{.*#+}} ymm1 = ymm1[3,u,u,u,4,4,4,4]
-; AVX1-NEXT: vpermilps {{.*#+}} ymm0 = ymm0[0,0,0,0,4,4,4,4]
-; AVX1-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0],ymm0[1,2,3],ymm1[4,5,6,7]
+; AVX1-NEXT: vblendpd {{.*#+}} ymm0 = ymm0[0],ymm1[1,2,3]
+; AVX1-NEXT: vpermilps {{.*#+}} ymm0 = ymm0[3,0,0,0,4,4,4,4]
; AVX1-NEXT: retq
;
; AVX2-LABEL: shuffle_v8i32_70000000:
@@ -998,7 +975,7 @@ define <8 x i32> @shuffle_v8i32_70000000(<8 x i32> %a, <8 x i32> %b) {
define <8 x i32> @shuffle_v8i32_01014545(<8 x i32> %a, <8 x i32> %b) {
; AVX1-LABEL: shuffle_v8i32_01014545:
; AVX1: # BB#0:
-; AVX1-NEXT: vpermilpd {{.*#+}} ymm0 = ymm0[0,0,2,2]
+; AVX1-NEXT: vmovddup {{.*#+}} ymm0 = ymm0[0,0,2,2]
; AVX1-NEXT: retq
;
; AVX2-LABEL: shuffle_v8i32_01014545:
@@ -1012,8 +989,8 @@ define <8 x i32> @shuffle_v8i32_01014545(<8 x i32> %a, <8 x i32> %b) {
define <8 x i32> @shuffle_v8i32_00112233(<8 x i32> %a, <8 x i32> %b) {
; AVX1-LABEL: shuffle_v8i32_00112233:
; AVX1: # BB#0:
-; AVX1-NEXT: vunpcklps {{.*#+}} xmm1 = xmm0[0,0,1,1]
-; AVX1-NEXT: vunpckhps {{.*#+}} xmm0 = xmm0[2,2,3,3]
+; AVX1-NEXT: vpermilps {{.*#+}} xmm1 = xmm0[0,0,1,1]
+; AVX1-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[2,2,3,3]
; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
; AVX1-NEXT: retq
;
@@ -1062,7 +1039,7 @@ define <8 x i32> @shuffle_v8i32_08080808(<8 x i32> %a, <8 x i32> %b) {
; AVX1: # BB#0:
; AVX1-NEXT: vpermilps {{.*#+}} xmm1 = xmm1[0,0,2,0]
; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm1, %ymm1
-; AVX1-NEXT: vunpcklpd {{.*#+}} xmm0 = xmm0[0,0]
+; AVX1-NEXT: vmovddup {{.*#+}} xmm0 = xmm0[0,0]
; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; AVX1-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0],ymm1[1],ymm0[2],ymm1[3],ymm0[4],ymm1[5],ymm0[6],ymm1[7]
; AVX1-NEXT: retq
@@ -1117,9 +1094,8 @@ define <8 x i32> @shuffle_v8i32_9832dc76(<8 x i32> %a, <8 x i32> %b) {
;
; AVX2-LABEL: shuffle_v8i32_9832dc76:
; AVX2: # BB#0:
-; AVX2-NEXT: vpshufd {{.*#+}} ymm0 = ymm0[0,1,3,2,4,5,7,6]
-; AVX2-NEXT: vpshufd {{.*#+}} ymm1 = ymm1[1,0,2,3,5,4,6,7]
; AVX2-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3],ymm1[4,5],ymm0[6,7]
+; AVX2-NEXT: vpshufd {{.*#+}} ymm0 = ymm0[1,0,3,2,5,4,7,6]
; AVX2-NEXT: retq
%shuffle = shufflevector <8 x i32> %a, <8 x i32> %b, <8 x i32> <i32 9, i32 8, i32 3, i32 2, i32 13, i32 12, i32 7, i32 6>
ret <8 x i32> %shuffle
@@ -1181,8 +1157,7 @@ define <8 x i32> @shuffle_v8i32_08192a3b(<8 x i32> %a, <8 x i32> %b) {
; AVX2: # BB#0:
; AVX2-NEXT: vmovdqa {{.*#+}} ymm2 = <u,0,u,1,u,2,u,3>
; AVX2-NEXT: vpermd %ymm1, %ymm2, %ymm1
-; AVX2-NEXT: vmovdqa {{.*#+}} ymm2 = <0,u,1,u,2,u,3,u>
-; AVX2-NEXT: vpermd %ymm0, %ymm2, %ymm0
+; AVX2-NEXT: vpmovzxdq {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
; AVX2-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0],ymm1[1],ymm0[2],ymm1[3],ymm0[4],ymm1[5],ymm0[6],ymm1[7]
; AVX2-NEXT: retq
%shuffle = shufflevector <8 x i32> %a, <8 x i32> %b, <8 x i32> <i32 0, i32 8, i32 1, i32 9, i32 2, i32 10, i32 3, i32 11>
@@ -1192,11 +1167,11 @@ define <8 x i32> @shuffle_v8i32_08192a3b(<8 x i32> %a, <8 x i32> %b) {
define <8 x i32> @shuffle_v8i32_08991abb(<8 x i32> %a, <8 x i32> %b) {
; AVX1-LABEL: shuffle_v8i32_08991abb:
; AVX1: # BB#0:
-; AVX1-NEXT: vshufps {{.*#+}} xmm2 = xmm0[1,0],xmm1[2,0]
-; AVX1-NEXT: vshufps {{.*#+}} xmm2 = xmm2[0,2],xmm1[3,3]
-; AVX1-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,0],xmm1[0,0]
-; AVX1-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[1,1]
-; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; AVX1-NEXT: vshufps {{.*#+}} xmm2 = xmm0[0,0],xmm1[0,0]
+; AVX1-NEXT: vshufps {{.*#+}} xmm2 = xmm2[0,2],xmm1[1,1]
+; AVX1-NEXT: vblendpd {{.*#+}} xmm0 = xmm0[0],xmm1[1]
+; AVX1-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[1,2,3,3]
+; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm2, %ymm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: shuffle_v8i32_08991abb:
@@ -1222,8 +1197,7 @@ define <8 x i32> @shuffle_v8i32_091b2d3f(<8 x i32> %a, <8 x i32> %b) {
;
; AVX2-LABEL: shuffle_v8i32_091b2d3f:
; AVX2: # BB#0:
-; AVX2-NEXT: vmovdqa {{.*#+}} ymm2 = <0,u,1,u,2,u,3,u>
-; AVX2-NEXT: vpermd %ymm0, %ymm2, %ymm0
+; AVX2-NEXT: vpmovzxdq {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
; AVX2-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0],ymm1[1],ymm0[2],ymm1[3],ymm0[4],ymm1[5],ymm0[6],ymm1[7]
; AVX2-NEXT: retq
%shuffle = shufflevector <8 x i32> %a, <8 x i32> %b, <8 x i32> <i32 0, i32 9, i32 1, i32 11, i32 2, i32 13, i32 3, i32 15>
@@ -1233,7 +1207,7 @@ define <8 x i32> @shuffle_v8i32_091b2d3f(<8 x i32> %a, <8 x i32> %b) {
define <8 x i32> @shuffle_v8i32_09ab1def(<8 x i32> %a, <8 x i32> %b) {
; AVX1-LABEL: shuffle_v8i32_09ab1def:
; AVX1: # BB#0:
-; AVX1-NEXT: vpermilps {{.*#+}} xmm2 = xmm0[1,1,2,3]
+; AVX1-NEXT: vmovshdup {{.*#+}} xmm2 = xmm0[1,1,3,3]
; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
; AVX1-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0],ymm1[1,2,3],ymm0[4],ymm1[5,6,7]
; AVX1-NEXT: retq
@@ -1363,7 +1337,7 @@ define <8 x i32> @shuffle_v8i32_00234467(<8 x i32> %a, <8 x i32> %b) {
define <8 x i32> @shuffle_v8i32_00224466(<8 x i32> %a, <8 x i32> %b) {
; AVX1-LABEL: shuffle_v8i32_00224466:
; AVX1: # BB#0:
-; AVX1-NEXT: vpermilps {{.*#+}} ymm0 = ymm0[0,0,2,2,4,4,6,6]
+; AVX1-NEXT: vmovsldup {{.*#+}} ymm0 = ymm0[0,0,2,2,4,4,6,6]
; AVX1-NEXT: retq
;
; AVX2-LABEL: shuffle_v8i32_00224466:
@@ -1391,7 +1365,7 @@ define <8 x i32> @shuffle_v8i32_10325476(<8 x i32> %a, <8 x i32> %b) {
define <8 x i32> @shuffle_v8i32_11335577(<8 x i32> %a, <8 x i32> %b) {
; AVX1-LABEL: shuffle_v8i32_11335577:
; AVX1: # BB#0:
-; AVX1-NEXT: vpermilps {{.*#+}} ymm0 = ymm0[1,1,3,3,5,5,7,7]
+; AVX1-NEXT: vmovshdup {{.*#+}} ymm0 = ymm0[1,1,3,3,5,5,7,7]
; AVX1-NEXT: retq
;
; AVX2-LABEL: shuffle_v8i32_11335577:
@@ -1789,17 +1763,14 @@ define <8 x i32> @shuffle_v8i32_76543210(<8 x i32> %a, <8 x i32> %b) {
define <8 x i32> @shuffle_v8i32_3210ba98(<8 x i32> %a, <8 x i32> %b) {
; AVX1-LABEL: shuffle_v8i32_3210ba98:
; AVX1: # BB#0:
-; AVX1-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[3,2,1,0]
-; AVX1-NEXT: vpermilps {{.*#+}} xmm1 = xmm1[3,2,1,0]
; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX1-NEXT: vpermilps {{.*#+}} ymm0 = ymm0[3,2,1,0,7,6,5,4]
; AVX1-NEXT: retq
;
; AVX2-LABEL: shuffle_v8i32_3210ba98:
; AVX2: # BB#0:
-; AVX2-NEXT: vmovdqa {{.*#+}} ymm2 = <u,u,u,u,3,2,1,0>
-; AVX2-NEXT: vpermd %ymm1, %ymm2, %ymm1
+; AVX2-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
; AVX2-NEXT: vpshufd {{.*#+}} ymm0 = ymm0[3,2,1,0,7,6,5,4]
-; AVX2-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
; AVX2-NEXT: retq
%shuffle = shufflevector <8 x i32> %a, <8 x i32> %b, <8 x i32> <i32 3, i32 2, i32 1, i32 0, i32 11, i32 10, i32 9, i32 8>
ret <8 x i32> %shuffle
@@ -1808,17 +1779,14 @@ define <8 x i32> @shuffle_v8i32_3210ba98(<8 x i32> %a, <8 x i32> %b) {
define <8 x i32> @shuffle_v8i32_3210fedc(<8 x i32> %a, <8 x i32> %b) {
; AVX1-LABEL: shuffle_v8i32_3210fedc:
; AVX1: # BB#0:
-; AVX1-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[3,2,1,0]
-; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm1
-; AVX1-NEXT: vpermilps {{.*#+}} xmm1 = xmm1[3,2,1,0]
-; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX1-NEXT: vblendpd {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3]
+; AVX1-NEXT: vpermilps {{.*#+}} ymm0 = ymm0[3,2,1,0,7,6,5,4]
; AVX1-NEXT: retq
;
; AVX2-LABEL: shuffle_v8i32_3210fedc:
; AVX2: # BB#0:
-; AVX2-NEXT: vpshufd {{.*#+}} ymm1 = ymm1[3,2,1,0,7,6,5,4]
-; AVX2-NEXT: vpshufd {{.*#+}} ymm0 = ymm0[3,2,1,0,7,6,5,4]
; AVX2-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
+; AVX2-NEXT: vpshufd {{.*#+}} ymm0 = ymm0[3,2,1,0,7,6,5,4]
; AVX2-NEXT: retq
%shuffle = shufflevector <8 x i32> %a, <8 x i32> %b, <8 x i32> <i32 3, i32 2, i32 1, i32 0, i32 15, i32 14, i32 13, i32 12>
ret <8 x i32> %shuffle
@@ -1827,19 +1795,14 @@ define <8 x i32> @shuffle_v8i32_3210fedc(<8 x i32> %a, <8 x i32> %b) {
define <8 x i32> @shuffle_v8i32_7654fedc(<8 x i32> %a, <8 x i32> %b) {
; AVX1-LABEL: shuffle_v8i32_7654fedc:
; AVX1: # BB#0:
-; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
-; AVX1-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[3,2,1,0]
-; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm1
-; AVX1-NEXT: vpermilps {{.*#+}} xmm1 = xmm1[3,2,1,0]
-; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX1-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[2,3],ymm1[2,3]
+; AVX1-NEXT: vpermilps {{.*#+}} ymm0 = ymm0[3,2,1,0,7,6,5,4]
; AVX1-NEXT: retq
;
; AVX2-LABEL: shuffle_v8i32_7654fedc:
; AVX2: # BB#0:
-; AVX2-NEXT: vmovdqa {{.*#+}} ymm2 = <7,6,5,4,u,u,u,u>
-; AVX2-NEXT: vpermd %ymm0, %ymm2, %ymm0
-; AVX2-NEXT: vpshufd {{.*#+}} ymm1 = ymm1[3,2,1,0,7,6,5,4]
-; AVX2-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
+; AVX2-NEXT: vperm2i128 {{.*#+}} ymm0 = ymm0[2,3],ymm1[2,3]
+; AVX2-NEXT: vpshufd {{.*#+}} ymm0 = ymm0[3,2,1,0,7,6,5,4]
; AVX2-NEXT: retq
%shuffle = shufflevector <8 x i32> %a, <8 x i32> %b, <8 x i32> <i32 7, i32 6, i32 5, i32 4, i32 15, i32 14, i32 13, i32 12>
ret <8 x i32> %shuffle
@@ -1848,19 +1811,14 @@ define <8 x i32> @shuffle_v8i32_7654fedc(<8 x i32> %a, <8 x i32> %b) {
define <8 x i32> @shuffle_v8i32_fedc7654(<8 x i32> %a, <8 x i32> %b) {
; AVX1-LABEL: shuffle_v8i32_fedc7654:
; AVX1: # BB#0:
-; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm1
-; AVX1-NEXT: vpermilps {{.*#+}} xmm1 = xmm1[3,2,1,0]
-; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
-; AVX1-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[3,2,1,0]
-; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
+; AVX1-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm1[2,3],ymm0[2,3]
+; AVX1-NEXT: vpermilps {{.*#+}} ymm0 = ymm0[3,2,1,0,7,6,5,4]
; AVX1-NEXT: retq
;
; AVX2-LABEL: shuffle_v8i32_fedc7654:
; AVX2: # BB#0:
-; AVX2-NEXT: vmovdqa {{.*#+}} ymm2 = <7,6,5,4,u,u,u,u>
-; AVX2-NEXT: vpermd %ymm1, %ymm2, %ymm1
+; AVX2-NEXT: vperm2i128 {{.*#+}} ymm0 = ymm1[2,3],ymm0[2,3]
; AVX2-NEXT: vpshufd {{.*#+}} ymm0 = ymm0[3,2,1,0,7,6,5,4]
-; AVX2-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
; AVX2-NEXT: retq
%shuffle = shufflevector <8 x i32> %a, <8 x i32> %b, <8 x i32> <i32 15, i32 14, i32 13, i32 12, i32 7, i32 6, i32 5, i32 4>
ret <8 x i32> %shuffle
@@ -1869,17 +1827,14 @@ define <8 x i32> @shuffle_v8i32_fedc7654(<8 x i32> %a, <8 x i32> %b) {
define <8 x i32> @shuffle_v8i32_ba987654(<8 x i32> %a, <8 x i32> %b) {
; AVX1-LABEL: shuffle_v8i32_ba987654:
; AVX1: # BB#0:
-; AVX1-NEXT: vpermilps {{.*#+}} xmm1 = xmm1[3,2,1,0]
-; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
-; AVX1-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[3,2,1,0]
-; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
+; AVX1-NEXT: vblendpd {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3]
+; AVX1-NEXT: vpermilps {{.*#+}} ymm0 = ymm0[3,2,1,0,7,6,5,4]
; AVX1-NEXT: retq
;
; AVX2-LABEL: shuffle_v8i32_ba987654:
; AVX2: # BB#0:
-; AVX2-NEXT: vpshufd {{.*#+}} ymm0 = ymm0[3,2,1,0,7,6,5,4]
-; AVX2-NEXT: vpshufd {{.*#+}} ymm1 = ymm1[3,2,1,0,7,6,5,4]
; AVX2-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
+; AVX2-NEXT: vpshufd {{.*#+}} ymm0 = ymm0[3,2,1,0,7,6,5,4]
; AVX2-NEXT: retq
%shuffle = shufflevector <8 x i32> %a, <8 x i32> %b, <8 x i32> <i32 11, i32 10, i32 9, i32 8, i32 7, i32 6, i32 5, i32 4>
ret <8 x i32> %shuffle
@@ -1888,22 +1843,64 @@ define <8 x i32> @shuffle_v8i32_ba987654(<8 x i32> %a, <8 x i32> %b) {
define <8 x i32> @shuffle_v8i32_ba983210(<8 x i32> %a, <8 x i32> %b) {
; AVX1-LABEL: shuffle_v8i32_ba983210:
; AVX1: # BB#0:
-; AVX1-NEXT: vpermilps {{.*#+}} xmm1 = xmm1[3,2,1,0]
-; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
-; AVX1-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[3,2,1,0]
-; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
+; AVX1-NEXT: vblendpd {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3]
+; AVX1-NEXT: vpermilps {{.*#+}} ymm0 = ymm0[3,2,1,0,7,6,5,4]
; AVX1-NEXT: retq
;
; AVX2-LABEL: shuffle_v8i32_ba983210:
; AVX2: # BB#0:
-; AVX2-NEXT: vpshufd {{.*#+}} ymm0 = ymm0[3,2,1,0,7,6,5,4]
-; AVX2-NEXT: vpshufd {{.*#+}} ymm1 = ymm1[3,2,1,0,7,6,5,4]
; AVX2-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
+; AVX2-NEXT: vpshufd {{.*#+}} ymm0 = ymm0[3,2,1,0,7,6,5,4]
; AVX2-NEXT: retq
%shuffle = shufflevector <8 x i32> %a, <8 x i32> %b, <8 x i32> <i32 11, i32 10, i32 9, i32 8, i32 7, i32 6, i32 5, i32 4>
ret <8 x i32> %shuffle
}
+define <8 x i32> @shuffle_v8i32_zuu8zuuc(<8 x i32> %a) {
+; AVX1-LABEL: shuffle_v8i32_zuu8zuuc:
+; AVX1: # BB#0:
+; AVX1-NEXT: vxorps %xmm1, %xmm1, %xmm1
+; AVX1-NEXT: vshufps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,0],ymm1[4,5],ymm0[6,4]
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: shuffle_v8i32_zuu8zuuc:
+; AVX2: # BB#0:
+; AVX2-NEXT: vpslldq {{.*#+}} ymm0 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm0[0,1,2,3],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm0[16,17,18,19]
+; AVX2-NEXT: retq
+ %shuffle = shufflevector <8 x i32> zeroinitializer, <8 x i32> %a, <8 x i32> <i32 0, i32 undef, i32 undef, i32 8, i32 0, i32 undef, i32 undef, i32 12>
+ ret <8 x i32> %shuffle
+}
+
+define <8 x i32> @shuffle_v8i32_9ubzdefz(<8 x i32> %a) {
+; AVX1-LABEL: shuffle_v8i32_9ubzdefz:
+; AVX1: # BB#0:
+; AVX1-NEXT: vxorps %xmm1, %xmm1, %xmm1
+; AVX1-NEXT: vshufps {{.*#+}} ymm1 = ymm1[3,0],ymm0[3,0],ymm1[7,4],ymm0[7,4]
+; AVX1-NEXT: vshufps {{.*#+}} ymm0 = ymm0[1,2],ymm1[2,0],ymm0[5,6],ymm1[6,4]
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: shuffle_v8i32_9ubzdefz:
+; AVX2: # BB#0:
+; AVX2-NEXT: vpsrldq {{.*#+}} ymm0 = ymm0[4,5,6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,ymm0[20,21,22,23,24,25,26,27,28,29,30,31],zero,zero,zero,zero
+; AVX2-NEXT: retq
+ %shuffle = shufflevector <8 x i32> zeroinitializer, <8 x i32> %a, <8 x i32> <i32 9, i32 undef, i32 11, i32 0, i32 13, i32 14, i32 15, i32 0>
+ ret <8 x i32> %shuffle
+}
+
+define <8 x i32> @shuffle_v8i32_80u1b4uu(<8 x i32> %a, <8 x i32> %b) {
+; AVX1-LABEL: shuffle_v8i32_80u1b4uu:
+; AVX1: # BB#0:
+; AVX1-NEXT: vunpcklps {{.*#+}} ymm0 = ymm1[0],ymm0[0],ymm1[1],ymm0[1],ymm1[4],ymm0[4],ymm1[5],ymm0[5]
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: shuffle_v8i32_80u1b4uu:
+; AVX2: # BB#0:
+; AVX2-NEXT: vpunpckldq {{.*#+}} ymm0 = ymm1[0],ymm0[0],ymm1[1],ymm0[1],ymm1[4],ymm0[4],ymm1[5],ymm0[5]
+; AVX2-NEXT: retq
+ %shuffle = shufflevector <8 x i32> %a, <8 x i32> %b, <8 x i32> <i32 8, i32 0, i32 undef, i32 1, i32 12, i32 4, i32 undef, i32 undef>
+ ret <8 x i32> %shuffle
+}
+
define <8 x float> @splat_mem_v8f32_2(float* %p) {
; ALL-LABEL: splat_mem_v8f32_2:
; ALL: # BB#0:
@@ -1929,3 +1926,169 @@ define <8 x float> @splat_v8f32(<4 x float> %r) {
%1 = shufflevector <4 x float> %r, <4 x float> undef, <8 x i32> zeroinitializer
ret <8 x float> %1
}
+
+;
+; Shuffle to logical bit shifts
+;
+
+define <8 x i32> @shuffle_v8i32_z0U2zUz6(<8 x i32> %a) {
+; AVX1-LABEL: shuffle_v8i32_z0U2zUz6:
+; AVX1: # BB#0:
+; AVX1-NEXT: vxorps %xmm1, %xmm1, %xmm1
+; AVX1-NEXT: vshufps {{.*#+}} ymm0 = ymm0[0,2],ymm1[0,2],ymm0[4,6],ymm1[4,6]
+; AVX1-NEXT: vshufps {{.*#+}} ymm0 = ymm0[2,0,3,1,6,4,7,5]
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: shuffle_v8i32_z0U2zUz6:
+; AVX2: # BB#0:
+; AVX2-NEXT: vpsllq $32, %ymm0, %ymm0
+; AVX2-NEXT: retq
+ %shuffle = shufflevector <8 x i32> %a, <8 x i32> zeroinitializer, <8 x i32> <i32 8, i32 0, i32 undef, i32 2, i32 8, i32 undef, i32 8, i32 6>
+ ret <8 x i32> %shuffle
+}
+
+define <8 x i32> @shuffle_v8i32_1U3z5zUU(<8 x i32> %a) {
+; AVX1-LABEL: shuffle_v8i32_1U3z5zUU:
+; AVX1: # BB#0:
+; AVX1-NEXT: vxorps %xmm1, %xmm1, %xmm1
+; AVX1-NEXT: vshufps {{.*#+}} ymm0 = ymm0[1,3],ymm1[1,3],ymm0[5,7],ymm1[5,7]
+; AVX1-NEXT: vshufps {{.*#+}} ymm0 = ymm0[0,2,1,3,4,6,5,7]
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: shuffle_v8i32_1U3z5zUU:
+; AVX2: # BB#0:
+; AVX2-NEXT: vpsrlq $32, %ymm0, %ymm0
+; AVX2-NEXT: retq
+ %shuffle = shufflevector <8 x i32> %a, <8 x i32> zeroinitializer, <8 x i32> <i32 1, i32 undef, i32 3, i32 8, i32 5, i32 8, i32 undef, i32 undef>
+ ret <8 x i32> %shuffle
+}
+
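The two tests above are the i32 counterparts: against a zero vector, the masks reduce to shifting each i64 by 32 bits, hence vpsllq/vpsrlq. A hypothetical IR restatement of z0U2zUz6 (the undef lanes are simply what the shift happens to produce):

define <8 x i32> @shl_i64_by_32(<8 x i32> %a) {
  ; each 64-bit left shift moves the even i32 into the odd slot and zeroes
  ; the even slot: <0, a0, 0, a2, 0, a4, 0, a6>
  %v = bitcast <8 x i32> %a to <4 x i64>
  %s = shl <4 x i64> %v, <i64 32, i64 32, i64 32, i64 32>
  %r = bitcast <4 x i64> %s to <8 x i32>
  ret <8 x i32> %r
}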
+define <8 x i32> @shuffle_v8i32_B012F456(<8 x i32> %a, <8 x i32> %b) {
+; AVX1-LABEL: shuffle_v8i32_B012F456:
+; AVX1: # BB#0:
+; AVX1-NEXT: vshufps {{.*#+}} ymm1 = ymm1[3,0],ymm0[0,0],ymm1[7,4],ymm0[4,4]
+; AVX1-NEXT: vshufps {{.*#+}} ymm0 = ymm1[0,2],ymm0[1,2],ymm1[4,6],ymm0[5,6]
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: shuffle_v8i32_B012F456:
+; AVX2: # BB#0:
+; AVX2-NEXT: vpalignr {{.*#+}} ymm0 = ymm1[12,13,14,15],ymm0[0,1,2,3,4,5,6,7,8,9,10,11],ymm1[28,29,30,31],ymm0[16,17,18,19,20,21,22,23,24,25,26,27]
+; AVX2-NEXT: retq
+ %shuffle = shufflevector <8 x i32> %a, <8 x i32> %b, <8 x i32> <i32 11, i32 0, i32 1, i32 2, i32 15, i32 4, i32 5, i32 6>
+ ret <8 x i32> %shuffle
+}
+
+define <8 x i32> @shuffle_v8i32_1238567C(<8 x i32> %a, <8 x i32> %b) {
+; AVX1-LABEL: shuffle_v8i32_1238567C:
+; AVX1: # BB#0:
+; AVX1-NEXT: vshufps {{.*#+}} ymm1 = ymm1[0,0],ymm0[3,0],ymm1[4,4],ymm0[7,4]
+; AVX1-NEXT: vshufps {{.*#+}} ymm0 = ymm0[1,2],ymm1[2,0],ymm0[5,6],ymm1[6,4]
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: shuffle_v8i32_1238567C:
+; AVX2: # BB#0:
+; AVX2-NEXT: vpalignr {{.*#+}} ymm0 = ymm0[4,5,6,7,8,9,10,11,12,13,14,15],ymm1[0,1,2,3],ymm0[20,21,22,23,24,25,26,27,28,29,30,31],ymm1[16,17,18,19]
+; AVX2-NEXT: retq
+ %shuffle = shufflevector <8 x i32> %a, <8 x i32> %b, <8 x i32> <i32 1, i32 2, i32 3, i32 8, i32 5, i32 6, i32 7, i32 12>
+ ret <8 x i32> %shuffle
+}
+
+define <8 x i32> @shuffle_v8i32_9AB0DEF4(<8 x i32> %a, <8 x i32> %b) {
+; AVX1-LABEL: shuffle_v8i32_9AB0DEF4:
+; AVX1: # BB#0:
+; AVX1-NEXT: vshufps {{.*#+}} ymm0 = ymm0[0,0],ymm1[3,0],ymm0[4,4],ymm1[7,4]
+; AVX1-NEXT: vshufps {{.*#+}} ymm0 = ymm1[1,2],ymm0[2,0],ymm1[5,6],ymm0[6,4]
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: shuffle_v8i32_9AB0DEF4:
+; AVX2: # BB#0:
+; AVX2-NEXT: vpalignr {{.*#+}} ymm0 = ymm1[4,5,6,7,8,9,10,11,12,13,14,15],ymm0[0,1,2,3],ymm1[20,21,22,23,24,25,26,27,28,29,30,31],ymm0[16,17,18,19]
+; AVX2-NEXT: retq
+ %shuffle = shufflevector <8 x i32> %a, <8 x i32> %b, <8 x i32> <i32 9, i32 10, i32 11, i32 0, i32 13, i32 14, i32 15, i32 4>
+ ret <8 x i32> %shuffle
+}
+
+define <8 x i32> @shuffle_v8i32_389A7CDE(<8 x i32> %a, <8 x i32> %b) {
+; AVX1-LABEL: shuffle_v8i32_389A7CDE:
+; AVX1: # BB#0:
+; AVX1-NEXT: vshufps {{.*#+}} ymm0 = ymm0[3,0],ymm1[0,0],ymm0[7,4],ymm1[4,4]
+; AVX1-NEXT: vshufps {{.*#+}} ymm0 = ymm0[0,2],ymm1[1,2],ymm0[4,6],ymm1[5,6]
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: shuffle_v8i32_389A7CDE:
+; AVX2: # BB#0:
+; AVX2-NEXT: vpalignr {{.*#+}} ymm0 = ymm0[12,13,14,15],ymm1[0,1,2,3,4,5,6,7,8,9,10,11],ymm0[28,29,30,31],ymm1[16,17,18,19,20,21,22,23,24,25,26,27]
+; AVX2-NEXT: retq
+ %shuffle = shufflevector <8 x i32> %a, <8 x i32> %b, <8 x i32> <i32 3, i32 8, i32 9, i32 10, i32 7, i32 12, i32 13, i32 14>
+ ret <8 x i32> %shuffle
+}
+
+define <8 x i32> @shuffle_v8i32_30127456(<8 x i32> %a, <8 x i32> %b) {
+; AVX1-LABEL: shuffle_v8i32_30127456:
+; AVX1: # BB#0:
+; AVX1-NEXT: vpermilps {{.*#+}} ymm0 = ymm0[3,0,1,2,7,4,5,6]
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: shuffle_v8i32_30127456:
+; AVX2: # BB#0:
+; AVX2-NEXT: vpshufd {{.*#+}} ymm0 = ymm0[3,0,1,2,7,4,5,6]
+; AVX2-NEXT: retq
+ %shuffle = shufflevector <8 x i32> %a, <8 x i32> %b, <8 x i32> <i32 3, i32 0, i32 1, i32 2, i32 7, i32 4, i32 5, i32 6>
+ ret <8 x i32> %shuffle
+}
+
+define <8 x i32> @shuffle_v8i32_12305674(<8 x i32> %a, <8 x i32> %b) {
+; AVX1-LABEL: shuffle_v8i32_12305674:
+; AVX1: # BB#0:
+; AVX1-NEXT: vpermilps {{.*#+}} ymm0 = ymm0[1,2,3,0,5,6,7,4]
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: shuffle_v8i32_12305674:
+; AVX2: # BB#0:
+; AVX2-NEXT: vpshufd {{.*#+}} ymm0 = ymm0[1,2,3,0,5,6,7,4]
+; AVX2-NEXT: retq
+ %shuffle = shufflevector <8 x i32> %a, <8 x i32> %b, <8 x i32> <i32 1, i32 2, i32 3, i32 0, i32 5, i32 6, i32 7, i32 4>
+ ret <8 x i32> %shuffle
+}
+
+define <8 x float> @concat_v2f32_1(<2 x float>* %tmp64, <2 x float>* %tmp65) {
+; ALL-LABEL: concat_v2f32_1:
+; ALL: # BB#0: # %entry
+; ALL-NEXT: vmovq {{.*#+}} xmm0 = mem[0],zero
+; ALL-NEXT: vmovhpd (%rsi), %xmm0, %xmm0
+; ALL-NEXT: retq
+entry:
+ %tmp74 = load <2 x float>* %tmp65, align 8
+ %tmp72 = load <2 x float>* %tmp64, align 8
+ %tmp73 = shufflevector <2 x float> %tmp72, <2 x float> undef, <8 x i32> <i32 0, i32 1, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+ %tmp75 = shufflevector <2 x float> %tmp74, <2 x float> undef, <8 x i32> <i32 0, i32 1, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+ %tmp76 = shufflevector <8 x float> %tmp73, <8 x float> %tmp75, <8 x i32> <i32 0, i32 1, i32 8, i32 9, i32 undef, i32 undef, i32 undef, i32 undef>
+ ret <8 x float> %tmp76
+}
+
+define <8 x float> @concat_v2f32_2(<2 x float>* %tmp64, <2 x float>* %tmp65) {
+; ALL-LABEL: concat_v2f32_2:
+; ALL: # BB#0: # %entry
+; ALL-NEXT: vmovq {{.*#+}} xmm0 = mem[0],zero
+; ALL-NEXT: vmovhpd (%rsi), %xmm0, %xmm0
+; ALL-NEXT: retq
+entry:
+ %tmp74 = load <2 x float>* %tmp65, align 8
+ %tmp72 = load <2 x float>* %tmp64, align 8
+ %tmp76 = shufflevector <2 x float> %tmp72, <2 x float> %tmp74, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 undef, i32 undef, i32 undef, i32 undef>
+ ret <8 x float> %tmp76
+}
+
+define <8 x float> @concat_v2f32_3(<2 x float>* %tmp64, <2 x float>* %tmp65) {
+; ALL-LABEL: concat_v2f32_3:
+; ALL: # BB#0: # %entry
+; ALL-NEXT: vmovq {{.*#+}} xmm0 = mem[0],zero
+; ALL-NEXT: vmovhpd (%rsi), %xmm0, %xmm0
+; ALL-NEXT: retq
+entry:
+ %tmp74 = load <2 x float>* %tmp65, align 8
+ %tmp72 = load <2 x float>* %tmp64, align 8
+ %tmp76 = shufflevector <2 x float> %tmp72, <2 x float> %tmp74, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %res = shufflevector <4 x float> %tmp76, <4 x float> undef, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 undef, i32 undef, i32 undef, i32 undef>
+ ret <8 x float> %res
+}
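One pattern in this file worth spelling out: the 80u1c4u5 and 80u1b4uu tests expect vunpcklps/vpunpckldq with the operands commuted (ymm1 first). Rewriting the shuffle with swapped operands shows why; this is an illustrative equivalence under that assumption, not a test from the commit:

; mask <8,0,u,1,12,4,u,u> over (%a, %b) picks b0,a0,u,a1,b4,a4,u,u, which is
; the canonical in-lane unpcklo mask once the operands are swapped:
define <8 x i32> @unpcklo_commuted(<8 x i32> %a, <8 x i32> %b) {
  %s = shufflevector <8 x i32> %b, <8 x i32> %a, <8 x i32> <i32 0, i32 8, i32 undef, i32 9, i32 4, i32 12, i32 undef, i32 undef>
  ret <8 x i32> %s
}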
diff --git a/test/CodeGen/X86/vector-shuffle-512-v16.ll b/test/CodeGen/X86/vector-shuffle-512-v16.ll
new file mode 100644
index 0000000..406d524
--- /dev/null
+++ b/test/CodeGen/X86/vector-shuffle-512-v16.ll
@@ -0,0 +1,40 @@
+; RUN: llc < %s -mcpu=x86-64 -mattr=+avx512f | FileCheck %s --check-prefix=ALL --check-prefix=AVX512 --check-prefix=AVX512F
+; RUN: llc < %s -mcpu=x86-64 -mattr=+avx512bw | FileCheck %s --check-prefix=ALL --check-prefix=AVX512 --check-prefix=AVX512BW
+
+target triple = "x86_64-unknown-unknown"
+
+define <16 x float> @shuffle_v16f32_00_10_01_11_04_14_05_15_08_18_09_19_0c_1c_0d_1d(<16 x float> %a, <16 x float> %b) {
+; ALL-LABEL: shuffle_v16f32_00_10_01_11_04_14_05_15_08_18_09_19_0c_1c_0d_1d:
+; ALL: # BB#0:
+; ALL-NEXT: vunpcklps {{.*#+}} zmm0 = zmm0[0],zmm1[0],zmm0[1],zmm1[1],zmm0[4],zmm1[4],zmm0[5],zmm1[5],zmm0[8],zmm1[8],zmm0[9],zmm1[9],zmm0[12],zmm1[12],zmm0[13],zmm1[13]
+; ALL-NEXT: retq
+ %shuffle = shufflevector <16 x float> %a, <16 x float> %b, <16 x i32><i32 0, i32 16, i32 1, i32 17, i32 4, i32 20, i32 5, i32 21, i32 8, i32 24, i32 9, i32 25, i32 12, i32 28, i32 13, i32 29>
+ ret <16 x float> %shuffle
+}
+
+define <16 x i32> @shuffle_v16i32_00_10_01_11_04_14_05_15_08_18_09_19_0c_1c_0d_1d(<16 x i32> %a, <16 x i32> %b) {
+; ALL-LABEL: shuffle_v16i32_00_10_01_11_04_14_05_15_08_18_09_19_0c_1c_0d_1d:
+; ALL: # BB#0:
+; ALL-NEXT: vpunpckldq {{.*#+}} zmm0 = zmm0[0],zmm1[0],zmm0[1],zmm1[1],zmm0[4],zmm1[4],zmm0[5],zmm1[5],zmm0[8],zmm1[8],zmm0[9],zmm1[9],zmm0[12],zmm1[12],zmm0[13],zmm1[13]
+; ALL-NEXT: retq
+ %shuffle = shufflevector <16 x i32> %a, <16 x i32> %b, <16 x i32><i32 0, i32 16, i32 1, i32 17, i32 4, i32 20, i32 5, i32 21, i32 8, i32 24, i32 9, i32 25, i32 12, i32 28, i32 13, i32 29>
+ ret <16 x i32> %shuffle
+}
+
+define <16 x float> @shuffle_v16f32_02_12_03_13_06_16_07_17_0a_1a_0b_1b_0e_1e_0f_1f(<16 x float> %a, <16 x float> %b) {
+; ALL-LABEL: shuffle_v16f32_02_12_03_13_06_16_07_17_0a_1a_0b_1b_0e_1e_0f_1f:
+; ALL: # BB#0:
+; ALL-NEXT: vunpckhps {{.*#+}} zmm0 = zmm0[2],zmm1[2],zmm0[3],zmm1[3],zmm0[6],zmm1[6],zmm0[7],zmm1[7],zmm0[10],zmm1[10],zmm0[11],zmm1[11],zmm0[14],zmm1[14],zmm0[15],zmm1[15]
+; ALL-NEXT: retq
+ %shuffle = shufflevector <16 x float> %a, <16 x float> %b, <16 x i32><i32 2, i32 18, i32 3, i32 19, i32 6, i32 22, i32 7, i32 23, i32 10, i32 26, i32 11, i32 27, i32 14, i32 30, i32 15, i32 31>
+ ret <16 x float> %shuffle
+}
+
+define <16 x i32> @shuffle_v16i32_02_12_03_13_06_16_07_17_0a_1a_0b_1b_0e_1e_0f_1f(<16 x i32> %a, <16 x i32> %b) {
+; ALL-LABEL: shuffle_v16i32_02_12_03_13_06_16_07_17_0a_1a_0b_1b_0e_1e_0f_1f:
+; ALL: # BB#0:
+; ALL-NEXT: vpunpckhdq {{.*#+}} zmm0 = zmm0[2],zmm1[2],zmm0[3],zmm1[3],zmm0[6],zmm1[6],zmm0[7],zmm1[7],zmm0[10],zmm1[10],zmm0[11],zmm1[11],zmm0[14],zmm1[14],zmm0[15],zmm1[15]
+; ALL-NEXT: retq
+ %shuffle = shufflevector <16 x i32> %a, <16 x i32> %b, <16 x i32><i32 2, i32 18, i32 3, i32 19, i32 6, i32 22, i32 7, i32 23, i32 10, i32 26, i32 11, i32 27, i32 14, i32 30, i32 15, i32 31>
+ ret <16 x i32> %shuffle
+}
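The new 512-v16 file only covers the ps/dq unpacks; the pd/qdq forms at zmm width obey the same per-128-bit-lane rule. A hedged sketch of the v8f64 analog, where the expected instruction is an assumption extrapolated from the tests above and the test itself is not part of this commit:

define <8 x double> @shuffle_v8f64_00_08_02_0a_04_0c_06_0e(<8 x double> %a, <8 x double> %b) {
; expected: vunpcklpd {{.*#+}} zmm0 = zmm0[0],zmm1[0],zmm0[2],zmm1[2],zmm0[4],zmm1[4],zmm0[6],zmm1[6]
  %shuffle = shufflevector <8 x double> %a, <8 x double> %b, <8 x i32> <i32 0, i32 8, i32 2, i32 10, i32 4, i32 12, i32 6, i32 14>
  ret <8 x double> %shuffle
}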
diff --git a/test/CodeGen/X86/vector-shuffle-512-v8.ll b/test/CodeGen/X86/vector-shuffle-512-v8.ll
index 8f87c7c..5ddec49 100644
--- a/test/CodeGen/X86/vector-shuffle-512-v8.ll
+++ b/test/CodeGen/X86/vector-shuffle-512-v8.ll
@@ -1,5 +1,5 @@
-; RUN: llc < %s -mcpu=x86-64 -mattr=+avx512f -x86-experimental-vector-shuffle-lowering | FileCheck %s --check-prefix=ALL --check-prefix=AVX512 --check-prefix=AVX512F
-; RUN: llc < %s -mcpu=x86-64 -mattr=+avx512bw -x86-experimental-vector-shuffle-lowering | FileCheck %s --check-prefix=ALL --check-prefix=AVX512 --check-prefix=AVX512BW
+; RUN: llc < %s -mcpu=x86-64 -mattr=+avx512f | FileCheck %s --check-prefix=ALL --check-prefix=AVX512 --check-prefix=AVX512F
+; RUN: llc < %s -mcpu=x86-64 -mattr=+avx512bw | FileCheck %s --check-prefix=ALL --check-prefix=AVX512 --check-prefix=AVX512BW
target triple = "x86_64-unknown-unknown"
@@ -62,9 +62,9 @@ define <8 x double> @shuffle_v8f64_00500000(<8 x double> %a, <8 x double> %b) {
; ALL-LABEL: shuffle_v8f64_00500000:
; ALL: # BB#0:
; ALL-NEXT: vextractf64x4 $1, %zmm0, %ymm1
-; ALL-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[0,1,1,3]
+; ALL-NEXT: vblendpd {{.*#+}} ymm1 = ymm0[0],ymm1[1],ymm0[2,3]
+; ALL-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[0,0,1,0]
; ALL-NEXT: vbroadcastsd %xmm0, %ymm0
-; ALL-NEXT: vblendpd {{.*#+}} ymm1 = ymm0[0,1],ymm1[2],ymm0[3]
; ALL-NEXT: vinsertf64x4 $1, %ymm0, %zmm1, %zmm0
; ALL-NEXT: retq
%shuffle = shufflevector <8 x double> %a, <8 x double> %b, <8 x i32> <i32 0, i32 0, i32 5, i32 0, i32 0, i32 0, i32 0, i32 0>
@@ -75,9 +75,9 @@ define <8 x double> @shuffle_v8f64_06000000(<8 x double> %a, <8 x double> %b) {
; ALL-LABEL: shuffle_v8f64_06000000:
; ALL: # BB#0:
; ALL-NEXT: vextractf64x4 $1, %zmm0, %ymm1
-; ALL-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[0,2,2,3]
+; ALL-NEXT: vblendpd {{.*#+}} ymm1 = ymm0[0,1],ymm1[2],ymm0[3]
+; ALL-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[0,2,0,0]
; ALL-NEXT: vbroadcastsd %xmm0, %ymm0
-; ALL-NEXT: vblendpd {{.*#+}} ymm1 = ymm0[0],ymm1[1],ymm0[2,3]
; ALL-NEXT: vinsertf64x4 $1, %ymm0, %zmm1, %zmm0
; ALL-NEXT: retq
%shuffle = shufflevector <8 x double> %a, <8 x double> %b, <8 x i32> <i32 0, i32 6, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0>
@@ -88,9 +88,9 @@ define <8 x double> @shuffle_v8f64_70000000(<8 x double> %a, <8 x double> %b) {
; ALL-LABEL: shuffle_v8f64_70000000:
; ALL: # BB#0:
; ALL-NEXT: vextractf64x4 $1, %zmm0, %ymm1
-; ALL-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[3,1,2,3]
+; ALL-NEXT: vblendpd {{.*#+}} ymm1 = ymm0[0,1,2],ymm1[3]
+; ALL-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[3,0,0,0]
; ALL-NEXT: vbroadcastsd %xmm0, %ymm0
-; ALL-NEXT: vblendpd {{.*#+}} ymm1 = ymm1[0],ymm0[1,2,3]
; ALL-NEXT: vinsertf64x4 $1, %ymm0, %zmm1, %zmm0
; ALL-NEXT: retq
%shuffle = shufflevector <8 x double> %a, <8 x double> %b, <8 x i32> <i32 7, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0>
@@ -191,15 +191,13 @@ define <8 x double> @shuffle_v8f64_8823cc67(<8 x double> %a, <8 x double> %b) {
define <8 x double> @shuffle_v8f64_9832dc76(<8 x double> %a, <8 x double> %b) {
; ALL-LABEL: shuffle_v8f64_9832dc76:
; ALL: # BB#0:
-; ALL-NEXT: vextractf64x4 $1, %zmm0, %ymm2
-; ALL-NEXT: vpermilpd {{.*#+}} ymm2 = ymm2[0,0,3,2]
-; ALL-NEXT: vextractf64x4 $1, %zmm1, %ymm3
-; ALL-NEXT: vpermilpd {{.*#+}} ymm3 = ymm3[1,0,2,2]
-; ALL-NEXT: vblendpd {{.*#+}} ymm2 = ymm3[0,1],ymm2[2,3]
-; ALL-NEXT: vpermilpd {{.*#+}} ymm0 = ymm0[0,0,3,2]
-; ALL-NEXT: vpermilpd {{.*#+}} ymm1 = ymm1[1,0,2,2]
+; ALL-NEXT: vblendpd {{.*#+}} ymm2 = ymm1[0,1],ymm0[2,3]
+; ALL-NEXT: vpermilpd {{.*#+}} ymm2 = ymm2[1,0,3,2]
+; ALL-NEXT: vextractf64x4 $1, %zmm0, %ymm0
+; ALL-NEXT: vextractf64x4 $1, %zmm1, %ymm1
; ALL-NEXT: vblendpd {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3]
-; ALL-NEXT: vinsertf64x4 $1, %ymm2, %zmm0, %zmm0
+; ALL-NEXT: vpermilpd {{.*#+}} ymm0 = ymm0[1,0,3,2]
+; ALL-NEXT: vinsertf64x4 $1, %ymm0, %zmm2, %zmm0
; ALL-NEXT: retq
%shuffle = shufflevector <8 x double> %a, <8 x double> %b, <8 x i32> <i32 9, i32 8, i32 3, i32 2, i32 13, i32 12, i32 7, i32 6>
ret <8 x double> %shuffle
@@ -208,15 +206,13 @@ define <8 x double> @shuffle_v8f64_9832dc76(<8 x double> %a, <8 x double> %b) {
define <8 x double> @shuffle_v8f64_9810dc54(<8 x double> %a, <8 x double> %b) {
; ALL-LABEL: shuffle_v8f64_9810dc54:
; ALL: # BB#0:
-; ALL-NEXT: vextractf64x4 $1, %zmm0, %ymm2
-; ALL-NEXT: vpermpd {{.*#+}} ymm2 = ymm2[0,1,1,0]
-; ALL-NEXT: vextractf64x4 $1, %zmm1, %ymm3
-; ALL-NEXT: vpermilpd {{.*#+}} ymm3 = ymm3[1,0,2,2]
-; ALL-NEXT: vblendpd {{.*#+}} ymm2 = ymm3[0,1],ymm2[2,3]
-; ALL-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[0,1,1,0]
-; ALL-NEXT: vpermilpd {{.*#+}} ymm1 = ymm1[1,0,2,2]
-; ALL-NEXT: vblendpd {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3]
-; ALL-NEXT: vinsertf64x4 $1, %ymm2, %zmm0, %zmm0
+; ALL-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm2
+; ALL-NEXT: vpermilpd {{.*#+}} ymm2 = ymm2[1,0,3,2]
+; ALL-NEXT: vextractf64x4 $1, %zmm1, %ymm1
+; ALL-NEXT: vextractf64x4 $1, %zmm0, %ymm0
+; ALL-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
+; ALL-NEXT: vpermilpd {{.*#+}} ymm0 = ymm0[1,0,3,2]
+; ALL-NEXT: vinsertf64x4 $1, %ymm0, %zmm2, %zmm0
; ALL-NEXT: retq
%shuffle = shufflevector <8 x double> %a, <8 x double> %b, <8 x i32> <i32 9, i32 8, i32 1, i32 0, i32 13, i32 12, i32 5, i32 4>
ret <8 x double> %shuffle
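; For 9810dc54 every result element comes from the low 128 bits of a source
; half, so instead of blending full ymm values the new code concatenates the
; two low xmm quarters with vinsertf128 and applies the same in-lane swap.
; One half of the pattern as a sketch, under the same assumption:

define <4 x double> @sketch_concat_low_quarters(<4 x double> %a, <4 x double> %b) {
  ; <b1, b0, a1, a0>: vinsertf128 builds <b0,b1,a0,a1>, vpermilpd swaps pairs
  %s = shufflevector <4 x double> %a, <4 x double> %b, <4 x i32> <i32 5, i32 4, i32 1, i32 0>
  ret <4 x double> %s
}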
@@ -274,12 +270,11 @@ define <8 x double> @shuffle_v8f64_08192a3b(<8 x double> %a, <8 x double> %b) {
define <8 x double> @shuffle_v8f64_08991abb(<8 x double> %a, <8 x double> %b) {
; ALL-LABEL: shuffle_v8f64_08991abb:
; ALL: # BB#0:
-; ALL-NEXT: vpermilpd {{.*#+}} ymm2 = ymm0[1,0,2,2]
-; ALL-NEXT: vpermpd {{.*#+}} ymm3 = ymm1[0,2,3,3]
-; ALL-NEXT: vblendpd {{.*#+}} ymm2 = ymm2[0],ymm3[1,2,3]
-; ALL-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[0,0,1,1]
-; ALL-NEXT: vblendpd {{.*#+}} ymm0 = ymm0[0],ymm1[1,2,3]
-; ALL-NEXT: vinsertf64x4 $1, %ymm2, %zmm0, %zmm0
+; ALL-NEXT: vpermpd {{.*#+}} ymm2 = ymm1[0,0,1,1]
+; ALL-NEXT: vblendpd {{.*#+}} ymm2 = ymm0[0],ymm2[1,2,3]
+; ALL-NEXT: vblendpd {{.*#+}} ymm0 = ymm1[0],ymm0[1],ymm1[2,3]
+; ALL-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[1,2,3,3]
+; ALL-NEXT: vinsertf64x4 $1, %ymm0, %zmm2, %zmm0
; ALL-NEXT: retq
%shuffle = shufflevector <8 x double> %a, <8 x double> %b, <8 x i32> <i32 0, i32 8, i32 9, i32 9, i32 1, i32 10, i32 11, i32 11>
ret <8 x double> %shuffle
@@ -411,9 +406,9 @@ define <8 x double> @shuffle_v8f64_00234467(<8 x double> %a, <8 x double> %b) {
define <8 x double> @shuffle_v8f64_00224466(<8 x double> %a, <8 x double> %b) {
; ALL-LABEL: shuffle_v8f64_00224466:
; ALL: # BB#0:
-; ALL-NEXT: vpermilpd {{.*#+}} ymm1 = ymm0[0,0,2,2]
+; ALL-NEXT: vmovddup {{.*#+}} ymm1 = ymm0[0,0,2,2]
; ALL-NEXT: vextractf64x4 $1, %zmm0, %ymm0
-; ALL-NEXT: vpermilpd {{.*#+}} ymm0 = ymm0[0,0,2,2]
+; ALL-NEXT: vmovddup {{.*#+}} ymm0 = ymm0[0,0,2,2]
; ALL-NEXT: vinsertf64x4 $1, %ymm0, %zmm1, %zmm0
; ALL-NEXT: retq
%shuffle = shufflevector <8 x double> %a, <8 x double> %b, <8 x i32> <i32 0, i32 0, i32 2, i32 2, i32 4, i32 4, i32 6, i32 6>
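; vpermilpd with mask [0,0,2,2] duplicates the even-indexed element of each
; 128-bit lane, which is exactly what vmovddup does; movddup takes no imm8
; byte, so it is the shorter encoding of the same shuffle. Minimal repro,
; assuming the 256-bit lowering matches the pattern above:

define <4 x double> @sketch_dup_even(<4 x double> %a) {
  %s = shufflevector <4 x double> %a, <4 x double> undef, <4 x i32> <i32 0, i32 0, i32 2, i32 2>
  ret <4 x double> %s
}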
@@ -566,7 +561,7 @@ define <8 x double> @shuffle_v8f64_00236744(<8 x double> %a, <8 x double> %b) {
define <8 x double> @shuffle_v8f64_00226644(<8 x double> %a, <8 x double> %b) {
; ALL-LABEL: shuffle_v8f64_00226644:
; ALL: # BB#0:
-; ALL-NEXT: vpermilpd {{.*#+}} ymm1 = ymm0[0,0,2,2]
+; ALL-NEXT: vmovddup {{.*#+}} ymm1 = ymm0[0,0,2,2]
; ALL-NEXT: vextractf64x4 $1, %zmm0, %ymm0
; ALL-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[2,2,0,0]
; ALL-NEXT: vinsertf64x4 $1, %ymm0, %zmm1, %zmm0
@@ -622,7 +617,7 @@ define <8 x double> @shuffle_v8f64_01235466(<8 x double> %a, <8 x double> %b) {
define <8 x double> @shuffle_v8f64_002u6u44(<8 x double> %a, <8 x double> %b) {
; ALL-LABEL: shuffle_v8f64_002u6u44:
; ALL: # BB#0:
-; ALL-NEXT: vpermilpd {{.*#+}} ymm1 = ymm0[0,0,2,2]
+; ALL-NEXT: vmovddup {{.*#+}} ymm1 = ymm0[0,0,2,2]
; ALL-NEXT: vextractf64x4 $1, %zmm0, %ymm0
; ALL-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[2,1,0,0]
; ALL-NEXT: vinsertf64x4 $1, %ymm0, %zmm1, %zmm0
@@ -680,7 +675,7 @@ define <8 x double> @shuffle_v8f64_uuu3uu66(<8 x double> %a, <8 x double> %b) {
; ALL-LABEL: shuffle_v8f64_uuu3uu66:
; ALL: # BB#0:
; ALL-NEXT: vextractf64x4 $1, %zmm0, %ymm1
-; ALL-NEXT: vpermilpd {{.*#+}} ymm1 = ymm1[0,0,2,2]
+; ALL-NEXT: vmovddup {{.*#+}} ymm1 = ymm1[0,0,2,2]
; ALL-NEXT: vinsertf64x4 $1, %ymm1, %zmm0, %zmm0
; ALL-NEXT: retq
%shuffle = shufflevector <8 x double> %a, <8 x double> %b, <8 x i32> <i32 undef, i32 undef, i32 undef, i32 3, i32 undef, i32 undef, i32 6, i32 6>
@@ -708,18 +703,17 @@ define <8 x double> @shuffle_v8f64_c348cda0(<8 x double> %a, <8 x double> %b) {
define <8 x double> @shuffle_v8f64_f511235a(<8 x double> %a, <8 x double> %b) {
; ALL-LABEL: shuffle_v8f64_f511235a:
; ALL: # BB#0:
-; ALL-NEXT: vperm2f128 {{.*#+}} ymm2 = ymm0[2,3,0,1]
-; ALL-NEXT: vextractf64x4 $1, %zmm0, %ymm3
-; ALL-NEXT: vpermpd {{.*#+}} ymm4 = ymm3[0,1,1,3]
-; ALL-NEXT: vblendpd {{.*#+}} ymm2 = ymm2[0,1],ymm4[2],ymm2[3]
-; ALL-NEXT: vpermilpd {{.*#+}} ymm4 = ymm1[0,0,2,2]
-; ALL-NEXT: vblendpd {{.*#+}} ymm2 = ymm2[0,1,2],ymm4[3]
+; ALL-NEXT: vextractf64x4 $1, %zmm0, %ymm2
+; ALL-NEXT: vblendpd {{.*#+}} ymm3 = ymm0[0],ymm2[1],ymm0[2,3]
+; ALL-NEXT: vpermpd {{.*#+}} ymm3 = ymm3[2,3,1,3]
+; ALL-NEXT: vmovddup {{.*#+}} ymm4 = ymm1[0,0,2,2]
+; ALL-NEXT: vblendpd {{.*#+}} ymm3 = ymm3[0,1,2],ymm4[3]
; ALL-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[0,1,1,1]
-; ALL-NEXT: vblendpd {{.*#+}} ymm0 = ymm0[0],ymm3[1],ymm0[2,3]
+; ALL-NEXT: vblendpd {{.*#+}} ymm0 = ymm0[0],ymm2[1],ymm0[2,3]
; ALL-NEXT: vextractf64x4 $1, %zmm1, %ymm1
; ALL-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[3,1,2,3]
; ALL-NEXT: vblendpd {{.*#+}} ymm0 = ymm1[0],ymm0[1,2,3]
-; ALL-NEXT: vinsertf64x4 $1, %ymm2, %zmm0, %zmm0
+; ALL-NEXT: vinsertf64x4 $1, %ymm3, %zmm0, %zmm0
; ALL-NEXT: retq
%shuffle = shufflevector <8 x double> %a, <8 x double> %b, <8 x i32> <i32 15, i32 5, i32 1, i32 1, i32 2, i32 3, i32 5, i32 10>
ret <8 x double> %shuffle
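; In the f511235a hunk the rewrite is one instruction shorter (18 lines down
; to 17 per the hunk header): the vperm2f128 lane swap of %a disappears, and
; a single extracted high half (ymm2) is reused by both result halves.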
@@ -784,9 +778,9 @@ define <8 x i64> @shuffle_v8i64_00500000(<8 x i64> %a, <8 x i64> %b) {
; ALL-LABEL: shuffle_v8i64_00500000:
; ALL: # BB#0:
; ALL-NEXT: vextracti64x4 $1, %zmm0, %ymm1
-; ALL-NEXT: vpermq {{.*#+}} ymm1 = ymm1[0,1,1,3]
+; ALL-NEXT: vpblendd {{.*#+}} ymm1 = ymm0[0,1],ymm1[2,3],ymm0[4,5,6,7]
+; ALL-NEXT: vpermq {{.*#+}} ymm1 = ymm1[0,0,1,0]
; ALL-NEXT: vpbroadcastq %xmm0, %ymm0
-; ALL-NEXT: vpblendd {{.*#+}} ymm1 = ymm0[0,1,2,3],ymm1[4,5],ymm0[6,7]
; ALL-NEXT: vinserti64x4 $1, %ymm0, %zmm1, %zmm0
; ALL-NEXT: retq
%shuffle = shufflevector <8 x i64> %a, <8 x i64> %b, <8 x i32> <i32 0, i32 0, i32 5, i32 0, i32 0, i32 0, i32 0, i32 0>
@@ -797,9 +791,9 @@ define <8 x i64> @shuffle_v8i64_06000000(<8 x i64> %a, <8 x i64> %b) {
; ALL-LABEL: shuffle_v8i64_06000000:
; ALL: # BB#0:
; ALL-NEXT: vextracti64x4 $1, %zmm0, %ymm1
-; ALL-NEXT: vpermq {{.*#+}} ymm1 = ymm1[0,2,2,3]
+; ALL-NEXT: vpblendd {{.*#+}} ymm1 = ymm0[0,1,2,3],ymm1[4,5],ymm0[6,7]
+; ALL-NEXT: vpermq {{.*#+}} ymm1 = ymm1[0,2,0,0]
; ALL-NEXT: vpbroadcastq %xmm0, %ymm0
-; ALL-NEXT: vpblendd {{.*#+}} ymm1 = ymm0[0,1],ymm1[2,3],ymm0[4,5,6,7]
; ALL-NEXT: vinserti64x4 $1, %ymm0, %zmm1, %zmm0
; ALL-NEXT: retq
%shuffle = shufflevector <8 x i64> %a, <8 x i64> %b, <8 x i32> <i32 0, i32 6, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0>
@@ -810,9 +804,9 @@ define <8 x i64> @shuffle_v8i64_70000000(<8 x i64> %a, <8 x i64> %b) {
; ALL-LABEL: shuffle_v8i64_70000000:
; ALL: # BB#0:
; ALL-NEXT: vextracti64x4 $1, %zmm0, %ymm1
-; ALL-NEXT: vpermq {{.*#+}} ymm1 = ymm1[3,1,2,3]
+; ALL-NEXT: vpblendd {{.*#+}} ymm1 = ymm0[0,1,2,3,4,5],ymm1[6,7]
+; ALL-NEXT: vpermq {{.*#+}} ymm1 = ymm1[3,0,0,0]
; ALL-NEXT: vpbroadcastq %xmm0, %ymm0
-; ALL-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1],ymm0[2,3,4,5,6,7]
; ALL-NEXT: vinserti64x4 $1, %ymm0, %zmm1, %zmm0
; ALL-NEXT: retq
%shuffle = shufflevector <8 x i64> %a, <8 x i64> %b, <8 x i32> <i32 7, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0>
@@ -913,15 +907,13 @@ define <8 x i64> @shuffle_v8i64_8823cc67(<8 x i64> %a, <8 x i64> %b) {
define <8 x i64> @shuffle_v8i64_9832dc76(<8 x i64> %a, <8 x i64> %b) {
; ALL-LABEL: shuffle_v8i64_9832dc76:
; ALL: # BB#0:
-; ALL-NEXT: vextracti64x4 $1, %zmm0, %ymm2
+; ALL-NEXT: vpblendd {{.*#+}} ymm2 = ymm1[0,1,2,3],ymm0[4,5,6,7]
; ALL-NEXT: vpshufd {{.*#+}} ymm2 = ymm2[2,3,0,1,6,7,4,5]
-; ALL-NEXT: vextracti64x4 $1, %zmm1, %ymm3
-; ALL-NEXT: vpshufd {{.*#+}} ymm3 = ymm3[2,3,0,1,6,7,4,5]
-; ALL-NEXT: vpblendd {{.*#+}} ymm2 = ymm3[0,1,2,3],ymm2[4,5,6,7]
-; ALL-NEXT: vpshufd {{.*#+}} ymm0 = ymm0[2,3,0,1,6,7,4,5]
-; ALL-NEXT: vpshufd {{.*#+}} ymm1 = ymm1[2,3,0,1,6,7,4,5]
+; ALL-NEXT: vextracti64x4 $1, %zmm0, %ymm0
+; ALL-NEXT: vextracti64x4 $1, %zmm1, %ymm1
; ALL-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
-; ALL-NEXT: vinserti64x4 $1, %ymm2, %zmm0, %zmm0
+; ALL-NEXT: vpshufd {{.*#+}} ymm0 = ymm0[2,3,0,1,6,7,4,5]
+; ALL-NEXT: vinserti64x4 $1, %ymm0, %zmm2, %zmm0
; ALL-NEXT: retq
%shuffle = shufflevector <8 x i64> %a, <8 x i64> %b, <8 x i32> <i32 9, i32 8, i32 3, i32 2, i32 13, i32 12, i32 7, i32 6>
ret <8 x i64> %shuffle
@@ -930,15 +922,13 @@ define <8 x i64> @shuffle_v8i64_9832dc76(<8 x i64> %a, <8 x i64> %b) {
define <8 x i64> @shuffle_v8i64_9810dc54(<8 x i64> %a, <8 x i64> %b) {
; ALL-LABEL: shuffle_v8i64_9810dc54:
; ALL: # BB#0:
-; ALL-NEXT: vextracti64x4 $1, %zmm0, %ymm2
-; ALL-NEXT: vpermq {{.*#+}} ymm2 = ymm2[0,1,1,0]
-; ALL-NEXT: vextracti64x4 $1, %zmm1, %ymm3
-; ALL-NEXT: vpshufd {{.*#+}} ymm3 = ymm3[2,3,0,1,6,7,4,5]
-; ALL-NEXT: vpblendd {{.*#+}} ymm2 = ymm3[0,1,2,3],ymm2[4,5,6,7]
-; ALL-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,1,1,0]
-; ALL-NEXT: vpshufd {{.*#+}} ymm1 = ymm1[2,3,0,1,6,7,4,5]
-; ALL-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
-; ALL-NEXT: vinserti64x4 $1, %ymm2, %zmm0, %zmm0
+; ALL-NEXT: vinserti128 $1, %xmm0, %ymm1, %ymm2
+; ALL-NEXT: vpshufd {{.*#+}} ymm2 = ymm2[2,3,0,1,6,7,4,5]
+; ALL-NEXT: vextracti64x4 $1, %zmm1, %ymm1
+; ALL-NEXT: vextracti64x4 $1, %zmm0, %ymm0
+; ALL-NEXT: vinserti128 $1, %xmm0, %ymm1, %ymm0
+; ALL-NEXT: vpshufd {{.*#+}} ymm0 = ymm0[2,3,0,1,6,7,4,5]
+; ALL-NEXT: vinserti64x4 $1, %ymm0, %zmm2, %zmm0
; ALL-NEXT: retq
%shuffle = shufflevector <8 x i64> %a, <8 x i64> %b, <8 x i32> <i32 9, i32 8, i32 1, i32 0, i32 13, i32 12, i32 5, i32 4>
ret <8 x i64> %shuffle
@@ -996,12 +986,11 @@ define <8 x i64> @shuffle_v8i64_08192a3b(<8 x i64> %a, <8 x i64> %b) {
define <8 x i64> @shuffle_v8i64_08991abb(<8 x i64> %a, <8 x i64> %b) {
; ALL-LABEL: shuffle_v8i64_08991abb:
; ALL: # BB#0:
-; ALL-NEXT: vpshufd {{.*#+}} ymm2 = ymm0[2,3,2,3,6,7,6,7]
-; ALL-NEXT: vpermq {{.*#+}} ymm3 = ymm1[0,2,3,3]
-; ALL-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1],ymm3[2,3,4,5,6,7]
-; ALL-NEXT: vpermq {{.*#+}} ymm1 = ymm1[0,0,1,1]
-; ALL-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3,4,5,6,7]
-; ALL-NEXT: vinserti64x4 $1, %ymm2, %zmm0, %zmm0
+; ALL-NEXT: vpermq {{.*#+}} ymm2 = ymm1[0,0,1,1]
+; ALL-NEXT: vpblendd {{.*#+}} ymm2 = ymm0[0,1],ymm2[2,3,4,5,6,7]
+; ALL-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3],ymm1[4,5,6,7]
+; ALL-NEXT: vpermq {{.*#+}} ymm0 = ymm0[1,2,3,3]
+; ALL-NEXT: vinserti64x4 $1, %ymm0, %zmm2, %zmm0
; ALL-NEXT: retq
%shuffle = shufflevector <8 x i64> %a, <8 x i64> %b, <8 x i32> <i32 0, i32 8, i32 9, i32 9, i32 1, i32 10, i32 11, i32 11>
ret <8 x i64> %shuffle
@@ -1418,12 +1407,47 @@ define <8 x i64> @shuffle_v8i64_6caa87e5(<8 x i64> %a, <8 x i64> %b) {
; ALL-NEXT: vpblendd {{.*#+}} ymm4 = ymm1[0,1,2,3],ymm3[4,5],ymm1[6,7]
; ALL-NEXT: vpblendd {{.*#+}} ymm2 = ymm4[0,1],ymm2[2,3],ymm4[4,5],ymm2[6,7]
; ALL-NEXT: vperm2i128 {{.*#+}} ymm0 = ymm0[2,3,0,1]
+; ALL-NEXT: vpblendd {{.*#+}} ymm1 = ymm3[0,1,2,3],ymm1[4,5,6,7]
; ALL-NEXT: vpshufd {{.*#+}} ymm1 = ymm1[0,1,0,1,4,5,4,5]
-; ALL-NEXT: vpbroadcastq %xmm3, %ymm3
-; ALL-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1],ymm3[2,3],ymm1[4,5,6,7]
; ALL-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3,4,5,6,7]
; ALL-NEXT: vinserti64x4 $1, %ymm2, %zmm0, %zmm0
; ALL-NEXT: retq
%shuffle = shufflevector <8 x i64> %a, <8 x i64> %b, <8 x i32> <i32 6, i32 12, i32 10, i32 10, i32 8, i32 7, i32 14, i32 5>
ret <8 x i64> %shuffle
}
+
+define <8 x double> @shuffle_v8f64_082a4c6e(<8 x double> %a, <8 x double> %b) {
+; ALL-LABEL: shuffle_v8f64_082a4c6e:
+; ALL: # BB#0:
+; ALL-NEXT: vunpcklpd {{.*#+}} zmm0 = zmm0[0],zmm1[0],zmm0[2],zmm1[2],zmm0[4],zmm1[4],zmm0[6],zmm1[6]
+; ALL-NEXT: retq
+ %shuffle = shufflevector <8 x double> %a, <8 x double> %b, <8 x i32><i32 0, i32 8, i32 2, i32 10, i32 4, i32 12, i32 6, i32 14>
+ ret <8 x double> %shuffle
+}
+
+define <8 x i64> @shuffle_v8i64_082a4c6e(<8 x i64> %a, <8 x i64> %b) {
+; ALL-LABEL: shuffle_v8i64_082a4c6e:
+; ALL: # BB#0:
+; ALL-NEXT: vpunpcklqdq {{.*#+}} zmm0 = zmm0[0],zmm1[0],zmm0[2],zmm1[2],zmm0[4],zmm1[4],zmm0[6],zmm1[6]
+; ALL-NEXT: retq
+ %shuffle = shufflevector <8 x i64> %a, <8 x i64> %b, <8 x i32><i32 0, i32 8, i32 2, i32 10, i32 4, i32 12, i32 6, i32 14>
+ ret <8 x i64> %shuffle
+}
+
+define <8 x double> @shuffle_v8f64_193b5d7f(<8 x double> %a, <8 x double> %b) {
+; ALL-LABEL: shuffle_v8f64_193b5d7f:
+; ALL: # BB#0:
+; ALL-NEXT: vunpckhpd {{.*#+}} zmm0 = zmm0[1],zmm1[1],zmm0[3],zmm1[3],zmm0[5],zmm1[5],zmm0[7],zmm1[7]
+; ALL-NEXT: retq
+ %shuffle = shufflevector <8 x double> %a, <8 x double> %b, <8 x i32><i32 1, i32 9, i32 3, i32 11, i32 5, i32 13, i32 7, i32 15>
+ ret <8 x double> %shuffle
+}
+
+define <8 x i64> @shuffle_v8i64_193b5d7f(<8 x i64> %a, <8 x i64> %b) {
+; ALL-LABEL: shuffle_v8i64_193b5d7f:
+; ALL: # BB#0:
+; ALL-NEXT: vpunpckhqdq {{.*#+}} zmm0 = zmm0[1],zmm1[1],zmm0[3],zmm1[3],zmm0[5],zmm1[5],zmm0[7],zmm1[7]
+; ALL-NEXT: retq
+ %shuffle = shufflevector <8 x i64> %a, <8 x i64> %b, <8 x i32><i32 1, i32 9, i32 3, i32 11, i32 5, i32 13, i32 7, i32 15>
+ ret <8 x i64> %shuffle
+}
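; The four added tests pin the interleaving masks to their single-instruction
; AVX-512 forms: per 128-bit lane, unpcklpd/punpcklqdq take element 0 of each
; source and unpckhpd/punpckhqdq take element 1. The analogous 256-bit mask,
; which should likewise fold to one vunpcklpd (an assumption, this file only
; covers the 512-bit case):

define <4 x double> @sketch_interleave_even(<4 x double> %a, <4 x double> %b) {
  %s = shufflevector <4 x double> %a, <4 x double> %b, <4 x i32> <i32 0, i32 4, i32 2, i32 6>
  ret <4 x double> %s
}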
diff --git a/test/CodeGen/X86/vector-shuffle-combining.ll b/test/CodeGen/X86/vector-shuffle-combining.ll
index 22a6749..b99946f 100644
--- a/test/CodeGen/X86/vector-shuffle-combining.ll
+++ b/test/CodeGen/X86/vector-shuffle-combining.ll
@@ -275,16 +275,18 @@ define <4 x i32> @combine_bitwise_ops_test6(<4 x i32> %a, <4 x i32> %b, <4 x i32
define <4 x i32> @combine_bitwise_ops_test1b(<4 x i32> %a, <4 x i32> %b, <4 x i32> %c) {
; SSE2-LABEL: combine_bitwise_ops_test1b:
; SSE2: # BB#0:
-; SSE2-NEXT: andps %xmm1, %xmm0
-; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,2],xmm2[1,3]
-; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,2,1,3]
+; SSE2-NEXT: pand %xmm1, %xmm0
+; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
+; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm2[1,3,2,3]
+; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; SSE2-NEXT: retq
;
; SSSE3-LABEL: combine_bitwise_ops_test1b:
; SSSE3: # BB#0:
-; SSSE3-NEXT: andps %xmm1, %xmm0
-; SSSE3-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,2],xmm2[1,3]
-; SSSE3-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,2,1,3]
+; SSSE3-NEXT: pand %xmm1, %xmm0
+; SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
+; SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm2[1,3,2,3]
+; SSSE3-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; SSSE3-NEXT: retq
;
; SSE41-LABEL: combine_bitwise_ops_test1b:
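; The SSE2/SSSE3 change above keeps <4 x i32> values in the integer domain
; (pand/pshufd/punpckldq instead of andps/shufps), avoiding the potential
; bypass delay of moving data between the float and integer SIMD domains.
; The two sequences compute the same value; with x = a & b and c the third
; operand:
;   pshufd x, [0,2,2,3]  -> <x0, x2, x2, x3>
;   pshufd c, [1,3,2,3]  -> <c1, c3, c2, c3>
;   punpckldq            -> <x0, c1, x2, c3>
; which matches the old shufps pair: <x0, x2, c1, c3> reordered by [0,2,1,3]
; into <x0, c1, x2, c3>.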
@@ -313,16 +315,18 @@ define <4 x i32> @combine_bitwise_ops_test1b(<4 x i32> %a, <4 x i32> %b, <4 x i3
define <4 x i32> @combine_bitwise_ops_test2b(<4 x i32> %a, <4 x i32> %b, <4 x i32> %c) {
; SSE2-LABEL: combine_bitwise_ops_test2b:
; SSE2: # BB#0:
-; SSE2-NEXT: orps %xmm1, %xmm0
-; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,2],xmm2[1,3]
-; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,2,1,3]
+; SSE2-NEXT: por %xmm1, %xmm0
+; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
+; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm2[1,3,2,3]
+; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; SSE2-NEXT: retq
;
; SSSE3-LABEL: combine_bitwise_ops_test2b:
; SSSE3: # BB#0:
-; SSSE3-NEXT: orps %xmm1, %xmm0
-; SSSE3-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,2],xmm2[1,3]
-; SSSE3-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,2,1,3]
+; SSSE3-NEXT: por %xmm1, %xmm0
+; SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
+; SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm2[1,3,2,3]
+; SSSE3-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; SSSE3-NEXT: retq
;
; SSE41-LABEL: combine_bitwise_ops_test2b:
@@ -352,17 +356,13 @@ define <4 x i32> @combine_bitwise_ops_test3b(<4 x i32> %a, <4 x i32> %b, <4 x i3
; SSE2-LABEL: combine_bitwise_ops_test3b:
; SSE2: # BB#0:
; SSE2-NEXT: xorps %xmm1, %xmm0
-; SSE2-NEXT: xorps %xmm1, %xmm1
-; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[1,3]
-; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,2,1,3]
+; SSE2-NEXT: andps {{.*}}(%rip), %xmm0
; SSE2-NEXT: retq
;
; SSSE3-LABEL: combine_bitwise_ops_test3b:
; SSSE3: # BB#0:
; SSSE3-NEXT: xorps %xmm1, %xmm0
-; SSSE3-NEXT: xorps %xmm1, %xmm1
-; SSSE3-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[1,3]
-; SSSE3-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,2,1,3]
+; SSSE3-NEXT: andps {{.*}}(%rip), %xmm0
; SSSE3-NEXT: retq
;
; SSE41-LABEL: combine_bitwise_ops_test3b:
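; In test3b (and test6b below) both shuffle operands take elements 5 and 7
; from the same %c, so those lanes cancel under xor. What remains is a ^ b
; with two lanes forced to zero, which is a single and against a constant-pool
; mask (hence the {{.*}}(%rip) operand). In IR terms the pattern reduces to
; roughly:
;   %x = xor <4 x i32> %a, %b
;   %r = and <4 x i32> %x, <i32 -1, i32 0, i32 -1, i32 0>
; (the exact mask is an inference from the old shuffle sequence).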
@@ -394,18 +394,18 @@ define <4 x i32> @combine_bitwise_ops_test3b(<4 x i32> %a, <4 x i32> %b, <4 x i3
define <4 x i32> @combine_bitwise_ops_test4b(<4 x i32> %a, <4 x i32> %b, <4 x i32> %c) {
; SSE2-LABEL: combine_bitwise_ops_test4b:
; SSE2: # BB#0:
-; SSE2-NEXT: andps %xmm1, %xmm0
-; SSE2-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,2],xmm0[1,3]
-; SSE2-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,2,1,3]
-; SSE2-NEXT: movaps %xmm2, %xmm0
+; SSE2-NEXT: pand %xmm1, %xmm0
+; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,3,2,3]
+; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm2[0,2,2,3]
+; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; SSE2-NEXT: retq
;
; SSSE3-LABEL: combine_bitwise_ops_test4b:
; SSSE3: # BB#0:
-; SSSE3-NEXT: andps %xmm1, %xmm0
-; SSSE3-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,2],xmm0[1,3]
-; SSSE3-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,2,1,3]
-; SSSE3-NEXT: movaps %xmm2, %xmm0
+; SSSE3-NEXT: pand %xmm1, %xmm0
+; SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,3,2,3]
+; SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm2[0,2,2,3]
+; SSSE3-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; SSSE3-NEXT: retq
;
; SSE41-LABEL: combine_bitwise_ops_test4b:
@@ -434,18 +434,18 @@ define <4 x i32> @combine_bitwise_ops_test4b(<4 x i32> %a, <4 x i32> %b, <4 x i3
define <4 x i32> @combine_bitwise_ops_test5b(<4 x i32> %a, <4 x i32> %b, <4 x i32> %c) {
; SSE2-LABEL: combine_bitwise_ops_test5b:
; SSE2: # BB#0:
-; SSE2-NEXT: orps %xmm1, %xmm0
-; SSE2-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,2],xmm0[1,3]
-; SSE2-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,2,1,3]
-; SSE2-NEXT: movaps %xmm2, %xmm0
+; SSE2-NEXT: por %xmm1, %xmm0
+; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,3,2,3]
+; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm2[0,2,2,3]
+; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; SSE2-NEXT: retq
;
; SSSE3-LABEL: combine_bitwise_ops_test5b:
; SSSE3: # BB#0:
-; SSSE3-NEXT: orps %xmm1, %xmm0
-; SSSE3-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,2],xmm0[1,3]
-; SSSE3-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,2,1,3]
-; SSSE3-NEXT: movaps %xmm2, %xmm0
+; SSSE3-NEXT: por %xmm1, %xmm0
+; SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,3,2,3]
+; SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm2[0,2,2,3]
+; SSSE3-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; SSSE3-NEXT: retq
;
; SSE41-LABEL: combine_bitwise_ops_test5b:
@@ -475,19 +475,13 @@ define <4 x i32> @combine_bitwise_ops_test6b(<4 x i32> %a, <4 x i32> %b, <4 x i3
; SSE2-LABEL: combine_bitwise_ops_test6b:
; SSE2: # BB#0:
; SSE2-NEXT: xorps %xmm1, %xmm0
-; SSE2-NEXT: xorps %xmm1, %xmm1
-; SSE2-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,2],xmm0[1,3]
-; SSE2-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,2,1,3]
-; SSE2-NEXT: movaps %xmm1, %xmm0
+; SSE2-NEXT: andps {{.*}}(%rip), %xmm0
; SSE2-NEXT: retq
;
; SSSE3-LABEL: combine_bitwise_ops_test6b:
; SSSE3: # BB#0:
; SSSE3-NEXT: xorps %xmm1, %xmm0
-; SSSE3-NEXT: xorps %xmm1, %xmm1
-; SSSE3-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,2],xmm0[1,3]
-; SSSE3-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,2,1,3]
-; SSSE3-NEXT: movaps %xmm1, %xmm0
+; SSSE3-NEXT: andps {{.*}}(%rip), %xmm0
; SSSE3-NEXT: retq
;
; SSE41-LABEL: combine_bitwise_ops_test6b:
@@ -517,17 +511,42 @@ define <4 x i32> @combine_bitwise_ops_test6b(<4 x i32> %a, <4 x i32> %b, <4 x i3
}
define <4 x i32> @combine_bitwise_ops_test1c(<4 x i32> %a, <4 x i32> %b, <4 x i32> %c) {
-; SSE-LABEL: combine_bitwise_ops_test1c:
-; SSE: # BB#0:
-; SSE-NEXT: andps %xmm1, %xmm0
-; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,2],xmm2[1,3]
-; SSE-NEXT: retq
+; SSE2-LABEL: combine_bitwise_ops_test1c:
+; SSE2: # BB#0:
+; SSE2-NEXT: pand %xmm1, %xmm0
+; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
+; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm2[1,3,2,3]
+; SSE2-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; SSE2-NEXT: retq
;
-; AVX-LABEL: combine_bitwise_ops_test1c:
-; AVX: # BB#0:
-; AVX-NEXT: vandps %xmm1, %xmm0, %xmm0
-; AVX-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,2],xmm2[1,3]
-; AVX-NEXT: retq
+; SSSE3-LABEL: combine_bitwise_ops_test1c:
+; SSSE3: # BB#0:
+; SSSE3-NEXT: pand %xmm1, %xmm0
+; SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
+; SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm2[1,3,2,3]
+; SSSE3-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; SSSE3-NEXT: retq
+;
+; SSE41-LABEL: combine_bitwise_ops_test1c:
+; SSE41: # BB#0:
+; SSE41-NEXT: pand %xmm1, %xmm0
+; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm2[2,3],xmm0[4,5],xmm2[6,7]
+; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,1,3]
+; SSE41-NEXT: retq
+;
+; AVX1-LABEL: combine_bitwise_ops_test1c:
+; AVX1: # BB#0:
+; AVX1-NEXT: vpand %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm2[2,3],xmm0[4,5],xmm2[6,7]
+; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,2,1,3]
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: combine_bitwise_ops_test1c:
+; AVX2: # BB#0:
+; AVX2-NEXT: vpand %xmm1, %xmm0, %xmm0
+; AVX2-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0],xmm2[1],xmm0[2],xmm2[3]
+; AVX2-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,2,1,3]
+; AVX2-NEXT: retq
%shuf1 = shufflevector <4 x i32> %a, <4 x i32> %c, <4 x i32><i32 0, i32 2, i32 5, i32 7>
%shuf2 = shufflevector <4 x i32> %b, <4 x i32> %c, <4 x i32><i32 0, i32 2, i32 5, i32 7>
%and = and <4 x i32> %shuf1, %shuf2
@@ -535,17 +554,42 @@ define <4 x i32> @combine_bitwise_ops_test1c(<4 x i32> %a, <4 x i32> %b, <4 x i3
}
define <4 x i32> @combine_bitwise_ops_test2c(<4 x i32> %a, <4 x i32> %b, <4 x i32> %c) {
-; SSE-LABEL: combine_bitwise_ops_test2c:
-; SSE: # BB#0:
-; SSE-NEXT: orps %xmm1, %xmm0
-; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,2],xmm2[1,3]
-; SSE-NEXT: retq
+; SSE2-LABEL: combine_bitwise_ops_test2c:
+; SSE2: # BB#0:
+; SSE2-NEXT: por %xmm1, %xmm0
+; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
+; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm2[1,3,2,3]
+; SSE2-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; SSE2-NEXT: retq
;
-; AVX-LABEL: combine_bitwise_ops_test2c:
-; AVX: # BB#0:
-; AVX-NEXT: vorps %xmm1, %xmm0, %xmm0
-; AVX-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,2],xmm2[1,3]
-; AVX-NEXT: retq
+; SSSE3-LABEL: combine_bitwise_ops_test2c:
+; SSSE3: # BB#0:
+; SSSE3-NEXT: por %xmm1, %xmm0
+; SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
+; SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm2[1,3,2,3]
+; SSSE3-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; SSSE3-NEXT: retq
+;
+; SSE41-LABEL: combine_bitwise_ops_test2c:
+; SSE41: # BB#0:
+; SSE41-NEXT: por %xmm1, %xmm0
+; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm2[2,3],xmm0[4,5],xmm2[6,7]
+; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,1,3]
+; SSE41-NEXT: retq
+;
+; AVX1-LABEL: combine_bitwise_ops_test2c:
+; AVX1: # BB#0:
+; AVX1-NEXT: vpor %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm2[2,3],xmm0[4,5],xmm2[6,7]
+; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,2,1,3]
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: combine_bitwise_ops_test2c:
+; AVX2: # BB#0:
+; AVX2-NEXT: vpor %xmm1, %xmm0, %xmm0
+; AVX2-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0],xmm2[1],xmm0[2],xmm2[3]
+; AVX2-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,2,1,3]
+; AVX2-NEXT: retq
%shuf1 = shufflevector <4 x i32> %a, <4 x i32> %c, <4 x i32><i32 0, i32 2, i32 5, i32 7>
%shuf2 = shufflevector <4 x i32> %b, <4 x i32> %c, <4 x i32><i32 0, i32 2, i32 5, i32 7>
%or = or <4 x i32> %shuf1, %shuf2
@@ -553,18 +597,34 @@ define <4 x i32> @combine_bitwise_ops_test2c(<4 x i32> %a, <4 x i32> %b, <4 x i3
}
define <4 x i32> @combine_bitwise_ops_test3c(<4 x i32> %a, <4 x i32> %b, <4 x i32> %c) {
-; SSE-LABEL: combine_bitwise_ops_test3c:
-; SSE: # BB#0:
-; SSE-NEXT: xorps %xmm1, %xmm0
-; SSE-NEXT: xorps %xmm1, %xmm1
-; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[1,3]
-; SSE-NEXT: retq
+; SSE2-LABEL: combine_bitwise_ops_test3c:
+; SSE2: # BB#0:
+; SSE2-NEXT: pxor %xmm1, %xmm0
+; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,1,0,2]
+; SSE2-NEXT: pxor %xmm1, %xmm1
+; SSE2-NEXT: punpckhqdq {{.*#+}} xmm0 = xmm0[1],xmm1[1]
+; SSE2-NEXT: retq
+;
+; SSSE3-LABEL: combine_bitwise_ops_test3c:
+; SSSE3: # BB#0:
+; SSSE3-NEXT: pxor %xmm1, %xmm0
+; SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,1,0,2]
+; SSSE3-NEXT: pxor %xmm1, %xmm1
+; SSSE3-NEXT: punpckhqdq {{.*#+}} xmm0 = xmm0[1],xmm1[1]
+; SSSE3-NEXT: retq
+;
+; SSE41-LABEL: combine_bitwise_ops_test3c:
+; SSE41: # BB#0:
+; SSE41-NEXT: pxor %xmm1, %xmm0
+; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
+; SSE41-NEXT: movq {{.*#+}} xmm0 = xmm0[0],zero
+; SSE41-NEXT: retq
;
; AVX-LABEL: combine_bitwise_ops_test3c:
; AVX: # BB#0:
-; AVX-NEXT: vxorps %xmm1, %xmm0, %xmm0
-; AVX-NEXT: vxorps %xmm1, %xmm1, %xmm1
-; AVX-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[1,3]
+; AVX-NEXT: vpxor %xmm1, %xmm0, %xmm0
+; AVX-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
+; AVX-NEXT: vmovq {{.*#+}} xmm0 = xmm0[0],zero
; AVX-NEXT: retq
%shuf1 = shufflevector <4 x i32> %a, <4 x i32> %c, <4 x i32><i32 0, i32 2, i32 5, i32 7>
%shuf2 = shufflevector <4 x i32> %b, <4 x i32> %c, <4 x i32><i32 0, i32 2, i32 5, i32 7>
@@ -573,18 +633,42 @@ define <4 x i32> @combine_bitwise_ops_test3c(<4 x i32> %a, <4 x i32> %b, <4 x i3
}
define <4 x i32> @combine_bitwise_ops_test4c(<4 x i32> %a, <4 x i32> %b, <4 x i32> %c) {
-; SSE-LABEL: combine_bitwise_ops_test4c:
-; SSE: # BB#0:
-; SSE-NEXT: andps %xmm1, %xmm0
-; SSE-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,2],xmm0[1,3]
-; SSE-NEXT: movaps %xmm2, %xmm0
-; SSE-NEXT: retq
+; SSE2-LABEL: combine_bitwise_ops_test4c:
+; SSE2: # BB#0:
+; SSE2-NEXT: pand %xmm1, %xmm0
+; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,3,2,3]
+; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm2[0,2,2,3]
+; SSE2-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; SSE2-NEXT: retq
;
-; AVX-LABEL: combine_bitwise_ops_test4c:
-; AVX: # BB#0:
-; AVX-NEXT: vandps %xmm1, %xmm0, %xmm0
-; AVX-NEXT: vshufps {{.*#+}} xmm0 = xmm2[0,2],xmm0[1,3]
-; AVX-NEXT: retq
+; SSSE3-LABEL: combine_bitwise_ops_test4c:
+; SSSE3: # BB#0:
+; SSSE3-NEXT: pand %xmm1, %xmm0
+; SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,3,2,3]
+; SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm2[0,2,2,3]
+; SSSE3-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; SSSE3-NEXT: retq
+;
+; SSE41-LABEL: combine_bitwise_ops_test4c:
+; SSE41: # BB#0:
+; SSE41-NEXT: pand %xmm1, %xmm0
+; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm2[0,1],xmm0[2,3],xmm2[4,5],xmm0[6,7]
+; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,1,3]
+; SSE41-NEXT: retq
+;
+; AVX1-LABEL: combine_bitwise_ops_test4c:
+; AVX1: # BB#0:
+; AVX1-NEXT: vpand %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm2[0,1],xmm0[2,3],xmm2[4,5],xmm0[6,7]
+; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,2,1,3]
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: combine_bitwise_ops_test4c:
+; AVX2: # BB#0:
+; AVX2-NEXT: vpand %xmm1, %xmm0, %xmm0
+; AVX2-NEXT: vpblendd {{.*#+}} xmm0 = xmm2[0],xmm0[1],xmm2[2],xmm0[3]
+; AVX2-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,2,1,3]
+; AVX2-NEXT: retq
%shuf1 = shufflevector <4 x i32> %c, <4 x i32> %a, <4 x i32><i32 0, i32 2, i32 5, i32 7>
%shuf2 = shufflevector <4 x i32> %c, <4 x i32> %b, <4 x i32><i32 0, i32 2, i32 5, i32 7>
%and = and <4 x i32> %shuf1, %shuf2
@@ -592,18 +676,42 @@ define <4 x i32> @combine_bitwise_ops_test4c(<4 x i32> %a, <4 x i32> %b, <4 x i3
}
define <4 x i32> @combine_bitwise_ops_test5c(<4 x i32> %a, <4 x i32> %b, <4 x i32> %c) {
-; SSE-LABEL: combine_bitwise_ops_test5c:
-; SSE: # BB#0:
-; SSE-NEXT: orps %xmm1, %xmm0
-; SSE-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,2],xmm0[1,3]
-; SSE-NEXT: movaps %xmm2, %xmm0
-; SSE-NEXT: retq
+; SSE2-LABEL: combine_bitwise_ops_test5c:
+; SSE2: # BB#0:
+; SSE2-NEXT: por %xmm1, %xmm0
+; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,3,2,3]
+; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm2[0,2,2,3]
+; SSE2-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; SSE2-NEXT: retq
;
-; AVX-LABEL: combine_bitwise_ops_test5c:
-; AVX: # BB#0:
-; AVX-NEXT: vorps %xmm1, %xmm0, %xmm0
-; AVX-NEXT: vshufps {{.*#+}} xmm0 = xmm2[0,2],xmm0[1,3]
-; AVX-NEXT: retq
+; SSSE3-LABEL: combine_bitwise_ops_test5c:
+; SSSE3: # BB#0:
+; SSSE3-NEXT: por %xmm1, %xmm0
+; SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,3,2,3]
+; SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm2[0,2,2,3]
+; SSSE3-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; SSSE3-NEXT: retq
+;
+; SSE41-LABEL: combine_bitwise_ops_test5c:
+; SSE41: # BB#0:
+; SSE41-NEXT: por %xmm1, %xmm0
+; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm2[0,1],xmm0[2,3],xmm2[4,5],xmm0[6,7]
+; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,1,3]
+; SSE41-NEXT: retq
+;
+; AVX1-LABEL: combine_bitwise_ops_test5c:
+; AVX1: # BB#0:
+; AVX1-NEXT: vpor %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm2[0,1],xmm0[2,3],xmm2[4,5],xmm0[6,7]
+; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,2,1,3]
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: combine_bitwise_ops_test5c:
+; AVX2: # BB#0:
+; AVX2-NEXT: vpor %xmm1, %xmm0, %xmm0
+; AVX2-NEXT: vpblendd {{.*#+}} xmm0 = xmm2[0],xmm0[1],xmm2[2],xmm0[3]
+; AVX2-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,2,1,3]
+; AVX2-NEXT: retq
%shuf1 = shufflevector <4 x i32> %c, <4 x i32> %a, <4 x i32><i32 0, i32 2, i32 5, i32 7>
%shuf2 = shufflevector <4 x i32> %c, <4 x i32> %b, <4 x i32><i32 0, i32 2, i32 5, i32 7>
%or = or <4 x i32> %shuf1, %shuf2
@@ -611,20 +719,45 @@ define <4 x i32> @combine_bitwise_ops_test5c(<4 x i32> %a, <4 x i32> %b, <4 x i3
}
define <4 x i32> @combine_bitwise_ops_test6c(<4 x i32> %a, <4 x i32> %b, <4 x i32> %c) {
-; SSE-LABEL: combine_bitwise_ops_test6c:
-; SSE: # BB#0:
-; SSE-NEXT: xorps %xmm1, %xmm0
-; SSE-NEXT: xorps %xmm1, %xmm1
-; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,2],xmm0[1,3]
-; SSE-NEXT: movaps %xmm1, %xmm0
-; SSE-NEXT: retq
+; SSE2-LABEL: combine_bitwise_ops_test6c:
+; SSE2: # BB#0:
+; SSE2-NEXT: pxor %xmm1, %xmm0
+; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,3,2,3]
+; SSE2-NEXT: pxor %xmm0, %xmm0
+; SSE2-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; SSE2-NEXT: retq
;
-; AVX-LABEL: combine_bitwise_ops_test6c:
-; AVX: # BB#0:
-; AVX-NEXT: vxorps %xmm1, %xmm0, %xmm0
-; AVX-NEXT: vxorps %xmm1, %xmm1, %xmm1
-; AVX-NEXT: vshufps {{.*#+}} xmm0 = xmm1[0,2],xmm0[1,3]
-; AVX-NEXT: retq
+; SSSE3-LABEL: combine_bitwise_ops_test6c:
+; SSSE3: # BB#0:
+; SSSE3-NEXT: pxor %xmm1, %xmm0
+; SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,3,2,3]
+; SSSE3-NEXT: pxor %xmm0, %xmm0
+; SSSE3-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; SSSE3-NEXT: retq
+;
+; SSE41-LABEL: combine_bitwise_ops_test6c:
+; SSE41: # BB#0:
+; SSE41-NEXT: pxor %xmm1, %xmm0
+; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,1,1,3]
+; SSE41-NEXT: pxor %xmm0, %xmm0
+; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm1[4,5,6,7]
+; SSE41-NEXT: retq
+;
+; AVX1-LABEL: combine_bitwise_ops_test6c:
+; AVX1: # BB#0:
+; AVX1-NEXT: vpxor %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,1,1,3]
+; AVX1-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm1[0,1,2,3],xmm0[4,5,6,7]
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: combine_bitwise_ops_test6c:
+; AVX2: # BB#0:
+; AVX2-NEXT: vpxor %xmm1, %xmm0, %xmm0
+; AVX2-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,1,1,3]
+; AVX2-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; AVX2-NEXT: vpblendd {{.*#+}} xmm0 = xmm1[0,1],xmm0[2,3]
+; AVX2-NEXT: retq
%shuf1 = shufflevector <4 x i32> %c, <4 x i32> %a, <4 x i32><i32 0, i32 2, i32 5, i32 7>
%shuf2 = shufflevector <4 x i32> %c, <4 x i32> %b, <4 x i32><i32 0, i32 2, i32 5, i32 7>
%xor = xor <4 x i32> %shuf1, %shuf2
@@ -855,19 +988,40 @@ define <4 x i32> @combine_nested_undef_test14(<4 x i32> %A, <4 x i32> %B) {
; it.
define <4 x i32> @combine_nested_undef_test15(<4 x i32> %A, <4 x i32> %B) {
-; SSE-LABEL: combine_nested_undef_test15:
-; SSE: # BB#0:
-; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,0],xmm0[0,0]
-; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[2,0],xmm0[3,1]
-; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm1[2,1,0,3]
-; SSE-NEXT: retq
+; SSE2-LABEL: combine_nested_undef_test15:
+; SSE2: # BB#0:
+; SSE2-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,0],xmm0[3,0]
+; SSE2-NEXT: shufps {{.*#+}} xmm1 = xmm1[2,0],xmm0[0,1]
+; SSE2-NEXT: movaps %xmm1, %xmm0
+; SSE2-NEXT: retq
;
-; AVX-LABEL: combine_nested_undef_test15:
-; AVX: # BB#0:
-; AVX-NEXT: vshufps {{.*#+}} xmm1 = xmm1[0,0],xmm0[0,0]
-; AVX-NEXT: vshufps {{.*#+}} xmm0 = xmm1[2,0],xmm0[3,1]
-; AVX-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,1,0,3]
-; AVX-NEXT: retq
+; SSSE3-LABEL: combine_nested_undef_test15:
+; SSSE3: # BB#0:
+; SSSE3-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,0],xmm0[3,0]
+; SSSE3-NEXT: shufps {{.*#+}} xmm1 = xmm1[2,0],xmm0[0,1]
+; SSSE3-NEXT: movaps %xmm1, %xmm0
+; SSSE3-NEXT: retq
+;
+; SSE41-LABEL: combine_nested_undef_test15:
+; SSE41: # BB#0:
+; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,0,1,1]
+; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm0[3,1,0,1]
+; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3],xmm0[4,5,6,7]
+; SSE41-NEXT: retq
+;
+; AVX1-LABEL: combine_nested_undef_test15:
+; AVX1: # BB#0:
+; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,0,1,1]
+; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[3,1,0,1]
+; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3],xmm0[4,5,6,7]
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: combine_nested_undef_test15:
+; AVX2: # BB#0:
+; AVX2-NEXT: vpbroadcastd %xmm1, %xmm1
+; AVX2-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[3,1,0,1]
+; AVX2-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2,3]
+; AVX2-NEXT: retq
%1 = shufflevector <4 x i32> %A, <4 x i32> %B, <4 x i32> <i32 0, i32 4, i32 3, i32 1>
%2 = shufflevector <4 x i32> %1, <4 x i32> undef, <4 x i32> <i32 2, i32 1, i32 0, i32 3>
ret <4 x i32> %2
@@ -876,34 +1030,34 @@ define <4 x i32> @combine_nested_undef_test15(<4 x i32> %A, <4 x i32> %B) {
define <4 x i32> @combine_nested_undef_test16(<4 x i32> %A, <4 x i32> %B) {
; SSE2-LABEL: combine_nested_undef_test16:
; SSE2: # BB#0:
-; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[1,3]
-; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,2,1,3]
-; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,1,0,3]
+; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,3,2,3]
+; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,0,2,3]
+; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; SSE2-NEXT: retq
;
; SSSE3-LABEL: combine_nested_undef_test16:
; SSSE3: # BB#0:
-; SSSE3-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[1,3]
-; SSSE3-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,2,1,3]
-; SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,1,0,3]
+; SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,3,2,3]
+; SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,0,2,3]
+; SSSE3-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; SSSE3-NEXT: retq
;
; SSE41-LABEL: combine_nested_undef_test16:
; SSE41: # BB#0:
+; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3],xmm0[4,5],xmm1[6,7]
-; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,1,0,3]
; SSE41-NEXT: retq
;
; AVX1-LABEL: combine_nested_undef_test16:
; AVX1: # BB#0:
+; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3],xmm0[4,5],xmm1[6,7]
-; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,1,0,3]
; AVX1-NEXT: retq
;
; AVX2-LABEL: combine_nested_undef_test16:
; AVX2: # BB#0:
+; AVX2-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
; AVX2-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2],xmm1[3]
-; AVX2-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,1,0,3]
; AVX2-NEXT: retq
%1 = shufflevector <4 x i32> %A, <4 x i32> %B, <4 x i32> <i32 0, i32 5, i32 2, i32 7>
%2 = shufflevector <4 x i32> %1, <4 x i32> undef, <4 x i32> <i32 2, i32 1, i32 0, i32 3>
@@ -911,19 +1065,35 @@ define <4 x i32> @combine_nested_undef_test16(<4 x i32> %A, <4 x i32> %B) {
}
define <4 x i32> @combine_nested_undef_test17(<4 x i32> %A, <4 x i32> %B) {
-; SSE-LABEL: combine_nested_undef_test17:
-; SSE: # BB#0:
-; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,0],xmm0[1,0]
-; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,2],xmm0[3,1]
-; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm1[2,1,0,3]
-; SSE-NEXT: retq
+; SSE2-LABEL: combine_nested_undef_test17:
+; SSE2: # BB#0:
+; SSE2-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,0],xmm0[1,0]
+; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[3,1],xmm1[0,2]
+; SSE2-NEXT: retq
;
-; AVX-LABEL: combine_nested_undef_test17:
-; AVX: # BB#0:
-; AVX-NEXT: vshufps {{.*#+}} xmm1 = xmm1[0,0],xmm0[1,0]
-; AVX-NEXT: vshufps {{.*#+}} xmm0 = xmm1[0,2],xmm0[3,1]
-; AVX-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,1,0,3]
-; AVX-NEXT: retq
+; SSSE3-LABEL: combine_nested_undef_test17:
+; SSSE3: # BB#0:
+; SSSE3-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,0],xmm0[1,0]
+; SSSE3-NEXT: shufps {{.*#+}} xmm0 = xmm0[3,1],xmm1[0,2]
+; SSSE3-NEXT: retq
+;
+; SSE41-LABEL: combine_nested_undef_test17:
+; SSE41: # BB#0:
+; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm1[0,1],xmm0[2,3,4,5,6,7]
+; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm0[3,1,0,1]
+; SSE41-NEXT: retq
+;
+; AVX1-LABEL: combine_nested_undef_test17:
+; AVX1: # BB#0:
+; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm1[0,1],xmm0[2,3,4,5,6,7]
+; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[3,1,0,1]
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: combine_nested_undef_test17:
+; AVX2: # BB#0:
+; AVX2-NEXT: vpblendd {{.*#+}} xmm0 = xmm1[0],xmm0[1,2,3]
+; AVX2-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[3,1,0,1]
+; AVX2-NEXT: retq
%1 = shufflevector <4 x i32> %A, <4 x i32> %B, <4 x i32> <i32 4, i32 1, i32 3, i32 1>
%2 = shufflevector <4 x i32> %1, <4 x i32> undef, <4 x i32> <i32 2, i32 1, i32 0, i32 3>
ret <4 x i32> %2
@@ -945,55 +1115,107 @@ define <4 x i32> @combine_nested_undef_test18(<4 x i32> %A, <4 x i32> %B) {
}
define <4 x i32> @combine_nested_undef_test19(<4 x i32> %A, <4 x i32> %B) {
-; SSE-LABEL: combine_nested_undef_test19:
-; SSE: # BB#0:
-; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,0],xmm1[0,0]
-; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[1,2]
-; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,0,0,0]
-; SSE-NEXT: retq
+; SSE2-LABEL: combine_nested_undef_test19:
+; SSE2: # BB#0:
+; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
+; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[3,0,0,0]
+; SSE2-NEXT: retq
;
-; AVX-LABEL: combine_nested_undef_test19:
-; AVX: # BB#0:
-; AVX-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,0],xmm1[0,0]
-; AVX-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[1,2]
-; AVX-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,0,0,0]
-; AVX-NEXT: retq
+; SSSE3-LABEL: combine_nested_undef_test19:
+; SSSE3: # BB#0:
+; SSSE3-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
+; SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[3,0,0,0]
+; SSSE3-NEXT: retq
+;
+; SSE41-LABEL: combine_nested_undef_test19:
+; SSE41: # BB#0:
+; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3],xmm0[4,5,6,7]
+; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,0,0,0]
+; SSE41-NEXT: retq
+;
+; AVX1-LABEL: combine_nested_undef_test19:
+; AVX1: # BB#0:
+; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3],xmm0[4,5,6,7]
+; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[1,0,0,0]
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: combine_nested_undef_test19:
+; AVX2: # BB#0:
+; AVX2-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2,3]
+; AVX2-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[1,0,0,0]
+; AVX2-NEXT: retq
%1 = shufflevector <4 x i32> %A, <4 x i32> %B, <4 x i32> <i32 0, i32 4, i32 5, i32 6>
%2 = shufflevector <4 x i32> %1, <4 x i32> undef, <4 x i32> <i32 2, i32 0, i32 0, i32 0>
ret <4 x i32> %2
}
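; The new combine_nested_undef_test19 sequence checks out lane by lane:
; punpckldq produces <A0, B0, A1, B1>, and pshufd [3,0,0,0] selects
; <B1, A0, A0, A0>, which equals applying the outer mask [2,0,0,0] to the
; inner result <A0, B0, B1, B2>.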
define <4 x i32> @combine_nested_undef_test20(<4 x i32> %A, <4 x i32> %B) {
-; SSE-LABEL: combine_nested_undef_test20:
-; SSE: # BB#0:
-; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[3,2],xmm1[0,0]
-; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,1,0,3]
-; SSE-NEXT: retq
+; SSE2-LABEL: combine_nested_undef_test20:
+; SSE2: # BB#0:
+; SSE2-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,0],xmm0[2,3]
+; SSE2-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,2,3,1]
+; SSE2-NEXT: movaps %xmm1, %xmm0
+; SSE2-NEXT: retq
;
-; AVX-LABEL: combine_nested_undef_test20:
-; AVX: # BB#0:
-; AVX-NEXT: vshufps {{.*#+}} xmm0 = xmm0[3,2],xmm1[0,0]
-; AVX-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,1,0,3]
-; AVX-NEXT: retq
+; SSSE3-LABEL: combine_nested_undef_test20:
+; SSSE3: # BB#0:
+; SSSE3-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,0],xmm0[2,3]
+; SSSE3-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,2,3,1]
+; SSSE3-NEXT: movaps %xmm1, %xmm0
+; SSSE3-NEXT: retq
+;
+; SSE41-LABEL: combine_nested_undef_test20:
+; SSE41: # BB#0:
+; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm1[0,1,2,3],xmm0[4,5,6,7]
+; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,3,0]
+; SSE41-NEXT: retq
+;
+; AVX1-LABEL: combine_nested_undef_test20:
+; AVX1: # BB#0:
+; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm1[0,1,2,3],xmm0[4,5,6,7]
+; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,2,3,0]
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: combine_nested_undef_test20:
+; AVX2: # BB#0:
+; AVX2-NEXT: vpblendd {{.*#+}} xmm0 = xmm1[0,1],xmm0[2,3]
+; AVX2-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,2,3,0]
+; AVX2-NEXT: retq
%1 = shufflevector <4 x i32> %A, <4 x i32> %B, <4 x i32> <i32 3, i32 2, i32 4, i32 4>
%2 = shufflevector <4 x i32> %1, <4 x i32> undef, <4 x i32> <i32 2, i32 1, i32 0, i32 3>
ret <4 x i32> %2
}
define <4 x i32> @combine_nested_undef_test21(<4 x i32> %A, <4 x i32> %B) {
-; SSE-LABEL: combine_nested_undef_test21:
-; SSE: # BB#0:
-; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,0],xmm0[1,0]
-; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,2],xmm0[3,1]
-; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm1[0,1,0,3]
-; SSE-NEXT: retq
+; SSE2-LABEL: combine_nested_undef_test21:
+; SSE2: # BB#0:
+; SSE2-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
+; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm1[0,3,0,3]
+; SSE2-NEXT: retq
;
-; AVX-LABEL: combine_nested_undef_test21:
-; AVX: # BB#0:
-; AVX-NEXT: vshufps {{.*#+}} xmm1 = xmm1[0,0],xmm0[1,0]
-; AVX-NEXT: vshufps {{.*#+}} xmm0 = xmm1[0,2],xmm0[3,1]
-; AVX-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,1,0,3]
-; AVX-NEXT: retq
+; SSSE3-LABEL: combine_nested_undef_test21:
+; SSSE3: # BB#0:
+; SSSE3-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
+; SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm1[0,3,0,3]
+; SSSE3-NEXT: retq
+;
+; SSE41-LABEL: combine_nested_undef_test21:
+; SSE41: # BB#0:
+; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm1[0,1],xmm0[2,3],xmm1[4,5,6,7]
+; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,1,0,1]
+; SSE41-NEXT: retq
+;
+; AVX1-LABEL: combine_nested_undef_test21:
+; AVX1: # BB#0:
+; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm1[0,1],xmm0[2,3],xmm1[4,5,6,7]
+; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,1,0,1]
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: combine_nested_undef_test21:
+; AVX2: # BB#0:
+; AVX2-NEXT: vpblendd {{.*#+}} xmm0 = xmm1[0],xmm0[1],xmm1[2,3]
+; AVX2-NEXT: vpbroadcastq %xmm0, %xmm0
+; AVX2-NEXT: retq
%1 = shufflevector <4 x i32> %A, <4 x i32> %B, <4 x i32> <i32 4, i32 1, i32 3, i32 1>
%2 = shufflevector <4 x i32> %1, <4 x i32> undef, <4 x i32> <i32 0, i32 1, i32 0, i32 3>
ret <4 x i32> %2
@@ -1119,20 +1341,10 @@ define <4 x i32> @combine_nested_undef_test28(<4 x i32> %A, <4 x i32> %B) {
}
define <4 x float> @combine_test1(<4 x float> %a, <4 x float> %b) {
-; SSE2-LABEL: combine_test1:
-; SSE2: # BB#0:
-; SSE2-NEXT: movaps %xmm1, %xmm0
-; SSE2-NEXT: retq
-;
-; SSSE3-LABEL: combine_test1:
-; SSSE3: # BB#0:
-; SSSE3-NEXT: movaps %xmm1, %xmm0
-; SSSE3-NEXT: retq
-;
-; SSE41-LABEL: combine_test1:
-; SSE41: # BB#0:
-; SSE41-NEXT: movaps %xmm1, %xmm0
-; SSE41-NEXT: retq
+; SSE-LABEL: combine_test1:
+; SSE: # BB#0:
+; SSE-NEXT: movaps %xmm1, %xmm0
+; SSE-NEXT: retq
;
; AVX-LABEL: combine_test1:
; AVX: # BB#0:
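; This hunk and several below only reshuffle FileCheck prefixes: where SSE2,
; SSSE3 and SSE4.1 now emit identical code (combine_test1, test6, test1b,
; test4b) the three blocks collapse into one SSE block, and where the new
; lowering diverges per subtarget (the *b and *c tests) a shared SSE or AVX
; block splits into SSE2/SSSE3/SSE41 or AVX1/AVX2 blocks. The RUN lines, not
; visible in these hunks, presumably pass one --check-prefix option per
; applicable prefix.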
@@ -1146,13 +1358,13 @@ define <4 x float> @combine_test1(<4 x float> %a, <4 x float> %b) {
define <4 x float> @combine_test2(<4 x float> %a, <4 x float> %b) {
; SSE2-LABEL: combine_test2:
; SSE2: # BB#0:
-; SSE2-NEXT: movss %xmm0, %xmm1
+; SSE2-NEXT: movss {{.*#+}} xmm1 = xmm0[0],xmm1[1,2,3]
; SSE2-NEXT: movaps %xmm1, %xmm0
; SSE2-NEXT: retq
;
; SSSE3-LABEL: combine_test2:
; SSSE3: # BB#0:
-; SSSE3-NEXT: movss %xmm0, %xmm1
+; SSSE3-NEXT: movss {{.*#+}} xmm1 = xmm0[0],xmm1[1,2,3]
; SSSE3-NEXT: movaps %xmm1, %xmm0
; SSSE3-NEXT: retq
;
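; The movss checks were regenerated with the asm printer's shuffle comment:
; movss {{.*#+}} xmm1 = xmm0[0],xmm1[1,2,3] spells out that register-register
; movss merges only the low scalar and keeps the destination's upper three
; lanes. The canonical IR that selects this merge, as a sketch
; (combine_test2's own body is not shown in this hunk):

define <4 x float> @sketch_movss_merge(<4 x float> %a, <4 x float> %b) {
  %s = shufflevector <4 x float> %a, <4 x float> %b, <4 x i32> <i32 0, i32 5, i32 6, i32 7>
  ret <4 x float> %s
}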
@@ -1204,22 +1416,14 @@ define <4 x float> @combine_test4(<4 x float> %a, <4 x float> %b) {
define <4 x float> @combine_test5(<4 x float> %a, <4 x float> %b) {
; SSE2-LABEL: combine_test5:
; SSE2: # BB#0:
-; SSE2-NEXT: movaps %xmm1, %xmm2
-; SSE2-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,2],xmm0[1,3]
-; SSE2-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,2,1,3]
-; SSE2-NEXT: shufps {{.*#+}} xmm1 = xmm1[3,0],xmm2[2,0]
-; SSE2-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,1],xmm1[2,0]
-; SSE2-NEXT: movaps %xmm2, %xmm0
+; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,0],xmm1[0,0]
+; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[2,0],xmm1[2,3]
; SSE2-NEXT: retq
;
; SSSE3-LABEL: combine_test5:
; SSSE3: # BB#0:
-; SSSE3-NEXT: movaps %xmm1, %xmm2
-; SSSE3-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,2],xmm0[1,3]
-; SSSE3-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,2,1,3]
-; SSSE3-NEXT: shufps {{.*#+}} xmm1 = xmm1[3,0],xmm2[2,0]
-; SSSE3-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,1],xmm1[2,0]
-; SSSE3-NEXT: movaps %xmm2, %xmm0
+; SSSE3-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,0],xmm1[0,0]
+; SSSE3-NEXT: shufps {{.*#+}} xmm0 = xmm0[2,0],xmm1[2,3]
; SSSE3-NEXT: retq
;
; SSE41-LABEL: combine_test5:
@@ -1237,20 +1441,10 @@ define <4 x float> @combine_test5(<4 x float> %a, <4 x float> %b) {
}
define <4 x i32> @combine_test6(<4 x i32> %a, <4 x i32> %b) {
-; SSE2-LABEL: combine_test6:
-; SSE2: # BB#0:
-; SSE2-NEXT: movaps %xmm1, %xmm0
-; SSE2-NEXT: retq
-;
-; SSSE3-LABEL: combine_test6:
-; SSSE3: # BB#0:
-; SSSE3-NEXT: movaps %xmm1, %xmm0
-; SSSE3-NEXT: retq
-;
-; SSE41-LABEL: combine_test6:
-; SSE41: # BB#0:
-; SSE41-NEXT: movaps %xmm1, %xmm0
-; SSE41-NEXT: retq
+; SSE-LABEL: combine_test6:
+; SSE: # BB#0:
+; SSE-NEXT: movaps %xmm1, %xmm0
+; SSE-NEXT: retq
;
; AVX-LABEL: combine_test6:
; AVX: # BB#0:
@@ -1264,13 +1458,13 @@ define <4 x i32> @combine_test6(<4 x i32> %a, <4 x i32> %b) {
define <4 x i32> @combine_test7(<4 x i32> %a, <4 x i32> %b) {
; SSE2-LABEL: combine_test7:
; SSE2: # BB#0:
-; SSE2-NEXT: movss %xmm0, %xmm1
+; SSE2-NEXT: movss {{.*#+}} xmm1 = xmm0[0],xmm1[1,2,3]
; SSE2-NEXT: movaps %xmm1, %xmm0
; SSE2-NEXT: retq
;
; SSSE3-LABEL: combine_test7:
; SSSE3: # BB#0:
-; SSSE3-NEXT: movss %xmm0, %xmm1
+; SSSE3-NEXT: movss {{.*#+}} xmm1 = xmm0[0],xmm1[1,2,3]
; SSSE3-NEXT: movaps %xmm1, %xmm0
; SSSE3-NEXT: retq
;
@@ -1327,22 +1521,14 @@ define <4 x i32> @combine_test9(<4 x i32> %a, <4 x i32> %b) {
define <4 x i32> @combine_test10(<4 x i32> %a, <4 x i32> %b) {
; SSE2-LABEL: combine_test10:
; SSE2: # BB#0:
-; SSE2-NEXT: movaps %xmm1, %xmm2
-; SSE2-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,2],xmm0[1,3]
-; SSE2-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,2,1,3]
-; SSE2-NEXT: shufps {{.*#+}} xmm1 = xmm1[3,0],xmm2[2,0]
-; SSE2-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,1],xmm1[2,0]
-; SSE2-NEXT: movaps %xmm2, %xmm0
+; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,0],xmm1[0,0]
+; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[2,0],xmm1[2,3]
; SSE2-NEXT: retq
;
; SSSE3-LABEL: combine_test10:
; SSSE3: # BB#0:
-; SSSE3-NEXT: movaps %xmm1, %xmm2
-; SSSE3-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,2],xmm0[1,3]
-; SSSE3-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,2,1,3]
-; SSSE3-NEXT: shufps {{.*#+}} xmm1 = xmm1[3,0],xmm2[2,0]
-; SSSE3-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,1],xmm1[2,0]
-; SSSE3-NEXT: movaps %xmm2, %xmm0
+; SSSE3-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,0],xmm1[0,0]
+; SSSE3-NEXT: shufps {{.*#+}} xmm0 = xmm0[2,0],xmm1[2,3]
; SSSE3-NEXT: retq
;
; SSE41-LABEL: combine_test10:
@@ -1376,13 +1562,13 @@ define <4 x float> @combine_test11(<4 x float> %a, <4 x float> %b) {
define <4 x float> @combine_test12(<4 x float> %a, <4 x float> %b) {
; SSE2-LABEL: combine_test12:
; SSE2: # BB#0:
-; SSE2-NEXT: movss %xmm0, %xmm1
+; SSE2-NEXT: movss {{.*#+}} xmm1 = xmm0[0],xmm1[1,2,3]
; SSE2-NEXT: movaps %xmm1, %xmm0
; SSE2-NEXT: retq
;
; SSSE3-LABEL: combine_test12:
; SSSE3: # BB#0:
-; SSSE3-NEXT: movss %xmm0, %xmm1
+; SSSE3-NEXT: movss {{.*#+}} xmm1 = xmm0[0],xmm1[1,2,3]
; SSSE3-NEXT: movaps %xmm1, %xmm0
; SSSE3-NEXT: retq
;
@@ -1433,20 +1619,14 @@ define <4 x float> @combine_test14(<4 x float> %a, <4 x float> %b) {
define <4 x float> @combine_test15(<4 x float> %a, <4 x float> %b) {
; SSE2-LABEL: combine_test15:
; SSE2: # BB#0:
-; SSE2-NEXT: movaps %xmm0, %xmm2
-; SSE2-NEXT: shufps {{.*#+}} xmm2 = xmm2[1,0],xmm1[0,0]
-; SSE2-NEXT: shufps {{.*#+}} xmm2 = xmm2[2,0],xmm1[2,3]
-; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,0],xmm2[0,0]
-; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[2,0],xmm2[2,3]
+; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,0],xmm1[0,0]
+; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[2,0],xmm1[2,3]
; SSE2-NEXT: retq
;
; SSSE3-LABEL: combine_test15:
; SSSE3: # BB#0:
-; SSSE3-NEXT: movaps %xmm0, %xmm2
-; SSSE3-NEXT: shufps {{.*#+}} xmm2 = xmm2[1,0],xmm1[0,0]
-; SSSE3-NEXT: shufps {{.*#+}} xmm2 = xmm2[2,0],xmm1[2,3]
-; SSSE3-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,0],xmm2[0,0]
-; SSSE3-NEXT: shufps {{.*#+}} xmm0 = xmm0[2,0],xmm2[2,3]
+; SSSE3-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,0],xmm1[0,0]
+; SSSE3-NEXT: shufps {{.*#+}} xmm0 = xmm0[2,0],xmm1[2,3]
; SSSE3-NEXT: retq
;
; SSE41-LABEL: combine_test15:
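; combine_test5, test10, test15 and test20 all shrink the same way: the old
; code materialized the result in %xmm2 and then recomputed the identical
; two-shufps combination from it, and the combiner now sees through the
; repetition. Lane check for the surviving pair: shufps [1,0],[0,0] gives
; <a1, a0, b0, b0>, and shufps [2,0],[2,3] then gives <b0, a1, b2, b3>.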
@@ -1475,13 +1655,13 @@ define <4 x i32> @combine_test16(<4 x i32> %a, <4 x i32> %b) {
define <4 x i32> @combine_test17(<4 x i32> %a, <4 x i32> %b) {
; SSE2-LABEL: combine_test17:
; SSE2: # BB#0:
-; SSE2-NEXT: movss %xmm0, %xmm1
+; SSE2-NEXT: movss {{.*#+}} xmm1 = xmm0[0],xmm1[1,2,3]
; SSE2-NEXT: movaps %xmm1, %xmm0
; SSE2-NEXT: retq
;
; SSSE3-LABEL: combine_test17:
; SSSE3: # BB#0:
-; SSSE3-NEXT: movss %xmm0, %xmm1
+; SSSE3-NEXT: movss {{.*#+}} xmm1 = xmm0[0],xmm1[1,2,3]
; SSSE3-NEXT: movaps %xmm1, %xmm0
; SSSE3-NEXT: retq
;
@@ -1537,20 +1717,14 @@ define <4 x i32> @combine_test19(<4 x i32> %a, <4 x i32> %b) {
define <4 x i32> @combine_test20(<4 x i32> %a, <4 x i32> %b) {
; SSE2-LABEL: combine_test20:
; SSE2: # BB#0:
-; SSE2-NEXT: movaps %xmm0, %xmm2
-; SSE2-NEXT: shufps {{.*#+}} xmm2 = xmm2[1,0],xmm1[0,0]
-; SSE2-NEXT: shufps {{.*#+}} xmm2 = xmm2[2,0],xmm1[2,3]
-; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,0],xmm2[0,0]
-; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[2,0],xmm2[2,3]
+; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,0],xmm1[0,0]
+; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[2,0],xmm1[2,3]
; SSE2-NEXT: retq
;
; SSSE3-LABEL: combine_test20:
; SSSE3: # BB#0:
-; SSSE3-NEXT: movaps %xmm0, %xmm2
-; SSSE3-NEXT: shufps {{.*#+}} xmm2 = xmm2[1,0],xmm1[0,0]
-; SSSE3-NEXT: shufps {{.*#+}} xmm2 = xmm2[2,0],xmm1[2,3]
-; SSSE3-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,0],xmm2[0,0]
-; SSSE3-NEXT: shufps {{.*#+}} xmm0 = xmm0[2,0],xmm2[2,3]
+; SSSE3-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,0],xmm1[0,0]
+; SSSE3-NEXT: shufps {{.*#+}} xmm0 = xmm0[2,0],xmm1[2,3]
; SSSE3-NEXT: retq
;
; SSE41-LABEL: combine_test20:
@@ -1572,28 +1746,66 @@ define <4 x i32> @combine_test20(<4 x i32> %a, <4 x i32> %b) {
ret <4 x i32> %2
}
+define <4 x i32> @combine_test21(<8 x i32> %a, <4 x i32>* %ptr) {
+; SSE-LABEL: combine_test21:
+; SSE: # BB#0:
+; SSE-NEXT: movdqa %xmm0, %xmm2
+; SSE-NEXT: punpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm1[0]
+; SSE-NEXT: punpckhqdq {{.*#+}} xmm0 = xmm0[1],xmm1[1]
+; SSE-NEXT: movdqa %xmm2, (%rdi)
+; SSE-NEXT: retq
+;
+; AVX1-LABEL: combine_test21:
+; AVX1: # BB#0:
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
+; AVX1-NEXT: vpunpcklqdq {{.*#+}} xmm2 = xmm0[0],xmm1[0]
+; AVX1-NEXT: vpunpckhqdq {{.*#+}} xmm0 = xmm0[1],xmm1[1]
+; AVX1-NEXT: vmovdqa %xmm2, (%rdi)
+; AVX1-NEXT: vzeroupper
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: combine_test21:
+; AVX2: # BB#0:
+; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
+; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm2 = xmm0[0],xmm1[0]
+; AVX2-NEXT: vpunpckhqdq {{.*#+}} xmm0 = xmm0[1],xmm1[1]
+; AVX2-NEXT: vmovdqa %xmm2, (%rdi)
+; AVX2-NEXT: vzeroupper
+; AVX2-NEXT: retq
+ %1 = shufflevector <8 x i32> %a, <8 x i32> %a, <4 x i32> <i32 0, i32 1, i32 4, i32 5>
+ %2 = shufflevector <8 x i32> %a, <8 x i32> %a, <4 x i32> <i32 2, i32 3, i32 6, i32 7>
+ store <4 x i32> %1, <4 x i32>* %ptr, align 16
+ ret <4 x i32> %2
+}
+
+define <8 x float> @combine_test22(<2 x float>* %a, <2 x float>* %b) {
+; SSE-LABEL: combine_test22:
+; SSE: # BB#0:
+; SSE-NEXT: movq {{.*#+}} xmm0 = mem[0],zero
+; SSE-NEXT: movhpd (%rsi), %xmm0
+; SSE-NEXT: retq
+;
+; AVX-LABEL: combine_test22:
+; AVX: # BB#0:
+; AVX-NEXT: vmovq {{.*#+}} xmm0 = mem[0],zero
+; AVX-NEXT: vmovhpd (%rsi), %xmm0, %xmm0
+; AVX-NEXT: retq
+; Current AVX2 lowering of this is still awful, not adding a test case.
+ %1 = load <2 x float>* %a, align 8
+ %2 = load <2 x float>* %b, align 8
+ %3 = shufflevector <2 x float> %1, <2 x float> %2, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 undef, i32 undef, i32 undef, i32 undef>
+ ret <8 x float> %3
+}
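; Two new tests: combine_test21 splits an <8 x i32> into its low/high qword
; pairs with punpcklqdq/punpckhqdq (the store of %1 keeps both shuffles live,
; and the AVX variants end in vzeroupper because a ymm register was touched);
; combine_test22 concatenates two 8-byte loads with a low-half load plus
; movhpd. The same two-load idiom at <2 x double>, as a sketch (hypothetical
; function, assumed to select a scalar load plus movhpd):

define <2 x double> @sketch_concat_two_loads(double* %a, double* %b) {
  %la = load double* %a
  %lb = load double* %b
  %v0 = insertelement <2 x double> undef, double %la, i32 0
  %v1 = insertelement <2 x double> %v0, double %lb, i32 1
  ret <2 x double> %v1
}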
; Check some negative cases.
; FIXME: Do any of these really make sense? Are they redundant with the above tests?
define <4 x float> @combine_test1b(<4 x float> %a, <4 x float> %b) {
-; SSE2-LABEL: combine_test1b:
-; SSE2: # BB#0:
-; SSE2-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,1,2,0]
-; SSE2-NEXT: movaps %xmm1, %xmm0
-; SSE2-NEXT: retq
-;
-; SSSE3-LABEL: combine_test1b:
-; SSSE3: # BB#0:
-; SSSE3-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,1,2,0]
-; SSSE3-NEXT: movaps %xmm1, %xmm0
-; SSSE3-NEXT: retq
-;
-; SSE41-LABEL: combine_test1b:
-; SSE41: # BB#0:
-; SSE41-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,1,2,0]
-; SSE41-NEXT: movaps %xmm1, %xmm0
-; SSE41-NEXT: retq
+; SSE-LABEL: combine_test1b:
+; SSE: # BB#0:
+; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,1,2,0]
+; SSE-NEXT: movaps %xmm1, %xmm0
+; SSE-NEXT: retq
;
; AVX-LABEL: combine_test1b:
; AVX: # BB#0:
@@ -1613,19 +1825,17 @@ define <4 x float> @combine_test2b(<4 x float> %a, <4 x float> %b) {
;
; SSSE3-LABEL: combine_test2b:
; SSSE3: # BB#0:
-; SSSE3-NEXT: unpcklpd {{.*#+}} xmm1 = xmm1[0,0]
-; SSSE3-NEXT: movapd %xmm1, %xmm0
+; SSSE3-NEXT: movddup {{.*#+}} xmm0 = xmm1[0,0]
; SSSE3-NEXT: retq
;
; SSE41-LABEL: combine_test2b:
; SSE41: # BB#0:
-; SSE41-NEXT: unpcklpd {{.*#+}} xmm1 = xmm1[0,0]
-; SSE41-NEXT: movapd %xmm1, %xmm0
+; SSE41-NEXT: movddup {{.*#+}} xmm0 = xmm1[0,0]
; SSE41-NEXT: retq
;
; AVX-LABEL: combine_test2b:
; AVX: # BB#0:
-; AVX-NEXT: vunpcklpd {{.*#+}} xmm0 = xmm1[0,0]
+; AVX-NEXT: vmovddup {{.*#+}} xmm0 = xmm1[0,0]
; AVX-NEXT: retq
%1 = shufflevector <4 x float> %a, <4 x float> %b, <4 x i32> <i32 4, i32 1, i32 6, i32 3>
%2 = shufflevector <4 x float> %1, <4 x float> %b, <4 x i32> <i32 0, i32 5, i32 0, i32 5>
@@ -1633,21 +1843,28 @@ define <4 x float> @combine_test2b(<4 x float> %a, <4 x float> %b) {
}
define <4 x float> @combine_test3b(<4 x float> %a, <4 x float> %b) {
-; SSE-LABEL: combine_test3b:
-; SSE: # BB#0:
-; SSE-NEXT: movaps %xmm1, %xmm2
-; SSE-NEXT: shufps {{.*#+}} xmm2 = xmm2[2,0],xmm0[3,0]
-; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,0],xmm2[0,2]
-; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[3,3]
-; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,2,1,3]
-; SSE-NEXT: retq
+; SSE2-LABEL: combine_test3b:
+; SSE2: # BB#0:
+; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,0],xmm1[3,0]
+; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[2,3]
+; SSE2-NEXT: retq
+;
+; SSSE3-LABEL: combine_test3b:
+; SSSE3: # BB#0:
+; SSSE3-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,0],xmm1[3,0]
+; SSSE3-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[2,3]
+; SSSE3-NEXT: retq
+;
+; SSE41-LABEL: combine_test3b:
+; SSE41: # BB#0:
+; SSE41-NEXT: blendpd {{.*#+}} xmm0 = xmm0[0],xmm1[1]
+; SSE41-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,3,2,3]
+; SSE41-NEXT: retq
;
; AVX-LABEL: combine_test3b:
; AVX: # BB#0:
-; AVX-NEXT: vshufps {{.*#+}} xmm2 = xmm1[2,0],xmm0[3,0]
-; AVX-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,0],xmm2[0,2]
-; AVX-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[3,3]
-; AVX-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,2,1,3]
+; AVX-NEXT: vblendpd {{.*#+}} xmm0 = xmm0[0],xmm1[1]
+; AVX-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[0,3,2,3]
; AVX-NEXT: retq
%1 = shufflevector <4 x float> %a, <4 x float> %b, <4 x i32> <i32 0, i32 0, i32 6, i32 3>
%2 = shufflevector <4 x float> %1, <4 x float> %b, <4 x i32> <i32 0, i32 7, i32 2, i32 7>
@@ -1655,23 +1872,11 @@ define <4 x float> @combine_test3b(<4 x float> %a, <4 x float> %b) {
}
define <4 x float> @combine_test4b(<4 x float> %a, <4 x float> %b) {
-; SSE2-LABEL: combine_test4b:
-; SSE2: # BB#0:
-; SSE2-NEXT: shufps {{.*#+}} xmm1 = xmm1[1,1,2,3]
-; SSE2-NEXT: movaps %xmm1, %xmm0
-; SSE2-NEXT: retq
-;
-; SSSE3-LABEL: combine_test4b:
-; SSSE3: # BB#0:
-; SSSE3-NEXT: shufps {{.*#+}} xmm1 = xmm1[1,1,2,3]
-; SSSE3-NEXT: movaps %xmm1, %xmm0
-; SSSE3-NEXT: retq
-;
-; SSE41-LABEL: combine_test4b:
-; SSE41: # BB#0:
-; SSE41-NEXT: shufps {{.*#+}} xmm1 = xmm1[1,1,2,3]
-; SSE41-NEXT: movaps %xmm1, %xmm0
-; SSE41-NEXT: retq
+; SSE-LABEL: combine_test4b:
+; SSE: # BB#0:
+; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[1,1,2,3]
+; SSE-NEXT: movaps %xmm1, %xmm0
+; SSE-NEXT: retq
;
; AVX-LABEL: combine_test4b:
; AVX: # BB#0:
@@ -1688,44 +1893,44 @@ define <4 x float> @combine_test4b(<4 x float> %a, <4 x float> %b) {
define <4 x i8> @combine_test1c(<4 x i8>* %a, <4 x i8>* %b) {
; SSE2-LABEL: combine_test1c:
; SSE2: # BB#0:
-; SSE2-NEXT: movd (%rdi), %xmm1
+; SSE2-NEXT: movd {{.*#+}} xmm1 = mem[0],zero,zero,zero
; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
; SSE2-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
-; SSE2-NEXT: movd (%rsi), %xmm0
+; SSE2-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3]
-; SSE2-NEXT: movss %xmm1, %xmm0
+; SSE2-NEXT: movss {{.*#+}} xmm0 = xmm1[0],xmm0[1,2,3]
; SSE2-NEXT: retq
;
; SSSE3-LABEL: combine_test1c:
; SSSE3: # BB#0:
-; SSSE3-NEXT: movd (%rdi), %xmm1
+; SSSE3-NEXT: movd {{.*#+}} xmm1 = mem[0],zero,zero,zero
; SSSE3-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
; SSSE3-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
-; SSSE3-NEXT: movd (%rsi), %xmm0
+; SSSE3-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
; SSSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; SSSE3-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3]
-; SSSE3-NEXT: movss %xmm1, %xmm0
+; SSSE3-NEXT: movss {{.*#+}} xmm0 = xmm1[0],xmm0[1,2,3]
; SSSE3-NEXT: retq
;
; SSE41-LABEL: combine_test1c:
; SSE41: # BB#0:
-; SSE41-NEXT: pmovzxbd (%rdi), %xmm1
-; SSE41-NEXT: pmovzxbd (%rsi), %xmm0
+; SSE41-NEXT: pmovzxbd {{.*#+}} xmm1 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
+; SSE41-NEXT: pmovzxbd {{.*#+}} xmm0 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm1[0,1],xmm0[2,3,4,5,6,7]
; SSE41-NEXT: retq
;
; AVX1-LABEL: combine_test1c:
; AVX1: # BB#0:
-; AVX1-NEXT: vpmovzxbd (%rdi), %xmm0
-; AVX1-NEXT: vpmovzxbd (%rsi), %xmm1
+; AVX1-NEXT: vpmovzxbd {{.*#+}} xmm0 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
+; AVX1-NEXT: vpmovzxbd {{.*#+}} xmm1 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3,4,5,6,7]
; AVX1-NEXT: retq
;
; AVX2-LABEL: combine_test1c:
; AVX2: # BB#0:
-; AVX2-NEXT: vpmovzxbd (%rdi), %xmm0
-; AVX2-NEXT: vpmovzxbd (%rsi), %xmm1
+; AVX2-NEXT: vpmovzxbd {{.*#+}} xmm0 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
+; AVX2-NEXT: vpmovzxbd {{.*#+}} xmm1 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
; AVX2-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0],xmm1[1,2,3]
; AVX2-NEXT: retq
%A = load <4 x i8>* %a
@@ -1738,10 +1943,10 @@ define <4 x i8> @combine_test1c(<4 x i8>* %a, <4 x i8>* %b) {
define <4 x i8> @combine_test2c(<4 x i8>* %a, <4 x i8>* %b) {
; SSE2-LABEL: combine_test2c:
; SSE2: # BB#0:
-; SSE2-NEXT: movd (%rdi), %xmm0
+; SSE2-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3]
-; SSE2-NEXT: movd (%rsi), %xmm1
+; SSE2-NEXT: movd {{.*#+}} xmm1 = mem[0],zero,zero,zero
; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
; SSE2-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
; SSE2-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
@@ -1749,10 +1954,10 @@ define <4 x i8> @combine_test2c(<4 x i8>* %a, <4 x i8>* %b) {
;
; SSSE3-LABEL: combine_test2c:
; SSSE3: # BB#0:
-; SSSE3-NEXT: movd (%rdi), %xmm0
+; SSSE3-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
; SSSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; SSSE3-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3]
-; SSSE3-NEXT: movd (%rsi), %xmm1
+; SSSE3-NEXT: movd {{.*#+}} xmm1 = mem[0],zero,zero,zero
; SSSE3-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
; SSSE3-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
; SSSE3-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
@@ -1760,15 +1965,15 @@ define <4 x i8> @combine_test2c(<4 x i8>* %a, <4 x i8>* %b) {
;
; SSE41-LABEL: combine_test2c:
; SSE41: # BB#0:
-; SSE41-NEXT: pmovzxbd (%rdi), %xmm0
-; SSE41-NEXT: pmovzxbd (%rsi), %xmm1
+; SSE41-NEXT: pmovzxbd {{.*#+}} xmm0 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
+; SSE41-NEXT: pmovzxbd {{.*#+}} xmm1 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
; SSE41-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; SSE41-NEXT: retq
;
; AVX-LABEL: combine_test2c:
; AVX: # BB#0:
-; AVX-NEXT: vpmovzxbd (%rdi), %xmm0
-; AVX-NEXT: vpmovzxbd (%rsi), %xmm1
+; AVX-NEXT: vpmovzxbd {{.*#+}} xmm0 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
+; AVX-NEXT: vpmovzxbd {{.*#+}} xmm1 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
; AVX-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; AVX-NEXT: retq
%A = load <4 x i8>* %a
@@ -1781,10 +1986,10 @@ define <4 x i8> @combine_test2c(<4 x i8>* %a, <4 x i8>* %b) {
define <4 x i8> @combine_test3c(<4 x i8>* %a, <4 x i8>* %b) {
; SSE2-LABEL: combine_test3c:
; SSE2: # BB#0:
-; SSE2-NEXT: movd (%rdi), %xmm1
+; SSE2-NEXT: movd {{.*#+}} xmm1 = mem[0],zero,zero,zero
; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
; SSE2-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
-; SSE2-NEXT: movd (%rsi), %xmm0
+; SSE2-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3]
; SSE2-NEXT: punpckhqdq {{.*#+}} xmm0 = xmm0[1],xmm1[1]
@@ -1792,10 +1997,10 @@ define <4 x i8> @combine_test3c(<4 x i8>* %a, <4 x i8>* %b) {
;
; SSSE3-LABEL: combine_test3c:
; SSSE3: # BB#0:
-; SSSE3-NEXT: movd (%rdi), %xmm1
+; SSSE3-NEXT: movd {{.*#+}} xmm1 = mem[0],zero,zero,zero
; SSSE3-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
; SSSE3-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
-; SSSE3-NEXT: movd (%rsi), %xmm0
+; SSSE3-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
; SSSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; SSSE3-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3]
; SSSE3-NEXT: punpckhqdq {{.*#+}} xmm0 = xmm0[1],xmm1[1]
@@ -1803,15 +2008,15 @@ define <4 x i8> @combine_test3c(<4 x i8>* %a, <4 x i8>* %b) {
;
; SSE41-LABEL: combine_test3c:
; SSE41: # BB#0:
-; SSE41-NEXT: pmovzxbd (%rdi), %xmm1
-; SSE41-NEXT: pmovzxbd (%rsi), %xmm0
+; SSE41-NEXT: pmovzxbd {{.*#+}} xmm1 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
+; SSE41-NEXT: pmovzxbd {{.*#+}} xmm0 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
; SSE41-NEXT: punpckhqdq {{.*#+}} xmm0 = xmm0[1],xmm1[1]
; SSE41-NEXT: retq
;
; AVX-LABEL: combine_test3c:
; AVX: # BB#0:
-; AVX-NEXT: vpmovzxbd (%rdi), %xmm0
-; AVX-NEXT: vpmovzxbd (%rsi), %xmm1
+; AVX-NEXT: vpmovzxbd {{.*#+}} xmm0 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
+; AVX-NEXT: vpmovzxbd {{.*#+}} xmm1 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
; AVX-NEXT: vpunpckhqdq {{.*#+}} xmm0 = xmm1[1],xmm0[1]
; AVX-NEXT: retq
%A = load <4 x i8>* %a
@@ -1824,52 +2029,46 @@ define <4 x i8> @combine_test3c(<4 x i8>* %a, <4 x i8>* %b) {
define <4 x i8> @combine_test4c(<4 x i8>* %a, <4 x i8>* %b) {
; SSE2-LABEL: combine_test4c:
; SSE2: # BB#0:
-; SSE2-NEXT: movd (%rdi), %xmm1
+; SSE2-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3]
+; SSE2-NEXT: movd {{.*#+}} xmm1 = mem[0],zero,zero,zero
; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
; SSE2-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
-; SSE2-NEXT: movd (%rsi), %xmm2
-; SSE2-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3],xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7]
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3]
-; SSE2-NEXT: movdqa %xmm2, %xmm0
-; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[1,3]
-; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,2,1,3]
-; SSE2-NEXT: shufps {{.*#+}} xmm2 = xmm2[3,0],xmm0[2,0]
-; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,1],xmm2[2,0]
+; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,0],xmm1[0,0]
+; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[2,0],xmm1[2,3]
; SSE2-NEXT: retq
;
; SSSE3-LABEL: combine_test4c:
; SSSE3: # BB#0:
-; SSSE3-NEXT: movd (%rdi), %xmm1
+; SSSE3-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; SSSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
+; SSSE3-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3]
+; SSSE3-NEXT: movd {{.*#+}} xmm1 = mem[0],zero,zero,zero
; SSSE3-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
; SSSE3-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
-; SSSE3-NEXT: movd (%rsi), %xmm2
-; SSSE3-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3],xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7]
-; SSSE3-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3]
-; SSSE3-NEXT: movdqa %xmm2, %xmm0
-; SSSE3-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[1,3]
-; SSSE3-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,2,1,3]
-; SSSE3-NEXT: shufps {{.*#+}} xmm2 = xmm2[3,0],xmm0[2,0]
-; SSSE3-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,1],xmm2[2,0]
+; SSSE3-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,0],xmm1[0,0]
+; SSSE3-NEXT: shufps {{.*#+}} xmm0 = xmm0[2,0],xmm1[2,3]
; SSSE3-NEXT: retq
;
; SSE41-LABEL: combine_test4c:
; SSE41: # BB#0:
-; SSE41-NEXT: pmovzxbd (%rdi), %xmm1
-; SSE41-NEXT: pmovzxbd (%rsi), %xmm0
+; SSE41-NEXT: pmovzxbd {{.*#+}} xmm1 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
+; SSE41-NEXT: pmovzxbd {{.*#+}} xmm0 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3],xmm0[4,5,6,7]
; SSE41-NEXT: retq
;
; AVX1-LABEL: combine_test4c:
; AVX1: # BB#0:
-; AVX1-NEXT: vpmovzxbd (%rdi), %xmm0
-; AVX1-NEXT: vpmovzxbd (%rsi), %xmm1
+; AVX1-NEXT: vpmovzxbd {{.*#+}} xmm0 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
+; AVX1-NEXT: vpmovzxbd {{.*#+}} xmm1 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm1[0,1],xmm0[2,3],xmm1[4,5,6,7]
; AVX1-NEXT: retq
;
; AVX2-LABEL: combine_test4c:
; AVX2: # BB#0:
-; AVX2-NEXT: vpmovzxbd (%rdi), %xmm0
-; AVX2-NEXT: vpmovzxbd (%rsi), %xmm1
+; AVX2-NEXT: vpmovzxbd {{.*#+}} xmm0 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
+; AVX2-NEXT: vpmovzxbd {{.*#+}} xmm1 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
; AVX2-NEXT: vpblendd {{.*#+}} xmm0 = xmm1[0],xmm0[1],xmm1[2,3]
; AVX2-NEXT: retq
%A = load <4 x i8>* %a
@@ -1912,12 +2111,12 @@ define <4 x i8> @combine_test4c(<4 x i8>* %a, <4 x i8>* %b) {
define <4 x float> @combine_blend_01(<4 x float> %a, <4 x float> %b) {
; SSE2-LABEL: combine_blend_01:
; SSE2: # BB#0:
-; SSE2-NEXT: movsd %xmm1, %xmm0
+; SSE2-NEXT: movsd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
; SSE2-NEXT: retq
;
; SSSE3-LABEL: combine_blend_01:
; SSSE3: # BB#0:
-; SSSE3-NEXT: movsd %xmm1, %xmm0
+; SSSE3-NEXT: movsd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
; SSSE3-NEXT: retq
;
; SSE41-LABEL: combine_blend_01:
@@ -1937,16 +2136,16 @@ define <4 x float> @combine_blend_01(<4 x float> %a, <4 x float> %b) {
define <4 x float> @combine_blend_02(<4 x float> %a, <4 x float> %b) {
; SSE2-LABEL: combine_blend_02:
; SSE2: # BB#0:
-; SSE2-NEXT: movss %xmm1, %xmm0
-; SSE2-NEXT: shufps {{.*#+}} xmm1 = xmm1[2,0],xmm0[3,0]
-; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,1],xmm1[0,2]
+; SSE2-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,2],xmm0[1,3]
+; SSE2-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,2,1,3]
+; SSE2-NEXT: movaps %xmm1, %xmm0
; SSE2-NEXT: retq
;
; SSSE3-LABEL: combine_blend_02:
; SSSE3: # BB#0:
-; SSSE3-NEXT: movss %xmm1, %xmm0
-; SSSE3-NEXT: shufps {{.*#+}} xmm1 = xmm1[2,0],xmm0[3,0]
-; SSSE3-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,1],xmm1[0,2]
+; SSSE3-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,2],xmm0[1,3]
+; SSSE3-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,2,1,3]
+; SSSE3-NEXT: movaps %xmm1, %xmm0
; SSSE3-NEXT: retq
;
; SSE41-LABEL: combine_blend_02:
@@ -1966,13 +2165,13 @@ define <4 x float> @combine_blend_02(<4 x float> %a, <4 x float> %b) {
define <4 x float> @combine_blend_123(<4 x float> %a, <4 x float> %b) {
; SSE2-LABEL: combine_blend_123:
; SSE2: # BB#0:
-; SSE2-NEXT: movss %xmm0, %xmm1
+; SSE2-NEXT: movss {{.*#+}} xmm1 = xmm0[0],xmm1[1,2,3]
; SSE2-NEXT: movaps %xmm1, %xmm0
; SSE2-NEXT: retq
;
; SSSE3-LABEL: combine_blend_123:
; SSSE3: # BB#0:
-; SSSE3-NEXT: movss %xmm0, %xmm1
+; SSSE3-NEXT: movss {{.*#+}} xmm1 = xmm0[0],xmm1[1,2,3]
; SSSE3-NEXT: movaps %xmm1, %xmm0
; SSSE3-NEXT: retq
;
@@ -2046,12 +2245,12 @@ define <4 x i32> @combine_test_movhl_3(<4 x i32> %a, <4 x i32> %b) {
define <4 x float> @combine_undef_input_test1(<4 x float> %a, <4 x float> %b) {
; SSE2-LABEL: combine_undef_input_test1:
; SSE2: # BB#0:
-; SSE2-NEXT: movsd %xmm1, %xmm0
+; SSE2-NEXT: movsd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
; SSE2-NEXT: retq
;
; SSSE3-LABEL: combine_undef_input_test1:
; SSSE3: # BB#0:
-; SSSE3-NEXT: movsd %xmm1, %xmm0
+; SSSE3-NEXT: movsd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
; SSSE3-NEXT: retq
;
; SSE41-LABEL: combine_undef_input_test1:
@@ -2117,14 +2316,14 @@ define <4 x float> @combine_undef_input_test4(<4 x float> %a, <4 x float> %b) {
define <4 x float> @combine_undef_input_test5(<4 x float> %a, <4 x float> %b) {
; SSE2-LABEL: combine_undef_input_test5:
; SSE2: # BB#0:
-; SSE2-NEXT: movsd %xmm0, %xmm1
-; SSE2-NEXT: movaps %xmm1, %xmm0
+; SSE2-NEXT: movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1]
+; SSE2-NEXT: movapd %xmm1, %xmm0
; SSE2-NEXT: retq
;
; SSSE3-LABEL: combine_undef_input_test5:
; SSSE3: # BB#0:
-; SSSE3-NEXT: movsd %xmm0, %xmm1
-; SSSE3-NEXT: movaps %xmm1, %xmm0
+; SSSE3-NEXT: movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1]
+; SSSE3-NEXT: movapd %xmm1, %xmm0
; SSSE3-NEXT: retq
;
; SSE41-LABEL: combine_undef_input_test5:
@@ -2162,17 +2361,17 @@ define <4 x float> @combine_undef_input_test7(<4 x float> %a) {
;
; SSSE3-LABEL: combine_undef_input_test7:
; SSSE3: # BB#0:
-; SSSE3-NEXT: unpcklpd {{.*#+}} xmm0 = xmm0[0,0]
+; SSSE3-NEXT: movddup {{.*#+}} xmm0 = xmm0[0,0]
; SSSE3-NEXT: retq
;
; SSE41-LABEL: combine_undef_input_test7:
; SSE41: # BB#0:
-; SSE41-NEXT: unpcklpd {{.*#+}} xmm0 = xmm0[0,0]
+; SSE41-NEXT: movddup {{.*#+}} xmm0 = xmm0[0,0]
; SSE41-NEXT: retq
;
; AVX-LABEL: combine_undef_input_test7:
; AVX: # BB#0:
-; AVX-NEXT: vunpcklpd {{.*#+}} xmm0 = xmm0[0,0]
+; AVX-NEXT: vmovddup {{.*#+}} xmm0 = xmm0[0,0]
; AVX-NEXT: retq
%1 = shufflevector <4 x float> %a, <4 x float> undef, <4 x i32> <i32 6, i32 0, i32 1, i32 7>
%2 = shufflevector <4 x float> %1, <4 x float> %a, <4 x i32> <i32 1, i32 2, i32 4, i32 5>
@@ -2187,17 +2386,17 @@ define <4 x float> @combine_undef_input_test8(<4 x float> %a) {
;
; SSSE3-LABEL: combine_undef_input_test8:
; SSSE3: # BB#0:
-; SSSE3-NEXT: unpcklpd {{.*#+}} xmm0 = xmm0[0,0]
+; SSSE3-NEXT: movddup {{.*#+}} xmm0 = xmm0[0,0]
; SSSE3-NEXT: retq
;
; SSE41-LABEL: combine_undef_input_test8:
; SSE41: # BB#0:
-; SSE41-NEXT: unpcklpd {{.*#+}} xmm0 = xmm0[0,0]
+; SSE41-NEXT: movddup {{.*#+}} xmm0 = xmm0[0,0]
; SSE41-NEXT: retq
;
; AVX-LABEL: combine_undef_input_test8:
; AVX: # BB#0:
-; AVX-NEXT: vunpcklpd {{.*#+}} xmm0 = xmm0[0,0]
+; AVX-NEXT: vmovddup {{.*#+}} xmm0 = xmm0[0,0]
; AVX-NEXT: retq
%1 = shufflevector <4 x float> %a, <4 x float> undef, <4 x i32> <i32 0, i32 5, i32 1, i32 7>
%2 = shufflevector <4 x float> %1, <4 x float> %a, <4 x i32> <i32 0, i32 2, i32 4, i32 1>
@@ -2231,12 +2430,12 @@ define <4 x float> @combine_undef_input_test10(<4 x float> %a) {
define <4 x float> @combine_undef_input_test11(<4 x float> %a, <4 x float> %b) {
; SSE2-LABEL: combine_undef_input_test11:
; SSE2: # BB#0:
-; SSE2-NEXT: movsd %xmm1, %xmm0
+; SSE2-NEXT: movsd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
; SSE2-NEXT: retq
;
; SSSE3-LABEL: combine_undef_input_test11:
; SSSE3: # BB#0:
-; SSSE3-NEXT: movsd %xmm1, %xmm0
+; SSSE3-NEXT: movsd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
; SSSE3-NEXT: retq
;
; SSE41-LABEL: combine_undef_input_test11:
@@ -2302,14 +2501,14 @@ define <4 x float> @combine_undef_input_test14(<4 x float> %a, <4 x float> %b) {
define <4 x float> @combine_undef_input_test15(<4 x float> %a, <4 x float> %b) {
; SSE2-LABEL: combine_undef_input_test15:
; SSE2: # BB#0:
-; SSE2-NEXT: movsd %xmm0, %xmm1
-; SSE2-NEXT: movaps %xmm1, %xmm0
+; SSE2-NEXT: movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1]
+; SSE2-NEXT: movapd %xmm1, %xmm0
; SSE2-NEXT: retq
;
; SSSE3-LABEL: combine_undef_input_test15:
; SSSE3: # BB#0:
-; SSSE3-NEXT: movsd %xmm0, %xmm1
-; SSSE3-NEXT: movaps %xmm1, %xmm0
+; SSSE3-NEXT: movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1]
+; SSSE3-NEXT: movapd %xmm1, %xmm0
; SSSE3-NEXT: retq
;
; SSE41-LABEL: combine_undef_input_test15:
@@ -2353,17 +2552,17 @@ define <4 x float> @combine_undef_input_test17(<4 x float> %a) {
;
; SSSE3-LABEL: combine_undef_input_test17:
; SSSE3: # BB#0:
-; SSSE3-NEXT: unpcklpd {{.*#+}} xmm0 = xmm0[0,0]
+; SSSE3-NEXT: movddup {{.*#+}} xmm0 = xmm0[0,0]
; SSSE3-NEXT: retq
;
; SSE41-LABEL: combine_undef_input_test17:
; SSE41: # BB#0:
-; SSE41-NEXT: unpcklpd {{.*#+}} xmm0 = xmm0[0,0]
+; SSE41-NEXT: movddup {{.*#+}} xmm0 = xmm0[0,0]
; SSE41-NEXT: retq
;
; AVX-LABEL: combine_undef_input_test17:
; AVX: # BB#0:
-; AVX-NEXT: vunpcklpd {{.*#+}} xmm0 = xmm0[0,0]
+; AVX-NEXT: vmovddup {{.*#+}} xmm0 = xmm0[0,0]
; AVX-NEXT: retq
%1 = shufflevector <4 x float> %a, <4 x float> undef, <4 x i32> <i32 6, i32 0, i32 1, i32 7>
%2 = shufflevector <4 x float> %a, <4 x float> %1, <4 x i32> <i32 5, i32 6, i32 0, i32 1>
@@ -2378,17 +2577,17 @@ define <4 x float> @combine_undef_input_test18(<4 x float> %a) {
;
; SSSE3-LABEL: combine_undef_input_test18:
; SSSE3: # BB#0:
-; SSSE3-NEXT: unpcklpd {{.*#+}} xmm0 = xmm0[0,0]
+; SSSE3-NEXT: movddup {{.*#+}} xmm0 = xmm0[0,0]
; SSSE3-NEXT: retq
;
; SSE41-LABEL: combine_undef_input_test18:
; SSE41: # BB#0:
-; SSE41-NEXT: unpcklpd {{.*#+}} xmm0 = xmm0[0,0]
+; SSE41-NEXT: movddup {{.*#+}} xmm0 = xmm0[0,0]
; SSE41-NEXT: retq
;
; AVX-LABEL: combine_undef_input_test18:
; AVX: # BB#0:
-; AVX-NEXT: vunpcklpd {{.*#+}} xmm0 = xmm0[0,0]
+; AVX-NEXT: vmovddup {{.*#+}} xmm0 = xmm0[0,0]
; AVX-NEXT: retq
%1 = shufflevector <4 x float> %a, <4 x float> undef, <4 x i32> <i32 0, i32 5, i32 1, i32 7>
%2 = shufflevector <4 x float> %a, <4 x float> %1, <4 x i32> <i32 4, i32 6, i32 0, i32 5>
@@ -2463,19 +2662,16 @@ define <8 x i32> @combine_unneeded_subvector2(<8 x i32> %a, <8 x i32> %b) {
; AVX1: # BB#0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
; AVX1-NEXT: vpaddd {{.*}}(%rip), %xmm0, %xmm0
-; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm1
-; AVX1-NEXT: vpermilps {{.*#+}} xmm1 = xmm1[3,2,1,0]
-; AVX1-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[3,2,1,0]
-; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
+; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
+; AVX1-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm1[2,3],ymm0[2,3]
+; AVX1-NEXT: vpermilps {{.*#+}} ymm0 = ymm0[3,2,1,0,7,6,5,4]
; AVX1-NEXT: retq
;
; AVX2-LABEL: combine_unneeded_subvector2:
; AVX2: # BB#0:
; AVX2-NEXT: vpaddd {{.*}}(%rip), %ymm0, %ymm0
-; AVX2-NEXT: vmovdqa {{.*#+}} ymm2 = <7,6,5,4,u,u,u,u>
-; AVX2-NEXT: vpermd %ymm1, %ymm2, %ymm1
+; AVX2-NEXT: vperm2i128 {{.*#+}} ymm0 = ymm1[2,3],ymm0[2,3]
; AVX2-NEXT: vpshufd {{.*#+}} ymm0 = ymm0[3,2,1,0,7,6,5,4]
-; AVX2-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
; AVX2-NEXT: retq
%c = add <8 x i32> %a, <i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8>
%d = shufflevector <8 x i32> %b, <8 x i32> %c, <8 x i32> <i32 7, i32 6, i32 5, i32 4, i32 15, i32 14, i32 13, i32 12>
@@ -2483,6 +2679,20 @@ define <8 x i32> @combine_unneeded_subvector2(<8 x i32> %a, <8 x i32> %b) {
}
define <4 x float> @combine_insertps1(<4 x float> %a, <4 x float> %b) {
+; SSE2-LABEL: combine_insertps1:
+; SSE2: # BB#0:
+; SSE2-NEXT: shufps {{.*#+}} xmm1 = xmm1[2,0],xmm0[1,0]
+; SSE2-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,2],xmm0[2,3]
+; SSE2-NEXT: movaps %xmm1, %xmm0
+; SSE2-NEXT: retq
+;
+; SSSE3-LABEL: combine_insertps1:
+; SSSE3: # BB#0:
+; SSSE3-NEXT: shufps {{.*#+}} xmm1 = xmm1[2,0],xmm0[1,0]
+; SSSE3-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,2],xmm0[2,3]
+; SSSE3-NEXT: movaps %xmm1, %xmm0
+; SSSE3-NEXT: retq
+;
; SSE41-LABEL: combine_insertps1:
; SSE41: # BB#0:
; SSE41-NEXT: insertps {{.*#+}} xmm0 = xmm1[2],xmm0[1,2,3]
@@ -2499,6 +2709,20 @@ define <4 x float> @combine_insertps1(<4 x float> %a, <4 x float> %b) {
}
define <4 x float> @combine_insertps2(<4 x float> %a, <4 x float> %b) {
+; SSE2-LABEL: combine_insertps2:
+; SSE2: # BB#0:
+; SSE2-NEXT: shufps {{.*#+}} xmm1 = xmm1[2,0],xmm0[0,0]
+; SSE2-NEXT: shufps {{.*#+}} xmm1 = xmm1[2,0],xmm0[2,3]
+; SSE2-NEXT: movaps %xmm1, %xmm0
+; SSE2-NEXT: retq
+;
+; SSSE3-LABEL: combine_insertps2:
+; SSSE3: # BB#0:
+; SSSE3-NEXT: shufps {{.*#+}} xmm1 = xmm1[2,0],xmm0[0,0]
+; SSSE3-NEXT: shufps {{.*#+}} xmm1 = xmm1[2,0],xmm0[2,3]
+; SSSE3-NEXT: movaps %xmm1, %xmm0
+; SSSE3-NEXT: retq
+;
; SSE41-LABEL: combine_insertps2:
; SSE41: # BB#0:
; SSE41-NEXT: insertps {{.*#+}} xmm0 = xmm0[0],xmm1[2],xmm0[2,3]
@@ -2515,6 +2739,18 @@ define <4 x float> @combine_insertps2(<4 x float> %a, <4 x float> %b) {
}
define <4 x float> @combine_insertps3(<4 x float> %a, <4 x float> %b) {
+; SSE2-LABEL: combine_insertps3:
+; SSE2: # BB#0:
+; SSE2-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,0],xmm0[3,0]
+; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,1],xmm1[0,2]
+; SSE2-NEXT: retq
+;
+; SSSE3-LABEL: combine_insertps3:
+; SSSE3: # BB#0:
+; SSSE3-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,0],xmm0[3,0]
+; SSSE3-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,1],xmm1[0,2]
+; SSSE3-NEXT: retq
+;
; SSE41-LABEL: combine_insertps3:
; SSE41: # BB#0:
; SSE41-NEXT: insertps {{.*#+}} xmm0 = xmm0[0,1],xmm1[0],xmm0[3]
@@ -2531,6 +2767,18 @@ define <4 x float> @combine_insertps3(<4 x float> %a, <4 x float> %b) {
}
define <4 x float> @combine_insertps4(<4 x float> %a, <4 x float> %b) {
+; SSE2-LABEL: combine_insertps4:
+; SSE2: # BB#0:
+; SSE2-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,0],xmm0[2,0]
+; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,0]
+; SSE2-NEXT: retq
+;
+; SSSE3-LABEL: combine_insertps4:
+; SSSE3: # BB#0:
+; SSSE3-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,0],xmm0[2,0]
+; SSSE3-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,0]
+; SSSE3-NEXT: retq
+;
; SSE41-LABEL: combine_insertps4:
; SSE41: # BB#0:
; SSE41-NEXT: insertps {{.*#+}} xmm0 = xmm0[0,1,2],xmm1[0]
@@ -2545,3 +2793,115 @@ define <4 x float> @combine_insertps4(<4 x float> %a, <4 x float> %b) {
%d = shufflevector <4 x float> %a, <4 x float> %c, <4 x i32><i32 4, i32 1, i32 6, i32 5>
ret <4 x float> %d
}
+
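+; PR22377: %b is unused; the odd and even lanes of %a are added and the sums
+; interleaved back with the even lanes, so the result should be
+; <a0, a0+a1, a2, a2+a3>, computed entirely from %xmm0.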
+define <4 x float> @PR22377(<4 x float> %a, <4 x float> %b) {
+; SSE-LABEL: PR22377:
+; SSE: # BB#0: # %entry
+; SSE-NEXT: movaps %xmm0, %xmm1
+; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[1,3,1,3]
+; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,2,0,2]
+; SSE-NEXT: addps %xmm0, %xmm1
+; SSE-NEXT: unpcklps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
+; SSE-NEXT: retq
+;
+; AVX-LABEL: PR22377:
+; AVX: # BB#0: # %entry
+; AVX-NEXT: vpermilps {{.*#+}} xmm1 = xmm0[1,3,1,3]
+; AVX-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[0,2,0,2]
+; AVX-NEXT: vaddps %xmm0, %xmm1, %xmm1
+; AVX-NEXT: vunpcklps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
+; AVX-NEXT: retq
+entry:
+ %s1 = shufflevector <4 x float> %a, <4 x float> undef, <4 x i32> <i32 1, i32 3, i32 1, i32 3>
+ %s2 = shufflevector <4 x float> %a, <4 x float> undef, <4 x i32> <i32 0, i32 2, i32 0, i32 2>
+ %r2 = fadd <4 x float> %s1, %s2
+ %s3 = shufflevector <4 x float> %s2, <4 x float> %r2, <4 x i32> <i32 0, i32 4, i32 1, i32 5>
+ ret <4 x float> %s3
+}
+
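+; PR22390: %a is rotated right by one lane, lane 0 of the rotated vector is
+; replaced with lane 0 of %b, and the two vectors are added.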
+define <4 x float> @PR22390(<4 x float> %a, <4 x float> %b) {
+; SSE2-LABEL: PR22390:
+; SSE2: # BB#0: # %entry
+; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[3,0,1,2]
+; SSE2-NEXT: movaps %xmm0, %xmm2
+; SSE2-NEXT: movss {{.*#+}} xmm2 = xmm1[0],xmm2[1,2,3]
+; SSE2-NEXT: addps %xmm0, %xmm2
+; SSE2-NEXT: movaps %xmm2, %xmm0
+; SSE2-NEXT: retq
+;
+; SSSE3-LABEL: PR22390:
+; SSSE3: # BB#0: # %entry
+; SSSE3-NEXT: shufps {{.*#+}} xmm0 = xmm0[3,0,1,2]
+; SSSE3-NEXT: movaps %xmm0, %xmm2
+; SSSE3-NEXT: movss {{.*#+}} xmm2 = xmm1[0],xmm2[1,2,3]
+; SSSE3-NEXT: addps %xmm0, %xmm2
+; SSSE3-NEXT: movaps %xmm2, %xmm0
+; SSSE3-NEXT: retq
+;
+; SSE41-LABEL: PR22390:
+; SSE41: # BB#0: # %entry
+; SSE41-NEXT: shufps {{.*#+}} xmm0 = xmm0[3,0,1,2]
+; SSE41-NEXT: blendps {{.*#+}} xmm1 = xmm1[0],xmm0[1,2,3]
+; SSE41-NEXT: addps %xmm1, %xmm0
+; SSE41-NEXT: retq
+;
+; AVX-LABEL: PR22390:
+; AVX: # BB#0: # %entry
+; AVX-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[3,0,1,2]
+; AVX-NEXT: vblendps {{.*#+}} xmm1 = xmm1[0],xmm0[1,2,3]
+; AVX-NEXT: vaddps %xmm1, %xmm0, %xmm0
+; AVX-NEXT: retq
+entry:
+ %s1 = shufflevector <4 x float> %a, <4 x float> undef, <4 x i32> <i32 3, i32 0, i32 1, i32 2>
+ %s2 = shufflevector <4 x float> %s1, <4 x float> %b, <4 x i32> <i32 4, i32 1, i32 2, i32 3>
+ %r2 = fadd <4 x float> %s1, %s2
+ ret <4 x float> %r2
+}
+
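+; PR22412: the low two lanes of %a are blended over %b, and the blend is then
+; shuffled with the cross-lane mask <1,0,7,6,5,4,3,2>, swapping the low pair
+; and reversing the remaining six lanes.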
+define <8 x float> @PR22412(<8 x float> %a, <8 x float> %b) {
+; SSE2-LABEL: PR22412:
+; SSE2: # BB#0: # %entry
+; SSE2-NEXT: movsd {{.*#+}} xmm2 = xmm0[0],xmm2[1]
+; SSE2-NEXT: movapd %xmm2, %xmm0
+; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,0],xmm3[3,2]
+; SSE2-NEXT: shufps {{.*#+}} xmm3 = xmm3[1,0],xmm2[3,2]
+; SSE2-NEXT: movaps %xmm3, %xmm1
+; SSE2-NEXT: retq
+;
+; SSSE3-LABEL: PR22412:
+; SSSE3: # BB#0: # %entry
+; SSSE3-NEXT: movsd {{.*#+}} xmm2 = xmm0[0],xmm2[1]
+; SSSE3-NEXT: movapd %xmm2, %xmm0
+; SSSE3-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,0],xmm3[3,2]
+; SSSE3-NEXT: shufps {{.*#+}} xmm3 = xmm3[1,0],xmm2[3,2]
+; SSSE3-NEXT: movaps %xmm3, %xmm1
+; SSSE3-NEXT: retq
+;
+; SSE41-LABEL: PR22412:
+; SSE41: # BB#0: # %entry
+; SSE41-NEXT: blendpd {{.*#+}} xmm0 = xmm0[0],xmm2[1]
+; SSE41-NEXT: movapd %xmm0, %xmm1
+; SSE41-NEXT: shufps {{.*#+}} xmm1 = xmm1[1,0],xmm3[3,2]
+; SSE41-NEXT: shufps {{.*#+}} xmm3 = xmm3[1,0],xmm0[3,2]
+; SSE41-NEXT: movaps %xmm1, %xmm0
+; SSE41-NEXT: movaps %xmm3, %xmm1
+; SSE41-NEXT: retq
+;
+; AVX1-LABEL: PR22412:
+; AVX1: # BB#0: # %entry
+; AVX1-NEXT: vblendpd {{.*#+}} ymm0 = ymm0[0],ymm1[1,2,3]
+; AVX1-NEXT: vperm2f128 {{.*#+}} ymm1 = ymm0[2,3,0,1]
+; AVX1-NEXT: vshufps {{.*#+}} ymm0 = ymm0[1,0],ymm1[3,2],ymm0[5,4],ymm1[7,6]
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: PR22412:
+; AVX2: # BB#0: # %entry
+; AVX2-NEXT: vblendpd {{.*#+}} ymm0 = ymm0[0],ymm1[1,2,3]
+; AVX2-NEXT: vmovaps {{.*#+}} ymm1 = [1,0,7,6,5,4,3,2]
+; AVX2-NEXT: vpermps %ymm0, %ymm1, %ymm0
+; AVX2-NEXT: retq
+entry:
+ %s1 = shufflevector <8 x float> %a, <8 x float> %b, <8 x i32> <i32 0, i32 1, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %s2 = shufflevector <8 x float> %s1, <8 x float> undef, <8 x i32> <i32 1, i32 0, i32 7, i32 6, i32 5, i32 4, i32 3, i32 2>
+ ret <8 x float> %s2
+}
diff --git a/test/CodeGen/X86/vector-shuffle-mmx.ll b/test/CodeGen/X86/vector-shuffle-mmx.ll
new file mode 100644
index 0000000..19608bd
--- /dev/null
+++ b/test/CodeGen/X86/vector-shuffle-mmx.ll
@@ -0,0 +1,106 @@
+; RUN: llc < %s -mtriple=i686-darwin -mattr=+mmx,+sse2 | FileCheck --check-prefix=X32 %s
+; RUN: llc < %s -mtriple=x86_64-darwin -mattr=+mmx,+sse2 | FileCheck --check-prefix=X64 %s
+
+; If there is no explicit MMX type usage, vector operations should always be
+; promoted to XMM registers.
+
+define void @test0(<1 x i64>* %x) {
+; X32-LABEL: test0:
+; X32: ## BB#0: ## %entry
+; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X32-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
+; X32-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,1,3]
+; X32-NEXT: movlpd %xmm0, (%eax)
+; X32-NEXT: retl
+;
+; X64-LABEL: test0:
+; X64: ## BB#0: ## %entry
+; X64-NEXT: movq {{.*#+}} xmm0 = mem[0],zero
+; X64-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,1,3]
+; X64-NEXT: movq %xmm0, (%rdi)
+; X64-NEXT: retq
+entry:
+ %tmp2 = load <1 x i64>* %x
+ %tmp6 = bitcast <1 x i64> %tmp2 to <2 x i32>
+ %tmp9 = shufflevector <2 x i32> %tmp6, <2 x i32> undef, <2 x i32> < i32 1, i32 1 >
+ %tmp10 = bitcast <2 x i32> %tmp9 to <1 x i64>
+ store <1 x i64> %tmp10, <1 x i64>* %x
+ ret void
+}
+
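+; test1 reaches the MMX unit only through the maskmovq intrinsic, which takes
+; x86_mmx operands, so %mm registers are expected around that call while the
+; vector arithmetic itself stays in XMM.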
+define void @test1() {
+; X32-LABEL: test1:
+; X32: ## BB#0: ## %entry
+; X32-NEXT: pushl %edi
+; X32-NEXT: Ltmp0:
+; X32-NEXT: .cfi_def_cfa_offset 8
+; X32-NEXT: subl $16, %esp
+; X32-NEXT: Ltmp1:
+; X32-NEXT: .cfi_def_cfa_offset 24
+; X32-NEXT: Ltmp2:
+; X32-NEXT: .cfi_offset %edi, -8
+; X32-NEXT: xorpd %xmm0, %xmm0
+; X32-NEXT: movlpd %xmm0, (%esp)
+; X32-NEXT: movq (%esp), %mm0
+; X32-NEXT: pshuflw {{.*#+}} xmm0 = mem[0,2,2,3,4,5,6,7]
+; X32-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,6,6,7]
+; X32-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
+; X32-NEXT: movlpd %xmm0, {{[0-9]+}}(%esp)
+; X32-NEXT: movq {{[0-9]+}}(%esp), %mm1
+; X32-NEXT: xorl %edi, %edi
+; X32-NEXT: maskmovq %mm1, %mm0
+; X32-NEXT: addl $16, %esp
+; X32-NEXT: popl %edi
+; X32-NEXT: retl
+;
+; X64-LABEL: test1:
+; X64: ## BB#0: ## %entry
+; X64-NEXT: pxor %xmm0, %xmm0
+; X64-NEXT: movq %xmm0, -{{[0-9]+}}(%rsp)
+; X64-NEXT: movq -{{[0-9]+}}(%rsp), %mm0
+; X64-NEXT: pshuflw {{.*#+}} xmm0 = mem[0,2,2,3,4,5,6,7]
+; X64-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,6,6,7]
+; X64-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
+; X64-NEXT: movq %xmm0, -{{[0-9]+}}(%rsp)
+; X64-NEXT: movq -{{[0-9]+}}(%rsp), %mm1
+; X64-NEXT: xorl %edi, %edi
+; X64-NEXT: maskmovq %mm1, %mm0
+; X64-NEXT: retq
+entry:
+ %tmp528 = bitcast <8 x i8> zeroinitializer to <2 x i32>
+ %tmp529 = and <2 x i32> %tmp528, bitcast (<4 x i16> < i16 -32640, i16 16448, i16 8224, i16 4112 > to <2 x i32>)
+ %tmp542 = bitcast <2 x i32> %tmp529 to <4 x i16>
+ %tmp543 = add <4 x i16> %tmp542, < i16 0, i16 16448, i16 24672, i16 28784 >
+ %tmp555 = bitcast <4 x i16> %tmp543 to <8 x i8>
+ %tmp556 = bitcast <8 x i8> %tmp555 to x86_mmx
+ %tmp557 = bitcast <8 x i8> zeroinitializer to x86_mmx
+ tail call void @llvm.x86.mmx.maskmovq( x86_mmx %tmp557, x86_mmx %tmp556, i8* null)
+ ret void
+}
+
+@tmp_V2i = common global <2 x i32> zeroinitializer
+
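+; test2 splats element 0 of the global <2 x i32>; there is no explicit MMX
+; type here either, so the shuffle should again be done in XMM registers.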
+define void @test2() nounwind {
+; X32-LABEL: test2:
+; X32: ## BB#0: ## %entry
+; X32-NEXT: movl L_tmp_V2i$non_lazy_ptr, %eax
+; X32-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
+; X32-NEXT: movlhps {{.*#+}} xmm0 = xmm0[0,0]
+; X32-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
+; X32-NEXT: movlpd %xmm0, (%eax)
+; X32-NEXT: retl
+;
+; X64-LABEL: test2:
+; X64: ## BB#0: ## %entry
+; X64-NEXT: movq _tmp_V2i@{{.*}}(%rip), %rax
+; X64-NEXT: movq {{.*#+}} xmm0 = mem[0],zero
+; X64-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,0,1]
+; X64-NEXT: movq %xmm0, (%rax)
+; X64-NEXT: retq
+entry:
+ %0 = load <2 x i32>* @tmp_V2i, align 8
+ %1 = shufflevector <2 x i32> %0, <2 x i32> undef, <2 x i32> zeroinitializer
+ store <2 x i32> %1, <2 x i32>* @tmp_V2i, align 8
+ ret void
+}
+
+declare void @llvm.x86.mmx.maskmovq(x86_mmx, x86_mmx, i8*)
diff --git a/test/CodeGen/X86/vector-shuffle-sse1.ll b/test/CodeGen/X86/vector-shuffle-sse1.ll
index 226deb0..b4cb0ec 100644
--- a/test/CodeGen/X86/vector-shuffle-sse1.ll
+++ b/test/CodeGen/X86/vector-shuffle-sse1.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -mcpu=x86-64 -mattr=-sse2 -x86-experimental-vector-shuffle-lowering | FileCheck %s --check-prefix=SSE1
+; RUN: llc < %s -mcpu=x86-64 -mattr=-sse2 | FileCheck %s --check-prefix=SSE1
target triple = "x86_64-unknown-unknown"
@@ -95,7 +95,7 @@ define <4 x float> @shuffle_v4f32_4zzz(<4 x float> %a) {
; SSE1-LABEL: shuffle_v4f32_4zzz:
; SSE1: # BB#0:
; SSE1-NEXT: xorps %xmm1, %xmm1
-; SSE1-NEXT: movss %xmm0, %xmm1
+; SSE1-NEXT: movss {{.*#+}} xmm1 = xmm0[0],xmm1[1,2,3]
; SSE1-NEXT: movaps %xmm1, %xmm0
; SSE1-NEXT: retq
%shuffle = shufflevector <4 x float> zeroinitializer, <4 x float> %a, <4 x i32> <i32 4, i32 1, i32 2, i32 3>
@@ -106,8 +106,8 @@ define <4 x float> @shuffle_v4f32_z4zz(<4 x float> %a) {
; SSE1-LABEL: shuffle_v4f32_z4zz:
; SSE1: # BB#0:
; SSE1-NEXT: xorps %xmm1, %xmm1
-; SSE1-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,0],xmm1[2,0]
-; SSE1-NEXT: shufps {{.*#+}} xmm0 = xmm0[2,0],xmm1[3,0]
+; SSE1-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,0],xmm1[0,0]
+; SSE1-NEXT: shufps {{.*#+}} xmm0 = xmm0[2,0],xmm1[2,3]
; SSE1-NEXT: retq
%shuffle = shufflevector <4 x float> zeroinitializer, <4 x float> %a, <4 x i32> <i32 2, i32 4, i32 3, i32 0>
ret <4 x float> %shuffle
@@ -117,8 +117,8 @@ define <4 x float> @shuffle_v4f32_zz4z(<4 x float> %a) {
; SSE1-LABEL: shuffle_v4f32_zz4z:
; SSE1: # BB#0:
; SSE1-NEXT: xorps %xmm1, %xmm1
-; SSE1-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,0],xmm1[0,0]
-; SSE1-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,0],xmm0[0,2]
+; SSE1-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,0],xmm1[3,0]
+; SSE1-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,1],xmm0[0,2]
; SSE1-NEXT: movaps %xmm1, %xmm0
; SSE1-NEXT: retq
%shuffle = shufflevector <4 x float> zeroinitializer, <4 x float> %a, <4 x i32> <i32 0, i32 0, i32 4, i32 0>
@@ -163,7 +163,7 @@ define <4 x float> @insert_reg_and_zero_v4f32(float %a) {
; SSE1-LABEL: insert_reg_and_zero_v4f32:
; SSE1: # BB#0:
; SSE1-NEXT: xorps %xmm1, %xmm1
-; SSE1-NEXT: movss %xmm0, %xmm1
+; SSE1-NEXT: movss {{.*#+}} xmm1 = xmm0[0],xmm1[1,2,3]
; SSE1-NEXT: movaps %xmm1, %xmm0
; SSE1-NEXT: retq
%v = insertelement <4 x float> undef, float %a, i32 0
@@ -174,7 +174,7 @@ define <4 x float> @insert_reg_and_zero_v4f32(float %a) {
define <4 x float> @insert_mem_and_zero_v4f32(float* %ptr) {
; SSE1-LABEL: insert_mem_and_zero_v4f32:
; SSE1: # BB#0:
-; SSE1-NEXT: movss (%rdi), %xmm0
+; SSE1-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; SSE1-NEXT: retq
%a = load float* %ptr
%v = insertelement <4 x float> undef, float %a, i32 0
@@ -186,14 +186,14 @@ define <4 x float> @insert_mem_lo_v4f32(<2 x float>* %ptr, <4 x float> %b) {
; SSE1-LABEL: insert_mem_lo_v4f32:
; SSE1: # BB#0:
; SSE1-NEXT: movq (%rdi), %rax
-; SSE1-NEXT: movl %eax, {{[-0-9]+}}(%rsp)
+; SSE1-NEXT: movl %eax, -{{[0-9]+}}(%rsp)
; SSE1-NEXT: shrq $32, %rax
; SSE1-NEXT: movl %eax, -{{[0-9]+}}(%rsp)
-; SSE1-NEXT: movss {{[-0-9]+}}(%rsp), %xmm1
-; SSE1-NEXT: movss {{[-0-9]+}}(%rsp), %xmm2
+; SSE1-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; SSE1-NEXT: movss {{.*#+}} xmm2 = mem[0],zero,zero,zero
; SSE1-NEXT: unpcklps {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1]
; SSE1-NEXT: xorps %xmm2, %xmm2
-; SSE1-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,1],xmm2[0,1]
+; SSE1-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,1],xmm2[2,3]
; SSE1-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,1],xmm0[2,3]
; SSE1-NEXT: movaps %xmm1, %xmm0
; SSE1-NEXT: retq
@@ -207,14 +207,14 @@ define <4 x float> @insert_mem_hi_v4f32(<2 x float>* %ptr, <4 x float> %b) {
; SSE1-LABEL: insert_mem_hi_v4f32:
; SSE1: # BB#0:
; SSE1-NEXT: movq (%rdi), %rax
-; SSE1-NEXT: movl %eax, {{[-0-9]+}}(%rsp)
+; SSE1-NEXT: movl %eax, -{{[0-9]+}}(%rsp)
; SSE1-NEXT: shrq $32, %rax
-; SSE1-NEXT: movl %eax, {{[-0-9]+}}(%rsp)
-; SSE1-NEXT: movss {{[-0-9]+}}(%rsp), %xmm1
-; SSE1-NEXT: movss {{[-0-9]+}}(%rsp), %xmm2
+; SSE1-NEXT: movl %eax, -{{[0-9]+}}(%rsp)
+; SSE1-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; SSE1-NEXT: movss {{.*#+}} xmm2 = mem[0],zero,zero,zero
; SSE1-NEXT: unpcklps {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1]
; SSE1-NEXT: xorps %xmm2, %xmm2
-; SSE1-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,1],xmm2[0,1]
+; SSE1-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,1],xmm2[2,3]
; SSE1-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,1],xmm1[0,1]
; SSE1-NEXT: retq
%a = load <2 x float>* %ptr
diff --git a/test/CodeGen/X86/vector-trunc.ll b/test/CodeGen/X86/vector-trunc.ll
new file mode 100644
index 0000000..a336015
--- /dev/null
+++ b/test/CodeGen/X86/vector-trunc.ll
@@ -0,0 +1,223 @@
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=x86-64 -mattr=+sse2 | FileCheck %s --check-prefix=SSE --check-prefix=SSE2
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=x86-64 -mattr=+ssse3 | FileCheck %s --check-prefix=SSE --check-prefix=SSSE3
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=x86-64 -mattr=+sse4.1 | FileCheck %s --check-prefix=SSE --check-prefix=SSE41
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=x86-64 -mattr=+avx | FileCheck %s --check-prefix=AVX --check-prefix=AVX1
+
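+; The trunc2x* tests below truncate two vectors and concatenate the results;
+; the trunc* tests truncate one vector and bitcast it to i64. In both cases
+; the checks expect the truncation to lower to shuffle/pack instructions
+; rather than scalarized element extraction.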
+define <4 x i32> @trunc2x2i64(<2 x i64> %a, <2 x i64> %b) {
+; SSE2-LABEL: trunc2x2i64:
+; SSE2: # BB#0: # %entry
+; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
+; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
+; SSE2-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; SSE2-NEXT: retq
+;
+; SSSE3-LABEL: trunc2x2i64:
+; SSSE3: # BB#0: # %entry
+; SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
+; SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
+; SSSE3-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; SSSE3-NEXT: retq
+;
+; SSE41-LABEL: trunc2x2i64:
+; SSE41: # BB#0: # %entry
+; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,1,0,2]
+; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
+; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm1[4,5,6,7]
+; SSE41-NEXT: retq
+;
+; AVX-LABEL: trunc2x2i64:
+; AVX: # BB#0: # %entry
+; AVX-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,1,0,2]
+; AVX-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
+; AVX-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm1[4,5,6,7]
+; AVX-NEXT: retq
+
+entry:
+ %0 = trunc <2 x i64> %a to <2 x i32>
+ %1 = trunc <2 x i64> %b to <2 x i32>
+ %2 = shufflevector <2 x i32> %0, <2 x i32> %1, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ ret <4 x i32> %2
+}
+
+define i64 @trunc2i64(<2 x i64> %inval) {
+; SSE-LABEL: trunc2i64:
+; SSE: # BB#0: # %entry
+; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
+; SSE-NEXT: movd %xmm0, %rax
+; SSE-NEXT: retq
+;
+; AVX-LABEL: trunc2i64:
+; AVX: # BB#0: # %entry
+; AVX-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
+; AVX-NEXT: vmovq %xmm0, %rax
+; AVX-NEXT: retq
+
+entry:
+ %0 = trunc <2 x i64> %inval to <2 x i32>
+ %1 = bitcast <2 x i32> %0 to i64
+ ret i64 %1
+}
+
+define <8 x i16> @trunc2x4i32(<4 x i32> %a, <4 x i32> %b) {
+; SSE2-LABEL: trunc2x4i32:
+; SSE2: # BB#0: # %entry
+; SSE2-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[0,2,2,3,4,5,6,7]
+; SSE2-NEXT: pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,6,6,7]
+; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
+; SSE2-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,2,2,3,4,5,6,7]
+; SSE2-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,6,6,7]
+; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
+; SSE2-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; SSE2-NEXT: retq
+;
+; SSSE3-LABEL: trunc2x4i32:
+; SSSE3: # BB#0: # %entry
+; SSSE3-NEXT: movdqa {{.*#+}} xmm2 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15]
+; SSSE3-NEXT: pshufb %xmm2, %xmm1
+; SSSE3-NEXT: pshufb %xmm2, %xmm0
+; SSSE3-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; SSSE3-NEXT: retq
+;
+; SSE41-LABEL: trunc2x4i32:
+; SSE41: # BB#0: # %entry
+; SSE41-NEXT: movdqa {{.*#+}} xmm2 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15]
+; SSE41-NEXT: pshufb %xmm2, %xmm1
+; SSE41-NEXT: pshufb %xmm2, %xmm0
+; SSE41-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; SSE41-NEXT: retq
+;
+; AVX-LABEL: trunc2x4i32:
+; AVX: # BB#0: # %entry
+; AVX-NEXT: vmovdqa {{.*#+}} xmm2 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15]
+; AVX-NEXT: vpshufb %xmm2, %xmm1, %xmm1
+; AVX-NEXT: vpshufb %xmm2, %xmm0, %xmm0
+; AVX-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; AVX-NEXT: retq
+
+entry:
+ %0 = trunc <4 x i32> %a to <4 x i16>
+ %1 = trunc <4 x i32> %b to <4 x i16>
+ %2 = shufflevector <4 x i16> %0, <4 x i16> %1, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+ ret <8 x i16> %2
+}
+
+; PR15524 http://llvm.org/bugs/show_bug.cgi?id=15524
+define i64 @trunc4i32(<4 x i32> %inval) {
+; SSE2-LABEL: trunc4i32:
+; SSE2: # BB#0: # %entry
+; SSE2-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,2,2,3,4,5,6,7]
+; SSE2-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,6,6,7]
+; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
+; SSE2-NEXT: movd %xmm0, %rax
+; SSE2-NEXT: retq
+;
+; SSSE3-LABEL: trunc4i32:
+; SSSE3: # BB#0: # %entry
+; SSSE3-NEXT: pshufb {{.*#+}} xmm0 = xmm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15]
+; SSSE3-NEXT: movd %xmm0, %rax
+; SSSE3-NEXT: retq
+;
+; SSE41-LABEL: trunc4i32:
+; SSE41: # BB#0: # %entry
+; SSE41-NEXT: pshufb {{.*#+}} xmm0 = xmm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15]
+; SSE41-NEXT: movd %xmm0, %rax
+; SSE41-NEXT: retq
+;
+; AVX-LABEL: trunc4i32:
+; AVX: # BB#0: # %entry
+; AVX-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15]
+; AVX-NEXT: vmovq %xmm0, %rax
+; AVX-NEXT: retq
+
+entry:
+ %0 = trunc <4 x i32> %inval to <4 x i16>
+ %1 = bitcast <4 x i16> %0 to i64
+ ret i64 %1
+}
+
+define <16 x i8> @trunc2x8i16(<8 x i16> %a, <8 x i16> %b) {
+; SSE2-LABEL: trunc2x8i16:
+; SSE2: # BB#0: # %entry
+; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [255,255,255,255,255,255,255,255]
+; SSE2-NEXT: pand %xmm2, %xmm1
+; SSE2-NEXT: pand %xmm2, %xmm0
+; SSE2-NEXT: packuswb %xmm1, %xmm0
+; SSE2-NEXT: retq
+;
+; SSSE3-LABEL: trunc2x8i16:
+; SSSE3: # BB#0: # %entry
+; SSSE3-NEXT: movdqa {{.*#+}} xmm2 = <0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u>
+; SSSE3-NEXT: pshufb %xmm2, %xmm1
+; SSSE3-NEXT: pshufb %xmm2, %xmm0
+; SSSE3-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; SSSE3-NEXT: retq
+;
+; SSE41-LABEL: trunc2x8i16:
+; SSE41: # BB#0: # %entry
+; SSE41-NEXT: movdqa {{.*#+}} xmm2 = <0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u>
+; SSE41-NEXT: pshufb %xmm2, %xmm1
+; SSE41-NEXT: pshufb %xmm2, %xmm0
+; SSE41-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; SSE41-NEXT: retq
+;
+; AVX-LABEL: trunc2x8i16:
+; AVX: # BB#0: # %entry
+; AVX-NEXT: vmovdqa {{.*#+}} xmm2 = <0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u>
+; AVX-NEXT: vpshufb %xmm2, %xmm1, %xmm1
+; AVX-NEXT: vpshufb %xmm2, %xmm0, %xmm0
+; AVX-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; AVX-NEXT: retq
+
+entry:
+ %0 = trunc <8 x i16> %a to <8 x i8>
+ %1 = trunc <8 x i16> %b to <8 x i8>
+ %2 = shufflevector <8 x i8> %0, <8 x i8> %1, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ ret <16 x i8> %2
+}
+
+; PR15524 http://llvm.org/bugs/show_bug.cgi?id=15524
+define i64 @trunc8i16(<8 x i16> %inval) {
+; SSE2-LABEL: trunc8i16:
+; SSE2: # BB#0: # %entry
+; SSE2-NEXT: pand {{.*}}(%rip), %xmm0
+; SSE2-NEXT: packuswb %xmm0, %xmm0
+; SSE2-NEXT: movd %xmm0, %rax
+; SSE2-NEXT: retq
+;
+; SSSE3-LABEL: trunc8i16:
+; SSSE3: # BB#0: # %entry
+; SSSE3-NEXT: pshufb {{.*#+}} xmm0 = xmm0[0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u]
+; SSSE3-NEXT: movd %xmm0, %rax
+; SSSE3-NEXT: retq
+;
+; SSE41-LABEL: trunc8i16:
+; SSE41: # BB#0: # %entry
+; SSE41-NEXT: pshufb {{.*#+}} xmm0 = xmm0[0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u]
+; SSE41-NEXT: movd %xmm0, %rax
+; SSE41-NEXT: retq
+;
+; AVX-LABEL: trunc8i16:
+; AVX: # BB#0: # %entry
+; AVX-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u]
+; AVX-NEXT: vmovq %xmm0, %rax
+; AVX-NEXT: retq
+
+entry:
+ %0 = trunc <8 x i16> %inval to <8 x i8>
+ %1 = bitcast <8 x i8> %0 to i64
+ ret i64 %1
+}
diff --git a/test/CodeGen/X86/vector-zext.ll b/test/CodeGen/X86/vector-zext.ll
index afd7a24..568687d 100644
--- a/test/CodeGen/X86/vector-zext.ll
+++ b/test/CodeGen/X86/vector-zext.ll
@@ -7,47 +7,43 @@
define <8 x i32> @zext_8i16_to_8i32(<8 x i16> %A) nounwind uwtable readnone ssp {
; SSE2-LABEL: zext_8i16_to_8i32:
; SSE2: # BB#0: # %entry
-; SSE2-NEXT: movdqa %xmm0, %xmm2
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3]
-; SSE2-NEXT: movdqa {{.*#+}} xmm1 = [65535,65535,65535,65535]
-; SSE2-NEXT: pand %xmm1, %xmm2
-; SSE2-NEXT: punpckhwd {{.*#+}} xmm0 = xmm0[4,4,5,5,6,6,7,7]
-; SSE2-NEXT: pand %xmm0, %xmm1
-; SSE2-NEXT: movdqa %xmm2, %xmm0
+; SSE2-NEXT: movdqa %xmm0, %xmm1
+; SSE2-NEXT: pxor %xmm2, %xmm2
+; SSE2-NEXT: # kill
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3]
+; SSE2-NEXT: punpckhwd {{.*#+}} xmm1 = xmm1[4,4,5,5,6,6,7,7]
+; SSE2-NEXT: pand .LCPI0_0(%rip), %xmm1
; SSE2-NEXT: retq
;
; SSSE3-LABEL: zext_8i16_to_8i32:
; SSSE3: # BB#0: # %entry
-; SSSE3-NEXT: movdqa %xmm0, %xmm2
-; SSSE3-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3]
-; SSSE3-NEXT: movdqa {{.*#+}} xmm1 = [65535,65535,65535,65535]
-; SSSE3-NEXT: pand %xmm1, %xmm2
-; SSSE3-NEXT: punpckhwd {{.*#+}} xmm0 = xmm0[4,4,5,5,6,6,7,7]
-; SSSE3-NEXT: pand %xmm0, %xmm1
-; SSSE3-NEXT: movdqa %xmm2, %xmm0
+; SSSE3-NEXT: movdqa %xmm0, %xmm1
+; SSSE3-NEXT: pxor %xmm2, %xmm2
+; SSSE3-NEXT: # kill
+; SSSE3-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3]
+; SSSE3-NEXT: punpckhwd {{.*#+}} xmm1 = xmm1[4,4,5,5,6,6,7,7]
+; SSSE3-NEXT: pand .LCPI0_0(%rip), %xmm1
; SSSE3-NEXT: retq
;
; SSE41-LABEL: zext_8i16_to_8i32:
; SSE41: # BB#0: # %entry
-; SSE41-NEXT: pmovzxwd %xmm0, %xmm2
-; SSE41-NEXT: movdqa {{.*#+}} xmm1 = [65535,65535,65535,65535]
-; SSE41-NEXT: pand %xmm1, %xmm2
-; SSE41-NEXT: punpckhwd {{.*#+}} xmm0 = xmm0[4,4,5,5,6,6,7,7]
-; SSE41-NEXT: pand %xmm0, %xmm1
-; SSE41-NEXT: movdqa %xmm2, %xmm0
+; SSE41-NEXT: movdqa %xmm0, %xmm1
+; SSE41-NEXT: pmovzxwd {{.*#+}} xmm0 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero
+; SSE41-NEXT: punpckhwd {{.*#+}} xmm1 = xmm1[4,4,5,5,6,6,7,7]
+; SSE41-NEXT: pand .LCPI0_0(%rip), %xmm1
; SSE41-NEXT: retq
;
; AVX1-LABEL: zext_8i16_to_8i32:
; AVX1: # BB#0: # %entry
; AVX1-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX1-NEXT: vpunpckhwd {{.*#+}} xmm1 = xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
-; AVX1-NEXT: vpmovzxwd %xmm0, %xmm0
+; AVX1-NEXT: vpmovzxwd {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: zext_8i16_to_8i32:
; AVX2: # BB#0: # %entry
-; AVX2-NEXT: vpmovzxwd %xmm0, %ymm0
+; AVX2-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
; AVX2-NEXT: retq
entry:
%B = zext <8 x i16> %A to <8 x i32>
@@ -77,7 +73,7 @@ define <4 x i64> @zext_4i32_to_4i64(<4 x i32> %A) nounwind uwtable readnone ssp
;
; SSE41-LABEL: zext_4i32_to_4i64:
; SSE41: # BB#0: # %entry
-; SSE41-NEXT: pmovzxdq %xmm0, %xmm2
+; SSE41-NEXT: pmovzxdq {{.*#+}} xmm2 = xmm0[0],zero,xmm0[1],zero
; SSE41-NEXT: movdqa {{.*#+}} xmm3 = [4294967295,4294967295]
; SSE41-NEXT: pand %xmm3, %xmm2
; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,2,3,3]
@@ -89,13 +85,13 @@ define <4 x i64> @zext_4i32_to_4i64(<4 x i32> %A) nounwind uwtable readnone ssp
; AVX1: # BB#0: # %entry
; AVX1-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX1-NEXT: vpunpckhdq {{.*#+}} xmm1 = xmm0[2],xmm1[2],xmm0[3],xmm1[3]
-; AVX1-NEXT: vpmovzxdq %xmm0, %xmm0
+; AVX1-NEXT: vpmovzxdq {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero
; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: zext_4i32_to_4i64:
; AVX2: # BB#0: # %entry
-; AVX2-NEXT: vpmovzxdq %xmm0, %ymm0
+; AVX2-NEXT: vpmovzxdq {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
; AVX2-NEXT: retq
entry:
%B = zext <4 x i32> %A to <4 x i64>
@@ -127,7 +123,7 @@ define <8 x i32> @zext_8i8_to_8i32(<8 x i8> %z) {
;
; SSE41-LABEL: zext_8i8_to_8i32:
; SSE41: # BB#0: # %entry
-; SSE41-NEXT: pmovzxwd %xmm0, %xmm2
+; SSE41-NEXT: pmovzxwd {{.*#+}} xmm2 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
; SSE41-NEXT: movdqa {{.*#+}} xmm1 = [255,255,255,255]
; SSE41-NEXT: pand %xmm1, %xmm2
; SSE41-NEXT: punpckhwd {{.*#+}} xmm0 = xmm0[4,4,5,5,6,6,7,7]
@@ -137,7 +133,7 @@ define <8 x i32> @zext_8i8_to_8i32(<8 x i8> %z) {
;
; AVX1-LABEL: zext_8i8_to_8i32:
; AVX1: # BB#0: # %entry
-; AVX1-NEXT: vpmovzxwd %xmm0, %xmm1
+; AVX1-NEXT: vpmovzxwd {{.*#+}} xmm1 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
; AVX1-NEXT: vpunpckhwd {{.*#+}} xmm0 = xmm0[4,4,5,5,6,6,7,7]
; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
; AVX1-NEXT: vandps {{.*}}(%rip), %ymm0, %ymm0
@@ -145,7 +141,7 @@ define <8 x i32> @zext_8i8_to_8i32(<8 x i8> %z) {
;
; AVX2-LABEL: zext_8i8_to_8i32:
; AVX2: # BB#0: # %entry
-; AVX2-NEXT: vpmovzxwd %xmm0, %ymm0
+; AVX2-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
; AVX2-NEXT: vpbroadcastd {{.*}}(%rip), %ymm1
; AVX2-NEXT: vpand %ymm1, %ymm0, %ymm0
; AVX2-NEXT: retq
@@ -158,49 +154,324 @@ entry:
define <16 x i16> @zext_16i8_to_16i16(<16 x i8> %z) {
; SSE2-LABEL: zext_16i8_to_16i16:
; SSE2: # BB#0: # %entry
-; SSE2-NEXT: movdqa %xmm0, %xmm2
-; SSE2-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3],xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7]
-; SSE2-NEXT: movdqa {{.*#+}} xmm1 = [255,255,255,255,255,255,255,255]
-; SSE2-NEXT: pand %xmm1, %xmm2
-; SSE2-NEXT: punpckhbw {{.*#+}} xmm0 = xmm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
-; SSE2-NEXT: pand %xmm0, %xmm1
-; SSE2-NEXT: movdqa %xmm2, %xmm0
+; SSE2-NEXT: movdqa %xmm0, %xmm1
+; SSE2-NEXT: pxor %xmm2, %xmm2
+; SSE2-NEXT: # kill
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3],xmm0[4],xmm2[4],xmm0[5],xmm2[5],xmm0[6],xmm2[6],xmm0[7],xmm2[7]
+; SSE2-NEXT: punpckhbw {{.*#+}} xmm1 = xmm1[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
+; SSE2-NEXT: pand .LCPI3_0(%rip), %xmm1
; SSE2-NEXT: retq
;
; SSSE3-LABEL: zext_16i8_to_16i16:
; SSSE3: # BB#0: # %entry
-; SSSE3-NEXT: movdqa %xmm0, %xmm2
-; SSSE3-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3],xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7]
-; SSSE3-NEXT: movdqa {{.*#+}} xmm1 = [255,255,255,255,255,255,255,255]
-; SSSE3-NEXT: pand %xmm1, %xmm2
-; SSSE3-NEXT: punpckhbw {{.*#+}} xmm0 = xmm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
-; SSSE3-NEXT: pand %xmm0, %xmm1
-; SSSE3-NEXT: movdqa %xmm2, %xmm0
+; SSSE3-NEXT: movdqa %xmm0, %xmm1
+; SSSE3-NEXT: pxor %xmm2, %xmm2
+; SSSE3-NEXT: # kill
+; SSSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3],xmm0[4],xmm2[4],xmm0[5],xmm2[5],xmm0[6],xmm2[6],xmm0[7],xmm2[7]
+; SSSE3-NEXT: punpckhbw {{.*#+}} xmm1 = xmm1[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
+; SSSE3-NEXT: pand .LCPI3_0(%rip), %xmm1
; SSSE3-NEXT: retq
;
; SSE41-LABEL: zext_16i8_to_16i16:
; SSE41: # BB#0: # %entry
-; SSE41-NEXT: pmovzxbw %xmm0, %xmm2
-; SSE41-NEXT: movdqa {{.*#+}} xmm1 = [255,255,255,255,255,255,255,255]
-; SSE41-NEXT: pand %xmm1, %xmm2
-; SSE41-NEXT: punpckhbw {{.*#+}} xmm0 = xmm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
-; SSE41-NEXT: pand %xmm0, %xmm1
-; SSE41-NEXT: movdqa %xmm2, %xmm0
+; SSE41-NEXT: movdqa %xmm0, %xmm1
+; SSE41-NEXT: pmovzxbw %xmm1, %xmm0 {{.*#+}} xmm0 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
+; SSE41-NEXT: punpckhbw {{.*#+}} xmm1 = xmm1[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
+; SSE41-NEXT: pand .LCPI3_0(%rip), %xmm1
; SSE41-NEXT: retq
;
; AVX1-LABEL: zext_16i8_to_16i16:
; AVX1: # BB#0: # %entry
; AVX1-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX1-NEXT: vpunpckhbw {{.*#+}} xmm1 = xmm0[8],xmm1[8],xmm0[9],xmm1[9],xmm0[10],xmm1[10],xmm0[11],xmm1[11],xmm0[12],xmm1[12],xmm0[13],xmm1[13],xmm0[14],xmm1[14],xmm0[15],xmm1[15]
-; AVX1-NEXT: vpmovzxbw %xmm0, %xmm0
+; AVX1-NEXT: vpmovzxbw {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: zext_16i8_to_16i16:
; AVX2: # BB#0: # %entry
-; AVX2-NEXT: vpmovzxbw %xmm0, %ymm0
+; AVX2-NEXT: vpmovzxbw {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero
; AVX2-NEXT: retq
entry:
%t = zext <16 x i8> %z to <16 x i16>
ret <16 x i16> %t
}
+
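+; The load_zext tests below check that a zero-extending vector load is folded
+; into pmovzx* from memory on SSE4.1/AVX instead of a separate load plus an
+; unpack-with-zero sequence.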
+define <16 x i16> @load_zext_16i8_to_16i16(<16 x i8> *%ptr) {
+; SSE2-LABEL: load_zext_16i8_to_16i16:
+; SSE2: # BB#0: # %entry
+; SSE2-NEXT: movdqa (%rdi), %xmm1
+; SSE2-NEXT: pxor %xmm2, %xmm2
+; SSE2-NEXT: movdqa %xmm1, %xmm0
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3],xmm0[4],xmm2[4],xmm0[5],xmm2[5],xmm0[6],xmm2[6],xmm0[7],xmm2[7]
+; SSE2-NEXT: punpckhbw {{.*#+}} xmm1 = xmm1[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
+; SSE2-NEXT: pand .LCPI4_0(%rip), %xmm1
+; SSE2-NEXT: retq
+;
+; SSSE3-LABEL: load_zext_16i8_to_16i16:
+; SSSE3: # BB#0: # %entry
+; SSSE3-NEXT: movdqa (%rdi), %xmm1
+; SSSE3-NEXT: pxor %xmm2, %xmm2
+; SSSE3-NEXT: movdqa %xmm1, %xmm0
+; SSSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3],xmm0[4],xmm2[4],xmm0[5],xmm2[5],xmm0[6],xmm2[6],xmm0[7],xmm2[7]
+; SSSE3-NEXT: punpckhbw {{.*#+}} xmm1 = xmm1[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
+; SSSE3-NEXT: pand .LCPI4_0(%rip), %xmm1
+; SSSE3-NEXT: retq
+;
+; SSE41-LABEL: load_zext_16i8_to_16i16:
+; SSE41: # BB#0: # %entry
+; SSE41-NEXT: pmovzxbw {{.*#+}} xmm0 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero
+; SSE41-NEXT: pmovzxbw {{.*#+}} xmm1 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero
+; SSE41-NEXT: retq
+;
+; AVX1-LABEL: load_zext_16i8_to_16i16:
+; AVX1: # BB#0: # %entry
+; AVX1-NEXT: vpmovzxbw {{.*#+}} xmm0 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero
+; AVX1-NEXT: vpmovzxbw {{.*#+}} xmm1 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero
+; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: load_zext_16i8_to_16i16:
+; AVX2: # BB#0: # %entry
+; AVX2-NEXT: vpmovzxbw {{.*#+}} ymm0 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero,mem[8],zero,mem[9],zero,mem[10],zero,mem[11],zero,mem[12],zero,mem[13],zero,mem[14],zero,mem[15],zero
+; AVX2-NEXT: retq
+entry:
+ %X = load <16 x i8>* %ptr
+ %Y = zext <16 x i8> %X to <16 x i16>
+ ret <16 x i16> %Y
+}
+
+define <8 x i32> @load_zext_8i16_to_8i32(<8 x i16> *%ptr) {
+; SSE2-LABEL: load_zext_8i16_to_8i32:
+; SSE2: # BB#0: # %entry
+; SSE2-NEXT: movdqa (%rdi), %xmm1
+; SSE2-NEXT: pxor %xmm2, %xmm2
+; SSE2-NEXT: movdqa %xmm1, %xmm0
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3]
+; SSE2-NEXT: punpckhwd {{.*#+}} xmm1 = xmm1[4,4,5,5,6,6,7,7]
+; SSE2-NEXT: pand .LCPI5_0(%rip), %xmm1
+; SSE2-NEXT: retq
+;
+; SSSE3-LABEL: load_zext_8i16_to_8i32:
+; SSSE3: # BB#0: # %entry
+; SSSE3-NEXT: movdqa (%rdi), %xmm1
+; SSSE3-NEXT: pxor %xmm2, %xmm2
+; SSSE3-NEXT: movdqa %xmm1, %xmm0
+; SSSE3-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3]
+; SSSE3-NEXT: punpckhwd {{.*#+}} xmm1 = xmm1[4,4,5,5,6,6,7,7]
+; SSSE3-NEXT: pand .LCPI5_0(%rip), %xmm1
+; SSSE3-NEXT: retq
+;
+; SSE41-LABEL: load_zext_8i16_to_8i32:
+; SSE41: # BB#0: # %entry
+; SSE41-NEXT: pmovzxwd {{.*#+}} xmm0 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero
+; SSE41-NEXT: pmovzxwd {{.*#+}} xmm1 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero
+; SSE41-NEXT: retq
+;
+; AVX1-LABEL: load_zext_8i16_to_8i32:
+; AVX1: # BB#0: # %entry
+; AVX1-NEXT: vpmovzxwd {{.*#+}} xmm0 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero
+; AVX1-NEXT: vpmovzxwd {{.*#+}} xmm1 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero
+; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: load_zext_8i16_to_8i32:
+; AVX2: # BB#0: # %entry
+; AVX2-NEXT: vpmovzxwd {{.*#+}} ymm0 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero
+; AVX2-NEXT: retq
+entry:
+ %X = load <8 x i16>* %ptr
+ %Y = zext <8 x i16> %X to <8 x i32>
+ ret <8 x i32>%Y
+}
+
+define <4 x i64> @load_zext_4i32_to_4i64(<4 x i32> *%ptr) {
+; SSE2-LABEL: load_zext_4i32_to_4i64:
+; SSE2: # BB#0: # %entry
+; SSE2-NEXT: movdqa (%rdi), %xmm1
+; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm1[0,1,1,3]
+; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [4294967295,4294967295]
+; SSE2-NEXT: pand %xmm2, %xmm0
+; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[2,2,3,3]
+; SSE2-NEXT: pand %xmm2, %xmm1
+; SSE2-NEXT: retq
+;
+; SSSE3-LABEL: load_zext_4i32_to_4i64:
+; SSSE3: # BB#0: # %entry
+; SSSE3-NEXT: movdqa (%rdi), %xmm1
+; SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm1[0,1,1,3]
+; SSSE3-NEXT: movdqa {{.*#+}} xmm2 = [4294967295,4294967295]
+; SSSE3-NEXT: pand %xmm2, %xmm0
+; SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm1[2,2,3,3]
+; SSSE3-NEXT: pand %xmm2, %xmm1
+; SSSE3-NEXT: retq
+;
+; SSE41-LABEL: load_zext_4i32_to_4i64:
+; SSE41: # BB#0: # %entry
+; SSE41-NEXT: pmovzxdq {{.*#+}} xmm0 = mem[0],zero,mem[1],zero
+; SSE41-NEXT: pmovzxdq {{.*#+}} xmm1 = mem[0],zero,mem[1],zero
+; SSE41-NEXT: retq
+;
+; AVX1-LABEL: load_zext_4i32_to_4i64:
+; AVX1: # BB#0: # %entry
+; AVX1-NEXT: vpmovzxdq {{.*#+}} xmm0 = mem[0],zero,mem[1],zero
+; AVX1-NEXT: vpmovzxdq {{.*#+}} xmm1 = mem[0],zero,mem[1],zero
+; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: load_zext_4i32_to_4i64:
+; AVX2: # BB#0: # %entry
+; AVX2-NEXT: vpmovzxdq {{.*#+}} ymm0 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero
+; AVX2-NEXT: retq
+entry:
+ %X = load <4 x i32>* %ptr
+ %Y = zext <4 x i32> %X to <4 x i64>
+ ret <4 x i64>%Y
+}
+
+define <8 x i32> @shuf_zext_8i16_to_8i32(<8 x i16> %A) nounwind uwtable readnone ssp {
+; SSE2-LABEL: shuf_zext_8i16_to_8i32:
+; SSE2: # BB#0: # %entry
+; SSE2-NEXT: movdqa %xmm0, %xmm1
+; SSE2-NEXT: pxor %xmm2, %xmm2
+; SSE2-NEXT: # kill
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3]
+; SSE2-NEXT: punpckhwd {{.*#+}} xmm1 = xmm1[4],xmm2[4],xmm1[5],xmm2[5],xmm1[6],xmm2[6],xmm1[7],xmm2[7]
+; SSE2-NEXT: retq
+;
+; SSSE3-LABEL: shuf_zext_8i16_to_8i32:
+; SSSE3: # BB#0: # %entry
+; SSSE3-NEXT: movdqa %xmm0, %xmm1
+; SSSE3-NEXT: pxor %xmm2, %xmm2
+; SSSE3-NEXT: # kill
+; SSSE3-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3]
+; SSSE3-NEXT: punpckhwd {{.*#+}} xmm1 = xmm1[4],xmm2[4],xmm1[5],xmm2[5],xmm1[6],xmm2[6],xmm1[7],xmm2[7]
+; SSSE3-NEXT: retq
+;
+; SSE41-LABEL: shuf_zext_8i16_to_8i32:
+; SSE41: # BB#0: # %entry
+; SSE41-NEXT: movdqa %xmm0, %xmm1
+; SSE41-NEXT: pxor %xmm2, %xmm2
+; SSE41-NEXT: pmovzxwd {{.*#+}} xmm0 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero
+; SSE41-NEXT: punpckhwd {{.*#+}} xmm1 = xmm1[4],xmm2[4],xmm1[5],xmm2[5],xmm1[6],xmm2[6],xmm1[7],xmm2[7]
+; SSE41-NEXT: retq
+;
+; AVX1-LABEL: shuf_zext_8i16_to_8i32:
+; AVX1: # BB#0: # %entry
+; AVX1-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; AVX1-NEXT: vpunpckhwd {{.*#+}} xmm1 = xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
+; AVX1-NEXT: vpmovzxwd {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
+; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: shuf_zext_8i16_to_8i32:
+; AVX2: # BB#0: # %entry
+; AVX2-NEXT: # kill
+; AVX2-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
+; AVX2-NEXT: retq
+entry:
+ %B = shufflevector <8 x i16> %A, <8 x i16> zeroinitializer, <16 x i32> <i32 0, i32 8, i32 1, i32 8, i32 2, i32 8, i32 3, i32 8, i32 4, i32 8, i32 5, i32 8, i32 6, i32 8, i32 7, i32 8>
+ %Z = bitcast <16 x i16> %B to <8 x i32>
+ ret <8 x i32> %Z
+}
+
+define <4 x i64> @shuf_zext_4i32_to_4i64(<4 x i32> %A) nounwind uwtable readnone ssp {
+; SSE2-LABEL: shuf_zext_4i32_to_4i64:
+; SSE2: # BB#0: # %entry
+; SSE2-NEXT: movdqa %xmm0, %xmm1
+; SSE2-NEXT: pxor %xmm2, %xmm2
+; SSE2-NEXT: # kill
+; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
+; SSE2-NEXT: punpckhdq {{.*#+}} xmm1 = xmm1[2],xmm2[2],xmm1[3],xmm2[3]
+; SSE2-NEXT: retq
+;
+; SSSE3-LABEL: shuf_zext_4i32_to_4i64:
+; SSSE3: # BB#0: # %entry
+; SSSE3-NEXT: movdqa %xmm0, %xmm1
+; SSSE3-NEXT: pxor %xmm2, %xmm2
+; SSSE3-NEXT: # kill
+; SSSE3-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
+; SSSE3-NEXT: punpckhdq {{.*#+}} xmm1 = xmm1[2],xmm2[2],xmm1[3],xmm2[3]
+; SSSE3-NEXT: retq
+;
+; SSE41-LABEL: shuf_zext_4i32_to_4i64:
+; SSE41: # BB#0: # %entry
+; SSE41-NEXT: movdqa %xmm0, %xmm1
+; SSE41-NEXT: pxor %xmm2, %xmm2
+; SSE41-NEXT: pmovzxdq {{.*#+}} xmm0 = xmm1[0],zero,xmm1[1],zero
+; SSE41-NEXT: punpckhdq {{.*#+}} xmm1 = xmm1[2],xmm2[2],xmm1[3],xmm2[3]
+; SSE41-NEXT: retq
+;
+; AVX1-LABEL: shuf_zext_4i32_to_4i64:
+; AVX1: # BB#0: # %entry
+; AVX1-NEXT: vinsertps {{.*#+}} xmm1 = xmm0[0],zero,xmm0[1],zero
+; AVX1-NEXT: vxorpd %xmm2, %xmm2, %xmm2
+; AVX1-NEXT: vblendpd {{.*#+}} xmm0 = xmm2[0],xmm0[1]
+; AVX1-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[2,0,3,0]
+; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: shuf_zext_4i32_to_4i64:
+; AVX2: # BB#0: # %entry
+; AVX2-NEXT: # kill
+; AVX2-NEXT: vpmovzxdq {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
+; AVX2-NEXT: retq
+entry:
+ %B = shufflevector <4 x i32> %A, <4 x i32> zeroinitializer, <8 x i32> <i32 0, i32 4, i32 1, i32 4, i32 2, i32 4, i32 3, i32 4>
+ %Z = bitcast <8 x i32> %B to <4 x i64>
+ ret <4 x i64> %Z
+}
+
+define <8 x i32> @shuf_zext_8i8_to_8i32(<8 x i8> %A) {
+; SSE2-LABEL: shuf_zext_8i8_to_8i32:
+; SSE2: # BB#0: # %entry
+; SSE2-NEXT: pand .LCPI9_0(%rip), %xmm0
+; SSE2-NEXT: packuswb %xmm0, %xmm0
+; SSE2-NEXT: pxor %xmm1, %xmm1
+; SSE2-NEXT: movdqa %xmm0, %xmm2
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3],xmm2[4],xmm1[4],xmm2[5],xmm1[5],xmm2[6],xmm1[6],xmm2[7],xmm1[7]
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3]
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
+; SSE2-NEXT: punpckhwd {{.*#+}} xmm0 = xmm0[4,4,5,5,6,6,7,7]
+; SSE2-NEXT: movdqa {{.*#+}} xmm1 = [0,255,255,255,0,255,255,255,0,255,255,255,0,255,255,255]
+; SSE2-NEXT: pandn %xmm0, %xmm1
+; SSE2-NEXT: movdqa %xmm2, %xmm0
+; SSE2-NEXT: retq
+;
+; SSSE3-LABEL: shuf_zext_8i8_to_8i32:
+; SSSE3: # BB#0: # %entry
+; SSSE3-NEXT: movdqa %xmm0, %xmm1
+; SSSE3-NEXT: pshufb {{.*#+}} xmm1 = xmm1[0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u]
+; SSSE3-NEXT: pxor %xmm2, %xmm2
+; SSSE3-NEXT: movdqa %xmm1, %xmm0
+; SSSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3],xmm0[4],xmm2[4],xmm0[5],xmm2[5],xmm0[6],xmm2[6],xmm0[7],xmm2[7]
+; SSSE3-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3]
+; SSSE3-NEXT: pshufb {{.*#+}} xmm1 = xmm1[4],zero,zero,zero,xmm1[5],zero,zero,zero,xmm1[6],zero,zero,zero,xmm1[7],zero,zero,zero
+; SSSE3-NEXT: retq
+;
+; SSE41-LABEL: shuf_zext_8i8_to_8i32:
+; SSE41: # BB#0: # %entry
+; SSE41-NEXT: movdqa %xmm0, %xmm1
+; SSE41-NEXT: pshufb {{.*#+}} xmm1 = xmm1[0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u]
+; SSE41-NEXT: pmovzxbd {{.*#+}} xmm0 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero,xmm1[2],zero,zero,zero,xmm1[3],zero,zero,zero
+; SSE41-NEXT: pshufb {{.*#+}} xmm1 = xmm1[4],zero,zero,zero,xmm1[5],zero,zero,zero,xmm1[6],zero,zero,zero,xmm1[7],zero,zero,zero
+; SSE41-NEXT: retq
+;
+; AVX1-LABEL: shuf_zext_8i8_to_8i32:
+; AVX1: # BB#0: # %entry
+; AVX1-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u]
+; AVX1-NEXT: vpmovzxbd {{.*#+}} xmm1 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
+; AVX1-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero
+; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: shuf_zext_8i8_to_8i32:
+; AVX2: # BB#0: # %entry
+; AVX2-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u]
+; AVX2-NEXT: vpmovzxbd {{.*#+}} ymm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero
+; AVX2-NEXT: retq
+entry:
+ %B = shufflevector <8 x i8> %A, <8 x i8> zeroinitializer, <32 x i32> <i32 0, i32 8, i32 8, i32 8, i32 1, i32 8, i32 8, i32 8, i32 2, i32 8, i32 8, i32 8, i32 3, i32 8, i32 8, i32 8, i32 4, i32 8, i32 8, i32 8, i32 5, i32 8, i32 8, i32 8, i32 6, i32 8, i32 8, i32 8, i32 7, i32 8, i32 8, i32 8>
+ %Z = bitcast <32 x i8> %B to <8 x i32>
+ ret <8 x i32> %Z
+}
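
; Illustrative sketch, not part of the patch: the shuf_zext tests above rely
; on the identity that interleaving a vector with zero lanes and bitcasting to
; the wider element type equals a plain zext on a little-endian target, so
; both forms below compute the same value and should select vpmovzxdq.
define <4 x i64> @shuf_zext_equiv(<4 x i32> %A) {
entry:
  %B = shufflevector <4 x i32> %A, <4 x i32> zeroinitializer, <8 x i32> <i32 0, i32 4, i32 1, i32 4, i32 2, i32 4, i32 3, i32 4>
  %Z1 = bitcast <8 x i32> %B to <4 x i64>  ; shuffle-with-zero + bitcast form
  %Z2 = zext <4 x i32> %A to <4 x i64>     ; direct zext form, same value
  ret <4 x i64> %Z2
}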
diff --git a/test/CodeGen/X86/vector-zmov.ll b/test/CodeGen/X86/vector-zmov.ll
new file mode 100644
index 0000000..4de2543
--- /dev/null
+++ b/test/CodeGen/X86/vector-zmov.ll
@@ -0,0 +1,37 @@
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=x86-64 -mattr=+sse2 | FileCheck %s --check-prefix=SSE --check-prefix=SSE2
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=x86-64 -mattr=+ssse3 | FileCheck %s --check-prefix=SSE --check-prefix=SSSE3
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=x86-64 -mattr=+sse4.1 | FileCheck %s --check-prefix=SSE --check-prefix=SSE41
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=x86-64 -mattr=+avx | FileCheck %s --check-prefix=AVX --check-prefix=AVX1
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=x86-64 -mattr=+avx2 | FileCheck %s --check-prefix=AVX --check-prefix=AVX2
+
+define <4 x i32> @load_zmov_4i32_to_0zzz(<4 x i32> *%ptr) {
+; SSE-LABEL: load_zmov_4i32_to_0zzz:
+; SSE: # BB#0: # %entry
+; SSE-NEXT: movd (%rdi), %xmm0
+; SSE-NEXT: retq
+
+; AVX-LABEL: load_zmov_4i32_to_0zzz:
+; AVX: # BB#0: # %entry
+; AVX-NEXT: vmovd (%rdi), %xmm0
+; AVX-NEXT: retq
+entry:
+ %X = load <4 x i32>* %ptr
+ %Y = shufflevector <4 x i32> %X, <4 x i32> zeroinitializer, <4 x i32> <i32 0, i32 4, i32 4, i32 4>
+ ret <4 x i32>%Y
+}
+
+define <2 x i64> @load_zmov_2i64_to_0z(<2 x i64> *%ptr) {
+; SSE-LABEL: load_zmov_2i64_to_0z:
+; SSE: # BB#0: # %entry
+; SSE-NEXT: movq (%rdi), %xmm0
+; SSE-NEXT: retq
+
+; AVX-LABEL: load_zmov_2i64_to_0z:
+; AVX: # BB#0: # %entry
+; AVX-NEXT: vmovq (%rdi), %xmm0
+; AVX-NEXT: retq
+entry:
+ %X = load <2 x i64>* %ptr
+ %Y = shufflevector <2 x i64> %X, <2 x i64> zeroinitializer, <2 x i32> <i32 0, i32 2>
+ ret <2 x i64>%Y
+}
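
; Illustrative sketch, not part of the patch: the shuffles above keep only
; lane zero and zero out the remaining lanes, so each pattern is semantically
; a zero-extending scalar load, which is why a single movd/movq suffices.
define <2 x i64> @zmov_equiv(i64* %p) {
entry:
  %s = load i64* %p
  %v = insertelement <2 x i64> zeroinitializer, i64 %s, i32 0
  ret <2 x i64> %v
}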
diff --git a/test/CodeGen/X86/viabs.ll b/test/CodeGen/X86/viabs.ll
index d9f2cb0..c009235 100644
--- a/test/CodeGen/X86/viabs.ll
+++ b/test/CodeGen/X86/viabs.ll
@@ -1,7 +1,7 @@
-; RUN: llc < %s -march=x86-64 -mcpu=x86-64 | FileCheck %s -check-prefix=SSE2
-; RUN: llc < %s -march=x86-64 -mcpu=corei7 | FileCheck %s -check-prefix=SSSE3
-; RUN: llc < %s -march=x86-64 -mcpu=core-avx2 | FileCheck %s -check-prefix=AVX2
-; RUN: llc < %s -march=x86-64 -mcpu=knl | FileCheck %s -check-prefix=AVX512
+; RUN: llc < %s -march=x86-64 -mattr=sse2 | FileCheck %s -check-prefix=SSE2
+; RUN: llc < %s -march=x86-64 -mattr=ssse3 | FileCheck %s -check-prefix=SSSE3
+; RUN: llc < %s -march=x86-64 -mattr=avx2 | FileCheck %s -check-prefix=AVX2
+; RUN: llc < %s -march=x86-64 -mattr=avx512f | FileCheck %s -check-prefix=AVX512
define <4 x i32> @test1(<4 x i32> %a) nounwind {
; SSE2-LABEL: test1:
diff --git a/test/CodeGen/X86/vselect-2.ll b/test/CodeGen/X86/vselect-2.ll
index 50da32c..fe4cfba 100644
--- a/test/CodeGen/X86/vselect-2.ll
+++ b/test/CodeGen/X86/vselect-2.ll
@@ -1,33 +1,60 @@
-; RUN: llc < %s -mtriple=x86_64-unknown-linux-gnu -mcpu=corei7 -mattr=sse2 | FileCheck %s
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse2 | FileCheck %s --check-prefix=SSE2
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse4.1 | FileCheck %s --check-prefix=SSE41
define <4 x i32> @test1(<4 x i32> %A, <4 x i32> %B) {
+; SSE2-LABEL: test1:
+; SSE2: # BB#0:
+; SSE2-NEXT: movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1]
+; SSE2-NEXT: movapd %xmm1, %xmm0
+; SSE2-NEXT: retq
+;
+; SSE41-LABEL: test1:
+; SSE41: # BB#0:
+; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm1[4,5,6,7]
+; SSE41-NEXT: retq
%select = select <4 x i1><i1 true, i1 true, i1 false, i1 false>, <4 x i32> %A, <4 x i32> %B
ret <4 x i32> %select
}
-; CHECK-LABEL: test1
-; CHECK: movsd
-; CHECK: ret
define <4 x i32> @test2(<4 x i32> %A, <4 x i32> %B) {
+; SSE2-LABEL: test2:
+; SSE2: # BB#0:
+; SSE2-NEXT: movsd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
+; SSE2-NEXT: retq
+;
+; SSE41-LABEL: test2:
+; SSE41: # BB#0:
+; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm1[0,1,2,3],xmm0[4,5,6,7]
+; SSE41-NEXT: retq
%select = select <4 x i1><i1 false, i1 false, i1 true, i1 true>, <4 x i32> %A, <4 x i32> %B
ret <4 x i32> %select
}
-; CHECK-LABEL: test2
-; CHECK: movsd
-; CHECK-NEXT: ret
define <4 x float> @test3(<4 x float> %A, <4 x float> %B) {
+; SSE2-LABEL: test3:
+; SSE2: # BB#0:
+; SSE2-NEXT: movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1]
+; SSE2-NEXT: movapd %xmm1, %xmm0
+; SSE2-NEXT: retq
+;
+; SSE41-LABEL: test3:
+; SSE41: # BB#0:
+; SSE41-NEXT: blendpd {{.*#+}} xmm0 = xmm0[0],xmm1[1]
+; SSE41-NEXT: retq
%select = select <4 x i1><i1 true, i1 true, i1 false, i1 false>, <4 x float> %A, <4 x float> %B
ret <4 x float> %select
}
-; CHECK-LABEL: test3
-; CHECK: movsd
-; CHECK: ret
define <4 x float> @test4(<4 x float> %A, <4 x float> %B) {
+; SSE2-LABEL: test4:
+; SSE2: # BB#0:
+; SSE2-NEXT: movsd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
+; SSE2-NEXT: retq
+;
+; SSE41-LABEL: test4:
+; SSE41: # BB#0:
+; SSE41-NEXT: blendpd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
+; SSE41-NEXT: retq
%select = select <4 x i1><i1 false, i1 false, i1 true, i1 true>, <4 x float> %A, <4 x float> %B
ret <4 x float> %select
}
-; CHECK-LABEL: test4
-; CHECK: movsd
-; CHECK-NEXT: ret
diff --git a/test/CodeGen/X86/vselect-avx.ll b/test/CodeGen/X86/vselect-avx.ll
index 0c0f4bb..02a9ef4 100644
--- a/test/CodeGen/X86/vselect-avx.ll
+++ b/test/CodeGen/X86/vselect-avx.ll
@@ -59,19 +59,15 @@ bb:
;
; <rdar://problem/18819506>
-; Note: For now, hard code ORIG_MASK and SHRUNK_MASK registers, because we
-; cannot express that ORIG_MASK must not be equal to ORIG_MASK. Otherwise,
-; even a faulty pattern would pass!
-;
; CHECK-LABEL: test3:
-; Compute the original mask.
-; CHECK: vpcmpeqd {{%xmm[0-9]+}}, {{%xmm[0-9]+}}, [[ORIG_MASK:%xmm0]]
-; Shrink the bit of the mask.
-; CHECK-NEXT: vpslld $31, [[ORIG_MASK]], [[SHRUNK_MASK:%xmm3]]
-; Use the shrunk mask in the blend.
-; CHECK-NEXT: vblendvps [[SHRUNK_MASK]], %xmm{{[0-9]+}}, %xmm{{[0-9]+}}, %xmm{{[0-9]+}}
-; Use the original mask in the and.
-; CHECK-NEXT: vpand LCPI2_2(%rip), [[ORIG_MASK]], {{%xmm[0-9]+}}
+; Compute the mask.
+; CHECK: vpcmpeqd {{%xmm[0-9]+}}, {{%xmm[0-9]+}}, [[MASK:%xmm[0-9]+]]
+; Make sure the mask is not shrunk down to its sign bit.
+; CHECK-NOT: vpslld $31, [[MASK]], {{%xmm[0-9]+}}
+; Use the mask in the blend.
+; CHECK-NEXT: vblendvps [[MASK]], %xmm{{[0-9]+}}, %xmm{{[0-9]+}}, %xmm{{[0-9]+}}
+; Use the mask in the and.
+; CHECK-NEXT: vpand LCPI2_2(%rip), [[MASK]], {{%xmm[0-9]+}}
; CHECK: retq
define void @test3(<4 x i32> %induction30, <4 x i16>* %tmp16, <4 x i16>* %tmp17, <4 x i16> %tmp3, <4 x i16> %tmp12) {
%tmp6 = srem <4 x i32> %induction30, <i32 3, i32 3, i32 3, i32 3>
@@ -83,3 +79,14 @@ define void @test3(<4 x i32> %induction30, <4 x i16>* %tmp16, <4 x i16>* %tmp17,
store <4 x i16> %predphi, <4 x i16>* %tmp17, align 8
ret void
}
+
+; We shouldn't try to lower this directly using VSELECT, because vpblendvb is
+; only available with AVX2, not AVX1. Instead, it should be expanded.
+;
+; CHECK-LABEL: PR22706:
+; CHECK: vpcmpgtb
+; CHECK: vpcmpgtb
+define <32 x i8> @PR22706(<32 x i1> %x) {
+ %tmp = select <32 x i1> %x, <32 x i8> <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>, <32 x i8> <i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2>
+ ret <32 x i8> %tmp
+}
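
; Illustrative sketch, not part of the patch: without vpblendvb, a byte-wise
; vector select can be expanded by sign-extending the i1 mask to all-ones or
; all-zeros lanes and blending with and/xor/or; the two vpcmpgtb checked above
; (one per 128-bit half) come from materializing that lane mask on AVX1.
define <4 x i32> @select_expansion(<4 x i1> %c, <4 x i32> %a, <4 x i32> %b) {
entry:
  %m = sext <4 x i1> %c to <4 x i32>                          ; -1 where %c is true, 0 elsewhere
  %ta = and <4 x i32> %m, %a                                  ; lanes taken from %a
  %notm = xor <4 x i32> %m, <i32 -1, i32 -1, i32 -1, i32 -1>  ; invert the mask
  %tb = and <4 x i32> %notm, %b                               ; lanes taken from %b
  %r = or <4 x i32> %ta, %tb
  ret <4 x i32> %r
}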
diff --git a/test/CodeGen/X86/vselect-minmax.ll b/test/CodeGen/X86/vselect-minmax.ll
index 25189f2..3efe568 100644
--- a/test/CodeGen/X86/vselect-minmax.ll
+++ b/test/CodeGen/X86/vselect-minmax.ll
@@ -2,6 +2,8 @@
; RUN: llc -march=x86-64 -mcpu=corei7 < %s | FileCheck %s -check-prefix=SSE4
; RUN: llc -march=x86-64 -mcpu=corei7-avx < %s | FileCheck %s -check-prefix=AVX1
; RUN: llc -march=x86-64 -mcpu=core-avx2 -mattr=+avx2 < %s | FileCheck %s -check-prefix=AVX2
+; RUN: llc -march=x86-64 -mcpu=knl < %s | FileCheck %s -check-prefix=AVX2 -check-prefix=AVX512F
+; RUN: llc -march=x86-64 -mcpu=skx < %s | FileCheck %s -check-prefix=AVX512BW -check-prefix=AVX512VL -check-prefix=AVX512F
define void @test1(i8* nocapture %a, i8* nocapture %b) nounwind {
vector.ph:
@@ -33,6 +35,9 @@ for.end: ; preds = %vector.body
; AVX2-LABEL: test1:
; AVX2: vpminsb
+
+; AVX512VL-LABEL: test1:
+; AVX512VL: vpminsb
}
define void @test2(i8* nocapture %a, i8* nocapture %b) nounwind {
@@ -65,6 +70,9 @@ for.end: ; preds = %vector.body
; AVX2-LABEL: test2:
; AVX2: vpminsb
+
+; AVX512VL-LABEL: test2:
+; AVX512VL: vpminsb
}
define void @test3(i8* nocapture %a, i8* nocapture %b) nounwind {
@@ -97,6 +105,9 @@ for.end: ; preds = %vector.body
; AVX2-LABEL: test3:
; AVX2: vpmaxsb
+
+; AVX512VL-LABEL: test3:
+; AVX512VL: vpmaxsb
}
define void @test4(i8* nocapture %a, i8* nocapture %b) nounwind {
@@ -129,6 +140,9 @@ for.end: ; preds = %vector.body
; AVX2-LABEL: test4:
; AVX2: vpmaxsb
+
+; AVX512VL-LABEL: test4:
+; AVX512VL: vpmaxsb
}
define void @test5(i8* nocapture %a, i8* nocapture %b) nounwind {
@@ -161,6 +175,9 @@ for.end: ; preds = %vector.body
; AVX2-LABEL: test5:
; AVX2: vpminub
+
+; AVX512VL-LABEL: test5:
+; AVX512VL: vpminub
}
define void @test6(i8* nocapture %a, i8* nocapture %b) nounwind {
@@ -193,6 +210,9 @@ for.end: ; preds = %vector.body
; AVX2-LABEL: test6:
; AVX2: vpminub
+
+; AVX512VL-LABEL: test6:
+; AVX512VL: vpminub
}
define void @test7(i8* nocapture %a, i8* nocapture %b) nounwind {
@@ -225,6 +245,9 @@ for.end: ; preds = %vector.body
; AVX2-LABEL: test7:
; AVX2: vpmaxub
+
+; AVX512VL-LABEL: test7:
+; AVX512VL: vpmaxub
}
define void @test8(i8* nocapture %a, i8* nocapture %b) nounwind {
@@ -257,6 +280,9 @@ for.end: ; preds = %vector.body
; AVX2-LABEL: test8:
; AVX2: vpmaxub
+
+; AVX512VL-LABEL: test8:
+; AVX512VL: vpmaxub
}
define void @test9(i16* nocapture %a, i16* nocapture %b) nounwind {
@@ -289,6 +315,9 @@ for.end: ; preds = %vector.body
; AVX2-LABEL: test9:
; AVX2: vpminsw
+
+; AVX512VL-LABEL: test9:
+; AVX512VL: vpminsw
}
define void @test10(i16* nocapture %a, i16* nocapture %b) nounwind {
@@ -321,6 +350,9 @@ for.end: ; preds = %vector.body
; AVX2-LABEL: test10:
; AVX2: vpminsw
+
+; AVX512VL-LABEL: test10:
+; AVX512VL: vpminsw
}
define void @test11(i16* nocapture %a, i16* nocapture %b) nounwind {
@@ -353,6 +385,9 @@ for.end: ; preds = %vector.body
; AVX2-LABEL: test11:
; AVX2: vpmaxsw
+
+; AVX512VL-LABEL: test11:
+; AVX512VL: vpmaxsw
}
define void @test12(i16* nocapture %a, i16* nocapture %b) nounwind {
@@ -385,6 +420,9 @@ for.end: ; preds = %vector.body
; AVX2-LABEL: test12:
; AVX2: vpmaxsw
+
+; AVX512VL-LABEL: test12:
+; AVX512VL: vpmaxsw
}
define void @test13(i16* nocapture %a, i16* nocapture %b) nounwind {
@@ -417,6 +455,9 @@ for.end: ; preds = %vector.body
; AVX2-LABEL: test13:
; AVX2: vpminuw
+
+; AVX512VL-LABEL: test13:
+; AVX512VL: vpminuw
}
define void @test14(i16* nocapture %a, i16* nocapture %b) nounwind {
@@ -449,6 +490,9 @@ for.end: ; preds = %vector.body
; AVX2-LABEL: test14:
; AVX2: vpminuw
+
+; AVX512VL-LABEL: test14:
+; AVX512VL: vpminuw
}
define void @test15(i16* nocapture %a, i16* nocapture %b) nounwind {
@@ -481,6 +525,9 @@ for.end: ; preds = %vector.body
; AVX2-LABEL: test15:
; AVX2: vpmaxuw
+
+; AVX512VL-LABEL: test15:
+; AVX512VL: vpmaxuw
}
define void @test16(i16* nocapture %a, i16* nocapture %b) nounwind {
@@ -513,6 +560,9 @@ for.end: ; preds = %vector.body
; AVX2-LABEL: test16:
; AVX2: vpmaxuw
+
+; AVX512VL-LABEL: test16:
+; AVX512VL: vpmaxuw
}
define void @test17(i32* nocapture %a, i32* nocapture %b) nounwind {
@@ -545,6 +595,9 @@ for.end: ; preds = %vector.body
; AVX2-LABEL: test17:
; AVX2: vpminsd
+
+; AVX512VL-LABEL: test17:
+; AVX512VL: vpminsd
}
define void @test18(i32* nocapture %a, i32* nocapture %b) nounwind {
@@ -577,6 +630,9 @@ for.end: ; preds = %vector.body
; AVX2-LABEL: test18:
; AVX2: vpminsd
+
+; AVX512VL-LABEL: test18:
+; AVX512VL: vpminsd
}
define void @test19(i32* nocapture %a, i32* nocapture %b) nounwind {
@@ -609,6 +665,9 @@ for.end: ; preds = %vector.body
; AVX2-LABEL: test19:
; AVX2: vpmaxsd
+
+; AVX512VL-LABEL: test19:
+; AVX512VL: vpmaxsd
}
define void @test20(i32* nocapture %a, i32* nocapture %b) nounwind {
@@ -641,6 +700,9 @@ for.end: ; preds = %vector.body
; AVX2-LABEL: test20:
; AVX2: vpmaxsd
+
+; AVX512VL-LABEL: test20:
+; AVX512VL: vpmaxsd
}
define void @test21(i32* nocapture %a, i32* nocapture %b) nounwind {
@@ -673,6 +735,9 @@ for.end: ; preds = %vector.body
; AVX2-LABEL: test21:
; AVX2: vpminud
+
+; AVX512VL-LABEL: test21:
+; AVX512VL: vpminud
}
define void @test22(i32* nocapture %a, i32* nocapture %b) nounwind {
@@ -705,6 +770,9 @@ for.end: ; preds = %vector.body
; AVX2-LABEL: test22:
; AVX2: vpminud
+
+; AVX512VL-LABEL: test22:
+; AVX512VL: vpminud
}
define void @test23(i32* nocapture %a, i32* nocapture %b) nounwind {
@@ -737,6 +805,9 @@ for.end: ; preds = %vector.body
; AVX2-LABEL: test23:
; AVX2: vpmaxud
+
+; AVX512VL-LABEL: test23:
+; AVX512VL: vpmaxud
}
define void @test24(i32* nocapture %a, i32* nocapture %b) nounwind {
@@ -769,6 +840,9 @@ for.end: ; preds = %vector.body
; AVX2-LABEL: test24:
; AVX2: vpmaxud
+
+; AVX512VL-LABEL: test24:
+; AVX512VL: vpmaxud
}
define void @test25(i8* nocapture %a, i8* nocapture %b) nounwind {
@@ -795,6 +869,9 @@ for.end: ; preds = %vector.body
; AVX2-LABEL: test25:
; AVX2: vpminsb
+
+; AVX512VL-LABEL: test25:
+; AVX512VL: vpminsb
}
define void @test26(i8* nocapture %a, i8* nocapture %b) nounwind {
@@ -821,6 +898,9 @@ for.end: ; preds = %vector.body
; AVX2-LABEL: test26:
; AVX2: vpminsb
+
+; AVX512VL-LABEL: test26:
+; AVX512VL: vpminsb
}
define void @test27(i8* nocapture %a, i8* nocapture %b) nounwind {
@@ -847,6 +927,9 @@ for.end: ; preds = %vector.body
; AVX2-LABEL: test27:
; AVX2: vpmaxsb
+
+; AVX512VL-LABEL: test27:
+; AVX512VL: vpmaxsb
}
define void @test28(i8* nocapture %a, i8* nocapture %b) nounwind {
@@ -873,6 +956,9 @@ for.end: ; preds = %vector.body
; AVX2-LABEL: test28:
; AVX2: vpmaxsb
+
+; AVX512VL-LABEL: test28:
+; AVX512VL: vpmaxsb
}
define void @test29(i8* nocapture %a, i8* nocapture %b) nounwind {
@@ -899,6 +985,9 @@ for.end: ; preds = %vector.body
; AVX2-LABEL: test29:
; AVX2: vpminub
+
+; AVX512VL-LABEL: test29:
+; AVX512VL: vpminub
}
define void @test30(i8* nocapture %a, i8* nocapture %b) nounwind {
@@ -925,6 +1014,9 @@ for.end: ; preds = %vector.body
; AVX2-LABEL: test30:
; AVX2: vpminub
+
+; AVX512VL-LABEL: test30:
+; AVX512VL: vpminub
}
define void @test31(i8* nocapture %a, i8* nocapture %b) nounwind {
@@ -951,6 +1043,9 @@ for.end: ; preds = %vector.body
; AVX2-LABEL: test31:
; AVX2: vpmaxub
+
+; AVX512VL-LABEL: test31:
+; AVX512VL: vpmaxub
}
define void @test32(i8* nocapture %a, i8* nocapture %b) nounwind {
@@ -977,6 +1072,9 @@ for.end: ; preds = %vector.body
; AVX2-LABEL: test32:
; AVX2: vpmaxub
+
+; AVX512VL-LABEL: test32:
+; AVX512VL: vpmaxub
}
define void @test33(i16* nocapture %a, i16* nocapture %b) nounwind {
@@ -1003,6 +1101,9 @@ for.end: ; preds = %vector.body
; AVX2-LABEL: test33:
; AVX2: vpminsw
+
+; AVX512VL-LABEL: test33:
+; AVX512VL: vpminsw
}
define void @test34(i16* nocapture %a, i16* nocapture %b) nounwind {
@@ -1029,6 +1130,9 @@ for.end: ; preds = %vector.body
; AVX2-LABEL: test34:
; AVX2: vpminsw
+
+; AVX512VL-LABEL: test34:
+; AVX512VL: vpminsw
}
define void @test35(i16* nocapture %a, i16* nocapture %b) nounwind {
@@ -1055,6 +1159,9 @@ for.end: ; preds = %vector.body
; AVX2-LABEL: test35:
; AVX2: vpmaxsw
+
+; AVX512VL-LABEL: test35:
+; AVX512VL: vpmaxsw
}
define void @test36(i16* nocapture %a, i16* nocapture %b) nounwind {
@@ -1081,6 +1188,9 @@ for.end: ; preds = %vector.body
; AVX2-LABEL: test36:
; AVX2: vpmaxsw
+
+; AVX512VL-LABEL: test36:
+; AVX512VL: vpmaxsw
}
define void @test37(i16* nocapture %a, i16* nocapture %b) nounwind {
@@ -1107,6 +1217,9 @@ for.end: ; preds = %vector.body
; AVX2-LABEL: test37:
; AVX2: vpminuw
+
+; AVX512VL-LABEL: test37:
+; AVX512VL: vpminuw
}
define void @test38(i16* nocapture %a, i16* nocapture %b) nounwind {
@@ -1133,6 +1246,9 @@ for.end: ; preds = %vector.body
; AVX2-LABEL: test38:
; AVX2: vpminuw
+
+; AVX512VL-LABEL: test38:
+; AVX512VL: vpminuw
}
define void @test39(i16* nocapture %a, i16* nocapture %b) nounwind {
@@ -1159,6 +1275,9 @@ for.end: ; preds = %vector.body
; AVX2-LABEL: test39:
; AVX2: vpmaxuw
+
+; AVX512VL-LABEL: test39:
+; AVX512VL: vpmaxuw
}
define void @test40(i16* nocapture %a, i16* nocapture %b) nounwind {
@@ -1185,6 +1304,9 @@ for.end: ; preds = %vector.body
; AVX2-LABEL: test40:
; AVX2: vpmaxuw
+
+; AVX512VL-LABEL: test40:
+; AVX512VL: vpmaxuw
}
define void @test41(i32* nocapture %a, i32* nocapture %b) nounwind {
@@ -1211,6 +1333,9 @@ for.end: ; preds = %vector.body
; AVX2-LABEL: test41:
; AVX2: vpminsd
+
+; AVX512VL-LABEL: test41:
+; AVX512VL: vpminsd
}
define void @test42(i32* nocapture %a, i32* nocapture %b) nounwind {
@@ -1237,6 +1362,9 @@ for.end: ; preds = %vector.body
; AVX2-LABEL: test42:
; AVX2: vpminsd
+
+; AVX512VL-LABEL: test42:
+; AVX512VL: vpminsd
}
define void @test43(i32* nocapture %a, i32* nocapture %b) nounwind {
@@ -1263,6 +1391,9 @@ for.end: ; preds = %vector.body
; AVX2-LABEL: test43:
; AVX2: vpmaxsd
+
+; AVX512VL-LABEL: test43:
+; AVX512VL: vpmaxsd
}
define void @test44(i32* nocapture %a, i32* nocapture %b) nounwind {
@@ -1289,6 +1420,9 @@ for.end: ; preds = %vector.body
; AVX2-LABEL: test44:
; AVX2: vpmaxsd
+
+; AVX512VL-LABEL: test44:
+; AVX512VL: vpmaxsd
}
define void @test45(i32* nocapture %a, i32* nocapture %b) nounwind {
@@ -1315,6 +1449,9 @@ for.end: ; preds = %vector.body
; AVX2-LABEL: test45:
; AVX2: vpminud
+
+; AVX512VL-LABEL: test45:
+; AVX512VL: vpminud
}
define void @test46(i32* nocapture %a, i32* nocapture %b) nounwind {
@@ -1341,6 +1478,9 @@ for.end: ; preds = %vector.body
; AVX2-LABEL: test46:
; AVX2: vpminud
+
+; AVX512VL-LABEL: test46:
+; AVX512VL: vpminud
}
define void @test47(i32* nocapture %a, i32* nocapture %b) nounwind {
@@ -1367,6 +1507,9 @@ for.end: ; preds = %vector.body
; AVX2-LABEL: test47:
; AVX2: vpmaxud
+
+; AVX512VL-LABEL: test47:
+; AVX512VL: vpmaxud
}
define void @test48(i32* nocapture %a, i32* nocapture %b) nounwind {
@@ -1393,6 +1536,9 @@ for.end: ; preds = %vector.body
; AVX2-LABEL: test48:
; AVX2: vpmaxud
+
+; AVX512VL-LABEL: test48:
+; AVX512VL: vpmaxud
}
define void @test49(i8* nocapture %a, i8* nocapture %b) nounwind {
@@ -1425,6 +1571,9 @@ for.end: ; preds = %vector.body
; AVX2-LABEL: test49:
; AVX2: vpmaxsb
+
+; AVX512VL-LABEL: test49:
+; AVX512VL: vpmaxsb
}
define void @test50(i8* nocapture %a, i8* nocapture %b) nounwind {
@@ -1457,6 +1606,9 @@ for.end: ; preds = %vector.body
; AVX2-LABEL: test50:
; AVX2: vpmaxsb
+
+; AVX512VL-LABEL: test50:
+; AVX512VL: vpmaxsb
}
define void @test51(i8* nocapture %a, i8* nocapture %b) nounwind {
@@ -1489,6 +1641,9 @@ for.end: ; preds = %vector.body
; AVX2-LABEL: test51:
; AVX2: vpminsb
+
+; AVX512VL-LABEL: test51:
+; AVX512VL: vpminsb
}
define void @test52(i8* nocapture %a, i8* nocapture %b) nounwind {
@@ -1521,6 +1676,9 @@ for.end: ; preds = %vector.body
; AVX2-LABEL: test52:
; AVX2: vpminsb
+
+; AVX512VL-LABEL: test52:
+; AVX512VL: vpminsb
}
define void @test53(i8* nocapture %a, i8* nocapture %b) nounwind {
@@ -1553,6 +1711,9 @@ for.end: ; preds = %vector.body
; AVX2-LABEL: test53:
; AVX2: vpmaxub
+
+; AVX512VL-LABEL: test53:
+; AVX512VL: vpmaxub
}
define void @test54(i8* nocapture %a, i8* nocapture %b) nounwind {
@@ -1585,6 +1746,9 @@ for.end: ; preds = %vector.body
; AVX2-LABEL: test54:
; AVX2: vpmaxub
+
+; AVX512VL-LABEL: test54:
+; AVX512VL: vpmaxub
}
define void @test55(i8* nocapture %a, i8* nocapture %b) nounwind {
@@ -1617,6 +1781,9 @@ for.end: ; preds = %vector.body
; AVX2-LABEL: test55:
; AVX2: vpminub
+
+; AVX512VL-LABEL: test55:
+; AVX512VL: vpminub
}
define void @test56(i8* nocapture %a, i8* nocapture %b) nounwind {
@@ -1649,6 +1816,9 @@ for.end: ; preds = %vector.body
; AVX2-LABEL: test56:
; AVX2: vpminub
+
+; AVX512VL-LABEL: test56:
+; AVX512VL: vpminub
}
define void @test57(i16* nocapture %a, i16* nocapture %b) nounwind {
@@ -1681,6 +1851,9 @@ for.end: ; preds = %vector.body
; AVX2-LABEL: test57:
; AVX2: vpmaxsw
+
+; AVX512VL-LABEL: test57:
+; AVX512VL: vpmaxsw
}
define void @test58(i16* nocapture %a, i16* nocapture %b) nounwind {
@@ -1713,6 +1886,9 @@ for.end: ; preds = %vector.body
; AVX2-LABEL: test58:
; AVX2: vpmaxsw
+
+; AVX512VL-LABEL: test58:
+; AVX512VL: vpmaxsw
}
define void @test59(i16* nocapture %a, i16* nocapture %b) nounwind {
@@ -1745,6 +1921,9 @@ for.end: ; preds = %vector.body
; AVX2-LABEL: test59:
; AVX2: vpminsw
+
+; AVX512VL-LABEL: test59:
+; AVX512VL: vpminsw
}
define void @test60(i16* nocapture %a, i16* nocapture %b) nounwind {
@@ -1777,6 +1956,9 @@ for.end: ; preds = %vector.body
; AVX2-LABEL: test60:
; AVX2: vpminsw
+
+; AVX512VL-LABEL: test60:
+; AVX512VL: vpminsw
}
define void @test61(i16* nocapture %a, i16* nocapture %b) nounwind {
@@ -1809,6 +1991,9 @@ for.end: ; preds = %vector.body
; AVX2-LABEL: test61:
; AVX2: vpmaxuw
+
+; AVX512VL-LABEL: test61:
+; AVX512VL: vpmaxuw
}
define void @test62(i16* nocapture %a, i16* nocapture %b) nounwind {
@@ -1841,6 +2026,9 @@ for.end: ; preds = %vector.body
; AVX2-LABEL: test62:
; AVX2: vpmaxuw
+
+; AVX512VL-LABEL: test62:
+; AVX512VL: vpmaxuw
}
define void @test63(i16* nocapture %a, i16* nocapture %b) nounwind {
@@ -1873,6 +2061,9 @@ for.end: ; preds = %vector.body
; AVX2-LABEL: test63:
; AVX2: vpminuw
+
+; AVX512VL-LABEL: test63:
+; AVX512VL: vpminuw
}
define void @test64(i16* nocapture %a, i16* nocapture %b) nounwind {
@@ -1905,6 +2096,9 @@ for.end: ; preds = %vector.body
; AVX2-LABEL: test64:
; AVX2: vpminuw
+
+; AVX512VL-LABEL: test64:
+; AVX512VL: vpminuw
}
define void @test65(i32* nocapture %a, i32* nocapture %b) nounwind {
@@ -1937,6 +2131,9 @@ for.end: ; preds = %vector.body
; AVX2-LABEL: test65:
; AVX2: vpmaxsd
+
+; AVX512VL-LABEL: test65:
+; AVX512VL: vpmaxsd
}
define void @test66(i32* nocapture %a, i32* nocapture %b) nounwind {
@@ -1969,6 +2166,9 @@ for.end: ; preds = %vector.body
; AVX2-LABEL: test66:
; AVX2: vpmaxsd
+
+; AVX512VL-LABEL: test66:
+; AVX512VL: vpmaxsd
}
define void @test67(i32* nocapture %a, i32* nocapture %b) nounwind {
@@ -2001,6 +2201,9 @@ for.end: ; preds = %vector.body
; AVX2-LABEL: test67:
; AVX2: vpminsd
+
+; AVX512VL-LABEL: test67:
+; AVX512VL: vpminsd
}
define void @test68(i32* nocapture %a, i32* nocapture %b) nounwind {
@@ -2033,6 +2236,9 @@ for.end: ; preds = %vector.body
; AVX2-LABEL: test68:
; AVX2: vpminsd
+
+; AVX512VL-LABEL: test68:
+; AVX512VL: vpminsd
}
define void @test69(i32* nocapture %a, i32* nocapture %b) nounwind {
@@ -2065,6 +2271,9 @@ for.end: ; preds = %vector.body
; AVX2-LABEL: test69:
; AVX2: vpmaxud
+
+; AVX512VL-LABEL: test69:
+; AVX512VL: vpmaxud
}
define void @test70(i32* nocapture %a, i32* nocapture %b) nounwind {
@@ -2097,6 +2306,9 @@ for.end: ; preds = %vector.body
; AVX2-LABEL: test70:
; AVX2: vpmaxud
+
+; AVX512VL-LABEL: test70:
+; AVX512VL: vpmaxud
}
define void @test71(i32* nocapture %a, i32* nocapture %b) nounwind {
@@ -2129,6 +2341,9 @@ for.end: ; preds = %vector.body
; AVX2-LABEL: test71:
; AVX2: vpminud
+
+; AVX512VL-LABEL: test71:
+; AVX512VL: vpminud
}
define void @test72(i32* nocapture %a, i32* nocapture %b) nounwind {
@@ -2161,6 +2376,9 @@ for.end: ; preds = %vector.body
; AVX2-LABEL: test72:
; AVX2: vpminud
+
+; AVX512VL-LABEL: test72:
+; AVX512VL: vpminud
}
define void @test73(i8* nocapture %a, i8* nocapture %b) nounwind {
@@ -2187,6 +2405,9 @@ for.end: ; preds = %vector.body
; AVX2-LABEL: test73:
; AVX2: vpmaxsb
+
+; AVX512VL-LABEL: test73:
+; AVX512VL: vpmaxsb
}
define void @test74(i8* nocapture %a, i8* nocapture %b) nounwind {
@@ -2213,6 +2434,9 @@ for.end: ; preds = %vector.body
; AVX2-LABEL: test74:
; AVX2: vpmaxsb
+
+; AVX512VL-LABEL: test74:
+; AVX512VL: vpmaxsb
}
define void @test75(i8* nocapture %a, i8* nocapture %b) nounwind {
@@ -2239,6 +2463,9 @@ for.end: ; preds = %vector.body
; AVX2-LABEL: test75:
; AVX2: vpminsb
+
+; AVX512VL-LABEL: test75:
+; AVX512VL: vpminsb
}
define void @test76(i8* nocapture %a, i8* nocapture %b) nounwind {
@@ -2265,6 +2492,9 @@ for.end: ; preds = %vector.body
; AVX2-LABEL: test76:
; AVX2: vpminsb
+
+; AVX512VL-LABEL: test76:
+; AVX512VL: vpminsb
}
define void @test77(i8* nocapture %a, i8* nocapture %b) nounwind {
@@ -2291,6 +2521,9 @@ for.end: ; preds = %vector.body
; AVX2-LABEL: test77:
; AVX2: vpmaxub
+
+; AVX512VL-LABEL: test77:
+; AVX512VL: vpmaxub
}
define void @test78(i8* nocapture %a, i8* nocapture %b) nounwind {
@@ -2317,6 +2550,9 @@ for.end: ; preds = %vector.body
; AVX2-LABEL: test78:
; AVX2: vpmaxub
+
+; AVX512VL-LABEL: test78:
+; AVX512VL: vpmaxub
}
define void @test79(i8* nocapture %a, i8* nocapture %b) nounwind {
@@ -2343,6 +2579,9 @@ for.end: ; preds = %vector.body
; AVX2-LABEL: test79:
; AVX2: vpminub
+
+; AVX512VL-LABEL: test79:
+; AVX512VL: vpminub
}
define void @test80(i8* nocapture %a, i8* nocapture %b) nounwind {
@@ -2369,6 +2608,9 @@ for.end: ; preds = %vector.body
; AVX2-LABEL: test80:
; AVX2: vpminub
+
+; AVX512VL-LABEL: test80:
+; AVX512VL: vpminub
}
define void @test81(i16* nocapture %a, i16* nocapture %b) nounwind {
@@ -2395,6 +2637,9 @@ for.end: ; preds = %vector.body
; AVX2-LABEL: test81:
; AVX2: vpmaxsw
+
+; AVX512VL-LABEL: test81:
+; AVX512VL: vpmaxsw
}
define void @test82(i16* nocapture %a, i16* nocapture %b) nounwind {
@@ -2421,6 +2666,9 @@ for.end: ; preds = %vector.body
; AVX2-LABEL: test82:
; AVX2: vpmaxsw
+
+; AVX512VL-LABEL: test82:
+; AVX512VL: vpmaxsw
}
define void @test83(i16* nocapture %a, i16* nocapture %b) nounwind {
@@ -2447,6 +2695,9 @@ for.end: ; preds = %vector.body
; AVX2-LABEL: test83:
; AVX2: vpminsw
+
+; AVX512VL-LABEL: test83:
+; AVX512VL: vpminsw
}
define void @test84(i16* nocapture %a, i16* nocapture %b) nounwind {
@@ -2473,6 +2724,9 @@ for.end: ; preds = %vector.body
; AVX2-LABEL: test84:
; AVX2: vpminsw
+
+; AVX512VL-LABEL: test84:
+; AVX512VL: vpminsw
}
define void @test85(i16* nocapture %a, i16* nocapture %b) nounwind {
@@ -2499,6 +2753,9 @@ for.end: ; preds = %vector.body
; AVX2-LABEL: test85:
; AVX2: vpmaxuw
+
+; AVX512VL-LABEL: test85:
+; AVX512VL: vpmaxuw
}
define void @test86(i16* nocapture %a, i16* nocapture %b) nounwind {
@@ -2525,6 +2782,9 @@ for.end: ; preds = %vector.body
; AVX2-LABEL: test86:
; AVX2: vpmaxuw
+
+; AVX512VL-LABEL: test86:
+; AVX512VL: vpmaxuw
}
define void @test87(i16* nocapture %a, i16* nocapture %b) nounwind {
@@ -2551,6 +2811,9 @@ for.end: ; preds = %vector.body
; AVX2-LABEL: test87:
; AVX2: vpminuw
+
+; AVX512VL-LABEL: test87:
+; AVX512VL: vpminuw
}
define void @test88(i16* nocapture %a, i16* nocapture %b) nounwind {
@@ -2577,6 +2840,9 @@ for.end: ; preds = %vector.body
; AVX2-LABEL: test88:
; AVX2: vpminuw
+
+; AVX512VL-LABEL: test88:
+; AVX512VL: vpminuw
}
define void @test89(i32* nocapture %a, i32* nocapture %b) nounwind {
@@ -2603,6 +2869,9 @@ for.end: ; preds = %vector.body
; AVX2-LABEL: test89:
; AVX2: vpmaxsd
+
+; AVX512VL-LABEL: test89:
+; AVX512VL: vpmaxsd
}
define void @test90(i32* nocapture %a, i32* nocapture %b) nounwind {
@@ -2629,6 +2898,9 @@ for.end: ; preds = %vector.body
; AVX2-LABEL: test90:
; AVX2: vpmaxsd
+
+; AVX512VL-LABEL: test90:
+; AVX512VL: vpmaxsd
}
define void @test91(i32* nocapture %a, i32* nocapture %b) nounwind {
@@ -2655,6 +2927,9 @@ for.end: ; preds = %vector.body
; AVX2-LABEL: test91:
; AVX2: vpminsd
+
+; AVX512VL-LABEL: test91:
+; AVX512VL: vpminsd
}
define void @test92(i32* nocapture %a, i32* nocapture %b) nounwind {
@@ -2681,6 +2956,9 @@ for.end: ; preds = %vector.body
; AVX2-LABEL: test92:
; AVX2: vpminsd
+
+; AVX512VL-LABEL: test92:
+; AVX512VL: vpminsd
}
define void @test93(i32* nocapture %a, i32* nocapture %b) nounwind {
@@ -2707,6 +2985,9 @@ for.end: ; preds = %vector.body
; AVX2-LABEL: test93:
; AVX2: vpmaxud
+
+; AVX512VL-LABEL: test93:
+; AVX512VL: vpmaxud
}
define void @test94(i32* nocapture %a, i32* nocapture %b) nounwind {
@@ -2733,6 +3014,9 @@ for.end: ; preds = %vector.body
; AVX2-LABEL: test94:
; AVX2: vpmaxud
+
+; AVX512VL-LABEL: test94:
+; AVX512VL: vpmaxud
}
define void @test95(i32* nocapture %a, i32* nocapture %b) nounwind {
@@ -2759,6 +3043,9 @@ for.end: ; preds = %vector.body
; AVX2-LABEL: test95:
; AVX2: vpminud
+
+; AVX512VL-LABEL: test95:
+; AVX512VL: vpminud
}
define void @test96(i32* nocapture %a, i32* nocapture %b) nounwind {
@@ -2785,4 +3072,2507 @@ for.end: ; preds = %vector.body
; AVX2-LABEL: test96:
; AVX2: vpminud
+
+; AVX512VL-LABEL: test96:
+; AVX512VL: vpminud
+}
+
+; ----------------------------
+
+define void @test97(i8* nocapture %a, i8* nocapture %b) nounwind {
+vector.ph:
+ br label %vector.body
+
+vector.body: ; preds = %vector.body, %vector.ph
+ %index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
+ %gep.a = getelementptr inbounds i8* %a, i64 %index
+ %gep.b = getelementptr inbounds i8* %b, i64 %index
+ %ptr.a = bitcast i8* %gep.a to <64 x i8>*
+ %ptr.b = bitcast i8* %gep.b to <64 x i8>*
+ %load.a = load <64 x i8>* %ptr.a, align 2
+ %load.b = load <64 x i8>* %ptr.b, align 2
+ %cmp = icmp slt <64 x i8> %load.a, %load.b
+ %sel = select <64 x i1> %cmp, <64 x i8> %load.a, <64 x i8> %load.b
+ store <64 x i8> %sel, <64 x i8>* %ptr.a, align 2
+ %index.next = add i64 %index, 32
+ %loop = icmp eq i64 %index.next, 16384
+ br i1 %loop, label %for.end, label %vector.body
+
+for.end: ; preds = %vector.body
+ ret void
+
+; AVX512BW-LABEL: test97:
+; AVX512BW: vpminsb {{.*}}
+}
+
+define void @test98(i8* nocapture %a, i8* nocapture %b) nounwind {
+vector.ph:
+ br label %vector.body
+
+vector.body: ; preds = %vector.body, %vector.ph
+ %index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
+ %gep.a = getelementptr inbounds i8* %a, i64 %index
+ %gep.b = getelementptr inbounds i8* %b, i64 %index
+ %ptr.a = bitcast i8* %gep.a to <64 x i8>*
+ %ptr.b = bitcast i8* %gep.b to <64 x i8>*
+ %load.a = load <64 x i8>* %ptr.a, align 2
+ %load.b = load <64 x i8>* %ptr.b, align 2
+ %cmp = icmp sle <64 x i8> %load.a, %load.b
+ %sel = select <64 x i1> %cmp, <64 x i8> %load.a, <64 x i8> %load.b
+ store <64 x i8> %sel, <64 x i8>* %ptr.a, align 2
+ %index.next = add i64 %index, 32
+ %loop = icmp eq i64 %index.next, 16384
+ br i1 %loop, label %for.end, label %vector.body
+
+for.end: ; preds = %vector.body
+ ret void
+
+; AVX512BW-LABEL: test98:
+; AVX512BW: vpminsb {{.*}}
+}
+
+define void @test99(i8* nocapture %a, i8* nocapture %b) nounwind {
+vector.ph:
+ br label %vector.body
+
+vector.body: ; preds = %vector.body, %vector.ph
+ %index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
+ %gep.a = getelementptr inbounds i8* %a, i64 %index
+ %gep.b = getelementptr inbounds i8* %b, i64 %index
+ %ptr.a = bitcast i8* %gep.a to <64 x i8>*
+ %ptr.b = bitcast i8* %gep.b to <64 x i8>*
+ %load.a = load <64 x i8>* %ptr.a, align 2
+ %load.b = load <64 x i8>* %ptr.b, align 2
+ %cmp = icmp sgt <64 x i8> %load.a, %load.b
+ %sel = select <64 x i1> %cmp, <64 x i8> %load.a, <64 x i8> %load.b
+ store <64 x i8> %sel, <64 x i8>* %ptr.a, align 2
+ %index.next = add i64 %index, 32
+ %loop = icmp eq i64 %index.next, 16384
+ br i1 %loop, label %for.end, label %vector.body
+
+for.end: ; preds = %vector.body
+ ret void
+
+; AVX512BW-LABEL: test99:
+; AVX512BW: vpmaxsb {{.*}}
+}
+
+define void @test100(i8* nocapture %a, i8* nocapture %b) nounwind {
+vector.ph:
+ br label %vector.body
+
+vector.body: ; preds = %vector.body, %vector.ph
+ %index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
+ %gep.a = getelementptr inbounds i8* %a, i64 %index
+ %gep.b = getelementptr inbounds i8* %b, i64 %index
+ %ptr.a = bitcast i8* %gep.a to <64 x i8>*
+ %ptr.b = bitcast i8* %gep.b to <64 x i8>*
+ %load.a = load <64 x i8>* %ptr.a, align 2
+ %load.b = load <64 x i8>* %ptr.b, align 2
+ %cmp = icmp sge <64 x i8> %load.a, %load.b
+ %sel = select <64 x i1> %cmp, <64 x i8> %load.a, <64 x i8> %load.b
+ store <64 x i8> %sel, <64 x i8>* %ptr.a, align 2
+ %index.next = add i64 %index, 32
+ %loop = icmp eq i64 %index.next, 16384
+ br i1 %loop, label %for.end, label %vector.body
+
+for.end: ; preds = %vector.body
+ ret void
+
+; AVX512BW-LABEL: test100:
+; AVX512BW: vpmaxsb {{.*}}
+}
+
+define void @test101(i8* nocapture %a, i8* nocapture %b) nounwind {
+vector.ph:
+ br label %vector.body
+
+vector.body: ; preds = %vector.body, %vector.ph
+ %index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
+ %gep.a = getelementptr inbounds i8* %a, i64 %index
+ %gep.b = getelementptr inbounds i8* %b, i64 %index
+ %ptr.a = bitcast i8* %gep.a to <64 x i8>*
+ %ptr.b = bitcast i8* %gep.b to <64 x i8>*
+ %load.a = load <64 x i8>* %ptr.a, align 2
+ %load.b = load <64 x i8>* %ptr.b, align 2
+ %cmp = icmp ult <64 x i8> %load.a, %load.b
+ %sel = select <64 x i1> %cmp, <64 x i8> %load.a, <64 x i8> %load.b
+ store <64 x i8> %sel, <64 x i8>* %ptr.a, align 2
+ %index.next = add i64 %index, 32
+ %loop = icmp eq i64 %index.next, 16384
+ br i1 %loop, label %for.end, label %vector.body
+
+for.end: ; preds = %vector.body
+ ret void
+
+; AVX512BW-LABEL: test101:
+; AVX512BW: vpminub {{.*}}
+}
+
+define void @test102(i8* nocapture %a, i8* nocapture %b) nounwind {
+vector.ph:
+ br label %vector.body
+
+vector.body: ; preds = %vector.body, %vector.ph
+ %index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
+ %gep.a = getelementptr inbounds i8* %a, i64 %index
+ %gep.b = getelementptr inbounds i8* %b, i64 %index
+ %ptr.a = bitcast i8* %gep.a to <64 x i8>*
+ %ptr.b = bitcast i8* %gep.b to <64 x i8>*
+ %load.a = load <64 x i8>* %ptr.a, align 2
+ %load.b = load <64 x i8>* %ptr.b, align 2
+ %cmp = icmp ule <64 x i8> %load.a, %load.b
+ %sel = select <64 x i1> %cmp, <64 x i8> %load.a, <64 x i8> %load.b
+ store <64 x i8> %sel, <64 x i8>* %ptr.a, align 2
+ %index.next = add i64 %index, 32
+ %loop = icmp eq i64 %index.next, 16384
+ br i1 %loop, label %for.end, label %vector.body
+
+for.end: ; preds = %vector.body
+ ret void
+
+; AVX512BW-LABEL: test102:
+; AVX512BW: vpminub {{.*}}
+}
+
+define void @test103(i8* nocapture %a, i8* nocapture %b) nounwind {
+vector.ph:
+ br label %vector.body
+
+vector.body: ; preds = %vector.body, %vector.ph
+ %index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
+ %gep.a = getelementptr inbounds i8* %a, i64 %index
+ %gep.b = getelementptr inbounds i8* %b, i64 %index
+ %ptr.a = bitcast i8* %gep.a to <64 x i8>*
+ %ptr.b = bitcast i8* %gep.b to <64 x i8>*
+ %load.a = load <64 x i8>* %ptr.a, align 2
+ %load.b = load <64 x i8>* %ptr.b, align 2
+ %cmp = icmp ugt <64 x i8> %load.a, %load.b
+ %sel = select <64 x i1> %cmp, <64 x i8> %load.a, <64 x i8> %load.b
+ store <64 x i8> %sel, <64 x i8>* %ptr.a, align 2
+ %index.next = add i64 %index, 32
+ %loop = icmp eq i64 %index.next, 16384
+ br i1 %loop, label %for.end, label %vector.body
+
+for.end: ; preds = %vector.body
+ ret void
+
+; AVX512BW-LABEL: test103:
+; AVX512BW: vpmaxub {{.*}}
+}
+
+define void @test104(i8* nocapture %a, i8* nocapture %b) nounwind {
+vector.ph:
+ br label %vector.body
+
+vector.body: ; preds = %vector.body, %vector.ph
+ %index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
+ %gep.a = getelementptr inbounds i8* %a, i64 %index
+ %gep.b = getelementptr inbounds i8* %b, i64 %index
+ %ptr.a = bitcast i8* %gep.a to <64 x i8>*
+ %ptr.b = bitcast i8* %gep.b to <64 x i8>*
+ %load.a = load <64 x i8>* %ptr.a, align 2
+ %load.b = load <64 x i8>* %ptr.b, align 2
+ %cmp = icmp uge <64 x i8> %load.a, %load.b
+ %sel = select <64 x i1> %cmp, <64 x i8> %load.a, <64 x i8> %load.b
+ store <64 x i8> %sel, <64 x i8>* %ptr.a, align 2
+ %index.next = add i64 %index, 32
+ %loop = icmp eq i64 %index.next, 16384
+ br i1 %loop, label %for.end, label %vector.body
+
+for.end: ; preds = %vector.body
+ ret void
+
+; AVX512BW-LABEL: test104:
+; AVX512BW: vpmaxub {{.*}}
+}
+
+define void @test105(i16* nocapture %a, i16* nocapture %b) nounwind {
+vector.ph:
+ br label %vector.body
+
+vector.body: ; preds = %vector.body, %vector.ph
+ %index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
+ %gep.a = getelementptr inbounds i16* %a, i64 %index
+ %gep.b = getelementptr inbounds i16* %b, i64 %index
+ %ptr.a = bitcast i16* %gep.a to <32 x i16>*
+ %ptr.b = bitcast i16* %gep.b to <32 x i16>*
+ %load.a = load <32 x i16>* %ptr.a, align 2
+ %load.b = load <32 x i16>* %ptr.b, align 2
+ %cmp = icmp slt <32 x i16> %load.a, %load.b
+ %sel = select <32 x i1> %cmp, <32 x i16> %load.a, <32 x i16> %load.b
+ store <32 x i16> %sel, <32 x i16>* %ptr.a, align 2
+ %index.next = add i64 %index, 16
+ %loop = icmp eq i64 %index.next, 16384
+ br i1 %loop, label %for.end, label %vector.body
+
+for.end: ; preds = %vector.body
+ ret void
+
+; AVX512BW-LABEL: test105:
+; AVX512BW: vpminsw {{.*}}
+}
+
+define void @test106(i16* nocapture %a, i16* nocapture %b) nounwind {
+vector.ph:
+ br label %vector.body
+
+vector.body: ; preds = %vector.body, %vector.ph
+ %index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
+ %gep.a = getelementptr inbounds i16* %a, i64 %index
+ %gep.b = getelementptr inbounds i16* %b, i64 %index
+ %ptr.a = bitcast i16* %gep.a to <32 x i16>*
+ %ptr.b = bitcast i16* %gep.b to <32 x i16>*
+ %load.a = load <32 x i16>* %ptr.a, align 2
+ %load.b = load <32 x i16>* %ptr.b, align 2
+ %cmp = icmp sle <32 x i16> %load.a, %load.b
+ %sel = select <32 x i1> %cmp, <32 x i16> %load.a, <32 x i16> %load.b
+ store <32 x i16> %sel, <32 x i16>* %ptr.a, align 2
+ %index.next = add i64 %index, 16
+ %loop = icmp eq i64 %index.next, 16384
+ br i1 %loop, label %for.end, label %vector.body
+
+for.end: ; preds = %vector.body
+ ret void
+
+; AVX512BW-LABEL: test106:
+; AVX512BW: vpminsw {{.*}}
+}
+
+define void @test107(i16* nocapture %a, i16* nocapture %b) nounwind {
+vector.ph:
+ br label %vector.body
+
+vector.body: ; preds = %vector.body, %vector.ph
+ %index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
+ %gep.a = getelementptr inbounds i16* %a, i64 %index
+ %gep.b = getelementptr inbounds i16* %b, i64 %index
+ %ptr.a = bitcast i16* %gep.a to <32 x i16>*
+ %ptr.b = bitcast i16* %gep.b to <32 x i16>*
+ %load.a = load <32 x i16>* %ptr.a, align 2
+ %load.b = load <32 x i16>* %ptr.b, align 2
+ %cmp = icmp sgt <32 x i16> %load.a, %load.b
+ %sel = select <32 x i1> %cmp, <32 x i16> %load.a, <32 x i16> %load.b
+ store <32 x i16> %sel, <32 x i16>* %ptr.a, align 2
+ %index.next = add i64 %index, 16
+ %loop = icmp eq i64 %index.next, 16384
+ br i1 %loop, label %for.end, label %vector.body
+
+for.end: ; preds = %vector.body
+ ret void
+
+; AVX512BW-LABEL: test107:
+; AVX512BW: vpmaxsw {{.*}}
+}
+
+define void @test108(i16* nocapture %a, i16* nocapture %b) nounwind {
+vector.ph:
+ br label %vector.body
+
+vector.body: ; preds = %vector.body, %vector.ph
+ %index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
+ %gep.a = getelementptr inbounds i16* %a, i64 %index
+ %gep.b = getelementptr inbounds i16* %b, i64 %index
+ %ptr.a = bitcast i16* %gep.a to <32 x i16>*
+ %ptr.b = bitcast i16* %gep.b to <32 x i16>*
+ %load.a = load <32 x i16>* %ptr.a, align 2
+ %load.b = load <32 x i16>* %ptr.b, align 2
+ %cmp = icmp sge <32 x i16> %load.a, %load.b
+ %sel = select <32 x i1> %cmp, <32 x i16> %load.a, <32 x i16> %load.b
+ store <32 x i16> %sel, <32 x i16>* %ptr.a, align 2
+ %index.next = add i64 %index, 16
+ %loop = icmp eq i64 %index.next, 16384
+ br i1 %loop, label %for.end, label %vector.body
+
+for.end: ; preds = %vector.body
+ ret void
+
+; AVX512BW-LABEL: test108:
+; AVX512BW: vpmaxsw {{.*}}
+}
+
+define void @test109(i16* nocapture %a, i16* nocapture %b) nounwind {
+vector.ph:
+ br label %vector.body
+
+vector.body: ; preds = %vector.body, %vector.ph
+ %index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
+ %gep.a = getelementptr inbounds i16* %a, i64 %index
+ %gep.b = getelementptr inbounds i16* %b, i64 %index
+ %ptr.a = bitcast i16* %gep.a to <32 x i16>*
+ %ptr.b = bitcast i16* %gep.b to <32 x i16>*
+ %load.a = load <32 x i16>* %ptr.a, align 2
+ %load.b = load <32 x i16>* %ptr.b, align 2
+ %cmp = icmp ult <32 x i16> %load.a, %load.b
+ %sel = select <32 x i1> %cmp, <32 x i16> %load.a, <32 x i16> %load.b
+ store <32 x i16> %sel, <32 x i16>* %ptr.a, align 2
+ %index.next = add i64 %index, 16
+ %loop = icmp eq i64 %index.next, 16384
+ br i1 %loop, label %for.end, label %vector.body
+
+for.end: ; preds = %vector.body
+ ret void
+
+; AVX512BW-LABEL: test109:
+; AVX512BW: vpminuw {{.*}}
+}
+
+define void @test110(i16* nocapture %a, i16* nocapture %b) nounwind {
+vector.ph:
+ br label %vector.body
+
+vector.body: ; preds = %vector.body, %vector.ph
+ %index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
+ %gep.a = getelementptr inbounds i16* %a, i64 %index
+ %gep.b = getelementptr inbounds i16* %b, i64 %index
+ %ptr.a = bitcast i16* %gep.a to <32 x i16>*
+ %ptr.b = bitcast i16* %gep.b to <32 x i16>*
+ %load.a = load <32 x i16>* %ptr.a, align 2
+ %load.b = load <32 x i16>* %ptr.b, align 2
+ %cmp = icmp ule <32 x i16> %load.a, %load.b
+ %sel = select <32 x i1> %cmp, <32 x i16> %load.a, <32 x i16> %load.b
+ store <32 x i16> %sel, <32 x i16>* %ptr.a, align 2
+ %index.next = add i64 %index, 16
+ %loop = icmp eq i64 %index.next, 16384
+ br i1 %loop, label %for.end, label %vector.body
+
+for.end: ; preds = %vector.body
+ ret void
+
+; AVX512BW-LABEL: test110:
+; AVX512BW: vpminuw {{.*}}
+}
+
+define void @test111(i16* nocapture %a, i16* nocapture %b) nounwind {
+vector.ph:
+ br label %vector.body
+
+vector.body: ; preds = %vector.body, %vector.ph
+ %index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
+ %gep.a = getelementptr inbounds i16* %a, i64 %index
+ %gep.b = getelementptr inbounds i16* %b, i64 %index
+ %ptr.a = bitcast i16* %gep.a to <32 x i16>*
+ %ptr.b = bitcast i16* %gep.b to <32 x i16>*
+ %load.a = load <32 x i16>* %ptr.a, align 2
+ %load.b = load <32 x i16>* %ptr.b, align 2
+ %cmp = icmp ugt <32 x i16> %load.a, %load.b
+ %sel = select <32 x i1> %cmp, <32 x i16> %load.a, <32 x i16> %load.b
+ store <32 x i16> %sel, <32 x i16>* %ptr.a, align 2
+ %index.next = add i64 %index, 16
+ %loop = icmp eq i64 %index.next, 16384
+ br i1 %loop, label %for.end, label %vector.body
+
+for.end: ; preds = %vector.body
+ ret void
+
+; AVX512BW-LABEL: test111:
+; AVX512BW: vpmaxuw {{.*}}
+}
+
+define void @test112(i16* nocapture %a, i16* nocapture %b) nounwind {
+vector.ph:
+ br label %vector.body
+
+vector.body: ; preds = %vector.body, %vector.ph
+ %index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
+ %gep.a = getelementptr inbounds i16* %a, i64 %index
+ %gep.b = getelementptr inbounds i16* %b, i64 %index
+ %ptr.a = bitcast i16* %gep.a to <32 x i16>*
+ %ptr.b = bitcast i16* %gep.b to <32 x i16>*
+ %load.a = load <32 x i16>* %ptr.a, align 2
+ %load.b = load <32 x i16>* %ptr.b, align 2
+ %cmp = icmp uge <32 x i16> %load.a, %load.b
+ %sel = select <32 x i1> %cmp, <32 x i16> %load.a, <32 x i16> %load.b
+ store <32 x i16> %sel, <32 x i16>* %ptr.a, align 2
+ %index.next = add i64 %index, 16
+ %loop = icmp eq i64 %index.next, 16384
+ br i1 %loop, label %for.end, label %vector.body
+
+for.end: ; preds = %vector.body
+ ret void
+
+; AVX512BW-LABEL: test112:
+; AVX512BW: vpmaxuw {{.*}}
+}
+
+define void @test113(i32* nocapture %a, i32* nocapture %b) nounwind {
+vector.ph:
+ br label %vector.body
+
+vector.body: ; preds = %vector.body, %vector.ph
+ %index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
+ %gep.a = getelementptr inbounds i32* %a, i64 %index
+ %gep.b = getelementptr inbounds i32* %b, i64 %index
+ %ptr.a = bitcast i32* %gep.a to <16 x i32>*
+ %ptr.b = bitcast i32* %gep.b to <16 x i32>*
+ %load.a = load <16 x i32>* %ptr.a, align 2
+ %load.b = load <16 x i32>* %ptr.b, align 2
+ %cmp = icmp slt <16 x i32> %load.a, %load.b
+ %sel = select <16 x i1> %cmp, <16 x i32> %load.a, <16 x i32> %load.b
+ store <16 x i32> %sel, <16 x i32>* %ptr.a, align 2
+ %index.next = add i64 %index, 8
+ %loop = icmp eq i64 %index.next, 16384
+ br i1 %loop, label %for.end, label %vector.body
+
+for.end: ; preds = %vector.body
+ ret void
+
+; AVX512F-LABEL: test113:
+; AVX512F: vpminsd {{.*}}
+}
+
+define void @test114(i32* nocapture %a, i32* nocapture %b) nounwind {
+vector.ph:
+ br label %vector.body
+
+vector.body: ; preds = %vector.body, %vector.ph
+ %index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
+ %gep.a = getelementptr inbounds i32* %a, i64 %index
+ %gep.b = getelementptr inbounds i32* %b, i64 %index
+ %ptr.a = bitcast i32* %gep.a to <16 x i32>*
+ %ptr.b = bitcast i32* %gep.b to <16 x i32>*
+ %load.a = load <16 x i32>* %ptr.a, align 2
+ %load.b = load <16 x i32>* %ptr.b, align 2
+ %cmp = icmp sle <16 x i32> %load.a, %load.b
+ %sel = select <16 x i1> %cmp, <16 x i32> %load.a, <16 x i32> %load.b
+ store <16 x i32> %sel, <16 x i32>* %ptr.a, align 2
+ %index.next = add i64 %index, 8
+ %loop = icmp eq i64 %index.next, 16384
+ br i1 %loop, label %for.end, label %vector.body
+
+for.end: ; preds = %vector.body
+ ret void
+
+; AVX512F-LABEL: test114:
+; AVX512F: vpminsd {{.*}}
+}
+
+define void @test115(i32* nocapture %a, i32* nocapture %b) nounwind {
+vector.ph:
+ br label %vector.body
+
+vector.body: ; preds = %vector.body, %vector.ph
+ %index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
+ %gep.a = getelementptr inbounds i32* %a, i64 %index
+ %gep.b = getelementptr inbounds i32* %b, i64 %index
+ %ptr.a = bitcast i32* %gep.a to <16 x i32>*
+ %ptr.b = bitcast i32* %gep.b to <16 x i32>*
+ %load.a = load <16 x i32>* %ptr.a, align 2
+ %load.b = load <16 x i32>* %ptr.b, align 2
+ %cmp = icmp sgt <16 x i32> %load.a, %load.b
+ %sel = select <16 x i1> %cmp, <16 x i32> %load.a, <16 x i32> %load.b
+ store <16 x i32> %sel, <16 x i32>* %ptr.a, align 2
+ %index.next = add i64 %index, 8
+ %loop = icmp eq i64 %index.next, 16384
+ br i1 %loop, label %for.end, label %vector.body
+
+for.end: ; preds = %vector.body
+ ret void
+
+; AVX512F-LABEL: test115:
+; AVX512F: vpmaxsd {{.*}}
+}
+
+define void @test116(i32* nocapture %a, i32* nocapture %b) nounwind {
+vector.ph:
+ br label %vector.body
+
+vector.body: ; preds = %vector.body, %vector.ph
+ %index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
+ %gep.a = getelementptr inbounds i32* %a, i64 %index
+ %gep.b = getelementptr inbounds i32* %b, i64 %index
+ %ptr.a = bitcast i32* %gep.a to <16 x i32>*
+ %ptr.b = bitcast i32* %gep.b to <16 x i32>*
+ %load.a = load <16 x i32>* %ptr.a, align 2
+ %load.b = load <16 x i32>* %ptr.b, align 2
+ %cmp = icmp sge <16 x i32> %load.a, %load.b
+ %sel = select <16 x i1> %cmp, <16 x i32> %load.a, <16 x i32> %load.b
+ store <16 x i32> %sel, <16 x i32>* %ptr.a, align 2
+ %index.next = add i64 %index, 8
+ %loop = icmp eq i64 %index.next, 16384
+ br i1 %loop, label %for.end, label %vector.body
+
+for.end: ; preds = %vector.body
+ ret void
+
+; AVX512F-LABEL: test116:
+; AVX512F: vpmaxsd {{.*}}
+}
+
+define void @test117(i32* nocapture %a, i32* nocapture %b) nounwind {
+vector.ph:
+ br label %vector.body
+
+vector.body: ; preds = %vector.body, %vector.ph
+ %index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
+ %gep.a = getelementptr inbounds i32* %a, i64 %index
+ %gep.b = getelementptr inbounds i32* %b, i64 %index
+ %ptr.a = bitcast i32* %gep.a to <16 x i32>*
+ %ptr.b = bitcast i32* %gep.b to <16 x i32>*
+ %load.a = load <16 x i32>* %ptr.a, align 2
+ %load.b = load <16 x i32>* %ptr.b, align 2
+ %cmp = icmp ult <16 x i32> %load.a, %load.b
+ %sel = select <16 x i1> %cmp, <16 x i32> %load.a, <16 x i32> %load.b
+ store <16 x i32> %sel, <16 x i32>* %ptr.a, align 2
+ %index.next = add i64 %index, 8
+ %loop = icmp eq i64 %index.next, 16384
+ br i1 %loop, label %for.end, label %vector.body
+
+for.end: ; preds = %vector.body
+ ret void
+
+; AVX512F-LABEL: test117:
+; AVX512F: vpminud {{.*}}
+}
+
+define void @test118(i32* nocapture %a, i32* nocapture %b) nounwind {
+vector.ph:
+ br label %vector.body
+
+vector.body: ; preds = %vector.body, %vector.ph
+ %index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
+ %gep.a = getelementptr inbounds i32* %a, i64 %index
+ %gep.b = getelementptr inbounds i32* %b, i64 %index
+ %ptr.a = bitcast i32* %gep.a to <16 x i32>*
+ %ptr.b = bitcast i32* %gep.b to <16 x i32>*
+ %load.a = load <16 x i32>* %ptr.a, align 2
+ %load.b = load <16 x i32>* %ptr.b, align 2
+ %cmp = icmp ule <16 x i32> %load.a, %load.b
+ %sel = select <16 x i1> %cmp, <16 x i32> %load.a, <16 x i32> %load.b
+ store <16 x i32> %sel, <16 x i32>* %ptr.a, align 2
+ %index.next = add i64 %index, 8
+ %loop = icmp eq i64 %index.next, 16384
+ br i1 %loop, label %for.end, label %vector.body
+
+for.end: ; preds = %vector.body
+ ret void
+
+; AVX512F-LABEL: test118:
+; AVX512F: vpminud {{.*}}
+}
+
+define void @test119(i32* nocapture %a, i32* nocapture %b) nounwind {
+vector.ph:
+ br label %vector.body
+
+vector.body: ; preds = %vector.body, %vector.ph
+ %index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
+ %gep.a = getelementptr inbounds i32* %a, i64 %index
+ %gep.b = getelementptr inbounds i32* %b, i64 %index
+ %ptr.a = bitcast i32* %gep.a to <16 x i32>*
+ %ptr.b = bitcast i32* %gep.b to <16 x i32>*
+ %load.a = load <16 x i32>* %ptr.a, align 2
+ %load.b = load <16 x i32>* %ptr.b, align 2
+ %cmp = icmp ugt <16 x i32> %load.a, %load.b
+ %sel = select <16 x i1> %cmp, <16 x i32> %load.a, <16 x i32> %load.b
+ store <16 x i32> %sel, <16 x i32>* %ptr.a, align 2
+ %index.next = add i64 %index, 8
+ %loop = icmp eq i64 %index.next, 16384
+ br i1 %loop, label %for.end, label %vector.body
+
+for.end: ; preds = %vector.body
+ ret void
+
+; AVX512F-LABEL: test119:
+; AVX512F: vpmaxud
+}
+
+define void @test120(i32* nocapture %a, i32* nocapture %b) nounwind {
+vector.ph:
+ br label %vector.body
+
+vector.body: ; preds = %vector.body, %vector.ph
+ %index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
+ %gep.a = getelementptr inbounds i32* %a, i64 %index
+ %gep.b = getelementptr inbounds i32* %b, i64 %index
+ %ptr.a = bitcast i32* %gep.a to <16 x i32>*
+ %ptr.b = bitcast i32* %gep.b to <16 x i32>*
+ %load.a = load <16 x i32>* %ptr.a, align 2
+ %load.b = load <16 x i32>* %ptr.b, align 2
+ %cmp = icmp uge <16 x i32> %load.a, %load.b
+ %sel = select <16 x i1> %cmp, <16 x i32> %load.a, <16 x i32> %load.b
+ store <16 x i32> %sel, <16 x i32>* %ptr.a, align 2
+ %index.next = add i64 %index, 8
+ %loop = icmp eq i64 %index.next, 16384
+ br i1 %loop, label %for.end, label %vector.body
+
+for.end: ; preds = %vector.body
+ ret void
+
+; AVX512F-LABEL: test120:
+; AVX512F: vpmaxud
+}
+
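+; Tests 121-128: <8 x i64> signed and unsigned min/max; checked under AVX512F.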
+define void @test121(i32* nocapture %a, i32* nocapture %b) nounwind {
+vector.ph:
+ br label %vector.body
+
+vector.body: ; preds = %vector.body, %vector.ph
+ %index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
+ %gep.a = getelementptr inbounds i32* %a, i64 %index
+ %gep.b = getelementptr inbounds i32* %b, i64 %index
+ %ptr.a = bitcast i32* %gep.a to <8 x i64>*
+ %ptr.b = bitcast i32* %gep.b to <8 x i64>*
+ %load.a = load <8 x i64>* %ptr.a, align 2
+ %load.b = load <8 x i64>* %ptr.b, align 2
+ %cmp = icmp slt <8 x i64> %load.a, %load.b
+ %sel = select <8 x i1> %cmp, <8 x i64> %load.a, <8 x i64> %load.b
+ store <8 x i64> %sel, <8 x i64>* %ptr.a, align 2
+ %index.next = add i64 %index, 8
+ %loop = icmp eq i64 %index.next, 16384
+ br i1 %loop, label %for.end, label %vector.body
+
+for.end: ; preds = %vector.body
+ ret void
+
+; AVX512F-LABEL: test121:
+; AVX512F: vpminsq
+}
+
+define void @test122(i32* nocapture %a, i32* nocapture %b) nounwind {
+vector.ph:
+ br label %vector.body
+
+vector.body: ; preds = %vector.body, %vector.ph
+ %index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
+ %gep.a = getelementptr inbounds i32* %a, i64 %index
+ %gep.b = getelementptr inbounds i32* %b, i64 %index
+ %ptr.a = bitcast i32* %gep.a to <8 x i64>*
+ %ptr.b = bitcast i32* %gep.b to <8 x i64>*
+ %load.a = load <8 x i64>* %ptr.a, align 2
+ %load.b = load <8 x i64>* %ptr.b, align 2
+ %cmp = icmp sle <8 x i64> %load.a, %load.b
+ %sel = select <8 x i1> %cmp, <8 x i64> %load.a, <8 x i64> %load.b
+ store <8 x i64> %sel, <8 x i64>* %ptr.a, align 2
+ %index.next = add i64 %index, 8
+ %loop = icmp eq i64 %index.next, 16384
+ br i1 %loop, label %for.end, label %vector.body
+
+for.end: ; preds = %vector.body
+ ret void
+
+; AVX512F-LABEL: test122:
+; AVX512F: vpminsq
+}
+
+define void @test123(i32* nocapture %a, i32* nocapture %b) nounwind {
+vector.ph:
+ br label %vector.body
+
+vector.body: ; preds = %vector.body, %vector.ph
+ %index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
+ %gep.a = getelementptr inbounds i32* %a, i64 %index
+ %gep.b = getelementptr inbounds i32* %b, i64 %index
+ %ptr.a = bitcast i32* %gep.a to <8 x i64>*
+ %ptr.b = bitcast i32* %gep.b to <8 x i64>*
+ %load.a = load <8 x i64>* %ptr.a, align 2
+ %load.b = load <8 x i64>* %ptr.b, align 2
+ %cmp = icmp sgt <8 x i64> %load.a, %load.b
+ %sel = select <8 x i1> %cmp, <8 x i64> %load.a, <8 x i64> %load.b
+ store <8 x i64> %sel, <8 x i64>* %ptr.a, align 2
+ %index.next = add i64 %index, 8
+ %loop = icmp eq i64 %index.next, 16384
+ br i1 %loop, label %for.end, label %vector.body
+
+for.end: ; preds = %vector.body
+ ret void
+
+; AVX512F-LABEL: test123:
+; AVX512F: vpmaxsq
+}
+
+define void @test124(i32* nocapture %a, i32* nocapture %b) nounwind {
+vector.ph:
+ br label %vector.body
+
+vector.body: ; preds = %vector.body, %vector.ph
+ %index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
+ %gep.a = getelementptr inbounds i32* %a, i64 %index
+ %gep.b = getelementptr inbounds i32* %b, i64 %index
+ %ptr.a = bitcast i32* %gep.a to <8 x i64>*
+ %ptr.b = bitcast i32* %gep.b to <8 x i64>*
+ %load.a = load <8 x i64>* %ptr.a, align 2
+ %load.b = load <8 x i64>* %ptr.b, align 2
+ %cmp = icmp sge <8 x i64> %load.a, %load.b
+ %sel = select <8 x i1> %cmp, <8 x i64> %load.a, <8 x i64> %load.b
+ store <8 x i64> %sel, <8 x i64>* %ptr.a, align 2
+ %index.next = add i64 %index, 8
+ %loop = icmp eq i64 %index.next, 16384
+ br i1 %loop, label %for.end, label %vector.body
+
+for.end: ; preds = %vector.body
+ ret void
+
+; AVX512F-LABEL: test124:
+; AVX512F: vpmaxsq
+}
+
+define void @test125(i32* nocapture %a, i32* nocapture %b) nounwind {
+vector.ph:
+ br label %vector.body
+
+vector.body: ; preds = %vector.body, %vector.ph
+ %index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
+ %gep.a = getelementptr inbounds i32* %a, i64 %index
+ %gep.b = getelementptr inbounds i32* %b, i64 %index
+ %ptr.a = bitcast i32* %gep.a to <8 x i64>*
+ %ptr.b = bitcast i32* %gep.b to <8 x i64>*
+ %load.a = load <8 x i64>* %ptr.a, align 2
+ %load.b = load <8 x i64>* %ptr.b, align 2
+ %cmp = icmp ult <8 x i64> %load.a, %load.b
+ %sel = select <8 x i1> %cmp, <8 x i64> %load.a, <8 x i64> %load.b
+ store <8 x i64> %sel, <8 x i64>* %ptr.a, align 2
+ %index.next = add i64 %index, 8
+ %loop = icmp eq i64 %index.next, 16384
+ br i1 %loop, label %for.end, label %vector.body
+
+for.end: ; preds = %vector.body
+ ret void
+
+; AVX512F-LABEL: test125:
+; AVX512F: vpminuq
+}
+
+define void @test126(i32* nocapture %a, i32* nocapture %b) nounwind {
+vector.ph:
+ br label %vector.body
+
+vector.body: ; preds = %vector.body, %vector.ph
+ %index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
+ %gep.a = getelementptr inbounds i32* %a, i64 %index
+ %gep.b = getelementptr inbounds i32* %b, i64 %index
+ %ptr.a = bitcast i32* %gep.a to <8 x i64>*
+ %ptr.b = bitcast i32* %gep.b to <8 x i64>*
+ %load.a = load <8 x i64>* %ptr.a, align 2
+ %load.b = load <8 x i64>* %ptr.b, align 2
+ %cmp = icmp ule <8 x i64> %load.a, %load.b
+ %sel = select <8 x i1> %cmp, <8 x i64> %load.a, <8 x i64> %load.b
+ store <8 x i64> %sel, <8 x i64>* %ptr.a, align 2
+ %index.next = add i64 %index, 8
+ %loop = icmp eq i64 %index.next, 16384
+ br i1 %loop, label %for.end, label %vector.body
+
+for.end: ; preds = %vector.body
+ ret void
+
+; AVX512F-LABEL: test126:
+; AVX512F: vpminuq
+}
+
+define void @test127(i32* nocapture %a, i32* nocapture %b) nounwind {
+vector.ph:
+ br label %vector.body
+
+vector.body: ; preds = %vector.body, %vector.ph
+ %index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
+ %gep.a = getelementptr inbounds i32* %a, i64 %index
+ %gep.b = getelementptr inbounds i32* %b, i64 %index
+ %ptr.a = bitcast i32* %gep.a to <8 x i64>*
+ %ptr.b = bitcast i32* %gep.b to <8 x i64>*
+ %load.a = load <8 x i64>* %ptr.a, align 2
+ %load.b = load <8 x i64>* %ptr.b, align 2
+ %cmp = icmp ugt <8 x i64> %load.a, %load.b
+ %sel = select <8 x i1> %cmp, <8 x i64> %load.a, <8 x i64> %load.b
+ store <8 x i64> %sel, <8 x i64>* %ptr.a, align 2
+ %index.next = add i64 %index, 8
+ %loop = icmp eq i64 %index.next, 16384
+ br i1 %loop, label %for.end, label %vector.body
+
+for.end: ; preds = %vector.body
+ ret void
+
+; AVX512F-LABEL: test127:
+; AVX512F: vpmaxuq
+}
+
+define void @test128(i32* nocapture %a, i32* nocapture %b) nounwind {
+vector.ph:
+ br label %vector.body
+
+vector.body: ; preds = %vector.body, %vector.ph
+ %index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
+ %gep.a = getelementptr inbounds i32* %a, i64 %index
+ %gep.b = getelementptr inbounds i32* %b, i64 %index
+ %ptr.a = bitcast i32* %gep.a to <8 x i64>*
+ %ptr.b = bitcast i32* %gep.b to <8 x i64>*
+ %load.a = load <8 x i64>* %ptr.a, align 2
+ %load.b = load <8 x i64>* %ptr.b, align 2
+ %cmp = icmp uge <8 x i64> %load.a, %load.b
+ %sel = select <8 x i1> %cmp, <8 x i64> %load.a, <8 x i64> %load.b
+ store <8 x i64> %sel, <8 x i64>* %ptr.a, align 2
+ %index.next = add i64 %index, 8
+ %loop = icmp eq i64 %index.next, 16384
+ br i1 %loop, label %for.end, label %vector.body
+
+for.end: ; preds = %vector.body
+ ret void
+
+; AVX512F-LABEL: test128:
+; AVX512F: vpmaxuq
+}
+
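+; Tests 129-136: <64 x i8> with the select operands swapped (%load.b taken when
+; the compare is true), so slt/sle lower to vpmaxsb, sgt/sge to vpminsb, and
+; likewise for the unsigned forms; checked under AVX512BW.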
+define void @test129(i8* nocapture %a, i8* nocapture %b) nounwind {
+vector.ph:
+ br label %vector.body
+
+vector.body: ; preds = %vector.body, %vector.ph
+ %index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
+ %gep.a = getelementptr inbounds i8* %a, i64 %index
+ %gep.b = getelementptr inbounds i8* %b, i64 %index
+ %ptr.a = bitcast i8* %gep.a to <64 x i8>*
+ %ptr.b = bitcast i8* %gep.b to <64 x i8>*
+ %load.a = load <64 x i8>* %ptr.a, align 2
+ %load.b = load <64 x i8>* %ptr.b, align 2
+ %cmp = icmp slt <64 x i8> %load.a, %load.b
+ %sel = select <64 x i1> %cmp, <64 x i8> %load.b, <64 x i8> %load.a
+ store <64 x i8> %sel, <64 x i8>* %ptr.a, align 2
+ %index.next = add i64 %index, 32
+ %loop = icmp eq i64 %index.next, 16384
+ br i1 %loop, label %for.end, label %vector.body
+
+for.end: ; preds = %vector.body
+ ret void
+
+; AVX512BW-LABEL: test129:
+; AVX512BW: vpmaxsb
+}
+
+define void @test130(i8* nocapture %a, i8* nocapture %b) nounwind {
+vector.ph:
+ br label %vector.body
+
+vector.body: ; preds = %vector.body, %vector.ph
+ %index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
+ %gep.a = getelementptr inbounds i8* %a, i64 %index
+ %gep.b = getelementptr inbounds i8* %b, i64 %index
+ %ptr.a = bitcast i8* %gep.a to <64 x i8>*
+ %ptr.b = bitcast i8* %gep.b to <64 x i8>*
+ %load.a = load <64 x i8>* %ptr.a, align 2
+ %load.b = load <64 x i8>* %ptr.b, align 2
+ %cmp = icmp sle <64 x i8> %load.a, %load.b
+ %sel = select <64 x i1> %cmp, <64 x i8> %load.b, <64 x i8> %load.a
+ store <64 x i8> %sel, <64 x i8>* %ptr.a, align 2
+ %index.next = add i64 %index, 32
+ %loop = icmp eq i64 %index.next, 16384
+ br i1 %loop, label %for.end, label %vector.body
+
+for.end: ; preds = %vector.body
+ ret void
+
+; AVX512BW-LABEL: test130:
+; AVX512BW: vpmaxsb
+}
+
+define void @test131(i8* nocapture %a, i8* nocapture %b) nounwind {
+vector.ph:
+ br label %vector.body
+
+vector.body: ; preds = %vector.body, %vector.ph
+ %index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
+ %gep.a = getelementptr inbounds i8* %a, i64 %index
+ %gep.b = getelementptr inbounds i8* %b, i64 %index
+ %ptr.a = bitcast i8* %gep.a to <64 x i8>*
+ %ptr.b = bitcast i8* %gep.b to <64 x i8>*
+ %load.a = load <64 x i8>* %ptr.a, align 2
+ %load.b = load <64 x i8>* %ptr.b, align 2
+ %cmp = icmp sgt <64 x i8> %load.a, %load.b
+ %sel = select <64 x i1> %cmp, <64 x i8> %load.b, <64 x i8> %load.a
+ store <64 x i8> %sel, <64 x i8>* %ptr.a, align 2
+ %index.next = add i64 %index, 32
+ %loop = icmp eq i64 %index.next, 16384
+ br i1 %loop, label %for.end, label %vector.body
+
+for.end: ; preds = %vector.body
+ ret void
+
+; AVX512BW-LABEL: test131:
+; AVX512BW: vpminsb
+}
+
+define void @test132(i8* nocapture %a, i8* nocapture %b) nounwind {
+vector.ph:
+ br label %vector.body
+
+vector.body: ; preds = %vector.body, %vector.ph
+ %index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
+ %gep.a = getelementptr inbounds i8* %a, i64 %index
+ %gep.b = getelementptr inbounds i8* %b, i64 %index
+ %ptr.a = bitcast i8* %gep.a to <64 x i8>*
+ %ptr.b = bitcast i8* %gep.b to <64 x i8>*
+ %load.a = load <64 x i8>* %ptr.a, align 2
+ %load.b = load <64 x i8>* %ptr.b, align 2
+ %cmp = icmp sge <64 x i8> %load.a, %load.b
+ %sel = select <64 x i1> %cmp, <64 x i8> %load.b, <64 x i8> %load.a
+ store <64 x i8> %sel, <64 x i8>* %ptr.a, align 2
+ %index.next = add i64 %index, 32
+ %loop = icmp eq i64 %index.next, 16384
+ br i1 %loop, label %for.end, label %vector.body
+
+for.end: ; preds = %vector.body
+ ret void
+
+; AVX512BW-LABEL: test132:
+; AVX512BW: vpminsb
+}
+
+define void @test133(i8* nocapture %a, i8* nocapture %b) nounwind {
+vector.ph:
+ br label %vector.body
+
+vector.body: ; preds = %vector.body, %vector.ph
+ %index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
+ %gep.a = getelementptr inbounds i8* %a, i64 %index
+ %gep.b = getelementptr inbounds i8* %b, i64 %index
+ %ptr.a = bitcast i8* %gep.a to <64 x i8>*
+ %ptr.b = bitcast i8* %gep.b to <64 x i8>*
+ %load.a = load <64 x i8>* %ptr.a, align 2
+ %load.b = load <64 x i8>* %ptr.b, align 2
+ %cmp = icmp ult <64 x i8> %load.a, %load.b
+ %sel = select <64 x i1> %cmp, <64 x i8> %load.b, <64 x i8> %load.a
+ store <64 x i8> %sel, <64 x i8>* %ptr.a, align 2
+ %index.next = add i64 %index, 32
+ %loop = icmp eq i64 %index.next, 16384
+ br i1 %loop, label %for.end, label %vector.body
+
+for.end: ; preds = %vector.body
+ ret void
+
+; AVX512BW-LABEL: test133:
+; AVX512BW: vpmaxub
+}
+
+define void @test134(i8* nocapture %a, i8* nocapture %b) nounwind {
+vector.ph:
+ br label %vector.body
+
+vector.body: ; preds = %vector.body, %vector.ph
+ %index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
+ %gep.a = getelementptr inbounds i8* %a, i64 %index
+ %gep.b = getelementptr inbounds i8* %b, i64 %index
+ %ptr.a = bitcast i8* %gep.a to <64 x i8>*
+ %ptr.b = bitcast i8* %gep.b to <64 x i8>*
+ %load.a = load <64 x i8>* %ptr.a, align 2
+ %load.b = load <64 x i8>* %ptr.b, align 2
+ %cmp = icmp ule <64 x i8> %load.a, %load.b
+ %sel = select <64 x i1> %cmp, <64 x i8> %load.b, <64 x i8> %load.a
+ store <64 x i8> %sel, <64 x i8>* %ptr.a, align 2
+ %index.next = add i64 %index, 32
+ %loop = icmp eq i64 %index.next, 16384
+ br i1 %loop, label %for.end, label %vector.body
+
+for.end: ; preds = %vector.body
+ ret void
+
+; AVX512BW-LABEL: test134:
+; AVX512BW: vpmaxub
+}
+
+define void @test135(i8* nocapture %a, i8* nocapture %b) nounwind {
+vector.ph:
+ br label %vector.body
+
+vector.body: ; preds = %vector.body, %vector.ph
+ %index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
+ %gep.a = getelementptr inbounds i8* %a, i64 %index
+ %gep.b = getelementptr inbounds i8* %b, i64 %index
+ %ptr.a = bitcast i8* %gep.a to <64 x i8>*
+ %ptr.b = bitcast i8* %gep.b to <64 x i8>*
+ %load.a = load <64 x i8>* %ptr.a, align 2
+ %load.b = load <64 x i8>* %ptr.b, align 2
+ %cmp = icmp ugt <64 x i8> %load.a, %load.b
+ %sel = select <64 x i1> %cmp, <64 x i8> %load.b, <64 x i8> %load.a
+ store <64 x i8> %sel, <64 x i8>* %ptr.a, align 2
+ %index.next = add i64 %index, 32
+ %loop = icmp eq i64 %index.next, 16384
+ br i1 %loop, label %for.end, label %vector.body
+
+for.end: ; preds = %vector.body
+ ret void
+
+; AVX512BW-LABEL: test135:
+; AVX512BW: vpminub
+}
+
+define void @test136(i8* nocapture %a, i8* nocapture %b) nounwind {
+vector.ph:
+ br label %vector.body
+
+vector.body: ; preds = %vector.body, %vector.ph
+ %index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
+ %gep.a = getelementptr inbounds i8* %a, i64 %index
+ %gep.b = getelementptr inbounds i8* %b, i64 %index
+ %ptr.a = bitcast i8* %gep.a to <64 x i8>*
+ %ptr.b = bitcast i8* %gep.b to <64 x i8>*
+ %load.a = load <64 x i8>* %ptr.a, align 2
+ %load.b = load <64 x i8>* %ptr.b, align 2
+ %cmp = icmp uge <64 x i8> %load.a, %load.b
+ %sel = select <64 x i1> %cmp, <64 x i8> %load.b, <64 x i8> %load.a
+ store <64 x i8> %sel, <64 x i8>* %ptr.a, align 2
+ %index.next = add i64 %index, 32
+ %loop = icmp eq i64 %index.next, 16384
+ br i1 %loop, label %for.end, label %vector.body
+
+for.end: ; preds = %vector.body
+ ret void
+
+; AVX512BW-LABEL: test136:
+; AVX512BW: vpminub
+}
+
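+; Tests 137-144: <32 x i16>, swapped-operand selects; checked under AVX512BW.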
+define void @test137(i16* nocapture %a, i16* nocapture %b) nounwind {
+vector.ph:
+ br label %vector.body
+
+vector.body: ; preds = %vector.body, %vector.ph
+ %index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
+ %gep.a = getelementptr inbounds i16* %a, i64 %index
+ %gep.b = getelementptr inbounds i16* %b, i64 %index
+ %ptr.a = bitcast i16* %gep.a to <32 x i16>*
+ %ptr.b = bitcast i16* %gep.b to <32 x i16>*
+ %load.a = load <32 x i16>* %ptr.a, align 2
+ %load.b = load <32 x i16>* %ptr.b, align 2
+ %cmp = icmp slt <32 x i16> %load.a, %load.b
+ %sel = select <32 x i1> %cmp, <32 x i16> %load.b, <32 x i16> %load.a
+ store <32 x i16> %sel, <32 x i16>* %ptr.a, align 2
+ %index.next = add i64 %index, 16
+ %loop = icmp eq i64 %index.next, 16384
+ br i1 %loop, label %for.end, label %vector.body
+
+for.end: ; preds = %vector.body
+ ret void
+
+; AVX512BW-LABEL: test137:
+; AVX512BW: vpmaxsw
+}
+
+define void @test138(i16* nocapture %a, i16* nocapture %b) nounwind {
+vector.ph:
+ br label %vector.body
+
+vector.body: ; preds = %vector.body, %vector.ph
+ %index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
+ %gep.a = getelementptr inbounds i16* %a, i64 %index
+ %gep.b = getelementptr inbounds i16* %b, i64 %index
+ %ptr.a = bitcast i16* %gep.a to <32 x i16>*
+ %ptr.b = bitcast i16* %gep.b to <32 x i16>*
+ %load.a = load <32 x i16>* %ptr.a, align 2
+ %load.b = load <32 x i16>* %ptr.b, align 2
+ %cmp = icmp sle <32 x i16> %load.a, %load.b
+ %sel = select <32 x i1> %cmp, <32 x i16> %load.b, <32 x i16> %load.a
+ store <32 x i16> %sel, <32 x i16>* %ptr.a, align 2
+ %index.next = add i64 %index, 16
+ %loop = icmp eq i64 %index.next, 16384
+ br i1 %loop, label %for.end, label %vector.body
+
+for.end: ; preds = %vector.body
+ ret void
+
+; AVX512BW-LABEL: test138:
+; AVX512BW: vpmaxsw
+}
+
+define void @test139(i16* nocapture %a, i16* nocapture %b) nounwind {
+vector.ph:
+ br label %vector.body
+
+vector.body: ; preds = %vector.body, %vector.ph
+ %index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
+ %gep.a = getelementptr inbounds i16* %a, i64 %index
+ %gep.b = getelementptr inbounds i16* %b, i64 %index
+ %ptr.a = bitcast i16* %gep.a to <32 x i16>*
+ %ptr.b = bitcast i16* %gep.b to <32 x i16>*
+ %load.a = load <32 x i16>* %ptr.a, align 2
+ %load.b = load <32 x i16>* %ptr.b, align 2
+ %cmp = icmp sgt <32 x i16> %load.a, %load.b
+ %sel = select <32 x i1> %cmp, <32 x i16> %load.b, <32 x i16> %load.a
+ store <32 x i16> %sel, <32 x i16>* %ptr.a, align 2
+ %index.next = add i64 %index, 16
+ %loop = icmp eq i64 %index.next, 16384
+ br i1 %loop, label %for.end, label %vector.body
+
+for.end: ; preds = %vector.body
+ ret void
+
+; AVX512BW-LABEL: test139:
+; AVX512BW: vpminsw
+}
+
+define void @test140(i16* nocapture %a, i16* nocapture %b) nounwind {
+vector.ph:
+ br label %vector.body
+
+vector.body: ; preds = %vector.body, %vector.ph
+ %index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
+ %gep.a = getelementptr inbounds i16* %a, i64 %index
+ %gep.b = getelementptr inbounds i16* %b, i64 %index
+ %ptr.a = bitcast i16* %gep.a to <32 x i16>*
+ %ptr.b = bitcast i16* %gep.b to <32 x i16>*
+ %load.a = load <32 x i16>* %ptr.a, align 2
+ %load.b = load <32 x i16>* %ptr.b, align 2
+ %cmp = icmp sge <32 x i16> %load.a, %load.b
+ %sel = select <32 x i1> %cmp, <32 x i16> %load.b, <32 x i16> %load.a
+ store <32 x i16> %sel, <32 x i16>* %ptr.a, align 2
+ %index.next = add i64 %index, 16
+ %loop = icmp eq i64 %index.next, 16384
+ br i1 %loop, label %for.end, label %vector.body
+
+for.end: ; preds = %vector.body
+ ret void
+
+; AVX512BW-LABEL: test140:
+; AVX512BW: vpminsw
+}
+
+define void @test141(i16* nocapture %a, i16* nocapture %b) nounwind {
+vector.ph:
+ br label %vector.body
+
+vector.body: ; preds = %vector.body, %vector.ph
+ %index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
+ %gep.a = getelementptr inbounds i16* %a, i64 %index
+ %gep.b = getelementptr inbounds i16* %b, i64 %index
+ %ptr.a = bitcast i16* %gep.a to <32 x i16>*
+ %ptr.b = bitcast i16* %gep.b to <32 x i16>*
+ %load.a = load <32 x i16>* %ptr.a, align 2
+ %load.b = load <32 x i16>* %ptr.b, align 2
+ %cmp = icmp ult <32 x i16> %load.a, %load.b
+ %sel = select <32 x i1> %cmp, <32 x i16> %load.b, <32 x i16> %load.a
+ store <32 x i16> %sel, <32 x i16>* %ptr.a, align 2
+ %index.next = add i64 %index, 16
+ %loop = icmp eq i64 %index.next, 16384
+ br i1 %loop, label %for.end, label %vector.body
+
+for.end: ; preds = %vector.body
+ ret void
+
+; AVX512BW-LABEL: test141:
+; AVX512BW: vpmaxuw
+}
+
+define void @test142(i16* nocapture %a, i16* nocapture %b) nounwind {
+vector.ph:
+ br label %vector.body
+
+vector.body: ; preds = %vector.body, %vector.ph
+ %index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
+ %gep.a = getelementptr inbounds i16* %a, i64 %index
+ %gep.b = getelementptr inbounds i16* %b, i64 %index
+ %ptr.a = bitcast i16* %gep.a to <32 x i16>*
+ %ptr.b = bitcast i16* %gep.b to <32 x i16>*
+ %load.a = load <32 x i16>* %ptr.a, align 2
+ %load.b = load <32 x i16>* %ptr.b, align 2
+ %cmp = icmp ule <32 x i16> %load.a, %load.b
+ %sel = select <32 x i1> %cmp, <32 x i16> %load.b, <32 x i16> %load.a
+ store <32 x i16> %sel, <32 x i16>* %ptr.a, align 2
+ %index.next = add i64 %index, 16
+ %loop = icmp eq i64 %index.next, 16384
+ br i1 %loop, label %for.end, label %vector.body
+
+for.end: ; preds = %vector.body
+ ret void
+
+; AVX512BW-LABEL: test142:
+; AVX512BW: vpmaxuw
+}
+
+define void @test143(i16* nocapture %a, i16* nocapture %b) nounwind {
+vector.ph:
+ br label %vector.body
+
+vector.body: ; preds = %vector.body, %vector.ph
+ %index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
+ %gep.a = getelementptr inbounds i16* %a, i64 %index
+ %gep.b = getelementptr inbounds i16* %b, i64 %index
+ %ptr.a = bitcast i16* %gep.a to <32 x i16>*
+ %ptr.b = bitcast i16* %gep.b to <32 x i16>*
+ %load.a = load <32 x i16>* %ptr.a, align 2
+ %load.b = load <32 x i16>* %ptr.b, align 2
+ %cmp = icmp ugt <32 x i16> %load.a, %load.b
+ %sel = select <32 x i1> %cmp, <32 x i16> %load.b, <32 x i16> %load.a
+ store <32 x i16> %sel, <32 x i16>* %ptr.a, align 2
+ %index.next = add i64 %index, 16
+ %loop = icmp eq i64 %index.next, 16384
+ br i1 %loop, label %for.end, label %vector.body
+
+for.end: ; preds = %vector.body
+ ret void
+
+; AVX512BW-LABEL: test143:
+; AVX512BW: vpminuw
+}
+
+define void @test144(i16* nocapture %a, i16* nocapture %b) nounwind {
+vector.ph:
+ br label %vector.body
+
+vector.body: ; preds = %vector.body, %vector.ph
+ %index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
+ %gep.a = getelementptr inbounds i16* %a, i64 %index
+ %gep.b = getelementptr inbounds i16* %b, i64 %index
+ %ptr.a = bitcast i16* %gep.a to <32 x i16>*
+ %ptr.b = bitcast i16* %gep.b to <32 x i16>*
+ %load.a = load <32 x i16>* %ptr.a, align 2
+ %load.b = load <32 x i16>* %ptr.b, align 2
+ %cmp = icmp uge <32 x i16> %load.a, %load.b
+ %sel = select <32 x i1> %cmp, <32 x i16> %load.b, <32 x i16> %load.a
+ store <32 x i16> %sel, <32 x i16>* %ptr.a, align 2
+ %index.next = add i64 %index, 16
+ %loop = icmp eq i64 %index.next, 16384
+ br i1 %loop, label %for.end, label %vector.body
+
+for.end: ; preds = %vector.body
+ ret void
+
+; AVX512BW-LABEL: test144:
+; AVX512BW: vpminuw
+}
+
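+; Tests 145-152: <16 x i32>, swapped-operand selects; checked under AVX512F.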
+define void @test145(i32* nocapture %a, i32* nocapture %b) nounwind {
+vector.ph:
+ br label %vector.body
+
+vector.body: ; preds = %vector.body, %vector.ph
+ %index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
+ %gep.a = getelementptr inbounds i32* %a, i64 %index
+ %gep.b = getelementptr inbounds i32* %b, i64 %index
+ %ptr.a = bitcast i32* %gep.a to <16 x i32>*
+ %ptr.b = bitcast i32* %gep.b to <16 x i32>*
+ %load.a = load <16 x i32>* %ptr.a, align 2
+ %load.b = load <16 x i32>* %ptr.b, align 2
+ %cmp = icmp slt <16 x i32> %load.a, %load.b
+ %sel = select <16 x i1> %cmp, <16 x i32> %load.b, <16 x i32> %load.a
+ store <16 x i32> %sel, <16 x i32>* %ptr.a, align 2
+ %index.next = add i64 %index, 8
+ %loop = icmp eq i64 %index.next, 16384
+ br i1 %loop, label %for.end, label %vector.body
+
+for.end: ; preds = %vector.body
+ ret void
+
+; AVX512F-LABEL: test145:
+; AVX512F: vpmaxsd
+}
+
+define void @test146(i32* nocapture %a, i32* nocapture %b) nounwind {
+vector.ph:
+ br label %vector.body
+
+vector.body: ; preds = %vector.body, %vector.ph
+ %index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
+ %gep.a = getelementptr inbounds i32* %a, i64 %index
+ %gep.b = getelementptr inbounds i32* %b, i64 %index
+ %ptr.a = bitcast i32* %gep.a to <16 x i32>*
+ %ptr.b = bitcast i32* %gep.b to <16 x i32>*
+ %load.a = load <16 x i32>* %ptr.a, align 2
+ %load.b = load <16 x i32>* %ptr.b, align 2
+ %cmp = icmp sle <16 x i32> %load.a, %load.b
+ %sel = select <16 x i1> %cmp, <16 x i32> %load.b, <16 x i32> %load.a
+ store <16 x i32> %sel, <16 x i32>* %ptr.a, align 2
+ %index.next = add i64 %index, 8
+ %loop = icmp eq i64 %index.next, 16384
+ br i1 %loop, label %for.end, label %vector.body
+
+for.end: ; preds = %vector.body
+ ret void
+
+; AVX512F-LABEL: test146:
+; AVX512F: vpmaxsd
+}
+
+define void @test147(i32* nocapture %a, i32* nocapture %b) nounwind {
+vector.ph:
+ br label %vector.body
+
+vector.body: ; preds = %vector.body, %vector.ph
+ %index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
+ %gep.a = getelementptr inbounds i32* %a, i64 %index
+ %gep.b = getelementptr inbounds i32* %b, i64 %index
+ %ptr.a = bitcast i32* %gep.a to <16 x i32>*
+ %ptr.b = bitcast i32* %gep.b to <16 x i32>*
+ %load.a = load <16 x i32>* %ptr.a, align 2
+ %load.b = load <16 x i32>* %ptr.b, align 2
+ %cmp = icmp sgt <16 x i32> %load.a, %load.b
+ %sel = select <16 x i1> %cmp, <16 x i32> %load.b, <16 x i32> %load.a
+ store <16 x i32> %sel, <16 x i32>* %ptr.a, align 2
+ %index.next = add i64 %index, 8
+ %loop = icmp eq i64 %index.next, 16384
+ br i1 %loop, label %for.end, label %vector.body
+
+for.end: ; preds = %vector.body
+ ret void
+
+; AVX512F-LABEL: test147:
+; AVX512F: vpminsd
+}
+
+define void @test148(i32* nocapture %a, i32* nocapture %b) nounwind {
+vector.ph:
+ br label %vector.body
+
+vector.body: ; preds = %vector.body, %vector.ph
+ %index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
+ %gep.a = getelementptr inbounds i32* %a, i64 %index
+ %gep.b = getelementptr inbounds i32* %b, i64 %index
+ %ptr.a = bitcast i32* %gep.a to <16 x i32>*
+ %ptr.b = bitcast i32* %gep.b to <16 x i32>*
+ %load.a = load <16 x i32>* %ptr.a, align 2
+ %load.b = load <16 x i32>* %ptr.b, align 2
+ %cmp = icmp sge <16 x i32> %load.a, %load.b
+ %sel = select <16 x i1> %cmp, <16 x i32> %load.b, <16 x i32> %load.a
+ store <16 x i32> %sel, <16 x i32>* %ptr.a, align 2
+ %index.next = add i64 %index, 8
+ %loop = icmp eq i64 %index.next, 16384
+ br i1 %loop, label %for.end, label %vector.body
+
+for.end: ; preds = %vector.body
+ ret void
+
+; AVX512F-LABEL: test148:
+; AVX512F: vpminsd
+}
+
+define void @test149(i32* nocapture %a, i32* nocapture %b) nounwind {
+vector.ph:
+ br label %vector.body
+
+vector.body: ; preds = %vector.body, %vector.ph
+ %index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
+ %gep.a = getelementptr inbounds i32* %a, i64 %index
+ %gep.b = getelementptr inbounds i32* %b, i64 %index
+ %ptr.a = bitcast i32* %gep.a to <16 x i32>*
+ %ptr.b = bitcast i32* %gep.b to <16 x i32>*
+ %load.a = load <16 x i32>* %ptr.a, align 2
+ %load.b = load <16 x i32>* %ptr.b, align 2
+ %cmp = icmp ult <16 x i32> %load.a, %load.b
+ %sel = select <16 x i1> %cmp, <16 x i32> %load.b, <16 x i32> %load.a
+ store <16 x i32> %sel, <16 x i32>* %ptr.a, align 2
+ %index.next = add i64 %index, 8
+ %loop = icmp eq i64 %index.next, 16384
+ br i1 %loop, label %for.end, label %vector.body
+
+for.end: ; preds = %vector.body
+ ret void
+
+; AVX512F-LABEL: test149:
+; AVX512F: vpmaxud
+}
+
+define void @test150(i32* nocapture %a, i32* nocapture %b) nounwind {
+vector.ph:
+ br label %vector.body
+
+vector.body: ; preds = %vector.body, %vector.ph
+ %index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
+ %gep.a = getelementptr inbounds i32* %a, i64 %index
+ %gep.b = getelementptr inbounds i32* %b, i64 %index
+ %ptr.a = bitcast i32* %gep.a to <16 x i32>*
+ %ptr.b = bitcast i32* %gep.b to <16 x i32>*
+ %load.a = load <16 x i32>* %ptr.a, align 2
+ %load.b = load <16 x i32>* %ptr.b, align 2
+ %cmp = icmp ule <16 x i32> %load.a, %load.b
+ %sel = select <16 x i1> %cmp, <16 x i32> %load.b, <16 x i32> %load.a
+ store <16 x i32> %sel, <16 x i32>* %ptr.a, align 2
+ %index.next = add i64 %index, 8
+ %loop = icmp eq i64 %index.next, 16384
+ br i1 %loop, label %for.end, label %vector.body
+
+for.end: ; preds = %vector.body
+ ret void
+
+; AVX512F-LABEL: test150:
+; AVX512F: vpmaxud
+}
+
+define void @test151(i32* nocapture %a, i32* nocapture %b) nounwind {
+vector.ph:
+ br label %vector.body
+
+vector.body: ; preds = %vector.body, %vector.ph
+ %index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
+ %gep.a = getelementptr inbounds i32* %a, i64 %index
+ %gep.b = getelementptr inbounds i32* %b, i64 %index
+ %ptr.a = bitcast i32* %gep.a to <16 x i32>*
+ %ptr.b = bitcast i32* %gep.b to <16 x i32>*
+ %load.a = load <16 x i32>* %ptr.a, align 2
+ %load.b = load <16 x i32>* %ptr.b, align 2
+ %cmp = icmp ugt <16 x i32> %load.a, %load.b
+ %sel = select <16 x i1> %cmp, <16 x i32> %load.b, <16 x i32> %load.a
+ store <16 x i32> %sel, <16 x i32>* %ptr.a, align 2
+ %index.next = add i64 %index, 8
+ %loop = icmp eq i64 %index.next, 16384
+ br i1 %loop, label %for.end, label %vector.body
+
+for.end: ; preds = %vector.body
+ ret void
+
+; AVX512F-LABEL: test151:
+; AVX512F: vpminud
+}
+
+define void @test152(i32* nocapture %a, i32* nocapture %b) nounwind {
+vector.ph:
+ br label %vector.body
+
+vector.body: ; preds = %vector.body, %vector.ph
+ %index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
+ %gep.a = getelementptr inbounds i32* %a, i64 %index
+ %gep.b = getelementptr inbounds i32* %b, i64 %index
+ %ptr.a = bitcast i32* %gep.a to <16 x i32>*
+ %ptr.b = bitcast i32* %gep.b to <16 x i32>*
+ %load.a = load <16 x i32>* %ptr.a, align 2
+ %load.b = load <16 x i32>* %ptr.b, align 2
+ %cmp = icmp uge <16 x i32> %load.a, %load.b
+ %sel = select <16 x i1> %cmp, <16 x i32> %load.b, <16 x i32> %load.a
+ store <16 x i32> %sel, <16 x i32>* %ptr.a, align 2
+ %index.next = add i64 %index, 8
+ %loop = icmp eq i64 %index.next, 16384
+ br i1 %loop, label %for.end, label %vector.body
+
+for.end: ; preds = %vector.body
+ ret void
+
+; AVX512F-LABEL: test152:
+; AVX512F: vpminud
+}
+
+; -----------------------
+
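+; Tests 153-160: <8 x i64>, swapped-operand selects; checked under AVX512F.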
+define void @test153(i32* nocapture %a, i32* nocapture %b) nounwind {
+vector.ph:
+ br label %vector.body
+
+vector.body: ; preds = %vector.body, %vector.ph
+ %index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
+ %gep.a = getelementptr inbounds i32* %a, i64 %index
+ %gep.b = getelementptr inbounds i32* %b, i64 %index
+ %ptr.a = bitcast i32* %gep.a to <8 x i64>*
+ %ptr.b = bitcast i32* %gep.b to <8 x i64>*
+ %load.a = load <8 x i64>* %ptr.a, align 2
+ %load.b = load <8 x i64>* %ptr.b, align 2
+ %cmp = icmp slt <8 x i64> %load.a, %load.b
+ %sel = select <8 x i1> %cmp, <8 x i64> %load.b, <8 x i64> %load.a
+ store <8 x i64> %sel, <8 x i64>* %ptr.a, align 2
+ %index.next = add i64 %index, 8
+ %loop = icmp eq i64 %index.next, 16384
+ br i1 %loop, label %for.end, label %vector.body
+
+for.end: ; preds = %vector.body
+ ret void
+
+; AVX512F-LABEL: test153:
+; AVX512F: vpmaxsq
+}
+
+define void @test154(i32* nocapture %a, i32* nocapture %b) nounwind {
+vector.ph:
+ br label %vector.body
+
+vector.body: ; preds = %vector.body, %vector.ph
+ %index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
+ %gep.a = getelementptr inbounds i32* %a, i64 %index
+ %gep.b = getelementptr inbounds i32* %b, i64 %index
+ %ptr.a = bitcast i32* %gep.a to <8 x i64>*
+ %ptr.b = bitcast i32* %gep.b to <8 x i64>*
+ %load.a = load <8 x i64>* %ptr.a, align 2
+ %load.b = load <8 x i64>* %ptr.b, align 2
+ %cmp = icmp sle <8 x i64> %load.a, %load.b
+ %sel = select <8 x i1> %cmp, <8 x i64> %load.b, <8 x i64> %load.a
+ store <8 x i64> %sel, <8 x i64>* %ptr.a, align 2
+ %index.next = add i64 %index, 8
+ %loop = icmp eq i64 %index.next, 16384
+ br i1 %loop, label %for.end, label %vector.body
+
+for.end: ; preds = %vector.body
+ ret void
+
+; AVX512F-LABEL: test154:
+; AVX512F: vpmaxsq
+}
+
+define void @test155(i32* nocapture %a, i32* nocapture %b) nounwind {
+vector.ph:
+ br label %vector.body
+
+vector.body: ; preds = %vector.body, %vector.ph
+ %index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
+ %gep.a = getelementptr inbounds i32* %a, i64 %index
+ %gep.b = getelementptr inbounds i32* %b, i64 %index
+ %ptr.a = bitcast i32* %gep.a to <8 x i64>*
+ %ptr.b = bitcast i32* %gep.b to <8 x i64>*
+ %load.a = load <8 x i64>* %ptr.a, align 2
+ %load.b = load <8 x i64>* %ptr.b, align 2
+ %cmp = icmp sgt <8 x i64> %load.a, %load.b
+ %sel = select <8 x i1> %cmp, <8 x i64> %load.b, <8 x i64> %load.a
+ store <8 x i64> %sel, <8 x i64>* %ptr.a, align 2
+ %index.next = add i64 %index, 8
+ %loop = icmp eq i64 %index.next, 16384
+ br i1 %loop, label %for.end, label %vector.body
+
+for.end: ; preds = %vector.body
+ ret void
+
+; AVX512F-LABEL: test155:
+; AVX512F: vpminsq
+}
+
+define void @test156(i32* nocapture %a, i32* nocapture %b) nounwind {
+vector.ph:
+ br label %vector.body
+
+vector.body: ; preds = %vector.body, %vector.ph
+ %index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
+ %gep.a = getelementptr inbounds i32* %a, i64 %index
+ %gep.b = getelementptr inbounds i32* %b, i64 %index
+ %ptr.a = bitcast i32* %gep.a to <8 x i64>*
+ %ptr.b = bitcast i32* %gep.b to <8 x i64>*
+ %load.a = load <8 x i64>* %ptr.a, align 2
+ %load.b = load <8 x i64>* %ptr.b, align 2
+ %cmp = icmp sge <8 x i64> %load.a, %load.b
+ %sel = select <8 x i1> %cmp, <8 x i64> %load.b, <8 x i64> %load.a
+ store <8 x i64> %sel, <8 x i64>* %ptr.a, align 2
+ %index.next = add i64 %index, 8
+ %loop = icmp eq i64 %index.next, 16384
+ br i1 %loop, label %for.end, label %vector.body
+
+for.end: ; preds = %vector.body
+ ret void
+
+; AVX512F-LABEL: test156:
+; AVX512F: vpminsq
+}
+
+define void @test157(i32* nocapture %a, i32* nocapture %b) nounwind {
+vector.ph:
+ br label %vector.body
+
+vector.body: ; preds = %vector.body, %vector.ph
+ %index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
+ %gep.a = getelementptr inbounds i32* %a, i64 %index
+ %gep.b = getelementptr inbounds i32* %b, i64 %index
+ %ptr.a = bitcast i32* %gep.a to <8 x i64>*
+ %ptr.b = bitcast i32* %gep.b to <8 x i64>*
+ %load.a = load <8 x i64>* %ptr.a, align 2
+ %load.b = load <8 x i64>* %ptr.b, align 2
+ %cmp = icmp ult <8 x i64> %load.a, %load.b
+ %sel = select <8 x i1> %cmp, <8 x i64> %load.b, <8 x i64> %load.a
+ store <8 x i64> %sel, <8 x i64>* %ptr.a, align 2
+ %index.next = add i64 %index, 8
+ %loop = icmp eq i64 %index.next, 16384
+ br i1 %loop, label %for.end, label %vector.body
+
+for.end: ; preds = %vector.body
+ ret void
+
+; AVX512F-LABEL: test157:
+; AVX512F: vpmaxuq
+}
+
+define void @test158(i32* nocapture %a, i32* nocapture %b) nounwind {
+vector.ph:
+ br label %vector.body
+
+vector.body: ; preds = %vector.body, %vector.ph
+ %index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
+ %gep.a = getelementptr inbounds i32* %a, i64 %index
+ %gep.b = getelementptr inbounds i32* %b, i64 %index
+ %ptr.a = bitcast i32* %gep.a to <8 x i64>*
+ %ptr.b = bitcast i32* %gep.b to <8 x i64>*
+ %load.a = load <8 x i64>* %ptr.a, align 2
+ %load.b = load <8 x i64>* %ptr.b, align 2
+ %cmp = icmp ule <8 x i64> %load.a, %load.b
+ %sel = select <8 x i1> %cmp, <8 x i64> %load.b, <8 x i64> %load.a
+ store <8 x i64> %sel, <8 x i64>* %ptr.a, align 2
+ %index.next = add i64 %index, 8
+ %loop = icmp eq i64 %index.next, 16384
+ br i1 %loop, label %for.end, label %vector.body
+
+for.end: ; preds = %vector.body
+ ret void
+
+; AVX512F-LABEL: test158:
+; AVX512F: vpmaxuq
+}
+
+define void @test159(i32* nocapture %a, i32* nocapture %b) nounwind {
+vector.ph:
+ br label %vector.body
+
+vector.body: ; preds = %vector.body, %vector.ph
+ %index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
+ %gep.a = getelementptr inbounds i32* %a, i64 %index
+ %gep.b = getelementptr inbounds i32* %b, i64 %index
+ %ptr.a = bitcast i32* %gep.a to <8 x i64>*
+ %ptr.b = bitcast i32* %gep.b to <8 x i64>*
+ %load.a = load <8 x i64>* %ptr.a, align 2
+ %load.b = load <8 x i64>* %ptr.b, align 2
+ %cmp = icmp ugt <8 x i64> %load.a, %load.b
+ %sel = select <8 x i1> %cmp, <8 x i64> %load.b, <8 x i64> %load.a
+ store <8 x i64> %sel, <8 x i64>* %ptr.a, align 2
+ %index.next = add i64 %index, 8
+ %loop = icmp eq i64 %index.next, 16384
+ br i1 %loop, label %for.end, label %vector.body
+
+for.end: ; preds = %vector.body
+ ret void
+
+; AVX512F-LABEL: test159:
+; AVX512F: vpminuq
+}
+
+define void @test160(i32* nocapture %a, i32* nocapture %b) nounwind {
+vector.ph:
+ br label %vector.body
+
+vector.body: ; preds = %vector.body, %vector.ph
+ %index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
+ %gep.a = getelementptr inbounds i32* %a, i64 %index
+ %gep.b = getelementptr inbounds i32* %b, i64 %index
+ %ptr.a = bitcast i32* %gep.a to <8 x i64>*
+ %ptr.b = bitcast i32* %gep.b to <8 x i64>*
+ %load.a = load <8 x i64>* %ptr.a, align 2
+ %load.b = load <8 x i64>* %ptr.b, align 2
+ %cmp = icmp uge <8 x i64> %load.a, %load.b
+ %sel = select <8 x i1> %cmp, <8 x i64> %load.b, <8 x i64> %load.a
+ store <8 x i64> %sel, <8 x i64>* %ptr.a, align 2
+ %index.next = add i64 %index, 8
+ %loop = icmp eq i64 %index.next, 16384
+ br i1 %loop, label %for.end, label %vector.body
+
+for.end: ; preds = %vector.body
+ ret void
+
+; AVX512F-LABEL: test160:
+; AVX512F: vpminuq
+}
+
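+; Tests 161-168: <4 x i64> signed and unsigned min/max; checked under AVX512VL.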
+define void @test161(i32* nocapture %a, i32* nocapture %b) nounwind {
+vector.ph:
+ br label %vector.body
+
+vector.body: ; preds = %vector.body, %vector.ph
+ %index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
+ %gep.a = getelementptr inbounds i32* %a, i64 %index
+ %gep.b = getelementptr inbounds i32* %b, i64 %index
+ %ptr.a = bitcast i32* %gep.a to <4 x i64>*
+ %ptr.b = bitcast i32* %gep.b to <4 x i64>*
+ %load.a = load <4 x i64>* %ptr.a, align 2
+ %load.b = load <4 x i64>* %ptr.b, align 2
+ %cmp = icmp slt <4 x i64> %load.a, %load.b
+ %sel = select <4 x i1> %cmp, <4 x i64> %load.a, <4 x i64> %load.b
+ store <4 x i64> %sel, <4 x i64>* %ptr.a, align 2
+ %index.next = add i64 %index, 8
+ %loop = icmp eq i64 %index.next, 16384
+ br i1 %loop, label %for.end, label %vector.body
+
+for.end: ; preds = %vector.body
+ ret void
+
+; AVX512VL-LABEL: test161:
+; AVX512VL: vpminsq
+}
+
+define void @test162(i32* nocapture %a, i32* nocapture %b) nounwind {
+vector.ph:
+ br label %vector.body
+
+vector.body: ; preds = %vector.body, %vector.ph
+ %index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
+ %gep.a = getelementptr inbounds i32* %a, i64 %index
+ %gep.b = getelementptr inbounds i32* %b, i64 %index
+ %ptr.a = bitcast i32* %gep.a to <4 x i64>*
+ %ptr.b = bitcast i32* %gep.b to <4 x i64>*
+ %load.a = load <4 x i64>* %ptr.a, align 2
+ %load.b = load <4 x i64>* %ptr.b, align 2
+ %cmp = icmp sle <4 x i64> %load.a, %load.b
+ %sel = select <4 x i1> %cmp, <4 x i64> %load.a, <4 x i64> %load.b
+ store <4 x i64> %sel, <4 x i64>* %ptr.a, align 2
+ %index.next = add i64 %index, 8
+ %loop = icmp eq i64 %index.next, 16384
+ br i1 %loop, label %for.end, label %vector.body
+
+for.end: ; preds = %vector.body
+ ret void
+
+; AVX512VL-LABEL: test162:
+; AVX512VL: vpminsq
+}
+
+define void @test163(i32* nocapture %a, i32* nocapture %b) nounwind {
+vector.ph:
+ br label %vector.body
+
+vector.body: ; preds = %vector.body, %vector.ph
+ %index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
+ %gep.a = getelementptr inbounds i32* %a, i64 %index
+ %gep.b = getelementptr inbounds i32* %b, i64 %index
+ %ptr.a = bitcast i32* %gep.a to <4 x i64>*
+ %ptr.b = bitcast i32* %gep.b to <4 x i64>*
+ %load.a = load <4 x i64>* %ptr.a, align 2
+ %load.b = load <4 x i64>* %ptr.b, align 2
+ %cmp = icmp sgt <4 x i64> %load.a, %load.b
+ %sel = select <4 x i1> %cmp, <4 x i64> %load.a, <4 x i64> %load.b
+ store <4 x i64> %sel, <4 x i64>* %ptr.a, align 2
+ %index.next = add i64 %index, 8
+ %loop = icmp eq i64 %index.next, 16384
+ br i1 %loop, label %for.end, label %vector.body
+
+for.end: ; preds = %vector.body
+ ret void
+
+; AVX512VL-LABEL: test163:
+; AVX512VL: vpmaxsq
+}
+
+define void @test164(i32* nocapture %a, i32* nocapture %b) nounwind {
+vector.ph:
+ br label %vector.body
+
+vector.body: ; preds = %vector.body, %vector.ph
+ %index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
+ %gep.a = getelementptr inbounds i32* %a, i64 %index
+ %gep.b = getelementptr inbounds i32* %b, i64 %index
+ %ptr.a = bitcast i32* %gep.a to <4 x i64>*
+ %ptr.b = bitcast i32* %gep.b to <4 x i64>*
+ %load.a = load <4 x i64>* %ptr.a, align 2
+ %load.b = load <4 x i64>* %ptr.b, align 2
+ %cmp = icmp sge <4 x i64> %load.a, %load.b
+ %sel = select <4 x i1> %cmp, <4 x i64> %load.a, <4 x i64> %load.b
+ store <4 x i64> %sel, <4 x i64>* %ptr.a, align 2
+ %index.next = add i64 %index, 8
+ %loop = icmp eq i64 %index.next, 16384
+ br i1 %loop, label %for.end, label %vector.body
+
+for.end: ; preds = %vector.body
+ ret void
+
+; AVX512VL-LABEL: test164:
+; AVX512VL: vpmaxsq
+}
+
+define void @test165(i32* nocapture %a, i32* nocapture %b) nounwind {
+vector.ph:
+ br label %vector.body
+
+vector.body: ; preds = %vector.body, %vector.ph
+ %index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
+ %gep.a = getelementptr inbounds i32* %a, i64 %index
+ %gep.b = getelementptr inbounds i32* %b, i64 %index
+ %ptr.a = bitcast i32* %gep.a to <4 x i64>*
+ %ptr.b = bitcast i32* %gep.b to <4 x i64>*
+ %load.a = load <4 x i64>* %ptr.a, align 2
+ %load.b = load <4 x i64>* %ptr.b, align 2
+ %cmp = icmp ult <4 x i64> %load.a, %load.b
+ %sel = select <4 x i1> %cmp, <4 x i64> %load.a, <4 x i64> %load.b
+ store <4 x i64> %sel, <4 x i64>* %ptr.a, align 2
+ %index.next = add i64 %index, 8
+ %loop = icmp eq i64 %index.next, 16384
+ br i1 %loop, label %for.end, label %vector.body
+
+for.end: ; preds = %vector.body
+ ret void
+
+; AVX512VL-LABEL: test165:
+; AVX512VL: vpminuq
+}
+
+define void @test166(i32* nocapture %a, i32* nocapture %b) nounwind {
+vector.ph:
+ br label %vector.body
+
+vector.body: ; preds = %vector.body, %vector.ph
+ %index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
+ %gep.a = getelementptr inbounds i32* %a, i64 %index
+ %gep.b = getelementptr inbounds i32* %b, i64 %index
+ %ptr.a = bitcast i32* %gep.a to <4 x i64>*
+ %ptr.b = bitcast i32* %gep.b to <4 x i64>*
+ %load.a = load <4 x i64>* %ptr.a, align 2
+ %load.b = load <4 x i64>* %ptr.b, align 2
+ %cmp = icmp ule <4 x i64> %load.a, %load.b
+ %sel = select <4 x i1> %cmp, <4 x i64> %load.a, <4 x i64> %load.b
+ store <4 x i64> %sel, <4 x i64>* %ptr.a, align 2
+ %index.next = add i64 %index, 8
+ %loop = icmp eq i64 %index.next, 16384
+ br i1 %loop, label %for.end, label %vector.body
+
+for.end: ; preds = %vector.body
+ ret void
+
+; AVX512VL-LABEL: test166:
+; AVX512VL: vpminuq
+}
+
+define void @test167(i32* nocapture %a, i32* nocapture %b) nounwind {
+vector.ph:
+ br label %vector.body
+
+vector.body: ; preds = %vector.body, %vector.ph
+ %index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
+ %gep.a = getelementptr inbounds i32* %a, i64 %index
+ %gep.b = getelementptr inbounds i32* %b, i64 %index
+ %ptr.a = bitcast i32* %gep.a to <4 x i64>*
+ %ptr.b = bitcast i32* %gep.b to <4 x i64>*
+ %load.a = load <4 x i64>* %ptr.a, align 2
+ %load.b = load <4 x i64>* %ptr.b, align 2
+ %cmp = icmp ugt <4 x i64> %load.a, %load.b
+ %sel = select <4 x i1> %cmp, <4 x i64> %load.a, <4 x i64> %load.b
+ store <4 x i64> %sel, <4 x i64>* %ptr.a, align 2
+ %index.next = add i64 %index, 8
+ %loop = icmp eq i64 %index.next, 16384
+ br i1 %loop, label %for.end, label %vector.body
+
+for.end: ; preds = %vector.body
+ ret void
+
+; AVX512VL-LABEL: test167:
+; AVX512VL: vpmaxuq
+}
+
+define void @test168(i32* nocapture %a, i32* nocapture %b) nounwind {
+vector.ph:
+ br label %vector.body
+
+vector.body: ; preds = %vector.body, %vector.ph
+ %index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
+ %gep.a = getelementptr inbounds i32* %a, i64 %index
+ %gep.b = getelementptr inbounds i32* %b, i64 %index
+ %ptr.a = bitcast i32* %gep.a to <4 x i64>*
+ %ptr.b = bitcast i32* %gep.b to <4 x i64>*
+ %load.a = load <4 x i64>* %ptr.a, align 2
+ %load.b = load <4 x i64>* %ptr.b, align 2
+ %cmp = icmp uge <4 x i64> %load.a, %load.b
+ %sel = select <4 x i1> %cmp, <4 x i64> %load.a, <4 x i64> %load.b
+ store <4 x i64> %sel, <4 x i64>* %ptr.a, align 2
+ %index.next = add i64 %index, 8
+ %loop = icmp eq i64 %index.next, 16384
+ br i1 %loop, label %for.end, label %vector.body
+
+for.end: ; preds = %vector.body
+ ret void
+
+; AVX512VL-LABEL: test168:
+; AVX512VL: vpmaxuq
+}
+
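+; Tests 169-176: <4 x i64>, swapped-operand selects; checked under AVX512VL.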
+define void @test169(i32* nocapture %a, i32* nocapture %b) nounwind {
+vector.ph:
+ br label %vector.body
+
+vector.body: ; preds = %vector.body, %vector.ph
+ %index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
+ %gep.a = getelementptr inbounds i32* %a, i64 %index
+ %gep.b = getelementptr inbounds i32* %b, i64 %index
+ %ptr.a = bitcast i32* %gep.a to <4 x i64>*
+ %ptr.b = bitcast i32* %gep.b to <4 x i64>*
+ %load.a = load <4 x i64>* %ptr.a, align 2
+ %load.b = load <4 x i64>* %ptr.b, align 2
+ %cmp = icmp slt <4 x i64> %load.a, %load.b
+ %sel = select <4 x i1> %cmp, <4 x i64> %load.b, <4 x i64> %load.a
+ store <4 x i64> %sel, <4 x i64>* %ptr.a, align 2
+ %index.next = add i64 %index, 8
+ %loop = icmp eq i64 %index.next, 16384
+ br i1 %loop, label %for.end, label %vector.body
+
+for.end: ; preds = %vector.body
+ ret void
+
+; AVX512VL-LABEL: test169:
+; AVX512VL: vpmaxsq
+}
+
+define void @test170(i32* nocapture %a, i32* nocapture %b) nounwind {
+vector.ph:
+ br label %vector.body
+
+vector.body: ; preds = %vector.body, %vector.ph
+ %index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
+ %gep.a = getelementptr inbounds i32* %a, i64 %index
+ %gep.b = getelementptr inbounds i32* %b, i64 %index
+ %ptr.a = bitcast i32* %gep.a to <4 x i64>*
+ %ptr.b = bitcast i32* %gep.b to <4 x i64>*
+ %load.a = load <4 x i64>* %ptr.a, align 2
+ %load.b = load <4 x i64>* %ptr.b, align 2
+ %cmp = icmp sle <4 x i64> %load.a, %load.b
+ %sel = select <4 x i1> %cmp, <4 x i64> %load.b, <4 x i64> %load.a
+ store <4 x i64> %sel, <4 x i64>* %ptr.a, align 2
+ %index.next = add i64 %index, 8
+ %loop = icmp eq i64 %index.next, 16384
+ br i1 %loop, label %for.end, label %vector.body
+
+for.end: ; preds = %vector.body
+ ret void
+
+; AVX512VL-LABEL: test170:
+; AVX512VL: vpmaxsq
+}
+
+define void @test171(i32* nocapture %a, i32* nocapture %b) nounwind {
+vector.ph:
+ br label %vector.body
+
+vector.body: ; preds = %vector.body, %vector.ph
+ %index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
+ %gep.a = getelementptr inbounds i32* %a, i64 %index
+ %gep.b = getelementptr inbounds i32* %b, i64 %index
+ %ptr.a = bitcast i32* %gep.a to <4 x i64>*
+ %ptr.b = bitcast i32* %gep.b to <4 x i64>*
+ %load.a = load <4 x i64>* %ptr.a, align 2
+ %load.b = load <4 x i64>* %ptr.b, align 2
+ %cmp = icmp sgt <4 x i64> %load.a, %load.b
+ %sel = select <4 x i1> %cmp, <4 x i64> %load.b, <4 x i64> %load.a
+ store <4 x i64> %sel, <4 x i64>* %ptr.a, align 2
+ %index.next = add i64 %index, 8
+ %loop = icmp eq i64 %index.next, 16384
+ br i1 %loop, label %for.end, label %vector.body
+
+for.end: ; preds = %vector.body
+ ret void
+
+; AVX512VL-LABEL: test171:
+; AVX512VL: vpminsq
+}
+
+define void @test172(i32* nocapture %a, i32* nocapture %b) nounwind {
+vector.ph:
+ br label %vector.body
+
+vector.body: ; preds = %vector.body, %vector.ph
+ %index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
+ %gep.a = getelementptr inbounds i32* %a, i64 %index
+ %gep.b = getelementptr inbounds i32* %b, i64 %index
+ %ptr.a = bitcast i32* %gep.a to <4 x i64>*
+ %ptr.b = bitcast i32* %gep.b to <4 x i64>*
+ %load.a = load <4 x i64>* %ptr.a, align 2
+ %load.b = load <4 x i64>* %ptr.b, align 2
+ %cmp = icmp sge <4 x i64> %load.a, %load.b
+ %sel = select <4 x i1> %cmp, <4 x i64> %load.b, <4 x i64> %load.a
+ store <4 x i64> %sel, <4 x i64>* %ptr.a, align 2
+ %index.next = add i64 %index, 8
+ %loop = icmp eq i64 %index.next, 16384
+ br i1 %loop, label %for.end, label %vector.body
+
+for.end: ; preds = %vector.body
+ ret void
+
+; AVX512VL-LABEL: test172:
+; AVX512VL: vpminsq
+}
+
+define void @test173(i32* nocapture %a, i32* nocapture %b) nounwind {
+vector.ph:
+ br label %vector.body
+
+vector.body: ; preds = %vector.body, %vector.ph
+ %index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
+ %gep.a = getelementptr inbounds i32* %a, i64 %index
+ %gep.b = getelementptr inbounds i32* %b, i64 %index
+ %ptr.a = bitcast i32* %gep.a to <4 x i64>*
+ %ptr.b = bitcast i32* %gep.b to <4 x i64>*
+ %load.a = load <4 x i64>* %ptr.a, align 2
+ %load.b = load <4 x i64>* %ptr.b, align 2
+ %cmp = icmp ult <4 x i64> %load.a, %load.b
+ %sel = select <4 x i1> %cmp, <4 x i64> %load.b, <4 x i64> %load.a
+ store <4 x i64> %sel, <4 x i64>* %ptr.a, align 2
+ %index.next = add i64 %index, 8
+ %loop = icmp eq i64 %index.next, 16384
+ br i1 %loop, label %for.end, label %vector.body
+
+for.end: ; preds = %vector.body
+ ret void
+
+; AVX512VL-LABEL: test173:
+; AVX512VL: vpmaxuq
+}
+
+define void @test174(i32* nocapture %a, i32* nocapture %b) nounwind {
+vector.ph:
+ br label %vector.body
+
+vector.body: ; preds = %vector.body, %vector.ph
+ %index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
+ %gep.a = getelementptr inbounds i32* %a, i64 %index
+ %gep.b = getelementptr inbounds i32* %b, i64 %index
+ %ptr.a = bitcast i32* %gep.a to <4 x i64>*
+ %ptr.b = bitcast i32* %gep.b to <4 x i64>*
+ %load.a = load <4 x i64>* %ptr.a, align 2
+ %load.b = load <4 x i64>* %ptr.b, align 2
+ %cmp = icmp ule <4 x i64> %load.a, %load.b
+ %sel = select <4 x i1> %cmp, <4 x i64> %load.b, <4 x i64> %load.a
+ store <4 x i64> %sel, <4 x i64>* %ptr.a, align 2
+ %index.next = add i64 %index, 8
+ %loop = icmp eq i64 %index.next, 16384
+ br i1 %loop, label %for.end, label %vector.body
+
+for.end: ; preds = %vector.body
+ ret void
+
+; AVX512VL-LABEL: test174:
+; AVX512VL: vpmaxuq
+}
+
+define void @test175(i32* nocapture %a, i32* nocapture %b) nounwind {
+vector.ph:
+ br label %vector.body
+
+vector.body: ; preds = %vector.body, %vector.ph
+ %index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
+ %gep.a = getelementptr inbounds i32* %a, i64 %index
+ %gep.b = getelementptr inbounds i32* %b, i64 %index
+ %ptr.a = bitcast i32* %gep.a to <4 x i64>*
+ %ptr.b = bitcast i32* %gep.b to <4 x i64>*
+ %load.a = load <4 x i64>* %ptr.a, align 2
+ %load.b = load <4 x i64>* %ptr.b, align 2
+ %cmp = icmp ugt <4 x i64> %load.a, %load.b
+ %sel = select <4 x i1> %cmp, <4 x i64> %load.b, <4 x i64> %load.a
+ store <4 x i64> %sel, <4 x i64>* %ptr.a, align 2
+ %index.next = add i64 %index, 8
+ %loop = icmp eq i64 %index.next, 16384
+ br i1 %loop, label %for.end, label %vector.body
+
+for.end: ; preds = %vector.body
+ ret void
+
+; AVX512VL-LABEL: test175:
+; AVX512VL: vpminuq
+}
+
+define void @test176(i32* nocapture %a, i32* nocapture %b) nounwind {
+vector.ph:
+ br label %vector.body
+
+vector.body: ; preds = %vector.body, %vector.ph
+ %index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
+ %gep.a = getelementptr inbounds i32* %a, i64 %index
+ %gep.b = getelementptr inbounds i32* %b, i64 %index
+ %ptr.a = bitcast i32* %gep.a to <4 x i64>*
+ %ptr.b = bitcast i32* %gep.b to <4 x i64>*
+ %load.a = load <4 x i64>* %ptr.a, align 2
+ %load.b = load <4 x i64>* %ptr.b, align 2
+ %cmp = icmp uge <4 x i64> %load.a, %load.b
+ %sel = select <4 x i1> %cmp, <4 x i64> %load.b, <4 x i64> %load.a
+ store <4 x i64> %sel, <4 x i64>* %ptr.a, align 2
+ %index.next = add i64 %index, 8
+ %loop = icmp eq i64 %index.next, 16384
+ br i1 %loop, label %for.end, label %vector.body
+
+for.end: ; preds = %vector.body
+ ret void
+
+; AVX512VL-LABEL: test176:
+; AVX512VL: vpminuq
+}
+
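+; Tests 177-184: <2 x i64> signed and unsigned min/max; checked under AVX512VL.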
+define void @test177(i32* nocapture %a, i32* nocapture %b) nounwind {
+vector.ph:
+ br label %vector.body
+
+vector.body: ; preds = %vector.body, %vector.ph
+ %index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
+ %gep.a = getelementptr inbounds i32* %a, i64 %index
+ %gep.b = getelementptr inbounds i32* %b, i64 %index
+ %ptr.a = bitcast i32* %gep.a to <2 x i64>*
+ %ptr.b = bitcast i32* %gep.b to <2 x i64>*
+ %load.a = load <2 x i64>* %ptr.a, align 2
+ %load.b = load <2 x i64>* %ptr.b, align 2
+ %cmp = icmp slt <2 x i64> %load.a, %load.b
+ %sel = select <2 x i1> %cmp, <2 x i64> %load.a, <2 x i64> %load.b
+ store <2 x i64> %sel, <2 x i64>* %ptr.a, align 2
+ %index.next = add i64 %index, 8
+ %loop = icmp eq i64 %index.next, 16384
+ br i1 %loop, label %for.end, label %vector.body
+
+for.end: ; preds = %vector.body
+ ret void
+
+; AVX512VL-LABEL: test177:
+; AVX512VL: vpminsq
+}
+
+define void @test178(i32* nocapture %a, i32* nocapture %b) nounwind {
+vector.ph:
+ br label %vector.body
+
+vector.body: ; preds = %vector.body, %vector.ph
+ %index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
+ %gep.a = getelementptr inbounds i32* %a, i64 %index
+ %gep.b = getelementptr inbounds i32* %b, i64 %index
+ %ptr.a = bitcast i32* %gep.a to <2 x i64>*
+ %ptr.b = bitcast i32* %gep.b to <2 x i64>*
+ %load.a = load <2 x i64>* %ptr.a, align 2
+ %load.b = load <2 x i64>* %ptr.b, align 2
+ %cmp = icmp sle <2 x i64> %load.a, %load.b
+ %sel = select <2 x i1> %cmp, <2 x i64> %load.a, <2 x i64> %load.b
+ store <2 x i64> %sel, <2 x i64>* %ptr.a, align 2
+ %index.next = add i64 %index, 8
+ %loop = icmp eq i64 %index.next, 16384
+ br i1 %loop, label %for.end, label %vector.body
+
+for.end: ; preds = %vector.body
+ ret void
+
+; AVX512VL-LABEL: test178:
+; AVX512VL: vpminsq
+}
+
+define void @test179(i32* nocapture %a, i32* nocapture %b) nounwind {
+vector.ph:
+ br label %vector.body
+
+vector.body: ; preds = %vector.body, %vector.ph
+ %index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
+ %gep.a = getelementptr inbounds i32* %a, i64 %index
+ %gep.b = getelementptr inbounds i32* %b, i64 %index
+ %ptr.a = bitcast i32* %gep.a to <2 x i64>*
+ %ptr.b = bitcast i32* %gep.b to <2 x i64>*
+ %load.a = load <2 x i64>* %ptr.a, align 2
+ %load.b = load <2 x i64>* %ptr.b, align 2
+ %cmp = icmp sgt <2 x i64> %load.a, %load.b
+ %sel = select <2 x i1> %cmp, <2 x i64> %load.a, <2 x i64> %load.b
+ store <2 x i64> %sel, <2 x i64>* %ptr.a, align 2
+ %index.next = add i64 %index, 8
+ %loop = icmp eq i64 %index.next, 16384
+ br i1 %loop, label %for.end, label %vector.body
+
+for.end: ; preds = %vector.body
+ ret void
+
+; AVX512VL-LABEL: test179:
+; AVX512VL: vpmaxsq
+}
+
+define void @test180(i32* nocapture %a, i32* nocapture %b) nounwind {
+vector.ph:
+ br label %vector.body
+
+vector.body: ; preds = %vector.body, %vector.ph
+ %index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
+ %gep.a = getelementptr inbounds i32* %a, i64 %index
+ %gep.b = getelementptr inbounds i32* %b, i64 %index
+ %ptr.a = bitcast i32* %gep.a to <2 x i64>*
+ %ptr.b = bitcast i32* %gep.b to <2 x i64>*
+ %load.a = load <2 x i64>* %ptr.a, align 2
+ %load.b = load <2 x i64>* %ptr.b, align 2
+ %cmp = icmp sge <2 x i64> %load.a, %load.b
+ %sel = select <2 x i1> %cmp, <2 x i64> %load.a, <2 x i64> %load.b
+ store <2 x i64> %sel, <2 x i64>* %ptr.a, align 2
+ %index.next = add i64 %index, 8
+ %loop = icmp eq i64 %index.next, 16384
+ br i1 %loop, label %for.end, label %vector.body
+
+for.end: ; preds = %vector.body
+ ret void
+
+; AVX512VL-LABEL: test180:
+; AVX512VL: vpmaxsq
+}
+
+define void @test181(i32* nocapture %a, i32* nocapture %b) nounwind {
+vector.ph:
+ br label %vector.body
+
+vector.body: ; preds = %vector.body, %vector.ph
+ %index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
+ %gep.a = getelementptr inbounds i32* %a, i64 %index
+ %gep.b = getelementptr inbounds i32* %b, i64 %index
+ %ptr.a = bitcast i32* %gep.a to <2 x i64>*
+ %ptr.b = bitcast i32* %gep.b to <2 x i64>*
+ %load.a = load <2 x i64>* %ptr.a, align 2
+ %load.b = load <2 x i64>* %ptr.b, align 2
+ %cmp = icmp ult <2 x i64> %load.a, %load.b
+ %sel = select <2 x i1> %cmp, <2 x i64> %load.a, <2 x i64> %load.b
+ store <2 x i64> %sel, <2 x i64>* %ptr.a, align 2
+ %index.next = add i64 %index, 8
+ %loop = icmp eq i64 %index.next, 16384
+ br i1 %loop, label %for.end, label %vector.body
+
+for.end: ; preds = %vector.body
+ ret void
+
+; AVX512VL-LABEL: test181:
+; AVX512VL: vpminuq
+}
+
+define void @test182(i32* nocapture %a, i32* nocapture %b) nounwind {
+vector.ph:
+ br label %vector.body
+
+vector.body: ; preds = %vector.body, %vector.ph
+ %index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
+ %gep.a = getelementptr inbounds i32* %a, i64 %index
+ %gep.b = getelementptr inbounds i32* %b, i64 %index
+ %ptr.a = bitcast i32* %gep.a to <2 x i64>*
+ %ptr.b = bitcast i32* %gep.b to <2 x i64>*
+ %load.a = load <2 x i64>* %ptr.a, align 2
+ %load.b = load <2 x i64>* %ptr.b, align 2
+ %cmp = icmp ule <2 x i64> %load.a, %load.b
+ %sel = select <2 x i1> %cmp, <2 x i64> %load.a, <2 x i64> %load.b
+ store <2 x i64> %sel, <2 x i64>* %ptr.a, align 2
+ %index.next = add i64 %index, 8
+ %loop = icmp eq i64 %index.next, 16384
+ br i1 %loop, label %for.end, label %vector.body
+
+for.end: ; preds = %vector.body
+ ret void
+
+; AVX512VL-LABEL: test182:
+; AVX512VL: vpminuq
+}
+
+define void @test183(i32* nocapture %a, i32* nocapture %b) nounwind {
+vector.ph:
+ br label %vector.body
+
+vector.body: ; preds = %vector.body, %vector.ph
+ %index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
+ %gep.a = getelementptr inbounds i32* %a, i64 %index
+ %gep.b = getelementptr inbounds i32* %b, i64 %index
+ %ptr.a = bitcast i32* %gep.a to <2 x i64>*
+ %ptr.b = bitcast i32* %gep.b to <2 x i64>*
+ %load.a = load <2 x i64>* %ptr.a, align 2
+ %load.b = load <2 x i64>* %ptr.b, align 2
+ %cmp = icmp ugt <2 x i64> %load.a, %load.b
+ %sel = select <2 x i1> %cmp, <2 x i64> %load.a, <2 x i64> %load.b
+ store <2 x i64> %sel, <2 x i64>* %ptr.a, align 2
+ %index.next = add i64 %index, 8
+ %loop = icmp eq i64 %index.next, 16384
+ br i1 %loop, label %for.end, label %vector.body
+
+for.end: ; preds = %vector.body
+ ret void
+
+; AVX512VL-LABEL: test183:
+; AVX512VL: vpmaxuq
+}
+
+define void @test184(i32* nocapture %a, i32* nocapture %b) nounwind {
+vector.ph:
+ br label %vector.body
+
+vector.body: ; preds = %vector.body, %vector.ph
+ %index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
+ %gep.a = getelementptr inbounds i32* %a, i64 %index
+ %gep.b = getelementptr inbounds i32* %b, i64 %index
+ %ptr.a = bitcast i32* %gep.a to <2 x i64>*
+ %ptr.b = bitcast i32* %gep.b to <2 x i64>*
+ %load.a = load <2 x i64>* %ptr.a, align 2
+ %load.b = load <2 x i64>* %ptr.b, align 2
+ %cmp = icmp uge <2 x i64> %load.a, %load.b
+ %sel = select <2 x i1> %cmp, <2 x i64> %load.a, <2 x i64> %load.b
+ store <2 x i64> %sel, <2 x i64>* %ptr.a, align 2
+ %index.next = add i64 %index, 8
+ %loop = icmp eq i64 %index.next, 16384
+ br i1 %loop, label %for.end, label %vector.body
+
+for.end: ; preds = %vector.body
+ ret void
+
+; AVX512VL-LABEL: test184:
+; AVX512VL: vpmaxuq
+}
+
+define void @test185(i32* nocapture %a, i32* nocapture %b) nounwind {
+vector.ph:
+ br label %vector.body
+
+vector.body: ; preds = %vector.body, %vector.ph
+ %index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
+ %gep.a = getelementptr inbounds i32* %a, i64 %index
+ %gep.b = getelementptr inbounds i32* %b, i64 %index
+ %ptr.a = bitcast i32* %gep.a to <2 x i64>*
+ %ptr.b = bitcast i32* %gep.b to <2 x i64>*
+ %load.a = load <2 x i64>* %ptr.a, align 2
+ %load.b = load <2 x i64>* %ptr.b, align 2
+ %cmp = icmp slt <2 x i64> %load.a, %load.b
+ %sel = select <2 x i1> %cmp, <2 x i64> %load.b, <2 x i64> %load.a
+ store <2 x i64> %sel, <2 x i64>* %ptr.a, align 2
+ %index.next = add i64 %index, 8
+ %loop = icmp eq i64 %index.next, 16384
+ br i1 %loop, label %for.end, label %vector.body
+
+for.end: ; preds = %vector.body
+ ret void
+
+; AVX512VL-LABEL: test185:
+; AVX512VL: vpmaxsq
+}
+
+define void @test186(i32* nocapture %a, i32* nocapture %b) nounwind {
+vector.ph:
+ br label %vector.body
+
+vector.body: ; preds = %vector.body, %vector.ph
+ %index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
+ %gep.a = getelementptr inbounds i32* %a, i64 %index
+ %gep.b = getelementptr inbounds i32* %b, i64 %index
+ %ptr.a = bitcast i32* %gep.a to <2 x i64>*
+ %ptr.b = bitcast i32* %gep.b to <2 x i64>*
+ %load.a = load <2 x i64>* %ptr.a, align 2
+ %load.b = load <2 x i64>* %ptr.b, align 2
+ %cmp = icmp sle <2 x i64> %load.a, %load.b
+ %sel = select <2 x i1> %cmp, <2 x i64> %load.b, <2 x i64> %load.a
+ store <2 x i64> %sel, <2 x i64>* %ptr.a, align 2
+ %index.next = add i64 %index, 8
+ %loop = icmp eq i64 %index.next, 16384
+ br i1 %loop, label %for.end, label %vector.body
+
+for.end: ; preds = %vector.body
+ ret void
+
+; AVX512VL-LABEL: test186:
+; AVX512VL: vpmaxsq
+}
+
+define void @test187(i32* nocapture %a, i32* nocapture %b) nounwind {
+vector.ph:
+ br label %vector.body
+
+vector.body: ; preds = %vector.body, %vector.ph
+ %index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
+ %gep.a = getelementptr inbounds i32* %a, i64 %index
+ %gep.b = getelementptr inbounds i32* %b, i64 %index
+ %ptr.a = bitcast i32* %gep.a to <2 x i64>*
+ %ptr.b = bitcast i32* %gep.b to <2 x i64>*
+ %load.a = load <2 x i64>* %ptr.a, align 2
+ %load.b = load <2 x i64>* %ptr.b, align 2
+ %cmp = icmp sgt <2 x i64> %load.a, %load.b
+ %sel = select <2 x i1> %cmp, <2 x i64> %load.b, <2 x i64> %load.a
+ store <2 x i64> %sel, <2 x i64>* %ptr.a, align 2
+ %index.next = add i64 %index, 8
+ %loop = icmp eq i64 %index.next, 16384
+ br i1 %loop, label %for.end, label %vector.body
+
+for.end: ; preds = %vector.body
+ ret void
+
+; AVX512VL-LABEL: test187:
+; AVX512VL: vpminsq
+}
+
+define void @test188(i32* nocapture %a, i32* nocapture %b) nounwind {
+vector.ph:
+ br label %vector.body
+
+vector.body: ; preds = %vector.body, %vector.ph
+ %index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
+ %gep.a = getelementptr inbounds i32* %a, i64 %index
+ %gep.b = getelementptr inbounds i32* %b, i64 %index
+ %ptr.a = bitcast i32* %gep.a to <2 x i64>*
+ %ptr.b = bitcast i32* %gep.b to <2 x i64>*
+ %load.a = load <2 x i64>* %ptr.a, align 2
+ %load.b = load <2 x i64>* %ptr.b, align 2
+ %cmp = icmp sge <2 x i64> %load.a, %load.b
+ %sel = select <2 x i1> %cmp, <2 x i64> %load.b, <2 x i64> %load.a
+ store <2 x i64> %sel, <2 x i64>* %ptr.a, align 2
+ %index.next = add i64 %index, 8
+ %loop = icmp eq i64 %index.next, 16384
+ br i1 %loop, label %for.end, label %vector.body
+
+for.end: ; preds = %vector.body
+ ret void
+
+; AVX512VL-LABEL: test188:
+; AVX512VL: vpminsq
+}
+
+define void @test189(i32* nocapture %a, i32* nocapture %b) nounwind {
+vector.ph:
+ br label %vector.body
+
+vector.body: ; preds = %vector.body, %vector.ph
+ %index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
+ %gep.a = getelementptr inbounds i32* %a, i64 %index
+ %gep.b = getelementptr inbounds i32* %b, i64 %index
+ %ptr.a = bitcast i32* %gep.a to <2 x i64>*
+ %ptr.b = bitcast i32* %gep.b to <2 x i64>*
+ %load.a = load <2 x i64>* %ptr.a, align 2
+ %load.b = load <2 x i64>* %ptr.b, align 2
+ %cmp = icmp ult <2 x i64> %load.a, %load.b
+ %sel = select <2 x i1> %cmp, <2 x i64> %load.b, <2 x i64> %load.a
+ store <2 x i64> %sel, <2 x i64>* %ptr.a, align 2
+ %index.next = add i64 %index, 8
+ %loop = icmp eq i64 %index.next, 16384
+ br i1 %loop, label %for.end, label %vector.body
+
+for.end: ; preds = %vector.body
+ ret void
+
+; AVX512VL-LABEL: test189:
+; AVX512VL: vpmaxuq
+}
+
+define void @test190(i32* nocapture %a, i32* nocapture %b) nounwind {
+vector.ph:
+ br label %vector.body
+
+vector.body: ; preds = %vector.body, %vector.ph
+ %index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
+ %gep.a = getelementptr inbounds i32* %a, i64 %index
+ %gep.b = getelementptr inbounds i32* %b, i64 %index
+ %ptr.a = bitcast i32* %gep.a to <2 x i64>*
+ %ptr.b = bitcast i32* %gep.b to <2 x i64>*
+ %load.a = load <2 x i64>* %ptr.a, align 2
+ %load.b = load <2 x i64>* %ptr.b, align 2
+ %cmp = icmp ule <2 x i64> %load.a, %load.b
+ %sel = select <2 x i1> %cmp, <2 x i64> %load.b, <2 x i64> %load.a
+ store <2 x i64> %sel, <2 x i64>* %ptr.a, align 2
+ %index.next = add i64 %index, 8
+ %loop = icmp eq i64 %index.next, 16384
+ br i1 %loop, label %for.end, label %vector.body
+
+for.end: ; preds = %vector.body
+ ret void
+
+; AVX512VL-LABEL: test190:
+; AVX512VL: vpmaxuq
+}
+
+define void @test191(i32* nocapture %a, i32* nocapture %b) nounwind {
+vector.ph:
+ br label %vector.body
+
+vector.body: ; preds = %vector.body, %vector.ph
+ %index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
+ %gep.a = getelementptr inbounds i32* %a, i64 %index
+ %gep.b = getelementptr inbounds i32* %b, i64 %index
+ %ptr.a = bitcast i32* %gep.a to <2 x i64>*
+ %ptr.b = bitcast i32* %gep.b to <2 x i64>*
+ %load.a = load <2 x i64>* %ptr.a, align 2
+ %load.b = load <2 x i64>* %ptr.b, align 2
+ %cmp = icmp ugt <2 x i64> %load.a, %load.b
+ %sel = select <2 x i1> %cmp, <2 x i64> %load.b, <2 x i64> %load.a
+ store <2 x i64> %sel, <2 x i64>* %ptr.a, align 2
+ %index.next = add i64 %index, 8
+ %loop = icmp eq i64 %index.next, 16384
+ br i1 %loop, label %for.end, label %vector.body
+
+for.end: ; preds = %vector.body
+ ret void
+
+; AVX512VL-LABEL: test191:
+; AVX512VL: vpminuq
+}
+
+define void @test192(i32* nocapture %a, i32* nocapture %b) nounwind {
+vector.ph:
+ br label %vector.body
+
+vector.body: ; preds = %vector.body, %vector.ph
+ %index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
+ %gep.a = getelementptr inbounds i32* %a, i64 %index
+ %gep.b = getelementptr inbounds i32* %b, i64 %index
+ %ptr.a = bitcast i32* %gep.a to <2 x i64>*
+ %ptr.b = bitcast i32* %gep.b to <2 x i64>*
+ %load.a = load <2 x i64>* %ptr.a, align 2
+ %load.b = load <2 x i64>* %ptr.b, align 2
+ %cmp = icmp uge <2 x i64> %load.a, %load.b
+ %sel = select <2 x i1> %cmp, <2 x i64> %load.b, <2 x i64> %load.a
+ store <2 x i64> %sel, <2 x i64>* %ptr.a, align 2
+ %index.next = add i64 %index, 8
+ %loop = icmp eq i64 %index.next, 16384
+ br i1 %loop, label %for.end, label %vector.body
+
+for.end: ; preds = %vector.body
+ ret void
+
+; AVX512VL-LABEL: test192:
+; AVX512VL: vpminuq
}
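
The tests above all follow one shape: a strided loop loads two <2 x i64> vectors, compares them, and stores the select of the two. The AVX512VL checks rely on the backend recognizing the icmp+select pair as a min or max, including the swapped-operand forms: tests 185-192 invert the select arms, which is why an slt compare there maps to vpmaxsq rather than vpminsq. A minimal sketch of the idiom outside the loop harness, with an illustrative RUN line and function name that are not part of the patch:

; RUN: llc < %s -march=x86-64 -mattr=+avx512vl | FileCheck %s
; CHECK-LABEL: smin_v2i64:
; CHECK: vpminsq
define <2 x i64> @smin_v2i64(<2 x i64> %a, <2 x i64> %b) {
  %cmp = icmp slt <2 x i64> %a, %b                        ; lane-wise signed <
  %min = select <2 x i1> %cmp, <2 x i64> %a, <2 x i64> %b ; pick the smaller lane
  ret <2 x i64> %min
}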
diff --git a/test/CodeGen/X86/vselect.ll b/test/CodeGen/X86/vselect.ll
index 3bd1dc4..71620af 100644
--- a/test/CodeGen/X86/vselect.ll
+++ b/test/CodeGen/X86/vselect.ll
@@ -6,9 +6,8 @@
define <4 x float> @test1(<4 x float> %a, <4 x float> %b) {
; CHECK-LABEL: test1:
; CHECK: # BB#0:
-; CHECK-NEXT: andps {{.*}}(%rip), %xmm1
-; CHECK-NEXT: andps {{.*}}(%rip), %xmm0
-; CHECK-NEXT: orps %xmm1, %xmm0
+; CHECK-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[1,3]
+; CHECK-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,2,1,3]
; CHECK-NEXT: retq
%1 = select <4 x i1> <i1 true, i1 false, i1 true, i1 false>, <4 x float> %a, <4 x float> %b
ret <4 x float> %1
@@ -17,8 +16,8 @@ define <4 x float> @test1(<4 x float> %a, <4 x float> %b) {
define <4 x float> @test2(<4 x float> %a, <4 x float> %b) {
; CHECK-LABEL: test2:
; CHECK: # BB#0:
-; CHECK-NEXT: movsd %xmm0, %xmm1
-; CHECK-NEXT: movaps %xmm1, %xmm0
+; CHECK-NEXT: movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1]
+; CHECK-NEXT: movapd %xmm1, %xmm0
; CHECK-NEXT: retq
%1 = select <4 x i1> <i1 true, i1 true, i1 false, i1 false>, <4 x float> %a, <4 x float> %b
ret <4 x float> %1
@@ -27,7 +26,7 @@ define <4 x float> @test2(<4 x float> %a, <4 x float> %b) {
define <4 x float> @test3(<4 x float> %a, <4 x float> %b) {
; CHECK-LABEL: test3:
; CHECK: # BB#0:
-; CHECK-NEXT: movsd %xmm1, %xmm0
+; CHECK-NEXT: movsd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
; CHECK-NEXT: retq
%1 = select <4 x i1> <i1 false, i1 false, i1 true, i1 true>, <4 x float> %a, <4 x float> %b
ret <4 x float> %1
@@ -53,10 +52,6 @@ define <4 x float> @test5(<4 x float> %a, <4 x float> %b) {
define <8 x i16> @test6(<8 x i16> %a, <8 x i16> %b) {
; CHECK-LABEL: test6:
; CHECK: # BB#0:
-; CHECK-NEXT: movaps {{.*#+}} xmm1 = [0,65535,0,65535,0,65535,0,65535]
-; CHECK-NEXT: andps %xmm0, %xmm1
-; CHECK-NEXT: andps {{.*}}(%rip), %xmm0
-; CHECK-NEXT: orps %xmm1, %xmm0
; CHECK-NEXT: retq
%1 = select <8 x i1> <i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false>, <8 x i16> %a, <8 x i16> %a
ret <8 x i16> %1
@@ -65,9 +60,8 @@ define <8 x i16> @test6(<8 x i16> %a, <8 x i16> %b) {
define <8 x i16> @test7(<8 x i16> %a, <8 x i16> %b) {
; CHECK-LABEL: test7:
; CHECK: # BB#0:
-; CHECK-NEXT: andps {{.*}}(%rip), %xmm1
-; CHECK-NEXT: andps {{.*}}(%rip), %xmm0
-; CHECK-NEXT: orps %xmm1, %xmm0
+; CHECK-NEXT: movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1]
+; CHECK-NEXT: movapd %xmm1, %xmm0
; CHECK-NEXT: retq
%1 = select <8 x i1> <i1 true, i1 true, i1 true, i1 true, i1 false, i1 false, i1 false, i1 false>, <8 x i16> %a, <8 x i16> %b
ret <8 x i16> %1
@@ -76,9 +70,7 @@ define <8 x i16> @test7(<8 x i16> %a, <8 x i16> %b) {
define <8 x i16> @test8(<8 x i16> %a, <8 x i16> %b) {
; CHECK-LABEL: test8:
; CHECK: # BB#0:
-; CHECK-NEXT: andps {{.*}}(%rip), %xmm1
-; CHECK-NEXT: andps {{.*}}(%rip), %xmm0
-; CHECK-NEXT: orps %xmm1, %xmm0
+; CHECK-NEXT: movsd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
; CHECK-NEXT: retq
%1 = select <8 x i1> <i1 false, i1 false, i1 false, i1 false, i1 true, i1 true, i1 true, i1 true>, <8 x i16> %a, <8 x i16> %b
ret <8 x i16> %1
@@ -104,7 +96,7 @@ define <8 x i16> @test10(<8 x i16> %a, <8 x i16> %b) {
define <8 x i16> @test11(<8 x i16> %a, <8 x i16> %b) {
; CHECK-LABEL: test11:
; CHECK: # BB#0:
-; CHECK-NEXT: movaps {{.*#+}} xmm2 = <0,65535,65535,0,u,65535,65535,u>
+; CHECK-NEXT: movaps {{.*#+}} xmm2 = [0,65535,65535,0,65535,65535,65535,65535]
; CHECK-NEXT: andps %xmm2, %xmm0
; CHECK-NEXT: andnps %xmm1, %xmm2
; CHECK-NEXT: orps %xmm2, %xmm0
@@ -170,7 +162,7 @@ define <8 x i16> @test17(<8 x i16> %a, <8 x i16> %b) {
define <4 x float> @test18(<4 x float> %a, <4 x float> %b) {
; CHECK-LABEL: test18:
; CHECK: # BB#0:
-; CHECK-NEXT: movss %xmm1, %xmm0
+; CHECK-NEXT: movss {{.*#+}} xmm0 = xmm1[0],xmm0[1,2,3]
; CHECK-NEXT: retq
%1 = select <4 x i1> <i1 false, i1 true, i1 true, i1 true>, <4 x float> %a, <4 x float> %b
ret <4 x float> %1
@@ -179,7 +171,7 @@ define <4 x float> @test18(<4 x float> %a, <4 x float> %b) {
define <4 x i32> @test19(<4 x i32> %a, <4 x i32> %b) {
; CHECK-LABEL: test19:
; CHECK: # BB#0:
-; CHECK-NEXT: movss %xmm1, %xmm0
+; CHECK-NEXT: movss {{.*#+}} xmm0 = xmm1[0],xmm0[1,2,3]
; CHECK-NEXT: retq
%1 = select <4 x i1> <i1 false, i1 true, i1 true, i1 true>, <4 x i32> %a, <4 x i32> %b
ret <4 x i32> %1
@@ -188,7 +180,7 @@ define <4 x i32> @test19(<4 x i32> %a, <4 x i32> %b) {
define <2 x double> @test20(<2 x double> %a, <2 x double> %b) {
; CHECK-LABEL: test20:
; CHECK: # BB#0:
-; CHECK-NEXT: movsd %xmm1, %xmm0
+; CHECK-NEXT: movsd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
; CHECK-NEXT: retq
%1 = select <2 x i1> <i1 false, i1 true>, <2 x double> %a, <2 x double> %b
ret <2 x double> %1
@@ -197,7 +189,7 @@ define <2 x double> @test20(<2 x double> %a, <2 x double> %b) {
define <2 x i64> @test21(<2 x i64> %a, <2 x i64> %b) {
; CHECK-LABEL: test21:
; CHECK: # BB#0:
-; CHECK-NEXT: movsd %xmm1, %xmm0
+; CHECK-NEXT: movsd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
; CHECK-NEXT: retq
%1 = select <2 x i1> <i1 false, i1 true>, <2 x i64> %a, <2 x i64> %b
ret <2 x i64> %1
@@ -206,7 +198,7 @@ define <2 x i64> @test21(<2 x i64> %a, <2 x i64> %b) {
define <4 x float> @test22(<4 x float> %a, <4 x float> %b) {
; CHECK-LABEL: test22:
; CHECK: # BB#0:
-; CHECK-NEXT: movss %xmm0, %xmm1
+; CHECK-NEXT: movss {{.*#+}} xmm1 = xmm0[0],xmm1[1,2,3]
; CHECK-NEXT: movaps %xmm1, %xmm0
; CHECK-NEXT: retq
%1 = select <4 x i1> <i1 true, i1 false, i1 false, i1 false>, <4 x float> %a, <4 x float> %b
@@ -216,7 +208,7 @@ define <4 x float> @test22(<4 x float> %a, <4 x float> %b) {
define <4 x i32> @test23(<4 x i32> %a, <4 x i32> %b) {
; CHECK-LABEL: test23:
; CHECK: # BB#0:
-; CHECK-NEXT: movss %xmm0, %xmm1
+; CHECK-NEXT: movss {{.*#+}} xmm1 = xmm0[0],xmm1[1,2,3]
; CHECK-NEXT: movaps %xmm1, %xmm0
; CHECK-NEXT: retq
%1 = select <4 x i1> <i1 true, i1 false, i1 false, i1 false>, <4 x i32> %a, <4 x i32> %b
@@ -226,8 +218,8 @@ define <4 x i32> @test23(<4 x i32> %a, <4 x i32> %b) {
define <2 x double> @test24(<2 x double> %a, <2 x double> %b) {
; CHECK-LABEL: test24:
; CHECK: # BB#0:
-; CHECK-NEXT: movsd %xmm0, %xmm1
-; CHECK-NEXT: movaps %xmm1, %xmm0
+; CHECK-NEXT: movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1]
+; CHECK-NEXT: movapd %xmm1, %xmm0
; CHECK-NEXT: retq
%1 = select <2 x i1> <i1 true, i1 false>, <2 x double> %a, <2 x double> %b
ret <2 x double> %1
@@ -236,8 +228,8 @@ define <2 x double> @test24(<2 x double> %a, <2 x double> %b) {
define <2 x i64> @test25(<2 x i64> %a, <2 x i64> %b) {
; CHECK-LABEL: test25:
; CHECK: # BB#0:
-; CHECK-NEXT: movsd %xmm0, %xmm1
-; CHECK-NEXT: movaps %xmm1, %xmm0
+; CHECK-NEXT: movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1]
+; CHECK-NEXT: movapd %xmm1, %xmm0
; CHECK-NEXT: retq
%1 = select <2 x i1> <i1 true, i1 false>, <2 x i64> %a, <2 x i64> %b
ret <2 x i64> %1
@@ -276,6 +268,7 @@ define <16 x double> @select_illegal(<16 x double> %a, <16 x double> %b) {
; CHECK-NEXT: movaps %xmm2, 32(%rdi)
; CHECK-NEXT: movaps %xmm1, 16(%rdi)
; CHECK-NEXT: movaps %xmm0, (%rdi)
+; CHECK-NEXT: movq %rdi, %rax
; CHECK-NEXT: retq
%sel = select <16 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false>, <16 x double> %a, <16 x double> %b
ret <16 x double> %sel
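
Two patterns drive the vselect.ll updates: selects whose mask is a compile-time constant now lower to shuffles and scalar-move blends (shufps, movss, movsd) instead of the and/andnot/or mask-and-merge sequence, and a select whose two operands are the same value folds away entirely, which is why test6 (which passes %a on both sides) now checks for nothing but retq. A minimal sketch of that fold, assuming the same SSE baseline as the checks above (the function name is illustrative):

; CHECK: retq
define <8 x i16> @fold_same(<8 x i16> %a) {
  ; both select operands are %a, so the constant mask is irrelevant and
  ; the select folds to %a; no blend code should be emitted
  %r = select <8 x i1> <i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false>, <8 x i16> %a, <8 x i16> %a
  ret <8 x i16> %r
}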
diff --git a/test/CodeGen/X86/vshift-4.ll b/test/CodeGen/X86/vshift-4.ll
index a060cf8..cda9bc8 100644
--- a/test/CodeGen/X86/vshift-4.ll
+++ b/test/CodeGen/X86/vshift-4.ll
@@ -57,7 +57,7 @@ entry:
define void @shift3a(<8 x i16> %val, <8 x i16>* %dst, <8 x i16> %amt) nounwind {
entry:
; CHECK-LABEL: shift3a:
-; CHECK: movzwl
+; CHECK: pextrw $6
; CHECK: psllw
%shamt = shufflevector <8 x i16> %amt, <8 x i16> undef, <8 x i32> <i32 6, i32 6, i32 6, i32 6, i32 6, i32 6, i32 6, i32 6>
%shl = shl <8 x i16> %val, %shamt
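
The shift3a change reflects that the splatted shift amount is now pulled straight out of lane 6 of %amt with pextrw $6 rather than being round-tripped through memory via movzwl; psllw then shifts every i16 lane by that scalar amount. One plausible instruction sequence consistent with the two checks, where the middle movd step is an assumption on my part and not something the test verifies:

  pextrw $6, %xmm1, %eax   ; extract the splatted lane into a GPR
  movd   %eax, %xmm2       ; move the amount into the low lanes of an xmm
  psllw  %xmm2, %xmm0      ; shift all eight i16 lanes by that amount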
diff --git a/test/CodeGen/X86/vshift-6.ll b/test/CodeGen/X86/vshift-6.ll
index f50d9a6..175b649 100644
--- a/test/CodeGen/X86/vshift-6.ll
+++ b/test/CodeGen/X86/vshift-6.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -mcpu=corei7 -march=x86-64 -mattr=+sse2 | FileCheck %s
+; RUN: llc < %s -march=x86-64 -mattr=+sse2 | FileCheck %s
; This test makes sure that the compiler does not crash with an
; assertion failure when trying to fold a vector shift left
diff --git a/test/CodeGen/X86/widen_conversions.ll b/test/CodeGen/X86/widen_conversions.ll
index 8e5174f..fa85400 100644
--- a/test/CodeGen/X86/widen_conversions.ll
+++ b/test/CodeGen/X86/widen_conversions.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -mcpu=x86-64 -x86-experimental-vector-widening-legalization -x86-experimental-vector-shuffle-lowering | FileCheck %s
+; RUN: llc < %s -mcpu=x86-64 -x86-experimental-vector-widening-legalization | FileCheck %s
target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
target triple = "x86_64-unknown-unknown"
diff --git a/test/CodeGen/X86/widen_load-0.ll b/test/CodeGen/X86/widen_load-0.ll
index d543728..768a1be 100644
--- a/test/CodeGen/X86/widen_load-0.ll
+++ b/test/CodeGen/X86/widen_load-0.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -o - -mtriple=x86_64-linux -mcpu=corei7 | FileCheck %s
+; RUN: llc < %s -o - -mtriple=x86_64-linux | FileCheck %s
; PR4891
; Both loads should happen before either store.
diff --git a/test/CodeGen/X86/widen_load-1.ll b/test/CodeGen/X86/widen_load-1.ll
index c59cc58..6137424 100644
--- a/test/CodeGen/X86/widen_load-1.ll
+++ b/test/CodeGen/X86/widen_load-1.ll
@@ -9,8 +9,8 @@
; SSE: movaps %xmm0, (%rsp)
; SSE: callq killcommon
-; AVX: vmovaps compl+128(%rip), %xmm0
-; AVX: vmovaps %xmm0, (%rsp)
+; AVX: vmovdqa compl+128(%rip), %xmm0
+; AVX: vmovdqa %xmm0, (%rsp)
; AVX: callq killcommon
@compl = linkonce global [20 x i64] zeroinitializer, align 64 ; <[20 x i64]*> [#uses=1]
diff --git a/test/CodeGen/X86/widen_load-2.ll b/test/CodeGen/X86/widen_load-2.ll
index 0ec3574..c6bd964 100644
--- a/test/CodeGen/X86/widen_load-2.ll
+++ b/test/CodeGen/X86/widen_load-2.ll
@@ -76,10 +76,9 @@ define void @add3i16(%i16vec3* nocapture sret %ret, %i16vec3* %ap, %i16vec3* %bp
; CHECK: pmovzxwd (%{{.*}}), %[[R0:xmm[0-9]+]]
; CHECK-NEXT: pmovzxwd (%{{.*}}), %[[R1:xmm[0-9]+]]
; CHECK-NEXT: paddd %[[R0]], %[[R1]]
-; CHECK-NEXT: movdqa %[[R1]], %[[R0]]
-; CHECK-NEXT: pshufb {{.*}}, %[[R0]]
-; CHECK-NEXT: pmovzxdq %[[R0]], %[[R0]]
; CHECK-NEXT: pextrw $4, %[[R1]], 4(%{{.*}})
+; CHECK-NEXT: pshufb {{.*}}, %[[R1]]
+; CHECK-NEXT: pmovzxdq %[[R1]], %[[R0]]
; CHECK-NEXT: movd %[[R0]], (%{{.*}})
%a = load %i16vec3* %ap, align 16
%b = load %i16vec3* %bp, align 16
@@ -144,10 +143,9 @@ define void @add3i8(%i8vec3* nocapture sret %ret, %i8vec3* %ap, %i8vec3* %bp) no
; CHECK: pmovzxbd (%{{.*}}), %[[R0:xmm[0-9]+]]
; CHECK-NEXT: pmovzxbd (%{{.*}}), %[[R1:xmm[0-9]+]]
; CHECK-NEXT: paddd %[[R0]], %[[R1]]
-; CHECK-NEXT: movdqa %[[R1]], %[[R0]]
-; CHECK-NEXT: pshufb {{.*}}, %[[R0]]
-; CHECK-NEXT: pmovzxwq %[[R0]], %[[R0]]
; CHECK-NEXT: pextrb $8, %[[R1]], 2(%{{.*}})
+; CHECK-NEXT: pshufb {{.*}}, %[[R1]]
+; CHECK-NEXT: pmovzxwq %[[R1]], %[[R0]]
; CHECK-NEXT: movd %[[R0]], %e[[R2:[abcd]]]x
; CHECK-NEXT: movw %[[R2]]x, (%{{.*}})
%a = load %i8vec3* %ap, align 16
@@ -193,8 +191,9 @@ define void @rot(%i8vec3pack* nocapture sret %result, %i8vec3pack* %X, %i8vec3pa
; CHECK-NEXT: movd %[[CONSTANT1]], %e[[R1:[abcd]]]x
; CHECK-NEXT: movw %[[R1]]x, (%[[PTR1:.*]])
; CHECK-NEXT: movb $1, 2(%[[PTR1]])
-; CHECK-NEXT: pmovzxbd (%[[PTR0]]), %[[X0:xmm[0-9]+]]
-; CHECK-NEXT: pand {{.*}}, %[[X0]]
+; CHECK-NEXT: movl (%[[PTR0]]), [[TMP1:%e[abcd]+x]]
+; CHECK-NEXT: movl [[TMP1]], [[TMP2:.*]]
+; CHECK-NEXT: pmovzxbd [[TMP2]], %[[X0:xmm[0-9]+]]
; CHECK-NEXT: pextrd $1, %[[X0]], %e[[R0:[abcd]]]x
; CHECK-NEXT: shrl %e[[R0]]x
; CHECK-NEXT: movd %[[X0]], %e[[R1:[abcd]]]x
@@ -206,10 +205,9 @@ define void @rot(%i8vec3pack* nocapture sret %result, %i8vec3pack* %X, %i8vec3pa
; CHECK-NEXT: pinsrd $2, %e[[R0]]x, %[[X1]]
; CHECK-NEXT: pextrd $3, %[[X0]], %e[[R0:[abcd]]]x
; CHECK-NEXT: pinsrd $3, %e[[R0]]x, %[[X1]]
-; CHECK-NEXT: movdqa %[[X1]], %[[X2:xmm[0-9]+]]
-; CHECK-NEXT: pshufb %[[SHUFFLE_MASK]], %[[X2]]
-; CHECK-NEXT: pmovzxwq %[[X2]], %[[X3:xmm[0-9]+]]
; CHECK-NEXT: pextrb $8, %[[X1]], 2(%{{.*}})
+; CHECK-NEXT: pshufb %[[SHUFFLE_MASK]], %[[X1]]
+; CHECK-NEXT: pmovzxwq %[[X1]], %[[X3:xmm[0-9]+]]
; CHECK-NEXT: movd %[[X3]], %e[[R0:[abcd]]]x
; CHECK-NEXT: movw %[[R0]]x, (%{{.*}})
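
The add3i16, add3i8 and rot updates all make the same trade: the odd trailing element is stored first, directly from the add result (pextrw $4 or pextrb $8), which lets pshufb and pmovzx destructively repack that same register for the remaining store and drops the movdqa copy the old ordering needed. For add3i8 the byte accounting of the 3-byte store works out as:

  (%ret)+0..1  <- movw      ; lanes 0-1, packed down by pshufb/pmovzxwq/movd
  (%ret)+2     <- pextrb $8 ; lane 2 of the zero-extended i32 vector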
diff --git a/test/CodeGen/X86/widen_shuffle-1.ll b/test/CodeGen/X86/widen_shuffle-1.ll
index 70fdbb7..2aa870f 100644
--- a/test/CodeGen/X86/widen_shuffle-1.ll
+++ b/test/CodeGen/X86/widen_shuffle-1.ll
@@ -82,8 +82,8 @@ define void @shuf5(<8 x i8>* %p) nounwind {
; CHECK-LABEL: shuf5:
; CHECK: # BB#0:
; CHECK-NEXT: movl {{[0-9]+}}(%esp), %eax
-; CHECK-NEXT: movdqa {{.*#+}} xmm0 = <4,33,u,u,u,u,u,u>
-; CHECK-NEXT: pshufb {{.*#+}} xmm0 = xmm0[2,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u]
+; CHECK-NEXT: movdqa {{.*#+}} xmm0 = [33,33,33,33,33,33,33,33]
+; CHECK-NEXT: pshufb {{.*#+}} xmm0 = xmm0[0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u]
; CHECK-NEXT: movlpd %xmm0, (%eax)
; CHECK-NEXT: retl
%v = shufflevector <2 x i8> <i8 4, i8 33>, <2 x i8> undef, <8 x i32> <i32 1, i32 1, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
diff --git a/test/CodeGen/X86/win64_alloca_dynalloca.ll b/test/CodeGen/X86/win64_alloca_dynalloca.ll
index a6b6536..abda227 100644
--- a/test/CodeGen/X86/win64_alloca_dynalloca.ll
+++ b/test/CodeGen/X86/win64_alloca_dynalloca.ll
@@ -1,5 +1,6 @@
; RUN: llc < %s -mcpu=generic -enable-misched=false -mtriple=x86_64-mingw32 | FileCheck %s -check-prefix=M64
; RUN: llc < %s -mcpu=generic -enable-misched=false -mtriple=x86_64-win32 | FileCheck %s -check-prefix=W64
+; RUN: llc < %s -mcpu=generic -enable-misched=false -mtriple=x86_64-win32 -code-model=large | FileCheck %s -check-prefix=L64
; RUN: llc < %s -mcpu=generic -enable-misched=false -mtriple=x86_64-win32-macho | FileCheck %s -check-prefix=EFI
; PR8777
; PR8778
@@ -13,19 +14,24 @@ entry:
%buf0 = alloca i8, i64 4096, align 1
; ___chkstk_ms does not adjust %rsp.
-; M64: movq %rsp, %rbp
-; M64: $4096, %rax
+; M64: $4096, %eax
; M64: callq ___chkstk_ms
; M64: subq %rax, %rsp
+; M64: leaq 128(%rsp), %rbp
; __chkstk does not adjust %rsp.
-; W64: movq %rsp, %rbp
-; W64: $4096, %rax
+; W64: $4096, %eax
; W64: callq __chkstk
; W64: subq %rax, %rsp
+; W64: leaq 128(%rsp), %rbp
+
+; Use %r11 for the large model.
+; L64: $4096, %eax
+; L64: movabsq $__chkstk, %r11
+; L64: callq *%r11
+; L64: subq %rax, %rsp
; Freestanding
-; EFI: movq %rsp, %rbp
; EFI: $[[B0OFS:4096|4104]], %rsp
; EFI-NOT: call
@@ -33,8 +39,8 @@ entry:
; M64: leaq 15(%{{.*}}), %rax
; M64: andq $-16, %rax
-; M64: callq ___chkstk
-; M64-NOT: %rsp
+; M64: callq ___chkstk_ms
+; M64: subq %rax, %rsp
; M64: movq %rsp, %rax
; W64: leaq 15(%{{.*}}), %rax
@@ -43,6 +49,13 @@ entry:
; W64: subq %rax, %rsp
; W64: movq %rsp, %rax
+; L64: leaq 15(%{{.*}}), %rax
+; L64: andq $-16, %rax
+; L64: movabsq $__chkstk, %r11
+; L64: callq *%r11
+; L64: subq %rax, %rsp
+; L64: movq %rsp, %rax
+
; EFI: leaq 15(%{{.*}}), [[R1:%r.*]]
; EFI: andq $-16, [[R1]]
; EFI: movq %rsp, [[R64:%r.*]]
@@ -53,12 +66,12 @@ entry:
; M64: subq $48, %rsp
; M64: movq %rax, 32(%rsp)
-; M64: leaq -4096(%rbp), %r9
+; M64: leaq -128(%rbp), %r9
; M64: callq bar
; W64: subq $48, %rsp
; W64: movq %rax, 32(%rsp)
-; W64: leaq -4096(%rbp), %r9
+; W64: leaq -128(%rbp), %r9
; W64: callq bar
; EFI: subq $48, %rsp
@@ -68,9 +81,9 @@ entry:
ret i64 %r
-; M64: movq %rbp, %rsp
+; M64: leaq 3968(%rbp), %rsp
-; W64: movq %rbp, %rsp
+; W64: leaq 3968(%rbp), %rsp
}
@@ -84,7 +97,8 @@ entry:
; M64: leaq 15(%{{.*}}), %rax
; M64: andq $-16, %rax
-; M64: callq ___chkstk
+; M64: callq ___chkstk_ms
+; M64: subq %rax, %rsp
; M64: movq %rsp, [[R2:%r.*]]
; M64: andq $-128, [[R2]]
; M64: movq [[R2]], %rsp
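
The checks above encode the Win64 stack-probe contract as this patch updates it: the allocation size is placed in %rax (now with a 32-bit mov when it fits), the probe routine (__chkstk on MSVC-style targets, ___chkstk_ms on mingw) only touches the guard pages and leaves %rsp alone, so the caller must follow the call with subq %rax, %rsp; dynamic allocas on mingw now also go through ___chkstk_ms plus that explicit subq instead of the old ___chkstk. In the large code model a rel32 call cannot reach the routine, so its address is materialized with movabsq $__chkstk, %r11 and called indirectly. A minimal sketch of IR that forces the probe, assuming a Win64 triple (@use is a placeholder callee, not part of the patch):

define void @probe() nounwind {
  ; a frame larger than one 4 KiB page forces a stack probe on Win64
  %buf = alloca i8, i64 8192, align 1
  call void @use(i8* %buf)
  ret void
}

declare void @use(i8*)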
diff --git a/test/CodeGen/X86/win64_call_epi.ll b/test/CodeGen/X86/win64_call_epi.ll
index bc73ad4..71c44b0 100644
--- a/test/CodeGen/X86/win64_call_epi.ll
+++ b/test/CodeGen/X86/win64_call_epi.ll
@@ -44,7 +44,7 @@ b:
done:
ret void
}
-!0 = metadata !{metadata !"branch_weights", i32 100, i32 0}
+!0 = !{!"branch_weights", i32 100, i32 0}
; WIN64-LABEL: foo2:
; WIN64: callq bar
; WIN64: nop
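
The metadata rewrite here (and in xaluo.ll further down) tracks the IR assembly syntax change in this LLVM drop: metadata nodes and strings no longer take the metadata keyword prefix, so

  !0 = metadata !{metadata !"branch_weights", i32 100, i32 0}

becomes

  !0 = !{!"branch_weights", i32 100, i32 0}

with the operands themselves unchanged.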
diff --git a/test/CodeGen/X86/win64_eh.ll b/test/CodeGen/X86/win64_eh.ll
index f1f874e..b67ad58 100644
--- a/test/CodeGen/X86/win64_eh.ll
+++ b/test/CodeGen/X86/win64_eh.ll
@@ -1,5 +1,6 @@
-; RUN: llc < %s -O0 -mcpu=corei7 -mtriple=x86_64-pc-win32 | FileCheck %s -check-prefix=WIN64
-; RUN: llc < %s -O0 -mcpu=corei7 -mtriple=x86_64-pc-mingw32 | FileCheck %s -check-prefix=WIN64
+; RUN: llc < %s -O0 -mattr=sse2 -mtriple=x86_64-pc-windows-itanium | FileCheck %s -check-prefix=WIN64 -check-prefix=NORM
+; RUN: llc < %s -O0 -mattr=sse2 -mtriple=x86_64-pc-mingw32 | FileCheck %s -check-prefix=WIN64 -check-prefix=NORM
+; RUN: llc < %s -O0 -mattr=sse2 -mtriple=x86_64-pc-mingw32 -mcpu=atom | FileCheck %s -check-prefix=WIN64 -check-prefix=ATOM
; Check function without prolog
define void @foo0() uwtable {
@@ -20,7 +21,8 @@ entry:
}
; WIN64-LABEL: foo1:
; WIN64: .seh_proc foo1
-; WIN64: subq $4000, %rsp
+; NORM: subq $4000, %rsp
+; ATOM: leaq -4000(%rsp), %rsp
; WIN64: .seh_stackalloc 4000
; WIN64: .seh_endprologue
; WIN64: addq $4000, %rsp
@@ -35,7 +37,7 @@ entry:
}
; WIN64-LABEL: foo2:
; WIN64: .seh_proc foo2
-; WIN64: movabsq $8000, %rax
+; WIN64: movl $8000, %eax
; WIN64: callq {{__chkstk|___chkstk_ms}}
; WIN64: subq %rax, %rsp
; WIN64: .seh_stackalloc 8000
@@ -83,7 +85,8 @@ entry:
; WIN64: .seh_proc foo3
; WIN64: pushq %rsi
; WIN64: .seh_pushreg 6
-; WIN64: subq $24, %rsp
+; NORM: subq $24, %rsp
+; ATOM: leaq -24(%rsp), %rsp
; WIN64: .seh_stackalloc 24
; WIN64: .seh_endprologue
; WIN64: addq $24, %rsp
@@ -126,7 +129,8 @@ endtryfinally:
; WIN64-LABEL: foo4:
; WIN64: .seh_proc foo4
; WIN64: .seh_handler _d_eh_personality, @unwind, @except
-; WIN64: subq $56, %rsp
+; NORM: subq $56, %rsp
+; ATOM: leaq -56(%rsp), %rsp
; WIN64: .seh_stackalloc 56
; WIN64: .seh_endprologue
; WIN64: addq $56, %rsp
@@ -146,23 +150,24 @@ entry:
; WIN64: .seh_proc foo5
; WIN64: pushq %rbp
; WIN64: .seh_pushreg 5
-; WIN64: movq %rsp, %rbp
; WIN64: pushq %rdi
; WIN64: .seh_pushreg 7
; WIN64: pushq %rbx
; WIN64: .seh_pushreg 3
-; WIN64: andq $-64, %rsp
-; WIN64: subq $128, %rsp
-; WIN64: .seh_stackalloc 48
-; WIN64: .seh_setframe 5, 64
-; WIN64: movaps %xmm7, -32(%rbp) # 16-byte Spill
-; WIN64: movaps %xmm6, -48(%rbp) # 16-byte Spill
-; WIN64: .seh_savexmm 6, 16
-; WIN64: .seh_savexmm 7, 32
+; NORM: subq $96, %rsp
+; ATOM: leaq -96(%rsp), %rsp
+; WIN64: .seh_stackalloc 96
+; WIN64: leaq 96(%rsp), %rbp
+; WIN64: .seh_setframe 5, 96
+; WIN64: movaps %xmm7, -16(%rbp) # 16-byte Spill
+; WIN64: .seh_savexmm 7, 80
+; WIN64: movaps %xmm6, -32(%rbp) # 16-byte Spill
+; WIN64: .seh_savexmm 6, 64
; WIN64: .seh_endprologue
-; WIN64: movaps -48(%rbp), %xmm6 # 16-byte Reload
-; WIN64: movaps -32(%rbp), %xmm7 # 16-byte Reload
-; WIN64: leaq -16(%rbp), %rsp
+; WIN64: andq $-64, %rsp
+; WIN64: movaps -32(%rbp), %xmm6 # 16-byte Reload
+; WIN64: movaps -16(%rbp), %xmm7 # 16-byte Reload
+; WIN64: movq %rbp, %rsp
; WIN64: popq %rbx
; WIN64: popq %rdi
; WIN64: popq %rbp
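
The foo5 checks encode the new fixed-offset frame layout, and the offsets are internally consistent: the prologue allocates 96 bytes and establishes %rbp at the top of that allocation (leaq 96(%rsp), %rbp, mirrored by .seh_setframe 5, 96, register 5 being %rbp in SEH numbering), and the %rsp realignment (andq $-64, %rsp) now happens only after .seh_endprologue. The xmm save offsets follow from that: %xmm7 at -16(%rbp) sits 96 - 16 = 80 bytes above the post-allocation %rsp, matching .seh_savexmm 7, 80, and %xmm6 at -32(%rbp) gives 96 - 32 = 64, matching .seh_savexmm 6, 64.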
diff --git a/test/CodeGen/X86/win64_frame.ll b/test/CodeGen/X86/win64_frame.ll
new file mode 100644
index 0000000..ddba716
--- /dev/null
+++ b/test/CodeGen/X86/win64_frame.ll
@@ -0,0 +1,122 @@
+; RUN: llc < %s -mtriple=x86_64-pc-win32 | FileCheck %s
+
+define i32 @f1(i32 %p1, i32 %p2, i32 %p3, i32 %p4, i32 %p5) "no-frame-pointer-elim"="true" {
+ ; CHECK-LABEL: f1:
+ ; CHECK: movl 48(%rbp), %eax
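+  ; Frame math (informal note): after "pushq %rbp; movq %rsp, %rbp",
+  ; 8(%rbp) holds the return address and 16..40(%rbp) is the caller's
+  ; 32-byte home area for the four register parameters, so the fifth
+  ; parameter is the first true stack slot, at 48(%rbp).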
+ ret i32 %p5
+}
+
+define void @f2(i32 %p, ...) "no-frame-pointer-elim"="true" {
+ ; CHECK-LABEL: f2:
+ ; CHECK: .seh_stackalloc 8
+ ; CHECK: movq %rsp, %rbp
+ ; CHECK: .seh_setframe 5, 0
+ ; CHECK: movq %rdx, 32(%rbp)
+ ; CHECK: leaq 32(%rbp), %rax
+ %ap = alloca i8, align 8
+ call void @llvm.va_start(i8* %ap)
+ ret void
+}
+
+define i8* @f3() "no-frame-pointer-elim"="true" {
+ ; CHECK-LABEL: f3:
+ ; CHECK: movq %rsp, %rbp
+ ; CHECK: .seh_setframe 5, 0
+ ; CHECK: movq 8(%rbp), %rax
+ %ra = call i8* @llvm.returnaddress(i32 0)
+ ret i8* %ra
+}
+
+define i8* @f4() "no-frame-pointer-elim"="true" {
+ ; CHECK-LABEL: f4:
+ ; CHECK: pushq %rbp
+ ; CHECK: .seh_pushreg 5
+ ; CHECK: subq $304, %rsp
+ ; CHECK: .seh_stackalloc 304
+ ; CHECK: leaq 128(%rsp), %rbp
+ ; CHECK: .seh_setframe 5, 128
+ ; CHECK: .seh_endprologue
+ ; CHECK: movq 184(%rbp), %rax
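+  ; Informal check of the 184: "pushq %rbp" leaves the return address at
+  ; 8(%rsp), "subq $304" moves it to 312(%rsp), and with %rbp = %rsp + 128
+  ; that is 312 - 128 = 184(%rbp), matching the CHECK above.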
+ alloca [300 x i8]
+ %ra = call i8* @llvm.returnaddress(i32 0)
+ ret i8* %ra
+}
+
+declare void @external(i8*)
+
+define void @f5() "no-frame-pointer-elim"="true" {
+ ; CHECK-LABEL: f5:
+ ; CHECK: subq $336, %rsp
+ ; CHECK: .seh_stackalloc 336
+ ; CHECK: leaq 128(%rsp), %rbp
+ ; CHECK: .seh_setframe 5, 128
+ ; CHECK: leaq -92(%rbp), %rcx
+ ; CHECK: callq external
+ %a = alloca [300 x i8]
+ %gep = getelementptr [300 x i8]* %a, i32 0, i32 0
+ call void @external(i8* %gep)
+ ret void
+}
+
+define void @f6(i32 %p, ...) "no-frame-pointer-elim"="true" {
+ ; CHECK-LABEL: f6:
+ ; CHECK: subq $336, %rsp
+ ; CHECK: .seh_stackalloc 336
+ ; CHECK: leaq 128(%rsp), %rbp
+ ; CHECK: .seh_setframe 5, 128
+ ; CHECK: leaq -92(%rbp), %rcx
+ ; CHECK: callq external
+ %a = alloca [300 x i8]
+ %gep = getelementptr [300 x i8]* %a, i32 0, i32 0
+ call void @external(i8* %gep)
+ ret void
+}
+
+define i32 @f7(i32 %a, i32 %b, i32 %c, i32 %d, i32 %e) "no-frame-pointer-elim"="true" {
+ ; CHECK-LABEL: f7:
+ ; CHECK: pushq %rbp
+ ; CHECK: .seh_pushreg 5
+ ; CHECK: subq $304, %rsp
+ ; CHECK: .seh_stackalloc 304
+ ; CHECK: leaq 128(%rsp), %rbp
+ ; CHECK: .seh_setframe 5, 128
+ ; CHECK: andq $-64, %rsp
+ ; CHECK: movl 224(%rbp), %eax
+ ; CHECK: leaq 176(%rbp), %rsp
+ alloca [300 x i8], align 64
+ ret i32 %e
+}
+
+define i32 @f8(i32 %a, i32 %b, i32 %c, i32 %d, i32 %e) "no-frame-pointer-elim"="true" {
+ ; CHECK-LABEL: f8:
+ ; CHECK: subq $352, %rsp
+ ; CHECK: .seh_stackalloc 352
+ ; CHECK: leaq 128(%rsp), %rbp
+ ; CHECK: .seh_setframe 5, 128
+
+ %alloca = alloca [300 x i8], align 64
+ ; CHECK: andq $-64, %rsp
+ ; CHECK: movq %rsp, %rbx
+
+ alloca i32, i32 %a
+ ; CHECK: movl %ecx, %eax
+ ; CHECK: leaq 15(,%rax,4), %rax
+ ; CHECK: andq $-16, %rax
+ ; CHECK: callq __chkstk
+ ; CHECK: subq %rax, %rsp
+
+ %gep = getelementptr [300 x i8]* %alloca, i32 0, i32 0
+ call void @external(i8* %gep)
+ ; CHECK: subq $32, %rsp
+ ; CHECK: leaq (%rbx), %rcx
+ ; CHECK: callq external
+ ; CHECK: addq $32, %rsp
+
+ ret i32 %e
+ ; CHECK: movl %esi, %eax
+ ; CHECK: leaq 224(%rbp), %rsp
+}
+
+declare i8* @llvm.returnaddress(i32) nounwind readnone
+
+declare void @llvm.va_start(i8*) nounwind
diff --git a/test/CodeGen/X86/win_chkstk.ll b/test/CodeGen/X86/win_chkstk.ll
index 0c02c1a..4edc89f 100644
--- a/test/CodeGen/X86/win_chkstk.ll
+++ b/test/CodeGen/X86/win_chkstk.ll
@@ -1,5 +1,6 @@
; RUN: llc < %s -mtriple=i686-pc-win32 | FileCheck %s -check-prefix=WIN_X32
; RUN: llc < %s -mtriple=x86_64-pc-win32 | FileCheck %s -check-prefix=WIN_X64
+; RUN: llc < %s -mtriple=x86_64-pc-win32 -code-model=large | FileCheck %s -check-prefix=WIN64_LARGE
; RUN: llc < %s -mtriple=i686-pc-mingw32 | FileCheck %s -check-prefix=MINGW_X32
; RUN: llc < %s -mtriple=x86_64-pc-mingw32 | FileCheck %s -check-prefix=MINGW_X64
; RUN: llc < %s -mtriple=i386-pc-linux | FileCheck %s -check-prefix=LINUX
@@ -16,6 +17,8 @@ define i32 @main4k() nounwind {
entry:
; WIN_X32: calll __chkstk
; WIN_X64: callq __chkstk
+; WIN64_LARGE: movabsq $__chkstk, %r11
+; WIN64_LARGE: callq *%r11
; MINGW_X32: calll __alloca
; MINGW_X64: callq ___chkstk_ms
; LINUX-NOT: call __chkstk
@@ -52,6 +55,8 @@ define x86_64_win64cc i32 @main4k_win64() nounwind {
entry:
; WIN_X32: calll __chkstk
; WIN_X64: callq __chkstk
+; WIN64_LARGE: movabsq $__chkstk, %r11
+; WIN64_LARGE: callq *%r11
; MINGW_X32: calll __alloca
; MINGW_X64: callq ___chkstk_ms
; LINUX-NOT: call __chkstk
diff --git a/test/CodeGen/X86/win_cst_pool.ll b/test/CodeGen/X86/win_cst_pool.ll
index e8b853a..199557d 100644
--- a/test/CodeGen/X86/win_cst_pool.ll
+++ b/test/CodeGen/X86/win_cst_pool.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -mtriple=x86_64-win32 -mcpu=corei7 | FileCheck %s
+; RUN: llc < %s -mtriple=x86_64-win32 -mattr=sse2 | FileCheck %s
target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
target triple = "x86_64-pc-windows-msvc"
@@ -6,7 +6,7 @@ define double @double() {
ret double 0x0000000000800000
}
; CHECK: .globl __real@0000000000800000
-; CHECK-NEXT: .section .rdata,"rd",discard,__real@0000000000800000
+; CHECK-NEXT: .section .rdata,"dr",discard,__real@0000000000800000
; CHECK-NEXT: .align 8
; CHECK-NEXT: __real@0000000000800000:
; CHECK-NEXT: .quad 8388608
@@ -18,7 +18,7 @@ define <4 x i32> @vec1() {
ret <4 x i32> <i32 3, i32 2, i32 1, i32 0>
}
; CHECK: .globl __xmm@00000000000000010000000200000003
-; CHECK-NEXT: .section .rdata,"rd",discard,__xmm@00000000000000010000000200000003
+; CHECK-NEXT: .section .rdata,"dr",discard,__xmm@00000000000000010000000200000003
; CHECK-NEXT: .align 16
; CHECK-NEXT: __xmm@00000000000000010000000200000003:
; CHECK-NEXT: .long 3
@@ -33,7 +33,7 @@ define <8 x i16> @vec2() {
ret <8 x i16> <i16 7, i16 6, i16 5, i16 4, i16 3, i16 2, i16 1, i16 0>
}
; CHECK: .globl __xmm@00000001000200030004000500060007
-; CHECK-NEXT: .section .rdata,"rd",discard,__xmm@00000001000200030004000500060007
+; CHECK-NEXT: .section .rdata,"dr",discard,__xmm@00000001000200030004000500060007
; CHECK-NEXT: .align 16
; CHECK-NEXT: __xmm@00000001000200030004000500060007:
; CHECK-NEXT: .short 7
@@ -53,7 +53,7 @@ define <4 x float> @undef1() {
ret <4 x float> <float 1.0, float 1.0, float undef, float undef>
; CHECK: .globl __xmm@00000000000000003f8000003f800000
-; CHECK-NEXT: .section .rdata,"rd",discard,__xmm@00000000000000003f8000003f800000
+; CHECK-NEXT: .section .rdata,"dr",discard,__xmm@00000000000000003f8000003f800000
; CHECK-NEXT: .align 16
; CHECK-NEXT: __xmm@00000000000000003f8000003f800000:
; CHECK-NEXT: .long 1065353216 # float 1
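
These checks describe MSVC-style constant pooling: each pooled constant becomes a global __real@<hex> or __xmm@<hex> symbol in its own discardable .rdata comdat so that identical constants deduplicate at link time; the patch itself only corrects the COFF section flag string from "rd" to "dr". A minimal illustrative case in the same vein (the function name is mine, not part of the test):

define <4 x float> @splat_one() {
  ; expected to be materialized from a pooled __xmm@... constant
  ; rather than built inline
  ret <4 x float> <float 1.0, float 1.0, float 1.0, float 1.0>
}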
diff --git a/test/CodeGen/X86/win_eh_prepare.ll b/test/CodeGen/X86/win_eh_prepare.ll
new file mode 100644
index 0000000..f96fed5
--- /dev/null
+++ b/test/CodeGen/X86/win_eh_prepare.ll
@@ -0,0 +1,80 @@
+; RUN: opt -S -winehprepare -mtriple x86_64-pc-windows-msvc < %s | FileCheck %s
+
+; FIXME: Add and test outlining here.
+
+declare void @maybe_throw()
+
+@_ZTIi = external constant i8*
+@g = external global i32
+
+declare i32 @__C_specific_handler(...)
+declare i32 @__gxx_personality_seh0(...)
+declare i32 @llvm.eh.typeid.for(i8*) readnone nounwind
+
+define i32 @use_seh() {
+entry:
+ invoke void @maybe_throw()
+ to label %cont unwind label %lpad
+
+cont:
+ ret i32 0
+
+lpad:
+ %ehvals = landingpad { i8*, i32 } personality i32 (...)* @__C_specific_handler
+ cleanup
+ catch i8* bitcast (i32 (i8*, i8*)* @filt_g to i8*)
+ %ehsel = extractvalue { i8*, i32 } %ehvals, 1
+ %filt_g_sel = call i32 @llvm.eh.typeid.for(i8* bitcast (i32 (i8*, i8*)* @filt_g to i8*))
+ %matches = icmp eq i32 %ehsel, %filt_g_sel
+ br i1 %matches, label %ret1, label %eh.resume
+
+ret1:
+ ret i32 1
+
+eh.resume:
+ resume { i8*, i32 } %ehvals
+}
+
+define internal i32 @filt_g(i8*, i8*) {
+ %g = load i32* @g
+ ret i32 %g
+}
+
+; CHECK-LABEL: define i32 @use_seh()
+; CHECK: invoke void @maybe_throw()
+; CHECK-NEXT: to label %cont unwind label %lpad
+; CHECK: eh.resume:
+; CHECK-NEXT: unreachable
+
+
+; A MinGW64-ish EH style; it can arise when a binary uses both the MSVC CRT
+; and the mingw CRT and is linked with LTO.
+define i32 @use_gcc() {
+entry:
+ invoke void @maybe_throw()
+ to label %cont unwind label %lpad
+
+cont:
+ ret i32 0
+
+lpad:
+ %ehvals = landingpad { i8*, i32 } personality i32 (...)* @__gxx_personality_seh0
+ cleanup
+ catch i8* bitcast (i8** @_ZTIi to i8*)
+ %ehsel = extractvalue { i8*, i32 } %ehvals, 1
+ %filt_g_sel = call i32 @llvm.eh.typeid.for(i8* bitcast (i32 (i8*, i8*)* @filt_g to i8*))
+ %matches = icmp eq i32 %ehsel, %filt_g_sel
+ br i1 %matches, label %ret1, label %eh.resume
+
+ret1:
+ ret i32 1
+
+eh.resume:
+ resume { i8*, i32 } %ehvals
+}
+
+; CHECK-LABEL: define i32 @use_gcc()
+; CHECK: invoke void @maybe_throw()
+; CHECK-NEXT: to label %cont unwind label %lpad
+; CHECK: eh.resume:
+; CHECK: call void @_Unwind_Resume(i8* %exn.obj)
diff --git a/test/CodeGen/X86/x32-lea-1.ll b/test/CodeGen/X86/x32-lea-1.ll
new file mode 100644
index 0000000..7ccb34d
--- /dev/null
+++ b/test/CodeGen/X86/x32-lea-1.ll
@@ -0,0 +1,10 @@
+; RUN: llc < %s -mtriple=x86_64-linux-gnux32 -O0 | FileCheck %s
+; CHECK: leal {{[-0-9]*}}(%r{{s|b}}p),
+; CHECK-NOT: leal {{[-0-9]*}}(%e{{s|b}}p),
+
+define void @foo(i32** %p) {
+ %a = alloca i32, i32 10
+ %addr = getelementptr i32* %a, i32 4
+ store i32* %addr, i32** %p
+ ret void
+}
diff --git a/test/CodeGen/X86/x86-64-and-mask.ll b/test/CodeGen/X86/x86-64-and-mask.ll
index bc6c612..c8a832a 100644
--- a/test/CodeGen/X86/x86-64-and-mask.ll
+++ b/test/CodeGen/X86/x86-64-and-mask.ll
@@ -1,4 +1,4 @@
-; RUN: llc -mcpu=corei7 < %s | FileCheck %s
+; RUN: llc < %s | FileCheck %s
target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128"
target triple = "x86_64-apple-darwin8"
diff --git a/test/CodeGen/X86/x86-64-baseptr.ll b/test/CodeGen/X86/x86-64-baseptr.ll
new file mode 100644
index 0000000..7fd94fa
--- /dev/null
+++ b/test/CodeGen/X86/x86-64-baseptr.ll
@@ -0,0 +1,26 @@
+; RUN: llc -mtriple=x86_64-pc-linux -force-align-stack -stack-alignment=32 < %s | FileCheck %s
+; RUN: llc -mtriple=x86_64-pc-linux-gnux32 -force-align-stack -stack-alignment=32 < %s | FileCheck -check-prefix=X32ABI %s
+; This should run with NaCl as well (-mtriple=x86_64-pc-nacl) but currently doesn't due to PR22655.
+
+; Make sure the correct register gets set up as the base pointer.
+; This should be rbx for x64 and 64-bit NaCl, and ebx for x32.
+; CHECK-LABEL: base
+; CHECK: subq $32, %rsp
+; CHECK: movq %rsp, %rbx
+; X32ABI-LABEL: base
+; X32ABI: subl $32, %esp
+; X32ABI: movl %esp, %ebx
+; NACL-LABEL: base
+; NACL: subq $32, %rsp
+; NACL: movq %rsp, %rbx
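+;
+; Why a third register is needed at all (informal note): with the forced
+; 32-byte realignment, %rbp-relative offsets cannot address the realigned
+; locals, and %rsp moves again when the variable-sized alloca executes, so
+; the realigned %rsp is snapshotted into %rbx (%ebx on x32) and locals are
+; addressed off that base pointer.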
+
+declare i32 @helper() nounwind
+define void @base() #0 {
+entry:
+ %k = call i32 @helper()
+ %a = alloca i32, i32 %k, align 4
+ store i32 0, i32* %a, align 4
+ ret void
+}
+
+attributes #0 = { nounwind uwtable "no-frame-pointer-elim"="true" "no-frame-pointer-elim-non-leaf"}
diff --git a/test/CodeGen/X86/x86-64-psub.ll b/test/CodeGen/X86/x86-64-psub.ll
index 183ddf4..2e39c14 100644
--- a/test/CodeGen/X86/x86-64-psub.ll
+++ b/test/CodeGen/X86/x86-64-psub.ll
@@ -1,4 +1,4 @@
-; RUN: llc -mtriple=x86_64-pc-linux -mcpu=corei7 < %s | FileCheck %s
+; RUN: llc -mtriple=x86_64-pc-linux -mattr=mmx < %s | FileCheck %s
; MMX packed sub opcodes were wrongly marked as commutative.
; This test checks that the operands of packed sub instructions are
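
A pattern worth noting here and throughout this patch: many RUN lines drop -mcpu=corei7 in favor of a minimal explicit feature set, so the expected instruction sequences depend only on the ISA features a test actually needs rather than on one CPU's scheduling model. The same substitution appears in vshift-6.ll, widen_load-0.ll, win_cst_pool.ll, x86-64-and-mask.ll and x86-shifts.ll, e.g.:

-; RUN: llc -mtriple=x86_64-pc-linux -mcpu=corei7 < %s | FileCheck %s
+; RUN: llc -mtriple=x86_64-pc-linux -mattr=mmx < %s | FileCheck %s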
diff --git a/test/CodeGen/X86/x86-inline-asm-validation.ll b/test/CodeGen/X86/x86-inline-asm-validation.ll
new file mode 100644
index 0000000..56bdc48
--- /dev/null
+++ b/test/CodeGen/X86/x86-inline-asm-validation.ll
@@ -0,0 +1,34 @@
+; RUN: llc -mtriple i686-gnu -filetype asm -o - %s 2>&1 | FileCheck %s
+
+define void @test_L_ff() {
+entry:
+ call void asm "", "L,~{dirflag},~{fpsr},~{flags}"(i32 255)
+ ret void
+}
+
+; CHECK-NOT: error: invalid operand for inline asm constraint 'L'
+
+define void @test_L_ffff() {
+entry:
+ call void asm "", "L,~{dirflag},~{fpsr},~{flags}"(i32 65535)
+ ret void
+}
+
+; CHECK-NOT: error: invalid operand for inline asm constraint 'L'
+
+define void @test_M_1() {
+entry:
+ call void asm "", "M,~{dirflag},~{fpsr},~{flags}"(i32 1)
+ ret void
+}
+
+; CHECK-NOT: error: invalid operand for inline asm constraint 'M'
+
+define void @test_O_64() {
+entry:
+ call void asm "", "O,~{dirflag},~{fpsr},~{flags}"(i32 64)
+ ret void
+}
+
+; CHECK-NOT: error: invalid operand for inline asm constraint 'O'
+
diff --git a/test/CodeGen/X86/x86-shifts.ll b/test/CodeGen/X86/x86-shifts.ll
index ec47933..a10134e 100644
--- a/test/CodeGen/X86/x86-shifts.ll
+++ b/test/CodeGen/X86/x86-shifts.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=x86-64 -mcpu=corei7 | FileCheck %s
+; RUN: llc < %s -march=x86-64 -mattr=sse2 | FileCheck %s
; Splat patterns below
diff --git a/test/CodeGen/X86/xaluo.ll b/test/CodeGen/X86/xaluo.ll
index 54a4d6aa..668628c 100644
--- a/test/CodeGen/X86/xaluo.ll
+++ b/test/CodeGen/X86/xaluo.ll
@@ -755,4 +755,4 @@ declare {i16, i1} @llvm.umul.with.overflow.i16(i16, i16) nounwind readnone
declare {i32, i1} @llvm.umul.with.overflow.i32(i32, i32) nounwind readnone
declare {i64, i1} @llvm.umul.with.overflow.i64(i64, i64) nounwind readnone
-!0 = metadata !{metadata !"branch_weights", i32 0, i32 2147483647}
+!0 = !{!"branch_weights", i32 0, i32 2147483647}
diff --git a/test/CodeGen/X86/xop-intrinsics-x86_64.ll b/test/CodeGen/X86/xop-intrinsics-x86_64.ll
index 8af782c..e154e4a 100644
--- a/test/CodeGen/X86/xop-intrinsics-x86_64.ll
+++ b/test/CodeGen/X86/xop-intrinsics-x86_64.ll
@@ -92,13 +92,13 @@ define <4 x i64> @test_int_x86_xop_vpcmov_256_rm(<4 x i64> %a0, <4 x i64> %a1, <
declare <4 x i64> @llvm.x86.xop.vpcmov.256(<4 x i64>, <4 x i64>, <4 x i64>) nounwind readnone
define <16 x i8> @test_int_x86_xop_vpcomeqb(<16 x i8> %a0, <16 x i8> %a1) {
- ; CHECK:vpcomb
+ ; CHECK:vpcomeqb
%res = call <16 x i8> @llvm.x86.xop.vpcomeqb(<16 x i8> %a0, <16 x i8> %a1) ;
ret <16 x i8> %res
}
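
Every vpcom check update below follows one rule: the XOP integer comparisons are now printed with the condition folded into the mnemonic (vpcomeqb, vpcomltd, vpcomgtuq, and so on) instead of the generic vpcomb-style family mnemonic with a separate condition immediate, so each CHECK tightens from the family name to the exact condition form, e.g.:

-  ; CHECK: vpcomw
+  ; CHECK: vpcomeqw

Note the not-equal intrinsics print as vpcomneq*/vpcomnequ*, which is why vpcomneb is checked as vpcomneqb.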
define <16 x i8> @test_int_x86_xop_vpcomeqb_mem(<16 x i8> %a0, <16 x i8>* %a1) {
; CHECK-NOT: vmovaps
- ; CHECK:vpcomb
+ ; CHECK:vpcomeqb
%vec = load <16 x i8>* %a1
%res = call <16 x i8> @llvm.x86.xop.vpcomeqb(<16 x i8> %a0, <16 x i8> %vec) ;
ret <16 x i8> %res
@@ -106,441 +106,441 @@ define <16 x i8> @test_int_x86_xop_vpcomeqb_mem(<16 x i8> %a0, <16 x i8>* %a1) {
declare <16 x i8> @llvm.x86.xop.vpcomeqb(<16 x i8>, <16 x i8>) nounwind readnone
define <8 x i16> @test_int_x86_xop_vpcomeqw(<8 x i16> %a0, <8 x i16> %a1) {
- ; CHECK: vpcomw
+ ; CHECK: vpcomeqw
%res = call <8 x i16> @llvm.x86.xop.vpcomeqw(<8 x i16> %a0, <8 x i16> %a1) ;
ret <8 x i16> %res
}
declare <8 x i16> @llvm.x86.xop.vpcomeqw(<8 x i16>, <8 x i16>) nounwind readnone
define <4 x i32> @test_int_x86_xop_vpcomeqd(<4 x i32> %a0, <4 x i32> %a1) {
- ; CHECK: vpcomd
+ ; CHECK: vpcomeqd
%res = call <4 x i32> @llvm.x86.xop.vpcomeqd(<4 x i32> %a0, <4 x i32> %a1) ;
ret <4 x i32> %res
}
declare <4 x i32> @llvm.x86.xop.vpcomeqd(<4 x i32>, <4 x i32>) nounwind readnone
define <2 x i64> @test_int_x86_xop_vpcomeqq(<2 x i64> %a0, <2 x i64> %a1) {
- ; CHECK: vpcomq
+ ; CHECK: vpcomeqq
%res = call <2 x i64> @llvm.x86.xop.vpcomeqq(<2 x i64> %a0, <2 x i64> %a1) ;
ret <2 x i64> %res
}
declare <2 x i64> @llvm.x86.xop.vpcomeqq(<2 x i64>, <2 x i64>) nounwind readnone
define <16 x i8> @test_int_x86_xop_vpcomequb(<16 x i8> %a0, <16 x i8> %a1) {
- ; CHECK: vpcomub
+ ; CHECK: vpcomequb
%res = call <16 x i8> @llvm.x86.xop.vpcomequb(<16 x i8> %a0, <16 x i8> %a1) ;
ret <16 x i8> %res
}
declare <16 x i8> @llvm.x86.xop.vpcomequb(<16 x i8>, <16 x i8>) nounwind readnone
define <4 x i32> @test_int_x86_xop_vpcomequd(<4 x i32> %a0, <4 x i32> %a1) {
- ; CHECK: vpcomud
+ ; CHECK: vpcomequd
%res = call <4 x i32> @llvm.x86.xop.vpcomequd(<4 x i32> %a0, <4 x i32> %a1) ;
ret <4 x i32> %res
}
declare <4 x i32> @llvm.x86.xop.vpcomequd(<4 x i32>, <4 x i32>) nounwind readnone
define <2 x i64> @test_int_x86_xop_vpcomequq(<2 x i64> %a0, <2 x i64> %a1) {
- ; CHECK: vpcomuq
+ ; CHECK: vpcomequq
%res = call <2 x i64> @llvm.x86.xop.vpcomequq(<2 x i64> %a0, <2 x i64> %a1) ;
ret <2 x i64> %res
}
declare <2 x i64> @llvm.x86.xop.vpcomequq(<2 x i64>, <2 x i64>) nounwind readnone
define <8 x i16> @test_int_x86_xop_vpcomequw(<8 x i16> %a0, <8 x i16> %a1) {
- ; CHECK: vpcomuw
+ ; CHECK: vpcomequw
%res = call <8 x i16> @llvm.x86.xop.vpcomequw(<8 x i16> %a0, <8 x i16> %a1) ;
ret <8 x i16> %res
}
declare <8 x i16> @llvm.x86.xop.vpcomequw(<8 x i16>, <8 x i16>) nounwind readnone
define <16 x i8> @test_int_x86_xop_vpcomfalseb(<16 x i8> %a0, <16 x i8> %a1) {
- ; CHECK: vpcomb
+ ; CHECK: vpcomfalseb
%res = call <16 x i8> @llvm.x86.xop.vpcomfalseb(<16 x i8> %a0, <16 x i8> %a1) ;
ret <16 x i8> %res
}
declare <16 x i8> @llvm.x86.xop.vpcomfalseb(<16 x i8>, <16 x i8>) nounwind readnone
define <4 x i32> @test_int_x86_xop_vpcomfalsed(<4 x i32> %a0, <4 x i32> %a1) {
- ; CHECK: vpcomd
+ ; CHECK: vpcomfalsed
%res = call <4 x i32> @llvm.x86.xop.vpcomfalsed(<4 x i32> %a0, <4 x i32> %a1) ;
ret <4 x i32> %res
}
declare <4 x i32> @llvm.x86.xop.vpcomfalsed(<4 x i32>, <4 x i32>) nounwind readnone
define <2 x i64> @test_int_x86_xop_vpcomfalseq(<2 x i64> %a0, <2 x i64> %a1) {
- ; CHECK: vpcomq
+ ; CHECK: vpcomfalseq
%res = call <2 x i64> @llvm.x86.xop.vpcomfalseq(<2 x i64> %a0, <2 x i64> %a1) ;
ret <2 x i64> %res
}
declare <2 x i64> @llvm.x86.xop.vpcomfalseq(<2 x i64>, <2 x i64>) nounwind readnone
define <16 x i8> @test_int_x86_xop_vpcomfalseub(<16 x i8> %a0, <16 x i8> %a1) {
- ; CHECK: vpcomub
+ ; CHECK: vpcomfalseub
%res = call <16 x i8> @llvm.x86.xop.vpcomfalseub(<16 x i8> %a0, <16 x i8> %a1) ;
ret <16 x i8> %res
}
declare <16 x i8> @llvm.x86.xop.vpcomfalseub(<16 x i8>, <16 x i8>) nounwind readnone
define <4 x i32> @test_int_x86_xop_vpcomfalseud(<4 x i32> %a0, <4 x i32> %a1) {
- ; CHECK: vpcomud
+ ; CHECK: vpcomfalseud
%res = call <4 x i32> @llvm.x86.xop.vpcomfalseud(<4 x i32> %a0, <4 x i32> %a1) ;
ret <4 x i32> %res
}
declare <4 x i32> @llvm.x86.xop.vpcomfalseud(<4 x i32>, <4 x i32>) nounwind readnone
define <2 x i64> @test_int_x86_xop_vpcomfalseuq(<2 x i64> %a0, <2 x i64> %a1) {
- ; CHECK: vpcomuq
+ ; CHECK: vpcomfalseuq
%res = call <2 x i64> @llvm.x86.xop.vpcomfalseuq(<2 x i64> %a0, <2 x i64> %a1) ;
ret <2 x i64> %res
}
declare <2 x i64> @llvm.x86.xop.vpcomfalseuq(<2 x i64>, <2 x i64>) nounwind readnone
define <8 x i16> @test_int_x86_xop_vpcomfalseuw(<8 x i16> %a0, <8 x i16> %a1) {
- ; CHECK: vpcomuw
+ ; CHECK: vpcomfalseuw
%res = call <8 x i16> @llvm.x86.xop.vpcomfalseuw(<8 x i16> %a0, <8 x i16> %a1) ;
ret <8 x i16> %res
}
declare <8 x i16> @llvm.x86.xop.vpcomfalseuw(<8 x i16>, <8 x i16>) nounwind readnone
define <8 x i16> @test_int_x86_xop_vpcomfalsew(<8 x i16> %a0, <8 x i16> %a1) {
- ; CHECK: vpcomw
+ ; CHECK: vpcomfalsew
%res = call <8 x i16> @llvm.x86.xop.vpcomfalsew(<8 x i16> %a0, <8 x i16> %a1) ;
ret <8 x i16> %res
}
declare <8 x i16> @llvm.x86.xop.vpcomfalsew(<8 x i16>, <8 x i16>) nounwind readnone
define <16 x i8> @test_int_x86_xop_vpcomgeb(<16 x i8> %a0, <16 x i8> %a1) {
- ; CHECK: vpcomb
+ ; CHECK: vpcomgeb
%res = call <16 x i8> @llvm.x86.xop.vpcomgeb(<16 x i8> %a0, <16 x i8> %a1) ;
ret <16 x i8> %res
}
declare <16 x i8> @llvm.x86.xop.vpcomgeb(<16 x i8>, <16 x i8>) nounwind readnone
define <4 x i32> @test_int_x86_xop_vpcomged(<4 x i32> %a0, <4 x i32> %a1) {
- ; CHECK: vpcomd
+ ; CHECK: vpcomged
%res = call <4 x i32> @llvm.x86.xop.vpcomged(<4 x i32> %a0, <4 x i32> %a1) ;
ret <4 x i32> %res
}
declare <4 x i32> @llvm.x86.xop.vpcomged(<4 x i32>, <4 x i32>) nounwind readnone
define <2 x i64> @test_int_x86_xop_vpcomgeq(<2 x i64> %a0, <2 x i64> %a1) {
- ; CHECK: vpcomq
+ ; CHECK: vpcomgeq
%res = call <2 x i64> @llvm.x86.xop.vpcomgeq(<2 x i64> %a0, <2 x i64> %a1) ;
ret <2 x i64> %res
}
declare <2 x i64> @llvm.x86.xop.vpcomgeq(<2 x i64>, <2 x i64>) nounwind readnone
define <16 x i8> @test_int_x86_xop_vpcomgeub(<16 x i8> %a0, <16 x i8> %a1) {
- ; CHECK: vpcomub
+ ; CHECK: vpcomgeub
%res = call <16 x i8> @llvm.x86.xop.vpcomgeub(<16 x i8> %a0, <16 x i8> %a1) ;
ret <16 x i8> %res
}
declare <16 x i8> @llvm.x86.xop.vpcomgeub(<16 x i8>, <16 x i8>) nounwind readnone
define <4 x i32> @test_int_x86_xop_vpcomgeud(<4 x i32> %a0, <4 x i32> %a1) {
- ; CHECK: vpcomud
+ ; CHECK: vpcomgeud
%res = call <4 x i32> @llvm.x86.xop.vpcomgeud(<4 x i32> %a0, <4 x i32> %a1) ;
ret <4 x i32> %res
}
declare <4 x i32> @llvm.x86.xop.vpcomgeud(<4 x i32>, <4 x i32>) nounwind readnone
define <2 x i64> @test_int_x86_xop_vpcomgeuq(<2 x i64> %a0, <2 x i64> %a1) {
- ; CHECK: vpcomuq
+ ; CHECK: vpcomgeuq
%res = call <2 x i64> @llvm.x86.xop.vpcomgeuq(<2 x i64> %a0, <2 x i64> %a1) ;
ret <2 x i64> %res
}
declare <2 x i64> @llvm.x86.xop.vpcomgeuq(<2 x i64>, <2 x i64>) nounwind readnone
define <8 x i16> @test_int_x86_xop_vpcomgeuw(<8 x i16> %a0, <8 x i16> %a1) {
- ; CHECK: vpcomuw
+ ; CHECK: vpcomgeuw
%res = call <8 x i16> @llvm.x86.xop.vpcomgeuw(<8 x i16> %a0, <8 x i16> %a1) ;
ret <8 x i16> %res
}
declare <8 x i16> @llvm.x86.xop.vpcomgeuw(<8 x i16>, <8 x i16>) nounwind readnone
define <8 x i16> @test_int_x86_xop_vpcomgew(<8 x i16> %a0, <8 x i16> %a1) {
- ; CHECK: vpcomw
+ ; CHECK: vpcomgew
%res = call <8 x i16> @llvm.x86.xop.vpcomgew(<8 x i16> %a0, <8 x i16> %a1) ;
ret <8 x i16> %res
}
declare <8 x i16> @llvm.x86.xop.vpcomgew(<8 x i16>, <8 x i16>) nounwind readnone
define <16 x i8> @test_int_x86_xop_vpcomgtb(<16 x i8> %a0, <16 x i8> %a1) {
- ; CHECK: vpcomb
+ ; CHECK: vpcomgtb
%res = call <16 x i8> @llvm.x86.xop.vpcomgtb(<16 x i8> %a0, <16 x i8> %a1) ;
ret <16 x i8> %res
}
declare <16 x i8> @llvm.x86.xop.vpcomgtb(<16 x i8>, <16 x i8>) nounwind readnone
define <4 x i32> @test_int_x86_xop_vpcomgtd(<4 x i32> %a0, <4 x i32> %a1) {
- ; CHECK: vpcomd
+ ; CHECK: vpcomgtd
%res = call <4 x i32> @llvm.x86.xop.vpcomgtd(<4 x i32> %a0, <4 x i32> %a1) ;
ret <4 x i32> %res
}
declare <4 x i32> @llvm.x86.xop.vpcomgtd(<4 x i32>, <4 x i32>) nounwind readnone
define <2 x i64> @test_int_x86_xop_vpcomgtq(<2 x i64> %a0, <2 x i64> %a1) {
- ; CHECK: vpcomq
+ ; CHECK: vpcomgtq
%res = call <2 x i64> @llvm.x86.xop.vpcomgtq(<2 x i64> %a0, <2 x i64> %a1) ;
ret <2 x i64> %res
}
declare <2 x i64> @llvm.x86.xop.vpcomgtq(<2 x i64>, <2 x i64>) nounwind readnone
define <16 x i8> @test_int_x86_xop_vpcomgtub(<16 x i8> %a0, <16 x i8> %a1) {
- ; CHECK: vpcomub
+ ; CHECK: vpcomgtub
%res = call <16 x i8> @llvm.x86.xop.vpcomgtub(<16 x i8> %a0, <16 x i8> %a1) ;
ret <16 x i8> %res
}
declare <16 x i8> @llvm.x86.xop.vpcomgtub(<16 x i8>, <16 x i8>) nounwind readnone
define <4 x i32> @test_int_x86_xop_vpcomgtud(<4 x i32> %a0, <4 x i32> %a1) {
- ; CHECK: vpcomud
+ ; CHECK: vpcomgtud
%res = call <4 x i32> @llvm.x86.xop.vpcomgtud(<4 x i32> %a0, <4 x i32> %a1) ;
ret <4 x i32> %res
}
declare <4 x i32> @llvm.x86.xop.vpcomgtud(<4 x i32>, <4 x i32>) nounwind readnone
define <2 x i64> @test_int_x86_xop_vpcomgtuq(<2 x i64> %a0, <2 x i64> %a1) {
- ; CHECK: vpcomuq
+ ; CHECK: vpcomgtuq
%res = call <2 x i64> @llvm.x86.xop.vpcomgtuq(<2 x i64> %a0, <2 x i64> %a1) ;
ret <2 x i64> %res
}
declare <2 x i64> @llvm.x86.xop.vpcomgtuq(<2 x i64>, <2 x i64>) nounwind readnone
define <8 x i16> @test_int_x86_xop_vpcomgtuw(<8 x i16> %a0, <8 x i16> %a1) {
- ; CHECK: vpcomuw
+ ; CHECK: vpcomgtuw
%res = call <8 x i16> @llvm.x86.xop.vpcomgtuw(<8 x i16> %a0, <8 x i16> %a1) ;
ret <8 x i16> %res
}
declare <8 x i16> @llvm.x86.xop.vpcomgtuw(<8 x i16>, <8 x i16>) nounwind readnone
define <8 x i16> @test_int_x86_xop_vpcomgtw(<8 x i16> %a0, <8 x i16> %a1) {
- ; CHECK: vpcomw
+ ; CHECK: vpcomgtw
%res = call <8 x i16> @llvm.x86.xop.vpcomgtw(<8 x i16> %a0, <8 x i16> %a1) ;
ret <8 x i16> %res
}
declare <8 x i16> @llvm.x86.xop.vpcomgtw(<8 x i16>, <8 x i16>) nounwind readnone
define <16 x i8> @test_int_x86_xop_vpcomleb(<16 x i8> %a0, <16 x i8> %a1) {
- ; CHECK: vpcomb
+ ; CHECK: vpcomleb
%res = call <16 x i8> @llvm.x86.xop.vpcomleb(<16 x i8> %a0, <16 x i8> %a1) ;
ret <16 x i8> %res
}
declare <16 x i8> @llvm.x86.xop.vpcomleb(<16 x i8>, <16 x i8>) nounwind readnone
define <4 x i32> @test_int_x86_xop_vpcomled(<4 x i32> %a0, <4 x i32> %a1) {
- ; CHECK: vpcomd
+ ; CHECK: vpcomled
%res = call <4 x i32> @llvm.x86.xop.vpcomled(<4 x i32> %a0, <4 x i32> %a1) ;
ret <4 x i32> %res
}
declare <4 x i32> @llvm.x86.xop.vpcomled(<4 x i32>, <4 x i32>) nounwind readnone
define <2 x i64> @test_int_x86_xop_vpcomleq(<2 x i64> %a0, <2 x i64> %a1) {
- ; CHECK: vpcomq
+ ; CHECK: vpcomleq
%res = call <2 x i64> @llvm.x86.xop.vpcomleq(<2 x i64> %a0, <2 x i64> %a1) ;
ret <2 x i64> %res
}
declare <2 x i64> @llvm.x86.xop.vpcomleq(<2 x i64>, <2 x i64>) nounwind readnone
define <16 x i8> @test_int_x86_xop_vpcomleub(<16 x i8> %a0, <16 x i8> %a1) {
- ; CHECK: vpcomub
+ ; CHECK: vpcomleub
%res = call <16 x i8> @llvm.x86.xop.vpcomleub(<16 x i8> %a0, <16 x i8> %a1) ;
ret <16 x i8> %res
}
declare <16 x i8> @llvm.x86.xop.vpcomleub(<16 x i8>, <16 x i8>) nounwind readnone
define <4 x i32> @test_int_x86_xop_vpcomleud(<4 x i32> %a0, <4 x i32> %a1) {
- ; CHECK: vpcomud
+ ; CHECK: vpcomleud
%res = call <4 x i32> @llvm.x86.xop.vpcomleud(<4 x i32> %a0, <4 x i32> %a1) ;
ret <4 x i32> %res
}
declare <4 x i32> @llvm.x86.xop.vpcomleud(<4 x i32>, <4 x i32>) nounwind readnone
define <2 x i64> @test_int_x86_xop_vpcomleuq(<2 x i64> %a0, <2 x i64> %a1) {
- ; CHECK: vpcomuq
+ ; CHECK: vpcomleuq
%res = call <2 x i64> @llvm.x86.xop.vpcomleuq(<2 x i64> %a0, <2 x i64> %a1) ;
ret <2 x i64> %res
}
declare <2 x i64> @llvm.x86.xop.vpcomleuq(<2 x i64>, <2 x i64>) nounwind readnone
define <8 x i16> @test_int_x86_xop_vpcomleuw(<8 x i16> %a0, <8 x i16> %a1) {
- ; CHECK: vpcomuw
+ ; CHECK: vpcomleuw
%res = call <8 x i16> @llvm.x86.xop.vpcomleuw(<8 x i16> %a0, <8 x i16> %a1) ;
ret <8 x i16> %res
}
declare <8 x i16> @llvm.x86.xop.vpcomleuw(<8 x i16>, <8 x i16>) nounwind readnone
define <8 x i16> @test_int_x86_xop_vpcomlew(<8 x i16> %a0, <8 x i16> %a1) {
- ; CHECK: vpcomw
+ ; CHECK: vpcomlew
%res = call <8 x i16> @llvm.x86.xop.vpcomlew(<8 x i16> %a0, <8 x i16> %a1) ;
ret <8 x i16> %res
}
declare <8 x i16> @llvm.x86.xop.vpcomlew(<8 x i16>, <8 x i16>) nounwind readnone
define <16 x i8> @test_int_x86_xop_vpcomltb(<16 x i8> %a0, <16 x i8> %a1) {
- ; CHECK: vpcomb
+ ; CHECK: vpcomltb
%res = call <16 x i8> @llvm.x86.xop.vpcomltb(<16 x i8> %a0, <16 x i8> %a1) ;
ret <16 x i8> %res
}
declare <16 x i8> @llvm.x86.xop.vpcomltb(<16 x i8>, <16 x i8>) nounwind readnone
define <4 x i32> @test_int_x86_xop_vpcomltd(<4 x i32> %a0, <4 x i32> %a1) {
- ; CHECK: vpcomd
+ ; CHECK: vpcomltd
%res = call <4 x i32> @llvm.x86.xop.vpcomltd(<4 x i32> %a0, <4 x i32> %a1) ;
ret <4 x i32> %res
}
declare <4 x i32> @llvm.x86.xop.vpcomltd(<4 x i32>, <4 x i32>) nounwind readnone
define <2 x i64> @test_int_x86_xop_vpcomltq(<2 x i64> %a0, <2 x i64> %a1) {
- ; CHECK: vpcomq
+ ; CHECK: vpcomltq
%res = call <2 x i64> @llvm.x86.xop.vpcomltq(<2 x i64> %a0, <2 x i64> %a1) ;
ret <2 x i64> %res
}
declare <2 x i64> @llvm.x86.xop.vpcomltq(<2 x i64>, <2 x i64>) nounwind readnone
define <16 x i8> @test_int_x86_xop_vpcomltub(<16 x i8> %a0, <16 x i8> %a1) {
- ; CHECK: vpcomub
+ ; CHECK: vpcomltub
%res = call <16 x i8> @llvm.x86.xop.vpcomltub(<16 x i8> %a0, <16 x i8> %a1) ;
ret <16 x i8> %res
}
declare <16 x i8> @llvm.x86.xop.vpcomltub(<16 x i8>, <16 x i8>) nounwind readnone
define <4 x i32> @test_int_x86_xop_vpcomltud(<4 x i32> %a0, <4 x i32> %a1) {
- ; CHECK: vpcomud
+ ; CHECK: vpcomltud
%res = call <4 x i32> @llvm.x86.xop.vpcomltud(<4 x i32> %a0, <4 x i32> %a1) ;
ret <4 x i32> %res
}
declare <4 x i32> @llvm.x86.xop.vpcomltud(<4 x i32>, <4 x i32>) nounwind readnone
define <2 x i64> @test_int_x86_xop_vpcomltuq(<2 x i64> %a0, <2 x i64> %a1) {
- ; CHECK: vpcomuq
+ ; CHECK: vpcomltuq
%res = call <2 x i64> @llvm.x86.xop.vpcomltuq(<2 x i64> %a0, <2 x i64> %a1) ;
ret <2 x i64> %res
}
declare <2 x i64> @llvm.x86.xop.vpcomltuq(<2 x i64>, <2 x i64>) nounwind readnone
define <8 x i16> @test_int_x86_xop_vpcomltuw(<8 x i16> %a0, <8 x i16> %a1) {
- ; CHECK: vpcomuw
+ ; CHECK: vpcomltuw
%res = call <8 x i16> @llvm.x86.xop.vpcomltuw(<8 x i16> %a0, <8 x i16> %a1) ;
ret <8 x i16> %res
}
declare <8 x i16> @llvm.x86.xop.vpcomltuw(<8 x i16>, <8 x i16>) nounwind readnone
define <8 x i16> @test_int_x86_xop_vpcomltw(<8 x i16> %a0, <8 x i16> %a1) {
- ; CHECK: vpcomw
+ ; CHECK: vpcomltw
%res = call <8 x i16> @llvm.x86.xop.vpcomltw(<8 x i16> %a0, <8 x i16> %a1) ;
ret <8 x i16> %res
}
declare <8 x i16> @llvm.x86.xop.vpcomltw(<8 x i16>, <8 x i16>) nounwind readnone
define <16 x i8> @test_int_x86_xop_vpcomneb(<16 x i8> %a0, <16 x i8> %a1) {
- ; CHECK: vpcomb
+ ; CHECK: vpcomneqb
%res = call <16 x i8> @llvm.x86.xop.vpcomneb(<16 x i8> %a0, <16 x i8> %a1) ;
ret <16 x i8> %res
}
declare <16 x i8> @llvm.x86.xop.vpcomneb(<16 x i8>, <16 x i8>) nounwind readnone
define <4 x i32> @test_int_x86_xop_vpcomned(<4 x i32> %a0, <4 x i32> %a1) {
- ; CHECK: vpcomd
+ ; CHECK: vpcomneqd
%res = call <4 x i32> @llvm.x86.xop.vpcomned(<4 x i32> %a0, <4 x i32> %a1) ;
ret <4 x i32> %res
}
declare <4 x i32> @llvm.x86.xop.vpcomned(<4 x i32>, <4 x i32>) nounwind readnone
define <2 x i64> @test_int_x86_xop_vpcomneq(<2 x i64> %a0, <2 x i64> %a1) {
- ; CHECK: vpcomq
+ ; CHECK: vpcomneqq
%res = call <2 x i64> @llvm.x86.xop.vpcomneq(<2 x i64> %a0, <2 x i64> %a1) ;
ret <2 x i64> %res
}
declare <2 x i64> @llvm.x86.xop.vpcomneq(<2 x i64>, <2 x i64>) nounwind readnone
define <16 x i8> @test_int_x86_xop_vpcomneub(<16 x i8> %a0, <16 x i8> %a1) {
- ; CHECK: vpcomub
+ ; CHECK: vpcomnequb
%res = call <16 x i8> @llvm.x86.xop.vpcomneub(<16 x i8> %a0, <16 x i8> %a1) ;
ret <16 x i8> %res
}
declare <16 x i8> @llvm.x86.xop.vpcomneub(<16 x i8>, <16 x i8>) nounwind readnone
define <4 x i32> @test_int_x86_xop_vpcomneud(<4 x i32> %a0, <4 x i32> %a1) {
- ; CHECK: vpcomud
+ ; CHECK: vpcomnequd
%res = call <4 x i32> @llvm.x86.xop.vpcomneud(<4 x i32> %a0, <4 x i32> %a1) ;
ret <4 x i32> %res
}
declare <4 x i32> @llvm.x86.xop.vpcomneud(<4 x i32>, <4 x i32>) nounwind readnone
define <2 x i64> @test_int_x86_xop_vpcomneuq(<2 x i64> %a0, <2 x i64> %a1) {
- ; CHECK: vpcomuq
+ ; CHECK: vpcomnequq
%res = call <2 x i64> @llvm.x86.xop.vpcomneuq(<2 x i64> %a0, <2 x i64> %a1) ;
ret <2 x i64> %res
}
declare <2 x i64> @llvm.x86.xop.vpcomneuq(<2 x i64>, <2 x i64>) nounwind readnone
define <8 x i16> @test_int_x86_xop_vpcomneuw(<8 x i16> %a0, <8 x i16> %a1) {
- ; CHECK: vpcomuw
+ ; CHECK: vpcomnequw
%res = call <8 x i16> @llvm.x86.xop.vpcomneuw(<8 x i16> %a0, <8 x i16> %a1) ;
ret <8 x i16> %res
}
declare <8 x i16> @llvm.x86.xop.vpcomneuw(<8 x i16>, <8 x i16>) nounwind readnone
define <8 x i16> @test_int_x86_xop_vpcomnew(<8 x i16> %a0, <8 x i16> %a1) {
- ; CHECK: vpcomw
+ ; CHECK: vpcomneqw
%res = call <8 x i16> @llvm.x86.xop.vpcomnew(<8 x i16> %a0, <8 x i16> %a1) ;
ret <8 x i16> %res
}
declare <8 x i16> @llvm.x86.xop.vpcomnew(<8 x i16>, <8 x i16>) nounwind readnone
define <16 x i8> @test_int_x86_xop_vpcomtrueb(<16 x i8> %a0, <16 x i8> %a1) {
- ; CHECK: vpcomb
+ ; CHECK: vpcomtrueb
%res = call <16 x i8> @llvm.x86.xop.vpcomtrueb(<16 x i8> %a0, <16 x i8> %a1) ;
ret <16 x i8> %res
}
declare <16 x i8> @llvm.x86.xop.vpcomtrueb(<16 x i8>, <16 x i8>) nounwind readnone
define <4 x i32> @test_int_x86_xop_vpcomtrued(<4 x i32> %a0, <4 x i32> %a1) {
- ; CHECK: vpcomd
+ ; CHECK: vpcomtrued
%res = call <4 x i32> @llvm.x86.xop.vpcomtrued(<4 x i32> %a0, <4 x i32> %a1) ;
ret <4 x i32> %res
}
declare <4 x i32> @llvm.x86.xop.vpcomtrued(<4 x i32>, <4 x i32>) nounwind readnone
define <2 x i64> @test_int_x86_xop_vpcomtrueq(<2 x i64> %a0, <2 x i64> %a1) {
- ; CHECK: vpcomq
+ ; CHECK: vpcomtrueq
%res = call <2 x i64> @llvm.x86.xop.vpcomtrueq(<2 x i64> %a0, <2 x i64> %a1) ;
ret <2 x i64> %res
}
declare <2 x i64> @llvm.x86.xop.vpcomtrueq(<2 x i64>, <2 x i64>) nounwind readnone
define <16 x i8> @test_int_x86_xop_vpcomtrueub(<16 x i8> %a0, <16 x i8> %a1) {
- ; CHECK: vpcomub
+ ; CHECK: vpcomtrueub
%res = call <16 x i8> @llvm.x86.xop.vpcomtrueub(<16 x i8> %a0, <16 x i8> %a1) ;
ret <16 x i8> %res
}
declare <16 x i8> @llvm.x86.xop.vpcomtrueub(<16 x i8>, <16 x i8>) nounwind readnone
define <4 x i32> @test_int_x86_xop_vpcomtrueud(<4 x i32> %a0, <4 x i32> %a1) {
- ; CHECK: vpcomud
+ ; CHECK: vpcomtrueud
%res = call <4 x i32> @llvm.x86.xop.vpcomtrueud(<4 x i32> %a0, <4 x i32> %a1) ;
ret <4 x i32> %res
}
declare <4 x i32> @llvm.x86.xop.vpcomtrueud(<4 x i32>, <4 x i32>) nounwind readnone
define <2 x i64> @test_int_x86_xop_vpcomtrueuq(<2 x i64> %a0, <2 x i64> %a1) {
- ; CHECK: vpcomuq
+ ; CHECK: vpcomtrueuq
%res = call <2 x i64> @llvm.x86.xop.vpcomtrueuq(<2 x i64> %a0, <2 x i64> %a1) ;
ret <2 x i64> %res
}
declare <2 x i64> @llvm.x86.xop.vpcomtrueuq(<2 x i64>, <2 x i64>) nounwind readnone
define <8 x i16> @test_int_x86_xop_vpcomtrueuw(<8 x i16> %a0, <8 x i16> %a1) {
- ; CHECK: vpcomuw
+ ; CHECK: vpcomtrueuw
%res = call <8 x i16> @llvm.x86.xop.vpcomtrueuw(<8 x i16> %a0, <8 x i16> %a1) ;
ret <8 x i16> %res
}
declare <8 x i16> @llvm.x86.xop.vpcomtrueuw(<8 x i16>, <8 x i16>) nounwind readnone
define <8 x i16> @test_int_x86_xop_vpcomtruew(<8 x i16> %a0, <8 x i16> %a1) {
- ; CHECK: vpcomw
+ ; CHECK: vpcomtruew
%res = call <8 x i16> @llvm.x86.xop.vpcomtruew(<8 x i16> %a0, <8 x i16> %a1) ;
ret <8 x i16> %res
}
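; Editor's note, not part of the original patch: every CHECK above was
; tightened from the base mnemonic (e.g. "vpcomb") to the condition-folded
; alias the AsmPrinter actually emits (e.g. "vpcomltb"), so a wrongly
; selected comparison immediate now fails the test instead of slipping past
; a too-loose pattern. A minimal sketch of the pattern being verified,
; assuming an XOP-capable target such as -mcpu=bdver1:
;
;   ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=bdver1 | FileCheck %s
;   define <16 x i8> @lt(<16 x i8> %a, <16 x i8> %b) {
;   ; CHECK: vpcomltb
;     %r = call <16 x i8> @llvm.x86.xop.vpcomltb(<16 x i8> %a, <16 x i8> %b)
;     ret <16 x i8> %r
;   }
;   declare <16 x i8> @llvm.x86.xop.vpcomltb(<16 x i8>, <16 x i8>) nounwind readnone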
diff --git a/test/CodeGen/X86/xor.ll b/test/CodeGen/X86/xor.ll
index fd8e1b4..ea84a3b 100644
--- a/test/CodeGen/X86/xor.ll
+++ b/test/CodeGen/X86/xor.ll
@@ -1,6 +1,6 @@
-; RUN: llc < %s -mcpu=corei7 -march=x86 -mattr=+sse2 | FileCheck %s -check-prefix=X32
-; RUN: llc < %s -mcpu=corei7 -mtriple=x86_64-linux | FileCheck %s -check-prefix=X64
-; RUN: llc < %s -mcpu=corei7 -mtriple=x86_64-win32 | FileCheck %s -check-prefix=X64
+; RUN: llc < %s -march=x86 -mattr=+sse2 | FileCheck %s -check-prefix=X32
+; RUN: llc < %s -mtriple=x86_64-linux -mattr=+sse2 | FileCheck %s -check-prefix=X64
+; RUN: llc < %s -mtriple=x86_64-win32 -mattr=+sse2 | FileCheck %s -check-prefix=X64
; Though it is undefined, we want xor undef,undef to produce zero.
define <4 x i32> @test1() nounwind {
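; Editor's note, not part of the original patch: dropping -mcpu=corei7
; presumably keeps the output independent of one CPU's scheduling model,
; while -mattr=+sse2 preserves the SSE2 instructions the vector cases rely
; on (redundant but harmless on x86-64, where SSE2 is baseline). The undef
; case the comment above describes would be checked along these lines
; (illustrative, not the file's literal CHECK lines):
;
;   define <4 x i32> @test1() nounwind {
;     %t = xor <4 x i32> undef, undef   ; undefined, but expected to fold to zero
;     ret <4 x i32> %t
;   }
;   ; X32: pxor %xmm0, %xmm0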
diff --git a/test/CodeGen/XCore/dwarf_debug.ll b/test/CodeGen/XCore/dwarf_debug.ll
index 47db82d..8c9c47d 100644
--- a/test/CodeGen/XCore/dwarf_debug.ll
+++ b/test/CodeGen/XCore/dwarf_debug.ll
@@ -13,7 +13,7 @@ define i32 @f(i32 %a) {
entry:
%a.addr = alloca i32, align 4
store i32 %a, i32* %a.addr, align 4
- call void @llvm.dbg.declare(metadata !{i32* %a.addr}, metadata !11, metadata !{metadata !"0x102"}), !dbg !12
+ call void @llvm.dbg.declare(metadata i32* %a.addr, metadata !11, metadata !{!"0x102"}), !dbg !12
%0 = load i32* %a.addr, align 4, !dbg !12
%add = add nsw i32 %0, 1, !dbg !12
ret i32 %add, !dbg !12
@@ -23,17 +23,17 @@ declare void @llvm.dbg.declare(metadata, metadata, metadata)
!llvm.dbg.cu = !{!0}
!llvm.module.flags = !{!9, !10}
-!0 = metadata !{metadata !"0x11\0012\00\000\00\000\00\001", metadata !1, metadata !2, metadata !2, metadata !3, metadata !2, metadata !2} ; [ DW_TAG_compile_unit ]
-!1 = metadata !{metadata !"", metadata !""}
-!2 = metadata !{}
-!3 = metadata !{metadata !4}
-!4 = metadata !{metadata !"0x2e\00f\00f\00\002\000\001\000\006\00256\000\002", metadata !1, metadata !5, metadata !6, null, i32 (i32)* @f, null, null, metadata !2} ; [ DW_TAG_subprogram ]
-!5 = metadata !{metadata !"0x29", metadata !1} ; [ DW_TAG_file_type ]
-!6 = metadata !{metadata !"0x15\00\000\000\000\000\000\000", i32 0, null, null, metadata !7, null, null, null} ; [ DW_TAG_subroutine_type ]
-!7 = metadata !{metadata !8, metadata !8}
-!8 = metadata !{metadata !"0x24\00int\000\0032\0032\000\000\005", null, null} ; [ DW_TAG_base_type ]
-!9 = metadata !{i32 2, metadata !"Dwarf Version", i32 4}
-!10 = metadata !{i32 2, metadata !"Debug Info Version", i32 2}
-!11 = metadata !{metadata !"0x101\00a\0016777218\000", metadata !4, metadata !5, metadata !8} ; [ DW_TAG_arg_variable ]
-!12 = metadata !{i32 2, i32 0, metadata !4, null}
+!0 = !{!"0x11\0012\00\000\00\000\00\001", !1, !2, !2, !3, !2, !2} ; [ DW_TAG_compile_unit ]
+!1 = !{!"", !""}
+!2 = !{}
+!3 = !{!4}
+!4 = !{!"0x2e\00f\00f\00\002\000\001\000\006\00256\000\002", !1, !5, !6, null, i32 (i32)* @f, null, null, !2} ; [ DW_TAG_subprogram ]
+!5 = !{!"0x29", !1} ; [ DW_TAG_file_type ]
+!6 = !{!"0x15\00\000\000\000\000\000\000", i32 0, null, null, !7, null, null, null} ; [ DW_TAG_subroutine_type ]
+!7 = !{!8, !8}
+!8 = !{!"0x24\00int\000\0032\0032\000\000\005", null, null} ; [ DW_TAG_base_type ]
+!9 = !{i32 2, !"Dwarf Version", i32 4}
+!10 = !{i32 2, !"Debug Info Version", i32 2}
+!11 = !{!"0x101\00a\0016777218\000", !4, !5, !8} ; [ DW_TAG_arg_variable ]
+!12 = !MDLocation(line: 2, scope: !4)
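; Editor's note, not part of the original patch: this hunk tracks LLVM's
; debug-info metadata overhaul (circa 3.6): the "metadata" keyword wrapping
; inside nodes is gone (plain !{...} now), and a bare location tuple such as
; !{i32 2, i32 0, !4, null} becomes the specialized form
; !MDLocation(line: 2, scope: !4). llvm.dbg.declare likewise takes its
; variable operand directly, as in the hunk above:
;
;   call void @llvm.dbg.declare(metadata i32* %a.addr, metadata !11,
;                               metadata !{!"0x102"}), !dbg !12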